-rw-r--r--.gitignore8
-rwxr-xr-xREADME31
-rw-r--r--SConstruct321
-rw-r--r--bson/bson-inl.h373
-rw-r--r--bson/bson.h45
-rw-r--r--bson/bsondemo/bsondemo.cpp6
-rw-r--r--bson/bsondemo/bsondemo.vcxproj8
-rw-r--r--bson/bsonelement.h50
-rw-r--r--bson/bsonmisc.h16
-rw-r--r--bson/bsonobj.h134
-rw-r--r--bson/bsonobjbuilder.h121
-rw-r--r--bson/bsonobjiterator.h54
-rw-r--r--bson/inline_decls.h69
-rw-r--r--bson/oid.cpp31
-rw-r--r--bson/ordering.h23
-rw-r--r--bson/stringdata.h52
-rw-r--r--bson/util/atomic_int.h11
-rw-r--r--bson/util/builder.h87
-rw-r--r--bson/util/misc.h21
-rwxr-xr-xbuildscripts/errorcodes.py62
-rw-r--r--buildscripts/hacks_ubuntu.py10
-rw-r--r--buildscripts/makealldists.py291
-rw-r--r--buildscripts/makedist.py940
-rw-r--r--buildscripts/mergerepositories.py194
-rw-r--r--buildscripts/packager.py982
-rwxr-xr-xbuildscripts/smoke.py42
-rw-r--r--client/clientOnly.cpp5
-rw-r--r--client/connpool.cpp223
-rw-r--r--client/connpool.h119
-rw-r--r--client/dbclient.cpp147
-rw-r--r--client/dbclient.h100
-rw-r--r--client/dbclient_rs.cpp335
-rw-r--r--client/dbclient_rs.h86
-rw-r--r--client/dbclientcursor.cpp123
-rw-r--r--client/dbclientcursor.h82
-rw-r--r--client/distlock.cpp921
-rw-r--r--client/distlock.h164
-rw-r--r--client/distlock_test.cpp394
-rw-r--r--client/examples/clientTest.cpp29
-rw-r--r--client/examples/httpClientTest.cpp33
-rw-r--r--client/examples/insert_demo.cpp47
-rw-r--r--client/examples/rs.cpp80
-rwxr-xr-xclient/examples/simple_client_demo.vcxproj92
-rwxr-xr-xclient/examples/simple_client_demo.vcxproj.filters21
-rw-r--r--client/examples/whereExample.cpp3
-rw-r--r--client/mongo_client_lib.cpp31
-rw-r--r--client/parallel.cpp369
-rw-r--r--client/parallel.h22
-rw-r--r--client/redef_macros.h5
-rw-r--r--client/simple_client_demo.cpp36
-rw-r--r--client/syncclusterconnection.cpp42
-rw-r--r--client/syncclusterconnection.h22
-rw-r--r--client/undef_macros.h2
-rw-r--r--db/btree.cpp1198
-rw-r--r--db/btree.h942
-rw-r--r--db/btreebuilder.cpp184
-rw-r--r--db/btreebuilder.h53
-rw-r--r--db/btreecursor.cpp394
-rw-r--r--db/cap.cpp12
-rw-r--r--db/client.cpp241
-rw-r--r--db/client.h28
-rw-r--r--db/clientcursor.cpp276
-rw-r--r--db/clientcursor.h56
-rw-r--r--db/cloner.cpp123
-rw-r--r--db/cloner.h39
-rw-r--r--db/cmdline.cpp125
-rw-r--r--db/cmdline.h69
-rw-r--r--db/commands.cpp51
-rw-r--r--db/commands.h34
-rw-r--r--db/commands/distinct.cpp29
-rw-r--r--db/commands/find_and_modify.cpp153
-rw-r--r--db/commands/group.cpp37
-rw-r--r--db/commands/isself.cpp18
-rw-r--r--db/commands/mr.cpp465
-rw-r--r--db/commands/mr.h49
-rw-r--r--db/common.cpp39
-rw-r--r--db/compact.cpp361
-rw-r--r--db/compact.h50
-rw-r--r--db/concurrency.h9
-rw-r--r--db/curop.h98
-rw-r--r--db/cursor.h20
-rw-r--r--db/database.cpp106
-rw-r--r--db/database.h20
-rw-r--r--db/db.cpp489
-rw-r--r--db/db.h38
-rwxr-xr-x[-rw-r--r--]db/db.vcxproj1627
-rwxr-xr-xdb/db.vcxproj.filters96
-rwxr-xr-xdb/db_10.sln28
-rw-r--r--db/dbcommands.cpp495
-rw-r--r--db/dbcommands_admin.cpp226
-rw-r--r--db/dbcommands_generic.cpp150
-rw-r--r--db/dbeval.cpp6
-rw-r--r--db/dbhelpers.cpp36
-rw-r--r--db/dbmessage.cpp108
-rw-r--r--db/dbmessage.h137
-rw-r--r--db/dbwebserver.cpp35
-rw-r--r--db/diskloc.h16
-rw-r--r--db/driverHelpers.cpp2
-rw-r--r--db/dur.cpp292
-rw-r--r--db/dur.h10
-rw-r--r--db/dur_commitjob.cpp19
-rw-r--r--db/dur_commitjob.h19
-rw-r--r--db/dur_journal.cpp240
-rw-r--r--db/dur_journal.h16
-rw-r--r--db/dur_journalformat.h58
-rw-r--r--db/dur_journalimpl.h22
-rw-r--r--db/dur_preplogbuffer.cpp52
-rw-r--r--db/dur_recover.cpp186
-rw-r--r--db/dur_recover.h15
-rw-r--r--db/dur_stats.h3
-rw-r--r--db/dur_writetodatafiles.cpp24
-rw-r--r--db/durop.cpp3
-rw-r--r--db/durop.h2
-rw-r--r--db/extsort.cpp15
-rw-r--r--db/extsort.h59
-rw-r--r--db/geo/2d.cpp2937
-rw-r--r--db/geo/core.h90
-rw-r--r--db/geo/haystack.cpp18
-rw-r--r--db/index.cpp140
-rw-r--r--db/index.h85
-rw-r--r--db/indexkey.cpp381
-rw-r--r--db/indexkey.h29
-rw-r--r--db/instance.cpp285
-rw-r--r--db/instance.h14
-rw-r--r--db/introspect.cpp61
-rw-r--r--db/introspect.h3
-rw-r--r--db/jsobj.cpp477
-rw-r--r--db/json.cpp21
-rw-r--r--db/key.cpp671
-rw-r--r--db/key.h112
-rw-r--r--db/lasterror.cpp4
-rw-r--r--db/matcher.cpp610
-rw-r--r--db/matcher.h132
-rw-r--r--db/matcher_covered.cpp32
-rw-r--r--db/modules/mms.cpp4
-rw-r--r--db/mongommf.cpp118
-rw-r--r--db/mongommf.h5
-rw-r--r--db/namespace-inl.h12
-rw-r--r--db/namespace.cpp27
-rw-r--r--db/namespace.h77
-rw-r--r--db/nonce.cpp50
-rw-r--r--db/nonce.h20
-rw-r--r--db/oplog.cpp216
-rw-r--r--db/oplog.h141
-rw-r--r--db/oplogreader.h51
-rw-r--r--db/ops/delete.cpp242
-rw-r--r--db/ops/delete.h33
-rw-r--r--db/ops/query.cpp (renamed from db/query.cpp)358
-rw-r--r--db/ops/query.h (renamed from db/query.h)154
-rw-r--r--db/ops/update.cpp (renamed from db/update.cpp)431
-rw-r--r--db/ops/update.h (renamed from db/update.h)47
-rw-r--r--db/pdfile.cpp880
-rw-r--r--db/pdfile.h94
-rw-r--r--db/projection.cpp2
-rw-r--r--db/projection.h2
-rw-r--r--db/queryoptimizer.cpp829
-rw-r--r--db/queryoptimizer.h543
-rw-r--r--db/queryoptimizercursor.cpp387
-rw-r--r--db/querypattern.cpp54
-rw-r--r--db/querypattern.h76
-rw-r--r--db/queryutil-inl.h153
-rw-r--r--db/queryutil.cpp716
-rw-r--r--db/queryutil.h744
-rw-r--r--db/record.cpp230
-rw-r--r--db/repl.cpp832
-rw-r--r--db/repl.h212
-rw-r--r--db/repl/connections.h15
-rw-r--r--db/repl/consensus.cpp118
-rw-r--r--db/repl/health.cpp55
-rw-r--r--db/repl/health.h14
-rw-r--r--db/repl/heartbeat.cpp89
-rw-r--r--db/repl/manager.cpp62
-rw-r--r--db/repl/multicmd.h6
-rw-r--r--db/repl/replset_commands.cpp97
-rw-r--r--db/repl/rs.cpp270
-rw-r--r--db/repl/rs.h256
-rw-r--r--db/repl/rs_config.cpp348
-rw-r--r--db/repl/rs_config.h144
-rw-r--r--db/repl/rs_initialsync.cpp135
-rw-r--r--db/repl/rs_initiate.cpp41
-rw-r--r--db/repl/rs_member.h19
-rw-r--r--db/repl/rs_rollback.cpp28
-rw-r--r--db/repl/rs_sync.cpp341
-rw-r--r--db/repl_block.cpp49
-rw-r--r--db/repl_block.h1
-rw-r--r--db/replpair.h238
-rw-r--r--db/replutil.h98
-rw-r--r--db/restapi.cpp12
-rw-r--r--db/scanandorder.cpp93
-rw-r--r--db/scanandorder.h120
-rw-r--r--db/security.cpp74
-rwxr-xr-x[-rw-r--r--]db/security.h87
-rw-r--r--db/security_commands.cpp175
-rw-r--r--db/security_common.cpp (renamed from db/security_key.cpp)35
-rw-r--r--db/security_common.h83
-rw-r--r--db/security_key.h47
-rw-r--r--db/stats/counters.h2
-rw-r--r--db/stats/snapshots.cpp14
-rw-r--r--db/stats/top.cpp12
-rw-r--r--dbtests/basictests.cpp77
-rw-r--r--dbtests/btreetests.cpp1702
-rw-r--r--dbtests/btreetests.inl1702
-rw-r--r--dbtests/clienttests.cpp2
-rw-r--r--dbtests/cursortests.cpp66
-rw-r--r--dbtests/dbtests.cpp3
-rw-r--r--dbtests/directclienttests.cpp31
-rw-r--r--dbtests/framework.cpp65
-rw-r--r--dbtests/jsobjtests.cpp381
-rw-r--r--dbtests/jsontests.cpp23
-rw-r--r--dbtests/jstests.cpp119
-rw-r--r--dbtests/mockdbclient.h97
-rw-r--r--dbtests/namespacetests.cpp500
-rw-r--r--dbtests/pairingtests.cpp344
-rw-r--r--dbtests/pdfiletests.cpp4
-rw-r--r--dbtests/perf/perftest.cpp70
-rw-r--r--dbtests/perftests.cpp691
-rw-r--r--dbtests/queryoptimizertests.cpp2885
-rw-r--r--dbtests/querytests.cpp179
-rw-r--r--dbtests/queryutiltests.cpp989
-rw-r--r--dbtests/repltests.cpp183
-rw-r--r--dbtests/socktests.cpp2
-rw-r--r--dbtests/spin_lock_test.cpp13
-rwxr-xr-xdbtests/test.sln26
-rw-r--r--dbtests/test.vcxproj1486
-rwxr-xr-xdbtests/test.vcxproj.filters275
-rw-r--r--dbtests/threadedtests.cpp389
-rw-r--r--dbtests/updatetests.cpp17
-rw-r--r--distsrc/README74
-rwxr-xr-x[-rw-r--r--]distsrc/client/SConstruct11
-rw-r--r--docs/errors.md1564
-rw-r--r--doxygenConfig2
-rw-r--r--jstests/ageoutjournalfiles.js16
-rw-r--r--jstests/and.js86
-rw-r--r--jstests/and2.js27
-rw-r--r--jstests/and3.js66
-rw-r--r--jstests/andor.js105
-rw-r--r--jstests/apitest_dbcollection.js2
-rw-r--r--jstests/array_match2.js25
-rw-r--r--jstests/array_match3.js13
-rw-r--r--jstests/arrayfind2.js3
-rw-r--r--jstests/arrayfind4.js22
-rw-r--r--jstests/arrayfind5.js23
-rw-r--r--jstests/auth/auth1.js5
-rw-r--r--jstests/auth/auth2.js23
-rw-r--r--jstests/auth/rename.js40
-rw-r--r--jstests/auth1.js17
-rw-r--r--jstests/auth2.js6
-rw-r--r--jstests/bench_test1.js16
-rw-r--r--jstests/bench_test2.js41
-rw-r--r--jstests/big_object1.js2
-rw-r--r--jstests/binData.js14
-rw-r--r--jstests/capped.js8
-rw-r--r--jstests/capped2.js10
-rw-r--r--jstests/capped5.js7
-rw-r--r--jstests/capped6.js2
-rw-r--r--jstests/capped8.js40
-rw-r--r--jstests/capped9.js28
-rw-r--r--jstests/cappeda.js33
-rw-r--r--jstests/compact.js37
-rwxr-xr-xjstests/compact_speed_test.js61
-rw-r--r--jstests/date1.js5
-rw-r--r--jstests/date2.js13
-rw-r--r--jstests/date3.js31
-rw-r--r--jstests/dbcase.js16
-rw-r--r--jstests/dbcase2.js9
-rw-r--r--jstests/dbhash.js10
-rw-r--r--jstests/delx.js1
-rw-r--r--jstests/disk/directoryperdb.js2
-rw-r--r--jstests/disk/diskfull.js8
-rw-r--r--jstests/disk/newcollection.js20
-rw-r--r--jstests/disk/norepeat.js2
-rw-r--r--jstests/disk/quota.js47
-rw-r--r--jstests/disk/quota2.js38
-rw-r--r--jstests/disk/repair3.js2
-rw-r--r--jstests/disk/repair5.js43
-rw-r--r--jstests/distinct1.js1
-rw-r--r--jstests/distinct_index1.js10
-rw-r--r--jstests/drop2.js2
-rw-r--r--jstests/drop3.js29
-rw-r--r--jstests/dropdb.js17
-rw-r--r--jstests/dropdb_race.js44
-rw-r--r--jstests/dur/closeall.js76
-rw-r--r--jstests/dur/data/empty.bson0
-rw-r--r--jstests/dur/diskfull.js51
-rw-r--r--jstests/dur/dropdb.js21
-rwxr-xr-xjstests/dur/dur1.js25
-rwxr-xr-xjstests/dur/dur1_tool.js152
-rw-r--r--jstests/dur/indexbg.js7
-rw-r--r--jstests/dur/indexbg2.js19
-rwxr-xr-xjstests/dur/manyRestart.js4
-rw-r--r--jstests/eval_nolock.js2
-rw-r--r--jstests/evalb.js2
-rw-r--r--jstests/evalc.js11
-rw-r--r--jstests/evald.js10
-rw-r--r--jstests/exists3.js21
-rw-r--r--jstests/exists4.js20
-rw-r--r--jstests/exists5.js33
-rw-r--r--jstests/exists6.js63
-rw-r--r--jstests/exists7.js21
-rw-r--r--jstests/exists8.js76
-rw-r--r--jstests/exists9.js41
-rw-r--r--jstests/find8.js27
-rw-r--r--jstests/find_and_modify2.js6
-rw-r--r--jstests/fsync.js17
-rw-r--r--jstests/geo10.js21
-rw-r--r--jstests/geo4.js2
-rw-r--r--jstests/geo_array0.js25
-rw-r--r--jstests/geo_array1.js30
-rw-r--r--jstests/geo_array2.js163
-rw-r--r--jstests/geo_borders.js263
-rw-r--r--jstests/geo_center_sphere2.js158
-rw-r--r--jstests/geo_distinct.js16
-rw-r--r--jstests/geo_fiddly_box.js44
-rw-r--r--jstests/geo_fiddly_box2.js32
-rw-r--r--jstests/geo_group.js35
-rw-r--r--jstests/geo_mapreduce.js56
-rw-r--r--jstests/geo_mapreduce2.js36
-rw-r--r--jstests/geo_multinest0.js63
-rw-r--r--jstests/geo_multinest1.js37
-rw-r--r--jstests/geo_oob_sphere.js42
-rw-r--r--jstests/geo_poly_edge.js22
-rw-r--r--jstests/geo_poly_line.js17
-rw-r--r--jstests/geo_polygon1.js74
-rw-r--r--jstests/geo_polygon2.js266
-rw-r--r--jstests/geo_polygon3.js54
-rw-r--r--jstests/geo_regex0.js18
-rw-r--r--jstests/geo_small_large.js151
-rw-r--r--jstests/geo_uniqueDocs.js38
-rw-r--r--jstests/getlog1.js24
-rw-r--r--jstests/group7.js43
-rw-r--r--jstests/hint1.js12
-rw-r--r--jstests/idhack.js23
-rw-r--r--jstests/in8.js23
-rw-r--r--jstests/in9.js35
-rw-r--r--jstests/ina.js15
-rw-r--r--jstests/index11.js30
-rw-r--r--jstests/index9.js8
-rw-r--r--jstests/index_big1.js39
-rwxr-xr-xjstests/index_bigkeys.js78
-rw-r--r--jstests/index_check5.js2
-rw-r--r--jstests/index_check8.js12
-rw-r--r--jstests/index_fornew.js13
-rw-r--r--jstests/index_maxkey.js27
-rwxr-xr-xjstests/indexbindata.js0
-rw-r--r--jstests/indexk.js58
-rw-r--r--jstests/indexl.js27
-rw-r--r--jstests/indexm.js38
-rw-r--r--jstests/indexn.js41
-rw-r--r--jstests/indexo.js32
-rw-r--r--jstests/indexp.js58
-rw-r--r--jstests/indexq.js14
-rw-r--r--jstests/indexr.js47
-rw-r--r--jstests/indexs.js21
-rw-r--r--jstests/indext.js21
-rw-r--r--jstests/indexu.js137
-rw-r--r--jstests/indexv.js18
-rw-r--r--jstests/indexw.js14
-rw-r--r--jstests/insert1.js3
-rw-r--r--jstests/libs/geo_near_random.js37
-rw-r--r--jstests/libs/key1 (renamed from jstests/replsets/key1)0
-rw-r--r--jstests/libs/key2 (renamed from jstests/replsets/key2)0
-rw-r--r--jstests/libs/testconfig4
-rw-r--r--jstests/mr_errorhandling.js2
-rw-r--r--jstests/mr_merge2.js37
-rw-r--r--jstests/numberint.js92
-rw-r--r--jstests/numberlong2.js32
-rw-r--r--jstests/numberlong3.js25
-rw-r--r--jstests/or1.js2
-rw-r--r--jstests/or2.js3
-rw-r--r--jstests/or3.js4
-rw-r--r--jstests/or4.js2
-rw-r--r--jstests/ord.js1
-rw-r--r--jstests/org.js19
-rw-r--r--jstests/orh.js17
-rw-r--r--jstests/ori.js48
-rw-r--r--jstests/orj.js121
-rw-r--r--jstests/ork.js11
-rw-r--r--jstests/orl.js13
-rw-r--r--jstests/orm.js29
-rw-r--r--jstests/orn.js22
-rw-r--r--jstests/profile1.js144
-rw-r--r--jstests/profile2.js19
-rw-r--r--jstests/profile3.js26
-rw-r--r--jstests/push.js36
-rw-r--r--jstests/query1.js3
-rw-r--r--jstests/regex2.js8
-rw-r--r--jstests/regex6.js11
-rw-r--r--jstests/regexa.js19
-rw-r--r--jstests/remove10.js28
-rw-r--r--jstests/remove2.js5
-rw-r--r--jstests/remove9.js16
-rw-r--r--jstests/rename.js19
-rw-r--r--jstests/repl/basic1.js19
-rw-r--r--jstests/repl/dbcase.js95
-rw-r--r--jstests/repl/drop_dups.js68
-rw-r--r--jstests/repl/mastermaster1.js23
-rw-r--r--jstests/repl/mod_move.js69
-rw-r--r--jstests/repl/pair1.js100
-rw-r--r--jstests/repl/pair2.js71
-rw-r--r--jstests/repl/pair3.js245
-rw-r--r--jstests/repl/pair4.js160
-rw-r--r--jstests/repl/pair5.js95
-rw-r--r--jstests/repl/pair6.js115
-rw-r--r--jstests/repl/pair7.js85
-rw-r--r--jstests/repl/repl2.js29
-rw-r--r--jstests/repl/repl3.js58
-rw-r--r--jstests/repl/replacePeer1.js82
-rw-r--r--jstests/repl/replacePeer2.js86
-rw-r--r--jstests/repl/snapshot2.js72
-rw-r--r--jstests/repl/snapshot3.js53
-rw-r--r--jstests/replsets/auth1.js35
-rw-r--r--jstests/replsets/cloneDb.js18
-rw-r--r--jstests/replsets/config1.js21
-rwxr-xr-xjstests/replsets/downstream.js36
-rw-r--r--jstests/replsets/fastsync.js151
-rw-r--r--jstests/replsets/initial_sync1.js5
-rw-r--r--jstests/replsets/initial_sync3.js37
-rw-r--r--jstests/replsets/maintenance.js32
-rw-r--r--jstests/replsets/majority.js60
-rw-r--r--jstests/replsets/randomcommands1.js29
-rw-r--r--jstests/replsets/reconfig.js69
-rw-r--r--jstests/replsets/remove1.js130
-rw-r--r--jstests/replsets/replset1.js22
-rw-r--r--jstests/replsets/replset3.js2
-rw-r--r--jstests/replsets/replset5.js88
-rw-r--r--jstests/replsets/replsetadd.js34
-rw-r--r--jstests/replsets/replsetarb1.js33
-rw-r--r--jstests/replsets/replsetarb2.js13
-rw-r--r--jstests/replsets/replsetarb3.js144
-rw-r--r--jstests/replsets/replsetfreeze.js4
-rw-r--r--jstests/replsets/replsetrestart1.js14
-rw-r--r--jstests/replsets/replsetrestart2.js8
-rw-r--r--jstests/replsets/rollback2.js19
-rw-r--r--jstests/replsets/rollback4.js117
-rw-r--r--jstests/replsets/rslib.js44
-rw-r--r--jstests/replsets/slavedelay1.js104
-rw-r--r--jstests/replsets/stale_clustered.js101
-rw-r--r--jstests/replsets/stepdown.js142
-rwxr-xr-xjstests/replsets/stepdown2.js139
-rw-r--r--jstests/replsets/sync1.js396
-rw-r--r--jstests/replsets/sync2.js48
-rw-r--r--jstests/replsets/tags.js154
-rw-r--r--jstests/replsets/tags2.js44
-rw-r--r--jstests/replsets/toostale.js34
-rw-r--r--jstests/replsets/twosets.js35
-rw-r--r--jstests/set7.js16
-rw-r--r--jstests/sharding/addshard1.js2
-rw-r--r--jstests/sharding/addshard4.js26
-rw-r--r--jstests/sharding/array_shard_key.js127
-rw-r--r--jstests/sharding/auth.js177
-rw-r--r--jstests/sharding/count_slaveok.js69
-rw-r--r--jstests/sharding/drop_sharded_db.js62
-rw-r--r--jstests/sharding/features2.js11
-rw-r--r--jstests/sharding/features3.js61
-rw-r--r--jstests/sharding/group_slaveok.js68
-rw-r--r--jstests/sharding/index1.js174
-rw-r--r--jstests/sharding/migrateBig.js2
-rw-r--r--jstests/sharding/migrateMemory.js54
-rw-r--r--jstests/sharding/multi_mongos1.js3
-rw-r--r--jstests/sharding/multi_mongos2.js61
-rw-r--r--jstests/sharding/parallel.js38
-rw-r--r--jstests/sharding/shard3.js12
-rw-r--r--jstests/sharding/shard6.js3
-rw-r--r--jstests/sharding/shard_insert_getlasterror_w2.js3
-rw-r--r--jstests/sharding/shard_keycount.js45
-rw-r--r--jstests/sharding/sharding_with_keyfile.js69
-rwxr-xr-xjstests/sharding/sharding_with_keyfile.key3
-rw-r--r--jstests/sharding/sync6.js81
-rw-r--r--jstests/sharding/sync7.js63
-rw-r--r--jstests/shell1.js6
-rw-r--r--jstests/shellkillop.js126
-rw-r--r--jstests/shellspawn.js6
-rw-r--r--jstests/skip1.js15
-rw-r--r--jstests/slowNightly/background.js51
-rw-r--r--jstests/slowNightly/command_line_parsing.js12
-rw-r--r--jstests/slowNightly/dur_big_atomic_update.js17
-rw-r--r--jstests/slowNightly/dur_remove_old_journals.js27
-rw-r--r--jstests/slowNightly/geo_axis_aligned.js108
-rw-r--r--jstests/slowNightly/geo_mnypts.js51
-rw-r--r--jstests/slowNightly/geo_polygon.js53
-rw-r--r--jstests/slowNightly/index_check10.js133
-rw-r--r--jstests/slowNightly/index_check9.js2
-rw-r--r--jstests/slowNightly/replReads.js108
-rw-r--r--jstests/slowNightly/replsets_priority1.js173
-rw-r--r--jstests/slowNightly/sharding_balance1.js3
-rw-r--r--jstests/slowNightly/sharding_balance4.js8
-rw-r--r--jstests/slowNightly/sharding_migrateBigObject.js61
-rw-r--r--jstests/slowNightly/sharding_multiple_ns_rs.js49
-rw-r--r--jstests/slowNightly/sharding_passthrough.js16
-rw-r--r--jstests/slowNightly/sharding_rs1.js8
-rw-r--r--jstests/slowNightly/sharding_rs2.js22
-rw-r--r--jstests/slowNightly/sharding_rs_arb1.js40
-rw-r--r--jstests/slowNightly/sync6_slow.js82
-rw-r--r--jstests/slowWeekly/geo_full.js487
-rw-r--r--jstests/slowWeekly/geo_mnypts_plus_fields.js98
-rw-r--r--jstests/slowWeekly/query_yield2.js2
-rw-r--r--jstests/slowWeekly/repair2.js29
-rw-r--r--jstests/slowWeekly/update_yield1.js2
-rw-r--r--jstests/sort10.js48
-rw-r--r--jstests/sort2.js22
-rw-r--r--jstests/sort7.js25
-rw-r--r--jstests/sort8.js30
-rw-r--r--jstests/sort9.js26
-rw-r--r--jstests/sorta.js26
-rw-r--r--jstests/tool/csv1.js8
-rw-r--r--jstests/tool/csvexport1.js45
-rw-r--r--jstests/tool/csvexport2.js31
-rw-r--r--jstests/tool/csvimport1.js40
-rw-r--r--jstests/tool/data/a.tsv2
-rw-r--r--jstests/tool/data/csvimport1.csv8
-rw-r--r--jstests/tool/data/dumprestore6/foo.bsonbin0 -> 44 bytes
-rw-r--r--jstests/tool/data/dumprestore6/system.indexes.bsonbin0 -> 144 bytes
-rw-r--r--jstests/tool/dumprestore5.js36
-rw-r--r--jstests/tool/dumprestore6.js27
-rw-r--r--jstests/tool/exportimport1.js29
-rw-r--r--jstests/tool/tsv1.js32
-rw-r--r--jstests/type2.js19
-rw-r--r--jstests/type3.js68
-rw-r--r--jstests/unique2.js53
-rw-r--r--jstests/uniqueness.js13
-rw-r--r--jstests/update.js13
-rw-r--r--jstests/update_blank1.js12
-rw-r--r--jstests/update_invalid1.js6
-rw-r--r--jstests/updatea.js6
-rw-r--r--jstests/updatef.js24
-rw-r--r--jstests/updateg.js17
-rw-r--r--pch.cpp10
-rw-r--r--pch.h36
-rw-r--r--rpm/mongo.spec4
-rw-r--r--rpm/mongod.conf6
-rw-r--r--s/balance.cpp68
-rw-r--r--s/balance.h4
-rw-r--r--s/balancer_policy.cpp20
-rw-r--r--s/chunk.cpp557
-rw-r--r--s/chunk.h103
-rw-r--r--s/client.cpp10
-rw-r--r--s/client.h6
-rw-r--r--s/commands_admin.cpp204
-rw-r--r--s/commands_public.cpp559
-rw-r--r--s/config.cpp264
-rw-r--r--s/config.h19
-rw-r--r--s/config_migrate.cpp2
-rw-r--r--s/cursors.cpp24
-rw-r--r--s/d_chunk_manager.cpp25
-rw-r--r--s/d_chunk_manager.h17
-rw-r--r--s/d_logic.cpp14
-rw-r--r--s/d_logic.h3
-rw-r--r--s/d_migrate.cpp258
-rw-r--r--s/d_split.cpp100
-rw-r--r--s/d_state.cpp48
-rw-r--r--s/d_writeback.cpp80
-rw-r--r--s/d_writeback.h41
-rw-r--r--s/dbgrid.vcxproj1201
-rwxr-xr-xs/dbgrid.vcxproj.filters137
-rw-r--r--s/grid.cpp52
-rw-r--r--s/mr_shard.cpp312
-rw-r--r--s/mr_shard.h232
-rw-r--r--s/request.cpp28
-rw-r--r--s/request.h4
-rw-r--r--s/s_only.cpp11
-rw-r--r--s/security.cpp112
-rw-r--r--s/server.cpp65
-rw-r--r--s/server.h2
-rw-r--r--s/shard.cpp53
-rw-r--r--s/shard.h19
-rw-r--r--s/shard_version.cpp74
-rw-r--r--s/shard_version.h1
-rw-r--r--s/shardconnection.cpp47
-rw-r--r--s/shardkey.cpp20
-rw-r--r--s/shardkey.h16
-rw-r--r--s/strategy.cpp26
-rw-r--r--s/strategy.h6
-rw-r--r--s/strategy_shard.cpp270
-rw-r--r--s/strategy_single.cpp31
-rw-r--r--s/util.h2
-rw-r--r--s/writeback_listener.cpp106
-rw-r--r--s/writeback_listener.h6
-rw-r--r--scripting/bench.cpp105
-rw-r--r--scripting/engine.cpp45
-rw-r--r--scripting/engine.h43
-rw-r--r--scripting/engine_java.cpp10
-rw-r--r--scripting/engine_spidermonkey.cpp129
-rw-r--r--scripting/engine_spidermonkey.h34
-rw-r--r--scripting/engine_v8.cpp1166
-rw-r--r--scripting/engine_v8.h122
-rw-r--r--scripting/sm_db.cpp120
-rw-r--r--scripting/utils.cpp19
-rw-r--r--scripting/v8_db.cpp605
-rw-r--r--scripting/v8_db.h132
-rw-r--r--scripting/v8_utils.cpp78
-rw-r--r--scripting/v8_utils.h6
-rw-r--r--scripting/v8_wrapper.cpp569
-rw-r--r--scripting/v8_wrapper.h15
-rw-r--r--server.h21
-rw-r--r--shell/collection.js52
-rw-r--r--shell/db.js77
-rw-r--r--shell/dbshell.cpp198
-rw-r--r--shell/mongo.js9
-rw-r--r--shell/mongo_vstudio.cpp491
-rw-r--r--shell/msvc/mongo.vcxproj14
-rw-r--r--shell/msvc/mongo.vcxproj.filters27
-rw-r--r--shell/query.js1
-rwxr-xr-x[-rw-r--r--]shell/servers.js1151
-rw-r--r--shell/shell_utils.cpp103
-rw-r--r--shell/utils.js284
-rw-r--r--shell/utils_sh.js98
-rwxr-xr-xspeed.js13
-rw-r--r--third_party/README6
-rw-r--r--third_party/js-1.7/Makefile.in388
-rw-r--r--third_party/js-1.7/Makefile.ref375
-rw-r--r--third_party/js-1.7/README.html826
-rw-r--r--third_party/js-1.7/SpiderMonkey.rsp12
-rw-r--r--third_party/js-1.7/Y.js19
-rw-r--r--third_party/js-1.7/config.mk186
-rw-r--r--third_party/js-1.7/config/AIX4.1.mk65
-rw-r--r--third_party/js-1.7/config/AIX4.2.mk64
-rw-r--r--third_party/js-1.7/config/AIX4.3.mk65
-rw-r--r--third_party/js-1.7/config/CVS/Entries36
-rw-r--r--third_party/js-1.7/config/CVS/Repository1
-rw-r--r--third_party/js-1.7/config/CVS/Root1
-rw-r--r--third_party/js-1.7/config/CVS/Tag1
-rw-r--r--third_party/js-1.7/config/Darwin.mk83
-rwxr-xr-xthird_party/js-1.7/config/Darwin1.3.mk81
-rwxr-xr-xthird_party/js-1.7/config/Darwin1.4.mk41
-rwxr-xr-xthird_party/js-1.7/config/Darwin5.2.mk81
-rw-r--r--third_party/js-1.7/config/Darwin5.3.mk81
-rw-r--r--third_party/js-1.7/config/HP-UXB.10.10.mk77
-rw-r--r--third_party/js-1.7/config/HP-UXB.10.20.mk77
-rw-r--r--third_party/js-1.7/config/HP-UXB.11.00.mk80
-rw-r--r--third_party/js-1.7/config/IRIX.mk87
-rw-r--r--third_party/js-1.7/config/IRIX5.3.mk44
-rw-r--r--third_party/js-1.7/config/IRIX6.1.mk44
-rw-r--r--third_party/js-1.7/config/IRIX6.2.mk44
-rw-r--r--third_party/js-1.7/config/IRIX6.3.mk44
-rw-r--r--third_party/js-1.7/config/IRIX6.5.mk44
-rw-r--r--third_party/js-1.7/config/Linux_All.mk103
-rwxr-xr-xthird_party/js-1.7/config/Mac_OS10.0.mk82
-rw-r--r--third_party/js-1.7/config/OSF1V4.0.mk72
-rw-r--r--third_party/js-1.7/config/OSF1V5.0.mk69
-rw-r--r--third_party/js-1.7/config/SunOS4.1.4.mk101
-rw-r--r--third_party/js-1.7/config/SunOS5.3.mk91
-rw-r--r--third_party/js-1.7/config/SunOS5.4.mk92
-rw-r--r--third_party/js-1.7/config/SunOS5.5.1.mk44
-rw-r--r--third_party/js-1.7/config/SunOS5.5.mk87
-rw-r--r--third_party/js-1.7/config/SunOS5.6.mk89
-rw-r--r--third_party/js-1.7/config/SunOS5.7.mk44
-rw-r--r--third_party/js-1.7/config/SunOS5.8.mk44
-rw-r--r--third_party/js-1.7/config/SunOS5.9.mk44
-rw-r--r--third_party/js-1.7/config/WINNT4.0.mk117
-rw-r--r--third_party/js-1.7/config/WINNT5.0.mk117
-rw-r--r--third_party/js-1.7/config/WINNT5.1.mk117
-rw-r--r--third_party/js-1.7/config/WINNT5.2.mk117
-rw-r--r--third_party/js-1.7/config/dgux.mk64
-rw-r--r--third_party/js-1.7/fdlibm/.cvsignore7
-rw-r--r--third_party/js-1.7/fdlibm/CVS/Entries87
-rw-r--r--third_party/js-1.7/fdlibm/CVS/Repository1
-rw-r--r--third_party/js-1.7/fdlibm/CVS/Root1
-rw-r--r--third_party/js-1.7/fdlibm/CVS/Tag1
-rw-r--r--third_party/js-1.7/fdlibm/Makefile.in127
-rw-r--r--third_party/js-1.7/fdlibm/Makefile.ref192
-rw-r--r--third_party/js-1.7/fdlibm/e_acos.c147
-rw-r--r--third_party/js-1.7/fdlibm/e_acosh.c105
-rw-r--r--third_party/js-1.7/fdlibm/e_asin.c156
-rw-r--r--third_party/js-1.7/fdlibm/e_atan2.c165
-rw-r--r--third_party/js-1.7/fdlibm/e_atanh.c110
-rw-r--r--third_party/js-1.7/fdlibm/e_cosh.c133
-rw-r--r--third_party/js-1.7/fdlibm/e_exp.c202
-rw-r--r--third_party/js-1.7/fdlibm/e_fmod.c184
-rw-r--r--third_party/js-1.7/fdlibm/e_gamma.c71
-rw-r--r--third_party/js-1.7/fdlibm/e_gamma_r.c70
-rw-r--r--third_party/js-1.7/fdlibm/e_hypot.c173
-rw-r--r--third_party/js-1.7/fdlibm/e_j0.c524
-rw-r--r--third_party/js-1.7/fdlibm/e_j1.c523
-rw-r--r--third_party/js-1.7/fdlibm/e_jn.c315
-rw-r--r--third_party/js-1.7/fdlibm/e_lgamma.c71
-rw-r--r--third_party/js-1.7/fdlibm/e_lgamma_r.c347
-rw-r--r--third_party/js-1.7/fdlibm/e_log.c184
-rw-r--r--third_party/js-1.7/fdlibm/e_log10.c134
-rw-r--r--third_party/js-1.7/fdlibm/e_pow.c386
-rw-r--r--third_party/js-1.7/fdlibm/e_rem_pio2.c222
-rw-r--r--third_party/js-1.7/fdlibm/e_remainder.c120
-rw-r--r--third_party/js-1.7/fdlibm/e_scalb.c89
-rw-r--r--third_party/js-1.7/fdlibm/e_sinh.c122
-rw-r--r--third_party/js-1.7/fdlibm/e_sqrt.c497
-rw-r--r--third_party/js-1.7/fdlibm/fdlibm.h273
-rw-r--r--third_party/js-1.7/fdlibm/fdlibm.mak1453
-rw-r--r--third_party/js-1.7/fdlibm/fdlibm.mdpbin0 -> 42143 bytes
-rw-r--r--third_party/js-1.7/fdlibm/k_cos.c135
-rw-r--r--third_party/js-1.7/fdlibm/k_rem_pio2.c354
-rw-r--r--third_party/js-1.7/fdlibm/k_sin.c114
-rw-r--r--third_party/js-1.7/fdlibm/k_standard.c785
-rw-r--r--third_party/js-1.7/fdlibm/k_tan.c170
-rw-r--r--third_party/js-1.7/fdlibm/s_asinh.c101
-rw-r--r--third_party/js-1.7/fdlibm/s_atan.c175
-rw-r--r--third_party/js-1.7/fdlibm/s_cbrt.c133
-rw-r--r--third_party/js-1.7/fdlibm/s_ceil.c120
-rw-r--r--third_party/js-1.7/fdlibm/s_copysign.c72
-rw-r--r--third_party/js-1.7/fdlibm/s_cos.c118
-rw-r--r--third_party/js-1.7/fdlibm/s_erf.c356
-rw-r--r--third_party/js-1.7/fdlibm/s_expm1.c267
-rw-r--r--third_party/js-1.7/fdlibm/s_fabs.c70
-rw-r--r--third_party/js-1.7/fdlibm/s_finite.c71
-rw-r--r--third_party/js-1.7/fdlibm/s_floor.c121
-rw-r--r--third_party/js-1.7/fdlibm/s_frexp.c99
-rw-r--r--third_party/js-1.7/fdlibm/s_ilogb.c85
-rw-r--r--third_party/js-1.7/fdlibm/s_isnan.c74
-rw-r--r--third_party/js-1.7/fdlibm/s_ldexp.c66
-rw-r--r--third_party/js-1.7/fdlibm/s_lib_version.c73
-rw-r--r--third_party/js-1.7/fdlibm/s_log1p.c211
-rw-r--r--third_party/js-1.7/fdlibm/s_logb.c79
-rw-r--r--third_party/js-1.7/fdlibm/s_matherr.c64
-rw-r--r--third_party/js-1.7/fdlibm/s_modf.c132
-rw-r--r--third_party/js-1.7/fdlibm/s_nextafter.c124
-rw-r--r--third_party/js-1.7/fdlibm/s_rint.c131
-rw-r--r--third_party/js-1.7/fdlibm/s_scalbn.c107
-rw-r--r--third_party/js-1.7/fdlibm/s_signgam.c40
-rw-r--r--third_party/js-1.7/fdlibm/s_significand.c68
-rw-r--r--third_party/js-1.7/fdlibm/s_sin.c118
-rw-r--r--third_party/js-1.7/fdlibm/s_tan.c112
-rw-r--r--third_party/js-1.7/fdlibm/s_tanh.c122
-rw-r--r--third_party/js-1.7/fdlibm/w_acos.c78
-rw-r--r--third_party/js-1.7/fdlibm/w_acosh.c78
-rw-r--r--third_party/js-1.7/fdlibm/w_asin.c80
-rw-r--r--third_party/js-1.7/fdlibm/w_atan2.c79
-rw-r--r--third_party/js-1.7/fdlibm/w_atanh.c81
-rw-r--r--third_party/js-1.7/fdlibm/w_cosh.c77
-rw-r--r--third_party/js-1.7/fdlibm/w_exp.c88
-rw-r--r--third_party/js-1.7/fdlibm/w_fmod.c78
-rw-r--r--third_party/js-1.7/fdlibm/w_gamma.c85
-rw-r--r--third_party/js-1.7/fdlibm/w_gamma_r.c81
-rw-r--r--third_party/js-1.7/fdlibm/w_hypot.c78
-rw-r--r--third_party/js-1.7/fdlibm/w_j0.c105
-rw-r--r--third_party/js-1.7/fdlibm/w_j1.c106
-rw-r--r--third_party/js-1.7/fdlibm/w_jn.c128
-rw-r--r--third_party/js-1.7/fdlibm/w_lgamma.c85
-rw-r--r--third_party/js-1.7/fdlibm/w_lgamma_r.c81
-rw-r--r--third_party/js-1.7/fdlibm/w_log.c78
-rw-r--r--third_party/js-1.7/fdlibm/w_log10.c81
-rw-r--r--third_party/js-1.7/fdlibm/w_pow.c99
-rw-r--r--third_party/js-1.7/fdlibm/w_remainder.c77
-rw-r--r--third_party/js-1.7/fdlibm/w_scalb.c95
-rw-r--r--third_party/js-1.7/fdlibm/w_sinh.c77
-rw-r--r--third_party/js-1.7/fdlibm/w_sqrt.c77
-rw-r--r--third_party/js-1.7/js.c3181
-rw-r--r--third_party/js-1.7/js.mak4344
-rw-r--r--third_party/js-1.7/js.mdpbin0 -> 17922 bytes
-rw-r--r--third_party/js-1.7/js.msg301
-rw-r--r--third_party/js-1.7/js.pkg2
-rw-r--r--third_party/js-1.7/js3240.rc79
-rw-r--r--third_party/js-1.7/jsOS240.def654
-rw-r--r--third_party/js-1.7/jsapi.c5011
-rw-r--r--third_party/js-1.7/jsapi.h2220
-rw-r--r--third_party/js-1.7/jsarena.c502
-rw-r--r--third_party/js-1.7/jsarena.h303
-rw-r--r--third_party/js-1.7/jsarray.c1864
-rw-r--r--third_party/js-1.7/jsarray.h95
-rw-r--r--third_party/js-1.7/jsatom.c999
-rw-r--r--third_party/js-1.7/jsatom.h456
-rw-r--r--third_party/js-1.7/jsbit.h195
-rw-r--r--third_party/js-1.7/jsbool.c227
-rw-r--r--third_party/js-1.7/jsbool.h76
-rw-r--r--third_party/js-1.7/jsclist.h139
-rw-r--r--third_party/js-1.7/jscntxt.c1229
-rw-r--r--third_party/js-1.7/jscntxt.h1013
-rw-r--r--third_party/js-1.7/jscompat.h57
-rw-r--r--third_party/js-1.7/jsconfig.h208
-rw-r--r--third_party/js-1.7/jsconfig.mk181
-rw-r--r--third_party/js-1.7/jscpucfg.c380
-rw-r--r--third_party/js-1.7/jscpucfg.h212
-rw-r--r--third_party/js-1.7/jsdate.c2371
-rw-r--r--third_party/js-1.7/jsdate.h120
-rw-r--r--third_party/js-1.7/jsdbgapi.c1439
-rw-r--r--third_party/js-1.7/jsdbgapi.h406
-rw-r--r--third_party/js-1.7/jsdhash.c826
-rw-r--r--third_party/js-1.7/jsdhash.h581
-rw-r--r--third_party/js-1.7/jsdtoa.c3132
-rw-r--r--third_party/js-1.7/jsdtoa.h130
-rw-r--r--third_party/js-1.7/jsemit.c6845
-rw-r--r--third_party/js-1.7/jsemit.h743
-rw-r--r--third_party/js-1.7/jsexn.c1348
-rw-r--r--third_party/js-1.7/jsexn.h96
-rw-r--r--third_party/js-1.7/jsfile.c2735
-rw-r--r--third_party/js-1.7/jsfile.h56
-rw-r--r--third_party/js-1.7/jsfile.msg90
-rw-r--r--third_party/js-1.7/jsfun.c2330
-rw-r--r--third_party/js-1.7/jsfun.h170
-rw-r--r--third_party/js-1.7/jsgc.c3201
-rw-r--r--third_party/js-1.7/jsgc.h368
-rw-r--r--third_party/js-1.7/jshash.c483
-rw-r--r--third_party/js-1.7/jshash.h151
-rw-r--r--third_party/js-1.7/jsify.pl485
-rw-r--r--third_party/js-1.7/jsinterp.c6216
-rw-r--r--third_party/js-1.7/jsinterp.h361
-rw-r--r--third_party/js-1.7/jsiter.c1080
-rw-r--r--third_party/js-1.7/jsiter.h114
-rw-r--r--third_party/js-1.7/jskeyword.tbl124
-rw-r--r--third_party/js-1.7/jskwgen.c460
-rw-r--r--third_party/js-1.7/jslibmath.h266
-rw-r--r--third_party/js-1.7/jslock.c1303
-rw-r--r--third_party/js-1.7/jslock.h266
-rw-r--r--third_party/js-1.7/jslocko.asm60
-rw-r--r--third_party/js-1.7/jslog2.c94
-rw-r--r--third_party/js-1.7/jslong.c281
-rw-r--r--third_party/js-1.7/jslong.h437
-rw-r--r--third_party/js-1.7/jsmath.c514
-rw-r--r--third_party/js-1.7/jsmath.h57
-rw-r--r--third_party/js-1.7/jsnum.c1147
-rw-r--r--third_party/js-1.7/jsnum.h268
-rw-r--r--third_party/js-1.7/jsobj.c5035
-rw-r--r--third_party/js-1.7/jsobj.h596
-rw-r--r--third_party/js-1.7/jsopcode.c4794
-rw-r--r--third_party/js-1.7/jsopcode.h318
-rw-r--r--third_party/js-1.7/jsopcode.tbl478
-rw-r--r--third_party/js-1.7/jsosdep.h115
-rw-r--r--third_party/js-1.7/jsotypes.h202
-rw-r--r--third_party/js-1.7/jsparse.c6547
-rw-r--r--third_party/js-1.7/jsparse.h438
-rw-r--r--third_party/js-1.7/jsprf.c1264
-rw-r--r--third_party/js-1.7/jsprf.h150
-rw-r--r--third_party/js-1.7/jsproto.tbl116
-rw-r--r--third_party/js-1.7/jsprvtd.h202
-rw-r--r--third_party/js-1.7/jspubtd.h667
-rw-r--r--third_party/js-1.7/jsregexp.c4206
-rw-r--r--third_party/js-1.7/jsregexp.h183
-rw-r--r--third_party/js-1.7/jsscan.c2101
-rw-r--r--third_party/js-1.7/jsscan.h389
-rw-r--r--third_party/js-1.7/jsscope.c1776
-rw-r--r--third_party/js-1.7/jsscope.h407
-rw-r--r--third_party/js-1.7/jsscript.c1717
-rw-r--r--third_party/js-1.7/jsscript.h225
-rw-r--r--third_party/js-1.7/jsshell.msg50
-rw-r--r--third_party/js-1.7/jsstddef.h83
-rw-r--r--third_party/js-1.7/jsstr.c4818
-rw-r--r--third_party/js-1.7/jsstr.h500
-rw-r--r--third_party/js-1.7/jstypes.h464
-rw-r--r--third_party/js-1.7/jsutil.c198
-rw-r--r--third_party/js-1.7/jsutil.h106
-rw-r--r--third_party/js-1.7/jsxdrapi.c835
-rw-r--r--third_party/js-1.7/jsxdrapi.h223
-rw-r--r--third_party/js-1.7/jsxml.c8357
-rw-r--r--third_party/js-1.7/jsxml.h332
-rw-r--r--third_party/js-1.7/lock_SunOS.s114
-rw-r--r--third_party/js-1.7/perfect.js39
-rw-r--r--third_party/js-1.7/plify_jsdhash.sed33
-rw-r--r--third_party/js-1.7/prmjtime.c439
-rw-r--r--third_party/js-1.7/prmjtime.h95
-rw-r--r--third_party/js-1.7/resource.h15
-rw-r--r--third_party/js-1.7/rules.mk193
-rw-r--r--third_party/js-1.7/win32.order391
-rw-r--r--third_party/linenoise/Makefile7
-rw-r--r--third_party/linenoise/README.markdown47
-rw-r--r--third_party/linenoise/example.c27
-rw-r--r--third_party/linenoise/history.txt3
-rw-r--r--third_party/linenoise/linenoise.cpp836
-rw-r--r--third_party/linenoise/linenoise.h55
-rw-r--r--third_party/linenoise/linenoise_win32.cpp442
-rw-r--r--third_party/pcre-7.4/config-cmake.h.in (renamed from pcre-7.4/config-cmake.h.in)0
-rw-r--r--third_party/pcre-7.4/config.h (renamed from pcre-7.4/config.h)22
-rw-r--r--third_party/pcre-7.4/config.h.generic (renamed from pcre-7.4/config.h.generic)0
-rw-r--r--third_party/pcre-7.4/config.h.in (renamed from pcre-7.4/config.h.in)0
-rw-r--r--third_party/pcre-7.4/dftables.c (renamed from pcre-7.4/dftables.c)0
-rw-r--r--third_party/pcre-7.4/pcre.h (renamed from pcre-7.4/pcre.h)0
-rw-r--r--third_party/pcre-7.4/pcre.h.generic (renamed from pcre-7.4/pcre.h.generic)0
-rw-r--r--third_party/pcre-7.4/pcre.h.in (renamed from pcre-7.4/pcre.h.in)0
-rw-r--r--third_party/pcre-7.4/pcre_chartables.c (renamed from pcre-7.4/pcre_chartables.c)0
-rw-r--r--third_party/pcre-7.4/pcre_chartables.c.dist (renamed from pcre-7.4/pcre_chartables.c.dist)0
-rw-r--r--third_party/pcre-7.4/pcre_compile.c (renamed from pcre-7.4/pcre_compile.c)0
-rw-r--r--third_party/pcre-7.4/pcre_config.c (renamed from pcre-7.4/pcre_config.c)0
-rw-r--r--third_party/pcre-7.4/pcre_dfa_exec.c (renamed from pcre-7.4/pcre_dfa_exec.c)0
-rw-r--r--third_party/pcre-7.4/pcre_exec.c (renamed from pcre-7.4/pcre_exec.c)4
-rw-r--r--third_party/pcre-7.4/pcre_fullinfo.c (renamed from pcre-7.4/pcre_fullinfo.c)0
-rw-r--r--third_party/pcre-7.4/pcre_get.c (renamed from pcre-7.4/pcre_get.c)0
-rw-r--r--third_party/pcre-7.4/pcre_globals.c (renamed from pcre-7.4/pcre_globals.c)0
-rw-r--r--third_party/pcre-7.4/pcre_info.c (renamed from pcre-7.4/pcre_info.c)0
-rw-r--r--third_party/pcre-7.4/pcre_internal.h (renamed from pcre-7.4/pcre_internal.h)0
-rw-r--r--third_party/pcre-7.4/pcre_maketables.c (renamed from pcre-7.4/pcre_maketables.c)0
-rw-r--r--third_party/pcre-7.4/pcre_newline.c (renamed from pcre-7.4/pcre_newline.c)0
-rw-r--r--third_party/pcre-7.4/pcre_ord2utf8.c (renamed from pcre-7.4/pcre_ord2utf8.c)0
-rw-r--r--third_party/pcre-7.4/pcre_refcount.c (renamed from pcre-7.4/pcre_refcount.c)0
-rw-r--r--third_party/pcre-7.4/pcre_scanner.cc (renamed from pcre-7.4/pcre_scanner.cc)0
-rw-r--r--third_party/pcre-7.4/pcre_scanner.h (renamed from pcre-7.4/pcre_scanner.h)4
-rw-r--r--third_party/pcre-7.4/pcre_scanner_unittest.cc (renamed from pcre-7.4/pcre_scanner_unittest.cc)0
-rw-r--r--third_party/pcre-7.4/pcre_stringpiece.cc (renamed from pcre-7.4/pcre_stringpiece.cc)0
-rw-r--r--third_party/pcre-7.4/pcre_stringpiece.h (renamed from pcre-7.4/pcre_stringpiece.h)0
-rw-r--r--third_party/pcre-7.4/pcre_stringpiece.h.in (renamed from pcre-7.4/pcre_stringpiece.h.in)0
-rw-r--r--third_party/pcre-7.4/pcre_stringpiece_unittest.cc (renamed from pcre-7.4/pcre_stringpiece_unittest.cc)0
-rw-r--r--third_party/pcre-7.4/pcre_study.c (renamed from pcre-7.4/pcre_study.c)0
-rw-r--r--third_party/pcre-7.4/pcre_tables.c (renamed from pcre-7.4/pcre_tables.c)0
-rw-r--r--third_party/pcre-7.4/pcre_try_flipped.c (renamed from pcre-7.4/pcre_try_flipped.c)0
-rw-r--r--third_party/pcre-7.4/pcre_ucp_searchfuncs.c (renamed from pcre-7.4/pcre_ucp_searchfuncs.c)0
-rw-r--r--third_party/pcre-7.4/pcre_valid_utf8.c (renamed from pcre-7.4/pcre_valid_utf8.c)0
-rw-r--r--third_party/pcre-7.4/pcre_version.c (renamed from pcre-7.4/pcre_version.c)0
-rw-r--r--third_party/pcre-7.4/pcre_xclass.c (renamed from pcre-7.4/pcre_xclass.c)0
-rw-r--r--third_party/pcre-7.4/pcrecpp.cc (renamed from pcre-7.4/pcrecpp.cc)2
-rw-r--r--third_party/pcre-7.4/pcrecpp.h (renamed from pcre-7.4/pcrecpp.h)0
-rw-r--r--third_party/pcre-7.4/pcrecpp_internal.h (renamed from pcre-7.4/pcrecpp_internal.h)0
-rw-r--r--third_party/pcre-7.4/pcrecpp_unittest.cc (renamed from pcre-7.4/pcrecpp_unittest.cc)0
-rw-r--r--third_party/pcre-7.4/pcrecpparg.h (renamed from pcre-7.4/pcrecpparg.h)0
-rw-r--r--third_party/pcre-7.4/pcrecpparg.h.in (renamed from pcre-7.4/pcrecpparg.h.in)0
-rw-r--r--third_party/pcre-7.4/pcredemo.c (renamed from pcre-7.4/pcredemo.c)0
-rw-r--r--third_party/pcre-7.4/pcregrep.c (renamed from pcre-7.4/pcregrep.c)0
-rw-r--r--third_party/pcre-7.4/pcreposix.c (renamed from pcre-7.4/pcreposix.c)0
-rw-r--r--third_party/pcre-7.4/pcreposix.h (renamed from pcre-7.4/pcreposix.h)0
-rw-r--r--third_party/pcre-7.4/pcretest.c (renamed from pcre-7.4/pcretest.c)0
-rw-r--r--third_party/pcre-7.4/ucp.h (renamed from pcre-7.4/ucp.h)0
-rw-r--r--third_party/pcre-7.4/ucpinternal.h (renamed from pcre-7.4/ucpinternal.h)0
-rw-r--r--third_party/pcre-7.4/ucptable.h (renamed from pcre-7.4/ucptable.h)0
-rw-r--r--third_party/pcre.py38
-rw-r--r--third_party/sm.py100
-rw-r--r--third_party/snappy.py11
-rwxr-xr-xthird_party/snappy/COPYING28
-rwxr-xr-xthird_party/snappy/README135
-rwxr-xr-xthird_party/snappy/config.h124
-rwxr-xr-xthird_party/snappy/snappy-internal.h150
-rwxr-xr-xthird_party/snappy/snappy-sinksource.cc72
-rwxr-xr-xthird_party/snappy/snappy-sinksource.h136
-rwxr-xr-xthird_party/snappy/snappy-stubs-internal.cc42
-rwxr-xr-xthird_party/snappy/snappy-stubs-internal.h478
-rwxr-xr-xthird_party/snappy/snappy-stubs-public.h85
-rwxr-xr-xthird_party/snappy/snappy.cc1026
-rwxr-xr-xthird_party/snappy/snappy.h155
-rw-r--r--tools/bridge.cpp9
-rw-r--r--tools/dump.cpp134
-rw-r--r--tools/export.cpp74
-rw-r--r--tools/import.cpp327
-rw-r--r--tools/restore.cpp43
-rw-r--r--tools/sniffer.cpp30
-rw-r--r--tools/stat.cpp72
-rw-r--r--tools/tool.cpp82
-rw-r--r--tools/tool.h4
-rw-r--r--tools/top.cpp196
-rw-r--r--util/alignedbuilder.cpp41
-rw-r--r--util/alignedbuilder.h16
-rw-r--r--util/array.h7
-rw-r--r--util/assert_util.cpp71
-rw-r--r--util/assert_util.h76
-rw-r--r--util/background.cpp70
-rw-r--r--util/background.h49
-rw-r--r--util/bson_util.h42
-rw-r--r--util/bufreader.h2
-rw-r--r--util/checksum.h37
-rw-r--r--util/compress.cpp31
-rw-r--r--util/compress.h21
-rw-r--r--util/concurrency/list.h28
-rw-r--r--util/concurrency/mutex.h103
-rw-r--r--util/concurrency/race.h49
-rw-r--r--util/concurrency/rwlock.h250
-rw-r--r--[-rwxr-xr-x]util/concurrency/shared_mutex_win.hpp29
-rw-r--r--util/concurrency/spin_lock.cpp45
-rw-r--r--util/concurrency/spin_lock.h33
-rw-r--r--util/concurrency/synchronization.cpp36
-rw-r--r--util/concurrency/synchronization.h18
-rw-r--r--util/concurrency/value.h51
-rw-r--r--util/concurrency/vars.cpp4
-rw-r--r--util/file.h76
-rw-r--r--util/file_allocator.cpp73
-rw-r--r--util/file_allocator.h6
-rw-r--r--util/goodies.h154
-rw-r--r--util/hashtab.h14
-rw-r--r--util/log.cpp38
-rw-r--r--util/log.h53
-rw-r--r--util/logfile.cpp87
-rw-r--r--util/logfile.h3
-rw-r--r--util/message.cpp764
-rw-r--r--util/mmap.cpp30
-rw-r--r--util/mmap.h38
-rw-r--r--util/mmap_posix.cpp24
-rw-r--r--util/mmap_win.cpp13
-rwxr-xr-xutil/mongoutils/README2
-rw-r--r--util/mongoutils/str.h8
-rw-r--r--util/mongoutils/test.cpp2
-rw-r--r--util/net/hostandport.h (renamed from util/hostandport.h)40
-rw-r--r--util/net/httpclient.cpp (renamed from util/httpclient.cpp)45
-rw-r--r--util/net/httpclient.h (renamed from util/httpclient.h)16
-rw-r--r--util/net/listen.cpp391
-rw-r--r--util/net/listen.h190
-rw-r--r--util/net/message.cpp64
-rw-r--r--util/net/message.h (renamed from util/message.h)210
-rw-r--r--util/net/message_port.cpp298
-rw-r--r--util/net/message_port.h107
-rw-r--r--util/net/message_server.h (renamed from util/message_server.h)16
-rw-r--r--util/net/message_server_asio.cpp (renamed from util/message_server_asio.cpp)0
-rw-r--r--util/net/message_server_port.cpp (renamed from util/message_server_port.cpp)66
-rw-r--r--util/net/miniwebserver.cpp (renamed from util/miniwebserver.cpp)23
-rw-r--r--util/net/miniwebserver.h (renamed from util/miniwebserver.h)10
-rw-r--r--util/net/sock.cpp713
-rw-r--r--util/net/sock.h256
-rw-r--r--util/optime.h36
-rw-r--r--util/paths.h42
-rw-r--r--util/processinfo.h6
-rw-r--r--util/processinfo_darwin.cpp5
-rw-r--r--util/processinfo_win32.cpp3
-rw-r--r--util/queue.h6
-rw-r--r--util/ramlog.cpp190
-rw-r--r--util/ramlog.h144
-rw-r--r--util/sock.cpp235
-rw-r--r--util/sock.h303
-rw-r--r--util/stringutils.h102
-rw-r--r--util/time_support.h80
-rw-r--r--util/timer.h83
-rw-r--r--util/util.cpp22
-rw-r--r--util/version.cpp145
-rw-r--r--util/version.h2
1003 files changed, 191592 insertions, 24726 deletions
diff --git a/.gitignore b/.gitignore
index 3847ca4..26afcde 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,6 +3,7 @@
.sconsign.dblite
.sconf_temp
perf.data
+massif.out.*
*~
*.swp
@@ -77,6 +78,7 @@ mongoimport
mongosniff
mongobridge
mongostat
+mongotop
bsondump
*.tgz
@@ -115,6 +117,12 @@ debian/mongodb
#osx
.DS_Store
+#third party
+third_party/js-1.7/jsautocfg.h
+third_party/js-1.7/jsautokw.h
+third_party/js-1.7/jskwgen
+third_party/js-1.7/jscpucfg
+
# QtCreator
*.config
*.creator
diff --git a/README b/README
index 7cf7653..ed84a68 100755
--- a/README
+++ b/README
@@ -1,14 +1,21 @@
MongoDB README
-DOCUMENTATION
-
- http://www.mongodb.org/
-
+Welcome to MongoDB!
+
COMPONENTS
mongod - The database process.
mongos - Sharding controller.
mongo - The database shell (uses interactive javascript).
+
+UTILITIES
+
+ mongodump - MongoDB dump tool - for backups, snapshots, etc..
+ mongorestore - MongoDB restore a dump
+ mongoexport - Export a single collection to text (JSON, CSV)
+ mongoimport - Import from JSON or CSV
+ mongofiles - Utility for putting and getting files from MongoDB GridFS
+ mongostat - Show performance statistics
BUILDING
@@ -31,11 +38,20 @@ RUNNING
DRIVERS
- Client drivers for most programming languages are available at mongodb.org.
+ Client drivers for most programming languages are available at mongodb.org. Use the
+ shell ("mongo") for administrative tasks.
-NOTES
+DOCUMENTATION
- Mongo uses memory mapped files. If built as a 32 bit executable, you will
+ http://www.mongodb.org/
+
+MAIL LISTS AND IRC
+
+ http://www.mongodb.org/display/DOCS/Community
+
+32 BIT BUILD NOTES
+
+ MongoDB uses memory mapped files. If built as a 32 bit executable, you will
not be able to work with large (multi-gigabyte) databases. However, 32 bit
builds work fine with small development databases.
@@ -48,4 +64,3 @@ LICENSE
As an exception, the files in the client/, debian/, rpm/,
utils/mongoutils, and all subdirectories thereof are made available under
the terms of the Apache License, version 2.0.
-
diff --git a/SConstruct b/SConstruct
index 41383b1..4e46052 100644
--- a/SConstruct
+++ b/SConstruct
@@ -22,6 +22,7 @@ import urllib
import urllib2
import buildscripts
import buildscripts.bb
+import stat
from buildscripts import utils
buildscripts.bb.checkOk()
@@ -31,12 +32,12 @@ def findSettingsSetup():
sys.path.append( ".." )
sys.path.append( "../../" )
-
-
# --- options ----
options = {}
+options_topass = {}
+
def add_option( name, help , nargs , contibutesToVariantDir , dest=None ):
if dest is None:
@@ -57,7 +58,7 @@ def add_option( name, help , nargs , contibutesToVariantDir , dest=None ):
def get_option( name ):
return GetOption( name )
-def has_option( name ):
+def _has_option( name ):
x = get_option( name )
if x is None:
return False
@@ -70,6 +71,12 @@ def has_option( name ):
return True
+def has_option( name ):
+ x = _has_option(name)
+ options_topass[name] = x
+ return x
+
+
def get_variant_dir():
a = []
@@ -114,6 +121,7 @@ add_option( "64" , "whether to force 64 bit" , 0 , True , "force64" )
add_option( "32" , "whether to force 32 bit" , 0 , True , "force32" )
add_option( "cxx", "compiler to use" , 1 , True )
+add_option( "cc", "compiler to use for c" , 1 , True )
add_option( "cpppath", "Include path if you have headers in a nonstandard directory" , 1 , True )
add_option( "libpath", "Library path if you have libraries in a nonstandard directory" , 1 , True )
@@ -127,10 +135,10 @@ add_option( "staticlibpath", "comma separated list of dirs to search for staticl
add_option( "boost-compiler", "compiler used for boost (gcc41)" , 1 , True , "boostCompiler" )
add_option( "boost-version", "boost version for linking(1_38)" , 1 , True , "boostVersion" )
-
# experimental features
add_option( "mm", "use main memory instead of memory mapped files" , 0 , True )
add_option( "asio" , "Use Asynchronous IO (NOT READY YET)" , 0 , True )
+add_option( "ssl" , "Enable SSL" , 0 , True )
# library choices
add_option( "usesm" , "use spider monkey for javascript" , 0 , True )
@@ -139,20 +147,24 @@ add_option( "usev8" , "use v8 for javascript" , 0 , True )
# mongo feature options
add_option( "noshell", "don't build shell" , 0 , True )
add_option( "safeshell", "don't let shell scripts run programs (still, don't run untrusted scripts)" , 0 , True )
+add_option( "win2008plus", "use newer operating system API features" , 0 , False )
# dev tools
add_option( "d", "debug build no optimization, etc..." , 0 , True , "debugBuild" )
add_option( "dd", "debug build no optimization, additional debug logging, etc..." , 0 , False , "debugBuildAndLogging" )
add_option( "durableDefaultOn" , "have durable default to on" , 0 , True )
+add_option( "durableDefaultOff" , "have durable default to off" , 0 , True )
add_option( "pch" , "use precompiled headers to speed up the build (experimental)" , 0 , True , "usePCH" )
add_option( "distcc" , "use distcc for distributing builds" , 0 , False )
+add_option( "clang" , "use clang++ rather than g++ (experimental)" , 0 , True )
# debugging/profiling help
# to use CPUPROFILE=/tmp/profile
# to view pprof -gv mongod /tmp/profile
add_option( "pg", "link against profiler" , 0 , False , "profile" )
+add_option( "tcmalloc" , "link against tcmalloc" , 0 , False )
add_option( "gdbserver" , "build in gdb server support" , 0 , True )
add_option( "heapcheck", "link to heap-checking malloc-lib and look for memory leaks during tests" , 0 , False )
@@ -211,6 +223,13 @@ env = Environment( MSVS_ARCH=msarch , tools = ["default", "gch"], toolpath = '.'
if has_option( "cxx" ):
env["CC"] = get_option( "cxx" )
env["CXX"] = get_option( "cxx" )
+elif has_option("clang"):
+ env["CC"] = 'clang'
+ env["CXX"] = 'clang++'
+
+if has_option( "cc" ):
+ env["CC"] = get_option( "cc" )
+
env["LIBPATH"] = []
if has_option( "libpath" ):
@@ -222,13 +241,15 @@ if has_option( "cpppath" ):
env.Append( CPPDEFINES=[ "_SCONS" , "MONGO_EXPOSE_MACROS" ] )
env.Append( CPPPATH=[ "." ] )
-
if has_option( "safeshell" ):
env.Append( CPPDEFINES=[ "MONGO_SAFE_SHELL" ] )
if has_option( "durableDefaultOn" ):
env.Append( CPPDEFINES=[ "_DURABLEDEFAULTON" ] )
+if has_option( "durableDefaultOff" ):
+ env.Append( CPPDEFINES=[ "_DURABLEDEFAULTOFF" ] )
+
boostCompiler = GetOption( "boostCompiler" )
if boostCompiler is None:
boostCompiler = ""
@@ -243,6 +264,7 @@ else:
if ( not ( usesm or usev8 or justClientLib) ):
usesm = True
+ options_topass["usesm"] = True
distBuild = len( COMMAND_LINE_TARGETS ) == 1 and ( str( COMMAND_LINE_TARGETS[0] ) == "s3dist" or str( COMMAND_LINE_TARGETS[0] ) == "dist" )
@@ -305,47 +327,53 @@ if has_option( "full" ):
# ------ SOURCE FILE SETUP -----------
-commonFiles = Split( "pch.cpp buildinfo.cpp db/common.cpp db/indexkey.cpp db/jsobj.cpp bson/oid.cpp db/json.cpp db/lasterror.cpp db/nonce.cpp db/queryutil.cpp db/projection.cpp shell/mongo.cpp db/security_key.cpp" )
-commonFiles += [ "util/background.cpp" , "util/mmap.cpp" , "util/sock.cpp" , "util/util.cpp" , "util/file_allocator.cpp" , "util/message.cpp" ,
- "util/assert_util.cpp" , "util/log.cpp" , "util/httpclient.cpp" , "util/md5main.cpp" , "util/base64.cpp", "util/concurrency/vars.cpp", "util/concurrency/task.cpp", "util/debug_util.cpp",
+commonFiles = Split( "pch.cpp buildinfo.cpp db/indexkey.cpp db/jsobj.cpp bson/oid.cpp db/json.cpp db/lasterror.cpp db/nonce.cpp db/queryutil.cpp db/querypattern.cpp db/projection.cpp shell/mongo.cpp db/security_common.cpp db/security_commands.cpp" )
+commonFiles += [ "util/background.cpp" , "util/util.cpp" , "util/file_allocator.cpp" ,
+ "util/assert_util.cpp" , "util/log.cpp" , "util/ramlog.cpp" , "util/md5main.cpp" , "util/base64.cpp", "util/concurrency/vars.cpp", "util/concurrency/task.cpp", "util/debug_util.cpp",
"util/concurrency/thread_pool.cpp", "util/password.cpp", "util/version.cpp", "util/signal_handlers.cpp",
"util/histogram.cpp", "util/concurrency/spin_lock.cpp", "util/text.cpp" , "util/stringutils.cpp" ,
"util/concurrency/synchronization.cpp" ]
-commonFiles += Glob( "util/*.c" )
+commonFiles += [ "util/net/sock.cpp" , "util/net/httpclient.cpp" , "util/net/message.cpp" , "util/net/message_port.cpp" , "util/net/listen.cpp" ]
+commonFiles += Glob( "util/*.c" )
commonFiles += Split( "client/connpool.cpp client/dbclient.cpp client/dbclient_rs.cpp client/dbclientcursor.cpp client/model.cpp client/syncclusterconnection.cpp client/distlock.cpp s/shardconnection.cpp" )
#mmap stuff
+coreDbFiles = [ "db/commands.cpp" ]
+coreServerFiles = [ "util/net/message_server_port.cpp" ,
+ "client/parallel.cpp" , "db/common.cpp",
+ "util/net/miniwebserver.cpp" , "db/dbwebserver.cpp" ,
+ "db/matcher.cpp" , "db/dbcommands_generic.cpp" , "db/dbmessage.cpp" ]
+
+mmapFiles = [ "util/mmap.cpp" ]
+
if has_option( "mm" ):
- commonFiles += [ "util/mmap_mm.cpp" ]
+ mmapFiles += [ "util/mmap_mm.cpp" ]
elif os.sys.platform == "win32":
- commonFiles += [ "util/mmap_win.cpp" ]
+ mmapFiles += [ "util/mmap_win.cpp" ]
else:
- commonFiles += [ "util/mmap_posix.cpp" ]
+ mmapFiles += [ "util/mmap_posix.cpp" ]
-coreDbFiles = [ "db/commands.cpp" ]
-coreServerFiles = [ "util/message_server_port.cpp" ,
- "client/parallel.cpp" ,
- "util/miniwebserver.cpp" , "db/dbwebserver.cpp" ,
- "db/matcher.cpp" , "db/dbcommands_generic.cpp" ]
+coreServerFiles += mmapFiles
processInfoFiles = [ "util/processinfo.cpp" ]
if os.path.exists( "util/processinfo_" + os.sys.platform + ".cpp" ):
processInfoFiles += [ "util/processinfo_" + os.sys.platform + ".cpp" ]
+elif os.sys.platform == "linux3":
+ processInfoFiles += [ "util/processinfo_linux2.cpp" ]
else:
processInfoFiles += [ "util/processinfo_none.cpp" ]
coreServerFiles += processInfoFiles
-
-
if has_option( "asio" ):
- coreServerFiles += [ "util/message_server_asio.cpp" ]
+ coreServerFiles += [ "util/net/message_server_asio.cpp" ]
-serverOnlyFiles = Split( "util/logfile.cpp util/alignedbuilder.cpp db/mongommf.cpp db/dur.cpp db/durop.cpp db/dur_writetodatafiles.cpp db/dur_preplogbuffer.cpp db/dur_commitjob.cpp db/dur_recover.cpp db/dur_journal.cpp db/query.cpp db/update.cpp db/introspect.cpp db/btree.cpp db/clientcursor.cpp db/tests.cpp db/repl.cpp db/repl/rs.cpp db/repl/consensus.cpp db/repl/rs_initiate.cpp db/repl/replset_commands.cpp db/repl/manager.cpp db/repl/health.cpp db/repl/heartbeat.cpp db/repl/rs_config.cpp db/repl/rs_rollback.cpp db/repl/rs_sync.cpp db/repl/rs_initialsync.cpp db/oplog.cpp db/repl_block.cpp db/btreecursor.cpp db/cloner.cpp db/namespace.cpp db/cap.cpp db/matcher_covered.cpp db/dbeval.cpp db/restapi.cpp db/dbhelpers.cpp db/instance.cpp db/client.cpp db/database.cpp db/pdfile.cpp db/cursor.cpp db/security_commands.cpp db/security.cpp db/queryoptimizer.cpp db/extsort.cpp db/cmdline.cpp" )
+# mongod files - also files used in tools. present in dbtests, but not in mongos and not in client libs.
+serverOnlyFiles = Split( "util/compress.cpp db/key.cpp db/btreebuilder.cpp util/logfile.cpp util/alignedbuilder.cpp db/mongommf.cpp db/dur.cpp db/durop.cpp db/dur_writetodatafiles.cpp db/dur_preplogbuffer.cpp db/dur_commitjob.cpp db/dur_recover.cpp db/dur_journal.cpp db/introspect.cpp db/btree.cpp db/clientcursor.cpp db/tests.cpp db/repl.cpp db/repl/rs.cpp db/repl/consensus.cpp db/repl/rs_initiate.cpp db/repl/replset_commands.cpp db/repl/manager.cpp db/repl/health.cpp db/repl/heartbeat.cpp db/repl/rs_config.cpp db/repl/rs_rollback.cpp db/repl/rs_sync.cpp db/repl/rs_initialsync.cpp db/oplog.cpp db/repl_block.cpp db/btreecursor.cpp db/cloner.cpp db/namespace.cpp db/cap.cpp db/matcher_covered.cpp db/dbeval.cpp db/restapi.cpp db/dbhelpers.cpp db/instance.cpp db/client.cpp db/database.cpp db/pdfile.cpp db/record.cpp db/cursor.cpp db/security.cpp db/queryoptimizer.cpp db/queryoptimizercursor.cpp db/extsort.cpp db/cmdline.cpp" )
-serverOnlyFiles += [ "db/index.cpp" ] + Glob( "db/geo/*.cpp" )
+serverOnlyFiles += [ "db/index.cpp" , "db/scanandorder.cpp" ] + Glob( "db/geo/*.cpp" ) + Glob( "db/ops/*.cpp" )
serverOnlyFiles += [ "db/dbcommands.cpp" , "db/dbcommands_admin.cpp" ]
serverOnlyFiles += Glob( "db/commands/*.cpp" )
@@ -361,10 +389,8 @@ elif usev8:
else:
scriptingFiles += [ "scripting/engine_none.cpp" ]
-coreServerFiles += scriptingFiles
-
coreShardFiles = [ "s/config.cpp" , "s/grid.cpp" , "s/chunk.cpp" , "s/shard.cpp" , "s/shardkey.cpp" ]
-shardServerFiles = coreShardFiles + Glob( "s/strategy*.cpp" ) + [ "s/commands_admin.cpp" , "s/commands_public.cpp" , "s/request.cpp" , "s/client.cpp" , "s/cursors.cpp" , "s/server.cpp" , "s/config_migrate.cpp" , "s/s_only.cpp" , "s/stats.cpp" , "s/balance.cpp" , "s/balancer_policy.cpp" , "db/cmdline.cpp" , "s/writeback_listener.cpp" , "s/shard_version.cpp" ]
+shardServerFiles = coreShardFiles + Glob( "s/strategy*.cpp" ) + [ "s/commands_admin.cpp" , "s/commands_public.cpp" , "s/request.cpp" , "s/client.cpp" , "s/cursors.cpp" , "s/server.cpp" , "s/config_migrate.cpp" , "s/s_only.cpp" , "s/stats.cpp" , "s/balance.cpp" , "s/balancer_policy.cpp" , "db/cmdline.cpp" , "s/writeback_listener.cpp" , "s/shard_version.cpp", "s/mr_shard.cpp", "s/security.cpp" ]
serverOnlyFiles += coreShardFiles + [ "s/d_logic.cpp" , "s/d_writeback.cpp" , "s/d_migrate.cpp" , "s/d_state.cpp" , "s/d_split.cpp" , "client/distlock_test.cpp" , "s/d_chunk_manager.cpp" ]
serverOnlyFiles += [ "db/module.cpp" ] + Glob( "db/modules/*.cpp" )
@@ -463,7 +489,7 @@ if "darwin" == os.sys.platform:
env.Append( CPPPATH=filterExists(["/sw/include" , "/opt/local/include"]) )
env.Append( LIBPATH=filterExists(["/sw/lib/", "/opt/local/lib"]) )
-elif "linux2" == os.sys.platform:
+elif "linux2" == os.sys.platform or "linux3" == os.sys.platform:
linux = True
platform = "linux"
@@ -508,6 +534,9 @@ elif "win32" == os.sys.platform:
#if force64:
# release = True
+ if has_option( "win2008plus" ):
+ env.Append( CPPDEFINES=[ "MONGO_USE_SRW_ON_WINDOWS" ] )
+
for pathdir in env['ENV']['PATH'].split(os.pathsep):
if os.path.exists(os.path.join(pathdir, 'cl.exe')):
print( "found visual studio at " + pathdir )
@@ -541,20 +570,14 @@ elif "win32" == os.sys.platform:
boostLibs = []
- env.Append(CPPPATH=[ "js/src/" ])
- env.Append(CPPPATH=["../js/src/"])
- env.Append(LIBPATH=["../js/src"])
- env.Append(LIBPATH=["../js/"])
-
- env.Append( CPPDEFINES=[ "OLDJS" ] )
env.Append( CPPDEFINES=[ "_UNICODE" ] )
env.Append( CPPDEFINES=[ "UNICODE" ] )
winSDKHome = findVersion( [ "C:/Program Files/Microsoft SDKs/Windows/", "C:/Program Files (x86)/Microsoft SDKs/Windows/" ] ,
- [ "v7.0A", "v7.0", "v6.1", "v6.0a", "v6.0" ] )
+ [ "v7.1", "v7.0A", "v7.0", "v6.1", "v6.0a", "v6.0" ] )
print( "Windows SDK Root '" + winSDKHome + "'" )
- env.Append( CPPPATH=[ boostDir , "pcre-7.4" , winSDKHome + "/Include" ] )
+ env.Append( CPPPATH=[ boostDir , winSDKHome + "/Include" ] )
# consider adding /MP build with multiple processes option.
@@ -565,23 +588,25 @@ elif "win32" == os.sys.platform:
# some warnings we don't like:
env.Append( CPPFLAGS=" /wd4355 /wd4800 /wd4267 /wd4244 " )
- env.Append( CPPDEFINES=["WIN32","_CONSOLE","_CRT_SECURE_NO_WARNINGS","HAVE_CONFIG_H","PCRE_STATIC","SUPPORT_UCP","SUPPORT_UTF8","PSAPI_VERSION=1" ] )
+ # PSAPI_VERSION relates to process api dll Psapi.dll.
+ env.Append( CPPDEFINES=["_CONSOLE","_CRT_SECURE_NO_WARNINGS","PSAPI_VERSION=1" ] )
- #env.Append( CPPFLAGS=' /Yu"pch.h" ' ) # this would be for pre-compiled headers, could play with it later
+ # this would be for pre-compiled headers, could play with it later
+ #env.Append( CPPFLAGS=' /Yu"pch.h" ' )
- # docs say don't use /FD from command line
- # /Gy funtion level linking
+ # docs say don't use /FD from command line (minimal rebuild)
+ # /Gy function level linking
# /Gm is minimal rebuild, but may not work in parallel mode.
if release:
env.Append( CPPDEFINES=[ "NDEBUG" ] )
- env.Append( CPPFLAGS= " /O2 /MT /Gy /Zi /TP /errorReport:none " )
+ env.Append( CPPFLAGS= " /O2 /Gy " )
+ env.Append( CPPFLAGS= " /MT /Zi /TP /errorReport:none " )
# TODO: this has caused some linking problems :
# /GL whole program optimization
# /LTCG link time code generation
env.Append( CPPFLAGS= " /GL " )
env.Append( LINKFLAGS=" /LTCG " )
else:
-
# /Od disable optimization
# /ZI debug info w/edit & continue
# /TP it's a c++ file
@@ -596,10 +621,6 @@ elif "win32" == os.sys.platform:
if debugLogging:
env.Append( CPPDEFINES=[ "_DEBUG" ] )
- if os.path.exists("../readline/lib") :
- env.Append( LIBPATH=["../readline/lib"] )
- env.Append( CPPPATH=["../readline/include"] )
-
if force64 and os.path.exists( boostDir + "/lib/vs2010_64" ):
env.Append( LIBPATH=[ boostDir + "/lib/vs2010_64" ] )
elif not force64 and os.path.exists( boostDir + "/lib/vs2010_32" ):
@@ -618,26 +639,6 @@ elif "win32" == os.sys.platform:
else:
env.Append( LINKFLAGS=" /NODEFAULTLIB:MSVCPRT /NODEFAULTLIB:MSVCRT " )
- def pcreFilter(x):
- name = x.name
- if x.name.endswith( "dftables.c" ):
- return False
- if x.name.endswith( "pcredemo.c" ):
- return False
- if x.name.endswith( "pcretest.c" ):
- return False
- if x.name.endswith( "unittest.cc" ):
- return False
- if x.name.endswith( "pcregrep.c" ):
- return False
- return True
-
- pcreFiles = []
- pcreFiles += filter( pcreFilter , Glob( "pcre-7.4/*.c" ) )
- pcreFiles += filter( pcreFilter , Glob( "pcre-7.4/*.cc" ) )
- commonFiles += pcreFiles
- allClientFiles += pcreFiles
-
winLibString = "ws2_32.lib kernel32.lib advapi32.lib Psapi.lib"
if force64:
@@ -668,11 +669,15 @@ if nix:
if has_option( "distcc" ):
env["CXX"] = "distcc " + env["CXX"]
+ # -Winvalid-pch Warn if a precompiled header (see Precompiled Headers) is found in the search path but can't be used.
env.Append( CPPFLAGS="-fPIC -fno-strict-aliasing -ggdb -pthread -Wall -Wsign-compare -Wno-unknown-pragmas -Winvalid-pch" )
# env.Append( " -Wconversion" ) TODO: this doesn't really work yet
if linux:
env.Append( CPPFLAGS=" -Werror " )
- env.Append( CPPFLAGS=" -fno-builtin-memcmp " ) # glibc's memcmp is faster than gcc's
+ if not has_option('clang'):
+ env.Append( CPPFLAGS=" -fno-builtin-memcmp " ) # glibc's memcmp is faster than gcc's
+
+ env.Append( CPPDEFINES="_FILE_OFFSET_BITS=64" )
env.Append( CXXFLAGS=" -Wnon-virtual-dtor " )
env.Append( LINKFLAGS=" -fPIC -pthread -rdynamic" )
env.Append( LIBS=[] )
@@ -688,7 +693,7 @@ if nix:
env.Append( CPPFLAGS=" -O0 -fstack-protector " );
env['ENV']['GLIBCXX_FORCE_NEW'] = 1; # play nice with valgrind
else:
- env.Append( CPPFLAGS=" -O3" )
+ env.Append( CPPFLAGS=" -O3 " )
#env.Append( CPPFLAGS=" -fprofile-generate" )
#env.Append( LINKFLAGS=" -fprofile-generate" )
# then:
@@ -717,26 +722,59 @@ if nix:
# pre-compiled headers
if usePCH and 'Gch' in dir( env ):
print( "using precompiled headers" )
+ if has_option('clang'):
+ #env['GCHSUFFIX'] = '.pch' # clang++ uses pch.h.pch rather than pch.h.gch
+ #env.Prepend( CXXFLAGS=' -include pch.h ' ) # clang++ only uses pch from command line
+ print( "ERROR: clang pch is broken for now" )
+ Exit(1)
env['Gch'] = env.Gch( [ "pch.h" ] )[0]
elif os.path.exists('pch.h.gch'):
print( "removing precompiled headers" )
os.unlink('pch.h.gch') # gcc uses the file if it exists
if usev8:
- env.Append( CPPPATH=["../v8/include/"] )
- env.Append( LIBPATH=["../v8/"] )
-
+ env.Prepend( CPPPATH=["../v8/include/"] )
+ env.Prepend( LIBPATH=["../v8/"] )
if "uname" in dir(os):
hacks = buildscripts.findHacks( os.uname() )
if hacks is not None:
hacks.insert( env , { "linux64" : linux64 } )
+if has_option( "ssl" ):
+ env.Append( CPPDEFINES=["MONGO_SSL"] )
+ env.Append( LIBS=["ssl"] )
+ if darwin:
+ env.Append( LIBS=["crypto"] )
+
try:
umask = os.umask(022)
except OSError:
pass
+if not windows:
+ for keysuffix in [ "1" , "2" ]:
+ keyfile = "jstests/libs/key%s" % keysuffix
+ os.chmod( keyfile , stat.S_IWUSR|stat.S_IRUSR )
+
+for x in os.listdir( "third_party" ):
+ if not x.endswith( ".py" ) or x.find( "#" ) >= 0:
+ continue
+
+ shortName = x.rpartition( "." )[0]
+ path = "third_party/%s" % x
+
+
+ myModule = imp.load_module( "third_party_%s" % shortName , open( path , "r" ) , path , ( ".py" , "r" , imp.PY_SOURCE ) )
+ fileLists = { "commonFiles" : commonFiles , "serverOnlyFiles" : serverOnlyFiles , "scriptingFiles" : scriptingFiles }
+
+ options_topass["windows"] = windows
+ options_topass["nix"] = nix
+
+ myModule.configure( env , fileLists , options_topass )
+
+coreServerFiles += scriptingFiles
+
# --- check system ---
def getSysInfo():
@@ -783,7 +821,7 @@ def bigLibString( myenv ):
return s
-def doConfigure( myenv , needPcre=True , shell=False ):
+def doConfigure( myenv , shell=False ):
conf = Configure(myenv)
myenv["LINKFLAGS_CLEAN"] = list( myenv["LINKFLAGS"] )
myenv["LIBS_CLEAN"] = list( myenv["LIBS"] )
@@ -836,10 +874,6 @@ def doConfigure( myenv , needPcre=True , shell=False ):
return False
- if needPcre and not conf.CheckCXXHeader( 'pcrecpp.h' ):
- print( "can't find pcre" )
- Exit(1)
-
if not conf.CheckCXXHeader( "boost/filesystem/operations.hpp" ):
print( "can't find boost headers" )
if shell:
@@ -866,10 +900,6 @@ def doConfigure( myenv , needPcre=True , shell=False ):
if not conf.CheckCXXHeader( "execinfo.h" ):
myenv.Append( CPPDEFINES=[ "NOEXECINFO" ] )
- if nix and needPcre:
- myCheckLib( "pcrecpp" , True )
- myCheckLib( "pcre" , True )
-
myenv["_HAVEPCAP"] = myCheckLib( ["pcap", "wpcap"] )
removeIfInList( myenv["LIBS"] , "pcap" )
removeIfInList( myenv["LIBS"] , "wpcap" )
@@ -880,76 +910,15 @@ def doConfigure( myenv , needPcre=True , shell=False ):
else:
m.configure( conf , myenv )
- # XP_* is for spidermonkey.
- # this is outside of usesm block so don't have to rebuild for java
- if windows:
- myenv.Append( CPPDEFINES=[ "XP_WIN" ] )
- else:
- myenv.Append( CPPDEFINES=[ "XP_UNIX" ] )
-
if solaris:
conf.CheckLib( "nsl" )
- if usesm:
-
- # see http://www.mongodb.org/pages/viewpageattachments.action?pageId=12157032
- J = [ "mozjs" , "js", "js_static" ]
- if windows:
- if msarch == "amd64":
- if release:
- J = [ "js64r", "js", "mozjs" , "js_static" ]
- else:
- J = "js64d"
- print( "looking for js64d.lib for spidermonkey. (available at mongodb.org prebuilt)" );
- else:
- if not force32:
- print( "Assuming a 32 bit build is desired" )
- if release:
- J = [ "js32r", "js", "mozjs" , "js_static" ]
- else:
- J = [ "js32d", "js", "mozjs" , "js_static" ]
-
- myCheckLib( J , True )
- mozHeader = "js"
- if bigLibString(myenv).find( "mozjs" ) >= 0:
- mozHeader = "mozjs"
-
- if not conf.CheckHeader( mozHeader + "/jsapi.h" ):
- if conf.CheckHeader( "jsapi.h" ):
- myenv.Append( CPPDEFINES=[ "OLDJS" ] )
- else:
- print( "no spider monkey headers!" )
- Exit(1)
-
if usev8:
if debugBuild:
myCheckLib( [ "v8_g" , "v8" ] , True )
else:
myCheckLib( "v8" , True )
- if shell:
- haveReadLine = False
- if darwin:
- myenv.Append( CPPDEFINES=[ "USE_READLINE" ] )
- if force64:
- myCheckLib( "readline" , True )
- myCheckLib( "ncurses" , True )
- else:
- myenv.Append( LINKFLAGS=" /usr/lib/libreadline.dylib " )
- elif openbsd:
- myenv.Append( CPPDEFINES=[ "USE_READLINE" ] )
- myCheckLib( "termcap" , True )
- myCheckLib( "readline" , True )
- elif myCheckLib( "readline" , release and nix , staticOnly=release ):
- myenv.Append( CPPDEFINES=[ "USE_READLINE" ] )
- myCheckLib( "ncurses" , staticOnly=release )
- myCheckLib( "tinfo" , staticOnly=release )
- else:
- print( "\n*** notice: no readline library, mongo shell will not have nice interactive line editing ***\n" )
-
- if linux:
- myCheckLib( "rt" , True )
-
# requires ports devel/libexecinfo to be installed
if freebsd or openbsd:
myCheckLib( "execinfo", True )
@@ -1006,6 +975,10 @@ def doConfigure( myenv , needPcre=True , shell=False ):
myenv.Append(LINKCOM=" $STATICFILES")
myenv.Append(STATICFILES=staticlibfiles)
+ if has_option( "tcmalloc" ):
+ myCheckLib( "tcmalloc" , True ); # if successful, appedded 'tcmalloc' to myenv[ LIBS ]
+
+
return conf.Finish()
env = doConfigure( env )
@@ -1076,8 +1049,13 @@ clientEnv.Prepend( LIBS=[ "mongoclient"] )
clientEnv.Prepend( LIBPATH=["."] )
clientEnv["CPPDEFINES"].remove( "MONGO_EXPOSE_MACROS" )
l = clientEnv[ "LIBS" ]
-removeIfInList( l , "pcre" )
-removeIfInList( l , "pcrecpp" )
+
+# profile guided
+#if windows:
+# if release:
+# env.Append( LINKFLAGS="/PGD:test.pgd" )
+# env.Append( LINKFLAGS="/LTCG:PGINSTRUMENT" )
+# env.Append( LINKFLAGS="/LTCG:PGOPTIMIZE" )
testEnv = env.Clone()
testEnv.Append( CPPPATH=["../"] )
@@ -1095,7 +1073,7 @@ def checkErrorCodes():
checkErrorCodes()
# main db target
-mongodOnlyFiles = [ "db/db.cpp" ]
+mongodOnlyFiles = [ "db/db.cpp", "db/compact.cpp" ]
if windows:
mongodOnlyFiles.append( "util/ntservice.cpp" )
mongod = env.Program( "mongod" , commonFiles + coreDbFiles + coreServerFiles + serverOnlyFiles + mongodOnlyFiles )
@@ -1103,7 +1081,7 @@ Default( mongod )
# tools
allToolFiles = commonFiles + coreDbFiles + coreServerFiles + serverOnlyFiles + [ "client/gridfs.cpp", "tools/tool.cpp" ]
-normalTools = [ "dump" , "restore" , "export" , "import" , "files" , "stat" ]
+normalTools = [ "dump" , "restore" , "export" , "import" , "files" , "stat" , "top" ]
env.Alias( "tools" , [ add_exe( "mongo" + x ) for x in normalTools ] )
for x in normalTools:
env.Program( "mongo" + x , allToolFiles + [ "tools/" + x + ".cpp" ] )
@@ -1133,7 +1111,7 @@ clientTests += [ clientEnv.Program( "authTest" , [ "client/examples/authTest.cpp
clientTests += [ clientEnv.Program( "httpClientTest" , [ "client/examples/httpClientTest.cpp" ] ) ]
clientTests += [ clientEnv.Program( "bsondemo" , [ "bson/bsondemo/bsondemo.cpp" ] ) ]
-# testing
+# dbtests test binary
test = testEnv.Program( "test" , Glob( "dbtests/*.cpp" ) )
if windows:
testEnv.Alias( "test" , "test.exe" )
@@ -1144,17 +1122,23 @@ clientTests += [ clientEnv.Program( "clientTest" , [ "client/examples/clientTest
mongosniff_built = False
if darwin or clientEnv["_HAVEPCAP"]:
mongosniff_built = True
- sniffEnv = clientEnv.Clone()
+ sniffEnv = env.Clone()
sniffEnv.Append( CPPDEFINES="MONGO_EXPOSE_MACROS" )
+
if not windows:
sniffEnv.Append( LIBS=[ "pcap" ] )
else:
sniffEnv.Append( LIBS=[ "wpcap" ] )
+
+ sniffEnv.Prepend( LIBPATH=["."] )
+ sniffEnv.Append( LIBS=[ "mongotestfiles" ] )
+
sniffEnv.Program( "mongosniff" , "tools/sniffer.cpp" )
# --- shell ---
-env.JSHeader( "shell/mongo.cpp" , ["shell/utils.js","shell/db.js","shell/mongo.js","shell/mr.js","shell/query.js","shell/collection.js"] )
+# note: if you add a file here, it currently also needs to be added in engine.cpp
+env.JSHeader( "shell/mongo.cpp" , Glob( "shell/utils*.js" ) + [ "shell/db.js","shell/mongo.js","shell/mr.js","shell/query.js","shell/collection.js"] )
env.JSHeader( "shell/mongo-server.cpp" , [ "shell/servers.js"] )
@@ -1168,50 +1152,21 @@ if release and ( ( darwin and force64 ) or linux64 ):
if noshell:
print( "not building shell" )
elif not onlyServer:
- weird = force64 and not windows and not solaris
-
- if weird:
- shellEnv["CFLAGS"].remove("-m64")
- shellEnv["CXXFLAGS"].remove("-m64")
- shellEnv["LINKFLAGS"].remove("-m64")
- shellEnv["CPPPATH"].remove( "/usr/64/include" )
- shellEnv["LIBPATH"].remove( "/usr/64/lib" )
- shellEnv.Append( CPPPATH=filterExists(["/sw/include" , "/opt/local/include"]) )
- shellEnv.Append( LIBPATH=filterExists(["/sw/lib/", "/opt/local/lib" , "/usr/lib", "/usr/local/lib" ]) )
-
l = shellEnv["LIBS"]
- removeIfInList( l , "pcre" )
- removeIfInList( l , "pcrecpp" )
-
if windows:
shellEnv.Append( LIBS=["winmm.lib"] )
coreShellFiles = [ "shell/dbshell.cpp" , "shell/shell_utils.cpp" , "shell/mongo-server.cpp" ]
- if weird:
- shell32BitFiles = coreShellFiles
- for f in allClientFiles:
- shell32BitFiles.append( "32bit/" + str( f ) )
- for f in scriptingFiles:
- shell32BitFiles.append( "32bit/" + str( f ) )
- for f in processInfoFiles:
- shell32BitFiles.append( "32bit/" + str( f ) )
- shellEnv.VariantDir( "32bit" , "." , duplicate=1 )
- else:
- shellEnv.Prepend( LIBPATH=[ "." ] )
-
- shellEnv = doConfigure( shellEnv , needPcre=False , shell=True )
+ coreShellFiles.append( "third_party/linenoise/linenoise.cpp" )
- if weird:
- mongo = shellEnv.Program( "mongo" , shell32BitFiles )
- else:
- shellEnv.Prepend( LIBS=[ "mongoshellfiles"] )
- mongo = shellEnv.Program( "mongo" , coreShellFiles )
+ shellEnv.Prepend( LIBPATH=[ "." ] )
+
+ shellEnv = doConfigure( shellEnv , shell=True )
- if weird:
- Depends( "32bit/shell/mongo.cpp" , "shell/mongo.cpp" )
- Depends( "32bit/shell/mongo-server.cpp" , "shell/mongo-server.cpp" )
+ shellEnv.Prepend( LIBS=[ "mongoshellfiles"] )
+ mongo = shellEnv.Program( "mongo" , coreShellFiles )
# ---- RUNNING TESTS ----
@@ -1258,7 +1213,7 @@ if not onlyServer and not noshell:
addSmoketest( "smokeClone", [ "mongo", "mongod" ] )
addSmoketest( "smokeRepl", [ "mongo", "mongod", "mongobridge" ] )
addSmoketest( "smokeReplSets", [ "mongo", "mongod", "mongobridge" ] )
- addSmoketest( "smokeDur", [ add_exe( "mongo" ) , add_exe( "mongod" ) ] )
+ addSmoketest( "smokeDur", [ add_exe( "mongo" ) , add_exe( "mongod" ) , add_exe('mongorestore') ] )
addSmoketest( "smokeDisk", [ add_exe( "mongo" ), add_exe( "mongod" ), add_exe( "mongodump" ), add_exe( "mongorestore" ) ] )
addSmoketest( "smokeAuth", [ add_exe( "mongo" ), add_exe( "mongod" ) ] )
addSmoketest( "smokeParallel", [ add_exe( "mongo" ), add_exe( "mongod" ) ] )
@@ -1500,7 +1455,7 @@ env.Alias( "core" , [ add_exe( "mongo" ) , add_exe( "mongod" ) , add_exe( "mongo
#headers
if installSetup.headers:
- for id in [ "", "util/", "util/mongoutils/", "util/concurrency/", "db/" , "db/stats/" , "db/repl/" , "client/" , "bson/", "bson/util/" , "s/" , "scripting/" ]:
+ for id in [ "", "util/", "util/net/", "util/mongoutils/", "util/concurrency/", "db/" , "db/stats/" , "db/repl/" , "db/ops/" , "client/" , "bson/", "bson/util/" , "s/" , "scripting/" ]:
env.Install( installDir + "/" + installSetup.headerRoot + "/mongo/" + id , Glob( id + "*.h" ) )
env.Install( installDir + "/" + installSetup.headerRoot + "/mongo/" + id , Glob( id + "*.hpp" ) )
diff --git a/bson/bson-inl.h b/bson/bson-inl.h
index 5b4c490..b86d667 100644
--- a/bson/bson-inl.h
+++ b/bson/bson-inl.h
@@ -1,4 +1,7 @@
-// bsoninlines.h
+/** @file bsoninlines.h
+ a goal here is that the most common bson methods can be used inline-only, a la boost.
+ thus some things are inline that wouldn't necessarily be otherwise.
+*/
/* Copyright 2009 10gen Inc.
*
@@ -18,18 +21,158 @@
#pragma once
#include <map>
-#include "util/atomic_int.h"
-#include "util/misc.h"
-#include "../util/hex.h"
+#include <limits>
+
+#if defined(_WIN32)
+#undef max
+#undef min
+#endif
namespace mongo {
- inline BSONObjIterator BSONObj::begin() {
+ inline bool isNaN(double d) {
+ return d != d;
+ }
+
+ /* must be same type when called, unless both sides are #s
+ this large function is in header to facilitate inline-only use of bson
+ */
+ inline int compareElementValues(const BSONElement& l, const BSONElement& r) {
+ int f;
+
+ switch ( l.type() ) {
+ case EOO:
+ case Undefined: // EOO and Undefined are same canonicalType
+ case jstNULL:
+ case MaxKey:
+ case MinKey:
+ f = l.canonicalType() - r.canonicalType();
+ if ( f<0 ) return -1;
+ return f==0 ? 0 : 1;
+ case Bool:
+ return *l.value() - *r.value();
+ case Timestamp:
+ // unsigned compare for timestamps - note they are not really dates but (ordinal + time_t)
+ if ( l.date() < r.date() )
+ return -1;
+ return l.date() == r.date() ? 0 : 1;
+ case Date:
+ {
+ long long a = (long long) l.Date().millis;
+ long long b = (long long) r.Date().millis;
+ if( a < b )
+ return -1;
+ return a == b ? 0 : 1;
+ }
+ case NumberLong:
+ if( r.type() == NumberLong ) {
+ long long L = l._numberLong();
+ long long R = r._numberLong();
+ if( L < R ) return -1;
+ if( L == R ) return 0;
+ return 1;
+ }
+ goto dodouble;
+ case NumberInt:
+ if( r.type() == NumberInt ) {
+ int L = l._numberInt();
+ int R = r._numberInt();
+ if( L < R ) return -1;
+ return L == R ? 0 : 1;
+ }
+ // else fall through
+ case NumberDouble:
+dodouble:
+ {
+ double left = l.number();
+ double right = r.number();
+ if( left < right )
+ return -1;
+ if( left == right )
+ return 0;
+ if( isNaN(left) )
+ return isNaN(right) ? 0 : -1;
+ return 1;
+ }
+ case jstOID:
+ return memcmp(l.value(), r.value(), 12);
+ case Code:
+ case Symbol:
+ case String:
+ /* todo: a utf sort order version one day... */
+ {
+ // we use memcmp as we allow zeros in UTF8 strings
+ int lsz = l.valuestrsize();
+ int rsz = r.valuestrsize();
+ int common = min(lsz, rsz);
+ int res = memcmp(l.valuestr(), r.valuestr(), common);
+ if( res )
+ return res;
+ // longer string is the greater one
+ return lsz-rsz;
+ }
+ case Object:
+ case Array:
+ return l.embeddedObject().woCompare( r.embeddedObject() );
+ case DBRef: {
+ int lsz = l.valuesize();
+ int rsz = r.valuesize();
+ if ( lsz - rsz != 0 ) return lsz - rsz;
+ return memcmp(l.value(), r.value(), lsz);
+ }
+ case BinData: {
+ int lsz = l.objsize(); // our bin data size in bytes, not including the subtype byte
+ int rsz = r.objsize();
+ if ( lsz - rsz != 0 ) return lsz - rsz;
+ return memcmp(l.value()+4, r.value()+4, lsz+1);
+ }
+ case RegEx: {
+ int c = strcmp(l.regex(), r.regex());
+ if ( c )
+ return c;
+ return strcmp(l.regexFlags(), r.regexFlags());
+ }
+ case CodeWScope : {
+ f = l.canonicalType() - r.canonicalType();
+ if ( f )
+ return f;
+ f = strcmp( l.codeWScopeCode() , r.codeWScopeCode() );
+ if ( f )
+ return f;
+ f = strcmp( l.codeWScopeScopeData() , r.codeWScopeScopeData() );
+ if ( f )
+ return f;
+ return 0;
+ }
+ default:
+ assert( false);
+ }
+ return -1;
+ }
+
+ /* wo = "well ordered" */
+ inline int BSONElement::woCompare( const BSONElement &e,
+ bool considerFieldName ) const {
+ int lt = (int) canonicalType();
+ int rt = (int) e.canonicalType();
+ int x = lt - rt;
+ if( x != 0 && (!isNumber() || !e.isNumber()) )
+ return x;
+ if ( considerFieldName ) {
+ x = strcmp(fieldName(), e.fieldName());
+ if ( x != 0 )
+ return x;
+ }
+ x = compareElementValues(*this, e);
+ return x;
+ }
+
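For illustration, a minimal sketch of how the comparison routine above orders mixed numeric types (the include path and the BSON() helper macro are assumptions based on the headers touched in this patch):

    #include "bson/bson.h"   // assumed include path for the standalone bson headers
    using namespace mongo;

    int main() {
        BSONObj a = BSON( "x" << 5 );     // stored as NumberInt
        BSONObj b = BSON( "x" << 5.0 );   // stored as NumberDouble
        // same canonical type, so compareElementValues() falls through to the double path
        return a.firstElement().woCompare( b.firstElement(), false ) == 0 ? 0 : 1;   // exits 0
    }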
+ inline BSONObjIterator BSONObj::begin() const {
return BSONObjIterator(*this);
}
inline BSONObj BSONElement::embeddedObjectUserCheck() const {
- if ( isABSONObj() )
+ if ( MONGO_likely(isABSONObj()) )
return BSONObj(value());
stringstream ss;
ss << "invalid parameter: expected an object (" << fieldName() << ")";
@@ -48,6 +191,21 @@ namespace mongo {
return BSONObj( value() + 4 + 4 + strSizeWNull );
}
+ // deep (full) equality
+ inline bool BSONObj::equal(const BSONObj &rhs) const {
+ BSONObjIterator i(*this);
+ BSONObjIterator j(rhs);
+ BSONElement l,r;
+ do {
+ // so far, equal...
+ l = i.next();
+ r = j.next();
+ if ( l.eoo() )
+ return r.eoo();
+ } while( l == r );
+ return false;
+ }
+
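A small sketch of the distinction this sets up between element-wise equality and the byte-wise binaryEqual() (renamed from woEqual later in this diff); it assumes BSONElement's operator== compares by value, as woCompare does:

    #include "bson/bson.h"   // assumed include path
    using namespace mongo;

    int main() {
        BSONObj asInt    = BSON( "n" << 1 );     // NumberInt encoding
        BSONObj asDouble = BSON( "n" << 1.0 );   // NumberDouble encoding
        bool deep   = asInt.equal( asDouble );        // true: values compare equal element-wise
        bool binary = asInt.binaryEqual( asDouble );  // false: the encoded bytes differ
        return ( deep && !binary ) ? 0 : 1;
    }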
inline NOINLINE_DECL void BSONObj::_assertInvalid() const {
StringBuilder ss;
int os = objsize();
@@ -64,9 +222,10 @@ namespace mongo {
getOwned() method. the presumption being that is better.
*/
inline NOINLINE_DECL BSONObj BSONObj::copy() const {
- char *p = (char*) malloc(objsize());
- memcpy(p, objdata(), objsize());
- return BSONObj(p, true);
+ Holder *h = (Holder*) malloc(objsize() + sizeof(unsigned));
+ h->zero();
+ memcpy(h->data, objdata(), objsize());
+ return BSONObj(h);
}
inline BSONObj BSONObj::getOwned() const {
@@ -88,16 +247,18 @@ namespace mongo {
return b.obj();
}
- inline bool BSONObj::hasElement(const char *name) const {
- if ( !isEmpty() ) {
- BSONObjIterator it(*this);
- while ( it.moreWithEOO() ) {
- BSONElement e = it.next();
- if ( strcmp(name, e.fieldName()) == 0 )
- return true;
+ inline void BSONObj::getFields(unsigned n, const char **fieldNames, BSONElement *fields) const {
+ BSONObjIterator i(*this);
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ const char *p = e.fieldName();
+ for( unsigned i = 0; i < n; i++ ) {
+ if( strcmp(p, fieldNames[i]) == 0 ) {
+ fields[i] = e;
+ break;
+ }
}
}
- return false;
}
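A sketch of using the new getFields() above to pull several fields in one pass over the object (include path assumed):

    #include "bson/bson.h"
    using namespace mongo;

    int main() {
        BSONObj o = BSON( "name" << "sara" << "age" << 33 << "extra" << true );
        const char *names[] = { "age", "missing" };
        BSONElement fields[2];                 // default-constructed elements are eoo()
        o.getFields( 2, names, fields );
        // fields[0] now holds the "age" element; fields[1] is untouched and still eoo()
        return ( fields[0].numberInt() == 33 && fields[1].eoo() ) ? 0 : 1;
    }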
inline BSONElement BSONObj::getField(const StringData& name) const {
@@ -110,6 +271,21 @@ namespace mongo {
return BSONElement();
}
+ inline int BSONObj::getIntField(const char *name) const {
+ BSONElement e = getField(name);
+ return e.isNumber() ? (int) e.number() : std::numeric_limits< int >::min();
+ }
+
+ inline bool BSONObj::getBoolField(const char *name) const {
+ BSONElement e = getField(name);
+ return e.type() == Bool ? e.boolean() : false;
+ }
+
+ inline const char * BSONObj::getStringField(const char *name) const {
+ BSONElement e = getField(name);
+ return e.type() == String ? e.valuestr() : "";
+ }
+
/* add all the fields from the object specified to this object */
inline BSONObjBuilder& BSONObjBuilder::appendElements(BSONObj x) {
BSONObjIterator it(x);
@@ -141,7 +317,7 @@ namespace mongo {
}
- inline bool BSONObj::isValid() {
+ inline bool BSONObj::isValid() const {
int x = objsize();
return x > 0 && x <= BSONObjMaxInternalSize;
}
@@ -302,8 +478,6 @@ namespace mongo {
s << ( isArray ? " ]" : " }" );
}
- extern unsigned getRandomNumber();
-
inline void BSONElement::validate() const {
const BSONType t = type();
@@ -398,7 +572,7 @@ namespace mongo {
break;
case RegEx: {
const char *p = value();
- size_t len1 = ( maxLen == -1 ) ? strlen( p ) : mongo::strnlen( p, remain );
+ size_t len1 = ( maxLen == -1 ) ? strlen( p ) : (size_t)mongo::strnlen( p, remain );
//massert( 10318 , "Invalid regex string", len1 != -1 ); // ERH - 4/28/10 - don't think this does anything
p = p + len1 + 1;
size_t len2;
@@ -417,7 +591,7 @@ namespace mongo {
StringBuilder ss;
ss << "BSONElement: bad type " << (int) type();
string msg = ss.str();
- massert( 10320 , msg.c_str(),false);
+ massert( 13655 , msg.c_str(),false);
}
}
totalSize = x + fieldNameSize() + 1; // BSONType
@@ -425,6 +599,72 @@ namespace mongo {
return totalSize;
}
+ inline int BSONElement::size() const {
+ if ( totalSize >= 0 )
+ return totalSize;
+
+ int x = 0;
+ switch ( type() ) {
+ case EOO:
+ case Undefined:
+ case jstNULL:
+ case MaxKey:
+ case MinKey:
+ break;
+ case mongo::Bool:
+ x = 1;
+ break;
+ case NumberInt:
+ x = 4;
+ break;
+ case Timestamp:
+ case mongo::Date:
+ case NumberDouble:
+ case NumberLong:
+ x = 8;
+ break;
+ case jstOID:
+ x = 12;
+ break;
+ case Symbol:
+ case Code:
+ case mongo::String:
+ x = valuestrsize() + 4;
+ break;
+ case DBRef:
+ x = valuestrsize() + 4 + 12;
+ break;
+ case CodeWScope:
+ case Object:
+ case mongo::Array:
+ x = objsize();
+ break;
+ case BinData:
+ x = valuestrsize() + 4 + 1/*subtype*/;
+ break;
+ case RegEx:
+ {
+ const char *p = value();
+ size_t len1 = strlen(p);
+ p = p + len1 + 1;
+ size_t len2;
+ len2 = strlen( p );
+ x = (int) (len1 + 1 + len2 + 1);
+ }
+ break;
+ default:
+ {
+ StringBuilder ss;
+ ss << "BSONElement: bad type " << (int) type();
+ string msg = ss.str();
+ massert(10320 , msg.c_str(),false);
+ }
+ }
+ totalSize = x + fieldNameSize() + 1; // BSONType
+
+ return totalSize;
+ }
+
inline string BSONElement::toString( bool includeFieldName, bool full ) const {
StringBuilder s;
toString(s, includeFieldName, full);
@@ -438,7 +678,7 @@ namespace mongo {
s << "EOO";
break;
case mongo::Date:
- s << "new Date(" << date() << ')';
+ s << "new Date(" << (long long) date() << ')';
break;
case RegEx: {
s << "/" << regex() << '/';
@@ -492,8 +732,8 @@ namespace mongo {
case Symbol:
case mongo::String:
s << '"';
- if ( !full && valuestrsize() > 80 ) {
- s.write(valuestr(), 70);
+ if ( !full && valuestrsize() > 160 ) {
+ s.write(valuestr(), 150);
s << "...\"";
}
else {
@@ -662,4 +902,87 @@ namespace mongo {
b.append( q , t );
return BSONFieldValue<BSONObj>( _name , b.obj() );
}
+
+ // used by jsonString()
+ inline string escape( string s , bool escape_slash=false) {
+ StringBuilder ret;
+ for ( string::iterator i = s.begin(); i != s.end(); ++i ) {
+ switch ( *i ) {
+ case '"':
+ ret << "\\\"";
+ break;
+ case '\\':
+ ret << "\\\\";
+ break;
+ case '/':
+ ret << (escape_slash ? "\\/" : "/");
+ break;
+ case '\b':
+ ret << "\\b";
+ break;
+ case '\f':
+ ret << "\\f";
+ break;
+ case '\n':
+ ret << "\\n";
+ break;
+ case '\r':
+ ret << "\\r";
+ break;
+ case '\t':
+ ret << "\\t";
+ break;
+ default:
+ if ( *i >= 0 && *i <= 0x1f ) {
+ //TODO: these should be utf16 code-units not bytes
+ char c = *i;
+ ret << "\\u00" << toHexLower(&c, 1);
+ }
+ else {
+ ret << *i;
+ }
+ }
+ }
+ return ret.str();
+ }
+
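For reference, what the escape() helper above produces (it backs jsonString()); a sketch, include path assumed:

    #include <iostream>
    #include "bson/bson.h"
    using namespace mongo;

    int main() {
        // control characters, quotes and backslashes come back JSON-escaped
        std::cout << escape( "line1\nline2\t\"quoted\"" ) << std::endl;
        // prints: line1\nline2\t\"quoted\"
        return 0;
    }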
+ inline string BSONObj::hexDump() const {
+ stringstream ss;
+ const char *d = objdata();
+ int size = objsize();
+ for( int i = 0; i < size; ++i ) {
+ ss.width( 2 );
+ ss.fill( '0' );
+ ss << hex << (unsigned)(unsigned char)( d[ i ] ) << dec;
+ if ( ( d[ i ] >= '0' && d[ i ] <= '9' ) || ( d[ i ] >= 'A' && d[ i ] <= 'z' ) )
+ ss << '\'' << d[ i ] << '\'';
+ if ( i != size - 1 )
+ ss << ' ';
+ }
+ return ss.str();
+ }
+
+ inline void BSONObjBuilder::appendKeys( const BSONObj& keyPattern , const BSONObj& values ) {
+ BSONObjIterator i(keyPattern);
+ BSONObjIterator j(values);
+
+ while ( i.more() && j.more() ) {
+ appendAs( j.next() , i.next().fieldName() );
+ }
+
+ assert( ! i.more() );
+ assert( ! j.more() );
+ }
+
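A sketch of appendKeys() above, which pairs values positionally with the field names of a key pattern (the asserts require both objects to have the same number of fields):

    #include "bson/bson.h"
    using namespace mongo;

    int main() {
        BSONObjBuilder b;
        b.appendKeys( BSON( "a" << 1 << "b" << -1 ),      // key pattern supplies the names
                      BSON( "x" << 4 << "y" << "hi" ) );  // values are taken in order
        BSONObj key = b.obj();                            // { a: 4, b: "hi" }
        return key.getIntField( "a" ) == 4 ? 0 : 1;
    }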
+ inline BSONObj BSONObj::removeField(const StringData& name) const {
+ BSONObjBuilder b;
+ BSONObjIterator i(*this);
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ const char *fname = e.fieldName();
+ if( strcmp(name.data(), fname) )
+ b.append(e);
+ }
+ return b.obj();
+ }
}
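And a sketch of removeField(), which rebuilds the object minus one field (hence the "slowish" note in bsonobj.h below):

    #include "bson/bson.h"
    using namespace mongo;

    int main() {
        BSONObj o = BSON( "a" << 1 << "b" << 2 << "c" << 3 );
        BSONObj trimmed = o.removeField( "b" );   // { a: 1, c: 3 }; o itself is unchanged
        return ( trimmed.hasField( "b" ) || !o.hasField( "b" ) ) ? 1 : 0;
    }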
diff --git a/bson/bson.h b/bson/bson.h
index ba1b751..9515adf 100644
--- a/bson/bson.h
+++ b/bson/bson.h
@@ -1,11 +1,9 @@
-/* NOTE: Standalone bson header for when not using MongoDB.
- See also: bsondemo.
+/** @file bson.h
- MongoDB includes ../db/jsobj.h instead. This file, however, pulls in much less code / dependencies.
-*/
+ Main bson include file for mongodb c++ clients. MongoDB includes ../db/jsobj.h instead.
+ This file, however, pulls in much less code / dependencies.
-/** @file bson.h
- BSON classes
+ @see bsondemo
*/
/*
@@ -25,7 +23,7 @@
*/
/**
- bo and its helpers
+ Main include file for C++ BSON module when using standalone (sans MongoDB client).
"BSON" stands for "binary JSON" -- ie a binary way to represent objects that would be
represented in JSON (plus a few extensions useful for databases & other languages).
@@ -42,10 +40,11 @@
*/
#endif
+#include <cstdlib>
+#include <memory>
#include <iostream>
#include <sstream>
#include <boost/utility.hpp>
-#include "util/builder.h"
namespace bson {
@@ -56,7 +55,7 @@ namespace bson {
public:
assertion( unsigned u , const string& s )
: id( u ) , msg( s ) {
- mongo::StringBuilder ss;
+ stringstream ss;
ss << "BsonAssertion id: " << u << " " << s;
full = ss.str();
}
@@ -101,23 +100,11 @@ namespace mongo {
#endif
}
-#include "../bson/bsontypes.h"
-#include "../bson/oid.h"
-#include "../bson/bsonelement.h"
-#include "../bson/bsonobj.h"
-#include "../bson/bsonmisc.h"
-#include "../bson/bsonobjbuilder.h"
-#include "../bson/bsonobjiterator.h"
-#include "../bson/bson-inl.h"
-
-namespace mongo {
-
- inline unsigned getRandomNumber() {
-#if defined(_WIN32)
- return rand();
-#else
- return random();
-#endif
- }
-
-}
+#include "util/builder.h"
+#include "bsontypes.h"
+#include "oid.h"
+#include "bsonelement.h"
+#include "bsonobj.h"
+#include "bsonobjbuilder.h"
+#include "bsonobjiterator.h"
+#include "bson-inl.h"
diff --git a/bson/bsondemo/bsondemo.cpp b/bson/bsondemo/bsondemo.cpp
index ec83f5e..b53a7b3 100644
--- a/bson/bsondemo/bsondemo.cpp
+++ b/bson/bsondemo/bsondemo.cpp
@@ -4,6 +4,12 @@
Requires boost (headers only).
Works headers only (the parts actually exercised herein that is - some functions require .cpp files).
+
+ To build and run:
+ g++ -o bsondemo bsondemo.cpp
+ ./bsondemo
+
+ Windows: project files are available in this directory for bsondemo.cpp for use with Visual Studio.
*/
/*
diff --git a/bson/bsondemo/bsondemo.vcxproj b/bson/bsondemo/bsondemo.vcxproj
index bb82a50..2ad5389 100644
--- a/bson/bsondemo/bsondemo.vcxproj
+++ b/bson/bsondemo/bsondemo.vcxproj
@@ -89,7 +89,7 @@
<ClCompile>
<Optimization>Disabled</Optimization>
<AdditionalIncludeDirectories>c:\boost;\boost</AdditionalIncludeDirectories>
- <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <PreprocessorDefinitions>_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<MinimalRebuild>No</MinimalRebuild>
<BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>
<RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary>
@@ -109,7 +109,7 @@
<ClCompile>
<Optimization>Disabled</Optimization>
<AdditionalIncludeDirectories>c:\boost;\boost</AdditionalIncludeDirectories>
- <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <PreprocessorDefinitions>_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>
<RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary>
<PrecompiledHeader>
@@ -128,7 +128,7 @@
<ClCompile>
<Optimization>MaxSpeed</Optimization>
<IntrinsicFunctions>true</IntrinsicFunctions>
- <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <PreprocessorDefinitions>NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<RuntimeLibrary>MultiThreadedDLL</RuntimeLibrary>
<FunctionLevelLinking>true</FunctionLevelLinking>
<PrecompiledHeader>
@@ -151,7 +151,7 @@
<ClCompile>
<Optimization>MaxSpeed</Optimization>
<IntrinsicFunctions>true</IntrinsicFunctions>
- <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <PreprocessorDefinitions>NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<RuntimeLibrary>MultiThreadedDLL</RuntimeLibrary>
<FunctionLevelLinking>true</FunctionLevelLinking>
<PrecompiledHeader>
diff --git a/bson/bsonelement.h b/bson/bsonelement.h
index 23d59fa..5487d8d 100644
--- a/bson/bsonelement.h
+++ b/bson/bsonelement.h
@@ -20,6 +20,14 @@
#include <vector>
#include <string.h>
#include "util/builder.h"
+#include "bsontypes.h"
+
+namespace mongo {
+ class OpTime;
+ class BSONObj;
+ class BSONElement;
+ class BSONObjBuilder;
+}
namespace bson {
typedef mongo::BSONElement be;
@@ -29,9 +37,6 @@ namespace bson {
namespace mongo {
- class OpTime;
- class BSONElement;
-
/* l and r MUST have same type when called: check that first. */
int compareElementValues(const BSONElement& l, const BSONElement& r);
@@ -120,7 +125,8 @@ namespace mongo {
/** Size of the element.
@param maxLen If maxLen is specified, don't scan more than maxLen bytes to calculate size.
*/
- int size( int maxLen = -1 ) const;
+ int size( int maxLen ) const;
+ int size() const;
/** Wrap this element up as a singleton object. */
BSONObj wrap() const;
@@ -155,15 +161,18 @@ namespace mongo {
return *value() ? true : false;
}
+ bool booleanSafe() const { return isBoolean() && boolean(); }
+
/** Retrieve a java style date value from the element.
Ensure element is of type Date before calling.
+ @see Bool(), trueValue()
*/
Date_t date() const {
return *reinterpret_cast< const Date_t* >( value() );
}
/** Convert the value to boolean, regardless of its type, in a javascript-like fashion
- (i.e., treat zero and null as false).
+ (i.e., treats zero and null and eoo as false).
*/
bool trueValue() const;
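A sketch contrasting the accessors documented above: boolean() requires type Bool, trueValue() is javascript-like, and booleanSafe() is the new type-checked variant (include path assumed):

    #include "bson/bson.h"
    using namespace mongo;

    int main() {
        BSONObj o = BSON( "flag" << true << "n" << 0 );
        bool a = o.getField( "flag" ).booleanSafe();   // true: element is Bool and set
        bool b = o.getField( "n" ).trueValue();        // false: numeric zero is falsy
        bool c = o.getField( "missing" ).trueValue();  // false: eoo is treated as false
        bool d = o.getField( "n" ).booleanSafe();      // false: not of type Bool
        return ( a && !b && !c && !d ) ? 0 : 1;
    }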
@@ -203,7 +212,9 @@ namespace mongo {
}
/** Size (length) of a string element.
- You must assure of type String first. */
+ Ensure the element is of type String before calling.
+ @return string size including terminating null
+ */
int valuestrsize() const {
return *reinterpret_cast< const int* >( value() );
}
@@ -359,6 +370,7 @@ namespace mongo {
return *reinterpret_cast< const mongo::OID* >( start );
}
+ /** this does not use fieldName in the comparison, just the value */
bool operator<( const BSONElement& other ) const {
int x = (int)canonicalType() - (int)other.canonicalType();
if ( x < 0 ) return true;
@@ -366,19 +378,30 @@ namespace mongo {
return compareElementValues(*this,other) < 0;
}
- // If maxLen is specified, don't scan more than maxLen bytes.
- explicit BSONElement(const char *d, int maxLen = -1) : data(d) {
- fieldNameSize_ = -1;
- if ( eoo() )
+ // @param maxLen don't scan more than maxLen bytes
+ explicit BSONElement(const char *d, int maxLen) : data(d) {
+ if ( eoo() ) {
+ totalSize = 1;
fieldNameSize_ = 0;
+ }
else {
+ totalSize = -1;
+ fieldNameSize_ = -1;
if ( maxLen != -1 ) {
int size = (int) strnlen( fieldName(), maxLen - 1 );
massert( 10333 , "Invalid field name", size != -1 );
fieldNameSize_ = size + 1;
}
}
+ }
+
+ explicit BSONElement(const char *d) : data(d) {
+ fieldNameSize_ = -1;
totalSize = -1;
+ if ( eoo() ) {
+ fieldNameSize_ = 0;
+ totalSize = 1;
+ }
}
string _asCode() const;
@@ -399,7 +422,10 @@ namespace mongo {
const BSONElement& chk(int t) const {
if ( t != type() ) {
StringBuilder ss;
- ss << "wrong type for BSONElement (" << fieldName() << ") " << type() << " != " << t;
+ if( eoo() )
+ ss << "field not found, expected type " << t;
+ else
+ ss << "wrong type for field (" << fieldName() << ") " << type() << " != " << t;
uasserted(13111, ss.str() );
}
return *this;
@@ -477,7 +503,7 @@ namespace mongo {
return true;
}
- /** True if element is of a numeric type. */
+ /** @return true if element is of a numeric type. */
inline bool BSONElement::isNumber() const {
switch( type() ) {
case NumberLong:
diff --git a/bson/bsonmisc.h b/bson/bsonmisc.h
index 96be12a..8abb487 100644
--- a/bson/bsonmisc.h
+++ b/bson/bsonmisc.h
@@ -29,20 +29,16 @@ namespace mongo {
class BSONObjCmp {
public:
- BSONObjCmp( const BSONObj &_order = BSONObj() ) : order( _order ) {}
+ BSONObjCmp( const BSONObj &order = BSONObj() ) : _order( order ) {}
bool operator()( const BSONObj &l, const BSONObj &r ) const {
- return l.woCompare( r, order ) < 0;
+ return l.woCompare( r, _order ) < 0;
}
+ BSONObj order() const { return _order; }
private:
- BSONObj order;
+ BSONObj _order;
};
- class BSONObjCmpDefaultOrder : public BSONObjCmp {
- public:
- BSONObjCmpDefaultOrder() : BSONObjCmp( BSONObj() ) {}
- };
-
- typedef set< BSONObj, BSONObjCmpDefaultOrder > BSONObjSetDefaultOrder;
+ typedef set<BSONObj,BSONObjCmp> BSONObjSet;
enum FieldCompareResult {
LEFT_SUBFIELD = -2,
@@ -202,4 +198,6 @@ namespace mongo {
int _sizes[SIZE];
};
+ // considers order
+ bool fieldsMatch(const BSONObj& lhs, const BSONObj& rhs);
}
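A sketch of the renamed comparator and the new BSONObjSet typedef above, a set ordered by a key pattern (names in the example are illustrative):

    #include "bson/bson.h"
    using namespace mongo;

    int main() {
        BSONObjCmp byA( BSON( "a" << 1 ) );   // order members by ascending "a"
        BSONObjSet s( byA );
        s.insert( BSON( "a" << 2 ) );
        s.insert( BSON( "a" << 1 ) );
        // iteration now yields { a: 1 } before { a: 2 }
        return s.begin()->getIntField( "a" ) == 1 ? 0 : 1;
    }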
diff --git a/bson/bsonobj.h b/bson/bsonobj.h
index 3ca6b8c..9e948f3 100644
--- a/bson/bsonobj.h
+++ b/bson/bsonobj.h
@@ -17,15 +17,18 @@
#pragma once
+#include <boost/intrusive_ptr.hpp>
#include <set>
#include <list>
#include <vector>
+#include "util/atomic_int.h"
#include "util/builder.h"
#include "stringdata.h"
namespace mongo {
typedef set< BSONElement, BSONElementCmpWithoutField > BSONElementSet;
+ typedef multiset< BSONElement, BSONElementCmpWithoutField > BSONElementMSet;
/**
C++ representation of a "BSON" object -- that is, an extended JSON-style
@@ -69,11 +72,19 @@ namespace mongo {
public:
/** Construct a BSONObj from data in the proper format.
- @param ifree true if the BSONObj should free() the msgdata when
- it destructs.
+ * Use this constructor when something else owns msgdata's buffer
*/
- explicit BSONObj(const char *msgdata, bool ifree = false) {
- init(msgdata, ifree);
+ explicit BSONObj(const char *msgdata) {
+ init(msgdata);
+ }
+
+ /** Construct a BSONObj from data in the proper format.
+ * Use this constructor when you want BSONObj to free(holder) when it is no longer needed
+ * BSONObj::Holder has an extra 4 bytes for a ref-count before the start of the object
+ */
+ class Holder;
+ explicit BSONObj(Holder* holder) {
+ init(holder);
}
explicit BSONObj(const Record *r);
@@ -81,7 +92,9 @@ namespace mongo {
/** Construct an empty BSONObj -- that is, {}. */
BSONObj();
- ~BSONObj() { /*defensive:*/ _objdata = 0; }
+ ~BSONObj() {
+ _objdata = 0; // defensive
+ }
/**
A BSONObj can use a buffer it "owns" or one it does not.
@@ -113,7 +126,9 @@ namespace mongo {
*/
bool isOwned() const { return _holder.get() != 0; }
- /* make sure the data buffer is under the control of this BSONObj and not a remote buffer */
+ /** assure the data buffer is under the control of this BSONObj and not a remote buffer
+ @see isOwned()
+ */
BSONObj getOwned() const;
/** @return a new full (and owned) copy of the object. */
@@ -133,6 +148,11 @@ namespace mongo {
/** note: addFields always adds _id even if not specified */
int addFields(BSONObj& from, set<string>& fields); /* returns n added */
+ /** remove specified field and return a new object with the remaining fields.
+ slowish as builds a full new object
+ */
+ BSONObj removeField(const StringData& name) const;
+
/** returns # of top level fields in the object
note: iterates to count the fields
*/
@@ -141,20 +161,26 @@ namespace mongo {
/** adds the field names to the fields set. does NOT clear it (appends). */
int getFieldNames(set<string>& fields) const;
- /** return has eoo() true if no match
- supports "." notation to reach into embedded objects
+ /** @return the specified element. element.eoo() will be true if not found.
+ @param name field to find. supports dot (".") notation to reach into embedded objects.
+ for example "x.y" means "in the nested object in field x, retrieve field y"
*/
BSONElement getFieldDotted(const char *name) const;
- /** return has eoo() true if no match
- supports "." notation to reach into embedded objects
+ /** @return the specified element. element.eoo() will be true if not found.
+ @param name field to find. supports dot (".") notation to reach into embedded objects.
+ for example "x.y" means "in the nested object in field x, retrieve field y"
*/
BSONElement getFieldDotted(const string& name) const {
return getFieldDotted( name.c_str() );
}
- /** Like getFieldDotted(), but expands multikey arrays and returns all matching objects
+ /** Like getFieldDotted(), but expands arrays and returns all matching objects.
+ * Turning off expandLastArray allows you to retrieve nested array objects instead of
+ * their contents.
*/
- void getFieldsDotted(const StringData& name, BSONElementSet &ret ) const;
+ void getFieldsDotted(const StringData& name, BSONElementSet &ret, bool expandLastArray = true ) const;
+ void getFieldsDotted(const StringData& name, BSONElementMSet &ret, bool expandLastArray = true ) const;
+
/** Like getFieldDotted(), but returns first array encountered while traversing the
dotted fields of name. The name variable is updated to represent field
names with respect to the returned element. */
@@ -165,6 +191,14 @@ namespace mongo {
*/
BSONElement getField(const StringData& name) const;
+ /** Get several fields at once. This is faster than separate getField() calls as the size of
+ elements iterated can then be calculated only once each.
+ @param n number of fieldNames, and number of elements in the fields array
+ @param fields if a field is found its element is stored in its corresponding position in this array.
+ if not found the array element is unchanged.
+ */
+ void getFields(unsigned n, const char **fieldNames, BSONElement *fields) const;
+
/** Get the field of the specified name. eoo() is true on the returned
element if not found.
*/
@@ -184,7 +218,9 @@ namespace mongo {
}
/** @return true if field exists */
- bool hasField( const char * name ) const { return ! getField( name ).eoo(); }
+ bool hasField( const char * name ) const { return !getField(name).eoo(); }
+ /** @return true if field exists */
+ bool hasElement(const char *name) const { return hasField(name); }
/** @return "" if DNE or wrong type */
const char * getStringField(const char *name) const;
@@ -195,7 +231,9 @@ namespace mongo {
/** @return INT_MIN if not present - does some type conversions */
int getIntField(const char *name) const;
- /** @return false if not present */
+ /** @return false if not present
+ @see BSONElement::trueValue()
+ */
bool getBoolField(const char *name) const;
/**
@@ -224,7 +262,7 @@ namespace mongo {
int objsize() const { return *(reinterpret_cast<const int*>(objdata())); }
/** performs a cursory check on the object's size only. */
- bool isValid();
+ bool isValid() const;
/** @return if the user is a valid user doc
criter: isValid() no . or $ field names
@@ -255,7 +293,6 @@ namespace mongo {
int woCompare(const BSONObj& r, const BSONObj &ordering = BSONObj(),
bool considerFieldName=true) const;
-
bool operator<( const BSONObj& other ) const { return woCompare( other ) < 0; }
bool operator<=( const BSONObj& other ) const { return woCompare( other ) <= 0; }
bool operator>( const BSONObj& other ) const { return woCompare( other ) > 0; }
@@ -266,10 +303,12 @@ namespace mongo {
*/
int woSortOrder( const BSONObj& r , const BSONObj& sortKey , bool useDotted=false ) const;
+ bool equal(const BSONObj& r) const;
+
/** This is "shallow equality" -- ints and doubles won't match. for a
deep equality test use woCompare (which is slower).
*/
- bool woEqual(const BSONObj& r) const {
+ bool binaryEqual(const BSONObj& r) const {
int os = objsize();
if ( os == r.objsize() ) {
return (os == 0 || memcmp(objdata(),r.objdata(),os)==0);
@@ -280,8 +319,13 @@ namespace mongo {
/** @return first field of the object */
BSONElement firstElement() const { return BSONElement(objdata() + 4); }
- /** @return true if field exists in the object */
- bool hasElement(const char *name) const;
+ /** faster than firstElement().fieldName() - for the first element we can easily find the fieldname without
+ computing the element size.
+ */
+ const char * firstElementFieldName() const {
+ const char *p = objdata() + 4;
+ return *p == EOO ? "" : p+1;
+ }
/** Get the _id field from the object. For good performance drivers should
assure that _id is the first element of the object; however, correct operation
@@ -315,9 +359,7 @@ namespace mongo {
/** @return an md5 value for this object. */
string md5() const;
- bool operator==( const BSONObj& other ) const {
- return woCompare( other ) == 0;
- }
+ bool operator==( const BSONObj& other ) const { return equal( other ); }
enum MatchType {
Equality = 0,
@@ -376,34 +418,52 @@ namespace mongo {
...
}
*/
- BSONObjIterator begin();
+ BSONObjIterator begin() const;
void appendSelfToBufBuilder(BufBuilder& b) const {
assert( objsize() );
b.appendBuf(reinterpret_cast<const void *>( objdata() ), objsize());
}
- private:
- class Holder {
+#pragma pack(1)
+ class Holder : boost::noncopyable {
+ private:
+ Holder(); // this class should never be explicitly created
+ AtomicUInt refCount;
public:
- Holder( const char *objdata ) :
- _objdata( objdata ) {
- }
- ~Holder() {
- free((void *)_objdata);
- _objdata = 0;
+ char data[4]; // start of object
+
+ void zero() { refCount.zero(); }
+
+ // these are called automatically by boost::intrusive_ptr
+ friend void intrusive_ptr_add_ref(Holder* h) { h->refCount++; }
+ friend void intrusive_ptr_release(Holder* h) {
+#if defined(_DEBUG) // can't use dassert or DEV here
+ assert((int)h->refCount > 0); // make sure we haven't already freed the buffer
+#endif
+ if(--(h->refCount) == 0){
+#if defined(_DEBUG)
+ unsigned sz = (unsigned&) *h->data;
+ assert(sz < BSONObjMaxInternalSize * 3);
+ memset(h->data, 0xdd, sz);
+#endif
+ free(h);
+ }
}
- private:
- const char *_objdata;
};
+#pragma pack()
+ private:
const char *_objdata;
- boost::shared_ptr< Holder > _holder;
+ boost::intrusive_ptr< Holder > _holder;
void _assertInvalid() const;
- void init(const char *data, bool ifree) {
- if ( ifree )
- _holder.reset( new Holder( data ) );
+
+ void init(Holder *holder) {
+ _holder = holder; // holder is now managed by intrusive_ptr
+ init(holder->data);
+ }
+ void init(const char *data) {
_objdata = data;
if ( !isValid() )
_assertInvalid();
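To illustrate the ownership model the new Holder sets up (a ref-counted buffer managed via intrusive_ptr), a sketch of owned vs. unowned objects; include path assumed:

    #include "bson/bson.h"
    using namespace mongo;

    int main() {
        BSONObjBuilder b;
        b.append( "x", 1 );
        BSONObj owned = b.obj();              // takes over the builder's ref-counted buffer
        BSONObj view( owned.objdata() );      // wraps the same bytes, does not own them
        BSONObj kept = view.getOwned();       // copy into a fresh Holder for a longer lifetime
        return ( owned.isOwned() && !view.isOwned() && kept.isOwned() ) ? 0 : 1;
    }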
diff --git a/bson/bsonobjbuilder.h b/bson/bsonobjbuilder.h
index a39b529..86a52ac 100644
--- a/bson/bsonobjbuilder.h
+++ b/bson/bsonobjbuilder.h
@@ -24,10 +24,15 @@
#include <limits>
#include <cmath>
-using namespace std;
+#include <boost/static_assert.hpp>
+#include "bsonelement.h"
+#include "bsonobj.h"
+#include "bsonmisc.h"
namespace mongo {
+ using namespace std;
+
#if defined(_WIN32)
// warning: 'this' : used in base member initializer list
#pragma warning( disable : 4355 )
@@ -81,18 +86,21 @@ namespace mongo {
class BSONObjBuilder : boost::noncopyable {
public:
/** @param initsize this is just a hint as to the final size of the object */
- BSONObjBuilder(int initsize=512) : _b(_buf), _buf(initsize), _offset( 0 ), _s( this ) , _tracker(0) , _doneCalled(false) {
- _b.skip(4); /*leave room for size field*/
+ BSONObjBuilder(int initsize=512) : _b(_buf), _buf(initsize + sizeof(unsigned)), _offset( sizeof(unsigned) ), _s( this ) , _tracker(0) , _doneCalled(false) {
+ _b.appendNum((unsigned)0); // ref-count
+ _b.skip(4); /*leave room for size field and ref-count*/
}
- /* dm why do we have this/need this? not clear to me, comment please tx. */
- /** @param baseBuilder construct a BSONObjBuilder using an existing BufBuilder */
+ /** @param baseBuilder construct a BSONObjBuilder using an existing BufBuilder
+ * This is for more efficient adding of subobjects/arrays. See docs for subobjStart for example.
+ */
BSONObjBuilder( BufBuilder &baseBuilder ) : _b( baseBuilder ), _buf( 0 ), _offset( baseBuilder.len() ), _s( this ) , _tracker(0) , _doneCalled(false) {
_b.skip( 4 );
}
- BSONObjBuilder( const BSONSizeTracker & tracker ) : _b(_buf) , _buf(tracker.getSize() ), _offset(0), _s( this ) , _tracker( (BSONSizeTracker*)(&tracker) ) , _doneCalled(false) {
- _b.skip( 4 );
+ BSONObjBuilder( const BSONSizeTracker & tracker ) : _b(_buf) , _buf(tracker.getSize() + sizeof(unsigned) ), _offset( sizeof(unsigned) ), _s( this ) , _tracker( (BSONSizeTracker*)(&tracker) ) , _doneCalled(false) {
+ _b.appendNum((unsigned)0); // ref-count
+ _b.skip(4);
}
~BSONObjBuilder() {
@@ -146,9 +154,17 @@ namespace mongo {
return *this;
}
-
/** add header for a new subobject and return bufbuilder for writing to
- the subobject's body */
+ * the subobject's body
+ *
+ * example:
+ *
+ * BSONObjBuilder b;
+ * BSONObjBuilder sub (b.subobjStart("fieldName"));
+ * // use sub
+ * sub.done()
+ * // use b and convert to object
+ */
BufBuilder &subobjStart(const StringData& fieldName) {
_b.appendNum((char) Object);
_b.appendStr(fieldName);
@@ -218,7 +234,7 @@ namespace mongo {
long long x = n;
if ( x < 0 )
x = x * -1;
- if ( x < ( numeric_limits<int>::max() / 2 ) )
+ if ( x < ( (numeric_limits<int>::max)() / 2 ) ) // extra () to avoid max macro on windows
append( fieldName , (int)n );
else
append( fieldName , n );
@@ -247,14 +263,13 @@ namespace mongo {
return *this;
}
-
BSONObjBuilder& appendNumber( const StringData& fieldName , long long l ) {
static long long maxInt = (int)pow( 2.0 , 30.0 );
static long long maxDouble = (long long)pow( 2.0 , 40.0 );
-
- if ( l < maxInt )
+ long long x = l >= 0 ? l : -l;
+ if ( x < maxInt )
append( fieldName , (int)l );
- else if ( l < maxDouble )
+ else if ( x < maxDouble )
append( fieldName , (double)l );
else
append( fieldName , l );
@@ -366,12 +381,13 @@ namespace mongo {
return *this;
}
- /** Append a string element. len DOES include terminating nul */
- BSONObjBuilder& append(const StringData& fieldName, const char *str, int len) {
+ /** Append a string element.
+ @param sz size includes terminating null character */
+ BSONObjBuilder& append(const StringData& fieldName, const char *str, int sz) {
_b.appendNum((char) String);
_b.appendStr(fieldName);
- _b.appendNum((int)len);
- _b.appendBuf(str, len);
+ _b.appendNum((int)sz);
+ _b.appendBuf(str, sz);
return *this;
}
/** Append a string element */
@@ -517,6 +533,10 @@ namespace mongo {
template < class T >
BSONObjBuilder& append( const StringData& fieldName, const list< T >& vals );
+ /** Append a set of values. */
+ template < class T >
+ BSONObjBuilder& append( const StringData& fieldName, const set< T >& vals );
+
/**
* destructive
* The returned BSONObj will free the buffer when it is finished.
@@ -525,8 +545,10 @@ namespace mongo {
BSONObj obj() {
bool own = owned();
massert( 10335 , "builder does not own memory", own );
- int l;
- return BSONObj(decouple(l), true);
+ doneFast();
+ BSONObj::Holder* h = (BSONObj::Holder*)_b.buf();
+ decouple(); // sets _b.buf() to NULL
+ return BSONObj(h);
}
/** Fetch the object we have built.
@@ -535,7 +557,7 @@ namespace mongo {
would like the BSONObj to last longer than the builder.
*/
BSONObj done() {
- return BSONObj(_done(), /*ifree*/false);
+ return BSONObj(_done());
}
// Like 'done' above, but does not construct a BSONObj to return to the caller.
@@ -569,7 +591,7 @@ namespace mongo {
void appendKeys( const BSONObj& keyPattern , const BSONObj& values );
static string numStr( int i ) {
- if (i>=0 && i<100)
+ if (i>=0 && i<100 && numStrsReady)
return numStrs[i];
StringBuilder o;
o << i;
@@ -623,6 +645,8 @@ namespace mongo {
int len() const { return _b.len(); }
+ BufBuilder& bb() { return _b; }
+
private:
char* _done() {
if ( _doneCalled )
@@ -647,6 +671,7 @@ namespace mongo {
bool _doneCalled;
static const string numStrs[100]; // cache of 0 to 99 inclusive
+ static bool numStrsReady; // for static init safety. see comments in db/jsobj.cpp
};
class BSONArrayBuilder : boost::noncopyable {
@@ -692,7 +717,23 @@ namespace mongo {
return *this;
}
- BufBuilder &subobjStart( const StringData& name = "0" ) {
+ // These two just use next position
+ BufBuilder &subobjStart() { return _b.subobjStart( num() ); }
+ BufBuilder &subarrayStart() { return _b.subarrayStart( num() ); }
+
+ // These fill missing entries up to pos. If pos is less than the next array position, pos is ignored.
+ BufBuilder &subobjStart(int pos) {
+ fill(pos);
+ return _b.subobjStart( num() );
+ }
+ BufBuilder &subarrayStart(int pos) {
+ fill(pos);
+ return _b.subarrayStart( num() );
+ }
+
+ // These should only be used where you really need interface compatibility with BSONObjBuilder
+ // Currently they are only used by update.cpp and it should probably stay that way
+ BufBuilder &subobjStart( const StringData& name ) {
fill( name );
return _b.subobjStart( num() );
}
@@ -720,7 +761,16 @@ namespace mongo {
long int n = strtol( name.data(), &r, 10 );
if ( *r )
uasserted( 13048, (string)"can't append to array using string field name [" + name.data() + "]" );
- while( _i < n )
+ fill(n);
+ }
+
+ void fill (int upTo){
+ // if this is changed make sure to update error message and jstests/set7.js
+ const int maxElems = 1500000;
+ BOOST_STATIC_ASSERT(maxElems < (BSONObjMaxUserSize/10));
+ uassert(15891, "can't backfill array to larger than 1,500,000 elements", upTo <= maxElems);
+
+ while( _i < upTo )
append( nullElt() );
}
@@ -749,16 +799,27 @@ namespace mongo {
return *this;
}
- template < class T >
- inline BSONObjBuilder& BSONObjBuilder::append( const StringData& fieldName, const list< T >& vals ) {
+ template < class L >
+ inline BSONObjBuilder& _appendIt( BSONObjBuilder& _this, const StringData& fieldName, const L& vals ) {
BSONObjBuilder arrBuilder;
int n = 0;
- for( typename list< T >::const_iterator i = vals.begin(); i != vals.end(); i++ )
- arrBuilder.append( numStr(n++), *i );
- appendArray( fieldName, arrBuilder.done() );
- return *this;
+ for( typename L::const_iterator i = vals.begin(); i != vals.end(); i++ )
+ arrBuilder.append( BSONObjBuilder::numStr(n++), *i );
+ _this.appendArray( fieldName, arrBuilder.done() );
+ return _this;
}
+ template < class T >
+ inline BSONObjBuilder& BSONObjBuilder::append( const StringData& fieldName, const list< T >& vals ) {
+ return _appendIt< list< T > >( *this, fieldName, vals );
+ }
+
+ template < class T >
+ inline BSONObjBuilder& BSONObjBuilder::append( const StringData& fieldName, const set< T >& vals ) {
+ return _appendIt< set< T > >( *this, fieldName, vals );
+ }
+
+
// $or helper: OR(BSON("x" << GT << 7), BSON("y" << LT 6));
inline BSONObj OR(const BSONObj& a, const BSONObj& b)
{ return BSON( "$or" << BSON_ARRAY(a << b) ); }
diff --git a/bson/bsonobjiterator.h b/bson/bsonobjiterator.h
index 6e6a69e..39ae24d 100644
--- a/bson/bsonobjiterator.h
+++ b/bson/bsonobjiterator.h
@@ -26,6 +26,8 @@ namespace mongo {
Note each BSONObj ends with an EOO element: so you will get more() on an empty
object, although next().eoo() will be true.
+ The BSONObj must stay in scope for the duration of the iterator's execution.
+
todo: we may want to make a more stl-like iterator interface for this
with things like begin() and end()
*/
@@ -35,39 +37,44 @@ namespace mongo {
*/
BSONObjIterator(const BSONObj& jso) {
int sz = jso.objsize();
- if ( sz == 0 ) {
+ if ( MONGO_unlikely(sz == 0) ) {
_pos = _theend = 0;
return;
}
_pos = jso.objdata() + 4;
- _theend = jso.objdata() + sz;
+ _theend = jso.objdata() + sz - 1;
}
BSONObjIterator( const char * start , const char * end ) {
_pos = start + 4;
- _theend = end;
+ _theend = end - 1;
}
/** @return true if more elements exist to be enumerated. */
- bool more() { return _pos < _theend && _pos[0]; }
+ bool more() { return _pos < _theend; }
/** @return true if more elements exist to be enumerated INCLUDING the EOO element which is always at the end. */
- bool moreWithEOO() { return _pos < _theend; }
+ bool moreWithEOO() { return _pos <= _theend; }
/** @return the next element in the object. For the final element, element.eoo() will be true. */
- BSONElement next( bool checkEnd = false ) {
- assert( _pos < _theend );
- BSONElement e( _pos, checkEnd ? (int)(_theend - _pos) : -1 );
- _pos += e.size( checkEnd ? (int)(_theend - _pos) : -1 );
+ BSONElement next( bool checkEnd ) {
+ assert( _pos <= _theend );
+ BSONElement e( _pos, checkEnd ? (int)(_theend + 1 - _pos) : -1 );
+ _pos += e.size( checkEnd ? (int)(_theend + 1 - _pos) : -1 );
+ return e;
+ }
+ BSONElement next() {
+ assert( _pos <= _theend );
+ BSONElement e(_pos);
+ _pos += e.size();
return e;
}
-
void operator++() { next(); }
void operator++(int) { next(); }
BSONElement operator*() {
- assert( _pos < _theend );
- return BSONElement(_pos, -1);
+ assert( _pos <= _theend );
+ return BSONElement(_pos);
}
private:
@@ -102,6 +109,29 @@ namespace mongo {
int _cur;
};
+ /** transform a BSON array into a vector of BSONElements.
+ we match array # positions with their vector position, and ignore
+ any fields with non-numeric field names.
+ */
+ inline vector<BSONElement> BSONElement::Array() const {
+ chk(mongo::Array);
+ vector<BSONElement> v;
+ BSONObjIterator i(Obj());
+ while( i.more() ) {
+ BSONElement e = i.next();
+ const char *f = e.fieldName();
+ try {
+ unsigned u = stringToNum(f);
+ assert( u < 1000000 );
+ if( u >= v.size() )
+ v.resize(u+1);
+ v[u] = e;
+ }
+ catch(unsigned) { }
+ }
+ return v;
+ }
+
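A sketch of BSONElement::Array() above, which maps the numeric field names of a BSON array onto vector positions (BSON_ARRAY is the helper macro from bsonmisc.h; include path assumed):

    #include <vector>
    #include "bson/bson.h"
    using namespace mongo;

    int main() {
        BSONObj o = BSON( "tags" << BSON_ARRAY( "red" << "green" << "blue" ) );
        std::vector<BSONElement> v = o.getField( "tags" ).Array();
        // v[1] is the "green" element; non-numeric field names would simply be skipped
        return v.size() == 3 ? 0 : 1;
    }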
/** Similar to BOOST_FOREACH
*
* because the iterator is defined outside of the for, you must use {} around
diff --git a/bson/inline_decls.h b/bson/inline_decls.h
index 1605611..30da9b4 100644
--- a/bson/inline_decls.h
+++ b/bson/inline_decls.h
@@ -1,20 +1,19 @@
-// inline.h
-
-/**
-* Copyright (C) 2010 10gen Inc.
-*
-* This program is free software: you can redistribute it and/or modify
-* it under the terms of the GNU Affero General Public License, version 3,
-* as published by the Free Software Foundation.
-*
-* This program is distributed in the hope that it will be useful,
-* but WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-* GNU Affero General Public License for more details.
-*
-* You should have received a copy of the GNU Affero General Public License
-* along with this program. If not, see <http://www.gnu.org/licenses/>.
-*/
+// inline_decls.h
+
+/* Copyright 2010 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
#pragma once
@@ -31,3 +30,39 @@
#define NOINLINE_DECL
#endif
+
+namespace mongo {
+
+/* Note: do not clutter code with these -- ONLY use in hot spots / significant loops. */
+
+#if !defined(__GNUC__)
+
+// branch prediction. indicate we expect to be true
+# define MONGO_likely(x) ((bool)(x))
+
+// branch prediction. indicate we expect to be false
+# define MONGO_unlikely(x) ((bool)(x))
+
+# if defined(_WIN32)
+ // prefetch data from memory
+ inline void prefetch(const void *p) {
+#if defined(_MM_HINT_T0)
+ _mm_prefetch((char *) p, _MM_HINT_T0);
+#endif
+ }
+#else
+ inline void prefetch(void *p) { }
+#endif
+
+#else
+
+# define MONGO_likely(x) ( __builtin_expect((bool)(x), 1) )
+# define MONGO_unlikely(x) ( __builtin_expect((bool)(x), 0) )
+
+ inline void prefetch(void *p) {
+ __builtin_prefetch(p);
+ }
+
+#endif
+
+}
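As the note above says, these are for hot spots only; a sketch of typical use (the include path and the lookahead distance passed to prefetch are illustrative assumptions):

    #include "bson/inline_decls.h"   // assumed include path

    namespace mongo {
        long sumUntilZero( int *data, unsigned n ) {
            long sum = 0;
            for( unsigned i = 0; i < n; i++ ) {
                if( i + 16 < n )
                    prefetch( data + i + 16 );           // look a little ahead of the scan
                if( MONGO_unlikely( data[i] == 0 ) )     // terminator expected to be rare
                    break;
                sum += data[i];
            }
            return sum;
        }
    }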
diff --git a/bson/oid.cpp b/bson/oid.cpp
index 6aa0730..3aee14a 100644
--- a/bson/oid.cpp
+++ b/bson/oid.cpp
@@ -19,6 +19,7 @@
#include "oid.h"
#include "util/atomic_int.h"
#include "../db/nonce.h"
+#include "bsonobjbuilder.h"
BOOST_STATIC_ASSERT( sizeof(mongo::OID) == 12 );
@@ -34,7 +35,7 @@ namespace mongo {
#elif defined(__linux__) || defined(__APPLE__) || defined(__sunos__)
pid = (unsigned short) getpid();
#else
- pid = (unsigned short) security.getNonce();
+ pid = (unsigned short) Security::getNonce();
#endif
return pid;
}
@@ -53,13 +54,13 @@ namespace mongo {
// this is not called often, so the following is not expensive, and gives us some
// testing that nonce generation is working right and that our OIDs are (perhaps) ok.
{
- nonce a = security.getNonce();
- nonce b = security.getNonce();
- nonce c = security.getNonce();
+ nonce64 a = Security::getNonceDuringInit();
+ nonce64 b = Security::getNonceDuringInit();
+ nonce64 c = Security::getNonceDuringInit();
assert( !(a==b && b==c) );
}
- unsigned long long n = security.getNonce();
+ unsigned long long n = Security::getNonceDuringInit();
OID::MachineAndPid x = ourMachine = (OID::MachineAndPid&) n;
foldInPid(x);
return x;
@@ -96,7 +97,7 @@ namespace mongo {
}
void OID::init() {
- static AtomicUInt inc = (unsigned) security.getNonce();
+ static AtomicUInt inc = (unsigned) Security::getNonce();
{
unsigned t = (unsigned) time(0);
@@ -151,4 +152,22 @@ namespace mongo {
return time;
}
+ const string BSONObjBuilder::numStrs[] = {
+ "0", "1", "2", "3", "4", "5", "6", "7", "8", "9",
+ "10", "11", "12", "13", "14", "15", "16", "17", "18", "19",
+ "20", "21", "22", "23", "24", "25", "26", "27", "28", "29",
+ "30", "31", "32", "33", "34", "35", "36", "37", "38", "39",
+ "40", "41", "42", "43", "44", "45", "46", "47", "48", "49",
+ "50", "51", "52", "53", "54", "55", "56", "57", "58", "59",
+ "60", "61", "62", "63", "64", "65", "66", "67", "68", "69",
+ "70", "71", "72", "73", "74", "75", "76", "77", "78", "79",
+ "80", "81", "82", "83", "84", "85", "86", "87", "88", "89",
+ "90", "91", "92", "93", "94", "95", "96", "97", "98", "99",
+ };
+
+ // This is to ensure that BSONObjBuilder doesn't try to use numStrs before the strings have been constructed
+ // I've tested just making numStrs a char[][], but the overhead of constructing the strings each time was too high
+ // numStrsReady will be 0 until after numStrs is initialized because it is a static variable
+ bool BSONObjBuilder::numStrsReady = (numStrs[0].size() > 0);
+
}
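
The numStrs/numStrsReady pair above relies on in-order static initialization within a translation unit: numStrsReady is zero-initialized, and its dynamic initializer runs (and yields true) only after the numStrs strings have been constructed. A standalone sketch of the same idiom, with hypothetical names and not part of this patch:

    #include <sstream>
    #include <string>

    static const std::string kSmallInts[] = { "0", "1", "2", "3", "4" };
    // Zero-initialized until the line above has run, so code executing during
    // static initialization can tell whether the table is usable yet.
    static bool kSmallIntsReady = (kSmallInts[0].size() > 0);

    std::string smallIntToString(unsigned i) {
        if (kSmallIntsReady && i < 5)
            return kSmallInts[i];            // fast path: no formatting
        std::ostringstream ss;               // safe fallback
        ss << i;
        return ss.str();
    }
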
diff --git a/bson/ordering.h b/bson/ordering.h
index 749e20d..bca3296 100644
--- a/bson/ordering.h
+++ b/bson/ordering.h
@@ -19,15 +19,22 @@
namespace mongo {
- /** A precomputation of a BSON key pattern.
+ // todo: ideally move to db/ instead of bson/, but elim any dependencies first
+
+ /** A precomputation of a BSON index or sort key pattern. That is something like:
+ { a : 1, b : -1 }
The constructor is private to make conversion more explicit so we notice where we call make().
Over time we should push this up higher and higher.
- */
+ */
class Ordering {
- const unsigned bits;
- const unsigned nkeys;
- Ordering(unsigned b,unsigned n) : bits(b),nkeys(n) { }
+ unsigned bits;
+ Ordering(unsigned b) : bits(b) { }
public:
+ Ordering(const Ordering& r) : bits(r.bits) { }
+ void operator=(const Ordering& r) {
+ bits = r.bits;
+ }
+
/** so, for key pattern { a : 1, b : -1 }
get(0) == 1
get(1) == -1
@@ -39,12 +46,12 @@ namespace mongo {
// for woCompare...
unsigned descending(unsigned mask) const { return bits & mask; }
- operator string() const {
+ /*operator string() const {
StringBuilder buf(32);
for ( unsigned i=0; i<nkeys; i++)
buf.append( get(i) > 0 ? "+" : "-" );
return buf.str();
- }
+ }*/
static Ordering make(const BSONObj& obj) {
unsigned b = 0;
@@ -59,7 +66,7 @@ namespace mongo {
b |= (1 << n);
n++;
}
- return Ordering(b,n);
+ return Ordering(b);
}
};
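
For reference, the bitmask packed by make() above has bit n set when the n-th key of the pattern is descending. A standalone restatement, not part of this patch; MiniOrdering is hypothetical:

    struct MiniOrdering {
        unsigned bits;
        // direction of the n-th key: 1 for ascending, -1 for descending
        int get(int n) const { return ((1 << n) & bits) ? -1 : 1; }
        // for woCompare-style callers that already hold a mask of key positions
        unsigned descending(unsigned mask) const { return bits & mask; }
    };
    // For the pattern { a : 1, b : -1 }, make() yields bits == 2 (binary 10),
    // so get(0) == 1 and get(1) == -1, matching the comment in the header.
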
diff --git a/bson/stringdata.h b/bson/stringdata.h
index 46cdb7a..352dc51 100644
--- a/bson/stringdata.h
+++ b/bson/stringdata.h
@@ -15,8 +15,7 @@
* limitations under the License.
*/
-#ifndef BSON_STRINDATA_HEADER
-#define BSON_STRINDATA_HEADER
+#pragma once
#include <string>
#include <cstring>
@@ -25,29 +24,31 @@ namespace mongo {
using std::string;
- // A StringData object wraps a 'const string&' or a 'const char*' without
- // copying its contents. The most common usage is as a function argument that
- // takes any of the two forms of strings above. Fundamentally, this class tries
- // go around the fact that string literals in C++ are char[N]'s.
- //
- // Note that the object StringData wraps around must be alive while the StringDAta
- // is.
-
+ /** A StringData object wraps a 'const string&' or a 'const char*' without
+ * copying its contents. The most common usage is as a function argument that
+ * takes any of the two forms of strings above. Fundamentally, this class tries
+ * to go around the fact that string literals in C++ are char[N]'s.
+ *
+ * Note that the object StringData wraps around must be alive while the StringData
+ * is.
+ */
class StringData {
public:
- // Construct a StringData explicilty, for the case where the lenght of
- // string is not known. 'c' must be a pointer to a null-terminated string.
+ /** Construct a StringData, for the case where the length of
+ * the string is not known. 'c' must be a pointer to a null-terminated string.
+ */
StringData( const char* c )
: _data(c), _size((unsigned) strlen(c)) {}
- // Construct a StringData explicitly, for the case where the length of the string
- // is already known. 'c' must be a pointer to a null-terminated string, and strlenOfc
- // must be the length that std::strlen(c) would return, a.k.a the index of the
- // terminator in c.
- StringData( const char* c, size_t strlenOfc )
- : _data(c), _size((unsigned) strlenOfc) {}
+ /** Construct a StringData explicitly, for the case where the length of the string
+ * is already known. 'c' must be a pointer to a null-terminated string, and len
+ * must be the length that std::strlen(c) would return, i.e. the index of the
+ * terminator in c.
+ */
+ StringData( const char* c, unsigned len )
+ : _data(c), _size(len) {}
- // Construct a StringData explicitly, for the case of a std::string.
+ /** Construct a StringData, for the case of a std::string. */
StringData( const string& s )
: _data(s.c_str()), _size((unsigned) s.size()) {}
@@ -59,19 +60,12 @@ namespace mongo {
: _data(&val[0]), _size(N-1) {}
// accessors
-
- const char* const data() const { return _data; }
+ const char* data() const { return _data; }
const unsigned size() const { return _size; }
private:
- // There are two assumptions we use bellow.
- // '_data' *always* finishes with a null terminator
- // 'size' does *not* account for the null terminator
- // These assumptions may make it easier to minimize changes to existing code.
- const char* const _data;
- const unsigned _size;
+ const char* const _data; // is always null terminated
+ const unsigned _size; // 'size' does not include the null terminator
};
} // namespace mongo
-
-#endif // BSON_STRINGDATA_HEADER
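
A minimal usage sketch for StringData, not part of this patch; logName() is hypothetical, and bson/stringdata.h plus <cstdio> are assumed included. One signature accepts string literals, char pointers and std::strings without copying, as long as the wrapped storage outlives the view:

    void logName(const mongo::StringData& name) {
        // size() excludes the terminator; data() stays null-terminated by contract.
        std::printf("%.*s\n", (int) name.size(), name.data());
    }

    // logName("abc");                 // char[N] constructor, length known at compile time
    // logName(std::string("tmp"));    // std::string constructor, valid for the call
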
diff --git a/bson/util/atomic_int.h b/bson/util/atomic_int.h
index 1573552..e85a023 100644
--- a/bson/util/atomic_int.h
+++ b/bson/util/atomic_int.h
@@ -36,15 +36,17 @@ namespace mongo {
inline AtomicUInt operator--(); // --prefix
inline AtomicUInt operator--(int); // postfix--
- inline void zero() { x = 0; } // TODO: this isn't thread safe
+ inline void zero();
volatile unsigned x;
};
#if defined(_WIN32)
+ void AtomicUInt::zero() {
+ InterlockedExchange((volatile long*)&x, 0);
+ }
AtomicUInt AtomicUInt::operator++() {
- // InterlockedIncrement returns the new value
- return InterlockedIncrement((volatile long*)&x); //long is 32bits in Win64
+ return InterlockedIncrement((volatile long*)&x);
}
AtomicUInt AtomicUInt::operator++(int) {
return InterlockedIncrement((volatile long*)&x)-1;
@@ -57,6 +59,7 @@ namespace mongo {
}
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
// this is in GCC >= 4.1
+ inline void AtomicUInt::zero() { x = 0; } // TODO: this isn't thread safe - maybe
AtomicUInt AtomicUInt::operator++() {
return __sync_add_and_fetch(&x, 1);
}
@@ -70,8 +73,8 @@ namespace mongo {
return __sync_fetch_and_add(&x, -1);
}
#elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
+ inline void AtomicUInt::zero() { x = 0; } // TODO: this isn't thread safe
// from boost 1.39 interprocess/detail/atomic.hpp
-
inline unsigned atomic_int_helper(volatile unsigned *x, int val) {
int r;
asm volatile
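
A sketch of the intended use of zero(), not part of this patch; the counter and both functions are hypothetical, and bson/util/atomic_int.h (with its default-constructed counter) is assumed included. The change makes the reset an interlocked store on Win32, while the GCC paths keep the plain store flagged as TODO:

    mongo::AtomicUInt opsSinceLastReport;

    void onOperation() { opsSinceLastReport++; }        // atomic increment

    unsigned drainCounter() {
        unsigned n = opsSinceLastReport.x;               // racy read, fine for stats
        opsSinceLastReport.zero();                       // interlocked store on Win32 after this patch
        return n;
    }
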
diff --git a/bson/util/builder.h b/bson/util/builder.h
index 6f4ff9e..710c2d4 100644
--- a/bson/util/builder.h
+++ b/bson/util/builder.h
@@ -20,8 +20,6 @@
#include <string>
#include <string.h>
#include <stdio.h>
-#include <boost/shared_ptr.hpp>
-
#include "../inline_decls.h"
#include "../stringdata.h"
@@ -49,11 +47,47 @@ namespace mongo {
void msgasserted(int msgid, const char *msg);
- class BufBuilder {
+ class TrivialAllocator {
+ public:
+ void* Malloc(size_t sz) { return malloc(sz); }
+ void* Realloc(void *p, size_t sz) { return realloc(p, sz); }
+ void Free(void *p) { free(p); }
+ };
+
+ class StackAllocator {
+ public:
+ enum { SZ = 512 };
+ void* Malloc(size_t sz) {
+ if( sz <= SZ ) return buf;
+ return malloc(sz);
+ }
+ void* Realloc(void *p, size_t sz) {
+ if( p == buf ) {
+ if( sz <= SZ ) return buf;
+ void *d = malloc(sz);
+ memcpy(d, p, SZ);
+ return d;
+ }
+ return realloc(p, sz);
+ }
+ void Free(void *p) {
+ if( p != buf )
+ free(p);
+ }
+ private:
+ char buf[SZ];
+ };
+
+ template< class Allocator >
+ class _BufBuilder {
+ // non-copyable, non-assignable
+ _BufBuilder( const _BufBuilder& );
+ _BufBuilder& operator=( const _BufBuilder& );
+ Allocator al;
public:
- BufBuilder(int initsize = 512) : size(initsize) {
+ _BufBuilder(int initsize = 512) : size(initsize) {
if ( size > 0 ) {
- data = (char *) malloc(size);
+ data = (char *) al.Malloc(size);
if( data == 0 )
msgasserted(10000, "out of memory BufBuilder");
}
@@ -62,22 +96,23 @@ namespace mongo {
}
l = 0;
}
- ~BufBuilder() {
- kill();
- }
+ ~_BufBuilder() { kill(); }
void kill() {
if ( data ) {
- free(data);
+ al.Free(data);
data = 0;
}
}
- void reset( int maxSize = 0 ) {
+ void reset() {
+ l = 0;
+ }
+ void reset( int maxSize ) {
l = 0;
if ( maxSize && size > maxSize ) {
- free(data);
- data = (char*)malloc(maxSize);
+ al.Free(data);
+ data = (char*)al.Malloc(maxSize);
size = maxSize;
}
}
@@ -94,6 +129,9 @@ namespace mongo {
/* assume ownership of the buffer - you must then free() it */
void decouple() { data = 0; }
+ void appendUChar(unsigned char j) {
+ *((unsigned char*)grow(sizeof(unsigned char))) = j;
+ }
void appendChar(char j) {
*((char*)grow(sizeof(char))) = j;
}
@@ -131,13 +169,15 @@ namespace mongo {
appendBuf(&s, sizeof(T));
}
- void appendStr(const StringData &str , bool includeEOO = true ) {
- const int len = str.size() + ( includeEOO ? 1 : 0 );
+ void appendStr(const StringData &str , bool includeEndingNull = true ) {
+ const int len = str.size() + ( includeEndingNull ? 1 : 0 );
memcpy(grow(len), str.data(), len);
}
+ /** @return length of current string */
int len() const { return l; }
void setlen( int newLen ) { l = newLen; }
+ /** @return size of the buffer */
int getSize() const { return size; }
/* returns the pre-grow write position */
@@ -160,7 +200,7 @@ namespace mongo {
a = l + 16 * 1024;
if ( a > BufferMaxSize )
msgasserted(13548, "BufBuilder grow() > 64MB");
- data = (char *) realloc(data, a);
+ data = (char *) al.Realloc(data, a);
size= a;
}
@@ -171,6 +211,21 @@ namespace mongo {
friend class StringBuilder;
};
+ typedef _BufBuilder<TrivialAllocator> BufBuilder;
+
+ /** The StackBufBuilder builds smaller datasets on the stack instead of using malloc.
+ This can be significantly faster for small bufs. However, you cannot decouple() the
+ buffer with StackBufBuilder.
+ While designed to be a variable on the stack, if you were to dynamically allocate one,
+ nothing bad would happen. In fact in some circumstances this might make sense, say,
+ embedded in some other object.
+ */
+ class StackBufBuilder : public _BufBuilder<StackAllocator> {
+ public:
+ StackBufBuilder() : _BufBuilder<StackAllocator>(StackAllocator::SZ) { }
+ void decouple(); // not allowed. not implemented.
+ };
+
#if defined(_WIN32)
#pragma warning( push )
// warning C4996: 'sprintf': This function or variable may be unsafe. Consider using sprintf_s instead. To disable deprecation, use _CRT_SECURE_NO_WARNINGS.
@@ -236,6 +291,8 @@ namespace mongo {
void reset( int maxSize = 0 ) { _buf.reset( maxSize ); }
std::string str() const { return std::string(_buf.data, _buf.l); }
+
+ int len() const { return _buf.l; }
private:
BufBuilder _buf;
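
A sketch of the new StackBufBuilder in use, not part of this patch; the 0x02 tag is hypothetical, and bson/util/builder.h is assumed included. Buffers up to StackAllocator::SZ (512 bytes) never touch malloc; larger ones spill over transparently via Realloc:

    void writeTaggedString(const mongo::StringData& s) {
        mongo::StackBufBuilder b;
        b.appendUChar(0x02);             // hypothetical type tag
        b.appendStr(s);                  // bytes of s plus the trailing null
        // b.len() bytes are now ready to copy out; decouple() is deliberately
        // unavailable here because the storage may live on the stack.
    }
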
diff --git a/bson/util/misc.h b/bson/util/misc.h
index b31f36f..33764e3 100644
--- a/bson/util/misc.h
+++ b/bson/util/misc.h
@@ -1,4 +1,4 @@
-/* @file util.h
+/* @file misc.h
*/
/*
@@ -91,4 +91,23 @@ namespace mongo {
return i;
return -1;
}
+
+ inline bool isNumber( char c ) {
+ return c >= '0' && c <= '9';
+ }
+
+ inline unsigned stringToNum(const char *str) {
+ unsigned x = 0;
+ const char *p = str;
+ while( 1 ) {
+ if( !isNumber(*p) ) {
+ if( *p == 0 && p != str )
+ break;
+ throw 0;
+ }
+ x = x * 10 + *p++ - '0';
+ }
+ return x;
+ }
+
}
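
A usage sketch for the new helpers above, not part of this patch; parsePortOrDefault() is hypothetical. stringToNum() parses an unsigned decimal and throws a plain int on empty or non-numeric input, so a caller that wants a default can catch it:

    unsigned parsePortOrDefault(const char* s, unsigned dflt) {
        try {
            return mongo::stringToNum(s);   // e.g. "27017" -> 27017
        }
        catch (...) {                        // the thrown value is just the int 0
            return dflt;
        }
    }
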
diff --git a/buildscripts/errorcodes.py b/buildscripts/errorcodes.py
index a105647..dec1030 100755
--- a/buildscripts/errorcodes.py
+++ b/buildscripts/errorcodes.py
@@ -31,22 +31,58 @@ def assignErrorCodes():
codes = []
-def readErrorCodes( callback ):
- ps = [ re.compile( "([um]asser(t|ted)) *\( *(\d+)" ) ,
- re.compile( "(User|Msg)Exceptio(n)\( *(\d+)" )
+def readErrorCodes( callback, replaceZero = False ):
+ ps = [ re.compile( "(([umsg]asser(t|ted))) *\(( *)(\d+)" ) ,
+ re.compile( "((User|Msg|MsgAssertion)Exceptio(n))\(( *)(\d+)" ) ,
+ re.compile( "(((verify))) *\(( *)(\d+)" )
]
+
for x in utils.getAllSourceFiles():
+
+ needReplace = [False]
+ lines = []
+ lastCodes = [0]
lineNum = 1
+
for line in open( x ):
+
for p in ps:
- for m in p.findall( line ):
- codes.append( ( x , lineNum , line , m[2] ) )
- callback( x , lineNum , line , m[2] )
- lineNum = lineNum + 1
+
+ def repl( m ):
+ m = m.groups()
+
+ start = m[0]
+ spaces = m[3]
+ code = m[4]
+ if code == '0' and replaceZero :
+ code = getNextCode( lastCodes )
+ lastCodes.append( code )
+ code = str( code )
+ needReplace[0] = True
+
+ print( "Adding code " + code + " to line " + x + ":" + str( lineNum ) )
+
+ else :
+ codes.append( ( x , lineNum , line , code ) )
+ callback( x , lineNum , line , code )
+
+ return start + "(" + spaces + code
+
+ line = re.sub( p, repl, line )
+ if replaceZero : lines.append( line )
+ lineNum = lineNum + 1
+
+ if replaceZero and needReplace[0] :
+ print( "Replacing file " + x )
+ of = open( x + ".tmp", 'w' )
+ of.write( "".join( lines ) )
+ of.close()
+ os.rename( x + ".tmp", x )
+
-def getNextCode():
- highest = [0]
+def getNextCode( lastCodes = [0] ):
+ highest = [max(lastCodes)]
def check( fileName , lineNum , line , code ):
code = int( code )
if code > highest[0]:
@@ -64,7 +100,7 @@ def checkErrorCodes():
print( "%s:%d:%s %s" % seen[code] )
errors.append( seen[code] )
seen[code] = ( fileName , lineNum , line , code )
- readErrorCodes( checkDups )
+ readErrorCodes( checkDups, True )
return len( errors ) == 0
def getBestMessage( err , start ):
@@ -81,13 +117,11 @@ def getBestMessage( err , start ):
def genErrorOutput():
- g = utils.getGitVersion()
-
if os.path.exists( "docs/errors.md" ):
i = open( "docs/errors.md" , "r" )
- out = open( "docs/errors.md" , 'w' )
+ out = open( "docs/errors.md" , 'wb' )
out.write( "MongoDB Error Codes\n==========\n\n\n" )
prev = ""
@@ -107,7 +141,7 @@ def genErrorOutput():
out.write( f + "\n----\n" )
prev = f
- url = "http://github.com/mongodb/mongo/blob/" + g + "/" + f + "#L" + str(l)
+ url = "http://github.com/mongodb/mongo/blob/master/" + f + "#L" + str(l)
out.write( "* " + str(num) + " [code](" + url + ") " + getBestMessage( line , str(num) ) + "\n" )
diff --git a/buildscripts/hacks_ubuntu.py b/buildscripts/hacks_ubuntu.py
index 977d2df..3de1a6f 100644
--- a/buildscripts/hacks_ubuntu.py
+++ b/buildscripts/hacks_ubuntu.py
@@ -2,11 +2,13 @@
import os
def insert( env , options ):
-
- if not foundxulrunner( env , options ):
- if os.path.exists( "usr/include/mozjs/" ):
- env.Append( CPPDEFINES=[ "MOZJS" ] )
+ # now that sm is in the source tree, don't need this
+ # if not foundxulrunner( env , options ):
+ # if os.path.exists( "usr/include/mozjs/" ):
+ # env.Append( CPPDEFINES=[ "MOZJS" ] )
+
+ return
def foundxulrunner( env , options ):
best = None
diff --git a/buildscripts/makealldists.py b/buildscripts/makealldists.py
deleted file mode 100644
index 6b6f365..0000000
--- a/buildscripts/makealldists.py
+++ /dev/null
@@ -1,291 +0,0 @@
-#!/usr/bin/env python
-
-from __future__ import with_statement
-import subprocess
-import sys
-import os
-import time
-import tempfile
-import errno
-import glob
-import shutil
-import settings
-import simples3
-
-def s3bucket():
- return simples3.S3Bucket(settings.bucket, settings.id, settings.key)
-
-def s3cp (bucket, filename, s3name):
- defaultacl="public-read"
- print "putting %s to %s" % (filename, s3name)
- bucket.put(s3name, open(filename, "rb").read(), acl=defaultacl)
-
-def pushrepo(repodir):
- files=subprocess.Popen(['find', repodir, '-type', 'f'], stdout=subprocess.PIPE).communicate()[0][:-1].split('\n')
- bucket=s3bucket()
- olddebs=[t[0] for t in bucket.listdir(prefix='distros/') if t[0].endswith('.deb')]
- newdebs=[]
- for fn in files:
- if len(fn) == 0:
- continue
- tail = fn[len(repodir):]
- # Note: be very careful not to produce s3names containing
- # sequences of repeated slashes: s3 doesn't treat a////b as
- # equivalent to a/b.
- s3name1='distros-archive/'+time.strftime('%Y%m%d')+tail
- s3name2='distros'+tail
- s3cp(bucket, fn, s3name1)
- s3cp(bucket, fn, s3name2)
- if s3name1.endswith('.deb'):
- newdebs.append(s3name1)
- # FIXME: we ought to clean out old debs eventually, but this will
- # blow away too much if we're trying to push a subset of what's
- # supposed to be available.
- #[bucket.delete(deb) for deb in set(olddebs).difference(set(newdebs))]
-
-def cat (inh, outh):
- inh.seek(0)
- for line in inh:
- outh.write(line)
- inh.close()
-
-# This generates all tuples from mixed-radix counting system, essentially.
-def gen(listlist):
- dim=len(listlist)
- a=[0 for ignore in listlist]
- while True:
- yield [listlist[i][a[i]] for i in range(dim)]
- a[0]+=1
- for j in range(dim):
- if a[j] == len(listlist[j]):
- if j<dim-1:
- a[j+1]+=1
- else:
- return
- a[j]=0
-
-def dirify(string):
- return (string if string[-1:] in '\/' else string+'/')
-def fileify(string):
- return (string if string[-1:] not in '\/' else string.rstrip('\/'))
-
-# WTF: os.makedirs errors if the leaf exists?
-def makedirs(f):
- try:
- os.makedirs(f)
- except OSError: # as exc: # Python >2.5
- exc=sys.exc_value
- if exc.errno == errno.EEXIST:
- pass
- else:
- raise exc
-
-
-
-# This is a fairly peculiar thing to want to do, but our build process
-# creates several apt repositories for each mongo version we build on
-# any given Debian/Ubutnu release. To merge repositories together, we
-# must concatenate the Packages.gz files.
-def merge_directories_concatenating_conflicts (target, sources):
- print sources
- target = dirify(target)
- for source in sources:
- source = dirify(source)
- files = subprocess.Popen(["find", source, "-type", "f"], stdout=subprocess.PIPE).communicate()[0].split('\n')
- for f in files:
- if f == '':
- continue
- rel = f[len(source):]
- o=target+rel
- makedirs(os.path.dirname(o))
- with open(f) as inh:
- with open(target+rel, "a") as outh:
- outh.write(inh.read())
-
-
-def parse_mongo_version_spec(spec):
- l = spec.split(':')
- if len(l) == 1:
- l+=['','']
- elif len(l) == 2:
- l+=['']
- return l
-
-def logfh(distro, distro_version, arch):
- prefix = "%s-%s-%s.log." % (distro, distro_version, arch)
- # This is a NamedTemporaryFile mostly so that I can tail(1) them
- # as we go.
- return tempfile.NamedTemporaryFile("w+b", -1, prefix=prefix)
-
-def spawn(distro, distro_version, arch, spec, directory, opts):
- argv = ["python", "makedist.py"] + opts + [ directory, distro, distro_version, arch ] + [ spec ]
-# cmd = "mkdir -p %s; cd %s; touch foo.deb; echo %s %s %s %s %s | tee Packages " % ( directory, directory, directory, distro, distro_version, arch, mongo_version )
-# print cmd
-# argv = ["sh", "-c", cmd]
- fh = logfh(distro, distro_version, arch)
- print >> fh, "Running %s" % argv
- # it's often handy to be able to run these things at the shell
- # manually. FIXME: this ought to be slightly less than thoroughly
- # ignorant of quoting issues (as is is now).
- print >> fh, " ".join(argv)
- fh.flush()
- proc = subprocess.Popen(argv, stdin=None, stdout=fh, stderr=fh)
- return (proc, fh, distro, distro_version, arch, spec)
-
-def win(name, logfh, winfh):
- logfh.seek(0)
- print >> winfh, "=== Winner %s ===" % name
- cat(logfh, winfh)
- print >> winfh, "=== End winner %s ===" % name
-
-def lose(name, logfh, losefh):
- logfh.seek(0)
- print >> losefh, "=== Loser %s ===" % name
- cat(logfh, losefh)
- print >> losefh, "=== End loser %s ===" % name
-
-def wait(procs, winfh, losefh, winners, losers):
- print "."
- sys.stdout.flush()
- try:
- (pid, stat) = os.wait()
- except OSError, err:
- print >> sys.stderr, "This shouldn't happen."
- print >> sys.stderr, err
- next
- if pid:
- [tup] = [tup for tup in procs if tup[0].pid == pid]
- (proc, logfh, distro, distro_version, arch, spec) = tup
- procs.remove(tup)
- name = "%s %s %s" % (distro, distro_version, arch)
- if os.WIFEXITED(stat):
- if os.WEXITSTATUS(stat) == 0:
- win(name, logfh, winfh)
- winners.append(name)
- else:
- lose(name, logfh, losefh)
- losers.append(name)
- if os.WIFSIGNALED(stat):
- lose(name, logfh, losefh)
- losers.append(name)
-
-
-
-def __main__():
- # FIXME: getopt & --help.
- print " ".join(sys.argv)
- branches = sys.argv[-1]
- makedistopts = sys.argv[1:-1]
-
- # Output from makedist.py goes here.
- outputroot=tempfile.mkdtemp()
- repodir=tempfile.mkdtemp()
-
- print "makedist output under: %s\ncombined repo: %s\n" % (outputroot, repodir)
- sys.stdout.flush()
- # Add more dist/version/architecture tuples as they're supported.
- dists = (("ubuntu", "10.10"),
- ("ubuntu", "10.4"),
- ("ubuntu", "9.10"),
- ("ubuntu", "9.4"),
- #("ubuntu", "8.10"),
- ("debian", "5.0"),
- ("centos", "5.4"),
- #("fedora", "12"),
- ("fedora", "13"),
- ("fedora", "14"))
- arches = ("x86", "x86_64")
-# mongos = branches.split(',')
- # Run a makedist for each distro/version/architecture tuple above.
- winners = []
- losers = []
- winfh=tempfile.TemporaryFile()
- losefh=tempfile.TemporaryFile()
- procs = []
- count = 0
- for ((distro, distro_version), arch, spec) in gen([dists, arches, [branches]]):
- # FIXME: no x86 fedoras on RackSpace circa 04/10.
- if distro == "fedora" and arch == "x86":
- continue
- count+=1
- opts = makedistopts
- if distro in ["debian", "ubuntu"]:
- outputdir = "%s/deb/%s" % (outputroot, distro)
- elif distro in ["centos", "fedora", "redhat"]:
- outputdir = "%s/rpm/%s/%s/os" % (outputroot, distro, distro_version)
- else:
- raise Exception("unsupported distro %s" % distro)
- #opts += ["--subdirs"]
-
- procs.append(spawn(distro, distro_version, arch, spec, outputdir, opts))
-
- if len(procs) == 8:
- wait(procs, winfh, losefh, winners, losers)
-
- while procs:
- wait(procs, winfh, losefh, winners, losers)
-
- winfh.seek(0)
- losefh.seek(0)
- nwinners=len(winners)
- nlosers=len(losers)
- print "%d winners; %d losers" % (nwinners, nlosers)
- cat(winfh, sys.stdout)
- cat(losefh, sys.stdout)
- print "%d winners; %d losers" % (nwinners, nlosers)
- if count == nwinners + nlosers:
- print "All jobs accounted for"
-# return 0
- else:
- print "Lost some jobs...?"
- return 1
-
- sys.stdout.flush()
- sys.stderr.flush()
-
- # this is sort of ridiculous, but the outputs from rpmbuild look
- # like RPM/<arch>, but the repo wants to look like
- # <arch>/RPM.
- for dist in os.listdir(outputroot+'/rpm'):
- if dist in ["centos", "fedora", "redhat"]:
- distdir="%s/rpm/%s" % (outputroot, dist)
- rpmdirs = subprocess.Popen(["find", distdir, "-type", "d", "-a", "-name", "RPMS"], stdout=subprocess.PIPE).communicate()[0].split('\n')[:-1]
- for rpmdir in rpmdirs:
- for arch in os.listdir(rpmdir):
- archdir="%s/../%s" % (rpmdir, arch)
- os.mkdir(archdir)
- os.rename("%s/%s" % (rpmdir, arch), "%s/RPMS" % (archdir,))
- os.rmdir(rpmdir)
-
-
- for flavor in os.listdir(outputroot):
- argv=["python", "mergerepositories.py", flavor, "%s/%s" % (outputroot, flavor), repodir]
- print "running %s" % argv
- print " ".join(argv)
- r = subprocess.Popen(argv).wait()
- if r != 0:
- raise Exception("mergerepositories.py exited %d" % r)
- print repodir
- #pushrepo(repodir)
- #shutil.rmtree(outputroot)
- #shutil.rmtree(repodir)
-
- return 0
-
-
-if __name__ == '__main__':
- __main__()
-
-
-# FIXME: this ought to be someplace else.
-
-# FIXME: remove this comment when the buildbot does this. After this
-# program, run something that amounts to
-#
-# find /tmp/distros -name *.deb -or -name Packages.gz | while read f; do echo "./s3cp.py $f ${f#/tmp/}"; done
-#
-# where ./s3cp.py is a trivial s3 put executable in this directory.
-
-# merge_directories_concatenating_conflicts('/tmp/distros/debian', '/tmp/distros-20100222/debian/HEAD', '/tmp/distros-20100222/debian/r1.3.2','/tmp/distros-20100222/debian/v1.2')
-
-# merge_directories_concatenating_conflicts('/tmp/distros/ubuntu', '/tmp/distros-20100222/ubuntu/HEAD', '/tmp/distros-20100222/ubuntu/r1.3.2', '/tmp/distros-20100222/ubuntu/v1.2')
diff --git a/buildscripts/makedist.py b/buildscripts/makedist.py
deleted file mode 100644
index b5387c2..0000000
--- a/buildscripts/makedist.py
+++ /dev/null
@@ -1,940 +0,0 @@
-#!/usr/bin/env python
-
-# makedist.py: make a distro package (on an EC2 (or sometimes
-# RackSpace) instance)
-
-# For ease of use, put a file called settings.py someplace in your
-# sys.path, containing something like the following:
-
-# makedist = {
-# # This gets supplied to EC2 to rig up an ssh key for
-# # the remote user.
-# "ec2_sshkey" : "key-id",
-# # And so we need to tell our ssh processes where to find the
-# # appropriate public key file.
-# "ssh_keyfile" : "/path/to/key-id-file"
-# }
-
-# Notes: although there is a Python library for accessing EC2 as a web
-# service, it seemed as if it would be less work to just shell out to
-# the three EC2 management tools we use.
-
-# To make a distribution we must:
-
-# 1. Fire up an EC2 AMI suitable for building.
-# 2. Get any build-dependencies and configurations onto the remote host.
-# 3. Fetch the mongodb source.
-# 4. Run the package building tools.
-# 5. Save the package archives someplace permanent (eventually we
-# ought to install them into a public repository for the distro).
-# Unimplemented:
-# 6. Fire up an EC2 AMI suitable for testing whether the packages
-# install.
-# 7. Check whether the packages install and run.
-
-# The implementations of steps 1, 2, 4, 5, 6, and 7 will depend on the
-# distro of host we're talking to (Ubuntu, CentOS, Debian, etc.).
-
-from __future__ import with_statement
-import subprocess
-import sys
-import signal
-import getopt
-import socket
-import time
-import os.path
-import tempfile
-import string
-import settings
-
-from libcloud.types import Provider
-from libcloud.providers import get_driver
-from libcloud.drivers.ec2 import EC2NodeDriver, NodeImage
-from libcloud.base import Node, NodeImage, NodeSize, NodeState
-from libcloud.ssh import ParamikoSSHClient
-
-# For the moment, we don't handle any of the errors we raise, so it
-# suffices to have a simple subclass of Exception that just
-# stringifies according to a desired format.
-class SimpleError(Exception):
- def __init__(self, *args):
- self.args = args
- def __str__(self):
- return self.args[0] % self.args[1:]
-
-class SubcommandError(SimpleError):
- def __init__(self, *args):
- self.status = args[2]
- super(SubcommandError, self).__init__(*args)
-
-class BaseConfigurator (object):
- def __init__ (self, **kwargs):
- self.configuration = []
- self.arch=kwargs["arch"]
- self.distro_name=kwargs["distro_name"]
- self.distro_version=kwargs["distro_version"]
-
- def lookup(self, what, dist, vers, arch):
- for (wht, seq) in self.configuration:
- if what == wht:
- for ((dpat, vpat, apat), payload) in seq:
- # For the moment, our pattern facility is just "*" or exact match.
- if ((dist == dpat or dpat == "*") and
- (vers == vpat or vpat == "*") and
- (arch == apat or apat == "*")):
- return payload
- if getattr(self, what, False):
- return getattr(self, what)
- else:
- raise SimpleError("couldn't find a%s %s configuration for dist=%s, version=%s, arch=%s",
- "n" if ("aeiouAEIOU".find(what[0]) > -1) else "",
- what, dist, vers, arch)
-
- def default(self, what):
- return self.lookup(what, self.distro_name, self.distro_version, self.arch)
- def findOrDefault(self, dict, what):
- return (dict[what] if what in dict else self.lookup(what, self.distro_name, self.distro_version, self.arch))
-
-class BaseHostConfigurator (BaseConfigurator):
- def __init__(self, **kwargs):
- super(BaseHostConfigurator, self).__init__(**kwargs)
- self.configuration += [("distro_arch",
- ((("debian", "*", "x86_64"), "amd64"),
- (("ubuntu", "*", "x86_64"), "amd64"),
- (("debian", "*", "x86"), "i386"),
- (("ubuntu", "*", "x86"), "i386"),
- (("centos", "*", "x86_64"), "x86_64"),
- (("fedora", "*", "x86_64"), "x86_64"),
- (("centos", "*", "x86"), "i386"),
- (("fedora", "*", "x86"), "i386"),
- (("*", "*", "x86_64"), "x86_64"),
- (("*", "*", "x86"), "x86"))) ,
- ]
-
-class LocalHost(object):
- @classmethod
- def runLocally(cls, argv):
- print "running %s" % argv
- r = subprocess.Popen(argv).wait()
- if r != 0:
- raise SubcommandError("subcommand %s exited %d", argv, r)
-
-class EC2InstanceConfigurator(BaseConfigurator):
- def __init__(self, **kwargs):
- super(EC2InstanceConfigurator, self).__init__(**kwargs)
- self.configuration += [("ec2_ami",
- ((("ubuntu", "10.10", "x86_64"), "ami-688c7801"),
- (("ubuntu", "10.10", "x86"), "ami-1a837773"),
- (("ubuntu", "10.4", "x86_64"), "ami-bf07ead6"),
- (("ubuntu", "10.4", "x86"), "ami-f707ea9e"),
- (("ubuntu", "9.10", "x86_64"), "ami-55739e3c"),
- (("ubuntu", "9.10", "x86"), "ami-bb709dd2"),
- (("ubuntu", "9.4", "x86_64"), "ami-eef61587"),
- (("ubuntu", "9.4", "x86"), "ami-ccf615a5"),
- (("ubuntu", "8.10", "x86"), "ami-c0f615a9"),
- (("ubuntu", "8.10", "x86_64"), "ami-e2f6158b"),
- (("ubuntu", "8.4", "x86"), "ami59b35f30"),
- (("ubuntu", "8.4", "x86_64"), "ami-27b35f4e"),
- (("debian", "5.0", "x86"), "ami-dcf615b5"),
- (("debian", "5.0", "x86_64"), "ami-f0f61599"),
- (("centos", "5.4", "x86"), "ami-f8b35e91"),
- (("centos", "5.4", "x86_64"), "ami-ccb35ea5"),
- (("fedora", "8", "x86_64"), "ami-2547a34c"),
- (("fedora", "8", "x86"), "ami-5647a33f"))),
- ("rackspace_imgname",
- ((("fedora", "12", "x86_64"), "Fedora 12"),
- (("fedora", "13", "x86_64"), "Fedora 13"),
- (("fedora", "14", "x86_64"), "Fedora 14"))),
- ("ec2_mtype",
- ((("*", "*", "x86"), "m1.small"),
- (("*", "*", "x86_64"), "m1.large"))),
- ]
-
-class nodeWrapper(object):
- def __init__(self, configurator, **kwargs):
- self.terminate = False if "no_terminate" in kwargs else True
- self.use_internal_name = False
-
- def getHostname(self):
- internal_name=self.node.private_ip[0]
- public_name=self.node.public_ip[0]
- if not (internal_name or external_name):
- raise Exception('host has no name?')
- if self.use_internal_name:
- # FIXME: by inspection, it seems this is sometimes the
- # empty string. Dunno if that's EC2 or libcloud being
- # stupid, but it's not good.
- if internal_name:
- return internal_name
- else:
- return public_name
- else:
- return public_name
-
- def initwait(self):
- print "waiting for node to spin up"
- # Wait for EC2 to tell us the node is running.
- while 1:
- n=None
- # EC2 sometimes takes a while to report a node.
- for i in range(6):
- nodes = [n for n in self.list_nodes() if (n.id==self.node.id)]
- if len(nodes)>0:
- n=nodes[0]
- break
- else:
- time.sleep(10)
- if not n:
- raise Exception("couldn't find node with id %s" % self.node.id)
- if n.state == NodeState.PENDING:
- time.sleep(10)
- else:
- self.node = n
- break
- print "ok"
- # Now wait for the node's sshd to be accepting connections.
- print "waiting for ssh"
- sshwait = True
- if sshwait == False:
- return
- while sshwait:
- s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- try:
- try:
- s.connect((self.node.public_ip[0], 22))
- sshwait = False
- print "connected on port 22 (ssh)"
- time.sleep(15) # arbitrary timeout, in case the
- # remote sshd is slow.
- except socket.error, err:
- pass
- finally:
- s.close()
- time.sleep(3) # arbitrary timeout
- print "ok"
-
- def __enter__(self):
- self.start()
- # Note: we don't do an initwait() in __enter__ because if an
- # exception is raised during __enter__, __exit__ doesn't get
- # run (and by inspection RackSpace doesn't let you kill a node
- # that hasn't finished booting yet).
- return self
-
- def __exit__(self, type, value, traceback):
- self.stop()
-
- def stop(self):
- if self.terminate:
- print "Destroying node %s" % self.node.id
- self.node.destroy()
- else:
- print "Not terminating EC2 instance %s." % self.node.id
-
- def setup(self):
- pass
-
-class EC2Instance (nodeWrapper):
- def __init__(self, configurator, **kwargs):
- super(EC2Instance, self).__init__(configurator, **kwargs)
- # Stuff we need to start an instance: AMI name, key and cert
- # files. AMI and mtype default to configuration in this file,
- # but can be overridden.
- self.ec2_ami = configurator.findOrDefault(kwargs, "ec2_ami")
- self.ec2_mtype = configurator.findOrDefault(kwargs, "ec2_mtype")
- self.use_internal_name = True if "use_internal_name" in kwargs else False
- self.ec2_sshkey=kwargs["ec2_sshkey"]
-
- # FIXME: this needs to be a commandline option
- self.ec2_groups = ["default", "buildbot-slave", "dist-slave"]
-
-
- def start(self):
- "Fire up a fresh EC2 instance."
- EC2 = get_driver(Provider.EC2)
- self.driver = EC2NodeDriver(settings.id, settings.key)
- image = NodeImage(self.ec2_ami, self.ec2_ami, EC2)
- size = NodeSize(self.ec2_mtype, self.ec2_mtype, None, None, None, None, EC2)
- self.node = self.driver.create_node(image=image, name=self.ec2_ami, size=size, keyname=self.ec2_sshkey, securitygroup=self.ec2_groups)
- print "Created node %s" % self.node.id
-
- def list_nodes(self):
- return self.driver.list_nodes()
-
-class SshConnectionConfigurator (BaseConfigurator):
- def __init__(self, **kwargs):
- super(SshConnectionConfigurator, self).__init__(**kwargs)
- self.configuration += [("ssh_login",
- # FLAW: this actually depends more on the AMI
- # than the triple.
- ((("debian", "*", "*"), "root"),
- (("ubuntu", "10.10", "*"), "ubuntu"),
- (("ubuntu", "10.4", "*"), "ubuntu"),
- (("ubuntu", "9.10", "*"), "ubuntu"),
- (("ubuntu", "9.4", "*"), "root"),
- (("ubuntu", "8.10", "*"), "root"),
- (("ubuntu", "8.4", "*"), "ubuntu"),
- (("fedora", "*", "*"), "root"),
- (("centos", "*", "*"), "root"))),
- ]
-
-class SshConnection (object):
- def __init__(self, configurator, **kwargs):
- # Stuff we need to talk to the thing properly
- self.ssh_login = configurator.findOrDefault(kwargs, "ssh_login")
-
- self.ssh_host = kwargs["ssh_host"]
- self.ssh_keyfile=kwargs["ssh_keyfile"]
- # Gets set to False when we think we can ssh in.
- self.sshwait = True
-
- def initSsh(self):
- ctlpath="/tmp/ec2-ssh-%s-%s-%s" % (self.ssh_host, self.ssh_login, os.getpid())
- argv = ["ssh", "-o", "StrictHostKeyChecking no",
- "-M", "-o", "ControlPath %s" % ctlpath,
- "-v", "-l", self.ssh_login, "-i", self.ssh_keyfile,
- self.ssh_host]
- print "Setting up ssh master connection with %s" % argv
- self.sshproc = subprocess.Popen(argv)
- self.ctlpath = ctlpath
-
-
- def __enter__(self):
- self.initSsh()
- return self
-
- def __exit__(self, type, value, traceback):
- os.kill(self.sshproc.pid, signal.SIGTERM)
- self.sshproc.wait()
-
- def runRemotely(self, argv):
- """Run a command on the host."""
- LocalHost.runLocally(["ssh", "-o", "StrictHostKeyChecking no",
- "-S", self.ctlpath,
- "-l", self.ssh_login,
- "-i", self.ssh_keyfile,
- self.ssh_host] + argv)
-
- def sendFiles(self, files):
- for (localfile, remotefile) in files:
- LocalHost.runLocally(["scp", "-o", "StrictHostKeyChecking no",
- "-o", "ControlMaster auto",
- "-o", "ControlPath %s" % self.ctlpath,
- "-i", self.ssh_keyfile,
- "-rv", localfile,
- self.ssh_login + "@" + self.ssh_host + ":" +
- ("" if remotefile is None else remotefile) ])
-
- def recvFiles(self, files):
- for (remotefile, localfile) in files:
- LocalHost.runLocally(["scp", "-o", "StrictHostKeyChecking no",
- "-o", "ControlMaster auto",
- "-o", "ControlPath %s" % self.ctlpath,
- "-i", self.ssh_keyfile,
- "-rv",
- self.ssh_login + "@" + self.ssh_host +
- ":" + remotefile,
- "." if localfile is None else localfile ])
-
-
-class ScriptFileConfigurator (BaseConfigurator):
- deb_productdir = "dists"
- rpm_productdir = "/usr/src/redhat/RPMS" # FIXME: this could be
- # ~/redhat/RPMS or
- # something elsewhere
-
- preamble_commands = """
-set -x # verbose execution, for debugging
-set -e # errexit, stop on errors
-"""
- # Strictly speaking, we don't need to mangle debian files on rpm
- # systems (and vice versa), but (a) it doesn't hurt anything to do
- # so, and (b) mangling files the same way everywhere could
- # conceivably help uncover bugs in the hideous hideous sed
- # programs we're running here. (N.B., for POSIX wonks: POSIX sed
- # doesn't support either in-place file editing, which we use
- # below. So if we end up wanting to run these mangling commands
- # e.g., on a BSD, we'll need to make them fancier.)
- mangle_files_commands ="""
-# On debianoids, the package names in the changelog and control file
-# must agree, and only files in a subdirectory of debian/ matching the
-# package name will get included in the .deb, so we also have to mangle
-# the rules file.
-( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}" && sed -i '1s/.*([^)]*)/{pkg_name}{pkg_name_suffix} ({pkg_version})/' debian/changelog ) || exit 1
-( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}" && sed -i 's/^Source:.*/Source: {pkg_name}{pkg_name_suffix}/;
-s/^Package:.*mongodb/Package: {pkg_name}{pkg_name_suffix}\\
-Conflicts: {pkg_name_conflicts}/' debian/control; ) || exit 1
-( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}" && sed -i 's|$(CURDIR)/debian/mongodb/|$(CURDIR)/debian/{pkg_name}{pkg_name_suffix}/|g' debian/rules) || exit 1
-( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}" && sed -i 's|debian/mongodb.manpages|debian/{pkg_name}{pkg_name_suffix}.manpages|g' debian/rules) || exit 1
-( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}" && sed -i '/^Name:/s/.*/Name: {pkg_name}{pkg_name_suffix}\\
-Conflicts: {pkg_name_conflicts}/; /^Version:/s/.*/Version: {pkg_version}/; /Requires.*mongo/s/mongo/{pkg_name}{pkg_name_suffix}/;' rpm/mongo.spec )
-# Debian systems require some ridiculous workarounds to get an init
-# script at /etc/init.d/mongodb when the packge name isn't the init
-# script name. Note: dh_installinit --name won't work, because that
-# option would require the init script under debian/ to be named
-# mongodb.
-( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}" &&
-ln debian/init.d debian/{pkg_name}{pkg_name_suffix}.mongodb.init &&
-ln debian/mongodb.upstart debian/{pkg_name}{pkg_name_suffix}.mongodb.upstart &&
-sed -i 's/dh_installinit/dh_installinit --name=mongodb/' debian/rules) || exit 1
-( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}" && cat debian/rules)
-( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}" && cat rpm/mongo.spec)
-"""
-
- # If we're just packaging up nightlies, do this:
- nightly_build_mangle_files="""
-( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}" && sed -i '/scons[[:space:]]*$/d; s^scons.*install^mkdir -p debian/{pkg_name}{pkg_name_suffix} \&\& wget http://downloads.mongodb.org/linux/mongodb-linux-{mongo_arch}-{mongo_pub_version}.tgz \&\& tar xzvf mongodb-linux-{mongo_arch}-{mongo_pub_version}.tgz \&\& find `tar tzf mongodb-linux-{mongo_arch}-{mongo_pub_version}.tgz | sed "s|/.*||" | sort -u | head -n1` -mindepth 1 -maxdepth 1 -type d | xargs -n1 -IARG mv -v ARG debian/{pkg_name}{pkg_name_suffix}/usr \&\& (rm debian/{pkg_name}{pkg_name_suffix}/usr/bin/mongosniff || true)^' debian/rules)
-( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}" && sed -i 's/^BuildRequires:.*//; s/scons.*\ -c//; s/scons.*\ all//; s^scons.*install^(mkdir -p $RPM_BUILD_ROOT/usr ; cd /tmp \&\& curl http://downloads.mongodb.org/linux/mongodb-linux-{mongo_arch}-{mongo_pub_version}.tgz > mongodb-linux-{mongo_arch}-{mongo_pub_version}.tgz \&\& tar xzvf mongodb-linux-{mongo_arch}-{mongo_pub_version}.tgz \&\& find `tar tzf mongodb-linux-{mongo_arch}-{mongo_pub_version}.tgz | sed "s|/.*||" | sort -u | head -n1` -mindepth 1 -maxdepth 1 -type d | xargs -n1 -IARG cp -pRv ARG $RPM_BUILD_ROOT/usr \&\& (rm -r $RPM_BUILD_ROOT/usr/bin/mongosniff $RPM_BUILD_ROOT/usr/lib64/libmongoclient.a $RPM_BUILD_ROOT/usr/lib/libmongoclient.a $RPM_BUILD_ROOT/usr/include/mongo || true))^' rpm/mongo.spec)
-# Upstream nightlies no longer contain libmongoclient.
-( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}" && sed -i '/%package devel/{{N;N;d;}}; /%description devel/{{N;N;N;N;N;d;}}; /%files devel/{{N;N;N;d;}};' rpm/mongo.spec )
-( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}" && cat debian/rules)
-( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}" && cat rpm/mongo.spec)
-"""
-#$RPM_BUILD_ROOT/usr/lib/libmongoclient.a $RPM_BUILD_ROOT/usr/lib64/libmongoclient.a
- mangle_files_for_new_deb_xulrunner_commands = """
-( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}" && sed -i 's/xulrunner-dev/xulrunner-1.9.2-dev/g' debian/control )
-"""
-
- mangle_files_for_ancient_redhat_commands = """
-# Ancient RedHats ship with very old boosts and non-UTF8-aware js
-# libraries, so we need to link statically to those.
-( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}" && sed -i 's|^scons.*((inst)all)|scons --prefix=$RPM_BUILD_ROOT/usr --extralib=nspr4 --staticlib=boost_system-mt,boost_thread-mt,boost_filesystem-mt,boost_program_options-mt,js $1|' rpm/mongo.spec )
-"""
-
- deb_prereq_commands = """
-# Configure debconf to never prompt us for input.
-export DEBIAN_FRONTEND=noninteractive
-apt-get update
-apt-get install -y {pkg_prereq_str}
-"""
-
- deb_build_commands="""
-mkdir -p "{pkg_product_dir}/{distro_version}/10gen/binary-{distro_arch}"
-mkdir -p "{pkg_product_dir}/{distro_version}/10gen/source"
-( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}"; debuild ) || exit 1
-# Try installing it
-dpkg -i {pkg_name}{pkg_name_suffix}*.deb
-ps ax | grep mongo || {{ echo "no running mongo" >/dev/stderr; exit 1; }}
-dpkg --remove $(for f in {pkg_name}{pkg_name_suffix}*.deb ; do echo ${{f%%_*}}; done)
-dpkg --purge $(for f in {pkg_name}{pkg_name_suffix}*.deb ; do echo ${{f%%_*}}; done)
-cp {pkg_name}{pkg_name_suffix}*.deb "{pkg_product_dir}/{distro_version}/10gen/binary-{distro_arch}"
-cp {pkg_name}{pkg_name_suffix}*.dsc "{pkg_product_dir}/{distro_version}/10gen/source"
-cp {pkg_name}{pkg_name_suffix}*.tar.gz "{pkg_product_dir}/{distro_version}/10gen/source"
-dpkg-scanpackages "{pkg_product_dir}/{distro_version}/10gen/binary-{distro_arch}" /dev/null | gzip -9c > "{pkg_product_dir}/{distro_version}/10gen/binary-{distro_arch}/Packages.gz"
-dpkg-scansources "{pkg_product_dir}/{distro_version}/10gen/source" /dev/null | gzip -9c > "{pkg_product_dir}/{distro_version}/10gen/source/Sources.gz"
-"""
- centos_prereq_commands = """
-rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/{distro_arch}/epel-release-5-4.noarch.rpm
-yum -y install {pkg_prereq_str}
-"""
- fedora_prereq_commands = """
-#rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/{distro_arch}/epel-release-5-4.noarch.rpm
-yum -y install {pkg_prereq_str}
-"""
- rpm_build_commands="""
-for d in BUILD BUILDROOT RPMS SOURCES SPECS SRPMS; do mkdir -p {rpmbuild_dir}/$d; done
-cp -v "{pkg_name}{pkg_name_suffix}-{pkg_version}/rpm/mongo.spec" {rpmbuild_dir}/SPECS/{pkg_name}{pkg_name_suffix}.spec
-tar -cpzf {rpmbuild_dir}/SOURCES/"{pkg_name}{pkg_name_suffix}-{pkg_version}".tar.gz "{pkg_name}{pkg_name_suffix}-{pkg_version}"
-rpmbuild -ba --target={distro_arch} {rpmbuild_dir}/SPECS/{pkg_name}{pkg_name_suffix}.spec
-# FIXME: should install the rpms, check if mongod is running.
-"""
- # FIXME: this is clean, but adds 40 minutes or so to the build process.
- old_rpm_precommands = """
-yum install -y bzip2-devel python-devel libicu-devel chrpath zlib-devel nspr-devel readline-devel ncurses-devel
-# FIXME: this is just some random URL found on rpmfind some day in 01/2010.
-wget ftp://194.199.20.114/linux/EPEL/5Client/SRPMS/js-1.70-8.el5.src.rpm
-rpm -ivh js-1.70-8.el5.src.rpm
-sed -i 's/XCFLAGS.*$/XCFLAGS=\"%{{optflags}} -fPIC -DJS_C_STRINGS_ARE_UTF8\" \\\\/' /usr/src/redhat/SPECS/js.spec
-rpmbuild -ba /usr/src/redhat/SPECS/js.spec
-rpm -Uvh /usr/src/redhat/RPMS/{distro_arch}/js-1.70-8.{distro_arch}.rpm
-rpm -Uvh /usr/src/redhat/RPMS/{distro_arch}/js-devel-1.70-8.{distro_arch}.rpm
-# FIXME: this is just some random URL found on rpmfind some day in 01/2010.
-wget ftp://195.220.108.108/linux/sourceforge/g/project/gr/gridiron2/support-files/FC10%20source%20RPMs/boost-1.38.0-1.fc10.src.rpm
-rpm -ivh boost-1.38.0-1.fc10.src.rpm
-rpmbuild -ba /usr/src/redhat/SPECS/boost.spec
-rpm -ivh /usr/src/redhat/RPMS/{distro_arch}/boost-1.38.0-1.{distro_arch}.rpm
-rpm -ivh /usr/src/redhat/RPMS/{distro_arch}/boost-devel-1.38.0-1.{distro_arch}.rpm
-"""
-
- # This horribleness is an attempt to work around ways that you're
- # not really meant to package things for Debian unless you are
- # Debian.
-
- # On very old Debianoids, libboost-<foo>-dev will be some old
- # boost that's not as thready as we want, but which Eliot says
- # will work; on very new Debianoids, libbost-<foo>-dev is what we
- # want.
- unversioned_deb_boost_prereqs = ["libboost-thread-dev", "libboost-filesystem-dev", "libboost-program-options-dev", "libboost-date-time-dev", "libboost-dev"]
- # On some in-between Debianoids, libboost-<foo>-dev is still a
- # 1.34, but 1.35 packages are available, so we want those.
- versioned_deb_boost_prereqs = ["libboost-thread1.35-dev", "libboost-filesystem1.35-dev", "libboost-program-options1.35-dev", "libboost-date-time1.35-dev", "libboost1.35-dev"]
-
- new_versioned_deb_boost_prereqs = ["libboost-thread1.42-dev", "libboost-filesystem1.42-dev", "libboost-program-options1.42-dev", "libboost-date-time1.42-dev", "libboost1.42-dev"]
- unversioned_deb_xulrunner_prereqs = ["xulrunner-dev"]
-
- old_versioned_deb_xulrunner_prereqs = ["xulrunner-1.9-dev"]
- new_versioned_deb_xulrunner_prereqs = ["xulrunner-1.9.2-dev"]
-
- common_deb_prereqs = [ "build-essential", "dpkg-dev", "libreadline-dev", "libpcap-dev", "libpcre3-dev", "git-core", "scons", "debhelper", "devscripts", "git-core" ]
-
- centos_preqres = ["js-devel", "readline-devel", "pcre-devel", "gcc-c++", "scons", "rpm-build", "git" ]
- fedora_prereqs = ["js-devel", "readline-devel", "pcre-devel", "gcc-c++", "scons", "rpm-build", "git", "curl" ]
-
- def __init__(self, **kwargs):
- super(ScriptFileConfigurator, self).__init__(**kwargs)
- # FIXME: this method is disabled until we get back around to
- # actually building from source.
- if None: # kwargs["mongo_version"][0] == 'r':
- self.get_mongo_commands = """
-wget -Otarball.tgz "http://github.com/mongodb/mongo/tarball/{mongo_version}";
-tar xzf tarball.tgz
-mv "`tar tzf tarball.tgz | sed 's|/.*||' | sort -u | head -n1`" "{pkg_name}{pkg_name_suffix}-{pkg_version}"
-"""
- else:
- self.get_mongo_commands = """
-git clone git://github.com/mongodb/mongo.git
-"""
- # This is disabled for the moment. it's for building the
- # tip of some versioned branch.
- if None: #kwargs['mongo_version'][0] == 'v':
- self.get_mongo_commands +="""
-( cd mongo && git archive --prefix="{pkg_name}{pkg_name_suffix}-{pkg_version}/" "`git log origin/{mongo_version} | sed -n '1s/^commit //p;q'`" ) | tar xf -
-"""
- else:
- self.get_mongo_commands += """
-( cd mongo && git archive --prefix="{pkg_name}{pkg_name_suffix}-{pkg_version}/" "{mongo_version}" ) | tar xf -
-"""
-
- if "local_mongo_dir" in kwargs:
- self.mangle_files_commands = """( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}" && rm -rf debian rpm && cp -pvR ~/pkg/* . )
-""" + self.mangle_files_commands
-
- self.configuration += [("pkg_product_dir",
- ((("ubuntu", "*", "*"), self.deb_productdir),
- (("debian", "*", "*"), self.deb_productdir),
- (("fedora", "*", "*"), "~/rpmbuild/RPMS"),
- (("centos", "*", "*"), "/usr/src/redhat/RPMS"))),
- ("pkg_prereqs",
- ((("ubuntu", "9.4", "*"),
- self.versioned_deb_boost_prereqs + self.unversioned_deb_xulrunner_prereqs + self.common_deb_prereqs),
- (("ubuntu", "9.10", "*"),
- self.unversioned_deb_boost_prereqs + self.unversioned_deb_xulrunner_prereqs + self.common_deb_prereqs),
- (("ubuntu", "10.10", "*"),
- self.new_versioned_deb_boost_prereqs + self.new_versioned_deb_xulrunner_prereqs + self.common_deb_prereqs),
- (("ubuntu", "10.4", "*"),
- self.unversioned_deb_boost_prereqs + self.new_versioned_deb_xulrunner_prereqs + self.common_deb_prereqs),
- (("ubuntu", "8.10", "*"),
- self.versioned_deb_boost_prereqs + self.unversioned_deb_xulrunner_prereqs + self.common_deb_prereqs),
- (("ubuntu", "8.4", "*"),
- self.unversioned_deb_boost_prereqs + self.old_versioned_deb_xulrunner_prereqs + self.common_deb_prereqs),
- (("debian", "5.0", "*"),
- self.versioned_deb_boost_prereqs + self.unversioned_deb_xulrunner_prereqs + self.common_deb_prereqs),
- (("fedora", "*", "*"),
- self.fedora_prereqs),
- (("centos", "5.4", "*"),
- self.centos_preqres))),
- # FIXME: this is deprecated
- ("commands",
- ((("debian", "*", "*"),
- self.deb_prereq_commands + self.get_mongo_commands + self.mangle_files_commands + self.deb_build_commands),
- (("ubuntu", "10.4", "*"),
- self.preamble_commands + self.deb_prereq_commands + self.get_mongo_commands + self.mangle_files_commands + self.mangle_files_for_new_deb_xulrunner_commands + self.deb_build_commands),
- (("ubuntu", "*", "*"),
- self.preamble_commands + self.deb_prereq_commands + self.get_mongo_commands + self.mangle_files_commands + self.deb_build_commands),
- (("centos", "*", "*"),
- self.preamble_commands + self.old_rpm_precommands + self.centos_prereq_commands + self.get_mongo_commands + self.mangle_files_commands + self.mangle_files_for_ancient_redhat_commands + self.rpm_build_commands),
- (("fedora", "*", "*"),
- self.preamble_commands + self.old_rpm_precommands + self.fedora_prereq_commands + self.get_mongo_commands + self.mangle_files_commands + self.rpm_build_commands))),
- ("preamble_commands",
- ((("*", "*", "*"), self.preamble_commands),
- )),
- ("install_prereqs",
- ((("debian", "*", "*"), self.deb_prereq_commands),
- (("ubuntu", "*", "*"), self.deb_prereq_commands),
- (("centos", "*", "*"), self.centos_prereq_commands),
- (("fedora", "*", "*"), self.fedora_prereq_commands))),
- ("get_mongo",
- ((("*", "*", "*"), self.get_mongo_commands),
- )),
- ("mangle_mongo",
- ((("debian", "*", "*"), self.mangle_files_commands),
- (("ubuntu", "10.10", "*"),
- self.mangle_files_commands + self.mangle_files_for_new_deb_xulrunner_commands),
- (("ubuntu", "10.4", "*"),
- self.mangle_files_commands + self.mangle_files_for_new_deb_xulrunner_commands),
- (("ubuntu", "*", "*"), self.mangle_files_commands),
- (("centos", "*", "*"),
- self.mangle_files_commands + self.mangle_files_for_ancient_redhat_commands),
- (("fedora", "*", "*"),
- self.mangle_files_commands))),
- ("build_prerequisites",
- ((("fedora", "*", "*"), self.old_rpm_precommands),
- (("centos", "*", "*"), self.old_rpm_precommands),
- (("*", "*", "*"), ''))),
- ("install_for_packaging",
- ((("debian", "*", "*"),""),
- (("ubuntu", "*", "*"),""),
- (("fedora", "*", "*"), ""),
- (("centos", "*", "*"),""))),
- ("build_package",
- ((("debian", "*", "*"),
- self.deb_build_commands),
- (("ubuntu", "*", "*"),
- self.deb_build_commands),
- (("fedora", "*", "*"),
- self.rpm_build_commands),
- (("centos", "*", "*"),
- self.rpm_build_commands))),
- ("pkg_name",
- ((("debian", "*", "*"), "mongodb"),
- (("ubuntu", "*", "*"), "mongodb"),
- (("centos", "*", "*"), "mongo"),
- (("fedora", "*", "*"), "mongo"))),
- # FIXME: there should be a command-line argument for this.
- ("pkg_name_conflicts",
- ((("*", "*", "*"), ["", "-stable", "-unstable", "-snapshot", "-oldstable"]),
- )),
- ("rpmbuild_dir",
- ((("fedora", "*", "*"), "~/rpmbuild"),
- (("centos", "*", "*"), "/usr/src/redhat"),
- (("*", "*","*"), ''),
- )),
- ]
-
-
-
-
-class ScriptFile(object):
- def __init__(self, configurator, **kwargs):
- self.configurator = configurator
- self.mongo_version_spec = kwargs['mongo_version_spec']
- self.mongo_arch = kwargs["arch"] if kwargs["arch"] == "x86_64" else "i686"
- self.pkg_prereqs = configurator.default("pkg_prereqs")
- self.pkg_name = configurator.default("pkg_name")
- self.pkg_product_dir = configurator.default("pkg_product_dir")
- #self.formatter = configurator.default("commands")
- self.distro_name = configurator.default("distro_name")
- self.distro_version = configurator.default("distro_version")
- self.distro_arch = configurator.default("distro_arch")
-
- def bogoformat(self, fmt, **kwargs):
- r = ''
- i = 0
- while True:
- c = fmt[i]
- if c in '{}':
- i+=1
- c2=fmt[i]
- if c2 == c:
- r+=c
- else:
- j=i
- while True:
- p=fmt[j:].find('}')
- if p == -1:
- raise Exception("malformed format string starting at %d: no closing brace" % i)
- else:
- j+=p
- if len(fmt) > (j+1) and fmt[j+1]=='}':
- j+=2
- else:
- break
- key = fmt[i:j]
- r+=kwargs[key]
- i=j
- else:
- r+=c
- i+=1
- if i==len(fmt):
- return r
-
- def fmt(self, formatter, **kwargs):
- try:
- return string.Formatter.format(formatter, kwargs)
- finally:
- return self.bogoformat(formatter, **kwargs)
-
- def genscript(self):
- script=''
- formatter = self.configurator.default("preamble_commands") + self.configurator.default("install_prereqs")
- script+=self.fmt(formatter,
- distro_name=self.distro_name,
- distro_version=self.distro_version,
- distro_arch=self.distro_arch,
- pkg_name=self.pkg_name,
- pkg_product_dir=self.pkg_product_dir,
- mongo_arch=self.mongo_arch,
- pkg_prereq_str=" ".join(self.pkg_prereqs),
- )
-
- specs=self.mongo_version_spec.split(',')
- for spec in specs:
- (version, pkg_name_suffix, pkg_version) = parse_mongo_version_spec(spec)
- mongo_version = version if version[0] != 'n' else ('HEAD' if version == 'nlatest' else 'r'+version[1:]) #'HEAD'
- mongo_pub_version = version.lstrip('n') if version[0] in 'n' else 'latest'
- pkg_name_suffix = pkg_name_suffix if pkg_name_suffix else ''
- pkg_version = pkg_version
- pkg_name_conflicts = list(self.configurator.default("pkg_name_conflicts") if pkg_name_suffix else [])
- pkg_name_conflicts.remove(pkg_name_suffix) if pkg_name_suffix and pkg_name_suffix in pkg_name_conflicts else []
- formatter = self.configurator.default("get_mongo") + self.configurator.default("mangle_mongo") + (self.configurator.nightly_build_mangle_files if version[0] == 'n' else '') +(self.configurator.default("build_prerequisites") if version[0] != 'n' else '') + self.configurator.default("install_for_packaging") + self.configurator.default("build_package")
- script+=self.fmt(formatter,
- mongo_version=mongo_version,
- distro_name=self.distro_name,
- distro_version=self.distro_version,
- distro_arch=self.distro_arch,
- pkg_prereq_str=" ".join(self.pkg_prereqs),
- pkg_name=self.pkg_name,
- pkg_name_suffix=pkg_name_suffix,
- pkg_version=pkg_version,
- pkg_product_dir=self.pkg_product_dir,
- # KLUDGE: rpm specs and deb
- # control files use
- # comma-separated conflicts,
- # but there's no reason to
- # suppose this works elsewhere
- pkg_name_conflicts = ", ".join([self.pkg_name+conflict for conflict in pkg_name_conflicts]),
- mongo_arch=self.mongo_arch,
- mongo_pub_version=mongo_pub_version,
- rpmbuild_dir=self.configurator.default('rpmbuild_dir'))
- script+='rm -rf mongo'
- return script
-
- def __enter__(self):
- self.localscript=None
- # One of tempfile or I is very stupid.
- (fh, name) = tempfile.mkstemp('', "makedist.", ".")
- try:
- pass
- finally:
- os.close(fh)
- with open(name, 'w+') as fh:
- fh.write(self.genscript())
- self.localscript=name
- return self
-
- def __exit__(self, type, value, traceback):
- if self.localscript:
- os.unlink(self.localscript)
-
-class Configurator(SshConnectionConfigurator, EC2InstanceConfigurator, ScriptFileConfigurator, BaseHostConfigurator):
- def __init__(self, **kwargs):
- super(Configurator, self).__init__(**kwargs)
-
-class rackspaceInstance(nodeWrapper):
- def __init__(self, configurator, **kwargs):
- super(rackspaceInstance, self).__init__(configurator, **kwargs)
- self.imgname=configurator.default('rackspace_imgname')
-
- def start(self):
- driver = get_driver(Provider.RACKSPACE)
- self.conn = driver(settings.rackspace_account, settings.rackspace_api_key)
- name=self.imgname+'-'+str(os.getpid())
- images=filter(lambda x: (x.name.find(self.imgname) > -1), self.conn.list_images())
- sizes=self.conn.list_sizes()
- sizes.sort(cmp=lambda x,y: int(x.ram)<int(y.ram))
- node = None
- if len(images) > 1:
- raise Exception("too many images with \"%s\" in the name" % self.imgname)
- if len(images) < 1:
- raise Exception("too few images with \"%s\" in the name" % self.imgname)
- image = images[0]
- self.node = self.conn.create_node(image=image, name=name, size=sizes[0])
- # Note: the password is available only in the response to the
- # create_node request, not in subsequent list_nodes()
- # requests; so although the node objects we get back from
- # list_nodes() are usuable for most things, we must hold onto
- # the initial password.
- self.password = self.node.extra['password']
- print self.node
-
- def list_nodes(self):
- return self.conn.list_nodes()
-
- def setup(self):
- self.putSshKey()
-
- def putSshKey(self):
- keyfile=settings.makedist['ssh_keyfile']
- ssh = ParamikoSSHClient(hostname = self.node.public_ip[0], password = self.password)
- ssh.connect()
- print "putting ssh public key"
- ssh.put(".ssh/authorized_keys", contents=open(keyfile+'.pub').read(), chmod=0600)
- print "ok"
-
-def parse_mongo_version_spec (spec):
- foo = spec.split(":")
- mongo_version = foo[0] # this can be a commit id, a
- # release id "r1.2.2", or a branch name
- # starting with v.
- if len(foo) > 1:
- pkg_name_suffix = foo[1]
- if len(foo) > 2 and foo[2]:
- pkg_version = foo[2]
- else:
- pkg_version = time.strftime("%Y%m%d")
- if not pkg_name_suffix:
- if mongo_version[0] in ["r", "v"]:
- nums = mongo_version.split(".")
- if int(nums[1]) % 2 == 0:
- pkg_name_suffix = "-stable"
- else:
- pkg_name_suffix = "-unstable"
- else:
- pkg_name_suffix = ""
- return (mongo_version, pkg_name_suffix, pkg_version)
-
-def main():
-# checkEnvironment()
-
- (kwargs, args) = processArguments()
- (rootdir, distro_name, distro_version, arch, mongo_version_spec) = args[:5]
- # FIXME: there are a few other characters that we can't use in
- # file names on Windows, in case this program really needs to run
- # there.
- distro_name = distro_name.replace('/', '-').replace('\\', '-')
- distro_version = distro_version.replace('/', '-').replace('\\', '-')
- arch = arch.replace('/', '-').replace('\\', '-')
- try:
- import settings
- if "makedist" in dir ( settings ):
- for key in ["ec2_sshkey", "ssh_keyfile", "gpg_homedir" ]:
- if key not in kwargs and key in settings.makedist:
- kwargs[key] = settings.makedist[key]
- except Exception, err:
- print "No settings: %s. Continuing anyway..." % err
- pass
-
- kwargs["distro_name"] = distro_name
- kwargs["distro_version"] = distro_version
- kwargs["arch"] = arch
- kwargs['mongo_version_spec'] = mongo_version_spec
-
- kwargs["localdir"] = rootdir
- # FIXME: this should also include the mongo version or something.
-# if "subdirs" in kwargs:
-# kwargs["localdir"] = "%s/%s/%s/%s/%s" % (rootdir, distro_name, distro_version, arch, kwargs["mongo_version"])
-# else:
-
-
-
-
- kwargs['gpg_homedir'] = kwargs["gpg_homedir"] if "gpg_homedir" in kwargs else os.path.expanduser("~/.gnupg")
- configurator = Configurator(**kwargs)
- LocalHost.runLocally(["mkdir", "-p", kwargs["localdir"]])
- with ScriptFile(configurator, **kwargs) as script:
- with open(script.localscript) as f:
- print """# Going to run the following on a fresh AMI:"""
- print f.read()
- time.sleep(10)
- # FIXME: it's not the best to have two different pathways for
- # the different hosting services, but...
- with EC2Instance(configurator, **kwargs) if kwargs['distro_name'] != 'fedora' else rackspaceInstance(configurator, **kwargs) as host:
- host.initwait()
- host.setup()
- kwargs["ssh_host"] = host.getHostname()
- with SshConnection(configurator, **kwargs) as ssh:
- ssh.runRemotely(["uname -a; ls /"])
- ssh.runRemotely(["mkdir", "pkg"])
- if "local_mongo_dir" in kwargs:
- ssh.sendFiles([(kwargs["local_mongo_dir"]+'/'+d, "pkg") for d in ["rpm", "debian"]])
- ssh.sendFiles([(kwargs['gpg_homedir'], ".gnupg")])
- ssh.sendFiles([(script.localscript, "makedist.sh")])
- ssh.runRemotely((["sudo"] if ssh.ssh_login != "root" else [])+ ["sh", "makedist.sh"])
- ssh.recvFiles([(script.pkg_product_dir, kwargs['localdir'])])
-
-def processArguments():
- # flagspec [ (short, long, argument?, description, argname)* ]
- flagspec = [ ("?", "usage", False, "Print a (useless) usage message", None),
- ("h", "help", False, "Print a help message and exit", None),
- ("N", "no-terminate", False, "Leave the EC2 instance running at the end of the job", None),
- ("S", "subdirs", False, "Create subdirectories of the output directory based on distro name, version, and architecture", None),
- ("I", "use-internal-name", False, "Use the EC2 internal hostname for sshing", None),
- (None, "gpg-homedir", True, "Local directory of gpg junk", "STRING"),
- (None, "local-mongo-dir", True, "Copy packaging files from local mongo checkout", "DIRECTORY"),
- ]
- shortopts = "".join([t[0] + (":" if t[2] else "") for t in flagspec if t[0] is not None])
- longopts = [t[1] + ("=" if t[2] else "") for t in flagspec]
-
- try:
- opts, args = getopt.getopt(sys.argv[1:], shortopts, longopts)
- except getopt.GetoptError, err:
- print str(err)
- sys.exit(2)
-
- # Normalize the getopt-parsed options.
- kwargs = {}
- for (opt, arg) in opts:
- flag = opt
- opt = opt.lstrip("-")
- if flag[:2] == '--': #long opt
- kwargs[opt.replace('-', '_')] = arg
- elif flag[:1] == "-": #short opt
- ok = False
- for tuple in flagspec:
- if tuple[0] == opt:
- ok = True
- kwargs[tuple[1].replace('-', '_')] = arg
- break
- if not ok:
- raise SimpleError("this shouldn't happen: unrecognized option flag: %s", opt)
- else:
- raise SimpleError("this shouldn't happen: non-option returned from getopt()")
-
- if "help" in kwargs:
- print "Usage: %s [OPTIONS] DIRECTORY DISTRO DISTRO-VERSION ARCHITECTURE MONGO-VERSION-SPEC" % sys.argv[0]
- print """Build some packages on new EC2 AMI instances, leave packages under DIRECTORY.
-
-MONGO-VERSION-SPEC has the syntax
-Commit(:Pkg-Name-Suffix(:Pkg-Version)). If Commit starts with an 'r',
-build from a tagged release; if Commit starts with an 'n', package up
-a nightly build; if Commit starts with a 'v', build from the HEAD of a
-version branch; otherwise, build whatever git commit is identified by
-Commit. Pkg-Name-Suffix gets appended to the package name, and
-defaults to "-stable" and "-unstable" if Commit looks like it
-designates a stable or unstable release/branch, respectively.
-Pkg-Version is used as the package version, and defaults to YYYYMMDD.
-Examples:
-
- HEAD # build a snapshot of HEAD, name the package
- # "mongodb", use YYYYMMDD for the version
-
- HEAD:-snap # build a snapshot of HEAD, name the package
- # "mongodb-snap", use YYYYMMDD for the version
-
- HEAD:-snap:123 # build a snapshot of HEAD, name the package
- # "mongodb-snap", use 123 for the version
-
- HEAD:-suffix:1.3 # build a snapshot of HEAD, name the package
-                  # "mongodb-suffix", use "1.3" for the version
-
- r1.2.3 # build a package of the 1.2.3 release, call it "mongodb-stable",
- # make the package version YYYYMMDD.
-
- v1.2:-stable: # build a package of the HEAD of the 1.2 branch
-
- decafbad:-foo:123 # build git commit "decafbad", call the package
- # "mongodb-foo" with package version 123.
-
-Options:"""
- for t in flagspec:
- print "%-20s\t%s." % ("%4s--%s%s:" % ("-%s, " % t[0] if t[0] else "", t[1], ("="+t[4]) if t[4] else ""), t[3])
- print """
-Mandatory arguments to long options are also mandatory for short
-options."""
- sys.exit(0)
-
- if "usage" in kwargs:
- print "Usage: %s [OPTIONS] OUTPUT-DIR DISTRO-NAME DISTRO-VERSION ARCHITECTURE MONGO-VERSION-SPEC" % sys.argv[0]
- sys.exit(0)
-
-
- return (kwargs, args)
-
-
-if __name__ == "__main__":
- main()
-
-# Examples:
-
-# ./makedist.py /tmp/ubuntu ubuntu 8.10 x86_64 HEAD:-snapshot,v1.4:-stable,v1.5:-unstable
-# ./makedist.py /tmp/ubuntu ubuntu 8.10 x86_64 nlatest:-snapshot,n1.4.2:-stable,n1.5.0:-unstable
diff --git a/buildscripts/mergerepositories.py b/buildscripts/mergerepositories.py
deleted file mode 100644
index 028b6e2..0000000
--- a/buildscripts/mergerepositories.py
+++ /dev/null
@@ -1,194 +0,0 @@
-#!/usr/bin/env python
-
-from __future__ import with_statement
-from libcloud.types import Provider
-from libcloud.providers import get_driver
-from libcloud.drivers.ec2 import EC2NodeDriver, NodeImage
-from libcloud.base import Node, NodeImage, NodeSize, NodeState
-
-# libcloud's SSH client seems to be one of those pointless wrappers
-# that (at the moment) both doesn't add anything to the thing it wraps
-# (Paramiko) and also fails to expose the underlying thing's features.
-# What's wrong with people?
-#from libcloud.ssh import SSHClient
-
-import time
-import sys
-import settings
-import subprocess
-import os
-import socket
-
-EC2 = get_driver(Provider.EC2)
-EC2Driver=EC2NodeDriver(settings.id, settings.key)
-
-def tryEC2():
-
- image=NodeImage('ami-bf07ead6', 'ubuntu 10.4', EC2)
- size=NodeSize('m1.large', 'large', None, None, None, None, EC2)
-
- node = None
- try:
- node = EC2Driver.create_node(image=image, name="ubuntu-test", size=size, keyname="kp1", securitygroup=['default', 'dist-slave', 'buildbot-slave'])
- print node
- print node.id
- while node.state == NodeState.PENDING:
- time.sleep(3)
- finally:
- if node:
- node.destroy()
-
-
-class node(object):
- def initWait(self):
- while 1:
- n=None
- # EC2 sometimes takes a while to report a node.
- for i in range(6):
- nodes = [n for n in self.list_nodes() if (n.id==self.node.id)]
- if len(nodes)>0:
- n=nodes[0]
- break
- else:
- time.sleep(10)
- if not n:
- raise Exception("couldn't find node with id %s" % self.node.id)
- if n.state == NodeState.PENDING:
- time.sleep(10)
- else:
- self.node = n
- break
- print "ok"
- # Now wait for the node's sshd to be accepting connections.
- print "waiting for ssh"
- sshwait = True
- if sshwait == False:
- return
- while sshwait:
- s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- try:
- try:
- s.connect((self.node.public_ip[0], 22))
- sshwait = False
- print "connected on port 22 (ssh)"
- time.sleep(15) # arbitrary timeout, in case the
- # remote sshd is slow.
- except socket.error, err:
- pass
- finally:
- s.close()
- time.sleep(3) # arbitrary timeout
- print "ok"
-
- def __enter__(self):
- return self
-
- def __exit__(self, arg0, arg1, arg2):
- print "shutting down node %s" % self.node
- self.node.destroy()
-
-# I don't think libcloud's Nodes implement __enter__ and __exit__, and
-# I like the with statement for ensuring that we don't leak nodes when
-# we don't have to.
-class ec2node(node):
- def list_nodes(self):
- return EC2Driver.list_nodes()
-
-class ubuntuNode(ec2node):
- def __init__(self):
- image=NodeImage('ami-bf07ead6', 'ubuntu 10.4', EC2)
- size=NodeSize('m1.large', 'large', None, None, None, None, EC2)
-
- self.node = EC2Driver.create_node(image=image, name="ubuntu-test", size=size, securitygroup=['default', 'dist-slave', 'buildbot-slave'], keyname='kp1')
-
-class centosNode(ec2node):
- def __init__(self):
- image=NodeImage('ami-ccb35ea5', 'ubuntu 10.4', EC2)
- size=NodeSize('m1.large', 'large', None, None, None, None, EC2)
-
- self.node = EC2Driver.create_node(image=image, name="ubuntu-test", size=size, securitygroup=['default', 'dist-slave', 'buildbot-slave'], keyname='kp1')
-
-class rackspaceNode(node):
- def list_nodes(self):
- self.conn.list_nodes()
-
-class fedora11Node(rackspaceNode):
- def __init__(self):
- driver = get_driver(Provider.RACKSPACE)
- self.conn = driver(settings.rackspace_account, settings.rackspace_api_key)
- string='Fedora 11'
- images=filter(lambda x: (x.name.find(string) > -1), self.conn.list_images())
- sizes=self.conn.list_sizes()
- sizes.sort(cmp=lambda x,y: int(x.ram)<int(y.ram))
- node = None
- if len(images) != 1:
- raise "too many images with \"%s\" in the name" % string
- image = images[0]
- self.node = self.conn.create_node(image=image, name=string, size=sizes[0])
- print self.node
- self.password = self.node.extra['password']
-
-class Err(Exception):
- pass
-
-def merge_yum_repo(dir, outdir):
- dirtail=dir.rstrip('\/').split('/')[-1]
- keyfile=settings.makedist['ssh_keyfile']
- makeyumrepo="""find . -name RPMS | while read dir; do (cd $dir/.. && createrepo .); done"""
- with centosNode() as centos:
- centos.initWait()
- print centos.node
- run_for_effect(["scp", "-o", "StrictHostKeyChecking no","-i", keyfile, "-r", dir, "root@"+centos.node.public_ip[0]+":"])
- run_for_effect(["ssh", "-o", "StrictHostKeyChecking no","-i", keyfile, "root@"+centos.node.public_ip[0], "cd ./" + dirtail + " && " + makeyumrepo])
- run_for_effect(["scp", "-o", "StrictHostKeyChecking no", "-i", keyfile, "-r", "root@"+centos.node.public_ip[0]+":./"+dirtail +'/*', outdir])
-
-
-
-def merge_apt_repo(dir, outdir):
- dirtail=dir.rstrip('\/').split('/')[-1]
-
- gpgdir=settings.makedist['gpg_homedir']
- keyfile=settings.makedist['ssh_keyfile']
-
- makeaptrepo="""for x in debian ubuntu; do (cd $x; for d in `find . -name *.deb | sed 's|^./||; s|/[^/]*$||' | sort -u`; do dpkg-scanpackages $d > $d/Packages; gzip -9c $d/Packages > $d/Packages.gz; done) ; done"""
- makereleaseprologue="""Origin: 10gen
-Label: 10gen
-Suite: 10gen
-Codename: VVVVVV
-Version: VVVVVV
-Architectures: i386 amd64
-Components: 10gen
-Description: 10gen packages"""
- makeaptrelease="""find . -maxdepth 3 -mindepth 3 | while read d; do ( cd $d && (echo '%s' | sed s/VVVVVV/$(basename $(pwd))/; apt-ftparchive release .) > /tmp/Release && mv /tmp/Release . && gpg -r `gpg --list-keys | grep uid | awk '{print $(NF)}'` --no-secmem-warning --no-tty -abs --output Release.gpg Release ); done""" % makereleaseprologue
- with ubuntuNode() as ubuntu:
- ubuntu.initWait()
- print ubuntu.node
- run_for_effect(["ssh", "-o", "StrictHostKeyChecking no","-i", keyfile, "ubuntu@"+ubuntu.node.public_ip[0], "sudo", "sh", "-c", "\"export DEBIAN_FRONTEND=noninteractive; apt-get update; apt-get -y install debhelper\""])
- run_for_effect(["scp", "-o", "StrictHostKeyChecking no","-i", keyfile, "-r", dir, "ubuntu@"+ubuntu.node.public_ip[0]+":"])
- run_for_effect(["scp", "-o", "StrictHostKeyChecking no","-i", keyfile, "-r", gpgdir, "ubuntu@"+ubuntu.node.public_ip[0]+":.gnupg"])
- run_for_effect(["ssh", "-o", "StrictHostKeyChecking no","-i", keyfile, "ubuntu@"+ubuntu.node.public_ip[0], "sh", "-c", "\"ls -lR ./" + dirtail + "\""])
- run_for_effect(["ssh", "-o", "StrictHostKeyChecking no","-i", keyfile, "ubuntu@"+ubuntu.node.public_ip[0], "cd ./"+dirtail + " && " + makeaptrepo])
- run_for_effect(["ssh", "-o", "StrictHostKeyChecking no","-i", keyfile, "ubuntu@"+ubuntu.node.public_ip[0], "cd ./"+dirtail + " && " + makeaptrelease])
- run_for_effect(["scp", "-o", "StrictHostKeyChecking no", "-i", keyfile, "-r", "ubuntu@"+ubuntu.node.public_ip[0]+":./"+dirtail +'/*', outdir])
-
-
-def run_for_effect(argv):
- print " ".join(argv)
- r=subprocess.Popen(argv).wait()
- if r!=0:
- raise Err("subprocess %s exited %d" % (argv, r))
-
-if __name__ == "__main__":
- (flavor, dir, outdir) = sys.argv[-3:]
-
- if flavor == "deb":
- merge_apt_repo(dir, outdir)
- elif flavor == "rpm":
- merge_yum_repo(dir, outdir)
- else:
- Err("unknown pkg flavor %s" % flavor)
- # TODO: yum repositories
-
-
- #main()
- #tryRackSpace()
diff --git a/buildscripts/packager.py b/buildscripts/packager.py
new file mode 100644
index 0000000..400239c
--- /dev/null
+++ b/buildscripts/packager.py
@@ -0,0 +1,982 @@
+#!/usr/bin/python
+
+# This program makes Debian and RPM repositories for MongoDB, by
+# downloading our tarballs of statically linked executables and
+# insinuating them into Linux packages. It must be run on a
+# Debianoid, since Debian provides tools to make RPMs, but RPM-based
+# systems don't provide debian packaging crud.
+
+# Notes:
+#
+# * Almost anything that you want to be able to influence about how a
+#   package is constructed must be embedded in some file that the
+# packaging tool uses for input (e.g., debian/rules, debian/control,
+# debian/changelog; or the RPM specfile), and the precise details are
+# arbitrary and silly. So this program generates all the relevant
+# inputs to the packaging tools.
+#
+# * Once a .deb or .rpm package is made, there's a separate layer of
+# tools that makes a "repository" for use by the apt/yum layers of
+# package tools. The layouts of these repositories are arbitrary and
+# silly, too.
+#
+# * Before you run the program on a new host, these are the
+# prerequisites:
+#
+# apt-get install dpkg-dev rpm debhelper fakeroot ia32-libs createrepo git-core
+# echo "Now put the dist gnupg signing keys in ~root/.gnupg"
+
+import errno
+import getopt
+import httplib
+import os
+import re
+import stat
+import subprocess
+import sys
+import tempfile
+import time
+import urlparse
+
+# For the moment, this program runs on the host that also serves our
+# repositories to the world, so the last thing the program does is
+# move the repositories into place. Make this be the path where the
+# web server will look for repositories.
+REPOPATH="/var/www/repo"
+
+# The 10gen names for the architectures we support.
+ARCHES=["i686", "x86_64"]
+
+# Made up names for the flavors of distribution we package for.
+DISTROS=["debian-sysvinit", "ubuntu-upstart", "redhat"]
+
+# When we're preparing a directory containing packaging tool inputs
+# and our binaries, use this relative subdirectory for placing the
+# binaries.
+BINARYDIR="BINARIES"
+
+class Spec(object):
+ def __init__(self, specstr):
+ tup = specstr.split(":")
+ self.ver = tup[0]
+ # Hack: the second item in the tuple is treated as a suffix if
+ # it lacks an equals sign; otherwise it's the start of named
+ # parameters.
+ self.suf = None
+ if len(tup) > 1 and tup[1].find("=") == -1:
+ self.suf = tup[1]
+ # Catch-all for any other parameters to the packaging.
+ i = 2 if self.suf else 1
+ self.params = dict([s.split("=") for s in tup[i:]])
+ for key in self.params.keys():
+ assert(key in ["suffix", "revision"])
+
+ def version(self):
+ return self.ver
+
+ def version_better_than(self, version_string):
+ # FIXME: this is wrong, but I'm in a hurry.
+ # e.g., "1.8.2" < "1.8.10", "1.8.2" < "1.8.2-rc1"
+ return self.ver > version_string
+
+ def suffix(self):
+ # suffix is what we tack on after pkgbase.
+ if self.suf:
+ return self.suf
+ elif "suffix" in self.params:
+ return self.params["suffix"]
+ else:
+ return "-10gen" if int(self.ver.split(".")[1])%2==0 else "-10gen-unstable"
+
+
+ def pversion(self, distro):
+ # Note: Debian packages have funny rules about dashes in
+ # version numbers, and RPM simply forbids dashes. pversion
+ # will be the package's version number (but we need to know
+ # our upstream version too).
+ if re.search("^(debian|ubuntu)", distro.name()):
+ return re.sub("-", "~", self.ver)
+ elif re.search("(redhat|fedora|centos)", distro.name()):
+ return re.sub("\\d+-", "", self.ver)
+ else:
+ raise Exception("BUG: unsupported platform?")
+
+ def param(self, param):
+ if param in self.params:
+ return self.params[param]
+ return None
+
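+# Illustrative sketch of how a Spec string parses; the version strings
+# below are assumed examples, not values this script requires:
+#
+#   Spec("1.8.1")              ver "1.8.1", default suffix "-10gen"
+#                              (the middle version number is even)
+#   Spec("1.9.0")              default suffix "-10gen-unstable"
+#   Spec("1.8.1:revision=2")   param("revision") == "2"
+#   Spec("1.9.0-rc0").pversion(Distro("debian"))  ->  "1.9.0~rc0"
+#                              (dashes become tildes on debianoids)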
+class Distro(object):
+ def __init__(self, string):
+ self.n=string
+
+ def name(self):
+ return self.n
+
+ def pkgbase(self):
+ # pkgbase is the first part of the package's name on
+ # this distro.
+ return "mongo" if re.search("(redhat|fedora|centos)", self.n) else "mongodb"
+
+ def archname(self, arch):
+ if re.search("^(debian|ubuntu)", self.n):
+ return "i386" if arch.endswith("86") else "amd64"
+ elif re.search("^(centos|redhat|fedora)", self.n):
+ return "i686" if arch.endswith("86") else "x86_64"
+ else:
+ raise Exception("BUG: unsupported platform?")
+
+ def repodir(self, arch):
+ """Return the directory where we'll place the package files for
+ (distro, distro_version) in that distro's preferred repository
+        layout (as distinct from where that distro's package-building
+ tools place the package files)."""
+ if re.search("^(debian|ubuntu)", self.n):
+ return "repo/%s/dists/dist/10gen/binary-%s/" % (self.n, self.archname(arch))
+ elif re.search("(redhat|fedora|centos)", self.n):
+ return "repo/%s/os/%s/RPMS/" % (self.n, self.archname(arch))
+ else:
+ raise Exception("BUG: unsupported platform?")
+
+ def make_pkg(self, arch, spec, srcdir):
+ if re.search("^(debian|ubuntu)", self.n):
+ return make_deb(self, arch, spec, srcdir)
+ elif re.search("^(centos|redhat|fedora)", self.n):
+ return make_rpm(self, arch, spec, srcdir)
+ else:
+ raise Exception("BUG: unsupported platform?")
+
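+# Illustrative sketch of how the Distro helpers map names; the arch
+# values are just the two ARCHES entries above:
+#
+#   Distro("ubuntu-upstart").pkgbase()           -> "mongodb"
+#   Distro("ubuntu-upstart").archname("x86_64")  -> "amd64"
+#   Distro("ubuntu-upstart").repodir("x86_64")
+#       -> "repo/ubuntu-upstart/dists/dist/10gen/binary-amd64/"
+#   Distro("redhat").pkgbase()                   -> "mongo"
+#   Distro("redhat").repodir("i686")             -> "repo/redhat/os/i686/RPMS/"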
+def main(argv):
+ (flags, specs) = parse_args(argv[1:])
+ distros=[Distro(distro) for distro in DISTROS]
+
+ oldcwd=os.getcwd()
+ srcdir=oldcwd+"/../"
+
+ # We do all our work in a randomly-created directory. You can set
+    # TMPDIR to influence where this program will do stuff.
+ prefix=tempfile.mkdtemp()
+ print "Working in directory %s" % prefix
+
+ # This will be a list of directories where we put packages in
+ # "repository layout".
+ repos=[]
+
+ os.chdir(prefix)
+ try:
+ # Download the binaries.
+ urlfmt="http://fastdl.mongodb.org/linux/mongodb-linux-%s-%s.tgz"
+ for (spec, arch) in crossproduct(specs, ARCHES):
+ httpget(urlfmt % (arch, spec.version()), ensure_dir(tarfile(arch, spec)))
+
+        # Build a package for each distro/spec/arch tuple, and
+ # accumulate the repository-layout directories.
+ for (distro, spec, arch) in crossproduct(distros, specs, ARCHES):
+ repos.append(make_package(distro, arch, spec, srcdir))
+
+ # Build the repos' metadatas.
+ for repo in set(repos):
+ print repo
+ make_repo(repo)
+
+ finally:
+ os.chdir(oldcwd)
+ if "-n" not in flags:
+ move_repos_into_place(prefix+"/repo", REPOPATH)
+ # FIXME: try shutil.rmtree some day.
+ sysassert(["rm", "-rv", prefix])
+
+def parse_args(args):
+ if len(args) == 0:
+ print """Usage: packager.py [OPTS] SPEC1 SPEC2 ... SPECn
+
+Options:
+
+ -n: Just build the packages, don't publish them as a repo
+ or clean out the working directory
+
+Each SPEC is a mongodb version string optionally followed by a colon
+and some parameters, of the form <paramname>=<value>. Supported
+parameters:
+
+ suffix -- suffix to append to the package's base name. (If
+ unsupplied, suffixes default based on the parity of the
+ middle number in the version.)
+
+ revision -- least-order version number to packaging systems
+"""
+ sys.exit(0)
+
+ try:
+ (flags, args) = getopt.getopt(args, "n")
+ except getopt.GetoptError, err:
+ print str(err)
+ sys.exit(2)
+ flags=dict(flags)
+ specs=[Spec(arg) for arg in args]
+ return (flags, specs)
+
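+# Illustrative sketch of an invocation; the version and revision values
+# here are assumed, not anything this script mandates:
+#
+#   ./packager.py -n 1.8.1 1.9.0:revision=2
+#
+# parses to flags == {"-n": ""} plus one Spec per version argument; -n
+# skips the final move_repos_into_place() and leaves the temporary
+# working directory in place for inspection.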
+def crossproduct(*seqs):
+ """A generator for iterating all the tuples consisting of elements
+ of seqs."""
+ l = len(seqs)
+ if l == 0:
+ pass
+ elif l == 1:
+ for i in seqs[0]:
+ yield [i]
+ else:
+ for lst in crossproduct(*seqs[:-1]):
+ for i in seqs[-1]:
+ lst2=list(lst)
+ lst2.append(i)
+ yield lst2
+
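+# Illustrative sketch: crossproduct yields one list per combination,
+# e.g. list(crossproduct(["a", "b"], [1, 2])) is
+# [["a", 1], ["a", 2], ["b", 1], ["b", 2]]; main() uses it to walk every
+# (spec, arch) and (distro, spec, arch) combination.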
+def sysassert(argv):
+ """Run argv and assert that it exited with status 0."""
+ print "In %s, running %s" % (os.getcwd(), " ".join(argv))
+ sys.stdout.flush()
+ sys.stderr.flush()
+ assert(subprocess.Popen(argv).wait()==0)
+
+def backtick(argv):
+ """Run argv and return its output string."""
+ print "In %s, running %s" % (os.getcwd(), " ".join(argv))
+ sys.stdout.flush()
+ sys.stderr.flush()
+ return subprocess.Popen(argv, stdout=subprocess.PIPE).communicate()[0]
+
+def ensure_dir(filename):
+ """Make sure that the directory that's the dirname part of
+ filename exists, and return filename."""
+ dirpart = os.path.dirname(filename)
+ try:
+ os.makedirs(dirpart)
+ except OSError: # as exc: # Python >2.5
+ exc=sys.exc_value
+ if exc.errno == errno.EEXIST:
+ pass
+ else:
+ raise exc
+ return filename
+
+
+def tarfile(arch, spec):
+ """Return the location where we store the downloaded tarball for
+ (arch, spec)"""
+ return "dl/mongodb-linux-%s-%s.tar.gz" % (spec.version(), arch)
+
+def setupdir(distro, arch, spec):
+ # The setupdir will be a directory containing all inputs to the
+ # distro's packaging tools (e.g., package metadata files, init
+    # scripts, etc.), along with the already-built binaries. In case
+ # the following format string is unclear, an example setupdir
+ # would be dst/x86_64/debian-sysvinit/mongodb-10gen-unstable/
+ return "dst/%s/%s/%s%s-%s/" % (arch, distro.name(), distro.pkgbase(), spec.suffix(), spec.pversion(distro))
+
+def httpget(url, filename):
+ """Download the contents of url to filename, return filename."""
+ print "Fetching %s to %s." % (url, filename)
+ conn = None
+ u=urlparse.urlparse(url)
+ assert(u.scheme=='http')
+ try:
+ conn = httplib.HTTPConnection(u.hostname)
+ conn.request("GET", u.path)
+ t=filename+'.TMP'
+ res = conn.getresponse()
+ # FIXME: follow redirects
+ if res.status==200:
+ f = open(t, 'w')
+ try:
+ f.write(res.read())
+ finally:
+ f.close()
+
+ else:
+ raise Exception("HTTP error %d" % res.status)
+ os.rename(t, filename)
+ finally:
+ if conn:
+ conn.close()
+ return filename
+
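+# Illustrative sketch of the download step in main(), assuming a spec
+# version of "1.8.1" and arch "x86_64" (both hypothetical here):
+#
+#   URL fetched:  http://fastdl.mongodb.org/linux/mongodb-linux-x86_64-1.8.1.tgz
+#   local path:   dl/mongodb-linux-1.8.1-x86_64.tar.gz
+#                 (note the reversed arch/version order relative to the URL)
+#
+# httpget() writes to a ".TMP" file and only renames it into place after
+# a 200 response, so a failed fetch never leaves a truncated file under
+# the final name.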
+def unpack_binaries_into(arch, spec, where):
+ """Unpack the tarfile for (arch, spec) into directory where."""
+ rootdir=os.getcwd()
+ ensure_dir(where)
+ # Note: POSIX tar doesn't require support for gtar's "-C" option,
+ # and Python's tarfile module prior to Python 2.7 doesn't have the
+ # features to make this detail easy. So we'll just do the dumb
+ # thing and chdir into where and run tar there.
+ os.chdir(where)
+ try:
+ sysassert(["tar", "xvzf", rootdir+"/"+tarfile(arch, spec), "mongodb-linux-%s-%s/bin" % (arch, spec.version())])
+ os.rename("mongodb-linux-%s-%s/bin" % (arch, spec.version()), "bin")
+ os.rmdir("mongodb-linux-%s-%s" % (arch, spec.version()))
+ except Exception:
+ exc=sys.exc_value
+ os.chdir(rootdir)
+ raise exc
+ os.chdir(rootdir)
+
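+# Illustrative sketch, assuming the upstream tarball layout of
+# mongodb-linux-<arch>-<version>/bin/*: after
+# unpack_binaries_into("x86_64", spec, sdir + "BINARIES/usr/"), that
+# directory holds just bin/mongod, bin/mongo, and friends; bin/ is moved
+# up and the emptied versioned directory from the tarball is removed.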
+def make_package(distro, arch, spec, srcdir):
+ """Construct the package for (arch, distro, spec), getting
+    packaging files from srcdir and the package-name suffix from the
+    spec."""
+
+ sdir=setupdir(distro, arch, spec)
+ ensure_dir(sdir)
+ # Note that the RPM packages get their man pages from the debian
+ # directory, so the debian directory is needed in all cases (and
+ # innocuous in the debianoids' sdirs).
+ for pkgdir in ["debian", "rpm"]:
+ print "Copying packaging files from %s to %s" % ("%s/%s" % (srcdir, pkgdir), sdir)
+ # FIXME: sh-dash-cee is bad. See if tarfile can do this.
+ sysassert(["sh", "-c", "(cd \"%s\" && git archive r%s %s/ ) | (cd \"%s\" && tar xvf -)" % (srcdir, spec.version(), pkgdir, sdir)])
+ # Splat the binaries under sdir. The "build" stages of the
+ # packaging infrastructure will move the binaries to wherever they
+ # need to go.
+ unpack_binaries_into(arch, spec, sdir+("%s/usr/"%BINARYDIR))
+ # Remove the mongosniff binary due to libpcap dynamic
+ # linkage. FIXME: this removal should go away
+ # eventually.
+ os.unlink(sdir+("%s/usr/bin/mongosniff"%BINARYDIR))
+ return distro.make_pkg(arch, spec, srcdir)
+
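+# Illustrative sketch of a finished setupdir for an assumed
+# Distro("debian-sysvinit"), arch "x86_64", and Spec("1.8.1") (which
+# presumes an "r1.8.1" tag exists in the source checkout):
+#
+#   dst/x86_64/debian-sysvinit/mongodb-10gen-1.8.1/
+#       debian/                from "git archive r1.8.1 debian/"
+#       rpm/                   ditto; the RPM man pages come from debian/
+#       BINARIES/usr/bin/...   the unpacked tarball, minus mongosniff
+#
+# make_pkg() then runs the distro-appropriate tool over that tree.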
+def make_repo(repodir):
+ if re.search("(debian|ubuntu)", repodir):
+ make_deb_repo(repodir)
+ elif re.search("(centos|redhat|fedora)", repodir):
+ make_rpm_repo(repodir)
+ else:
+ raise Exception("BUG: unsupported platform?")
+
+def make_deb(distro, arch, spec, srcdir):
+ # I can't remember the details anymore, but the initscript/upstart
+ # job files' names must match the package name in some way; and
+ # see also the --name flag to dh_installinit in the generated
+ # debian/rules file.
+ suffix=spec.suffix()
+ sdir=setupdir(distro, arch, spec)
+ if re.search("sysvinit", distro.name()):
+ os.link(sdir+"debian/init.d", sdir+"debian/%s%s.mongodb.init" % (distro.pkgbase(), suffix))
+ os.unlink(sdir+"debian/mongodb.upstart")
+ elif re.search("upstart", distro.name()):
+ os.link(sdir+"debian/mongodb.upstart", sdir+"debian/%s%s.upstart" % (distro.pkgbase(), suffix))
+ os.unlink(sdir+"debian/init.d")
+ else:
+ raise Exception("unknown debianoid flavor: not sysvinit or upstart?")
+ # Rewrite the control and rules files
+ write_debian_control_file(sdir+"debian/control", spec)
+ write_debian_rules_file(sdir+"debian/rules", spec)
+ write_debian_changelog(sdir+"debian/changelog", spec, srcdir)
+ distro_arch=distro.archname(arch)
+ # Do the packaging.
+ oldcwd=os.getcwd()
+ try:
+ os.chdir(sdir)
+ sysassert(["dpkg-buildpackage", "-a"+distro_arch])
+ finally:
+ os.chdir(oldcwd)
+ r=distro.repodir(arch)
+ ensure_dir(r)
+ # FIXME: see if shutil.copyfile or something can do this without
+ # much pain.
+ sysassert(["cp", "-v", sdir+"../%s%s_%s%s_%s.deb"%(distro.pkgbase(), suffix, spec.pversion(distro), "-"+spec.param("revision") if spec.param("revision") else"", distro_arch), r])
+ return r
+
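+# Illustrative sketch for the same assumed inputs (debian-sysvinit,
+# x86_64, version 1.8.1, no revision parameter): dpkg-buildpackage
+# leaves ../mongodb-10gen_1.8.1_amd64.deb beside the setupdir, and
+# make_deb copies it into repo/debian-sysvinit/dists/dist/10gen/binary-amd64/.
+# With revision=2 the file would be mongodb-10gen_1.8.1-2_amd64.deb.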
+def make_deb_repo(repo):
+ # Note: the Debian repository Packages files must be generated
+ # very carefully in order to be usable.
+ oldpwd=os.getcwd()
+ os.chdir(repo+"../../../../")
+ try:
+ dirs=set([os.path.dirname(deb)[2:] for deb in backtick(["find", ".", "-name", "*.deb"]).split()])
+ for d in dirs:
+ s=backtick(["dpkg-scanpackages", d, "/dev/null"])
+ f=open(d+"/Packages", "w")
+ try:
+ f.write(s)
+ finally:
+ f.close()
+ b=backtick(["gzip", "-9c", d+"/Packages"])
+ f=open(d+"/Packages.gz", "wb")
+ try:
+ f.write(b)
+ finally:
+ f.close()
+ finally:
+ os.chdir(oldpwd)
+ # Notes: the Release{,.gpg} files must live in a special place,
+ # and must be created after all the Packages.gz files have been
+ # done.
+ s="""
+Origin: 10gen
+Label: 10gen
+Suite: 10gen
+Codename: %s
+Version: %s
+Architectures: i386 amd64
+Components: 10gen
+Description: 10gen packages
+""" % ("dist", "dist")
+ if os.path.exists(repo+"../../Release"):
+ os.unlink(repo+"../../Release")
+ if os.path.exists(repo+"../../Release.gpg"):
+ os.unlink(repo+"../../Release.gpg")
+ oldpwd=os.getcwd()
+ os.chdir(repo+"../../")
+ s2=backtick(["apt-ftparchive", "release", "."])
+ try:
+ f=open("Release", 'w')
+ try:
+ f.write(s)
+ f.write(s2)
+ finally:
+ f.close()
+
+ arg=None
+ for line in backtick(["gpg", "--list-keys"]).split("\n"):
+ tokens=line.split()
+ if len(tokens)>0 and tokens[0] == "uid":
+ arg=tokens[-1]
+ break
+ # Note: for some reason, I think --no-tty might be needed
+ # here, but maybe not.
+ sysassert(["gpg", "-r", arg, "--no-secmem-warning", "-abs", "--output", "Release.gpg", "Release"])
+ finally:
+ os.chdir(oldpwd)
+
+
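+# Illustrative sketch of the tree make_deb_repo leaves behind for the
+# assumed debian-sysvinit/amd64 case:
+#
+#   repo/debian-sysvinit/dists/dist/10gen/binary-amd64/*.deb
+#   repo/debian-sysvinit/dists/dist/10gen/binary-amd64/Packages{,.gz}
+#   repo/debian-sysvinit/dists/dist/Release
+#   repo/debian-sysvinit/dists/dist/Release.gpg
+#
+# i.e. Packages files sit beside the debs and the signed Release pair
+# sits two directory levels up, which is what apt expects of this layout.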
+def move_repos_into_place(src, dst):
+ # Find all the stuff in src/*, move it to a freshly-created
+ # directory beside dst, then play some games with symlinks so that
+    # dst names the new stuff and dst+".old" names the previous
+ # one. This feels like a lot of hooey for something so trivial.
+
+ # First, make a crispy fresh new directory to put the stuff in.
+ i=0
+ while True:
+ date_suffix=time.strftime("%Y-%m-%d")
+ dname=dst+".%s.%d" % (date_suffix, i)
+ try:
+ os.mkdir(dname)
+ break
+ except OSError:
+ exc=sys.exc_value
+ if exc.errno == errno.EEXIST:
+ pass
+ else:
+ raise exc
+ i=i+1
+
+ # Put the stuff in our new directory.
+ for r in os.listdir(src):
+ sysassert(["cp", "-rv", src + "/" + r, dname])
+
+ # Make a symlink to the new directory; the symlink will be renamed
+ # to dst shortly.
+ i=0
+ while True:
+ tmpnam=dst+".TMP.%d" % i
+ try:
+ os.symlink(dname, tmpnam)
+ break
+ except OSError: # as exc: # Python >2.5
+ exc=sys.exc_value
+ if exc.errno == errno.EEXIST:
+ pass
+ else:
+ raise exc
+ i=i+1
+
+ # Make a symlink to the old directory; this symlink will be
+ # renamed shortly, too.
+ oldnam=None
+ if os.path.exists(dst):
+ i=0
+ while True:
+ oldnam=dst+".old.%d" % i
+ try:
+ os.symlink(os.readlink(dst), oldnam)
+ break
+ except OSError: # as exc: # Python >2.5
+ exc=sys.exc_value
+ if exc.errno == errno.EEXIST:
+ pass
+ else:
+ raise exc
+
+ os.rename(tmpnam, dst)
+ if oldnam:
+ os.rename(oldnam, dst+".old")
+
+
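+# Illustrative sketch of the swap, with an assumed dst of /var/www/repo
+# and an assumed run date of 2011-06-15:
+#
+#   1. mkdir /var/www/repo.2011-06-15.0 and cp src/* into it
+#   2. symlink /var/www/repo.TMP.0 -> /var/www/repo.2011-06-15.0
+#   3. if /var/www/repo already exists, symlink /var/www/repo.old.0 to
+#      its current target
+#   4. rename /var/www/repo.TMP.0 -> /var/www/repo
+#      rename /var/www/repo.old.0 -> /var/www/repo.old
+#
+# so the web-visible name flips to the new tree in a single rename.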
+def write_debian_changelog(path, spec, srcdir):
+ oldcwd=os.getcwd()
+ os.chdir(srcdir)
+ preamble=""
+ if spec.param("revision"):
+ preamble="""mongodb%s (%s-%s) unstable; urgency=low
+
+ * Bump revision number
+
+ -- Richard Kreuter <richard@10gen.com> %s
+
+""" % (spec.suffix(), spec.pversion(Distro("debian")), spec.param("revision"), time.strftime("%a, %d %b %Y %H:%m:%S %z"))
+ try:
+ s=preamble+backtick(["sh", "-c", "git archive r%s debian/changelog | tar xOf -" % spec.version()])
+ finally:
+ os.chdir(oldcwd)
+ f=open(path, 'w')
+ lines=s.split("\n")
+ # If the first line starts with "mongodb", it's not a revision
+ # preamble, and so frob the version number.
+ lines[0]=re.sub("^mongodb \\(.*\\)", "mongodb (%s)" % (spec.pversion(Distro("debian"))), lines[0])
+ # Rewrite every changelog entry starting in mongodb<space>
+ lines=[re.sub("^mongodb ", "mongodb%s " % (spec.suffix()), l) for l in lines]
+ lines=[re.sub("^ --", " --", l) for l in lines]
+ s="\n".join(lines)
+ try:
+ f.write(s)
+ finally:
+ f.close()
+
+def write_debian_control_file(path, spec):
+ s="""Source: @@PACKAGE_BASENAME@@
+Section: devel
+Priority: optional
+Maintainer: Richard Kreuter <richard@10gen.com>
+Build-Depends:
+Standards-Version: 3.8.0
+Homepage: http://www.mongodb.org
+
+Package: @@PACKAGE_BASENAME@@
+Conflicts: @@PACKAGE_CONFLICTS@@
+Architecture: any
+Depends: libc6 (>= 2.3.2), libgcc1 (>= 1:4.1.1), libstdc++6 (>= 4.1.1)
+Description: An object/document-oriented database
+ MongoDB is a high-performance, open source, schema-free
+ document-oriented data store that's easy to deploy, manage
+ and use. It's network accessible, written in C++ and offers
+ the following features :
+ .
+ * Collection oriented storage - easy storage of object-
+ style data
+ * Full index support, including on inner objects
+ * Query profiling
+ * Replication and fail-over support
+ * Efficient storage of binary data including large
+ objects (e.g. videos)
+ * Auto-sharding for cloud-level scalability (Q209)
+ .
+ High performance, scalability, and reasonable depth of
+ functionality are the goals for the project.
+"""
+ s=re.sub("@@PACKAGE_BASENAME@@", "mongodb%s" % spec.suffix(), s)
+ conflict_suffixes=["", "-stable", "-unstable", "-nightly", "-10gen", "-10gen-unstable"]
+ conflict_suffixes.remove(spec.suffix())
+ s=re.sub("@@PACKAGE_CONFLICTS@@", ", ".join(["mongodb"+suffix for suffix in conflict_suffixes]), s)
+ f=open(path, 'w')
+ try:
+ f.write(s)
+ finally:
+ f.close()
+
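+# Illustrative sketch of the substitutions above for an assumed
+# "-10gen" suffix; the generated control file contains
+#
+#   Package:   mongodb-10gen
+#   Conflicts: mongodb, mongodb-stable, mongodb-unstable,
+#              mongodb-nightly, mongodb-10gen-unstable
+#
+# i.e. every sibling suffix except the one being built.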
+def write_debian_rules_file(path, spec):
+ # Note debian/rules is a makefile, so for visual disambiguation we
+ # make all tabs here \t.
+ s="""#!/usr/bin/make -f
+# -*- makefile -*-
+# Sample debian/rules that uses debhelper.
+# This file was originally written by Joey Hess and Craig Small.
+# As a special exception, when this file is copied by dh-make into a
+# dh-make output file, you may use that output file without restriction.
+# This special exception was added by Craig Small in version 0.37 of dh-make.
+
+# Uncomment this to turn on verbose mode.
+#export DH_VERBOSE=1
+
+
+configure: configure-stamp
+configure-stamp:
+\tdh_testdir
+ # Add here commands to configure the package.
+
+\ttouch configure-stamp
+
+
+build: build-stamp
+
+build-stamp: configure-stamp
+\tdh_testdir
+
+ # Add here commands to compile the package.
+# THE FOLLOWING LINE IS INTENTIONALLY COMMENTED.
+\t# scons
+ #docbook-to-man debian/mongodb.sgml > mongodb.1
+\tls debian/*.1 > debian/@@PACKAGE_NAME@@.manpages
+
+\ttouch $@
+
+clean:
+\tdh_testdir
+\tdh_testroot
+\trm -f build-stamp configure-stamp
+
+\t# FIXME: scons freaks out at the presence of target files
+\t# under debian/mongodb.
+\t#scons -c
+\trm -rf $(CURDIR)/debian/@@PACKAGE_NAME@@
+\trm -f config.log
+\trm -f mongo
+\trm -f mongod
+\trm -f mongoimportjson
+\trm -f mongoexport
+\trm -f mongorestore
+\trm -f mongodump
+\trm -f mongofiles
+\trm -f .sconsign.dblite
+\trm -f libmongoclient.a
+\trm -rf client/*.o
+\trm -rf tools/*.o
+\trm -rf shell/*.o
+\trm -rf .sconf_temp
+\trm -f buildscripts/*.pyc
+\trm -f *.pyc
+\trm -f buildinfo.cpp
+\tdh_clean debian/files
+
+install: build
+\tdh_testdir
+\tdh_testroot
+\tdh_prep
+\tdh_installdirs
+
+# THE FOLLOWING LINE IS INTENTIONALLY COMMENTED.
+\t# scons --prefix=$(CURDIR)/debian/mongodb/usr install
+\tcp -v $(CURDIR)/@@BINARYDIR@@/usr/bin/* $(CURDIR)/debian/@@PACKAGE_NAME@@/usr/bin
+\tmkdir -p $(CURDIR)/debian/@@PACKAGE_NAME@@/etc
+\tcp $(CURDIR)/debian/mongodb.conf $(CURDIR)/debian/@@PACKAGE_NAME@@/etc/mongodb.conf
+
+\tmkdir -p $(CURDIR)/debian/@@PACKAGE_NAME@@/usr/share/lintian/overrides/
+\tinstall -m 644 $(CURDIR)/debian/lintian-overrides \
+\t\t$(CURDIR)/debian/@@PACKAGE_NAME@@/usr/share/lintian/overrides/@@PACKAGE_NAME@@
+
+# Build architecture-independent files here.
+binary-indep: build install
+# We have nothing to do by default.
+
+# Build architecture-dependent files here.
+binary-arch: build install
+\tdh_testdir
+\tdh_testroot
+\tdh_installchangelogs
+\tdh_installdocs
+\tdh_installexamples
+#\tdh_install
+#\tdh_installmenu
+#\tdh_installdebconf\t
+#\tdh_installlogrotate
+#\tdh_installemacsen
+#\tdh_installpam
+#\tdh_installmime
+\tdh_installinit --name=@@PACKAGE_BASENAME@@
+#\tdh_installinfo
+\tdh_installman
+\tdh_link
+\tdh_strip
+\tdh_compress
+\tdh_fixperms
+\tdh_installdeb
+\tdh_shlibdeps
+\tdh_gencontrol
+\tdh_md5sums
+\tdh_builddeb
+
+binary: binary-indep binary-arch
+.PHONY: build clean binary-indep binary-arch binary install configure
+"""
+ s=re.sub("@@PACKAGE_NAME@@", "mongodb%s" % spec.suffix(), s)
+ s=re.sub("@@PACKAGE_BASENAME@@", "mongodb", s)
+ s=re.sub("@@BINARYDIR@@", BINARYDIR, s)
+ f=open(path, 'w')
+ try:
+ f.write(s)
+ finally:
+ f.close()
+ # FIXME: some versions of debianoids seem to
+ # need the rules file to be 755?
+    os.chmod(path, stat.S_IXUSR|stat.S_IWUSR|stat.S_IRUSR|stat.S_IXGRP|stat.S_IRGRP|stat.S_IXOTH|stat.S_IROTH)
+
+def make_rpm(distro, arch, spec, srcdir):
+ # Create the specfile.
+ suffix=spec.suffix()
+ sdir=setupdir(distro, arch, spec)
+ specfile=sdir+"rpm/mongo%s.spec" % suffix
+ write_rpm_spec_file(specfile, spec)
+ topdir=ensure_dir(os.getcwd()+'/rpmbuild/')
+ for subdir in ["BUILD", "RPMS", "SOURCES", "SPECS", "SRPMS"]:
+ ensure_dir("%s/%s/" % (topdir, subdir))
+ distro_arch=distro.archname(arch)
+ # RPM tools take these macro files that define variables in
+ # RPMland. Unfortunately, there's no way to tell RPM tools to use
+ # a given file *in addition* to the files that it would already
+ # load, so we have to figure out what it would normally load,
+ # augment that list, and tell RPM to use the augmented list. To
+ # figure out what macrofiles ordinarily get loaded, older RPM
+ # versions had a parameter called "macrofiles" that could be
+ # extracted from "rpm --showrc". But newer RPM versions don't
+ # have this. To tell RPM what macros to use, older versions of
+ # RPM have a --macros option that doesn't work; on these versions,
+ # you can put a "macrofiles" parameter into an rpmrc file. But
+ # that "macrofiles" setting doesn't do anything for newer RPM
+ # versions, where you have to use the --macros flag instead. And
+ # all of this is to let us do our work with some guarantee that
+ # we're not clobbering anything that doesn't belong to us. Why is
+ # RPM so braindamaged?
+ macrofiles=[l for l in backtick(["rpm", "--showrc"]).split("\n") if l.startswith("macrofiles")]
+ flags=[]
+ macropath=os.getcwd()+"/macros"
+ write_rpm_macros_file(macropath, topdir)
+ if len(macrofiles)>0:
+ macrofiles=macrofiles[0]+":"+macropath
+ rcfile=os.getcwd()+"/rpmrc"
+ write_rpmrc_file(rcfile, macrofiles)
+ flags=["--rpmrc", rcfile]
+ else:
+ # This hard-coded hooey came from some box running RPM
+ # 4.4.2.3. It may not work over time, but RPM isn't sanely
+ # configurable.
+ flags=["--macros", "/usr/lib/rpm/macros:/usr/lib/rpm/%s-linux/macros:/etc/rpm/macros.*:/etc/rpm/macros:/etc/rpm/%s-linux/macros:~/.rpmmacros:%s" % (distro_arch, distro_arch, macropath)]
+ # Put the specfile and the tar'd up binaries and stuff in
+ # place. FIXME: see if shutil.copyfile can do this without too
+ # much hassle.
+ sysassert(["cp", "-v", specfile, topdir+"SPECS/"])
+ oldcwd=os.getcwd()
+ os.chdir(sdir+"/../")
+ try:
+ sysassert(["tar", "-cpzf", topdir+"SOURCES/mongo%s-%s.tar.gz" % (suffix, spec.pversion(distro)), os.path.basename(os.path.dirname(sdir))])
+ finally:
+ os.chdir(oldcwd)
+ # Do the build.
+ sysassert(["rpmbuild", "-ba", "--target", distro_arch] + flags + ["%s/SPECS/mongo%s.spec" % (topdir, suffix)])
+ r=distro.repodir(arch)
+ ensure_dir(r)
+ # FIXME: see if some combination of shutil.copy<hoohah> and glob
+ # can do this without shelling out.
+ sysassert(["sh", "-c", "cp -v \"%s/RPMS/%s/\"*.rpm \"%s\""%(topdir, distro_arch, r)])
+ return r
+
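+# Illustrative sketch of what make_rpm sets up before invoking rpmbuild,
+# assuming the default "-10gen" suffix and an x86_64 build (paths are
+# relative to the mkdtemp() working directory):
+#
+#   rpmbuild/{BUILD,RPMS,SOURCES,SPECS,SRPMS}/   private topdir
+#   macros    containing "%_topdir <prefix>/rpmbuild/"
+#   rpmrc     (older RPMs only) the stock macrofiles list with
+#             ":<prefix>/macros" appended
+#
+# and then, roughly,
+#
+#   rpmbuild -ba --target x86_64 --rpmrc rpmrc rpmbuild/SPECS/mongo-10gen.spec
+#
+# (newer RPMs get --macros with a hard-coded path list instead).  The
+# built RPMs are copied from rpmbuild/RPMS/x86_64/ into
+# repo/redhat/os/x86_64/RPMS/.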
+def make_rpm_repo(repo):
+ oldpwd=os.getcwd()
+ os.chdir(repo+"../")
+ try:
+ sysassert(["createrepo", "."])
+ finally:
+ os.chdir(oldpwd)
+
+
+def write_rpmrc_file(path, string):
+ f=open(path, 'w')
+ try:
+ f.write(string)
+ finally:
+ f.close()
+
+def write_rpm_macros_file(path, topdir):
+ f=open(path, 'w')
+ try:
+ f.write("%%_topdir %s" % topdir)
+ finally:
+ f.close()
+
+def write_rpm_spec_file(path, spec):
+ s="""Name: @@PACKAGE_BASENAME@@
+Conflicts: @@PACKAGE_CONFLICTS@@
+Obsoletes: @@PACKAGE_OBSOLETES@@
+Version: @@PACKAGE_VERSION@@
+Release: mongodb_@@PACKAGE_REVISION@@%{?dist}
+Summary: mongo client shell and tools
+License: AGPL 3.0
+URL: http://www.mongodb.org
+Group: Applications/Databases
+
+Source0: %{name}-%{version}.tar.gz
+BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root
+
+%description
+Mongo (from "huMONGOus") is a schema-free document-oriented database.
+It features dynamic profileable queries, full indexing, replication
+and fail-over support, efficient storage of large binary data objects,
+and auto-sharding.
+
+This package provides the mongo shell, import/export tools, and other
+client utilities.
+
+%package server
+Summary: mongo server, sharding server, and support scripts
+Group: Applications/Databases
+Requires: @@PACKAGE_BASENAME@@
+
+%description server
+Mongo (from "huMONGOus") is a schema-free document-oriented database.
+
+This package provides the mongo server software, mongo sharding server
+software, default configuration files, and init.d scripts.
+
+%package devel
+Summary: Headers and libraries for mongo development.
+Group: Applications/Databases
+
+%description devel
+Mongo (from "huMONGOus") is a schema-free document-oriented database.
+
+This package provides the mongo static library and header files needed
+to develop mongo client software.
+
+%prep
+%setup
+
+%build
+#scons --prefix=$RPM_BUILD_ROOT/usr all
+# XXX really should have shared library here
+
+%install
+#scons --prefix=$RPM_BUILD_ROOT/usr install
+mkdir -p $RPM_BUILD_ROOT/usr
+cp -rv @@BINARYDIR@@/usr/bin $RPM_BUILD_ROOT/usr
+mkdir -p $RPM_BUILD_ROOT/usr/share/man/man1
+cp debian/*.1 $RPM_BUILD_ROOT/usr/share/man/man1/
+# FIXME: remove this rm when mongosniff is back in the package
+rm -v $RPM_BUILD_ROOT/usr/share/man/man1/mongosniff.1*
+mkdir -p $RPM_BUILD_ROOT/etc/rc.d/init.d
+cp -v rpm/init.d-mongod $RPM_BUILD_ROOT/etc/rc.d/init.d/mongod
+chmod a+x $RPM_BUILD_ROOT/etc/rc.d/init.d/mongod
+mkdir -p $RPM_BUILD_ROOT/etc
+cp -v rpm/mongod.conf $RPM_BUILD_ROOT/etc/mongod.conf
+mkdir -p $RPM_BUILD_ROOT/etc/sysconfig
+cp -v rpm/mongod.sysconfig $RPM_BUILD_ROOT/etc/sysconfig/mongod
+mkdir -p $RPM_BUILD_ROOT/var/lib/mongo
+mkdir -p $RPM_BUILD_ROOT/var/log/mongo
+touch $RPM_BUILD_ROOT/var/log/mongo/mongod.log
+
+%clean
+#scons -c
+rm -rf $RPM_BUILD_ROOT
+
+%pre server
+if ! /usr/bin/id -g mongod &>/dev/null; then
+ /usr/sbin/groupadd -r mongod
+fi
+if ! /usr/bin/id mongod &>/dev/null; then
+ /usr/sbin/useradd -M -r -g mongod -d /var/lib/mongo -s /bin/false \
+ -c mongod mongod > /dev/null 2>&1
+fi
+
+%post server
+if test $1 = 1
+then
+ /sbin/chkconfig --add mongod
+fi
+
+%preun server
+if test $1 = 0
+then
+ /sbin/chkconfig --del mongod
+fi
+
+%postun server
+if test $1 -ge 1
+then
+ /sbin/service mongod condrestart >/dev/null 2>&1 || :
+fi
+
+%files
+%defattr(-,root,root,-)
+#%doc README GNU-AGPL-3.0.txt
+
+%{_bindir}/bsondump
+%{_bindir}/mongo
+%{_bindir}/mongodump
+%{_bindir}/mongoexport
+%{_bindir}/mongofiles
+%{_bindir}/mongoimport
+%{_bindir}/mongorestore
+#@@VERSION>1.9@@%{_bindir}/mongotop
+%{_bindir}/mongostat
+# FIXME: uncomment when mongosniff is back in the package
+#%{_bindir}/mongosniff
+
+# FIXME: uncomment this when there's a stable release whose source
+# tree contains a bsondump man page.
+#@@VERSION>1.9@@%{_mandir}/man1/bsondump.1*
+%{_mandir}/man1/mongo.1*
+%{_mandir}/man1/mongodump.1*
+%{_mandir}/man1/mongoexport.1*
+%{_mandir}/man1/mongofiles.1*
+%{_mandir}/man1/mongoimport.1*
+%{_mandir}/man1/mongorestore.1*
+%{_mandir}/man1/mongostat.1*
+# FIXME: uncomment when mongosniff is back in the package
+#%{_mandir}/man1/mongosniff.1*
+
+%files server
+%defattr(-,root,root,-)
+%config(noreplace) /etc/mongod.conf
+%{_bindir}/mongod
+%{_bindir}/mongos
+%{_mandir}/man1/mongod.1*
+%{_mandir}/man1/mongos.1*
+/etc/rc.d/init.d/mongod
+/etc/sysconfig/mongod
+#/etc/rc.d/init.d/mongos
+%attr(0755,mongod,mongod) %dir /var/lib/mongo
+%attr(0755,mongod,mongod) %dir /var/log/mongo
+%attr(0640,mongod,mongod) %config(noreplace) %verify(not md5 size mtime) /var/log/mongo/mongod.log
+
+%changelog
+* Thu Jan 28 2010 Richard M Kreuter <richard@10gen.com>
+- Minor fixes.
+
+* Sat Oct 24 2009 Joe Miklojcik <jmiklojcik@shopwiki.com> -
+- Wrote mongo.spec.
+"""
+ suffix=spec.suffix()
+ s=re.sub("@@PACKAGE_BASENAME@@", "mongo%s" % suffix, s)
+ s=re.sub("@@PACKAGE_VERSION@@", spec.pversion(Distro("redhat")), s)
+ # FIXME, maybe: the RPM guide says that Release numbers ought to
+ # be integers starting at 1, but we use "mongodb_1{%dist}",
+ # whatever the hell that means.
+ s=re.sub("@@PACKAGE_REVISION@@", str(int(spec.param("revision"))+1) if spec.param("revision") else "1", s)
+ s=re.sub("@@BINARYDIR@@", BINARYDIR, s)
+ conflict_suffixes=["", "-10gen", "-10gen-unstable"]
+ conflict_suffixes.remove(suffix)
+ s=re.sub("@@PACKAGE_CONFLICTS@@", ", ".join(["mongo"+_ for _ in conflict_suffixes]), s)
+ if suffix == "-10gen":
+ s=re.sub("@@PACKAGE_PROVIDES@@", "mongo-stable", s)
+ s=re.sub("@@PACKAGE_OBSOLETES@@", "mongo-stable", s)
+ elif suffix == "-10gen-unstable":
+ s=re.sub("@@PACKAGE_PROVIDES@@", "mongo-unstable", s)
+ s=re.sub("@@PACKAGE_OBSOLETES@@", "mongo-unstable", s)
+ else:
+ raise Exception("BUG: unknown suffix %s" % suffix)
+
+ lines=[]
+ for line in s.split("\n"):
+ m = re.search("@@VERSION>(.*)@@(.*)", line)
+ if m and spec.version_better_than(m.group(1)):
+ lines.append(m.group(2))
+ else:
+ lines.append(line)
+ s="\n".join(lines)
+
+ f=open(path, 'w')
+ try:
+ f.write(s)
+ finally:
+ f.close()
+
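+# Illustrative sketch of the template substitutions above for an
+# assumed Spec("1.8.1:revision=2"), whose suffix defaults to "-10gen":
+#
+#   Name:      mongo-10gen
+#   Version:   1.8.1
+#   Release:   mongodb_3%{?dist}          (revision 2, plus one)
+#   Conflicts: mongo, mongo-10gen-unstable
+#   Obsoletes: mongo-stable
+#
+# (The @@PACKAGE_PROVIDES@@ substitution currently has no matching line
+# in the template.)  Lines tagged "#@@VERSION>1.9@@..." are uncommented
+# only when spec.version_better_than("1.9") holds, so mongotop stays
+# commented out for 1.8.x specs; note that the comparison used there is
+# the FIXME'd string approximation, not true version ordering.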
+if __name__ == "__main__":
+ main(sys.argv)
diff --git a/buildscripts/smoke.py b/buildscripts/smoke.py
index 5fdd26f..c46b5d1 100755
--- a/buildscripts/smoke.py
+++ b/buildscripts/smoke.py
@@ -110,7 +110,7 @@ class mongod(object):
sock.connect(("localhost", int(port)))
sock.close()
- def did_mongod_start(self, port=mongod_port, timeout=20):
+ def did_mongod_start(self, port=mongod_port, timeout=300):
while timeout > 0:
time.sleep(1)
try:
@@ -119,6 +119,7 @@ class mongod(object):
except Exception,e:
print >> sys.stderr, e
timeout = timeout - 1
+ print >> sys.stderr, "timeout starting mongod"
return False
def start(self):
@@ -145,9 +146,13 @@ class mongod(object):
utils.ensureDir(dir_name)
argv = [mongod_executable, "--port", str(self.port), "--dbpath", dir_name]
if self.kwargs.get('small_oplog'):
- argv += ["--master", "--oplogSize", "128"]
+ argv += ["--master", "--oplogSize", "256"]
if self.slave:
argv += ['--slave', '--source', 'localhost:' + str(srcport)]
+ if self.kwargs.get('no_journal'):
+ argv += ['--nojournal']
+ if self.kwargs.get('no_preallocj'):
+ argv += ['--nopreallocj']
print "running " + " ".join(argv)
self.proc = Popen(argv)
if not self.did_mongod_start(self.port):
@@ -240,11 +245,15 @@ def check_db_hashes(master, slave):
# Blech.
def skipTest(path):
if small_oplog:
- if os.path.basename(path) in ["cursor8.js", "indexh.js"]:
+ if os.path.basename(path) in ["cursor8.js", "indexh.js", "dropdb.js"]:
return True
return False
def runTest(test):
+ # test is a tuple of ( filename , usedb<bool> )
+ # filename should be a js file to run
+ # usedb is true if the test expects a mongod to be running
+
(path, usedb) = test
(ignore, ext) = os.path.splitext(path)
if skipTest(path):
@@ -269,11 +278,26 @@ def runTest(test):
"--port", mongod_port]
else:
raise Bug("fell off in extenstion case: %s" % path)
+ sys.stderr.write( "starting test : %s \n" % os.path.basename(path) )
+ sys.stderr.flush()
print " *******************************************"
print " Test : " + os.path.basename(path) + " ..."
t1 = time.time()
# FIXME: we don't handle the case where the subprocess
# hangs... that's bad.
+ if argv[0].endswith( 'mongo' ) and not '--eval' in argv :
+ argv = argv + [ '--eval', 'TestData = new Object();' +
+ 'TestData.testPath = "' + path + '";' +
+ 'TestData.testFile = "' + os.path.basename( path ) + '";' +
+ 'TestData.testName = "' + re.sub( ".js$", "", os.path.basename( path ) ) + '";' +
+ 'TestData.noJournal = ' + ( 'true' if no_journal else 'false' ) + ";" +
+ 'TestData.noJournalPrealloc = ' + ( 'true' if no_preallocj else 'false' ) + ";" ]
+
+ if argv[0].endswith( 'test' ) and no_preallocj :
+ argv = argv + [ '--nopreallocj' ]
+
+
+ print argv
r = call(argv, cwd=test_path)
t2 = time.time()
print " " + str((t2 - t1) * 1000) + "ms"
@@ -295,7 +319,7 @@ def run_tests(tests):
# The reason we use with is so that we get __exit__ semantics
- with mongod(small_oplog=small_oplog) as master:
+ with mongod(small_oplog=small_oplog,no_journal=no_journal,no_preallocj=no_preallocj) as master:
with mongod(slave=True) if small_oplog else Nothing() as slave:
if small_oplog:
master.wait_for_repl()
@@ -415,7 +439,7 @@ def add_exe(e):
return e
def main():
- global mongod_executable, mongod_port, shell_executable, continue_on_failure, small_oplog, smoke_db_prefix, test_path
+ global mongod_executable, mongod_port, shell_executable, continue_on_failure, small_oplog, no_journal, no_preallocj, smoke_db_prefix, test_path
parser = OptionParser(usage="usage: smoke.py [OPTIONS] ARGS*")
parser.add_option('--mode', dest='mode', default='suite',
help='If "files", ARGS are filenames; if "suite", ARGS are sets of tests (%default)')
@@ -441,6 +465,12 @@ def main():
parser.add_option('--small-oplog', dest='small_oplog', default=False,
action="store_true",
help='Run tests with master/slave replication & use a small oplog')
+ parser.add_option('--nojournal', dest='no_journal', default=False,
+ action="store_true",
+ help='Do not turn on journaling in tests')
+ parser.add_option('--nopreallocj', dest='no_preallocj', default=False,
+ action="store_true",
+ help='Do not preallocate journal files in tests')
global tests
(options, tests) = parser.parse_args()
@@ -461,6 +491,8 @@ def main():
continue_on_failure = options.continue_on_failure
smoke_db_prefix = options.smoke_db_prefix
small_oplog = options.small_oplog
+ no_journal = options.no_journal
+ no_preallocj = options.no_preallocj
if options.File:
if options.File == '-':
diff --git a/client/clientOnly.cpp b/client/clientOnly.cpp
index 5725e5f..11890c8 100644
--- a/client/clientOnly.cpp
+++ b/client/clientOnly.cpp
@@ -17,7 +17,6 @@
#include "pch.h"
#include "../client/dbclient.h"
-#include "../db/dbhelpers.h"
#include "../db/cmdline.h"
#include "../s/shard.h"
@@ -29,6 +28,10 @@ namespace mongo {
bool dbexitCalled = false;
+ void exitCleanly( ExitCode code ) {
+ dbexit( code );
+ }
+
void dbexit( ExitCode returnCode, const char *whyMsg , bool tryToGetLock ) {
dbexitCalled = true;
out() << "dbexit called" << endl;
diff --git a/client/connpool.cpp b/client/connpool.cpp
index 23d14da..2d7c37b 100644
--- a/client/connpool.cpp
+++ b/client/connpool.cpp
@@ -36,8 +36,9 @@ namespace mongo {
}
}
- void PoolForHost::done( DBClientBase * c ) {
+ void PoolForHost::done( DBConnectionPool * pool, DBClientBase * c ) {
if ( _pool.size() >= _maxPerHost ) {
+ pool->onDestory( c );
delete c;
}
else {
@@ -45,16 +46,24 @@ namespace mongo {
}
}
- DBClientBase * PoolForHost::get() {
+ DBClientBase * PoolForHost::get( DBConnectionPool * pool , double socketTimeout ) {
time_t now = time(0);
-
+
while ( ! _pool.empty() ) {
StoredConnection sc = _pool.top();
_pool.pop();
- if ( sc.ok( now ) )
- return sc.conn;
- delete sc.conn;
+
+ if ( ! sc.ok( now ) ) {
+ pool->onDestory( sc.conn );
+ delete sc.conn;
+ continue;
+ }
+
+ assert( sc.conn->getSoTimeout() == socketTimeout );
+
+ return sc.conn;
+
}
return NULL;
@@ -75,14 +84,34 @@ namespace mongo {
}
}
+ void PoolForHost::getStaleConnections( vector<DBClientBase*>& stale ) {
+ time_t now = time(0);
+
+ vector<StoredConnection> all;
+ while ( ! _pool.empty() ) {
+ StoredConnection c = _pool.top();
+ _pool.pop();
+
+ if ( c.ok( now ) )
+ all.push_back( c );
+ else
+ stale.push_back( c.conn );
+ }
+
+ for ( size_t i=0; i<all.size(); i++ ) {
+ _pool.push( all[i] );
+ }
+ }
+
+
PoolForHost::StoredConnection::StoredConnection( DBClientBase * c ) {
conn = c;
when = time(0);
}
bool PoolForHost::StoredConnection::ok( time_t now ) {
- // if connection has been idle for an hour, kill it
- return ( now - when ) < 3600;
+ // if connection has been idle for 30 minutes, kill it
+ return ( now - when ) < 1800;
}
void PoolForHost::createdOne( DBClientBase * base) {
@@ -97,16 +126,23 @@ namespace mongo {
DBConnectionPool pool;
- DBClientBase* DBConnectionPool::_get(const string& ident) {
+ DBConnectionPool::DBConnectionPool()
+ : _mutex("DBConnectionPool") ,
+ _name( "dbconnectionpool" ) ,
+ _hooks( new list<DBConnectionHook*>() ) {
+ }
+
+ DBClientBase* DBConnectionPool::_get(const string& ident , double socketTimeout ) {
+ assert( ! inShutdown() );
scoped_lock L(_mutex);
- PoolForHost& p = _pools[ident];
- return p.get();
+ PoolForHost& p = _pools[PoolKey(ident,socketTimeout)];
+ return p.get( this , socketTimeout );
}
- DBClientBase* DBConnectionPool::_finishCreate( const string& host , DBClientBase* conn ) {
+ DBClientBase* DBConnectionPool::_finishCreate( const string& host , double socketTimeout , DBClientBase* conn ) {
{
scoped_lock L(_mutex);
- PoolForHost& p = _pools[host];
+ PoolForHost& p = _pools[PoolKey(host,socketTimeout)];
p.createdOne( conn );
}
@@ -116,22 +152,22 @@ namespace mongo {
return conn;
}
- DBClientBase* DBConnectionPool::get(const ConnectionString& url) {
- DBClientBase * c = _get( url.toString() );
+ DBClientBase* DBConnectionPool::get(const ConnectionString& url, double socketTimeout) {
+ DBClientBase * c = _get( url.toString() , socketTimeout );
if ( c ) {
onHandedOut( c );
return c;
}
string errmsg;
- c = url.connect( errmsg );
+ c = url.connect( errmsg, socketTimeout );
uassert( 13328 , _name + ": connect failed " + url.toString() + " : " + errmsg , c );
- return _finishCreate( url.toString() , c );
+ return _finishCreate( url.toString() , socketTimeout , c );
}
- DBClientBase* DBConnectionPool::get(const string& host) {
- DBClientBase * c = _get( host );
+ DBClientBase* DBConnectionPool::get(const string& host, double socketTimeout) {
+ DBClientBase * c = _get( host , socketTimeout );
if ( c ) {
onHandedOut( c );
return c;
@@ -141,12 +177,23 @@ namespace mongo {
ConnectionString cs = ConnectionString::parse( host , errmsg );
uassert( 13071 , (string)"invalid hostname [" + host + "]" + errmsg , cs.isValid() );
- c = cs.connect( errmsg );
+ c = cs.connect( errmsg, socketTimeout );
if ( ! c )
throw SocketException( SocketException::CONNECT_ERROR , host , 11002 , str::stream() << _name << " error: " << errmsg );
- return _finishCreate( host , c );
+ return _finishCreate( host , socketTimeout , c );
+ }
+
+ void DBConnectionPool::release(const string& host, DBClientBase *c) {
+ if ( c->isFailed() ) {
+ onDestory( c );
+ delete c;
+ return;
+ }
+ scoped_lock L(_mutex);
+ _pools[PoolKey(host,c->getSoTimeout())].done(this,c);
}
+
DBConnectionPool::~DBConnectionPool() {
// connection closing is handled by ~PoolForHost
}
@@ -160,42 +207,55 @@ namespace mongo {
}
void DBConnectionPool::addHook( DBConnectionHook * hook ) {
- _hooks.push_back( hook );
+ _hooks->push_back( hook );
}
void DBConnectionPool::onCreate( DBClientBase * conn ) {
- if ( _hooks.size() == 0 )
+ if ( _hooks->size() == 0 )
return;
- for ( list<DBConnectionHook*>::iterator i = _hooks.begin(); i != _hooks.end(); i++ ) {
+ for ( list<DBConnectionHook*>::iterator i = _hooks->begin(); i != _hooks->end(); i++ ) {
(*i)->onCreate( conn );
}
}
void DBConnectionPool::onHandedOut( DBClientBase * conn ) {
- if ( _hooks.size() == 0 )
+ if ( _hooks->size() == 0 )
return;
- for ( list<DBConnectionHook*>::iterator i = _hooks.begin(); i != _hooks.end(); i++ ) {
+ for ( list<DBConnectionHook*>::iterator i = _hooks->begin(); i != _hooks->end(); i++ ) {
(*i)->onHandedOut( conn );
}
}
+ void DBConnectionPool::onDestory( DBClientBase * conn ) {
+ if ( _hooks->size() == 0 )
+ return;
+
+ for ( list<DBConnectionHook*>::iterator i = _hooks->begin(); i != _hooks->end(); i++ ) {
+ (*i)->onDestory( conn );
+ }
+ }
+
void DBConnectionPool::appendInfo( BSONObjBuilder& b ) {
- BSONObjBuilder bb( b.subobjStart( "hosts" ) );
+
int avail = 0;
long long created = 0;
map<ConnectionString::ConnectionType,long long> createdByType;
+ set<string> replicaSets;
+
+ BSONObjBuilder bb( b.subobjStart( "hosts" ) );
{
scoped_lock lk( _mutex );
for ( PoolMap::iterator i=_pools.begin(); i!=_pools.end(); ++i ) {
if ( i->second.numCreated() == 0 )
continue;
- string s = i->first;
+ string s = str::stream() << i->first.ident << "::" << i->first.timeout;
+
BSONObjBuilder temp( bb.subobjStart( s ) );
temp.append( "available" , i->second.numAvailable() );
temp.appendNumber( "created" , i->second.numCreated() );
@@ -206,9 +266,33 @@ namespace mongo {
long long& x = createdByType[i->second.type()];
x += i->second.numCreated();
+
+ {
+ string setName = i->first.ident;
+ if ( setName.find( "/" ) != string::npos ) {
+ setName = setName.substr( 0 , setName.find( "/" ) );
+ replicaSets.insert( setName );
+ }
+ }
}
}
bb.done();
+
+
+ BSONObjBuilder setBuilder( b.subobjStart( "replicaSets" ) );
+ for ( set<string>::iterator i=replicaSets.begin(); i!=replicaSets.end(); ++i ) {
+ string rs = *i;
+ ReplicaSetMonitorPtr m = ReplicaSetMonitor::get( rs );
+ if ( ! m ) {
+ warning() << "no monitor for set: " << rs << endl;
+ continue;
+ }
+
+ BSONObjBuilder temp( setBuilder.subobjStart( rs ) );
+ m->appendInfo( temp );
+ temp.done();
+ }
+ setBuilder.done();
{
BSONObjBuilder temp( bb.subobjStart( "createdByType" ) );
@@ -223,21 +307,82 @@ namespace mongo {
}
bool DBConnectionPool::serverNameCompare::operator()( const string& a , const string& b ) const{
- string ap = str::before( a , "/" );
- string bp = str::before( b , "/" );
+ const char* ap = a.c_str();
+ const char* bp = b.c_str();
+
+ while (true){
+ if (*ap == '\0' || *ap == '/'){
+ if (*bp == '\0' || *bp == '/')
+ return false; // equal strings
+ else
+ return true; // a is shorter
+ }
+
+ if (*bp == '\0' || *bp == '/')
+ return false; // b is shorter
+
+ if ( *ap < *bp)
+ return true;
+ else if (*ap > *bp)
+ return false;
+
+ ++ap;
+ ++bp;
+ }
+ assert(false);
+ }
+
+ bool DBConnectionPool::poolKeyCompare::operator()( const PoolKey& a , const PoolKey& b ) const {
+ if (DBConnectionPool::serverNameCompare()( a.ident , b.ident ))
+ return true;
- return ap < bp;
+ if (DBConnectionPool::serverNameCompare()( b.ident , a.ident ))
+ return false;
+
+ return a.timeout < b.timeout;
+ }
+
+
+ void DBConnectionPool::taskDoWork() {
+ vector<DBClientBase*> toDelete;
+
+ {
+ // we need to get the connections inside the lock
+ // but we can actually delete them outside
+ scoped_lock lk( _mutex );
+ for ( PoolMap::iterator i=_pools.begin(); i!=_pools.end(); ++i ) {
+ i->second.getStaleConnections( toDelete );
+ }
+ }
+
+ for ( size_t i=0; i<toDelete.size(); i++ ) {
+ try {
+ onDestory( toDelete[i] );
+ delete toDelete[i];
+ }
+ catch ( ... ) {
+ // we don't care if there was a socket error
+ }
+ }
}
// ------ ScopedDbConnection ------
ScopedDbConnection * ScopedDbConnection::steal() {
assert( _conn );
- ScopedDbConnection * n = new ScopedDbConnection( _host , _conn );
+ ScopedDbConnection * n = new ScopedDbConnection( _host , _conn, _socketTimeout );
_conn = 0;
return n;
}
+ void ScopedDbConnection::_setSocketTimeout(){
+ if( ! _conn ) return;
+ if( _conn->type() == ConnectionString::MASTER )
+ (( DBClientConnection* ) _conn)->setSoTimeout( _socketTimeout );
+ else if( _conn->type() == ConnectionString::SYNC )
+ (( SyncClusterConnection* ) _conn)->setAllSoTimeouts( _socketTimeout );
+ }
+
ScopedDbConnection::~ScopedDbConnection() {
if ( _conn ) {
if ( ! _conn->isFailed() ) {
@@ -248,12 +393,14 @@ namespace mongo {
}
}
- ScopedDbConnection::ScopedDbConnection(const Shard& shard )
- : _host( shard.getConnString() ) , _conn( pool.get(_host) ) {
+ ScopedDbConnection::ScopedDbConnection(const Shard& shard, double socketTimeout )
+ : _host( shard.getConnString() ) , _conn( pool.get(_host, socketTimeout) ), _socketTimeout( socketTimeout ) {
+ _setSocketTimeout();
}
- ScopedDbConnection::ScopedDbConnection(const Shard* shard )
- : _host( shard->getConnString() ) , _conn( pool.get(_host) ) {
+ ScopedDbConnection::ScopedDbConnection(const Shard* shard, double socketTimeout )
+ : _host( shard->getConnString() ) , _conn( pool.get(_host, socketTimeout) ), _socketTimeout( socketTimeout ) {
+ _setSocketTimeout();
}
@@ -262,7 +409,7 @@ namespace mongo {
PoolFlushCmd() : Command( "connPoolSync" , false , "connpoolsync" ) {}
virtual void help( stringstream &help ) const { help<<"internal"; }
virtual LockType locktype() const { return NONE; }
- virtual bool run(const string&, mongo::BSONObj&, std::string&, mongo::BSONObjBuilder& result, bool) {
+ virtual bool run(const string&, mongo::BSONObj&, int, std::string&, mongo::BSONObjBuilder& result, bool) {
pool.flush();
return true;
}
@@ -277,7 +424,7 @@ namespace mongo {
PoolStats() : Command( "connPoolStats" ) {}
virtual void help( stringstream &help ) const { help<<"stats about connection pool"; }
virtual LockType locktype() const { return NONE; }
- virtual bool run(const string&, mongo::BSONObj&, std::string&, mongo::BSONObjBuilder& result, bool) {
+ virtual bool run(const string&, mongo::BSONObj&, int, std::string&, mongo::BSONObjBuilder& result, bool) {
pool.appendInfo( result );
result.append( "numDBClientConnection" , DBClientConnection::getNumConnections() );
result.append( "numAScopedConnection" , AScopedConnection::getNumConnections() );
diff --git a/client/connpool.h b/client/connpool.h
index e7f59d6..a37dad7 100644
--- a/client/connpool.h
+++ b/client/connpool.h
@@ -21,9 +21,12 @@
#include "dbclient.h"
#include "redef_macros.h"
+#include "../util/background.h"
+
namespace mongo {
class Shard;
+ class DBConnectionPool;
/**
* not thread safe
@@ -44,7 +47,7 @@ namespace mongo {
int numAvailable() const { return (int)_pool.size(); }
- void createdOne( DBClientBase * base);
+ void createdOne( DBClientBase * base );
long long numCreated() const { return _created; }
ConnectionString::ConnectionType type() const { assert(_created); return _type; }
@@ -52,11 +55,13 @@ namespace mongo {
/**
* gets a connection or return NULL
*/
- DBClientBase * get();
+ DBClientBase * get( DBConnectionPool * pool , double socketTimeout );
- void done( DBClientBase * c );
+ void done( DBConnectionPool * pool , DBClientBase * c );
void flush();
+
+ void getStaleConnections( vector<DBClientBase*>& stale );
static void setMaxPerHost( unsigned max ) { _maxPerHost = max; }
static unsigned getMaxPerHost() { return _maxPerHost; }
@@ -72,6 +77,7 @@ namespace mongo {
};
std::stack<StoredConnection> _pool;
+
long long _created;
ConnectionString::ConnectionType _type;
@@ -83,6 +89,7 @@ namespace mongo {
virtual ~DBConnectionHook() {}
virtual void onCreate( DBClientBase * conn ) {}
virtual void onHandedOut( DBClientBase * conn ) {}
+ virtual void onDestory( DBClientBase * conn ) {}
};
/** Database connection pool.
@@ -100,29 +107,11 @@ namespace mongo {
c.conn()...
}
*/
- class DBConnectionPool {
+ class DBConnectionPool : public PeriodicTask {
public:
- /** compares server namees, but is smart about replica set names */
- struct serverNameCompare {
- bool operator()( const string& a , const string& b ) const;
- };
-
- private:
-
- mongo::mutex _mutex;
- typedef map<string,PoolForHost,serverNameCompare> PoolMap; // servername -> pool
- PoolMap _pools;
- list<DBConnectionHook*> _hooks;
- string _name;
-
- DBClientBase* _get( const string& ident );
-
- DBClientBase* _finishCreate( const string& ident , DBClientBase* conn );
-
- public:
- DBConnectionPool() : _mutex("DBConnectionPool") , _name( "dbconnectionpool" ) { }
+ DBConnectionPool();
~DBConnectionPool();
/** right now just controls some asserts. defaults to "dbconnectionpool" */
@@ -130,22 +119,54 @@ namespace mongo {
void onCreate( DBClientBase * conn );
void onHandedOut( DBClientBase * conn );
+ void onDestory( DBClientBase * conn );
void flush();
- DBClientBase *get(const string& host);
- DBClientBase *get(const ConnectionString& host);
+ DBClientBase *get(const string& host, double socketTimeout = 0);
+ DBClientBase *get(const ConnectionString& host, double socketTimeout = 0);
- void release(const string& host, DBClientBase *c) {
- if ( c->isFailed() ) {
- delete c;
- return;
- }
- scoped_lock L(_mutex);
- _pools[host].done(c);
- }
- void addHook( DBConnectionHook * hook );
+ void release(const string& host, DBClientBase *c);
+
+ void addHook( DBConnectionHook * hook ); // we take ownership
void appendInfo( BSONObjBuilder& b );
+
+ /** compares server names, but is smart about replica set names */
+ struct serverNameCompare {
+ bool operator()( const string& a , const string& b ) const;
+ };
+
+ virtual string taskName() const { return "DBConnectionPool-cleaner"; }
+ virtual void taskDoWork();
+
+ private:
+ DBConnectionPool( DBConnectionPool& p );
+
+ DBClientBase* _get( const string& ident , double socketTimeout );
+
+ DBClientBase* _finishCreate( const string& ident , double socketTimeout, DBClientBase* conn );
+
+ struct PoolKey {
+ PoolKey( string i , double t ) : ident( i ) , timeout( t ) {}
+ string ident;
+ double timeout;
+ };
+
+ struct poolKeyCompare {
+ bool operator()( const PoolKey& a , const PoolKey& b ) const;
+ };
+
+ typedef map<PoolKey,PoolForHost,poolKeyCompare> PoolMap; // servername -> pool
+
+ mongo::mutex _mutex;
+ string _name;
+
+ PoolMap _pools;
+
+ // pointers owned by me, right now they leak on shutdown
+ // _hooks itself also leaks because it creates a shutdown race condition
+ list<DBConnectionHook*> * _hooks;
+
};
extern DBConnectionPool pool;
@@ -154,9 +175,15 @@ namespace mongo {
public:
AScopedConnection() { _numConnections++; }
virtual ~AScopedConnection() { _numConnections--; }
+
virtual DBClientBase* get() = 0;
virtual void done() = 0;
virtual string getHost() const = 0;
+
+ /**
+ * @return true iff this has a connection to the db
+ */
+ virtual bool ok() const = 0;
/**
* @return total number of current instances of AScopedConnection
@@ -176,19 +203,25 @@ namespace mongo {
/** the main constructor you want to use
throws UserException if can't connect
*/
- explicit ScopedDbConnection(const string& host) : _host(host), _conn( pool.get(host) ) {}
+ explicit ScopedDbConnection(const string& host, double socketTimeout = 0) : _host(host), _conn( pool.get(host, socketTimeout) ), _socketTimeout( socketTimeout ) {
+ _setSocketTimeout();
+ }
- ScopedDbConnection() : _host( "" ) , _conn(0) {}
+ ScopedDbConnection() : _host( "" ) , _conn(0), _socketTimeout( 0 ) {}
/* @param conn - bind to an existing connection */
- ScopedDbConnection(const string& host, DBClientBase* conn ) : _host( host ) , _conn( conn ) {}
+ ScopedDbConnection(const string& host, DBClientBase* conn, double socketTimeout = 0 ) : _host( host ) , _conn( conn ), _socketTimeout( socketTimeout ) {
+ _setSocketTimeout();
+ }
/** throws UserException if can't connect */
- explicit ScopedDbConnection(const ConnectionString& url ) : _host(url.toString()), _conn( pool.get(url) ) {}
+ explicit ScopedDbConnection(const ConnectionString& url, double socketTimeout = 0 ) : _host(url.toString()), _conn( pool.get(url, socketTimeout) ), _socketTimeout( socketTimeout ) {
+ _setSocketTimeout();
+ }
/** throws UserException if can't connect */
- explicit ScopedDbConnection(const Shard& shard );
- explicit ScopedDbConnection(const Shard* shard );
+ explicit ScopedDbConnection(const Shard& shard, double socketTimeout = 0 );
+ explicit ScopedDbConnection(const Shard* shard, double socketTimeout = 0 );
~ScopedDbConnection();
@@ -210,6 +243,8 @@ namespace mongo {
return _conn;
}
+ bool ok() const { return _conn > 0; }
+
string getHost() const { return _host; }
/** Force closure of the connection. You should call this if you leave it in
@@ -242,8 +277,12 @@ namespace mongo {
ScopedDbConnection * steal();
private:
+
+ void _setSocketTimeout();
+
const string _host;
DBClientBase *_conn;
+ const double _socketTimeout;
};
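As a quick illustration of the reshaped header, the sketch below subclasses DBConnectionHook to observe the new onDestory() callback and opens a ScopedDbConnection with the added per-connection socket timeout. The hook class, its counters, the host, and the namespace are all illustrative, not part of this change.

    #include "client/connpool.h"

    using namespace mongo;

    class CountingHook : public DBConnectionHook {
    public:
        CountingHook() : created(0), handedOut(0), destroyed(0) {}
        virtual void onCreate( DBClientBase* conn )    { created++; }
        virtual void onHandedOut( DBClientBase* conn ) { handedOut++; }
        virtual void onDestory( DBClientBase* conn )   { destroyed++; } // spelling matches the hook above
        int created, handedOut, destroyed;
    };

    void scopedWithTimeout() {
        pool.addHook( new CountingHook() );   // the pool takes ownership of the hook

        ScopedDbConnection conn( "localhost:27017" , 30 /* socket timeout, seconds */ );
        BSONObj doc = conn.get()->findOne( "test.foo" , Query() );
        conn.done();                          // hands the connection back to the pool
    }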
diff --git a/client/dbclient.cpp b/client/dbclient.cpp
index bb24199..dadf7e4 100644
--- a/client/dbclient.cpp
+++ b/client/dbclient.cpp
@@ -64,21 +64,23 @@ namespace mongo {
}
- DBClientBase* ConnectionString::connect( string& errmsg ) const {
+ DBClientBase* ConnectionString::connect( string& errmsg, double socketTimeout ) const {
switch ( _type ) {
case MASTER: {
DBClientConnection * c = new DBClientConnection(true);
+ c->setSoTimeout( socketTimeout );
log(1) << "creating new connection to:" << _servers[0] << endl;
if ( ! c->connect( _servers[0] , errmsg ) ) {
delete c;
return 0;
}
+ log(1) << "connected connection!" << endl;
return c;
}
case PAIR:
case SET: {
- DBClientReplicaSet * set = new DBClientReplicaSet( _setName , _servers );
+ DBClientReplicaSet * set = new DBClientReplicaSet( _setName , _servers , socketTimeout );
if( ! set->connect() ) {
delete set;
errmsg = "connect failed to set ";
@@ -93,7 +95,8 @@ namespace mongo {
list<HostAndPort> l;
for ( unsigned i=0; i<_servers.size(); i++ )
l.push_back( _servers[i] );
- return new SyncClusterConnection( l );
+ SyncClusterConnection* c = new SyncClusterConnection( l, socketTimeout );
+ return c;
}
case INVALID:
@@ -294,7 +297,7 @@ namespace mongo {
return b.obj();
}
- BSONObj getlasterrorcmdobj = fromjson("{getlasterror:1}");
+ const BSONObj getlasterrorcmdobj = fromjson("{getlasterror:1}");
BSONObj DBClientWithCommands::getLastErrorDetailed() {
BSONObj info;
@@ -314,7 +317,7 @@ namespace mongo {
return e.str();
}
- BSONObj getpreverrorcmdobj = fromjson("{getpreverror:1}");
+ const BSONObj getpreverrorcmdobj = fromjson("{getpreverror:1}");
BSONObj DBClientWithCommands::getPrevError() {
BSONObj info;
@@ -391,6 +394,7 @@ namespace mongo {
}
bool DBClientWithCommands::createCollection(const string &ns, long long size, bool capped, int max, BSONObj *info) {
+ assert(!capped||size);
BSONObj o;
if ( info == 0 ) info = &o;
BSONObjBuilder b;
@@ -529,19 +533,31 @@ namespace mongo {
return DBClientBase::auth(dbname, username, password.c_str(), errmsg, false);
}
- BSONObj DBClientInterface::findOne(const string &ns, const Query& query, const BSONObj *fieldsToReturn, int queryOptions) {
+ /** query N objects from the database into an array. makes sense mostly when you want a small number of results. if a huge number, use
+ query() and iterate the cursor.
+ */
+ void DBClientInterface::findN(vector<BSONObj>& out, const string& ns, Query query, int nToReturn, int nToSkip, const BSONObj *fieldsToReturn, int queryOptions) {
+ out.reserve(nToReturn);
+
auto_ptr<DBClientCursor> c =
- this->query(ns, query, 1, 0, fieldsToReturn, queryOptions);
+ this->query(ns, query, nToReturn, nToSkip, fieldsToReturn, queryOptions);
- uassert( 10276 , str::stream() << "DBClientBase::findOne: transport error: " << getServerAddress() << " query: " << query.toString(), c.get() );
+ uassert( 10276 , str::stream() << "DBClientBase::findN: transport error: " << getServerAddress() << " query: " << query.toString(), c.get() );
if ( c->hasResultFlag( ResultFlag_ShardConfigStale ) )
- throw StaleConfigException( ns , "findOne has stale config" );
+ throw StaleConfigException( ns , "findN stale config" );
- if ( !c->more() )
- return BSONObj();
+ for( int i = 0; i < nToReturn; i++ ) {
+ if ( !c->more() )
+ break;
+ out.push_back( c->nextSafe().copy() );
+ }
+ }
- return c->nextSafe().copy();
+ BSONObj DBClientInterface::findOne(const string &ns, const Query& query, const BSONObj *fieldsToReturn, int queryOptions) {
+ vector<BSONObj> v;
+ findN(v, ns, query, 1, 0, fieldsToReturn, queryOptions);
+ return v.empty() ? BSONObj() : v[0];
}
bool DBClientConnection::connect(const HostAndPort& server, string& errmsg) {
@@ -558,39 +574,50 @@ namespace mongo {
p.reset(new MessagingPort( _so_timeout, _logLevel ));
if (server->getAddr() == "0.0.0.0") {
- failed = true;
+ _failed = true;
return false;
}
+ // if( _so_timeout == 0 ){
+ // printStackTrace();
+ // log() << "Connecting to server " << _serverString << " timeout " << _so_timeout << endl;
+ // }
if ( !p->connect(*server) ) {
stringstream ss;
ss << "couldn't connect to server " << _serverString;
errmsg = ss.str();
- failed = true;
+ _failed = true;
return false;
}
+
+#ifdef MONGO_SSL
+ if ( cmdLine.sslOnNormalPorts ) {
+ p->secure( sslManager() );
+ }
+#endif
+
return true;
}
void DBClientConnection::_checkConnection() {
- if ( !failed )
+ if ( !_failed )
return;
if ( lastReconnectTry && time(0)-lastReconnectTry < 2 ) {
// we wait a little before reconnect attempt to avoid constant hammering.
// but we throw because we don't want to try to use a connection in a bad state
- throw SocketException(SocketException::FAILED_STATE);
+ throw SocketException( SocketException::FAILED_STATE , toString() );
}
if ( !autoReconnect )
- throw SocketException(SocketException::FAILED_STATE);
+ throw SocketException( SocketException::FAILED_STATE , toString() );
lastReconnectTry = time(0);
log(_logLevel) << "trying reconnect to " << _serverString << endl;
string errmsg;
- failed = false;
+ _failed = false;
if ( ! _connect(errmsg) ) {
- failed = true;
+ _failed = true;
log(_logLevel) << "reconnect " << _serverString << " failed " << errmsg << endl;
- throw SocketException(SocketException::CONNECT_ERROR);
+ throw SocketException( SocketException::CONNECT_ERROR , toString() );
}
log(_logLevel) << "reconnect " << _serverString << " ok" << endl;
@@ -675,7 +702,7 @@ namespace mongo {
/* connection CANNOT be used anymore as more data may be on the way from the server.
we have to reconnect.
*/
- failed = true;
+ _failed = true;
p->shutdown();
throw;
}
@@ -683,12 +710,11 @@ namespace mongo {
return n;
}
- void DBClientBase::insert( const string & ns , BSONObj obj ) {
+ void DBClientBase::insert( const string & ns , BSONObj obj , int flags) {
Message toSend;
BufBuilder b;
- int opts = 0;
- b.appendNum( opts );
+ b.appendNum( flags );
b.appendStr( ns );
obj.appendSelfToBufBuilder( b );
@@ -697,12 +723,11 @@ namespace mongo {
say( toSend );
}
- void DBClientBase::insert( const string & ns , const vector< BSONObj > &v ) {
+ void DBClientBase::insert( const string & ns , const vector< BSONObj > &v , int flags) {
Message toSend;
BufBuilder b;
- int opts = 0;
- b.appendNum( opts );
+ b.appendNum( flags );
b.appendStr( ns );
for( vector< BSONObj >::const_iterator i = v.begin(); i != v.end(); ++i )
i->appendSelfToBufBuilder( b );
@@ -750,8 +775,12 @@ namespace mongo {
toSend.setData( dbUpdate , b.buf() , b.len() );
say( toSend );
+
+
}
+
+
auto_ptr<DBClientCursor> DBClientWithCommands::getIndexes( const string &ns ) {
return query( Namespace( ns.c_str() ).getSisterNS( "system.indexes" ).c_str() , BSON( "ns" << ns ) );
}
@@ -816,7 +845,7 @@ namespace mongo {
return ss.str();
}
- bool DBClientWithCommands::ensureIndex( const string &ns , BSONObj keys , bool unique, const string & name , bool cache ) {
+ bool DBClientWithCommands::ensureIndex( const string &ns , BSONObj keys , bool unique, const string & name , bool cache, bool background, int version ) {
BSONObjBuilder toSave;
toSave.append( "ns" , ns );
toSave.append( "key" , keys );
@@ -834,9 +863,15 @@ namespace mongo {
cacheKey += nn;
}
+ if( version >= 0 )
+ toSave.append("v", version);
+
if ( unique )
toSave.appendBool( "unique", unique );
+ if( background )
+ toSave.appendBool( "background", true );
+
if ( _seenIndexes.count( cacheKey ) )
return 0;
@@ -874,13 +909,13 @@ namespace mongo {
toSend.setData(dbQuery, b.buf(), b.len());
}
- void DBClientConnection::say( Message &toSend ) {
+ void DBClientConnection::say( Message &toSend, bool isRetry ) {
checkConnection();
try {
port().say( toSend );
}
catch( SocketException & ) {
- failed = true;
+ _failed = true;
throw;
}
}
@@ -889,8 +924,8 @@ namespace mongo {
port().piggyBack( toSend );
}
- void DBClientConnection::recv( Message &m ) {
- port().recv(m);
+ bool DBClientConnection::recv( Message &m ) {
+ return port().recv(m);
}
bool DBClientConnection::call( Message &toSend, Message &response, bool assertOk , string * actualServer ) {
@@ -900,7 +935,7 @@ namespace mongo {
*/
try {
if ( !port().call(toSend, response) ) {
- failed = true;
+ _failed = true;
if ( assertOk )
uasserted( 10278 , str::stream() << "dbclient error communicating with server: " << getServerAddress() );
@@ -908,21 +943,46 @@ namespace mongo {
}
}
catch( SocketException & ) {
- failed = true;
+ _failed = true;
throw;
}
return true;
}
- void DBClientConnection::checkResponse( const char *data, int nReturned ) {
+ BSONElement getErrField(const BSONObj& o) {
+ BSONElement first = o.firstElement();
+ if( strcmp(first.fieldName(), "$err") == 0 )
+ return first;
+
+ // temp - will be DEV only later
+ /*DEV*/
+ if( 1 ) {
+ BSONElement e = o["$err"];
+ if( !e.eoo() ) {
+ wassert(false);
+ }
+ return e;
+ }
+
+ return BSONElement();
+ }
+
+ bool hasErrField( const BSONObj& o ){
+ return ! getErrField( o ).eoo();
+ }
+
+ void DBClientConnection::checkResponse( const char *data, int nReturned, bool* retry, string* host ) {
/* check for errors. the only one we really care about at
* this stage is "not master"
*/
+ *retry = false;
+ *host = _serverString;
+
if ( clientSet && nReturned ) {
assert(data);
BSONObj o(data);
- BSONElement e = o["$err"];
+ BSONElement e = getErrField(o);
if ( e.type() == String && str::contains( e.valuestr() , "not master" ) ) {
clientSet->isntMaster();
}
@@ -930,7 +990,7 @@ namespace mongo {
}
void DBClientConnection::killCursor( long long cursorId ) {
- BufBuilder b;
+ StackBufBuilder b;
b.appendNum( (int)0 ); // reserved
b.appendNum( (int)1 ); // number
b.appendNum( cursorId );
@@ -944,6 +1004,19 @@ namespace mongo {
say(m);
}
+#ifdef MONGO_SSL
+ SSLManager* DBClientConnection::sslManager() {
+ if ( _sslManager )
+ return _sslManager;
+
+ SSLManager* s = new SSLManager(true);
+ _sslManager = s;
+ return s;
+ }
+
+ SSLManager* DBClientConnection::_sslManager = 0;
+#endif
+
AtomicUInt DBClientConnection::_numConnections;
bool DBClientConnection::_lazyKillCursor = true;
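Pulling this hunk together: ConnectionString::connect() now takes a socket timeout, and findOne() is implemented on top of the new findN() helper. A hedged usage sketch follows; the address and namespace are stand-ins.

    #include "client/dbclient.h"

    using namespace mongo;

    void fetchAFew() {
        string errmsg;
        ConnectionString cs = ConnectionString::parse( "localhost:27017" , errmsg );
        if ( ! cs.isValid() )
            return;

        // second argument is the per-connection socket timeout in seconds
        DBClientBase* conn = cs.connect( errmsg , 10 );
        if ( ! conn )
            return;

        // findN fills the vector with at most nToReturn results;
        // findOne() is now findN() with nToReturn == 1
        vector<BSONObj> docs;
        conn->findN( docs , "test.foo" , Query() , 5 );

        delete conn;
    }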
diff --git a/client/dbclient.h b/client/dbclient.h
index 9bc71fd..2b4bb85 100644
--- a/client/dbclient.h
+++ b/client/dbclient.h
@@ -1,4 +1,7 @@
-/** @file dbclient.h - connect to a Mongo database as a database, from C++ */
+/** @file dbclient.h
+
+ Core MongoDB C++ driver interfaces are defined here.
+*/
/* Copyright 2009 10gen Inc.
*
@@ -18,7 +21,8 @@
#pragma once
#include "../pch.h"
-#include "../util/message.h"
+#include "../util/net/message.h"
+#include "../util/net/message_port.h"
#include "../db/jsobj.h"
#include "../db/json.h"
#include <stack>
@@ -100,6 +104,15 @@ namespace mongo {
RemoveOption_Broadcast = 1 << 1
};
+
+ /**
+ * need to put in DbMessage::ReservedOptions as well
+ */
+ enum InsertOptions {
+ /** With multi-insert, keep processing inserts if one fails */
+ InsertOption_ContinueOnError = 1 << 0
+ };
+
class DBClientBase;
/**
@@ -174,7 +187,7 @@ namespace mongo {
string toString() const { return _string; }
- DBClientBase* connect( string& errmsg ) const;
+ DBClientBase* connect( string& errmsg, double socketTimeout = 0 ) const;
string getSetName() const { return _setName; }
@@ -296,7 +309,7 @@ namespace mongo {
Query& where(const string &jscode) { return where(jscode, BSONObj()); }
/**
- * if this query has an orderby, hint, or some other field
+ * @return true if this query has an orderby, hint, or some other field
*/
bool isComplex( bool * hasDollar = 0 ) const;
@@ -332,12 +345,15 @@ namespace mongo {
virtual ~DBConnector() {}
/** actualServer is set to the actual server where they call went if there was a choice (SlaveOk) */
virtual bool call( Message &toSend, Message &response, bool assertOk=true , string * actualServer = 0 ) = 0;
- virtual void say( Message &toSend ) = 0;
+ virtual void say( Message &toSend, bool isRetry = false ) = 0;
virtual void sayPiggyBack( Message &toSend ) = 0;
- virtual void checkResponse( const char* data, int nReturned ) {}
-
/* used by QueryOption_Exhaust. To use that your subclass must implement this. */
- virtual void recv( Message& m ) { assert(false); }
+ virtual bool recv( Message& m ) { assert(false); return false; }
+ // In general, for lazy queries, we'll need to say, recv, then checkResponse
+ virtual void checkResponse( const char* data, int nReturned, bool* retry = NULL, string* targetHost = NULL ) {
+ if( retry ) *retry = false; if( targetHost ) *targetHost = "";
+ }
+ virtual bool lazySupported() const = 0;
};
/**
@@ -348,12 +364,9 @@ namespace mongo {
virtual auto_ptr<DBClientCursor> query(const string &ns, Query query, int nToReturn = 0, int nToSkip = 0,
const BSONObj *fieldsToReturn = 0, int queryOptions = 0 , int batchSize = 0 ) = 0;
- /** don't use this - called automatically by DBClientCursor for you */
- virtual auto_ptr<DBClientCursor> getMore( const string &ns, long long cursorId, int nToReturn = 0, int options = 0 ) = 0;
-
- virtual void insert( const string &ns, BSONObj obj ) = 0;
+ virtual void insert( const string &ns, BSONObj obj , int flags=0) = 0;
- virtual void insert( const string &ns, const vector< BSONObj >& v ) = 0;
+ virtual void insert( const string &ns, const vector< BSONObj >& v , int flags=0) = 0;
virtual void remove( const string &ns , Query query, bool justOne = 0 ) = 0;
@@ -367,8 +380,15 @@ namespace mongo {
*/
virtual BSONObj findOne(const string &ns, const Query& query, const BSONObj *fieldsToReturn = 0, int queryOptions = 0);
+ /** query N objects from the database into an array. makes sense mostly when you want a small number of results. if a huge number, use
+ query() and iterate the cursor.
+ */
+ void findN(vector<BSONObj>& out, const string&ns, Query query, int nToReturn, int nToSkip = 0, const BSONObj *fieldsToReturn = 0, int queryOptions = 0);
+
virtual string getServerAddress() const = 0;
+ /** don't use this - called automatically by DBClientCursor for you */
+ virtual auto_ptr<DBClientCursor> getMore( const string &ns, long long cursorId, int nToReturn = 0, int options = 0 ) = 0;
};
/**
@@ -449,15 +469,19 @@ namespace mongo {
*/
bool createCollection(const string &ns, long long size = 0, bool capped = false, int max = 0, BSONObj *info = 0);
- /** Get error result from the last operation on this connection.
+ /** Get error result from the last write operation (insert/update/delete) on this connection.
@return error message text, or empty string if no error.
*/
string getLastError();
- /** Get error result from the last operation on this connection.
+
+ /** Get error result from the last write operation (insert/update/delete) on this connection.
@return full error object.
*/
virtual BSONObj getLastErrorDetailed();
+ /** Can be called with the returned value from getLastErrorDetailed to extract an error string.
+ If all you need is the string, just call getLastError() instead.
+ */
static string getLastErrorString( const BSONObj& res );
/** Return the last error which has occurred, even if not the very last operation.
@@ -640,13 +664,15 @@ namespace mongo {
@param ns collection to be indexed
@param keys the "key pattern" for the index. e.g., { name : 1 }
@param unique if true, indicates that key uniqueness should be enforced for this index
- @param name if not isn't specified, it will be created from the keys (recommended)
+ @param name if not specified, it will be created from the keys automatically (which is recommended)
@param cache if set to false, the index cache for the connection won't remember this call
+ @param background build index in the background (see mongodb docs/wiki for details)
+ @param v index version. leave at default value. (unit tests set this parameter.)
@return whether or not sent message to db.
should be true on first call, false on subsequent unless resetIndexCache was called
*/
virtual bool ensureIndex( const string &ns , BSONObj keys , bool unique = false, const string &name = "",
- bool cache = true );
+ bool cache = true, bool background = false, int v = -1 );
/**
clears the index cache, so the subsequent call to ensureIndex for any index will go to the server
@@ -748,12 +774,12 @@ namespace mongo {
/**
insert an object into the database
*/
- virtual void insert( const string &ns , BSONObj obj );
+ virtual void insert( const string &ns , BSONObj obj , int flags=0);
/**
insert a vector of objects into the database
*/
- virtual void insert( const string &ns, const vector< BSONObj >& v );
+ virtual void insert( const string &ns, const vector< BSONObj >& v , int flags=0);
/**
remove matching objects from the database
@@ -772,9 +798,10 @@ namespace mongo {
virtual bool callRead( Message& toSend , Message& response ) = 0;
// virtual bool callWrite( Message& toSend , Message& response ) = 0; // TODO: add this if needed
- virtual void say( Message& toSend ) = 0;
-
+
virtual ConnectionString::ConnectionType type() const = 0;
+
+ virtual double getSoTimeout() const = 0;
}; // DBClientBase
@@ -798,7 +825,7 @@ namespace mongo {
Connect timeout is fixed, but short, at 5 seconds.
*/
DBClientConnection(bool _autoReconnect=false, DBClientReplicaSet* cp=0, double so_timeout=0) :
- clientSet(cp), failed(false), autoReconnect(_autoReconnect), lastReconnectTry(0), _so_timeout(so_timeout) {
+ clientSet(cp), _failed(false), autoReconnect(_autoReconnect), lastReconnectTry(0), _so_timeout(so_timeout) {
_numConnections++;
}
@@ -869,14 +896,14 @@ namespace mongo {
@return true if this connection is currently in a failed state. When autoreconnect is on,
a connection will transition back to an ok state after reconnecting.
*/
- bool isFailed() const { return failed; }
+ bool isFailed() const { return _failed; }
- MessagingPort& port() { return *p; }
+ MessagingPort& port() { assert(p); return *p; }
string toStringLong() const {
stringstream ss;
ss << _serverString;
- if ( failed ) ss << " failed";
+ if ( _failed ) ss << " failed";
return ss.str();
}
@@ -887,11 +914,15 @@ namespace mongo {
virtual void killCursor( long long cursorID );
virtual bool callRead( Message& toSend , Message& response ) { return call( toSend , response ); }
- virtual void say( Message &toSend );
+ virtual void say( Message &toSend, bool isRetry = false );
+ virtual bool recv( Message& m );
+ virtual void checkResponse( const char *data, int nReturned, bool* retry = NULL, string* host = NULL );
virtual bool call( Message &toSend, Message &response, bool assertOk = true , string * actualServer = 0 );
virtual ConnectionString::ConnectionType type() const { return ConnectionString::MASTER; }
- virtual void checkResponse( const char *data, int nReturned );
void setSoTimeout(double to) { _so_timeout = to; }
+ double getSoTimeout() const { return _so_timeout; }
+
+ virtual bool lazySupported() const { return true; }
static int getNumConnections() {
return _numConnections;
@@ -899,16 +930,15 @@ namespace mongo {
static void setLazyKillCursor( bool lazy ) { _lazyKillCursor = lazy; }
static bool getLazyKillCursor() { return _lazyKillCursor; }
-
+
protected:
friend class SyncClusterConnection;
- virtual void recv( Message& m );
virtual void sayPiggyBack( Message &toSend );
DBClientReplicaSet *clientSet;
boost::scoped_ptr<MessagingPort> p;
boost::scoped_ptr<SockAddr> server;
- bool failed;
+ bool _failed;
const bool autoReconnect;
time_t lastReconnectTry;
HostAndPort _server; // remember for reconnects
@@ -916,7 +946,7 @@ namespace mongo {
void _checkConnection();
// throws SocketException if in failed state and not reconnecting or if waiting to reconnect
- void checkConnection() { if( failed ) _checkConnection(); }
+ void checkConnection() { if( _failed ) _checkConnection(); }
map< string, pair<string,string> > authCache;
double _so_timeout;
@@ -924,6 +954,11 @@ namespace mongo {
static AtomicUInt _numConnections;
static bool _lazyKillCursor; // lazy means we piggy back kill cursors on next op
+
+#ifdef MONGO_SSL
+ static SSLManager* sslManager();
+ static SSLManager* _sslManager;
+#endif
};
/** pings server to check if it's up
@@ -932,6 +967,9 @@ namespace mongo {
DBClientBase * createDirectClient();
+ BSONElement getErrField( const BSONObj& result );
+ bool hasErrField( const BSONObj& result );
+
} // namespace mongo
#include "dbclientcursor.h"
diff --git a/client/dbclient_rs.cpp b/client/dbclient_rs.cpp
index 37f6225..2cab1f7 100644
--- a/client/dbclient_rs.cpp
+++ b/client/dbclient_rs.cpp
@@ -54,9 +54,9 @@ namespace mongo {
void run() {
log() << "starting" << endl;
while ( ! inShutdown() ) {
- sleepsecs( 20 );
+ sleepsecs( 10 );
try {
- ReplicaSetMonitor::checkAll();
+ ReplicaSetMonitor::checkAll( true );
}
catch ( std::exception& e ) {
error() << "check failed: " << e.what() << endl;
@@ -99,17 +99,14 @@ namespace mongo {
}
_nodes.push_back( Node( servers[i] , conn.release() ) );
-
+
+ int myLoc = _nodes.size() - 1;
string maybePrimary;
- if (_checkConnection( _nodes[_nodes.size()-1].conn , maybePrimary, false)) {
- break;
- }
+ _checkConnection( _nodes[myLoc].conn.get() , maybePrimary, false, myLoc );
}
}
ReplicaSetMonitor::~ReplicaSetMonitor() {
- for ( unsigned i=0; i<_nodes.size(); i++ )
- delete _nodes[i].conn;
_nodes.clear();
_master = -1;
}
@@ -125,7 +122,16 @@ namespace mongo {
return m;
}
- void ReplicaSetMonitor::checkAll() {
+ ReplicaSetMonitorPtr ReplicaSetMonitor::get( const string& name ) {
+ scoped_lock lk( _setsLock );
+ map<string,ReplicaSetMonitorPtr>::const_iterator i = _sets.find( name );
+ if ( i == _sets.end() )
+ return ReplicaSetMonitorPtr();
+ return i->second;
+ }
+
+
+ void ReplicaSetMonitor::checkAll( bool checkAllSecondaries ) {
set<string> seen;
while ( true ) {
@@ -146,7 +152,7 @@ namespace mongo {
if ( ! m )
break;
- m->check();
+ m->check( checkAllSecondaries );
}
@@ -202,7 +208,7 @@ namespace mongo {
return _nodes[_master].addr;
}
- _check();
+ _check( false );
scoped_lock lk( _lock );
uassert( 10009 , str::stream() << "ReplicaSetMonitor no master found for set: " << _name , _master >= 0 );
@@ -210,34 +216,70 @@ namespace mongo {
}
HostAndPort ReplicaSetMonitor::getSlave( const HostAndPort& prev ) {
- // make sure its valid
- if ( prev.port() > 0 ) {
+ // make sure its valid
+
+ bool wasFound = false;
+
+ // This is always true, since checked in port()
+ assert( prev.port() >= 0 );
+ if( prev.host().size() ){
scoped_lock lk( _lock );
for ( unsigned i=0; i<_nodes.size(); i++ ) {
if ( prev != _nodes[i].addr )
continue;
- if ( _nodes[i].ok )
+ wasFound = true;
+
+ if ( _nodes[i].okForSecondaryQueries() )
return prev;
+
break;
}
}
+ if( prev.host().size() ){
+ if( wasFound ){ LOG(1) << "slave '" << prev << "' is no longer ok to use" << endl; }
+ else{ LOG(1) << "slave '" << prev << "' was not found in the replica set" << endl; }
+ }
+ else LOG(1) << "slave '" << prev << "' is not initialized or invalid" << endl;
+
return getSlave();
}
HostAndPort ReplicaSetMonitor::getSlave() {
- scoped_lock lk( _lock );
- for ( unsigned i=0; i<_nodes.size(); i++ ) {
- _nextSlave = ( _nextSlave + 1 ) % _nodes.size();
- if ( _nextSlave == _master )
- continue;
- if ( _nodes[ _nextSlave ].ok )
- return _nodes[ _nextSlave ].addr;
+ LOG(2) << "selecting new slave from replica set " << getServerAddress() << endl;
+
+ // Logic is to retry three times for any secondary node; if we can't find any secondary, we'll take
+ // any "ok" node
+ // TODO: Could this query hidden nodes?
+ const int MAX = 3;
+ for ( int xxx=0; xxx<MAX; xxx++ ) {
+
+ {
+ scoped_lock lk( _lock );
+
+ unsigned i = 0;
+ for ( ; i<_nodes.size(); i++ ) {
+ _nextSlave = ( _nextSlave + 1 ) % _nodes.size();
+ if ( _nextSlave == _master ){
+ LOG(2) << "not selecting " << _nodes[_nextSlave] << " as it is the current master" << endl;
+ continue;
+ }
+ if ( _nodes[ _nextSlave ].okForSecondaryQueries() || ( _nodes[ _nextSlave ].ok && ( xxx + 1 ) >= MAX ) )
+ return _nodes[ _nextSlave ].addr;
+
+ LOG(2) << "not selecting " << _nodes[_nextSlave] << " as it is not ok to use" << endl;
+ }
+
+ }
+
+ check(false);
}
+
+ LOG(2) << "no suitable slave nodes found, returning default node " << _nodes[ 0 ] << endl;
- return _nodes[ 0 ].addr;
+ return _nodes[0].addr;
}
/**
@@ -266,7 +308,7 @@ namespace mongo {
string host = member["name"].String();
int m = -1;
- if ((m = _find(host)) <= 0) {
+ if ((m = _find(host)) < 0) {
continue;
}
@@ -309,16 +351,34 @@ namespace mongo {
- bool ReplicaSetMonitor::_checkConnection( DBClientConnection * c , string& maybePrimary , bool verbose ) {
+ bool ReplicaSetMonitor::_checkConnection( DBClientConnection * c , string& maybePrimary , bool verbose , int nodesOffset ) {
scoped_lock lk( _checkConnectionLock );
bool isMaster = false;
bool changed = false;
try {
+ Timer t;
BSONObj o;
c->isMaster(isMaster, &o);
+
+ if ( o["setName"].type() != String || o["setName"].String() != _name ) {
+ warning() << "node: " << c->getServerAddress() << " isn't a part of set: " << _name
+ << " ismaster: " << o << endl;
+ if ( nodesOffset >= 0 )
+ _nodes[nodesOffset].ok = false;
+ return false;
+ }
- log( ! verbose ) << "ReplicaSetMonitor::_checkConnection: " << c->toString() << ' ' << o << endl;
+ if ( nodesOffset >= 0 ) {
+ _nodes[nodesOffset].pingTimeMillis = t.millis();
+ _nodes[nodesOffset].hidden = o["hidden"].trueValue();
+ _nodes[nodesOffset].secondary = o["secondary"].trueValue();
+ _nodes[nodesOffset].ismaster = o["ismaster"].trueValue();
+
+ _nodes[nodesOffset].lastIsMaster = o.copy();
+ }
+ log( ! verbose ) << "ReplicaSetMonitor::_checkConnection: " << c->toString() << ' ' << o << endl;
+
// add other nodes
if ( o["hosts"].type() == Array ) {
if ( o["primary"].type() == String )
@@ -329,11 +389,14 @@ namespace mongo {
if (o.hasField("passives") && o["passives"].type() == Array) {
_checkHosts(o["passives"].Obj(), changed);
}
-
+
_checkStatus(c);
+
+
}
catch ( std::exception& e ) {
log( ! verbose ) << "ReplicaSetMonitor::_checkConnection: caught exception " << c->toString() << ' ' << e.what() << endl;
+ _nodes[nodesOffset].ok = false;
}
if ( changed && _hook )
@@ -342,24 +405,28 @@ namespace mongo {
return isMaster;
}
- void ReplicaSetMonitor::_check() {
+ void ReplicaSetMonitor::_check( bool checkAllSecondaries ) {
bool triedQuickCheck = false;
LOG(1) << "_check : " << getServerAddress() << endl;
+ int newMaster = -1;
+
for ( int retry = 0; retry < 2; retry++ ) {
for ( unsigned i=0; i<_nodes.size(); i++ ) {
- DBClientConnection * c;
+ shared_ptr<DBClientConnection> c;
{
scoped_lock lk( _lock );
c = _nodes[i].conn;
}
string maybePrimary;
- if ( _checkConnection( c , maybePrimary , retry ) ) {
+ if ( _checkConnection( c.get() , maybePrimary , retry , i ) ) {
_master = i;
- return;
+ newMaster = i;
+ if ( ! checkAllSecondaries )
+ return;
}
if ( ! triedQuickCheck && maybePrimary.size() ) {
@@ -367,36 +434,44 @@ namespace mongo {
if ( x >= 0 ) {
triedQuickCheck = true;
string dummy;
- DBClientConnection * testConn;
+ shared_ptr<DBClientConnection> testConn;
{
scoped_lock lk( _lock );
testConn = _nodes[x].conn;
}
- if ( _checkConnection( testConn , dummy , false ) ) {
+ if ( _checkConnection( testConn.get() , dummy , false , x ) ) {
_master = x;
- return;
+ newMaster = x;
+ if ( ! checkAllSecondaries )
+ return;
}
}
}
}
+
+ if ( newMaster >= 0 )
+ return;
+
sleepsecs(1);
}
}
- void ReplicaSetMonitor::check() {
+ void ReplicaSetMonitor::check( bool checkAllSecondaries ) {
// first see if the current master is fine
if ( _master >= 0 ) {
string temp;
- if ( _checkConnection( _nodes[_master].conn , temp , false ) ) {
- // current master is fine, so we're done
- return;
+ if ( _checkConnection( _nodes[_master].conn.get() , temp , false , _master ) ) {
+ if ( ! checkAllSecondaries ) {
+ // current master is fine, so we're done
+ return;
+ }
}
}
// we either have no master, or the current is dead
- _check();
+ _check( checkAllSecondaries );
}
int ReplicaSetMonitor::_find( const string& server ) const {
@@ -419,7 +494,26 @@ namespace mongo {
return i;
return -1;
}
-
+
+ void ReplicaSetMonitor::appendInfo( BSONObjBuilder& b ) const {
+ scoped_lock lk( _lock );
+ BSONArrayBuilder hosts( b.subarrayStart( "hosts" ) );
+ for ( unsigned i=0; i<_nodes.size(); i++ ) {
+ hosts.append( BSON( "addr" << _nodes[i].addr <<
+ // "lastIsMaster" << _nodes[i].lastIsMaster << // this is a potential race, so only used when debugging
+ "ok" << _nodes[i].ok <<
+ "ismaster" << _nodes[i].ismaster <<
+ "hidden" << _nodes[i].hidden <<
+ "secondary" << _nodes[i].secondary <<
+ "pingTimeMillis" << _nodes[i].pingTimeMillis ) );
+
+ }
+ hosts.done();
+
+ b.append( "master" , _master );
+ b.append( "nextSlave" , _nextSlave );
+ }
+
mongo::mutex ReplicaSetMonitor::_setsLock( "ReplicaSetMonitor" );
map<string,ReplicaSetMonitorPtr> ReplicaSetMonitor::_sets;
@@ -428,8 +522,9 @@ namespace mongo {
// ----- DBClientReplicaSet ---------
// --------------------------------
- DBClientReplicaSet::DBClientReplicaSet( const string& name , const vector<HostAndPort>& servers )
- : _monitor( ReplicaSetMonitor::get( name , servers ) ) {
+ DBClientReplicaSet::DBClientReplicaSet( const string& name , const vector<HostAndPort>& servers, double so_timeout )
+ : _monitor( ReplicaSetMonitor::get( name , servers ) ),
+ _so_timeout( so_timeout ) {
}
DBClientReplicaSet::~DBClientReplicaSet() {
@@ -446,7 +541,7 @@ namespace mongo {
}
_masterHost = _monitor->getMaster();
- _master.reset( new DBClientConnection( true , this ) );
+ _master.reset( new DBClientConnection( true , this , _so_timeout ) );
string errmsg;
if ( ! _master->connect( _masterHost , errmsg ) ) {
_monitor->notifyFailure( _masterHost );
@@ -464,12 +559,12 @@ namespace mongo {
return _slave.get();
_monitor->notifySlaveFailure( _slaveHost );
_slaveHost = _monitor->getSlave();
- }
+ }
else {
_slaveHost = h;
}
- _slave.reset( new DBClientConnection( true , this ) );
+ _slave.reset( new DBClientConnection( true , this , _so_timeout ) );
_slave->connect( _slaveHost );
_auth( _slave.get() );
return _slave.get();
@@ -522,12 +617,12 @@ namespace mongo {
// ------------- simple functions -----------------
- void DBClientReplicaSet::insert( const string &ns , BSONObj obj ) {
- checkMaster()->insert(ns, obj);
+ void DBClientReplicaSet::insert( const string &ns , BSONObj obj , int flags) {
+ checkMaster()->insert(ns, obj, flags);
}
- void DBClientReplicaSet::insert( const string &ns, const vector< BSONObj >& v ) {
- checkMaster()->insert(ns, v);
+ void DBClientReplicaSet::insert( const string &ns, const vector< BSONObj >& v , int flags) {
+ checkMaster()->insert(ns, v, flags);
}
void DBClientReplicaSet::remove( const string &ns , Query obj , bool justOne ) {
@@ -545,12 +640,12 @@ namespace mongo {
// we're ok sending to a slave
// we'll try 2 slaves before just using master
// checkSlave will try a different slave automatically after a failure
- for ( int i=0; i<2; i++ ) {
+ for ( int i=0; i<3; i++ ) {
try {
return checkSlaveQueryResult( checkSlave()->query(ns,query,nToReturn,nToSkip,fieldsToReturn,queryOptions,batchSize) );
}
catch ( DBException &e ) {
- log() << "can't query replica set slave " << i << " : " << _slaveHost << e.what() << endl;
+ LOG(1) << "can't query replica set slave " << i << " : " << _slaveHost << causedBy( e ) << endl;
}
}
}
@@ -563,12 +658,12 @@ namespace mongo {
// we're ok sending to a slave
// we'll try 2 slaves before just using master
// checkSlave will try a different slave automatically after a failure
- for ( int i=0; i<2; i++ ) {
+ for ( int i=0; i<3; i++ ) {
try {
return checkSlave()->findOne(ns,query,fieldsToReturn,queryOptions);
}
catch ( DBException &e ) {
- LOG(1) << "can't findone replica set slave " << i << " : " << _slaveHost << e.what() << endl;
+ LOG(1) << "can't findone replica set slave " << i << " : " << _slaveHost << causedBy( e ) << endl;
}
}
}
@@ -584,23 +679,22 @@ namespace mongo {
assert(0);
}
- auto_ptr<DBClientCursor> DBClientReplicaSet::checkSlaveQueryResult( auto_ptr<DBClientCursor> result ){
+ void DBClientReplicaSet::isntMaster() {
+ log() << "got not master for: " << _masterHost << endl;
+ _monitor->notifyFailure( _masterHost );
+ _master.reset();
+ }
- bool isError = result->hasResultFlag( ResultFlag_ErrSet );
+ auto_ptr<DBClientCursor> DBClientReplicaSet::checkSlaveQueryResult( auto_ptr<DBClientCursor> result ){
+ BSONObj error;
+ bool isError = result->peekError( &error );
if( ! isError ) return result;
- BSONObj error = result->peekOne();
-
- BSONElement code = error["code"];
- if( code.eoo() || ! code.isNumber() ){
- warning() << "no code for error from secondary host " << _slaveHost << ", error was " << error << endl;
- return result;
- }
-
// We only check for "not master or secondary" errors here
// If the error code here ever changes, we need to change this code also
- if( code.Int() == 13436 /* not master or secondary */ ){
+ BSONElement code = error["code"];
+ if( code.isNumber() && code.Int() == 13436 /* not master or secondary */ ){
isntSecondary();
throw DBException( str::stream() << "slave " << _slaveHost.toString() << " is no longer secondary", 14812 );
}
@@ -615,20 +709,123 @@ namespace mongo {
_slave.reset();
}
+ void DBClientReplicaSet::say( Message& toSend, bool isRetry ) {
- void DBClientReplicaSet::isntMaster() {
- log() << "got not master for: " << _masterHost << endl;
- _monitor->notifyFailure( _masterHost );
- _master.reset();
+ if( ! isRetry )
+ _lazyState = LazyState();
+
+ int lastOp = -1;
+ bool slaveOk = false;
+
+ if ( ( lastOp = toSend.operation() ) == dbQuery ) {
+ // TODO: might be possible to do this faster by changing api
+ DbMessage dm( toSend );
+ QueryMessage qm( dm );
+ if ( ( slaveOk = ( qm.queryOptions & QueryOption_SlaveOk ) ) ) {
+
+ for ( int i = _lazyState._retries; i < 3; i++ ) {
+ try {
+ DBClientConnection* slave = checkSlave();
+ slave->say( toSend );
+
+ _lazyState._lastOp = lastOp;
+ _lazyState._slaveOk = slaveOk;
+ _lazyState._retries = i;
+ _lazyState._lastClient = slave;
+ return;
+ }
+ catch ( DBException &e ) {
+ LOG(1) << "can't callLazy replica set slave " << i << " : " << _slaveHost << causedBy( e ) << endl;
+ }
+ }
+ }
+ }
+
+ DBClientConnection* master = checkMaster();
+ master->say( toSend );
+
+ _lazyState._lastOp = lastOp;
+ _lazyState._slaveOk = slaveOk;
+ _lazyState._retries = 3;
+ _lazyState._lastClient = master;
+ return;
+ }
+
+ bool DBClientReplicaSet::recv( Message& m ) {
+
+ assert( _lazyState._lastClient );
+
+ // TODO: It would be nice if we could easily wrap a conn error as a result error
+ try {
+ return _lazyState._lastClient->recv( m );
+ }
+ catch( DBException& e ){
+ log() << "could not receive data from " << _lazyState._lastClient << causedBy( e ) << endl;
+ return false;
+ }
+ }
+
+ void DBClientReplicaSet::checkResponse( const char* data, int nReturned, bool* retry, string* targetHost ){
+
+ // For now, do exactly as we did before, so as not to break things. In general though, we
+ // should fix this so checkResponse has a more consistent contract.
+ if( ! retry ){
+ if( _lazyState._lastClient )
+ return _lazyState._lastClient->checkResponse( data, nReturned );
+ else
+ return checkMaster()->checkResponse( data, nReturned );
+ }
+
+ *retry = false;
+ if( targetHost && _lazyState._lastClient ) *targetHost = _lazyState._lastClient->getServerAddress();
+ else if (targetHost) *targetHost = "";
+
+ if( ! _lazyState._lastClient ) return;
+ if( nReturned != 1 && nReturned != -1 ) return;
+
+ BSONObj dataObj;
+ if( nReturned == 1 ) dataObj = BSONObj( data );
+
+ // Check if we should retry here
+ if( _lazyState._lastOp == dbQuery && _lazyState._slaveOk ){
+
+ // Check the error code for a slave not secondary error
+ if( nReturned == -1 ||
+ ( hasErrField( dataObj ) && ! dataObj["code"].eoo() && dataObj["code"].Int() == 13436 ) ){
+
+ bool wasMaster = false;
+ if( _lazyState._lastClient == _slave.get() ){
+ isntSecondary();
+ }
+ else if( _lazyState._lastClient == _master.get() ){
+ wasMaster = true;
+ isntMaster();
+ }
+ else
+ warning() << "passed " << dataObj << " but last rs client " << _lazyState._lastClient->toString() << " is not master or secondary" << endl;
+
+ if( _lazyState._retries < 3 ){
+ _lazyState._retries++;
+ *retry = true;
+ }
+ else{
+ (void)wasMaster; // silence set-but-not-used warning
+ // assert( wasMaster );
+ // printStackTrace();
+ log() << "too many retries (" << _lazyState._retries << "), could not get data from replica set" << endl;
+ }
+ }
+ }
}
+
bool DBClientReplicaSet::call( Message &toSend, Message &response, bool assertOk , string * actualServer ) {
if ( toSend.operation() == dbQuery ) {
// TODO: might be possible to do this faster by changing api
DbMessage dm( toSend );
QueryMessage qm( dm );
if ( qm.queryOptions & QueryOption_SlaveOk ) {
- for ( int i=0; i<2; i++ ) {
+ for ( int i=0; i<3; i++ ) {
try {
DBClientConnection* s = checkSlave();
if ( actualServer )
@@ -636,7 +833,7 @@ namespace mongo {
return s->call( toSend , response , assertOk );
}
catch ( DBException &e ) {
- LOG(1) << "can't call replica set slave " << i << " : " << _slaveHost << e.what() << endl;
+ LOG(1) << "can't call replica set slave " << i << " : " << _slaveHost << causedBy( e ) << endl;
if ( actualServer )
*actualServer = "";
}
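The by-name ReplicaSetMonitor::get() overload added here is what the connPoolStats change above uses to report per-set state. A small sketch of the same lookup, with the set name assumed by the caller:

    #include "client/dbclient_rs.h"

    using namespace mongo;

    BSONObj describeSet( const string& setName ) {
        // returns an empty pointer when no monitor has been created for this set
        ReplicaSetMonitorPtr m = ReplicaSetMonitor::get( setName );
        if ( ! m )
            return BSONObj();

        BSONObjBuilder b;
        m->appendInfo( b );   // hosts (ok/ismaster/hidden/secondary/ping), master, nextSlave
        return b.obj();
    }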
diff --git a/client/dbclient_rs.h b/client/dbclient_rs.h
index 548b46a..b6948a0 100644
--- a/client/dbclient_rs.h
+++ b/client/dbclient_rs.h
@@ -1,4 +1,4 @@
-/** @file dbclient_rs.h - connect to a Replica Set, from C++ */
+/** @file dbclient_rs.h Connect to a Replica Set, from C++ */
/* Copyright 2009 10gen Inc.
*
@@ -43,10 +43,16 @@ namespace mongo {
static ReplicaSetMonitorPtr get( const string& name , const vector<HostAndPort>& servers );
/**
+ * gets a cached Monitor by name, or returns none if it doesn't exist
+ */
+ static ReplicaSetMonitorPtr get( const string& name );
+
+
+ /**
* checks all sets for current master and new secondaries
* usually only called from a BackgroundJob
*/
- static void checkAll();
+ static void checkAll( bool checkAllSecondaries );
/**
* this is called whenever the config of any repclia set changes
@@ -81,13 +87,15 @@ namespace mongo {
/**
* checks for current master and new secondaries
*/
- void check();
+ void check( bool checkAllSecondaries );
string getName() const { return _name; }
string getServerAddress() const;
bool contains( const string& server ) const;
+
+ void appendInfo( BSONObjBuilder& b ) const;
private:
/**
@@ -98,7 +106,7 @@ namespace mongo {
*/
ReplicaSetMonitor( const string& name , const vector<HostAndPort>& servers );
- void _check();
+ void _check( bool checkAllSecondaries );
/**
* Use replSetGetStatus command to make sure hosts in host list are up
@@ -119,9 +127,10 @@ namespace mongo {
* @param c the connection to check
* @param maybePrimary OUT
* @param verbose
+ * @param nodesOffset - offset into _nodes array, -1 for not in it
* @return if the connection is good
*/
- bool _checkConnection( DBClientConnection * c , string& maybePrimary , bool verbose );
+ bool _checkConnection( DBClientConnection * c , string& maybePrimary , bool verbose , int nodesOffset );
int _find( const string& server ) const ;
int _find_inlock( const string& server ) const ;
@@ -132,14 +141,44 @@ namespace mongo {
string _name;
struct Node {
- Node( const HostAndPort& a , DBClientConnection* c ) : addr( a ) , conn(c) , ok(true) {}
+ Node( const HostAndPort& a , DBClientConnection* c )
+ : addr( a ) , conn(c) , ok(true) ,
+ ismaster(false), secondary( false ) , hidden( false ) , pingTimeMillis(0) {
+ }
+
+ bool okForSecondaryQueries() const {
+ return ok && secondary && ! hidden;
+ }
+
+ BSONObj toBSON() const {
+ return BSON( "addr" << addr.toString() <<
+ "isMaster" << ismaster <<
+ "secondary" << secondary <<
+ "hidden" << hidden <<
+ "ok" << ok );
+ }
+
+ string toString() const {
+ return toBSON().toString();
+ }
+
HostAndPort addr;
- DBClientConnection* conn;
+ shared_ptr<DBClientConnection> conn;
// if this node is in a failure state
// used for slave routing
// this is too simple, should make it better
bool ok;
+
+ // as reported by ismaster
+ BSONObj lastIsMaster;
+
+ bool ismaster;
+ bool secondary;
+ bool hidden;
+
+ int pingTimeMillis;
+
};
/**
@@ -168,7 +207,7 @@ namespace mongo {
public:
/** Call connect() after constructing. autoReconnect is always on for DBClientReplicaSet connections. */
- DBClientReplicaSet( const string& name , const vector<HostAndPort>& servers );
+ DBClientReplicaSet( const string& name , const vector<HostAndPort>& servers, double so_timeout=0 );
virtual ~DBClientReplicaSet();
/** Returns false if no member of the set was reachable, or neither is
@@ -191,11 +230,11 @@ namespace mongo {
/** throws userassertion "no master found" */
virtual BSONObj findOne(const string &ns, const Query& query, const BSONObj *fieldsToReturn = 0, int queryOptions = 0);
- virtual void insert( const string &ns , BSONObj obj );
+ virtual void insert( const string &ns , BSONObj obj , int flags=0);
/** insert multiple objects. Note that single object insert is asynchronous, so this version
is only nominally faster and not worth a special effort to try to use. */
- virtual void insert( const string &ns, const vector< BSONObj >& v );
+ virtual void insert( const string &ns, const vector< BSONObj >& v , int flags=0);
virtual void remove( const string &ns , Query obj , bool justOne = 0 );
@@ -210,11 +249,14 @@ namespace mongo {
// ---- callback pieces -------
- virtual void checkResponse( const char *data, int nReturned ) { checkMaster()->checkResponse( data , nReturned ); }
+ virtual void say( Message &toSend, bool isRetry = false );
+ virtual bool recv( Message &toRecv );
+ virtual void checkResponse( const char* data, int nReturned, bool* retry = NULL, string* targetHost = NULL );
/* this is the callback from our underlying connections to notify us that we got a "not master" error.
*/
void isntMaster();
+
/* this is used to indicate we got a "not master or secondary" error from a secondary.
*/
void isntSecondary();
@@ -225,16 +267,18 @@ namespace mongo {
// ----- informational ----
+ double getSoTimeout() const { return _so_timeout; }
+
string toString() { return getServerAddress(); }
string getServerAddress() const { return _monitor->getServerAddress(); }
virtual ConnectionString::ConnectionType type() const { return ConnectionString::SET; }
+ virtual bool lazySupported() const { return true; }
// ---- low level ------
virtual bool call( Message &toSend, Message &response, bool assertOk=true , string * actualServer = 0 );
- virtual void say( Message &toSend ) { checkMaster()->say( toSend ); }
virtual bool callRead( Message& toSend , Message& response ) { return checkMaster()->callRead( toSend , response ); }
@@ -258,6 +302,8 @@ namespace mongo {
HostAndPort _slaveHost;
scoped_ptr<DBClientConnection> _slave;
+
+ double _so_timeout;
/**
* for storing authentication info
@@ -277,6 +323,22 @@ namespace mongo {
// this could be a security issue, as the password is stored in memory
// not sure if/how we should handle
list<AuthInfo> _auths;
+
+ protected:
+
+ /**
+ * for storing (non-threadsafe) information between lazy calls
+ */
+ class LazyState {
+ public:
+ LazyState() : _lastClient( NULL ), _lastOp( -1 ), _slaveOk( false ), _retries( 0 ) {}
+ DBClientConnection* _lastClient;
+ int _lastOp;
+ bool _slaveOk;
+ int _retries;
+
+ } _lazyState;
+
};
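To show how the new constructor argument and the slave-retry behaviour combine, here is a hedged sketch; the set name, seed list, and namespace are placeholders.

    #include "client/dbclient_rs.h"

    using namespace mongo;

    void readFromSecondary() {
        vector<HostAndPort> seeds;
        seeds.push_back( HostAndPort( "localhost:27017" ) );
        seeds.push_back( HostAndPort( "localhost:27018" ) );

        // third argument is the new per-connection socket timeout (seconds)
        DBClientReplicaSet rs( "rs0" , seeds , 15 );
        if ( ! rs.connect() )
            return;

        // with SlaveOk set, reads now try up to three secondaries (the i<3 loops
        // above) before falling back to the master
        BSONObj doc = rs.findOne( "test.foo" , Query() , 0 , QueryOption_SlaveOk );
    }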
diff --git a/client/dbclientcursor.cpp b/client/dbclientcursor.cpp
index 6c6afc0..5db360e 100644
--- a/client/dbclientcursor.cpp
+++ b/client/dbclientcursor.cpp
@@ -37,8 +37,7 @@ namespace mongo {
return batchSize < nToReturn ? batchSize : nToReturn;
}
- bool DBClientCursor::init() {
- Message toSend;
+ void DBClientCursor::_assembleInit( Message& toSend ) {
if ( !cursorId ) {
assembleRequest( ns, query, nextBatchSize() , nToSkip, fieldsToReturn, opts, toSend );
}
@@ -50,12 +49,18 @@ namespace mongo {
b.appendNum( cursorId );
toSend.setData( dbGetMore, b.buf(), b.len() );
}
- if ( !_client->call( toSend, *m, false ) ) {
+ }
+
+ bool DBClientCursor::init() {
+ Message toSend;
+ _assembleInit( toSend );
+
+ if ( !_client->call( toSend, *b.m, false ) ) {
// log msg temp?
log() << "DBClientCursor::init call() failed" << endl;
return false;
}
- if ( m->empty() ) {
+ if ( b.m->empty() ) {
// log msg temp?
log() << "DBClientCursor::init message from call() was empty" << endl;
return false;
@@ -63,12 +68,41 @@ namespace mongo {
dataReceived();
return true;
}
+
+ void DBClientCursor::initLazy( bool isRetry ) {
+ verify( 15875 , _client->lazySupported() );
+ Message toSend;
+ _assembleInit( toSend );
+ _client->say( toSend, isRetry );
+ }
+
+ bool DBClientCursor::initLazyFinish( bool& retry ) {
+
+ bool recvd = _client->recv( *b.m );
+
+ // If we get a bad response, return false
+ if ( ! recvd || b.m->empty() ) {
+
+ if( !recvd )
+ log() << "DBClientCursor::init lazy say() failed" << endl;
+ if( b.m->empty() )
+ log() << "DBClientCursor::init message from say() was empty" << endl;
+
+ _client->checkResponse( NULL, -1, &retry, &_lazyHost );
+
+ return false;
+
+ }
+
+ dataReceived( retry, _lazyHost );
+ return ! retry;
+ }
void DBClientCursor::requestMore() {
- assert( cursorId && pos == nReturned );
+ assert( cursorId && b.pos == b.nReturned );
if (haveLimit) {
- nToReturn -= nReturned;
+ nToReturn -= b.nReturned;
assert(nToReturn > 0);
}
BufBuilder b;
@@ -83,7 +117,7 @@ namespace mongo {
if ( _client ) {
_client->call( toSend, *response );
- m = response;
+ this->b.m = response;
dataReceived();
}
else {
@@ -91,7 +125,7 @@ namespace mongo {
ScopedDbConnection conn( _scopedHost );
conn->call( toSend , *response );
_client = conn.get();
- m = response;
+ this->b.m = response;
dataReceived();
_client = 0;
conn.done();
@@ -100,19 +134,24 @@ namespace mongo {
/** with QueryOption_Exhaust, the server just blasts data at us (marked at end with cursorid==0). */
void DBClientCursor::exhaustReceiveMore() {
- assert( cursorId && pos == nReturned );
+ assert( cursorId && b.pos == b.nReturned );
assert( !haveLimit );
auto_ptr<Message> response(new Message());
assert( _client );
_client->recv(*response);
- m = response;
+ b.m = response;
dataReceived();
}
- void DBClientCursor::dataReceived() {
- QueryResult *qr = (QueryResult *) m->singleData();
+ void DBClientCursor::dataReceived( bool& retry, string& host ) {
+
+ QueryResult *qr = (QueryResult *) b.m->singleData();
resultFlags = qr->resultFlags();
+ if ( qr->resultFlags() & ResultFlag_ErrSet ) {
+ wasError = true;
+ }
+
if ( qr->resultFlags() & ResultFlag_CursorNotFound ) {
// cursor id no longer valid at the server.
assert( qr->cursorId == 0 );
@@ -127,11 +166,12 @@ namespace mongo {
cursorId = qr->cursorId;
}
- nReturned = qr->nReturned;
- pos = 0;
- data = qr->data();
+ b.nReturned = qr->nReturned;
+ b.pos = 0;
+ b.data = qr->data();
+
+ _client->checkResponse( b.data, b.nReturned, &retry, &host ); // watches for "not master"
- _client->checkResponse( data, nReturned );
/* this assert would fire the way we currently work:
assert( nReturned || cursorId == 0 );
*/
@@ -144,17 +184,17 @@ namespace mongo {
if ( !_putBack.empty() )
return true;
- if (haveLimit && pos >= nToReturn)
+ if (haveLimit && b.pos >= nToReturn)
return false;
- if ( pos < nReturned )
+ if ( b.pos < b.nReturned )
return true;
if ( cursorId == 0 )
return false;
requestMore();
- return pos < nReturned;
+ return b.pos < b.nReturned;
}
BSONObj DBClientCursor::next() {
@@ -165,11 +205,11 @@ namespace mongo {
return ret;
}
- uassert(13422, "DBClientCursor next() called but more() is false", pos < nReturned);
+ uassert(13422, "DBClientCursor next() called but more() is false", b.pos < b.nReturned);
- pos++;
- BSONObj o(data);
- data += o.objsize();
+ b.pos++;
+ BSONObj o(b.data);
+ b.data += o.objsize();
/* todo would be good to make data null at end of batch for safety */
return o;
}
@@ -187,9 +227,9 @@ namespace mongo {
}
*/
- int p = pos;
- const char *d = data;
- while( m && p < nReturned ) {
+ int p = b.pos;
+ const char *d = b.data;
+ while( m && p < b.nReturned ) {
BSONObj o(d);
d += o.objsize();
p++;
@@ -198,6 +238,19 @@ namespace mongo {
}
}
+ bool DBClientCursor::peekError(BSONObj* error){
+ if( ! wasError ) return false;
+
+ vector<BSONObj> v;
+ peek(v, 1);
+
+ assert( v.size() == 1 );
+ assert( hasErrField( v[0] ) );
+
+ if( error ) *error = v[0].getOwned();
+ return true;
+ }
+
void DBClientCursor::attach( AScopedConnection * conn ) {
assert( _scopedHost.size() == 0 );
assert( conn );
@@ -205,14 +258,20 @@ namespace mongo {
if ( conn->get()->type() == ConnectionString::SET ||
conn->get()->type() == ConnectionString::SYNC ) {
- _scopedHost = _client->getServerAddress();
+ if( _lazyHost.size() > 0 )
+ _scopedHost = _lazyHost;
+ else if( _client )
+ _scopedHost = _client->getServerAddress();
+ else
+ massert(14821, "No client or lazy client specified, cannot store multi-host connection.", false);
}
else {
_scopedHost = conn->getHost();
}
-
+
conn->done();
_client = 0;
+ _lazyHost = "";
}
DBClientCursor::~DBClientCursor() {
@@ -221,12 +280,12 @@ namespace mongo {
DESTRUCTOR_GUARD (
- if ( cursorId && _ownCursor ) {
- BufBuilder b;
- b.appendNum( (int)0 ); // reserved
+ if ( cursorId && _ownCursor && ! inShutdown() ) {
+ BufBuilder b;
+ b.appendNum( (int)0 ); // reserved
b.appendNum( (int)1 ); // number
b.appendNum( cursorId );
-
+
Message m;
m.setData( dbKillCursors , b.buf() , b.len() );
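
Because peekError() inspects the first batch without consuming it, a caller can turn a { $err : ... } reply into data instead of an exception. The sketch below shows that pattern; the host, namespace, and deliberately bad query are placeholders, and the connect()/QUERY()/query() helpers are the stock client ones assumed to be available in this tree.

    #include <iostream>
    #include <memory>
    #include "client/dbclient.h"   // path assumed

    int main() {
        mongo::DBClientConnection conn;
        conn.connect( "localhost:27017" );             // placeholder host; throws on failure
        std::auto_ptr<mongo::DBClientCursor> c =
            conn.query( "test.coll", QUERY( "$where" << "this.is.broken(" ) );
        mongo::BSONObj err;
        if ( c.get() && c->peekError( &err ) ) {
            std::cout << "query failed: " << err.toString() << std::endl;
            return 1;
        }
        while ( c.get() && c->more() )
            std::cout << c->next().toString() << std::endl;   // or nextSafe() to throw on $err
        return 0;
    }
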
diff --git a/client/dbclientcursor.h b/client/dbclientcursor.h
index d176b89..977bd30 100644
--- a/client/dbclientcursor.h
+++ b/client/dbclientcursor.h
@@ -18,7 +18,7 @@
#pragma once
#include "../pch.h"
-#include "../util/message.h"
+#include "../util/net/message.h"
#include "../db/jsobj.h"
#include "../db/json.h"
#include <stack>
@@ -52,7 +52,7 @@ namespace mongo {
if you want to exhaust whatever data has been fetched to the client already but
then perhaps stop.
*/
- int objsLeftInBatch() const { _assertIfNull(); return _putBack.size() + nReturned - pos; }
+ int objsLeftInBatch() const { _assertIfNull(); return _putBack.size() + b.nReturned - b.pos; }
bool moreInCurrentBatch() { return objsLeftInBatch() > 0; }
/** next
@@ -71,11 +71,11 @@ namespace mongo {
/** throws AssertionException if get back { $err : ... } */
BSONObj nextSafe() {
BSONObj o = next();
- BSONElement e = o.firstElement();
- if( strcmp(e.fieldName(), "$err") == 0 ) {
+ if( strcmp(o.firstElementFieldName(), "$err") == 0 ) {
+ string s = "nextSafe(): " + o.toString();
if( logLevel >= 5 )
- log() << "nextSafe() error " << o.toString() << endl;
- uassert(13106, "nextSafe(): " + o.toString(), false);
+ log() << s << endl;
+ uasserted(13106, s);
}
return o;
}
@@ -86,11 +86,11 @@ namespace mongo {
WARNING: no support for _putBack yet!
*/
void peek(vector<BSONObj>&, int atMost);
- BSONObj peekOne(){
- vector<BSONObj> v;
- peek( v, 1 );
- return v.size() > 0 ? v[0] : BSONObj();
- }
+
+ /**
+ * peek ahead and see if an error occurred, and get the error if so.
+ */
+ bool peekError(BSONObj* error = NULL);
/**
        iterate the rest of the cursor and return the number of items
@@ -109,13 +109,9 @@ namespace mongo {
'dead' may be preset yet some data still queued and locally
available from the dbclientcursor.
*/
- bool isDead() const {
- return !this || cursorId == 0;
- }
+ bool isDead() const { return !this || cursorId == 0; }
- bool tailable() const {
- return (opts & QueryOption_CursorTailable) != 0;
- }
+ bool tailable() const { return (opts & QueryOption_CursorTailable) != 0; }
/** see ResultFlagType (constants.h) for flag values
mostly these flags are for internal purposes -
@@ -137,12 +133,9 @@ namespace mongo {
fieldsToReturn(_fieldsToReturn),
opts(queryOptions),
batchSize(bs==1?2:bs),
- m(new Message()),
cursorId(),
- nReturned(),
- pos(),
- data(),
- _ownCursor( true ) {
+ _ownCursor( true ),
+ wasError( false ) {
}
DBClientCursor( DBClientBase* client, const string &_ns, long long _cursorId, int _nToReturn, int options ) :
@@ -151,11 +144,7 @@ namespace mongo {
nToReturn( _nToReturn ),
haveLimit( _nToReturn > 0 && !(options & QueryOption_CursorTailable)),
opts( options ),
- m(new Message()),
- cursorId( _cursorId ),
- nReturned(),
- pos(),
- data(),
+ cursorId(_cursorId),
_ownCursor( true ) {
}
@@ -170,11 +159,31 @@ namespace mongo {
void attach( AScopedConnection * conn );
+ /**
+ * actually does the query
+ */
+ bool init();
+
+ void initLazy( bool isRetry = false );
+ bool initLazyFinish( bool& retry );
+
+ class Batch : boost::noncopyable {
+ friend class DBClientCursor;
+ auto_ptr<Message> m;
+ int nReturned;
+ int pos;
+ const char *data;
+ public:
+ Batch() : m( new Message() ), nReturned(), pos(), data() { }
+ };
+
private:
friend class DBClientBase;
friend class DBClientConnection;
- bool init();
+
int nextBatchSize();
+
+ Batch b;
DBClientBase* _client;
string ns;
BSONObj query;
@@ -184,18 +193,18 @@ namespace mongo {
const BSONObj *fieldsToReturn;
int opts;
int batchSize;
- auto_ptr<Message> m;
stack< BSONObj > _putBack;
int resultFlags;
long long cursorId;
- int nReturned;
- int pos;
- const char *data;
- void dataReceived();
- void requestMore();
- void exhaustReceiveMore(); // for exhaust
bool _ownCursor; // see decouple()
string _scopedHost;
+ string _lazyHost;
+ bool wasError;
+
+ void dataReceived() { bool retry; string lazyHost; dataReceived( retry, lazyHost ); }
+ void dataReceived( bool& retry, string& lazyHost );
+ void requestMore();
+ void exhaustReceiveMore(); // for exhaust
// Don't call from a virtual function
void _assertIfNull() const { uassert(13348, "connection died", this); }
@@ -203,6 +212,9 @@ namespace mongo {
// non-copyable , non-assignable
DBClientCursor( const DBClientCursor& );
DBClientCursor& operator=( const DBClientCursor& );
+
+ // init pieces
+ void _assembleInit( Message& toSend );
};
/** iterate over objects in current batch only - will not cause a network call
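
With the per-batch state grouped into DBClientCursor::Batch, "what is buffered locally" is exactly the put-back stack plus the unread tail of the current batch, and objsLeftInBatch()/moreInCurrentBatch() expose that without any network traffic. A small sketch of draining only the local batch (the helper name is ours, not the driver's; needs <vector> and the cursor header):

    // Sketch: consume only what the client already holds; never calls requestMore().
    static std::vector<mongo::BSONObj> drainCurrentBatch( mongo::DBClientCursor& c ) {
        std::vector<mongo::BSONObj> out;
        out.reserve( c.objsLeftInBatch() );         // _putBack.size() + nReturned - pos
        while ( c.moreInCurrentBatch() )
            out.push_back( c.next().getOwned() );   // own the BSON before the batch message is replaced
        return out;
    }
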
diff --git a/client/distlock.cpp b/client/distlock.cpp
index 9ec98ea..cb71159 100644
--- a/client/distlock.cpp
+++ b/client/distlock.cpp
@@ -21,8 +21,7 @@
namespace mongo {
- static string lockPingNS = "config.lockpings";
- static string locksNS = "config.locks";
+ LabeledLevel DistributedLock::logLvl( 1 );
ThreadLocalValue<string> distLockIds("");
@@ -36,7 +35,7 @@ namespace mongo {
static void initModule() {
// cache process string
stringstream ss;
- ss << getHostName() << ":" << time(0) << ":" << rand();
+ ss << getHostName() << ":" << cmdLine.port << ":" << time(0) << ":" << rand();
_cachedProcessString = new string( ss.str() );
}
@@ -59,116 +58,406 @@ namespace mongo {
return s;
}
- void _distLockPingThread( ConnectionString addr ) {
- setThreadName( "LockPinger" );
-
- log() << "creating dist lock ping thread for: " << addr << endl;
- static int loops = 0;
- while( ! inShutdown() ) {
+ class DistributedLockPinger {
+ public:
- string process = getDistLockProcess();
- log(4) << "dist_lock about to ping for: " << process << endl;
+ DistributedLockPinger()
+ : _mutex( "DistributedLockPinger" ) {
+ }
- try {
- ScopedDbConnection conn( addr );
-
- // refresh the entry corresponding to this process in the lockpings collection
- conn->update( lockPingNS ,
- BSON( "_id" << process ) ,
- BSON( "$set" << BSON( "ping" << DATENOW ) ) ,
- true );
- string err = conn->getLastError();
- if ( ! err.empty() ) {
- warning() << "dist_lock process: " << process << " pinging: " << addr << " failed: "
- << err << endl;
- conn.done();
- sleepsecs(30);
- continue;
- }
+ void _distLockPingThread( ConnectionString addr, string process, unsigned long long sleepTime ) {
+
+ setThreadName( "LockPinger" );
+
+ string pingId = pingThreadId( addr, process );
+
+ log( DistributedLock::logLvl - 1 ) << "creating distributed lock ping thread for " << addr
+ << " and process " << process
+ << " (sleeping for " << sleepTime << "ms)" << endl;
+
+ static int loops = 0;
+ while( ! inShutdown() && ! shouldKill( addr, process ) ) {
+
+ log( DistributedLock::logLvl + 2 ) << "distributed lock pinger '" << pingId << "' about to ping." << endl;
+
+ Date_t pingTime;
+
+ try {
+ ScopedDbConnection conn( addr );
+
+ pingTime = jsTime();
- // remove really old entries from the lockpings collection if they're not holding a lock
- // (this may happen if an instance of a process was taken down and no new instance came up to
- // replace it for a quite a while)
- // if the lock is taken, the take-over mechanism should handle the situation
- auto_ptr<DBClientCursor> c = conn->query( locksNS , BSONObj() );
- vector<string> pids;
- while ( c->more() ) {
- BSONObj lock = c->next();
- if ( ! lock["process"].eoo() ) {
- pids.push_back( lock["process"].valuestrsafe() );
+ // refresh the entry corresponding to this process in the lockpings collection
+ conn->update( DistributedLock::lockPingNS ,
+ BSON( "_id" << process ) ,
+ BSON( "$set" << BSON( "ping" << pingTime ) ) ,
+ true );
+
+ string err = conn->getLastError();
+ if ( ! err.empty() ) {
+ warning() << "pinging failed for distributed lock pinger '" << pingId << "'."
+ << causedBy( err ) << endl;
+ conn.done();
+
+ // Sleep for normal ping time
+ sleepmillis(sleepTime);
+ continue;
+ }
+
+ // remove really old entries from the lockpings collection if they're not holding a lock
+ // (this may happen if an instance of a process was taken down and no new instance came up to
+ // replace it for quite a while)
+ // if the lock is taken, the take-over mechanism should handle the situation
+ auto_ptr<DBClientCursor> c = conn->query( DistributedLock::locksNS , BSONObj() );
+ set<string> pids;
+ while ( c->more() ) {
+ BSONObj lock = c->next();
+ if ( ! lock["process"].eoo() ) {
+ pids.insert( lock["process"].valuestrsafe() );
+ }
+ }
+
+ Date_t fourDays = pingTime - ( 4 * 86400 * 1000 ); // 4 days
+ conn->remove( DistributedLock::lockPingNS , BSON( "_id" << BSON( "$nin" << pids ) << "ping" << LT << fourDays ) );
+ err = conn->getLastError();
+ if ( ! err.empty() ) {
+ warning() << "ping cleanup for distributed lock pinger '" << pingId << "' failed."
+ << causedBy( err ) << endl;
+ conn.done();
+
+ // Sleep for normal ping time
+ sleepmillis(sleepTime);
+ continue;
+ }
+
+ // create index so remove is fast even with a lot of servers
+ if ( loops++ == 0 ) {
+ conn->ensureIndex( DistributedLock::lockPingNS , BSON( "ping" << 1 ) );
+ }
+
+ log( DistributedLock::logLvl - ( loops % 10 == 0 ? 1 : 0 ) ) << "cluster " << addr << " pinged successfully at " << pingTime
+ << " by distributed lock pinger '" << pingId
+ << "', sleeping for " << sleepTime << "ms" << endl;
+
+ // Remove old locks, if possible
+ // Make sure no one else is adding to this list at the same time
+ scoped_lock lk( _mutex );
+
+ int numOldLocks = _oldLockOIDs.size();
+ if( numOldLocks > 0 )
+ log( DistributedLock::logLvl - 1 ) << "trying to delete " << _oldLockOIDs.size() << " old lock entries for process " << process << endl;
+
+ bool removed = false;
+ for( list<OID>::iterator i = _oldLockOIDs.begin(); i != _oldLockOIDs.end();
+ i = ( removed ? _oldLockOIDs.erase( i ) : ++i ) ) {
+ removed = false;
+ try {
+ // Got OID from lock with id, so we don't need to specify id again
+ conn->update( DistributedLock::locksNS ,
+ BSON( "ts" << *i ),
+ BSON( "$set" << BSON( "state" << 0 ) ) );
+
+ // Either the update went through or it didn't, either way we're done trying to
+ // unlock
+ log( DistributedLock::logLvl - 1 ) << "handled late remove of old distributed lock with ts " << *i << endl;
+ removed = true;
+ }
+ catch( UpdateNotTheSame& ) {
+ log( DistributedLock::logLvl - 1 ) << "partially removed old distributed lock with ts " << *i << endl;
+ removed = true;
+ }
+ catch ( std::exception& e) {
+ warning() << "could not remove old distributed lock with ts " << *i
+ << causedBy( e ) << endl;
+ }
+
+ }
+
+ if( numOldLocks > 0 && _oldLockOIDs.size() > 0 ){
+ log( DistributedLock::logLvl - 1 ) << "not all old lock entries could be removed for process " << process << endl;
}
- }
- Date_t fourDays = jsTime() - ( 4 * 86400 * 1000 ); // 4 days
- conn->remove( lockPingNS , BSON( "_id" << BSON( "$nin" << pids ) << "ping" << LT << fourDays ) );
- err = conn->getLastError();
- if ( ! err.empty() ) {
- warning() << "dist_lock cleanup request from process: " << process << " to: " << addr
- << " failed: " << err << endl;
conn.done();
- sleepsecs(30);
- continue;
- }
- // create index so remove is fast even with a lot of servers
- if ( loops++ == 0 ) {
- conn->ensureIndex( lockPingNS , BSON( "ping" << 1 ) );
+ }
+ catch ( std::exception& e ) {
+ warning() << "distributed lock pinger '" << pingId << "' detected an exception while pinging."
+ << causedBy( e ) << endl;
}
- conn.done();
+ sleepmillis(sleepTime);
+ }
+
+ warning() << "removing distributed lock ping thread '" << pingId << "'" << endl;
+
+
+ if( shouldKill( addr, process ) )
+ finishKill( addr, process );
+
+ }
+
+ void distLockPingThread( ConnectionString addr, long long clockSkew, string processId, unsigned long long sleepTime ) {
+ try {
+ jsTimeVirtualThreadSkew( clockSkew );
+ _distLockPingThread( addr, processId, sleepTime );
}
catch ( std::exception& e ) {
- warning() << "dist_lock exception during ping: " << e.what() << endl;
+ error() << "unexpected error while running distributed lock pinger for " << addr << ", process " << processId << causedBy( e ) << endl;
}
+ catch ( ... ) {
+ error() << "unknown error while running distributed lock pinger for " << addr << ", process " << processId << endl;
+ }
+ }
- log( loops % 10 == 0 ? 0 : 1) << "dist_lock pinged successfully for: " << process << endl;
- sleepsecs(30);
+ string pingThreadId( const ConnectionString& conn, const string& processId ) {
+ return conn.toString() + "/" + processId;
}
- }
- void distLockPingThread( ConnectionString addr ) {
- try {
- _distLockPingThread( addr );
+ string got( DistributedLock& lock, unsigned long long sleepTime ) {
+
+ // Make sure we don't start multiple threads for a process id
+ scoped_lock lk( _mutex );
+
+ const ConnectionString& conn = lock.getRemoteConnection();
+ const string& processId = lock.getProcessId();
+ string s = pingThreadId( conn, processId );
+
+ // Ignore if we already have a pinging thread for this process.
+ if ( _seen.count( s ) > 0 ) return "";
+
+ // Check our clock skew
+ try {
+ if( lock.isRemoteTimeSkewed() ) {
+ throw LockException( str::stream() << "clock skew of the cluster " << conn.toString() << " is too far out of bounds to allow distributed locking." , 13650 );
+ }
+ }
+ catch( LockException& e) {
+ throw LockException( str::stream() << "error checking clock skew of cluster " << conn.toString() << causedBy( e ) , 13651);
+ }
+
+ boost::thread t( boost::bind( &DistributedLockPinger::distLockPingThread, this, conn, getJSTimeVirtualThreadSkew(), processId, sleepTime) );
+
+ _seen.insert( s );
+
+ return s;
}
- catch ( std::exception& e ) {
- error() << "unexpected error in distLockPingThread: " << e.what() << endl;
+
+ void addUnlockOID( const OID& oid ) {
+ // Modifying the lock from some other thread
+ scoped_lock lk( _mutex );
+ _oldLockOIDs.push_back( oid );
}
- catch ( ... ) {
- error() << "unexpected unknown error in distLockPingThread" << endl;
+
+ bool willUnlockOID( const OID& oid ) {
+ scoped_lock lk( _mutex );
+ return find( _oldLockOIDs.begin(), _oldLockOIDs.end(), oid ) != _oldLockOIDs.end();
}
- }
+ void kill( const ConnectionString& conn, const string& processId ) {
+ // Make sure we're in a consistent state before other threads can see us
+ scoped_lock lk( _mutex );
- class DistributedLockPinger {
- public:
- DistributedLockPinger()
- : _mutex( "DistributedLockPinger" ) {
+ string pingId = pingThreadId( conn, processId );
+
+ assert( _seen.count( pingId ) > 0 );
+ _kill.insert( pingId );
+
+ }
+
+ bool shouldKill( const ConnectionString& conn, const string& processId ) {
+ return _kill.count( pingThreadId( conn, processId ) ) > 0;
}
- void got( const ConnectionString& conn ) {
- string s = conn.toString();
+ void finishKill( const ConnectionString& conn, const string& processId ) {
+ // Make sure we're in a consistent state before other threads can see us
scoped_lock lk( _mutex );
- if ( _seen.count( s ) > 0 )
- return;
- boost::thread t( boost::bind( &distLockPingThread , conn ) );
- _seen.insert( s );
+
+ string pingId = pingThreadId( conn, processId );
+
+ _kill.erase( pingId );
+ _seen.erase( pingId );
+
}
+ set<string> _kill;
set<string> _seen;
mongo::mutex _mutex;
+ list<OID> _oldLockOIDs;
} distLockPinger;
- DistributedLock::DistributedLock( const ConnectionString& conn , const string& name , unsigned takeoverMinutes )
- : _conn(conn),_name(name),_takeoverMinutes(takeoverMinutes) {
- _id = BSON( "_id" << name );
- _ns = "config.locks";
- distLockPinger.got( conn );
+
+ const string DistributedLock::lockPingNS = "config.lockpings";
+ const string DistributedLock::locksNS = "config.locks";
+
+ /**
+ * Create a new distributed lock, potentially with a custom sleep and takeover time. If a custom sleep time is
+ * specified (time between pings)
+ */
+ DistributedLock::DistributedLock( const ConnectionString& conn , const string& name , unsigned long long lockTimeout, bool asProcess )
+ : _conn(conn) , _name(name) , _id( BSON( "_id" << name ) ), _processId( asProcess ? getDistLockId() : getDistLockProcess() ),
+ _lockTimeout( lockTimeout == 0 ? LOCK_TIMEOUT : lockTimeout ), _maxClockSkew( _lockTimeout / LOCK_SKEW_FACTOR ), _maxNetSkew( _maxClockSkew ), _lockPing( _maxClockSkew ),
+ _mutex( "DistributedLock" )
+ {
+ log( logLvl - 1 ) << "created new distributed lock for " << name << " on " << conn
+ << " ( lock timeout : " << _lockTimeout
+ << ", ping interval : " << _lockPing << ", process : " << asProcess << " )" << endl;
+ }
+
+ Date_t DistributedLock::getRemoteTime() {
+ return DistributedLock::remoteTime( _conn, _maxNetSkew );
+ }
+
+ bool DistributedLock::isRemoteTimeSkewed() {
+ return !DistributedLock::checkSkew( _conn, NUM_LOCK_SKEW_CHECKS, _maxClockSkew, _maxNetSkew );
+ }
+
+ const ConnectionString& DistributedLock::getRemoteConnection() {
+ return _conn;
+ }
+
+ const string& DistributedLock::getProcessId() {
+ return _processId;
+ }
+
+ /**
+ * Returns the remote time as reported by the cluster or server. The maximum difference between the reported time
+ * and the actual time on the remote server (at the completion of the function) is the maxNetSkew
+ */
+ Date_t DistributedLock::remoteTime( const ConnectionString& cluster, unsigned long long maxNetSkew ) {
+
+ ConnectionString server( *cluster.getServers().begin() );
+ ScopedDbConnection conn( server );
+
+ BSONObj result;
+ long long delay;
+
+ try {
+ Date_t then = jsTime();
+ bool success = conn->runCommand( string("admin"), BSON( "serverStatus" << 1 ), result );
+ delay = jsTime() - then;
+
+ if( !success )
+ throw TimeNotFoundException( str::stream() << "could not get status from server "
+ << server.toString() << " in cluster " << cluster.toString()
+ << " to check time", 13647 );
+
+ // Make sure that our delay is not more than 2x our maximum network skew, since this is the max our remote
+ // time value can be off by if we assume a response in the middle of the delay.
+ if( delay > (long long) (maxNetSkew * 2) )
+ throw TimeNotFoundException( str::stream() << "server " << server.toString()
+ << " in cluster " << cluster.toString()
+ << " did not respond within max network delay of "
+ << maxNetSkew << "ms", 13648 );
+ }
+ catch(...) {
+ conn.done();
+ throw;
+ }
+
+ conn.done();
+
+ return result["localTime"].Date() - (delay / 2);
+
+ }
+
+ bool DistributedLock::checkSkew( const ConnectionString& cluster, unsigned skewChecks, unsigned long long maxClockSkew, unsigned long long maxNetSkew ) {
+
+ vector<HostAndPort> servers = cluster.getServers();
+
+ if(servers.size() < 1) return true;
+
+ vector<long long> avgSkews;
+
+ for(unsigned i = 0; i < skewChecks; i++) {
+
+ // Find the average skew for each server
+ unsigned s = 0;
+ for(vector<HostAndPort>::iterator si = servers.begin(); si != servers.end(); ++si,s++) {
+
+ if(i == 0) avgSkews.push_back(0);
+
+ // Could check if this is self, but shouldn't matter since local network connection should be fast.
+ ConnectionString server( *si );
+
+ vector<long long> skew;
+
+ BSONObj result;
+
+ Date_t remote = remoteTime( server, maxNetSkew );
+ Date_t local = jsTime();
+
+ // Remote time can be delayed by at most MAX_NET_SKEW
+
+ // Skew is how much time we'd have to add to local to get to remote
+ avgSkews[s] += (long long) (remote - local);
+
+ log( logLvl + 1 ) << "skew from remote server " << server << " found: " << (long long) (remote - local) << endl;
+
+ }
+ }
+
+ // Analyze skews
+
+ long long serverMaxSkew = 0;
+ long long serverMinSkew = 0;
+
+ for(unsigned s = 0; s < avgSkews.size(); s++) {
+
+ long long avgSkew = (avgSkews[s] /= skewChecks);
+
+ // Keep track of max and min skews
+ if(s == 0) {
+ serverMaxSkew = avgSkew;
+ serverMinSkew = avgSkew;
+ }
+ else {
+ if(avgSkew > serverMaxSkew)
+ serverMaxSkew = avgSkew;
+ if(avgSkew < serverMinSkew)
+ serverMinSkew = avgSkew;
+ }
+
+ }
+
+ long long totalSkew = serverMaxSkew - serverMinSkew;
+
+ // Make sure our max skew is not more than our pre-set limit
+ if(totalSkew > (long long) maxClockSkew) {
+ log( logLvl + 1 ) << "total clock skew of " << totalSkew << "ms for servers " << cluster << " is out of " << maxClockSkew << "ms bounds." << endl;
+ return false;
+ }
+
+ log( logLvl + 1 ) << "total clock skew of " << totalSkew << "ms for servers " << cluster << " is in " << maxClockSkew << "ms bounds." << endl;
+ return true;
+ }
+
+ // For use in testing, ping thread should run indefinitely in practice.
+ bool DistributedLock::killPinger( DistributedLock& lock ) {
+ if( lock._threadId == "") return false;
+
+ distLockPinger.kill( lock._conn, lock._processId );
+ return true;
}
+ // Semantics of this method are basically that if the lock cannot be acquired, returns false, can be retried.
+ // If the lock should not be tried again (some unexpected error) a LockException is thrown.
+ // If we are only trying to re-enter a currently held lock, reenter should be true.
+ // Note: reenter doesn't actually make this lock re-entrant in the normal sense, since it can still only
+ // be unlocked once, instead it is used to verify that the lock is already held.
+ bool DistributedLock::lock_try( const string& why , bool reenter, BSONObj * other ) {
+
+ // TODO: Start pinging only when we actually get the lock?
+ // If we don't have a thread pinger, make sure we shouldn't have one
+ if( _threadId == "" ){
+ scoped_lock lk( _mutex );
+ _threadId = distLockPinger.got( *this, _lockPing );
+ }
+
+ // This should always be true, if not, we are using the lock incorrectly.
+ assert( _name != "" );
- bool DistributedLock::lock_try( string why , BSONObj * other ) {
// write to dummy if 'other' is null
BSONObj dummyOther;
if ( other == NULL )
@@ -182,93 +471,240 @@ namespace mongo {
{
// make sure its there so we can use simple update logic below
- BSONObj o = conn->findOne( _ns , _id ).getOwned();
+ BSONObj o = conn->findOne( locksNS , _id ).getOwned();
+
+ // Case 1: No locks
if ( o.isEmpty() ) {
try {
- log(4) << "dist_lock inserting initial doc in " << _ns << " for lock " << _name << endl;
- conn->insert( _ns , BSON( "_id" << _name << "state" << 0 << "who" << "" ) );
+ log( logLvl ) << "inserting initial doc in " << locksNS << " for lock " << _name << endl;
+ conn->insert( locksNS , BSON( "_id" << _name << "state" << 0 << "who" << "" ) );
}
catch ( UserException& e ) {
- log() << "dist_lock could not insert initial doc: " << e << endl;
+ warning() << "could not insert initial doc for distributed lock " << _name << causedBy( e ) << endl;
}
}
-
+
+ // Case 2: A set lock that we might be able to force
else if ( o["state"].numberInt() > 0 ) {
+
+ string lockName = o["_id"].String() + string("/") + o["process"].String();
+
+ bool canReenter = reenter && o["process"].String() == _processId && ! distLockPinger.willUnlockOID( o["ts"].OID() ) && o["state"].numberInt() == 2;
+ if( reenter && ! canReenter ) {
+ log( logLvl - 1 ) << "not re-entering distributed lock " << lockName;
+ if( o["process"].String() != _processId ) log( logLvl - 1 ) << ", different process " << _processId << endl;
+ else if( o["state"].numberInt() == 2 ) log( logLvl - 1 ) << ", state not finalized" << endl;
+ else log( logLvl - 1 ) << ", ts " << o["ts"].OID() << " scheduled for late unlock" << endl;
+
+ // reset since we've been bounced by a previous lock not being where we thought it was,
+ // and should go through full forcing process if required.
+ // (in theory we should never see a ping here if used correctly)
+ *other = o; other->getOwned(); conn.done(); resetLastPing();
+ return false;
+ }
+
BSONObj lastPing = conn->findOne( lockPingNS , o["process"].wrap( "_id" ) );
if ( lastPing.isEmpty() ) {
- // if a lock is taken but there's no ping for it, we're in an inconsistent situation
- // if the lock holder (mongos or d) does not exist anymore, the lock could safely be removed
- // but we'd require analysis of the situation before a manual intervention
- error() << "config.locks: " << _name << " lock is taken by old process? "
- << "remove the following lock if the process is not active anymore: " << o << endl;
- *other = o;
- conn.done();
- return false;
+ log( logLvl ) << "empty ping found for process in lock '" << lockName << "'" << endl;
+ // TODO: Using 0 as a "no time found" value will fail if dates roll over, but then, so will a lot.
+ lastPing = BSON( "_id" << o["process"].String() << "ping" << (Date_t) 0 );
}
- unsigned long long now = jsTime();
- unsigned long long pingTime = lastPing["ping"].Date();
-
- if ( now < pingTime ) {
- // clock skew
- warning() << "dist_lock has detected clock skew of " << ( pingTime - now ) << "ms" << endl;
- *other = o;
- conn.done();
- return false;
+ unsigned long long elapsed = 0;
+ unsigned long long takeover = _lockTimeout;
+
+ log( logLvl ) << "checking last ping for lock '" << lockName << "'" << " against process " << _lastPingCheck.get<0>() << " and ping " << _lastPingCheck.get<1>() << endl;
+
+ try {
+
+ Date_t remote = remoteTime( _conn );
+
+ // Timeout the elapsed time using comparisons of remote clock
+ // For non-finalized locks, timeout 15 minutes since last seen (ts)
+ // For finalized locks, timeout 15 minutes since last ping
+ bool recPingChange = o["state"].numberInt() == 2 && ( _lastPingCheck.get<0>() != lastPing["_id"].String() || _lastPingCheck.get<1>() != lastPing["ping"].Date() );
+ bool recTSChange = _lastPingCheck.get<3>() != o["ts"].OID();
+
+ if( recPingChange || recTSChange ) {
+ // If the ping has changed since we last checked, mark the current date and time
+ scoped_lock lk( _mutex );
+ _lastPingCheck = boost::tuple<string, Date_t, Date_t, OID>( lastPing["_id"].String().c_str(), lastPing["ping"].Date(), remote, o["ts"].OID() );
+ }
+ else {
+
+ // GOTCHA! Due to network issues, it is possible that the current time
+ // is less than the remote time. We *have* to check this here, otherwise
+ // we overflow and our lock breaks.
+ if(_lastPingCheck.get<2>() >= remote)
+ elapsed = 0;
+ else
+ elapsed = remote - _lastPingCheck.get<2>();
+ }
+
}
-
- unsigned long long elapsed = now - pingTime;
- elapsed = elapsed / ( 1000 * 60 ); // convert to minutes
-
- if ( elapsed > ( 60 * 24 * 365 * 100 ) /* 100 years */ ) {
- warning() << "distlock elapsed time seems impossible: " << lastPing << endl;
+ catch( LockException& e ) {
+
+ // Remote server cannot be found / is not responsive
+ warning() << "Could not get remote time from " << _conn << causedBy( e );
+ // If our config server is having issues, forget all the pings until we can see it again
+ resetLastPing();
+
}
-
- if ( elapsed <= _takeoverMinutes ) {
- log(1) << "dist_lock lock failed because taken by: " << o << " elapsed minutes: " << elapsed << endl;
- *other = o;
- conn.done();
+
+ if ( elapsed <= takeover && ! canReenter ) {
+ log( logLvl ) << "could not force lock '" << lockName << "' because elapsed time " << elapsed << " <= takeover time " << takeover << endl;
+ *other = o; other->getOwned(); conn.done();
return false;
}
-
- log() << "dist_lock forcefully taking over from: " << o << " elapsed minutes: " << elapsed << endl;
- conn->update( _ns , _id , BSON( "$set" << BSON( "state" << 0 ) ) );
- string err = conn->getLastError();
- if ( ! err.empty() ) {
- warning() << "dist_lock take over from: " << o << " failed: " << err << endl;
- *other = o.getOwned();
- other->getOwned();
- conn.done();
+ else if( elapsed > takeover && canReenter ) {
+ log( logLvl - 1 ) << "not re-entering distributed lock '" << lockName << "' because elapsed time " << elapsed << " > takeover time " << takeover << endl;
+ *other = o; other->getOwned(); conn.done();
return false;
}
+ log( logLvl - 1 ) << ( canReenter ? "re-entering" : "forcing" ) << " lock '" << lockName << "' because "
+ << ( canReenter ? "re-entering is allowed, " : "" )
+ << "elapsed time " << elapsed << " > takeover time " << takeover << endl;
+
+ if( elapsed > takeover ) {
+
+ // Lock may be forced; reset our timer whether the force succeeds or fails
+ // Ensures that another timeout must happen if something borks up here, and resets our pristine
+ // ping state if acquired.
+ resetLastPing();
+
+ try {
+
+ // Check the clock skew again. If we check this before we get a lock
+ // and after the lock times out, we can be pretty sure the time is
+ // increasing at the same rate on all servers and therefore our
+ // timeout is accurate
+ uassert( 14023, str::stream() << "remote time in cluster " << _conn.toString() << " is now skewed, cannot force lock.", !isRemoteTimeSkewed() );
+
+ // Make sure we break the lock with the correct "ts" (OID) value, otherwise
+ // we can overwrite a new lock inserted in the meantime.
+ conn->update( locksNS , BSON( "_id" << _id["_id"].String() << "state" << o["state"].numberInt() << "ts" << o["ts"] ),
+ BSON( "$set" << BSON( "state" << 0 ) ) );
+
+ BSONObj err = conn->getLastErrorDetailed();
+ string errMsg = DBClientWithCommands::getLastErrorString(err);
+
+ // TODO: Clean up all the extra code to exit this method, probably with a refactor
+ if ( !errMsg.empty() || !err["n"].type() || err["n"].numberInt() < 1 ) {
+ ( errMsg.empty() ? log( logLvl - 1 ) : warning() ) << "Could not force lock '" << lockName << "' "
+ << ( !errMsg.empty() ? causedBy(errMsg) : string("(another force won)") ) << endl;
+ *other = o; other->getOwned(); conn.done();
+ return false;
+ }
+
+ }
+ catch( UpdateNotTheSame& ) {
+ // Ok to continue since we know we forced at least one lock document, and all lock docs
+ // are required for a lock to be held.
+ warning() << "lock forcing " << lockName << " inconsistent" << endl;
+ }
+ catch( std::exception& e ) {
+ conn.done();
+ throw LockException( str::stream() << "exception forcing distributed lock "
+ << lockName << causedBy( e ), 13660);
+ }
+
+ }
+ else {
+
+ assert( canReenter );
+
+ // Lock may be re-entered, reset our timer if succeeds or fails
+ // Not strictly necessary, but helpful for small timeouts where thread scheduling is significant.
+ // This ensures that two attempts are still required for a force if not acquired, and resets our
+ // state if we are acquired.
+ resetLastPing();
+
+ // Test that the lock is held by trying to update the finalized state of the lock to the same state
+ // if it does not update or does not update on all servers, we can't re-enter.
+ try {
+
+ // Test the lock with the correct "ts" (OID) value
+ conn->update( locksNS , BSON( "_id" << _id["_id"].String() << "state" << 2 << "ts" << o["ts"] ),
+ BSON( "$set" << BSON( "state" << 2 ) ) );
+
+ BSONObj err = conn->getLastErrorDetailed();
+ string errMsg = DBClientWithCommands::getLastErrorString(err);
+
+ // TODO: Clean up all the extra code to exit this method, probably with a refactor
+ if ( ! errMsg.empty() || ! err["n"].type() || err["n"].numberInt() < 1 ) {
+ ( errMsg.empty() ? log( logLvl - 1 ) : warning() ) << "Could not re-enter lock '" << lockName << "' "
+ << ( !errMsg.empty() ? causedBy(errMsg) : string("(not sure lock is held)") )
+ << " gle: " << err
+ << endl;
+ *other = o; other->getOwned(); conn.done();
+ return false;
+ }
+
+ }
+ catch( UpdateNotTheSame& ) {
+ // NOT ok to continue since our lock isn't held by all servers, so isn't valid.
+ warning() << "inconsistent state re-entering lock, lock " << lockName << " not held" << endl;
+ *other = o; other->getOwned(); conn.done();
+ return false;
+ }
+ catch( std::exception& e ) {
+ conn.done();
+ throw LockException( str::stream() << "exception re-entering distributed lock "
+ << lockName << causedBy( e ), 13660);
+ }
+
+ log( logLvl - 1 ) << "re-entered distributed lock '" << lockName << "'" << endl;
+ *other = o; other->getOwned(); conn.done();
+ return true;
+
+ }
+
+ log( logLvl - 1 ) << "lock '" << lockName << "' successfully forced" << endl;
+
+ // We don't need the ts value in the query, since we will only ever replace locks with state=0.
}
+ // Case 3: We have an expired lock
else if ( o["ts"].type() ) {
queryBuilder.append( o["ts"] );
}
}
- OID ts;
- ts.init();
+ // Always reset our ping if we're trying to get a lock, since getting a lock implies the lock state is open
+ // and no locks need to be forced. If anything goes wrong, we don't want to remember an old lock.
+ resetLastPing();
bool gotLock = false;
- BSONObj now;
+ BSONObj currLock;
- BSONObj lockDetails = BSON( "state" << 1 << "who" << getDistLockId() << "process" << getDistLockProcess() <<
- "when" << DATENOW << "why" << why << "ts" << ts );
+ BSONObj lockDetails = BSON( "state" << 1 << "who" << getDistLockId() << "process" << _processId <<
+ "when" << jsTime() << "why" << why << "ts" << OID::gen() );
BSONObj whatIWant = BSON( "$set" << lockDetails );
+
+ BSONObj query = queryBuilder.obj();
+
+ string lockName = _name + string("/") + _processId;
+
try {
- log(4) << "dist_lock about to aquire lock: " << lockDetails << endl;
- conn->update( _ns , queryBuilder.obj() , whatIWant );
+ // Main codepath to acquire lock
+
+ log( logLvl ) << "about to acquire distributed lock '" << lockName << ":\n"
+ << lockDetails.jsonString(Strict, true) << "\n"
+ << query.jsonString(Strict, true) << endl;
+
+ conn->update( locksNS , query , whatIWant );
+
+ BSONObj err = conn->getLastErrorDetailed();
+ string errMsg = DBClientWithCommands::getLastErrorString(err);
- BSONObj o = conn->getLastErrorDetailed();
- now = conn->findOne( _ns , _id );
+ currLock = conn->findOne( locksNS , _id );
- if ( o["n"].numberInt() == 0 ) {
- *other = now;
+ if ( !errMsg.empty() || !err["n"].type() || err["n"].numberInt() < 1 ) {
+ ( errMsg.empty() ? log( logLvl - 1 ) : warning() ) << "could not acquire lock '" << lockName << "' "
+ << ( !errMsg.empty() ? causedBy( errMsg ) : string("(another update won)") ) << endl;
+ *other = currLock;
other->getOwned();
- log() << "dist_lock error trying to aquire lock: " << lockDetails << " error: " << o << endl;
gotLock = false;
}
else {
@@ -277,63 +713,234 @@ namespace mongo {
}
catch ( UpdateNotTheSame& up ) {
+
// this means our update got through on some, but not others
- log(4) << "dist_lock lock did not propagate properly" << endl;
+ warning() << "distributed lock '" << lockName << "' did not propagate properly." << causedBy( up ) << endl;
+
+ // Overall protection derives from:
+ // All unlocking updates use the ts value when setting state to 0
+ // This ensures that during locking, we can override all smaller ts locks with
+ // our own safe ts value and not be unlocked afterward.
+ for ( unsigned i = 0; i < up.size(); i++ ) {
+
+ ScopedDbConnection indDB( up[i].first );
+ BSONObj indUpdate;
+
+ try {
+
+ indUpdate = indDB->findOne( locksNS , _id );
+
+ // If we override this lock in any way, grab and protect it.
+ // We assume/ensure that if a process does not have all lock documents, it is no longer
+ // holding the lock.
+ // Note - finalized locks may compete too, but we know they've won already if competing
+ // in this round. Cleanup of crashes during finalizing may take a few tries.
+ if( indUpdate["ts"] < lockDetails["ts"] || indUpdate["state"].numberInt() == 0 ) {
+
+ BSONObj grabQuery = BSON( "_id" << _id["_id"].String() << "ts" << indUpdate["ts"].OID() );
+
+ // Change ts so we won't be forced, state so we won't be relocked
+ BSONObj grabChanges = BSON( "ts" << lockDetails["ts"].OID() << "state" << 1 );
+
+ // Either our update will succeed, and we'll grab the lock, or it will fail b/c some other
+ // process grabbed the lock (which will change the ts), but the lock will be set until forcing
+ indDB->update( locksNS, grabQuery, BSON( "$set" << grabChanges ) );
+
+ indUpdate = indDB->findOne( locksNS, _id );
+
+ // Our lock should now be set until forcing.
+ assert( indUpdate["state"].numberInt() == 1 );
+
+ }
+ // else our lock is the same, in which case we're safe, or it's a bigger lock,
+ // in which case we won't need to protect anything since we won't have the lock.
+
+ }
+ catch( std::exception& e ) {
+ conn.done();
+ throw LockException( str::stream() << "distributed lock " << lockName
+ << " had errors communicating with individual server "
+ << up[i].first << causedBy( e ), 13661 );
+ }
- for ( unsigned i=0; i<up.size(); i++ ) {
- ScopedDbConnection temp( up[i].first );
- BSONObj temp2 = temp->findOne( _ns , _id );
+ assert( !indUpdate.isEmpty() );
- if ( now.isEmpty() || now["ts"] < temp2["ts"] ) {
- now = temp2.getOwned();
+ // Find max TS value
+ if ( currLock.isEmpty() || currLock["ts"] < indUpdate["ts"] ) {
+ currLock = indUpdate.getOwned();
}
- temp.done();
+ indDB.done();
+
}
- if ( now["ts"].OID() == ts ) {
- log(4) << "dist_lock completed lock propagation" << endl;
+ // Locks on all servers are now set and safe until forcing
+
+ if ( currLock["ts"] == lockDetails["ts"] ) {
+ log( logLvl - 1 ) << "lock update won, completing lock propagation for '" << lockName << "'" << endl;
gotLock = true;
- conn->update( _ns , _id , whatIWant );
}
else {
- log() << "dist_lock error trying to complete propagation" << endl;
+ log( logLvl - 1 ) << "lock update lost, lock '" << lockName << "' not propagated." << endl;
+
+ // Register the lock for deletion, to speed up failover
+ // Not strictly necessary, but helpful
+ distLockPinger.addUnlockOID( lockDetails["ts"].OID() );
+
gotLock = false;
}
}
+ catch( std::exception& e ) {
+ conn.done();
+ throw LockException( str::stream() << "exception creating distributed lock "
+ << lockName << causedBy( e ), 13663 );
+ }
- conn.done();
+ // Complete lock propagation
+ if( gotLock ) {
+
+ // This is now safe, since we know that no new locks will be placed on top of the ones we've checked for at
+ // least 15 minutes. Sets the state = 2, so that future clients can determine that the lock is truly set.
+ // The invariant for rollbacks is that we will never force locks with state = 2 and active pings, since that
+ // indicates the lock is active, but this means the process creating/destroying them must explicitly poll
+ // when something goes wrong.
+ try {
+
+ BSONObjBuilder finalLockDetails;
+ BSONObjIterator bi( lockDetails );
+ while( bi.more() ) {
+ BSONElement el = bi.next();
+ if( (string) ( el.fieldName() ) == "state" )
+ finalLockDetails.append( "state", 2 );
+ else finalLockDetails.append( el );
+ }
+
+ conn->update( locksNS , _id , BSON( "$set" << finalLockDetails.obj() ) );
+
+ BSONObj err = conn->getLastErrorDetailed();
+ string errMsg = DBClientWithCommands::getLastErrorString(err);
+
+ currLock = conn->findOne( locksNS , _id );
- log(2) << "dist_lock lock gotLock: " << gotLock << " now: " << now << endl;
+ if ( !errMsg.empty() || !err["n"].type() || err["n"].numberInt() < 1 ) {
+ warning() << "could not finalize winning lock " << lockName
+ << ( !errMsg.empty() ? causedBy( errMsg ) : " (did not update lock) " ) << endl;
+ gotLock = false;
+ }
+ else {
+ // SUCCESS!
+ gotLock = true;
+ }
+
+ }
+ catch( std::exception& e ) {
+ conn.done();
+
+ // Register the bad final lock for deletion, in case it exists
+ distLockPinger.addUnlockOID( lockDetails["ts"].OID() );
+
+ throw LockException( str::stream() << "exception finalizing winning lock"
+ << causedBy( e ), 13662 );
+ }
+
+ }
+
+ *other = currLock;
+ other->getOwned();
+
+ // Log our lock results
+ if(gotLock)
+ log( logLvl - 1 ) << "distributed lock '" << lockName << "' acquired, ts : " << currLock["ts"].OID() << endl;
+ else
+ log( logLvl - 1 ) << "distributed lock '" << lockName << "' was not acquired." << endl;
+
+ conn.done();
return gotLock;
}
- void DistributedLock::unlock() {
+ // Unlock now takes an optional pointer to the lock, so you can be specific about which
+ // particular lock you want to unlock. This is required when the config server is down,
+ // and so cannot tell you what lock ts you should try later.
+ void DistributedLock::unlock( BSONObj* oldLockPtr ) {
+
+ assert( _name != "" );
+
+ string lockName = _name + string("/") + _processId;
+
const int maxAttempts = 3;
int attempted = 0;
+
+ BSONObj oldLock;
+ if( oldLockPtr ) oldLock = *oldLockPtr;
+
while ( ++attempted <= maxAttempts ) {
+ ScopedDbConnection conn( _conn );
+
try {
- ScopedDbConnection conn( _conn );
- conn->update( _ns , _id, BSON( "$set" << BSON( "state" << 0 ) ) );
- log(2) << "dist_lock unlock: " << conn->findOne( _ns , _id ) << endl;
- conn.done();
- return;
+ if( oldLock.isEmpty() )
+ oldLock = conn->findOne( locksNS, _id );
+
+ if( oldLock["state"].eoo() || oldLock["state"].numberInt() != 2 || oldLock["ts"].eoo() ) {
+ warning() << "cannot unlock invalid distributed lock " << oldLock << endl;
+ conn.done();
+ break;
+ }
+ // Use ts when updating lock, so that new locks can be sure they won't get trampled.
+ conn->update( locksNS ,
+ BSON( "_id" << _id["_id"].String() << "ts" << oldLock["ts"].OID() ),
+ BSON( "$set" << BSON( "state" << 0 ) ) );
+ // Check that the lock was actually unlocked... if not, try again
+ BSONObj err = conn->getLastErrorDetailed();
+ string errMsg = DBClientWithCommands::getLastErrorString(err);
+
+ if ( !errMsg.empty() || !err["n"].type() || err["n"].numberInt() < 1 ){
+ warning() << "distributed lock unlock update failed, retrying "
+ << ( errMsg.empty() ? causedBy( "( update not registered )" ) : causedBy( errMsg ) ) << endl;
+ conn.done();
+ continue;
+ }
+
+ log( logLvl - 1 ) << "distributed lock '" << lockName << "' unlocked. " << endl;
+ conn.done();
+ return;
+ }
+ catch( UpdateNotTheSame& ) {
+ log( logLvl - 1 ) << "distributed lock '" << lockName << "' unlocked (messily). " << endl;
+ conn.done();
+ break;
}
catch ( std::exception& e) {
- log( LL_WARNING ) << "dist_lock " << _name << " failed to contact config server in unlock attempt "
- << attempted << ": " << e.what() << endl;
+ warning() << "distributed lock '" << lockName << "' failed unlock attempt."
+ << causedBy( e ) << endl;
- sleepsecs(1 << attempted);
+ conn.done();
+ // TODO: If our lock timeout is small, sleeping this long may be unsafe.
+ if( attempted != maxAttempts) sleepsecs(1 << attempted);
}
}
- log( LL_WARNING ) << "dist_lock couldn't consumate unlock request. " << "Lock " << _name
- << " will be taken over after " << _takeoverMinutes << " minutes timeout" << endl;
+ if( attempted > maxAttempts && ! oldLock.isEmpty() && ! oldLock["ts"].eoo() ) {
+
+ log( logLvl - 1 ) << "could not unlock distributed lock with ts " << oldLock["ts"].OID()
+ << ", will attempt again later" << endl;
+
+ // We couldn't unlock the lock at all, so try again later in the pinging thread...
+ distLockPinger.addUnlockOID( oldLock["ts"].OID() );
+ }
+ else if( attempted > maxAttempts ) {
+ warning() << "could not unlock untracked distributed lock, a manual force may be required" << endl;
+ }
+
+ warning() << "distributed lock '" << lockName << "' couldn't consummate unlock request. "
+ << "lock may be taken over after " << ( _lockTimeout / (60 * 1000) )
+ << " minutes timeout." << endl;
}
+
+
}
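
Putting the pieces above together, a direct (non-RAII) caller checks cluster skew, tries the lock once, and later hands its own lock document back to unlock() for a targeted release. Host names and the lock name below are placeholders and error handling is trimmed to return codes; this is a sketch of the intended call pattern, not code from the tree.

    #include "client/distlock.h"   // path assumed

    bool doExclusiveWork() {
        using namespace mongo;

        ConnectionString configdb( "cfg1:27019,cfg2:27019,cfg3:27019", ConnectionString::SYNC );

        // Refuse to lock at all if the config servers' clocks disagree too much.
        if ( ! DistributedLock::checkSkew( configdb ) )
            return false;

        DistributedLock lk( configdb, "balancer" );          // default 15-minute LOCK_TIMEOUT

        BSONObj lockDoc;                                     // holder's doc if we lose, ours if we win
        try {
            if ( ! lk.lock_try( "periodic balancing round", false, &lockDoc ) )
                return false;                                // busy; safe to retry later
        }
        catch ( LockException& ) {
            return false;                                    // unexpected error; do not retry blindly
        }

        // ... critical section ...

        lk.unlock( &lockDoc );                               // targeted unlock using our own ts
        return true;
    }
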
diff --git a/client/distlock.h b/client/distlock.h
index 753a241..8985672 100644
--- a/client/distlock.h
+++ b/client/distlock.h
@@ -23,9 +23,42 @@
#include "redef_macros.h"
#include "syncclusterconnection.h"
+#define LOCK_TIMEOUT (15 * 60 * 1000)
+#define LOCK_SKEW_FACTOR (30)
+#define LOCK_PING (LOCK_TIMEOUT / LOCK_SKEW_FACTOR)
+#define MAX_LOCK_NET_SKEW (LOCK_TIMEOUT / LOCK_SKEW_FACTOR)
+#define MAX_LOCK_CLOCK_SKEW (LOCK_TIMEOUT / LOCK_SKEW_FACTOR)
+#define NUM_LOCK_SKEW_CHECKS (3)
+
+// The maximum clock skew we need to handle between config servers is
+// 2 * MAX_LOCK_NET_SKEW + MAX_LOCK_CLOCK_SKEW.
+
+// Net effect of *this* clock being slow is effectively a multiplier on the max net skew
+// and a linear increase or decrease of the max clock skew.
+
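
With the defaults above this works out to: LOCK_TIMEOUT = 15 * 60 * 1000 = 900,000 ms, so LOCK_PING = MAX_LOCK_NET_SKEW = MAX_LOCK_CLOCK_SKEW = 900,000 / 30 = 30,000 ms (30 seconds), and the worst-case config-server clock skew the locking code must tolerate is 2 * 30,000 + 30,000 = 90,000 ms (90 seconds).
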
namespace mongo {
/**
+ * Exception class to encapsulate exceptions while managing distributed locks
+ */
+ class LockException : public DBException {
+ public:
+ LockException( const char * msg , int code ) : DBException( msg, code ) {}
+ LockException( const string& msg, int code ) : DBException( msg, code ) {}
+ virtual ~LockException() throw() { }
+ };
+
+ /**
+ * Indicates an error in retrieving time values from remote servers.
+ */
+ class TimeNotFoundException : public LockException {
+ public:
+ TimeNotFoundException( const char * msg , int code ) : LockException( msg, code ) {}
+ TimeNotFoundException( const string& msg, int code ) : LockException( msg, code ) {}
+ virtual ~TimeNotFoundException() throw() { }
+ };
+
+ /**
* The distributed lock is a configdb backed way of synchronizing system-wide tasks. A task must be identified by a
* unique name across the system (e.g., "balancer"). A lock is taken by writing a document in the configdb's locks
* collection with that name.
@@ -36,53 +69,155 @@ namespace mongo {
class DistributedLock {
public:
+ static LabeledLevel logLvl;
+
/**
* The constructor does not connect to the configdb yet and constructing does not mean the lock was acquired.
* Construction does trigger a lock "pinging" mechanism, though.
*
* @param conn address of config(s) server(s)
* @param name identifier for the lock
- * @param takeoverMinutes how long can the log go "unpinged" before a new attempt to lock steals it (in minutes)
+     * @param lockTimeout how long can the lock go "unpinged" before a new attempt to lock steals it (in milliseconds).
+ * @param lockPing how long to wait between lock pings
+ * @param legacy use legacy logic
+ *
*/
- DistributedLock( const ConnectionString& conn , const string& name , unsigned takeoverMinutes = 15 );
+ DistributedLock( const ConnectionString& conn , const string& name , unsigned long long lockTimeout = 0, bool asProcess = false );
+ ~DistributedLock(){};
/**
- * Attempts to aquire 'this' lock, checking if it could or should be stolen from the previous holder. Please
+ * Attempts to acquire 'this' lock, checking if it could or should be stolen from the previous holder. Please
* consider using the dist_lock_try construct to acquire this lock in an exception safe way.
*
* @param why human readable description of why the lock is being taken (used to log)
- * @param other configdb's lock document that is currently holding the lock, if lock is taken
+     * @param reenter whether this is a lock re-entry or a new lock
+ * @param other configdb's lock document that is currently holding the lock, if lock is taken, or our own lock
+ * details if not
* @return true if it managed to grab the lock
*/
- bool lock_try( string why , BSONObj * other = 0 );
+ bool lock_try( const string& why , bool reenter = false, BSONObj * other = 0 );
/**
* Releases a previously taken lock.
*/
- void unlock();
+ void unlock( BSONObj* oldLockPtr = NULL );
+
+ Date_t getRemoteTime();
+
+ bool isRemoteTimeSkewed();
+
+ const string& getProcessId();
+
+ const ConnectionString& getRemoteConnection();
+
+ /**
+ * Check the skew between a cluster of servers
+ */
+ static bool checkSkew( const ConnectionString& cluster, unsigned skewChecks = NUM_LOCK_SKEW_CHECKS, unsigned long long maxClockSkew = MAX_LOCK_CLOCK_SKEW, unsigned long long maxNetSkew = MAX_LOCK_NET_SKEW );
+
+ /**
+ * Get the remote time from a server or cluster
+ */
+ static Date_t remoteTime( const ConnectionString& cluster, unsigned long long maxNetSkew = MAX_LOCK_NET_SKEW );
+
+ static bool killPinger( DistributedLock& lock );
+
+ /**
+ * Namespace for lock pings
+ */
+ static const string lockPingNS;
+
+ /**
+ * Namespace for locks
+ */
+ static const string locksNS;
+
+ const ConnectionString _conn;
+ const string _name;
+ const BSONObj _id;
+ const string _processId;
+
+ // Timeout for lock, usually LOCK_TIMEOUT
+ const unsigned long long _lockTimeout;
+ const unsigned long long _maxClockSkew;
+ const unsigned long long _maxNetSkew;
+ const unsigned long long _lockPing;
private:
- ConnectionString _conn;
- string _name;
- unsigned _takeoverMinutes;
- string _ns;
- BSONObj _id;
+ void resetLastPing(){
+ scoped_lock lk( _mutex );
+ _lastPingCheck = boost::tuple<string, Date_t, Date_t, OID>();
+ }
+
+ mongo::mutex _mutex;
+
+ // Data from last check of process with ping time
+ boost::tuple<string, Date_t, Date_t, OID> _lastPingCheck;
+ // May or may not exist, depending on startup
+ string _threadId;
+
};
class dist_lock_try {
public:
+
+ dist_lock_try() : _lock(NULL), _got(false) {}
+
+ dist_lock_try( const dist_lock_try& that ) : _lock(that._lock), _got(that._got), _other(that._other) {
+ _other.getOwned();
+
+ // Make sure the lock ownership passes to this object,
+ // so we only unlock once.
+ ((dist_lock_try&) that)._got = false;
+ ((dist_lock_try&) that)._lock = NULL;
+ ((dist_lock_try&) that)._other = BSONObj();
+ }
+
+ // Needed so we can handle lock exceptions in context of lock try.
+ dist_lock_try& operator=( const dist_lock_try& that ){
+
+ if( this == &that ) return *this;
+
+ _lock = that._lock;
+ _got = that._got;
+ _other = that._other;
+ _other.getOwned();
+ _why = that._why;
+
+ // Make sure the lock ownership passes to this object,
+ // so we only unlock once.
+ ((dist_lock_try&) that)._got = false;
+ ((dist_lock_try&) that)._lock = NULL;
+ ((dist_lock_try&) that)._other = BSONObj();
+
+ return *this;
+ }
+
dist_lock_try( DistributedLock * lock , string why )
- : _lock(lock) {
- _got = _lock->lock_try( why , &_other );
+ : _lock(lock), _why(why) {
+ _got = _lock->lock_try( why , false , &_other );
}
~dist_lock_try() {
if ( _got ) {
- _lock->unlock();
+ assert( ! _other.isEmpty() );
+ _lock->unlock( &_other );
}
}
+ bool reestablish(){
+ return retry();
+ }
+
+ bool retry() {
+ assert( _lock );
+ assert( _got );
+ assert( ! _other.isEmpty() );
+
+ return _got = _lock->lock_try( _why , true, &_other );
+ }
+
bool got() const { return _got; }
BSONObj other() const { return _other; }
@@ -90,6 +225,7 @@ namespace mongo {
DistributedLock * _lock;
bool _got;
BSONObj _other;
+ string _why;
};
}
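
The dist_lock_try guard is the exception-safe way to use the class above: the constructor attempts the lock, the destructor releases it by passing the saved lock document back to unlock(), and retry() performs the re-entry check described for lock_try(). A sketch with placeholder names:

    #include "client/distlock.h"   // path assumed

    void runBalancingRound( mongo::DistributedLock& balancerLock ) {
        using namespace mongo;

        dist_lock_try guard( &balancerLock, "balancing round" );
        if ( ! guard.got() )
            return;                    // someone else holds it; guard.other() has their lock doc

        // ... phase 1 of the work ...

        // Before a second long phase, confirm the finalized lock is still ours.
        if ( ! guard.retry() )
            return;                    // lost it; the destructor is now a no-op

        // ... phase 2 ...
    }                                  // ~dist_lock_try() unlocks here if still held
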
diff --git a/client/distlock_test.cpp b/client/distlock_test.cpp
index 83d143f..42a1c48 100644
--- a/client/distlock_test.cpp
+++ b/client/distlock_test.cpp
@@ -15,85 +15,123 @@
* limitations under the License.
*/
+#include <iostream>
#include "../pch.h"
#include "dbclient.h"
#include "distlock.h"
#include "../db/commands.h"
+#include "../util/bson_util.h"
+
+// Adjust Boost config for the RNG headers, since the default settings fail to compile under MSVC
+#include <boost/config.hpp>
+
+#if defined(BOOST_MSVC) && defined(BOOST_NO_MEMBER_TEMPLATE_FRIENDS)
+#undef BOOST_NO_MEMBER_TEMPLATE_FRIENDS
+#define BOOST_RNG_HACK
+#endif
+
+// Well, sort-of cross-platform RNG
+#include <boost/random/mersenne_twister.hpp>
+
+#ifdef BOOST_RNG_HACK
+#define BOOST_NO_MEMBER_TEMPLATE_FRIENDS
+#undef BOOST_RNG_HACK
+#endif
+
+
+#include <boost/random/uniform_int.hpp>
+#include <boost/random/variate_generator.hpp>
+
+
+// TODO: Make a method in BSONObj if useful, don't modify for now
+#define string_field(obj, name, def) ( obj.hasField(name) ? obj[name].String() : def )
+#define number_field(obj, name, def) ( obj.hasField(name) ? obj[name].Number() : def )
namespace mongo {
- class TestDistLockWithSync : public Command {
+ class TestDistLockWithSync: public Command {
public:
- TestDistLockWithSync() : Command( "_testDistLockWithSyncCluster" ) {}
- virtual void help( stringstream& help ) const {
+ TestDistLockWithSync() :
+ Command("_testDistLockWithSyncCluster") {
+ }
+ virtual void help(stringstream& help) const {
help << "should not be calling this directly" << endl;
}
- virtual bool slaveOk() const { return false; }
- virtual bool adminOnly() const { return true; }
- virtual LockType locktype() const { return NONE; }
+ virtual bool slaveOk() const {
+ return false;
+ }
+ virtual bool adminOnly() const {
+ return true;
+ }
+ virtual LockType locktype() const {
+ return NONE;
+ }
static void runThread() {
- while ( keepGoing ) {
- if ( current->lock_try( "test" ) ) {
+ while (keepGoing) {
+ if (current->lock_try( "test" )) {
count++;
int before = count;
- sleepmillis( 3 );
+ sleepmillis(3);
int after = count;
-
- if ( after != before ) {
- error() << " before: " << before << " after: " << after << endl;
+
+ if (after != before) {
+ error() << " before: " << before << " after: " << after
+ << endl;
}
-
+
current->unlock();
}
}
}
-
- bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+
+ bool run(const string&, BSONObj& cmdObj, int, string& errmsg,
+ BSONObjBuilder& result, bool) {
Timer t;
- DistributedLock lk( ConnectionString( cmdObj["host"].String() , ConnectionString::SYNC ), "testdistlockwithsync" );
+ DistributedLock lk(ConnectionString(cmdObj["host"].String(),
+ ConnectionString::SYNC), "testdistlockwithsync", 0, 0);
current = &lk;
count = 0;
gotit = 0;
errors = 0;
keepGoing = true;
-
+
vector<shared_ptr<boost::thread> > l;
- for ( int i=0; i<4; i++ ) {
- l.push_back( shared_ptr<boost::thread>( new boost::thread( runThread ) ) );
+ for (int i = 0; i < 4; i++) {
+ l.push_back(
+ shared_ptr<boost::thread> (new boost::thread(runThread)));
}
-
+
int secs = 10;
- if ( cmdObj["secs"].isNumber() )
+ if (cmdObj["secs"].isNumber())
secs = cmdObj["secs"].numberInt();
- sleepsecs( secs );
+ sleepsecs(secs);
keepGoing = false;
- for ( unsigned i=0; i<l.size(); i++ )
+ for (unsigned i = 0; i < l.size(); i++)
l[i]->join();
current = 0;
- result.append( "count" , count );
- result.append( "gotit" , gotit );
- result.append( "errors" , errors );
- result.append( "timeMS" , t.millis() );
+ result.append("count", count);
+ result.append("gotit", gotit);
+ result.append("errors", errors);
+ result.append("timeMS", t.millis());
return errors == 0;
}
-
+
// variables for test
static DistributedLock * current;
static int gotit;
static int errors;
static AtomicUInt count;
-
+
static bool keepGoing;
} testDistLockWithSyncCmd;
-
DistributedLock * TestDistLockWithSync::current;
AtomicUInt TestDistLockWithSync::count;
int TestDistLockWithSync::gotit;
@@ -101,4 +139,300 @@ namespace mongo {
bool TestDistLockWithSync::keepGoing;
+
+ class TestDistLockWithSkew: public Command {
+ public:
+
+ static const int logLvl = 1;
+
+ TestDistLockWithSkew() :
+ Command("_testDistLockWithSkew") {
+ }
+ virtual void help(stringstream& help) const {
+ help << "should not be calling this directly" << endl;
+ }
+
+ virtual bool slaveOk() const {
+ return false;
+ }
+ virtual bool adminOnly() const {
+ return true;
+ }
+ virtual LockType locktype() const {
+ return NONE;
+ }
+
+ void runThread(ConnectionString& hostConn, unsigned threadId, unsigned seed,
+ BSONObj& cmdObj, BSONObjBuilder& result) {
+
+ stringstream ss;
+ ss << "thread-" << threadId;
+ setThreadName(ss.str().c_str());
+
+ // Lock name
+ string lockName = string_field(cmdObj, "lockName", this->name + "_lock");
+
+ // Range of clock skew in diff threads
+ int skewRange = (int) number_field(cmdObj, "skewRange", 1);
+
+ // How long to wait with the lock
+ int threadWait = (int) number_field(cmdObj, "threadWait", 30);
+ if(threadWait <= 0) threadWait = 1;
+
+ // Max amount of time (ms) a thread waits before checking the lock again
+ int threadSleep = (int) number_field(cmdObj, "threadSleep", 30);
+ if(threadSleep <= 0) threadSleep = 1;
+
+ // How long until the lock is forced in ms, only compared locally
+ unsigned long long takeoverMS = (unsigned long long) number_field(cmdObj, "takeoverMS", 0);
+
+ // Whether or not we should hang some threads
+ int hangThreads = (int) number_field(cmdObj, "hangThreads", 0);
+
+
+ boost::mt19937 gen((boost::mt19937::result_type) seed);
+
+ boost::variate_generator<boost::mt19937&, boost::uniform_int<> > randomSkew(gen, boost::uniform_int<>(0, skewRange));
+ boost::variate_generator<boost::mt19937&, boost::uniform_int<> > randomWait(gen, boost::uniform_int<>(1, threadWait));
+ boost::variate_generator<boost::mt19937&, boost::uniform_int<> > randomSleep(gen, boost::uniform_int<>(1, threadSleep));
+
+
+ int skew = 0;
+ if (!lock.get()) {
+
+ // Pick a skew, but the first two threads skew the whole range
+ if(threadId == 0)
+ skew = -skewRange / 2;
+ else if(threadId == 1)
+ skew = skewRange / 2;
+ else skew = randomSkew() - (skewRange / 2);
+
+ // Skew this thread
+ jsTimeVirtualThreadSkew( skew );
+
+ log() << "Initializing lock with skew of " << skew << " for thread " << threadId << endl;
+
+ lock.reset(new DistributedLock(hostConn, lockName, takeoverMS, true ));
+
+ log() << "Skewed time " << jsTime() << " for thread " << threadId << endl
+ << " max wait (with lock: " << threadWait << ", after lock: " << threadSleep << ")" << endl
+ << " takeover in " << takeoverMS << "(ms remote)" << endl;
+
+ }
+
+ DistributedLock* myLock = lock.get();
+
+ bool errors = false;
+ BSONObj lockObj;
+ while (keepGoing) {
+ try {
+
+ if (myLock->lock_try("Testing distributed lock with skew.", false, &lockObj )) {
+
+ log() << "**** Locked for thread " << threadId << " with ts " << lockObj["ts"] << endl;
+
+ if( count % 2 == 1 && ! myLock->lock_try( "Testing lock re-entry.", true ) ) {
+ errors = true;
+ log() << "**** !Could not re-enter lock already held" << endl;
+ break;
+ }
+
+ if( count % 3 == 1 && myLock->lock_try( "Testing lock non-re-entry.", false ) ) {
+ errors = true;
+ log() << "**** !Invalid lock re-entry" << endl;
+ break;
+ }
+
+ count++;
+ int before = count;
+ int sleep = randomWait();
+ sleepmillis(sleep);
+ int after = count;
+
+ if(after != before) {
+ errors = true;
+ log() << "**** !Bad increment while sleeping with lock for: " << sleep << "ms" << endl;
+ break;
+ }
+
+ // Unlock unless this thread is designated to hang and simulate a crashed process...
+ if(hangThreads == 0 || threadId % hangThreads != 0) {
+ log() << "**** Unlocking for thread " << threadId << " with ts " << lockObj["ts"] << endl;
+ myLock->unlock( &lockObj );
+ }
+ else {
+ log() << "**** Not unlocking for thread " << threadId << endl;
+ DistributedLock::killPinger( *myLock );
+ // We're simulating a crashed process...
+ break;
+ }
+ }
+
+ }
+ catch( LockException& e ) {
+ log() << "*** !Could not try distributed lock." << causedBy( e ) << endl;
+ break;
+ }
+
+ sleepmillis(randomSleep());
+ }
+
+ result << "errors" << errors
+ << "skew" << skew
+ << "takeover" << (long long) takeoverMS
+ << "localTimeout" << (takeoverMS > 0);
+
+ }
+
+ void test(ConnectionString& hostConn, string& lockName, unsigned seed) {
+ return;
+ }
+
+ bool run(const string&, BSONObj& cmdObj, int, string& errmsg,
+ BSONObjBuilder& result, bool) {
+
+ Timer t;
+
+ ConnectionString hostConn(cmdObj["host"].String(),
+ ConnectionString::SYNC);
+
+ unsigned seed = (unsigned) number_field(cmdObj, "seed", 0);
+ int numThreads = (int) number_field(cmdObj, "numThreads", 4);
+ int wait = (int) number_field(cmdObj, "wait", 10000);
+
+ log() << "Starting " << this->name << " with -" << endl
+ << " seed: " << seed << endl
+ << " numThreads: " << numThreads << endl
+ << " total wait: " << wait << endl << endl;
+
+ // Skew host clocks if needed
+ try {
+ skewClocks( hostConn, cmdObj );
+ }
+ catch( DBException& e ) {
+ errmsg = str::stream() << "Clocks could not be skewed." << causedBy( e );
+ return false;
+ }
+
+ count = 0;
+ keepGoing = true;
+
+ vector<shared_ptr<boost::thread> > threads;
+ vector<shared_ptr<BSONObjBuilder> > results;
+ for (int i = 0; i < numThreads; i++) {
+ results.push_back(shared_ptr<BSONObjBuilder> (new BSONObjBuilder()));
+ threads.push_back(shared_ptr<boost::thread> (new boost::thread(
+ boost::bind(&TestDistLockWithSkew::runThread, this,
+ hostConn, (unsigned) i, seed + i, boost::ref(cmdObj),
+ boost::ref(*(results[i].get()))))));
+ }
+
+ sleepsecs(wait / 1000);
+ keepGoing = false;
+
+ bool errors = false;
+ for (unsigned i = 0; i < threads.size(); i++) {
+ threads[i]->join();
+ errors = errors || results[i].get()->obj()["errors"].Bool();
+ }
+
+ result.append("count", count);
+ result.append("errors", errors);
+ result.append("timeMS", t.millis());
+
+ return !errors;
+
+ }
+
+ /**
+ * Skews the clocks of a remote cluster by a particular amount, specified by
+ * the "skewHosts" element in a BSONObj.
+ */
+ static void skewClocks( ConnectionString& cluster, BSONObj& cmdObj ) {
+
+ vector<long long> skew;
+ if(cmdObj.hasField("skewHosts")) {
+ bsonArrToNumVector<long long>(cmdObj["skewHosts"], skew);
+ }
+ else {
+ log( logLvl ) << "No host clocks to skew." << endl;
+ return;
+ }
+
+ log( logLvl ) << "Skewing clocks of hosts " << cluster << endl;
+
+ unsigned s = 0;
+ for(vector<long long>::iterator i = skew.begin(); i != skew.end(); ++i,s++) {
+
+ ConnectionString server( cluster.getServers()[s] );
+ ScopedDbConnection conn( server );
+
+ BSONObj result;
+ try {
+ bool success = conn->runCommand( string("admin"), BSON( "_skewClockCommand" << 1 << "skew" << *i ), result );
+
+ uassert(13678, str::stream() << "Could not communicate with server " << server.toString() << " in cluster " << cluster.toString() << " to change skew by " << *i, success );
+
+ log( logLvl + 1 ) << " Skewed host " << server << " clock by " << *i << endl;
+ }
+ catch(...) {
+ conn.done();
+ throw;
+ }
+
+ conn.done();
+
+ }
+
+ }
+
+ // variables for test
+ thread_specific_ptr<DistributedLock> lock;
+ AtomicUInt count;
+ bool keepGoing;
+
+ } testDistLockWithSkewCmd;
+
+
+ /**
+ * Utility command to virtually skew the clock of a mongo server a particular amount.
+ * This skews the clock globally, per-thread skew is also possible.
+ */
+ class SkewClockCommand: public Command {
+ public:
+ SkewClockCommand() :
+ Command("_skewClockCommand") {
+ }
+ virtual void help(stringstream& help) const {
+ help << "should not be calling this directly" << endl;
+ }
+
+ virtual bool slaveOk() const {
+ return false;
+ }
+ virtual bool adminOnly() const {
+ return true;
+ }
+ virtual LockType locktype() const {
+ return NONE;
+ }
+
+ bool run(const string&, BSONObj& cmdObj, int, string& errmsg,
+ BSONObjBuilder& result, bool) {
+
+ long long skew = (long long) number_field(cmdObj, "skew", 0);
+
+ log() << "Adjusting jsTime() clock skew to " << skew << endl;
+
+ jsTimeVirtualSkew( skew );
+
+ log() << "JSTime adjusted, now is " << jsTime() << endl;
+
+ return true;
+
+ }
+
+ } testSkewClockCommand;
+
}
+
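For reference, a minimal sketch of driving the test commands added above from the C++ client. The command and field names come from the code above; the host names, port, and parameter values are illustrative assumptions only.

    // Sketch only: exercise _skewClockCommand and _testDistLockWithSkew against a
    // hypothetical 3-node sync cluster. Not part of this change set.
    #include "client/dbclient.h"
    using namespace mongo;

    void runSkewTests() {
        DBClientConnection c;
        string errmsg;
        if ( ! c.connect( "localhost:27019" , errmsg ) )   // assumed config server address
            return;

        BSONObj res;
        // Skew this server's virtual jsTime() clock by 5 seconds.
        c.runCommand( "admin" , BSON( "_skewClockCommand" << 1 << "skew" << 5000LL ) , res );

        // Run the distributed lock test with per-host clock skews.
        c.runCommand( "admin" ,
                      BSON( "_testDistLockWithSkew" << 1
                            << "host" << "cfg1:27019,cfg2:27019,cfg3:27019"
                            << "seed" << 1234 << "numThreads" << 4 << "wait" << 10000
                            << "skewHosts" << BSON_ARRAY( -5000LL << 0LL << 5000LL ) ) ,
                      res );
    }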
diff --git a/client/examples/clientTest.cpp b/client/examples/clientTest.cpp
index 96c014e..aaea6bd 100644
--- a/client/examples/clientTest.cpp
+++ b/client/examples/clientTest.cpp
@@ -246,5 +246,34 @@ int main( int argc, const char **argv ) {
//MONGO_PRINT(out);
}
+ {
+ // test timeouts
+
+ DBClientConnection conn( true , 0 , 2 );
+ if ( ! conn.connect( string( "127.0.0.1:" ) + port , errmsg ) ) {
+ cout << "couldn't connect : " << errmsg << endl;
+ throw -11;
+ }
+ conn.insert( "test.totest" , BSON( "x" << 1 ) );
+ BSONObj res;
+
+ bool gotError = false;
+ assert( conn.eval( "test" , "return db.totest.findOne().x" , res ) );
+ try {
+ conn.eval( "test" , "sleep(5000); return db.totest.findOne().x" , res );
+ }
+ catch ( std::exception& e ) {
+ gotError = true;
+ log() << e.what() << endl;
+ }
+ assert( gotError );
+ // sleep so the server isn't locked anymore
+ sleepsecs( 4 );
+
+ assert( conn.eval( "test" , "return db.totest.findOne().x" , res ) );
+
+
+ }
+
cout << "client test finished!" << endl;
}
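A note on the timeout test added above: the third DBClientConnection constructor argument is the per-connection socket timeout in seconds (part of the so_timeout support introduced in this change set), so the eval() that sleeps for five seconds on the server is expected to trip the two-second client-side timeout, while the plain eval() calls before and after it should succeed.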
diff --git a/client/examples/httpClientTest.cpp b/client/examples/httpClientTest.cpp
index 4fa5fd8..4055d44 100644
--- a/client/examples/httpClientTest.cpp
+++ b/client/examples/httpClientTest.cpp
@@ -18,10 +18,27 @@
#include <iostream>
#include "client/dbclient.h"
-#include "util/httpclient.h"
+#include "util/net/httpclient.h"
using namespace mongo;
+void play( string url ) {
+ cout << "[" << url << "]" << endl;
+
+ HttpClient c;
+ HttpClient::Result r;
+ MONGO_assert( c.get( url , &r ) == 200 );
+
+ HttpClient::Headers h = r.getHeaders();
+ MONGO_assert( h["Content-Type"].find( "text/html" ) == 0 );
+
+ cout << "\tHeaders" << endl;
+ for ( HttpClient::Headers::iterator i = h.begin() ; i != h.end(); ++i ) {
+ cout << "\t\t" << i->first << "\t" << i->second << endl;
+ }
+
+}
+
int main( int argc, const char **argv ) {
int port = 27017;
@@ -32,12 +49,10 @@ int main( int argc, const char **argv ) {
}
port += 1000;
- stringstream ss;
- ss << "http://localhost:" << port << "/";
- string url = ss.str();
-
- cout << "[" << url << "]" << endl;
-
- HttpClient c;
- MONGO_assert( c.get( url ) == 200 );
+ play( str::stream() << "http://localhost:" << port << "/" );
+
+#ifdef MONGO_SSL
+ play( "https://www.10gen.com/" );
+#endif
+
}
diff --git a/client/examples/insert_demo.cpp b/client/examples/insert_demo.cpp
new file mode 100644
index 0000000..14ac79e
--- /dev/null
+++ b/client/examples/insert_demo.cpp
@@ -0,0 +1,47 @@
+/*
+ C++ client program which inserts documents in a MongoDB database.
+
+ How to build and run:
+
+ Using mongo_client_lib.cpp:
+ g++ -I .. -I ../.. insert_demo.cpp ../mongo_client_lib.cpp -lboost_thread-mt -lboost_filesystem
+ ./a.out
+*/
+
+#include <iostream>
+#include "dbclient.h" // the mongo c++ driver
+
+using namespace std;
+using namespace mongo;
+using namespace bson;
+
+int main() {
+ try {
+ cout << "connecting to localhost..." << endl;
+ DBClientConnection c;
+ c.connect("localhost");
+ cout << "connected ok" << endl;
+
+ bo o = BSON( "hello" << "world" );
+
+ cout << "inserting..." << endl;
+
+ time_t start = time(0);
+ for( unsigned i = 0; i < 1000000; i++ ) {
+ c.insert("test.foo", o);
+ }
+
+ // wait until all operations applied
+ cout << "getlasterror returns: \"" << c.getLastError() << '"' << endl;
+
+ time_t done = time(0);
+ time_t dt = done-start;
+ if( dt == 0 ) dt = 1; // avoid divide-by-zero if the inserts finish in under a second
+ cout << dt << " seconds " << 1000000/dt << " per second" << endl;
+ }
+ catch(DBException& e) {
+ cout << "caught DBException " << e.toString() << endl;
+ return 1;
+ }
+
+ return 0;
+}
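The header comment in insert_demo.cpp builds by compiling mongo_client_lib.cpp directly; an equivalent invocation against the prebuilt static library would look roughly like the following (assuming libmongoclient.a has already been built and is on the linker path):

    g++ -I .. -I ../.. insert_demo.cpp -lmongoclient -lboost_thread-mt -lboost_filesystem
    ./a.out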
diff --git a/client/examples/rs.cpp b/client/examples/rs.cpp
index 7813ec6..3307d87 100644
--- a/client/examples/rs.cpp
+++ b/client/examples/rs.cpp
@@ -21,11 +21,62 @@
#include "client/dbclient.h"
#include <iostream>
+#include <vector>
using namespace mongo;
using namespace std;
+void workerThread( string collName , bool print , DBClientReplicaSet * conn ) {
+
+ while ( true ) {
+ try {
+ conn->update( collName , BSONObj() , BSON( "$inc" << BSON( "x" << 1 ) ) , true );
+
+ BSONObj x = conn->findOne( collName , BSONObj() );
+
+ if ( print ) {
+ cout << x << endl;
+ }
+
+ BSONObj a = conn->slaveConn().findOne( collName , BSONObj() , 0 , QueryOption_SlaveOk );
+ BSONObj b = conn->findOne( collName , BSONObj() , 0 , QueryOption_SlaveOk );
+
+ if ( print ) {
+ cout << "\t A " << a << endl;
+ cout << "\t B " << b << endl;
+ }
+ }
+ catch ( std::exception& e ) {
+ cout << "ERROR: " << e.what() << endl;
+ }
+ sleepmillis( 10 );
+ }
+}
+
int main( int argc , const char ** argv ) {
+
+ unsigned nThreads = 1;
+ bool print = false;
+ bool testTimeout = false;
+
+ for ( int i=1; i<argc; i++ ) {
+ if ( mongoutils::str::equals( "--threads" , argv[i] ) ) {
+ nThreads = atoi( argv[++i] );
+ }
+ else if ( mongoutils::str::equals( "--print" , argv[i] ) ) {
+ print = true;
+ }
+ // Run a special mode to demonstrate the DBClientReplicaSet so_timeout option.
+ else if ( mongoutils::str::equals( "--testTimeout" , argv[i] ) ) {
+ testTimeout = true;
+ }
+ else {
+ cerr << "unknown option: " << argv[i] << endl;
+ return 1;
+ }
+
+ }
+
string errmsg;
ConnectionString cs = ConnectionString::parse( "foo/127.0.0.1" , errmsg );
if ( ! cs.isValid() ) {
@@ -33,7 +84,7 @@ int main( int argc , const char ** argv ) {
return 1;
}
- DBClientReplicaSet * conn = (DBClientReplicaSet*)cs.connect( errmsg );
+ DBClientReplicaSet * conn = dynamic_cast<DBClientReplicaSet*>(cs.connect( errmsg, testTimeout ? 10 : 0 ));
if ( ! conn ) {
cout << "error connecting: " << errmsg << endl;
return 2;
@@ -42,17 +93,26 @@ int main( int argc , const char ** argv ) {
string collName = "test.rs1";
conn->dropCollection( collName );
- while ( true ) {
+
+ if ( testTimeout ) {
+ conn->insert( collName, BSONObj() );
try {
- conn->update( collName , BSONObj() , BSON( "$inc" << BSON( "x" << 1 ) ) , true );
- cout << conn->findOne( collName , BSONObj() ) << endl;
- cout << "\t A" << conn->slaveConn().findOne( collName , BSONObj() , 0 , QueryOption_SlaveOk ) << endl;
- cout << "\t B " << conn->findOne( collName , BSONObj() , 0 , QueryOption_SlaveOk ) << endl;
- }
- catch ( std::exception& e ) {
- cout << "ERROR: " << e.what() << endl;
+ conn->count( collName, BSON( "$where" << "sleep(40000)" ) );
+ } catch( DBException& ) {
+ return 0;
}
- sleepsecs( 1 );
+ cout << "expected socket exception" << endl;
+ return 1;
+ }
+
+ vector<boost::shared_ptr<boost::thread> > threads;
+ for ( unsigned i=0; i<nThreads; i++ ) {
+ string errmsg;
+ threads.push_back( boost::shared_ptr<boost::thread>( new boost::thread( boost::bind( workerThread , collName , print , (DBClientReplicaSet*)cs.connect(errmsg) ) ) ) );
+ }
+
+ for ( unsigned i=0; i<threads.size(); i++ ) {
+ threads[i]->join();
}
}
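Usage note for the reworked replica-set example: running it with --threads N (and optionally --print) starts N worker threads that update test.rs1 and read it back from both the primary and a secondary, while --testTimeout connects with a 10-second socket timeout and expects the $where sleep(40000) count to raise a socket exception.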
diff --git a/client/examples/simple_client_demo.vcxproj b/client/examples/simple_client_demo.vcxproj
new file mode 100755
index 0000000..4658a42
--- /dev/null
+++ b/client/examples/simple_client_demo.vcxproj
@@ -0,0 +1,92 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <ItemGroup Label="ProjectConfigurations">
+ <ProjectConfiguration Include="Debug|Win32">
+ <Configuration>Debug</Configuration>
+ <Platform>Win32</Platform>
+ </ProjectConfiguration>
+ <ProjectConfiguration Include="Release|Win32">
+ <Configuration>Release</Configuration>
+ <Platform>Win32</Platform>
+ </ProjectConfiguration>
+ </ItemGroup>
+ <PropertyGroup Label="Globals">
+ <ProjectGuid>{89C30BC3-2874-4F2C-B4DA-EB04E9782236}</ProjectGuid>
+ <Keyword>Win32Proj</Keyword>
+ <RootNamespace>simple_client_demo</RootNamespace>
+ </PropertyGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <UseDebugLibraries>true</UseDebugLibraries>
+ <CharacterSet>Unicode</CharacterSet>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <UseDebugLibraries>false</UseDebugLibraries>
+ <WholeProgramOptimization>true</WholeProgramOptimization>
+ <CharacterSet>Unicode</CharacterSet>
+ </PropertyGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
+ <ImportGroup Label="ExtensionSettings">
+ </ImportGroup>
+ <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <PropertyGroup Label="UserMacros" />
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ <LinkIncremental>true</LinkIncremental>
+ <IncludePath>..\..;..\..\pcre-7.4;$(IncludePath)</IncludePath>
+ <LibraryPath>\boost\lib\vs2010_32;$(LibraryPath)</LibraryPath>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ <LinkIncremental>false</LinkIncremental>
+ <IncludePath>..\..;..\..\pcre-7.4;$(IncludePath)</IncludePath>
+ <LibraryPath>\boost\lib\vs2010_32;$(LibraryPath)</LibraryPath>
+ </PropertyGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ <ClCompile>
+ <PrecompiledHeader>
+ </PrecompiledHeader>
+ <WarningLevel>Level3</WarningLevel>
+ <Optimization>Disabled</Optimization>
+ <PreprocessorDefinitions> _CRT_SECURE_NO_WARNINGS;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <AdditionalIncludeDirectories>c:\boost;\boost</AdditionalIncludeDirectories>
+ </ClCompile>
+ <Link>
+ <SubSystem>Console</SubSystem>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <AdditionalDependencies>ws2_32.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ <ClCompile>
+ <WarningLevel>Level3</WarningLevel>
+ <PrecompiledHeader>
+ </PrecompiledHeader>
+ <Optimization>MaxSpeed</Optimization>
+ <FunctionLevelLinking>true</FunctionLevelLinking>
+ <IntrinsicFunctions>true</IntrinsicFunctions>
+ <PreprocessorDefinitions> _CRT_SECURE_NO_WARNINGS;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
+ <AdditionalIncludeDirectories>c:\boost;\boost</AdditionalIncludeDirectories>
+ </ClCompile>
+ <Link>
+ <SubSystem>Console</SubSystem>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>
+ <OptimizeReferences>true</OptimizeReferences>
+ <AdditionalDependencies>ws2_32.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemGroup>
+ <ClCompile Include="..\mongo_client_lib.cpp" />
+ <ClCompile Include="..\simple_client_demo.cpp" />
+ </ItemGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
+ <ImportGroup Label="ExtensionTargets">
+ </ImportGroup>
+</Project> \ No newline at end of file
diff --git a/client/examples/simple_client_demo.vcxproj.filters b/client/examples/simple_client_demo.vcxproj.filters
new file mode 100755
index 0000000..d6580c3
--- /dev/null
+++ b/client/examples/simple_client_demo.vcxproj.filters
@@ -0,0 +1,21 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <ItemGroup>
+ <Filter Include="Source Files">
+ <UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>
+ <Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>
+ </Filter>
+ <Filter Include="Header Files">
+ <UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>
+ <Extensions>h;hpp;hxx;hm;inl;inc;xsd</Extensions>
+ </Filter>
+ </ItemGroup>
+ <ItemGroup>
+ <ClCompile Include="..\simple_client_demo.cpp">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\mongo_client_lib.cpp">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ </ItemGroup>
+</Project> \ No newline at end of file
diff --git a/client/examples/whereExample.cpp b/client/examples/whereExample.cpp
index ce4174b..12b68d7 100644
--- a/client/examples/whereExample.cpp
+++ b/client/examples/whereExample.cpp
@@ -1,4 +1,5 @@
-// whereExample.cpp
+// @file whereExample.cpp
+// @see http://www.mongodb.org/display/DOCS/Server-side+Code+Execution
/* Copyright 2009 10gen Inc.
*
diff --git a/client/mongo_client_lib.cpp b/client/mongo_client_lib.cpp
index 69f801a..8100d71 100644
--- a/client/mongo_client_lib.cpp
+++ b/client/mongo_client_lib.cpp
@@ -4,13 +4,23 @@
Normally one includes dbclient.h, and links against libmongoclient.a, when connecting to MongoDB
from C++. However, if you have a situation where the pre-built library does not work, you can use
- this file instead to build all the necessary symbols. To do so, include client_lib.cpp in your
+ this file instead to build all the necessary symbols. To do so, include mongo_client_lib.cpp in your
project.
+ GCC
+ ---
For example, to build and run simple_client_demo.cpp with GCC and run it:
g++ -I .. simple_client_demo.cpp mongo_client_lib.cpp -lboost_thread-mt -lboost_filesystem
./a.out
+
+ Visual Studio (2010 tested)
+ ---------------------------
+ First, see client/examples/simple_client_demo.vcxproj.
+ - Be sure to include your boost include directory in your project as an Additional Include Directory.
+ - Define _CRT_SECURE_NO_WARNINGS to avoid warnings on use of strncpy and such by the MongoDB client code.
+ - Include the boost libraries directory.
+ - Linker.Input.Additional Dependencies - add ws2_32.lib for the Winsock library.
*/
/* Copyright 2009 10gen Inc.
@@ -28,23 +38,30 @@
* limitations under the License.
*/
+#if defined(_WIN32)
+// C4800 forcing value to bool 'true' or 'false' (performance warning)
+#pragma warning( disable : 4800 )
+#endif
+
#include "../util/md5main.cpp"
#define MONGO_EXPOSE_MACROS
#include "../pch.h"
#include "../util/assert_util.cpp"
-#include "../util/message.cpp"
+#include "../util/net/message.cpp"
#include "../util/util.cpp"
#include "../util/background.cpp"
#include "../util/base64.cpp"
-#include "../util/sock.cpp"
+#include "../util/net/sock.cpp"
#include "../util/log.cpp"
#include "../util/password.cpp"
+#include "../util/net/message_port.cpp"
#include "../util/concurrency/thread_pool.cpp"
#include "../util/concurrency/vars.cpp"
#include "../util/concurrency/task.cpp"
+#include "../util/concurrency/spin_lock.cpp"
#include "connpool.cpp"
#include "syncclusterconnection.cpp"
@@ -53,13 +70,19 @@
#include "gridfs.cpp"
#include "dbclientcursor.cpp"
+#include "../util/text.cpp"
+#include "dbclient_rs.cpp"
+#include "../bson/oid.cpp"
+
#include "../db/lasterror.cpp"
#include "../db/json.cpp"
#include "../db/jsobj.cpp"
-#include "../db/common.cpp"
+//#include "../db/common.cpp"
#include "../db/nonce.cpp"
#include "../db/commands.cpp"
+#include "../pch.cpp"
+
extern "C" {
#include "../util/md5.c"
}
diff --git a/client/parallel.cpp b/client/parallel.cpp
index c4905e3..76b0168 100644
--- a/client/parallel.cpp
+++ b/client/parallel.cpp
@@ -63,7 +63,20 @@ namespace mongo {
_init();
}
- auto_ptr<DBClientCursor> ClusteredCursor::query( const string& server , int num , BSONObj extra , int skipLeft ) {
+ void ClusteredCursor::_checkCursor( DBClientCursor * cursor ) {
+ assert( cursor );
+
+ if ( cursor->hasResultFlag( ResultFlag_ShardConfigStale ) ) {
+ throw StaleConfigException( _ns , "ClusteredCursor::query" );
+ }
+
+ if ( cursor->hasResultFlag( ResultFlag_ErrSet ) ) {
+ BSONObj o = cursor->next();
+ throw UserException( o["code"].numberInt() , o["$err"].String() );
+ }
+ }
+
+ auto_ptr<DBClientCursor> ClusteredCursor::query( const string& server , int num , BSONObj extra , int skipLeft , bool lazy ) {
uassert( 10017 , "cursor already done" , ! _done );
assert( _didInit );
@@ -80,12 +93,10 @@ namespace mongo {
throw StaleConfigException( _ns , "ClusteredCursor::query ShardConnection had to change" , true );
}
- if ( logLevel >= 5 ) {
- log(5) << "ClusteredCursor::query (" << type() << ") server:" << server
- << " ns:" << _ns << " query:" << q << " num:" << num
- << " _fields:" << _fields << " options: " << _options << endl;
- }
-
+ LOG(5) << "ClusteredCursor::query (" << type() << ") server:" << server
+ << " ns:" << _ns << " query:" << q << " num:" << num
+ << " _fields:" << _fields << " options: " << _options << endl;
+
auto_ptr<DBClientCursor> cursor =
conn->query( _ns , q , num , 0 , ( _fields.isEmpty() ? 0 : &_fields ) , _options , _batchSize == 0 ? 0 : _batchSize + skipLeft );
@@ -97,21 +108,9 @@ namespace mongo {
massert( 13633 , str::stream() << "error querying server: " << server , cursor.get() );
- if ( cursor->hasResultFlag( ResultFlag_ShardConfigStale ) ) {
- conn.done();
- throw StaleConfigException( _ns , "ClusteredCursor::query" );
- }
-
- if ( cursor->hasResultFlag( ResultFlag_ErrSet ) ) {
- conn.done();
- BSONObj o = cursor->next();
- throw UserException( o["code"].numberInt() , o["$err"].String() );
- }
-
-
- cursor->attach( &conn );
-
- conn.done();
+ cursor->attach( &conn ); // this calls done on conn
+ assert( ! conn.ok() );
+ _checkCursor( cursor.get() );
return cursor;
}
catch ( SocketException& e ) {
@@ -228,6 +227,11 @@ namespace mongo {
: _matcher( filter ) , _cursor( cursor ) , _done( cursor.get() == 0 ) {
}
+ FilteringClientCursor::FilteringClientCursor( DBClientCursor* cursor , const BSONObj filter )
+ : _matcher( filter ) , _cursor( cursor ) , _done( cursor == 0 ) {
+ }
+
+
FilteringClientCursor::~FilteringClientCursor() {
}
@@ -237,6 +241,13 @@ namespace mongo {
_done = _cursor.get() == 0;
}
+ void FilteringClientCursor::reset( DBClientCursor* cursor ) {
+ _cursor.reset( cursor );
+ _next = BSONObj();
+ _done = cursor == 0;
+ }
+
+
bool FilteringClientCursor::more() {
if ( ! _next.isEmpty() )
return true;
@@ -399,17 +410,245 @@ namespace mongo {
}
}
+ // TODO: Merge with futures API? We do a lot of error checking here that would be useful elsewhere.
void ParallelSortClusteredCursor::_init() {
+
+ // log() << "Starting parallel search..." << endl;
+
+ // make sure we're not already initialized
assert( ! _cursors );
_cursors = new FilteringClientCursor[_numServers];
- // TODO: parellize
- int num = 0;
- for ( set<ServerAndQuery>::iterator i = _servers.begin(); i!=_servers.end(); ++i ) {
- const ServerAndQuery& sq = *i;
- _cursors[num++].reset( query( sq._server , 0 , sq._extra , _needToSkip ) );
+ bool returnPartial = ( _options & QueryOption_PartialResults );
+
+ vector<ServerAndQuery> queries( _servers.begin(), _servers.end() );
+ set<int> retryQueries;
+ int finishedQueries = 0;
+
+ vector< shared_ptr<ShardConnection> > conns;
+ vector<string> servers;
+
+ // Since we may get all sorts of errors, record them all as they come and throw them later if necessary
+ vector<string> staleConfigExs;
+ vector<string> socketExs;
+ vector<string> otherExs;
+ bool allConfigStale = false;
+
+ int retries = -1;
+
+ // Loop through all the queries until we've finished or gotten a socket exception on all of them
+ // We break early for non-socket exceptions, and socket exceptions if we aren't returning partial results
+ do {
+ retries++;
+
+ bool firstPass = retryQueries.size() == 0;
+
+ if( ! firstPass ){
+ log() << "retrying " << ( returnPartial ? "(partial) " : "" ) << "parallel connection to ";
+ for( set<int>::iterator it = retryQueries.begin(); it != retryQueries.end(); ++it ){
+ log() << queries[*it]._server << ", ";
+ }
+ log() << finishedQueries << " finished queries." << endl;
+ }
+
+ size_t num = 0;
+ for ( vector<ServerAndQuery>::iterator it = queries.begin(); it != queries.end(); ++it ) {
+ size_t i = num++;
+
+ const ServerAndQuery& sq = *it;
+
+ // If we're not retrying this cursor on later passes, continue
+ if( ! firstPass && retryQueries.find( i ) == retryQueries.end() ) continue;
+
+ // log() << "Querying " << _query << " from " << _ns << " for " << sq._server << endl;
+
+ BSONObj q = _query;
+ if ( ! sq._extra.isEmpty() ) {
+ q = concatQuery( q , sq._extra );
+ }
+
+ string errLoc = " @ " + sq._server;
+
+ if( firstPass ){
+
+ // This may be the first time connecting to this shard, if so we can get an error here
+ try {
+ conns.push_back( shared_ptr<ShardConnection>( new ShardConnection( sq._server , _ns ) ) );
+ }
+ catch( std::exception& e ){
+ socketExs.push_back( e.what() + errLoc );
+ if( ! returnPartial ){
+ num--;
+ break;
+ }
+ conns.push_back( shared_ptr<ShardConnection>() );
+ continue;
+ }
+
+ servers.push_back( sq._server );
+ }
+
+ if ( conns[i]->setVersion() ) {
+ conns[i]->done();
+ staleConfigExs.push_back( StaleConfigException( _ns , "ClusteredCursor::query ShardConnection had to change" , true ).what() + errLoc );
+ break;
+ }
+
+ LOG(5) << "ParallelSortClusteredCursor::init server:" << sq._server << " ns:" << _ns
+ << " query:" << q << " _fields:" << _fields << " options: " << _options << endl;
+
+ if( ! _cursors[i].raw() )
+ _cursors[i].reset( new DBClientCursor( conns[i]->get() , _ns , q ,
+ 0 , // nToReturn
+ 0 , // nToSkip
+ _fields.isEmpty() ? 0 : &_fields , // fieldsToReturn
+ _options ,
+ _batchSize == 0 ? 0 : _batchSize + _needToSkip // batchSize
+ ) );
+
+ try{
+ _cursors[i].raw()->initLazy( ! firstPass );
+ }
+ catch( SocketException& e ){
+ socketExs.push_back( e.what() + errLoc );
+ _cursors[i].reset( NULL );
+ conns[i]->done();
+ if( ! returnPartial ) break;
+ }
+ catch( std::exception& e){
+ otherExs.push_back( e.what() + errLoc );
+ _cursors[i].reset( NULL );
+ conns[i]->done();
+ break;
+ }
+
+ }
+
+ // Go through all the potentially started cursors and finish initializing them or log any errors and
+ // potentially retry
+ // TODO: Better error classification would make this easier, errors are indicated in all sorts of ways
+ // here that we need to trap.
+ for ( size_t i = 0; i < num; i++ ) {
+
+ // log() << "Finishing query for " << cons[i].get()->getHost() << endl;
+ string errLoc = " @ " + queries[i]._server;
+
+ if( ! _cursors[i].raw() || ( ! firstPass && retryQueries.find( i ) == retryQueries.end() ) ){
+ if( conns[i] ) conns[i].get()->done();
+ continue;
+ }
+
+ assert( conns[i] );
+ retryQueries.erase( i );
+
+ bool retry = false;
+
+ try {
+
+ if( ! _cursors[i].raw()->initLazyFinish( retry ) ) {
+
+ warning() << "invalid result from " << conns[i]->getHost() << ( retry ? ", retrying" : "" ) << endl;
+ _cursors[i].reset( NULL );
+
+ if( ! retry ){
+ socketExs.push_back( str::stream() << "error querying server: " << servers[i] );
+ conns[i]->done();
+ }
+ else {
+ retryQueries.insert( i );
+ }
+
+ continue;
+ }
+ }
+ catch ( MsgAssertionException& e ){
+ socketExs.push_back( e.what() + errLoc );
+ _cursors[i].reset( NULL );
+ conns[i]->done();
+ continue;
+ }
+ catch ( SocketException& e ) {
+ socketExs.push_back( e.what() + errLoc );
+ _cursors[i].reset( NULL );
+ conns[i]->done();
+ continue;
+ }
+ catch( std::exception& e ){
+ otherExs.push_back( e.what() + errLoc );
+ _cursors[i].reset( NULL );
+ conns[i]->done();
+ continue;
+ }
+
+ try {
+ _cursors[i].raw()->attach( conns[i].get() ); // this calls done on conn
+ _checkCursor( _cursors[i].raw() );
+
+ finishedQueries++;
+ }
+ catch ( StaleConfigException& e ){
+
+ // Our stored configuration data is actually stale, we need to reload it
+ // when we throw our exception
+ allConfigStale = true;
+
+ staleConfigExs.push_back( e.what() + errLoc );
+ _cursors[i].reset( NULL );
+ conns[i]->done();
+ continue;
+ }
+ catch( std::exception& e ){
+ otherExs.push_back( e.what() + errLoc );
+ _cursors[i].reset( NULL );
+ conns[i]->done();
+ continue;
+ }
+ }
+
+ // Don't exceed our max retries, should not happen
+ assert( retries < 5 );
+ }
+ while( retryQueries.size() > 0 /* something to retry */ &&
+ ( socketExs.size() == 0 || returnPartial ) /* no conn issues */ &&
+ staleConfigExs.size() == 0 /* no config issues */ &&
+ otherExs.size() == 0 /* no other issues */);
+
+ // Assert that our conns are all closed!
+ for( vector< shared_ptr<ShardConnection> >::iterator i = conns.begin(); i < conns.end(); ++i ){
+ assert( ! (*i) || ! (*i)->ok() );
+ }
+
+ // Handle errors we got during initialization.
+ // If we're returning partial results, we can ignore socketExs, but nothing else
+ // Log a warning in any case, so we don't lose these messages
+ bool throwException = ( socketExs.size() > 0 && ! returnPartial ) || staleConfigExs.size() > 0 || otherExs.size() > 0;
+
+ if( socketExs.size() > 0 || staleConfigExs.size() > 0 || otherExs.size() > 0 ) {
+
+ vector<string> errMsgs;
+
+ errMsgs.insert( errMsgs.end(), staleConfigExs.begin(), staleConfigExs.end() );
+ errMsgs.insert( errMsgs.end(), otherExs.begin(), otherExs.end() );
+ errMsgs.insert( errMsgs.end(), socketExs.begin(), socketExs.end() );
+
+ stringstream errMsg;
+ errMsg << "could not initialize cursor across all shards because : ";
+ for( vector<string>::iterator i = errMsgs.begin(); i != errMsgs.end(); i++ ){
+ if( i != errMsgs.begin() ) errMsg << " :: and :: ";
+ errMsg << *i;
+ }
+
+ if( throwException && staleConfigExs.size() > 0 )
+ throw StaleConfigException( _ns , errMsg.str() , ! allConfigStale );
+ else if( throwException )
+ throw DBException( errMsg.str(), 14827 );
+ else
+ warning() << errMsg.str() << endl;
}
+ if( retries > 0 )
+ log() << "successfully finished parallel query after " << retries << " retries" << endl;
+
}
ParallelSortClusteredCursor::~ParallelSortClusteredCursor() {
@@ -451,6 +690,7 @@ namespace mongo {
if ( best.isEmpty() ) {
best = me;
bestFrom = i;
+ if( _sortKey.isEmpty() ) break;
continue;
}
@@ -481,49 +721,62 @@ namespace mongo {
// ---- Future -----
// -----------------
- Future::CommandResult::CommandResult( const string& server , const string& db , const BSONObj& cmd , DBClientBase * conn ) {
- _server = server;
- _db = db;
- _cmd = cmd;
- _conn = conn;
- _done = false;
- }
+ Future::CommandResult::CommandResult( const string& server , const string& db , const BSONObj& cmd , int options , DBClientBase * conn )
+ :_server(server) ,_db(db) , _options(options), _cmd(cmd) ,_conn(conn) ,_done(false)
+ {
+ try {
+ if ( ! _conn ){
+ _connHolder.reset( new ScopedDbConnection( _server ) );
+ _conn = _connHolder->get();
+ }
- bool Future::CommandResult::join() {
- _thr->join();
- assert( _done );
- return _ok;
+ if ( _conn->lazySupported() ) {
+ _cursor.reset( new DBClientCursor(_conn, _db + ".$cmd", _cmd, -1/*limit*/, 0, NULL, _options, 0));
+ _cursor->initLazy();
+ }
+ else {
+ _done = true; // we set _done first because even if there is an error we're done
+ _ok = _conn->runCommand( db , cmd , _res , options );
+ }
+ }
+ catch ( std::exception& e ) {
+ error() << "Future::spawnComand (part 1) exception: " << e.what() << endl;
+ _ok = false;
+ _done = true;
+ }
}
- void Future::commandThread(shared_ptr<CommandResult> res) {
- setThreadName( "future" );
+ bool Future::CommandResult::join() {
+ if (_done)
+ return _ok;
try {
- DBClientBase * conn = res->_conn;
-
- scoped_ptr<ScopedDbConnection> myconn;
- if ( ! conn ){
- myconn.reset( new ScopedDbConnection( res->_server ) );
- conn = myconn->get();
- }
-
- res->_ok = conn->runCommand( res->_db , res->_cmd , res->_res );
+ // TODO: Allow retries?
+ bool retry = false;
+ bool finished = _cursor->initLazyFinish( retry );
+
+ // Shouldn't need to communicate with server any more
+ if ( _connHolder )
+ _connHolder->done();
- if ( myconn )
- myconn->done();
+ uassert(14812, str::stream() << "Error running command on server: " << _server, finished);
+ massert(14813, "Command returned nothing", _cursor->more());
+
+ _res = _cursor->nextSafe();
+ _ok = _res["ok"].trueValue();
}
catch ( std::exception& e ) {
- error() << "Future::commandThread exception: " << e.what() << endl;
- res->_ok = false;
+ error() << "Future::spawnComand (part 2) exception: " << e.what() << endl;
+ _ok = false;
}
- res->_done = true;
- }
- shared_ptr<Future::CommandResult> Future::spawnCommand( const string& server , const string& db , const BSONObj& cmd , DBClientBase * conn ) {
- shared_ptr<Future::CommandResult> res (new Future::CommandResult( server , db , cmd , conn ));
- res->_thr.reset( new boost::thread( boost::bind(Future::commandThread, res) ) );
+ _done = true;
+ return _ok;
+ }
+ shared_ptr<Future::CommandResult> Future::spawnCommand( const string& server , const string& db , const BSONObj& cmd , int options , DBClientBase * conn ) {
+ shared_ptr<Future::CommandResult> res (new Future::CommandResult( server , db , cmd , options , conn ));
return res;
}
diff --git a/client/parallel.h b/client/parallel.h
index 0809376..869bff9 100644
--- a/client/parallel.h
+++ b/client/parallel.h
@@ -89,9 +89,15 @@ namespace mongo {
virtual void _init() = 0;
- auto_ptr<DBClientCursor> query( const string& server , int num = 0 , BSONObj extraFilter = BSONObj() , int skipLeft = 0 );
+ auto_ptr<DBClientCursor> query( const string& server , int num = 0 , BSONObj extraFilter = BSONObj() , int skipLeft = 0 , bool lazy=false );
BSONObj explain( const string& server , BSONObj extraFilter = BSONObj() );
+ /**
+ * checks the cursor for any errors
+ * will throw an exception if an error is encountered
+ */
+ void _checkCursor( DBClientCursor * cursor );
+
static BSONObj _concatFilter( const BSONObj& filter , const BSONObj& extraFilter );
virtual void _explain( map< string,list<BSONObj> >& out ) = 0;
@@ -111,15 +117,20 @@ namespace mongo {
class FilteringClientCursor {
public:
FilteringClientCursor( const BSONObj filter = BSONObj() );
+ FilteringClientCursor( DBClientCursor* cursor , const BSONObj filter = BSONObj() );
FilteringClientCursor( auto_ptr<DBClientCursor> cursor , const BSONObj filter = BSONObj() );
~FilteringClientCursor();
void reset( auto_ptr<DBClientCursor> cursor );
+ void reset( DBClientCursor* cursor );
bool more();
BSONObj next();
BSONObj peek();
+
+ DBClientCursor* raw() { return _cursor.get(); }
+
private:
void _advance();
@@ -269,14 +280,16 @@ namespace mongo {
private:
- CommandResult( const string& server , const string& db , const BSONObj& cmd , DBClientBase * conn );
+ CommandResult( const string& server , const string& db , const BSONObj& cmd , int options , DBClientBase * conn );
string _server;
string _db;
+ int _options;
BSONObj _cmd;
DBClientBase * _conn;
+ scoped_ptr<ScopedDbConnection> _connHolder; // used if not provided a connection
- scoped_ptr<boost::thread> _thr;
+ scoped_ptr<DBClientCursor> _cursor;
BSONObj _res;
bool _ok;
@@ -285,7 +298,6 @@ namespace mongo {
friend class Future;
};
- static void commandThread(shared_ptr<CommandResult> res);
/**
* @param server server name
@@ -293,7 +305,7 @@ namespace mongo {
* @param cmd cmd to exec
* @param conn optional connection to use. will use standard pooled if non-specified
*/
- static shared_ptr<CommandResult> spawnCommand( const string& server , const string& db , const BSONObj& cmd , DBClientBase * conn = 0 );
+ static shared_ptr<CommandResult> spawnCommand( const string& server , const string& db , const BSONObj& cmd , int options , DBClientBase * conn = 0 );
};
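With the command thread gone, a caller-side sketch of the new lazy Future flow looks roughly like this (the shard host name is an assumption; the reply itself stays inside CommandResult, per _res above):

    // Sketch: spawnCommand() now sends the command lazily on the calling thread,
    // and join() finishes the exchange instead of waiting on a worker thread.
    shared_ptr<Future::CommandResult> res =
        Future::spawnCommand( "shard1:27018" , "admin" , BSON( "dbstats" << 1 ) , 0 );

    // ... interleave other work or other spawned commands here ...

    if ( ! res->join() ) {
        // command failed or the connection broke; errors were already logged
    }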
diff --git a/client/redef_macros.h b/client/redef_macros.h
index a4cb1c9..897912d 100644
--- a/client/redef_macros.h
+++ b/client/redef_macros.h
@@ -1,4 +1,7 @@
-/** @file redef_macros.h - redefine macros from undef_macros.h */
+/** @file redef_macros.h macros the implementation uses.
+
+ @see undef_macros.h undefines these after use to minimize name pollution.
+*/
/* Copyright 2009 10gen Inc.
*
diff --git a/client/simple_client_demo.cpp b/client/simple_client_demo.cpp
index fa2f4a8..f4278dd 100644
--- a/client/simple_client_demo.cpp
+++ b/client/simple_client_demo.cpp
@@ -21,15 +21,33 @@ using namespace mongo;
using namespace bson;
int main() {
- cout << "connecting to localhost..." << endl;
- DBClientConnection c;
- c.connect("localhost");
- cout << "connected ok" << endl;
- unsigned long long count = c.count("test.foo");
- cout << "count of exiting documents in collection test.foo : " << count << endl;
-
- bo o = BSON( "hello" << "world" );
- c.insert("test.foo", o);
+ try {
+ cout << "connecting to localhost..." << endl;
+ DBClientConnection c;
+ c.connect("localhost");
+ cout << "connected ok" << endl;
+ unsigned long long count = c.count("test.foo");
+ cout << "count of exiting documents in collection test.foo : " << count << endl;
+
+ bo o = BSON( "hello" << "world" );
+ c.insert("test.foo", o);
+
+ string e = c.getLastError();
+ if( !e.empty() ) {
+ cout << "insert #1 failed: " << e << endl;
+ }
+
+ // make an index with a unique key constraint
+ c.ensureIndex("test.foo", BSON("hello"<<1), /*unique*/true);
+
+ c.insert("test.foo", o); // will cause a dup key error on "hello" field
+ cout << "we expect a dup key error here:" << endl;
+ cout << " " << c.getLastErrorDetailed().toString() << endl;
+ }
+ catch(DBException& e) {
+ cout << "caught DBException " << e.toString() << endl;
+ return 1;
+ }
return 0;
}
diff --git a/client/syncclusterconnection.cpp b/client/syncclusterconnection.cpp
index 4fafdc1..34633d1 100644
--- a/client/syncclusterconnection.cpp
+++ b/client/syncclusterconnection.cpp
@@ -24,7 +24,7 @@
namespace mongo {
- SyncClusterConnection::SyncClusterConnection( const list<HostAndPort> & L) : _mutex("SynClusterConnection") {
+ SyncClusterConnection::SyncClusterConnection( const list<HostAndPort> & L, double socketTimeout) : _mutex("SyncClusterConnection"), _socketTimeout( socketTimeout ) {
{
stringstream s;
int n=0;
@@ -38,7 +38,7 @@ namespace mongo {
_connect( i->toString() );
}
- SyncClusterConnection::SyncClusterConnection( string commaSeperated ) : _mutex("SyncClusterConnection") {
+ SyncClusterConnection::SyncClusterConnection( string commaSeperated, double socketTimeout) : _mutex("SyncClusterConnection"), _socketTimeout( socketTimeout ) {
_address = commaSeperated;
string::size_type idx;
while ( ( idx = commaSeperated.find( ',' ) ) != string::npos ) {
@@ -50,7 +50,7 @@ namespace mongo {
uassert( 8004 , "SyncClusterConnection needs 3 servers" , _conns.size() == 3 );
}
- SyncClusterConnection::SyncClusterConnection( string a , string b , string c ) : _mutex("SyncClusterConnection") {
+ SyncClusterConnection::SyncClusterConnection( string a , string b , string c, double socketTimeout) : _mutex("SyncClusterConnection"), _socketTimeout( socketTimeout ) {
_address = a + "," + b + "," + c;
// connect to all even if not working
_connect( a );
@@ -58,7 +58,7 @@ namespace mongo {
_connect( c );
}
- SyncClusterConnection::SyncClusterConnection( SyncClusterConnection& prev ) : _mutex("SyncClusterConnection") {
+ SyncClusterConnection::SyncClusterConnection( SyncClusterConnection& prev, double socketTimeout) : _mutex("SyncClusterConnection"), _socketTimeout( socketTimeout ) {
assert(0);
}
@@ -79,7 +79,7 @@ namespace mongo {
for ( size_t i=0; i<_conns.size(); i++ ) {
BSONObj res;
try {
- if ( _conns[i]->simpleCommand( "admin" , 0 , "fsync" ) )
+ if ( _conns[i]->simpleCommand( "admin" , &res , "fsync" ) )
continue;
}
catch ( DBException& e ) {
@@ -144,6 +144,7 @@ namespace mongo {
void SyncClusterConnection::_connect( string host ) {
log() << "SyncClusterConnection connecting to [" << host << "]" << endl;
DBClientConnection * c = new DBClientConnection( true );
+ c->setSoTimeout( _socketTimeout );
string errmsg;
if ( ! c->connect( host , errmsg ) )
log() << "SyncClusterConnection connect fail to: " << host << " errmsg: " << errmsg << endl;
@@ -159,7 +160,7 @@ namespace mongo {
BSONObj SyncClusterConnection::findOne(const string &ns, const Query& query, const BSONObj *fieldsToReturn, int queryOptions) {
if ( ns.find( ".$cmd" ) != string::npos ) {
- string cmdName = query.obj.firstElement().fieldName();
+ string cmdName = query.obj.firstElementFieldName();
int lockType = _lockType( cmdName );
@@ -194,12 +195,22 @@ namespace mongo {
return DBClientBase::findOne( ns , query , fieldsToReturn , queryOptions );
}
+ bool SyncClusterConnection::auth(const string &dbname, const string &username, const string &password_text, string& errmsg, bool digestPassword) {
+ for (vector<DBClientConnection*>::iterator it = _conns.begin(); it < _conns.end(); it++) {
+ massert( 15848, "sync cluster of sync clusters?", (*it)->type() != ConnectionString::SYNC);
+
+ if (!(*it)->auth(dbname, username, password_text, errmsg, digestPassword)) {
+ return false;
+ }
+ }
+ return true;
+ }
auto_ptr<DBClientCursor> SyncClusterConnection::query(const string &ns, Query query, int nToReturn, int nToSkip,
const BSONObj *fieldsToReturn, int queryOptions, int batchSize ) {
_lastErrors.clear();
if ( ns.find( ".$cmd" ) != string::npos ) {
- string cmdName = query.obj.firstElement().fieldName();
+ string cmdName = query.obj.firstElementFieldName();
int lockType = _lockType( cmdName );
uassert( 13054 , (string)"write $cmd not supported in SyncClusterConnection::query for:" + cmdName , lockType <= 0 );
}
@@ -240,7 +251,7 @@ namespace mongo {
return c;
}
- void SyncClusterConnection::insert( const string &ns, BSONObj obj ) {
+ void SyncClusterConnection::insert( const string &ns, BSONObj obj , int flags) {
uassert( 13119 , (string)"SyncClusterConnection::insert obj has to have an _id: " + obj.jsonString() ,
ns.find( ".system.indexes" ) != string::npos || obj["_id"].type() );
@@ -250,13 +261,13 @@ namespace mongo {
throw UserException( 8003 , (string)"SyncClusterConnection::insert prepare failed: " + errmsg );
for ( size_t i=0; i<_conns.size(); i++ ) {
- _conns[i]->insert( ns , obj );
+ _conns[i]->insert( ns , obj , flags);
}
_checkLast();
}
- void SyncClusterConnection::insert( const string &ns, const vector< BSONObj >& v ) {
+ void SyncClusterConnection::insert( const string &ns, const vector< BSONObj >& v , int flags) {
uassert( 10023 , "SyncClusterConnection bulk insert not implemented" , 0);
}
@@ -284,7 +295,7 @@ namespace mongo {
throw UserException( 8005 , (string)"SyncClusterConnection::udpate prepare failed: " + errmsg );
}
- for ( size_t i=0; i<_conns.size(); i++ ) {
+ for ( size_t i = 0; i < _conns.size(); i++ ) {
try {
_conns[i]->update( ns , query , obj , upsert , multi );
}
@@ -347,7 +358,7 @@ namespace mongo {
throw UserException( 8008 , "all servers down!" );
}
- void SyncClusterConnection::say( Message &toSend ) {
+ void SyncClusterConnection::say( Message &toSend, bool isRetry ) {
string errmsg;
if ( ! prepare( errmsg ) )
throw UserException( 13397 , (string)"SyncClusterConnection::say prepare failed: " + errmsg );
@@ -386,4 +397,11 @@ namespace mongo {
assert(0);
}
+ void SyncClusterConnection::setAllSoTimeouts( double socketTimeout ){
+ _socketTimeout = socketTimeout;
+ for ( size_t i=0; i<_conns.size(); i++ ) {
+ if( _conns[i] ) _conns[i]->setSoTimeout( socketTimeout );
+ }
+ }
+
}
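A small sketch of the new SyncClusterConnection timeout knobs (the config server host names are illustrative):

    // Construct with a 30-second per-socket timeout, then tighten it later
    // on all three underlying connections.
    SyncClusterConnection conn( "cfg1:27019" , "cfg2:27019" , "cfg3:27019" , /*socketTimeout*/ 30 );
    // ...
    conn.setAllSoTimeouts( 10 );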
diff --git a/client/syncclusterconnection.h b/client/syncclusterconnection.h
index c946073..68dd338 100644
--- a/client/syncclusterconnection.h
+++ b/client/syncclusterconnection.h
@@ -43,9 +43,9 @@ namespace mongo {
/**
* @param commaSeparated should be 3 hosts comma separated
*/
- SyncClusterConnection( const list<HostAndPort> & );
- SyncClusterConnection( string commaSeparated );
- SyncClusterConnection( string a , string b , string c );
+ SyncClusterConnection( const list<HostAndPort> &, double socketTimeout = 0);
+ SyncClusterConnection( string commaSeparated, double socketTimeout = 0);
+ SyncClusterConnection( string a , string b , string c, double socketTimeout = 0 );
~SyncClusterConnection();
/**
@@ -67,16 +67,16 @@ namespace mongo {
virtual auto_ptr<DBClientCursor> getMore( const string &ns, long long cursorId, int nToReturn, int options );
- virtual void insert( const string &ns, BSONObj obj );
+ virtual void insert( const string &ns, BSONObj obj, int flags=0);
- virtual void insert( const string &ns, const vector< BSONObj >& v );
+ virtual void insert( const string &ns, const vector< BSONObj >& v, int flags=0);
virtual void remove( const string &ns , Query query, bool justOne );
virtual void update( const string &ns , Query query , BSONObj obj , bool upsert , bool multi );
virtual bool call( Message &toSend, Message &response, bool assertOk , string * actualServer );
- virtual void say( Message &toSend );
+ virtual void say( Message &toSend, bool isRetry = false );
virtual void sayPiggyBack( Message &toSend );
virtual void killCursor( long long cursorID );
@@ -91,8 +91,14 @@ namespace mongo {
virtual ConnectionString::ConnectionType type() const { return ConnectionString::SYNC; }
+ void setAllSoTimeouts( double socketTimeout );
+ double getSoTimeout() const { return _socketTimeout; }
+
+ virtual bool auth(const string &dbname, const string &username, const string &password_text, string& errmsg, bool digestPassword);
+
+ virtual bool lazySupported() const { return false; }
private:
- SyncClusterConnection( SyncClusterConnection& prev );
+ SyncClusterConnection( SyncClusterConnection& prev, double socketTimeout = 0 );
string _toString() const;
bool _commandOnActive(const string &dbname, const BSONObj& cmd, BSONObj &info, int options=0);
auto_ptr<DBClientCursor> _queryOnActive(const string &ns, Query query, int nToReturn, int nToSkip,
@@ -108,6 +114,8 @@ namespace mongo {
mongo::mutex _mutex;
vector<BSONObj> _lastErrors;
+
+ double _socketTimeout;
};
class UpdateNotTheSame : public UserException {
diff --git a/client/undef_macros.h b/client/undef_macros.h
index bc59a84..30ece61 100644
--- a/client/undef_macros.h
+++ b/client/undef_macros.h
@@ -1,4 +1,4 @@
-/** @file undef_macros.h - remove mongo-specific macros that might cause issues */
+/** @file undef_macros.h remove mongo implementation macros after using */
/* Copyright 2009 10gen Inc.
*
diff --git a/db/btree.cpp b/db/btree.cpp
index 299c212..bf9926e 100644
--- a/db/btree.cpp
+++ b/db/btree.cpp
@@ -27,33 +27,23 @@
#include "curop-inl.h"
#include "stats/counters.h"
#include "dur_commitjob.h"
+#include "btreebuilder.h"
+#include "../util/unittest.h"
namespace mongo {
-#define VERIFYTHISLOC dassert( thisLoc.btree() == this );
+ BOOST_STATIC_ASSERT( Record::HeaderSize == 16 );
+ BOOST_STATIC_ASSERT( Record::HeaderSize + BtreeData_V1::BucketSize == 8192 );
- /**
- * give us a writable version of the btree bucket (declares write intent).
- * note it is likely more efficient to declare write intent on something smaller when you can.
- */
- BtreeBucket* DiskLoc::btreemod() const {
- assert( _a != -1 );
- BtreeBucket *b = const_cast< BtreeBucket * >( btree() );
- return static_cast< BtreeBucket* >( getDur().writingPtr( b, BucketSize ) );
- }
+#define VERIFYTHISLOC dassert( thisLoc.btree<V>() == this );
- _KeyNode& _KeyNode::writing() const {
- return *getDur().writing( const_cast< _KeyNode* >( this ) );
+ template< class Loc >
+ __KeyNode<Loc> & __KeyNode<Loc>::writing() const {
+ return *getDur().writing( const_cast< __KeyNode<Loc> * >( this ) );
}
- KeyNode::KeyNode(const BucketBasics& bb, const _KeyNode &k) :
- prevChildBucket(k.prevChildBucket),
- recordLoc(k.recordLoc), key(bb.data+k.keyDataOfs())
- { }
-
- // largest key size we allow. note we very much need to support bigger keys (somehow) in the future.
- static const int KeyMax = BucketSize / 10;
-
+ // BucketBasics::lowWaterMark()
+ //
// We define this value as the maximum number of bytes such that, if we have
// fewer than this many bytes, we must be able to either merge with or receive
// keys from any neighboring node. If our utilization goes below this value we
@@ -65,18 +55,15 @@ namespace mongo {
// rebalancedSeparatorPos(). The conditions for lowWaterMark - 1 are as
// follows: We know we cannot merge with the neighbor, so the total data size
// for us, the neighbor, and the separator must be at least
- // BtreeBucket::bodySize() + 1. We must be able to accept one key of any
+ // BtreeBucket<V>::bodySize() + 1. We must be able to accept one key of any
// allowed size, so our size plus storage for that additional key must be
- // <= BtreeBucket::bodySize() / 2. This way, with the extra key we'll have a
+ // <= BtreeBucket<V>::bodySize() / 2. This way, with the extra key we'll have a
// new bucket data size < half the total data size and by the implementation
// of rebalancedSeparatorPos() the key must be added.
- static const int lowWaterMark = BtreeBucket::bodySize() / 2 - KeyMax - sizeof( _KeyNode ) + 1;
static const int split_debug = 0;
static const int insert_debug = 0;
- extern int otherTraceLevel;
-
/**
* this error is ok/benign when doing a background indexing -- that logic in pdfile checks explicitly
* for the 10287 error code.
@@ -88,47 +75,57 @@ namespace mongo {
/* BucketBasics --------------------------------------------------- */
- void BucketBasics::assertWritable() {
+ template< class V >
+ void BucketBasics<V>::assertWritable() {
if( cmdLine.dur )
- dur::assertAlreadyDeclared(this, sizeof(*this));
+ dur::assertAlreadyDeclared(this, V::BucketSize);
}
- string BtreeBucket::bucketSummary() const {
+ template< class V >
+ string BtreeBucket<V>::bucketSummary() const {
stringstream ss;
ss << " Bucket info:" << endl;
- ss << " n: " << n << endl;
- ss << " parent: " << parent.toString() << endl;
- ss << " nextChild: " << parent.toString() << endl;
- ss << " flags:" << flags << endl;
- ss << " emptySize: " << emptySize << " topSize: " << topSize << endl;
+ ss << " n: " << this->n << endl;
+ ss << " parent: " << this->parent.toString() << endl;
+ ss << " nextChild: " << this->parent.toString() << endl;
+ ss << " flags:" << this->flags << endl;
+ ss << " emptySize: " << this->emptySize << " topSize: " << this->topSize << endl;
return ss.str();
}
- int BucketBasics::Size() const {
- assert( _wasSize == BucketSize );
- return BucketSize;
+ template< class V >
+ int BucketBasics<V>::Size() const {
+ return V::BucketSize;
}
- void BucketBasics::_shape(int level, stringstream& ss) const {
+ template< class V >
+ void BucketBasics<V>::_shape(int level, stringstream& ss) const {
for ( int i = 0; i < level; i++ ) ss << ' ';
ss << "*\n";
- for ( int i = 0; i < n; i++ )
- if ( !k(i).prevChildBucket.isNull() )
- k(i).prevChildBucket.btree()->_shape(level+1,ss);
- if ( !nextChild.isNull() )
- nextChild.btree()->_shape(level+1,ss);
+ for ( int i = 0; i < this->n; i++ ) {
+ if ( !k(i).prevChildBucket.isNull() ) {
+ DiskLoc ll = k(i).prevChildBucket;
+ ll.btree<V>()->_shape(level+1,ss);
+ }
+ }
+ if ( !this->nextChild.isNull() ) {
+ DiskLoc ll = this->nextChild;
+ ll.btree<V>()->_shape(level+1,ss);
+ }
}
int bt_fv=0;
int bt_dmp=0;
- void BtreeBucket::dumpTree(const DiskLoc &thisLoc, const BSONObj &order) const {
+ template< class V >
+ void BtreeBucket<V>::dumpTree(const DiskLoc &thisLoc, const BSONObj &order) const {
bt_dmp=1;
fullValidate(thisLoc, order);
bt_dmp=0;
}
- int BtreeBucket::fullValidate(const DiskLoc& thisLoc, const BSONObj &order, int *unusedCount, bool strict) const {
+ template< class V >
+ long long BtreeBucket<V>::fullValidate(const DiskLoc& thisLoc, const BSONObj &order, long long *unusedCount, bool strict, unsigned depth) const {
{
bool f = false;
assert( f = true );
@@ -136,18 +133,18 @@ namespace mongo {
}
killCurrentOp.checkForInterrupt();
- assertValid(order, true);
+ this->assertValid(order, true);
if ( bt_dmp ) {
- out() << thisLoc.toString() << ' ';
- ((BtreeBucket *) this)->dump();
+ _log() << thisLoc.toString() << ' ';
+ ((BtreeBucket *) this)->dump(depth);
}
// keycount
- int kc = 0;
+ long long kc = 0;
- for ( int i = 0; i < n; i++ ) {
- const _KeyNode& kn = k(i);
+ for ( int i = 0; i < this->n; i++ ) {
+ const _KeyNode& kn = this->k(i);
if ( kn.isUsed() ) {
kc++;
@@ -159,25 +156,26 @@ namespace mongo {
}
if ( !kn.prevChildBucket.isNull() ) {
DiskLoc left = kn.prevChildBucket;
- const BtreeBucket *b = left.btree();
+ const BtreeBucket *b = left.btree<V>();
if ( strict ) {
assert( b->parent == thisLoc );
}
else {
wassert( b->parent == thisLoc );
}
- kc += b->fullValidate(kn.prevChildBucket, order, unusedCount, strict);
+ kc += b->fullValidate(kn.prevChildBucket, order, unusedCount, strict, depth+1);
}
}
- if ( !nextChild.isNull() ) {
- const BtreeBucket *b = nextChild.btree();
+ if ( !this->nextChild.isNull() ) {
+ DiskLoc ll = this->nextChild;
+ const BtreeBucket *b = ll.btree<V>();
if ( strict ) {
assert( b->parent == thisLoc );
}
else {
wassert( b->parent == thisLoc );
}
- kc += b->fullValidate(nextChild, order, unusedCount, strict);
+ kc += b->fullValidate(this->nextChild, order, unusedCount, strict, depth+1);
}
return kc;
@@ -185,12 +183,17 @@ namespace mongo {
int nDumped = 0;
- void BucketBasics::assertValid(const Ordering &order, bool force) const {
+ template< class V >
+ void BucketBasics<V>::assertValid(const Ordering &order, bool force) const {
if ( !debug && !force )
return;
- wassert( n >= 0 && n < Size() );
- wassert( emptySize >= 0 && emptySize < BucketSize );
- wassert( topSize >= n && topSize <= BucketSize );
+ {
+ int foo = this->n;
+ wassert( foo >= 0 && this->n < Size() );
+ foo = this->emptySize;
+ wassert( foo >= 0 && this->emptySize < V::BucketSize );
+ wassert( this->topSize >= this->n && this->topSize <= V::BucketSize );
+ }
// this is very slow so don't do often
{
@@ -201,26 +204,26 @@ namespace mongo {
DEV {
// slow:
- for ( int i = 0; i < n-1; i++ ) {
- BSONObj k1 = keyNode(i).key;
- BSONObj k2 = keyNode(i+1).key;
+ for ( int i = 0; i < this->n-1; i++ ) {
+ Key k1 = keyNode(i).key;
+ Key k2 = keyNode(i+1).key;
int z = k1.woCompare(k2, order); //OK
if ( z > 0 ) {
out() << "ERROR: btree key order corrupt. Keys:" << endl;
if ( ++nDumped < 5 ) {
- for ( int j = 0; j < n; j++ ) {
+ for ( int j = 0; j < this->n; j++ ) {
out() << " " << keyNode(j).key.toString() << endl;
}
- ((BtreeBucket *) this)->dump();
+ ((BtreeBucket<V> *) this)->dump();
}
wassert(false);
break;
}
else if ( z == 0 ) {
if ( !(k(i).recordLoc < k(i+1).recordLoc) ) {
- out() << "ERROR: btree key order corrupt (recordloc's wrong). Keys:" << endl;
- out() << " k(" << i << "):" << keyNode(i).key.toString() << " RL:" << k(i).recordLoc.toString() << endl;
- out() << " k(" << i+1 << "):" << keyNode(i+1).key.toString() << " RL:" << k(i+1).recordLoc.toString() << endl;
+ out() << "ERROR: btree key order corrupt (recordloc's wrong):" << endl;
+ out() << " k(" << i << ")" << keyNode(i).key.toString() << " RL:" << k(i).recordLoc.toString() << endl;
+ out() << " k(" << i+1 << ")" << keyNode(i+1).key.toString() << " RL:" << k(i+1).recordLoc.toString() << endl;
wassert( k(i).recordLoc < k(i+1).recordLoc );
}
}
@@ -228,15 +231,15 @@ namespace mongo {
}
else {
//faster:
- if ( n > 1 ) {
- BSONObj k1 = keyNode(0).key;
- BSONObj k2 = keyNode(n-1).key;
+ if ( this->n > 1 ) {
+ Key k1 = keyNode(0).key;
+ Key k2 = keyNode(this->n-1).key;
int z = k1.woCompare(k2, order);
//wassert( z <= 0 );
if ( z > 0 ) {
problem() << "btree keys out of order" << '\n';
ONCE {
- ((BtreeBucket *) this)->dump();
+ ((BtreeBucket<V> *) this)->dump();
}
assert(false);
}
@@ -244,53 +247,59 @@ namespace mongo {
}
}
- inline void BucketBasics::markUnused(int keypos) {
- assert( keypos >= 0 && keypos < n );
+ template< class V >
+ inline void BucketBasics<V>::markUnused(int keypos) {
+ assert( keypos >= 0 && keypos < this->n );
k(keypos).setUnused();
}
- inline int BucketBasics::totalDataSize() const {
- return (int) (Size() - (data-(char*)this));
+ template< class V >
+ inline int BucketBasics<V>::totalDataSize() const {
+ return (int) (Size() - (this->data-(char*)this));
}
- void BucketBasics::init() {
- parent.Null();
- nextChild.Null();
- _wasSize = BucketSize;
- _reserved1 = 0;
- flags = Packed;
- n = 0;
- emptySize = totalDataSize();
- topSize = 0;
- reserved = 0;
+ template< class V >
+ void BucketBasics<V>::init() {
+ this->_init();
+ this->parent.Null();
+ this->nextChild.Null();
+ this->flags = Packed;
+ this->n = 0;
+ this->emptySize = totalDataSize();
+ this->topSize = 0;
}
/** see _alloc */
- inline void BucketBasics::_unalloc(int bytes) {
- topSize -= bytes;
- emptySize += bytes;
+ template< class V >
+ inline void BucketBasics<V>::_unalloc(int bytes) {
+ this->topSize -= bytes;
+ this->emptySize += bytes;
}
/**
* we allocate space from the end of the buffer for data.
* the keynodes grow from the front.
*/
- inline int BucketBasics::_alloc(int bytes) {
- topSize += bytes;
- emptySize -= bytes;
- int ofs = totalDataSize() - topSize;
+ template< class V >
+ inline int BucketBasics<V>::_alloc(int bytes) {
+ assert( this->emptySize >= bytes );
+ this->topSize += bytes;
+ this->emptySize -= bytes;
+ int ofs = totalDataSize() - this->topSize;
assert( ofs > 0 );
return ofs;
}
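+    // Note on layout: _KeyNode entries grow forward from the start of the data
+    // area while key bytes are _alloc()'d backward from its end; emptySize is
+    // the unused gap between the two regions.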
- void BucketBasics::_delKeyAtPos(int keypos, bool mayEmpty) {
- assert( keypos >= 0 && keypos <= n );
+ template< class V >
+ void BucketBasics<V>::_delKeyAtPos(int keypos, bool mayEmpty) {
+ // TODO This should be keypos < n
+ assert( keypos >= 0 && keypos <= this->n );
assert( childForPos(keypos).isNull() );
// TODO audit cases where nextChild is null
- assert( ( mayEmpty && n > 0 ) || n > 1 || nextChild.isNull() );
- emptySize += sizeof(_KeyNode);
- n--;
- for ( int j = keypos; j < n; j++ )
+ assert( ( mayEmpty && this->n > 0 ) || this->n > 1 || this->nextChild.isNull() );
+ this->emptySize += sizeof(_KeyNode);
+ this->n--;
+ for ( int j = keypos; j < this->n; j++ )
k(j) = k(j+1);
setNotPacked();
}
@@ -299,38 +308,54 @@ namespace mongo {
* pull rightmost key from the bucket. this version requires its right child to be null so it
* does not bother returning that value.
*/
- void BucketBasics::popBack(DiskLoc& recLoc, BSONObj& key) {
- massert( 10282 , "n==0 in btree popBack()", n > 0 );
- assert( k(n-1).isUsed() ); // no unused skipping in this function at this point - btreebuilder doesn't require that
- KeyNode kn = keyNode(n-1);
+ template< class V >
+ void BucketBasics<V>::popBack(DiskLoc& recLoc, Key &key) {
+ massert( 10282 , "n==0 in btree popBack()", this->n > 0 );
+ assert( k(this->n-1).isUsed() ); // no unused skipping in this function at this point - btreebuilder doesn't require that
+ KeyNode kn = keyNode(this->n-1);
recLoc = kn.recordLoc;
- key = kn.key;
- int keysize = kn.key.objsize();
+ key.assign(kn.key);
+ int keysize = kn.key.dataSize();
- massert( 10283 , "rchild not null in btree popBack()", nextChild.isNull());
+ massert( 10283 , "rchild not null in btree popBack()", this->nextChild.isNull());
// weirdly, we also put the rightmost down pointer in nextchild, even when bucket isn't full.
- nextChild = kn.prevChildBucket;
+ this->nextChild = kn.prevChildBucket;
- n--;
- emptySize += sizeof(_KeyNode);
+ this->n--;
+ // This is risky because the key we are returning points to this unalloc'ed memory,
+ // and we are assuming that the last key points to the last allocated
+ // bson region.
+ this->emptySize += sizeof(_KeyNode);
_unalloc(keysize);
}
/** add a key. must be > all existing. be careful to set next ptr right. */
- bool BucketBasics::_pushBack(const DiskLoc recordLoc, const BSONObj& key, const Ordering &order, const DiskLoc prevChild) {
- int bytesNeeded = key.objsize() + sizeof(_KeyNode);
- if ( bytesNeeded > emptySize )
+ template< class V >
+ bool BucketBasics<V>::_pushBack(const DiskLoc recordLoc, const Key& key, const Ordering &order, const DiskLoc prevChild) {
+ int bytesNeeded = key.dataSize() + sizeof(_KeyNode);
+ if ( bytesNeeded > this->emptySize )
return false;
- assert( bytesNeeded <= emptySize );
- assert( n == 0 || keyNode(n-1).key.woCompare(key, order) <= 0 );
- emptySize -= sizeof(_KeyNode);
- _KeyNode& kn = k(n++);
+ assert( bytesNeeded <= this->emptySize );
+ if( this->n ) {
+ const KeyNode klast = keyNode(this->n-1);
+ if( klast.key.woCompare(key, order) > 0 ) {
+ log() << "btree bucket corrupt? consider reindexing or running validate command" << endl;
+ log() << " klast: " << keyNode(this->n-1).key.toString() << endl;
+ log() << " key: " << key.toString() << endl;
+ DEV klast.key.woCompare(key, order);
+ assert(false);
+ }
+ }
+ this->emptySize -= sizeof(_KeyNode);
+ _KeyNode& kn = k(this->n++);
kn.prevChildBucket = prevChild;
kn.recordLoc = recordLoc;
- kn.setKeyDataOfs( (short) _alloc(key.objsize()) );
- char *p = dataAt(kn.keyDataOfs());
- memcpy(p, key.objdata(), key.objsize());
+ kn.setKeyDataOfs( (short) _alloc(key.dataSize()) );
+ short ofs = kn.keyDataOfs();
+ char *p = dataAt(ofs);
+ memcpy(p, key.data(), key.dataSize());
+
return true;
}
@@ -342,19 +367,20 @@ namespace mongo {
/** insert a key in a bucket with no complexity -- no splits required
@return false if a split is required.
*/
- bool BucketBasics::basicInsert(const DiskLoc thisLoc, int &keypos, const DiskLoc recordLoc, const BSONObj& key, const Ordering &order) const {
- assert( keypos >= 0 && keypos <= n );
- int bytesNeeded = key.objsize() + sizeof(_KeyNode);
- if ( bytesNeeded > emptySize ) {
+ template< class V >
+ bool BucketBasics<V>::basicInsert(const DiskLoc thisLoc, int &keypos, const DiskLoc recordLoc, const Key& key, const Ordering &order) const {
+ assert( keypos >= 0 && keypos <= this->n );
+ int bytesNeeded = key.dataSize() + sizeof(_KeyNode);
+ if ( bytesNeeded > this->emptySize ) {
_pack(thisLoc, order, keypos);
- if ( bytesNeeded > emptySize )
+ if ( bytesNeeded > this->emptySize )
return false;
}
BucketBasics *b;
{
const char *p = (const char *) &k(keypos);
- const char *q = (const char *) &k(n+1);
+ const char *q = (const char *) &k(this->n+1);
// declare that we will write to [k(keypos),k(n)]
// todo: this writes a medium amount to the journal. we may want to add a verb "shift" to the redo log so
// we can log a very small amount.
@@ -364,39 +390,45 @@ namespace mongo {
// 1 4 9
// ->
// 1 4 _ 9
- for ( int j = n; j > keypos; j-- ) // make room
+ for ( int j = this->n; j > keypos; j-- ) // make room
b->k(j) = b->k(j-1);
}
- getDur().declareWriteIntent(&b->emptySize, 12); // [b->emptySize..b->n] is 12 bytes and we are going to write those
+ getDur().declareWriteIntent(&b->emptySize, sizeof(this->emptySize)+sizeof(this->topSize)+sizeof(this->n));
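+        // One intent declaration covers emptySize, topSize and n, which are
+        // assumed to sit consecutively in the bucket header.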
b->emptySize -= sizeof(_KeyNode);
b->n++;
+ // This _KeyNode was marked for writing above.
_KeyNode& kn = b->k(keypos);
kn.prevChildBucket.Null();
kn.recordLoc = recordLoc;
- kn.setKeyDataOfs((short) b->_alloc(key.objsize()) );
+ kn.setKeyDataOfs((short) b->_alloc(key.dataSize()) );
char *p = b->dataAt(kn.keyDataOfs());
- getDur().declareWriteIntent(p, key.objsize());
- memcpy(p, key.objdata(), key.objsize());
+ getDur().declareWriteIntent(p, key.dataSize());
+ memcpy(p, key.data(), key.dataSize());
return true;
}
- /** with this implementation, refPos == 0 disregards effect of refPos */
- bool BucketBasics::mayDropKey( int index, int refPos ) const {
+ /**
+ * With this implementation, refPos == 0 disregards effect of refPos.
+ * index > 0 prevents creation of an empty bucket.
+ */
+ template< class V >
+ bool BucketBasics<V>::mayDropKey( int index, int refPos ) const {
return index > 0 && ( index != refPos ) && k( index ).isUnused() && k( index ).prevChildBucket.isNull();
}
- int BucketBasics::packedDataSize( int refPos ) const {
- if ( flags & Packed ) {
- return BucketSize - emptySize - headerSize();
+ template< class V >
+ int BucketBasics<V>::packedDataSize( int refPos ) const {
+ if ( this->flags & Packed ) {
+ return V::BucketSize - this->emptySize - headerSize();
}
int size = 0;
- for( int j = 0; j < n; ++j ) {
+ for( int j = 0; j < this->n; ++j ) {
if ( mayDropKey( j, refPos ) ) {
continue;
}
- size += keyNode( j ).key.objsize() + sizeof( _KeyNode );
+ size += keyNode( j ).key.dataSize() + sizeof( _KeyNode );
}
return size;
}
@@ -405,8 +437,9 @@ namespace mongo {
* when we delete things we just leave empty space until the node is
* full and then we repack it.
*/
- void BucketBasics::_pack(const DiskLoc thisLoc, const Ordering &order, int &refPos) const {
- if ( flags & Packed )
+ template< class V >
+ void BucketBasics<V>::_pack(const DiskLoc thisLoc, const Ordering &order, int &refPos) const {
+ if ( this->flags & Packed )
return;
VERIFYTHISLOC
@@ -416,22 +449,23 @@ namespace mongo {
declaration anyway within the group commit interval, in which case we would just be adding
code and complexity without benefit.
*/
- thisLoc.btreemod()->_packReadyForMod(order, refPos);
+ thisLoc.btreemod<V>()->_packReadyForMod(order, refPos);
}
/** version when write intent already declared */
- void BucketBasics::_packReadyForMod( const Ordering &order, int &refPos ) {
+ template< class V >
+ void BucketBasics<V>::_packReadyForMod( const Ordering &order, int &refPos ) {
assertWritable();
- if ( flags & Packed )
+ if ( this->flags & Packed )
return;
int tdz = totalDataSize();
- char temp[BucketSize];
+ char temp[V::BucketSize];
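+        // temp receives the surviving keys' bytes laid out back-to-front; the
+        // used tail is copied back over this->data once every key is placed.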
int ofs = tdz;
- topSize = 0;
+ this->topSize = 0;
int i = 0;
- for ( int j = 0; j < n; j++ ) {
+ for ( int j = 0; j < this->n; j++ ) {
if( mayDropKey( j, refPos ) ) {
continue; // key is unused and has no children - drop it
}
@@ -442,36 +476,40 @@ namespace mongo {
k( i ) = k( j );
}
short ofsold = k(i).keyDataOfs();
- int sz = keyNode(i).key.objsize();
+ int sz = keyNode(i).key.dataSize();
ofs -= sz;
- topSize += sz;
+ this->topSize += sz;
memcpy(temp+ofs, dataAt(ofsold), sz);
k(i).setKeyDataOfsSavingUse( ofs );
++i;
}
- if ( refPos == n ) {
+ if ( refPos == this->n ) {
refPos = i;
}
- n = i;
+ this->n = i;
int dataUsed = tdz - ofs;
- memcpy(data + ofs, temp + ofs, dataUsed);
+ memcpy(this->data + ofs, temp + ofs, dataUsed);
// assertWritable();
// TEMP TEST getDur().declareWriteIntent(this, sizeof(*this));
- emptySize = tdz - dataUsed - n * sizeof(_KeyNode);
- assert( emptySize >= 0 );
+ this->emptySize = tdz - dataUsed - this->n * sizeof(_KeyNode);
+ {
+ int foo = this->emptySize;
+ assert( foo >= 0 );
+ }
setPacked();
assertValid( order );
}
- inline void BucketBasics::truncateTo(int N, const Ordering &order, int &refPos) {
+ template< class V >
+ inline void BucketBasics<V>::truncateTo(int N, const Ordering &order, int &refPos) {
dbMutex.assertWriteLocked();
assertWritable();
- n = N;
+ this->n = N;
setNotPacked();
_packReadyForMod( order, refPos );
}
@@ -489,19 +527,21 @@ namespace mongo {
* We just have a simple algorithm right now: if a key includes the
* halfway point (or 10% way point) in terms of bytes, split on that key;
* otherwise split on the key immediately to the left of the halfway
- * point.
+ * point (or 10% point).
*
* This function is expected to be called on a packed bucket.
*/
- int BucketBasics::splitPos( int keypos ) const {
- assert( n > 2 );
+ template< class V >
+ int BucketBasics<V>::splitPos( int keypos ) const {
+ assert( this->n > 2 );
int split = 0;
int rightSize = 0;
// when splitting a btree node, if the new key is greater than all the other keys, we should not do an even split, but a 90/10 split.
// see SERVER-983
- int rightSizeLimit = ( topSize + sizeof( _KeyNode ) * n ) / ( keypos == n ? 10 : 2 );
- for( int i = n - 1; i > -1; --i ) {
- rightSize += keyNode( i ).key.objsize() + sizeof( _KeyNode );
+ // TODO I think we only want to do the 90% split on the rhs node of the tree.
+ int rightSizeLimit = ( this->topSize + sizeof( _KeyNode ) * this->n ) / ( keypos == this->n ? 10 : 2 );
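+        // rightSize accumulates from the rightmost key, so the keys that move to
+        // the new right bucket total roughly rightSizeLimit bytes at most:
+        // ~50% normally, ~10% when appending at the end (keypos == n).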
+ for( int i = this->n - 1; i > -1; --i ) {
+ rightSize += keyNode( i ).key.dataSize() + sizeof( _KeyNode );
if ( rightSize > rightSizeLimit ) {
split = i;
break;
@@ -511,37 +551,40 @@ namespace mongo {
if ( split < 1 ) {
split = 1;
}
- else if ( split > n - 2 ) {
- split = n - 2;
+ else if ( split > this->n - 2 ) {
+ split = this->n - 2;
}
return split;
}
- void BucketBasics::reserveKeysFront( int nAdd ) {
- assert( emptySize >= int( sizeof( _KeyNode ) * nAdd ) );
- emptySize -= sizeof( _KeyNode ) * nAdd;
- for( int i = n - 1; i > -1; --i ) {
+ template< class V >
+ void BucketBasics<V>::reserveKeysFront( int nAdd ) {
+ assert( this->emptySize >= int( sizeof( _KeyNode ) * nAdd ) );
+ this->emptySize -= sizeof( _KeyNode ) * nAdd;
+ for( int i = this->n - 1; i > -1; --i ) {
k( i + nAdd ) = k( i );
}
- n += nAdd;
+ this->n += nAdd;
}
- void BucketBasics::setKey( int i, const DiskLoc recordLoc, const BSONObj &key, const DiskLoc prevChildBucket ) {
+ template< class V >
+ void BucketBasics<V>::setKey( int i, const DiskLoc recordLoc, const Key &key, const DiskLoc prevChildBucket ) {
_KeyNode &kn = k( i );
kn.recordLoc = recordLoc;
kn.prevChildBucket = prevChildBucket;
- short ofs = (short) _alloc( key.objsize() );
+ short ofs = (short) _alloc( key.dataSize() );
kn.setKeyDataOfs( ofs );
char *p = dataAt( ofs );
- memcpy( p, key.objdata(), key.objsize() );
+ memcpy( p, key.data(), key.dataSize() );
}
- void BucketBasics::dropFront( int nDrop, const Ordering &order, int &refpos ) {
- for( int i = nDrop; i < n; ++i ) {
+ template< class V >
+ void BucketBasics<V>::dropFront( int nDrop, const Ordering &order, int &refpos ) {
+ for( int i = nDrop; i < this->n; ++i ) {
k( i - nDrop ) = k( i );
}
- n -= nDrop;
+ this->n -= nDrop;
setNotPacked();
_packReadyForMod( order, refpos );
}
@@ -549,10 +592,11 @@ namespace mongo {
/* - BtreeBucket --------------------------------------------------- */
/** @return largest key in the subtree. */
- void BtreeBucket::findLargestKey(const DiskLoc& thisLoc, DiskLoc& largestLoc, int& largestKey) {
+ template< class V >
+ void BtreeBucket<V>::findLargestKey(const DiskLoc& thisLoc, DiskLoc& largestLoc, int& largestKey) {
DiskLoc loc = thisLoc;
while ( 1 ) {
- const BtreeBucket *b = loc.btree();
+ const BtreeBucket *b = loc.btree<V>();
if ( !b->nextChild.isNull() ) {
loc = b->nextChild;
continue;
@@ -571,8 +615,16 @@ namespace mongo {
* not have more keys than an unsigned variable has bits. The same
* assumption is used in the implementation below with respect to the 'mask'
* variable.
+ *
+ * @param l a regular bsonobj
+ * @param rBegin composed partly of an existing bsonobj, and the remaining keys are taken from a vector of elements that frequently changes
+ *
+ * see
+ * jstests/index_check6.js
+ * https://jira.mongodb.org/browse/SERVER-371
*/
- int BtreeBucket::customBSONCmp( const BSONObj &l, const BSONObj &rBegin, int rBeginLen, bool rSup, const vector< const BSONElement * > &rEnd, const vector< bool > &rEndInclusive, const Ordering &o, int direction ) {
+ template< class V >
+ int BtreeBucket<V>::customBSONCmp( const BSONObj &l, const BSONObj &rBegin, int rBeginLen, bool rSup, const vector< const BSONElement * > &rEnd, const vector< bool > &rEndInclusive, const Ordering &o, int direction ) {
BSONObjIterator ll( l );
BSONObjIterator rr( rBegin );
vector< const BSONElement * >::const_iterator rr2 = rEnd.begin();
@@ -610,31 +662,29 @@ namespace mongo {
return 0;
}
- bool BtreeBucket::exists(const IndexDetails& idx, const DiskLoc &thisLoc, const BSONObj& key, const Ordering& order) const {
- int pos;
- bool found;
- DiskLoc b = locate(idx, thisLoc, key, order, pos, found, minDiskLoc);
+ template< class V >
+ bool BtreeBucket<V>::exists(const IndexDetails& idx, const DiskLoc &thisLoc, const Key& key, const Ordering& order) const {
+ int pos;
+ bool found;
+ DiskLoc b = locate(idx, thisLoc, key, order, pos, found, minDiskLoc);
- // skip unused keys
- while ( 1 ) {
- if( b.isNull() )
- break;
- const BtreeBucket *bucket = b.btree();
- const _KeyNode& kn = bucket->k(pos);
- if ( kn.isUsed() )
- return bucket->keyAt(pos).woEqual(key);
- b = bucket->advance(b, pos, 1, "BtreeBucket::exists");
+ // skip unused keys
+ while ( 1 ) {
+ if( b.isNull() )
+ break;
+ const BtreeBucket *bucket = b.btree<V>();
+ const _KeyNode& kn = bucket->k(pos);
+ if ( kn.isUsed() )
+ return bucket->keyAt(pos).woEqual(key);
+ b = bucket->advance(b, pos, 1, "BtreeBucket<V>::exists");
}
return false;
}
- /**
- * @param self - don't complain about ourself already being in the index case.
- * @return true = there is a duplicate.
- */
- bool BtreeBucket::wouldCreateDup(
+ template< class V >
+ bool BtreeBucket<V>::wouldCreateDup(
const IndexDetails& idx, const DiskLoc &thisLoc,
- const BSONObj& key, const Ordering& order,
+ const Key& key, const Ordering& order,
const DiskLoc &self) const {
int pos;
bool found;
@@ -642,24 +692,25 @@ namespace mongo {
while ( !b.isNull() ) {
// we skip unused keys
- const BtreeBucket *bucket = b.btree();
+ const BtreeBucket *bucket = b.btree<V>();
const _KeyNode& kn = bucket->k(pos);
if ( kn.isUsed() ) {
if( bucket->keyAt(pos).woEqual(key) )
return kn.recordLoc != self;
break;
}
- b = bucket->advance(b, pos, 1, "BtreeBucket::dupCheck");
+ b = bucket->advance(b, pos, 1, "BtreeBucket<V>::dupCheck");
}
return false;
}
- string BtreeBucket::dupKeyError( const IndexDetails& idx , const BSONObj& key ) {
+ template< class V >
+ string BtreeBucket<V>::dupKeyError( const IndexDetails& idx , const Key& key ) {
stringstream ss;
ss << "E11000 duplicate key error ";
ss << "index: " << idx.indexNamespace() << " ";
- ss << "dup key: " << key;
+ ss << "dup key: " << key.toString();
return ss.str();
}
@@ -677,30 +728,20 @@ namespace mongo {
* returns n if it goes after the last existing key.
* note result might be an Unused location!
*/
- char foo;
- bool BtreeBucket::find(const IndexDetails& idx, const BSONObj& key, const DiskLoc &recordLoc, const Ordering &order, int& pos, bool assertIfDup) const {
-#if defined(_EXPERIMENT1)
- {
- char *z = (char *) this;
- int i = 0;
- while( 1 ) {
- i += 4096;
- if( i >= BucketSize )
- break;
- foo += z[i];
- }
- }
-#endif
-
+ template< class V >
+ bool BtreeBucket<V>::find(const IndexDetails& idx, const Key& key, const DiskLoc &rl,
+ const Ordering &order, int& pos, bool assertIfDup) const {
+ Loc recordLoc;
+ recordLoc = rl;
globalIndexCounters.btree( (char*)this );
// binary search for this key
bool dupsChecked = false;
int l=0;
- int h=n-1;
+ int h=this->n-1;
while ( l <= h ) {
int m = (l+h)/2;
- KeyNode M = keyNode(m);
+ KeyNode M = this->keyNode(m);
int x = key.woCompare(M.key, order);
if ( x == 0 ) {
if( assertIfDup ) {
@@ -710,8 +751,8 @@ namespace mongo {
// coding effort in here to make this particularly fast
if( !dupsChecked ) {
dupsChecked = true;
- if( idx.head.btree()->exists(idx, idx.head, key, order) ) {
- if( idx.head.btree()->wouldCreateDup(idx, idx.head, key, order, recordLoc) )
+ if( idx.head.btree<V>()->exists(idx, idx.head, key, order) ) {
+ if( idx.head.btree<V>()->wouldCreateDup(idx, idx.head, key, order, recordLoc) )
uasserted( ASSERT_ID_DUPKEY , dupKeyError( idx , key ) );
else
alreadyInIndex();
@@ -726,7 +767,7 @@ namespace mongo {
}
// dup keys allowed. use recordLoc as if it is part of the key
- DiskLoc unusedRL = M.recordLoc;
+ Loc unusedRL = M.recordLoc;
unusedRL.GETOFS() &= ~1; // so we can test equality without the used bit messing us up
x = recordLoc.compare(unusedRL);
}
@@ -742,49 +783,59 @@ namespace mongo {
}
// not found
pos = l;
- if ( pos != n ) {
- BSONObj keyatpos = keyNode(pos).key;
+ if ( pos != this->n ) {
+ Key keyatpos = keyNode(pos).key;
wassert( key.woCompare(keyatpos, order) <= 0 );
if ( pos > 0 ) {
- wassert( keyNode(pos-1).key.woCompare(key, order) <= 0 );
+ if( !( keyNode(pos-1).key.woCompare(key, order) <= 0 ) ) {
+ DEV {
+ log() << key.toString() << endl;
+ log() << keyNode(pos-1).key.toString() << endl;
+ }
+ wassert(false);
+ }
}
}
return false;
}
- void BtreeBucket::delBucket(const DiskLoc thisLoc, const IndexDetails& id) {
+ template< class V >
+ void BtreeBucket<V>::delBucket(const DiskLoc thisLoc, const IndexDetails& id) {
ClientCursor::informAboutToDeleteBucket(thisLoc); // slow...
assert( !isHead() );
- const BtreeBucket *p = parent.btree();
+ DiskLoc ll = this->parent;
+ const BtreeBucket *p = ll.btree<V>();
int parentIdx = indexInParent( thisLoc );
p->childForPos( parentIdx ).writing().Null();
deallocBucket( thisLoc, id );
}
- void BtreeBucket::deallocBucket(const DiskLoc thisLoc, const IndexDetails &id) {
+ template< class V >
+ void BtreeBucket<V>::deallocBucket(const DiskLoc thisLoc, const IndexDetails &id) {
#if 0
// as a temporary defensive measure, we zap the whole bucket, AND don't truly delete
// it (meaning it is ineligible for reuse).
memset(this, 0, Size());
#else
// defensive:
- n = -1;
- parent.Null();
+ this->n = -1;
+ this->parent.Null();
string ns = id.indexNamespace();
theDataFileMgr._deleteRecord(nsdetails(ns.c_str()), ns.c_str(), thisLoc.rec(), thisLoc);
#endif
}
    /** note: may delete the entire bucket! 'this' may be invalid upon return. */
- void BtreeBucket::delKeyAtPos( const DiskLoc thisLoc, IndexDetails& id, int p, const Ordering &order) {
- assert(n>0);
- DiskLoc left = childForPos(p);
-
- if ( n == 1 ) {
- if ( left.isNull() && nextChild.isNull() ) {
- _delKeyAtPos(p);
+ template< class V >
+ void BtreeBucket<V>::delKeyAtPos( const DiskLoc thisLoc, IndexDetails& id, int p, const Ordering &order) {
+ assert(this->n>0);
+ DiskLoc left = this->childForPos(p);
+
+ if ( this->n == 1 ) {
+ if ( left.isNull() && this->nextChild.isNull() ) {
+ this->_delKeyAtPos(p);
if ( isHead() ) {
// we don't delete the top bucket ever
}
@@ -803,7 +854,7 @@ namespace mongo {
}
if ( left.isNull() ) {
- _delKeyAtPos(p);
+ this->_delKeyAtPos(p);
mayBalanceWithNeighbors( thisLoc, id, order );
}
else {
@@ -833,53 +884,71 @@ namespace mongo {
* k by k', preserving the key's unused marking. This function is only
* expected to mark a key as unused when handling a legacy btree.
*/
- void BtreeBucket::deleteInternalKey( const DiskLoc thisLoc, int keypos, IndexDetails &id, const Ordering &order ) {
- DiskLoc lchild = childForPos( keypos );
- DiskLoc rchild = childForPos( keypos + 1 );
+ template< class V >
+ void BtreeBucket<V>::deleteInternalKey( const DiskLoc thisLoc, int keypos, IndexDetails &id, const Ordering &order ) {
+ DiskLoc lchild = this->childForPos( keypos );
+ DiskLoc rchild = this->childForPos( keypos + 1 );
assert( !lchild.isNull() || !rchild.isNull() );
int advanceDirection = lchild.isNull() ? 1 : -1;
int advanceKeyOfs = keypos;
DiskLoc advanceLoc = advance( thisLoc, advanceKeyOfs, advanceDirection, __FUNCTION__ );
-
- if ( !advanceLoc.btree()->childForPos( advanceKeyOfs ).isNull() ||
- !advanceLoc.btree()->childForPos( advanceKeyOfs + 1 ).isNull() ) {
+        // advanceLoc must be a descendant of thisLoc, because thisLoc has a
+ // child in the proper direction and all descendants of thisLoc must be
+ // nonempty because they are not the root.
+
+ if ( !advanceLoc.btree<V>()->childForPos( advanceKeyOfs ).isNull() ||
+ !advanceLoc.btree<V>()->childForPos( advanceKeyOfs + 1 ).isNull() ) {
// only expected with legacy btrees, see note above
- markUnused( keypos );
+ this->markUnused( keypos );
return;
}
- KeyNode kn = advanceLoc.btree()->keyNode( advanceKeyOfs );
- setInternalKey( thisLoc, keypos, kn.recordLoc, kn.key, order, childForPos( keypos ), childForPos( keypos + 1 ), id );
- advanceLoc.btreemod()->delKeyAtPos( advanceLoc, id, advanceKeyOfs, order );
+ KeyNode kn = advanceLoc.btree<V>()->keyNode( advanceKeyOfs );
+ // Because advanceLoc is a descendant of thisLoc, updating thisLoc will
+ // not affect packing or keys of advanceLoc and kn will be stable
+ // during the following setInternalKey()
+ setInternalKey( thisLoc, keypos, kn.recordLoc, kn.key, order, this->childForPos( keypos ), this->childForPos( keypos + 1 ), id );
+ advanceLoc.btreemod<V>()->delKeyAtPos( advanceLoc, id, advanceKeyOfs, order );
}
- void BtreeBucket::replaceWithNextChild( const DiskLoc thisLoc, IndexDetails &id ) {
- assert( n == 0 && !nextChild.isNull() );
- if ( parent.isNull() ) {
+#define BTREE(loc) (loc.template btree<V>())
+#define BTREEMOD(loc) (loc.template btreemod<V>())
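+// (The 'template' keyword is required above because btree<V>() and btreemod<V>()
+// are member templates invoked on expressions whose type may depend on V.)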
+
+ template< class V >
+ void BtreeBucket<V>::replaceWithNextChild( const DiskLoc thisLoc, IndexDetails &id ) {
+ assert( this->n == 0 && !this->nextChild.isNull() );
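+        // The bucket is empty, so splice it out: whatever referenced thisLoc
+        // (the index head or the parent's child slot) is pointed at nextChild,
+        // and this bucket is then deallocated.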
+ if ( this->parent.isNull() ) {
assert( id.head == thisLoc );
- id.head.writing() = nextChild;
+ id.head.writing() = this->nextChild;
}
else {
- parent.btree()->childForPos( indexInParent( thisLoc ) ).writing() = nextChild;
+ DiskLoc ll = this->parent;
+ ll.btree<V>()->childForPos( indexInParent( thisLoc ) ).writing() = this->nextChild;
}
- nextChild.btree()->parent.writing() = parent;
+ BTREE(this->nextChild)->parent.writing() = this->parent;
ClientCursor::informAboutToDeleteBucket( thisLoc );
deallocBucket( thisLoc, id );
}
- bool BtreeBucket::canMergeChildren( const DiskLoc &thisLoc, int leftIndex ) const {
- assert( leftIndex >= 0 && leftIndex < n );
- DiskLoc leftNodeLoc = childForPos( leftIndex );
- DiskLoc rightNodeLoc = childForPos( leftIndex + 1 );
+ template< class V >
+ bool BtreeBucket<V>::canMergeChildren( const DiskLoc &thisLoc, int leftIndex ) const {
+ assert( leftIndex >= 0 && leftIndex < this->n );
+ DiskLoc leftNodeLoc = this->childForPos( leftIndex );
+ DiskLoc rightNodeLoc = this->childForPos( leftIndex + 1 );
if ( leftNodeLoc.isNull() || rightNodeLoc.isNull() ) {
// TODO if this situation is possible in long term implementation, maybe we should compact somehow anyway
return false;
}
int pos = 0;
{
- const BtreeBucket *l = leftNodeLoc.btree();
- const BtreeBucket *r = rightNodeLoc.btree();
- if ( ( headerSize() + l->packedDataSize( pos ) + r->packedDataSize( pos ) + keyNode( leftIndex ).key.objsize() + sizeof(_KeyNode) > unsigned( BucketSize ) ) ) {
+ const BtreeBucket *l = leftNodeLoc.btree<V>();
+ const BtreeBucket *r = rightNodeLoc.btree<V>();
+ if ( ( this->headerSize() + l->packedDataSize( pos ) + r->packedDataSize( pos ) + keyNode( leftIndex ).key.dataSize() + sizeof(_KeyNode) > unsigned( V::BucketSize ) ) ) {
return false;
}
}
@@ -890,33 +959,34 @@ namespace mongo {
* This implementation must respect the meaning and value of lowWaterMark.
* Also see comments in splitPos().
*/
- int BtreeBucket::rebalancedSeparatorPos( const DiskLoc &thisLoc, int leftIndex ) const {
+ template< class V >
+ int BtreeBucket<V>::rebalancedSeparatorPos( const DiskLoc &thisLoc, int leftIndex ) const {
int split = -1;
int rightSize = 0;
- const BtreeBucket *l = childForPos( leftIndex ).btree();
- const BtreeBucket *r = childForPos( leftIndex + 1 ).btree();
+ const BtreeBucket *l = BTREE(this->childForPos( leftIndex ));
+ const BtreeBucket *r = BTREE(this->childForPos( leftIndex + 1 ));
int KNS = sizeof( _KeyNode );
- int rightSizeLimit = ( l->topSize + l->n * KNS + keyNode( leftIndex ).key.objsize() + KNS + r->topSize + r->n * KNS ) / 2;
+ int rightSizeLimit = ( l->topSize + l->n * KNS + keyNode( leftIndex ).key.dataSize() + KNS + r->topSize + r->n * KNS ) / 2;
// This constraint should be ensured by only calling this function
// if we go below the low water mark.
- assert( rightSizeLimit < BtreeBucket::bodySize() );
+ assert( rightSizeLimit < BtreeBucket<V>::bodySize() );
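+        // 'split' indexes the virtual sequence [left child keys, separator key,
+        // right child keys]: values below l->n refer to the left child, l->n is
+        // the separator held in this bucket, larger values refer to the right child.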
for( int i = r->n - 1; i > -1; --i ) {
- rightSize += r->keyNode( i ).key.objsize() + KNS;
+ rightSize += r->keyNode( i ).key.dataSize() + KNS;
if ( rightSize > rightSizeLimit ) {
split = l->n + 1 + i;
break;
}
}
if ( split == -1 ) {
- rightSize += keyNode( leftIndex ).key.objsize() + KNS;
+ rightSize += keyNode( leftIndex ).key.dataSize() + KNS;
if ( rightSize > rightSizeLimit ) {
split = l->n;
}
}
if ( split == -1 ) {
for( int i = l->n - 1; i > -1; --i ) {
- rightSize += l->keyNode( i ).key.objsize() + KNS;
+ rightSize += l->keyNode( i ).key.dataSize() + KNS;
if ( rightSize > rightSizeLimit ) {
split = i;
break;
@@ -934,15 +1004,18 @@ namespace mongo {
return split;
}
- void BtreeBucket::doMergeChildren( const DiskLoc thisLoc, int leftIndex, IndexDetails &id, const Ordering &order ) {
- DiskLoc leftNodeLoc = childForPos( leftIndex );
- DiskLoc rightNodeLoc = childForPos( leftIndex + 1 );
- BtreeBucket *l = leftNodeLoc.btreemod();
- BtreeBucket *r = rightNodeLoc.btreemod();
+ template< class V >
+ void BtreeBucket<V>::doMergeChildren( const DiskLoc thisLoc, int leftIndex, IndexDetails &id, const Ordering &order ) {
+ DiskLoc leftNodeLoc = this->childForPos( leftIndex );
+ DiskLoc rightNodeLoc = this->childForPos( leftIndex + 1 );
+ BtreeBucket *l = leftNodeLoc.btreemod<V>();
+ BtreeBucket *r = rightNodeLoc.btreemod<V>();
int pos = 0;
l->_packReadyForMod( order, pos );
r->_packReadyForMod( order, pos ); // pack r in case there are droppable keys
+ // We know the additional keys below will fit in l because canMergeChildren()
+ // must be true.
int oldLNum = l->n;
{
KeyNode kn = keyNode( leftIndex );
@@ -955,10 +1028,10 @@ namespace mongo {
l->nextChild = r->nextChild;
l->fixParentPtrs( leftNodeLoc, oldLNum );
r->delBucket( rightNodeLoc, id );
- childForPos( leftIndex + 1 ) = leftNodeLoc;
- childForPos( leftIndex ) = DiskLoc();
- _delKeyAtPos( leftIndex, true );
- if ( n == 0 ) {
+ this->childForPos( leftIndex + 1 ) = leftNodeLoc;
+ this->childForPos( leftIndex ) = DiskLoc();
+ this->_delKeyAtPos( leftIndex, true );
+ if ( this->n == 0 ) {
// will trash this and thisLoc
// TODO To ensure all leaves are of equal height, we should ensure
// this is only called on the root.
@@ -970,9 +1043,10 @@ namespace mongo {
}
}
- int BtreeBucket::indexInParent( const DiskLoc &thisLoc ) const {
- assert( !parent.isNull() );
- const BtreeBucket *p = parent.btree();
+ template< class V >
+ int BtreeBucket<V>::indexInParent( const DiskLoc &thisLoc ) const {
+ assert( !this->parent.isNull() );
+ const BtreeBucket *p = BTREE(this->parent);
if ( p->nextChild == thisLoc ) {
return p->n;
}
@@ -986,27 +1060,33 @@ namespace mongo {
out() << "ERROR: can't find ref to child bucket.\n";
out() << "child: " << thisLoc << "\n";
dump();
- out() << "Parent: " << parent << "\n";
+ out() << "Parent: " << this->parent << "\n";
p->dump();
assert(false);
return -1; // just to compile
}
- bool BtreeBucket::tryBalanceChildren( const DiskLoc thisLoc, int leftIndex, IndexDetails &id, const Ordering &order ) const {
+ template< class V >
+ bool BtreeBucket<V>::tryBalanceChildren( const DiskLoc thisLoc, int leftIndex, IndexDetails &id, const Ordering &order ) const {
// If we can merge, then we must merge rather than balance to preserve
// bucket utilization constraints.
if ( canMergeChildren( thisLoc, leftIndex ) ) {
return false;
}
- thisLoc.btreemod()->doBalanceChildren( thisLoc, leftIndex, id, order );
+ thisLoc.btreemod<V>()->doBalanceChildren( thisLoc, leftIndex, id, order );
return true;
}
- void BtreeBucket::doBalanceLeftToRight( const DiskLoc thisLoc, int leftIndex, int split,
+ template< class V >
+ void BtreeBucket<V>::doBalanceLeftToRight( const DiskLoc thisLoc, int leftIndex, int split,
BtreeBucket *l, const DiskLoc lchild,
BtreeBucket *r, const DiskLoc rchild,
IndexDetails &id, const Ordering &order ) {
// TODO maybe do some audits the same way pushBack() does?
+ // As a precondition, rchild + the old separator are <= half a body size,
+ // and lchild is at most completely full. Based on the value of split,
+ // rchild will get <= half of the total bytes which is at most 75%
+ // of a full body. So rchild will have room for the following keys:
int rAdd = l->n - split;
r->reserveKeysFront( rAdd );
for( int i = split + 1, j = 0; i < l->n; ++i, ++j ) {
@@ -1021,16 +1101,26 @@ namespace mongo {
{
KeyNode kn = l->keyNode( split );
l->nextChild = kn.prevChildBucket;
+ // Because lchild is a descendant of thisLoc, updating thisLoc will
+            // not affect packing or keys of lchild and kn will be stable
+ // during the following setInternalKey()
setInternalKey( thisLoc, leftIndex, kn.recordLoc, kn.key, order, lchild, rchild, id );
}
int zeropos = 0;
+ // lchild and rchild cannot be merged, so there must be >0 (actually more)
+ // keys to the left of split.
l->truncateTo( split, order, zeropos );
}
- void BtreeBucket::doBalanceRightToLeft( const DiskLoc thisLoc, int leftIndex, int split,
+ template< class V >
+ void BtreeBucket<V>::doBalanceRightToLeft( const DiskLoc thisLoc, int leftIndex, int split,
BtreeBucket *l, const DiskLoc lchild,
BtreeBucket *r, const DiskLoc rchild,
IndexDetails &id, const Ordering &order ) {
+ // As a precondition, lchild + the old separator are <= half a body size,
+ // and rchild is at most completely full. Based on the value of split,
+ // lchild will get less than half of the total bytes which is at most 75%
+ // of a full body. So lchild will have room for the following keys:
int lN = l->n;
{
KeyNode kn = keyNode( leftIndex );
@@ -1043,20 +1133,27 @@ namespace mongo {
{
KeyNode kn = r->keyNode( split - lN - 1 );
l->nextChild = kn.prevChildBucket;
+            // Child lN was lchild's old nextChild, so we don't need to fix that one.
l->fixParentPtrs( lchild, lN + 1, l->n );
+ // Because rchild is a descendant of thisLoc, updating thisLoc will
+ // not affect packing or keys of rchild and kn will be stable
+ // during the following setInternalKey()
setInternalKey( thisLoc, leftIndex, kn.recordLoc, kn.key, order, lchild, rchild, id );
}
int zeropos = 0;
+ // lchild and rchild cannot be merged, so there must be >0 (actually more)
+ // keys to the right of split.
r->dropFront( split - lN, order, zeropos );
}
- void BtreeBucket::doBalanceChildren( const DiskLoc thisLoc, int leftIndex, IndexDetails &id, const Ordering &order ) {
- DiskLoc lchild = childForPos( leftIndex );
- DiskLoc rchild = childForPos( leftIndex + 1 );
+ template< class V >
+ void BtreeBucket<V>::doBalanceChildren( const DiskLoc thisLoc, int leftIndex, IndexDetails &id, const Ordering &order ) {
+ DiskLoc lchild = this->childForPos( leftIndex );
+ DiskLoc rchild = this->childForPos( leftIndex + 1 );
int zeropos = 0;
- BtreeBucket *l = lchild.btreemod();
+ BtreeBucket *l = lchild.btreemod<V>();
l->_packReadyForMod( order, zeropos );
- BtreeBucket *r = rchild.btreemod();
+ BtreeBucket *r = rchild.btreemod<V>();
r->_packReadyForMod( order, zeropos );
int split = rebalancedSeparatorPos( thisLoc, leftIndex );
@@ -1071,16 +1168,17 @@ namespace mongo {
}
}
- bool BtreeBucket::mayBalanceWithNeighbors( const DiskLoc thisLoc, IndexDetails &id, const Ordering &order ) const {
- if ( parent.isNull() ) { // we are root, there are no neighbors
+ template< class V >
+ bool BtreeBucket<V>::mayBalanceWithNeighbors( const DiskLoc thisLoc, IndexDetails &id, const Ordering &order ) const {
+ if ( this->parent.isNull() ) { // we are root, there are no neighbors
return false;
}
- if ( packedDataSize( 0 ) >= lowWaterMark ) {
+ if ( this->packedDataSize( 0 ) >= this->lowWaterMark() ) {
return false;
}
- const BtreeBucket *p = parent.btree();
+ const BtreeBucket *p = BTREE(this->parent);
int parentIdx = indexInParent( thisLoc );
// TODO will missing neighbor case be possible long term? Should we try to merge/balance somehow in that case if so?
@@ -1091,21 +1189,21 @@ namespace mongo {
// to preserve btree bucket utilization constraints since that's a more
// heavy duty operation (especially if we must re-split later).
if ( mayBalanceRight &&
- p->tryBalanceChildren( parent, parentIdx, id, order ) ) {
+ p->tryBalanceChildren( this->parent, parentIdx, id, order ) ) {
return true;
}
if ( mayBalanceLeft &&
- p->tryBalanceChildren( parent, parentIdx - 1, id, order ) ) {
+ p->tryBalanceChildren( this->parent, parentIdx - 1, id, order ) ) {
return true;
}
- BtreeBucket *pm = parent.btreemod();
+ BtreeBucket *pm = BTREEMOD(this->parent);
if ( mayBalanceRight ) {
- pm->doMergeChildren( parent, parentIdx, id, order );
+ pm->doMergeChildren( this->parent, parentIdx, id, order );
return true;
}
else if ( mayBalanceLeft ) {
- pm->doMergeChildren( parent, parentIdx - 1, id, order );
+ pm->doMergeChildren( this->parent, parentIdx - 1, id, order );
return true;
}
@@ -1113,64 +1211,70 @@ namespace mongo {
}
/** remove a key from the index */
- bool BtreeBucket::unindex(const DiskLoc thisLoc, IndexDetails& id, const BSONObj& key, const DiskLoc recordLoc ) const {
+ template< class V >
+ bool BtreeBucket<V>::unindex(const DiskLoc thisLoc, IndexDetails& id, const BSONObj& key, const DiskLoc recordLoc ) const {
int pos;
bool found;
- DiskLoc loc = locate(id, thisLoc, key, Ordering::make(id.keyPattern()), pos, found, recordLoc, 1);
+ const Ordering ord = Ordering::make(id.keyPattern());
+ DiskLoc loc = locate(id, thisLoc, key, ord, pos, found, recordLoc, 1);
if ( found ) {
-
- if ( key.objsize() > KeyMax ) {
+ if ( key.objsize() > this->KeyMax ) {
OCCASIONALLY problem() << "unindex: key too large to index but was found for " << id.indexNamespace() << " reIndex suggested" << endl;
- }
-
- loc.btreemod()->delKeyAtPos(loc, id, pos, Ordering::make(id.keyPattern()));
-
+ }
+ loc.btreemod<V>()->delKeyAtPos(loc, id, pos, ord);
return true;
}
return false;
}
- BtreeBucket* BtreeBucket::allocTemp() {
- BtreeBucket *b = (BtreeBucket*) malloc(BucketSize);
+ template< class V >
+ BtreeBucket<V> * BtreeBucket<V>::allocTemp() {
+ BtreeBucket *b = (BtreeBucket*) malloc(V::BucketSize);
b->init();
return b;
}
- inline void BtreeBucket::fix(const DiskLoc thisLoc, const DiskLoc child) {
+ template< class V >
+ inline void BtreeBucket<V>::fix(const DiskLoc thisLoc, const DiskLoc child) {
if ( !child.isNull() ) {
if ( insert_debug )
- out() << " " << child.toString() << ".parent=" << thisLoc.toString() << endl;
- child.btree()->parent.writing() = thisLoc;
+ out() << " fix " << child.toString() << ".parent=" << thisLoc.toString() << endl;
+ child.btree<V>()->parent.writing() = thisLoc;
}
}
- /** this sucks. maybe get rid of parent ptrs. */
- void BtreeBucket::fixParentPtrs(const DiskLoc thisLoc, int firstIndex, int lastIndex) const {
+ /**
+ * This can cause a lot of additional page writes when we assign buckets to
+ * different parents. Maybe get rid of parent ptrs?
+ */
+ template< class V >
+ void BtreeBucket<V>::fixParentPtrs(const DiskLoc thisLoc, int firstIndex, int lastIndex) const {
VERIFYTHISLOC
if ( lastIndex == -1 ) {
- lastIndex = n;
+ lastIndex = this->n;
}
for ( int i = firstIndex; i <= lastIndex; i++ ) {
- fix(thisLoc, childForPos(i));
+ fix(thisLoc, this->childForPos(i));
}
}
- void BtreeBucket::setInternalKey( const DiskLoc thisLoc, int keypos,
- const DiskLoc recordLoc, const BSONObj &key, const Ordering &order,
+ template< class V >
+ void BtreeBucket<V>::setInternalKey( const DiskLoc thisLoc, int keypos,
+ const DiskLoc recordLoc, const Key &key, const Ordering &order,
const DiskLoc lchild, const DiskLoc rchild, IndexDetails &idx ) {
- childForPos( keypos ).Null();
+ this->childForPos( keypos ).Null();
// This may leave the bucket empty (n == 0) which is ok only as a
// transient state. In the instant case, the implementation of
// insertHere behaves correctly when n == 0 and as a side effect
// increments n.
- _delKeyAtPos( keypos, true );
+ this->_delKeyAtPos( keypos, true );
// Ensure we do not orphan neighbor's old child.
- assert( childForPos( keypos ) == rchild );
+ assert( this->childForPos( keypos ) == rchild );
// Just set temporarily - required to pass validation in insertHere()
- childForPos( keypos ) = lchild;
+ this->childForPos( keypos ) = lchild;
insertHere( thisLoc, keypos, recordLoc, key, order, lchild, rchild, idx );
}
@@ -1180,127 +1284,137 @@ namespace mongo {
* @keypos - where to insert the key in range 0..n. 0=make leftmost, n=make rightmost.
* NOTE this function may free some data, and as a result the value passed for keypos may
* be invalid after calling insertHere()
+ *
+ * Some of the write intent signaling below relies on the implementation of
+ * the optimized write intent code in basicInsert().
*/
- void BtreeBucket::insertHere( const DiskLoc thisLoc, int keypos,
- const DiskLoc recordLoc, const BSONObj& key, const Ordering& order,
+ template< class V >
+ void BtreeBucket<V>::insertHere( const DiskLoc thisLoc, int keypos,
+ const DiskLoc recordLoc, const Key& key, const Ordering& order,
const DiskLoc lchild, const DiskLoc rchild, IndexDetails& idx) const {
if ( insert_debug )
out() << " " << thisLoc.toString() << ".insertHere " << key.toString() << '/' << recordLoc.toString() << ' '
<< lchild.toString() << ' ' << rchild.toString() << " keypos:" << keypos << endl;
- if ( !basicInsert(thisLoc, keypos, recordLoc, key, order) ) {
- thisLoc.btreemod()->split(thisLoc, keypos, recordLoc, key, order, lchild, rchild, idx);
+ if ( !this->basicInsert(thisLoc, keypos, recordLoc, key, order) ) {
+ // If basicInsert() fails, the bucket will be packed as required by split().
+ thisLoc.btreemod<V>()->split(thisLoc, keypos, recordLoc, key, order, lchild, rchild, idx);
return;
}
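+        // basicInsert() succeeded: the key bytes and its _KeyNode are already in
+        // place, so all that remains is wiring the child pointers around keypos.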
{
const _KeyNode *_kn = &k(keypos);
_KeyNode *kn = (_KeyNode *) getDur().alreadyDeclared((_KeyNode*) _kn); // already declared intent in basicInsert()
- if ( keypos+1 == n ) { // last key
- if ( nextChild != lchild ) {
+ if ( keypos+1 == this->n ) { // last key
+ if ( this->nextChild != lchild ) {
out() << "ERROR nextChild != lchild" << endl;
out() << " thisLoc: " << thisLoc.toString() << ' ' << idx.indexNamespace() << endl;
- out() << " keyPos: " << keypos << " n:" << n << endl;
- out() << " nextChild: " << nextChild.toString() << " lchild: " << lchild.toString() << endl;
+ out() << " keyPos: " << keypos << " n:" << this->n << endl;
+ out() << " nextChild: " << this->nextChild.toString() << " lchild: " << lchild.toString() << endl;
out() << " recordLoc: " << recordLoc.toString() << " rchild: " << rchild.toString() << endl;
out() << " key: " << key.toString() << endl;
dump();
assert(false);
}
- kn->prevChildBucket = nextChild;
+ kn->prevChildBucket = this->nextChild;
assert( kn->prevChildBucket == lchild );
- nextChild.writing() = rchild;
+ this->nextChild.writing() = rchild;
if ( !rchild.isNull() )
- rchild.btree()->parent.writing() = thisLoc;
+ BTREE(rchild)->parent.writing() = thisLoc;
}
else {
kn->prevChildBucket = lchild;
if ( k(keypos+1).prevChildBucket != lchild ) {
out() << "ERROR k(keypos+1).prevChildBucket != lchild" << endl;
out() << " thisLoc: " << thisLoc.toString() << ' ' << idx.indexNamespace() << endl;
- out() << " keyPos: " << keypos << " n:" << n << endl;
+ out() << " keyPos: " << keypos << " n:" << this->n << endl;
out() << " k(keypos+1).pcb: " << k(keypos+1).prevChildBucket.toString() << " lchild: " << lchild.toString() << endl;
out() << " recordLoc: " << recordLoc.toString() << " rchild: " << rchild.toString() << endl;
out() << " key: " << key.toString() << endl;
dump();
assert(false);
}
- const DiskLoc *pc = &k(keypos+1).prevChildBucket;
- *getDur().alreadyDeclared((DiskLoc*) pc) = rchild; // declared in basicInsert()
+ const Loc *pc = &k(keypos+1).prevChildBucket;
+ *getDur().alreadyDeclared( const_cast<Loc*>(pc) ) = rchild; // declared in basicInsert()
if ( !rchild.isNull() )
- rchild.btree()->parent.writing() = thisLoc;
+ rchild.btree<V>()->parent.writing() = thisLoc;
}
return;
}
}
- void BtreeBucket::split(const DiskLoc thisLoc, int keypos, const DiskLoc recordLoc, const BSONObj& key, const Ordering& order, const DiskLoc lchild, const DiskLoc rchild, IndexDetails& idx) {
- assertWritable();
+ template< class V >
+ void BtreeBucket<V>::split(const DiskLoc thisLoc, int keypos, const DiskLoc recordLoc, const Key& key, const Ordering& order, const DiskLoc lchild, const DiskLoc rchild, IndexDetails& idx) {
+ this->assertWritable();
if ( split_debug )
out() << " " << thisLoc.toString() << ".split" << endl;
- int split = splitPos( keypos );
+ int split = this->splitPos( keypos );
DiskLoc rLoc = addBucket(idx);
- BtreeBucket *r = rLoc.btreemod();
+ BtreeBucket *r = rLoc.btreemod<V>();
if ( split_debug )
- out() << " split:" << split << ' ' << keyNode(split).key.toString() << " n:" << n << endl;
- for ( int i = split+1; i < n; i++ ) {
+ out() << " split:" << split << ' ' << keyNode(split).key.toString() << " this->n:" << this->n << endl;
+ for ( int i = split+1; i < this->n; i++ ) {
KeyNode kn = keyNode(i);
r->pushBack(kn.recordLoc, kn.key, order, kn.prevChildBucket);
}
- r->nextChild = nextChild;
+ r->nextChild = this->nextChild;
r->assertValid( order );
if ( split_debug )
            out() << "    new rLoc:" << rLoc.toString() << endl;
r = 0;
- rLoc.btree()->fixParentPtrs(rLoc);
+ rLoc.btree<V>()->fixParentPtrs(rLoc);
{
KeyNode splitkey = keyNode(split);
- nextChild = splitkey.prevChildBucket; // splitkey key gets promoted, its children will be thisLoc (l) and rLoc (r)
+ this->nextChild = splitkey.prevChildBucket; // splitkey key gets promoted, its children will be thisLoc (l) and rLoc (r)
if ( split_debug ) {
out() << " splitkey key:" << splitkey.key.toString() << endl;
}
- // promote splitkey to a parent node
- if ( parent.isNull() ) {
- // make a new parent if we were the root
+ // Because thisLoc is a descendant of parent, updating parent will
+ // not affect packing or keys of thisLoc and splitkey will be stable
+ // during the following:
+
+            // promote splitkey to a parent node
+ if ( this->parent.isNull() ) {
+                // make a new parent if we were the root
DiskLoc L = addBucket(idx);
- BtreeBucket *p = L.btreemod();
+ BtreeBucket *p = L.btreemod<V>();
p->pushBack(splitkey.recordLoc, splitkey.key, order, thisLoc);
p->nextChild = rLoc;
p->assertValid( order );
- parent = idx.head.writing() = L;
+ this->parent = idx.head.writing() = L;
if ( split_debug )
- out() << " we were root, making new root:" << hex << parent.getOfs() << dec << endl;
- rLoc.btree()->parent.writing() = parent;
+                    out() << "    we were root, making new root:" << hex << this->parent.getOfs() << dec << endl;
+ rLoc.btree<V>()->parent.writing() = this->parent;
}
else {
// set this before calling _insert - if it splits it will do fixParent() logic and change the value.
- rLoc.btree()->parent.writing() = parent;
+ rLoc.btree<V>()->parent.writing() = this->parent;
if ( split_debug )
out() << " promoting splitkey key " << splitkey.key.toString() << endl;
- parent.btree()->_insert(parent, splitkey.recordLoc, splitkey.key, order, /*dupsallowed*/true, thisLoc, rLoc, idx);
+ BTREE(this->parent)->_insert(this->parent, splitkey.recordLoc, splitkey.key, order, /*dupsallowed*/true, thisLoc, rLoc, idx);
}
}
int newpos = keypos;
// note this may trash splitkey.key. thus we had to promote it before finishing up here.
- truncateTo(split, order, newpos); // note this may trash splitkey.key. thus we had to promote it before finishing up here.
+ this->truncateTo(split, order, newpos);
        // add our new key, there is room now
{
if ( keypos <= split ) {
if ( split_debug )
                    out() << "  keypos<split, insertHere() the new key" << endl;
insertHere(thisLoc, newpos, recordLoc, key, order, lchild, rchild, idx);
}
else {
int kp = keypos-split-1;
assert(kp>=0);
- rLoc.btree()->insertHere(rLoc, kp, recordLoc, key, order, lchild, rchild, idx);
+ BTREE(rLoc)->insertHere(rLoc, kp, recordLoc, key, order, lchild, rchild, idx);
}
}
@@ -1308,41 +1422,44 @@ namespace mongo {
out() << " split end " << hex << thisLoc.getOfs() << dec << endl;
}
- /** start a new index off, empty */
- DiskLoc BtreeBucket::addBucket(const IndexDetails& id) {
+    /** start a new index off, empty */
+ template< class V >
+ DiskLoc BtreeBucket<V>::addBucket(const IndexDetails& id) {
string ns = id.indexNamespace();
- DiskLoc loc = theDataFileMgr.insert(ns.c_str(), 0, BucketSize, true);
- BtreeBucket *b = loc.btreemod();
+ DiskLoc loc = theDataFileMgr.insert(ns.c_str(), 0, V::BucketSize, true);
+ BtreeBucket *b = BTREEMOD(loc);
b->init();
return loc;
}
- void BtreeBucket::renameIndexNamespace(const char *oldNs, const char *newNs) {
+ void renameIndexNamespace(const char *oldNs, const char *newNs) {
renameNamespace( oldNs, newNs );
}
- const DiskLoc BtreeBucket::getHead(const DiskLoc& thisLoc) const {
+ template< class V >
+ const DiskLoc BtreeBucket<V>::getHead(const DiskLoc& thisLoc) const {
DiskLoc p = thisLoc;
- while ( !p.btree()->isHead() )
- p = p.btree()->parent;
+ while ( !BTREE(p)->isHead() )
+ p = BTREE(p)->parent;
return p;
}
- DiskLoc BtreeBucket::advance(const DiskLoc& thisLoc, int& keyOfs, int direction, const char *caller) const {
- if ( keyOfs < 0 || keyOfs >= n ) {
- out() << "ASSERT failure BtreeBucket::advance, caller: " << caller << endl;
+ template< class V >
+ DiskLoc BtreeBucket<V>::advance(const DiskLoc& thisLoc, int& keyOfs, int direction, const char *caller) const {
+ if ( keyOfs < 0 || keyOfs >= this->n ) {
+ out() << "ASSERT failure BtreeBucket<V>::advance, caller: " << caller << endl;
out() << " thisLoc: " << thisLoc.toString() << endl;
- out() << " keyOfs: " << keyOfs << " n:" << n << " direction: " << direction << endl;
+ out() << " keyOfs: " << keyOfs << " this->n:" << this->n << " direction: " << direction << endl;
out() << bucketSummary() << endl;
assert(false);
}
int adj = direction < 0 ? 1 : 0;
int ko = keyOfs + direction;
- DiskLoc nextDown = childForPos(ko+adj);
+ DiskLoc nextDown = this->childForPos(ko+adj);
if ( !nextDown.isNull() ) {
while ( 1 ) {
- keyOfs = direction>0 ? 0 : nextDown.btree()->n - 1;
- DiskLoc loc = nextDown.btree()->childForPos(keyOfs + adj);
+ keyOfs = direction>0 ? 0 : BTREE(nextDown)->n - 1;
+ DiskLoc loc = BTREE(nextDown)->childForPos(keyOfs + adj);
if ( loc.isNull() )
break;
nextDown = loc;
@@ -1350,18 +1467,18 @@ namespace mongo {
return nextDown;
}
- if ( ko < n && ko >= 0 ) {
+ if ( ko < this->n && ko >= 0 ) {
keyOfs = ko;
return thisLoc;
}
// end of bucket. traverse back up.
DiskLoc childLoc = thisLoc;
- DiskLoc ancestor = parent;
+ DiskLoc ancestor = this->parent;
while ( 1 ) {
if ( ancestor.isNull() )
break;
- const BtreeBucket *an = ancestor.btree();
+ const BtreeBucket *an = BTREE(ancestor);
for ( int i = 0; i < an->n; i++ ) {
if ( an->childForPos(i+adj) == childLoc ) {
keyOfs = i;
@@ -1369,7 +1486,7 @@ namespace mongo {
}
}
assert( direction<0 || an->nextChild == childLoc );
            // parent exhausted also, keep going up
childLoc = ancestor;
ancestor = an->parent;
}
@@ -1377,7 +1494,14 @@ namespace mongo {
return DiskLoc();
}
- DiskLoc BtreeBucket::locate(const IndexDetails& idx, const DiskLoc& thisLoc, const BSONObj& key, const Ordering &order, int& pos, bool& found, const DiskLoc &recordLoc, int direction) const {
+ template< class V >
+ DiskLoc BtreeBucket<V>::locate(const IndexDetails& idx, const DiskLoc& thisLoc, const BSONObj& key, const Ordering &order, int& pos, bool& found, const DiskLoc &recordLoc, int direction) const {
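+        // Adapt the plain BSONObj to this btree version's Key type, then let the
+        // Key-based overload below do the actual descent.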
+ KeyOwned k(key);
+ return locate(idx, thisLoc, k, order, pos, found, recordLoc, direction);
+ }
+
+ template< class V >
+ DiskLoc BtreeBucket<V>::locate(const IndexDetails& idx, const DiskLoc& thisLoc, const Key& key, const Ordering &order, int& pos, bool& found, const DiskLoc &recordLoc, int direction) const {
int p;
found = find(idx, key, recordLoc, order, p, /*assertIfDup*/ false);
if ( found ) {
@@ -1385,10 +1509,10 @@ namespace mongo {
return thisLoc;
}
- DiskLoc child = childForPos(p);
+ DiskLoc child = this->childForPos(p);
if ( !child.isNull() ) {
- DiskLoc l = child.btree()->locate(idx, child, key, order, pos, found, recordLoc, direction);
+ DiskLoc l = BTREE(child)->locate(idx, child, key, order, pos, found, recordLoc, direction);
if ( !l.isNull() )
return l;
}
@@ -1397,14 +1521,15 @@ namespace mongo {
if ( direction < 0 )
return --pos == -1 ? DiskLoc() /*theend*/ : thisLoc;
else
- return pos == n ? DiskLoc() /*theend*/ : thisLoc;
+ return pos == this->n ? DiskLoc() /*theend*/ : thisLoc;
}
- bool BtreeBucket::customFind( int l, int h, const BSONObj &keyBegin, int keyBeginLen, bool afterKey, const vector< const BSONElement * > &keyEnd, const vector< bool > &keyEndInclusive, const Ordering &order, int direction, DiskLoc &thisLoc, int &keyOfs, pair< DiskLoc, int > &bestParent ) const {
+ template< class V >
+ bool BtreeBucket<V>::customFind( int l, int h, const BSONObj &keyBegin, int keyBeginLen, bool afterKey, const vector< const BSONElement * > &keyEnd, const vector< bool > &keyEndInclusive, const Ordering &order, int direction, DiskLoc &thisLoc, int &keyOfs, pair< DiskLoc, int > &bestParent ) const {
while( 1 ) {
if ( l + 1 == h ) {
keyOfs = ( direction > 0 ) ? h : l;
- DiskLoc next = thisLoc.btree()->k( h ).prevChildBucket;
+ DiskLoc next = BTREE(thisLoc)->k( h ).prevChildBucket;
if ( !next.isNull() ) {
bestParent = make_pair( thisLoc, keyOfs );
thisLoc = next;
@@ -1415,7 +1540,7 @@ namespace mongo {
}
}
int m = l + ( h - l ) / 2;
- int cmp = customBSONCmp( thisLoc.btree()->keyNode( m ).key, keyBegin, keyBeginLen, afterKey, keyEnd, keyEndInclusive, order, direction );
+ int cmp = customBSONCmp( BTREE(thisLoc)->keyNode( m ).key.toBson(), keyBegin, keyBeginLen, afterKey, keyEnd, keyEndInclusive, order, direction );
if ( cmp < 0 ) {
l = m;
}
@@ -1438,18 +1563,19 @@ namespace mongo {
* starting thisLoc + keyOfs will be strictly less than/strictly greater than keyBegin/keyBeginLen/keyEnd
* All the direction checks below allowed me to refactor the code, but possibly separate forward and reverse implementations would be more efficient
*/
- void BtreeBucket::advanceTo(DiskLoc &thisLoc, int &keyOfs, const BSONObj &keyBegin, int keyBeginLen, bool afterKey, const vector< const BSONElement * > &keyEnd, const vector< bool > &keyEndInclusive, const Ordering &order, int direction ) const {
+ template< class V >
+ void BtreeBucket<V>::advanceTo(DiskLoc &thisLoc, int &keyOfs, const BSONObj &keyBegin, int keyBeginLen, bool afterKey, const vector< const BSONElement * > &keyEnd, const vector< bool > &keyEndInclusive, const Ordering &order, int direction ) const {
int l,h;
bool dontGoUp;
if ( direction > 0 ) {
l = keyOfs;
- h = n - 1;
- dontGoUp = ( customBSONCmp( keyNode( h ).key, keyBegin, keyBeginLen, afterKey, keyEnd, keyEndInclusive, order, direction ) >= 0 );
+ h = this->n - 1;
+ dontGoUp = ( customBSONCmp( keyNode( h ).key.toBson(), keyBegin, keyBeginLen, afterKey, keyEnd, keyEndInclusive, order, direction ) >= 0 );
}
else {
l = 0;
h = keyOfs;
- dontGoUp = ( customBSONCmp( keyNode( l ).key, keyBegin, keyBeginLen, afterKey, keyEnd, keyEndInclusive, order, direction ) <= 0 );
+ dontGoUp = ( customBSONCmp( keyNode( l ).key.toBson(), keyBegin, keyBeginLen, afterKey, keyEnd, keyEndInclusive, order, direction ) <= 0 );
}
pair< DiskLoc, int > bestParent;
if ( dontGoUp ) {
@@ -1459,16 +1585,16 @@ namespace mongo {
}
}
else {
- // go up parents until rightmost/leftmost node is >=/<= target or at top
- while( !thisLoc.btree()->parent.isNull() ) {
- thisLoc = thisLoc.btree()->parent;
+ // go up parents until rightmost/leftmost node is >=/<= target or at top
+ while( !BTREE(thisLoc)->parent.isNull() ) {
+ thisLoc = BTREE(thisLoc)->parent;
if ( direction > 0 ) {
- if ( customBSONCmp( thisLoc.btree()->keyNode( thisLoc.btree()->n - 1 ).key, keyBegin, keyBeginLen, afterKey, keyEnd, keyEndInclusive, order, direction ) >= 0 ) {
+ if ( customBSONCmp( BTREE(thisLoc)->keyNode( BTREE(thisLoc)->n - 1 ).key.toBson(), keyBegin, keyBeginLen, afterKey, keyEnd, keyEndInclusive, order, direction ) >= 0 ) {
break;
}
}
else {
- if ( customBSONCmp( thisLoc.btree()->keyNode( 0 ).key, keyBegin, keyBeginLen, afterKey, keyEnd, keyEndInclusive, order, direction ) <= 0 ) {
+ if ( customBSONCmp( BTREE(thisLoc)->keyNode( 0 ).key.toBson(), keyBegin, keyBeginLen, afterKey, keyEnd, keyEndInclusive, order, direction ) <= 0 ) {
break;
}
}
@@ -1477,31 +1603,32 @@ namespace mongo {
customLocate( thisLoc, keyOfs, keyBegin, keyBeginLen, afterKey, keyEnd, keyEndInclusive, order, direction, bestParent );
}
- void BtreeBucket::customLocate(DiskLoc &thisLoc, int &keyOfs, const BSONObj &keyBegin, int keyBeginLen, bool afterKey, const vector< const BSONElement * > &keyEnd, const vector< bool > &keyEndInclusive, const Ordering &order, int direction, pair< DiskLoc, int > &bestParent ) const {
- if ( thisLoc.btree()->n == 0 ) {
+ template< class V >
+ void BtreeBucket<V>::customLocate(DiskLoc &thisLoc, int &keyOfs, const BSONObj &keyBegin, int keyBeginLen, bool afterKey, const vector< const BSONElement * > &keyEnd, const vector< bool > &keyEndInclusive, const Ordering &order, int direction, pair< DiskLoc, int > &bestParent ) const {
+ if ( BTREE(thisLoc)->n == 0 ) {
thisLoc = DiskLoc();
return;
}
// go down until find smallest/biggest >=/<= target
while( 1 ) {
int l = 0;
- int h = thisLoc.btree()->n - 1;
+ int h = BTREE(thisLoc)->n - 1;
// leftmost/rightmost key may possibly be >=/<= search key
bool firstCheck;
if ( direction > 0 ) {
- firstCheck = ( customBSONCmp( thisLoc.btree()->keyNode( 0 ).key, keyBegin, keyBeginLen, afterKey, keyEnd, keyEndInclusive, order, direction ) >= 0 );
+ firstCheck = ( customBSONCmp( BTREE(thisLoc)->keyNode( 0 ).key.toBson(), keyBegin, keyBeginLen, afterKey, keyEnd, keyEndInclusive, order, direction ) >= 0 );
}
else {
- firstCheck = ( customBSONCmp( thisLoc.btree()->keyNode( h ).key, keyBegin, keyBeginLen, afterKey, keyEnd, keyEndInclusive, order, direction ) <= 0 );
+ firstCheck = ( customBSONCmp( BTREE(thisLoc)->keyNode( h ).key.toBson(), keyBegin, keyBeginLen, afterKey, keyEnd, keyEndInclusive, order, direction ) <= 0 );
}
if ( firstCheck ) {
DiskLoc next;
if ( direction > 0 ) {
- next = thisLoc.btree()->k( 0 ).prevChildBucket;
+ next = BTREE(thisLoc)->k( 0 ).prevChildBucket;
keyOfs = 0;
}
else {
- next = thisLoc.btree()->nextChild;
+ next = BTREE(thisLoc)->nextChild;
keyOfs = h;
}
if ( !next.isNull() ) {
@@ -1515,21 +1642,21 @@ namespace mongo {
}
bool secondCheck;
if ( direction > 0 ) {
- secondCheck = ( customBSONCmp( thisLoc.btree()->keyNode( h ).key, keyBegin, keyBeginLen, afterKey, keyEnd, keyEndInclusive, order, direction ) < 0 );
+ secondCheck = ( customBSONCmp( BTREE(thisLoc)->keyNode( h ).key.toBson(), keyBegin, keyBeginLen, afterKey, keyEnd, keyEndInclusive, order, direction ) < 0 );
}
else {
- secondCheck = ( customBSONCmp( thisLoc.btree()->keyNode( 0 ).key, keyBegin, keyBeginLen, afterKey, keyEnd, keyEndInclusive, order, direction ) > 0 );
+ secondCheck = ( customBSONCmp( BTREE(thisLoc)->keyNode( 0 ).key.toBson(), keyBegin, keyBeginLen, afterKey, keyEnd, keyEndInclusive, order, direction ) > 0 );
}
if ( secondCheck ) {
DiskLoc next;
if ( direction > 0 ) {
- next = thisLoc.btree()->nextChild;
+ next = BTREE(thisLoc)->nextChild;
}
else {
- next = thisLoc.btree()->k( 0 ).prevChildBucket;
+ next = BTREE(thisLoc)->k( 0 ).prevChildBucket;
}
if ( next.isNull() ) {
- // if bestParent is null, we've hit the end and thisLoc gets set to DiskLoc()
+ // if bestParent is null, we've hit the end and thisLoc gets set to DiskLoc()
thisLoc = bestParent.first;
keyOfs = bestParent.second;
return;
@@ -1547,14 +1674,15 @@ namespace mongo {
/** @thisLoc disk location of *this */
- int BtreeBucket::_insert(const DiskLoc thisLoc, const DiskLoc recordLoc,
- const BSONObj& key, const Ordering &order, bool dupsAllowed,
+ template< class V >
+ int BtreeBucket<V>::_insert(const DiskLoc thisLoc, const DiskLoc recordLoc,
+ const Key& key, const Ordering &order, bool dupsAllowed,
const DiskLoc lChild, const DiskLoc rChild, IndexDetails& idx) const {
- if ( key.objsize() > KeyMax ) {
- problem() << "ERROR: key too large len:" << key.objsize() << " max:" << KeyMax << ' ' << key.objsize() << ' ' << idx.indexNamespace() << endl;
+ if ( key.dataSize() > this->KeyMax ) {
+ problem() << "ERROR: key too large len:" << key.dataSize() << " max:" << this->KeyMax << ' ' << key.dataSize() << ' ' << idx.indexNamespace() << endl;
return 2;
}
- assert( key.objsize() > 0 );
+ assert( key.dataSize() > 0 );
int pos;
bool found = find(idx, key, recordLoc, order, pos, !dupsAllowed);
@@ -1562,15 +1690,15 @@ namespace mongo {
out() << " " << thisLoc.toString() << '.' << "_insert " <<
key.toString() << '/' << recordLoc.toString() <<
" l:" << lChild.toString() << " r:" << rChild.toString() << endl;
- out() << " found:" << found << " pos:" << pos << " n:" << n << endl;
+ out() << " found:" << found << " pos:" << pos << " this->n:" << this->n << endl;
}
if ( found ) {
const _KeyNode& kn = k(pos);
if ( kn.isUnused() ) {
log(4) << "btree _insert: reusing unused key" << endl;
- massert( 10285 , "_insert: reuse key but lchild is not null", lChild.isNull());
- massert( 10286 , "_insert: reuse key but rchild is not null", rChild.isNull());
+ massert( 10285 , "_insert: reuse key but lchild is not null", lChild.isNull());
+ massert( 10286 , "_insert: reuse key but rchild is not null", rChild.isNull());
kn.writing().setUsed();
return 0;
}
@@ -1580,78 +1708,89 @@ namespace mongo {
log() << " " << idx.indexNamespace() << " thisLoc:" << thisLoc.toString() << '\n';
log() << " " << key.toString() << '\n';
log() << " " << "recordLoc:" << recordLoc.toString() << " pos:" << pos << endl;
- log() << " old l r: " << childForPos(pos).toString() << ' ' << childForPos(pos+1).toString() << endl;
- log() << " new l r: " << lChild.toString() << ' ' << rChild.toString() << endl;
+ log() << " old l r: " << this->childForPos(pos).toString() << ' ' << this->childForPos(pos+1).toString() << endl;
+ log() << " this->new l r: " << lChild.toString() << ' ' << rChild.toString() << endl;
}
alreadyInIndex();
}
DEBUGGING out() << "TEMP: key: " << key.toString() << endl;
- DiskLoc child = childForPos(pos);
+ Loc ch = this->childForPos(pos);
+ DiskLoc child = ch;
if ( insert_debug )
out() << " getChild(" << pos << "): " << child.toString() << endl;
- if ( child.isNull() || !rChild.isNull() /* means an 'internal' insert */ ) {
+ // In current usage, rChild.isNull() is true for a new key and false when we are
+ // promoting a split key. These are the only two cases where _insert()
+ // is called currently.
+ if ( child.isNull() || !rChild.isNull() ) {
+ // A new key will be inserted at the same tree height as an adjacent existing key.
insertHere(thisLoc, pos, recordLoc, key, order, lChild, rChild, idx);
return 0;
}
- return child.btree()->bt_insert(child, recordLoc, key, order, dupsAllowed, idx, /*toplevel*/false);
+ return child.btree<V>()->_insert(child, recordLoc, key, order, dupsAllowed, /*lchild*/DiskLoc(), /*rchild*/DiskLoc(), idx);
}
- void BtreeBucket::dump() const {
- out() << "DUMP btreebucket n:" << n;
- out() << " parent:" << hex << parent.getOfs() << dec;
- for ( int i = 0; i < n; i++ ) {
- out() << '\n';
+ template< class V >
+ void BtreeBucket<V>::dump(unsigned depth) const {
+ string indent = string(depth, ' ');
+ _log() << "BUCKET n:" << this->n;
+ _log() << " parent:" << hex << this->parent.getOfs() << dec;
+ for ( int i = 0; i < this->n; i++ ) {
+ _log() << '\n' << indent;
KeyNode k = keyNode(i);
- out() << '\t' << i << '\t' << k.key.toString() << "\tleft:" << hex <<
- k.prevChildBucket.getOfs() << "\tRecLoc:" << k.recordLoc.toString() << dec;
+ string ks = k.key.toString();
+ _log() << " " << hex << k.prevChildBucket.getOfs() << '\n';
+ _log() << indent << " " << i << ' ' << ks.substr(0, 30) << " Loc:" << k.recordLoc.toString() << dec;
if ( this->k(i).isUnused() )
- out() << " UNUSED";
+ _log() << " UNUSED";
}
- out() << " right:" << hex << nextChild.getOfs() << dec << endl;
+ _log() << "\n" << indent << " " << hex << this->nextChild.getOfs() << dec << endl;
}
/** todo: meaning of return code unclear clean up */
- int BtreeBucket::bt_insert(const DiskLoc thisLoc, const DiskLoc recordLoc,
- const BSONObj& key, const Ordering &order, bool dupsAllowed,
- IndexDetails& idx, bool toplevel) const {
+ template< class V >
+ int BtreeBucket<V>::bt_insert(const DiskLoc thisLoc, const DiskLoc recordLoc,
+ const BSONObj& _key, const Ordering &order, bool dupsAllowed,
+ IndexDetails& idx, bool toplevel) const
+ {
+ KeyOwned key(_key);
+
if ( toplevel ) {
- if ( key.objsize() > KeyMax ) {
- problem() << "Btree::insert: key too large to index, skipping " << idx.indexNamespace() << ' ' << key.objsize() << ' ' << key.toString() << endl;
+ if ( key.dataSize() > this->KeyMax ) {
+ problem() << "Btree::insert: key too large to index, skipping " << idx.indexNamespace() << ' ' << key.dataSize() << ' ' << key.toString() << endl;
return 3;
}
}
int x = _insert(thisLoc, recordLoc, key, order, dupsAllowed, DiskLoc(), DiskLoc(), idx);
- assertValid( order );
+ this->assertValid( order );
return x;
}
- void BtreeBucket::shape(stringstream& ss) const {
- _shape(0, ss);
+ template< class V >
+ void BtreeBucket<V>::shape(stringstream& ss) const {
+ this->_shape(0, ss);
}
- int BtreeBucket::getLowWaterMark() {
- return lowWaterMark;
+ template< class V >
+ int BtreeBucket<V>::getKeyMax() {
+ return V::KeyMax;
}
- int BtreeBucket::getKeyMax() {
- return KeyMax;
- }
-
- DiskLoc BtreeBucket::findSingle( const IndexDetails& indexdetails , const DiskLoc& thisLoc, const BSONObj& key ) const {
- indexdetails.checkVersion();
+ template< class V >
+ DiskLoc BtreeBucket<V>::findSingle( const IndexDetails& indexdetails , const DiskLoc& thisLoc, const BSONObj& key ) const {
int pos;
bool found;
- // TODO: is it really ok here that the order is a default?
+ // TODO: is it really ok here that the order is a default?
+ // for findById() use, yes. for checkNoIndexConflicts, no?
Ordering o = Ordering::make(BSONObj());
DiskLoc bucket = locate( indexdetails , indexdetails.head , key , o , pos , found , minDiskLoc );
if ( bucket.isNull() )
return bucket;
- const BtreeBucket *b = bucket.btree();
+ const BtreeBucket<V> *b = bucket.btree<V>();
while ( 1 ) {
const _KeyNode& knraw = b->k(pos);
if ( knraw.isUsed() )
@@ -1659,23 +1798,24 @@ namespace mongo {
bucket = b->advance( bucket , pos , 1 , "findSingle" );
if ( bucket.isNull() )
return bucket;
- b = bucket.btree();
+ b = bucket.btree<V>();
}
KeyNode kn = b->keyNode( pos );
- if ( key.woCompare( kn.key ) != 0 )
+ if ( KeyOwned(key).woCompare( kn.key, o ) != 0 )
return DiskLoc();
return kn.recordLoc;
}
-} // namespace mongo
+} // namespace mongo
#include "db.h"
#include "dbhelpers.h"
namespace mongo {
- void BtreeBucket::a_test(IndexDetails& id) {
- BtreeBucket *b = id.head.btreemod();
+ template< class V >
+ void BtreeBucket<V>::a_test(IndexDetails& id) {
+ BtreeBucket *b = id.head.btreemod<V>();
// record locs for testing
DiskLoc A(1, 20);
@@ -1703,155 +1843,45 @@ namespace mongo {
b->dumpTree(id.head, orderObj);
- /* b->bt_insert(id.head, B, key, order, false, id);
+ /* b->bt_insert(id.head, B, key, order, false, id);
b->k(1).setUnused();
-
b->dumpTree(id.head, order);
-
b->bt_insert(id.head, A, key, order, false, id);
-
b->dumpTree(id.head, order);
*/
// this should assert. does it? (it might "accidentally" though, not asserting proves a problem, asserting proves nothing)
b->bt_insert(id.head, C, key, order, false, id);
-// b->dumpTree(id.head, order);
+ // b->dumpTree(id.head, order);
}
- /* --- BtreeBuilder --- */
+ template class BucketBasics<V0>;
+ template class BucketBasics<V1>;
+ template class BtreeBucket<V0>;
+ template class BtreeBucket<V1>;
- BtreeBuilder::BtreeBuilder(bool _dupsAllowed, IndexDetails& _idx) :
- dupsAllowed(_dupsAllowed),
- idx(_idx),
- n(0),
- order( idx.keyPattern() ),
- ordering( Ordering::make(idx.keyPattern()) ) {
- first = cur = BtreeBucket::addBucket(idx);
- b = cur.btreemod();
- committed = false;
- }
-
- void BtreeBuilder::newBucket() {
- DiskLoc L = BtreeBucket::addBucket(idx);
- b->tempNext() = L;
- cur = L;
- b = cur.btreemod();
- }
-
- void BtreeBuilder::mayCommitProgressDurably() {
- if ( getDur().commitIfNeeded() ) {
- b = cur.btreemod();
- }
- }
-
- void BtreeBuilder::addKey(BSONObj& key, DiskLoc loc) {
- if ( key.objsize() > KeyMax ) {
- problem() << "Btree::insert: key too large to index, skipping " << idx.indexNamespace()
- << ' ' << key.objsize() << ' ' << key.toString() << endl;
- return;
- }
-
- if( !dupsAllowed ) {
- if( n > 0 ) {
- int cmp = keyLast.woCompare(key, order);
- massert( 10288 , "bad key order in BtreeBuilder - server internal error", cmp <= 0 );
- if( cmp == 0 ) {
- //if( !dupsAllowed )
- uasserted( ASSERT_ID_DUPKEY , BtreeBucket::dupKeyError( idx , keyLast ) );
- }
+ struct BTUnitTest : public UnitTest {
+ void run() {
+ DiskLoc big(0xf12312, 0x70001234);
+ DiskLoc56Bit bigl;
+ {
+ bigl = big;
+ assert( big == bigl );
+ DiskLoc e = bigl;
+ assert( big == e );
}
- keyLast = key;
- }
-
- if ( ! b->_pushBack(loc, key, ordering, DiskLoc()) ) {
- // bucket was full
- newBucket();
- b->pushBack(loc, key, ordering, DiskLoc());
- }
- n++;
- mayCommitProgressDurably();
- }
-
- void BtreeBuilder::buildNextLevel(DiskLoc loc) {
- int levels = 1;
- while( 1 ) {
- if( loc.btree()->tempNext().isNull() ) {
- // only 1 bucket at this level. we are done.
- getDur().writingDiskLoc(idx.head) = loc;
- break;
- }
- levels++;
-
- DiskLoc upLoc = BtreeBucket::addBucket(idx);
- DiskLoc upStart = upLoc;
- BtreeBucket *up = upLoc.btreemod();
-
- DiskLoc xloc = loc;
- while( !xloc.isNull() ) {
- if ( getDur().commitIfNeeded() ) {
- b = cur.btreemod();
- up = upLoc.btreemod();
- }
-
- BtreeBucket *x = xloc.btreemod();
- BSONObj k;
- DiskLoc r;
- x->popBack(r,k);
- bool keepX = ( x->n != 0 );
- DiskLoc keepLoc = keepX ? xloc : x->nextChild;
-
- if ( ! up->_pushBack(r, k, ordering, keepLoc) ) {
- // current bucket full
- DiskLoc n = BtreeBucket::addBucket(idx);
- up->tempNext() = n;
- upLoc = n;
- up = upLoc.btreemod();
- up->pushBack(r, k, ordering, keepLoc);
- }
-
- DiskLoc nextLoc = x->tempNext(); // get next in chain at current level
- if ( keepX ) {
- x->parent = upLoc;
- }
- else {
- if ( !x->nextChild.isNull() )
- x->nextChild.btreemod()->parent = upLoc;
- x->deallocBucket( xloc, idx );
- }
- xloc = nextLoc;
+ {
+ DiskLoc d;
+ assert( d.isNull() );
+ DiskLoc56Bit l;
+ l = d;
+ assert( l.isNull() );
+ d = l;
+ assert( d.isNull() );
+ assert( l < bigl );
}
-
- loc = upStart;
- mayCommitProgressDurably();
}
-
- if( levels > 1 )
- log(2) << "btree levels: " << levels << endl;
- }
-
- /** when all addKeys are done, we then build the higher levels of the tree */
- void BtreeBuilder::commit() {
- buildNextLevel(first);
- committed = true;
- }
-
- BtreeBuilder::~BtreeBuilder() {
- DESTRUCTOR_GUARD(
- if( !committed ) {
- log(2) << "Rolling back partially built index space" << endl;
- DiskLoc x = first;
- while( !x.isNull() ) {
- DiskLoc next = x.btree()->tempNext();
- string ns = idx.indexNamespace();
- theDataFileMgr._deleteRecord(nsdetails(ns.c_str()), ns.c_str(), x.rec(), x);
- x = next;
- getDur().commitIfNeeded();
- }
- assert( idx.head.isNull() );
- log(2) << "done rollback" << endl;
- }
- )
- }
+ } btunittest;
}
diff --git a/db/btree.h b/db/btree.h
index bced95e..9ffa54c 100644
--- a/db/btree.h
+++ b/db/btree.h
@@ -22,37 +22,99 @@
#include "jsobj.h"
#include "diskloc.h"
#include "pdfile.h"
+#include "key.h"
namespace mongo {
- const int BucketSize = 8192;
+ /**
+ * Our btree implementation generally follows the standard btree algorithm,
+ * which is described in many places. The nodes of our btree are referred to
+ * as buckets below. These buckets are of size BucketSize and their body is
+ * an ordered array of <bson key, disk loc> pairs, where disk loc is the disk
+ * location of a document and bson key is a projection of this document into
+ * the schema of the index for this btree. Ordering is determined on the
+ * basis of bson key first and then disk loc in case of a tie. All bson keys
+ * for a btree have identical schemas with empty string field names and may
+ * not have an objsize() exceeding KeyMax. The btree's buckets are
+ * themselves organized into an ordered tree. Although there are exceptions,
+ * generally buckets with n keys have n+1 children and the body of a bucket is
+ * at least lowWaterMark bytes. A more strictly enforced requirement is that
+ * a non root bucket must have at least one key except in certain transient
+ * states.
+ *
+ * Our btrees support the following primary read operations: finding a
+ * specified key; iterating from a starting key to the next or previous
+ * ordered key; and skipping from a starting key to another specified key
+ * without checking every intermediate key. The primary write operations
+ * are insertion and deletion of keys. Insertion may trigger a bucket split
+ * if necessary to avoid bucket overflow. In such a case, subsequent splits
+ * will occur recursively as necessary. Deletion may trigger a bucket
+ * rebalance, in which a size deficient bucket is filled with keys from an
+ * adjacent bucket. In this case, splitting may potentially occur in the
+ * parent. Deletion may alternatively trigger a merge, in which the keys
+ * from two buckets and a key from their shared parent are combined into the
+ * same bucket. In such a case, rebalancing or merging may proceed
+ * recursively from the parent.
+ *
+ * While the btree data format has been relatively constant over time, btrees
+ * initially created by versions of mongo earlier than the current version
+ * may embody different properties than freshly created btrees (while
+ * following the same data format). These older btrees are referred to
+ * below as legacy btrees.
+ */
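The ordering rule stated above (bson key first, disk location as the tiebreaker for duplicates) underpins most of the find and insert logic in this patch. A minimal sketch of that comparison, not part of the patch and assuming the usual bson/db headers; Entry and entryLess are purely illustrative names:

    // Illustrative only: the (bson key, disk loc) ordering described above.
    struct Entry {
        mongo::BSONObj key;   // projection of the document into the index schema
        mongo::DiskLoc loc;   // disk location of the document
    };
    inline bool entryLess(const Entry& a, const Entry& b, const mongo::Ordering& o) {
        int c = a.key.woCompare(b.key, o, /*considerFieldName*/ false);  // index keys have empty field names
        if (c != 0)
            return c < 0;       // bson key decides first
        return a.loc < b.loc;   // disk location breaks ties between equal keys
    }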
+
+ const int OldBucketSize = 8192;
#pragma pack(1)
- struct _KeyNode {
+ template< class Version > class BucketBasics;
+
+ /**
+ * This is the fixed width data component for storage of a key within a
+ * bucket. It contains an offset pointer to the variable width bson
+ * data component. A _KeyNode may be 'unused', please see below.
+ */
+ template< class Loc >
+ struct __KeyNode {
/** Signals that we are writing this _KeyNode and casts away const */
- _KeyNode& writing() const;
- DiskLoc prevChildBucket; // the lchild
- DiskLoc recordLoc; // location of the record associated with the key
- short keyDataOfs() const {
- return (short) _kdo;
- }
+ __KeyNode<Loc> & writing() const;
+ /**
+ * The 'left' child bucket of this key. If this is the i-th key, it
+ * points to the i index child bucket.
+ */
+ Loc prevChildBucket;
+ /** The location of the record associated with this key. */
+ Loc recordLoc;
+ short keyDataOfs() const { return (short) _kdo; }
+
+ /** Offset within current bucket of the variable width bson key for this _KeyNode. */
unsigned short _kdo;
void setKeyDataOfs(short s) {
_kdo = s;
assert(s>=0);
}
+ /** Seems to be redundant. */
void setKeyDataOfsSavingUse(short s) {
_kdo = s;
assert(s>=0);
}
- void setUsed() { recordLoc.GETOFS() &= ~1; }
+ /**
+ * Unused keys are not returned by read operations. Keys may be marked
+ * as unused in cases where it is difficult to delete them while
+ * maintaining the constraints required of a btree.
+ *
+ * Setting ofs to odd is the sentinel for unused, as real recordLoc's
+ * are always even numbers. Note we need to keep its value basically
+ * the same as we use the recordLoc as part of the key in the index
+ * (to handle duplicate keys efficiently).
+ *
+ * Flagging keys as unused is a feature that is being phased out in favor
+ * of deleting the keys outright. The current btree implementation is
+ * not expected to mark a key as unused in a non legacy btree.
+ */
void setUnused() {
- // Setting ofs to odd is the sentinel for unused, as real recordLoc's are always
- // even numbers.
- // Note we need to keep its value basically the same as we use the recordLoc
- // as part of the key in the index (to handle duplicate keys efficiently).
recordLoc.GETOFS() |= 1;
}
+ void setUsed() { recordLoc.GETOFS() &= ~1; }
int isUnused() const {
return recordLoc.getOfs() & 1;
}
@@ -60,44 +122,175 @@ namespace mongo {
return !isUnused();
}
};
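Since record offsets are always even, the low bit of recordLoc's offset is free to serve as the unused flag described above. A tiny standalone sketch of just that bit manipulation, with hypothetical names rather than the real _KeyNode:

    // Hypothetical stand-in for the sentinel described above.
    struct RecOfs { int ofs; };                        // offset half of a recordLoc
    inline void markUnused(RecOfs& r) { r.ofs |= 1; }  // real record offsets are even
    inline void markUsed(RecOfs& r)   { r.ofs &= ~1; }
    inline bool isUnusedOfs(const RecOfs& r) { return (r.ofs & 1) != 0; }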
-#pragma pack()
-
- class BucketBasics;
/**
- * wrapper - this is our in memory representation of the key.
- * _KeyNode is the disk representation.
+ * This structure represents header data for a btree bucket. An object of
+ * this type is typically allocated inside of a buffer of size BucketSize,
+ * resulting in a full bucket with an appropriate header.
*
- * This object and its bson key will become invalid if the key is moved.
+ * The body of a btree bucket contains an array of _KeyNode objects starting
+ * from its lowest indexed bytes and growing to higher indexed bytes. The
+ * body also contains variable width bson keys, which are allocated from the
+ * highest indexed bytes toward lower indexed bytes.
+ *
+ * |hhhh|kkkkkkk--------bbbbbbbbbbbuuubbbuubbb|
+ * h = header data
+ * k = KeyNode data
+ * - = empty space
+ * b = bson key data
+ * u = unused (old) bson key data, that may be garbage collected
*/
- class KeyNode {
- public:
- KeyNode(const BucketBasics& bb, const _KeyNode &k);
- const DiskLoc& prevChildBucket;
- const DiskLoc& recordLoc;
- BSONObj key;
- };
-
-#pragma pack(1)
- class BtreeData {
+ class BtreeData_V0 {
protected:
+ /** Parent bucket of this bucket, which isNull() for the root bucket. */
DiskLoc parent;
- DiskLoc nextChild; // child bucket off and to the right of the highest key.
- unsigned short _wasSize; // can be reused, value is 8192 in current pdfile version Apr2010
- unsigned short _reserved1; // zero
+ /** Given that there are n keys, this is the n index child. */
+ DiskLoc nextChild;
+ /** can be reused, value is 8192 in current pdfile version Apr2010 */
+ unsigned short _wasSize;
+ /** zero */
+ unsigned short _reserved1;
int flags;
- // basicInsert() assumes these three are together and in this order:
- int emptySize; // size of the empty region
- int topSize; // size of the data at the top of the bucket (keys are at the beginning or 'bottom')
- int n; // # of keys so far.
+ void _init() {
+ _reserved1 = 0;
+ _wasSize = BucketSize;
+ reserved = 0;
+ }
+
+ /** basicInsert() assumes the next three members are consecutive and in this order: */
+
+ /** Size of the empty region. */
+ int emptySize;
+ /** Size used for bson storage, including storage of old keys. */
+ int topSize;
+ /* Number of keys in the bucket. */
+ int n;
int reserved;
+ /* Beginning of the bucket's body */
+ char data[4];
+
+ public:
+ typedef __KeyNode<DiskLoc> _KeyNode;
+ typedef DiskLoc Loc;
+ typedef KeyBson Key;
+ typedef KeyBson KeyOwned;
+ enum { BucketSize = 8192 };
+
+ // largest key size we allow. note we very much need to support bigger keys (somehow) in the future.
+ static const int KeyMax = OldBucketSize / 10;
+ };
+
+ // layout, most significant bytes first: a a a ofs ofs ofs ofs (3 byte file # then 4 byte offset)
+ class DiskLoc56Bit {
+ int ofs;
+ unsigned char _a[3];
+ unsigned long long Z() const {
+ // endian
+ return *((unsigned long long*)this) & 0x00ffffffffffffffULL;
+ }
+ enum {
+ // the first (low) bit of offsets is used in _KeyNode as the unused flag, so we don't use -1 here.
+ OurNullOfs = -2
+ };
+ public:
+ template< class V >
+ const BtreeBucket<V> * btree() const {
+ return DiskLoc(*this).btree<V>();
+ }
+ template< class V >
+ BtreeBucket<V> * btreemod() const {
+ return DiskLoc(*this).btreemod<V>();
+ }
+ operator DiskLoc() const {
+ // endian
+ if( isNull() ) return DiskLoc();
+ unsigned a = *((unsigned *) (_a-1));
+ return DiskLoc(a >> 8, ofs);
+ }
+ int& GETOFS() { return ofs; }
+ int getOfs() const { return ofs; }
+ bool operator<(const DiskLoc56Bit& rhs) const {
+ // the ordering of dup keys in btrees isn't too critical, but we'd like to put items that are
+ // close together on disk close together in the tree, so we do want the file # to be the most significant
+ // bytes
+ return Z() < rhs.Z();
+ }
+ int compare(const DiskLoc56Bit& rhs) const {
+ unsigned long long a = Z();
+ unsigned long long b = rhs.Z();
+ if( a < b ) return -1;
+ return a == b ? 0 : 1;
+ }
+ bool operator==(const DiskLoc56Bit& rhs) const { return Z() == rhs.Z(); }
+ bool operator!=(const DiskLoc56Bit& rhs) const { return Z() != rhs.Z(); }
+ bool operator==(const DiskLoc& rhs) const {
+ return DiskLoc(*this) == rhs;
+ }
+ bool operator!=(const DiskLoc& rhs) const { return !(*this==rhs); }
+ bool isNull() const { return ofs < 0; }
+ void Null() {
+ ofs = OurNullOfs;
+ _a[0] = _a[1] = _a[2] = 0;
+ }
+ string toString() const { return DiskLoc(*this).toString(); }
+ void operator=(const DiskLoc& loc) {
+ ofs = loc.getOfs();
+ int la = loc.a();
+ assert( la <= 0xffffff ); // must fit in 3 bytes
+ if( la < 0 ) {
+ assert( la == -1 );
+ la = 0;
+ ofs = OurNullOfs;
+ }
+ memcpy(_a, &la, 3); // endian
+ dassert( ofs != 0 );
+ }
+ DiskLoc56Bit& writing() const {
+ return *((DiskLoc56Bit*) getDur().writingPtr((void*)this, 7));
+ }
+ };
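Because Z() places the 3 byte file number in the most significant bytes, DiskLoc56Bit ordering groups entries by file before offset, as the operator< comment intends. A short check of that behavior, not part of the patch, assuming the surrounding db headers are available; the function name is illustrative:

    // Illustrative only: file number dominates DiskLoc56Bit ordering.
    static void diskLoc56OrderingSketch() {
        using namespace mongo;
        DiskLoc56Bit x, y;
        x = DiskLoc(/*file*/ 1, /*ofs*/ 0x10000);   // large offset, earlier file
        y = DiskLoc(/*file*/ 2, /*ofs*/ 0x10);      // small offset, later file
        assert( x < y );                            // compared via Z(): file # first, then offset
        assert( y.compare(x) > 0 );
    }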
+
+ class BtreeData_V1 {
+ public:
+ typedef DiskLoc56Bit Loc;
+ //typedef DiskLoc Loc;
+ typedef __KeyNode<Loc> _KeyNode;
+ typedef KeyV1 Key;
+ typedef KeyV1Owned KeyOwned;
+ enum { BucketSize = 8192-16 }; // leave room for Record header
+ // largest key size we allow. note we very much need to support bigger keys (somehow) in the future.
+ static const int KeyMax = 1024;
+ protected:
+ /** Parent bucket of this bucket, which isNull() for the root bucket. */
+ Loc parent;
+ /** Given that there are n keys, this is the n index child. */
+ Loc nextChild;
+
+ unsigned short flags;
+
+ /** basicInsert() assumes the next three members are consecutive and in this order: */
+
+ /** Size of the empty region. */
+ unsigned short emptySize;
+ /** Size used for bson storage, including storage of old keys. */
+ unsigned short topSize;
+ /* Number of keys in the bucket. */
+ unsigned short n;
+
+ /* Beginning of the bucket's body */
char data[4];
+
+ void _init() { }
};
+ typedef BtreeData_V0 V0;
+ typedef BtreeData_V1 V1;
+
/**
- * This class is all about the storage management
+ * This class adds functionality to BtreeData for managing a single bucket.
+ * The following policies are used in an attempt to encourage simplicity:
*
* Const member functions of this class are those which may be called on
* an object for which writing has not been signaled. Non const member
@@ -108,21 +301,47 @@ namespace mongo {
*
* DiskLoc parameters that may shadow references within the btree should
* be passed by value rather than by reference to non const member
- * functions or const member functions which may perform writes. This way
+ * functions or to const member functions which may perform writes. This way
* a callee need not worry that write operations will change or invalidate
* its arguments.
*
* The current policy for dealing with bson arguments is the opposite of
- * what is described above for DiskLoc arguments. We do
- * not want to want to copy bson into memory as an intermediate step for
- * btree changes, so if bson is to be moved it must be copied to the new
- * location before the old location is invalidated.
+ * what is described above for DiskLoc arguments. We do not want to copy
+ * bson into memory as an intermediate step for btree changes, and if bson
+ * is to be moved it must be copied to the new location before the old
+ * location is invalidated. Care should be taken in cases where that invalid
+ * memory may be implicitly referenced by function arguments.
+ *
+ * A number of functions below require a thisLoc argument, which must be the
+ * disk location of the bucket mapped to 'this'.
*/
- class BucketBasics : public BtreeData {
- friend class BtreeBuilder;
- friend class KeyNode;
+ template< class Version >
+ class BucketBasics : public Version {
public:
- /** assert write intent declared for this bucket already */
+ template <class U> friend class BtreeBuilder;
+ typedef typename Version::Key Key;
+ typedef typename Version::_KeyNode _KeyNode;
+ typedef typename Version::Loc Loc;
+
+ int getN() const { return this->n; }
+
+ /**
+ * This is an in memory wrapper for a _KeyNode, and not itself part of btree
+ * storage. This object and its BSONObj 'key' will become invalid if the
+ * _KeyNode data that generated it is moved within the btree. In general,
+ * a KeyNode should not be expected to be valid after a write.
+ */
+ class KeyNode {
+ public:
+ KeyNode(const BucketBasics<Version>& bb, const _KeyNode &k);
+ const Loc& prevChildBucket;
+ const Loc& recordLoc;
+ /* Points to the bson key storage for a _KeyNode */
+ Key key;
+ };
+ friend class KeyNode;
+
+ /** Assert write intent declared for this bucket already. */
void assertWritable();
void assertValid(const Ordering &order, bool force = false) const;
@@ -130,11 +349,12 @@ namespace mongo {
/**
* @return KeyNode for key at index i. The KeyNode will become invalid
- * if the key is moved or reassigned, or if the node is packed.
+ * if the key is moved or reassigned, or if the node is packed. In general
+ * a KeyNode should not be expected to be valid after a write.
*/
const KeyNode keyNode(int i) const {
- if ( i >= n ) {
- massert( 13000 , (string)"invalid keyNode: " + BSON( "i" << i << "n" << n ).jsonString() , i < n );
+ if ( i >= this->n ) {
+ massert( 13000 , (string)"invalid keyNode: " + BSON( "i" << i << "n" << this->n ).jsonString() , i < this->n );
}
return KeyNode(*this, k(i));
}
@@ -143,29 +363,50 @@ namespace mongo {
const BucketBasics *d = 0;
return (char*)&(d->data) - (char*)&(d->parent);
}
- static int bodySize() { return BucketSize - headerSize(); }
+ static int bodySize() { return Version::BucketSize - headerSize(); }
+ static int lowWaterMark() { return bodySize() / 2 - Version::KeyMax - sizeof( _KeyNode ) + 1; } // see comment in btree.cpp
// for testing
- int nKeys() const { return n; }
- const DiskLoc getNextChild() const { return nextChild; }
+ int nKeys() const { return this->n; }
+ const DiskLoc getNextChild() const { return this->nextChild; }
protected:
- char * dataAt(short ofs) { return data + ofs; }
+ char * dataAt(short ofs) { return this->data + ofs; }
- void init(); // initialize a new node
+ /** Initialize the header for a new node. */
+ void init();
/**
- * @return false if node is full and must be split
- * @keypos is where to insert -- inserted before that key #. so keypos=0 is the leftmost one.
- * keypos will be updated if keys are moved as a result of pack()
- * This function will modify the btree bucket memory representation even
- * though it is marked const.
+ * Preconditions:
+ * - 0 <= keypos <= n
+ * - If key is inserted at position keypos, the bucket's keys will still be
+ * in order.
+ * Postconditions:
+ * - If key can fit in the bucket, the bucket may be packed and keypos
+ * may be decreased to reflect deletion of earlier indexed keys during
+ * packing, the key will be inserted at the updated keypos index with
+ * a null prevChildBucket, the subsequent keys shifted to the right,
+ * and the function will return true.
+ * - If key cannot fit in the bucket, the bucket will be packed and
+ * the function will return false.
+ * Although this function is marked const, it modifies the underlying
+ * btree representation through an optimized write intent mechanism.
*/
- bool basicInsert(const DiskLoc thisLoc, int &keypos, const DiskLoc recordLoc, const BSONObj& key, const Ordering &order) const;
+ bool basicInsert(const DiskLoc thisLoc, int &keypos, const DiskLoc recordLoc, const Key& key, const Ordering &order) const;
- /** @return true if works, false if not enough space */
- bool _pushBack(const DiskLoc recordLoc, const BSONObj& key, const Ordering &order, const DiskLoc prevChild);
- void pushBack(const DiskLoc recordLoc, const BSONObj& key, const Ordering &order, const DiskLoc prevChild) {
+ /**
+ * Preconditions:
+ * - key / recordLoc are > all existing keys
+ * - The keys in prevChild and their descendents are between all existing
+ * keys and 'key'.
+ * Postconditions:
+ * - If there is space for key without packing, it is inserted as the
+ * last key with specified prevChild and true is returned.
+ * Importantly, nextChild is not updated!
+ * - Otherwise false is returned and there is no change.
+ */
+ bool _pushBack(const DiskLoc recordLoc, const Key& key, const Ordering &order, const DiskLoc prevChild);
+ void pushBack(const DiskLoc recordLoc, const Key& key, const Ordering &order, const DiskLoc prevChild) {
bool ok = _pushBack( recordLoc , key , order , prevChild );
assert(ok);
}
@@ -180,10 +421,30 @@ namespace mongo {
* returns the last key without deleting it and another which simply
* deletes the last key. Then the caller would have enough control to
* ensure proper memory integrity.
+ *
+ * Preconditions:
+ * - bucket is not empty
+ * - last key of bucket is used (not unused)
+ * - nextChild isNull()
+ * - _unalloc will work correctly as used - see code
+ * Postconditions:
+ * - The last key of the bucket is removed, and its key and recLoc are
+ * returned. As mentioned above, the key points to unallocated memory.
*/
- void popBack(DiskLoc& recLoc, BSONObj& key);
+ void popBack(DiskLoc& recLoc, Key &key);
- void _delKeyAtPos(int keypos, bool mayEmpty = false); // low level version that doesn't deal with child ptrs.
+ /**
+ * Preconditions:
+ * - 0 <= keypos < n
+ * - there is no child bucket at keypos
+ * - n > 1
+ * - if mayEmpty == false or nextChild.isNull(), n > 0
+ * Postconditions:
+ * - The key at keypos is removed, and remaining keys are shifted over.
+ * - The bucket becomes unpacked.
+ * - if mayEmpty is true and nextChild.isNull(), the bucket may have no keys.
+ */
+ void _delKeyAtPos(int keypos, bool mayEmpty = false);
/* !Packed means there is deleted fragment space within the bucket.
We "repack" when we run out of space before considering the node
@@ -191,64 +452,124 @@ namespace mongo {
*/
enum Flags { Packed=1 };
- const DiskLoc& childForPos(int p) const { return p == n ? nextChild : k(p).prevChildBucket; }
- DiskLoc& childForPos(int p) { return p == n ? nextChild : k(p).prevChildBucket; }
+ /** n == 0 is ok */
+ const Loc& childForPos(int p) const { return p == this->n ? this->nextChild : k(p).prevChildBucket; }
+ Loc& childForPos(int p) { return p == this->n ? this->nextChild : k(p).prevChildBucket; }
+ /** Same as bodySize(). */
int totalDataSize() const;
- /** @return true if the key may be dropped by pack() */
+ /**
+ * @return true when a key may be dropped by pack()
+ * @param index index of the key that may be dropped
+ * @param refPos index of a particular key of interest, which must not
+ * be dropped; = 0 to safely ignore
+ */
bool mayDropKey( int index, int refPos ) const;
/**
* Pack the bucket to reclaim space from invalidated memory.
- * @refPos is an index in the bucket which will may be updated if we
+ * @refPos is an index in the bucket which may be updated if we
* delete keys from the bucket
* This function may cast away const and perform a write.
+ * Preconditions: none
+ * Postconditions:
+ * - Bucket will be packed
+ * - Some unused nodes may be dropped, but not ones at index 0 or refPos
+ * - Some used nodes may be moved
+ * - If refPos is the index of an existing key, it will be updated to that
+ * key's new index if the key is moved.
*/
void _pack(const DiskLoc thisLoc, const Ordering &order, int &refPos) const;
/** Pack when already writable */
void _packReadyForMod(const Ordering &order, int &refPos);
+ /** @return the size the bucket's body would have if we were to call pack() */
+ int packedDataSize( int refPos ) const;
+ void setNotPacked() { this->flags &= ~Packed; }
+ void setPacked() { this->flags |= Packed; }
/**
- * @return the size of non header data in this bucket if we were to
- * call pack().
+ * Preconditions: 'bytes' is <= emptySize
+ * Postconditions: A buffer of size 'bytes' is allocated on the top side,
+ * and its offset is returned.
*/
- int packedDataSize( int refPos ) const;
- void setNotPacked() { flags &= ~Packed; }
- void setPacked() { flags |= Packed; }
int _alloc(int bytes);
+ /**
+ * This function can be used to deallocate the lowest byte index bson
+ * buffer in the top region, which in some but not all cases is for the
+ * n - 1 index key. This function only works correctly in certain
+ * special cases, please be careful.
+ * Preconditions: 'bytes' <= topSize
+ * Postconditions: The top region is decreased
+ */
void _unalloc(int bytes);
+ /**
+ * Preconditions: 'N' <= n
+ * Postconditions:
+ * - All keys after the N index key are dropped.
+ * - Then the bucket is packed, without dropping refPos if refPos < N.
+ */
void truncateTo(int N, const Ordering &order, int &refPos);
- /** drop specified number of keys from beginning of key array, and pack */
+ /**
+ * Preconditions:
+ * - 'nDrop' < n
+ * - for now, refPos should be zero.
+ * Postconditions:
+ * - All keys before the nDrop index key are dropped.
+ * - The bucket is packed.
+ */
void dropFront(int nDrop, const Ordering &order, int &refPos);
+ /**
+ * Preconditions: 0 <= keypos < n
+ * Postconditions: keypos indexed key is marked unused.
+ */
void markUnused(int keypos);
/**
* BtreeBuilder uses the parent var as a temp place to maintain a linked list chain.
* we use tempNext() when we do that to be less confusing. (one might have written a union in C)
*/
- const DiskLoc& tempNext() const { return parent; }
- DiskLoc& tempNext() { return parent; }
+ DiskLoc tempNext() const { return this->parent; }
+ void setTempNext(DiskLoc l) { this->parent = l; }
void _shape(int level, stringstream&) const;
int Size() const;
- const _KeyNode& k(int i) const { return ((const _KeyNode*)data)[i]; }
- _KeyNode& k(int i) { return ((_KeyNode*)data)[i]; }
+
+ /** @return i-indexed _KeyNode, without bounds checking */
+ public:
+ const _KeyNode& k(int i) const { return ((const _KeyNode*)this->data)[i]; }
+ _KeyNode& _k(int i) { return ((_KeyNode*)this->data)[i]; }
+ protected:
+ _KeyNode& k(int i) { return ((_KeyNode*)this->data)[i]; }
- /** @return the key position where a split should occur on insert */
+ /**
+ * Preconditions: 'this' is packed
+ * @return the key index to be promoted on split
+ * @param keypos The requested index of a key to insert, which may affect
+ * the choice of split position.
+ */
int splitPos( int keypos ) const;
/**
- * Adds new entries to beginning of key array, shifting existing
- * entries to the right. After this is called, setKey() must be called
- * on all the newly created entries in the key array.
+ * Preconditions: nAdd * sizeof( _KeyNode ) <= emptySize
+ * Postconditions:
+ * - Increases indexes of existing _KeyNode objects by nAdd, reserving
+ * space for additional _KeyNode objects at front.
+ * - Does not initialize ofs values for the bson data of these
+ * _KeyNode objects.
*/
void reserveKeysFront( int nAdd );
/**
- * Sets an existing key using the given parameters.
- * @i index of key to set
+ * Preconditions:
+ * - 0 <= i < n
+ * - The bson 'key' must fit in the bucket without packing.
+ * - If 'key' and 'prevChildBucket' are set at index i, the btree
+ * ordering properties will be maintained.
+ * Postconditions:
+ * - The specified key is set at index i, replacing the existing
+ * _KeyNode data and without shifting any other _KeyNode objects.
*/
- void setKey( int i, const DiskLoc recordLoc, const BSONObj &key, const DiskLoc prevChildBucket );
+ void setKey( int i, const DiskLoc recordLoc, const Key& key, const DiskLoc prevChildBucket );
};
/**
@@ -273,22 +594,35 @@ namespace mongo {
* standard usage. Right now the interface is for both a node and a tree,
* so assignment of const is sometimes nonideal.
*
- * TODO There are several cases in which the this pointer is invalidated
+ * TODO There are several cases in which the 'this' pointer is invalidated
* as a result of deallocation. A separate class representing a btree would
* alleviate some fragile cases where the implementation must currently
- * behave correctly if the this pointer is suddenly invalidated by a
+ * behave correctly if the 'this' pointer is suddenly invalidated by a
* callee.
*/
- class BtreeBucket : public BucketBasics {
+ template< class V >
+ class BtreeBucket : public BucketBasics<V> {
friend class BtreeCursor;
public:
- bool isHead() const { return parent.isNull(); }
+ // make compiler happy:
+ typedef typename V::Key Key;
+ typedef typename V::KeyOwned KeyOwned;
+ typedef typename BucketBasics<V>::KeyNode KeyNode;
+ typedef typename BucketBasics<V>::_KeyNode _KeyNode;
+ typedef typename BucketBasics<V>::Loc Loc;
+ const _KeyNode& k(int i) const { return static_cast< const BucketBasics<V> * >(this)->k(i); }
+ protected:
+ _KeyNode& k(int i) { return static_cast< BucketBasics<V> * >(this)->_k(i); }
+ public:
+ const KeyNode keyNode(int i) const { return static_cast< const BucketBasics<V> * >(this)->keyNode(i); }
+
+ bool isHead() const { return this->parent.isNull(); }
void dumpTree(const DiskLoc &thisLoc, const BSONObj &order) const;
- int fullValidate(const DiskLoc& thisLoc, const BSONObj &order, int *unusedCount = 0, bool strict = false) const; /* traverses everything */
+ long long fullValidate(const DiskLoc& thisLoc, const BSONObj &order, long long *unusedCount = 0, bool strict = false, unsigned depth=0) const; /* traverses everything */
- bool isUsed( int i ) const { return k(i).isUsed(); }
+ bool isUsed( int i ) const { return this->k(i).isUsed(); }
string bucketSummary() const;
- void dump() const;
+ void dump(unsigned depth=0) const;
/**
* @return true if key exists in index
@@ -297,25 +631,63 @@ namespace mongo {
* BSONObj order = ((IndexDetails&)idx).keyPattern();
* likewise below in bt_insert() etc.
*/
- bool exists(const IndexDetails& idx, const DiskLoc &thisLoc, const BSONObj& key, const Ordering& order) const;
+ private:
+ bool exists(const IndexDetails& idx, const DiskLoc &thisLoc, const Key& key, const Ordering& order) const;
+ public:
+ /**
+ * @param self - the record's own location; finding that entry is not counted as a duplicate.
+ * @return true if there is a duplicate used key.
+ */
bool wouldCreateDup(
const IndexDetails& idx, const DiskLoc &thisLoc,
- const BSONObj& key, const Ordering& order,
+ const Key& key, const Ordering& order,
const DiskLoc &self) const;
- static DiskLoc addBucket(const IndexDetails&); /* start a new index off, empty */
- /** invalidates 'this' and thisLoc */
- void deallocBucket(const DiskLoc thisLoc, const IndexDetails &id);
+ /**
+ * Preconditions: none
+ * Postconditions: @return a new bucket allocated from pdfile storage
+ * and init()-ed. This bucket is suitable for use as a new root
+ * or any other new node in the tree.
+ */
+ static DiskLoc addBucket(const IndexDetails&);
- static void renameIndexNamespace(const char *oldNs, const char *newNs);
+ /**
+ * Preconditions: none
+ * Postconditions:
+ * - Some header values in this bucket are cleared, and the bucket is
+ * deallocated from pdfile storage.
+ * - The memory at thisLoc is invalidated, and 'this' is invalidated.
+ */
+ void deallocBucket(const DiskLoc thisLoc, const IndexDetails &id);
- /** This function may change the btree root */
+ /**
+ * Preconditions:
+ * - 'key' has a valid schema for this index.
+ * - All other parameters are valid and consistent with this index if applicable.
+ * Postconditions:
+ * - If key is bigger than KeyMax, @return 2 or 3 and no change.
+ * - If key / recordLoc exist in the btree as an unused key, set them
+ * as used and @return 0
+ * - If key / recordLoc exist in the btree as a used key, @throw
+ * exception 10287 and no change.
+ * - If key / recordLoc do not exist in the btree, they are inserted
+ * and @return 0. The root of the btree may be changed, so
+ * 'this'/thisLoc may no longer be the root upon return.
+ */
int bt_insert(const DiskLoc thisLoc, const DiskLoc recordLoc,
const BSONObj& key, const Ordering &order, bool dupsAllowed,
IndexDetails& idx, bool toplevel = true) const;
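Tying the contract above to a call site (compare a_test() earlier in this diff): a hedged usage sketch, not part of the patch, assuming a v1 index whose IndexDetails, key, and record location already exist:

    // Illustrative only: inserting one key per the pre/postconditions above.
    static int insertSketch(mongo::IndexDetails& id, const mongo::BSONObj& key,
                            const mongo::DiskLoc recordLoc) {
        const mongo::Ordering order = mongo::Ordering::make(id.keyPattern());
        const mongo::BtreeBucket<mongo::V1>* head = id.head.btree<mongo::V1>();
        // returns 0 on insert (or when an unused key/recordLoc is revived),
        // 2 or 3 when the key exceeds KeyMax and is skipped; a used duplicate
        // key/recordLoc throws assertion 10287 rather than returning.
        return head->bt_insert(id.head, recordLoc, key, order, /*dupsAllowed*/ false, id);
    }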
- /** This function may change the btree root */
+ /**
+ * Preconditions:
+ * - 'key' has a valid schema for this index, and may have objsize() > KeyMax.
+ * Postconditions:
+ * - If key / recordLoc are in the btree, they are removed (possibly
+ * by being marked as an unused key), @return true, and potentially
+ * invalidate 'this' / thisLoc and change the head.
+ * - If key / recordLoc are not in the btree, @return false and do nothing.
+ */
bool unindex(const DiskLoc thisLoc, IndexDetails& id, const BSONObj& key, const DiskLoc recordLoc) const;
/**
@@ -327,21 +699,31 @@ namespace mongo {
*/
DiskLoc locate(const IndexDetails &idx , const DiskLoc& thisLoc, const BSONObj& key, const Ordering &order,
int& pos, bool& found, const DiskLoc &recordLoc, int direction=1) const;
+ DiskLoc locate(const IndexDetails &idx , const DiskLoc& thisLoc, const Key& key, const Ordering &order,
+ int& pos, bool& found, const DiskLoc &recordLoc, int direction=1) const;
/**
* find the first instance of the key
* does not handle dups
- * returned DiskLoc isNull if can't find anything with that
+ * WARNING: findSingle may not be compound index safe. this may need to change. see notes in
+ * findSingle code.
* @return the record location of the first match
*/
DiskLoc findSingle( const IndexDetails &indexdetails , const DiskLoc& thisLoc, const BSONObj& key ) const;
- /** advance one key position in the index: */
+ /**
+ * Advance to next or previous key in the index.
+ * @param direction to advance.
+ */
DiskLoc advance(const DiskLoc& thisLoc, int& keyOfs, int direction, const char *caller) const;
+ /** Advance in specified direction to the specified key */
void advanceTo(DiskLoc &thisLoc, int &keyOfs, const BSONObj &keyBegin, int keyBeginLen, bool afterKey, const vector< const BSONElement * > &keyEnd, const vector< bool > &keyEndInclusive, const Ordering &order, int direction ) const;
+
+ /** Locate a key with fields comprised of a combination of keyBegin fields and keyEnd fields. */
void customLocate(DiskLoc &thisLoc, int &keyOfs, const BSONObj &keyBegin, int keyBeginLen, bool afterKey, const vector< const BSONElement * > &keyEnd, const vector< bool > &keyEndInclusive, const Ordering &order, int direction, pair< DiskLoc, int > &bestParent ) const;
+ /** @return head of the btree by traversing from current bucket. */
const DiskLoc getHead(const DiskLoc& thisLoc) const;
/** get tree shape */
@@ -349,111 +731,275 @@ namespace mongo {
static void a_test(IndexDetails&);
- static int getLowWaterMark();
static int getKeyMax();
protected:
/**
- * Fix parent pointers for children
- * @firstIndex first index to modify
- * @lastIndex last index to modify (-1 means last index is n)
+ * Preconditions:
+ * - 0 <= firstIndex <= n
+ * - -1 <= lastIndex <= n ( -1 is equivalent to n )
+ * Postconditions:
+ * - Any children at indexes firstIndex through lastIndex (inclusive)
+ * will have their parent pointers set to thisLoc.
*/
void fixParentPtrs(const DiskLoc thisLoc, int firstIndex = 0, int lastIndex = -1) const;
- /** invalidates this and thisLoc */
+ /**
+ * Preconditions:
+ * - thisLoc is not the btree head.
+ * - n == 0 is ok
+ * Postconditions:
+ * - All cursors pointing to this bucket will be updated.
+ * - This bucket's parent's child pointer is set to null.
+ * - This bucket is deallocated from pdfile storage.
+ * - 'this' and thisLoc are invalidated.
+ */
void delBucket(const DiskLoc thisLoc, const IndexDetails&);
- /** may invalidate this and thisLoc */
+
+ /**
+ * Preconditions: 0 <= p < n
+ * Postconditions:
+ * - The key at index p is removed from the btree.
+ * - 'this' and thisLoc may be invalidated.
+ * - The tree head may change.
+ */
void delKeyAtPos(const DiskLoc thisLoc, IndexDetails& id, int p, const Ordering &order);
/**
- * May balance utilization of this bucket with a neighbor, either by
- * merging the buckets or shifting nodes.
- * @return true iff balancing was performed.
- * NOTE This function may invalidate thisLoc.
+ * Preconditions:
+ * - n == 0 is ok
+ * Postconditions:
+ * - If thisLoc is head, or if its body has at least lowWaterMark bytes,
+ * return false and do nothing.
+ * - Otherwise, if thisLoc has left or right neighbors, either balance
+ * or merge with them and return true. Also, 'this' and thisLoc may
+ * be invalidated and the tree head may change.
*/
bool mayBalanceWithNeighbors(const DiskLoc thisLoc, IndexDetails &id, const Ordering &order) const;
- /** @return true if balance succeeded */
+ /**
+ * Preconditions:
+ * - 0 <= leftIndex < n
+ * - The child at leftIndex or the child at leftIndex + 1 contains
+ * fewer than lowWaterMark bytes.
+ * Postconditions:
+ * - If the child bucket at leftIndex can merge with the child index
+ * at leftIndex + 1, do nothing and return false.
+ * - Otherwise, balance keys between the leftIndex child and the
+ * leftIndex + 1 child, return true, and possibly change the tree head.
+ */
bool tryBalanceChildren( const DiskLoc thisLoc, int leftIndex, IndexDetails &id, const Ordering &order ) const;
+
+ /**
+ * Preconditions:
+ * - All preconditions of tryBalanceChildren.
+ * - The leftIndex child and leftIndex + 1 child cannot be merged.
+ * Postconditions:
+ * - Keys are moved between the leftIndex child and the leftIndex + 1
+ * child such that neither child has fewer than lowWaterMark bytes.
+ * The tree head may change.
+ */
void doBalanceChildren( const DiskLoc thisLoc, int leftIndex, IndexDetails &id, const Ordering &order );
+
+ /**
+ * Preconditions:
+ * - All preconditions of doBalanceChildren
+ * - The leftIndex and leftIndex + 1 children are packed.
+ * - The leftIndex + 1 child has fewer than lowWaterMark bytes.
+ * - split returned by rebalancedSeparatorPos()
+ * Postconditions:
+ * - The key in lchild at index split is set as thisLoc's key at index
+ * leftIndex, which may trigger a split and change the tree head.
+ * The previous key in thisLoc at index leftIndex and all keys with
+ * indexes greater than split in lchild are moved to rchild.
+ */
void doBalanceLeftToRight( const DiskLoc thisLoc, int leftIndex, int split,
- BtreeBucket *l, const DiskLoc lchild,
- BtreeBucket *r, const DiskLoc rchild,
+ BtreeBucket<V> *l, const DiskLoc lchild,
+ BtreeBucket<V> *r, const DiskLoc rchild,
IndexDetails &id, const Ordering &order );
+ /**
+ * Preconditions:
+ * - All preconditions of doBalanceChildren
+ * - The leftIndex and leftIndex + 1 children are packed.
+ * - The leftIndex child has fewer than lowWaterMark bytes.
+ * - split returned by rebalancedSeparatorPos()
+ * Postconditions:
+ * - The key in rchild at index split - l->n - 1 is set as thisLoc's key
+ * at index leftIndex, which may trigger a split and change the tree
+ * head. The previous key in thisLoc at index leftIndex and all keys
+ * with indexes less than split - l->n - 1 in rchild are moved to
+ * lchild.
+ */
void doBalanceRightToLeft( const DiskLoc thisLoc, int leftIndex, int split,
- BtreeBucket *l, const DiskLoc lchild,
- BtreeBucket *r, const DiskLoc rchild,
+ BtreeBucket<V> *l, const DiskLoc lchild,
+ BtreeBucket<V> *r, const DiskLoc rchild,
IndexDetails &id, const Ordering &order );
- /** may invalidate this and thisLoc */
+ /**
+ * Preconditions:
+ * - 0 <= leftIndex < n
+ * - this->canMergeChildren( thisLoc, leftIndex ) == true
+ * Postconditions:
+ * - The leftIndex child's keys, the leftIndex + 1 child's keys, and thisLoc's key at leftIndex are all placed in the left child.
+ * - The tree may be updated recursively, resulting in 'this' and
+ * thisLoc being invalidated and the tree head being changed.
+ */
void doMergeChildren( const DiskLoc thisLoc, int leftIndex, IndexDetails &id, const Ordering &order);
- /** will invalidate this and thisLoc */
+ /**
+ * Preconditions:
+ * - n == 0
+ * - !nextChild.isNull()
+ * Postconditions:
+ * - 'this' and thisLoc are deallocated (and invalidated), any cursors
+ * to them are updated, and the tree head may change.
+ * - nextChild replaces thisLoc in the btree structure.
+ */
void replaceWithNextChild( const DiskLoc thisLoc, IndexDetails &id );
- /** @return true iff left and right child can be merged into one node */
+ /**
+ * @return true iff the leftIndex and leftIndex + 1 children both exist,
+ * and if their body sizes when packed and the thisLoc key at leftIndex
+ * would fit in a single bucket body.
+ */
bool canMergeChildren( const DiskLoc &thisLoc, int leftIndex ) const;
/**
+ * Preconditions:
+ * - leftIndex and leftIndex + 1 children are packed
+ * - leftIndex or leftIndex + 1 child is below lowWaterMark
* @return index of the rebalanced separator; the index value is
- * determined as if we had an array
- * <left bucket keys array>.push( <old separator> ).concat( <right bucket keys array> )
- * This is only expected to be called if the left and right child
- * cannot be merged.
- * This function is expected to be called on packed buckets, see also
- * comments for splitPos().
+ * determined as if we had a bucket with body
+ * <left bucket keys array>.push( <old separator> ).concat( <right bucket keys array> )
+ * and called splitPos( 0 ) on it.
*/
int rebalancedSeparatorPos( const DiskLoc &thisLoc, int leftIndex ) const;
- int indexInParent( const DiskLoc &thisLoc ) const;
- BSONObj keyAt(int keyOfs) const {
- return keyOfs >= n ? BSONObj() : keyNode(keyOfs).key;
+ /**
+ * Preconditions: thisLoc has a parent
+ * @return parent's index of thisLoc.
+ */
+ int indexInParent( const DiskLoc &thisLoc ) const;
+
+ public:
+ Key keyAt(int i) const {
+ if( i >= this->n )
+ return Key();
+ return Key(this->data + k(i).keyDataOfs());
}
- static BtreeBucket* allocTemp(); /* caller must release with free() */
+ protected:
+
+ /**
+ * Allocate a temporary btree bucket in ram rather than in memory mapped
+ * storage. The caller must release this bucket with free().
+ */
+ static BtreeBucket<V> * allocTemp();
- /** split bucket */
+ /**
+ * Preconditions:
+ * - This bucket is packed.
+ * - Cannot add a key of size KeyMax to this bucket.
+ * - 0 <= keypos <= n is the position of a new key that will be inserted
+ * - lchild is equal to the existing child at index keypos.
+ * Postconditions:
+ * - The thisLoc bucket is split into two packed buckets, possibly
+ * invalidating the initial position of keypos, with a split key
+ * promoted to the parent. The new key key/recordLoc will be inserted
+ * into one of the split buckets, and lchild/rchild set appropriately.
+ * Splitting may occur recursively, possibly changing the tree head.
+ */
void split(const DiskLoc thisLoc, int keypos,
- const DiskLoc recordLoc, const BSONObj& key,
+ const DiskLoc recordLoc, const Key& key,
const Ordering& order, const DiskLoc lchild, const DiskLoc rchild, IndexDetails& idx);
+ /**
+ * Preconditions:
+ * - 0 <= keypos <= n
+ * - If key / recordLoc are inserted at position keypos, with provided
+ * lchild and rchild, the btree ordering requirements will be
+ * maintained.
+ * - lchild is equal to the existing child at index keypos.
+ * - n == 0 is ok.
+ * Postconditions:
+ * - The key / recordLoc are inserted at position keypos, and the
+ * bucket is split if necessary, which may change the tree head.
+ * - The bucket may be packed or split, invalidating the specified value
+ * of keypos.
+ * This function will always modify thisLoc, but it's marked const because
+ * it commonly relies on the specialized write intent mechanism of basicInsert().
+ */
void insertHere(const DiskLoc thisLoc, int keypos,
- const DiskLoc recordLoc, const BSONObj& key, const Ordering &order,
+ const DiskLoc recordLoc, const Key& key, const Ordering &order,
const DiskLoc lchild, const DiskLoc rchild, IndexDetails &idx) const;
+ /** bt_insert() is basically just a wrapper around this. */
int _insert(const DiskLoc thisLoc, const DiskLoc recordLoc,
- const BSONObj& key, const Ordering &order, bool dupsAllowed,
+ const Key& key, const Ordering &order, bool dupsAllowed,
const DiskLoc lChild, const DiskLoc rChild, IndexDetails &idx) const;
- bool find(const IndexDetails& idx, const BSONObj& key, const DiskLoc &recordLoc, const Ordering &order, int& pos, bool assertIfDup) const;
+
+ bool find(const IndexDetails& idx, const Key& key, const DiskLoc &recordLoc, const Ordering &order, int& pos, bool assertIfDup) const;
bool customFind( int l, int h, const BSONObj &keyBegin, int keyBeginLen, bool afterKey, const vector< const BSONElement * > &keyEnd, const vector< bool > &keyEndInclusive, const Ordering &order, int direction, DiskLoc &thisLoc, int &keyOfs, pair< DiskLoc, int > &bestParent ) const;
static void findLargestKey(const DiskLoc& thisLoc, DiskLoc& largestLoc, int& largestKey);
static int customBSONCmp( const BSONObj &l, const BSONObj &rBegin, int rBeginLen, bool rSup, const vector< const BSONElement * > &rEnd, const vector< bool > &rEndInclusive, const Ordering &o, int direction );
+
+ /** If child is non null, set its parent to thisLoc */
static void fix(const DiskLoc thisLoc, const DiskLoc child);
- /** Replaces an existing key with the new specified key, splitting if necessary */
+ /**
+ * Preconditions:
+ * - 0 <= keypos < n
+ * - If the specified key and recordLoc are placed in keypos of thisLoc,
+ * and lchild and rchild are set, the btree ordering properties will
+ * be maintained.
+ * - rchild == childForPos( keypos + 1 )
+ * - childForPos( keypos ) is referenced elsewhere if nonnull.
+ * Postconditions:
+ * - The key at keypos will be replaced with the specified key and
+ * lchild, potentially splitting this bucket and changing the tree
+ * head.
+ * - childForPos( keypos ) will be orphaned.
+ */
void setInternalKey( const DiskLoc thisLoc, int keypos,
- const DiskLoc recordLoc, const BSONObj &key, const Ordering &order,
+ const DiskLoc recordLoc, const Key &key, const Ordering &order,
const DiskLoc lchild, const DiskLoc rchild, IndexDetails &idx);
/**
- * Deletes the specified key, replacing it with the key immediately
- * preceding or succeeding it in the btree. Either the left or right
- * child of the specified key must be non null.
+ * Preconditions:
+ * - 0 <= keypos < n
+ * - The keypos or keypos+1 indexed child is non null.
+ * Postconditions:
+ * - The specified key is deleted by replacing it with another key if
+ * possible. This replacement may cause a split and change the tree
+ * head. The replacement key will be deleted from its original
+ * location, potentially causing merges and splits that may invalidate
+ * 'this' and thisLoc and change the tree head.
+ * - If the key cannot be replaced, it will be marked as unused. This
+ * is only expected in legacy btrees.
*/
void deleteInternalKey( const DiskLoc thisLoc, int keypos, IndexDetails &id, const Ordering &order );
public:
/** simply builds and returns a dup key error message string */
- static string dupKeyError( const IndexDetails& idx , const BSONObj& key );
+ static string dupKeyError( const IndexDetails& idx , const Key& key );
};
#pragma pack()
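
    The split-related contracts above (split(), insertHere(), setInternalKey(), deleteInternalKey()) all revolve around the same operation: a packed bucket is divided around a separator key that gets promoted to the parent. A minimal conceptual sketch, using hypothetical in-memory string keys rather than the real Key/DiskLoc machinery, just to make the promoted-separator idea concrete:

        #include <string>
        #include <vector>

        // Toy model only -- not the BtreeBucket code. Keys are plain strings, a bucket is a vector.
        struct ToySplit {
            std::vector<std::string> left;   // keys that stay in the original bucket
            std::string promoted;            // separator key pushed up into the parent
            std::vector<std::string> right;  // keys moved to the newly allocated right bucket
        };

        // Split a full, sorted key list around its median, mirroring what split() does conceptually.
        static ToySplit toySplit(const std::vector<std::string>& keys) {
            ToySplit s;
            size_t mid = keys.size() / 2;                        // stand-in for splitPos()
            s.left.assign(keys.begin(), keys.begin() + mid);
            s.promoted = keys[mid];                              // promoted into the parent bucket
            s.right.assign(keys.begin() + mid + 1, keys.end());
            return s;
        }

    As in the real code, the caller is then responsible for inserting the new key into whichever half it now belongs to and for wiring lchild/rchild and parent pointers back up.
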
+ class FieldRangeVector;
+ class FieldRangeVectorIterator;
+
class BtreeCursor : public Cursor {
- public:
+ protected:
BtreeCursor( NamespaceDetails *_d, int _idxNo, const IndexDetails&, const BSONObj &startKey, const BSONObj &endKey, bool endKeyInclusive, int direction );
BtreeCursor( NamespaceDetails *_d, int _idxNo, const IndexDetails& _id, const shared_ptr< FieldRangeVector > &_bounds, int _direction );
+ public:
+ virtual ~BtreeCursor();
+ /** makes an appropriate subclass depending on the index version */
+ static BtreeCursor* make( NamespaceDetails *_d, int _idxNo, const IndexDetails&, const BSONObj &startKey, const BSONObj &endKey, bool endKeyInclusive, int direction );
+ static BtreeCursor* make( NamespaceDetails *_d, int _idxNo, const IndexDetails& _id, const shared_ptr< FieldRangeVector > &_bounds, int _direction );
+
virtual bool ok() { return !bucket.isNull(); }
virtual bool advance();
virtual void noteLocation(); // updates keyAtKeyOfs...
- virtual void checkLocation();
+ virtual void checkLocation() = 0;
virtual bool supportGetMore() { return true; }
virtual bool supportYields() { return true; }
@@ -462,7 +1008,7 @@ namespace mongo {
* if a multikey index traversal:
* if loc has already been sent, returns true.
* otherwise, marks loc as sent.
- * @return true if the loc has not been seen
+ * @return false if the loc has not been seen
*/
virtual bool getsetdup(DiskLoc loc) {
if( _multikey ) {
@@ -475,18 +1021,17 @@ namespace mongo {
virtual bool modifiedKeys() const { return _multikey; }
virtual bool isMultiKey() const { return _multikey; }
- const _KeyNode& _currKeyNode() const {
+ /*const _KeyNode& _currKeyNode() const {
assert( !bucket.isNull() );
- const _KeyNode& kn = bucket.btree()->k(keyOfs);
+ const _KeyNode& kn = keyNode(keyOfs);
assert( kn.isUsed() );
return kn;
- }
- const KeyNode currKeyNode() const {
- assert( !bucket.isNull() );
- return bucket.btree()->keyNode(keyOfs);
- }
+ }*/
- virtual BSONObj currKey() const { return currKeyNode().key; }
+ /** returns BSONObj() if ofs is out of range */
+ virtual BSONObj keyAt(int ofs) const = 0;
+
+ virtual BSONObj currKey() const = 0;
virtual BSONObj indexKeyPattern() { return indexDetails.keyPattern(); }
virtual void aboutToDeleteBucket(const DiskLoc& b) {
@@ -494,33 +1039,22 @@ namespace mongo {
keyOfs = -1;
}
- virtual DiskLoc currLoc() { return !bucket.isNull() ? _currKeyNode().recordLoc : DiskLoc(); }
+ virtual DiskLoc currLoc() = 0; // { return !bucket.isNull() ? _currKeyNode().recordLoc : DiskLoc(); }
virtual DiskLoc refLoc() { return currLoc(); }
virtual Record* _current() { return currLoc().rec(); }
virtual BSONObj current() { return BSONObj(_current()); }
- virtual string toString() {
- string s = string("BtreeCursor ") + indexDetails.indexName();
- if ( _direction < 0 ) s += " reverse";
- if ( _bounds.get() && _bounds->size() > 1 ) s += " multi";
- return s;
- }
+ virtual string toString();
BSONObj prettyKey( const BSONObj &key ) const {
return key.replaceFieldNames( indexDetails.keyPattern() ).clientReadable();
}
- virtual BSONObj prettyIndexBounds() const {
- if ( !_independentFieldRanges ) {
- return BSON( "start" << prettyKey( startKey ) << "end" << prettyKey( endKey ) );
- }
- else {
- return _bounds->obj();
- }
- }
+ virtual BSONObj prettyIndexBounds() const;
void forgetEndKey() { endKey = BSONObj(); }
virtual CoveredIndexMatcher *matcher() const { return _matcher.get(); }
+ virtual shared_ptr< CoveredIndexMatcher > matcherPtr() const { return _matcher; }
virtual void setMatcher( shared_ptr< CoveredIndexMatcher > matcher ) { _matcher = matcher; }
@@ -529,12 +1063,16 @@ namespace mongo {
/** for debugging only */
const DiskLoc getBucket() const { return bucket; }
- private:
+ // just for unit tests
+ virtual bool curKeyHasChild() = 0;
+
+ protected:
/**
* Our btrees may (rarely) have "unused" keys when items are deleted.
* Skip past them.
*/
- bool skipUnusedKeys( bool mayJump );
+ virtual bool skipUnusedKeys() = 0;
+
bool skipOutOfRangeKeysAndCheckEnd();
void skipAndCheck();
void checkEnd();
@@ -542,14 +1080,17 @@ namespace mongo {
/** selective audits on construction */
void audit();
+ virtual void _audit() = 0;
+ virtual DiskLoc _locate(const BSONObj& key, const DiskLoc& loc) = 0;
+ virtual DiskLoc _advance(const DiskLoc& thisLoc, int& keyOfs, int direction, const char *caller) = 0;
+ virtual void _advanceTo(DiskLoc &thisLoc, int &keyOfs, const BSONObj &keyBegin, int keyBeginLen, bool afterKey, const vector< const BSONElement * > &keyEnd, const vector< bool > &keyEndInclusive, const Ordering &order, int direction ) = 0;
+
/** set initial bucket */
void init();
/** if afterKey is true, we want the first key with values of the keyBegin fields greater than keyBegin */
void advanceTo( const BSONObj &keyBegin, int keyBeginLen, bool afterKey, const vector< const BSONElement * > &keyEnd, const vector< bool > &keyEndInclusive );
- friend class BtreeBucket;
-
set<DiskLoc> _dups;
NamespaceDetails * const d;
const int idxNo;
@@ -566,56 +1107,31 @@ namespace mongo {
BSONObj keyAtKeyOfs; // so we can tell if things moved around on us between the query and the getMore call
DiskLoc locAtKeyOfs;
const shared_ptr< FieldRangeVector > _bounds;
- auto_ptr< FieldRangeVector::Iterator > _boundsIterator;
+ auto_ptr< FieldRangeVectorIterator > _boundsIterator;
const IndexSpec& _spec;
shared_ptr< CoveredIndexMatcher > _matcher;
bool _independentFieldRanges;
long long _nscanned;
};
-
- inline bool IndexDetails::hasKey(const BSONObj& key) {
- return head.btree()->exists(*this, head, key, Ordering::make(keyPattern()));
- }
- inline bool IndexDetails::wouldCreateDup(const BSONObj& key, DiskLoc self) {
- return head.btree()->wouldCreateDup(*this, head, key, Ordering::make(keyPattern()), self);
- }
+ /** Renames the index namespace for this btree's index. */
+ void renameIndexNamespace(const char *oldNs, const char *newNs);
/**
- * build btree from the bottom up
- * _ TODO dropDups
+     * Get a writable version of the btree bucket (declares write intent).
+     * Note it is likely more efficient to declare write intent on a smaller region when you can.
*/
- class BtreeBuilder {
- bool dupsAllowed;
- IndexDetails& idx;
- unsigned long long n;
- BSONObj keyLast;
- BSONObj order;
- Ordering ordering;
- bool committed;
-
- DiskLoc cur, first;
- BtreeBucket *b;
-
- void newBucket();
- void buildNextLevel(DiskLoc);
- void mayCommitProgressDurably();
-
- public:
- ~BtreeBuilder();
-
- BtreeBuilder(bool _dupsAllowed, IndexDetails& _idx);
-
- /** keys must be added in order */
- void addKey(BSONObj& key, DiskLoc loc);
-
- /**
- * commit work. if not called, destructor will clean up partially completed work
- * (in case exception has happened).
- */
- void commit();
+ template< class V >
+ BtreeBucket<V> * DiskLoc::btreemod() const {
+ assert( _a != -1 );
+ BtreeBucket<V> *b = const_cast< BtreeBucket<V> * >( btree<V>() );
+ return static_cast< BtreeBucket<V>* >( getDur().writingPtr( b, V::BucketSize ) );
+ }
- unsigned long long getn() { return n; }
- };
+ template< class V >
+ BucketBasics<V>::KeyNode::KeyNode(const BucketBasics<V>& bb, const _KeyNode &k) :
+ prevChildBucket(k.prevChildBucket),
+ recordLoc(k.recordLoc), key(bb.data+k.keyDataOfs())
+ { }
} // namespace mongo;
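
    The btreemod() helper above is the write-intent idiom the rest of this patch leans on. A hedged sketch of how a caller is expected to use it, modelled on BtreeBuilder::mayCommitProgressDurably() in the next file; the DiskLoc argument is an assumed valid bucket location:

        // Sketch only: obtain a writable bucket through btreemod<V>() before touching mapped
        // memory, and re-obtain it whenever the durability layer performs a group commit --
        // BtreeBuilder refreshes its cached pointer whenever commitIfNeeded() returns true.
        template< class V >
        void toyTouchBucket( const DiskLoc& loc ) {
            BtreeBucket<V>* b = loc.btreemod<V>();   // declares write intent on V::BucketSize bytes
            // ... mutate *b ...
            if ( getDur().commitIfNeeded() ) {
                b = loc.btreemod<V>();               // pointer must be refreshed after a commit
                // ... continue mutating *b ...
            }
        }
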
diff --git a/db/btreebuilder.cpp b/db/btreebuilder.cpp
new file mode 100644
index 0000000..0ec587a
--- /dev/null
+++ b/db/btreebuilder.cpp
@@ -0,0 +1,184 @@
+// btreebuilder.cpp
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "db.h"
+#include "btree.h"
+#include "pdfile.h"
+#include "json.h"
+#include "clientcursor.h"
+#include "client.h"
+#include "dbhelpers.h"
+#include "curop-inl.h"
+#include "stats/counters.h"
+#include "dur_commitjob.h"
+#include "btreebuilder.h"
+
+namespace mongo {
+
+ /* --- BtreeBuilder --- */
+
+ template<class V>
+ BtreeBuilder<V>::BtreeBuilder(bool _dupsAllowed, IndexDetails& _idx) :
+ dupsAllowed(_dupsAllowed),
+ idx(_idx),
+ n(0),
+ order( idx.keyPattern() ),
+ ordering( Ordering::make(idx.keyPattern()) ) {
+ first = cur = BtreeBucket<V>::addBucket(idx);
+ b = cur.btreemod<V>();
+ committed = false;
+ }
+
+ template<class V>
+ void BtreeBuilder<V>::newBucket() {
+ DiskLoc L = BtreeBucket<V>::addBucket(idx);
+ b->setTempNext(L);
+ cur = L;
+ b = cur.btreemod<V>();
+ }
+
+ template<class V>
+ void BtreeBuilder<V>::mayCommitProgressDurably() {
+ if ( getDur().commitIfNeeded() ) {
+ b = cur.btreemod<V>();
+ }
+ }
+
+ template<class V>
+ void BtreeBuilder<V>::addKey(BSONObj& _key, DiskLoc loc) {
+
+ auto_ptr< KeyOwned > key( new KeyOwned(_key) );
+ if ( key->dataSize() > BtreeBucket<V>::KeyMax ) {
+ problem() << "Btree::insert: key too large to index, skipping " << idx.indexNamespace()
+ << ' ' << key->dataSize() << ' ' << key->toString() << endl;
+ return;
+ }
+
+ if( !dupsAllowed ) {
+ if( n > 0 ) {
+ int cmp = keyLast->woCompare(*key, ordering);
+ massert( 10288 , "bad key order in BtreeBuilder - server internal error", cmp <= 0 );
+ if( cmp == 0 ) {
+ //if( !dupsAllowed )
+ uasserted( ASSERT_ID_DUPKEY , BtreeBucket<V>::dupKeyError( idx , *keyLast ) );
+ }
+ }
+ }
+
+ if ( ! b->_pushBack(loc, *key, ordering, DiskLoc()) ) {
+ // bucket was full
+ newBucket();
+ b->pushBack(loc, *key, ordering, DiskLoc());
+ }
+ keyLast = key;
+ n++;
+ mayCommitProgressDurably();
+ }
+
+ template<class V>
+ void BtreeBuilder<V>::buildNextLevel(DiskLoc loc) {
+ int levels = 1;
+ while( 1 ) {
+ if( loc.btree<V>()->tempNext().isNull() ) {
+ // only 1 bucket at this level. we are done.
+ getDur().writingDiskLoc(idx.head) = loc;
+ break;
+ }
+ levels++;
+
+ DiskLoc upLoc = BtreeBucket<V>::addBucket(idx);
+ DiskLoc upStart = upLoc;
+ BtreeBucket<V> *up = upLoc.btreemod<V>();
+
+ DiskLoc xloc = loc;
+ while( !xloc.isNull() ) {
+ if ( getDur().commitIfNeeded() ) {
+ b = cur.btreemod<V>();
+ up = upLoc.btreemod<V>();
+ }
+
+ BtreeBucket<V> *x = xloc.btreemod<V>();
+ Key k;
+ DiskLoc r;
+ x->popBack(r,k);
+ bool keepX = ( x->n != 0 );
+ DiskLoc keepLoc = keepX ? xloc : x->nextChild;
+
+ if ( ! up->_pushBack(r, k, ordering, keepLoc) ) {
+ // current bucket full
+ DiskLoc n = BtreeBucket<V>::addBucket(idx);
+ up->setTempNext(n);
+ upLoc = n;
+ up = upLoc.btreemod<V>();
+ up->pushBack(r, k, ordering, keepLoc);
+ }
+
+ DiskLoc nextLoc = x->tempNext(); // get next in chain at current level
+ if ( keepX ) {
+ x->parent = upLoc;
+ }
+ else {
+ if ( !x->nextChild.isNull() ) {
+ DiskLoc ll = x->nextChild;
+ ll.btreemod<V>()->parent = upLoc;
+ //(x->nextChild.btreemod<V>())->parent = upLoc;
+ }
+ x->deallocBucket( xloc, idx );
+ }
+ xloc = nextLoc;
+ }
+
+ loc = upStart;
+ mayCommitProgressDurably();
+ }
+
+ if( levels > 1 )
+ log(2) << "btree levels: " << levels << endl;
+ }
+
+ /** when all addKeys are done, we then build the higher levels of the tree */
+ template<class V>
+ void BtreeBuilder<V>::commit() {
+ buildNextLevel(first);
+ committed = true;
+ }
+
+ template<class V>
+ BtreeBuilder<V>::~BtreeBuilder() {
+ DESTRUCTOR_GUARD(
+ if( !committed ) {
+ log(2) << "Rolling back partially built index space" << endl;
+ DiskLoc x = first;
+ while( !x.isNull() ) {
+ DiskLoc next = x.btree<V>()->tempNext();
+ string ns = idx.indexNamespace();
+ theDataFileMgr._deleteRecord(nsdetails(ns.c_str()), ns.c_str(), x.rec(), x);
+ x = next;
+ getDur().commitIfNeeded();
+ }
+ assert( idx.head.isNull() );
+ log(2) << "done rollback" << endl;
+ }
+ )
+ }
+
+ template class BtreeBuilder<V0>;
+ template class BtreeBuilder<V1>;
+
+}
diff --git a/db/btreebuilder.h b/db/btreebuilder.h
new file mode 100644
index 0000000..6de55d8
--- /dev/null
+++ b/db/btreebuilder.h
@@ -0,0 +1,53 @@
+#pragma once
+
+#include "btree.h"
+
+namespace mongo {
+
+ /**
+ * build btree from the bottom up
+ */
+ template< class V >
+ class BtreeBuilder {
+ typedef typename V::KeyOwned KeyOwned;
+ typedef typename V::Key Key;
+
+ bool dupsAllowed;
+ IndexDetails& idx;
+ /** Number of keys added to btree. */
+ unsigned long long n;
+ /** Last key passed to addKey(). */
+ auto_ptr< typename V::KeyOwned > keyLast;
+ BSONObj order;
+ Ordering ordering;
+ /** true iff commit() completed successfully. */
+ bool committed;
+
+ DiskLoc cur, first;
+ BtreeBucket<V> *b;
+
+ void newBucket();
+ void buildNextLevel(DiskLoc);
+ void mayCommitProgressDurably();
+
+ public:
+ ~BtreeBuilder();
+
+ BtreeBuilder(bool _dupsAllowed, IndexDetails& _idx);
+
+ /**
+ * Preconditions: 'key' is > or >= last key passed to this function (depends on _dupsAllowed)
+ * Postconditions: 'key' is added to intermediate storage.
+ */
+ void addKey(BSONObj& key, DiskLoc loc);
+
+ /**
+ * commit work. if not called, destructor will clean up partially completed work
+ * (in case exception has happened).
+ */
+ void commit();
+
+ unsigned long long getn() { return n; }
+ };
+
+}
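
    A usage sketch of the contract documented above: keys are fed to addKey() in sorted order and commit() is called exactly once at the end (otherwise the destructor rolls the partial build back). The index, record locations and V1 choice are assumptions standing in for the real index-build path:

        // Sketch only -- not the actual index creation code path.
        void toyBulkBuild( IndexDetails& idx,
                           vector< pair<BSONObj, DiskLoc> >& sortedKeys,
                           bool dupsAllowed ) {
            BtreeBuilder<V1> builder( dupsAllowed, idx );     // V1 chosen for illustration
            for( size_t i = 0; i < sortedKeys.size(); i++ ) {
                // addKey() takes a non-const reference and requires sorted input
                builder.addKey( sortedKeys[i].first, sortedKeys[i].second );
            }
            builder.commit();   // builds the upper levels; without it ~BtreeBuilder rolls back
        }
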
diff --git a/db/btreecursor.cpp b/db/btreecursor.cpp
index ce841ce..f39d5bb 100644
--- a/db/btreecursor.cpp
+++ b/db/btreecursor.cpp
@@ -21,10 +21,254 @@
#include "pdfile.h"
#include "jsobj.h"
#include "curop-inl.h"
+#include "queryutil.h"
namespace mongo {
- extern int otherTraceLevel;
+ template< class V >
+ class BtreeCursorImpl : public BtreeCursor {
+ public:
+ typedef typename BucketBasics<V>::KeyNode KeyNode;
+ typedef typename V::Key Key;
+ typedef typename V::_KeyNode _KeyNode;
+
+ BtreeCursorImpl(NamespaceDetails *a, int b, const IndexDetails& c, const BSONObj &d, const BSONObj &e, bool f, int g) :
+ BtreeCursor(a,b,c,d,e,f,g) { }
+ BtreeCursorImpl(NamespaceDetails *_d, int _idxNo, const IndexDetails& _id, const shared_ptr< FieldRangeVector > &_bounds, int _direction) :
+ BtreeCursor(_d,_idxNo,_id,_bounds,_direction)
+ {
+ pair< DiskLoc, int > noBestParent;
+ indexDetails.head.btree<V>()->customLocate( bucket, keyOfs, startKey, 0, false, _boundsIterator->cmp(), _boundsIterator->inc(), _ordering, _direction, noBestParent );
+ skipAndCheck();
+ dassert( _dups.size() == 0 );
+ }
+
+ virtual DiskLoc currLoc() {
+ if( bucket.isNull() ) return DiskLoc();
+ return currKeyNode().recordLoc;
+ }
+
+ virtual BSONObj keyAt(int ofs) const {
+ assert( !bucket.isNull() );
+ const BtreeBucket<V> *b = bucket.btree<V>();
+ int n = b->getN();
+ if( n == 0xffff ) {
+ throw UserException(15850, "keyAt bucket deleted");
+ }
+ dassert( n >= 0 && n < 10000 );
+ return ofs >= n ? BSONObj() : b->keyNode(ofs).key.toBson();
+ }
+
+ virtual BSONObj currKey() const {
+ assert( !bucket.isNull() );
+ return bucket.btree<V>()->keyNode(keyOfs).key.toBson();
+ }
+
+ virtual bool curKeyHasChild() {
+ return !currKeyNode().prevChildBucket.isNull();
+ }
+
+ bool skipUnusedKeys() {
+ int u = 0;
+ while ( 1 ) {
+ if ( !ok() )
+ break;
+ const _KeyNode& kn = keyNode(keyOfs);
+ if ( kn.isUsed() )
+ break;
+ bucket = _advance(bucket, keyOfs, _direction, "skipUnusedKeys");
+ u++;
+ //don't include unused keys in nscanned
+ //++_nscanned;
+ }
+ if ( u > 10 )
+ OCCASIONALLY log() << "btree unused skipped:" << u << '\n';
+ return u;
+ }
+
+ /* Since the last noteLocation(), our key may have moved around, and that old cached
+ information may thus be stale and wrong (although often it is right). We check
+ that here; if we have moved, we have to search back for where we were at.
+
+ i.e., after operations on the index, the BtreeCursor's cached location info may
+ be invalid. This function ensures validity, so you should call it before using
+ the cursor if other writers have used the database since the last noteLocation
+ call.
+ */
+ void checkLocation() {
+ if ( eof() )
+ return;
+
+ _multikey = d->isMultikey(idxNo);
+
+ if ( keyOfs >= 0 ) {
+ assert( !keyAtKeyOfs.isEmpty() );
+
+ try {
+ // Note keyAt() returns an empty BSONObj if keyOfs is now out of range,
+ // which is possible as keys may have been deleted.
+ int x = 0;
+ while( 1 ) {
+ // if ( b->keyAt(keyOfs).woEqual(keyAtKeyOfs) &&
+ // b->k(keyOfs).recordLoc == locAtKeyOfs ) {
+ if ( keyAt(keyOfs).binaryEqual(keyAtKeyOfs) ) {
+ const _KeyNode& kn = keyNode(keyOfs);
+ if( kn.recordLoc == locAtKeyOfs ) {
+ if ( !kn.isUsed() ) {
+ // we were deleted but still exist as an unused
+ // marker key. advance.
+ skipUnusedKeys();
+ }
+ return;
+ }
+ }
+
+ // we check one key earlier too, in case a key was just deleted. this is
+ // important so that multi updates are reasonably fast.
+ if( keyOfs == 0 || x++ )
+ break;
+ keyOfs--;
+ }
+ }
+ catch(UserException& e) {
+ if( e.getCode() != 15850 )
+ throw;
+ // hack: fall through if bucket was just deleted. should only happen under deleteObjects()
+ DEV log() << "debug info: bucket was deleted" << endl;
+ }
+ }
+
+ /* normally we don't get to here. when we do, old position is no longer
+ valid and we must refind where we left off (which is expensive)
+ */
+
+ /* TODO: Switch to keep indexdetails and do idx.head! */
+ bucket = _locate(keyAtKeyOfs, locAtKeyOfs);
+ RARELY log() << "key seems to have moved in the index, refinding. " << bucket.toString() << endl;
+ if ( ! bucket.isNull() )
+ skipUnusedKeys();
+
+ }
+
+ protected:
+ virtual void _advanceTo(DiskLoc &thisLoc, int &keyOfs, const BSONObj &keyBegin, int keyBeginLen, bool afterKey, const vector< const BSONElement * > &keyEnd, const vector< bool > &keyEndInclusive, const Ordering &order, int direction ) {
+ thisLoc.btree<V>()->advanceTo(thisLoc, keyOfs, keyBegin, keyBeginLen, afterKey, keyEnd, keyEndInclusive, order, direction);
+ }
+ virtual DiskLoc _advance(const DiskLoc& thisLoc, int& keyOfs, int direction, const char *caller) {
+ return thisLoc.btree<V>()->advance(thisLoc, keyOfs, direction, caller);
+ }
+ virtual void _audit() {
+ out() << "BtreeCursor(). dumping head bucket" << endl;
+ indexDetails.head.btree<V>()->dump();
+ }
+ virtual DiskLoc _locate(const BSONObj& key, const DiskLoc& loc) {
+ bool found;
+ return indexDetails.head.btree<V>()->
+ locate(indexDetails, indexDetails.head, key, _ordering, keyOfs, found, loc, _direction);
+ }
+
+ const _KeyNode& keyNode(int keyOfs) const {
+ return bucket.btree<V>()->k(keyOfs);
+ }
+
+ private:
+ const KeyNode currKeyNode() const {
+ assert( !bucket.isNull() );
+ const BtreeBucket<V> *b = bucket.btree<V>();
+ return b->keyNode(keyOfs);
+ }
+ };
+
+ template class BtreeCursorImpl<V0>;
+ template class BtreeCursorImpl<V1>;
+
+ /*
+ class BtreeCursorV1 : public BtreeCursor {
+ public:
+ typedef BucketBasics<V1>::KeyNode KeyNode;
+ typedef V1::Key Key;
+
+ BtreeCursorV1(NamespaceDetails *a, int b, const IndexDetails& c, const BSONObj &d, const BSONObj &e, bool f, int g) :
+ BtreeCursor(a,b,c,d,e,f,g) { }
+ BtreeCursorV1(NamespaceDetails *_d, int _idxNo, const IndexDetails& _id, const shared_ptr< FieldRangeVector > &_bounds, int _direction) :
+ BtreeCursor(_d,_idxNo,_id,_bounds,_direction)
+ {
+ pair< DiskLoc, int > noBestParent;
+ indexDetails.head.btree<V1>()->customLocate( bucket, keyOfs, startKey, 0, false, _boundsIterator->cmp(), _boundsIterator->inc(), _ordering, _direction, noBestParent );
+ skipAndCheck();
+ dassert( _dups.size() == 0 );
+ }
+
+ virtual DiskLoc currLoc() {
+ if( bucket.isNull() ) return DiskLoc();
+ return currKeyNode().recordLoc;
+ }
+
+ virtual BSONObj currKey() const {
+ assert( !bucket.isNull() );
+ return bucket.btree<V1>()->keyNode(keyOfs).key.toBson();
+ }
+
+ protected:
+ virtual void _advanceTo(DiskLoc &thisLoc, int &keyOfs, const BSONObj &keyBegin, int keyBeginLen, bool afterKey, const vector< const BSONElement * > &keyEnd, const vector< bool > &keyEndInclusive, const Ordering &order, int direction ) {
+ thisLoc.btree<V1>()->advanceTo(thisLoc, keyOfs, keyBegin, keyBeginLen, afterKey, keyEnd, keyEndInclusive, order, direction);
+ }
+ virtual DiskLoc _advance(const DiskLoc& thisLoc, int& keyOfs, int direction, const char *caller) {
+ return thisLoc.btree<V1>()->advance(thisLoc, keyOfs, direction, caller);
+ }
+ virtual void _audit() {
+ out() << "BtreeCursor(). dumping head bucket" << endl;
+ indexDetails.head.btree<V1>()->dump();
+ }
+ virtual DiskLoc _locate(const BSONObj& key, const DiskLoc& loc);
+ virtual const _KeyNode& keyNode(int keyOfs) {
+ return bucket.btree<V1>()->k(keyOfs);
+ }
+
+ private:
+ const KeyNode currKeyNode() const {
+ assert( !bucket.isNull() );
+ const BtreeBucket<V1> *b = bucket.btree<V1>();
+ return b->keyNode(keyOfs);
+ }
+ };*/
+
+ BtreeCursor* BtreeCursor::make(
+ NamespaceDetails *_d, int _idxNo, const IndexDetails& _id,
+ const BSONObj &startKey, const BSONObj &endKey, bool endKeyInclusive, int direction)
+ {
+ int v = _id.version();
+ BtreeCursor *c = 0;
+ if( v == 1 ) {
+ c = new BtreeCursorImpl<V1>(_d,_idxNo,_id,startKey,endKey,endKeyInclusive,direction);
+ }
+ else if( v == 0 ) {
+ c = new BtreeCursorImpl<V0>(_d,_idxNo,_id,startKey,endKey,endKeyInclusive,direction);
+ }
+ else {
+ uasserted(14800, str::stream() << "unsupported index version " << v);
+ }
+ c->init();
+ dassert( c->_dups.size() == 0 );
+ return c;
+ }
+
+ BtreeCursor* BtreeCursor::make(
+ NamespaceDetails *_d, int _idxNo, const IndexDetails& _id,
+ const shared_ptr< FieldRangeVector > &_bounds, int _direction )
+ {
+ int v = _id.version();
+ if( v == 1 )
+ return new BtreeCursorImpl<V1>(_d,_idxNo,_id,_bounds,_direction);
+ if( v == 0 )
+ return new BtreeCursorImpl<V0>(_d,_idxNo,_id,_bounds,_direction);
+ uasserted(14801, str::stream() << "unsupported index version " << v);
+
+ // just check we are in sync with this method
+ dassert( IndexDetails::isASupportedIndexVersionNumber(v) );
+
+ return 0;
+ }
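
    Since the constructors are now protected, callers obtain cursors through the make() factories, which dispatch on the index version. A minimal sketch; 'd', 'idxNo' and 'idx' are assumed to be resolved by the query layer, and the key range is illustrative:

        void toyIndexScan( NamespaceDetails* d, int idxNo, const IndexDetails& idx ) {
            auto_ptr<BtreeCursor> c( BtreeCursor::make( d, idxNo, idx,
                                                        BSON( "a" << 1 ),    // startKey (assumed field)
                                                        BSON( "a" << 100 ),  // endKey
                                                        true,                // endKeyInclusive
                                                        1 ) );               // forward direction
            while( c->ok() ) {
                BSONObj key = c->currKey();   // materialized from the on-disk key format
                DiskLoc loc = c->currLoc();
                // ... use key/loc ...
                c->advance();
            }
        }
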
BtreeCursor::BtreeCursor( NamespaceDetails *_d, int _idxNo, const IndexDetails &_id,
const BSONObj &_startKey, const BSONObj &_endKey, bool endKeyInclusive, int _direction ) :
@@ -41,8 +285,6 @@ namespace mongo {
_independentFieldRanges( false ),
_nscanned( 0 ) {
audit();
- init();
- dassert( _dups.size() == 0 );
}
BtreeCursor::BtreeCursor( NamespaceDetails *_d, int _idxNo, const IndexDetails& _id, const shared_ptr< FieldRangeVector > &_bounds, int _direction )
@@ -55,7 +297,7 @@ namespace mongo {
_ordering( Ordering::make( _order ) ),
_direction( _direction ),
_bounds( ( assert( _bounds.get() ), _bounds ) ),
- _boundsIterator( new FieldRangeVector::Iterator( *_bounds ) ),
+ _boundsIterator( new FieldRangeVectorIterator( *_bounds ) ),
_spec( _id.getSpec() ),
_independentFieldRanges( true ),
_nscanned( 0 ) {
@@ -64,28 +306,15 @@ namespace mongo {
startKey = _bounds->startKey();
_boundsIterator->advance( startKey ); // handles initialization
_boundsIterator->prepDive();
- pair< DiskLoc, int > noBestParent;
bucket = indexDetails.head;
keyOfs = 0;
- indexDetails.head.btree()->customLocate( bucket, keyOfs, startKey, 0, false, _boundsIterator->cmp(), _boundsIterator->inc(), _ordering, _direction, noBestParent );
- skipAndCheck();
- dassert( _dups.size() == 0 );
}
+     /** Properly destroy forward-declared class members. */
+ BtreeCursor::~BtreeCursor() {}
+
void BtreeCursor::audit() {
- indexDetails.checkVersion();
dassert( d->idxNo((IndexDetails&) indexDetails) == idxNo );
-
- if ( otherTraceLevel >= 12 ) {
- if ( otherTraceLevel >= 200 ) {
- out() << "::BtreeCursor() qtl>200. validating entire index." << endl;
- indexDetails.head.btree()->fullValidate(indexDetails.head, _order);
- }
- else {
- out() << "BTreeCursor(). dumping head bucket" << endl;
- indexDetails.head.btree()->dump();
- }
- }
}
void BtreeCursor::init() {
@@ -93,24 +322,28 @@ namespace mongo {
startKey = _spec.getType()->fixKey( startKey );
endKey = _spec.getType()->fixKey( endKey );
}
- bool found;
- bucket = indexDetails.head.btree()->
- locate(indexDetails, indexDetails.head, startKey, _ordering, keyOfs, found, _direction > 0 ? minDiskLoc : maxDiskLoc, _direction);
+ bucket = _locate(startKey, _direction > 0 ? minDiskLoc : maxDiskLoc);
if ( ok() ) {
_nscanned = 1;
}
- skipUnusedKeys( false );
+ skipUnusedKeys();
checkEnd();
}
void BtreeCursor::skipAndCheck() {
- skipUnusedKeys( true );
+ int startNscanned = _nscanned;
+ skipUnusedKeys();
while( 1 ) {
if ( !skipOutOfRangeKeysAndCheckEnd() ) {
break;
}
- while( skipOutOfRangeKeysAndCheckEnd() );
- if ( !skipUnusedKeys( true ) ) {
+ do {
+ if ( _nscanned > startNscanned + 20 ) {
+ skipUnusedKeys();
+ return;
+ }
+ } while( skipOutOfRangeKeysAndCheckEnd() );
+ if ( !skipUnusedKeys() ) {
break;
}
}
@@ -120,7 +353,7 @@ namespace mongo {
if ( !ok() ) {
return false;
}
- int ret = _boundsIterator->advance( currKeyNode().key );
+ int ret = _boundsIterator->advance( currKey() );
if ( ret == -2 ) {
bucket = DiskLoc();
return false;
@@ -130,33 +363,10 @@ namespace mongo {
return false;
}
++_nscanned;
- advanceTo( currKeyNode().key, ret, _boundsIterator->after(), _boundsIterator->cmp(), _boundsIterator->inc() );
+ advanceTo( currKey(), ret, _boundsIterator->after(), _boundsIterator->cmp(), _boundsIterator->inc() );
return true;
}
- /* skip unused keys. */
- bool BtreeCursor::skipUnusedKeys( bool mayJump ) {
- int u = 0;
- while ( 1 ) {
- if ( !ok() )
- break;
- const BtreeBucket *b = bucket.btree();
- const _KeyNode& kn = b->k(keyOfs);
- if ( kn.isUsed() )
- break;
- bucket = b->advance(bucket, keyOfs, _direction, "skipUnusedKeys");
- u++;
- //don't include unused keys in nscanned
- //++_nscanned;
- if ( mayJump && ( u % 10 == 0 ) ) {
- skipOutOfRangeKeysAndCheckEnd();
- }
- }
- if ( u > 10 )
- OCCASIONALLY log() << "btree unused skipped:" << u << '\n';
- return u;
- }
-
// Return a value in the set {-1, 0, 1} to represent the sign of parameter i.
int sgn( int i ) {
if ( i == 0 )
@@ -177,7 +387,7 @@ namespace mongo {
}
void BtreeCursor::advanceTo( const BSONObj &keyBegin, int keyBeginLen, bool afterKey, const vector< const BSONElement * > &keyEnd, const vector< bool > &keyEndInclusive) {
- bucket.btree()->advanceTo( bucket, keyOfs, keyBegin, keyBeginLen, afterKey, keyEnd, keyEndInclusive, _ordering, _direction );
+ _advanceTo( bucket, keyOfs, keyBegin, keyBeginLen, afterKey, keyEnd, keyEndInclusive, _ordering, _direction );
}
bool BtreeCursor::advance() {
@@ -185,10 +395,10 @@ namespace mongo {
if ( bucket.isNull() )
return false;
- bucket = bucket.btree()->advance(bucket, keyOfs, _direction, "BtreeCursor::advance");
+ bucket = _advance(bucket, keyOfs, _direction, "BtreeCursor::advance");
if ( !_independentFieldRanges ) {
- skipUnusedKeys( false );
+ skipUnusedKeys();
checkEnd();
if ( ok() ) {
++_nscanned;
@@ -202,69 +412,27 @@ namespace mongo {
void BtreeCursor::noteLocation() {
if ( !eof() ) {
- BSONObj o = bucket.btree()->keyAt(keyOfs).copy();
+ BSONObj o = currKey().getOwned();
keyAtKeyOfs = o;
- locAtKeyOfs = bucket.btree()->k(keyOfs).recordLoc;
+ locAtKeyOfs = currLoc();
}
}
- /* Since the last noteLocation(), our key may have moved around, and that old cached
- information may thus be stale and wrong (although often it is right). We check
- that here; if we have moved, we have to search back for where we were at.
-
- i.e., after operations on the index, the BtreeCursor's cached location info may
- be invalid. This function ensures validity, so you should call it before using
- the cursor if other writers have used the database since the last noteLocation
- call.
- */
- void BtreeCursor::checkLocation() {
- if ( eof() )
- return;
-
- _multikey = d->isMultikey(idxNo);
-
- if ( keyOfs >= 0 ) {
- const BtreeBucket *b = bucket.btree();
-
- assert( !keyAtKeyOfs.isEmpty() );
-
- // Note keyAt() returns an empty BSONObj if keyOfs is now out of range,
- // which is possible as keys may have been deleted.
- int x = 0;
- while( 1 ) {
- if ( b->keyAt(keyOfs).woEqual(keyAtKeyOfs) &&
- b->k(keyOfs).recordLoc == locAtKeyOfs ) {
- if ( !b->k(keyOfs).isUsed() ) {
- /* we were deleted but still exist as an unused
- marker key. advance.
- */
- skipUnusedKeys( false );
- }
- return;
- }
-
- /* we check one key earlier too, in case a key was just deleted. this is
- important so that multi updates are reasonably fast.
- */
- if( keyOfs == 0 || x++ )
- break;
- keyOfs--;
- }
- }
-
- /* normally we don't get to here. when we do, old position is no longer
- valid and we must refind where we left off (which is expensive)
- */
-
- bool found;
-
- /* TODO: Switch to keep indexdetails and do idx.head! */
- bucket = indexDetails.head.btree()->locate(indexDetails, indexDetails.head, keyAtKeyOfs, _ordering, keyOfs, found, locAtKeyOfs, _direction);
- RARELY log() << " key seems to have moved in the index, refinding. found:" << found << endl;
- if ( ! bucket.isNull() )
- skipUnusedKeys( false );
-
+ string BtreeCursor::toString() {
+ string s = string("BtreeCursor ") + indexDetails.indexName();
+ if ( _direction < 0 ) s += " reverse";
+ if ( _bounds.get() && _bounds->size() > 1 ) s += " multi";
+ return s;
}
+
+ BSONObj BtreeCursor::prettyIndexBounds() const {
+ if ( !_independentFieldRanges ) {
+ return BSON( "start" << prettyKey( startKey ) << "end" << prettyKey( endKey ) );
+ }
+ else {
+ return _bounds->obj();
+ }
+ }
/* ----------------------------------------------------------------------------- */
diff --git a/db/cap.cpp b/db/cap.cpp
index 260b311..a8be238 100644
--- a/db/cap.cpp
+++ b/db/cap.cpp
@@ -26,9 +26,8 @@
#include "btree.h"
#include <algorithm>
#include <list>
-#include "query.h"
-#include "queryutil.h"
#include "json.h"
+#include "clientcursor.h"
/*
capped collection layout
@@ -131,7 +130,12 @@ namespace mongo {
bool NamespaceDetails::inCapExtent( const DiskLoc &dl ) const {
assert( !dl.isNull() );
// We could have a rec or drec, doesn't matter.
- return dl.drec()->myExtent( dl ) == capExtent.ext();
+ bool res = dl.drec()->myExtentLoc(dl) == capExtent;
+ DEV {
+             // old implementation. this check is temporary, to verify the new implementation behaves the same. the new impl should be a little faster.
+ assert( res == (dl.drec()->myExtent( dl ) == capExtent.ext()) );
+ }
+ return res;
}
bool NamespaceDetails::nextIsInCapExtent( const DiskLoc &dl ) const {
@@ -443,7 +447,7 @@ namespace mongo {
for( DiskLoc ext = firstExtent; !ext.isNull(); ext = ext.ext()->xnext ) {
DiskLoc prev = ext.ext()->xprev;
DiskLoc next = ext.ext()->xnext;
- DiskLoc empty = ext.ext()->reuse( ns );
+ DiskLoc empty = ext.ext()->reuse( ns, true );
ext.ext()->xprev.writing() = prev;
ext.ext()->xnext.writing() = next;
addDeletedRec( empty.drec(), empty );
diff --git a/db/client.cpp b/db/client.cpp
index e4fd4b9..c1a359c 100644
--- a/db/client.cpp
+++ b/db/client.cpp
@@ -32,6 +32,9 @@
#include "dbwebserver.h"
#include "../util/mongoutils/html.h"
#include "../util/mongoutils/checksum.h"
+#include "../util/file_allocator.h"
+#include "repl/rs.h"
+#include "../scripting/engine.h"
namespace mongo {
@@ -40,10 +43,50 @@ namespace mongo {
set<Client*> Client::clients; // always be in clientsMutex when manipulating this
boost::thread_specific_ptr<Client> currentClient;
+#if defined(_DEBUG)
+ struct StackChecker;
+ ThreadLocalValue<StackChecker *> checker;
+
+ struct StackChecker {
+ enum { SZ = 256 * 1024 };
+ char buf[SZ];
+ StackChecker() {
+ checker.set(this);
+ }
+ void init() {
+ memset(buf, 42, sizeof(buf));
+ }
+ static void check(const char *tname) {
+ static int max;
+ StackChecker *sc = checker.get();
+ const char *p = sc->buf;
+ int i = 0;
+ for( ; i < SZ; i++ ) {
+ if( p[i] != 42 )
+ break;
+ }
+ int z = SZ-i;
+ if( z > max ) {
+ max = z;
+ log() << "thread " << tname << " stack usage was " << z << " bytes" << endl;
+ }
+ wassert( i > 16000 );
+ }
+ };
+#endif
+
/* each thread which does db operations has a Client object in TLS.
call this when your thread starts.
*/
- Client& Client::initThread(const char *desc, MessagingPort *mp) {
+ Client& Client::initThread(const char *desc, AbstractMessagingPort *mp) {
+#if defined(_DEBUG)
+ {
+ if( sizeof(void*) == 8 ) {
+ StackChecker sc;
+ sc.init();
+ }
+ }
+#endif
assert( currentClient.get() == 0 );
Client *c = new Client(desc, mp);
currentClient.reset(c);
@@ -51,7 +94,7 @@ namespace mongo {
return *c;
}
- Client::Client(const char *desc, MessagingPort *p) :
+ Client::Client(const char *desc, AbstractMessagingPort *p) :
_context(0),
_shutdown(false),
_desc(desc),
@@ -60,6 +103,11 @@ namespace mongo {
_mp(p) {
_connectionId = setThreadName(desc);
_curOp = new CurOp( this );
+#ifndef _WIN32
+ stringstream temp;
+ temp << hex << showbase << pthread_self();
+ _threadId = temp.str();
+#endif
scoped_lock bl(clientsMutex);
clients.insert(this);
}
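
    The per-thread lifecycle implied here is: initThread() installs a Client in thread-local storage, cc() retrieves it, and shutdown() must run before the thread exits. A sketch modelled on ClientCursorMonitor::run() later in this patch; the thread name and loop body are placeholders:

        void toyBackgroundThread() {
            Client::initThread( "toybackground" );   // name is illustrative
            Client& client = cc();
            while( !inShutdown() ) {
                // ... do db work under the appropriate locks ...
                sleepsecs( 1 );
            }
            client.shutdown();
        }
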
@@ -74,13 +122,23 @@ namespace mongo {
error() << "Client::shutdown not called: " << _desc << endl;
}
- scoped_lock bl(clientsMutex);
- if ( ! _shutdown )
- clients.erase(this);
- delete _curOp;
+ if ( ! inShutdown() ) {
+ // we can't clean up safely once we're in shutdown
+ scoped_lock bl(clientsMutex);
+ if ( ! _shutdown )
+ clients.erase(this);
+ delete _curOp;
+ }
}
bool Client::shutdown() {
+#if defined(_DEBUG)
+ {
+ if( sizeof(void*) == 8 ) {
+ StackChecker::check( desc() );
+ }
+ }
+#endif
_shutdown = true;
if ( inShutdown() )
return false;
@@ -128,17 +186,21 @@ namespace mongo {
void Client::Context::_finishInit( bool doauth ) {
int lockState = dbMutex.getState();
assert( lockState );
+
+ if ( lockState > 0 && FileAllocator::get()->hasFailed() ) {
+ uassert(14031, "Can't take a write lock while out of disk space", false);
+ }
_db = dbHolder.get( _ns , _path );
if ( _db ) {
_justCreated = false;
}
- else if ( dbMutex.getState() > 0 ) {
+ else if ( lockState > 0 ) {
// already in a write lock
_db = dbHolder.getOrCreate( _ns , _path , _justCreated );
assert( _db );
}
- else if ( dbMutex.getState() < -1 ) {
+ else if ( lockState < -1 ) {
// nested read lock :(
assert( _lock );
_lock->releaseAndWriteLock();
@@ -176,7 +238,7 @@ namespace mongo {
break;
default: {
string errmsg;
- if ( ! shardVersionOk( _ns , lockState > 0 , errmsg ) ) {
+ if ( ! shardVersionOk( _ns , errmsg ) ) {
ostringstream os;
os << "[" << _ns << "] shard version not ok in Client::Context: " << errmsg;
msgassertedNoTrace( StaleConfigInContextCode , os.str().c_str() );
@@ -315,6 +377,19 @@ namespace mongo {
_client = 0;
}
+ void CurOp::enter( Client::Context * context ) {
+ ensureStarted();
+ setNS( context->ns() );
+ _dbprofile = context->_db ? context->_db->profile : 0;
+ }
+
+ void CurOp::leave( Client::Context * context ) {
+ unsigned long long now = curTimeMicros64();
+ Top::global.record( _ns , _op , _lockType , now - _checkpoint , _command );
+ _checkpoint = now;
+ }
+
+
BSONObj CurOp::infoNoauth() {
BSONObjBuilder b;
b.append("opid", _opNum);
@@ -339,20 +414,34 @@ namespace mongo {
clientStr << _remote.toString();
b.append("client", clientStr.str());
- if ( _client )
+ if ( _client ) {
b.append( "desc" , _client->desc() );
-
+ if ( _client->_threadId.size() )
+ b.append( "threadId" , _client->_threadId );
+ if ( _client->_connectionId )
+ b.appendNumber( "connectionId" , _client->_connectionId );
+ }
+
if ( ! _message.empty() ) {
if ( _progressMeter.isActive() ) {
StringBuilder buf(128);
buf << _message.toString() << " " << _progressMeter.toString();
b.append( "msg" , buf.str() );
+ BSONObjBuilder sub( b.subobjStart( "progress" ) );
+ sub.appendNumber( "done" , (long long)_progressMeter.done() );
+ sub.appendNumber( "total" , (long long)_progressMeter.total() );
+ sub.done();
}
else {
b.append( "msg" , _message.toString() );
}
}
+ if( killed() )
+ b.append("killed", true);
+
+ b.append( "numYields" , _numYields );
+
return b.obj();
}
@@ -368,7 +457,14 @@ namespace mongo {
BSONObjBuilder b;
while ( i.more() )
b.append( i.next() );
+
+ b.appendElementsUnique( _handshake );
+
_handshake = b.obj();
+
+ if (theReplSet && o.hasField("member")) {
+ theReplSet->ghost->associateSlave(_remoteId, o["member"].Int());
+ }
}
class HandshakeCmd : public Command {
@@ -378,7 +474,7 @@ namespace mongo {
virtual LockType locktype() const { return NONE; }
virtual bool slaveOk() const { return true; }
virtual bool adminOnly() const { return false; }
- virtual bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ virtual bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
Client& c = cc();
c.gotHandshake( cmdObj );
return 1;
@@ -510,4 +606,125 @@ namespace mongo {
return writers + readers;
}
+
+ void OpDebug::reset() {
+ extra.reset();
+
+ op = 0;
+ iscommand = false;
+ ns = "";
+ query = BSONObj();
+ updateobj = BSONObj();
+
+ cursorid = 0;
+ ntoreturn = 0;
+ ntoskip = 0;
+ exhaust = false;
+
+ nscanned = 0;
+ idhack = false;
+ scanAndOrder = false;
+ moved = false;
+ fastmod = false;
+ fastmodinsert = false;
+ upsert = false;
+ keyUpdates = 0;
+
+ exceptionInfo.reset();
+
+ executionTime = 0;
+ nreturned = 0;
+ responseLength = 0;
+ }
+
+
+#define OPDEBUG_TOSTRING_HELP(x) if( x ) s << " " #x ":" << (x)
+ string OpDebug::toString() const {
+ StringBuilder s( ns.size() + 64 );
+ if ( iscommand )
+ s << "command ";
+ else
+ s << opToString( op ) << ' ';
+ s << ns.toString();
+
+ if ( ! query.isEmpty() ) {
+ if ( iscommand )
+ s << " command: ";
+ else
+ s << " query: ";
+ s << query.toString();
+ }
+
+ if ( ! updateobj.isEmpty() ) {
+ s << " update: ";
+ updateobj.toString( s );
+ }
+
+ OPDEBUG_TOSTRING_HELP( cursorid );
+ OPDEBUG_TOSTRING_HELP( ntoreturn );
+ OPDEBUG_TOSTRING_HELP( ntoskip );
+ OPDEBUG_TOSTRING_HELP( exhaust );
+
+ OPDEBUG_TOSTRING_HELP( nscanned );
+ OPDEBUG_TOSTRING_HELP( idhack );
+ OPDEBUG_TOSTRING_HELP( scanAndOrder );
+ OPDEBUG_TOSTRING_HELP( moved );
+ OPDEBUG_TOSTRING_HELP( fastmod );
+ OPDEBUG_TOSTRING_HELP( fastmodinsert );
+ OPDEBUG_TOSTRING_HELP( upsert );
+ OPDEBUG_TOSTRING_HELP( keyUpdates );
+
+ if ( extra.len() )
+ s << " " << extra.str();
+
+ if ( ! exceptionInfo.empty() ) {
+ s << " exception: " << exceptionInfo.msg;
+ if ( exceptionInfo.code )
+ s << " code:" << exceptionInfo.code;
+ }
+
+ OPDEBUG_TOSTRING_HELP( nreturned );
+ if ( responseLength )
+ s << " reslen:" << responseLength;
+ s << " " << executionTime << "ms";
+
+ return s.str();
+ }
+
+#define OPDEBUG_APPEND_NUMBER(x) if( x ) b.append( #x , (x) )
+#define OPDEBUG_APPEND_BOOL(x) if( x ) b.appendBool( #x , (x) )
+ void OpDebug::append( const CurOp& curop, BSONObjBuilder& b ) const {
+ b.append( "op" , iscommand ? "command" : opToString( op ) );
+ b.append( "ns" , ns.toString() );
+ if ( ! query.isEmpty() )
+ b.append( iscommand ? "command" : "query" , query );
+ else if ( ! iscommand && curop.haveQuery() )
+ curop.appendQuery( b , "query" );
+
+ if ( ! updateobj.isEmpty() )
+ b.append( "updateobj" , updateobj );
+
+ OPDEBUG_APPEND_NUMBER( cursorid );
+ OPDEBUG_APPEND_NUMBER( ntoreturn );
+ OPDEBUG_APPEND_NUMBER( ntoskip );
+ OPDEBUG_APPEND_BOOL( exhaust );
+
+ OPDEBUG_APPEND_NUMBER( nscanned );
+ OPDEBUG_APPEND_BOOL( idhack );
+ OPDEBUG_APPEND_BOOL( scanAndOrder );
+ OPDEBUG_APPEND_BOOL( moved );
+ OPDEBUG_APPEND_BOOL( fastmod );
+ OPDEBUG_APPEND_BOOL( fastmodinsert );
+ OPDEBUG_APPEND_BOOL( upsert );
+ OPDEBUG_APPEND_NUMBER( keyUpdates );
+
+ if ( ! exceptionInfo.empty() )
+ exceptionInfo.append( b , "exception" , "exceptionCode" );
+
+ OPDEBUG_APPEND_NUMBER( nreturned );
+ OPDEBUG_APPEND_NUMBER( responseLength );
+ b.append( "millis" , executionTime );
+
+ }
+
}
diff --git a/db/client.h b/db/client.h
index 4e8589e..a8e3138 100644
--- a/db/client.h
+++ b/db/client.h
@@ -38,12 +38,13 @@ namespace mongo {
class CurOp;
class Command;
class Client;
- class MessagingPort;
+ class AbstractMessagingPort;
extern boost::thread_specific_ptr<Client> currentClient;
typedef long long ConnectionId;
+ /** the database's concept of an outside "client" */
class Client : boost::noncopyable {
public:
class Context;
@@ -52,14 +53,14 @@ namespace mongo {
static set<Client*> clients; // always be in clientsMutex when manipulating this
static int recommendedYieldMicros( int * writers = 0 , int * readers = 0 );
static int getActiveClientCount( int& writers , int& readers );
-
static Client *syncThread;
-
/* each thread which does db operations has a Client object in TLS.
call this when your thread starts.
*/
- static Client& initThread(const char *desc, MessagingPort *mp = 0);
+ static Client& initThread(const char *desc, AbstractMessagingPort *mp = 0);
+
+ ~Client();
/*
this has to be called as the client goes away, but before thread termination
@@ -67,17 +68,16 @@ namespace mongo {
*/
bool shutdown();
-
- ~Client();
-
+ /** set so isSyncThread() works */
void iAmSyncThread() {
wassert( syncThread == 0 );
syncThread = this;
}
- bool isSyncThread() const { return this == syncThread; } // true if this client is the replication secondary pull thread
-
+         /** @return true if this client is the replication secondary pull thread. not used much; used in the create index sync code. */
+ bool isSyncThread() const { return this == syncThread; }
string clientAddress(bool includePort=false) const;
+ const AuthenticationInfo * getAuthenticationInfo() const { return &_ai; }
AuthenticationInfo * getAuthenticationInfo() { return &_ai; }
bool isAdmin() { return _ai.isAuthorized( "admin" ); }
CurOp* curop() const { return _curOp; }
@@ -96,13 +96,12 @@ namespace mongo {
void gotHandshake( const BSONObj& o );
BSONObj getRemoteID() const { return _remoteId; }
BSONObj getHandshake() const { return _handshake; }
-
- MessagingPort * port() const { return _mp; }
-
+ AbstractMessagingPort * port() const { return _mp; }
ConnectionId getConnectionId() const { return _connectionId; }
private:
ConnectionId _connectionId; // > 0 for things "conn", 0 otherwise
+         string _threadId; // "" on unsupported systems
CurOp * _curOp;
Context * _context;
bool _shutdown;
@@ -112,9 +111,9 @@ namespace mongo {
ReplTime _lastOp;
BSONObj _handshake;
BSONObj _remoteId;
- MessagingPort * const _mp;
+ AbstractMessagingPort * const _mp;
- Client(const char *desc, MessagingPort *p = 0);
+ Client(const char *desc, AbstractMessagingPort *p = 0);
friend class CurOp;
@@ -128,7 +127,6 @@ namespace mongo {
~GodScope();
};
-
/* Set database we want to use, then, restores when we finish (are out of scope)
Note this is also helpful if an exception happens as the state if fixed up.
*/
diff --git a/db/clientcursor.cpp b/db/clientcursor.cpp
index bc09457..e803afd 100644
--- a/db/clientcursor.cpp
+++ b/db/clientcursor.cpp
@@ -23,17 +23,18 @@
*/
#include "pch.h"
-#include "query.h"
+#include "clientcursor.h"
#include "introspect.h"
#include <time.h>
#include "db.h"
#include "commands.h"
#include "repl_block.h"
+#include "../util/processinfo.h"
namespace mongo {
CCById ClientCursor::clientCursorsById;
- boost::recursive_mutex ClientCursor::ccmutex;
+ boost::recursive_mutex& ClientCursor::ccmutex( *(new boost::recursive_mutex()) );
long long ClientCursor::numberTimedOut = 0;
void aboutToDeleteForSharding( const Database* db , const DiskLoc& dl ); // from s/d_logic.h
@@ -73,31 +74,39 @@ namespace mongo {
//void removedKey(const DiskLoc& btreeLoc, int keyPos) {
//}
- /* todo: this implementation is incomplete. we use it as a prefix for dropDatabase, which
- works fine as the prefix will end with '.'. however, when used with drop and
- dropIndexes, this could take out cursors that belong to something else -- if you
- drop "foo", currently, this will kill cursors for "foobar".
- */
- void ClientCursor::invalidate(const char *nsPrefix) {
- vector<ClientCursor*> toDelete;
+ // ns is either a full namespace or "dbname." when invalidating for a whole db
+ void ClientCursor::invalidate(const char *ns) {
+ dbMutex.assertWriteLocked();
+ int len = strlen(ns);
+ const char* dot = strchr(ns, '.');
+ assert( len > 0 && dot);
- int len = strlen(nsPrefix);
- assert( len > 0 && strchr(nsPrefix, '.') );
+ bool isDB = (dot == &ns[len-1]); // first (and only) dot is the last char
{
- //cout << "\nTEMP invalidate " << nsPrefix << endl;
+ //cout << "\nTEMP invalidate " << ns << endl;
recursive_scoped_lock lock(ccmutex);
Database *db = cc().database();
assert(db);
- assert( str::startsWith(nsPrefix, db->name) );
+ assert( str::startsWith(ns, db->name) );
- for( CCById::iterator i = clientCursorsById.begin(); i != clientCursorsById.end(); ++i ) {
+ for( CCById::iterator i = clientCursorsById.begin(); i != clientCursorsById.end(); /*++i*/ ) {
ClientCursor *cc = i->second;
+
+ ++i; // we may be removing this node
+
if( cc->_db != db )
continue;
- if ( strncmp(nsPrefix, cc->_ns.c_str(), len) == 0 ) {
- toDelete.push_back(i->second);
+
+ if (isDB) {
+ // already checked that db matched above
+ dassert( str::startsWith(cc->_ns.c_str(), ns) );
+ delete cc; //removes self from ccByID
+ }
+ else {
+ if ( str::equals(cc->_ns.c_str(), ns) )
+ delete cc; //removes self from ccByID
}
}
@@ -109,15 +118,12 @@ namespace mongo {
CCByLoc& bl = db->ccByLoc;
for ( CCByLoc::iterator i = bl.begin(); i != bl.end(); ++i ) {
ClientCursor *cc = i->second;
- if ( strncmp(nsPrefix, cc->ns.c_str(), len) == 0 ) {
+ if ( strncmp(ns, cc->ns.c_str(), len) == 0 ) {
assert( cc->_db == db );
toDelete.push_back(i->second);
}
}*/
- for ( vector<ClientCursor*>::iterator i = toDelete.begin(); i != toDelete.end(); ++i )
- delete (*i);
-
/*cout << "TEMP after invalidate " << endl;
for( auto i = clientCursorsById.begin(); i != clientCursorsById.end(); ++i ) {
cout << " " << i->second->ns << endl;
@@ -140,11 +146,19 @@ namespace mongo {
i++;
if( j->second->shouldTimeout( millis ) ) {
numberTimedOut++;
- log(1) << "killing old cursor " << j->second->_cursorid << ' ' << j->second->_ns
+ LOG(1) << "killing old cursor " << j->second->_cursorid << ' ' << j->second->_ns
<< " idle:" << j->second->idleTime() << "ms\n";
delete j->second;
}
}
+ unsigned sz = clientCursorsById.size();
+ static time_t last;
+ if( sz >= 100000 ) {
+ if( time(0) - last > 300 ) {
+ last = time(0);
+ log() << "warning number of open cursors is very large: " << sz << endl;
+ }
+ }
}
/* must call when a btree bucket going away.
@@ -157,6 +171,9 @@ namespace mongo {
RARELY if ( bl.size() > 70 ) {
log() << "perf warning: byLoc.size=" << bl.size() << " in aboutToDeleteBucket\n";
}
+ if( bl.size() == 0 ) {
+ DEV tlog() << "debug warning: no cursors found in informAboutToDeleteBucket()" << endl;
+ }
for ( CCByLoc::iterator i = bl.begin(); i != bl.end(); i++ )
i->second->_c->aboutToDeleteBucket(b);
}
@@ -225,10 +242,13 @@ namespace mongo {
c->checkLocation();
DiskLoc tmp1 = c->refLoc();
if ( tmp1 != dl ) {
- /* this might indicate a failure to call ClientCursor::updateLocation() */
+ // This might indicate a failure to call ClientCursor::updateLocation() but it can
+ // also happen during correct operation, see SERVER-2009.
problem() << "warning: cursor loc " << tmp1 << " does not match byLoc position " << dl << " !" << endl;
}
- c->advance();
+ else {
+ c->advance();
+ }
if ( c->eof() ) {
// advanced to end
// leave ClientCursor in place so next getMore doesn't fail
@@ -249,6 +269,9 @@ namespace mongo {
_query(query), _queryOptions(queryOptions),
_idleAgeMillis(0), _pinValue(0),
_doingDeletes(false), _yieldSometimesTracker(128,10) {
+
+ dbMutex.assertAtLeastReadLocked();
+
assert( _db );
assert( str::startsWith(_ns, _db->name) );
if( queryOptions & QueryOption_NoCursorTimeout )
@@ -277,7 +300,11 @@ namespace mongo {
ClientCursor::~ClientCursor() {
- assert( _pos != -2 );
+ if( _pos == -2 ) {
+ // defensive: destructor called twice
+ wassert(false);
+ return;
+ }
{
recursive_scoped_lock lock(ccmutex);
@@ -290,7 +317,7 @@ namespace mongo {
}
}
- bool ClientCursor::getFieldsDotted( const string& name, BSONElementSet &ret ) {
+ bool ClientCursor::getFieldsDotted( const string& name, BSONElementSet &ret, BSONObj& holder ) {
map<string,int>::const_iterator i = _indexedFields.find( name );
if ( i == _indexedFields.end() ) {
@@ -300,7 +327,8 @@ namespace mongo {
int x = i->second;
- BSONObjIterator it( currKey() );
+ holder = currKey();
+ BSONObjIterator it( holder );
while ( x && it.more() ) {
it.next();
x--;
@@ -310,18 +338,20 @@ namespace mongo {
return true;
}
- BSONElement ClientCursor::getFieldDotted( const string& name , bool * fromKey ) {
+ BSONElement ClientCursor::getFieldDotted( const string& name , BSONObj& holder , bool * fromKey ) {
map<string,int>::const_iterator i = _indexedFields.find( name );
if ( i == _indexedFields.end() ) {
if ( fromKey )
*fromKey = false;
- return current().getFieldDotted( name );
+ holder = current();
+ return holder.getFieldDotted( name );
}
int x = i->second;
- BSONObjIterator it( currKey() );
+ holder = currKey();
+ BSONObjIterator it( holder );
while ( x && it.more() ) {
it.next();
x--;
@@ -333,6 +363,29 @@ namespace mongo {
return it.next();
}
+ BSONObj ClientCursor::extractFields(const BSONObj &pattern , bool fillWithNull ) {
+ BSONObjBuilder b( pattern.objsize() * 2 );
+
+ BSONObj holder;
+
+ BSONObjIterator i( pattern );
+ while ( i.more() ) {
+ BSONElement key = i.next();
+ BSONElement value = getFieldDotted( key.fieldName() , holder );
+
+ if ( value.type() ) {
+ b.appendAs( value , key.fieldName() );
+ continue;
+ }
+
+ if ( fillWithNull )
+ b.appendNull( key.fieldName() );
+
+ }
+
+ return b.obj();
+ }
+
/* call when cursor's location changes so that we can update the
cursorsbylocation map. if you are locked and internally iterating, only
@@ -366,18 +419,66 @@ namespace mongo {
return micros;
}
+
+ Record* ClientCursor::_recordForYield( ClientCursor::RecordNeeds need ) {
+ if ( need == DontNeed ) {
+ return 0;
+ }
+ else if ( need == MaybeCovered ) {
+ // TODO
+ return 0;
+ }
+ else if ( need == WillNeed ) {
+ // no-op
+ }
+ else {
+ warning() << "don't understand RecordNeeds: " << (int)need << endl;
+ return 0;
+ }
+
+ DiskLoc l = currLoc();
+ if ( l.isNull() )
+ return 0;
+
+ Record * rec = l.rec();
+ if ( rec->likelyInPhysicalMemory() )
+ return 0;
+
+ return rec;
+ }
- bool ClientCursor::yieldSometimes() {
- if ( ! _yieldSometimesTracker.ping() )
+ bool ClientCursor::yieldSometimes( RecordNeeds need, bool *yielded ) {
+ if ( yielded ) {
+ *yielded = false;
+ }
+ if ( ! _yieldSometimesTracker.ping() ) {
+ Record* rec = _recordForYield( need );
+ if ( rec ) {
+ if ( yielded ) {
+ *yielded = true;
+ }
+ return yield( yieldSuggest() , rec );
+ }
return true;
+ }
int micros = yieldSuggest();
- return ( micros > 0 ) ? yield( micros ) : true;
+ if ( micros > 0 ) {
+ if ( yielded ) {
+ *yielded = true;
+ }
+ return yield( micros , _recordForYield( need ) );
+ }
+ return true;
}
- void ClientCursor::staticYield( int micros , const StringData& ns ) {
+ void ClientCursor::staticYield( int micros , const StringData& ns , Record * rec ) {
killCurrentOp.checkForInterrupt( false );
{
+ auto_ptr<RWLockRecursive::Shared> lk;
+ if ( rec )
+ lk.reset( new RWLockRecursive::Shared( MongoFile::mmmutex) );
+
dbtempreleasecond unlock;
if ( unlock.unlocked() ) {
if ( micros == -1 )
@@ -386,14 +487,28 @@ namespace mongo {
sleepmicros( micros );
}
else {
- warning() << "ClientCursor::yield can't unlock b/c of recursive lock ns: " << ns << endl;
+ CurOp * c = cc().curop();
+ while ( c->parent() )
+ c = c->parent();
+ warning() << "ClientCursor::yield can't unlock b/c of recursive lock"
+ << " ns: " << ns
+ << " top: " << c->info()
+ << endl;
}
+
+ if ( rec )
+ rec->touch();
+
+ lk.reset(0); // need to release this before dbtempreleasecond
}
}
bool ClientCursor::prepareToYield( YieldData &data ) {
if ( ! _c->supportYields() )
return false;
+ if ( ! _c->prepareToYield() ) {
+ return false;
+ }
// need to store in case 'this' gets deleted
data._id = _cursorid;
@@ -434,40 +549,34 @@ namespace mongo {
}
cc->_doingDeletes = data._doingDeletes;
- cc->_c->checkLocation();
+ cc->_c->recoverFromYield();
return true;
}
- bool ClientCursor::yield( int micros ) {
+ bool ClientCursor::yield( int micros , Record * recordToLoad ) {
if ( ! _c->supportYields() )
return true;
+
YieldData data;
prepareToYield( data );
- staticYield( micros , _ns );
+ staticYield( micros , _ns , recordToLoad );
return ClientCursor::recoverFromYield( data );
}
- int ctmLast = 0; // so we don't have to do find() which is a little slow very often.
+ long long ctmLast = 0; // so we don't have to do find() which is a little slow very often.
long long ClientCursor::allocCursorId_inlock() {
- if( 0 ) {
- static long long z;
- ++z;
- cout << "TEMP alloccursorid " << z << endl;
- return z;
- }
-
+ long long ctm = curTimeMillis64();
+ dassert( ctm );
long long x;
- int ctm = (int) curTimeMillis();
while ( 1 ) {
x = (((long long)rand()) << 32);
- x = x | ctm | 0x80000000; // OR to make sure not zero
+ x = x ^ ctm;
if ( ctm != ctmLast || ClientCursor::find_inlock(x, false) == 0 )
break;
}
ctmLast = ctm;
- //DEV tlog() << " alloccursorid " << x << endl;
return x;
}
@@ -495,6 +604,19 @@ namespace mongo {
result.appendNumber("totalOpen", clientCursorsById.size() );
result.appendNumber("clientCursors_size", (int) numCursors());
result.appendNumber("timedOut" , numberTimedOut);
+ unsigned pinned = 0;
+ unsigned notimeout = 0;
+ for ( CCById::iterator i = clientCursorsById.begin(); i != clientCursorsById.end(); i++ ) {
+ unsigned p = i->second->_pinValue;
+ if( p >= 100 )
+ pinned++;
+ else if( p > 0 )
+ notimeout++;
+ }
+ if( pinned )
+ result.append("pinned", pinned);
+ if( notimeout )
+ result.append("totalNoTimeout", notimeout);
}
// QUESTION: Restrict to the namespace from which this command was issued?
@@ -507,25 +629,64 @@ namespace mongo {
help << " example: { cursorInfo : 1 }";
}
virtual LockType locktype() const { return NONE; }
- bool run(const string& dbname, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
+ bool run(const string& dbname, BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
ClientCursor::appendStats( result );
return true;
}
} cmdCursorInfo;
+ struct Mem {
+ Mem() { res = virt = mapped = 0; }
+ int res;
+ int virt;
+ int mapped;
+ bool grew(const Mem& r) {
+ return (r.res && (((double)res)/r.res)>1.1 ) ||
+ (r.virt && (((double)virt)/r.virt)>1.1 ) ||
+ (r.mapped && (((double)mapped)/r.mapped)>1.1 );
+ }
+ };
+
+ /** called once a minute from killcursors thread */
+ void sayMemoryStatus() {
+ static time_t last;
+ static Mem mlast;
+ try {
+ ProcessInfo p;
+ if ( !cmdLine.quiet && p.supported() ) {
+ Mem m;
+ m.res = p.getResidentSize();
+ m.virt = p.getVirtualMemorySize();
+ m.mapped = (int) (MemoryMappedFile::totalMappedLength() / ( 1024 * 1024 ));
+ if( time(0)-last >= 300 || m.grew(mlast) ) {
+ log() << "mem (MB) res:" << m.res << " virt:" << m.virt << " mapped:" << m.mapped << endl;
+ if( m.virt - (cmdLine.dur?2:1)*m.mapped > 5000 ) {
+ ONCE log() << "warning virtual/mapped memory differential is large. journaling:" << cmdLine.dur << endl;
+ }
+ last = time(0);
+ mlast = m;
+ }
+ }
+ }
+ catch(...) {
+ log() << "ProcessInfo exception" << endl;
+ }
+ }
+
+ /** thread for timing out old cursors */
void ClientCursorMonitor::run() {
Client::initThread("clientcursormon");
Client& client = cc();
-
- unsigned old = curTimeMillis();
-
+ Timer t;
+ const int Secs = 4;
+ unsigned n = 0;
while ( ! inShutdown() ) {
- unsigned now = curTimeMillis();
- ClientCursor::idleTimeReport( now - old );
- old = now;
- sleepsecs(4);
+ ClientCursor::idleTimeReport( t.millisReset() );
+ sleepsecs(Secs);
+ if( ++n % (60/4) == 0 /*once a minute*/ ) {
+ sayMemoryStatus();
+ }
}
-
client.shutdown();
}
@@ -551,7 +712,6 @@ namespace mongo {
}
-
ClientCursorMonitor clientCursorMonitor;
} // namespace mongo
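The rewritten allocCursorId_inlock() above mixes the millisecond clock into a random 64-bit value with XOR (probing find_inlock only when two ids could collide within the same millisecond), instead of OR-ing in a constant bit. A minimal standalone sketch of that scheme, with curTimeMillis64() and find_inlock() replaced by placeholder stubs; it is not part of the patch:

    #include <cstdlib>
    #include <ctime>
    #include <iostream>

    static long long ctmLast = 0;                                  // last millisecond used

    static long long curTimeMillisStub() { return (long long)std::time(0) * 1000; }
    static bool cursorIdInUse(long long) { return false; }         // stands in for find_inlock()

    long long allocCursorIdSketch() {
        long long ctm = curTimeMillisStub();
        long long x;
        while ( true ) {
            x = ((long long)std::rand()) << 32;
            x = x ^ ctm;                 // fold the clock into the id
            if ( ctm != ctmLast || !cursorIdInUse(x) )
                break;                   // collisions are only possible within one millisecond
            // otherwise retry with a fresh rand()
        }
        ctmLast = ctm;
        return x;
    }

    int main() { std::cout << allocCursorIdSketch() << std::endl; }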
diff --git a/db/clientcursor.h b/db/clientcursor.h
index f1d107f..75c7da8 100644
--- a/db/clientcursor.h
+++ b/db/clientcursor.h
@@ -27,13 +27,15 @@
#include "../pch.h"
#include "cursor.h"
#include "jsobj.h"
-#include "../util/message.h"
+#include "../util/net/message.h"
+#include "../util/net/listen.h"
#include "../util/background.h"
#include "diskloc.h"
#include "dbhelpers.h"
#include "matcher.h"
#include "../client/dbclient.h"
#include "projection.h"
+#include "s/d_chunk_manager.h"
namespace mongo {
@@ -158,14 +160,15 @@ namespace mongo {
DiskLoc lastLoc() const { return _lastLoc; }
- /* Get rid of cursors for namespaces that begin with nsprefix.
+ /* Get rid of cursors for namespace 'ns'. When dropping a db, ns is "dbname."
Used by drop, dropIndexes, dropDatabase.
*/
- static void invalidate(const char *nsPrefix);
+ static void invalidate(const char *ns);
/**
* @param microsToSleep -1 : ask client
* >=0 : sleep for that amount
+ * @param recordToLoad after yielding lock, load this record with only mmutex
* do a dbtemprelease
* note: caller should check matcher.docMatcher().atomic() first and not yield if atomic -
* we don't do herein as this->matcher (above) is only initialized for true queries/getmore.
@@ -174,15 +177,22 @@ namespace mongo {
* if false is returned, then this ClientCursor should be considered deleted -
* in fact, the whole database could be gone.
*/
- bool yield( int microsToSleep = -1 );
+ bool yield( int microsToSleep = -1 , Record * recordToLoad = 0 );
+ enum RecordNeeds {
+ DontNeed = -1 , MaybeCovered = 0 , WillNeed = 100
+ };
+
/**
+ * @param need whether or not the next record has to be read from disk for sure;
+ * if this is true, will yield if the next record isn't in memory
+ * @param yielded set to true if a yield occurred (it may also be set when no yield occurred)
* @return same as yield()
*/
- bool yieldSometimes();
+ bool yieldSometimes( RecordNeeds need, bool *yielded = 0 );
static int yieldSuggest();
- static void staticYield( int micros , const StringData& ns );
+ static void staticYield( int micros , const StringData& ns , Record * rec );
struct YieldData { CursorId _id; bool _doingDeletes; };
bool prepareToYield( YieldData &data );
@@ -235,21 +245,30 @@ namespace mongo {
DiskLoc currLoc() { return _c->currLoc(); }
BSONObj currKey() const { return _c->currKey(); }
-
/**
* same as BSONObj::getFieldsDotted
* if it can be retrieved from key, it is
+ * @param holder keeps the currKey in scope by keeping a reference to it here. generally you'll want
+ * holder and ret to destruct about the same time.
* @return if this was retrieved from key
*/
- bool getFieldsDotted( const string& name, BSONElementSet &ret );
+ bool getFieldsDotted( const string& name, BSONElementSet &ret, BSONObj& holder );
/**
* same as BSONObj::getFieldDotted
* if it can be retrieved from key, it is
* @return if this was retrieved from key
*/
- BSONElement getFieldDotted( const string& name , bool * fromKey = 0 );
-
+ BSONElement getFieldDotted( const string& name , BSONObj& holder , bool * fromKey = 0 ) ;
+
+ /** extract items from object which match a pattern object.
+ * e.g., if pattern is { x : 1, y : 1 }, builds an object with
+ * x and y elements of this object, if they are present.
+ * returns elements with original field names
+ * NOTE: copied from BSONObj::extractFields
+ */
+ BSONObj extractFields(const BSONObj &pattern , bool fillWithNull = false) ;
+
bool currentIsDup() { return _c->getsetdup( _c->currLoc() ); }
bool currentMatches() {
@@ -258,6 +277,9 @@ namespace mongo {
return _c->matcher()->matchesCurrent( _c.get() );
}
+ void setChunkManager( ShardChunkManagerPtr manager ){ _chunkManager = manager; }
+ ShardChunkManagerPtr getChunkManager(){ return _chunkManager; }
+
private:
void setLastLoc_inlock(DiskLoc);
@@ -342,6 +364,8 @@ namespace mongo {
void noTimeout() { _pinValue++; }
CCByLoc& byLoc() { return _db->ccByLoc; }
+
+ Record* _recordForYield( RecordNeeds need );
private:
@@ -371,6 +395,8 @@ namespace mongo {
bool _doingDeletes;
ElapsedTracker _yieldSometimesTracker;
+ ShardChunkManagerPtr _chunkManager;
+
public:
shared_ptr<ParsedQuery> pq;
shared_ptr<Projection> fields; // which fields query wants returned
@@ -382,7 +408,7 @@ namespace mongo {
static CCById clientCursorsById;
static long long numberTimedOut;
- static boost::recursive_mutex ccmutex; // must use this for all statics above!
+ static boost::recursive_mutex& ccmutex; // must use this for all statics above!
static CursorId allocCursorId_inlock();
};
@@ -396,3 +422,11 @@ namespace mongo {
extern ClientCursorMonitor clientCursorMonitor;
} // namespace mongo
+
+// ClientCursor should only be used with auto_ptr because it needs to be
+// release()ed after a yield if stillOk() returns false, and scoped_ptr/shared_ptr
+// do not support releasing. The empty specializations below prevent them from being used accidentally.
+namespace boost{
+ template<> class scoped_ptr<mongo::ClientCursor> {};
+ template<> class shared_ptr<mongo::ClientCursor> {};
+}
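The header change above blocks boost::scoped_ptr and boost::shared_ptr for ClientCursor, so callers follow the auto_ptr/release() pattern. A minimal usage sketch, assuming db/clientcursor.h is included, the appropriate lock is held, and 'cursor' is an open shared_ptr<Cursor> over namespace 'ns'; this mirrors the distinct command change further below and is not part of the patch:

    auto_ptr<ClientCursor> cc( new ClientCursor( QueryOption_NoCursorTimeout, cursor, ns ) );
    while ( cursor->ok() ) {
        // ... inspect cursor->current() / cursor->currKey() ...
        cursor->advance();
        if ( !cc->yieldSometimes( ClientCursor::MaybeCovered ) ) {
            // after a failed yield the ClientCursor (and possibly the whole database)
            // is already gone, so give up ownership instead of deleting it again
            cc.release();
            break;
        }
    }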
diff --git a/db/cloner.cpp b/db/cloner.cpp
index ec5ba99..8956133 100644
--- a/db/cloner.cpp
+++ b/db/cloner.cpp
@@ -17,11 +17,12 @@
*/
#include "pch.h"
+#include "cloner.h"
#include "pdfile.h"
#include "../client/dbclient.h"
#include "../bson/util/builder.h"
#include "jsobj.h"
-#include "query.h"
+#include "ops/query.h"
#include "commands.h"
#include "db.h"
#include "instance.h"
@@ -29,14 +30,30 @@
namespace mongo {
+ BSONElement getErrField(const BSONObj& o);
+
void ensureHaveIdIndex(const char *ns);
bool replAuthenticate(DBClientBase *);
+ /** Selectively release the mutex based on a parameter. */
+ class dbtempreleaseif {
+ public:
+ dbtempreleaseif( bool release ) : _impl( release ? new dbtemprelease() : 0 ) {}
+ private:
+ shared_ptr< dbtemprelease > _impl;
+ };
+
+ void mayInterrupt( bool mayBeInterrupted ) {
+ if ( mayBeInterrupted ) {
+ killCurrentOp.checkForInterrupt( false );
+ }
+ }
+
class Cloner: boost::noncopyable {
auto_ptr< DBClientWithCommands > conn;
void copy(const char *from_ns, const char *to_ns, bool isindex, bool logForRepl,
- bool masterSameProcess, bool slaveOk, Query q = Query());
+ bool masterSameProcess, bool slaveOk, bool mayYield, bool mayBeInterrupted, Query q = Query());
struct Fun;
public:
Cloner() { }
@@ -47,9 +64,11 @@ namespace mongo {
for example repairDatabase need not use it.
*/
void setConnection( DBClientWithCommands *c ) { conn.reset( c ); }
- bool go(const char *masterHost, string& errmsg, const string& fromdb, bool logForRepl, bool slaveOk, bool useReplAuth, bool snapshot);
- bool copyCollection( const string& from , const string& ns , const BSONObj& query , string& errmsg , bool copyIndexes = true, bool logForRepl = true );
+ /** copy the entire database */
+ bool go(const char *masterHost, string& errmsg, const string& fromdb, bool logForRepl, bool slaveOk, bool useReplAuth, bool snapshot, bool mayYield, bool mayBeInterrupted, int *errCode = 0);
+
+ bool copyCollection( const string& from , const string& ns , const BSONObj& query , string& errmsg , bool mayYield, bool mayBeInterrupted, bool copyIndexes = true, bool logForRepl = true );
};
/* for index info object:
@@ -87,6 +106,8 @@ namespace mongo {
}
struct Cloner::Fun {
+ Fun() : lastLog(0) { }
+ time_t lastLog;
void operator()( DBClientCursorBatchIterator &i ) {
mongolock l( true );
if ( context ) {
@@ -95,7 +116,15 @@ namespace mongo {
while( i.moreInCurrentBatch() ) {
if ( n % 128 == 127 /*yield some*/ ) {
- dbtemprelease t;
+ time_t now = time(0);
+ if( now - lastLog >= 60 ) {
+ // report progress
+ if( lastLog )
+ log() << "clone " << to_collection << ' ' << n << endl;
+ lastLog = now;
+ }
+ mayInterrupt( _mayBeInterrupted );
+ dbtempreleaseif t( _mayYield );
}
BSONObj tmp = i.nextSafe();
@@ -151,12 +180,14 @@ namespace mongo {
list<BSONObj> *storedForLater;
bool logForRepl;
Client::Context *context;
+ bool _mayYield;
+ bool _mayBeInterrupted;
};
/* copy the specified collection
isindex - if true, this is system.indexes collection, in which we do some transformation when copying.
*/
- void Cloner::copy(const char *from_collection, const char *to_collection, bool isindex, bool logForRepl, bool masterSameProcess, bool slaveOk, Query query) {
+ void Cloner::copy(const char *from_collection, const char *to_collection, bool isindex, bool logForRepl, bool masterSameProcess, bool slaveOk, bool mayYield, bool mayBeInterrupted, Query query) {
list<BSONObj> storedForLater;
Fun f;
@@ -167,11 +198,14 @@ namespace mongo {
f.saveLast = time( 0 );
f.storedForLater = &storedForLater;
f.logForRepl = logForRepl;
+ f._mayYield = mayYield;
+ f._mayBeInterrupted = mayBeInterrupted;
int options = QueryOption_NoCursorTimeout | ( slaveOk ? QueryOption_SlaveOk : 0 );
{
- dbtemprelease r;
- f.context = r._context;
+ f.context = cc().getContext();
+ mayInterrupt( mayBeInterrupted );
+ dbtempreleaseif r( mayYield );
DBClientConnection *remote = dynamic_cast< DBClientConnection* >( conn.get() );
if ( remote ) {
remote->query( boost::function<void(DBClientCursorBatchIterator &)>( f ), from_collection, query, 0, options );
@@ -204,12 +238,12 @@ namespace mongo {
}
}
- bool copyCollectionFromRemote(const string& host, const string& ns, const BSONObj& query, string& errmsg, bool logForRepl) {
+ bool copyCollectionFromRemote(const string& host, const string& ns, const BSONObj& query, string& errmsg, bool logForRepl, bool mayYield, bool mayBeInterrupted) {
Cloner c;
- return c.copyCollection(host, ns, query, errmsg , /*copyIndexes*/ true, logForRepl);
+ return c.copyCollection(host, ns, query, errmsg, mayYield, mayBeInterrupted, /*copyIndexes*/ true, logForRepl);
}
- bool Cloner::copyCollection( const string& from , const string& ns , const BSONObj& query , string& errmsg , bool copyIndexes, bool logForRepl ) {
+ bool Cloner::copyCollection( const string& from , const string& ns , const BSONObj& query , string& errmsg , bool mayYield, bool mayBeInterrupted, bool copyIndexes, bool logForRepl ) {
auto_ptr<DBClientConnection> myconn;
myconn.reset( new DBClientConnection() );
if ( ! myconn->connect( from , errmsg ) )
@@ -231,7 +265,7 @@ namespace mongo {
{
// main data
- copy( ns.c_str() , ns.c_str() , /*isindex*/false , logForRepl , false , true , Query(query).snapshot() );
+ copy( ns.c_str() , ns.c_str() , /*isindex*/false , logForRepl , false , true , mayYield, mayBeInterrupted, Query(query).snapshot() );
}
/* TODO : copyIndexes bool does not seem to be implemented! */
@@ -242,7 +276,7 @@ namespace mongo {
{
// indexes
string temp = ctx.db()->name + ".system.indexes";
- copy( temp.c_str() , temp.c_str() , /*isindex*/true , logForRepl , false , true , BSON( "ns" << ns ) );
+ copy( temp.c_str() , temp.c_str() , /*isindex*/true , logForRepl , false , true , mayYield, mayBeInterrupted, BSON( "ns" << ns ) );
}
getDur().commitIfNeeded();
return true;
@@ -251,8 +285,10 @@ namespace mongo {
extern bool inDBRepair;
void ensureIdIndexForNewNs(const char *ns);
- bool Cloner::go(const char *masterHost, string& errmsg, const string& fromdb, bool logForRepl, bool slaveOk, bool useReplAuth, bool snapshot) {
-
+ bool Cloner::go(const char *masterHost, string& errmsg, const string& fromdb, bool logForRepl, bool slaveOk, bool useReplAuth, bool snapshot, bool mayYield, bool mayBeInterrupted, int *errCode) {
+ if ( errCode ) {
+ *errCode = 0;
+ }
massert( 10289 , "useReplAuth is not written to replication log", !useReplAuth || !logForRepl );
string todb = cc().database()->name;
@@ -274,7 +310,8 @@ namespace mongo {
string ns = fromdb + ".system.namespaces";
list<BSONObj> toClone;
{
- dbtemprelease r;
+ mayInterrupt( mayBeInterrupted );
+ dbtempreleaseif r( mayYield );
// just using exhaust for collection copying right now
auto_ptr<DBClientCursor> c;
@@ -302,6 +339,18 @@ namespace mongo {
errmsg = "query failed " + ns;
return false;
}
+
+ if ( c->more() ) {
+ BSONObj first = c->next();
+ if( !getErrField(first).eoo() ) {
+ if ( errCode ) {
+ *errCode = first.getIntField("code");
+ }
+ errmsg = "query failed " + ns;
+ return false;
+ }
+ c->putBack( first );
+ }
while ( c->more() ) {
BSONObj collection = c->next();
@@ -325,7 +374,7 @@ namespace mongo {
continue;
}
}
- if( ! isANormalNSName( from_name ) ) {
+ if( ! NamespaceString::normal( from_name ) ) {
log(2) << "\t\t not cloning because has $ " << endl;
continue;
}
@@ -335,7 +384,8 @@ namespace mongo {
for ( list<BSONObj>::iterator i=toClone.begin(); i != toClone.end(); i++ ) {
{
- dbtemprelease r;
+ mayInterrupt( mayBeInterrupted );
+ dbtempreleaseif r( mayYield );
}
BSONObj collection = *i;
log(2) << " really will clone: " << collection << endl;
@@ -358,7 +408,7 @@ namespace mongo {
Query q;
if( snapshot )
q.snapshot();
- copy(from_name, to_name.c_str(), false, logForRepl, masterSameProcess, slaveOk, q);
+ copy(from_name, to_name.c_str(), false, logForRepl, masterSameProcess, slaveOk, mayYield, mayBeInterrupted, q);
if( wantIdIndex ) {
/* we need dropDups to be true as we didn't do a true snapshot and this is before applying oplog operations
@@ -385,20 +435,15 @@ namespace mongo {
rather than this exact value. we should standardize. OR, remove names - which is in the bugdb. Anyway, this
is dubious here at the moment.
*/
- copy(system_indexes_from.c_str(), system_indexes_to.c_str(), true, logForRepl, masterSameProcess, slaveOk, BSON( "name" << NE << "_id_" ) );
+ copy(system_indexes_from.c_str(), system_indexes_to.c_str(), true, logForRepl, masterSameProcess, slaveOk, mayYield, mayBeInterrupted, BSON( "name" << NE << "_id_" ) );
return true;
}
- /* slaveOk - if true it is ok if the source of the data is !ismaster.
- useReplAuth - use the credentials we normally use as a replication slave for the cloning
- snapshot - use $snapshot mode for copying collections. note this should not be used when it isn't required, as it will be slower.
- for example repairDatabase need not use it.
- */
bool cloneFrom(const char *masterHost, string& errmsg, const string& fromdb, bool logForReplication,
- bool slaveOk, bool useReplAuth, bool snapshot) {
+ bool slaveOk, bool useReplAuth, bool snapshot, bool mayYield, bool mayBeInterrupted, int *errCode) {
Cloner c;
- return c.go(masterHost, errmsg, fromdb, logForReplication, slaveOk, useReplAuth, snapshot);
+ return c.go(masterHost, errmsg, fromdb, logForReplication, slaveOk, useReplAuth, snapshot, mayYield, mayBeInterrupted, errCode);
}
/* Usage:
@@ -415,7 +460,7 @@ namespace mongo {
help << "{ clone : \"host13\" }";
}
CmdClone() : Command("clone") { }
- virtual bool run(const string& dbname , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ virtual bool run(const string& dbname , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
string from = cmdObj.getStringField("clone");
if ( from.empty() )
return false;
@@ -423,7 +468,7 @@ namespace mongo {
were to clone it would get a different point-in-time and not match.
*/
return cloneFrom(from.c_str(), errmsg, dbname,
- /*logForReplication=*/!fromRepl, /*slaveok*/false, /*usereplauth*/false, /*snapshot*/true);
+ /*logForReplication=*/!fromRepl, /*slaveok*/false, /*usereplauth*/false, /*snapshot*/true, /*mayYield*/true, /*mayBeInterrupted*/false);
}
} cmdclone;
@@ -441,7 +486,7 @@ namespace mongo {
"Warning: the local copy of 'ns' is emptied before the copying begins. Any existing data will be lost there."
;
}
- virtual bool run(const string& dbname , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ virtual bool run(const string& dbname , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
string fromhost = cmdObj.getStringField("from");
if ( fromhost.empty() ) {
errmsg = "missing 'from' parameter";
@@ -470,7 +515,7 @@ namespace mongo {
<< " query: " << query << " " << ( copyIndexes ? "" : ", not copying indexes" ) << endl;
Cloner c;
- return c.copyCollection( fromhost , collection , query, errmsg , copyIndexes );
+ return c.copyCollection( fromhost , collection , query, errmsg , true, false, copyIndexes );
}
} cmdclonecollection;
@@ -493,7 +538,7 @@ namespace mongo {
help << "get a nonce for subsequent copy db request from secure server\n";
help << "usage: {copydbgetnonce: 1, fromhost: <hostname>}";
}
- virtual bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ virtual bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
string fromhost = cmdObj.getStringField("fromhost");
if ( fromhost.empty() ) {
/* copy from self */
@@ -532,9 +577,10 @@ namespace mongo {
virtual LockType locktype() const { return WRITE; }
virtual void help( stringstream &help ) const {
help << "copy a database from another host to this host\n";
- help << "usage: {copydb: 1, fromhost: <hostname>, fromdb: <db>, todb: <db>[, username: <username>, nonce: <nonce>, key: <key>]}";
+ help << "usage: {copydb: 1, fromhost: <hostname>, fromdb: <db>, todb: <db>[, slaveOk: <bool>, username: <username>, nonce: <nonce>, key: <key>]}";
}
- virtual bool run(const string& dbname, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ virtual bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ bool slaveOk = cmdObj["slaveOk"].trueValue();
string fromhost = cmdObj.getStringField("fromhost");
if ( fromhost.empty() ) {
/* copy from self */
@@ -565,7 +611,7 @@ namespace mongo {
c.setConnection( authConn_.release() );
}
Client::Context ctx(todb);
- bool res = c.go(fromhost.c_str(), errmsg, fromdb, /*logForReplication=*/!fromRepl, /*slaveok*/false, /*replauth*/false, /*snapshot*/true);
+ bool res = c.go(fromhost.c_str(), errmsg, fromdb, /*logForReplication=*/!fromRepl, slaveOk, /*replauth*/false, /*snapshot*/true, /*mayYield*/true, /*mayBeInterrupted*/ false);
return res;
}
} cmdcopydb;
@@ -576,6 +622,7 @@ namespace mongo {
virtual bool adminOnly() const {
return true;
}
+ virtual bool requiresAuth() { return false; } // do our own auth
virtual bool slaveOk() const {
return false;
}
@@ -586,7 +633,7 @@ namespace mongo {
virtual void help( stringstream &help ) const {
help << " example: { renameCollection: foo.a, to: bar.b }";
}
- virtual bool run(const string& dbname, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ virtual bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
string source = cmdObj.getStringField( name.c_str() );
string target = cmdObj.getStringField( "to" );
if ( source.empty() || target.empty() ) {
@@ -597,7 +644,7 @@ namespace mongo {
bool capped = false;
long long size = 0;
{
- Client::Context ctx( source );
+ Client::Context ctx( source ); // auths against source
NamespaceDetails *nsd = nsdetails( source.c_str() );
uassert( 10026 , "source namespace does not exist", nsd );
capped = nsd->capped;
@@ -606,7 +653,7 @@ namespace mongo {
size += i.ext()->length;
}
- Client::Context ctx( target );
+ Client::Context ctx( target ); //auths against target
if ( nsdetails( target.c_str() ) ) {
uassert( 10027 , "target namespace exists", cmdObj["dropTarget"].trueValue() );
diff --git a/db/cloner.h b/db/cloner.h
new file mode 100644
index 0000000..94264f8
--- /dev/null
+++ b/db/cloner.h
@@ -0,0 +1,39 @@
+// cloner.h - copy a database (export/import basically)
+
+/**
+ * Copyright (C) 2011 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "jsobj.h"
+
+namespace mongo {
+
+ /**
+ * @param slaveOk - if true it is ok if the source of the data is !ismaster.
+ * @param useReplAuth - use the credentials we normally use as a replication slave for the cloning
+ * @param snapshot - use $snapshot mode for copying collections. note this should not be used when it isn't required, as it will be slower.
+ * for example repairDatabase need not use it.
+ * @param errCode - If provided, this will be set on error to the server's error code. Currently
+ * this will only be set if there is an error in the initial system.namespaces query.
+ */
+ bool cloneFrom(const char *masterHost, string& errmsg, const string& fromdb, bool logForReplication,
+ bool slaveOk, bool useReplAuth, bool snapshot, bool mayYield,
+ bool mayBeInterrupted, int *errCode = 0);
+
+ bool copyCollectionFromRemote(const string& host, const string& ns, const BSONObj& query, string& errmsg, bool logForRepl, bool mayYield, bool mayBeInterrupted);
+
+} // namespace mongo
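A short sketch of how a caller might use the new mayYield/mayBeInterrupted flags and the errCode out-parameter declared above; the host and database names are made up, and the caller is assumed to hold the usual write lock. Not part of the patch:

    string errmsg;
    int errCode = 0;
    bool ok = cloneFrom( "otherhost:27017", errmsg, "sourcedb",
                         /*logForReplication*/ true, /*slaveOk*/ true, /*useReplAuth*/ false,
                         /*snapshot*/ false, /*mayYield*/ true, /*mayBeInterrupted*/ true,
                         &errCode );
    if ( !ok ) {
        // errCode is only filled in when the initial system.namespaces query fails
        log() << "clone failed: " << errmsg << " code: " << errCode << endl;
    }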
diff --git a/db/cmdline.cpp b/db/cmdline.cpp
index 2a10fb5..fd759a7 100644
--- a/db/cmdline.cpp
+++ b/db/cmdline.cpp
@@ -19,14 +19,17 @@
#include "pch.h"
#include "cmdline.h"
#include "commands.h"
+#include "../util/password.h"
#include "../util/processinfo.h"
-#include "../util/message.h"
-#include "security_key.h"
+#include "../util/net/listen.h"
+#include "security_common.h"
#ifdef _WIN32
#include <direct.h>
#endif
+#define MAX_LINE_LENGTH 256
+
namespace po = boost::program_options;
namespace fs = boost::filesystem;
@@ -34,7 +37,8 @@ namespace mongo {
void setupSignals( bool inFork );
string getHostNameCached();
- BSONArray argvArray;
+ static BSONArray argvArray;
+ static BSONObj parsedOpts;
void CmdLine::addGlobalOptions( boost::program_options::options_description& general ,
boost::program_options::options_description& hidden ) {
@@ -52,15 +56,25 @@ namespace mongo {
("port", po::value<int>(&cmdLine.port), "specify port number")
("bind_ip", po::value<string>(&cmdLine.bind_ip), "comma separated list of ip addresses to listen on - all local ips by default")
("maxConns",po::value<int>(), "max number of simultaneous connections")
+ ("objcheck", "inspect client data for validity on receipt")
("logpath", po::value<string>() , "log file to send write to instead of stdout - has to be a file, not directory" )
("logappend" , "append to logpath instead of over-writing" )
("pidfilepath", po::value<string>(), "full path to pidfile (if not set, no pidfile is created)")
("keyFile", po::value<string>(), "private key for cluster authentication (only for replica sets)")
#ifndef _WIN32
+ ("nounixsocket", "disable listening on unix sockets")
("unixSocketPrefix", po::value<string>(), "alternative directory for UNIX domain sockets (defaults to /tmp)")
("fork" , "fork server process" )
#endif
;
+
+ hidden.add_options()
+#ifdef MONGO_SSL
+ ("sslOnNormalPorts" , "use ssl on configured ports" )
+ ("sslPEMKeyFile" , po::value<string>(&cmdLine.sslPEMKeyFile), "PEM file for ssl" )
+ ("sslPEMKeyPassword" , new PasswordValue(&cmdLine.sslPEMKeyPassword) , "PEM file password" )
+#endif
+ ;
}
@@ -82,6 +96,32 @@ namespace mongo {
}
#endif
+ void CmdLine::parseConfigFile( istream &f, stringstream &ss ) {
+ string s;
+ char line[MAX_LINE_LENGTH];
+
+ while ( f ) {
+ f.getline(line, MAX_LINE_LENGTH);
+ s = line;
+ std::remove(s.begin(), s.end(), ' ');
+ std::remove(s.begin(), s.end(), '\t');
+ boost::to_upper(s);
+
+ if ( s.find( "FASTSYNC" ) != string::npos )
+ cout << "warning \"fastsync\" should not be put in your configuration file" << endl;
+
+ if ( s.c_str()[0] == '#' ) {
+ // skipping commented line
+ } else if ( s.find( "=FALSE" ) == string::npos ) {
+ ss << line << endl;
+ } else {
+ cout << "warning: remove or comment out this line by starting it with \'#\', skipping now : " << line << endl;
+ }
+ }
+ return;
+ }
+
+
bool CmdLine::store( int argc , char ** argv ,
boost::program_options::options_description& visible,
@@ -138,7 +178,9 @@ namespace mongo {
return false;
}
- po::store( po::parse_config_file( f , all ) , params );
+ stringstream ss;
+ CmdLine::parseConfigFile( f, ss );
+ po::store( po::parse_config_file( ss , all ) , params );
f.close();
}
@@ -178,6 +220,10 @@ namespace mongo {
connTicketHolder.resize( newSize );
}
+ if (params.count("objcheck")) {
+ cmdLine.objcheck = true;
+ }
+
string logpath;
#ifndef _WIN32
@@ -188,7 +234,11 @@ namespace mongo {
::exit(-1);
}
}
-
+
+ if (params.count("nounixsocket")) {
+ cmdLine.noUnixSocket = true;
+ }
+
if (params.count("fork")) {
if ( ! params.count( "logpath" ) ) {
cout << "--fork has to be used with --logpath" << endl;
@@ -252,6 +302,7 @@ namespace mongo {
setupCoreSignals();
setupSignals( true );
}
+
#endif
if (params.count("logpath")) {
if ( logpath.size() == 0 )
@@ -272,9 +323,66 @@ namespace mongo {
dbexit(EXIT_BADOPTIONS);
}
+ cmdLine.keyFile = true;
noauth = false;
}
+ else {
+ cmdLine.keyFile = false;
+ }
+#ifdef MONGO_SSL
+ if (params.count("sslOnNormalPorts") ) {
+ cmdLine.sslOnNormalPorts = true;
+
+ if ( cmdLine.sslPEMKeyPassword.size() == 0 ) {
+ log() << "need sslPEMKeyPassword" << endl;
+ dbexit(EXIT_BADOPTIONS);
+ }
+
+ if ( cmdLine.sslPEMKeyFile.size() == 0 ) {
+ log() << "need sslPEMKeyFile" << endl;
+ dbexit(EXIT_BADOPTIONS);
+ }
+
+ cmdLine.sslServerManager = new SSLManager( false );
+ cmdLine.sslServerManager->setupPEM( cmdLine.sslPEMKeyFile , cmdLine.sslPEMKeyPassword );
+ }
+#endif
+
+ {
+ BSONObjBuilder b;
+ for (po::variables_map::const_iterator it(params.begin()), end(params.end()); it != end; it++){
+ if (!it->second.defaulted()){
+ const string& key = it->first;
+ const po::variable_value& value = it->second;
+ const type_info& type = value.value().type();
+
+ if (type == typeid(string)){
+ if (value.as<string>().empty())
+ b.appendBool(key, true); // boost po uses empty string for flags like --quiet
+ else
+ b.append(key, value.as<string>());
+ }
+ else if (type == typeid(int))
+ b.append(key, value.as<int>());
+ else if (type == typeid(double))
+ b.append(key, value.as<double>());
+ else if (type == typeid(bool))
+ b.appendBool(key, value.as<bool>());
+ else if (type == typeid(long))
+ b.appendNumber(key, (long long)value.as<long>());
+ else if (type == typeid(unsigned))
+ b.appendNumber(key, (long long)value.as<unsigned>());
+ else if (type == typeid(unsigned long long))
+ b.appendNumber(key, (long long)value.as<unsigned long long>());
+ else if (type == typeid(vector<string>))
+ b.append(key, value.as<vector<string> >());
+ else
+ b.append(key, "UNKNOWN TYPE: " + demangleName(type));
+ }
+ }
+ parsedOpts = b.obj();
+ }
{
BSONArrayBuilder b;
@@ -286,6 +394,10 @@ namespace mongo {
return true;
}
+ void printCommandLineOpts() {
+ log() << "options: " << parsedOpts << endl;
+ }
+
void ignoreSignal( int sig ) {}
void setupCoreSignals() {
@@ -303,8 +415,9 @@ namespace mongo {
virtual bool adminOnly() const { return true; }
virtual bool slaveOk() const { return true; }
- virtual bool run(const string&, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ virtual bool run(const string&, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
result.append("argv", argvArray);
+ result.append("parsed", parsedOpts);
return true;
}
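The new CmdLine::parseConfigFile() pre-filters a config file (dropping "setting = false" lines and warning about fastsync) before boost::program_options sees it, as wired into store() above. A minimal sketch of calling it directly; the path and function name are made up, and db/cmdline.h is assumed to be included. Not part of the patch:

    #include <fstream>
    #include <sstream>

    void loadFilteredConfig( const char* path ) {        // 'path' is hypothetical
        std::ifstream f( path );
        std::stringstream filtered;
        CmdLine::parseConfigFile( f, filtered );         // comments pass through, "<opt> = false" lines are dropped
        // po::store( po::parse_config_file( filtered, all ), params );  // as CmdLine::store() does above
    }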
diff --git a/db/cmdline.h b/db/cmdline.h
index 4c8c7c4..fdf3f56 100644
--- a/db/cmdline.h
+++ b/db/cmdline.h
@@ -21,26 +21,25 @@
namespace mongo {
+#ifdef MONGO_SSL
+ class SSLManager;
+#endif
+
+
+
/* command line options
*/
/* concurrency: OK/READ */
struct CmdLine {
- CmdLine() :
- port(DefaultDBPort), rest(false), jsonp(false), quiet(false), noTableScan(false), prealloc(true), smallfiles(sizeof(int*) == 4),
- quota(false), quotaFiles(8), cpu(false), durOptions(0), oplogSize(0), defaultProfile(0), slowMS(100), pretouch(0), moveParanoia( true ),
- syncdelay(60), socket("/tmp") {
- // default may change for this later.
-#if defined(_DURABLEDEFAULTON)
- dur = true;
-#else
- dur = false;
-#endif
- }
+ CmdLine();
string binaryName; // mongod or mongos
string cwd; // cwd of when process started
+ // this is suboptimal as someone could rename a binary. todo...
+ bool isMongos() const { return binaryName == "mongos"; }
+
int port; // --port
enum {
DefaultDBPort = 27017,
@@ -70,13 +69,17 @@ namespace mongo {
bool quiet; // --quiet
bool noTableScan; // --notablescan no table scans allowed
bool prealloc; // --noprealloc no preallocation of data files
+ bool preallocj; // --nopreallocj no preallocation of journal files
bool smallfiles; // --smallfiles allocate smaller data files
+ bool configsvr; // --configsvr
+
bool quota; // --quota
int quotaFiles; // --quotaFiles
bool cpu; // --cpu show cpu time periodically
- bool dur; // --dur durability
+ bool dur; // --dur durability (now --journal)
+ unsigned journalCommitInterval; // group/batch commit interval ms
/** --durOptions 7 dump journal and terminate without doing anything further
--durOptions 4 recover and terminate without listening
@@ -86,10 +89,13 @@ namespace mongo {
DurScanOnly = 2, // don't do any real work, just scan and dump if dump specified
DurRecoverOnly = 4, // terminate after recovery step
DurParanoid = 8, // paranoid mode enables extra checks
- DurAlwaysCommit = 16 // do a group commit every time the writelock is released
+ DurAlwaysCommit = 16, // do a group commit every time the writelock is released
+ DurAlwaysRemap = 32 // remap the private view after every group commit (may lag to the next write lock acquisition, but will do all files then)
};
int durOptions; // --durOptions <n> for debugging
+ bool objcheck; // --objcheck
+
long long oplogSize; // --oplogSize
int defaultProfile; // --profile
int slowMS; // --time in ms that is "slow"
@@ -98,8 +104,19 @@ namespace mongo {
bool moveParanoia; // for move chunk paranoia
double syncdelay; // seconds between fsyncs
+ bool noUnixSocket; // --nounixsocket
string socket; // UNIX domain socket directory
+ bool keyFile;
+
+#ifdef MONGO_SSL
+ bool sslOnNormalPorts; // --sslOnNormalPorts
+ string sslPEMKeyFile; // --sslPEMKeyFile
+ string sslPEMKeyPassword; // --sslPEMKeyPassword
+
+ SSLManager* sslServerManager; // currently leaks on close
+#endif
+
static void addGlobalOptions( boost::program_options::options_description& general ,
boost::program_options::options_description& hidden );
@@ -107,6 +124,7 @@ namespace mongo {
boost::program_options::options_description& hidden );
+ static void parseConfigFile( istream &f, stringstream &ss);
/**
* @return true if should run program, false if should exit
*/
@@ -117,12 +135,37 @@ namespace mongo {
boost::program_options::variables_map &output );
};
+ // todo move to cmdline.cpp?
+ inline CmdLine::CmdLine() :
+ port(DefaultDBPort), rest(false), jsonp(false), quiet(false), noTableScan(false), prealloc(true), preallocj(true), smallfiles(sizeof(int*) == 4),
+ configsvr(false),
+ quota(false), quotaFiles(8), cpu(false), durOptions(0), objcheck(false), oplogSize(0), defaultProfile(0), slowMS(100), pretouch(0), moveParanoia( true ),
+ syncdelay(60), noUnixSocket(false), socket("/tmp")
+ {
+ journalCommitInterval = 0; // 0 means use default
+ dur = false;
+#if defined(_DURABLEDEFAULTON)
+ dur = true;
+#endif
+ if( sizeof(void*) == 8 )
+ dur = true;
+#if defined(_DURABLEDEFAULTOFF)
+ dur = false;
+#endif
+
+#ifdef MONGO_SSL
+ sslOnNormalPorts = false;
+ sslServerManager = 0;
+#endif
+ }
+
extern CmdLine cmdLine;
void setupCoreSignals();
string prettyHostName();
+ void printCommandLineOpts();
/**
* used for setParameter
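The inline CmdLine constructor above changes the journaling default; the three steps resolve in order, later ones overriding earlier ones. A condensed sketch of just that logic, not part of the patch:

    bool dur = false;
    #if defined(_DURABLEDEFAULTON)
    dur = true;                    // build forced journaling on
    #endif
    if ( sizeof(void*) == 8 )
        dur = true;                // 64-bit builds now journal by default
    #if defined(_DURABLEDEFAULTOFF)
    dur = false;                   // build forced journaling off; wins over both of the above
    #endif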
diff --git a/db/commands.cpp b/db/commands.cpp
index 30bdc54..b6c1526 100644
--- a/db/commands.cpp
+++ b/db/commands.cpp
@@ -21,7 +21,7 @@
#include "jsobj.h"
#include "commands.h"
#include "client.h"
-#include "replpair.h"
+#include "replutil.h"
namespace mongo {
@@ -121,55 +121,6 @@ namespace mongo {
help << "no help defined";
}
- bool Command::runAgainstRegistered(const char *ns, BSONObj& jsobj, BSONObjBuilder& anObjBuilder) {
- const char *p = strchr(ns, '.');
- if ( !p ) return false;
- if ( strcmp(p, ".$cmd") != 0 ) return false;
-
- bool ok = false;
-
- BSONElement e = jsobj.firstElement();
- map<string,Command*>::iterator i;
-
- if ( e.eoo() )
- ;
- /* check for properly registered command objects. Note that all the commands below should be
- migrated over to the command object format.
- */
- else if ( (i = _commands->find(e.fieldName())) != _commands->end() ) {
- string errmsg;
- Command *c = i->second;
- if ( c->adminOnly() && !startsWith(ns, "admin.") ) {
- ok = false;
- errmsg = "access denied - use admin db";
- }
- else if ( jsobj.getBoolField( "help" ) ) {
- stringstream help;
- help << "help for: " << e.fieldName() << " ";
- c->help( help );
- anObjBuilder.append( "help" , help.str() );
- }
- else {
- ok = c->run( nsToDatabase( ns ) , jsobj, errmsg, anObjBuilder, false);
- }
-
- BSONObj tmp = anObjBuilder.asTempObj();
- bool have_ok = tmp.hasField("ok");
- bool have_errmsg = tmp.hasField("errmsg");
-
- if (!have_ok)
- anObjBuilder.append( "ok" , ok ? 1.0 : 0.0 );
-
- if ( !ok && !have_errmsg) {
- anObjBuilder.append("errmsg", errmsg);
- uassert_nothrow(errmsg.c_str());
- }
- return true;
- }
-
- return false;
- }
-
Command* Command::findCommand( const string& name ) {
map<string,Command*>::iterator i = _commands->find( name );
if ( i == _commands->end() )
diff --git a/db/commands.h b/db/commands.h
index 42e46a0..c186218 100644
--- a/db/commands.h
+++ b/db/commands.h
@@ -18,15 +18,14 @@
#pragma once
#include "../pch.h"
-
#include "jsobj.h"
#include "../util/timer.h"
+#include "../client/dbclient.h"
namespace mongo {
class BSONObj;
class BSONObjBuilder;
- class BufBuilder;
class Client;
/** mongodb "commands" (sent via db.$cmd.findOne(...))
@@ -47,7 +46,7 @@ namespace mongo {
return value is true if succeeded. if false, set errmsg text.
*/
- virtual bool run(const string& db, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) = 0;
+ virtual bool run(const string& db, BSONObj& cmdObj, int options, string& errmsg, BSONObjBuilder& result, bool fromRepl = false ) = 0;
/*
note: logTheTop() MUST be false if READ
@@ -70,7 +69,7 @@ namespace mongo {
*/
virtual bool localHostOnlyIfNoAuth(const BSONObj& cmdObj) { return false; }
- /* Return true if slaves of a replication pair are allowed to execute the command
+ /* Return true if slaves are allowed to execute the command
(the command directly from a client -- if fromRepl, always allowed).
*/
virtual bool slaveOk() const = 0;
@@ -96,6 +95,11 @@ namespace mongo {
*/
virtual bool requiresAuth() { return true; }
+ /* Return true if a replica set secondary should go into "recovering"
+ (unreadable) state while running this command.
+ */
+ virtual bool maintenanceMode() const { return false; }
+
/** @param webUI expose the command in the web ui as localhost:28017/<name>
@param oldName an optional old, deprecated name for the command
*/
@@ -122,12 +126,30 @@ namespace mongo {
static const map<string,Command*>* commandsByBestName() { return _commandsByBestName; }
static const map<string,Command*>* webCommands() { return _webCommands; }
/** @return if command was found and executed */
- static bool runAgainstRegistered(const char *ns, BSONObj& jsobj, BSONObjBuilder& anObjBuilder);
+ static bool runAgainstRegistered(const char *ns, BSONObj& jsobj, BSONObjBuilder& anObjBuilder, int queryOptions = 0);
static LockType locktype( const string& name );
static Command * findCommand( const string& name );
};
- bool _runCommands(const char *ns, BSONObj& jsobj, BufBuilder &b, BSONObjBuilder& anObjBuilder, bool fromRepl, int queryOptions);
+ class CmdShutdown : public Command {
+ public:
+ virtual bool requiresAuth() { return true; }
+ virtual bool adminOnly() const { return true; }
+ virtual bool localHostOnlyIfNoAuth(const BSONObj& cmdObj) { return true; }
+ virtual bool logTheOp() {
+ return false;
+ }
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual LockType locktype() const { return NONE; }
+ virtual void help( stringstream& help ) const;
+ CmdShutdown() : Command("shutdown") {}
+ bool run(const string& dbname, BSONObj& cmdObj, int options, string& errmsg, BSONObjBuilder& result, bool fromRepl);
+ private:
+ bool shutdownHelper();
+ };
+ bool _runCommands(const char *ns, BSONObj& jsobj, BufBuilder &b, BSONObjBuilder& anObjBuilder, bool fromRepl, int queryOptions);
} // namespace mongo
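Every command's run() now takes an int options parameter (query options forwarded from the wire), which is what the many signature updates in this patch reflect. A minimal sketch of a command written against the new signature; the command name is made up, and this is not part of the patch:

    class CmdExampleSketch : public Command {
    public:
        CmdExampleSketch() : Command( "exampleSketch" ) {}     // hypothetical name
        virtual bool slaveOk() const { return true; }
        virtual LockType locktype() const { return NONE; }
        virtual void help( stringstream& help ) const { help << "no-op example"; }
        virtual bool run(const string& dbname, BSONObj& cmdObj, int options,
                         string& errmsg, BSONObjBuilder& result, bool fromRepl) {
            // 'options' carries the query options the client sent with the $cmd query
            return true;
        }
    } cmdExampleSketch;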
diff --git a/db/commands/distinct.cpp b/db/commands/distinct.cpp
index 7b2f6a8..48f4405 100644
--- a/db/commands/distinct.cpp
+++ b/db/commands/distinct.cpp
@@ -32,7 +32,7 @@ namespace mongo {
help << "{ distinct : 'collection name' , key : 'a.b' , query : {} }";
}
- bool run(const string& dbname, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
+ bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
Timer t;
string ns = dbname + '.' + cmdObj.firstElement().valuestr();
@@ -63,7 +63,7 @@ namespace mongo {
shared_ptr<Cursor> cursor;
if ( ! query.isEmpty() ) {
- cursor = bestGuessCursor(ns.c_str() , query , BSONObj() );
+ cursor = NamespaceDetailsTransient::getCursor(ns.c_str() , query , BSONObj() );
}
else {
@@ -78,29 +78,33 @@ namespace mongo {
if ( idx.inKeyPattern( key ) ) {
cursor = bestGuessCursor( ns.c_str() , BSONObj() , idx.keyPattern() );
- break;
+ if( cursor.get() ) break;
}
}
if ( ! cursor.get() )
- cursor = bestGuessCursor(ns.c_str() , query , BSONObj() );
+ cursor = NamespaceDetailsTransient::getCursor(ns.c_str() , query , BSONObj() );
}
-
-
- scoped_ptr<ClientCursor> cc (new ClientCursor(QueryOption_NoCursorTimeout, cursor, ns));
+
+ assert( cursor );
+ string cursorName = cursor->toString();
+
+ auto_ptr<ClientCursor> cc (new ClientCursor(QueryOption_NoCursorTimeout, cursor, ns));
while ( cursor->ok() ) {
nscanned++;
bool loadedObject = false;
- if ( !cursor->matcher() || cursor->matcher()->matchesCurrent( cursor.get() , &md ) ) {
+ if ( ( !cursor->matcher() || cursor->matcher()->matchesCurrent( cursor.get() , &md ) ) &&
+ !cursor->getsetdup( cursor->currLoc() ) ) {
n++;
+ BSONObj holder;
BSONElementSet temp;
- loadedObject = ! cc->getFieldsDotted( key , temp );
+ loadedObject = ! cc->getFieldsDotted( key , temp, holder );
for ( BSONElementSet::iterator i=temp.begin(); i!=temp.end(); ++i ) {
BSONElement e = *i;
@@ -118,13 +122,15 @@ namespace mongo {
}
}
- if ( loadedObject || md.loadedObject )
+ if ( loadedObject || md._loadedObject )
nscannedObjects++;
cursor->advance();
- if (!cc->yieldSometimes())
+ if (!cc->yieldSometimes( ClientCursor::MaybeCovered )) {
+ cc.release();
break;
+ }
RARELY killCurrentOp.checkForInterrupt();
}
@@ -139,6 +145,7 @@ namespace mongo {
b.appendNumber( "nscanned" , nscanned );
b.appendNumber( "nscannedObjects" , nscannedObjects );
b.appendNumber( "timems" , t.millis() );
+ b.append( "cursor" , cursorName );
result.append( "stats" , b.obj() );
}
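The extra holder argument used above exists because the elements placed in the set may point into the current key's storage; the holder keeps that storage alive. A short sketch of the intended call pattern, assuming cc is a valid ClientCursor and "a.b" is an arbitrary dotted field; not part of the patch:

    BSONObj holder;                                   // owns whatever buffer the elements point into
    BSONElementSet values;
    bool fromKey = cc->getFieldsDotted( "a.b", values, holder );   // fromKey: values came from the index key
    for ( BSONElementSet::iterator i = values.begin(); i != values.end(); ++i ) {
        BSONElement e = *i;
        // safe while 'holder' is in scope; keep holder and the elements alive together
    }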
diff --git a/db/commands/find_and_modify.cpp b/db/commands/find_and_modify.cpp
new file mode 100644
index 0000000..0cf766f
--- /dev/null
+++ b/db/commands/find_and_modify.cpp
@@ -0,0 +1,153 @@
+// find_and_modify.cpp
+
+/**
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "../commands.h"
+#include "../instance.h"
+#include "../clientcursor.h"
+
+namespace mongo {
+
+ /* Find and Modify an object returning either the old (default) or new value*/
+ class CmdFindAndModify : public Command {
+ public:
+ virtual void help( stringstream &help ) const {
+ help <<
+ "{ findAndModify: \"collection\", query: {processed:false}, update: {$set: {processed:true}}, new: true}\n"
+ "{ findAndModify: \"collection\", query: {processed:false}, remove: true, sort: {priority:-1}}\n"
+ "Either update or remove is required, all other fields have default values.\n"
+ "Output is in the \"value\" field\n";
+ }
+
+ CmdFindAndModify() : Command("findAndModify", false, "findandmodify") { }
+ virtual bool logTheOp() { return false; } // the modifications will be logged directly
+ virtual bool slaveOk() const { return false; }
+ virtual LockType locktype() const { return WRITE; }
+ virtual bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
+ static DBDirectClient db;
+
+ string ns = dbname + '.' + cmdObj.firstElement().valuestr();
+
+ BSONObj origQuery = cmdObj.getObjectField("query"); // defaults to {}
+ Query q (origQuery);
+ BSONElement sort = cmdObj["sort"];
+ if (!sort.eoo())
+ q.sort(sort.embeddedObjectUserCheck());
+
+ bool upsert = cmdObj["upsert"].trueValue();
+
+ BSONObj fieldsHolder (cmdObj.getObjectField("fields"));
+ const BSONObj* fields = (fieldsHolder.isEmpty() ? NULL : &fieldsHolder);
+
+ Projection projection;
+ if (fields) {
+ projection.init(fieldsHolder);
+ if (!projection.includeID())
+ fields = NULL; // do projection in post-processing
+ }
+
+ BSONObj out = db.findOne(ns, q, fields);
+ if (out.isEmpty()) {
+ if (!upsert) {
+ result.appendNull("value");
+ return true;
+ }
+
+ BSONElement update = cmdObj["update"];
+ uassert(13329, "upsert mode requires update field", !update.eoo());
+ uassert(13330, "upsert mode requires query field", !origQuery.isEmpty());
+ db.update(ns, origQuery, update.embeddedObjectUserCheck(), true);
+
+ BSONObj gle = db.getLastErrorDetailed();
+ result.append("lastErrorObject", gle);
+ if (gle["err"].type() == String) {
+ errmsg = gle["err"].String();
+ return false;
+ }
+
+ if (cmdObj["new"].trueValue()) {
+ BSONElement _id = gle["upserted"];
+ if (_id.eoo())
+ _id = origQuery["_id"];
+
+ out = db.findOne(ns, QUERY("_id" << _id), fields);
+ }
+
+ }
+ else {
+
+ if (cmdObj["remove"].trueValue()) {
+ uassert(12515, "can't remove and update", cmdObj["update"].eoo());
+ db.remove(ns, QUERY("_id" << out["_id"]), 1);
+
+ BSONObj gle = db.getLastErrorDetailed();
+ result.append("lastErrorObject", gle);
+ if (gle["err"].type() == String) {
+ errmsg = gle["err"].String();
+ return false;
+ }
+
+ }
+ else { // update
+
+ BSONElement queryId = origQuery["_id"];
+ if (queryId.eoo() || getGtLtOp(queryId) != BSONObj::Equality) {
+ // need to include original query for $ positional operator
+
+ BSONObjBuilder b;
+ b.append(out["_id"]);
+ BSONObjIterator it(origQuery);
+ while (it.more()) {
+ BSONElement e = it.next();
+ if (strcmp(e.fieldName(), "_id"))
+ b.append(e);
+ }
+ q = Query(b.obj());
+ }
+
+ if (q.isComplex()) // update doesn't work with complex queries
+ q = Query(q.getFilter().getOwned());
+
+ BSONElement update = cmdObj["update"];
+ uassert(12516, "must specify remove or update", !update.eoo());
+ db.update(ns, q, update.embeddedObjectUserCheck());
+
+ BSONObj gle = db.getLastErrorDetailed();
+ result.append("lastErrorObject", gle);
+ if (gle["err"].type() == String) {
+ errmsg = gle["err"].String();
+ return false;
+ }
+
+ if (cmdObj["new"].trueValue())
+ out = db.findOne(ns, QUERY("_id" << out["_id"]), fields);
+ }
+ }
+
+ if (!fieldsHolder.isEmpty() && !fields){
+ // we need to run projection but haven't yet
+ out = projection.transform(out);
+ }
+
+ result.append("value", out);
+
+ return true;
+ }
+ } cmdFindAndModify;
+
+
+}
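A sketch of invoking the command above from server-side C++ through DBDirectClient::runCommand, the same client class the command itself uses; the database, collection, and field names are invented for illustration. Not part of the patch:

    DBDirectClient db;
    BSONObj res;
    db.runCommand( "mydb",
                   BSON( "findAndModify" << "jobs"                        // hypothetical collection
                         << "query"  << BSON( "state" << "ready" )
                         << "sort"   << BSON( "priority" << -1 )
                         << "update" << BSON( "$set" << BSON( "state" << "running" ) )
                         << "new"    << true ),
                   res );
    // res["value"] is the (post-update) document, or null if nothing matched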
diff --git a/db/commands/group.cpp b/db/commands/group.cpp
index 0cc6ab3..d3e5839 100644
--- a/db/commands/group.cpp
+++ b/db/commands/group.cpp
@@ -19,6 +19,8 @@
#include "../commands.h"
#include "../instance.h"
#include "../queryoptimizer.h"
+#include "../../scripting/engine.h"
+#include "../clientcursor.h"
namespace mongo {
@@ -36,13 +38,14 @@ namespace mongo {
if ( func ) {
BSONObjBuilder b( obj.objsize() + 32 );
b.append( "0" , obj );
- int res = s->invoke( func , b.obj() );
+ const BSONObj& key = b.obj();
+ int res = s->invoke( func , &key, 0 );
uassert( 10041 , (string)"invoke failed in $keyf: " + s->getError() , res == 0 );
int type = s->type("return");
uassert( 10042 , "return of $key has to be an object" , type == Object );
return s->getObject( "return" );
}
- return obj.extractFields( keyPattern , true );
+ return obj.extractFields( keyPattern , true ).getOwned();
}
bool group( string realdbname , const string& ns , const BSONObj& query ,
@@ -85,14 +88,28 @@ namespace mongo {
map<BSONObj,int,BSONObjCmp> map;
list<BSONObj> blah;
- shared_ptr<Cursor> cursor = bestGuessCursor(ns.c_str() , query , BSONObj() );
+ shared_ptr<Cursor> cursor = NamespaceDetailsTransient::getCursor(ns.c_str() , query);
+ ClientCursor::CleanupPointer ccPointer;
+ ccPointer.reset( new ClientCursor( QueryOption_NoCursorTimeout, cursor, ns ) );
while ( cursor->ok() ) {
- if ( cursor->matcher() && ! cursor->matcher()->matchesCurrent( cursor.get() ) ) {
+
+ if ( !ccPointer->yieldSometimes( ClientCursor::MaybeCovered ) ||
+ !cursor->ok() ) {
+ break;
+ }
+
+ if ( ( cursor->matcher() && !cursor->matcher()->matchesCurrent( cursor.get() ) ) ||
+ cursor->getsetdup( cursor->currLoc() ) ) {
cursor->advance();
continue;
}
+ if ( !ccPointer->yieldSometimes( ClientCursor::WillNeed ) ||
+ !cursor->ok() ) {
+ break;
+ }
+
BSONObj obj = cursor->current();
cursor->advance();
@@ -110,10 +127,11 @@ namespace mongo {
s->setObject( "obj" , obj , true );
s->setNumber( "n" , n - 1 );
- if ( s->invoke( f , BSONObj() , 0 , true ) ) {
+ if ( s->invoke( f , 0, 0 , 0 , true ) ) {
throw UserException( 9010 , (string)"reduce invoke failed: " + s->getError() );
}
}
+ ccPointer.reset();
if (!finalize.empty()) {
s->exec( "$finalize = " + finalize , "finalize define" , false , true , true , 100 );
@@ -125,7 +143,7 @@ namespace mongo {
" $arr[i] = ret; "
" } "
"}" );
- s->invoke( g , BSONObj() , 0 , true );
+ s->invoke( g , 0, 0 , 0 , true );
}
result.appendArray( "retval" , s->getObject( "$arr" ) );
@@ -137,8 +155,13 @@ namespace mongo {
return true;
}
- bool run(const string& dbname, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
+ bool run(const string& dbname, BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
+ if ( !globalScriptEngine ) {
+ errmsg = "server-side JavaScript execution is disabled";
+ return false;
+ }
+
/* db.$cmd.findOne( { group : <p> } ) */
const BSONObj& p = jsobj.firstElement().embeddedObjectUserCheck();
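The group command above now yields in two phases: MaybeCovered before the match (which may be satisfiable from the index alone) and WillNeed before fetching the full document. A condensed sketch of that loop shape, assuming cursor and ns are set up as in the hunk above; not part of the patch:

    ClientCursor::CleanupPointer ccPointer;
    ccPointer.reset( new ClientCursor( QueryOption_NoCursorTimeout, cursor, ns ) );
    while ( cursor->ok() ) {
        if ( !ccPointer->yieldSometimes( ClientCursor::MaybeCovered ) || !cursor->ok() )
            break;                                    // cursor may have been invalidated by the yield
        if ( ( cursor->matcher() && !cursor->matcher()->matchesCurrent( cursor.get() ) ) ||
             cursor->getsetdup( cursor->currLoc() ) ) {
            cursor->advance();                        // no document fetch needed for a miss or dup
            continue;
        }
        if ( !ccPointer->yieldSometimes( ClientCursor::WillNeed ) || !cursor->ok() )
            break;                                    // about to touch the record, so allow a page-in yield
        BSONObj obj = cursor->current();
        cursor->advance();
        // ... process obj ...
    }
    ccPointer.reset();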
diff --git a/db/commands/isself.cpp b/db/commands/isself.cpp
index b97f51e..5a868de 100644
--- a/db/commands/isself.cpp
+++ b/db/commands/isself.cpp
@@ -1,7 +1,7 @@
// isself.cpp
#include "pch.h"
-#include "../../util/message.h"
+#include "../../util/net/listen.h"
#include "../commands.h"
#include "../../client/dbclient.h"
@@ -11,6 +11,20 @@
# endif
# include <sys/resource.h>
# include <sys/stat.h>
+
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+#include <netinet/in.h>
+#include <netinet/tcp.h>
+#include <arpa/inet.h>
+#include <errno.h>
+#include <netdb.h>
+#ifdef __openbsd__
+# include <sys/uio.h>
+#endif
+
#endif
@@ -116,7 +130,7 @@ namespace mongo {
help << "{ _isSelf : 1 } INTERNAL ONLY";
}
- bool run(const string& dbname, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
+ bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
init();
result.append( "id" , _id );
return true;
diff --git a/db/commands/mr.cpp b/db/commands/mr.cpp
index b9f5b59..56e9770 100644
--- a/db/commands/mr.cpp
+++ b/db/commands/mr.cpp
@@ -26,7 +26,7 @@
#include "../queryoptimizer.h"
#include "../matcher.h"
#include "../clientcursor.h"
-#include "../replpair.h"
+#include "../replutil.h"
#include "../../s/d_chunk_manager.h"
#include "../../s/d_logic.h"
@@ -53,6 +53,9 @@ namespace mongo {
_func = _scope->createFunction( _code.c_str() );
uassert( 13598 , str::stream() << "couldn't compile code for: " << _type , _func );
+
+ // install in JS scope so that it can be called in JS mode
+ _scope->setFunction(_type.c_str(), _code.c_str());
}
void JSMapper::init( State * state ) {
@@ -66,8 +69,7 @@ namespace mongo {
void JSMapper::map( const BSONObj& o ) {
Scope * s = _func.scope();
assert( s );
- s->setThis( &o );
- if ( s->invoke( _func.func() , _params , 0 , true ) )
+ if ( s->invoke( _func.func() , &_params, &o , 0 , true, false, true ) )
throw UserException( 9014, str::stream() << "map invoke failed: " + s->getError() );
}
@@ -79,7 +81,7 @@ namespace mongo {
Scope * s = _func.scope();
Scope::NoDBAccess no = s->disableDBAccess( "can't access db inside finalize" );
- s->invokeSafe( _func.func() , o );
+ s->invokeSafe( _func.func() , &o, 0 );
// don't want to use o.objsize() to size b
// since there are many cases where the point of finalize
@@ -90,6 +92,10 @@ namespace mongo {
return b.obj();
}
+ void JSReducer::init( State * state ) {
+ _func.init( state );
+ }
+
/**
* Reduces a list of tuple objects (key, value) to a single tuple {"0": key, "1": value}
*/
@@ -183,7 +189,8 @@ namespace mongo {
Scope * s = _func.scope();
- s->invokeSafe( _func.func() , args );
+ s->invokeSafe( _func.func() , &args, 0 );
+ ++numReduces;
if ( s->type( "return" ) == Array ) {
uasserted( 10075 , "reduce -> multiple not supported yet");
@@ -214,6 +221,11 @@ namespace mongo {
ns = dbname + "." + cmdObj.firstElement().valuestr();
verbose = cmdObj["verbose"].trueValue();
+ jsMode = cmdObj["jsMode"].trueValue();
+
+ jsMaxKeys = 500000;
+ reduceTriggerRatio = 2.0;
+ maxInMemSize = 5 * 1024 * 1024;
uassert( 13602 , "outType is no longer a valid option" , cmdObj["outType"].eoo() );
@@ -255,7 +267,7 @@ namespace mongo {
}
if ( outType != INMEMORY ) { // setup names
- tempLong = str::stream() << (outDB.empty() ? dbname : outDB) << ".tmp.mr." << cmdObj.firstElement().String() << "_" << finalShort << "_" << JOB_NUMBER++;
+ tempLong = str::stream() << (outDB.empty() ? dbname : outDB) << ".tmp.mr." << cmdObj.firstElement().String() << "_" << JOB_NUMBER++;
incLong = tempLong + "_inc";
@@ -308,10 +320,25 @@ namespace mongo {
if ( ! _onDisk )
return;
- _db.dropCollection( _config.tempLong );
+ if (_config.incLong != _config.tempLong) {
+ // create the inc collection and make sure we have index on "0" key
+ _db.dropCollection( _config.incLong );
+ {
+ writelock l( _config.incLong );
+ Client::Context ctx( _config.incLong );
+ string err;
+ if ( ! userCreateNS( _config.incLong.c_str() , BSON( "autoIndexId" << 0 ) , err , false ) ) {
+ uasserted( 13631 , str::stream() << "userCreateNS failed for mr incLong ns: " << _config.incLong << " err: " << err );
+ }
+ }
+ BSONObj sortKey = BSON( "0" << 1 );
+ _db.ensureIndex( _config.incLong , sortKey );
+ }
+
+ // create temp collection
+ _db.dropCollection( _config.tempLong );
{
- // create
writelock lock( _config.tempLong.c_str() );
Client::Context ctx( _config.tempLong.c_str() );
string errmsg;
@@ -320,7 +347,6 @@ namespace mongo {
}
}
-
{
// copy indexes
auto_ptr<DBClientCursor> idx = _db.getIndexes( _config.finalLong );
@@ -355,6 +381,14 @@ namespace mongo {
if ( _onDisk )
return;
+ if (_jsMode) {
+ ScriptingFunction getResult = _scope->createFunction("var map = _mrMap; var result = []; for (key in map) { result.push({_id: key, value: map[key]}) } return result;");
+ _scope->invoke(getResult, 0, 0, 0, false);
+ BSONObj obj = _scope->getObject("return");
+ final.append("results", BSONArray(obj));
+ return;
+ }
+
uassert( 13604 , "too much data for in memory map/reduce" , _size < ( BSONObjMaxUserSize / 2 ) );
BSONArrayBuilder b( (int)(_size * 1.2) ); // _size is data size, doesn't count overhead and keys
@@ -397,8 +431,10 @@ namespace mongo {
// replace: just rename from temp to final collection name, dropping previous collection
_db.dropCollection( _config.finalLong );
BSONObj info;
- uassert( 10076 , "rename failed" ,
- _db.runCommand( "admin" , BSON( "renameCollection" << _config.tempLong << "to" << _config.finalLong ) , info ) );
+ if ( ! _db.runCommand( "admin" , BSON( "renameCollection" << _config.tempLong << "to" << _config.finalLong ) , info ) ) {
+ uasserted( 10076 , str::stream() << "rename failed: " << info );
+ }
+
_db.dropCollection( _config.tempLong );
}
else if ( _config.outType == Config::MERGE ) {
@@ -447,7 +483,7 @@ namespace mongo {
/**
* Insert doc in collection
*/
- void State::insert( const string& ns , BSONObj& o ) {
+ void State::insert( const string& ns , const BSONObj& o ) {
assert( _onDisk );
writelock l( ns );
@@ -457,6 +493,15 @@ namespace mongo {
}
/**
+ * Insert doc into the inc collection, taking proper lock
+ */
+ void State::insertToInc( BSONObj& o ) {
+ writelock l(_config.incLong);
+ Client::Context ctx(_config.incLong);
+ _insertToInc(o);
+ }
+
+ /**
* Insert doc into the inc collection
*/
void State::_insertToInc( BSONObj& o ) {
@@ -465,7 +510,7 @@ namespace mongo {
getDur().commitIfNeeded();
}
- State::State( const Config& c ) : _config( c ), _size(0), _numEmits(0) {
+ State::State( const Config& c ) : _config( c ), _size(0), _dupCount(0), _numEmits(0) {
_temp.reset( new InMemory() );
_onDisk = _config.outType != Config::INMEMORY;
}
@@ -488,6 +533,12 @@ namespace mongo {
error() << "couldn't cleanup after map reduce: " << e.what() << endl;
}
}
+
+ if (_scope) {
+ // cleanup js objects
+ ScriptingFunction cleanup = _scope->createFunction("delete _emitCt; delete _keyCt; delete _mrMap;");
+ _scope->invoke(cleanup, 0, 0, 0, true);
+ }
}
/**
@@ -505,29 +556,50 @@ namespace mongo {
_config.reducer->init( this );
if ( _config.finalizer )
_config.finalizer->init( this );
+ _scope->setBoolean("_doFinal", _config.finalizer);
+
+ // by default start in JS mode, will be faster for small jobs
+ _jsMode = _config.jsMode;
+// _jsMode = true;
+ switchMode(_jsMode);
+
+ // global JS map/reduce hashmap
+ // we use a standard JS object which means keys are only simple types
+ // we could also add a real hashmap from a library, still we need to add object comparison methods
+// _scope->setObject("_mrMap", BSONObj(), false);
+ ScriptingFunction init = _scope->createFunction("_emitCt = 0; _keyCt = 0; _dupCt = 0; _redCt = 0; if (typeof(_mrMap) === 'undefined') { _mrMap = {}; }");
+ _scope->invoke(init, 0, 0, 0, true);
+
+ // js function to run reduce on all keys
+// redfunc = _scope->createFunction("for (var key in hashmap) { print('Key is ' + key); list = hashmap[key]; ret = reduce(key, list); print('Value is ' + ret); };");
+ _reduceAll = _scope->createFunction("var map = _mrMap; var list, ret; for (var key in map) { list = map[key]; if (list.length != 1) { ret = _reduce(key, list); map[key] = [ret]; ++_redCt; } } _dupCt = 0;");
+ _reduceAndEmit = _scope->createFunction("var map = _mrMap; var list, ret; for (var key in map) { list = map[key]; if (list.length == 1) { ret = list[0]; } else { ret = _reduce(key, list); ++_redCt; } emit(key, ret); }; delete _mrMap;");
+ _reduceAndFinalize = _scope->createFunction("var map = _mrMap; var list, ret; for (var key in map) { list = map[key]; if (list.length == 1) { if (!_doFinal) {continue;} ret = list[0]; } else { ret = _reduce(key, list); ++_redCt; }; if (_doFinal){ ret = _finalize(ret); } map[key] = ret; }");
+ _reduceAndFinalizeAndInsert = _scope->createFunction("var map = _mrMap; var list, ret; for (var key in map) { list = map[key]; if (list.length == 1) { ret = list[0]; } else { ret = _reduce(key, list); ++_redCt; }; if (_doFinal){ ret = _finalize(ret); } _nativeToTemp({_id: key, value: ret}); }");
- _scope->injectNative( "emit" , fast_emit );
-
- if ( _onDisk ) {
- // clear temp collections
- _db.dropCollection( _config.tempLong );
- _db.dropCollection( _config.incLong );
-
- // create the inc collection and make sure we have index on "0" key
- {
- writelock l( _config.incLong );
- Client::Context ctx( _config.incLong );
- string err;
- if ( ! userCreateNS( _config.incLong.c_str() , BSON( "autoIndexId" << 0 ) , err , false ) ) {
- uasserted( 13631 , str::stream() << "userCreateNS failed for mr incLong ns: " << _config.incLong << " err: " << err );
- }
- }
-
- BSONObj sortKey = BSON( "0" << 1 );
- _db.ensureIndex( _config.incLong , sortKey );
+ }
+ void State::switchMode(bool jsMode) {
+ _jsMode = jsMode;
+ if (jsMode) {
+ // emit function that stays in JS
+ _scope->setFunction("emit", "function(key, value) { if (typeof(key) === 'object') { _bailFromJS(key, value); return; }; ++_emitCt; var map = _mrMap; var list = map[key]; if (!list) { ++_keyCt; list = []; map[key] = list; } else { ++_dupCt; } list.push(value); }");
+ _scope->injectNative("_bailFromJS", _bailFromJS, this);
+ } else {
+ // emit now populates C++ map
+ _scope->injectNative( "emit" , fast_emit, this );
}
+ }
+
+ void State::bailFromJS() {
+ log(1) << "M/R: Switching from JS mode to mixed mode" << endl;
+ // reduce and re-emit into C++
+ switchMode(false);
+ _scope->invoke(_reduceAndEmit, 0, 0, 0, true);
+ // need to get the real number emitted so far
+ _numEmits = _scope->getNumberInt("_emitCt");
+ _config.reducer->numReduces = _scope->getNumberInt("_redCt");
}
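
The precompiled helpers above (_reduceAll, _reduceAndEmit, _reduceAndFinalize, _reduceAndFinalizeAndInsert) all walk the in-scope _mrMap object and fold any key whose value list has grown past one entry. A minimal C++ analogue of that fold, with Value, ReduceFn and concatReduce as illustrative stand-ins (the real loop runs inside the JS scope against _mrMap):

#include <iostream>
#include <map>
#include <string>
#include <vector>

typedef std::string Value;
typedef Value (*ReduceFn)(const std::string& key, const std::vector<Value>& values);

// analogue of _reduceAll: collapse every key whose value list holds more than one entry
void reduceAll(std::map<std::string, std::vector<Value> >& mrMap, ReduceFn reduce, long& redCt) {
    for (std::map<std::string, std::vector<Value> >::iterator it = mrMap.begin(); it != mrMap.end(); ++it) {
        std::vector<Value>& list = it->second;
        if (list.size() != 1) {
            Value folded = reduce(it->first, list);
            list.clear();
            list.push_back(folded);   // keep the entry in reduced, single-element form
            ++redCt;
        }
    }
}

Value concatReduce(const std::string&, const std::vector<Value>& values) {
    Value out;
    for (size_t i = 0; i < values.size(); ++i) out += values[i];
    return out;
}

int main() {
    std::map<std::string, std::vector<Value> > mrMap;
    mrMap["k"].push_back("a");
    mrMap["k"].push_back("b");
    long redCt = 0;
    reduceAll(mrMap, concatReduce, redCt);
    std::cout << mrMap["k"][0] << " after " << redCt << " reduce(s)" << std::endl;
    return 0;
}
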
/**
@@ -542,12 +614,40 @@ namespace mongo {
insert( _config.tempLong , res );
}
+ BSONObj _nativeToTemp( const BSONObj& args, void* data ) {
+ State* state = (State*) data;
+ BSONObjIterator it(args);
+ state->insert(state->_config.tempLong, it.next().Obj());
+ return BSONObj();
+ }
+
+// BSONObj _nativeToInc( const BSONObj& args, void* data ) {
+// State* state = (State*) data;
+// BSONObjIterator it(args);
+// const BSONObj& obj = it.next().Obj();
+// state->_insertToInc(const_cast<BSONObj&>(obj));
+// return BSONObj();
+// }
+
/**
* Applies last reduce and finalize.
* After calling this method, the temp collection will be completed.
* If inline, the results will be in the in memory map
*/
void State::finalReduce( CurOp * op , ProgressMeterHolder& pm ) {
+
+ if (_jsMode) {
+ // apply the reduce within JS
+ if (_onDisk) {
+ _scope->injectNative("_nativeToTemp", _nativeToTemp, this);
+ _scope->invoke(_reduceAndFinalizeAndInsert, 0, 0, 0, true);
+ return;
+ } else {
+ _scope->invoke(_reduceAndFinalize, 0, 0, 0, true);
+ return;
+ }
+ }
+
if ( ! _onDisk ) {
// all data has already been reduced, just finalize
if ( _config.finalizer ) {
@@ -619,8 +719,16 @@ namespace mongo {
}
ClientCursor::YieldLock yield (cursor.get());
- // reduce an finalize array
- finalReduce( all );
+
+ try {
+ // reduce and finalize the array
+ finalReduce( all );
+ }
+ catch (...) {
+ yield.relock();
+ cursor.release();
+ throw;
+ }
all.clear();
prev = o;
@@ -656,9 +764,14 @@ namespace mongo {
*/
void State::reduceInMemory() {
+ if (_jsMode) {
+ // in js mode the reduce is applied when writing to collection
+ return;
+ }
+
auto_ptr<InMemory> n( new InMemory() ); // for new data
long nSize = 0;
- long dupCount = 0;
+ _dupCount = 0;
for ( InMemory::iterator i=_temp->begin(); i!=_temp->end(); ++i ) {
BSONObj key = i->first;
@@ -674,20 +787,19 @@ namespace mongo {
}
else {
// add to new map
- _add( n.get() , all[0] , nSize, dupCount );
+ _add( n.get() , all[0] , nSize );
}
}
else if ( all.size() > 1 ) {
// several values, reduce and add to map
BSONObj res = _config.reducer->reduce( all );
- _add( n.get() , res , nSize, dupCount );
+ _add( n.get() , res , nSize );
}
}
// swap maps
_temp.reset( n.release() );
_size = nSize;
- _dupCount = dupCount;
}
/**
@@ -718,57 +830,87 @@ namespace mongo {
*/
void State::emit( const BSONObj& a ) {
_numEmits++;
- _add( _temp.get() , a , _size, _dupCount );
+ _add( _temp.get() , a , _size );
}
- void State::_add( InMemory* im, const BSONObj& a , long& size, long& dupCount ) {
+ void State::_add( InMemory* im, const BSONObj& a , long& size ) {
BSONList& all = (*im)[a];
all.push_back( a );
size += a.objsize() + 16;
if (all.size() > 1)
- ++dupCount;
+ ++_dupCount;
}
/**
* this method checks the size of in memory map and potentially flushes to disk
*/
void State::checkSize() {
- if ( _size < 1024 * 50 )
+ if (_jsMode) {
+ // try to reduce if it is beneficial
+ int dupCt = _scope->getNumberInt("_dupCt");
+ int keyCt = _scope->getNumberInt("_keyCt");
+
+ if (keyCt > _config.jsMaxKeys) {
+ // too many keys for JS, switch to mixed
+ _bailFromJS(BSONObj(), this);
+ // then fall through to check map size
+ } else if (dupCt > (keyCt * _config.reduceTriggerRatio)) {
+ // reduce now to lower mem usage
+ _scope->invoke(_reduceAll, 0, 0, 0, true);
+ return;
+ }
+ }
+
+ if (_jsMode)
return;
+ bool dump = _onDisk && _size > _config.maxInMemSize;
// attempt to reduce in memory map, if we've seen duplicates
- if ( _dupCount > 0) {
+ if ( dump || _dupCount > (_temp->size() * _config.reduceTriggerRatio)) {
long before = _size;
reduceInMemory();
log(1) << " mr: did reduceInMemory " << before << " -->> " << _size << endl;
}
- if ( ! _onDisk || _size < 1024 * 100 )
- return;
-
- dumpToInc();
- log(1) << " mr: dumping to db" << endl;
+ // reevaluate size and potentially dump
+ if ( dump && _size > _config.maxInMemSize) {
+ dumpToInc();
+ log(1) << " mr: dumping to db" << endl;
+ }
}
- boost::thread_specific_ptr<State*> _tl;
-
/**
* emit that will be called by js function
*/
- BSONObj fast_emit( const BSONObj& args ) {
+ BSONObj fast_emit( const BSONObj& args, void* data ) {
uassert( 10077 , "fast_emit takes 2 args" , args.nFields() == 2 );
uassert( 13069 , "an emit can't be more than half max bson size" , args.objsize() < ( BSONObjMaxUserSize / 2 ) );
+ State* state = (State*) data;
if ( args.firstElement().type() == Undefined ) {
BSONObjBuilder b( args.objsize() );
b.appendNull( "" );
BSONObjIterator i( args );
i.next();
b.append( i.next() );
- (*_tl)->emit( b.obj() );
+ state->emit( b.obj() );
}
else {
- (*_tl)->emit( args );
+ state->emit( args );
+ }
+ return BSONObj();
+ }
+
+ /**
+ * function is called when we realize we can't use JS mode for m/r on the 1st key
+ */
+ BSONObj _bailFromJS( const BSONObj& args, void* data ) {
+ State* state = (State*) data;
+ state->bailFromJS();
+
+ // emit this particular key if there is one
+ if (!args.isEmpty()) {
+ fast_emit(args, data);
}
return BSONObj();
}
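
fast_emit and _bailFromJS above now receive a void* user-data pointer that carries the owning State, replacing the previous boost::thread_specific_ptr lookup. A rough sketch of that callback-with-context pattern, with State, NativeFn and the direct call in main() standing in for the real scripting-engine plumbing:

#include <iostream>
#include <string>

struct State {
    long long numEmits;
    void emit(const std::string& doc) { ++numEmits; std::cout << "emit " << doc << std::endl; }
};

// signature shape of a native callback: arguments plus an opaque context pointer
typedef std::string (*NativeFn)(const std::string& args, void* data);

std::string fastEmit(const std::string& args, void* data) {
    State* state = static_cast<State*>(data);   // recover the owning State, no thread-local needed
    state->emit(args);
    return std::string();
}

int main() {
    State s;
    s.numEmits = 0;
    NativeFn fn = fastEmit;
    // the scripting engine would store (fn, &s) and invoke it from JS; call it directly here
    fn("{_id: 1, value: 2}", &s);
    return s.numEmits == 1 ? 0 : 1;
}
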
@@ -788,7 +930,7 @@ namespace mongo {
help << "http://www.mongodb.org/display/DOCS/MapReduce";
}
virtual LockType locktype() const { return NONE; }
- bool run(const string& dbname , BSONObj& cmd, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
+ bool run(const string& dbname , BSONObj& cmd, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
Timer t;
Client::GodScope cg;
Client& client = cc();
@@ -806,7 +948,6 @@ namespace mongo {
BSONObjBuilder countsBuilder;
BSONObjBuilder timingBuilder;
State state( config );
-
if ( ! state.sourceExists() ) {
errmsg = "ns doesn't exist";
return false;
@@ -823,12 +964,7 @@ namespace mongo {
try {
state.init();
-
- {
- State** s = new State*();
- s[0] = &state;
- _tl.reset( s );
- }
+ state.prepTempCollection();
wassert( config.limit < 0x4000000 ); // see case on next line to 32 bit unsigned
ProgressMeterHolder pm( op->setMessage( "m/r: (1/3) emit phase" , state.incomingDocuments() ) );
@@ -843,23 +979,26 @@ namespace mongo {
}
// obtain cursor on data to apply mr to, sorted
- shared_ptr<Cursor> temp = bestGuessCursor( config.ns.c_str(), config.filter, config.sort );
+ shared_ptr<Cursor> temp = NamespaceDetailsTransient::getCursor( config.ns.c_str(), config.filter, config.sort );
+ uassert( 15876, str::stream() << "could not create cursor over " << config.ns << " for query : " << config.filter << " sort : " << config.sort, temp.get() );
auto_ptr<ClientCursor> cursor( new ClientCursor( QueryOption_NoCursorTimeout , temp , config.ns.c_str() ) );
+ uassert( 15877, str::stream() << "could not create client cursor over " << config.ns << " for query : " << config.filter << " sort : " << config.sort, cursor.get() );
Timer mt;
// go through each doc
while ( cursor->ok() ) {
- // make sure we dont process duplicates in case data gets moved around during map
- if ( cursor->currentIsDup() ) {
+ if ( ! cursor->currentMatches() ) {
cursor->advance();
continue;
}
- if ( ! cursor->currentMatches() ) {
+ // make sure we don't process duplicates in case data gets moved around during map
+ // TODO This won't actually help when data gets moved, it's to handle multikeys.
+ if ( cursor->currentIsDup() ) {
cursor->advance();
continue;
}
-
+
BSONObj o = cursor->current();
cursor->advance();
@@ -874,7 +1013,7 @@ namespace mongo {
if ( config.verbose ) mapTime += mt.micros();
num++;
- if ( num % 100 == 0 ) {
+ if ( num % 1000 == 0 ) {
// try to yield lock regularly
ClientCursor::YieldLock yield (cursor.get());
Timer t;
@@ -908,19 +1047,31 @@ namespace mongo {
timingBuilder.append( "emitLoop" , t.millis() );
op->setMessage( "m/r: (2/3) final reduce in memory" );
+ Timer t;
// do reduce in memory
// this will be the last reduce needed for inline mode
state.reduceInMemory();
// if not inline: dump the in memory map to inc collection, all data is on disk
state.dumpToInc();
- state.prepTempCollection();
// final reduce
state.finalReduce( op , pm );
-
- _tl.reset();
+ inReduce += t.micros();
+ countsBuilder.appendNumber( "reduce" , state.numReduces() );
+ timingBuilder.append( "reduceTime" , inReduce / 1000 );
+ timingBuilder.append( "mode" , state.jsMode() ? "js" : "mixed" );
+ }
+ // TODO: The error handling code for queries is very fragile;
+ // it *requires* rethrowing AssertionExceptions - should probably fix.
+ catch ( AssertionException& e ){
+ log() << "mr failed, removing collection" << causedBy(e) << endl;
+ throw e;
+ }
+ catch ( std::exception& e ){
+ log() << "mr failed, removing collection" << causedBy(e) << endl;
+ throw e;
}
catch ( ... ) {
- log() << "mr failed, removing collection" << endl;
+ log() << "mr failed for unknown reason, removing collection" << endl;
throw;
}
@@ -967,113 +1118,127 @@ namespace mongo {
virtual bool slaveOverrideOk() { return true; }
virtual LockType locktype() const { return NONE; }
- bool run(const string& dbname , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ bool run(const string& dbname , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
string shardedOutputCollection = cmdObj["shardedOutputCollection"].valuestrsafe();
+ string postProcessCollection = cmdObj["postProcessCollection"].valuestrsafe();
+ bool postProcessOnly = !(postProcessCollection.empty());
Config config( dbname , cmdObj.firstElement().embeddedObjectUserCheck() );
+ State state(config);
+ state.init();
+ if (postProcessOnly) {
+ // the temp collection has been decided by mongos
+ config.tempLong = dbname + "." + postProcessCollection;
+ }
+ // no need for incremental collection because records are already sorted
config.incLong = config.tempLong;
- set<ServerAndQuery> servers;
-
- BSONObjBuilder shardCounts;
- map<string,long long> counts;
-
BSONObj shards = cmdObj["shards"].embeddedObjectUserCheck();
- vector< auto_ptr<DBClientCursor> > shardCursors;
-
- {
- // parse per shard results
- BSONObjIterator i( shards );
- while ( i.more() ) {
- BSONElement e = i.next();
- string shard = e.fieldName();
-
- BSONObj res = e.embeddedObjectUserCheck();
-
- uassert( 10078 , "something bad happened" , shardedOutputCollection == res["result"].valuestrsafe() );
- servers.insert( shard );
- shardCounts.appendAs( res["counts"] , shard );
-
- BSONObjIterator j( res["counts"].embeddedObjectUserCheck() );
- while ( j.more() ) {
- BSONElement temp = j.next();
- counts[temp.fieldName()] += temp.numberLong();
- }
+ BSONObj shardCounts = cmdObj["shardCounts"].embeddedObjectUserCheck();
+ BSONObj counts = cmdObj["counts"].embeddedObjectUserCheck();
+ if (postProcessOnly) {
+ if (!state._db.exists(config.tempLong)) {
+ // nothing to do
+ return 1;
}
+ } else {
+ set<ServerAndQuery> servers;
+ vector< auto_ptr<DBClientCursor> > shardCursors;
- }
-
- State state(config);
- state.prepTempCollection();
+ {
+ // parse per shard results
+ BSONObjIterator i( shards );
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ string shard = e.fieldName();
- {
- // reduce from each stream
+ BSONObj res = e.embeddedObjectUserCheck();
- BSONObj sortKey = BSON( "_id" << 1 );
+ uassert( 10078 , "something bad happened" , shardedOutputCollection == res["result"].valuestrsafe() );
+ servers.insert( shard );
- ParallelSortClusteredCursor cursor( servers , dbname + "." + shardedOutputCollection ,
- Query().sort( sortKey ) );
- cursor.init();
- state.init();
+ }
- BSONList values;
- if (!config.outDB.empty()) {
- BSONObjBuilder loc;
- if ( !config.outDB.empty())
- loc.append( "db" , config.outDB );
- if ( !config.finalShort.empty() )
- loc.append( "collection" , config.finalShort );
- result.append("result", loc.obj());
- }
- else {
- if ( !config.finalShort.empty() )
- result.append( "result" , config.finalShort );
}
- while ( cursor.more() ) {
- BSONObj t = cursor.next().getOwned();
+ state.prepTempCollection();
- if ( values.size() == 0 ) {
- values.push_back( t );
- continue;
+ {
+ // reduce from each stream
+
+ BSONObj sortKey = BSON( "_id" << 1 );
+
+ ParallelSortClusteredCursor cursor( servers , dbname + "." + shardedOutputCollection ,
+ Query().sort( sortKey ) );
+ cursor.init();
+
+ BSONList values;
+ if (!config.outDB.empty()) {
+ BSONObjBuilder loc;
+ if ( !config.outDB.empty())
+ loc.append( "db" , config.outDB );
+ if ( !config.finalShort.empty() )
+ loc.append( "collection" , config.finalShort );
+ result.append("result", loc.obj());
}
-
- if ( t.woSortOrder( *(values.begin()) , sortKey ) == 0 ) {
- values.push_back( t );
- continue;
+ else {
+ if ( !config.finalShort.empty() )
+ result.append( "result" , config.finalShort );
}
+ while ( cursor.more() || !values.empty() ) {
+ BSONObj t;
+ if (cursor.more()) {
+ t = cursor.next().getOwned();
- state.emit( config.reducer->finalReduce( values , config.finalizer.get() ) );
- values.clear();
- values.push_back( t );
- }
+ if ( values.size() == 0 ) {
+ values.push_back( t );
+ continue;
+ }
- if ( values.size() )
- state.emit( config.reducer->finalReduce( values , config.finalizer.get() ) );
- }
+ if ( t.woSortOrder( *(values.begin()) , sortKey ) == 0 ) {
+ values.push_back( t );
+ continue;
+ }
+ }
+ BSONObj res = config.reducer->finalReduce( values , config.finalizer.get());
+ if (state.isOnDisk())
+ state.insertToInc(res);
+ else
+ state.emit(res);
+ values.clear();
+ if (!t.isEmpty())
+ values.push_back( t );
+ }
+ }
- state.dumpToInc();
- state.postProcessCollection();
- state.appendResults( result );
+ for ( set<ServerAndQuery>::iterator i=servers.begin(); i!=servers.end(); i++ ) {
+ ScopedDbConnection conn( i->_server );
+ conn->dropCollection( dbname + "." + shardedOutputCollection );
+ conn.done();
+ }
- for ( set<ServerAndQuery>::iterator i=servers.begin(); i!=servers.end(); i++ ) {
- ScopedDbConnection conn( i->_server );
- conn->dropCollection( dbname + "." + shardedOutputCollection );
- conn.done();
+ result.append( "shardCounts" , shardCounts );
}
- result.append( "shardCounts" , shardCounts.obj() );
+ long long finalCount = state.postProcessCollection();
+ state.appendResults( result );
- {
- BSONObjBuilder c;
- for ( map<string,long long>::iterator i=counts.begin(); i!=counts.end(); i++ ) {
- c.append( i->first , i->second );
- }
- result.append( "counts" , c.obj() );
+ // fix the global counts
+ BSONObjBuilder countsB(32);
+ BSONObjIterator j(counts);
+ while (j.more()) {
+ BSONElement elmt = j.next();
+ if (!strcmp(elmt.fieldName(), "reduce"))
+ countsB.append("reduce", elmt.numberLong() + state.numReduces());
+ else if (!strcmp(elmt.fieldName(), "output"))
+ countsB.append("output", finalCount);
+ else
+ countsB.append(elmt);
}
+ result.append( "counts" , countsB.obj() );
return 1;
}
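
In the sharded finalReduce loop above, each shard's output is already sorted by _id, so equal keys arrive consecutively; the code accumulates values until the key changes, reduces the finished group, and either inserts it or re-emits it. A self-contained sketch of that grouping pattern (KV and reduceGroup are hypothetical; the real code pulls from a ParallelSortClusteredCursor and calls the configured reducer/finalizer):

#include <iostream>
#include <string>
#include <vector>

struct KV { std::string key; int value; };

int reduceGroup(const std::vector<KV>& group) {
    int sum = 0;
    for (size_t i = 0; i < group.size(); ++i) sum += group[i].value;
    return sum;
}

int main() {
    // stands in for the merged, key-sorted stream of per-shard results
    std::vector<KV> sorted;
    KV a = { "a", 1 }, b = { "a", 2 }, c = { "b", 5 };
    sorted.push_back(a); sorted.push_back(b); sorted.push_back(c);

    std::vector<KV> values;   // current group of equal keys
    for (size_t i = 0; i <= sorted.size(); ++i) {
        bool more = i < sorted.size();
        if (more && (values.empty() || sorted[i].key == values.front().key)) {
            values.push_back(sorted[i]);   // same key: keep accumulating
            continue;
        }
        if (!values.empty()) {
            // key changed (or stream ended): reduce the finished group
            std::cout << values.front().key << " -> " << reduceGroup(values) << std::endl;
            values.clear();
        }
        if (more) values.push_back(sorted[i]);
    }
    return 0;
}
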
diff --git a/db/commands/mr.h b/db/commands/mr.h
index f505a45..3fa8146 100644
--- a/db/commands/mr.h
+++ b/db/commands/mr.h
@@ -50,12 +50,15 @@ namespace mongo {
class Reducer : boost::noncopyable {
public:
+ Reducer() : numReduces(0) {}
virtual ~Reducer() {}
virtual void init( State * state ) = 0;
virtual BSONObj reduce( const BSONList& tuples ) = 0;
/** this means its a final reduce, even if there is no finalizer */
virtual BSONObj finalReduce( const BSONList& tuples , Finalizer * finalizer ) = 0;
+
+ long long numReduces;
};
// ------------ js function implementations -----------
@@ -88,7 +91,7 @@ namespace mongo {
class JSMapper : public Mapper {
public:
- JSMapper( const BSONElement & code ) : _func( "map" , code ) {}
+ JSMapper( const BSONElement & code ) : _func( "_map" , code ) {}
virtual void map( const BSONObj& o );
virtual void init( State * state );
@@ -99,8 +102,8 @@ namespace mongo {
class JSReducer : public Reducer {
public:
- JSReducer( const BSONElement& code ) : _func( "reduce" , code ) {}
- virtual void init( State * state ) { _func.init( state ); }
+ JSReducer( const BSONElement& code ) : _func( "_reduce" , code ) {}
+ virtual void init( State * state );
virtual BSONObj reduce( const BSONList& tuples );
virtual BSONObj finalReduce( const BSONList& tuples , Finalizer * finalizer );
@@ -115,12 +118,11 @@ namespace mongo {
void _reduce( const BSONList& values , BSONObj& key , int& endSizeEstimate );
JSFunction _func;
-
};
class JSFinalizer : public Finalizer {
public:
- JSFinalizer( const BSONElement& code ) : _func( "finalize" , code ) {}
+ JSFinalizer( const BSONElement& code ) : _func( "_finalize" , code ) {}
virtual BSONObj finalize( const BSONObj& o );
virtual void init( State * state ) { _func.init( state ); }
private:
@@ -153,6 +155,7 @@ namespace mongo {
// options
bool verbose;
+ bool jsMode;
// query options
@@ -178,6 +181,13 @@ namespace mongo {
string outDB;
+ // max number of keys allowed in JS map before switching mode
+ long jsMaxKeys;
+ // ratio of duplicates vs unique keys before reduce is triggered in js mode
+ float reduceTriggerRatio;
+ // maximum size of map before it gets dumped to disk
+ long maxInMemSize;
+
enum { REPLACE , // atomically replace the collection
MERGE , // merge keys, override dups
REDUCE , // merge keys, reduce dups
@@ -225,6 +235,8 @@ namespace mongo {
* transfers in memory storage to temp collection
*/
void dumpToInc();
+ void insertToInc( BSONObj& o );
+ void _insertToInc( BSONObj& o );
// ------ reduce stage -----------
@@ -252,7 +264,7 @@ namespace mongo {
/**
* inserts with correct replication semantics
*/
- void insert( const string& ns , BSONObj& o );
+ void insert( const string& ns , const BSONObj& o );
// ------ simple accessors -----
@@ -263,27 +275,38 @@ namespace mongo {
const bool isOnDisk() { return _onDisk; }
- long long numEmits() const { return _numEmits; }
+ long long numEmits() const { if (_jsMode) return _scope->getNumberLongLong("_emitCt"); return _numEmits; }
+ long long numReduces() const { if (_jsMode) return _scope->getNumberLongLong("_redCt"); return _config.reducer->numReduces; }
+
+ bool jsMode() {return _jsMode;}
+ void switchMode(bool jsMode);
+ void bailFromJS();
+
+ const Config& _config;
+ DBDirectClient _db;
protected:
- void _insertToInc( BSONObj& o );
- static void _add( InMemory* im , const BSONObj& a , long& size, long& dupCount );
+ void _add( InMemory* im , const BSONObj& a , long& size );
scoped_ptr<Scope> _scope;
- const Config& _config;
bool _onDisk; // if the end result of this map reduce is disk or not
- DBDirectClient _db;
-
scoped_ptr<InMemory> _temp;
long _size; // bytes in _temp
long _dupCount; // number of duplicate key entries
long long _numEmits;
+
+ bool _jsMode;
+ ScriptingFunction _reduceAll;
+ ScriptingFunction _reduceAndEmit;
+ ScriptingFunction _reduceAndFinalize;
+ ScriptingFunction _reduceAndFinalizeAndInsert;
};
- BSONObj fast_emit( const BSONObj& args );
+ BSONObj fast_emit( const BSONObj& args, void* data );
+ BSONObj _bailFromJS( const BSONObj& args, void* data );
} // end mr namespace
}
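
The three new Config fields above (jsMaxKeys, reduceTriggerRatio, maxInMemSize) are the knobs State::checkSize() consults when deciding whether to stay in JS mode, reduce in place, or spill. The interplay in the real code is more involved (the disk dump only applies once out of JS mode); the following is a simplified, assumption-laden illustration with made-up numbers:

#include <iostream>

enum Decision { STAY, REDUCE_IN_PLACE, SWITCH_TO_MIXED, DUMP_TO_DISK };

Decision checkSizeDecision(long keyCt, long dupCt, long mapBytes,
                           long jsMaxKeys, float reduceTriggerRatio, long maxInMemSize) {
    if (keyCt > jsMaxKeys)                  return SWITCH_TO_MIXED;   // too many keys for a plain JS object
    if (dupCt > keyCt * reduceTriggerRatio) return REDUCE_IN_PLACE;   // enough dups to make a reduce pay off
    if (mapBytes > maxInMemSize)            return DUMP_TO_DISK;      // spill to the inc collection
    return STAY;
}

int main() {
    Decision d = checkSizeDecision(400000, 100, 8 << 20,     // current counters
                                   500000, 10.0f, 64 << 20); // configured limits
    std::cout << (d == STAY ? "stay in js" : "change mode") << std::endl;
    return 0;
}
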
diff --git a/db/common.cpp b/db/common.cpp
index 44bc54d..0f82bef 100644
--- a/db/common.cpp
+++ b/db/common.cpp
@@ -1,4 +1,5 @@
-// common.cpp
+// @file common.cpp
+
/*
* Copyright (C) 2010 10gen Inc.
*
@@ -17,17 +18,51 @@
#include "pch.h"
#include "concurrency.h"
+#include "jsobjmanipulator.h"
/**
* this just has globals
*/
namespace mongo {
+ /** called by mongos, mongod, test. do not call from clients and such.
+ invoked before just about everything except global var construction.
+ */
+ void doPreServerStatupInits() {
+ }
+
/* we use new here so we don't have to worry about destructor orders at program shutdown */
- MongoMutex &dbMutex( *(new MongoMutex("rw:dbMutex")) );
+ MongoMutex &dbMutex( *(new MongoMutex("dbMutex")) );
MongoMutex::MongoMutex(const char *name) : _m(name) {
+ static int n = 0;
+ assert( ++n == 1 ); // in releasingWriteLock below we assume MongoMutex is a singleton and use the dbMutex ref above
_remapPrivateViewRequested = false;
}
+ // OpTime::now() uses dbMutex, thus it is in this file not in the cpp files used by drivers and such
+ void BSONElementManipulator::initTimestamp() {
+ massert( 10332 , "Expected CurrentTime type", _element.type() == Timestamp );
+ unsigned long long &timestamp = *( reinterpret_cast< unsigned long long* >( value() ) );
+ if ( timestamp == 0 )
+ timestamp = OpTime::now().asDate();
+ }
+
+ NOINLINE_DECL OpTime OpTime::skewed() {
+ bool toLog = false;
+ ONCE toLog = true;
+ RARELY toLog = true;
+ last.i++;
+ if ( last.i & 0x80000000 )
+ toLog = true;
+ if ( toLog ) {
+ log() << "clock skew detected prev: " << last.secs << " now: " << (unsigned) time(0) << endl;
+ }
+ if ( last.i & 0x80000000 ) {
+ log() << "error large clock skew detected, shutting down" << endl;
+ throw ClockSkewException();
+ }
+ return last;
+ }
+
}
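
OpTime::skewed() above handles a wall clock that fails to advance by reusing the previous seconds value and bumping the low-order counter, then giving up once that counter grows implausibly large. A hedged sketch of the same idea with a simplified SimpleOpTime (the real type packs seconds and counter into a single 64-bit value and logs through the server's logger):

#include <ctime>
#include <iostream>
#include <stdexcept>

struct SimpleOpTime {
    unsigned secs;
    unsigned i;   // increment within the same second
};

SimpleOpTime nextOpTime(SimpleOpTime& last) {
    unsigned now = (unsigned) time(0);
    if (now > last.secs) {
        last.secs = now;   // normal case: the clock moved forward
        last.i = 1;
        return last;
    }
    // clock stalled or went backwards: keep the old seconds, bump the counter
    last.i++;
    if (last.i & 0x80000000u) {
        std::cerr << "large clock skew detected prev: " << last.secs << " now: " << now << std::endl;
        throw std::runtime_error("clock skew");
    }
    return last;
}

int main() {
    SimpleOpTime last = { (unsigned) time(0), 1 };
    SimpleOpTime t = nextOpTime(last);
    std::cout << t.secs << ":" << t.i << std::endl;
    return 0;
}
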
diff --git a/db/compact.cpp b/db/compact.cpp
index 6bafd91..c6e5f77 100644
--- a/db/compact.cpp
+++ b/db/compact.cpp
@@ -1,4 +1,4 @@
-/* @file compact.cpp
+/** @file compact.cpp
compaction of deleted space in pdfiles (datafiles)
*/
@@ -25,174 +25,273 @@
#include "concurrency.h"
#include "commands.h"
#include "curop-inl.h"
+#include "background.h"
+#include "extsort.h"
+#include "compact.h"
#include "../util/concurrency/task.h"
namespace mongo {
- class CompactJob : public task::Task {
- public:
- CompactJob(string ns) : _ns(ns) { }
- private:
- virtual string name() const { return "compact"; }
- virtual void doWork();
- NamespaceDetails * beginBlock();
- void doBatch();
- void prep();
- const string _ns;
- unsigned long long _nrecords;
- unsigned long long _ncompacted;
- DiskLoc _firstExtent;
- };
+ char faux;
- // lock & set context first. this checks that collection still exists, and that it hasn't
- // morphed into a capped collection between locks (which is possible)
- NamespaceDetails * CompactJob::beginBlock() {
- NamespaceDetails *nsd = nsdetails(_ns.c_str());
- if( nsd == 0 ) throw "ns no longer present";
- if( nsd->firstExtent.isNull() )
- throw "no first extent";
- if( nsd->capped )
- throw "capped collection";
- return nsd;
- }
+ void addRecordToRecListInExtent(Record *r, DiskLoc loc);
+ DiskLoc allocateSpaceForANewRecord(const char *ns, NamespaceDetails *d, int lenWHdr, bool god);
+ void freeExtents(DiskLoc firstExt, DiskLoc lastExt);
+
+ /** @return number of skipped (invalid) documents */
+ unsigned compactExtent(const char *ns, NamespaceDetails *d, const DiskLoc ext, int n,
+ const scoped_array<IndexSpec> &indexSpecs,
+ scoped_array<SortPhaseOne>& phase1, int nidx, bool validate)
+ {
+ log() << "compact extent #" << n << endl;
+
+ Extent *e = ext.ext();
+ e->assertOk();
+ assert( e->validates() );
+ unsigned skipped = 0;
- void CompactJob::doBatch() {
- unsigned n = 0;
{
- /* pre-touch records in a read lock so that paging happens in read not write lock.
- note we are only touching the records though; if indexes aren't in RAM, they will
- page later. So the concept is only partial.
- */
- readlock lk;
+ // the next/prev pointers within the extent might not be in order so we first page the whole thing in
+ // sequentially
+ log() << "compact paging in len=" << e->length/1000000.0 << "MB" << endl;
Timer t;
- Client::Context ctx(_ns);
- NamespaceDetails *nsd = beginBlock();
- if( nsd->firstExtent != _firstExtent ) {
- // TEMP DEV - stop after 1st extent
- throw "change of first extent";
- }
- DiskLoc loc = nsd->firstExtent.ext()->firstRecord;
- while( !loc.isNull() ) {
- Record *r = loc.rec();
- loc = r->getNext(loc);
- if( ++n >= 100 || (n % 8 == 0 && t.millis() > 50) )
- break;
+ MAdvise adv(e, e->length, MAdvise::Sequential);
+ const char *p = (const char *) e;
+ for( int i = 0; i < e->length; i += 4096 ) {
+ faux += *p;
}
+ int ms = t.millis();
+ if( ms > 1000 )
+ log() << "compact end paging in " << ms << "ms " << e->length/1000000.0/ms << "MB/sec" << endl;
}
+
{
- writelock lk;
- Client::Context ctx(_ns);
- NamespaceDetails *nsd = beginBlock();
- for( unsigned i = 0; i < n; i++ ) {
- if( nsd->firstExtent != _firstExtent ) {
- // TEMP DEV - stop after 1st extent
- throw "change of first extent (or it is now null)";
+ log() << "compact copying records" << endl;
+ unsigned totalSize = 0;
+ int nrecs = 0;
+ DiskLoc L = e->firstRecord;
+ if( !L.isNull() )
+ while( 1 ) {
+ Record *recOld = L.rec();
+ L = recOld->nextInExtent(L);
+ nrecs++;
+ BSONObj objOld(recOld);
+
+ if( !validate || objOld.valid() ) {
+ unsigned sz = objOld.objsize();
+ unsigned lenWHdr = sz + Record::HeaderSize;
+ totalSize += lenWHdr;
+ DiskLoc extentLoc;
+ DiskLoc loc = allocateSpaceForANewRecord(ns, d, lenWHdr, false);
+ uassert(14024, "compact error out of space during compaction", !loc.isNull());
+ Record *recNew = loc.rec();
+ recNew = (Record *) getDur().writingPtr(recNew, lenWHdr);
+ addRecordToRecListInExtent(recNew, loc);
+ memcpy(recNew->data, objOld.objdata(), sz);
+
+ {
+ // extract keys for all indexes we will be rebuilding
+ for( int x = 0; x < nidx; x++ ) {
+ phase1[x].addKeys(indexSpecs[x], objOld, loc);
+ }
+ }
}
- DiskLoc loc = nsd->firstExtent.ext()->firstRecord;
- Record *rec = loc.rec();
- BSONObj o = loc.obj().getOwned(); // todo: inefficient, double mem copy...
- try {
- theDataFileMgr.deleteRecord(_ns.c_str(), rec, loc, false);
+ else {
+ if( ++skipped <= 10 )
+ log() << "compact skipping invalid object" << endl;
}
- catch(DBException&) { throw "error deleting record"; }
- try {
- theDataFileMgr.insertNoReturnVal(_ns.c_str(), o);
+
+ if( L.isNull() ) {
+ // we just did the very last record from the old extent. it's still pointed to
+ // by the old extent ext, but that will be fixed below after this loop
+ break;
}
- catch(DBException&) {
- /* todo: save the record somehow??? try again with 'avoid' logic? */
- log() << "compact: error re-inserting record ns:" << _ns << " n:" << _nrecords << " _id:" << o["_id"].toString() << endl;
- throw "error re-inserting record";
+
+ // remove the old records (orphan them) periodically so our commit block doesn't get too large
+ bool stopping = false;
+ RARELY stopping = *killCurrentOp.checkForInterruptNoAssert(false) != 0;
+ if( stopping || getDur().aCommitIsNeeded() ) {
+ e->firstRecord.writing() = L;
+ Record *r = L.rec();
+ getDur().writingInt(r->prevOfs) = DiskLoc::NullOfs;
+ getDur().commitIfNeeded();
+ killCurrentOp.checkForInterrupt(false);
}
- ++_ncompacted;
- if( killCurrentOp.globalInterruptCheck() )
- throw "interrupted";
}
+
+ assert( d->firstExtent == ext );
+ assert( d->lastExtent != ext );
+ DiskLoc newFirst = e->xnext;
+ d->firstExtent.writing() = newFirst;
+ newFirst.ext()->xprev.writing().Null();
+ getDur().writing(e)->markEmpty();
+ freeExtents(ext,ext);
+ getDur().commitIfNeeded();
+
+ log() << "compact " << nrecs << " documents " << totalSize/1000000.0 << "MB" << endl;
}
- }
- void CompactJob::prep() {
- readlock lk;
- Client::Context ctx(_ns);
- NamespaceDetails *nsd = beginBlock();
- DiskLoc L = nsd->firstExtent;
- assert( !L.isNull() );
- _firstExtent = L;
- _nrecords = nsd->stats.nrecords;
- _ncompacted = 0;
+ return skipped;
}
- static mutex m("compact");
- static volatile bool running;
-
- void CompactJob::doWork() {
- Client::initThread("compact");
- cc().curop()->reset();
- cc().curop()->setNS(_ns.c_str());
- cc().curop()->markCommand();
- sleepsecs(60);
- try {
- prep();
- while( _ncompacted < _nrecords )
- doBatch();
+ extern SortPhaseOne *precalced;
+
+ bool _compact(const char *ns, NamespaceDetails *d, string& errmsg, bool validate, BSONObjBuilder& result) {
+ //int les = d->lastExtentSize;
+
+ // this is a big job, so might as well make things tidy before we start just to be nice.
+ getDur().commitNow();
+
+ list<DiskLoc> extents;
+ for( DiskLoc L = d->firstExtent; !L.isNull(); L = L.ext()->xnext )
+ extents.push_back(L);
+ log() << "compact " << extents.size() << " extents" << endl;
+
+ ProgressMeterHolder pm( cc().curop()->setMessage( "compact extent" , extents.size() ) );
+
+ // same data, but might perform a little differently after compact?
+ NamespaceDetailsTransient::get_w(ns).clearQueryCache();
+
+ int nidx = d->nIndexes;
+ scoped_array<IndexSpec> indexSpecs( new IndexSpec[nidx] );
+ scoped_array<SortPhaseOne> phase1( new SortPhaseOne[nidx] );
+ {
+ NamespaceDetails::IndexIterator ii = d->ii();
+ int x = 0;
+ while( ii.more() ) {
+ BSONObjBuilder b;
+ IndexDetails& idx = ii.next();
+ BSONObj::iterator i(idx.info.obj());
+ while( i.more() ) {
+ BSONElement e = i.next();
+ if( !str::equals(e.fieldName(), "v") && !str::equals(e.fieldName(), "background") ) {
+ b.append(e);
+ }
+ }
+ BSONObj o = b.obj().getOwned();
+ phase1[x].sorter.reset( new BSONObjExternalSorter( idx.idxInterface(), o.getObjectField("key") ) );
+ phase1[x].sorter->hintNumObjects( d->stats.nrecords );
+ indexSpecs[x++].reset(o);
+ }
}
- catch(const char *p) {
- log() << "info: exception compact " << p << endl;
+
+ log() << "compact orphan deleted lists" << endl;
+ for( int i = 0; i < Buckets; i++ ) {
+ d->deletedList[i].writing().Null();
}
- catch(...) {
- log() << "info: exception compact" << endl;
+
+ // before dropping indexes, at least make sure we can allocate one extent!
+ uassert(14025, "compact error no space available to allocate", !allocateSpaceForANewRecord(ns, d, Record::HeaderSize+1, false).isNull());
+
+ // note that the drop indexes call also invalidates all clientcursors for the namespace, which is important and wanted here
+ log() << "compact dropping indexes" << endl;
+ BSONObjBuilder b;
+ if( !dropIndexes(d, ns, "*", errmsg, b, true) ) {
+ errmsg = "compact drop indexes failed";
+ log() << errmsg << endl;
+ return false;
}
- mongo::running = false;
- cc().shutdown();
- }
- /* --- CompactCmd --- */
+ getDur().commitNow();
- class CompactCmd : public Command {
- public:
- virtual bool run(const string& db, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
- string coll = cmdObj.firstElement().valuestr();
- if( coll.empty() || db.empty() ) {
- errmsg = "no collection name specified";
- return false;
+ long long skipped = 0;
+ int n = 0;
+ for( list<DiskLoc>::iterator i = extents.begin(); i != extents.end(); i++ ) {
+ skipped += compactExtent(ns, d, *i, n++, indexSpecs, phase1, nidx, validate);
+ pm.hit();
+ }
+
+ if( skipped ) {
+ result.append("invalidObjects", skipped);
+ }
+
+ assert( d->firstExtent.ext()->xprev.isNull() );
+
+ // indexes will do their own progress meter?
+ pm.finished();
+
+ // build indexes
+ NamespaceString s(ns);
+ string si = s.db + ".system.indexes";
+ for( int i = 0; i < nidx; i++ ) {
+ killCurrentOp.checkForInterrupt(false);
+ BSONObj info = indexSpecs[i].info;
+ log() << "compact create index " << info["key"].Obj().toString() << endl;
+ try {
+ precalced = &phase1[i];
+ theDataFileMgr.insert(si.c_str(), info.objdata(), info.objsize());
}
- string ns = db + '.' + coll;
- assert( isANormalNSName(ns.c_str()) );
- {
- readlock lk;
- Client::Context ctx(ns);
- if( nsdetails(ns.c_str()) == 0 ) {
- errmsg = "namespace " + ns + " does not exist";
- return false;
- }
+ catch(...) {
+ precalced = 0;
+ throw;
}
- {
- scoped_lock lk(m);
- if( running ) {
- errmsg = "a compaction is already running";
- return false;
- }
- running = true;
- task::fork( new CompactJob(ns) );
- return true;
+ precalced = 0;
+ }
+
+ return true;
+ }
+
+ bool compact(const string& ns, string &errmsg, bool validate, BSONObjBuilder& result) {
+ massert( 14028, "bad ns", NamespaceString::normal(ns.c_str()) );
+ massert( 14027, "can't compact a system namespace", !str::contains(ns, ".system.") ); // items in system.indexes cannot be moved; there are pointers to those disklocs in NamespaceDetails
+
+ bool ok;
+ {
+ writelock lk;
+ BackgroundOperation::assertNoBgOpInProgForNs(ns.c_str());
+ Client::Context ctx(ns);
+ NamespaceDetails *d = nsdetails(ns.c_str());
+ massert( 13660, str::stream() << "namespace " << ns << " does not exist", d );
+ massert( 13661, "cannot compact capped collection", !d->capped );
+ log() << "compact " << ns << " begin" << endl;
+ try {
+ ok = _compact(ns.c_str(), d, errmsg, validate, result);
}
- errmsg = "not done";
- return false;
+ catch(...) {
+ log() << "compact " << ns << " end (with error)" << endl;
+ throw;
+ }
+ log() << "compact " << ns << " end" << endl;
}
+ return ok;
+ }
+
+ bool isCurrentlyAReplSetPrimary();
+ class CompactCmd : public Command {
+ public:
virtual LockType locktype() const { return NONE; }
virtual bool adminOnly() const { return false; }
virtual bool slaveOk() const { return true; }
+ virtual bool maintenanceMode() const { return true; }
virtual bool logTheOp() { return false; }
virtual void help( stringstream& help ) const {
- help << "compact / defragment a collection in the background, slowly, attempting to minimize disruptions to other operations\n"
- "{ compact : <collection> }";
+ help << "compact collection\n"
+ "warning: this operation blocks the server and is slow. you can cancel with cancelOp()\n"
+ "{ compact : <collection_name>, [force:true], [validate:true] }\n"
+ " force - allows to run on a replica set primary\n"
+ " validate - check records are noncorrupt before adding to newly compacting extents. slower but safer (default is true in this version)\n";
}
virtual bool requiresAuth() { return true; }
-
- /** @param webUI expose the command in the web ui as localhost:28017/<name>
- @param oldName an optional old, deprecated name for the command
- */
CompactCmd() : Command("compact") { }
+
+ virtual bool run(const string& db, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ string coll = cmdObj.firstElement().valuestr();
+ if( coll.empty() || db.empty() ) {
+ errmsg = "no collection name specified";
+ return false;
+ }
+
+ if( isCurrentlyAReplSetPrimary() && !cmdObj["force"].trueValue() ) {
+ errmsg = "will not run compact on an active replica set primary as this is a slow blocking operation. use force:true to force";
+ return false;
+ }
+
+ string ns = db + '.' + coll;
+ bool validate = !cmdObj.hasElement("validate") || cmdObj["validate"].trueValue(); // default is true at the moment
+ bool ok = compact(ns, errmsg, validate, result);
+ return ok;
+ }
};
static CompactCmd compactCmd;
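
compactExtent() above boils down to: page the extent in, walk its records in order, skip invalid ones when validate is set, copy the rest into freshly allocated space, and commit periodically so the journal block stays bounded. A very loose sketch of that copy loop, with Record, isValid and commitIfNeeded as stand-ins for the pdfile machinery:

#include <iostream>
#include <list>
#include <string>
#include <vector>

struct Record { std::string data; bool valid; };

bool isValid(const Record& r) { return r.valid; }

void commitIfNeeded(size_t copiedSinceLastCommit) {
    if (copiedSinceLastCommit % 1000 == 0) { /* the real code flushes the journal here */ }
}

size_t compactOneExtent(const std::list<Record>& oldExtent, std::vector<Record>& freshExtent) {
    size_t skipped = 0, copied = 0;
    for (std::list<Record>::const_iterator it = oldExtent.begin(); it != oldExtent.end(); ++it) {
        if (!isValid(*it)) { ++skipped; continue; }   // corresponds to the validate option
        freshExtent.push_back(*it);                   // allocate + memcpy in the real code
        commitIfNeeded(++copied);
    }
    return skipped;   // surfaced to the client as "invalidObjects"
}

int main() {
    std::list<Record> old;
    Record good = { "ok", true }, bad = { "corrupt", false };
    old.push_back(good); old.push_back(bad);
    std::vector<Record> fresh;
    std::cout << "skipped " << compactOneExtent(old, fresh) << ", kept " << fresh.size() << std::endl;
    return 0;
}
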
diff --git a/db/compact.h b/db/compact.h
new file mode 100644
index 0000000..7bf49c8
--- /dev/null
+++ b/db/compact.h
@@ -0,0 +1,50 @@
+// compact.h
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+namespace mongo {
+
+ /** for bottom up fastbuildindex (where we presort keys) */
+ struct SortPhaseOne {
+ SortPhaseOne() {
+ n = 0;
+ nkeys = 0;
+ multi = false;
+ }
+ shared_ptr<BSONObjExternalSorter> sorter;
+ unsigned long long n; // # of records
+ unsigned long long nkeys;
+ bool multi; // multikey index
+
+ void addKeys(const IndexSpec& spec, const BSONObj& o, DiskLoc loc) {
+ BSONObjSet keys;
+ spec.getKeys(o, keys);
+ int k = 0;
+ for ( BSONObjSet::iterator i=keys.begin(); i != keys.end(); i++ ) {
+ if( ++k == 2 ) {
+ multi = true;
+ }
+ sorter->add(*i, loc);
+ nkeys++;
+ }
+ n++;
+ }
+ };
+
+}
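
SortPhaseOne above simply accumulates one (key, location) pair per extracted index key, flags the index as multikey when a single document yields more than one key, and hands the presorted stream to the bottom-up index build. A hedged illustration, with std::sort standing in for the external sorter and plain strings standing in for BSON keys:

#include <algorithm>
#include <iostream>
#include <set>
#include <string>
#include <utility>
#include <vector>

typedef std::pair<std::string, long> KeyLoc;   // (index key, disk location)

struct PhaseOne {
    std::vector<KeyLoc> keys;
    unsigned long long n, nkeys;
    bool multi;
    PhaseOne() : n(0), nkeys(0), multi(false) {}

    void addKeys(const std::set<std::string>& extracted, long loc) {
        if (extracted.size() > 1) multi = true;   // an array field yields several keys => multikey
        for (std::set<std::string>::const_iterator i = extracted.begin(); i != extracted.end(); ++i) {
            keys.push_back(std::make_pair(*i, loc));
            ++nkeys;
        }
        ++n;
    }

    void sortKeys() { std::sort(keys.begin(), keys.end()); }   // presort before the bulk build
};

int main() {
    PhaseOne p;
    std::set<std::string> docKeys;
    docKeys.insert("b"); docKeys.insert("a");   // one document, two extracted keys
    p.addKeys(docKeys, 100);
    p.sortKeys();
    std::cout << p.nkeys << " keys, multi=" << p.multi << std::endl;
    return 0;
}
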
diff --git a/db/concurrency.h b/db/concurrency.h
index 39cd853..3d6d02d 100644
--- a/db/concurrency.h
+++ b/db/concurrency.h
@@ -78,15 +78,11 @@ namespace mongo {
namespace mongo {
- inline void dbunlocking_write() { }
- inline void dbunlocking_read() { }
-
struct writelock {
writelock() { dbMutex.lock(); }
writelock(const string& ns) { dbMutex.lock(); }
~writelock() {
DESTRUCTOR_GUARD(
- dbunlocking_write();
dbMutex.unlock();
);
}
@@ -99,7 +95,6 @@ namespace mongo {
readlock() { dbMutex.lock_shared(); }
~readlock() {
DESTRUCTOR_GUARD(
- dbunlocking_read();
dbMutex.unlock_shared();
);
}
@@ -111,7 +106,6 @@ namespace mongo {
}
~readlocktry() {
if ( _got ) {
- dbunlocking_read();
dbMutex.unlock_shared();
}
}
@@ -126,7 +120,6 @@ namespace mongo {
}
~writelocktry() {
if ( _got ) {
- dbunlocking_read();
dbMutex.unlock();
}
}
@@ -175,11 +168,9 @@ namespace mongo {
~mongolock() {
DESTRUCTOR_GUARD(
if( _writelock ) {
- dbunlocking_write();
dbMutex.unlock();
}
else {
- dbunlocking_read();
dbMutex.unlock_shared();
}
);
diff --git a/db/curop.h b/db/curop.h
index c6e949b..2717d78 100644
--- a/db/curop.h
+++ b/db/curop.h
@@ -24,16 +24,56 @@
#include "../bson/util/atomic_int.h"
#include "../util/concurrency/spin_lock.h"
#include "../util/time_support.h"
-#include "db.h"
-#include "../scripting/engine.h"
+#include "../util/net/hostandport.h"
namespace mongo {
+ class CurOp;
+
/* lifespan is different than CurOp because of recursives with DBDirectClient */
class OpDebug {
public:
- StringBuilder str;
- void reset() { str.reset(); }
+ OpDebug() : ns(""){ reset(); }
+
+ void reset();
+
+ string toString() const;
+ void append( const CurOp& curop, BSONObjBuilder& b ) const;
+
+ // -------------------
+
+ StringBuilder extra; // weird things we need to fix later
+
+ // basic options
+ int op;
+ bool iscommand;
+ Namespace ns;
+ BSONObj query;
+ BSONObj updateobj;
+
+ // detailed options
+ long long cursorid;
+ int ntoreturn;
+ int ntoskip;
+ bool exhaust;
+
+ // debugging/profile info
+ int nscanned;
+ bool idhack;
+ bool scanAndOrder;
+ bool moved;
+ bool fastmod;
+ bool fastmodinsert;
+ bool upsert;
+ unsigned keyUpdates;
+
+ // error handling
+ ExceptionInfo exceptionInfo;
+
+ // response info
+ int executionTime;
+ int nreturned;
+ int responseLength;
};
/**
@@ -81,7 +121,7 @@ namespace mongo {
int size() const { return *_size; }
bool have() const { return size() > 0; }
- BSONObj get() {
+ BSONObj get() const {
_lock.lock();
BSONObj o;
try {
@@ -95,22 +135,15 @@ namespace mongo {
return o;
}
- void append( BSONObjBuilder& b , const StringData& name ) {
- _lock.lock();
- try {
- BSONObj temp = _get();
- b.append( name , temp );
- _lock.unlock();
- }
- catch ( ... ) {
- _lock.unlock();
- throw;
- }
+ void append( BSONObjBuilder& b , const StringData& name ) const {
+ scoped_spinlock lk(_lock);
+ BSONObj temp = _get();
+ b.append( name , temp );
}
private:
/** you have to be locked when you call this */
- BSONObj _get() {
+ BSONObj _get() const {
int sz = size();
if ( sz == 0 )
return BSONObj();
@@ -122,7 +155,7 @@ namespace mongo {
/** you have to be locked when you call this */
void _reset( int sz ) { _size[0] = sz; }
- SpinLock _lock;
+ mutable SpinLock _lock;
int * _size;
char _buf[512];
};
@@ -137,35 +170,29 @@ namespace mongo {
bool haveQuery() const { return _query.have(); }
BSONObj query() { return _query.get(); }
-
+ void appendQuery( BSONObjBuilder& b , const StringData& name ) const { _query.append( b , name ); }
+
void ensureStarted() {
if ( _start == 0 )
_start = _checkpoint = curTimeMicros64();
}
- void enter( Client::Context * context ) {
- ensureStarted();
- setNS( context->ns() );
- if ( context->_db && context->_db->profile > _dbprofile )
- _dbprofile = context->_db->profile;
- }
+ bool isStarted() const { return _start > 0; }
- void leave( Client::Context * context ) {
- unsigned long long now = curTimeMicros64();
- Top::global.record( _ns , _op , _lockType , now - _checkpoint , _command );
- _checkpoint = now;
- }
+ void enter( Client::Context * context );
+
+ void leave( Client::Context * context );
void reset() {
_reset();
_start = _checkpoint = 0;
- _active = true;
_opNum = _nextOpNum++;
- _ns[0] = '?'; // just in case not set later
+ _ns[0] = 0;
_debug.reset();
_query.reset();
+ _active = true; // this should be last for ui clarity
}
- void reset( const SockAddr & remote, int op ) {
+ void reset( const HostAndPort& remote, int op ) {
reset();
_remote = remote;
_op = op;
@@ -265,6 +292,7 @@ namespace mongo {
CurOp *parent() const { return _wrapped; }
void kill() { _killed = true; }
bool killed() const { return _killed; }
+ void yielded() { _numYields++; }
void setNS(const char *ns) {
strncpy(_ns, ns, Namespace::MaxNsLen);
_ns[Namespace::MaxNsLen] = 0;
@@ -286,12 +314,13 @@ namespace mongo {
int _dbprofile; // 0=off, 1=slow, 2=all
AtomicUInt _opNum;
char _ns[Namespace::MaxNsLen+2];
- struct SockAddr _remote;
+ HostAndPort _remote;
CachedBSONObj _query;
OpDebug _debug;
ThreadSafeString _message;
ProgressMeter _progressMeter;
volatile bool _killed;
+ int _numYields;
void _reset() {
_command = false;
@@ -302,6 +331,7 @@ namespace mongo {
_message = "";
_progressMeter.finished();
_killed = false;
+ _numYields = 0;
}
};
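
The CachedBSONObj::append() change above trades manual lock()/unlock() plus a catch-all for an RAII scoped_spinlock, and the lock member becomes mutable so const accessors can still take it. A minimal sketch of that pattern with a stand-in SpinLock:

#include <iostream>

class SpinLock {
public:
    void lock()   { /* spins in the real implementation */ }
    void unlock() { }
};

class scoped_spinlock {
public:
    explicit scoped_spinlock(SpinLock& l) : _l(l) { _l.lock(); }
    ~scoped_spinlock() { _l.unlock(); }   // runs even if the guarded body throws
private:
    SpinLock& _l;
};

struct Cached {
    mutable SpinLock _lock;   // mutable so const readers can still lock
    int _value;

    int get() const {
        scoped_spinlock lk(_lock);   // no explicit unlock or try/catch needed
        return _value;
    }
};

int main() {
    Cached c;
    c._value = 7;
    std::cout << c.get() << std::endl;
    return 0;
}
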
diff --git a/db/cursor.h b/db/cursor.h
index d17b698..9639b26 100644
--- a/db/cursor.h
+++ b/db/cursor.h
@@ -70,6 +70,8 @@ namespace mongo {
return BSONObj();
}
+ virtual bool supportGetMore() = 0;
+
/* called after every query block is iterated -- i.e. between getMore() blocks
so you can note where we are, if necessary.
*/
@@ -78,18 +80,20 @@ namespace mongo {
/* called before query getmore block is iterated */
virtual void checkLocation() { }
- virtual bool supportGetMore() = 0;
virtual bool supportYields() = 0;
+ /** Called before a ClientCursor yield. */
+ virtual bool prepareToYield() { noteLocation(); return supportYields(); }
+
+ /** Called after a ClientCursor yield. */
+ virtual void recoverFromYield() { checkLocation(); }
+
virtual string toString() { return "abstract?"; }
/* used for multikey index traversal to avoid sending back dups. see Matcher::matches().
if a multikey index traversal:
if loc has already been sent, returns true.
otherwise, marks loc as sent.
- @param deep - match was against an array, so we know it is multikey. this is legacy and kept
- for backwards datafile compatibility. 'deep' can be eliminated next time we
- force a data file conversion. 7Jul09
*/
virtual bool getsetdup(DiskLoc loc) = 0;
@@ -115,7 +119,12 @@ namespace mongo {
// matcher() should be checked each time advance() is called.
// Implementations which generate their own matcher should return this
// to avoid a matcher being set manually.
+ // Note that the return values differ subtly here
+
+ // Used when we want fast matcher lookup
virtual CoveredIndexMatcher *matcher() const { return 0; }
+ // Used when we need to share this matcher with someone else
+ virtual shared_ptr< CoveredIndexMatcher > matcherPtr() const { return shared_ptr< CoveredIndexMatcher >(); }
// A convenience function for setting the value of matcher() manually
// so it may accessed later. Implementations which must generate
@@ -123,6 +132,8 @@ namespace mongo {
virtual void setMatcher( shared_ptr< CoveredIndexMatcher > matcher ) {
massert( 13285, "manual matcher config not allowed", false );
}
+
+ virtual void explainDetails( BSONObjBuilder& b ) { return; }
};
// strategy object implementing direction of traversal.
@@ -170,6 +181,7 @@ namespace mongo {
virtual bool supportGetMore() { return true; }
virtual bool supportYields() { return true; }
virtual CoveredIndexMatcher *matcher() const { return _matcher.get(); }
+ virtual shared_ptr< CoveredIndexMatcher > matcherPtr() const { return _matcher; }
virtual void setMatcher( shared_ptr< CoveredIndexMatcher > matcher ) { _matcher = matcher; }
virtual long long nscanned() { return _nscanned; }
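
prepareToYield()/recoverFromYield() above formalize the note-position/revalidate-position dance a cursor must do around a lock yield. A toy illustration of the contract (ToyCursor is invented; real cursors also have to cope with the underlying document moving or being deleted while the lock is released):

#include <cstddef>
#include <iostream>
#include <vector>

class ToyCursor {
public:
    explicit ToyCursor(const std::vector<int>& data) : _data(data), _pos(0), _saved(0) {}

    bool ok() const { return _pos < _data.size(); }
    int  current() const { return _data[_pos]; }
    void advance() { ++_pos; }

    bool prepareToYield()   { _saved = _pos; return true; }   // plays the role of noteLocation()
    void recoverFromYield() { _pos = _saved; }                // plays the role of checkLocation()

private:
    std::vector<int> _data;
    std::size_t _pos, _saved;
};

int main() {
    std::vector<int> v;
    v.push_back(1); v.push_back(2);
    ToyCursor c(v);
    c.prepareToYield();
    /* in the real code the lock is released and reacquired here */
    c.recoverFromYield();
    std::cout << c.current() << std::endl;   // still positioned on the first element
    return 0;
}
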
diff --git a/db/database.cpp b/db/database.cpp
index d164ba5..97b3fa0 100644
--- a/db/database.cpp
+++ b/db/database.cpp
@@ -52,26 +52,9 @@ namespace mongo {
}
newDb = namespaceIndex.exists();
- profile = 0;
-
- {
- vector<string> others;
- getDatabaseNames( others , path );
-
- for ( unsigned i=0; i<others.size(); i++ ) {
-
- if ( strcasecmp( others[i].c_str() , nm ) )
- continue;
-
- if ( strcmp( others[i].c_str() , nm ) == 0 )
- continue;
-
- stringstream ss;
- ss << "db already exists with different case other: [" << others[i] << "] me [" << nm << "]";
- uasserted( DatabaseDifferCaseCode , ss.str() );
- }
- }
+ profile = cmdLine.defaultProfile;
+ checkDuplicateUncasedNames();
// If already exists, open. Otherwise behave as if empty until
// there's a write, then open.
@@ -91,7 +74,49 @@ namespace mongo {
throw;
}
}
+
+ void Database::checkDuplicateUncasedNames() const {
+ string duplicate = duplicateUncasedName( name, path );
+ if ( !duplicate.empty() ) {
+ stringstream ss;
+ ss << "db already exists with different case other: [" << duplicate << "] me [" << name << "]";
+ uasserted( DatabaseDifferCaseCode , ss.str() );
+ }
+ }
+ string Database::duplicateUncasedName( const string &name, const string &path, set< string > *duplicates ) {
+ if ( duplicates ) {
+ duplicates->clear();
+ }
+
+ vector<string> others;
+ getDatabaseNames( others , path );
+
+ set<string> allShortNames;
+ dbHolder.getAllShortNames( allShortNames );
+
+ others.insert( others.end(), allShortNames.begin(), allShortNames.end() );
+
+ for ( unsigned i=0; i<others.size(); i++ ) {
+
+ if ( strcasecmp( others[i].c_str() , name.c_str() ) )
+ continue;
+
+ if ( strcmp( others[i].c_str() , name.c_str() ) == 0 )
+ continue;
+
+ if ( duplicates ) {
+ duplicates->insert( others[i] );
+ } else {
+ return others[i];
+ }
+ }
+ if ( duplicates ) {
+ return duplicates->empty() ? "" : *duplicates->begin();
+ }
+ return "";
+ }
+
boost::filesystem::path Database::fileName( int n ) const {
stringstream ss;
ss << name << '.' << n;
@@ -167,15 +192,33 @@ namespace mongo {
return ret;
}
- MongoDataFile* Database::suitableFile( int sizeNeeded, bool preallocate ) {
+ bool fileIndexExceedsQuota( const char *ns, int fileIndex, bool enforceQuota ) {
+ return
+ cmdLine.quota &&
+ enforceQuota &&
+ fileIndex >= cmdLine.quotaFiles &&
+ // we don't enforce the quota on "special" namespaces as that could lead to problems -- e.g.
+ // rejecting an index insert after inserting the main record.
+ !NamespaceString::special( ns ) &&
+ NamespaceString( ns ).db != "local";
+ }
+
+ MongoDataFile* Database::suitableFile( const char *ns, int sizeNeeded, bool preallocate, bool enforceQuota ) {
// check existing files
for ( int i=numFiles()-1; i>=0; i-- ) {
MongoDataFile* f = getFile( i );
- if ( f->getHeader()->unusedLength >= sizeNeeded )
- return f;
+ if ( f->getHeader()->unusedLength >= sizeNeeded ) {
+ if ( fileIndexExceedsQuota( ns, i-1, enforceQuota ) ) // NOTE i-1 is the value used historically for this check.
+ ;
+ else
+ return f;
+ }
}
+ if ( fileIndexExceedsQuota( ns, numFiles(), enforceQuota ) )
+ uasserted(12501, "quota exceeded");
+
// allocate files until we either get one big enough or hit maxSize
for ( int i = 0; i < 8; i++ ) {
MongoDataFile* f = addAFile( sizeNeeded, preallocate );
@@ -187,6 +230,7 @@ namespace mongo {
return f;
}
+ uasserted(14810, "couldn't allocate space (suitableFile)"); // callers don't check for null return code
return 0;
}
@@ -198,11 +242,11 @@ namespace mongo {
}
- Extent* Database::allocExtent( const char *ns, int size, bool capped ) {
+ Extent* Database::allocExtent( const char *ns, int size, bool capped, bool enforceQuota ) {
Extent *e = DataFileMgr::allocFromFreeList( ns, size, capped );
if( e )
return e;
- return suitableFile( size, !capped )->createExtent( ns, size, capped );
+ return suitableFile( ns, size, !capped, enforceQuota )->createExtent( ns, size, capped );
}
@@ -223,11 +267,11 @@ namespace mongo {
assert( cc().database() == this );
if ( ! namespaceIndex.details( profileName.c_str() ) ) {
- log(1) << "creating profile ns: " << profileName << endl;
+ log() << "creating profile collection: " << profileName << endl;
BSONObjBuilder spec;
spec.appendBool( "capped", true );
- spec.append( "size", 131072.0 );
- if ( ! userCreateNS( profileName.c_str(), spec.done(), errmsg , true ) ) {
+ spec.append( "size", 1024*1024 );
+ if ( ! userCreateNS( profileName.c_str(), spec.done(), errmsg , false /* we don't replicate profile messages */ ) ) {
return false;
}
}
@@ -235,14 +279,6 @@ namespace mongo {
return true;
}
- void Database::finishInit() {
- if ( cmdLine.defaultProfile == profile )
- return;
-
- string errmsg;
- massert( 12506 , errmsg , setProfilingLevel( cmdLine.defaultProfile , errmsg ) );
- }
-
bool Database::validDBName( const string& ns ) {
if ( ns.size() == 0 || ns.size() > 64 )
return false;
diff --git a/db/database.h b/db/database.h
index 6e72ba8..3522f52 100644
--- a/db/database.h
+++ b/db/database.h
@@ -46,8 +46,6 @@ namespace mongo {
void openAllFiles();
- void finishInit();
-
/**
* tries to make sure that this hasn't been deleted
*/
@@ -82,9 +80,9 @@ namespace mongo {
*/
void preallocateAFile() { getFile( numFiles() , 0, true ); }
- MongoDataFile* suitableFile( int sizeNeeded, bool preallocate );
+ MongoDataFile* suitableFile( const char *ns, int sizeNeeded, bool preallocate, bool enforceQuota );
- Extent* allocExtent( const char *ns, int size, bool capped );
+ Extent* allocExtent( const char *ns, int size, bool capped, bool enforceQuota );
MongoDataFile* newestFile();
@@ -93,7 +91,6 @@ namespace mongo {
*/
bool setProfilingLevel( int newLevel , string& errmsg );
-
void flushFiles( bool sync ) const;
/**
@@ -107,7 +104,20 @@ namespace mongo {
}
static bool validDBName( const string& ns );
+
+ /**
+ * @throws DatabaseDifferCaseCode if the name is a duplicate based on
+ * case insensitive matching.
+ */
+ void checkDuplicateUncasedNames() const;
+ /**
+ * @return name of an existing database with same text name but different
+ * casing, if one exists. Otherwise the empty string is returned. If
+ * 'duplicates' is specified, it is filled with all duplicate names.
+ */
+ static string duplicateUncasedName( const string &name, const string &path, set< string > *duplicates = 0 );
+
public: // this should be private later
vector<MongoDataFile*> files;
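
duplicateUncasedName() declared above reports a clash only when an existing database name matches case-insensitively but is not byte-identical. A small sketch of that comparison (strcasecmp is the POSIX call; the Windows equivalent is left out of scope here):

#include <cstring>
#include <iostream>
#include <string>
#include <strings.h>   // strcasecmp (POSIX)
#include <vector>

std::string duplicateUncasedName(const std::string& name, const std::vector<std::string>& existing) {
    for (size_t i = 0; i < existing.size(); ++i) {
        if (strcasecmp(existing[i].c_str(), name.c_str()) != 0) continue;   // different name entirely
        if (strcmp(existing[i].c_str(), name.c_str()) == 0)     continue;   // same name, same case: not a clash
        return existing[i];                                                 // differs only by case
    }
    return "";
}

int main() {
    std::vector<std::string> dbs;
    dbs.push_back("Test");
    std::cout << "clash with: " << duplicateUncasedName("test", dbs) << std::endl;   // prints "Test"
    return 0;
}
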
diff --git a/db/db.cpp b/db/db.cpp
index 4f4575c..e6281d7 100644
--- a/db/db.cpp
+++ b/db/db.cpp
@@ -18,12 +18,12 @@
#include "pch.h"
#include "db.h"
-#include "query.h"
#include "introspect.h"
#include "repl.h"
#include "../util/unittest.h"
#include "../util/file_allocator.h"
#include "../util/background.h"
+#include "../util/text.h"
#include "dbmessage.h"
#include "instance.h"
#include "clientcursor.h"
@@ -36,28 +36,33 @@
#include "stats/snapshots.h"
#include "../util/concurrency/task.h"
#include "../util/version.h"
+#include "../util/ramlog.h"
+#include "../util/net/message_server.h"
#include "client.h"
#include "restapi.h"
#include "dbwebserver.h"
#include "dur.h"
#include "concurrency.h"
+#include "../s/d_writeback.h"
#if defined(_WIN32)
# include "../util/ntservice.h"
#else
# include <sys/file.h>
-# include <sys/resource.h>
#endif
namespace mongo {
+ namespace dur {
+ extern unsigned long long DataLimitPerJournalFile;
+ }
+
/* only off if --nocursors which is for debugging. */
extern bool useCursors;
/* only off if --nohints */
extern bool useHints;
- extern char *appsrvPath;
extern int diagLogging;
extern unsigned lenForNewNsFiles;
extern int lockFile;
@@ -65,9 +70,7 @@ namespace mongo {
extern string repairpath;
void setupSignals( bool inFork );
- void startReplSets(ReplSetCmdline*);
void startReplication();
- void pairWith(const char *remoteEnd, const char *arb);
void exitCleanly( ExitCode code );
CmdLine cmdLine;
@@ -93,65 +96,6 @@ namespace mongo {
QueryResult* emptyMoreResult(long long);
- void connThread( MessagingPort * p );
-
- class OurListener : public Listener {
- public:
- OurListener(const string &ip, int p) : Listener(ip, p) { }
- virtual void accepted(MessagingPort *mp) {
-
- if ( ! connTicketHolder.tryAcquire() ) {
- log() << "connection refused because too many open connections: " << connTicketHolder.used() << " of " << connTicketHolder.outof() << endl;
- // TODO: would be nice if we notified them...
- mp->shutdown();
- delete mp;
- return;
- }
-
- try {
-#ifndef __linux__ // TODO: consider making this ifdef _WIN32
- boost::thread thr(boost::bind(&connThread,mp));
-#else
- pthread_attr_t attrs;
- pthread_attr_init(&attrs);
- pthread_attr_setdetachstate(&attrs, PTHREAD_CREATE_DETACHED);
-
- static const size_t STACK_SIZE = 4*1024*1024;
-
- struct rlimit limits;
- assert(getrlimit(RLIMIT_STACK, &limits) == 0);
- if (limits.rlim_cur > STACK_SIZE) {
- pthread_attr_setstacksize(&attrs, (DEBUG_BUILD
- ? (STACK_SIZE / 2)
- : STACK_SIZE));
- }
- else if (limits.rlim_cur < 1024*1024) {
- warning() << "Stack size set to " << (limits.rlim_cur/1024) << "KB. We suggest at least 1MB" << endl;
- }
-
- pthread_t thread;
- int failed = pthread_create(&thread, &attrs, (void*(*)(void*)) &connThread, mp);
-
- pthread_attr_destroy(&attrs);
-
- if (failed) {
- log() << "pthread_create failed: " << errnoWithDescription(failed) << endl;
- throw boost::thread_resource_error(); // for consistency with boost::thread
- }
-#endif
- }
- catch ( boost::thread_resource_error& ) {
- log() << "can't create new thread, closing connection" << endl;
- mp->shutdown();
- delete mp;
- }
- catch ( ... ) {
- log() << "unkonwn exception starting connThread" << endl;
- mp->shutdown();
- delete mp;
- }
- }
- };
/* todo: make this a real test. the stuff in dbtests/ seem to do all dbdirectclient which exhaust doesn't support yet. */
// QueryOption_Exhaust
@@ -193,23 +137,8 @@ namespace mongo {
};
#endif
- void listen(int port) {
- //testTheDb();
- log() << "waiting for connections on port " << port << endl;
- OurListener l(cmdLine.bind_ip, port);
- l.setAsTimeTracker();
- startReplication();
- if ( !noHttpInterface )
- boost::thread web( boost::bind(&webServerThread, new RestAdminAccess() /* takes ownership */));
-
-#if(TESTEXHAUST)
- boost::thread thr(testExhaust);
-#endif
- l.initAndListen();
- }
-
void sysRuntimeInfo() {
- out() << "sysinfo:\n";
+ out() << "sysinfo:" << endl;
#if defined(_SC_PAGE_SIZE)
out() << " page size: " << (int) sysconf(_SC_PAGE_SIZE) << endl;
#endif
@@ -226,36 +155,15 @@ namespace mongo {
sleepmicros( Client::recommendedYieldMicros() );
}
- /* we create one thread for each connection from an app server database.
- app server will open a pool of threads.
- todo: one day, asio...
- */
- void connThread( MessagingPort * inPort ) {
- TicketHolderReleaser connTicketReleaser( &connTicketHolder );
-
- /* todo: move to Client object */
- LastError *le = new LastError();
- lastError.reset(le);
-
- inPort->_logLevel = 1;
- auto_ptr<MessagingPort> dbMsgPort( inPort );
- Client& c = Client::initThread("conn", inPort);
-
- try {
-
- c.getAuthenticationInfo()->isLocalHost = dbMsgPort->farEnd.isLocalHost();
-
- Message m;
- while ( 1 ) {
- inPort->clearCounters();
+ class MyMessageHandler : public MessageHandler {
+ public:
+ virtual void connected( AbstractMessagingPort* p ) {
+ Client& c = Client::initThread("conn", p);
+ c.getAuthenticationInfo()->isLocalHost = p->remote().isLocalHost();
+ }
- if ( !dbMsgPort->recv(m) ) {
- if( !cmdLine.quiet )
- log() << "end connection " << dbMsgPort->farEnd.toString() << endl;
- dbMsgPort->shutdown();
- break;
- }
-sendmore:
+ virtual void process( Message& m , AbstractMessagingPort* port , LastError * le) {
+ while ( true ) {
if ( inShutdown() ) {
log() << "got request after shutdown()" << endl;
break;
@@ -264,10 +172,10 @@ sendmore:
lastError.startRequest( m , le );
DbResponse dbresponse;
- assembleResponse( m, dbresponse, dbMsgPort->farEnd );
+ assembleResponse( m, dbresponse, port->remote() );
if ( dbresponse.response ) {
- dbMsgPort->reply(m, *dbresponse.response, dbresponse.responseTo);
+ port->reply(m, *dbresponse.response, dbresponse.responseTo);
if( dbresponse.exhaust ) {
MsgData *header = dbresponse.response->header();
QueryResult *qr = (QueryResult *) header;
@@ -289,46 +197,42 @@ sendmore:
b.decouple();
DEV log() << "exhaust=true sending more" << endl;
beNice();
- goto sendmore;
+ continue; // this goes back to the top of the loop
}
}
}
-
- networkCounter.hit( inPort->getBytesIn() , inPort->getBytesOut() );
-
- m.reset();
+ break;
}
-
- }
- catch ( AssertionException& e ) {
- log() << "AssertionException in connThread, closing client connection" << endl;
- log() << ' ' << e.what() << endl;
- dbMsgPort->shutdown();
- }
- catch ( SocketException& ) {
- problem() << "SocketException in connThread, closing client connection" << endl;
- dbMsgPort->shutdown();
- }
- catch ( const ClockSkewException & ) {
- exitCleanly( EXIT_CLOCK_SKEW );
- }
- catch ( std::exception &e ) {
- problem() << "Uncaught std::exception: " << e.what() << ", terminating" << endl;
- dbexit( EXIT_UNCAUGHT );
- }
- catch ( ... ) {
- problem() << "Uncaught exception, terminating" << endl;
- dbexit( EXIT_UNCAUGHT );
}
- // thread ending...
- {
+ virtual void disconnected( AbstractMessagingPort* p ) {
Client * c = currentClient.get();
if( c ) c->shutdown();
+ globalScriptEngine->threadDone();
}
- globalScriptEngine->threadDone();
+
+ };
+
+ void listen(int port) {
+ //testTheDb();
+ MessageServer::Options options;
+ options.port = port;
+ options.ipList = cmdLine.bind_ip;
+
+ MessageServer * server = createServer( options , new MyMessageHandler() );
+ server->setAsTimeTracker();
+
+ startReplication();
+ if ( !noHttpInterface )
+ boost::thread web( boost::bind(&webServerThread, new RestAdminAccess() /* takes ownership */));
+
+#if(TESTEXHAUST)
+ boost::thread thr(testExhaust);
+#endif
+ server->run();
}
+
bool doDBUpgrade( const string& dbName , string errmsg , DataFileHeader * h ) {
static DBDirectClient db;
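The hunk above folds the old per-connection connThread (and its goto sendmore label) into a MessageHandler whose connected/process/disconnected callbacks are driven by the MessageServer that listen() now builds via createServer(). A minimal, self-contained sketch of that callback style, using hypothetical Port/Message stand-ins rather than the real AbstractMessagingPort API:

    // Illustration only: Port and Message are hypothetical stand-ins, not the
    // AbstractMessagingPort / Message classes used in the hunk above.
    #include <iostream>
    #include <string>

    struct Port    { std::string remote; };
    struct Message { std::string payload; bool exhaust; };

    class MessageHandler {                               // interface the server drives
    public:
        virtual ~MessageHandler() {}
        virtual void connected( Port* p ) = 0;           // once per new connection
        virtual void process( Message& m, Port* p ) = 0; // once per request
        virtual void disconnected( Port* p ) = 0;        // connection teardown
    };

    class EchoHandler : public MessageHandler {
    public:
        virtual void connected( Port* p ) { std::cout << "connection from " << p->remote << "\n"; }
        virtual void process( Message& m, Port* p ) {
            while ( true ) {                             // 'continue' replaces the old goto sendmore
                std::cout << "reply to " << p->remote << ": " << m.payload << "\n";
                if ( m.exhaust ) { m.exhaust = false; continue; }
                break;
            }
        }
        virtual void disconnected( Port* p ) { std::cout << "end connection " << p->remote << "\n"; }
    };

    int main() {
        Port p; p.remote = "127.0.0.1:27017";
        Message m; m.payload = "ping"; m.exhaust = true;
        EchoHandler h;
        h.connected( &p );
        h.process( m, &p );
        h.disconnected( &p );
        return 0;
    }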
@@ -378,7 +282,9 @@ sendmore:
if ( !h->isCurrentVersion() || forceRepair ) {
if( h->version <= 0 ) {
- uasserted(10000, str::stream() << "db " << dbName << " appears corrupt pdfile version: " << h->version << " info: " << h->versionMinor << ' ' << h->fileLength);
+ uasserted(14026,
+ str::stream() << "db " << dbName << " appears corrupt pdfile version: " << h->version
+ << " info: " << h->versionMinor << ' ' << h->fileLength);
}
log() << "****" << endl;
@@ -494,10 +400,12 @@ sendmore:
return cc().curop()->opNum();
}
- void _initAndListen(int listenPort, const char *appserverLoc = NULL) {
+ void _initAndListen(int listenPort ) {
Client::initThread("initandlisten");
+ Logstream::get().addGlobalTee( new RamLog("global") );
+
bool is32bit = sizeof(int*) == 4;
{
@@ -510,13 +418,14 @@ sendmore:
l << "MongoDB starting : pid=" << pid << " port=" << cmdLine.port << " dbpath=" << dbpath;
if( replSettings.master ) l << " master=" << replSettings.master;
if( replSettings.slave ) l << " slave=" << (int) replSettings.slave;
- l << ( is32bit ? " 32" : " 64" ) << "-bit " << endl;
+ l << ( is32bit ? " 32" : " 64" ) << "-bit host=" << getHostNameCached() << endl;
}
DEV log() << "_DEBUG build (which is slower)" << endl;
show_warnings();
log() << mongodVersion() << endl;
printGitVersion();
printSysInfo();
+ printCommandLineOpts();
{
stringstream ss;
@@ -529,12 +438,12 @@ sendmore:
uassert( 12590 , ss.str().c_str(), boost::filesystem::exists( repairpath ) );
}
- acquirePathLock();
+ acquirePathLock(forceRepair);
remove_all( dbpath + "/_tmp/" );
FileAllocator::get()->start();
- BOOST_CHECK_EXCEPTION( clearTmpFiles() );
+ MONGO_BOOST_CHECK_EXCEPTION_WITH_MSG( clearTmpFiles(), "clear tmp files" );
_diaglog.init();
@@ -556,7 +465,7 @@ sendmore:
repairDatabasesAndCheckVersion();
- /* we didn't want to pre-open all fiels for the repair check above. for regular
+ /* we didn't want to pre-open all files for the repair check above. for regular
operation we do for read/write lock concurrency reasons.
*/
Database::_openAllFiles = true;
@@ -569,12 +478,7 @@ sendmore:
snapshotThread.go();
clientCursorMonitor.go();
-
- if( !cmdLine._replSet.empty() ) {
- replSet = true;
- ReplSetCmdline *replSetCmdline = new ReplSetCmdline(cmdLine._replSet);
- boost::thread t( boost::bind( &startReplSets, replSetCmdline) );
- }
+ PeriodicTask::theRunner->go();
listen(listenPort);
@@ -584,8 +488,14 @@ sendmore:
void testPretouch();
- void initAndListen(int listenPort, const char *appserverLoc = NULL) {
- try { _initAndListen(listenPort, appserverLoc); }
+ void initAndListen(int listenPort) {
+ try {
+ _initAndListen(listenPort);
+ }
+ catch ( DBException &e ) {
+ log() << "exception in initAndListen: " << e.toString() << ", terminating" << endl;
+ dbexit( EXIT_UNCAUGHT );
+ }
catch ( std::exception &e ) {
log() << "exception in initAndListen std::exception: " << e.what() << ", terminating" << endl;
dbexit( EXIT_UNCAUGHT );
@@ -603,7 +513,7 @@ sendmore:
#if defined(_WIN32)
bool initService() {
ServiceController::reportStatus( SERVICE_RUNNING );
- initAndListen( cmdLine.port, appsrvPath );
+ initAndListen( cmdLine.port );
return true;
}
#endif
@@ -625,23 +535,12 @@ void show_help_text(po::options_description options) {
/* Return error string or "" if no errors. */
string arg_error_check(int argc, char* argv[]) {
- for (int i = 1; i < argc; i++) {
- string s = argv[i];
- /* check for inclusion of old-style arbiter setting. */
- if (s == "--pairwith") {
- if (argc > i + 2) {
- string old_arbiter = argv[i + 2];
- if (old_arbiter == "-" || old_arbiter.substr(0, 1) != "-") {
- return "Specifying arbiter using --pairwith is no longer supported, please use --arbiter";
- }
- }
- }
- }
return "";
}
int main(int argc, char* argv[]) {
static StaticObserver staticObserver;
+ doPreServerStatupInits();
getcurns = ourgetns;
po::options_description general_options("General options");
@@ -667,18 +566,16 @@ int main(int argc, char* argv[]) {
("directoryperdb", "each database will be stored in a separate directory")
("journal", "enable journaling")
("journalOptions", po::value<int>(), "journal diagnostic options")
+ ("journalCommitInterval", po::value<unsigned>(), "how often to group/batch commit (ms)")
("ipv6", "enable IPv6 support (disabled by default)")
("jsonp","allow JSONP access via http (has security implications)")
("noauth", "run without security")
("nohttpinterface", "disable http interface")
+ ("nojournal", "disable journaling (journaling is on by default for 64 bit)")
("noprealloc", "disable data file preallocation - will often hurt performance")
("noscripting", "disable scripting engine")
("notablescan", "do not allow table scans")
-#if !defined(_WIN32)
- ("nounixsocket", "disable listening on unix sockets")
-#endif
("nssize", po::value<int>()->default_value(16), ".ns file size (in MB) for new databases")
- ("objcheck", "inspect client data for validity on receipt")
("profile",po::value<int>(), "0=off 1=slow, 2=all")
("quota", "limits each database to a certain number of files (8 default)")
("quotaFiles", po::value<int>(), "number of files allower per db, requires --quota")
@@ -687,6 +584,9 @@ int main(int argc, char* argv[]) {
("repairpath", po::value<string>() , "root directory for repair files - defaults to dbpath" )
("slowms",po::value<int>(&cmdLine.slowMS)->default_value(100), "value of slow for profile and console log" )
("smallfiles", "use a smaller default file size")
+#if defined(__linux__)
+ ("shutdown", "kill a running server (for init scripts)")
+#endif
("syncdelay",po::value<double>(&cmdLine.syncdelay)->default_value(60), "seconds between disk syncs (0=never, but not recommended)")
("sysinfo", "print some diagnostic system information")
("upgrade", "upgrade db if needed")
@@ -698,7 +598,6 @@ int main(int argc, char* argv[]) {
replication_options.add_options()
("fastsync", "indicate that this instance is starting from a dbpath snapshot of the repl peer")
- ("autoresync", "automatically resync if slave data is stale")
("oplogSize", po::value<int>(), "size limit (in MB) for op log")
;
@@ -708,6 +607,7 @@ int main(int argc, char* argv[]) {
("source", po::value<string>(), "when slave: specify master as <server:port>")
("only", po::value<string>(), "when slave: specify a single database to replicate")
("slavedelay", po::value<int>(), "specify delay (in seconds) to be used when applying master ops to slave")
+ ("autoresync", "automatically resync if slave data is stale")
;
rs_options.add_options()
@@ -724,17 +624,17 @@ int main(int argc, char* argv[]) {
("pretouch", po::value<int>(), "n pretouch threads for applying replicationed operations")
("command", po::value< vector<string> >(), "command")
("cacheSize", po::value<long>(), "cache size (in MB) for rec store")
- // these move to unhidden later:
- ("opIdMem", po::value<long>(), "size limit (in bytes) for in memory storage of op ids for replica pairs DEPRECATED")
- ("pairwith", po::value<string>(), "address of server to pair with DEPRECATED")
- ("arbiter", po::value<string>(), "address of replica pair arbiter server DEPRECATED")
("nodur", "disable journaling (currently the default)")
- ("nojournal", "disable journaling (currently the default)")
- ("appsrvpath", po::value<string>(), "root directory for the babble app server")
+ // things we don't want people to use
("nocursors", "diagnostic/debugging option that turns off cursors DO NOT USE IN PRODUCTION")
("nohints", "ignore query hints")
+ ("nopreallocj", "don't preallocate journal files")
("dur", "enable journaling") // deprecated version
("durOptions", po::value<int>(), "durability diagnostic options") // deprecated version
+ // deprecated pairing command line options
+ ("pairwith", "DEPRECATED")
+ ("arbiter", "DEPRECATED")
+ ("opIdMem", "DEPRECATED")
;
@@ -828,44 +728,46 @@ int main(int argc, char* argv[]) {
cmdLine.quota = true;
cmdLine.quotaFiles = params["quotaFiles"].as<int>() - 1;
}
- if( params.count("nodur") ) {
- cmdLine.dur = false;
- }
- if( params.count("nojournal") ) {
+ bool journalExplicit = false;
+ if( params.count("nodur") || params.count( "nojournal" ) ) {
+ journalExplicit = true;
cmdLine.dur = false;
}
if( params.count("dur") || params.count( "journal" ) ) {
+ journalExplicit = true;
cmdLine.dur = true;
}
if (params.count("durOptions")) {
cmdLine.durOptions = params["durOptions"].as<int>();
}
+ if( params.count("journalCommitInterval") ) {
+ // don't check whether dur is false here, as many users will simply take the default, which is off on win32.
+ // i.e. no point making life more complex by raising an error in a dev environment.
+ cmdLine.journalCommitInterval = params["journalCommitInterval"].as<unsigned>();
+ if( cmdLine.journalCommitInterval <= 1 || cmdLine.journalCommitInterval > 300 ) {
+ out() << "--journalCommitInterval out of allowed range (0-300ms)" << endl;
+ dbexit( EXIT_BADOPTIONS );
+ }
+ }
if (params.count("journalOptions")) {
cmdLine.durOptions = params["journalOptions"].as<int>();
}
- if (params.count("objcheck")) {
- objcheck = true;
- }
- if (params.count("appsrvpath")) {
- /* casting away the const-ness here */
- appsrvPath = (char*)(params["appsrvpath"].as<string>().c_str());
- }
if (params.count("repairpath")) {
repairpath = params["repairpath"].as<string>();
if (!repairpath.size()) {
- out() << "repairpath has to be non-zero" << endl;
+ out() << "repairpath is empty" << endl;
dbexit( EXIT_BADOPTIONS );
}
}
- else {
- repairpath = dbpath;
- }
if (params.count("nocursors")) {
useCursors = false;
}
if (params.count("nohints")) {
useHints = false;
}
+ if (params.count("nopreallocj")) {
+ cmdLine.preallocj = false;
+ }
if (params.count("nohttpinterface")) {
noHttpInterface = true;
}
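For context on the option handling added in this hunk: the new --journalCommitInterval value is parsed with boost::program_options and then range-checked; note the check rejects values <= 1 or > 300, so the accepted window is effectively 2-300 ms even though the message text says 0-300. A small standalone sketch of the same parse-then-validate pattern (not the mongod option table itself):

    #include <boost/program_options.hpp>
    #include <iostream>

    namespace po = boost::program_options;

    int main( int argc, char* argv[] ) {
        po::options_description desc( "options" );
        desc.add_options()
            ( "journalCommitInterval", po::value<unsigned>(), "how often to group/batch commit (ms)" );

        po::variables_map params;
        po::store( po::parse_command_line( argc, argv, desc ), params );
        po::notify( params );

        if ( params.count( "journalCommitInterval" ) ) {
            unsigned ms = params["journalCommitInterval"].as<unsigned>();
            if ( ms <= 1 || ms > 300 ) {       // same bounds the hunk enforces
                std::cerr << "--journalCommitInterval out of allowed range (2-300ms)" << std::endl;
                return 1;
            }
            std::cout << "group commit interval: " << ms << "ms" << std::endl;
        }
        return 0;
    }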
@@ -884,6 +786,8 @@ int main(int argc, char* argv[]) {
}
if (params.count("smallfiles")) {
cmdLine.smallfiles = true;
+ assert( dur::DataLimitPerJournalFile >= 128 * 1024 * 1024 );
+ dur::DataLimitPerJournalFile = 128 * 1024 * 1024;
}
if (params.count("diaglog")) {
int x = params["diaglog"].as<int>();
@@ -898,10 +802,12 @@ int main(int argc, char* argv[]) {
return 0;
}
if (params.count("repair")) {
+ Record::MemoryTrackingEnabled = false;
shouldRepairDatabases = 1;
forceRepair = 1;
}
if (params.count("upgrade")) {
+ Record::MemoryTrackingEnabled = false;
shouldRepairDatabases = 1;
}
if (params.count("notablescan")) {
@@ -921,6 +827,11 @@ int main(int argc, char* argv[]) {
}
if (params.count("autoresync")) {
replSettings.autoresync = true;
+ if( params.count("replSet") ) {
+ out() << "--autoresync is not used with --replSet" << endl;
+ out() << "see http://www.mongodb.org/display/DOCS/Resyncing+a+Very+Stale+Replica+Set+Member" << endl;
+ dbexit( EXIT_BADOPTIONS );
+ }
}
if (params.count("source")) {
/* specifies what the source in local.sources should be */
@@ -944,25 +855,6 @@ int main(int argc, char* argv[]) {
if (params.count("only")) {
cmdLine.only = params["only"].as<string>().c_str();
}
- if (params.count("pairwith")) {
- cout << "***********************************\n"
- << "WARNING WARNING WARNING\n"
- << " replica pairs are deprecated\n"
- << " see: http://www.mongodb.org/display/DOCS/Replica+Pairs \n"
- << "***********************************" << endl;
- string paired = params["pairwith"].as<string>();
- if (params.count("arbiter")) {
- string arbiter = params["arbiter"].as<string>();
- pairWith(paired.c_str(), arbiter.c_str());
- }
- else {
- pairWith(paired.c_str(), "-");
- }
- }
- else if (params.count("arbiter")) {
- out() << "specifying --arbiter without --pairwith" << endl;
- dbexit( EXIT_BADOPTIONS );
- }
if( params.count("nssize") ) {
int x = params["nssize"].as<int>();
if (x <= 0 || x > (0x7fffffff/1024/1024)) {
@@ -986,15 +878,6 @@ int main(int argc, char* argv[]) {
cmdLine.oplogSize = x * 1024 * 1024;
assert(cmdLine.oplogSize > 0);
}
- if (params.count("opIdMem")) {
- long x = params["opIdMem"].as<long>();
- if (x <= 0) {
- out() << "bad --opIdMem arg" << endl;
- dbexit( EXIT_BADOPTIONS );
- }
- replSettings.opIdMem = x;
- assert(replSettings.opIdMem > 0);
- }
if (params.count("cacheSize")) {
long x = params["cacheSize"].as<long>();
if (x <= 0) {
@@ -1007,8 +890,13 @@ int main(int argc, char* argv[]) {
if( params.count("configsvr") ) {
cmdLine.port = CmdLine::ConfigServerPort;
}
- if( params.count("shardsvr") )
+ if( params.count("shardsvr") ) {
+ if( params.count("configsvr") ) {
+ log() << "can't do --shardsvr and --configsvr at the same time" << endl;
+ dbexit( EXIT_BADOPTIONS );
+ }
cmdLine.port = CmdLine::ShardServerPort;
+ }
}
else {
if ( cmdLine.port <= 0 || cmdLine.port > 65535 ) {
@@ -1017,27 +905,36 @@ int main(int argc, char* argv[]) {
}
}
if ( params.count("configsvr" ) ) {
+ cmdLine.configsvr = true;
if (cmdLine.usingReplSets() || replSettings.master || replSettings.slave) {
log() << "replication should not be enabled on a config server" << endl;
::exit(-1);
}
- if ( params.count( "diaglog" ) == 0 )
- _diaglog.level = 1;
+ if ( params.count( "nodur" ) == 0 && params.count( "nojournal" ) == 0 )
+ cmdLine.dur = true;
if ( params.count( "dbpath" ) == 0 )
dbpath = "/data/configdb";
}
if ( params.count( "profile" ) ) {
cmdLine.defaultProfile = params["profile"].as<int>();
}
- if (params.count("nounixsocket")) {
- noUnixSocket = true;
- }
if (params.count("ipv6")) {
enableIPv6();
}
if (params.count("noMoveParanoia")) {
cmdLine.moveParanoia = false;
}
+ if (params.count("pairwith") || params.count("arbiter") || params.count("opIdMem")) {
+ out() << "****" << endl;
+ out() << "Replica Pairs have been deprecated." << endl;
+ out() << "<http://www.mongodb.org/display/DOCS/Replica+Pairs>" << endl;
+ out() << "****" << endl;
+ dbexit( EXIT_BADOPTIONS );
+ }
+
+ // needs to be after things like --configsvr parsing, thus here.
+ if( repairpath.empty() )
+ repairpath = dbpath;
Module::configAll( params );
dataFileSync.go();
@@ -1069,15 +966,85 @@ int main(int argc, char* argv[]) {
if( cmdLine.pretouch )
log() << "--pretouch " << cmdLine.pretouch << endl;
+#ifdef __linux__
+ if (params.count("shutdown")){
+ bool failed = false;
+
+ string name = ( boost::filesystem::path( dbpath ) / "mongod.lock" ).native_file_string();
+ if ( !boost::filesystem::exists( name ) || boost::filesystem::file_size( name ) == 0 )
+ failed = true;
+
+ pid_t pid;
+ string procPath;
+ if (!failed){
+ try {
+ ifstream f (name.c_str());
+ f >> pid;
+ procPath = (str::stream() << "/proc/" << pid);
+ if (!boost::filesystem::exists(procPath))
+ failed = true;
+
+ string exePath = procPath + "/exe";
+ if (boost::filesystem::exists(exePath)){
+ char buf[256];
+ int ret = readlink(exePath.c_str(), buf, sizeof(buf)-1);
+ if (ret == -1) {
+ int e = errno;
+ cerr << "Error resolving " << exePath << ": " << errnoWithDescription(e);
+ failed = true;
+ }
+ else {
+ buf[ret] = '\0'; // readlink doesn't terminate string
+ if (!endsWith(buf, "mongod")){
+ cerr << "Process " << pid << " is running " << buf << " not mongod" << endl;
+ ::exit(-1);
+ }
+ }
+ }
+ }
+ catch (const std::exception& e){
+ cerr << "Error reading pid from lock file [" << name << "]: " << e.what() << endl;
+ failed = true;
+ }
+ }
+
+ if (failed) {
+ cerr << "There doesn't seem to be a server running with dbpath: " << dbpath << endl;
+ ::exit(-1);
+ }
+
+ cout << "killing process with pid: " << pid << endl;
+ int ret = kill(pid, SIGTERM);
+ if (ret) {
+ int e = errno;
+ cerr << "failed to kill process: " << errnoWithDescription(e) << endl;
+ ::exit(-1);
+ }
+
+ while (boost::filesystem::exists(procPath)) {
+ sleepsecs(1);
+ }
+
+ ::exit(0);
+ }
+#endif
+
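A condensed, Linux-only sketch of the --shutdown path added above: read the pid from the lock file, confirm /proc/<pid> exists, send SIGTERM, then poll until the process is gone. The lock-file path is assumed for illustration and the /proc/<pid>/exe check is omitted for brevity:

    #include <fstream>
    #include <sstream>
    #include <iostream>
    #include <csignal>
    #include <unistd.h>
    #include <sys/stat.h>

    static bool pathExists( const std::string& p ) {
        struct stat st;
        return ::stat( p.c_str(), &st ) == 0;
    }

    int main() {
        const std::string lockFile = "/data/db/mongod.lock";   // assumed dbpath
        std::ifstream f( lockFile.c_str() );
        pid_t pid = 0;
        if ( !(f >> pid) || pid <= 0 ) {
            std::cerr << "no server seems to be running (missing or empty lock file)\n";
            return 1;
        }
        std::ostringstream proc;
        proc << "/proc/" << pid;
        if ( !pathExists( proc.str() ) ) {
            std::cerr << "pid " << pid << " from lock file is not running\n";
            return 1;
        }
        std::cout << "killing process with pid: " << pid << std::endl;
        if ( ::kill( pid, SIGTERM ) != 0 ) {
            std::cerr << "failed to send SIGTERM\n";
            return 1;
        }
        while ( pathExists( proc.str() ) )
            ::sleep( 1 );                  // wait for a clean shutdown, as the hunk does
        return 0;
    }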
#if defined(_WIN32)
if (serviceParamsCheck( params, dbpath, argc, argv )) {
return 0;
}
#endif
+
+
+ if (sizeof(void*) == 4 && !journalExplicit){
+ // trying to make this stand out more like startup warnings
+ log() << endl;
+ warning() << "32-bit servers don't have journaling enabled by default. Please use --journal if you want durability." << endl;
+ log() << endl;
+ }
+
}
UnitTest::runTests();
- initAndListen(cmdLine.port, appsrvPath);
+ initAndListen(cmdLine.port);
dbexit(EXIT_CLEAN);
return 0;
}
@@ -1088,14 +1055,6 @@ namespace mongo {
#undef out
- void exitCleanly( ExitCode code ) {
- killCurrentOp.killAll();
- {
- dblock lk;
- log() << "now exiting" << endl;
- dbexit( code );
- }
- }
#if !defined(_WIN32)
@@ -1166,7 +1125,7 @@ namespace mongo {
void myterminate() {
rawOut( "terminate() called, printing stack:" );
printStackTrace();
- abort();
+ ::abort();
}
void setupSignals_ignoreHelper( int signal ) {}
@@ -1235,19 +1194,63 @@ namespace mongo {
}
}
+ LPTOP_LEVEL_EXCEPTION_FILTER filtLast = 0;
+ ::HANDLE standardOut = GetStdHandle(STD_OUTPUT_HANDLE);
+ LONG WINAPI exceptionFilter(struct _EXCEPTION_POINTERS *ExceptionInfo) {
+ {
+ // given the severity of the event we write to console in addition to the --logFile
+ // (rawOut writes to the logfile, if a special one were specified)
+ DWORD written;
+ WriteFile(standardOut, "unhandled windows exception\n", 28, &written, 0); // 28 = full message length
+ FlushFileBuffers(standardOut);
+ }
+
+ DWORD ec = ExceptionInfo->ExceptionRecord->ExceptionCode;
+ if( ec == EXCEPTION_ACCESS_VIOLATION ) {
+ rawOut("access violation");
+ }
+ else {
+ rawOut("unhandled windows exception");
+ char buf[64];
+ strcpy(buf, "ec=0x");
+ _ui64toa(ec, buf+5, 16);
+ rawOut(buf);
+ }
+ if( filtLast )
+ return filtLast(ExceptionInfo);
+ return EXCEPTION_EXECUTE_HANDLER;
+ }
+
+ // called by mongoAbort()
+ extern void (*reportEventToSystem)(const char *msg);
+ void reportEventToSystemImpl(const char *msg) {
+ static ::HANDLE hEventLog = RegisterEventSource( NULL, TEXT("mongod") );
+ if( hEventLog ) {
+ std::wstring s = toNativeString(msg);
+ LPCTSTR txt = s.c_str();
+ BOOL ok = ReportEvent(
+ hEventLog, EVENTLOG_ERROR_TYPE,
+ 0, 0, NULL,
+ 1,
+ 0,
+ &txt,
+ 0);
+ wassert(ok);
+ }
+ }
+
void myPurecallHandler() {
- rawOut( "pure virtual method called, printing stack:" );
printStackTrace();
- abort();
+ mongoAbort("pure virtual");
}
void setupSignals( bool inFork ) {
- if( SetConsoleCtrlHandler( (PHANDLER_ROUTINE) CtrlHandler, TRUE ) )
- ;
- else
- massert( 10297 , "Couldn't register Windows Ctrl-C handler", false);
+ reportEventToSystem = reportEventToSystemImpl;
+ filtLast = SetUnhandledExceptionFilter(exceptionFilter);
+ massert(10297 , "Couldn't register Windows Ctrl-C handler", SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE));
_set_purecall_handler( myPurecallHandler );
}
+
#endif
} // namespace mongo
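The Windows block above installs an unhandled-exception filter and chains to whatever filter was installed before it. A minimal sketch of that chaining pattern in isolation (deliberately crashing in main to exercise it; not mongod's exceptionFilter itself):

    #include <windows.h>
    #include <cstdio>

    static LPTOP_LEVEL_EXCEPTION_FILTER previousFilter = 0;

    static LONG WINAPI myFilter( struct _EXCEPTION_POINTERS* info ) {
        DWORD code = info->ExceptionRecord->ExceptionCode;
        if ( code == EXCEPTION_ACCESS_VIOLATION )
            fprintf( stderr, "access violation\n" );
        else
            fprintf( stderr, "unhandled exception, code=0x%lx\n", (unsigned long) code );
        if ( previousFilter )
            return previousFilter( info );  // give any earlier filter a chance
        return EXCEPTION_EXECUTE_HANDLER;   // terminate the process
    }

    int main() {
        previousFilter = SetUnhandledExceptionFilter( myFilter );
        volatile int* p = 0;
        *p = 42;                            // deliberate crash to exercise the filter
        return 0;
    }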
diff --git a/db/db.h b/db/db.h
index 7ef7d03..f3e6b05 100644
--- a/db/db.h
+++ b/db/db.h
@@ -17,9 +17,10 @@
#pragma once
#include "../pch.h"
-#include "../util/message.h"
+#include "../util/net/message.h"
#include "concurrency.h"
#include "pdfile.h"
+#include "curop.h"
#include "client.h"
namespace mongo {
@@ -142,7 +143,8 @@ namespace mongo {
int _locktype;
dbtemprelease() {
- _context = cc().getContext();
+ const Client& c = cc();
+ _context = c.getContext();
_locktype = dbMutex.getState();
assert( _locktype );
@@ -156,7 +158,10 @@ namespace mongo {
if ( _context ) _context->unlocked();
dbMutex.unlock_shared();
}
-
+
+ verify( 14814 , c.curop() );
+ c.curop()->yielded();
+
}
~dbtemprelease() {
if ( _locktype > 0 )
@@ -168,6 +173,33 @@ namespace mongo {
}
};
+ /** must be write locked
+ no assert (and no release) if nested write lock
+ a lot like dbtempreleasecond but no malloc so should be a tiny bit faster
+ */
+ struct dbtempreleasewritelock {
+ Client::Context * _context;
+ int _locktype;
+ dbtempreleasewritelock() {
+ const Client& c = cc();
+ _context = c.getContext();
+ _locktype = dbMutex.getState();
+ assert( _locktype >= 1 );
+ if( _locktype > 1 )
+ return; // nested
+ if ( _context )
+ _context->unlocked();
+ dbMutex.unlock();
+ verify( 14845 , c.curop() );
+ c.curop()->yielded();
+ }
+ ~dbtempreleasewritelock() {
+ if ( _locktype == 1 )
+ dbMutex.lock();
+ if ( _context )
+ _context->relocked();
+ }
+ };
/**
only does a temp release if we're not nested and have a lock
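dbtemprelease and the new dbtempreleasewritelock both implement the same RAII idiom: drop the lock when the guard is constructed, reacquire it when the guard goes out of scope, and record the yield on the current CurOp. A generic sketch of that idiom with a plain std::mutex (not the dbMutex nesting/state handling shown above):

    #include <mutex>
    #include <iostream>

    class TempRelease {
        std::mutex& _m;
    public:
        explicit TempRelease( std::mutex& m ) : _m( m ) { _m.unlock(); } // caller must hold m
        ~TempRelease() { _m.lock(); }                                    // reacquired on scope exit
    };

    std::mutex dbMutex;

    void longOperation() {
        std::lock_guard<std::mutex> hold( dbMutex );
        // ... work that holds the lock ...
        {
            TempRelease yield( dbMutex );   // lock released for this block only
            std::cout << "doing slow I/O without holding the lock\n";
        }                                   // lock reacquired here
        // ... more work under the lock ...
    }

    int main() { longOperation(); return 0; }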
diff --git a/db/db.vcxproj b/db/db.vcxproj
index ad9c6d2..b3bfcfb 100644..100755
--- a/db/db.vcxproj
+++ b/db/db.vcxproj
@@ -1,791 +1,838 @@
-<?xml version="1.0" encoding="utf-8"?>
-<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
- <ItemGroup Label="ProjectConfigurations">
- <ProjectConfiguration Include="Debug|Win32">
- <Configuration>Debug</Configuration>
- <Platform>Win32</Platform>
- </ProjectConfiguration>
- <ProjectConfiguration Include="Debug|x64">
- <Configuration>Debug</Configuration>
- <Platform>x64</Platform>
- </ProjectConfiguration>
- <ProjectConfiguration Include="Release|Win32">
- <Configuration>Release</Configuration>
- <Platform>Win32</Platform>
- </ProjectConfiguration>
- <ProjectConfiguration Include="Release|x64">
- <Configuration>Release</Configuration>
- <Platform>x64</Platform>
- </ProjectConfiguration>
- </ItemGroup>
- <PropertyGroup Label="Globals">
- <ProjectName>mongod</ProjectName>
- <ProjectGuid>{215B2D68-0A70-4D10-8E75-B31010C62A91}</ProjectGuid>
- <RootNamespace>db</RootNamespace>
- <Keyword>Win32Proj</Keyword>
- </PropertyGroup>
- <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
- <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
- <ConfigurationType>Application</ConfigurationType>
- <CharacterSet>Unicode</CharacterSet>
- <WholeProgramOptimization>true</WholeProgramOptimization>
- </PropertyGroup>
- <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
- <ConfigurationType>Application</ConfigurationType>
- <CharacterSet>Unicode</CharacterSet>
- <WholeProgramOptimization>true</WholeProgramOptimization>
- </PropertyGroup>
- <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
- <ConfigurationType>Application</ConfigurationType>
- <UseOfMfc>false</UseOfMfc>
- <UseOfAtl>false</UseOfAtl>
- <CharacterSet>Unicode</CharacterSet>
- </PropertyGroup>
- <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
- <ConfigurationType>Application</ConfigurationType>
- <UseOfMfc>false</UseOfMfc>
- <UseOfAtl>false</UseOfAtl>
- <CharacterSet>Unicode</CharacterSet>
- </PropertyGroup>
- <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
- <ImportGroup Label="ExtensionSettings">
- </ImportGroup>
- <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="PropertySheets">
- <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
- </ImportGroup>
- <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
- <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
- </ImportGroup>
- <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="PropertySheets">
- <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
- </ImportGroup>
- <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
- <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
- </ImportGroup>
- <PropertyGroup Label="UserMacros" />
- <PropertyGroup>
- <_ProjectFileVersion>10.0.30319.1</_ProjectFileVersion>
- <OutDir Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">$(SolutionDir)$(Configuration)\</OutDir>
- <OutDir Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">$(SolutionDir)$(Configuration)\</OutDir>
- <IntDir Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">$(Configuration)\</IntDir>
- <IntDir Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">$(Configuration)\</IntDir>
- <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</LinkIncremental>
- <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</LinkIncremental>
- <OutDir Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">$(SolutionDir)$(Configuration)\</OutDir>
- <OutDir Condition="'$(Configuration)|$(Platform)'=='Release|x64'">$(SolutionDir)$(Configuration)\</OutDir>
- <IntDir Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">$(Configuration)\</IntDir>
- <IntDir Condition="'$(Configuration)|$(Platform)'=='Release|x64'">$(Configuration)\</IntDir>
- <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">false</LinkIncremental>
- <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Release|x64'">false</LinkIncremental>
- <CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">AllRules.ruleset</CodeAnalysisRuleSet>
- <CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">AllRules.ruleset</CodeAnalysisRuleSet>
- <CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" />
- <CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" />
- <CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" />
- <CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" />
- <CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">AllRules.ruleset</CodeAnalysisRuleSet>
- <CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Release|x64'">AllRules.ruleset</CodeAnalysisRuleSet>
- <CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" />
- <CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Release|x64'" />
- <CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" />
- <CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Release|x64'" />
- <IncludePath Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">.;..;$(IncludePath)</IncludePath>
- <IncludePath Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">..;$(IncludePath)</IncludePath>
- <IncludePath Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">..;$(IncludePath)</IncludePath>
- <IncludePath Condition="'$(Configuration)|$(Platform)'=='Release|x64'">..;$(IncludePath)</IncludePath>
- </PropertyGroup>
- <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- <ClCompile>
- <Optimization>Disabled</Optimization>
- <AdditionalIncludeDirectories>..\..\js\src;..\pcre-7.4;c:\boost;\boost</AdditionalIncludeDirectories>
- <PreprocessorDefinitions>_UNICODE;UNICODE;SUPPORT_UCP;SUPPORT_UTF8;MONGO_EXPOSE_MACROS;OLDJS;STATIC_JS_API;XP_WIN;WIN32;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;HAVE_CONFIG_H;PCRE_STATIC;%(PreprocessorDefinitions)</PreprocessorDefinitions>
- <MinimalRebuild>No</MinimalRebuild>
- <BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>
- <RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary>
- <PrecompiledHeader>Use</PrecompiledHeader>
- <PrecompiledHeaderFile>pch.h</PrecompiledHeaderFile>
- <WarningLevel>Level3</WarningLevel>
- <DebugInformationFormat>EditAndContinue</DebugInformationFormat>
- <DisableSpecificWarnings>4355;4800;%(DisableSpecificWarnings)</DisableSpecificWarnings>
- <MultiProcessorCompilation>true</MultiProcessorCompilation>
- </ClCompile>
- <Link>
- <AdditionalDependencies>ws2_32.lib;Psapi.lib;%(AdditionalDependencies)</AdditionalDependencies>
- <AdditionalLibraryDirectories>c:\boost\lib\vs2010_32;\boost\lib\vs2010_32;\boost\lib</AdditionalLibraryDirectories>
- <IgnoreAllDefaultLibraries>false</IgnoreAllDefaultLibraries>
- <IgnoreSpecificDefaultLibraries>%(IgnoreSpecificDefaultLibraries)</IgnoreSpecificDefaultLibraries>
- <GenerateDebugInformation>true</GenerateDebugInformation>
- <SubSystem>Console</SubSystem>
- <TargetMachine>MachineX86</TargetMachine>
- </Link>
- </ItemDefinitionGroup>
- <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- <ClCompile>
- <Optimization>Disabled</Optimization>
- <AdditionalIncludeDirectories>..\..\js\src;..\pcre-7.4;c:\boost;\boost</AdditionalIncludeDirectories>
- <PreprocessorDefinitions>SUPPORT_UCP;SUPPORT_UTF8;MONGO_EXPOSE_MACROS;OLDJS;STATIC_JS_API;XP_WIN;WIN32;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;HAVE_CONFIG_H;PCRE_STATIC;%(PreprocessorDefinitions)</PreprocessorDefinitions>
- <BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>
- <RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary>
- <PrecompiledHeader>Use</PrecompiledHeader>
- <PrecompiledHeaderFile>pch.h</PrecompiledHeaderFile>
- <WarningLevel>Level3</WarningLevel>
- <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
- <DisableSpecificWarnings>4355;4800;4267;4244;%(DisableSpecificWarnings)</DisableSpecificWarnings>
- <MultiProcessorCompilation>true</MultiProcessorCompilation>
- <MinimalRebuild>No</MinimalRebuild>
- </ClCompile>
- <Link>
- <AdditionalDependencies>ws2_32.lib;Psapi.lib;%(AdditionalDependencies)</AdditionalDependencies>
- <AdditionalLibraryDirectories>c:\boost\lib\vs2010_64;\boost\lib\vs2010_64;\boost\lib</AdditionalLibraryDirectories>
- <IgnoreAllDefaultLibraries>false</IgnoreAllDefaultLibraries>
- <IgnoreSpecificDefaultLibraries>%(IgnoreSpecificDefaultLibraries)</IgnoreSpecificDefaultLibraries>
- <GenerateDebugInformation>true</GenerateDebugInformation>
- <SubSystem>Console</SubSystem>
- </Link>
- </ItemDefinitionGroup>
- <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- <ClCompile>
- <Optimization>MaxSpeed</Optimization>
- <IntrinsicFunctions>true</IntrinsicFunctions>
- <AdditionalIncludeDirectories>..\..\js\src;..\pcre-7.4;c:\boost;\boost</AdditionalIncludeDirectories>
- <PreprocessorDefinitions>_UNICODE;UNICODE;SUPPORT_UCP;SUPPORT_UTF8;MONGO_EXPOSE_MACROS;OLDJS;STATIC_JS_API;XP_WIN;WIN32;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;HAVE_CONFIG_H;PCRE_STATIC;%(PreprocessorDefinitions)</PreprocessorDefinitions>
- <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
- <FunctionLevelLinking>true</FunctionLevelLinking>
- <PrecompiledHeader>Use</PrecompiledHeader>
- <PrecompiledHeaderFile>pch.h</PrecompiledHeaderFile>
- <WarningLevel>Level3</WarningLevel>
- <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
- <DisableSpecificWarnings>4355;4800;%(DisableSpecificWarnings)</DisableSpecificWarnings>
- <MultiProcessorCompilation>true</MultiProcessorCompilation>
- <MinimalRebuild>No</MinimalRebuild>
- </ClCompile>
- <Link>
- <AdditionalDependencies>ws2_32.lib;psapi.lib;%(AdditionalDependencies)</AdditionalDependencies>
- <AdditionalLibraryDirectories>c:\boost\lib\vs2010_32;\boost\lib\vs2010_32;\boost\lib</AdditionalLibraryDirectories>
- <GenerateDebugInformation>true</GenerateDebugInformation>
- <SubSystem>Console</SubSystem>
- <OptimizeReferences>true</OptimizeReferences>
- <EnableCOMDATFolding>true</EnableCOMDATFolding>
- <TargetMachine>MachineX86</TargetMachine>
- <IgnoreAllDefaultLibraries>false</IgnoreAllDefaultLibraries>
- </Link>
- </ItemDefinitionGroup>
- <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- <ClCompile>
- <Optimization>MaxSpeed</Optimization>
- <IntrinsicFunctions>true</IntrinsicFunctions>
- <AdditionalIncludeDirectories>..\..\js\src;..\pcre-7.4;c:\boost;\boost</AdditionalIncludeDirectories>
- <PreprocessorDefinitions>SUPPORT_UCP;SUPPORT_UTF8;MONGO_EXPOSE_MACROS;OLDJS;STATIC_JS_API;XP_WIN;WIN32;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;HAVE_CONFIG_H;PCRE_STATIC;%(PreprocessorDefinitions)</PreprocessorDefinitions>
- <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
- <FunctionLevelLinking>true</FunctionLevelLinking>
- <PrecompiledHeader>Use</PrecompiledHeader>
- <PrecompiledHeaderFile>pch.h</PrecompiledHeaderFile>
- <WarningLevel>Level3</WarningLevel>
- <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
- <DisableSpecificWarnings>4355;4800;4267;4244;%(DisableSpecificWarnings)</DisableSpecificWarnings>
- <MultiProcessorCompilation>true</MultiProcessorCompilation>
- <MinimalRebuild>No</MinimalRebuild>
- </ClCompile>
- <Link>
- <AdditionalDependencies>ws2_32.lib;psapi.lib;%(AdditionalDependencies)</AdditionalDependencies>
- <AdditionalLibraryDirectories>c:\boost\lib\vs2010_64;\boost\lib\vs2010_64;\boost\lib</AdditionalLibraryDirectories>
- <GenerateDebugInformation>true</GenerateDebugInformation>
- <SubSystem>Console</SubSystem>
- <OptimizeReferences>true</OptimizeReferences>
- <EnableCOMDATFolding>true</EnableCOMDATFolding>
- </Link>
- </ItemDefinitionGroup>
- <ItemGroup>
- <ClCompile Include="..\bson\oid.cpp" />
- <ClCompile Include="..\client\dbclientcursor.cpp" />
- <ClCompile Include="..\client\dbclient_rs.cpp" />
- <ClCompile Include="..\client\distlock.cpp" />
- <ClCompile Include="..\client\model.cpp" />
- <ClCompile Include="..\pcre-7.4\pcrecpp.cc">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_chartables.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_compile.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_config.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_dfa_exec.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_exec.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_fullinfo.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_get.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_globals.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_info.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_maketables.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_newline.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_ord2utf8.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_refcount.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_scanner.cc">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_stringpiece.cc">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_study.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_tables.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_try_flipped.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_ucp_searchfuncs.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_valid_utf8.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_version.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_xclass.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcreposix.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\scripting\bench.cpp" />
- <ClCompile Include="..\shell\mongo_vstudio.cpp">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">NotUsing</PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\s\chunk.cpp" />
- <ClCompile Include="..\s\config.cpp" />
- <ClCompile Include="..\s\d_chunk_manager.cpp" />
- <ClCompile Include="..\s\d_migrate.cpp" />
- <ClCompile Include="..\s\d_split.cpp" />
- <ClCompile Include="..\s\d_state.cpp" />
- <ClCompile Include="..\s\d_writeback.cpp" />
- <ClCompile Include="..\s\grid.cpp" />
- <ClCompile Include="..\s\shard.cpp" />
- <ClCompile Include="..\s\shardconnection.cpp" />
- <ClCompile Include="..\s\shardkey.cpp" />
- <ClCompile Include="..\util\alignedbuilder.cpp">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\util\concurrency\spin_lock.cpp" />
- <ClCompile Include="..\util\concurrency\synchronization.cpp" />
- <ClCompile Include="..\util\concurrency\task.cpp" />
- <ClCompile Include="..\util\concurrency\thread_pool.cpp" />
- <ClCompile Include="..\util\concurrency\vars.cpp" />
- <ClCompile Include="..\util\file_allocator.cpp" />
- <ClCompile Include="..\util\log.cpp" />
- <ClCompile Include="..\util\logfile.cpp" />
- <ClCompile Include="..\util\processinfo.cpp" />
- <ClCompile Include="..\util\stringutils.cpp" />
- <ClCompile Include="..\util\text.cpp" />
- <ClCompile Include="..\util\version.cpp" />
- <ClCompile Include="cap.cpp" />
- <ClCompile Include="commands\distinct.cpp" />
- <ClCompile Include="commands\group.cpp" />
- <ClCompile Include="commands\isself.cpp" />
- <ClCompile Include="commands\mr.cpp" />
- <ClCompile Include="compact.cpp" />
- <ClCompile Include="dbcommands_generic.cpp" />
- <ClCompile Include="dur.cpp" />
- <ClCompile Include="durop.cpp" />
- <ClCompile Include="dur_commitjob.cpp" />
- <ClCompile Include="dur_journal.cpp" />
- <ClCompile Include="dur_preplogbuffer.cpp" />
- <ClCompile Include="dur_recover.cpp" />
- <ClCompile Include="dur_writetodatafiles.cpp" />
- <ClCompile Include="geo\2d.cpp" />
- <ClCompile Include="geo\haystack.cpp" />
- <ClCompile Include="mongommf.cpp" />
- <ClCompile Include="oplog.cpp" />
- <ClCompile Include="projection.cpp" />
- <ClCompile Include="repl.cpp" />
- <ClCompile Include="repl\consensus.cpp" />
- <ClCompile Include="repl\heartbeat.cpp" />
- <ClCompile Include="repl\manager.cpp" />
- <ClCompile Include="repl\rs_initialsync.cpp" />
- <ClCompile Include="repl\rs_initiate.cpp" />
- <ClCompile Include="repl\rs_rollback.cpp" />
- <ClCompile Include="repl\rs_sync.cpp" />
- <ClCompile Include="repl_block.cpp" />
- <ClCompile Include="restapi.cpp" />
- <ClCompile Include="..\client\connpool.cpp" />
- <ClCompile Include="..\client\dbclient.cpp" />
- <ClCompile Include="..\client\syncclusterconnection.cpp" />
- <ClCompile Include="..\pch.cpp">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">Create</PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">Create</PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">Create</PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">Create</PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="client.cpp" />
- <ClCompile Include="clientcursor.cpp" />
- <ClCompile Include="cloner.cpp" />
- <ClCompile Include="commands.cpp" />
- <ClCompile Include="common.cpp" />
- <ClCompile Include="cursor.cpp" />
- <ClCompile Include="database.cpp" />
- <ClCompile Include="db.cpp" />
- <ClCompile Include="dbcommands.cpp" />
- <ClCompile Include="dbcommands_admin.cpp" />
- <ClCompile Include="dbeval.cpp" />
- <ClCompile Include="dbhelpers.cpp" />
- <ClCompile Include="dbwebserver.cpp" />
- <ClCompile Include="extsort.cpp" />
- <ClCompile Include="index.cpp" />
- <ClCompile Include="indexkey.cpp" />
- <ClCompile Include="instance.cpp" />
- <ClCompile Include="introspect.cpp" />
- <ClCompile Include="jsobj.cpp" />
- <ClCompile Include="json.cpp" />
- <ClCompile Include="lasterror.cpp" />
- <ClCompile Include="matcher.cpp" />
- <ClCompile Include="matcher_covered.cpp" />
- <ClCompile Include="..\util\mmap_win.cpp" />
- <ClCompile Include="modules\mms.cpp" />
- <ClCompile Include="module.cpp" />
- <ClCompile Include="namespace.cpp" />
- <ClCompile Include="nonce.cpp" />
- <ClCompile Include="..\client\parallel.cpp" />
- <ClCompile Include="pdfile.cpp" />
- <ClCompile Include="query.cpp" />
- <ClCompile Include="queryoptimizer.cpp" />
- <ClCompile Include="security.cpp" />
- <ClCompile Include="security_commands.cpp" />
- <ClCompile Include="security_key.cpp" />
- <ClCompile Include="tests.cpp" />
- <ClCompile Include="update.cpp" />
- <ClCompile Include="cmdline.cpp" />
- <ClCompile Include="queryutil.cpp" />
- <ClCompile Include="..\util\assert_util.cpp" />
- <ClCompile Include="..\util\background.cpp" />
- <ClCompile Include="..\util\base64.cpp" />
- <ClCompile Include="..\util\mmap.cpp" />
- <ClCompile Include="..\util\ntservice.cpp" />
- <ClCompile Include="..\util\processinfo_win32.cpp" />
- <ClCompile Include="..\util\util.cpp" />
- <ClCompile Include="..\util\httpclient.cpp" />
- <ClCompile Include="..\util\miniwebserver.cpp" />
- <ClCompile Include="..\util\md5.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- <PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeaderFile>
- <PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeaderFile>
- </ClCompile>
- <ClCompile Include="..\util\md5main.cpp">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">Use</PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">Use</PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\util\message.cpp" />
- <ClCompile Include="..\util\message_server_port.cpp" />
- <ClCompile Include="..\util\sock.cpp" />
- <ClCompile Include="..\s\d_logic.cpp" />
- <ClCompile Include="..\scripting\engine.cpp" />
- <ClCompile Include="..\scripting\engine_spidermonkey.cpp" />
- <ClCompile Include="..\scripting\utils.cpp" />
- <ClCompile Include="stats\counters.cpp" />
- <ClCompile Include="stats\snapshots.cpp" />
- <ClCompile Include="stats\top.cpp" />
- <ClCompile Include="btree.cpp" />
- <ClCompile Include="btreecursor.cpp" />
- <ClCompile Include="repl\health.cpp" />
- <ClCompile Include="repl\rs.cpp" />
- <ClCompile Include="repl\replset_commands.cpp" />
- <ClCompile Include="repl\rs_config.cpp" />
- </ItemGroup>
- <ItemGroup>
- <None Include="..\jstests\dur\basic1.sh" />
- <None Include="..\jstests\dur\dur1.js" />
- <None Include="..\jstests\replsets\replset1.js" />
- <None Include="..\jstests\replsets\replset2.js" />
- <None Include="..\jstests\replsets\replset3.js" />
- <None Include="..\jstests\replsets\replset4.js" />
- <None Include="..\jstests\replsets\replset5.js" />
- <None Include="..\jstests\replsets\replsetadd.js" />
- <None Include="..\jstests\replsets\replsetarb1.js" />
- <None Include="..\jstests\replsets\replsetarb2.js" />
- <None Include="..\jstests\replsets\replsetprio1.js" />
- <None Include="..\jstests\replsets\replsetrestart1.js" />
- <None Include="..\jstests\replsets\replsetrestart2.js" />
- <None Include="..\jstests\replsets\replset_remove_node.js" />
- <None Include="..\jstests\replsets\rollback.js" />
- <None Include="..\jstests\replsets\rollback2.js" />
- <None Include="..\jstests\replsets\sync1.js" />
- <None Include="..\jstests\replsets\twosets.js" />
- <None Include="..\SConstruct" />
- <None Include="..\util\mongoutils\README" />
- <None Include="mongo.ico" />
- <None Include="repl\notes.txt" />
- </ItemGroup>
- <ItemGroup>
- <ClInclude Include="..\client\dbclientcursor.h" />
- <ClInclude Include="..\client\distlock.h" />
- <ClInclude Include="..\client\gridfs.h" />
- <ClInclude Include="..\client\parallel.h" />
- <ClInclude Include="..\s\d_logic.h" />
- <ClInclude Include="..\targetver.h" />
- <ClInclude Include="..\pcre-7.4\config.h" />
- <ClInclude Include="..\pcre-7.4\pcre.h" />
- <ClInclude Include="..\util\concurrency\race.h" />
- <ClInclude Include="..\util\concurrency\rwlock.h" />
- <ClInclude Include="..\util\concurrency\msg.h" />
- <ClInclude Include="..\util\concurrency\mutex.h" />
- <ClInclude Include="..\util\concurrency\mvar.h" />
- <ClInclude Include="..\util\concurrency\task.h" />
- <ClInclude Include="..\util\concurrency\thread_pool.h" />
- <ClInclude Include="..\util\logfile.h" />
- <ClInclude Include="..\util\mongoutils\checksum.h" />
- <ClInclude Include="..\util\mongoutils\html.h" />
- <ClInclude Include="..\util\mongoutils\str.h" />
- <ClInclude Include="..\util\paths.h" />
- <ClInclude Include="..\util\ramlog.h" />
- <ClInclude Include="..\util\text.h" />
- <ClInclude Include="..\util\time_support.h" />
- <ClInclude Include="durop.h" />
- <ClInclude Include="dur_commitjob.h" />
- <ClInclude Include="dur_journal.h" />
- <ClInclude Include="dur_journalformat.h" />
- <ClInclude Include="dur_journalimpl.h" />
- <ClInclude Include="dur_stats.h" />
- <ClInclude Include="geo\core.h" />
- <ClInclude Include="helpers\dblogger.h" />
- <ClInclude Include="instance.h" />
- <ClInclude Include="mongommf.h" />
- <ClInclude Include="mongomutex.h" />
- <ClInclude Include="namespace-inl.h" />
- <ClInclude Include="oplogreader.h" />
- <ClInclude Include="projection.h" />
- <ClInclude Include="repl.h" />
- <ClInclude Include="replpair.h" />
- <ClInclude Include="repl\connections.h" />
- <ClInclude Include="repl\multicmd.h" />
- <ClInclude Include="repl\rsmember.h" />
- <ClInclude Include="repl\rs_optime.h" />
- <ClInclude Include="stats\counters.h" />
- <ClInclude Include="stats\snapshots.h" />
- <ClInclude Include="stats\top.h" />
- <ClInclude Include="..\client\connpool.h" />
- <ClInclude Include="..\client\dbclient.h" />
- <ClInclude Include="..\client\model.h" />
- <ClInclude Include="..\client\redef_macros.h" />
- <ClInclude Include="..\client\syncclusterconnection.h" />
- <ClInclude Include="..\client\undef_macros.h" />
- <ClInclude Include="background.h" />
- <ClInclude Include="client.h" />
- <ClInclude Include="clientcursor.h" />
- <ClInclude Include="cmdline.h" />
- <ClInclude Include="commands.h" />
- <ClInclude Include="concurrency.h" />
- <ClInclude Include="curop.h" />
- <ClInclude Include="cursor.h" />
- <ClInclude Include="database.h" />
- <ClInclude Include="db.h" />
- <ClInclude Include="dbhelpers.h" />
- <ClInclude Include="dbinfo.h" />
- <ClInclude Include="dbmessage.h" />
- <ClInclude Include="diskloc.h" />
- <ClInclude Include="index.h" />
- <ClInclude Include="indexkey.h" />
- <ClInclude Include="introspect.h" />
- <ClInclude Include="json.h" />
- <ClInclude Include="matcher.h" />
- <ClInclude Include="namespace.h" />
- <ClInclude Include="..\pch.h" />
- <ClInclude Include="pdfile.h" />
- <ClInclude Include="..\grid\protocol.h" />
- <ClInclude Include="query.h" />
- <ClInclude Include="queryoptimizer.h" />
- <ClInclude Include="resource.h" />
- <ClInclude Include="scanandorder.h" />
- <ClInclude Include="security.h" />
- <ClInclude Include="update.h" />
- <ClInclude Include="..\util\allocator.h" />
- <ClInclude Include="..\util\array.h" />
- <ClInclude Include="..\util\assert_util.h" />
- <ClInclude Include="..\util\background.h" />
- <ClInclude Include="..\util\base64.h" />
- <ClInclude Include="..\util\builder.h" />
- <ClInclude Include="..\util\debug_util.h" />
- <ClInclude Include="..\util\embedded_builder.h" />
- <ClInclude Include="..\util\file.h" />
- <ClInclude Include="..\util\file_allocator.h" />
- <ClInclude Include="..\util\goodies.h" />
- <ClInclude Include="..\util\hashtab.h" />
- <ClInclude Include="..\util\hex.h" />
- <ClInclude Include="lasterror.h" />
- <ClInclude Include="..\util\log.h" />
- <ClInclude Include="..\util\lruishmap.h" />
- <ClInclude Include="..\util\mmap.h" />
- <ClInclude Include="..\util\ntservice.h" />
- <ClInclude Include="..\util\optime.h" />
- <ClInclude Include="..\util\processinfo.h" />
- <ClInclude Include="..\util\queue.h" />
- <ClInclude Include="..\util\ramstore.h" />
- <ClInclude Include="..\util\unittest.h" />
- <ClInclude Include="..\util\concurrency\list.h" />
- <ClInclude Include="..\util\concurrency\value.h" />
- <ClInclude Include="..\util\web\html.h" />
- <ClInclude Include="..\util\httpclient.h" />
- <ClInclude Include="..\util\miniwebserver.h" />
- <ClInclude Include="..\util\md5.h" />
- <ClInclude Include="..\util\md5.hpp" />
- <ClInclude Include="..\util\message.h" />
- <ClInclude Include="..\util\message_server.h" />
- <ClInclude Include="..\util\sock.h" />
- <ClInclude Include="..\scripting\engine.h" />
- <ClInclude Include="..\scripting\engine_spidermonkey.h" />
- <ClInclude Include="..\scripting\engine_v8.h" />
- <ClInclude Include="..\scripting\v8_db.h" />
- <ClInclude Include="..\scripting\v8_utils.h" />
- <ClInclude Include="..\scripting\v8_wrapper.h" />
- <ClInclude Include="btree.h" />
- <ClInclude Include="repl\health.h" />
- <ClInclude Include="..\util\hostandport.h" />
- <ClInclude Include="repl\rs.h" />
- <ClInclude Include="repl\rs_config.h" />
- <ClInclude Include="..\bson\bsonelement.h" />
- <ClInclude Include="..\bson\bsoninlines.h" />
- <ClInclude Include="..\bson\bsonmisc.h" />
- <ClInclude Include="..\bson\bsonobj.h" />
- <ClInclude Include="..\bson\bsonobjbuilder.h" />
- <ClInclude Include="..\bson\bsonobjiterator.h" />
- <ClInclude Include="..\bson\bsontypes.h" />
- <ClInclude Include="jsobj.h" />
- <ClInclude Include="..\bson\oid.h" />
- <ClInclude Include="..\bson\ordering.h" />
- </ItemGroup>
- <ItemGroup>
- <Library Include="..\..\js\js32d.lib">
- <FileType>Document</FileType>
- <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
- <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild>
- <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>
- </Library>
- <Library Include="..\..\js\js32r.lib">
- <FileType>Document</FileType>
- <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
- <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
- <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild>
- </Library>
- <Library Include="..\..\js\js64d.lib">
- <FileType>Document</FileType>
- <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
- <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild>
- <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>
- </Library>
- <Library Include="..\..\js\js64r.lib">
- <FileType>Document</FileType>
- <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
- <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
- <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>
- </Library>
- </ItemGroup>
- <ItemGroup>
- <ResourceCompile Include="db.rc" />
- </ItemGroup>
- <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
- <ImportGroup Label="ExtensionTargets">
- </ImportGroup>
+<?xml version="1.0" encoding="utf-8"?>
+<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <ItemGroup Label="ProjectConfigurations">
+ <ProjectConfiguration Include="Debug|Win32">
+ <Configuration>Debug</Configuration>
+ <Platform>Win32</Platform>
+ </ProjectConfiguration>
+ <ProjectConfiguration Include="Debug|x64">
+ <Configuration>Debug</Configuration>
+ <Platform>x64</Platform>
+ </ProjectConfiguration>
+ <ProjectConfiguration Include="Release|Win32">
+ <Configuration>Release</Configuration>
+ <Platform>Win32</Platform>
+ </ProjectConfiguration>
+ <ProjectConfiguration Include="Release|x64">
+ <Configuration>Release</Configuration>
+ <Platform>x64</Platform>
+ </ProjectConfiguration>
+ </ItemGroup>
+ <PropertyGroup Label="Globals">
+ <ProjectName>mongod</ProjectName>
+ <ProjectGuid>{215B2D68-0A70-4D10-8E75-B31010C62A91}</ProjectGuid>
+ <RootNamespace>db</RootNamespace>
+ <Keyword>Win32Proj</Keyword>
+ </PropertyGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <CharacterSet>Unicode</CharacterSet>
+ <WholeProgramOptimization>true</WholeProgramOptimization>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <CharacterSet>Unicode</CharacterSet>
+ <WholeProgramOptimization>true</WholeProgramOptimization>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <UseOfMfc>false</UseOfMfc>
+ <UseOfAtl>false</UseOfAtl>
+ <CharacterSet>Unicode</CharacterSet>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <UseOfMfc>false</UseOfMfc>
+ <UseOfAtl>false</UseOfAtl>
+ <CharacterSet>Unicode</CharacterSet>
+ </PropertyGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
+ <ImportGroup Label="ExtensionSettings">
+ </ImportGroup>
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="PropertySheets">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="PropertySheets">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <PropertyGroup Label="UserMacros" />
+ <PropertyGroup>
+ <_ProjectFileVersion>10.0.30319.1</_ProjectFileVersion>
+ <OutDir Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">$(SolutionDir)$(Configuration)\</OutDir>
+ <OutDir Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">$(SolutionDir)$(Configuration)\</OutDir>
+ <IntDir Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">$(Configuration)\</IntDir>
+ <IntDir Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">$(Configuration)\</IntDir>
+ <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</LinkIncremental>
+ <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</LinkIncremental>
+ <OutDir Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">$(SolutionDir)$(Configuration)\</OutDir>
+ <OutDir Condition="'$(Configuration)|$(Platform)'=='Release|x64'">$(SolutionDir)$(Configuration)\</OutDir>
+ <IntDir Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">$(Configuration)\</IntDir>
+ <IntDir Condition="'$(Configuration)|$(Platform)'=='Release|x64'">$(Configuration)\</IntDir>
+ <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">false</LinkIncremental>
+ <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Release|x64'">false</LinkIncremental>
+ <CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">AllRules.ruleset</CodeAnalysisRuleSet>
+ <CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">AllRules.ruleset</CodeAnalysisRuleSet>
+ <CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" />
+ <CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" />
+ <CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" />
+ <CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" />
+ <CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">AllRules.ruleset</CodeAnalysisRuleSet>
+ <CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Release|x64'">AllRules.ruleset</CodeAnalysisRuleSet>
+ <CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" />
+ <CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Release|x64'" />
+ <CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" />
+ <CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Release|x64'" />
+ <IncludePath Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">.;..;$(IncludePath)</IncludePath>
+ <IncludePath Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">..;$(IncludePath)</IncludePath>
+ <IncludePath Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">..;$(IncludePath)</IncludePath>
+ <IncludePath Condition="'$(Configuration)|$(Platform)'=='Release|x64'">..;$(IncludePath)</IncludePath>
+ </PropertyGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ <ClCompile>
+ <Optimization>Disabled</Optimization>
+ <AdditionalIncludeDirectories>..\..\js\src;..\third_party\pcre-7.4;c:\boost;\boost</AdditionalIncludeDirectories>
+ <PreprocessorDefinitions>_UNICODE;UNICODE;;;MONGO_EXPOSE_MACROS;OLDJS;STATIC_JS_API;XP_WIN;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;HAVE_CONFIG_H;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <MinimalRebuild>No</MinimalRebuild>
+ <BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>
+ <RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary>
+ <PrecompiledHeader>Use</PrecompiledHeader>
+ <PrecompiledHeaderFile>pch.h</PrecompiledHeaderFile>
+ <WarningLevel>Level3</WarningLevel>
+ <DebugInformationFormat>EditAndContinue</DebugInformationFormat>
+ <DisableSpecificWarnings>4355;4800;%(DisableSpecificWarnings)</DisableSpecificWarnings>
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>
+ </ClCompile>
+ <Link>
+ <AdditionalDependencies>ws2_32.lib;Psapi.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ <AdditionalLibraryDirectories>c:\boost\lib\vs2010_32;\boost\lib\vs2010_32;\boost\lib</AdditionalLibraryDirectories>
+ <IgnoreAllDefaultLibraries>false</IgnoreAllDefaultLibraries>
+ <IgnoreSpecificDefaultLibraries>%(IgnoreSpecificDefaultLibraries)</IgnoreSpecificDefaultLibraries>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <SubSystem>Console</SubSystem>
+ <TargetMachine>MachineX86</TargetMachine>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ <ClCompile>
+ <Optimization>Disabled</Optimization>
+ <AdditionalIncludeDirectories>..\..\js\src;..\third_party\pcre-7.4;c:\boost;\boost</AdditionalIncludeDirectories>
+ <PreprocessorDefinitions>;;MONGO_EXPOSE_MACROS;OLDJS;STATIC_JS_API;XP_WIN;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;HAVE_CONFIG_H;;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>
+ <RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary>
+ <PrecompiledHeader>Use</PrecompiledHeader>
+ <PrecompiledHeaderFile>pch.h</PrecompiledHeaderFile>
+ <WarningLevel>Level3</WarningLevel>
+ <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
+ <DisableSpecificWarnings>4355;4800;4267;4244;%(DisableSpecificWarnings)</DisableSpecificWarnings>
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>
+ <MinimalRebuild>No</MinimalRebuild>
+ </ClCompile>
+ <Link>
+ <AdditionalDependencies>ws2_32.lib;Psapi.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ <AdditionalLibraryDirectories>c:\boost\lib\vs2010_64;\boost\lib\vs2010_64;\boost\lib</AdditionalLibraryDirectories>
+ <IgnoreAllDefaultLibraries>false</IgnoreAllDefaultLibraries>
+ <IgnoreSpecificDefaultLibraries>%(IgnoreSpecificDefaultLibraries)</IgnoreSpecificDefaultLibraries>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <SubSystem>Console</SubSystem>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ <ClCompile>
+ <Optimization>MaxSpeed</Optimization>
+ <IntrinsicFunctions>true</IntrinsicFunctions>
+ <AdditionalIncludeDirectories>..\..\js\src;..\third_party\pcre-7.4;c:\boost;\boost</AdditionalIncludeDirectories>
+ <PreprocessorDefinitions>_UNICODE;UNICODE;;;MONGO_EXPOSE_MACROS;OLDJS;STATIC_JS_API;XP_WIN;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;HAVE_CONFIG_H;;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
+ <FunctionLevelLinking>true</FunctionLevelLinking>
+ <PrecompiledHeader>Use</PrecompiledHeader>
+ <PrecompiledHeaderFile>pch.h</PrecompiledHeaderFile>
+ <WarningLevel>Level3</WarningLevel>
+ <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
+ <DisableSpecificWarnings>4355;4800;%(DisableSpecificWarnings)</DisableSpecificWarnings>
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>
+ <MinimalRebuild>No</MinimalRebuild>
+ </ClCompile>
+ <Link>
+ <AdditionalDependencies>ws2_32.lib;psapi.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ <AdditionalLibraryDirectories>c:\boost\lib\vs2010_32;\boost\lib\vs2010_32;\boost\lib</AdditionalLibraryDirectories>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <SubSystem>Console</SubSystem>
+ <OptimizeReferences>true</OptimizeReferences>
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>
+ <TargetMachine>MachineX86</TargetMachine>
+ <IgnoreAllDefaultLibraries>false</IgnoreAllDefaultLibraries>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ <ClCompile>
+ <Optimization>MaxSpeed</Optimization>
+ <IntrinsicFunctions>true</IntrinsicFunctions>
+ <AdditionalIncludeDirectories>..\..\js\src;..\third_party\pcre-7.4;c:\boost;\boost</AdditionalIncludeDirectories>
+ <PreprocessorDefinitions>;;MONGO_EXPOSE_MACROS;OLDJS;STATIC_JS_API;XP_WIN;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;HAVE_CONFIG_H;;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
+ <FunctionLevelLinking>true</FunctionLevelLinking>
+ <PrecompiledHeader>Use</PrecompiledHeader>
+ <PrecompiledHeaderFile>pch.h</PrecompiledHeaderFile>
+ <WarningLevel>Level3</WarningLevel>
+ <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
+ <DisableSpecificWarnings>4355;4800;4267;4244;%(DisableSpecificWarnings)</DisableSpecificWarnings>
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>
+ <MinimalRebuild>No</MinimalRebuild>
+ </ClCompile>
+ <Link>
+ <AdditionalDependencies>ws2_32.lib;psapi.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ <AdditionalLibraryDirectories>c:\boost\lib\vs2010_64;\boost\lib\vs2010_64;\boost\lib</AdditionalLibraryDirectories>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <SubSystem>Console</SubSystem>
+ <OptimizeReferences>true</OptimizeReferences>
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemGroup>
+ <ClCompile Include="..\bson\oid.cpp" />
+ <ClCompile Include="..\client\dbclientcursor.cpp" />
+ <ClCompile Include="..\client\dbclient_rs.cpp" />
+ <ClCompile Include="..\client\distlock.cpp" />
+ <ClCompile Include="..\client\model.cpp" />
+ <ClCompile Include="..\third_party\pcre-7.4\pcrecpp.cc">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_chartables.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_compile.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_config.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_dfa_exec.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_exec.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_fullinfo.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_get.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_globals.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_info.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_maketables.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_newline.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_ord2utf8.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_refcount.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_scanner.cc">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_stringpiece.cc">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_study.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_tables.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_try_flipped.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_ucp_searchfuncs.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_valid_utf8.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_version.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_xclass.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcreposix.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\scripting\bench.cpp" />
+ <ClCompile Include="..\shell\mongo_vstudio.cpp">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\s\chunk.cpp" />
+ <ClCompile Include="..\s\config.cpp" />
+ <ClCompile Include="..\s\d_chunk_manager.cpp" />
+ <ClCompile Include="..\s\d_migrate.cpp" />
+ <ClCompile Include="..\s\d_split.cpp" />
+ <ClCompile Include="..\s\d_state.cpp" />
+ <ClCompile Include="..\s\d_writeback.cpp" />
+ <ClCompile Include="..\s\grid.cpp" />
+ <ClCompile Include="..\s\shard.cpp" />
+ <ClCompile Include="..\s\shardconnection.cpp" />
+ <ClCompile Include="..\s\shardkey.cpp" />
+ <ClCompile Include="..\third_party\snappy\snappy-sinksource.cc">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\snappy\snappy.cc">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\util\alignedbuilder.cpp">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\util\compress.cpp">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\util\concurrency\spin_lock.cpp" />
+ <ClCompile Include="..\util\concurrency\synchronization.cpp" />
+ <ClCompile Include="..\util\concurrency\task.cpp" />
+ <ClCompile Include="..\util\concurrency\thread_pool.cpp" />
+ <ClCompile Include="..\util\concurrency\vars.cpp" />
+ <ClCompile Include="..\util\file_allocator.cpp" />
+ <ClCompile Include="..\util\log.cpp" />
+ <ClCompile Include="..\util\logfile.cpp" />
+ <ClCompile Include="..\util\net\listen.cpp" />
+ <ClCompile Include="..\util\net\miniwebserver.cpp" />
+ <ClCompile Include="..\util\processinfo.cpp" />
+ <ClCompile Include="..\util\ramlog.cpp" />
+ <ClCompile Include="..\util\stringutils.cpp" />
+ <ClCompile Include="..\util\text.cpp" />
+ <ClCompile Include="..\util\version.cpp" />
+ <ClCompile Include="btreebuilder.cpp" />
+ <ClCompile Include="cap.cpp" />
+ <ClCompile Include="commands\distinct.cpp" />
+ <ClCompile Include="commands\find_and_modify.cpp" />
+ <ClCompile Include="commands\group.cpp" />
+ <ClCompile Include="commands\isself.cpp" />
+ <ClCompile Include="commands\mr.cpp" />
+ <ClCompile Include="compact.cpp" />
+ <ClCompile Include="dbcommands_generic.cpp" />
+ <ClCompile Include="dbmessage.cpp" />
+ <ClCompile Include="dur.cpp" />
+ <ClCompile Include="durop.cpp" />
+ <ClCompile Include="dur_commitjob.cpp" />
+ <ClCompile Include="dur_journal.cpp" />
+ <ClCompile Include="dur_preplogbuffer.cpp" />
+ <ClCompile Include="dur_recover.cpp" />
+ <ClCompile Include="dur_writetodatafiles.cpp" />
+ <ClCompile Include="geo\2d.cpp" />
+ <ClCompile Include="geo\haystack.cpp" />
+ <ClCompile Include="key.cpp" />
+ <ClCompile Include="mongommf.cpp" />
+ <ClCompile Include="oplog.cpp" />
+ <ClCompile Include="ops\delete.cpp" />
+ <ClCompile Include="ops\query.cpp" />
+ <ClCompile Include="ops\update.cpp" />
+ <ClCompile Include="projection.cpp" />
+ <ClCompile Include="queryoptimizercursor.cpp" />
+ <ClCompile Include="querypattern.cpp">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="record.cpp" />
+ <ClCompile Include="repl.cpp" />
+ <ClCompile Include="repl\consensus.cpp" />
+ <ClCompile Include="repl\heartbeat.cpp" />
+ <ClCompile Include="repl\manager.cpp" />
+ <ClCompile Include="repl\rs_initialsync.cpp" />
+ <ClCompile Include="repl\rs_initiate.cpp" />
+ <ClCompile Include="repl\rs_rollback.cpp" />
+ <ClCompile Include="repl\rs_sync.cpp" />
+ <ClCompile Include="repl_block.cpp" />
+ <ClCompile Include="restapi.cpp" />
+ <ClCompile Include="..\client\connpool.cpp" />
+ <ClCompile Include="..\client\dbclient.cpp" />
+ <ClCompile Include="..\client\syncclusterconnection.cpp" />
+ <ClCompile Include="..\pch.cpp">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">Create</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">Create</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">Create</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">Create</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="client.cpp" />
+ <ClCompile Include="clientcursor.cpp" />
+ <ClCompile Include="cloner.cpp" />
+ <ClCompile Include="commands.cpp" />
+ <ClCompile Include="common.cpp" />
+ <ClCompile Include="cursor.cpp" />
+ <ClCompile Include="database.cpp" />
+ <ClCompile Include="db.cpp" />
+ <ClCompile Include="dbcommands.cpp" />
+ <ClCompile Include="dbcommands_admin.cpp" />
+ <ClCompile Include="dbeval.cpp" />
+ <ClCompile Include="dbhelpers.cpp" />
+ <ClCompile Include="dbwebserver.cpp" />
+ <ClCompile Include="extsort.cpp" />
+ <ClCompile Include="index.cpp" />
+ <ClCompile Include="indexkey.cpp" />
+ <ClCompile Include="instance.cpp" />
+ <ClCompile Include="introspect.cpp" />
+ <ClCompile Include="jsobj.cpp" />
+ <ClCompile Include="json.cpp" />
+ <ClCompile Include="lasterror.cpp" />
+ <ClCompile Include="matcher.cpp" />
+ <ClCompile Include="matcher_covered.cpp" />
+ <ClCompile Include="..\util\mmap_win.cpp" />
+ <ClCompile Include="modules\mms.cpp" />
+ <ClCompile Include="module.cpp" />
+ <ClCompile Include="namespace.cpp" />
+ <ClCompile Include="nonce.cpp" />
+ <ClCompile Include="..\client\parallel.cpp" />
+ <ClCompile Include="pdfile.cpp" />
+ <ClCompile Include="queryoptimizer.cpp" />
+ <ClCompile Include="scanandorder.cpp" />
+ <ClCompile Include="security.cpp" />
+ <ClCompile Include="security_commands.cpp" />
+ <ClCompile Include="security_common.cpp" />
+ <ClCompile Include="tests.cpp" />
+ <ClCompile Include="cmdline.cpp" />
+ <ClCompile Include="queryutil.cpp" />
+ <ClCompile Include="..\util\assert_util.cpp" />
+ <ClCompile Include="..\util\background.cpp" />
+ <ClCompile Include="..\util\base64.cpp" />
+ <ClCompile Include="..\util\mmap.cpp" />
+ <ClCompile Include="..\util\ntservice.cpp" />
+ <ClCompile Include="..\util\processinfo_win32.cpp" />
+ <ClCompile Include="..\util\util.cpp" />
+ <ClCompile Include="..\util\net\httpclient.cpp" />
+ <ClCompile Include="..\util\md5.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeaderFile>
+ <PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeaderFile>
+ </ClCompile>
+ <ClCompile Include="..\util\md5main.cpp">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">Use</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">Use</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\util\net\message.cpp" />
+ <ClCompile Include="..\util\net\message_port.cpp" />
+ <ClCompile Include="..\util\net\message_server_port.cpp" />
+ <ClCompile Include="..\util\net\sock.cpp" />
+ <ClCompile Include="..\s\d_logic.cpp" />
+ <ClCompile Include="..\scripting\engine.cpp" />
+ <ClCompile Include="..\scripting\engine_spidermonkey.cpp" />
+ <ClCompile Include="..\scripting\utils.cpp" />
+ <ClCompile Include="stats\counters.cpp" />
+ <ClCompile Include="stats\snapshots.cpp" />
+ <ClCompile Include="stats\top.cpp" />
+ <ClCompile Include="btree.cpp" />
+ <ClCompile Include="btreecursor.cpp" />
+ <ClCompile Include="repl\health.cpp" />
+ <ClCompile Include="repl\rs.cpp" />
+ <ClCompile Include="repl\replset_commands.cpp" />
+ <ClCompile Include="repl\rs_config.cpp" />
+ </ItemGroup>
+ <ItemGroup>
+ <None Include="..\jstests\dur\basic1.sh" />
+ <None Include="..\jstests\dur\dur1.js" />
+ <None Include="..\jstests\replsets\replset1.js" />
+ <None Include="..\jstests\replsets\replset2.js" />
+ <None Include="..\jstests\replsets\replset3.js" />
+ <None Include="..\jstests\replsets\replset4.js" />
+ <None Include="..\jstests\replsets\replset5.js" />
+ <None Include="..\jstests\replsets\replsetadd.js" />
+ <None Include="..\jstests\replsets\replsetarb1.js" />
+ <None Include="..\jstests\replsets\replsetarb2.js" />
+ <None Include="..\jstests\replsets\replsetprio1.js" />
+ <None Include="..\jstests\replsets\replsetrestart1.js" />
+ <None Include="..\jstests\replsets\replsetrestart2.js" />
+ <None Include="..\jstests\replsets\replset_remove_node.js" />
+ <None Include="..\jstests\replsets\rollback.js" />
+ <None Include="..\jstests\replsets\rollback2.js" />
+ <None Include="..\jstests\replsets\sync1.js" />
+ <None Include="..\jstests\replsets\twosets.js" />
+ <None Include="..\SConstruct" />
+ <None Include="..\util\mongoutils\README" />
+ <None Include="mongo.ico" />
+ <None Include="repl\notes.txt" />
+ </ItemGroup>
+ <ItemGroup>
+ <ClInclude Include="..\bson\bson-inl.h" />
+ <ClInclude Include="..\bson\bson.h" />
+ <ClInclude Include="..\bson\bson_db.h" />
+ <ClInclude Include="..\bson\inline_decls.h" />
+ <ClInclude Include="..\bson\stringdata.h" />
+ <ClInclude Include="..\bson\util\atomic_int.h" />
+ <ClInclude Include="..\bson\util\builder.h" />
+ <ClInclude Include="..\bson\util\misc.h" />
+ <ClInclude Include="..\client\dbclientcursor.h" />
+ <ClInclude Include="..\client\distlock.h" />
+ <ClInclude Include="..\client\gridfs.h" />
+ <ClInclude Include="..\client\parallel.h" />
+ <ClInclude Include="..\s\d_logic.h" />
+ <ClInclude Include="..\targetver.h" />
+ <ClInclude Include="..\third_party\pcre-7.4\config.h" />
+ <ClInclude Include="..\third_party\pcre-7.4\pcre.h" />
+ <ClInclude Include="..\third_party\snappy\config.h" />
+ <ClInclude Include="..\third_party\snappy\snappy.h" />
+ <ClInclude Include="..\util\alignedbuilder.h" />
+ <ClInclude Include="..\util\concurrency\race.h" />
+ <ClInclude Include="..\util\concurrency\rwlock.h" />
+ <ClInclude Include="..\util\concurrency\msg.h" />
+ <ClInclude Include="..\util\concurrency\mutex.h" />
+ <ClInclude Include="..\util\concurrency\mvar.h" />
+ <ClInclude Include="..\util\concurrency\task.h" />
+ <ClInclude Include="..\util\concurrency\thread_pool.h" />
+ <ClInclude Include="..\util\logfile.h" />
+ <ClInclude Include="..\util\mongoutils\checksum.h" />
+ <ClInclude Include="..\util\mongoutils\html.h" />
+ <ClInclude Include="..\util\mongoutils\str.h" />
+ <ClInclude Include="..\util\paths.h" />
+ <ClInclude Include="..\util\ramlog.h" />
+ <ClInclude Include="..\util\text.h" />
+ <ClInclude Include="..\util\time_support.h" />
+ <ClInclude Include="durop.h" />
+ <ClInclude Include="dur_commitjob.h" />
+ <ClInclude Include="dur_journal.h" />
+ <ClInclude Include="dur_journalformat.h" />
+ <ClInclude Include="dur_journalimpl.h" />
+ <ClInclude Include="dur_stats.h" />
+ <ClInclude Include="geo\core.h" />
+ <ClInclude Include="helpers\dblogger.h" />
+ <ClInclude Include="instance.h" />
+ <ClInclude Include="mongommf.h" />
+ <ClInclude Include="mongomutex.h" />
+ <ClInclude Include="namespace-inl.h" />
+ <ClInclude Include="oplogreader.h" />
+ <ClInclude Include="ops\delete.h" />
+ <ClInclude Include="ops\update.h" />
+ <ClInclude Include="projection.h" />
+ <ClInclude Include="queryutil.h" />
+ <ClInclude Include="repl.h" />
+ <ClInclude Include="replpair.h" />
+ <ClInclude Include="repl\connections.h" />
+ <ClInclude Include="repl\multicmd.h" />
+ <ClInclude Include="repl\rsmember.h" />
+ <ClInclude Include="repl\rs_optime.h" />
+ <ClInclude Include="stats\counters.h" />
+ <ClInclude Include="stats\snapshots.h" />
+ <ClInclude Include="stats\top.h" />
+ <ClInclude Include="..\client\connpool.h" />
+ <ClInclude Include="..\client\dbclient.h" />
+ <ClInclude Include="..\client\model.h" />
+ <ClInclude Include="..\client\redef_macros.h" />
+ <ClInclude Include="..\client\syncclusterconnection.h" />
+ <ClInclude Include="..\client\undef_macros.h" />
+ <ClInclude Include="background.h" />
+ <ClInclude Include="client.h" />
+ <ClInclude Include="clientcursor.h" />
+ <ClInclude Include="cmdline.h" />
+ <ClInclude Include="commands.h" />
+ <ClInclude Include="concurrency.h" />
+ <ClInclude Include="curop.h" />
+ <ClInclude Include="cursor.h" />
+ <ClInclude Include="database.h" />
+ <ClInclude Include="db.h" />
+ <ClInclude Include="dbhelpers.h" />
+ <ClInclude Include="dbinfo.h" />
+ <ClInclude Include="dbmessage.h" />
+ <ClInclude Include="diskloc.h" />
+ <ClInclude Include="index.h" />
+ <ClInclude Include="indexkey.h" />
+ <ClInclude Include="introspect.h" />
+ <ClInclude Include="json.h" />
+ <ClInclude Include="matcher.h" />
+ <ClInclude Include="namespace.h" />
+ <ClInclude Include="..\pch.h" />
+ <ClInclude Include="pdfile.h" />
+ <ClInclude Include="..\grid\protocol.h" />
+ <ClInclude Include="query.h" />
+ <ClInclude Include="queryoptimizer.h" />
+ <ClInclude Include="resource.h" />
+ <ClInclude Include="scanandorder.h" />
+ <ClInclude Include="security.h" />
+ <ClInclude Include="..\util\allocator.h" />
+ <ClInclude Include="..\util\array.h" />
+ <ClInclude Include="..\util\assert_util.h" />
+ <ClInclude Include="..\util\background.h" />
+ <ClInclude Include="..\util\base64.h" />
+ <ClInclude Include="..\util\builder.h" />
+ <ClInclude Include="..\util\debug_util.h" />
+ <ClInclude Include="..\util\embedded_builder.h" />
+ <ClInclude Include="..\util\file.h" />
+ <ClInclude Include="..\util\file_allocator.h" />
+ <ClInclude Include="..\util\goodies.h" />
+ <ClInclude Include="..\util\hashtab.h" />
+ <ClInclude Include="..\util\hex.h" />
+ <ClInclude Include="lasterror.h" />
+ <ClInclude Include="..\util\log.h" />
+ <ClInclude Include="..\util\lruishmap.h" />
+ <ClInclude Include="..\util\mmap.h" />
+ <ClInclude Include="..\util\ntservice.h" />
+ <ClInclude Include="..\util\optime.h" />
+ <ClInclude Include="..\util\processinfo.h" />
+ <ClInclude Include="..\util\queue.h" />
+ <ClInclude Include="..\util\ramstore.h" />
+ <ClInclude Include="..\util\unittest.h" />
+ <ClInclude Include="..\util\concurrency\list.h" />
+ <ClInclude Include="..\util\concurrency\value.h" />
+ <ClInclude Include="..\util\web\html.h" />
+ <ClInclude Include="..\util\net\httpclient.h" />
+ <ClInclude Include="..\util\md5.h" />
+ <ClInclude Include="..\util\md5.hpp" />
+ <ClInclude Include="..\util\net\message.h" />
+ <ClInclude Include="..\util\net\message_server.h" />
+ <ClInclude Include="..\util\net\sock.h" />
+ <ClInclude Include="..\scripting\engine.h" />
+ <ClInclude Include="..\scripting\engine_spidermonkey.h" />
+ <ClInclude Include="..\scripting\engine_v8.h" />
+ <ClInclude Include="..\scripting\v8_db.h" />
+ <ClInclude Include="..\scripting\v8_utils.h" />
+ <ClInclude Include="..\scripting\v8_wrapper.h" />
+ <ClInclude Include="btree.h" />
+ <ClInclude Include="repl\health.h" />
+ <ClInclude Include="..\util\hostandport.h" />
+ <ClInclude Include="repl\rs.h" />
+ <ClInclude Include="repl\rs_config.h" />
+ <ClInclude Include="..\bson\bsonelement.h" />
+ <ClInclude Include="..\bson\bsoninlines.h" />
+ <ClInclude Include="..\bson\bsonmisc.h" />
+ <ClInclude Include="..\bson\bsonobj.h" />
+ <ClInclude Include="..\bson\bsonobjbuilder.h" />
+ <ClInclude Include="..\bson\bsonobjiterator.h" />
+ <ClInclude Include="..\bson\bsontypes.h" />
+ <ClInclude Include="jsobj.h" />
+ <ClInclude Include="..\bson\oid.h" />
+ <ClInclude Include="..\bson\ordering.h" />
+ </ItemGroup>
+ <ItemGroup>
+ <Library Include="..\..\js\js32d.lib">
+ <FileType>Document</FileType>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>
+ </Library>
+ <Library Include="..\..\js\js32r.lib">
+ <FileType>Document</FileType>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild>
+ </Library>
+ <Library Include="..\..\js\js64d.lib">
+ <FileType>Document</FileType>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>
+ </Library>
+ <Library Include="..\..\js\js64r.lib">
+ <FileType>Document</FileType>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>
+ </Library>
+ </ItemGroup>
+ <ItemGroup>
+ <ResourceCompile Include="db.rc" />
+ </ItemGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
+ <ImportGroup Label="ExtensionTargets">
+ </ImportGroup>
</Project> \ No newline at end of file
diff --git a/db/db.vcxproj.filters b/db/db.vcxproj.filters
index a2011df..5650b2c 100755
--- a/db/db.vcxproj.filters
+++ b/db/db.vcxproj.filters
@@ -6,30 +6,6 @@
<ClCompile Include="..\client\dbclient_rs.cpp" />
<ClCompile Include="..\client\distlock.cpp" />
<ClCompile Include="..\client\model.cpp" />
- <ClCompile Include="..\pcre-7.4\pcrecpp.cc" />
- <ClCompile Include="..\pcre-7.4\pcre_chartables.c" />
- <ClCompile Include="..\pcre-7.4\pcre_compile.c" />
- <ClCompile Include="..\pcre-7.4\pcre_config.c" />
- <ClCompile Include="..\pcre-7.4\pcre_dfa_exec.c" />
- <ClCompile Include="..\pcre-7.4\pcre_exec.c" />
- <ClCompile Include="..\pcre-7.4\pcre_fullinfo.c" />
- <ClCompile Include="..\pcre-7.4\pcre_get.c" />
- <ClCompile Include="..\pcre-7.4\pcre_globals.c" />
- <ClCompile Include="..\pcre-7.4\pcre_info.c" />
- <ClCompile Include="..\pcre-7.4\pcre_maketables.c" />
- <ClCompile Include="..\pcre-7.4\pcre_newline.c" />
- <ClCompile Include="..\pcre-7.4\pcre_ord2utf8.c" />
- <ClCompile Include="..\pcre-7.4\pcre_refcount.c" />
- <ClCompile Include="..\pcre-7.4\pcre_scanner.cc" />
- <ClCompile Include="..\pcre-7.4\pcre_stringpiece.cc" />
- <ClCompile Include="..\pcre-7.4\pcre_study.c" />
- <ClCompile Include="..\pcre-7.4\pcre_tables.c" />
- <ClCompile Include="..\pcre-7.4\pcre_try_flipped.c" />
- <ClCompile Include="..\pcre-7.4\pcre_ucp_searchfuncs.c" />
- <ClCompile Include="..\pcre-7.4\pcre_valid_utf8.c" />
- <ClCompile Include="..\pcre-7.4\pcre_version.c" />
- <ClCompile Include="..\pcre-7.4\pcre_xclass.c" />
- <ClCompile Include="..\pcre-7.4\pcreposix.c" />
<ClCompile Include="..\scripting\bench.cpp" />
<ClCompile Include="..\shell\mongo_vstudio.cpp" />
<ClCompile Include="..\s\chunk.cpp" />
@@ -118,12 +94,10 @@
<ClCompile Include="nonce.cpp" />
<ClCompile Include="..\client\parallel.cpp" />
<ClCompile Include="pdfile.cpp" />
- <ClCompile Include="query.cpp" />
<ClCompile Include="queryoptimizer.cpp" />
<ClCompile Include="security.cpp" />
<ClCompile Include="security_commands.cpp" />
<ClCompile Include="tests.cpp" />
- <ClCompile Include="update.cpp" />
<ClCompile Include="cmdline.cpp" />
<ClCompile Include="queryutil.cpp" />
<ClCompile Include="..\util\assert_util.cpp" />
@@ -133,13 +107,8 @@
<ClCompile Include="..\util\ntservice.cpp" />
<ClCompile Include="..\util\processinfo_win32.cpp" />
<ClCompile Include="..\util\util.cpp" />
- <ClCompile Include="..\util\httpclient.cpp" />
- <ClCompile Include="..\util\miniwebserver.cpp" />
<ClCompile Include="..\util\md5.c" />
<ClCompile Include="..\util\md5main.cpp" />
- <ClCompile Include="..\util\message.cpp" />
- <ClCompile Include="..\util\message_server_port.cpp" />
- <ClCompile Include="..\util\sock.cpp" />
<ClCompile Include="..\s\d_logic.cpp" />
<ClCompile Include="..\scripting\engine.cpp" />
<ClCompile Include="..\scripting\engine_spidermonkey.cpp" />
@@ -153,8 +122,36 @@
<ClCompile Include="repl\rs.cpp" />
<ClCompile Include="repl\replset_commands.cpp" />
<ClCompile Include="repl\rs_config.cpp" />
- <ClCompile Include="security_key.cpp" />
<ClCompile Include="..\util\file_allocator.cpp" />
+ <ClCompile Include="querypattern.cpp" />
+ <ClCompile Include="..\util\ramlog.cpp" />
+ <ClCompile Include="key.cpp" />
+ <ClCompile Include="btreebuilder.cpp" />
+ <ClCompile Include="queryoptimizercursor.cpp" />
+ <ClCompile Include="record.cpp" />
+ <ClCompile Include="ops\delete.cpp" />
+ <ClCompile Include="ops\update.cpp" />
+ <ClCompile Include="security_common.cpp" />
+ <ClCompile Include="ops\query.cpp" />
+ <ClCompile Include="..\util\net\httpclient.cpp" />
+ <ClCompile Include="..\util\net\message.cpp" />
+ <ClCompile Include="..\util\net\message_server_port.cpp" />
+ <ClCompile Include="..\util\net\sock.cpp" />
+ <ClCompile Include="..\util\net\miniwebserver.cpp" />
+ <ClCompile Include="..\util\net\listen.cpp" />
+ <ClCompile Include="..\util\net\message_port.cpp" />
+ <ClCompile Include="dbmessage.cpp" />
+ <ClCompile Include="commands\find_and_modify.cpp" />
+ <ClCompile Include="..\util\compress.cpp">
+ <Filter>snappy</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\snappy\snappy-sinksource.cc">
+ <Filter>snappy</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\snappy\snappy.cc">
+ <Filter>snappy</Filter>
+ </ClCompile>
+ <ClCompile Include="scanandorder.cpp" />
</ItemGroup>
<ItemGroup>
<ClInclude Include="..\client\dbclientcursor.h" />
@@ -163,8 +160,6 @@
<ClInclude Include="..\client\parallel.h" />
<ClInclude Include="..\s\d_logic.h" />
<ClInclude Include="..\targetver.h" />
- <ClInclude Include="..\pcre-7.4\config.h" />
- <ClInclude Include="..\pcre-7.4\pcre.h" />
<ClInclude Include="..\util\concurrency\rwlock.h" />
<ClInclude Include="..\util\concurrency\msg.h" />
<ClInclude Include="..\util\concurrency\mutex.h" />
@@ -235,7 +230,6 @@
<ClInclude Include="resource.h" />
<ClInclude Include="scanandorder.h" />
<ClInclude Include="security.h" />
- <ClInclude Include="update.h" />
<ClInclude Include="..\util\allocator.h" />
<ClInclude Include="..\util\array.h" />
<ClInclude Include="..\util\assert_util.h" />
@@ -262,13 +256,8 @@
<ClInclude Include="..\util\concurrency\list.h" />
<ClInclude Include="..\util\concurrency\value.h" />
<ClInclude Include="..\util\web\html.h" />
- <ClInclude Include="..\util\httpclient.h" />
- <ClInclude Include="..\util\miniwebserver.h" />
<ClInclude Include="..\util\md5.h" />
<ClInclude Include="..\util\md5.hpp" />
- <ClInclude Include="..\util\message.h" />
- <ClInclude Include="..\util\message_server.h" />
- <ClInclude Include="..\util\sock.h" />
<ClInclude Include="..\scripting\engine.h" />
<ClInclude Include="..\scripting\engine_spidermonkey.h" />
<ClInclude Include="..\scripting\engine_v8.h" />
@@ -292,6 +281,28 @@
<ClInclude Include="..\bson\ordering.h" />
<ClInclude Include="dur_journalimpl.h" />
<ClInclude Include="..\util\concurrency\race.h" />
+ <ClInclude Include="..\util\alignedbuilder.h" />
+ <ClInclude Include="queryutil.h" />
+ <ClInclude Include="..\bson\bson.h" />
+ <ClInclude Include="..\bson\bson_db.h" />
+ <ClInclude Include="..\bson\bson-inl.h" />
+ <ClInclude Include="..\bson\inline_decls.h" />
+ <ClInclude Include="..\bson\stringdata.h" />
+ <ClInclude Include="..\bson\util\atomic_int.h" />
+ <ClInclude Include="..\bson\util\builder.h" />
+ <ClInclude Include="..\bson\util\misc.h" />
+ <ClInclude Include="ops\delete.h" />
+ <ClInclude Include="ops\update.h" />
+ <ClInclude Include="..\util\net\httpclient.h" />
+ <ClInclude Include="..\util\net\message.h" />
+ <ClInclude Include="..\util\net\message_server.h" />
+ <ClInclude Include="..\util\net\sock.h" />
+ <ClInclude Include="..\third_party\snappy\config.h">
+ <Filter>snappy</Filter>
+ </ClInclude>
+ <ClInclude Include="..\third_party\snappy\snappy.h">
+ <Filter>snappy</Filter>
+ </ClInclude>
</ItemGroup>
<ItemGroup>
<ResourceCompile Include="db.rc" />
@@ -326,4 +337,9 @@
<Library Include="..\..\js\js64d.lib" />
<Library Include="..\..\js\js64r.lib" />
</ItemGroup>
+ <ItemGroup>
+ <Filter Include="snappy">
+ <UniqueIdentifier>{bb99c086-7926-4f50-838d-f5f0c18397c0}</UniqueIdentifier>
+ </Filter>
+ </ItemGroup>
</Project> \ No newline at end of file
diff --git a/db/db_10.sln b/db/db_10.sln
index f74ac3d..12d62a8 100755
--- a/db/db_10.sln
+++ b/db/db_10.sln
@@ -7,10 +7,6 @@ Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "examples", "examples", "{40
EndProjectSection
EndProject
Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "tools", "tools", "{2B262D59-9DC7-4BF1-A431-1BD4966899A5}"
- ProjectSection(SolutionItems) = preProject
- ..\tools\export.cpp = ..\tools\export.cpp
- ..\tools\sniffer.cpp = ..\tools\sniffer.cpp
- EndProjectSection
EndProject
Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "unix files", "unix files", "{2F760952-C71B-4865-998F-AABAE96D1373}"
ProjectSection(SolutionItems) = preProject
@@ -19,8 +15,6 @@ Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "unix files", "unix files",
..\util\processinfo_none.cpp = ..\util\processinfo_none.cpp
EndProjectSection
EndProject
-Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "shell", "shell", "{407B4B88-3451-433C-B74F-31B31FEB5791}"
-EndProject
Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "other", "other", "{12B11474-2D74-48C3-BB3D-F03249BEA88F}"
EndProject
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mongod", "db.vcxproj", "{215B2D68-0A70-4D10-8E75-B31010C62A91}"
@@ -33,12 +27,7 @@ Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "bsondemo", "..\bson\bsondem
EndProject
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mongoutils test program", "..\util\mongoutils\mongoutils.vcxproj", "{7B84584E-92BC-4DB9-971B-A1A8F93E5053}"
EndProject
-Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "jstests", "jstests", "{F5ABFB2C-A34F-48C1-9B5F-01D456AF6C57}"
- ProjectSection(SolutionItems) = preProject
- ..\jstests\index_many.js = ..\jstests\index_many.js
- ..\jstests\indexapi.js = ..\jstests\indexapi.js
- ..\jstests\objid5.js = ..\jstests\objid5.js
- EndProjectSection
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "simple_client_demo", "..\client\examples\simple_client_demo.vcxproj", "{89C30BC3-2874-4F2C-B4DA-EB04E9782236}"
EndProject
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
@@ -120,6 +109,18 @@ Global
{7B84584E-92BC-4DB9-971B-A1A8F93E5053}.Release|Win32.ActiveCfg = Release|Win32
{7B84584E-92BC-4DB9-971B-A1A8F93E5053}.Release|Win32.Build.0 = Release|Win32
{7B84584E-92BC-4DB9-971B-A1A8F93E5053}.Release|x64.ActiveCfg = Release|Win32
+ {89C30BC3-2874-4F2C-B4DA-EB04E9782236}.Debug|Any CPU.ActiveCfg = Debug|Win32
+ {89C30BC3-2874-4F2C-B4DA-EB04E9782236}.Debug|Mixed Platforms.ActiveCfg = Debug|Win32
+ {89C30BC3-2874-4F2C-B4DA-EB04E9782236}.Debug|Mixed Platforms.Build.0 = Debug|Win32
+ {89C30BC3-2874-4F2C-B4DA-EB04E9782236}.Debug|Win32.ActiveCfg = Debug|Win32
+ {89C30BC3-2874-4F2C-B4DA-EB04E9782236}.Debug|Win32.Build.0 = Debug|Win32
+ {89C30BC3-2874-4F2C-B4DA-EB04E9782236}.Debug|x64.ActiveCfg = Debug|Win32
+ {89C30BC3-2874-4F2C-B4DA-EB04E9782236}.Release|Any CPU.ActiveCfg = Release|Win32
+ {89C30BC3-2874-4F2C-B4DA-EB04E9782236}.Release|Mixed Platforms.ActiveCfg = Release|Win32
+ {89C30BC3-2874-4F2C-B4DA-EB04E9782236}.Release|Mixed Platforms.Build.0 = Release|Win32
+ {89C30BC3-2874-4F2C-B4DA-EB04E9782236}.Release|Win32.ActiveCfg = Release|Win32
+ {89C30BC3-2874-4F2C-B4DA-EB04E9782236}.Release|Win32.Build.0 = Release|Win32
+ {89C30BC3-2874-4F2C-B4DA-EB04E9782236}.Release|x64.ActiveCfg = Release|Win32
EndGlobalSection
GlobalSection(SolutionProperties) = preSolution
HideSolutionNode = FALSE
@@ -127,10 +128,9 @@ Global
GlobalSection(NestedProjects) = preSolution
{2B262D59-9DC7-4BF1-A431-1BD4966899A5} = {12B11474-2D74-48C3-BB3D-F03249BEA88F}
{2F760952-C71B-4865-998F-AABAE96D1373} = {12B11474-2D74-48C3-BB3D-F03249BEA88F}
- {407B4B88-3451-433C-B74F-31B31FEB5791} = {12B11474-2D74-48C3-BB3D-F03249BEA88F}
{4082881B-EB00-486F-906C-843B8EC06E18} = {12B11474-2D74-48C3-BB3D-F03249BEA88F}
{C9DB5EB7-81AA-4185-BAA1-DA035654402F} = {12B11474-2D74-48C3-BB3D-F03249BEA88F}
{7B84584E-92BC-4DB9-971B-A1A8F93E5053} = {12B11474-2D74-48C3-BB3D-F03249BEA88F}
- {F5ABFB2C-A34F-48C1-9B5F-01D456AF6C57} = {12B11474-2D74-48C3-BB3D-F03249BEA88F}
+ {89C30BC3-2874-4F2C-B4DA-EB04E9782236} = {12B11474-2D74-48C3-BB3D-F03249BEA88F}
EndGlobalSection
EndGlobal
diff --git a/db/dbcommands.cpp b/db/dbcommands.cpp
index 59dd78c..31f4b7f 100644
--- a/db/dbcommands.cpp
+++ b/db/dbcommands.cpp
@@ -15,8 +15,13 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+/* SHARDING:
+ I believe this file is for mongod only.
+ See s/commands_public.cpp for mongos.
+*/
+
#include "pch.h"
-#include "query.h"
+#include "ops/query.h"
#include "pdfile.h"
#include "jsobj.h"
#include "../bson/util/builder.h"
@@ -26,10 +31,11 @@
#include "../util/lruishmap.h"
#include "../util/md5.hpp"
#include "../util/processinfo.h"
+#include "../util/ramlog.h"
#include "json.h"
#include "repl.h"
#include "repl_block.h"
-#include "replpair.h"
+#include "replutil.h"
#include "commands.h"
#include "db.h"
#include "instance.h"
@@ -45,7 +51,21 @@
namespace mongo {
- extern int otherTraceLevel;
+ namespace dur {
+ void setAgeOutJournalFiles(bool rotate);
+ }
+ /** @return true if fields found */
+ bool setParmsMongodSpecific(const string& dbname, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
+ BSONElement e = cmdObj["ageOutJournalFiles"];
+ if( !e.eoo() ) {
+ bool r = e.trueValue();
+ log() << "ageOutJournalFiles " << r << endl;
+ dur::setAgeOutJournalFiles(r);
+ return true;
+ }
+ return false;
+ }
+
void flushDiagLog();
/* reset any errors so that getlasterror comes back clean.
@@ -68,7 +88,7 @@ namespace mongo {
help << "reset error state (used with getpreverror)";
}
CmdResetError() : Command("resetError", false, "reseterror") {}
- bool run(const string& db, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ bool run(const string& db, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
LastError *le = lastError.get();
assert( le );
le->reset();
@@ -99,7 +119,7 @@ namespace mongo {
<< " { w:n } - await replication to n servers (including self) before returning\n"
<< " { wtimeout:m} - timeout for w in m milliseconds";
}
- bool run(const string& dbname, BSONObj& _cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ bool run(const string& dbname, BSONObj& _cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
LastError *le = lastError.disableForCommand();
bool err = false;
@@ -112,7 +132,7 @@ namespace mongo {
Client& c = cc();
c.appendLastOp( result );
- result.appendNumber( "connectionId" , c.getConnectionId() );
+ result.appendNumber( "connectionId" , c.getConnectionId() ); // for sharding; also useful in general for debugging
BSONObj cmdObj = _cmdObj;
{
@@ -139,7 +159,7 @@ namespace mongo {
else if ( cmdObj["fsync"].trueValue() ) {
Timer t;
if( !getDur().awaitCommit() ) {
- // if get here, not running with --dur
+ // if we get here, not running with --journal
log() << "fsync from getlasterror" << endl;
result.append( "fsyncFiles" , MemoryMappedFile::flushAll( true ) );
}
@@ -156,12 +176,10 @@ namespace mongo {
}
BSONElement e = cmdObj["w"];
- if ( e.isNumber() ) {
+ if ( e.ok() ) {
int timeout = cmdObj["wtimeout"].numberInt();
Timer t;
- int w = e.numberInt();
-
long long passes = 0;
char buf[32];
while ( 1 ) {
@@ -171,7 +189,7 @@ namespace mongo {
if ( anyReplEnabled() ) {
result.append( "wnote" , "no write has been done on this connection" );
}
- else if ( w <= 1 ) {
+ else if ( e.isNumber() && e.numberInt() <= 1 ) {
// don't do anything
// w=1 and no repl, so this is fine
}
@@ -185,8 +203,9 @@ namespace mongo {
}
// check this first for w=0 or w=1
- if ( opReplicatedEnough( op, w ) )
+ if ( opReplicatedEnough( op, e ) ) {
break;
+ }
// if replication isn't enabled (e.g., config servers)
if ( ! anyReplEnabled() ) {
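The hunk above lets non-numeric w values (for example tag or "majority" style values) flow through to opReplicatedEnough unchanged, while the wtimeout loop keeps polling until the write is replicated or the timeout elapses. A minimal stand-alone sketch of that wait pattern, with a hypothetical replicatedEnough predicate standing in for opReplicatedEnough:

```cpp
#include <chrono>
#include <functional>
#include <thread>

// 'replicatedEnough' is a hypothetical stand-in for opReplicatedEnough(op, e).
// Returns true once the predicate holds, false if wtimeoutMillis (> 0) expires first.
bool awaitReplication(const std::function<bool()>& replicatedEnough, int wtimeoutMillis) {
    using clock = std::chrono::steady_clock;
    const clock::time_point start = clock::now();
    while (true) {
        if (replicatedEnough())
            return true;
        if (wtimeoutMillis > 0 &&
            clock::now() - start >= std::chrono::milliseconds(wtimeoutMillis))
            return false;   // caller reports a wtimeout, as getlasterror does
        std::this_thread::sleep_for(std::chrono::milliseconds(1));
    }
}

int main() {
    int acks = 0;
    // pretend each poll sees one more acknowledging member
    return awaitReplication([&] { return ++acks >= 3; }, 2000) ? 0 : 1;
}
```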
@@ -230,7 +249,7 @@ namespace mongo {
return true;
}
CmdGetPrevError() : Command("getPrevError", false, "getpreverror") {}
- bool run(const string& dbname, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
LastError *le = lastError.disableForCommand();
le->appendSelf( result );
if ( le->valid )
@@ -241,6 +260,65 @@ namespace mongo {
}
} cmdGetPrevError;
+ CmdShutdown cmdShutdown;
+
+ void CmdShutdown::help( stringstream& help ) const {
+ help << "shutdown the database. must be ran against admin db and "
+ << "either (1) ran from localhost or (2) authenticated. If "
+ << "this is a primary in a replica set and there is no member "
+ << "within 10 seconds of its optime, it will not shutdown "
+ << "without force : true. You can also specify timeoutSecs : "
+ << "N to wait N seconds for other members to catch up.";
+ }
+
+ bool CmdShutdown::run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ bool force = cmdObj.hasField("force") && cmdObj["force"].trueValue();
+
+ if (!force && theReplSet && theReplSet->isPrimary()) {
+ long long timeout, now, start;
+ timeout = now = start = curTimeMicros64()/1000000;
+ if (cmdObj.hasField("timeoutSecs")) {
+ timeout += cmdObj["timeoutSecs"].numberLong();
+ }
+
+ OpTime lastOp = theReplSet->lastOpTimeWritten;
+ OpTime closest = theReplSet->lastOtherOpTime();
+ long long int diff = lastOp.getSecs() - closest.getSecs();
+ while (now <= timeout && (diff < 0 || diff > 10)) {
+ sleepsecs(1);
+ now++;
+
+ lastOp = theReplSet->lastOpTimeWritten;
+ closest = theReplSet->lastOtherOpTime();
+ diff = lastOp.getSecs() - closest.getSecs();
+ }
+
+ if (diff < 0 || diff > 10) {
+ errmsg = "no secondaries within 10 seconds of my optime";
+ result.append("closest", closest.getSecs());
+ result.append("difference", diff);
+ return false;
+ }
+
+ // step down
+ theReplSet->stepDown(120);
+
+ log() << "waiting for secondaries to catch up" << endl;
+
+ lastOp = theReplSet->lastOpTimeWritten;
+ while (lastOp != closest && now - start < 60) {
+ closest = theReplSet->lastOtherOpTime();
+
+ now++;
+ sleepsecs(1);
+ }
+
+ // regardless of whether they caught up, we'll shut down
+ }
+
+ return shutdownHelper();
+ }
+
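The new CmdShutdown::run only lets a primary shut down once some other member's optime is within 10 seconds of its own, sampling once per second for up to timeoutSecs before giving up. A sketch of that gate, assuming a hypothetical okToShutDown helper and a sample callback in place of the replica-set calls:

```cpp
#include <cstdio>
#include <functional>

struct OpTimes {
    long long primarySecs;            // lastOpTimeWritten, in seconds
    long long closestSecondarySecs;   // lastOtherOpTime, in seconds
};

// 'sample' stands in for re-reading the optimes once per second.
bool okToShutDown(const std::function<OpTimes()>& sample, long long timeoutSecs) {
    long long waited = 0;
    OpTimes t = sample();
    long long diff = t.primarySecs - t.closestSecondarySecs;
    while (waited <= timeoutSecs && (diff < 0 || diff > 10)) {
        ++waited;                     // the real loop sleeps one second here
        t = sample();
        diff = t.primarySecs - t.closestSecondarySecs;
    }
    return diff >= 0 && diff <= 10;   // else: "no secondaries within 10 seconds of my optime"
}

int main() {
    long long lag = 25;               // secondary starts 25 seconds behind and catches up
    bool ok = okToShutDown([&] { if (lag > 0) lag -= 5; return OpTimes{100, 100 - lag}; }, 30);
    std::printf("ok to shut down: %s\n", ok ? "yes" : "no");
}
```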
class CmdDropDatabase : public Command {
public:
virtual bool logTheOp() {
@@ -254,7 +332,7 @@ namespace mongo {
}
virtual LockType locktype() const { return WRITE; }
CmdDropDatabase() : Command("dropDatabase") {}
- bool run(const string& dbname, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
BSONElement e = cmdObj.firstElement();
log() << "dropDatabase " << dbname << endl;
int p = (int) e.number();
@@ -274,17 +352,20 @@ namespace mongo {
virtual bool slaveOk() const {
return true;
}
+ virtual bool maintenanceMode() const { return true; }
virtual void help( stringstream& help ) const {
help << "repair database. also compacts. note: slow.";
}
virtual LockType locktype() const { return WRITE; }
CmdRepairDatabase() : Command("repairDatabase") {}
- bool run(const string& dbname , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ bool run(const string& dbname , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
BSONElement e = cmdObj.firstElement();
log() << "repairDatabase " << dbname << endl;
int p = (int) e.number();
- if ( p != 1 )
+ if ( p != 1 ) {
+ errmsg = "bad option";
return false;
+ }
e = cmdObj.getField( "preserveClonedFilesOnFailure" );
bool preserveClonedFilesOnFailure = e.isBoolean() && e.boolean();
e = cmdObj.getField( "backupOriginalFiles" );
@@ -311,7 +392,7 @@ namespace mongo {
}
virtual LockType locktype() const { return WRITE; }
CmdProfile() : Command("profile") {}
- bool run(const string& dbname, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
BSONElement e = cmdObj.firstElement();
result.append("was", cc().database()->profile);
result.append("slowms", cmdLine.slowMS );
@@ -348,7 +429,7 @@ namespace mongo {
help << "returns lots of administrative server statistics";
}
- bool run(const string& dbname, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
long long start = Listener::getElapsedTimeMillis();
BSONObjBuilder timeBuilder(128);
@@ -407,9 +488,11 @@ namespace mongo {
t.append("bits", ( sizeof(int*) == 4 ? 32 : 64 ) );
ProcessInfo p;
+ int v = 0;
if ( p.supported() ) {
t.appendNumber( "resident" , p.getResidentSize() );
- t.appendNumber( "virtual" , p.getVirtualMemorySize() );
+ v = p.getVirtualMemorySize();
+ t.appendNumber( "virtual" , v );
t.appendBool( "supported" , true );
}
else {
@@ -419,7 +502,18 @@ namespace mongo {
timeBuilder.appendNumber( "middle of mem" , Listener::getElapsedTimeMillis() - start );
- t.appendNumber( "mapped" , MemoryMappedFile::totalMappedLength() / ( 1024 * 1024 ) );
+ int m = (int) (MemoryMappedFile::totalMappedLength() / ( 1024 * 1024 ));
+ t.appendNumber( "mapped" , m );
+
+ if ( cmdLine.dur ) {
+ m *= 2;
+ t.appendNumber( "mappedWithJournal" , m );
+ }
+
+ if( v - m > 5000 ) {
+ t.append("note", "virtual minus mapped is large. could indicate a memory leak");
+ log() << "warning: virtual size (" << v << "MB) - mapped size (" << m << "MB) is large. could indicate a memory leak" << endl;
+ }
t.done();
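The hunk above doubles the reported mapped size when journaling is on (the private journal view maps the data files a second time) and flags a virtual-minus-mapped gap over 5000 MB as a possible leak. A toy reproduction of that arithmetic with made-up numbers:

```cpp
#include <cstdio>

int main() {
    long long mappedMB  = 3200;   // MemoryMappedFile::totalMappedLength() / (1024*1024)
    long long virtualMB = 12000;  // ProcessInfo::getVirtualMemorySize(), in MB
    bool journaling = true;

    if (journaling)
        mappedMB *= 2;            // reported as "mappedWithJournal"

    if (virtualMB - mappedMB > 5000)
        std::printf("note: virtual (%lldMB) - mapped (%lldMB) is large; possible memory leak\n",
                    virtualMB, mappedMB);
    return 0;
}
```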
@@ -504,9 +598,27 @@ namespace mongo {
result.append("dur", dur::stats.asObj());
}
+ timeBuilder.appendNumber( "after dur" , Listener::getElapsedTimeMillis() - start );
+
+ {
+ RamLog* rl = RamLog::get( "warnings" );
+ verify(15880, rl);
+
+ if (rl->lastWrite() >= time(0)-(10*60)){ // only show warnings from last 10 minutes
+ vector<const char*> lines;
+ rl->get( lines );
+
+ BSONArrayBuilder arr( result.subarrayStart( "warnings" ) );
+ for ( unsigned i=std::max(0,(int)lines.size()-10); i<lines.size(); i++ )
+ arr.append( lines[i] );
+ arr.done();
+ }
+ }
+
if ( ! authed )
result.append( "note" , "run against admin for more info" );
-
+
+ timeBuilder.appendNumber( "at end" , Listener::getElapsedTimeMillis() - start );
if ( Listener::getElapsedTimeMillis() - start > 1000 ) {
BSONObj t = timeBuilder.obj();
log() << "serverStatus was very slow: " << t << endl;
@@ -526,7 +638,7 @@ namespace mongo {
virtual void help( stringstream& help ) const { help << "internal"; }
virtual LockType locktype() const { return NONE; }
CmdGetOpTime() : Command("getoptime") { }
- bool run(const string& dbname, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
writelock l( "" );
result.appendDate("optime", OpTime::now().asDate());
return true;
@@ -555,7 +667,7 @@ namespace mongo {
}
void help(stringstream& h) const { h << "http://www.mongodb.org/display/DOCS/Monitoring+and+Diagnostics#MonitoringandDiagnostics-DatabaseRecord%2FReplay"; }
virtual LockType locktype() const { return WRITE; }
- bool run(const string& dbname , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ bool run(const string& dbname , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
int was = _diaglog.setLevel( cmdObj.firstElement().numberInt() );
flushDiagLog();
if ( !cmdLine.quiet )
@@ -678,7 +790,7 @@ namespace mongo {
}
virtual void help( stringstream& help ) const { help << "drop a collection\n{drop : <collectionName>}"; }
virtual LockType locktype() const { return WRITE; }
- virtual bool run(const string& dbname , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ virtual bool run(const string& dbname , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
string nsToDrop = dbname + '.' + cmdObj.firstElement().valuestr();
NamespaceDetails *d = nsdetails(nsToDrop.c_str());
if ( !cmdLine.quiet )
@@ -702,7 +814,7 @@ namespace mongo {
return false;
}
virtual bool slaveOk() const {
- // ok on --slave setups, not ok for nonmaster of a repl pair (unless override)
+ // ok on --slave setups
return replSettings.slave == SimpleSlave;
}
virtual bool slaveOverrideOk() {
@@ -712,7 +824,7 @@ namespace mongo {
return false;
}
virtual void help( stringstream& help ) const { help << "count objects in collection"; }
- virtual bool run(const string& dbname, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ virtual bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
string ns = dbname + '.' + cmdObj.firstElement().valuestr();
string err;
long long n = runCount(ns.c_str(), cmdObj, err);
@@ -748,11 +860,14 @@ namespace mongo {
}
virtual LockType locktype() const { return WRITE; }
virtual void help( stringstream& help ) const {
- help << "create a collection";
+ help << "create a collection explicitly\n"
+ "{ create: <ns>[, capped: <bool>, size: <collSizeInBytes>, max: <nDocs>] }";
}
- virtual bool run(const string& dbname , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
+ virtual bool run(const string& dbname , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
+ uassert(15888, "must pass name of collection to create", cmdObj.firstElement().valuestrsafe()[0] != '\0');
string ns = dbname + '.' + cmdObj.firstElement().valuestr();
string err;
+ uassert(14832, "specify size:<n> when capped is true", !cmdObj["capped"].trueValue() || cmdObj["size"].isNumber() || cmdObj.hasField("$nExtents"));
bool ok = userCreateNS(ns.c_str(), cmdObj, err, ! fromRepl );
if ( !ok && !err.empty() )
errmsg = err;
@@ -774,7 +889,7 @@ namespace mongo {
help << "drop indexes for a collection";
}
CmdDropIndexes() : Command("dropIndexes", false, "deleteIndexes") { }
- bool run(const string& dbname, BSONObj& jsobj, string& errmsg, BSONObjBuilder& anObjBuilder, bool /*fromRepl*/) {
+ bool run(const string& dbname, BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& anObjBuilder, bool /*fromRepl*/) {
BSONElement e = jsobj.firstElement();
string toDeleteNs = dbname + '.' + e.valuestr();
NamespaceDetails *d = nsdetails(toDeleteNs.c_str());
@@ -819,7 +934,7 @@ namespace mongo {
help << "re-index a collection";
}
CmdReIndex() : Command("reIndex") { }
- bool run(const string& dbname , BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool /*fromRepl*/) {
+ bool run(const string& dbname , BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result, bool /*fromRepl*/) {
static DBDirectClient db;
BSONElement e = jsobj.firstElement();
@@ -837,7 +952,7 @@ namespace mongo {
auto_ptr<DBClientCursor> i = db.getIndexes( toDeleteNs );
BSONObjBuilder b;
while ( i->more() ) {
- BSONObj o = i->next().getOwned();
+ BSONObj o = i->next().removeField("v").getOwned();
b.append( BSONObjBuilder::numStr( all.size() ) , o );
all.push_back( o );
}
@@ -851,21 +966,9 @@ namespace mongo {
for ( list<BSONObj>::iterator i=all.begin(); i!=all.end(); i++ ) {
BSONObj o = *i;
- if ( o.getIntField("v") > 0 ) {
- BSONObjBuilder b;
- BSONObjIterator i( o );
- while ( i.more() ) {
- BSONElement e = i.next();
- if ( str::equals( e.fieldName() , "v" ) )
- continue;
- b.append( e );
- }
- o = b.obj();
- }
theDataFileMgr.insertWithObjMod( Namespace( toDeleteNs.c_str() ).getSisterNS( "system.indexes" ).c_str() , o , true );
}
- result.append( "ok" , 1 );
result.append( "nIndexes" , (int)all.size() );
result.appendArray( "indexes" , b.obj() );
return true;
@@ -883,10 +986,10 @@ namespace mongo {
virtual bool adminOnly() const {
return true;
}
- virtual LockType locktype() const { return READ; }
+ virtual LockType locktype() const { return NONE; }
virtual void help( stringstream& help ) const { help << "list databases on this server"; }
CmdListDatabases() : Command("listDatabases" , true ) {}
- bool run(const string& dbname , BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool /*fromRepl*/) {
+ bool run(const string& dbname , BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result, bool /*fromRepl*/) {
vector< string > dbNames;
getDatabaseNames( dbNames );
vector< BSONObj > dbInfos;
@@ -895,12 +998,18 @@ namespace mongo {
boost::intmax_t totalSize = 0;
for ( vector< string >::iterator i = dbNames.begin(); i != dbNames.end(); ++i ) {
BSONObjBuilder b;
- b.append( "name", i->c_str() );
+ b.append( "name", *i );
+
boost::intmax_t size = dbSize( i->c_str() );
b.append( "sizeOnDisk", (double) size );
- Client::Context ctx( *i );
- b.appendBool( "empty", ctx.db()->isEmpty() );
totalSize += size;
+
+ {
+ readlock lk( *i );
+ Client::Context ctx( *i );
+ b.appendBool( "empty", ctx.db()->isEmpty() );
+ }
+
dbInfos.push_back( b.obj() );
seen.insert( i->c_str() );
@@ -908,7 +1017,11 @@ namespace mongo {
// TODO: erh 1/1/2010 I think this is broken where path != dbpath ??
set<string> allShortNames;
- dbHolder.getAllShortNames( allShortNames );
+ {
+ readlock lk;
+ dbHolder.getAllShortNames( allShortNames );
+ }
+
for ( set<string>::iterator i = allShortNames.begin(); i != allShortNames.end(); i++ ) {
string name = *i;
@@ -916,9 +1029,14 @@ namespace mongo {
continue;
BSONObjBuilder b;
- b << "name" << name << "sizeOnDisk" << double( 1 );
- Client::Context ctx( name );
- b.appendBool( "empty", ctx.db()->isEmpty() );
+ b.append( "name" , name );
+ b.append( "sizeOnDisk" , (double)1.0 );
+
+ {
+ readlock lk( name );
+ Client::Context ctx( name );
+ b.appendBool( "empty", ctx.db()->isEmpty() );
+ }
dbInfos.push_back( b.obj() );
}
@@ -940,7 +1058,7 @@ namespace mongo {
virtual LockType locktype() const { return WRITE; }
CmdCloseAllDatabases() : Command( "closeAllDatabases" ) {}
- bool run(const string& dbname , BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool /*fromRepl*/) {
+ bool run(const string& dbname , BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result, bool /*fromRepl*/) {
bool ok;
try {
ok = dbHolder.closeAll( dbpath , result, false );
@@ -967,7 +1085,7 @@ namespace mongo {
help << " example: { filemd5 : ObjectId(aaaaaaa) , root : \"fs\" }";
}
virtual LockType locktype() const { return READ; }
- bool run(const string& dbname, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
+ bool run(const string& dbname, BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
string ns = dbname;
ns += ".";
{
@@ -986,7 +1104,7 @@ namespace mongo {
BSONObj sort = BSON( "files_id" << 1 << "n" << 1 );
shared_ptr<Cursor> cursor = bestGuessCursor(ns.c_str(), query, sort);
- scoped_ptr<ClientCursor> cc (new ClientCursor(QueryOption_NoCursorTimeout, cursor, ns.c_str()));
+ auto_ptr<ClientCursor> cc (new ClientCursor(QueryOption_NoCursorTimeout, cursor, ns.c_str()));
int n = 0;
while ( cursor->ok() ) {
@@ -1000,37 +1118,31 @@ namespace mongo {
BSONObj obj = cursor->current();
cursor->advance();
- ClientCursor::YieldLock yield (cc);
- try {
-
- BSONElement ne = obj["n"];
- assert(ne.isNumber());
- int myn = ne.numberInt();
- if ( n != myn ) {
- log() << "should have chunk: " << n << " have:" << myn << endl;
-
- DBDirectClient client;
- Query q(query);
- q.sort(sort);
- auto_ptr<DBClientCursor> c = client.query(ns, q);
- while(c->more())
- PRINT(c->nextSafe());
+ BSONElement ne = obj["n"];
+ assert(ne.isNumber());
+ int myn = ne.numberInt();
+ if ( n != myn ) {
+ log() << "should have chunk: " << n << " have:" << myn << endl;
+ dumpChunks( ns , query , sort );
+ uassert( 10040 , "chunks out of order" , n == myn );
+ }
- uassert( 10040 , "chunks out of order" , n == myn );
- }
+ int len;
+ const char * data = obj["data"].binDataClean( len );
- int len;
- const char * data = obj["data"].binDataClean( len );
+ ClientCursor::YieldLock yield (cc.get());
+ try {
md5_append( &st , (const md5_byte_t*)(data) , len );
-
n++;
}
catch (...) {
- yield.relock(); // needed before yield goes out of scope
+ if ( ! yield.stillOk() ) // relocks
+ cc.release();
throw;
}
if ( ! yield.stillOk() ) {
+ cc.release();
uasserted(13281, "File deleted during filemd5 command");
}
}
@@ -1041,6 +1153,15 @@ namespace mongo {
result.append( "md5" , digestToString( d ) );
return true;
}
+
+ void dumpChunks( const string& ns , const BSONObj& query , const BSONObj& sort ) {
+ DBDirectClient client;
+ Query q(query);
+ q.sort(sort);
+ auto_ptr<DBClientCursor> c = client.query(ns, q);
+ while(c->more())
+ PRINT(c->nextSafe());
+ }
} cmdFileMD5;
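The reworked filemd5 loop asserts that chunks arrive with consecutive n values, dumping them via dumpChunks before failing with "chunks out of order". The sketch below shows that ordering check over an in-memory chunk list; a trivial FNV-style rolling checksum stands in for the real md5_append state:

```cpp
#include <cstdint>
#include <cstdio>
#include <stdexcept>
#include <string>
#include <vector>

struct Chunk { int n; std::string data; };

uint64_t hashChunksInOrder(const std::vector<Chunk>& chunks) {
    uint64_t h = 1469598103934665603ull;        // FNV-1a offset basis, stand-in for the md5 state
    int expected = 0;
    for (const Chunk& c : chunks) {
        if (c.n != expected)
            throw std::runtime_error("chunks out of order");   // mirrors uassert(10040, ...)
        for (unsigned char byte : c.data) { h ^= byte; h *= 1099511628211ull; }
        ++expected;
    }
    return h;
}

int main() {
    std::vector<Chunk> chunks = { {0, "hello "}, {1, "world"} };
    std::printf("digest (stand-in): %llx\n", (unsigned long long)hashChunksInOrder(chunks));
}
```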
static IndexDetails *cmdIndexDetailsForRange( const char *ns, string &errmsg, BSONObj &min, BSONObj &max, BSONObj &keyPattern ) {
@@ -1063,7 +1184,7 @@ namespace mongo {
"\nkeyPattern, min, and max parameters are optional."
"\nnote: This command may take a while to run";
}
- bool run(const string& dbname, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
+ bool run(const string& dbname, BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
Timer timer;
string ns = jsobj.firstElement().String();
@@ -1103,7 +1224,7 @@ namespace mongo {
if ( idx == 0 )
return false;
- c.reset( new BtreeCursor( d, d->idxNo(*idx), *idx, min, max, false, 1 ) );
+ c.reset( BtreeCursor::make( d, d->idxNo(*idx), *idx, min, max, false, 1 ) );
}
long long avgObjSize = d->stats.datasize / d->stats.nrecords;
@@ -1178,9 +1299,10 @@ namespace mongo {
virtual bool slaveOk() const { return true; }
virtual LockType locktype() const { return READ; }
virtual void help( stringstream &help ) const {
- help << "{ collStats:\"blog.posts\" , scale : 1 } scale divides sizes e.g. for KB use 1024";
+ help << "{ collStats:\"blog.posts\" , scale : 1 } scale divides sizes e.g. for KB use 1024\n"
+ " avgObjSize - in bytes";
}
- bool run(const string& dbname, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
+ bool run(const string& dbname, BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
string ns = dbname + "." + jsobj.firstElement().valuestr();
Client::Context cx( ns );
@@ -1199,7 +1321,6 @@ namespace mongo {
errmsg = "scale has to be > 0";
return false;
}
-
}
else if ( jsobj["scale"].trueValue() ) {
errmsg = "scale has to be a number > 0";
@@ -1246,9 +1367,24 @@ namespace mongo {
virtual bool slaveOk() const { return true; }
virtual LockType locktype() const { return READ; }
virtual void help( stringstream &help ) const {
- help << " example: { dbStats:1 } ";
+ help <<
+ "Get stats on a database. Not instantaneous. Slower for databases with large .ns files.\n" <<
+ "Example: { dbStats:1, scale:1 }";
}
- bool run(const string& dbname, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
+ bool run(const string& dbname, BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
+ int scale = 1;
+ if ( jsobj["scale"].isNumber() ) {
+ scale = jsobj["scale"].numberInt();
+ if ( scale <= 0 ) {
+ errmsg = "scale has to be > 0";
+ return false;
+ }
+ }
+ else if ( jsobj["scale"].trueValue() ) {
+ errmsg = "scale has to be a number > 0";
+ return false;
+ }
+
list<string> collections;
Database* d = cc().database();
if ( d )
@@ -1288,12 +1424,14 @@ namespace mongo {
result.appendNumber( "collections" , ncollections );
result.appendNumber( "objects" , objects );
result.append ( "avgObjSize" , objects == 0 ? 0 : double(size) / double(objects) );
- result.appendNumber( "dataSize" , size );
- result.appendNumber( "storageSize" , storageSize);
+ result.appendNumber( "dataSize" , size / scale );
+ result.appendNumber( "storageSize" , storageSize / scale);
result.appendNumber( "numExtents" , numExtents );
result.appendNumber( "indexes" , indexes );
- result.appendNumber( "indexSize" , indexSize );
- result.appendNumber( "fileSize" , d->fileSize() );
+ result.appendNumber( "indexSize" , indexSize / scale );
+ result.appendNumber( "fileSize" , d->fileSize() / scale );
+ if( d )
+ result.appendNumber( "nsSizeMB", (int) d->namespaceIndex.fileLength() / 1024 / 1024 );
return true;
}
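dbStats now takes the same scale option as collStats: it must be a number greater than zero (default 1), and the raw byte counters are divided by it, so scale:1024 reports kilobytes. A small illustration of that validation and division, with invented field names:

```cpp
#include <cstdio>
#include <stdexcept>

struct Stats { long long dataSize, storageSize, indexSize, fileSize; };

Stats applyScale(Stats raw, long long scale) {
    if (scale <= 0)
        throw std::invalid_argument("scale has to be > 0");
    return { raw.dataSize / scale, raw.storageSize / scale,
             raw.indexSize / scale, raw.fileSize / scale };
}

int main() {
    Stats raw = { 5 * 1024 * 1024, 8 * 1024 * 1024, 1024 * 1024, 16 * 1024 * 1024 };
    Stats kb = applyScale(raw, 1024);
    std::printf("dataSize: %lld KB, fileSize: %lld KB\n", kb.dataSize, kb.fileSize);
}
```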
@@ -1308,7 +1446,7 @@ namespace mongo {
virtual void help( stringstream &help ) const {
help << "{ cloneCollectionAsCapped:<fromName>, toCollection:<toName>, size:<sizeInBytes> }";
}
- bool run(const string& dbname, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
+ bool run(const string& dbname, BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
string from = jsobj.getStringField( "cloneCollectionAsCapped" );
string to = jsobj.getStringField( "toCollection" );
long long size = (long long)jsobj.getField( "size" ).number();
@@ -1350,6 +1488,7 @@ namespace mongo {
while( c->more() ) {
BSONObj obj = c->next();
theDataFileMgr.insertAndLog( toNs.c_str(), obj, true );
+ getDur().commitIfNeeded();
}
return true;
@@ -1369,7 +1508,7 @@ namespace mongo {
virtual void help( stringstream &help ) const {
help << "{ convertToCapped:<fromCollectionName>, size:<sizeInBytes> }";
}
- bool run(const string& dbname, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
+ bool run(const string& dbname, BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
BackgroundOperation::assertNoBgOpInProgForDb(dbname.c_str());
string from = jsobj.getStringField( "convertToCapped" );
@@ -1411,116 +1550,6 @@ namespace mongo {
}
} cmdConvertToCapped;
- /* Find and Modify an object returning either the old (default) or new value*/
- class CmdFindAndModify : public Command {
- public:
- virtual void help( stringstream &help ) const {
- help <<
- "{ findAndModify: \"collection\", query: {processed:false}, update: {$set: {processed:true}}, new: true}\n"
- "{ findAndModify: \"collection\", query: {processed:false}, remove: true, sort: {priority:-1}}\n"
- "Either update or remove is required, all other fields have default values.\n"
- "Output is in the \"value\" field\n";
- }
-
- CmdFindAndModify() : Command("findAndModify", false, "findandmodify") { }
- virtual bool logTheOp() {
- return false; // the modification will be logged directly
- }
- virtual bool slaveOk() const {
- return false;
- }
- virtual LockType locktype() const { return WRITE; }
- virtual bool run(const string& dbname, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
- static DBDirectClient db;
-
- string ns = dbname + '.' + cmdObj.firstElement().valuestr();
-
- BSONObj origQuery = cmdObj.getObjectField("query"); // defaults to {}
- Query q (origQuery);
- BSONElement sort = cmdObj["sort"];
- if (!sort.eoo())
- q.sort(sort.embeddedObjectUserCheck());
-
- bool upsert = cmdObj["upsert"].trueValue();
-
- BSONObj fieldsHolder (cmdObj.getObjectField("fields"));
- const BSONObj* fields = (fieldsHolder.isEmpty() ? NULL : &fieldsHolder);
-
- BSONObj out = db.findOne(ns, q, fields);
- if (out.isEmpty()) {
- if (!upsert) {
- errmsg = "No matching object found";
- return false;
- }
-
- BSONElement update = cmdObj["update"];
- uassert(13329, "upsert mode requires update field", !update.eoo());
- uassert(13330, "upsert mode requires query field", !origQuery.isEmpty());
- db.update(ns, origQuery, update.embeddedObjectUserCheck(), true);
-
- BSONObj gle = db.getLastErrorDetailed();
- if (gle["err"].type() == String) {
- errmsg = gle["err"].String();
- return false;
- }
-
- if (cmdObj["new"].trueValue()) {
- BSONElement _id = gle["upserted"];
- if (_id.eoo())
- _id = origQuery["_id"];
-
- out = db.findOne(ns, QUERY("_id" << _id), fields);
- }
-
- }
- else {
-
- if (cmdObj["remove"].trueValue()) {
- uassert(12515, "can't remove and update", cmdObj["update"].eoo());
- db.remove(ns, QUERY("_id" << out["_id"]), 1);
-
- }
- else { // update
-
- BSONElement queryId = origQuery["_id"];
- if (queryId.eoo() || getGtLtOp(queryId) != BSONObj::Equality) {
- // need to include original query for $ positional operator
-
- BSONObjBuilder b;
- b.append(out["_id"]);
- BSONObjIterator it(origQuery);
- while (it.more()) {
- BSONElement e = it.next();
- if (strcmp(e.fieldName(), "_id"))
- b.append(e);
- }
- q = Query(b.obj());
- }
-
- if (q.isComplex()) // update doesn't work with complex queries
- q = Query(q.getFilter().getOwned());
-
- BSONElement update = cmdObj["update"];
- uassert(12516, "must specify remove or update", !update.eoo());
- db.update(ns, q, update.embeddedObjectUserCheck());
-
- BSONObj gle = db.getLastErrorDetailed();
- if (gle["err"].type() == String) {
- errmsg = gle["err"].String();
- return false;
- }
-
- if (cmdObj["new"].trueValue())
- out = db.findOne(ns, QUERY("_id" << out["_id"]), fields);
- }
- }
-
- result.append("value", out);
-
- return true;
- }
- } cmdFindAndModify;
-
/* Returns client's uri */
class CmdWhatsMyUri : public Command {
public:
@@ -1535,7 +1564,7 @@ namespace mongo {
virtual void help( stringstream &help ) const {
help << "{whatsmyuri:1}";
}
- virtual bool run(const string& dbname, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ virtual bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
BSONObj info = cc().curop()->infoNoauth();
result << "you" << info[ "client" ];
return true;
@@ -1550,7 +1579,7 @@ namespace mongo {
return true;
}
virtual bool slaveOk() const {
- return false;
+ return true;
}
virtual LockType locktype() const { return WRITE; }
virtual bool requiresAuth() {
@@ -1559,7 +1588,7 @@ namespace mongo {
virtual void help( stringstream &help ) const {
help << "internal. for testing only.";
}
- virtual bool run(const string& dbname, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ virtual bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
string coll = cmdObj[ "godinsert" ].valuestrsafe();
uassert( 13049, "godinsert must specify a collection", !coll.empty() );
string ns = dbname + "." + coll;
@@ -1574,7 +1603,7 @@ namespace mongo {
DBHashCmd() : Command( "dbHash", false, "dbhash" ) {}
virtual bool slaveOk() const { return true; }
virtual LockType locktype() const { return READ; }
- virtual bool run(const string& dbname , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ virtual bool run(const string& dbname , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
list<string> colls;
Database* db = cc().database();
if ( db )
@@ -1611,7 +1640,7 @@ namespace mongo {
int idNum = nsd->findIdIndex();
if ( idNum >= 0 ) {
- cursor.reset( new BtreeCursor( nsd , idNum , nsd->idx( idNum ) , BSONObj() , BSONObj() , false , 1 ) );
+ cursor.reset( BtreeCursor::make( nsd , idNum , nsd->idx( idNum ) , BSONObj() , BSONObj() , false , 1 ) );
}
else if ( c.find( ".system." ) != string::npos ) {
continue;
@@ -1620,9 +1649,8 @@ namespace mongo {
cursor = findTableScan( c.c_str() , BSONObj() );
}
else {
- bb.done();
- errmsg = (string)"can't find _id index for: " + c;
- return 0;
+ log() << "can't find _id index for: " << c << endl;
+ continue;
}
md5_state_t st;
@@ -1665,16 +1693,13 @@ namespace mongo {
virtual bool slaveOk() const { return true; }
virtual void help( stringstream& help ) const {
help << "internal testing command. Makes db block (in a read lock) for 100 seconds\n";
- help << "w:true write lock";
+ help << "w:true write lock. secs:<seconds>";
}
CmdSleep() : Command("sleep") { }
- bool run(const string& ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
-
-
+ bool run(const string& ns, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
int secs = 100;
if ( cmdObj["secs"].isNumber() )
secs = cmdObj["secs"].numberInt();
-
if( cmdObj.getBoolField("w") ) {
writelock lk("");
sleepsecs(secs);
@@ -1683,7 +1708,6 @@ namespace mongo {
readlock lk("");
sleepsecs(secs);
}
-
return true;
}
} cmdSleep;
@@ -1695,7 +1719,7 @@ namespace mongo {
virtual bool slaveOk() const { return false; }
virtual LockType locktype() const { return WRITE; }
virtual bool requiresAuth() { return true; }
- virtual bool run(const string& dbname , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ virtual bool run(const string& dbname , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
string coll = cmdObj[ "captrunc" ].valuestrsafe();
uassert( 13416, "captrunc must specify a collection", !coll.empty() );
string ns = dbname + "." + coll;
@@ -1722,7 +1746,7 @@ namespace mongo {
virtual bool slaveOk() const { return false; }
virtual LockType locktype() const { return WRITE; }
virtual bool requiresAuth() { return true; }
- virtual bool run(const string& dbname , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ virtual bool run(const string& dbname , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
string coll = cmdObj[ "emptycapped" ].valuestrsafe();
uassert( 13428, "emptycapped must specify a collection", !coll.empty() );
string ns = dbname + "." + coll;
@@ -1787,13 +1811,31 @@ namespace mongo {
if ( c->adminOnly() )
log( 2 ) << "command: " << cmdObj << endl;
+ if (c->maintenanceMode() && theReplSet && theReplSet->isSecondary()) {
+ theReplSet->setMaintenanceMode(true);
+ }
+
if ( c->locktype() == Command::NONE ) {
// we also trust that this won't crash
+
+ if ( c->requiresAuth() ) {
+ // test that the user has at least read permissions
+ if ( ! client.getAuthenticationInfo()->isAuthorizedReads( dbname ) ) {
+ result.append( "errmsg" , "need to login" );
+ return false;
+ }
+ }
+
client.curop()->ensureStarted();
string errmsg;
- int ok = c->run( dbname , cmdObj , errmsg , result , fromRepl );
+ int ok = c->run( dbname , cmdObj , queryOptions, errmsg , result , fromRepl );
if ( ! ok )
result.append( "errmsg" , errmsg );
+
+ if (c->maintenanceMode() && theReplSet) {
+ theReplSet->setMaintenanceMode(false);
+ }
+
return ok;
}
@@ -1807,11 +1849,13 @@ namespace mongo {
client.curop()->ensureStarted();
Client::Context ctx( dbname , dbpath , &lk , c->requiresAuth() );
+ bool retval = true;
+
try {
string errmsg;
- if ( ! c->run(dbname, cmdObj, errmsg, result, fromRepl ) ) {
+ if ( ! c->run(dbname, cmdObj, queryOptions, errmsg, result, fromRepl ) ) {
result.append( "errmsg" , errmsg );
- return false;
+ retval = false;
}
}
catch ( DBException& e ) {
@@ -1819,14 +1863,18 @@ namespace mongo {
ss << "exception: " << e.what();
result.append( "errmsg" , ss.str() );
result.append( "code" , e.getCode() );
- return false;
+ retval = false;
}
- if ( c->logTheOp() && ! fromRepl ) {
+ if ( retval && c->logTheOp() && ! fromRepl ) {
logOp("c", cmdns, cmdObj);
}
- return true;
+ if (c->maintenanceMode() && theReplSet) {
+ theReplSet->setMaintenanceMode(false);
+ }
+
+ return retval;
}
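The surrounding hunks set maintenance mode before running a maintenanceMode() command on a secondary and clear it on every exit path, which is why the write-lock branch now funnels its returns through retval. Purely as an illustration (with a hypothetical ReplSetLike type, not the server's classes), the same invariant can be expressed with an RAII guard:

```cpp
#include <cstdio>

// Hypothetical stand-in for the replica-set object used above.
struct ReplSetLike {
    bool secondary = true;
    bool isSecondary() const { return secondary; }
    void setMaintenanceMode(bool on) { std::printf("maintenance mode: %d\n", on); }
};

class MaintenanceModeGuard {
    ReplSetLike* _rs;
public:
    MaintenanceModeGuard(ReplSetLike* rs, bool commandWantsMaintenance)
        : _rs(commandWantsMaintenance && rs && rs->isSecondary() ? rs : nullptr) {
        if (_rs) _rs->setMaintenanceMode(true);
    }
    ~MaintenanceModeGuard() {
        if (_rs) _rs->setMaintenanceMode(false);   // runs on every exit path, exceptions included
    }
};

int main() {
    ReplSetLike rs;
    {
        MaintenanceModeGuard guard(&rs, /*command->maintenanceMode()=*/true);
        // ... run the command; early returns or exceptions still clear the flag ...
    }
}
```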
@@ -1850,7 +1898,10 @@ namespace mongo {
BSONObj jsobj;
{
BSONElement e = _cmdobj.firstElement();
- if ( e.type() == Object && string("query") == e.fieldName() ) {
+ if ( e.type() == Object && (e.fieldName()[0] == '$'
+ ? str::equals("query", e.fieldName()+1)
+ : str::equals("query", e.fieldName())))
+ {
jsobj = e.embeddedObject();
}
else {
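The final dbcommands.cpp hunk lets a command arrive wrapped as either { query: {...} } or { $query: {...} } by skipping a leading '$' before comparing the field name against "query". The check reduces to something like this sketch:

```cpp
#include <cstdio>
#include <cstring>

bool isQueryWrapper(const char* fieldName) {
    if (fieldName[0] == '$')
        ++fieldName;                        // tolerate the $-prefixed spelling
    return std::strcmp(fieldName, "query") == 0;
}

int main() {
    std::printf("%d %d %d\n", isQueryWrapper("query"), isQueryWrapper("$query"), isQueryWrapper("orderby"));
}
```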
diff --git a/db/dbcommands_admin.cpp b/db/dbcommands_admin.cpp
index 82a9c91..566027f 100644
--- a/db/dbcommands_admin.cpp
+++ b/db/dbcommands_admin.cpp
@@ -33,6 +33,7 @@
#include "../util/background.h"
#include "../util/logfile.h"
#include "../util/alignedbuilder.h"
+#include "../util/paths.h"
#include "../scripting/engine.h"
namespace mongo {
@@ -46,7 +47,7 @@ namespace mongo {
virtual void help(stringstream& h) const { h << "internal"; }
- bool run(const string& dbname, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
+ bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
string dropns = dbname + "." + cmdObj.firstElement().valuestrsafe();
if ( !cmdLine.quiet )
@@ -81,7 +82,7 @@ namespace mongo {
virtual bool adminOnly() const { return true; }
virtual void help(stringstream& h) const { h << "test how long to write and fsync to a test file in the journal/ directory"; }
- bool run(const string& dbname, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
+ bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
filesystem::path p = dur::getJournalDir();
p /= "journalLatencyTest";
@@ -133,6 +134,11 @@ namespace mongo {
}
catch(...) { }
+ try {
+ result.append("onSamePartition", onSamePartition(dur::getJournalDir().string(), dbpath));
+ }
+ catch(...) { }
+
return 1;
}
} journalLatencyTestCmd;
@@ -145,12 +151,13 @@ namespace mongo {
return true;
}
- virtual void help(stringstream& h) const { h << "Validate contents of a namespace by scanning its data structures for correctness. Slow."; }
+ virtual void help(stringstream& h) const { h << "Validate contents of a namespace by scanning its data structures for correctness. Slow.\n"
+ "Add full:true option to do a more thorough check"; }
virtual LockType locktype() const { return READ; }
- //{ validate: "collectionnamewithoutthedbpart" [, scandata: <bool>] } */
+ //{ validate: "collectionnamewithoutthedbpart" [, scandata: <bool>] [, full: <bool>] } */
- bool run(const string& dbname , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
+ bool run(const string& dbname , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
string ns = dbname + "." + cmdObj.firstElement().valuestrsafe();
NamespaceDetails * d = nsdetails( ns.c_str() );
if ( !cmdLine.quiet )
@@ -162,24 +169,27 @@ namespace mongo {
}
result.append( "ns", ns );
- result.append( "result" , validateNS( ns.c_str() , d, &cmdObj ) );
+ validateNS( ns.c_str() , d, cmdObj, result);
return 1;
}
+ private:
+ void validateNS(const char *ns, NamespaceDetails *d, const BSONObj& cmdObj, BSONObjBuilder& result) {
+ const bool full = cmdObj["full"].trueValue();
+ const bool scanData = full || cmdObj["scandata"].trueValue();
- string validateNS(const char *ns, NamespaceDetails *d, BSONObj *cmdObj) {
- bool scanData = true;
- if( cmdObj && cmdObj->hasElement("scandata") && !cmdObj->getBoolField("scandata") )
- scanData = false;
bool valid = true;
- stringstream ss;
- ss << "\nvalidate\n";
- //ss << " details: " << hex << d << " ofs:" << nsindex(ns)->detailsOffset(d) << dec << endl;
- if ( d->capped )
- ss << " capped:" << d->capped << " max:" << d->max << '\n';
-
- ss << " firstExtent:" << d->firstExtent.toString() << " ns:" << d->firstExtent.ext()->nsDiagnostic.toString()<< '\n';
- ss << " lastExtent:" << d->lastExtent.toString() << " ns:" << d->lastExtent.ext()->nsDiagnostic.toString() << '\n';
+ BSONArrayBuilder errors; // explanation(s) for why valid = false
+ if ( d->capped ){
+ result.append("capped", d->capped);
+ result.append("max", d->max);
+ }
+
+ result.append("firstExtent", str::stream() << d->firstExtent.toString() << " ns:" << d->firstExtent.ext()->nsDiagnostic.toString());
+ result.append( "lastExtent", str::stream() << d->lastExtent.toString() << " ns:" << d->lastExtent.ext()->nsDiagnostic.toString());
+
+ BSONArrayBuilder extentData;
+
try {
d->firstExtent.ext()->assertOk();
d->lastExtent.ext()->assertOk();
@@ -191,32 +201,46 @@ namespace mongo {
e->assertOk();
el = e->xnext;
ne++;
+ if ( full )
+ extentData << e->dump();
+
killCurrentOp.checkForInterrupt();
}
- ss << " # extents:" << ne << '\n';
+ result.append("extentCount", ne);
}
catch (...) {
valid=false;
- ss << " extent asserted ";
+ errors << "extent asserted";
}
- ss << " datasize?:" << d->stats.datasize << " nrecords?:" << d->stats.nrecords << " lastExtentSize:" << d->lastExtentSize << '\n';
- ss << " padding:" << d->paddingFactor << '\n';
+ if ( full )
+ result.appendArray( "extents" , extentData.arr() );
+
+
+ result.appendNumber("datasize", d->stats.datasize);
+ result.appendNumber("nrecords", d->stats.nrecords);
+ result.appendNumber("lastExtentSize", d->lastExtentSize);
+ result.appendNumber("padding", d->paddingFactor);
+
+
try {
try {
- ss << " first extent:\n";
- d->firstExtent.ext()->dump(ss);
- valid = valid && d->firstExtent.ext()->validates();
+ result.append("firstExtentDetails", d->firstExtent.ext()->dump());
+
+ valid = valid && d->firstExtent.ext()->validates() &&
+ d->firstExtent.ext()->xprev.isNull();
}
catch (...) {
- ss << "\n exception firstextent\n" << endl;
+ errors << "exception firstextent";
+ valid = false;
}
set<DiskLoc> recs;
if( scanData ) {
shared_ptr<Cursor> c = theDataFileMgr.findAll(ns);
int n = 0;
+ int nInvalid = 0;
long long len = 0;
long long nlen = 0;
int outOfOrder = 0;
@@ -236,27 +260,54 @@ namespace mongo {
Record *r = c->_current();
len += r->lengthWithHeaders;
nlen += r->netLength();
+
+ if (full){
+ BSONObj obj(r);
+ if (!obj.isValid() || !obj.valid()){ // both fast and deep checks
+ valid = false;
+ if (nInvalid == 0) // only log once;
+ errors << "invalid bson object detected (see logs for more info)";
+
+ nInvalid++;
+ if (strcmp("_id", obj.firstElementFieldName()) == 0){
+ try {
+ obj.firstElement().validate(); // throws on error
+ log() << "Invalid bson detected in " << ns << " with _id: " << obj.firstElement().toString(false) << endl;
+ }
+ catch(...){
+ log() << "Invalid bson detected in " << ns << " with corrupt _id" << endl;
+ }
+ }
+ else {
+ log() << "Invalid bson detected in " << ns << " and couldn't find _id" << endl;
+ }
+ }
+ }
+
c->advance();
}
if ( d->capped && !d->capLooped() ) {
- ss << " capped outOfOrder:" << outOfOrder;
+ result.append("cappedOutOfOrder", outOfOrder);
if ( outOfOrder > 1 ) {
valid = false;
- ss << " ???";
+ errors << "too many out of order records";
}
- else ss << " (OK)";
- ss << '\n';
}
- ss << " " << n << " objects found, nobj:" << d->stats.nrecords << '\n';
- ss << " " << len << " bytes data w/headers\n";
- ss << " " << nlen << " bytes data wout/headers\n";
+ result.append("objectsFound", n);
+
+ if (full) {
+ result.append("invalidObjects", nInvalid);
+ }
+
+ result.appendNumber("bytesWithHeaders", len);
+ result.appendNumber("bytesWithoutHeaders", nlen);
}
- ss << " deletedList: ";
+ BSONArrayBuilder deletedListArray;
for ( int i = 0; i < Buckets; i++ ) {
- ss << (d->deletedList[i].isNull() ? '0' : '1');
+ deletedListArray << d->deletedList[i].isNull();
}
- ss << endl;
+
int ndel = 0;
long long delSize = 0;
int incorrect = 0;
@@ -278,7 +329,9 @@ namespace mongo {
}
if ( loc.a() <= 0 || strstr(ns, "hudsonSmall") == 0 ) {
- ss << " ?bad deleted loc: " << loc.toString() << " bucket:" << i << " k:" << k << endl;
+ string err (str::stream() << "bad deleted loc: " << loc.toString() << " bucket:" << i << " k:" << k);
+ errors << err;
+
valid = false;
break;
}
@@ -292,47 +345,60 @@ namespace mongo {
}
}
catch (...) {
- ss <<" ?exception in deleted chain for bucket " << i << endl;
+ errors << ("exception in deleted chain for bucket " + BSONObjBuilder::numStr(i));
valid = false;
}
}
- ss << " deleted: n: " << ndel << " size: " << delSize << endl;
+ result.appendNumber("deletedCount", ndel);
+ result.appendNumber("deletedSize", delSize);
+
if ( incorrect ) {
- ss << " ?corrupt: " << incorrect << " records from datafile are in deleted list\n";
+ errors << (BSONObjBuilder::numStr(incorrect) + " records from datafile are in deleted list");
valid = false;
}
int idxn = 0;
try {
- ss << " nIndexes:" << d->nIndexes << endl;
+ result.append("nIndexes", d->nIndexes);
+ BSONObjBuilder indexes; // not using subObjStart to be exception safe
NamespaceDetails::IndexIterator i = d->ii();
while( i.more() ) {
IndexDetails& id = i.next();
- ss << " " << id.indexNamespace() << " keys:" <<
- id.head.btree()->fullValidate(id.head, id.keyPattern()) << endl;
+ long long keys = id.idxInterface().fullValidate(id.head, id.keyPattern());
+ indexes.appendNumber(id.indexNamespace(), keys);
}
+ result.append("keysPerIndex", indexes.done());
}
catch (...) {
- ss << "\n exception during index validate idxn:" << idxn << endl;
+ errors << ("exception during index validate idxn " + BSONObjBuilder::numStr(idxn));
valid=false;
}
}
catch (AssertionException) {
- ss << "\n exception during validate\n" << endl;
+ errors << "exception during validate";
valid = false;
}
- if ( !valid )
- ss << " ns corrupt, requires dbchk\n";
+ result.appendBool("valid", valid);
+ result.append("errors", errors.arr());
+
+ if ( !full ){
+ result.append("warning", "Some checks omitted for speed. use {full:true} option to do more thorough scan.");
+ }
+
+ if ( !valid ) {
+ result.append("advice", "ns corrupt, requires repair");
+ }
- return ss.str();
}
} validateCmd;
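The rewritten validate builds a structured document: counters are appended as numeric fields, every problem is pushed onto an errors array, and a single valid flag is flipped, replacing the old free-form report string. A compact model of that accumulation pattern (illustrative types only, not the BSON builders themselves):

```cpp
#include <cstdio>
#include <string>
#include <vector>

struct ValidateResult {
    bool valid = true;
    std::vector<std::string> errors;
    void problem(const std::string& why) { valid = false; errors.push_back(why); }
};

int main() {
    ValidateResult res;
    int outOfOrder = 3;                  // e.g. counted while walking a capped collection
    if (outOfOrder > 1)
        res.problem("too many out of order records");
    std::printf("valid: %s\n", res.valid ? "true" : "false");
    for (const std::string& e : res.errors)
        std::printf("error: %s\n", e.c_str());
}
```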
- extern bool unlockRequested;
- extern unsigned lockedForWriting;
- extern mongo::mutex lockedForWritingMutex;
+ bool lockedForWriting = false; // read from db/instance.cpp
+ static bool unlockRequested = false;
+ static mongo::mutex fsyncLockMutex("fsyncLock");
+ static boost::condition fsyncLockCondition;
+ static OID fsyncLockID; // identifies the current lock job
/*
class UnlockCommand : public Command {
@@ -360,6 +426,7 @@ namespace mongo {
db.$cmd.sys.unlock.findOne()
*/
class FSyncCommand : public Command {
+ static const char* url() { return "http://www.mongodb.org/display/DOCS/fsync+Command"; }
class LockDBJob : public BackgroundJob {
protected:
virtual string name() const { return "lockdbjob"; }
@@ -367,23 +434,26 @@ namespace mongo {
Client::initThread("fsyncjob");
Client& c = cc();
{
- scoped_lock lk(lockedForWritingMutex);
- lockedForWriting++;
+ scoped_lock lk(fsyncLockMutex);
+ while (lockedForWriting){ // there is a small window for two LockDBJobs to be active. This prevents it.
+ fsyncLockCondition.wait(lk.boost());
+ }
+ lockedForWriting = true;
+ fsyncLockID.init();
}
readlock lk("");
MemoryMappedFile::flushAll(true);
- log() << "db is now locked for snapshotting, no writes allowed. use db.$cmd.sys.unlock.findOne() to unlock" << endl;
+ log() << "db is now locked for snapshotting, no writes allowed. db.fsyncUnlock() to unlock" << endl;
+ log() << " For more info see " << FSyncCommand::url() << endl;
_ready = true;
- while( 1 ) {
- if( unlockRequested ) {
- unlockRequested = false;
- break;
- }
- sleepmillis(20);
- }
{
- scoped_lock lk(lockedForWritingMutex);
- lockedForWriting--;
+ scoped_lock lk(fsyncLockMutex);
+ while( !unlockRequested ) {
+ fsyncLockCondition.wait(lk.boost());
+ }
+ unlockRequested = false;
+ lockedForWriting = false;
+ fsyncLockCondition.notify_all();
}
c.shutdown();
}
@@ -402,8 +472,8 @@ namespace mongo {
string x = cmdObj["exec"].valuestrsafe();
return !x.empty();
}*/
- virtual void help(stringstream& h) const { h << "http://www.mongodb.org/display/DOCS/fsync+Command"; }
- virtual bool run(const string& dbname, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ virtual void help(stringstream& h) const { h << url(); }
+ virtual bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
bool sync = !cmdObj["async"].trueValue(); // async means do an fsync, but return immediately
bool lock = cmdObj["lock"].trueValue();
log() << "CMD fsync: sync:" << sync << " lock:" << lock << endl;
@@ -433,13 +503,19 @@ namespace mongo {
LockDBJob *l = new LockDBJob(ready);
dbMutex.releaseEarly();
+
+ // There is a narrow window for another lock request to come in
+ // here before the LockDBJob grabs the readlock. LockDBJob will
+ // ensure that the requests are serialized and never running
+ // concurrently
l->go();
// don't return until background thread has acquired the read lock
while( !ready ) {
sleepmillis(10);
}
- result.append("info", "now locked against writes, use db.$cmd.sys.unlock.findOne() to unlock");
+ result.append("info", "now locked against writes, use db.fsyncUnlock() to unlock");
+ result.append("seeAlso", url());
}
else {
// the simple fsync command case
@@ -453,7 +529,21 @@ namespace mongo {
} fsyncCmd;
-
-
+ // Note that this will only unlock the current lock. If another thread
+ // relocks before we return we still consider the unlocking successful.
+ // This is important because if two scripts are trying to fsync-lock, each
+ // one must be assured that, between the fsync return and the call to unlock,
+ // the database is fully locked
+ void unlockFsyncAndWait(){
+ scoped_lock lk(fsyncLockMutex);
+ if (lockedForWriting) { // could have handled another unlock before we grabbed the lock
+ OID curOp = fsyncLockID;
+ unlockRequested = true;
+ fsyncLockCondition.notify_all();
+ while (lockedForWriting && fsyncLockID == curOp){
+ fsyncLockCondition.wait( lk.boost() );
+ }
+ }
+ }
}
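The fsync lock rewrite replaces the 20 ms polling loop with a mutex, a condition variable, and a per-lock OID so that unlockFsyncAndWait only waits for the lock it observed, even if another fsync-lock follows immediately. A hedged sketch of that handshake using standard C++ primitives, with an integer generation counter in place of fsyncLockID (the real code holds a read lock and flushes files between the two waits):

```cpp
#include <condition_variable>
#include <mutex>

std::mutex m;
std::condition_variable cv;
bool lockedForWriting = false;
bool unlockRequested = false;
unsigned long long lockGeneration = 0;   // stands in for fsyncLockID

void lockForWriting() {                  // roughly the body of LockDBJob::run
    std::unique_lock<std::mutex> lk(m);
    cv.wait(lk, [] { return !lockedForWriting; });   // serialize overlapping lock jobs
    lockedForWriting = true;
    ++lockGeneration;
    cv.wait(lk, [] { return unlockRequested; });     // ... flush and hold the read lock meanwhile ...
    unlockRequested = false;
    lockedForWriting = false;
    cv.notify_all();
}

void unlockFsyncAndWait() {
    std::unique_lock<std::mutex> lk(m);
    if (lockedForWriting) {
        unsigned long long current = lockGeneration;
        unlockRequested = true;
        cv.notify_all();
        // wait until *this* lock is gone, even if someone re-locks immediately after
        cv.wait(lk, [&] { return !(lockedForWriting && lockGeneration == current); });
    }
}

int main() { return 0; }   // the two functions would run on different threads
```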
diff --git a/db/dbcommands_generic.cpp b/db/dbcommands_generic.cpp
index a555b6c..69b51c7 100644
--- a/db/dbcommands_generic.cpp
+++ b/db/dbcommands_generic.cpp
@@ -20,7 +20,7 @@
*/
#include "pch.h"
-#include "query.h"
+#include "ops/query.h"
#include "pdfile.h"
#include "jsobj.h"
#include "../bson/util/builder.h"
@@ -33,17 +33,17 @@
#include "json.h"
#include "repl.h"
#include "repl_block.h"
-#include "replpair.h"
+#include "replutil.h"
#include "commands.h"
#include "db.h"
#include "instance.h"
#include "lasterror.h"
#include "security.h"
-#include "queryoptimizer.h"
#include "../scripting/engine.h"
#include "stats/counters.h"
#include "background.h"
#include "../util/version.h"
+#include "../util/ramlog.h"
namespace mongo {
@@ -57,8 +57,9 @@ namespace mongo {
help << "get version #, etc.\n";
help << "{ buildinfo:1 }";
}
- bool run(const string& dbname, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
+ bool run(const string& dbname, BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
result << "version" << versionString << "gitVersion" << gitVersion() << "sysInfo" << sysInfo();
+ result << "versionArray" << versionArray;
result << "bits" << ( sizeof( int* ) == 4 ? 32 : 64 );
result.appendBool( "debug" , debug );
result.appendNumber("maxBsonObjectSize", BSONObjMaxUserSize);
@@ -87,7 +88,7 @@ namespace mongo {
help << " syncdelay\n";
help << "{ getParameter:'*' } to get everything\n";
}
- bool run(const string& dbname, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
+ bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
bool all = *cmdObj.firstElement().valuestrsafe() == '*';
int before = result.len();
@@ -116,6 +117,9 @@ namespace mongo {
}
} cmdGet;
+ // tempish
+ bool setParmsMongodSpecific(const string& dbname, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl );
+
class CmdSet : public Command {
public:
CmdSet() : Command( "setParameter" ) { }
@@ -123,37 +127,58 @@ namespace mongo {
virtual bool adminOnly() const { return true; }
virtual LockType locktype() const { return NONE; }
virtual void help( stringstream &help ) const {
- help << "set administrative option(s)\nexample:\n";
- help << "{ setParameter:1, notablescan:true }\n";
+ help << "set administrative option(s)\n";
+ help << "{ setParameter:1, <param>:<value> }\n";
help << "supported so far:\n";
- help << " notablescan\n";
+ help << " journalCommitInterval\n";
help << " logLevel\n";
+ help << " notablescan\n";
help << " quiet\n";
+ help << " syncdelay\n";
}
- bool run(const string& dbname, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
+ bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
int s = 0;
+ bool found = setParmsMongodSpecific(dbname, cmdObj, errmsg, result, fromRepl);
+ if( cmdObj.hasElement("journalCommitInterval") ) {
+ if( !cmdLine.dur ) {
+ errmsg = "journaling is off";
+ return false;
+ }
+ int x = (int) cmdObj["journalCommitInterval"].Number();
+ assert( x > 1 && x < 500 );
+ cmdLine.journalCommitInterval = x;
+ log() << "setParameter journalCommitInterval=" << x << endl;
+ s++;
+ }
if( cmdObj.hasElement("notablescan") ) {
- result.append("was", cmdLine.noTableScan);
+ assert( !cmdLine.isMongos() );
+ if( s == 0 )
+ result.append("was", cmdLine.noTableScan);
cmdLine.noTableScan = cmdObj["notablescan"].Bool();
s++;
}
if( cmdObj.hasElement("quiet") ) {
- result.append("was", cmdLine.quiet );
+ if( s == 0 )
+ result.append("was", cmdLine.quiet );
cmdLine.quiet = cmdObj["quiet"].Bool();
s++;
}
if( cmdObj.hasElement("syncdelay") ) {
- result.append("was", cmdLine.syncdelay );
+ assert( !cmdLine.isMongos() );
+ if( s == 0 )
+ result.append("was", cmdLine.syncdelay );
cmdLine.syncdelay = cmdObj["syncdelay"].Number();
s++;
}
if( cmdObj.hasElement( "logLevel" ) ) {
- result.append("was", logLevel );
+ if( s == 0 )
+ result.append("was", logLevel );
logLevel = cmdObj["logLevel"].numberInt();
s++;
}
if( cmdObj.hasElement( "replApplyBatchSize" ) ) {
- result.append("was", replApplyBatchSize );
+ if( s == 0 )
+ result.append("was", replApplyBatchSize );
BSONElement e = cmdObj["replApplyBatchSize"];
ParameterValidator * v = ParameterValidator::get( e.fieldName() );
assert( v );
@@ -163,8 +188,8 @@ namespace mongo {
s++;
}
- if( s == 0 ) {
- errmsg = "no option found to set, use '*' to get all ";
+ if( s == 0 && !found ) {
+ errmsg = "no option found to set, use help:true to see options ";
return false;
}
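The expanded setParameter handler records the previous value only for the first option it actually changes (a single "was" field cannot describe several) and counts how many options were set so it can report "no option found to set" when nothing matched. A toy version of that bookkeeping:

```cpp
#include <cstdio>
#include <map>
#include <string>

int main() {
    std::map<std::string, int> params  = { {"logLevel", 0}, {"syncdelay", 60} };
    std::map<std::string, int> request = { {"logLevel", 2}, {"syncdelay", 30} };

    int set = 0;
    for (const auto& kv : request) {
        auto it = params.find(kv.first);
        if (it == params.end())
            continue;
        if (set == 0)                              // only the first change reports "was"
            std::printf("was: %d\n", it->second);
        it->second = kv.second;
        ++set;
    }
    if (set == 0)
        std::printf("errmsg: no option found to set, use help:true to see options\n");
    return 0;
}
```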
@@ -179,7 +204,7 @@ namespace mongo {
virtual void help( stringstream &help ) const { help << "a way to check that the server is alive. responds immediately even if server is in a db lock."; }
virtual LockType locktype() const { return NONE; }
virtual bool requiresAuth() { return false; }
- virtual bool run(const string& badns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ virtual bool run(const string& badns, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
// IMPORTANT: Don't put anything in here that might lock db - including authentication
return true;
}
@@ -192,7 +217,7 @@ namespace mongo {
virtual bool slaveOk() const { return true; }
virtual bool readOnly() { return true; }
virtual LockType locktype() const { return NONE; }
- virtual bool run(const string& ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ virtual bool run(const string& ns, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
if ( globalScriptEngine ) {
BSONObjBuilder bb( result.subobjStart( "js" ) );
result.append( "utf8" , globalScriptEngine->utf8Ok() );
@@ -214,7 +239,7 @@ namespace mongo {
virtual LockType locktype() const { return NONE; }
virtual bool slaveOk() const { return true; }
virtual bool adminOnly() const { return true; }
- virtual bool run(const string& ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ virtual bool run(const string& ns, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
rotateLogs();
return 1;
}
@@ -228,7 +253,7 @@ namespace mongo {
virtual LockType locktype() const { return NONE; }
virtual bool slaveOk() const { return true; }
virtual bool adminOnly() const { return false; }
- virtual bool run(const string& ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ virtual bool run(const string& ns, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
BSONObjBuilder b( result.subobjStart( "commands" ) );
for ( map<string,Command*>::iterator i=_commands->begin(); i!=_commands->end(); ++i ) {
Command * c = i->second;
@@ -256,35 +281,18 @@ namespace mongo {
} listCommandsCmd;
- class CmdShutdown : public Command {
- public:
- virtual bool requiresAuth() { return true; }
- virtual bool adminOnly() const { return true; }
- virtual bool localHostOnlyIfNoAuth(const BSONObj& cmdObj) { return true; }
- virtual bool logTheOp() {
- return false;
- }
- virtual bool slaveOk() const {
- return true;
- }
- virtual LockType locktype() const { return NONE; }
- virtual void help( stringstream& help ) const {
- help << "shutdown the database. must be ran against admin db and either (1) ran from localhost or (2) authenticated.\n";
+ bool CmdShutdown::shutdownHelper() {
+ Client * c = currentClient.get();
+ if ( c ) {
+ c->shutdown();
}
- CmdShutdown() : Command("shutdown") {}
- bool run(const string& dbname, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
- Client * c = currentClient.get();
- if ( c ) {
- c->shutdown();
- }
- log() << "terminating, shutdown command received" << endl;
+ log() << "terminating, shutdown command received" << endl;
- dbexit( EXIT_CLEAN , "shutdown called" , true ); // this never returns
- assert(0);
- return true;
- }
- } cmdShutdown;
+ dbexit( EXIT_CLEAN , "shutdown called" , true ); // this never returns
+ assert(0);
+ return true;
+ }
/* for testing purposes only */
class CmdForceError : public Command {
@@ -300,7 +308,7 @@ namespace mongo {
}
virtual LockType locktype() const { return NONE; }
CmdForceError() : Command("forceerror") {}
- bool run(const string& dbnamne, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ bool run(const string& dbnamne, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
uassert( 10038 , "forced error", false);
return true;
}
@@ -312,11 +320,57 @@ namespace mongo {
virtual bool slaveOk() const { return true; }
virtual LockType locktype() const { return NONE; }
virtual bool requiresAuth() { return false; }
- virtual bool run(const string& dbname , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ virtual bool run(const string& dbname , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
result << "options" << QueryOption_AllSupported;
return true;
}
} availableQueryOptionsCmd;
+ class GetLogCmd : public Command {
+ public:
+ GetLogCmd() : Command( "getLog" ){}
+
+ virtual bool slaveOk() const { return true; }
+ virtual LockType locktype() const { return NONE; }
+ virtual bool requiresAuth() { return false; }
+ virtual bool adminOnly() const { return true; }
+
+ virtual void help( stringstream& help ) const {
+ help << "{ getLog : '*' } OR { getLog : 'global' }";
+ }
+
+ virtual bool run(const string& dbname , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
+ string p = cmdObj.firstElement().String();
+ if ( p == "*" ) {
+ vector<string> names;
+ RamLog::getNames( names );
+
+ BSONArrayBuilder arr;
+ for ( unsigned i=0; i<names.size(); i++ ) {
+ arr.append( names[i] );
+ }
+
+ result.appendArray( "names" , arr.arr() );
+ }
+ else {
+ RamLog* rl = RamLog::get( p );
+ if ( ! rl ) {
+ errmsg = str::stream() << "no RamLog named: " << p;
+ return false;
+ }
+
+ vector<const char*> lines;
+ rl->get( lines );
+
+ BSONArrayBuilder arr( result.subarrayStart( "log" ) );
+ for ( unsigned i=0; i<lines.size(); i++ )
+ arr.append( lines[i] );
+ arr.done();
+ }
+ return true;
+ }
+
+ } getLogCmd;
+
}
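The new GetLogCmd can be exercised the same way; a hedged sketch (reusing the hypothetical connection style from the setParameter sketch above):

    // sketch: enumerate RamLog names, then pull the "global" log lines
    void getLogSketch(DBClientConnection& c) {
        BSONObj res;
        c.runCommand("admin", BSON("getLog" << "*"), res);        // -> { names: [ ... ] }
        c.runCommand("admin", BSON("getLog" << "global"), res);   // -> { log: [ "...", ... ] }
    }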
diff --git a/db/dbeval.cpp b/db/dbeval.cpp
index 31d5260..5fe137f 100644
--- a/db/dbeval.cpp
+++ b/db/dbeval.cpp
@@ -18,7 +18,7 @@
*/
#include "pch.h"
-#include "query.h"
+#include "ops/query.h"
#include "pdfile.h"
#include "jsobj.h"
#include "../bson/util/builder.h"
@@ -86,7 +86,7 @@ namespace mongo {
int res;
{
Timer t;
- res = s->invoke(f,args, cmdLine.quota ? 10 * 60 * 1000 : 0 );
+ res = s->invoke(f, &args, 0, cmdLine.quota ? 10 * 60 * 1000 : 0 );
int m = t.millis();
if ( m > cmdLine.slowMS ) {
out() << "dbeval slow, time: " << dec << m << "ms " << dbName << endl;
@@ -121,7 +121,7 @@ namespace mongo {
}
virtual LockType locktype() const { return NONE; }
CmdEval() : Command("eval", false, "$eval") { }
- bool run(const string& dbname , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ bool run(const string& dbname , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
AuthenticationInfo *ai = cc().getAuthenticationInfo();
uassert( 12598 , "$eval reads unauthorized", ai->isAuthorizedReads(dbname.c_str()) );
diff --git a/db/dbhelpers.cpp b/db/dbhelpers.cpp
index 5e49589..cc4fdba 100644
--- a/db/dbhelpers.cpp
+++ b/db/dbhelpers.cpp
@@ -19,12 +19,13 @@
#include "pch.h"
#include "db.h"
#include "dbhelpers.h"
-#include "query.h"
#include "json.h"
#include "queryoptimizer.h"
#include "btree.h"
#include "pdfile.h"
#include "oplog.h"
+#include "ops/update.h"
+#include "ops/delete.h"
namespace mongo {
@@ -63,7 +64,7 @@ namespace mongo {
public:
FindOne( bool requireIndex ) : requireIndex_( requireIndex ) {}
virtual void _init() {
- if ( requireIndex_ && strcmp( qp().indexKey().firstElement().fieldName(), "$natural" ) == 0 )
+ if ( requireIndex_ && strcmp( qp().indexKey().firstElementFieldName(), "$natural" ) == 0 )
throw MsgAssertionException( 9011 , "Not an index cursor" );
c_ = qp().newCursor();
if ( !c_->ok() ) {
@@ -75,7 +76,7 @@ namespace mongo {
setComplete();
return;
}
- if ( matcher()->matches( c_->currKey(), c_->currLoc() ) ) {
+ if ( matcher( c_ )->matchesCurrent( c_.get() ) ) {
one_ = c_->current();
loc_ = c_->currLoc();
setStop();
@@ -148,7 +149,7 @@ namespace mongo {
BSONObj key = i.getKeyFromQuery( query );
- DiskLoc loc = i.head.btree()->findSingle( i , i.head , key );
+ DiskLoc loc = i.idxInterface().findSingle(i , i.head , key);
if ( loc.isNull() )
return false;
result = loc.obj();
@@ -160,7 +161,7 @@ namespace mongo {
uassert(13430, "no _id index", idxNo>=0);
IndexDetails& i = d->idx( idxNo );
BSONObj key = i.getKeyFromQuery( idquery );
- return i.head.btree()->findSingle( i , i.head , key );
+ return i.idxInterface().findSingle(i , i.head , key);
}
bool Helpers::isEmpty(const char *ns, bool doAuth) {
@@ -178,10 +179,13 @@ namespace mongo {
Client::Context context(ns);
shared_ptr<Cursor> c = DataFileMgr::findAll(ns);
- if ( !c->ok() )
+ if ( !c->ok() ) {
+ context.getClient()->curop()->done();
return false;
+ }
result = c->current();
+ context.getClient()->curop()->done();
return true;
}
@@ -208,12 +212,14 @@ namespace mongo {
OpDebug debug;
Client::Context context(ns);
updateObjects(ns, obj, /*pattern=*/BSONObj(), /*upsert=*/true, /*multi=*/false , /*logtheop=*/true , debug );
+ context.getClient()->curop()->done();
}
void Helpers::putSingletonGod(const char *ns, BSONObj obj, bool logTheOp) {
OpDebug debug;
Client::Context context(ns);
_updateObjects(/*god=*/true, ns, obj, /*pattern=*/BSONObj(), /*upsert=*/true, /*multi=*/false , logTheOp , debug );
+ context.getClient()->curop()->done();
}
BSONObj Helpers::toKeyFormat( const BSONObj& o , BSONObj& key ) {
@@ -248,11 +254,21 @@ namespace mongo {
IndexDetails& i = nsd->idx( ii );
- shared_ptr<Cursor> c( new BtreeCursor( nsd , ii , i , minClean , maxClean , maxInclusive, 1 ) );
+ shared_ptr<Cursor> c( BtreeCursor::make( nsd , ii , i , minClean , maxClean , maxInclusive, 1 ) );
auto_ptr<ClientCursor> cc( new ClientCursor( QueryOption_NoCursorTimeout , c , ns ) );
cc->setDoingDeletes( true );
while ( c->ok() ) {
+
+ if ( yield && ! cc->yieldSometimes( ClientCursor::WillNeed) ) {
+ // cursor got finished by someone else, so we're done
+ cc.release(); // if the collection/db is dropped, cc may be deleted
+ break;
+ }
+
+ if ( ! c->ok() )
+ break;
+
DiskLoc rloc = c->currLoc();
if ( callback )
@@ -269,11 +285,7 @@ namespace mongo {
getDur().commitIfNeeded();
- if ( yield && ! cc->yield() ) {
- // cursor got finished by someone else, so we're done
- cc.release(); // if the collection/db is dropped, cc may be deleted
- break;
- }
+
}
return num;
diff --git a/db/dbmessage.cpp b/db/dbmessage.cpp
new file mode 100644
index 0000000..c86b5a0
--- /dev/null
+++ b/db/dbmessage.cpp
@@ -0,0 +1,108 @@
+// dbmessage.cpp
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "dbmessage.h"
+#include "../client/dbclient.h"
+
+namespace mongo {
+
+ string Message::toString() const {
+ stringstream ss;
+ ss << "op: " << opToString( operation() ) << " len: " << size();
+ if ( operation() >= 2000 && operation() < 2100 ) {
+ DbMessage d(*this);
+ ss << " ns: " << d.getns();
+ switch ( operation() ) {
+ case dbUpdate: {
+ int flags = d.pullInt();
+ BSONObj q = d.nextJsObj();
+ BSONObj o = d.nextJsObj();
+ ss << " flags: " << flags << " query: " << q << " update: " << o;
+ break;
+ }
+ case dbInsert:
+ ss << d.nextJsObj();
+ break;
+ case dbDelete: {
+ int flags = d.pullInt();
+ BSONObj q = d.nextJsObj();
+ ss << " flags: " << flags << " query: " << q;
+ break;
+ }
+ default:
+ ss << " CANNOT HANDLE YET";
+ }
+
+
+ }
+ return ss.str();
+ }
+
+
+ void replyToQuery(int queryResultFlags,
+ AbstractMessagingPort* p, Message& requestMsg,
+ void *data, int size,
+ int nReturned, int startingFrom,
+ long long cursorId
+ ) {
+ BufBuilder b(32768);
+ b.skip(sizeof(QueryResult));
+ b.appendBuf(data, size);
+ QueryResult *qr = (QueryResult *) b.buf();
+ qr->_resultFlags() = queryResultFlags;
+ qr->len = b.len();
+ qr->setOperation(opReply);
+ qr->cursorId = cursorId;
+ qr->startingFrom = startingFrom;
+ qr->nReturned = nReturned;
+ b.decouple();
+ Message resp(qr, true);
+ p->reply(requestMsg, resp, requestMsg.header()->id);
+ }
+
+ void replyToQuery(int queryResultFlags,
+ AbstractMessagingPort* p, Message& requestMsg,
+ BSONObj& responseObj) {
+ replyToQuery(queryResultFlags,
+ p, requestMsg,
+ (void *) responseObj.objdata(), responseObj.objsize(), 1);
+ }
+
+ void replyToQuery(int queryResultFlags, Message &m, DbResponse &dbresponse, BSONObj obj) {
+ BufBuilder b;
+ b.skip(sizeof(QueryResult));
+ b.appendBuf((void*) obj.objdata(), obj.objsize());
+ QueryResult* msgdata = (QueryResult *) b.buf();
+ b.decouple();
+ QueryResult *qr = msgdata;
+ qr->_resultFlags() = queryResultFlags;
+ qr->len = b.len();
+ qr->setOperation(opReply);
+ qr->cursorId = 0;
+ qr->startingFrom = 0;
+ qr->nReturned = 1;
+ Message *resp = new Message();
+ resp->setData(msgdata, true); // transport will free
+ dbresponse.response = resp;
+ dbresponse.responseTo = m.header()->id;
+ }
+
+
+
+}
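Since the replyToQuery() helpers are now out-of-line in this new file, here is a hedged sketch of the typical caller pattern (hypothetical handler; ResultFlag_ErrSet is assumed from client/constants.h):

    // sketch: answer a request with a single error document via the DbResponse overload
    void replyWithError(Message& m, DbResponse& dbresponse, const string& why) {
        BSONObj err = BSON("$err" << why << "ok" << 0.0);
        replyToQuery(ResultFlag_ErrSet, m, dbresponse, err);   // builds the opReply and wires it into dbresponse
    }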
diff --git a/db/dbmessage.h b/db/dbmessage.h
index cc1d1d8..a789bff 100644
--- a/db/dbmessage.h
+++ b/db/dbmessage.h
@@ -1,3 +1,5 @@
+// dbmessage.h
+
/**
* Copyright (C) 2008 10gen Inc.
*
@@ -19,8 +21,9 @@
#include "diskloc.h"
#include "jsobj.h"
#include "namespace-inl.h"
-#include "../util/message.h"
+#include "../util/net/message.h"
#include "../client/constants.h"
+#include "instance.h"
namespace mongo {
@@ -34,7 +37,48 @@ namespace mongo {
list of marshalled JSObjects;
*/
- extern bool objcheck;
+/* db request message format
+
+       unsigned opid;         // arbitrary; will be echoed back
+ byte operation;
+ int options;
+
+ then for:
+
+ dbInsert:
+ string collection;
+ a series of JSObjects
+ dbDelete:
+ string collection;
+ int flags=0; // 1=DeleteSingle
+ JSObject query;
+ dbUpdate:
+ string collection;
+ int flags; // 1=upsert
+ JSObject query;
+ JSObject objectToUpdate;
+ objectToUpdate may include { $inc: <field> } or { $set: ... }, see struct Mod.
+ dbQuery:
+ string collection;
+ int nToSkip;
+ int nToReturn; // how many you want back as the beginning of the cursor data (0=no limit)
+ // greater than zero is simply a hint on how many objects to send back per "cursor batch".
+ // a negative number indicates a hard limit.
+ JSObject query;
+ [JSObject fieldsToReturn]
+ dbGetMore:
+ string collection; // redundant, might use for security.
+ int nToReturn;
+ int64 cursorID;
+ dbKillCursors=2007:
+ int n;
+ int64 cursorIDs[n];
+
+ Note that on Update, there is only one object, which is different
+ from insert where you can pass a list of objects to insert in the db.
+   Note that the update field layout is very similar to that of Query.
+*/
+
#pragma pack(1)
struct QueryResult : public MsgData {
@@ -53,7 +97,11 @@ namespace mongo {
void setResultFlagsToOk() {
_resultFlags() = ResultFlag_AwaitCapable;
}
+ void initializeResultFlags() {
+ _resultFlags() = 0;
+ }
};
+
#pragma pack()
/* For the database/server protocol, these objects and functions encapsulate
@@ -72,7 +120,11 @@ namespace mongo {
nextjsobj = data;
}
- /** the 32 bit field before the ns */
+ /** the 32 bit field before the ns
+            * track all bit usage here as it's cross-op
+ * 0: InsertOption_ContinueOnError
+ * 1: fromWriteback
+ */
int& reservedField() { return *reserved; }
const char * getns() const {
@@ -150,7 +202,7 @@ namespace mongo {
massert( 10305 , "Client Error: Invalid object size", js.objsize() > 3 );
massert( 10306 , "Client Error: Next object larger than space left in message",
js.objsize() < ( theEnd - data ) );
- if ( objcheck && !js.valid() ) {
+ if ( cmdLine.objcheck && !js.valid() ) {
massert( 10307 , "Client Error: bad object in message", false);
}
nextjsobj += js.objsize();
@@ -178,6 +230,12 @@ namespace mongo {
const char *theEnd;
const char * mark;
+
+ public:
+ enum ReservedOptions {
+ Reserved_InsertOption_ContinueOnError = 1 << 0 ,
+ Reserved_FromWriteback = 1 << 1
+ };
};
@@ -204,70 +262,21 @@ namespace mongo {
}
};
-} // namespace mongo
+ void replyToQuery(int queryResultFlags,
+ AbstractMessagingPort* p, Message& requestMsg,
+ void *data, int size,
+ int nReturned, int startingFrom = 0,
+ long long cursorId = 0
+ );
-#include "../client/dbclient.h"
-
-namespace mongo {
-
- inline void replyToQuery(int queryResultFlags,
- AbstractMessagingPort* p, Message& requestMsg,
- void *data, int size,
- int nReturned, int startingFrom = 0,
- long long cursorId = 0
- ) {
- BufBuilder b(32768);
- b.skip(sizeof(QueryResult));
- b.appendBuf(data, size);
- QueryResult *qr = (QueryResult *) b.buf();
- qr->_resultFlags() = queryResultFlags;
- qr->len = b.len();
- qr->setOperation(opReply);
- qr->cursorId = cursorId;
- qr->startingFrom = startingFrom;
- qr->nReturned = nReturned;
- b.decouple();
- Message resp(qr, true);
- p->reply(requestMsg, resp, requestMsg.header()->id);
- }
-
-} // namespace mongo
-
-//#include "bsonobj.h"
-
-#include "instance.h"
-
-namespace mongo {
/* object reply helper. */
- inline void replyToQuery(int queryResultFlags,
- AbstractMessagingPort* p, Message& requestMsg,
- BSONObj& responseObj) {
- replyToQuery(queryResultFlags,
- p, requestMsg,
- (void *) responseObj.objdata(), responseObj.objsize(), 1);
- }
+ void replyToQuery(int queryResultFlags,
+ AbstractMessagingPort* p, Message& requestMsg,
+ BSONObj& responseObj);
/* helper to do a reply using a DbResponse object */
- inline void replyToQuery(int queryResultFlags, Message &m, DbResponse &dbresponse, BSONObj obj) {
- BufBuilder b;
- b.skip(sizeof(QueryResult));
- b.appendBuf((void*) obj.objdata(), obj.objsize());
- QueryResult* msgdata = (QueryResult *) b.buf();
- b.decouple();
- QueryResult *qr = msgdata;
- qr->_resultFlags() = queryResultFlags;
- qr->len = b.len();
- qr->setOperation(opReply);
- qr->cursorId = 0;
- qr->startingFrom = 0;
- qr->nReturned = 1;
- Message *resp = new Message();
- resp->setData(msgdata, true); // transport will free
- dbresponse.response = resp;
- dbresponse.responseTo = m.header()->id;
- }
-
- string debugString( Message& m );
+ void replyToQuery(int queryResultFlags, Message &m, DbResponse &dbresponse, BSONObj obj);
+
} // namespace mongo
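To tie the request-format comment above to the DbMessage accessors, a hedged decoding sketch (hypothetical helper; getns()/nextJsObj() appear in Message::toString(), moreJSObjs() is assumed from the existing DbMessage interface):

    // sketch: walk the documents of a dbInsert message per the layout documented above
    void dumpInsert(Message& m) {
        DbMessage d(m);                      // consumes the reserved int and the ns string
        const char* ns = d.getns();
        while( d.moreJSObjs() ) {            // an insert may carry a batch of documents
            BSONObj doc = d.nextJsObj();
            log() << "insert into " << ns << ": " << doc << endl;
        }
    }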
diff --git a/db/dbwebserver.cpp b/db/dbwebserver.cpp
index 7aa6148..78c09c0 100644
--- a/db/dbwebserver.cpp
+++ b/db/dbwebserver.cpp
@@ -20,7 +20,7 @@
*/
#include "pch.h"
-#include "../util/miniwebserver.h"
+#include "../util/net/miniwebserver.h"
#include "../util/mongoutils/html.h"
#include "../util/md5.hpp"
#include "db.h"
@@ -31,7 +31,7 @@
#include "commands.h"
#include "../util/version.h"
#include "../util/ramlog.h"
-#include <pcrecpp.h>
+#include "pcrecpp.h"
#include "../util/admin_access.h"
#include "dbwebserver.h"
#include <boost/date_time/posix_time/posix_time.hpp>
@@ -61,7 +61,7 @@ namespace mongo {
class DbWebServer : public MiniWebServer {
public:
DbWebServer(const string& ip, int port, const AdminAccess* webUsers)
- : MiniWebServer(ip, port), _webUsers(webUsers) {
+ : MiniWebServer("admin web console", ip, port), _webUsers(webUsers) {
WebStatusPlugin::initAll();
}
@@ -148,7 +148,7 @@ namespace mongo {
if ( ! allowed( rq , headers, from ) ) {
responseCode = 401;
- headers.push_back( "Content-Type: text/plain" );
+ headers.push_back( "Content-Type: text/plain;charset=utf-8" );
responseMsg = "not allowed\n";
return;
}
@@ -187,7 +187,7 @@ namespace mongo {
}
responseCode = 404;
- headers.push_back( "Content-Type: text/html" );
+ headers.push_back( "Content-Type: text/html;charset=utf-8" );
responseMsg = "<html><body>unknown url</body></html>\n";
return;
}
@@ -196,6 +196,7 @@ namespace mongo {
if ( ! allowed( rq , headers, from ) ) {
responseCode = 401;
+ headers.push_back( "Content-Type: text/plain;charset=utf-8" );
responseMsg = "not allowed\n";
return;
}
@@ -248,6 +249,7 @@ namespace mongo {
ss << "</body></html>\n";
responseMsg = ss.str();
+ headers.push_back( "Content-Type: text/html;charset=utf-8" );
}
void _rejectREST( string& responseMsg , int& responseCode, vector<string>& headers ) {
@@ -256,7 +258,7 @@ namespace mongo {
ss << "REST is not enabled. use --rest to turn on.\n";
ss << "check that port " << _port << " is secured for the network too.\n";
responseMsg = ss.str();
- headers.push_back( "Content-Type: text/plain" );
+ headers.push_back( "Content-Type: text/plain;charset=utf-8" );
}
};
@@ -312,9 +314,11 @@ namespace mongo {
}
virtual void init() {
- assert( ! _log );
- _log = new RamLog();
- Logstream::get().addGlobalTee( _log );
+ _log = RamLog::get( "global" );
+ if ( ! _log ) {
+ _log = new RamLog("global");
+ Logstream::get().addGlobalTee( _log );
+ }
}
virtual void run( stringstream& ss ) {
@@ -374,7 +378,7 @@ namespace mongo {
string& responseMsg, int& responseCode,
vector<string>& headers, const SockAddr &from ) {
responseCode = 404;
- headers.push_back( "Content-Type: text/plain" );
+ headers.push_back( "Content-Type: text/plain;charset=utf-8" );
responseMsg = "no favicon\n";
}
@@ -387,7 +391,7 @@ namespace mongo {
virtual void handle( const char *rq, string url, BSONObj params,
string& responseMsg, int& responseCode,
vector<string>& headers, const SockAddr &from ) {
- headers.push_back( "Content-Type: application/json" );
+ headers.push_back( "Content-Type: application/json;charset=utf-8" );
responseCode = 200;
static vector<string> commands;
@@ -420,7 +424,7 @@ namespace mongo {
string errmsg;
BSONObjBuilder sub;
- if ( ! c->run( "admin.$cmd" , co , errmsg , sub , false ) )
+ if ( ! c->run( "admin.$cmd" , co , 0, errmsg , sub , false ) )
buf.append( cmd , errmsg );
else
buf.append( cmd , sub.obj() );
@@ -439,7 +443,7 @@ namespace mongo {
virtual void handle( const char *rq, string url, BSONObj params,
string& responseMsg, int& responseCode,
vector<string>& headers, const SockAddr &from ) {
- headers.push_back( "Content-Type: text/html" );
+ headers.push_back( "Content-Type: text/html;charset=utf-8" );
responseCode = 200;
stringstream ss;
@@ -509,11 +513,11 @@ namespace mongo {
responseMsg = j;
if( text ) {
- headers.push_back( "Content-Type: text/plain" );
+ headers.push_back( "Content-Type: text/plain;charset=utf-8" );
responseMsg += '\n';
}
else {
- headers.push_back( "Content-Type: application/json" );
+ headers.push_back( "Content-Type: application/json;charset=utf-8" );
}
}
@@ -527,7 +531,6 @@ namespace mongo {
Client::initThread("websvr");
const int p = cmdLine.port + 1000;
DbWebServer mini(cmdLine.bind_ip, p, adminAccessPtr.get());
- log() << "web admin interface listening on port " << p << endl;
mini.initAndListen();
cc().shutdown();
}
diff --git a/db/diskloc.h b/db/diskloc.h
index f356c73..e717556 100644
--- a/db/diskloc.h
+++ b/db/diskloc.h
@@ -29,26 +29,28 @@ namespace mongo {
class Record;
class DeletedRecord;
class Extent;
- class BtreeBucket;
class MongoDataFile;
+ template< class Version > class BtreeBucket;
+
#pragma pack(1)
/** represents a disk location/offset on disk in a database. 64 bits.
it is assumed these will be passed around by value a lot so don't do anything to make them large
(such as adding a virtual function)
*/
class DiskLoc {
- int _a; // this will be volume, file #, etc. but is a logical value could be anything depending on storage engine
+        int _a;      // this will be volume, file #, etc. but is a logical value that could be anything depending on storage engine
int ofs;
public:
enum SentinelValues {
+ /* note NullOfs is different. todo clean up. see refs to NullOfs in code - use is valid but outside DiskLoc context so confusing as-is. */
NullOfs = -1,
MaxFiles=16000 // thus a limit of about 32TB of data per db
};
- DiskLoc(int a, int b) : _a(a), ofs(b) { }
+ DiskLoc(int a, int Ofs) : _a(a), ofs(Ofs) { }
DiskLoc() { Null(); }
DiskLoc(const DiskLoc& l) {
_a=l._a;
@@ -139,9 +141,13 @@ namespace mongo {
Record* rec() const;
DeletedRecord* drec() const;
Extent* ext() const;
- const BtreeBucket* btree() const;
+
+ template< class V >
+ const BtreeBucket<V> * btree() const;
+
// Explicitly signals we are writing and casts away const
- BtreeBucket* btreemod() const;
+ template< class V >
+ BtreeBucket<V> * btreemod() const;
/*MongoDataFile& pdf() const;*/
};
diff --git a/db/driverHelpers.cpp b/db/driverHelpers.cpp
index d98a33b..12aa018 100644
--- a/db/driverHelpers.cpp
+++ b/db/driverHelpers.cpp
@@ -46,7 +46,7 @@ namespace mongo {
class ObjectIdTest : public BasicDriverHelper {
public:
ObjectIdTest() : BasicDriverHelper( "driverOIDTest" ) {}
- virtual bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ virtual bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
if ( cmdObj.firstElement().type() != jstOID ) {
errmsg = "not oid";
return false;
diff --git a/db/dur.cpp b/db/dur.cpp
index 15b4565..4861773 100644
--- a/db/dur.cpp
+++ b/db/dur.cpp
@@ -17,7 +17,7 @@
*/
/*
- phases
+ phases:
PREPLOGBUFFER
we will build an output buffer ourself and then use O_DIRECT
@@ -36,6 +36,22 @@
there could be a slow down immediately after remapping as fresh copy-on-writes for commonly written pages will
be required. so doing these remaps fractionally is helpful.
+ mutexes:
+
+ READLOCK dbMutex
+ LOCK groupCommitMutex
+ PREPLOGBUFFER()
+ READLOCK mmmutex
+ commitJob.reset()
+ UNLOCK dbMutex // now other threads can write
+ WRITETOJOURNAL()
+ WRITETODATAFILES()
+ UNLOCK mmmutex
+ UNLOCK groupCommitMutex
+
+ on the next write lock acquisition for dbMutex: // see MongoMutex::_acquiredWriteLock()
+ REMAPPRIVATEVIEW()
+
@see https://docs.google.com/drawings/edit?id=1TklsmZzm7ohIZkwgeK6rMvsdaR13KjtJYMsfLr175Zc
*/
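The ordering above is what the new _groupCommitWithLimitedLocks() further down in this diff implements; a condensed skeleton of the same sequence, for orientation only:

    // skeleton only -- see _groupCommitWithLimitedLocks() below for the real code
    scoped_ptr<readlocktry> lk1( new readlocktry("", 500) );    // READLOCK dbMutex (with timeout)
    if( lk1->got() ) {
        scoped_lock lk2(groupCommitMutex);                      // LOCK groupCommitMutex
        JSectHeader h;
        PREPLOGBUFFER(h);
        RWLockRecursive::Shared lk3(MongoFile::mmmutex);        // READLOCK mmmutex
        commitJob.reset();
        lk1.reset();                                            // UNLOCK dbMutex -- other threads may write now
        WRITETOJOURNAL(h, commitJob._ab);
        WRITETODATAFILES(h, commitJob._ab);
    }   // lk3 then lk2 release here; REMAPPRIVATEVIEW happens on the next dbMutex write lock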
@@ -46,11 +62,11 @@
#include "dur_journal.h"
#include "dur_commitjob.h"
#include "dur_recover.h"
+#include "dur_stats.h"
#include "../util/concurrency/race.h"
#include "../util/mongoutils/hash.h"
#include "../util/mongoutils/str.h"
#include "../util/timer.h"
-#include "dur_stats.h"
using namespace mongoutils;
@@ -58,8 +74,9 @@ namespace mongo {
namespace dur {
- void WRITETODATAFILES();
- void PREPLOGBUFFER();
+ void PREPLOGBUFFER(JSectHeader& outParm);
+ void WRITETOJOURNAL(JSectHeader h, AlignedBuilder& uncompressed);
+ void WRITETODATAFILES(const JSectHeader& h, AlignedBuilder& uncompressed);
/** declared later in this file
only used in this file -- use DurableInterface::commitNow() outside
@@ -84,12 +101,36 @@ namespace mongo {
Stats::S * Stats::other() {
return curr == &_a ? &_b : &_a;
}
+ string _CSVHeader();
+
+ string Stats::S::_CSVHeader() {
+ return "cmts jrnMB\twrDFMB\tcIWLk\tearly\tprpLgB wrToJ\twrToDF\trmpPrVw";
+ }
+
+ string Stats::S::_asCSV() {
+ stringstream ss;
+ ss <<
+ setprecision(2) <<
+ _commits << '\t' << fixed <<
+ _journaledBytes / 1000000.0 << '\t' <<
+ _writeToDataFilesBytes / 1000000.0 << '\t' <<
+ _commitsInWriteLock << '\t' <<
+ _earlyCommits << '\t' <<
+ (unsigned) (_prepLogBufferMicros/1000) << '\t' <<
+ (unsigned) (_writeToJournalMicros/1000) << '\t' <<
+ (unsigned) (_writeToDataFilesMicros/1000) << '\t' <<
+ (unsigned) (_remapPrivateViewMicros/1000);
+ return ss.str();
+ }
+ //int getAgeOutJournalFiles();
BSONObj Stats::S::_asObj() {
- return BSON(
+ BSONObjBuilder b;
+ b <<
"commits" << _commits <<
"journaledMB" << _journaledBytes / 1000000.0 <<
"writeToDataFilesMB" << _writeToDataFilesBytes / 1000000.0 <<
+ "compression" << _journaledBytes / (_uncompressedBytes+1.0) <<
"commitsInWriteLock" << _commitsInWriteLock <<
"earlyCommits" << _earlyCommits <<
"timeMs" <<
@@ -98,8 +139,15 @@ namespace mongo {
"writeToJournal" << (unsigned) (_writeToJournalMicros/1000) <<
"writeToDataFiles" << (unsigned) (_writeToDataFilesMicros/1000) <<
"remapPrivateView" << (unsigned) (_remapPrivateViewMicros/1000)
- )
- );
+ );
+ /*int r = getAgeOutJournalFiles();
+ if( r == -1 )
+ b << "ageOutJournalFiles" << "mutex timeout";
+ if( r == 0 )
+ b << "ageOutJournalFiles" << false;*/
+ if( cmdLine.journalCommitInterval != 0 )
+ b << "journalCommitIntervalMs" << cmdLine.journalCommitInterval;
+ return b.obj();
}
BSONObj Stats::asObj() {
@@ -123,14 +171,22 @@ namespace mongo {
}
void DurableImpl::setNoJournal(void *dst, void *src, unsigned len) {
+ // we are at least read locked, so we need not worry about REMAPPRIVATEVIEW herein.
+ DEV dbMutex.assertAtLeastReadLocked();
+
MemoryMappedFile::makeWritable(dst, len);
+ // we enter the RecoveryJob mutex here, so that if WRITETODATAFILES is happening we do not
+ // conflict with it
+ scoped_lock lk1( RecoveryJob::get()._mx );
+
// we stay in this mutex for everything to work with DurParanoid/validateSingleMapMatches
//
- // this also makes setNoJournal threadsafe, which is good as we call it from a read (not a write) lock
- // in class SlaveTracking
+ // either of these mutexes also makes setNoJournal threadsafe, which is good as we call it from a read
+ // (not a write) lock in class SlaveTracking
//
scoped_lock lk( privateViews._mutex() );
+
size_t ofs;
MongoMMF *f = privateViews.find_inlock(dst, ofs);
assert(f);
@@ -171,7 +227,7 @@ namespace mongo {
}
bool DurableImpl::awaitCommit() {
- commitJob.awaitNextCommit();
+ commitJob._notify.awaitBeyondNow();
return true;
}
@@ -211,7 +267,15 @@ namespace mongo {
return p;
}
+ bool DurableImpl::aCommitIsNeeded() const {
+ DEV commitJob._nSinceCommitIfNeededCall = 0;
+ return commitJob.bytes() > UncommittedBytesLimit;
+ }
+
bool DurableImpl::commitIfNeeded() {
+ if ( ! dbMutex.isWriteLocked() ) // we implicitly commit if needed when releasing write lock
+ return false;
+
DEV commitJob._nSinceCommitIfNeededCall = 0;
if (commitJob.bytes() > UncommittedBytesLimit) { // should this also fire if CmdLine::DurAlwaysCommit?
stats.curr->_earlyCommits++;
@@ -259,7 +323,7 @@ namespace mongo {
return;
}
}
- log() << "dur data after write area " << i.start() << " does not agree" << endl;
+ log() << "journal data after write area " << i.start() << " does not agree" << endl;
log() << " was: " << ((void*)b) << " " << hexdump((char*)b, 8) << endl;
log() << " now: " << ((void*)a) << " " << hexdump((char*)a, 8) << endl;
log() << " n: " << n << endl;
@@ -268,15 +332,6 @@ namespace mongo {
}
#endif
- /** write the buffer we have built to the journal and fsync it.
- outside of lock as that could be slow.
- */
- static void WRITETOJOURNAL(AlignedBuilder& ab) {
- Timer t;
- journal(ab);
- stats.curr->_writeToJournalMicros += t.micros();
- }
-
// Functor to be called over all MongoFiles
class validateSingleMapMatches {
@@ -285,8 +340,8 @@ namespace mongo {
void operator () (MongoFile *mf) {
if( mf->isMongoMMF() ) {
MongoMMF *mmf = (MongoMMF*) mf;
- const char *p = (const char *) mmf->getView();
- const char *w = (const char *) mmf->view_write();
+ const unsigned char *p = (const unsigned char *) mmf->getView();
+ const unsigned char *w = (const unsigned char *) mmf->view_write();
if (!p || !w) return; // File not fully opened yet
@@ -310,6 +365,8 @@ namespace mongo {
log() << endl; // separate blocks of mismatches
lastMismatch= i;
if( ++logged < 60 ) {
+ if( logged == 1 )
+ log() << "ofs % 628 = 0x" << hex << (i%628) << endl; // for .ns files to find offset in record
stringstream ss;
ss << "mismatch ofs:" << hex << i << "\tfilemap:" << setw(2) << (unsigned) w[i] << "\tprivmap:" << setw(2) << (unsigned) p[i];
if( p[i] > 32 && p[i] <= 126 )
@@ -324,7 +381,7 @@ namespace mongo {
}
if( low != 0xffffffff ) {
std::stringstream ss;
- ss << "dur error warning views mismatch " << mmf->filename() << ' ' << (hex) << low << ".." << high << " len:" << high-low+1;
+ ss << "journal error warning views mismatch " << mmf->filename() << ' ' << (hex) << low << ".." << high << " len:" << high-low+1;
log() << ss.str() << endl;
log() << "priv loc: " << (void*)(p+low) << ' ' << endl;
set<WriteIntent>& b = commitJob.writes();
@@ -357,6 +414,9 @@ namespace mongo {
Call within write lock.
*/
void _REMAPPRIVATEVIEW() {
+ // todo: Consider using ProcessInfo herein and watching for getResidentSize to drop. that could be a way
+ // to assure very good behavior here.
+
static unsigned startAt;
static unsigned long long lastRemap;
@@ -370,9 +430,11 @@ namespace mongo {
// remapping.
unsigned long long now = curTimeMicros64();
double fraction = (now-lastRemap)/2000000.0;
+ if( cmdLine.durOptions & CmdLine::DurAlwaysRemap )
+ fraction = 1;
lastRemap = now;
- rwlock lk(MongoFile::mmmutex, false);
+ RWLockRecursive::Shared lk(MongoFile::mmmutex);
set<MongoFile*>& files = MongoFile::getAllFiles();
unsigned sz = files.size();
if( sz == 0 )
@@ -422,11 +484,79 @@ namespace mongo {
stats.curr->_remapPrivateViewMicros += t.micros();
}
+ // lock order: dbMutex first, then this
mutex groupCommitMutex("groupCommit");
- /** locking: in read lock when called. */
+ bool _groupCommitWithLimitedLocks() {
+ scoped_ptr<readlocktry> lk1( new readlocktry("", 500) );
+ if( !lk1->got() )
+ return false;
+
+ scoped_lock lk2(groupCommitMutex);
+
+ commitJob.beginCommit();
+
+ if( !commitJob.hasWritten() ) {
+            // getlasterror request could have come after the data was already committed
+ commitJob.notifyCommitted();
+ return true;
+ }
+ JSectHeader h;
+ PREPLOGBUFFER(h);
+
+ RWLockRecursive::Shared lk3(MongoFile::mmmutex);
+
+ unsigned abLen = commitJob._ab.len();
+ commitJob.reset(); // must be reset before allowing anyone to write
+ DEV assert( !commitJob.hasWritten() );
+
+ // release the readlock -- allowing others to now write while we are writing to the journal (etc.)
+ lk1.reset();
+
+ // ****** now other threads can do writes ******
+ WRITETOJOURNAL(h, commitJob._ab);
+ assert( abLen == commitJob._ab.len() ); // a check that no one touched the builder while we were doing work. if so, our locking is wrong.
+
+ // data is now in the journal, which is sufficient for acknowledging getLastError.
+ // (ok to crash after that)
+ commitJob.notifyCommitted();
+
+ WRITETODATAFILES(h, commitJob._ab);
+ assert( abLen == commitJob._ab.len() ); // check again wasn't modded
+ commitJob._ab.reset();
+
+ // can't : dbMutex._remapPrivateViewRequested = true;
+
+ return true;
+ }
+
+ /** @return true if committed; false if lock acquisition timed out (we only try for a read lock herein and only wait for a certain duration). */
+ bool groupCommitWithLimitedLocks() {
+ try {
+ return _groupCommitWithLimitedLocks();
+ }
+ catch(DBException& e ) {
+ log() << "dbexception in groupCommitLL causing immediate shutdown: " << e.toString() << endl;
+ mongoAbort("dur1");
+ }
+ catch(std::ios_base::failure& e) {
+ log() << "ios_base exception in groupCommitLL causing immediate shutdown: " << e.what() << endl;
+ mongoAbort("dur2");
+ }
+ catch(std::bad_alloc& e) {
+ log() << "bad_alloc exception in groupCommitLL causing immediate shutdown: " << e.what() << endl;
+ mongoAbort("dur3");
+ }
+ catch(std::exception& e) {
+ log() << "exception in dur::groupCommitLL causing immediate shutdown: " << e.what() << endl;
+ mongoAbort("dur4");
+ }
+ return false;
+ }
+
+ /** locking: in read lock when called. */
static void _groupCommit() {
- stats.curr->_commits++;
+ commitJob.beginCommit();
if( !commitJob.hasWritten() ) {
// getlasterror request could have come after the data was already committed
@@ -438,20 +568,23 @@ namespace mongo {
// (and we are only read locked in the dbMutex, so it could happen)
scoped_lock lk(groupCommitMutex);
- PREPLOGBUFFER();
+ JSectHeader h;
+ PREPLOGBUFFER(h);
// todo : write to the journal outside locks, as this write can be slow.
// however, be careful then about remapprivateview as that cannot be done
// if new writes are then pending in the private maps.
- WRITETOJOURNAL(commitJob._ab);
+ WRITETOJOURNAL(h, commitJob._ab);
// data is now in the journal, which is sufficient for acknowledging getLastError.
// (ok to crash after that)
commitJob.notifyCommitted();
- WRITETODATAFILES();
+ WRITETODATAFILES(h, commitJob._ab);
+ debugValidateAllMapsMatch();
commitJob.reset();
+ commitJob._ab.reset();
// REMAPPRIVATEVIEW
//
@@ -463,7 +596,7 @@ namespace mongo {
// this needs done in a write lock (as there is a short window during remapping when each view
// might not exist) thus we do it on the next acquisition of that instead of here (there is no
// rush if you aren't writing anyway -- but it must happen, if it is done, before any uncommitted
- // writes occur). If desired, perhpas this can be eliminated on posix as it may be that the remap
+ // writes occur). If desired, perhaps this can be eliminated on posix as it may be that the remap
// is race-free there.
//
dbMutex._remapPrivateViewRequested = true;
@@ -478,7 +611,8 @@ namespace mongo {
}
}
- /** locking in read lock when called
+ /** locking: in read lock when called
+ or, for early commits (commitIfNeeded), in write lock
@see MongoMMF::close()
*/
static void groupCommit() {
@@ -491,29 +625,33 @@ namespace mongo {
}
catch(DBException& e ) {
log() << "dbexception in groupCommit causing immediate shutdown: " << e.toString() << endl;
- abort();
+ mongoAbort("gc1");
}
catch(std::ios_base::failure& e) {
log() << "ios_base exception in groupCommit causing immediate shutdown: " << e.what() << endl;
- abort();
+ mongoAbort("gc2");
}
catch(std::bad_alloc& e) {
log() << "bad_alloc exception in groupCommit causing immediate shutdown: " << e.what() << endl;
- abort();
+ mongoAbort("gc3");
}
catch(std::exception& e) {
log() << "exception in dur::groupCommit causing immediate shutdown: " << e.what() << endl;
- abort(); // based on myTerminate()
+ mongoAbort("gc4");
}
}
static void go() {
- if( !commitJob.hasWritten() ){
- commitJob.notifyCommitted();
- return;
+ const int N = 10;
+ static int n;
+ if( privateMapBytes < UncommittedBytesLimit && ++n % N && (cmdLine.durOptions&CmdLine::DurAlwaysRemap)==0 ) {
+ // limited locks version doesn't do any remapprivateview at all, so only try this if privateMapBytes
+ // is in an acceptable range. also every Nth commit, we do everything so we can do some remapping;
+                // remapping a lot all at once could cause jitter from a large burst of copy-on-writes.
+ if( groupCommitWithLimitedLocks() )
+ return;
}
-
- {
+ else {
readlocktry lk("", 1000);
if( lk.got() ) {
groupCommit();
@@ -542,45 +680,53 @@ namespace mongo {
else {
assert( inShutdown() );
if( commitJob.hasWritten() ) {
- log() << "dur warning files are closing outside locks with writes pending" << endl;
+ log() << "journal warning files are closing outside locks with writes pending" << endl;
}
}
}
- CodeBlock durThreadMain;
+ filesystem::path getJournalDir();
void durThread() {
- Client::initThread("dur");
- const int HowOftenToGroupCommitMs = 90;
+ Client::initThread("journal");
+
+ bool samePartition = true;
+ try {
+ const string dbpathDir = boost::filesystem::path(dbpath).native_directory_string();
+ samePartition = onSamePartition(getJournalDir().string(), dbpathDir);
+ }
+ catch(...) {
+ }
+
while( !inShutdown() ) {
- sleepmillis(10);
- CodeBlock::Within w(durThreadMain);
+ unsigned ms = cmdLine.journalCommitInterval;
+ if( ms == 0 ) {
+ // use default
+ ms = samePartition ? 100 : 30;
+ }
+
+ unsigned oneThird = (ms / 3) + 1; // +1 so never zero
+
try {
- int millis = HowOftenToGroupCommitMs;
- {
- stats.rotate();
- {
- Timer t;
- journalRotate(); // note we do this part outside of mongomutex
- millis -= t.millis();
- assert( millis <= HowOftenToGroupCommitMs );
- if( millis < 5 )
- millis = 5;
- }
+ stats.rotate();
- // we do this in a couple blocks, which makes it a tiny bit faster (only a little) on throughput,
- // but is likely also less spiky on our cpu usage, which is good:
- sleepmillis(millis/2);
- commitJob.wi()._deferred.invoke();
- sleepmillis(millis/2);
+ // we do this in a couple blocks (the invoke()), which makes it a tiny bit faster (only a little) on throughput,
+ // but is likely also less spiky on our cpu usage, which is good.
+
+                // commit sooner if one or more getLastError j:true requests are pending
+ sleepmillis(oneThird);
+ for( unsigned i = 1; i <= 2; i++ ) {
+ if( commitJob._notify.nWaiting() )
+ break;
commitJob.wi()._deferred.invoke();
+ sleepmillis(oneThird);
}
go();
}
catch(std::exception& e) {
log() << "exception in durThread causing immediate shutdown: " << e.what() << endl;
- abort(); // based on myTerminate()
+ mongoAbort("exception in durThread");
}
}
cc().shutdown();
@@ -604,6 +750,19 @@ namespace mongo {
if( !cmdLine.dur )
return;
+#if defined(_DURABLEDEFAULTON)
+ DEV {
+ if( time(0) & 1 ) {
+ cmdLine.durOptions |= CmdLine::DurAlwaysCommit;
+ log() << "_DEBUG _DURABLEDEFAULTON : forcing DurAlwaysCommit mode for this run" << endl;
+ }
+ if( time(0) & 2 ) {
+ cmdLine.durOptions |= CmdLine::DurAlwaysRemap;
+ log() << "_DEBUG _DURABLEDEFAULTON : forcing DurAlwaysRemap mode for this run" << endl;
+ }
+ }
+#endif
+
DurableInterface::enableDurability();
journalMakeDir();
@@ -623,6 +782,13 @@ namespace mongo {
void DurableImpl::syncDataAndTruncateJournal() {
dbMutex.assertWriteLocked();
+ // a commit from the commit thread won't begin while we are in the write lock,
+ // but it may already be in progress and the end of that work is done outside
+ // (dbMutex) locks. This line waits for that to complete if already underway.
+ {
+ scoped_lock lk(groupCommitMutex);
+ }
+
groupCommit();
MongoFile::flushAll(true);
journalCleanup();
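To make the new pacing concrete, a small worked sketch of the durThread() interval math (assumed values: journalCommitInterval left unset and journal/data files on the same partition):

    // sketch: how one durThread() cycle is spaced
    bool samePartition = true;                      // journal dir shares a partition with dbpath
    unsigned ms = cmdLine.journalCommitInterval;    // 0 = "use default"
    if( ms == 0 )
        ms = samePartition ? 100 : 30;              // defaults introduced in this patch
    unsigned oneThird = (ms / 3) + 1;               // e.g. 100ms -> 34ms slices; +1 so never zero
    // cycle: sleep(oneThird), invoke deferred work, sleep(oneThird), invoke again, then go();
    // the loop breaks out early if a getLastError j:true waiter appears (_notify.nWaiting())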
diff --git a/db/dur.h b/db/dur.h
index a8035e4..f06ff50 100644
--- a/db/dur.h
+++ b/db/dur.h
@@ -9,6 +9,9 @@ namespace mongo {
class NamespaceDetails;
+ void mongoAbort(const char *msg);
+ void abort(); // not defined -- use mongoAbort() instead
+
namespace dur {
// a smaller limit is likely better on 32 bit
@@ -100,6 +103,9 @@ namespace mongo {
*/
virtual bool commitIfNeeded() = 0;
+ /** @return true if time to commit but does NOT do a commit */
+ virtual bool aCommitIsNeeded() const = 0;
+
/** Declare write intent for a DiskLoc. @see DiskLoc::writing() */
inline DiskLoc& writingDiskLoc(DiskLoc& d) { return *((DiskLoc*) writingPtr(&d, sizeof(d))); }
@@ -152,7 +158,7 @@ namespace mongo {
*/
Record* writing(Record* r);
/** Intentionally unimplemented method. BtreeBuckets are allocated in buffers larger than sizeof( BtreeBucket ). */
- BtreeBucket* writing( BtreeBucket* );
+// BtreeBucket* writing( BtreeBucket* );
/** Intentionally unimplemented method. NamespaceDetails may be based on references to 'Extra' objects. */
NamespaceDetails* writing( NamespaceDetails* );
@@ -174,6 +180,7 @@ namespace mongo {
bool awaitCommit() { return false; }
bool commitNow() { return false; }
bool commitIfNeeded() { return false; }
+ bool aCommitIsNeeded() const { return false; }
void setNoJournal(void *dst, void *src, unsigned len);
void syncDataAndTruncateJournal() {}
};
@@ -186,6 +193,7 @@ namespace mongo {
void createdFile(string filename, unsigned long long len);
bool awaitCommit();
bool commitNow();
+ bool aCommitIsNeeded() const;
bool commitIfNeeded();
void setNoJournal(void *dst, void *src, unsigned len);
void syncDataAndTruncateJournal();
diff --git a/db/dur_commitjob.cpp b/db/dur_commitjob.cpp
index af77c4f..a459cd4 100644
--- a/db/dur_commitjob.cpp
+++ b/db/dur_commitjob.cpp
@@ -18,6 +18,7 @@
#include "pch.h"
#include "dur_commitjob.h"
+#include "dur_stats.h"
#include "taskqueue.h"
namespace mongo {
@@ -126,17 +127,24 @@ namespace mongo {
size_t privateMapBytes = 0; // used by _REMAPPRIVATEVIEW to track how much / how fast to remap
+ void CommitJob::beginCommit() {
+ DEV dbMutex.assertAtLeastReadLocked();
+ _commitNumber = _notify.now();
+ stats.curr->_commits++;
+ }
+
void CommitJob::reset() {
_hasWritten = false;
_wi.clear();
- _ab.reset();
privateMapBytes += _bytes;
_bytes = 0;
_nSinceCommitIfNeededCall = 0;
}
CommitJob::CommitJob() : _ab(4 * 1024 * 1024) , _hasWritten(false),
- _bytes(0), _nSinceCommitIfNeededCall(0) { }
+ _bytes(0), _nSinceCommitIfNeededCall(0) {
+ _commitNumber = 0;
+ }
void CommitJob::note(void* p, int len) {
// from the point of view of the dur module, it would be fine (i think) to only
@@ -149,7 +157,7 @@ namespace mongo {
if( !_hasWritten ) {
// you can't be writing if one of these is pending, so this is a verification.
- assert( !dbMutex._remapPrivateViewRequested );
+ assert( !dbMutex._remapPrivateViewRequested ); // safe to assert here since it must be the first write in a write lock
// we don't bother doing a group commit when nothing is written, so we have a var to track that
_hasWritten = true;
@@ -196,8 +204,11 @@ namespace mongo {
#if defined(_DEBUG)
_nSinceCommitIfNeededCall++;
if( _nSinceCommitIfNeededCall >= 80 ) {
- if( _nSinceCommitIfNeededCall % 40 == 0 )
+ if( _nSinceCommitIfNeededCall % 40 == 0 ) {
log() << "debug nsincecommitifneeded:" << _nSinceCommitIfNeededCall << " bytes:" << _bytes << endl;
+ if( _nSinceCommitIfNeededCall == 120 || _nSinceCommitIfNeededCall == 1200 )
+ printStackTrace();
+ }
}
#endif
if (_bytes > UncommittedBytesLimit * 3) {
diff --git a/db/dur_commitjob.h b/db/dur_commitjob.h
index 104d054..a5f8515 100644
--- a/db/dur_commitjob.h
+++ b/db/dur_commitjob.h
@@ -38,8 +38,8 @@ namespace mongo {
* since that is heavily used in set lookup.
*/
struct WriteIntent { /* copyable */
- WriteIntent() : w_ptr(0), p(0) { }
- WriteIntent(void *a, unsigned b) : w_ptr(0), p((char*)a+b), len(b) { }
+ WriteIntent() : /*w_ptr(0), */ p(0) { }
+ WriteIntent(void *a, unsigned b) : /*w_ptr(0), */ p((char*)a+b), len(b) { }
void* start() const { return (char*)p - len; }
void* end() const { return p; }
@@ -64,7 +64,7 @@ namespace mongo {
return (out << "p: " << wi.p << " end: " << wi.end() << " len: " << wi.len);
}
- mutable void *w_ptr; // writable mapping of p.
+ //mutable void *w_ptr; // writable mapping of p.
// mutable because set::iterator is const but this isn't used in op<
#if defined(_EXPERIMENTAL)
mutable unsigned ofsInJournalBuffer;
@@ -189,14 +189,10 @@ namespace mongo {
/** we use the commitjob object over and over, calling reset() rather than reconstructing */
void reset();
- /** the commit code calls this when data reaches the journal (on disk) */
- void notifyCommitted() { _notify.notifyAll(); }
+ void beginCommit();
- /** Wait until the next group commit occurs. That is, wait until someone calls notifyCommitted. */
- void awaitNextCommit() {
- if( hasWritten() )
- _notify.wait();
- }
+ /** the commit code calls this when data reaches the journal (on disk) */
+ void notifyCommitted() { _notify.notifyAll(_commitNumber); }
/** we check how much written and if it is getting to be a lot, we commit sooner. */
size_t bytes() const { return _bytes; }
@@ -207,11 +203,12 @@ namespace mongo {
Writes& wi() { return _wi; }
private:
+ NotifyAll::When _commitNumber;
bool _hasWritten;
Writes _wi; // todo: fix name
size_t _bytes;
- NotifyAll _notify; // for getlasterror fsync:true acknowledgements
public:
+ NotifyAll _notify; // for getlasterror fsync:true acknowledgements
unsigned _nSinceCommitIfNeededCall;
};
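A brief hedged sketch of how the renumbered notification is consumed -- essentially what DurableImpl::awaitCommit() does earlier in this diff:

    // sketch: a getLastError j:true waiter blocks until a commit that began after "now" completes
    bool waitForGroupCommit() {
        commitJob._notify.awaitBeyondNow();   // released by notifyCommitted() -> _notify.notifyAll(_commitNumber)
        return true;
    }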
diff --git a/db/dur_journal.cpp b/db/dur_journal.cpp
index 946f94c..95a95c9 100644
--- a/db/dur_journal.cpp
+++ b/db/dur_journal.cpp
@@ -25,7 +25,7 @@
#include "../util/logfile.h"
#include "../util/timer.h"
#include "../util/alignedbuilder.h"
-#include "../util/message.h" // getelapsedtimemillis
+#include "../util/net/listen.h" // getelapsedtimemillis
#include "../util/concurrency/race.h"
#include <boost/static_assert.hpp>
#undef assert
@@ -33,6 +33,8 @@
#include "../util/mongoutils/str.h"
#include "dur_journalimpl.h"
#include "../util/file.h"
+#include "../util/checksum.h"
+#include "../util/compress.h"
using namespace mongoutils;
@@ -40,7 +42,25 @@ namespace mongo {
class AlignedBuilder;
+ unsigned goodRandomNumberSlow();
+
namespace dur {
+ // Rotate after reaching this data size in a journal (j._<n>) file
+ // We use a smaller size for 32 bit as the journal is mmapped during recovery (only)
+ // Note if you take a set of datafiles, including journal files, from 32->64 or vice-versa, it must
+ // work. (and should as-is)
+ // --smallfiles makes the limit small.
+
+#if defined(_DEBUG)
+ unsigned long long DataLimitPerJournalFile = 128 * 1024 * 1024;
+#elif defined(__APPLE__)
+ // assuming a developer box if OS X
+ unsigned long long DataLimitPerJournalFile = 256 * 1024 * 1024;
+#else
+ unsigned long long DataLimitPerJournalFile = (sizeof(void*)==4) ? 256 * 1024 * 1024 : 1 * 1024 * 1024 * 1024;
+#endif
+
+ BOOST_STATIC_ASSERT( sizeof(Checksum) == 16 );
BOOST_STATIC_ASSERT( sizeof(JHeader) == 8192 );
BOOST_STATIC_ASSERT( sizeof(JSectHeader) == 20 );
BOOST_STATIC_ASSERT( sizeof(JSectFooter) == 32 );
@@ -61,8 +81,6 @@ namespace mongo {
return getJournalDir()/"lsn";
}
- extern CodeBlock durThreadMain;
-
/** this should be called when something really bad happens so that we can flag appropriately
*/
void journalingFailure(const char *msg) {
@@ -75,6 +93,35 @@ namespace mongo {
assert(false);
}
+ JSectFooter::JSectFooter() {
+ memset(this, 0, sizeof(*this));
+ sentinel = JEntry::OpCode_Footer;
+ }
+
+ JSectFooter::JSectFooter(const void* begin, int len) { // needs buffer to compute hash
+ sentinel = JEntry::OpCode_Footer;
+ reserved = 0;
+ magic[0] = magic[1] = magic[2] = magic[3] = '\n';
+
+ Checksum c;
+ c.gen(begin, (unsigned) len);
+ memcpy(hash, c.bytes, sizeof(hash));
+ }
+
+ bool JSectFooter::checkHash(const void* begin, int len) const {
+ if( !magicOk() ) {
+ log() << "journal footer not valid" << endl;
+ return false;
+ }
+ Checksum c;
+ c.gen(begin, len);
+ DEV log() << "checkHash len:" << len << " hash:" << toHex(hash, 16) << " current:" << toHex(c.bytes, 16) << endl;
+ if( memcmp(hash, c.bytes, sizeof(hash)) == 0 )
+ return true;
+ log() << "journal checkHash mismatch, got: " << toHex(c.bytes, 16) << " expected: " << toHex(hash,16) << endl;
+ return false;
+ }
+
JHeader::JHeader(string fname) {
magic[0] = 'j'; magic[1] = '\n';
_version = CurrentVersion;
@@ -85,21 +132,20 @@ namespace mongo {
strncpy(dbpath, fname.c_str(), sizeof(dbpath)-1);
{
fileId = t&0xffffffff;
- fileId |= ((unsigned long long)getRandomNumber()) << 32;
+ fileId |= ((unsigned long long)goodRandomNumberSlow()) << 32;
}
memset(reserved3, 0, sizeof(reserved3));
txt2[0] = txt2[1] = '\n';
n1 = n2 = n3 = n4 = '\n';
}
- // class Journal
-
Journal j;
const unsigned long long LsnShutdownSentinel = ~((unsigned long long)0);
Journal::Journal() :
_curLogFileMutex("JournalLfMutex") {
+ _ageOut = true;
_written = 0;
_nextFileNumber = 0;
_curLogFile = 0;
@@ -163,15 +209,20 @@ namespace mongo {
throw;
}
assert(!haveJournalFiles());
+
+ flushMyDirectory(getJournalDir() / "file"); // flushes parent of argument (in this case journal dir)
+
log(1) << "removeJournalFiles end" << endl;
}
/** at clean shutdown */
bool okToCleanUp = false; // successful recovery would set this to true
- void Journal::cleanup() {
+ void Journal::cleanup(bool _log) {
if( !okToCleanUp )
return;
+ if( _log )
+ log() << "journalCleanup..." << endl;
try {
scoped_lock lk(_curLogFileMutex);
closeCurrentJournalFile();
@@ -182,7 +233,7 @@ namespace mongo {
throw;
}
}
- void journalCleanup() { j.cleanup(); }
+ void journalCleanup(bool log) { j.cleanup(log); }
bool _preallocateIsFaster() {
bool faster = false;
@@ -215,21 +266,45 @@ namespace mongo {
return faster;
}
bool preallocateIsFaster() {
- return _preallocateIsFaster() && _preallocateIsFaster() && _preallocateIsFaster();
+ Timer t;
+ bool res = false;
+ if( _preallocateIsFaster() && _preallocateIsFaster() ) {
+ // maybe system is just super busy at the moment? sleep a second to let it calm down.
+            // deciding to prealloc is a medium-big decision:
+ sleepsecs(1);
+ res = _preallocateIsFaster();
+ }
+ if( t.millis() > 3000 )
+ log() << "preallocateIsFaster check took " << t.millis()/1000.0 << " secs" << endl;
+ return res;
}
// throws
void preallocateFile(filesystem::path p, unsigned long long len) {
if( exists(p) )
return;
+
+ log() << "preallocating a journal file " << p.string() << endl;
const unsigned BLKSZ = 1024 * 1024;
- log() << "preallocating a journal file " << p.string() << endl;
- LogFile f(p.string());
- AlignedBuilder b(BLKSZ);
- for( unsigned long long x = 0; x < len; x += BLKSZ ) {
- f.synchronousAppend(b.buf(), BLKSZ);
+ assert( len % BLKSZ == 0 );
+
+ AlignedBuilder b(BLKSZ);
+ memset((void*)b.buf(), 0, BLKSZ);
+
+ ProgressMeter m(len, 3/*secs*/, 10/*hits between time check (once every 6.4MB)*/);
+
+ File f;
+ f.open( p.string().c_str() , /*read-only*/false , /*direct-io*/false );
+ assert( f.is_open() );
+ fileofs loc = 0;
+ while ( loc < len ) {
+ f.write( loc , b.buf() , BLKSZ );
+ loc += BLKSZ;
+ m.hit(BLKSZ);
}
+ assert( loc == len );
+ f.fsync();
}
// throws
@@ -238,7 +313,7 @@ namespace mongo {
string fn = str::stream() << "prealloc." << i;
filesystem::path filepath = getJournalDir() / fn;
- unsigned long long limit = Journal::DataLimit;
+ unsigned long long limit = DataLimitPerJournalFile;
if( debug && i == 1 ) {
// moving 32->64, the prealloc files would be short. that is "ok", but we want to exercise that
// case, so we force exercising here when _DEBUG is set by arbitrarily stopping prealloc at a low
@@ -251,14 +326,14 @@ namespace mongo {
}
void preallocateFiles() {
- if( preallocateIsFaster() ||
- exists(getJournalDir()/"prealloc.0") || // if enabled previously, keep using
- exists(getJournalDir()/"prealloc.1") ) {
+ if( exists(getJournalDir()/"prealloc.0") || // if enabled previously, keep using
+ exists(getJournalDir()/"prealloc.1") ||
+ ( cmdLine.preallocj && preallocateIsFaster() ) ) {
usingPreallocate = true;
try {
_preallocateFiles();
}
- catch(...) {
+ catch(...) {
log() << "warning caught exception in preallocateFiles, continuing" << endl;
}
}
@@ -273,7 +348,19 @@ namespace mongo {
filesystem::path filepath = getJournalDir() / fn;
if( !filesystem::exists(filepath) ) {
// we can recycle this file into this prealloc file location
- boost::filesystem::rename(p, filepath);
+ filesystem::path temppath = getJournalDir() / (fn+".temp");
+ boost::filesystem::rename(p, temppath);
+ {
+ // zero the header
+ File f;
+ f.open(temppath.string().c_str(), false, false);
+ char buf[8192];
+ memset(buf, 0, 8192);
+ f.write(0, buf, 8192);
+ f.truncate(DataLimitPerJournalFile);
+ f.fsync();
+ }
+ boost::filesystem::rename(temppath, filepath);
return;
}
}
@@ -385,7 +472,7 @@ namespace mongo {
if something highly surprising, throws to abort
*/
unsigned long long LSNFile::get() {
- uassert(13614, "unexpected version number of lsn file in journal/ directory", ver == 0);
+ uassert(13614, str::stream() << "unexpected version number of lsn file in journal/ directory got: " << ver , ver == 0);
if( ~lsn != checkbytes ) {
log() << "lsnfile not valid. recovery will be from log start. lsn: " << hex << lsn << " checkbytes: " << hex << checkbytes << endl;
return 0;
@@ -396,12 +483,6 @@ namespace mongo {
/** called during recovery (the error message text below assumes that)
*/
unsigned long long journalReadLSN() {
- if( !debug ) {
- // in nondebug build, for now, be conservative until more tests written, and apply the whole journal.
- // however we will still write the lsn file to exercise that code, and use in _DEBUG build.
- return 0;
- }
-
if( !MemoryMappedFile::exists(lsnPath()) ) {
log() << "info no lsn file in journal/ directory" << endl;
return 0;
@@ -414,6 +495,11 @@ namespace mongo {
File f;
f.open(lsnPath().string().c_str());
assert(f.is_open());
+ if( f.len() == 0 ) {
+ // this could be 'normal' if we crashed at the right moment
+ log() << "info lsn file is zero bytes long" << endl;
+ return 0;
+ }
f.read(0,(char*)&L, sizeof(L));
unsigned long long lsn = L.get();
return lsn;
@@ -434,7 +520,6 @@ namespace mongo {
void Journal::updateLSNFile() {
if( !_writeToLSNNeeded )
return;
- durThreadMain.assertWithin();
_writeToLSNNeeded = false;
try {
// os can flush as it likes. if it flushes slowly, we will just do extra work on recovery.
@@ -446,10 +531,12 @@ namespace mongo {
log() << "warning: open of lsn file failed" << endl;
return;
}
- log() << "lsn set " << _lastFlushTime << endl;
+ LOG(1) << "lsn set " << _lastFlushTime << endl;
LSNFile lsnf;
lsnf.set(_lastFlushTime);
f.write(0, (char*)&lsnf, sizeof(lsnf));
+ // do we want to fsync here? if we do it probably needs to be async so the durthread
+ // is not delayed.
}
catch(std::exception& e) {
log() << "warning: write to lsn file failed " << e.what() << endl;
@@ -502,32 +589,29 @@ namespace mongo {
}
}
- /** check if time to rotate files. assure a file is open.
- done separately from the journal() call as we can do this part
- outside of lock.
- thread: durThread()
- */
- void journalRotate() {
- j.rotate();
+ /*int getAgeOutJournalFiles() {
+ mutex::try_lock lk(j._curLogFileMutex, 4000);
+ if( !lk.ok )
+ return -1;
+ return j._ageOut ? 1 : 0;
+ }*/
+ void setAgeOutJournalFiles(bool a) {
+ scoped_lock lk(j._curLogFileMutex);
+ j._ageOut = a;
}
- void Journal::rotate() {
- assert( !dbMutex.atLeastReadLocked() );
- durThreadMain.assertWithin();
-
- scoped_lock lk(_curLogFileMutex);
+ void Journal::_rotate() {
if ( inShutdown() || !_curLogFile )
return;
j.updateLSNFile();
- if( _curLogFile && _written < DataLimit )
+ if( _curLogFile && _written < DataLimitPerJournalFile )
return;
if( _curLogFile ) {
-
+ _curLogFile->truncate();
closeCurrentJournalFile();
-
removeUnneededJournalFiles();
}
@@ -545,24 +629,74 @@ namespace mongo {
}
}
- /** write to journal
+ /** write (append) the buffer we have built to the journal and fsync it.
+ outside of dbMutex lock as this could be slow.
+ @param uncompressed - a buffer that will be written to the journal after compression
+ will not return until on disk
*/
- void journal(const AlignedBuilder& b) {
- j.journal(b);
- }
- void Journal::journal(const AlignedBuilder& b) {
+ void WRITETOJOURNAL(JSectHeader h, AlignedBuilder& uncompressed) {
+ Timer t;
+ j.journal(h, uncompressed);
+ stats.curr->_writeToJournalMicros += t.micros();
+ }
+ void Journal::journal(const JSectHeader& h, const AlignedBuilder& uncompressed) {
+ RACECHECK
+ static AlignedBuilder b(32*1024*1024);
+ /* buffer to journal will be
+ JSectHeader
+ compressed operations
+ JSectFooter
+ */
+ const unsigned headTailSize = sizeof(JSectHeader) + sizeof(JSectFooter);
+ const unsigned max = maxCompressedLength(uncompressed.len()) + headTailSize;
+ b.reset(max);
+
+ {
+ dassert( h.sectionLen() == (unsigned) 0xffffffff ); // we will backfill later
+ b.appendStruct(h);
+ }
+
+ size_t compressedLength = 0;
+ rawCompress(uncompressed.buf(), uncompressed.len(), b.cur(), &compressedLength);
+ assert( compressedLength < 0xffffffff );
+ assert( compressedLength < max );
+ b.skip(compressedLength);
+
+ // footer
+ unsigned L = 0xffffffff;
+ {
+ // pad to alignment, and set the total section length in the JSectHeader
+ assert( 0xffffe000 == (~(Alignment-1)) );
+ unsigned lenUnpadded = b.len() + sizeof(JSectFooter);
+ L = (lenUnpadded + Alignment-1) & (~(Alignment-1));
+ dassert( L >= lenUnpadded );
+
+ ((JSectHeader*)b.atOfs(0))->setSectionLen(lenUnpadded);
+
+ JSectFooter f(b.buf(), b.len()); // computes checksum
+ b.appendStruct(f);
+ dassert( b.len() == lenUnpadded );
+
+ b.skip(L - lenUnpadded);
+ dassert( b.len() % Alignment == 0 );
+ }
+
try {
mutex::scoped_lock lk(_curLogFileMutex);
// must already be open -- so that _curFileId is correct for previous buffer building
assert( _curLogFile );
- stats.curr->_journaledBytes += b.len();
- _written += b.len();
- _curLogFile->synchronousAppend((void *) b.buf(), b.len());
+ stats.curr->_uncompressedBytes += b.len();
+ unsigned w = b.len();
+ _written += w;
+ assert( w <= L );
+ stats.curr->_journaledBytes += L;
+ _curLogFile->synchronousAppend((const void *) b.buf(), L);
+ _rotate();
}
catch(std::exception& e) {
- log() << "warning exception in dur::journal " << e.what() << endl;
+ log() << "error exception in dur::journal " << e.what() << endl;
throw;
}
}
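A note on the padding arithmetic in Journal::journal() above: the section (header, compressed ops, footer) is rounded up to the 8 KB Alignment with the usual mask idiom, and the unpadded length is what gets written back into the JSectHeader. The standalone sketch below only illustrates that rounding; the Alignment value matches the diff, the sizes are hypothetical.

    // illustration of the (len + Alignment-1) & ~(Alignment-1) rounding used above
    #include <cassert>
    #include <cstdio>

    static const unsigned Alignment = 8192;   // same value as dur::Alignment

    unsigned roundUpToAlignment(unsigned lenUnpadded) {
        return (lenUnpadded + Alignment - 1) & ~(Alignment - 1);
    }

    int main() {
        unsigned lenUnpadded = 20 + 3000 + 32;   // hypothetical header + compressed ops + footer
        unsigned L = roundUpToAlignment(lenUnpadded);
        assert(L >= lenUnpadded && L % Alignment == 0);
        std::printf("unpadded=%u padded=%u padding=%u\n", lenUnpadded, L, L - lenUnpadded);
        return 0;
    }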
diff --git a/db/dur_journal.h b/db/dur_journal.h
index 81957b5..664f639 100644
--- a/db/dur_journal.h
+++ b/db/dur_journal.h
@@ -27,8 +27,12 @@ namespace mongo {
*/
extern bool okToCleanUp;
- /** at termination after db files closed & fsynced */
- void journalCleanup();
+ /** at termination after db files closed & fsynced
+ also after recovery
+ closes and removes journal files
+ @param log report in the log that we are cleaning up, if we actually do any work

+ */
+ void journalCleanup(bool log = false);
/** assure journal/ dir exists. throws */
void journalMakeDir();
@@ -40,12 +44,6 @@ namespace mongo {
*/
void journalRotate();
- /** write/append to journal file *
- @param buf - a buffer that will be written to the journal.
- will not return until on disk
- */
- void journal(const AlignedBuilder& buf);
-
/** flag that something has gone wrong during writing to the journal
(not for recovery mode)
*/
@@ -64,5 +62,7 @@ namespace mongo {
// in case disk controller buffers writes
const long long ExtraKeepTimeMs = 10000;
+ const unsigned JournalCommitIntervalDefault = 100;
+
}
}
diff --git a/db/dur_journalformat.h b/db/dur_journalformat.h
index d29f94d..10ed848 100644
--- a/db/dur_journalformat.h
+++ b/db/dur_journalformat.h
@@ -18,12 +18,12 @@
#pragma once
-#include "../util/md5.hpp"
-
namespace mongo {
namespace dur {
+ const unsigned Alignment = 8192;
+
#pragma pack(1)
/** beginning header for a journal/j._<n> file
there is nothing important in this header at this time, except perhaps version #.
@@ -36,7 +36,11 @@ namespace mongo {
// x4142 is ascii-readable if you look at the file with head/less -- thus the starting values were near
// that. simply incrementing the version # is safe on a fwd basis.
- enum { CurrentVersion = 0x4147 };
+#if defined(_NOCOMPRESS)
+ enum { CurrentVersion = 0x4148 };
+#else
+ enum { CurrentVersion = 0x4149 };
+#endif
unsigned short _version;
// these are just for diagnostic ease (make header more useful as plain text)
@@ -57,11 +61,25 @@ namespace mongo {
/** "Section" header. A section corresponds to a group commit.
len is length of the entire section including header and footer.
+ header and footer are not compressed, just the stuff in between.
*/
struct JSectHeader {
- unsigned len; // length in bytes of the whole section
+ private:
+ unsigned _sectionLen; // unpadded length in bytes of the whole section
+ public:
unsigned long long seqNumber; // sequence number that can be used on recovery to not do too much work
unsigned long long fileId; // matches JHeader::fileId
+ unsigned sectionLen() const { return _sectionLen; }
+
+ // we store the unpadded length so we can use that when we uncompress. to
+ // get the true total size this must be rounded up to the Alignment.
+ void setSectionLen(unsigned lenUnpadded) { _sectionLen = lenUnpadded; }
+
+ unsigned sectionLenWithPadding() const {
+ unsigned x = (sectionLen() + (Alignment-1)) & (~(Alignment-1));
+ dassert( x % Alignment == 0 );
+ return x;
+ }
};
/** an individual write operation within a group commit section. Either the entire section should
@@ -113,31 +131,21 @@ namespace mongo {
/** group commit section footer. md5 is a key field. */
struct JSectFooter {
- JSectFooter(const void* begin, int len) { // needs buffer to compute hash
- sentinel = JEntry::OpCode_Footer;
- reserved = 0;
- magic[0] = magic[1] = magic[2] = magic[3] = '\n';
-
- // skip section header since size modified after hashing
- (const char*&)begin += sizeof(JSectHeader);
- len -= sizeof(JSectHeader);
-
- md5(begin, len, hash);
- }
+ JSectFooter();
+ JSectFooter(const void* begin, int len); // needs buffer to compute hash
unsigned sentinel;
- md5digest hash; // unsigned char[16]
+ unsigned char hash[16];
unsigned long long reserved;
char magic[4]; // "\n\n\n\n"
- bool checkHash(const void* begin, int len) const {
- // skip section header since size modified after hashing
- (const char*&)begin += sizeof(JSectHeader);
- len -= sizeof(JSectHeader);
- md5digest current;
- md5(begin, len, current);
- DEV log() << "checkHash len:" << len << " hash:" << toHex(hash, 16) << " current:" << toHex(current, 16) << endl;
- return (memcmp(hash, current, sizeof(hash)) == 0);
- }
+ /** used by recovery to see if buffer is valid
+ @param begin the buffer
+ @param len buffer len
+ @return true if buffer looks valid
+ */
+ bool checkHash(const void* begin, int len) const;
+
+ bool magicOk() const { return *((unsigned*)magic) == 0x0a0a0a0a; }
};
/** declares "the next entry(s) are for this database / file path prefix" */
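The header now carries two lengths: sectionLen() is the unpadded size the uncompressor needs, and sectionLenWithPadding() is the on-disk stride to the next section. The snippet below is a simplified stand-in (not the real JSectHeader layout) showing how a reader would step through a mapped journal file using that stride.

    #include <cstdio>
    #include <cstring>

    static const unsigned Alignment = 8192;

    #pragma pack(1)
    struct SectHeader {                  // stand-in for dur::JSectHeader
        unsigned sectionLen;             // unpadded: header + compressed ops + footer
        unsigned long long seqNumber;
        unsigned long long fileId;
    };
    #pragma pack()

    // sections are laid out back to back; the stride is the padded length
    const char* nextSection(const char* cur) {
        SectHeader h;
        std::memcpy(&h, cur, sizeof(h));
        unsigned stride = (h.sectionLen + Alignment - 1) & ~(Alignment - 1);
        return cur + stride;
    }

    int main() {
        char buf[2 * Alignment] = { 0 };
        SectHeader h = { 100, 1, 42 };   // 100 unpadded bytes -> one 8 KB stride
        std::memcpy(buf, &h, sizeof(h));
        std::printf("next section at offset %ld\n", (long)(nextSection(buf) - buf));
        return 0;
    }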
diff --git a/db/dur_journalimpl.h b/db/dur_journalimpl.h
index 9566dff..bf771c5 100644
--- a/db/dur_journalimpl.h
+++ b/db/dur_journalimpl.h
@@ -18,6 +18,7 @@
#pragma once
+#include "dur_journalformat.h"
#include "../util/logfile.h"
namespace mongo {
@@ -40,20 +41,14 @@ namespace mongo {
*/
void rotate();
- /** write to journal
+ /** append to the journal file
*/
- void journal(const AlignedBuilder& b);
+ void journal(const JSectHeader& h, const AlignedBuilder& b);
boost::filesystem::path getFilePathFor(int filenumber) const;
unsigned long long lastFlushTime() const { return _lastFlushTime; }
- void cleanup();
-
- // Rotate after reaching this data size in a journal (j._<n>) file
- // We use a smaller size for 32 bit as the journal is mmapped during recovery (only)
- // Note if you take a set of datafiles, including journal files, from 32->64 or vice-versa, it must
- // work. (and should as-is)
- static const unsigned long long DataLimit = (sizeof(void*)==4) ? 256 * 1024 * 1024 : 1 * 1024 * 1024 * 1024;
+ void cleanup(bool log); // closes and removes journal files
unsigned long long curFileId() const { return _curFileId; }
@@ -67,14 +62,21 @@ namespace mongo {
void open();
private:
+ /** check if time to rotate files. assure a file is open.
+ * internally called with every commit
+ */
+ void _rotate();
+
void _open();
void closeCurrentJournalFile();
void removeUnneededJournalFiles();
unsigned long long _written; // bytes written so far to the current journal (log) file
unsigned _nextFileNumber;
-
+ public:
mutex _curLogFileMutex;
+ bool _ageOut;
+ private:
LogFile *_curLogFile; // use _curLogFileMutex
unsigned long long _curFileId; // current file id see JHeader::fileId
diff --git a/db/dur_preplogbuffer.cpp b/db/dur_preplogbuffer.cpp
index 1648e89..0d8ef36 100644
--- a/db/dur_preplogbuffer.cpp
+++ b/db/dur_preplogbuffer.cpp
@@ -35,6 +35,7 @@
#include "../util/alignedbuilder.h"
#include "../util/timer.h"
#include "dur_stats.h"
+#include "../server.h"
using namespace mongoutils;
@@ -58,9 +59,8 @@ namespace mongo {
void prepBasicWrite_inlock(AlignedBuilder&bb, const WriteIntent *i, RelativePath& lastDbPath) {
size_t ofs = 1;
MongoMMF *mmf = findMMF_inlock(i->start(), /*out*/ofs);
- dassert( i->w_ptr == 0 );
- if( !mmf->willNeedRemap() ) {
+ if( unlikely(!mmf->willNeedRemap()) ) {
// tag this mmf as needing a remap of its private view later.
// usually it will already be dirty/already set, so we do the if above first
// to avoid possibility of cpu cache line contention
@@ -69,8 +69,13 @@ namespace mongo {
// since we have already looked up the mmf, we go ahead and remember the write view location
// so we don't have to find the MongoMMF again later in WRITETODATAFILES()
+ //
+ // this was for WRITETODATAFILES_Impl2 so commented out now
+ //
+ /*
dassert( i->w_ptr == 0 );
i->w_ptr = ((char*)mmf->view_write()) + ofs;
+ */
JEntry e;
e.len = min(i->length(), (unsigned)(mmf->length() - ofs)); // don't write past end of file
@@ -92,8 +97,8 @@ namespace mongo {
#endif
bb.appendBuf(i->start(), e.len);
- if (e.len != (unsigned)i->length()) {
- log() << "dur info splitting prepBasicWrite at boundary" << endl;
+ if (unlikely(e.len != (unsigned)i->length())) {
+ log() << "journal info splitting prepBasicWrite at boundary" << endl;
// This only happens if we write to the last byte in a file and
// the first byte in another file that is mapped adjacently. I
@@ -120,23 +125,20 @@ namespace mongo {
}
}
- void resetLogBuffer(AlignedBuilder& bb) {
+ void resetLogBuffer(/*out*/JSectHeader& h, AlignedBuilder& bb) {
bb.reset();
- // JSectHeader
- JSectHeader h;
- h.len = (unsigned) 0xffffffff; // total length, will fill in later
+ h.setSectionLen(0xffffffff); // total length, will fill in later
h.seqNumber = getLastDataFileFlushTime();
h.fileId = j.curFileId();
-
- bb.appendStruct(h);
}
/** we will build an output buffer ourself and then use O_DIRECT
we could be in read lock for this
caller handles locking
+ @return partially populated sectheader and _ab set
*/
- void _PREPLOGBUFFER() {
+ void _PREPLOGBUFFER(JSectHeader& h) {
assert( cmdLine.dur );
{
@@ -148,7 +150,7 @@ namespace mongo {
}
AlignedBuilder& bb = commitJob._ab;
- resetLogBuffer(bb);
+ resetLogBuffer(h, bb); // adds JSectHeader
// ops other than basic writes (DurOp's)
{
@@ -157,34 +159,14 @@ namespace mongo {
}
}
- {
- prepBasicWrites(bb);
- }
-
- {
- JSectFooter f(bb.buf(), bb.len());
- bb.appendStruct(f);
- }
-
- {
- // pad to alignment, and set the total section length in the JSectHeader
- assert( 0xffffe000 == (~(Alignment-1)) );
- unsigned L = (bb.len() + Alignment-1) & (~(Alignment-1));
- dassert( L >= (unsigned) bb.len() );
-
- *((unsigned*)bb.atOfs(0)) = L;
-
- unsigned padding = L - bb.len();
- bb.skip(padding);
- dassert( bb.len() % Alignment == 0 );
- }
+ prepBasicWrites(bb);
return;
}
- void PREPLOGBUFFER() {
+ void PREPLOGBUFFER(/*out*/ JSectHeader& h) {
Timer t;
j.assureLogFileOpen(); // so fileId is set
- _PREPLOGBUFFER();
+ _PREPLOGBUFFER(h);
stats.curr->_prepLogBufferMicros += t.micros();
}
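One detail above worth spelling out: in prepBasicWrite_inlock a write intent that straddles the end of a memory-mapped file is clamped to the end of that file (hence the "splitting prepBasicWrite at boundary" message), and the remainder is expected to be covered via the adjacently mapped file. The sketch below only illustrates the clamp; the names and sizes are made up.

    #include <algorithm>
    #include <cstdio>

    // mirrors e.len = min(i->length(), mmf->length() - ofs) from the hunk above
    unsigned clampToFileEnd(unsigned intentLen, unsigned long long fileLen, unsigned long long ofs) {
        return (unsigned) std::min<unsigned long long>(intentLen, fileLen - ofs);
    }

    int main() {
        unsigned long long fileLen = 64ULL * 1024 * 1024;            // a 64 MB mapped file
        unsigned len = clampToFileEnd(4096, fileLen, fileLen - 100); // intent starts 100 bytes before EOF
        std::printf("clamped write length: %u\n", len);              // 100; the rest is split off
        return 0;
    }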
diff --git a/db/dur_recover.cpp b/db/dur_recover.cpp
index 1480a59..3c9fee7 100644
--- a/db/dur_recover.cpp
+++ b/db/dur_recover.cpp
@@ -19,6 +19,7 @@
#include "pch.h"
#include "dur.h"
+#include "dur_stats.h"
#include "dur_recover.h"
#include "dur_journal.h"
#include "dur_journalformat.h"
@@ -26,13 +27,16 @@
#include "namespace.h"
#include "../util/mongoutils/str.h"
#include "../util/bufreader.h"
+#include "../util/concurrency/race.h"
#include "pdfile.h"
#include "database.h"
#include "db.h"
#include "../util/unittest.h"
+#include "../util/checksum.h"
#include "cmdline.h"
#include "curop.h"
#include "mongommf.h"
+#include "../util/compress.h"
#include <sys/stat.h>
#include <fcntl.h>
@@ -90,62 +94,73 @@ namespace mongo {
throws
*/
class JournalSectionIterator : boost::noncopyable {
+ auto_ptr<BufReader> _entries;
+ const JSectHeader _h;
+ const char *_lastDbName; // pointer into mmaped journal file
+ const bool _doDurOps;
+ string _uncompressed;
public:
- JournalSectionIterator(const void *p, unsigned len, bool doDurOps)
- : _br(p, len)
- , _sectHead(static_cast<const JSectHeader*>(_br.skip(sizeof(JSectHeader))))
- , _lastDbName(NULL)
- , _doDurOps(doDurOps)
- {}
+ JournalSectionIterator(const JSectHeader& h, const void *compressed, unsigned compressedLen, bool doDurOpsRecovering) :
+ _h(h),
+ _lastDbName(0)
+ , _doDurOps(doDurOpsRecovering)
+ {
+ assert( doDurOpsRecovering );
+ bool ok = uncompress((const char *)compressed, compressedLen, &_uncompressed);
+ if( !ok ) {
+ // it should always be ok (i think?) as there is a previous check to see that the JSectFooter is ok
+ log() << "couldn't uncompress journal section" << endl;
+ msgasserted(15874, "couldn't uncompress journal section");
+ }
+ const char *p = _uncompressed.c_str();
+ assert( compressedLen == _h.sectionLen() - sizeof(JSectFooter) - sizeof(JSectHeader) );
+ _entries = auto_ptr<BufReader>( new BufReader(p, _uncompressed.size()) );
+ }
+
+ // we work with the uncompressed buffer when doing a WRITETODATAFILES (for speed)
+ JournalSectionIterator(const JSectHeader &h, const void *p, unsigned len) :
+ _entries( new BufReader((const char *) p, len) ),
+ _h(h),
+ _lastDbName(0)
+ , _doDurOps(false)
- bool atEof() const { return _br.atEof(); }
+ { }
- unsigned long long seqNumber() const { return _sectHead->seqNumber; }
+ bool atEof() const { return _entries->atEof(); }
+
+ unsigned long long seqNumber() const { return _h.seqNumber; }
/** get the next entry from the log. this function parses and combines JDbContext and JEntry's.
- * @return true if got an entry. false at successful end of section (and no entry returned).
* throws on premature end of section.
*/
- bool next(ParsedJournalEntry& e) {
+ void next(ParsedJournalEntry& e) {
unsigned lenOrOpCode;
- _br.read(lenOrOpCode);
+ _entries->read(lenOrOpCode);
if (lenOrOpCode > JEntry::OpCode_Min) {
switch( lenOrOpCode ) {
case JEntry::OpCode_Footer: {
- if (_doDurOps) {
- const char* pos = (const char*) _br.pos();
- pos -= sizeof(lenOrOpCode); // rewind to include OpCode
- const JSectFooter& footer = *(const JSectFooter*)pos;
- int len = pos - (char*)_sectHead;
- if (!footer.checkHash(_sectHead, len)) {
- massert(13594, str::stream() << "Journal checksum doesn't match. recorded: "
- << toHex(footer.hash, sizeof(footer.hash))
- << " actual: " << md5simpledigest(_sectHead, len)
- , false);
- }
- }
- return false; // false return value denotes end of section
+ assert( false );
}
case JEntry::OpCode_FileCreated:
case JEntry::OpCode_DropDb: {
e.dbName = 0;
- boost::shared_ptr<DurOp> op = DurOp::read(lenOrOpCode, _br);
+ boost::shared_ptr<DurOp> op = DurOp::read(lenOrOpCode, *_entries);
if (_doDurOps) {
e.op = op;
}
- return true;
+ return;
}
case JEntry::OpCode_DbContext: {
- _lastDbName = (const char*) _br.pos();
- const unsigned limit = std::min((unsigned)Namespace::MaxNsLen, _br.remaining());
+ _lastDbName = (const char*) _entries->pos();
+ const unsigned limit = std::min((unsigned)Namespace::MaxNsLen, _entries->remaining());
const unsigned len = strnlen(_lastDbName, limit);
massert(13533, "problem processing journal file during recovery", _lastDbName[len] == '\0');
- _br.skip(len+1); // skip '\0' too
- _br.read(lenOrOpCode);
+ _entries->skip(len+1); // skip '\0' too
+ _entries->read(lenOrOpCode); // read this for the fall through
}
// fall through as a basic operation always follows jdbcontext, and we don't have anything to return yet
@@ -157,18 +172,13 @@ namespace mongo {
// JEntry - a basic write
assert( lenOrOpCode && lenOrOpCode < JEntry::OpCode_Min );
- _br.rewind(4);
- e.e = (JEntry *) _br.skip(sizeof(JEntry));
+ _entries->rewind(4);
+ e.e = (JEntry *) _entries->skip(sizeof(JEntry));
e.dbName = e.e->isLocalDbContext() ? "local" : _lastDbName;
assert( e.e->len == lenOrOpCode );
- _br.skip(e.e->len);
- return true;
+ _entries->skip(e.e->len);
}
- private:
- BufReader _br;
- const JSectHeader* _sectHead;
- const char *_lastDbName; // pointer into mmaped journal file
- const bool _doDurOps;
+
};
static string fileName(const char* dbName, int fileNo) {
@@ -204,6 +214,11 @@ namespace mongo {
}
void RecoveryJob::write(const ParsedJournalEntry& entry) {
+ //TODO(mathias): look into making some of these dasserts
+ assert(entry.e);
+ assert(entry.dbName);
+ assert(strnlen(entry.dbName, MaxDatabaseNameLen) < MaxDatabaseNameLen);
+
const string fn = fileName(entry.dbName, entry.e->getFileNo());
MongoFile* file;
{
@@ -225,8 +240,12 @@ namespace mongo {
}
if ((entry.e->ofs + entry.e->len) <= mmf->length()) {
+ assert(mmf->view_write());
+ assert(entry.e->srcData());
+
void* dest = (char*)mmf->view_write() + entry.e->ofs;
memcpy(dest, entry.e->srcData(), entry.e->len);
+ stats.curr->_writeToDataFilesBytes += entry.e->len;
}
else {
massert(13622, "Trying to write past end of file in WRITETODATAFILES", _recovering);
@@ -278,27 +297,64 @@ namespace mongo {
log() << "END section" << endl;
}
- void RecoveryJob::processSection(const void *p, unsigned len) {
+ void RecoveryJob::processSection(const JSectHeader *h, const void *p, unsigned len, const JSectFooter *f) {
scoped_lock lk(_mx);
+ RACECHECK
+
+ /** todo: we should really verify the checksum to see that seqNumber is ok.
+ that is expensive, though; maybe there is some sort of checksum of just the header
+ within the header itself
+ */
+ if( _recovering && _lastDataSyncedFromLastRun > h->seqNumber + ExtraKeepTimeMs ) {
+ if( h->seqNumber != _lastSeqMentionedInConsoleLog ) {
+ static int n;
+ if( ++n < 10 ) {
+ log() << "recover skipping application of section seq:" << h->seqNumber << " < lsn:" << _lastDataSyncedFromLastRun << endl;
+ }
+ else if( n == 10 ) {
+ log() << "recover skipping application of section more..." << endl;
+ }
+ _lastSeqMentionedInConsoleLog = h->seqNumber;
+ }
+ return;
+ }
- vector<ParsedJournalEntry> entries;
- JournalSectionIterator i(p, len, _recovering);
+ auto_ptr<JournalSectionIterator> i;
+ if( _recovering ) {
+ i = auto_ptr<JournalSectionIterator>(new JournalSectionIterator(*h, p, len, _recovering));
+ }
+ else {
+ i = auto_ptr<JournalSectionIterator>(new JournalSectionIterator(*h, /*after header*/p, /*w/out header*/len));
+ }
- //DEV log() << "recovery processSection seq:" << i.seqNumber() << endl;
- if( _recovering && _lastDataSyncedFromLastRun > i.seqNumber() + ExtraKeepTimeMs ) {
- if( i.seqNumber() != _lastSeqMentionedInConsoleLog ) {
- log() << "recover skipping application of section seq:" << i.seqNumber() << " < lsn:" << _lastDataSyncedFromLastRun << endl;
- _lastSeqMentionedInConsoleLog = i.seqNumber();
+ // we use a static so that we don't have to reallocate every time through. occasionally we
+ // go back to a small allocation so that if there were a spiky growth it won't stick forever.
+ static vector<ParsedJournalEntry> entries;
+ entries.clear();
+/** TEMP uncomment
+ RARELY OCCASIONALLY {
+ if( entries.capacity() > 2048 ) {
+ entries.shrink_to_fit();
+ entries.reserve(2048);
}
- return;
}
+*/
// first read all entries to make sure this section is valid
ParsedJournalEntry e;
- while( i.next(e) ) {
+ while( !i->atEof() ) {
+ i->next(e);
entries.push_back(e);
}
+ // after the entries check the footer checksum
+ if( _recovering ) {
+ assert( ((const char *)h) + sizeof(JSectHeader) == p );
+ if( !f->checkHash(h, len + sizeof(JSectHeader)) ) {
+ msgasserted(13594, "journal checksum doesn't match");
+ }
+ }
+
// got all the entries for one group commit. apply them:
applyEntries(entries);
}
@@ -334,11 +390,16 @@ namespace mongo {
if( h.fileId != fileId ) {
if( debug || (cmdLine.durOptions & CmdLine::DurDumpJournal) ) {
log() << "Ending processFileBuffer at differing fileId want:" << fileId << " got:" << h.fileId << endl;
- log() << " sect len:" << h.len << " seqnum:" << h.seqNumber << endl;
+ log() << " sect len:" << h.sectionLen() << " seqnum:" << h.seqNumber << endl;
}
return true;
}
- processSection(br.skip(h.len), h.len);
+ unsigned slen = h.sectionLen();
+ unsigned dataLen = slen - sizeof(JSectHeader) - sizeof(JSectFooter);
+ const char *hdr = (const char *) br.skip(h.sectionLenWithPadding());
+ const char *data = hdr + sizeof(JSectHeader);
+ const char *footer = data + dataLen;
+ processSection((const JSectHeader*) hdr, data, dataLen, (const JSectFooter*) footer);
// ctrl c check
killCurrentOp.checkForInterrupt(false);
@@ -356,6 +417,17 @@ namespace mongo {
/** apply a specific journal file */
bool RecoveryJob::processFile(path journalfile) {
log() << "recover " << journalfile.string() << endl;
+
+ try {
+ if( boost::filesystem::file_size( journalfile.string() ) == 0 ) {
+ log() << "recover info " << journalfile.string() << " has zero length" << endl;
+ return true;
+ }
+ } catch(...) {
+ // if something weird happens, like a permissions problem, keep going so the massert down below can happen (presumably)
+ log() << "recover exception checking filesize" << endl;
+ }
+
MemoryMappedFile f;
void *p = f.mapWithOptions(journalfile.string().c_str(), MongoFile::READONLY | MongoFile::SEQUENTIAL);
massert(13544, str::stream() << "recover error couldn't open " << journalfile.string(), p);
@@ -371,13 +443,19 @@ namespace mongo {
_lastDataSyncedFromLastRun = journalReadLSN();
log() << "recover lsn: " << _lastDataSyncedFromLastRun << endl;
+ // todo: we could truncate the journal file at rotation time to the right length, then this abruptEnd
+ // check can be turned back on. this is relevant when prealloc is being used.
for( unsigned i = 0; i != files.size(); ++i ) {
- /*bool abruptEnd = */processFile(files[i]);
- /*if( abruptEnd && i+1 < files.size() ) {
+ bool abruptEnd = processFile(files[i]);
+ if( abruptEnd && i+1 < files.size() ) {
+#if 1 // Leaving this as a warning for now. TODO: make this an error post 2.0
+ log() << "recover warning: abrupt end to file " << files[i].string() << ", yet it isn't the last journal file" << endl;
+#else
log() << "recover error: abrupt end to file " << files[i].string() << ", yet it isn't the last journal file" << endl;
close();
uasserted(13535, "recover abrupt journal file end");
- }*/
+#endif
+ }
}
close();
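The skip test added to RecoveryJob::processSection boils down to one comparison against the LSN read from the lsn file, with ExtraKeepTimeMs as a safety margin for disk controllers that buffer writes. A minimal illustration of that predicate, with hypothetical values:

    #include <cstdio>

    const long long ExtraKeepTimeMs = 10000;   // same constant as in dur_journal.h

    // a section already reflected in the data files can be skipped during recovery
    bool alreadyApplied(unsigned long long lastDataSyncedLSN, unsigned long long sectionSeq) {
        return lastDataSyncedLSN > sectionSeq + ExtraKeepTimeMs;
    }

    int main() {
        std::printf("%d\n", (int) alreadyApplied(500000, 450000));   // 1 -> skip section
        std::printf("%d\n", (int) alreadyApplied(500000, 495000));   // 0 -> apply section
        return 0;
    }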
diff --git a/db/dur_recover.h b/db/dur_recover.h
index 1022fdc..955e730 100644
--- a/db/dur_recover.h
+++ b/db/dur_recover.h
@@ -2,6 +2,7 @@
#pragma once
+#include "dur_journalformat.h"
#include "../util/concurrency/mutex.h"
#include "../util/file.h"
@@ -15,10 +16,14 @@ namespace mongo {
*/
class RecoveryJob : boost::noncopyable {
public:
- RecoveryJob() :_lastDataSyncedFromLastRun(0), _mx("recovery"), _recovering(false) { _lastSeqMentionedInConsoleLog = 1; }
+ RecoveryJob() : _lastDataSyncedFromLastRun(0),
+ _mx("recovery"), _recovering(false) { _lastSeqMentionedInConsoleLog = 1; }
void go(vector<path>& files);
~RecoveryJob();
- void processSection(const void *, unsigned len);
+
+ /** @param data data between header and footer. compressed if recovering. */
+ void processSection(const JSectHeader *h, const void *data, unsigned len, const JSectFooter *f);
+
void close(); // locks and calls _close()
static RecoveryJob & get() { return _instance; }
@@ -34,9 +39,9 @@ namespace mongo {
unsigned long long _lastDataSyncedFromLastRun;
unsigned long long _lastSeqMentionedInConsoleLog;
-
- mongo::mutex _mx; // protects _mmfs
-
+ public:
+ mongo::mutex _mx; // protects _mmfs; see setNoJournal() too
+ private:
bool _recovering; // are we in recovery or WRITETODATAFILES
static RecoveryJob &_instance;
diff --git a/db/dur_stats.h b/db/dur_stats.h
index 5f5a188..50a26d1 100644
--- a/db/dur_stats.h
+++ b/db/dur_stats.h
@@ -13,11 +13,14 @@ namespace mongo {
unsigned _intervalMicros;
struct S {
BSONObj _asObj();
+ string _asCSV();
+ string _CSVHeader();
void reset();
unsigned _commits;
unsigned _earlyCommits; // count of early commits from commitIfNeeded() or from getDur().commitNow()
unsigned long long _journaledBytes;
+ unsigned long long _uncompressedBytes;
unsigned long long _writeToDataFilesBytes;
unsigned long long _prepLogBufferMicros;
diff --git a/db/dur_writetodatafiles.cpp b/db/dur_writetodatafiles.cpp
index 50797ea..6724f07 100644
--- a/db/dur_writetodatafiles.cpp
+++ b/db/dur_writetodatafiles.cpp
@@ -47,11 +47,13 @@ namespace mongo {
@see https://docs.google.com/drawings/edit?id=1TklsmZzm7ohIZkwgeK6rMvsdaR13KjtJYMsfLr175Zc&hl=en
*/
- void WRITETODATAFILES_Impl1() {
- RecoveryJob::get().processSection(commitJob._ab.buf(), commitJob._ab.len());
+ void WRITETODATAFILES_Impl1(const JSectHeader& h, AlignedBuilder& uncompressed) {
+ RWLockRecursive::Shared lk(MongoFile::mmmutex);
+ RecoveryJob::get().processSection(&h, uncompressed.buf(), uncompressed.len(), 0);
}
- // the old implementation
+#if 0
+ // the old implementation. doesn't work with groupCommitWithLimitedLocks()
void WRITETODATAFILES_Impl2() {
/* we go backwards as what is at the end is most likely in the cpu cache. it won't be much, but we'll take it. */
for( set<WriteIntent>::const_iterator it(commitJob.writes().begin()), end(commitJob.writes().end()); it != end; ++it ) {
@@ -61,8 +63,10 @@ namespace mongo {
memcpy(intent.w_ptr, intent.start(), intent.length());
}
}
+#endif
#if defined(_EXPERIMENTAL)
+ // doesn't work with groupCommitWithLimitedLocks()
void WRITETODATAFILES_Impl3() {
/* we go backwards as what is at the end is most likely in the cpu cache. it won't be much, but we'll take it. */
for( set<WriteIntent>::const_iterator it(commitJob.writes().begin()), end(commitJob.writes().end()); it != end; ++it ) {
@@ -76,23 +80,15 @@ namespace mongo {
}
#endif
- void WRITETODATAFILES() {
- dbMutex.assertAtLeastReadLocked();
-
- MongoFile::markAllWritable(); // for _DEBUG. normally we don't write in a read lock
-
+ // concurrency: in mmmutex, not necessarily in dbMutex
+ void WRITETODATAFILES(const JSectHeader& h, AlignedBuilder& uncompressed) {
Timer t;
#if defined(_EXPERIMENTAL)
WRITETODATAFILES_Impl3();
#else
- WRITETODATAFILES_Impl1();
+ WRITETODATAFILES_Impl1(h, uncompressed);
#endif
stats.curr->_writeToDataFilesMicros += t.micros();
-
- if (!dbMutex.isWriteLocked())
- MongoFile::unmarkAllWritable();
-
- debugValidateAllMapsMatch();
}
}
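For orientation, the group-commit path these functions now form is roughly: PREPLOGBUFFER fills a JSectHeader plus the uncompressed buffer, WRITETOJOURNAL compresses and appends it to the journal, and WRITETODATAFILES replays the same uncompressed buffer into the mapped data files via RecoveryJob::processSection. The sketch below is a simplified rendering of that ordering using stand-in types, not the real mongo::dur API.

    #include <cstdio>
    #include <string>

    struct SectHeader { unsigned long long seqNumber; };      // stand-in for JSectHeader
    struct Builder    { std::string bytes; };                 // stand-in for AlignedBuilder

    void prepLogBuffer(SectHeader& h, Builder& uncompressed) {     // ~ PREPLOGBUFFER
        h.seqNumber = 1;
        uncompressed.bytes = "journal entries for this group commit";
    }
    void writeToJournal(const SectHeader&, const Builder& b) {     // ~ WRITETOJOURNAL
        std::printf("append %u bytes to the journal (compressed on the way out)\n", (unsigned) b.bytes.size());
    }
    void writeToDataFiles(const SectHeader&, const Builder& b) {   // ~ WRITETODATAFILES
        std::printf("copy %u bytes into the mapped data files\n", (unsigned) b.bytes.size());
    }

    int main() {
        SectHeader h;
        Builder uncompressed;
        prepLogBuffer(h, uncompressed);
        writeToJournal(h, uncompressed);     // durable in the journal first...
        writeToDataFiles(h, uncompressed);   // ...then applied to the data files
        return 0;
    }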
diff --git a/db/durop.cpp b/db/durop.cpp
index 344b21e..80ee504 100644
--- a/db/durop.cpp
+++ b/db/durop.cpp
@@ -48,7 +48,7 @@ namespace mongo {
op = shared_ptr<DurOp>( new DropDbOp(br) );
break;
default:
- massert(13546, (str::stream() << "dur recover unrecognized opcode in journal " << opcode), false);
+ massert(13546, (str::stream() << "journal recover: unrecognized opcode in journal " << opcode), false);
}
return op;
}
@@ -152,6 +152,7 @@ namespace mongo {
ofs += w;
}
f.fsync();
+ flushMyDirectory(full);
massert(13628, str::stream() << "recover failure writing file " << full, !f.bad() );
}
diff --git a/db/durop.h b/db/durop.h
index c4574c2..9ab1bfc 100644
--- a/db/durop.h
+++ b/db/durop.h
@@ -28,8 +28,6 @@ namespace mongo {
namespace dur {
- const unsigned Alignment = 8192;
-
/** DurOp - Operations we journal that aren't just basic writes.
*
* Basic writes are logged as JEntry's, and indicated in ram temporarily as struct dur::WriteIntent.
diff --git a/db/extsort.cpp b/db/extsort.cpp
index 2e6d8d8..0cc36f1 100644
--- a/db/extsort.cpp
+++ b/db/extsort.cpp
@@ -27,11 +27,12 @@
namespace mongo {
- BSONObj BSONObjExternalSorter::extSortOrder;
+ IndexInterface *BSONObjExternalSorter::extSortIdxInterface;
+ Ordering BSONObjExternalSorter::extSortOrder( Ordering::make(BSONObj()) );
unsigned long long BSONObjExternalSorter::_compares = 0;
- BSONObjExternalSorter::BSONObjExternalSorter( const BSONObj & order , long maxFileSize )
- : _order( order.getOwned() ) , _maxFilesize( maxFileSize ) ,
+ BSONObjExternalSorter::BSONObjExternalSorter( IndexInterface &i, const BSONObj & order , long maxFileSize )
+ : _idxi(i), _order( order.getOwned() ) , _maxFilesize( maxFileSize ) ,
_arraySize(1000000), _cur(0), _curSizeSoFar(0), _sorted(0) {
stringstream rootpath;
@@ -52,7 +53,6 @@ namespace mongo {
delete _cur;
_cur = 0;
}
-
unsigned long removed = remove_all( _root );
wassert( removed == 1 + _files.size() );
}
@@ -61,7 +61,8 @@ namespace mongo {
// extSortComp needs to use globals
// qsort_r only seems available on bsd, which is what i really want to use
dblock l;
- extSortOrder = _order;
+ extSortIdxInterface = &_idxi;
+ extSortOrder = Ordering::make(_order);
_cur->sort( BSONObjExternalSorter::extSortComp );
}
@@ -147,7 +148,7 @@ namespace mongo {
// ---------------------------------
BSONObjExternalSorter::Iterator::Iterator( BSONObjExternalSorter * sorter ) :
- _cmp( sorter->_order ) , _in( 0 ) {
+ _cmp( sorter->_idxi, sorter->_order ) , _in( 0 ) {
for ( list<string>::iterator i=sorter->_files.begin(); i!=sorter->_files.end(); i++ ) {
_files.push_back( new FileIterator( *i ) );
@@ -158,8 +159,6 @@ namespace mongo {
_in = sorter->_cur;
_it = sorter->_cur->begin();
}
-
-
}
BSONObjExternalSorter::Iterator::~Iterator() {
diff --git a/db/extsort.h b/db/extsort.h
index c0791db..ae6a334 100644
--- a/db/extsort.h
+++ b/db/extsort.h
@@ -26,27 +26,47 @@
namespace mongo {
-
/**
- for sorting by BSONObj and attaching a value
+ for external (disk) sorting by BSONObj and attaching a value
*/
class BSONObjExternalSorter : boost::noncopyable {
public:
-
+ BSONObjExternalSorter( IndexInterface &i, const BSONObj & order = BSONObj() , long maxFileSize = 1024 * 1024 * 100 );
+ ~BSONObjExternalSorter();
typedef pair<BSONObj,DiskLoc> Data;
-
+
private:
- static BSONObj extSortOrder;
+ IndexInterface& _idxi;
- static int extSortComp( const void *lv, const void *rv ) {
+ static int _compare(IndexInterface& i, const Data& l, const Data& r, const Ordering& order) {
RARELY killCurrentOp.checkForInterrupt();
_compares++;
+ int x = i.keyCompare(l.first, r.first, order);
+ if ( x )
+ return x;
+ return l.second.compare( r.second );
+ }
+
+ class MyCmp {
+ public:
+ MyCmp( IndexInterface& i, BSONObj order = BSONObj() ) : _i(i), _order( Ordering::make(order) ) {}
+ bool operator()( const Data &l, const Data &r ) const {
+ return _compare(_i, l, r, _order) < 0;
+ };
+ private:
+ IndexInterface& _i;
+ const Ordering _order;
+ };
+
+ static IndexInterface *extSortIdxInterface;
+ static Ordering extSortOrder;
+ static int extSortComp( const void *lv, const void *rv ) {
+ DEV RARELY {
+ dbMutex.assertWriteLocked(); // must be as we use a global var
+ }
Data * l = (Data*)lv;
Data * r = (Data*)rv;
- int cmp = l->first.woCompare( r->first , extSortOrder );
- if ( cmp )
- return cmp;
- return l->second.compare( r->second );
+ return _compare(*extSortIdxInterface, *l, *r, extSortOrder);
};
class FileIterator : boost::noncopyable {
@@ -61,22 +81,6 @@ namespace mongo {
char * _end;
};
- class MyCmp {
- public:
- MyCmp( const BSONObj & order = BSONObj() ) : _order( order ) {}
- bool operator()( const Data &l, const Data &r ) const {
- RARELY killCurrentOp.checkForInterrupt();
- _compares++;
- int x = l.first.woCompare( r.first , _order );
- if ( x )
- return x < 0;
- return l.second.compare( r.second ) < 0;
- };
-
- private:
- BSONObj _order;
- };
-
public:
typedef FastArray<Data> InMemory;
@@ -99,9 +103,6 @@ namespace mongo {
};
- BSONObjExternalSorter( const BSONObj & order = BSONObj() , long maxFileSize = 1024 * 1024 * 100 );
- ~BSONObjExternalSorter();
-
void add( const BSONObj& o , const DiskLoc & loc );
void add( const BSONObj& o , int a , int b ) {
add( o , DiskLoc( a , b ) );
diff --git a/db/geo/2d.cpp b/db/geo/2d.cpp
index 7b2bf17..b873490 100644
--- a/db/geo/2d.cpp
+++ b/db/geo/2d.cpp
@@ -26,12 +26,31 @@
#include "../btree.h"
#include "../curop-inl.h"
#include "../matcher.h"
-
#include "core.h"
+// Note: we use indexinterface herein to talk to the btree code. In the future it would be nice to
+// be able to use the V1 key class (see key.h) instead of toBson() which has some cost.
+// toBson() is new with v1, so this could be slower than it used to be; a quick profiling
+// pass might make sense.
+
namespace mongo {
+ class GeoKeyNode {
+ GeoKeyNode();
+ public:
+ GeoKeyNode(DiskLoc r, BSONObj k) : recordLoc(r), _key(k) { }
+ const DiskLoc recordLoc;
+ const BSONObj _key;
+ };
+
+ // just use old indexes for geo for now. todo.
+// typedef BtreeBucket<V0> GeoBtreeBucket;
+// typedef GeoBtreeBucket::KeyNode GeoKeyNode;
+
+//#define BTREE btree<V0>
+
#if 0
+# define GEODEBUGGING
# define GEODEBUG(x) cout << x << endl;
# define GEODEBUGPRINT(x) PRINT(x)
inline void PREFIXDEBUG(GeoHash prefix, const GeoConvert* g) {
@@ -77,6 +96,8 @@ namespace mongo {
class Geo2dType : public IndexType , public GeoConvert {
public:
+ virtual ~Geo2dType() { }
+
Geo2dType( const IndexPlugin * plugin , const IndexSpec* spec )
: IndexType( plugin , spec ) {
@@ -98,34 +119,42 @@ namespace mongo {
uassert( 13024 , "no geo field specified" , _geo.size() );
- _bits = _configval( spec , "bits" , 26 ); // for lat/long, ~ 1ft
+ double bits = _configval( spec , "bits" , 26 ); // for lat/long, ~ 1ft
+
+ uassert( 13028 , "bits in geo index must be between 1 and 32" , bits > 0 && bits <= 32 );
- uassert( 13028 , "can't have more than 32 bits in geo index" , _bits <= 32 );
+ _bits = (unsigned) bits;
- _max = _configval( spec , "max" , 180 );
- _min = _configval( spec , "min" , -180 );
+ _max = _configval( spec , "max" , 180.0 );
+ _min = _configval( spec , "min" , -180.0 );
- _scaling = (1024*1024*1024*4.0)/(_max-_min);
+ double numBuckets = (1024 * 1024 * 1024 * 4.0);
+
+ _scaling = numBuckets / ( _max - _min );
_order = orderBuilder.obj();
GeoHash a(0, 0, _bits);
GeoHash b = a;
b.move(1, 1);
- _error = distance(a, b);
+
+ // Epsilon is 1/1000th of a bucket size
+ // TODO: Can we actually find error bounds for the sqrt function?
+ double epsilon = 0.001 / _scaling;
+ _error = distance(a, b) + epsilon;
+
+ // Error in radians
+ _errorSphere = deg2rad( _error );
}
- int _configval( const IndexSpec* spec , const string& name , int def ) {
+ double _configval( const IndexSpec* spec , const string& name , double def ) {
BSONElement e = spec->info[name];
- if ( e.isNumber() )
- return e.numberInt();
+ if ( e.isNumber() ) {
+ return e.numberDouble();
+ }
return def;
}
- ~Geo2dType() {
-
- }
-
virtual BSONObj fixKey( const BSONObj& in ) {
if ( in.firstElement().type() == BinData )
return in;
@@ -148,54 +177,132 @@ namespace mongo {
return b.obj();
}
- virtual void getKeys( const BSONObj &obj, BSONObjSetDefaultOrder &keys ) const {
- BSONElement geo = obj.getFieldDotted(_geo.c_str());
- if ( geo.eoo() )
- return;
+ /** Finds the key objects to put in an index */
+ virtual void getKeys( const BSONObj& obj, BSONObjSet& keys ) const {
+ getKeys( obj, &keys, NULL );
+ }
- BSONObjBuilder b(64);
+ /** Finds all locations in a geo-indexed object */
+ // TODO: Can we just return references to the locs, if they won't change?
+ void getKeys( const BSONObj& obj, vector< BSONObj >& locs ) const {
+ getKeys( obj, NULL, &locs );
+ }
- if ( ! geo.isABSONObj() )
- return;
+ /** Finds the key objects and/or locations for a geo-indexed object */
+ void getKeys( const BSONObj &obj, BSONObjSet* keys, vector< BSONObj >* locs ) const {
+
+ BSONElementMSet bSet;
+
+ // Get all the nested location fields, but don't return individual elements from
+ // the last array, if it exists.
+ obj.getFieldsDotted(_geo.c_str(), bSet, false);
- BSONObj embed = geo.embeddedObject();
- if ( embed.isEmpty() )
+ if( bSet.empty() )
return;
- _hash( embed ).append( b , "" );
+ for( BSONElementMSet::iterator setI = bSet.begin(); setI != bSet.end(); ++setI ) {
- // Go through all the other index keys
- for ( vector<string>::const_iterator i = _other.begin(); i != _other.end(); ++i ){
+ BSONElement geo = *setI;
- // Get *all* fields for the index key
- BSONElementSet eSet;
- obj.getFieldsDotted( *i, eSet );
+ GEODEBUG( "Element " << geo << " found for query " << _geo.c_str() );
+ if ( geo.eoo() || ! geo.isABSONObj() )
+ continue;
- if ( eSet.size() == 0 )
- b.appendAs( _spec->missingField(), "" );
- else if ( eSet.size() == 1 )
- b.appendAs( *(eSet.begin()), "" );
- else{
+ //
+ // Grammar for location lookup:
+ // locs ::= [loc,loc,...,loc]|{<k>:loc,<k>:loc}|loc
+ // loc ::= { <k1> : #, <k2> : # }|[#, #]|{}
+ //
+ // Empty locations are ignored, preserving single-location semantics
+ //
- // If we have more than one key, store as an array of the objects
- // TODO: Store multiple keys?
+ BSONObj embed = geo.embeddedObject();
+ if ( embed.isEmpty() )
+ continue;
- BSONArrayBuilder aBuilder;
+ // Differentiate between location arrays and locations
+ // by seeing if the first element value is a number
+ bool singleElement = embed.firstElement().isNumber();
- for( BSONElementSet::iterator ei = eSet.begin(); ei != eSet.end(); ++ei ){
- aBuilder.append( *ei );
- }
+ BSONObjIterator oi(embed);
- BSONArray arr = aBuilder.arr();
+ while( oi.more() ) {
- b.append( "", arr );
+ BSONObj locObj;
- }
+ if( singleElement ) locObj = embed;
+ else {
+ BSONElement locElement = oi.next();
+
+ uassert( 13654, str::stream() << "location object expected, location array not in correct format",
+ locElement.isABSONObj() );
+
+ locObj = locElement.embeddedObject();
+
+ if( locObj.isEmpty() )
+ continue;
+ }
+
+ BSONObjBuilder b(64);
+
+ // Remember the actual location object if needed
+ if( locs )
+ locs->push_back( locObj );
+
+ // Stop if we don't need to get anything but location objects
+ if( ! keys ) {
+ if( singleElement ) break;
+ else continue;
+ }
+
+ _hash( locObj ).append( b , "" );
+
+ // Go through all the other index keys
+ for ( vector<string>::const_iterator i = _other.begin(); i != _other.end(); ++i ) {
- }
+ // Get *all* fields for the index key
+ BSONElementSet eSet;
+ obj.getFieldsDotted( *i, eSet );
+
+
+ if ( eSet.size() == 0 )
+ b.appendAs( _spec->missingField(), "" );
+ else if ( eSet.size() == 1 )
+ b.appendAs( *(eSet.begin()), "" );
+ else {
+
+ // If we have more than one key, store as an array of the objects
+
+ BSONArrayBuilder aBuilder;
+
+ for( BSONElementSet::iterator ei = eSet.begin(); ei != eSet.end(); ++ei ) {
+ aBuilder.append( *ei );
+ }
+
+ BSONArray arr = aBuilder.arr();
+
+ b.append( "", arr );
+
+ }
+
+ }
+
+ keys->insert( b.obj() );
+
+ if( singleElement ) break;
+
+ }
+ }
- keys.insert( b.obj() );
+ }
+
+ BSONObj _fromBSONHash( const BSONElement& e ) const {
+ return _unhash( _tohash( e ) );
+ }
+
+ BSONObj _fromBSONHash( const BSONObj& o ) const {
+ return _unhash( _tohash( o.firstElement() ) );
}
GeoHash _tohash( const BSONElement& e ) const {
@@ -217,6 +324,10 @@ namespace mongo {
return hash( x.number() , y.number() );
}
+ GeoHash hash( const Point& p ) const {
+ return hash( p._x, p._y );
+ }
+
GeoHash hash( double x , double y ) const {
return GeoHash( _convert(x), _convert(y) , _bits );
}
@@ -231,9 +342,9 @@ namespace mongo {
}
unsigned _convert( double in ) const {
- uassert( 13027 , "point not in range" , in <= (_max + _error) && in >= (_min - _error) );
+ uassert( 13027 , str::stream() << "point not in interval of [ " << _min << ", " << _max << " )", in < _max && in >= _min );
in -= _min;
- assert( in > 0 );
+ assert( in >= 0 );
return (unsigned)(in * _scaling);
}
@@ -269,6 +380,10 @@ namespace mongo {
}
double sizeEdge( const GeoHash& a ) const {
+
+ if( ! a.constrains() )
+ return _max - _min;
+
double ax,ay,bx,by;
GeoHash b = a;
b.move( 1 , 1 );
@@ -297,13 +412,15 @@ namespace mongo {
case BSONObj::opNEAR:
case BSONObj::opWITHIN:
return OPTIMAL;
- default:;
+ default:
+ // We can try to match if there's no other indexing defined;
+ // in that case the value is assumed to be a point
+ return HELPFUL;
}
}
case Array:
- // Non-geo index data is stored in a non-standard way, cannot use for exact lookups with
- // additional criteria
- if ( query.nFields() > 1 ) return USELESS;
+ // We can try to match if there's no other indexing defined;
+ // in that case the value is assumed to be a point
return HELPFUL;
default:
return USELESS;
@@ -314,12 +431,13 @@ namespace mongo {
vector<string> _other;
unsigned _bits;
- int _max;
- int _min;
+ double _max;
+ double _min;
double _scaling;
BSONObj _order;
double _error;
+ double _errorSphere;
};
class Box {
@@ -341,6 +459,10 @@ namespace mongo {
Box() {}
+ BSONArray toBSON() const {
+ return BSON_ARRAY( BSON_ARRAY( _min._x << _min._y ) << BSON_ARRAY( _max._x << _max._y ) );
+ }
+
string toString() const {
StringBuilder buf(64);
buf << _min.toString() << " -->> " << _max.toString();
@@ -351,6 +473,10 @@ namespace mongo {
return val + fudge >= min && val <= max + fudge;
}
+ bool onBoundary( double bound, double val, double fudge = 0 ) const {
+ return ( val >= bound - fudge && val <= bound + fudge );
+ }
+
bool mid( double amin , double amax , double bmin , double bmax , bool min , double& res ) const {
assert( amin <= amax );
assert( bmin <= bmax );
@@ -380,18 +506,43 @@ namespace mongo {
Box intersection( boundMin , boundMax );
- return intersection.area() / ( ( area() + other.area() ) / 2 );
+ return intersection.area() / area();
}
double area() const {
return ( _max._x - _min._x ) * ( _max._y - _min._y );
}
+ double maxDim() const {
+ return max( _max._x - _min._x, _max._y - _min._y );
+ }
+
Point center() const {
return Point( ( _min._x + _max._x ) / 2 ,
( _min._y + _max._y ) / 2 );
}
+ void truncate( const Geo2dType* g ) {
+ if( _min._x < g->_min ) _min._x = g->_min;
+ if( _min._y < g->_min ) _min._y = g->_min;
+ if( _max._x > g->_max ) _max._x = g->_max;
+ if( _max._y > g->_max ) _max._y = g->_max;
+ }
+
+ void fudge( const Geo2dType* g ) {
+ _min._x -= g->_error;
+ _min._y -= g->_error;
+ _max._x += g->_error;
+ _max._y += g->_error;
+ }
+
+ bool onBoundary( Point p, double fudge = 0 ) {
+ return onBoundary( _min._x, p._x, fudge ) ||
+ onBoundary( _max._x, p._x, fudge ) ||
+ onBoundary( _min._y, p._y, fudge ) ||
+ onBoundary( _max._y, p._y, fudge );
+ }
+
bool inside( Point p , double fudge = 0 ) {
bool res = inside( p._x , p._y , fudge );
//cout << "is : " << p.toString() << " in " << toString() << " = " << res << endl;
@@ -412,405 +563,481 @@ namespace mongo {
Point _max;
};
- class Geo2dPlugin : public IndexPlugin {
+
+ class Polygon {
public:
- Geo2dPlugin() : IndexPlugin( GEO2DNAME ) {
- }
- virtual IndexType* generate( const IndexSpec* spec ) const {
- return new Geo2dType( this , spec );
+ Polygon( void ) : _centroidCalculated( false ) {}
+
+ Polygon( vector<Point> points ) : _centroidCalculated( false ),
+ _points( points ) { }
+
+ void add( Point p ) {
+ _centroidCalculated = false;
+ _points.push_back( p );
}
- } geo2dplugin;
- struct GeoUnitTest : public UnitTest {
+ int size( void ) const {
+ return _points.size();
+ }
- int round( double d ) {
- return (int)(.5+(d*1000));
+ /**
+ * Determine if the point supplied is contained by the current polygon.
+ *
+ * The algorithm uses a ray casting method.
+ */
+ bool contains( const Point& p ) const {
+ return contains( p, 0 ) > 0;
}
-#define GEOHEQ(a,b) if ( a.toString() != b ){ cout << "[" << a.toString() << "] != [" << b << "]" << endl; assert( a == GeoHash(b) ); }
+ int contains( const Point &p, double fudge ) const {
- void run() {
- assert( ! GeoHash::isBitSet( 0 , 0 ) );
- assert( ! GeoHash::isBitSet( 0 , 31 ) );
- assert( GeoHash::isBitSet( 1 , 31 ) );
+ Box fudgeBox( Point( p._x - fudge, p._y - fudge ), Point( p._x + fudge, p._y + fudge ) );
- IndexSpec i( BSON( "loc" << "2d" ) );
- Geo2dType g( &geo2dplugin , &i );
- {
- double x = 73.01212;
- double y = 41.352964;
- BSONObj in = BSON( "x" << x << "y" << y );
- GeoHash h = g._hash( in );
- BSONObj out = g._unhash( h );
- assert( round(x) == round( out["x"].number() ) );
- assert( round(y) == round( out["y"].number() ) );
- assert( round( in["x"].number() ) == round( out["x"].number() ) );
- assert( round( in["y"].number() ) == round( out["y"].number() ) );
- }
+ int counter = 0;
+ Point p1 = _points[0];
+ for ( int i = 1; i <= size(); i++ ) {
+ Point p2 = _points[i % size()];
- {
- double x = -73.01212;
- double y = 41.352964;
- BSONObj in = BSON( "x" << x << "y" << y );
- GeoHash h = g._hash( in );
- BSONObj out = g._unhash( h );
- assert( round(x) == round( out["x"].number() ) );
- assert( round(y) == round( out["y"].number() ) );
- assert( round( in["x"].number() ) == round( out["x"].number() ) );
- assert( round( in["y"].number() ) == round( out["y"].number() ) );
- }
+ GEODEBUG( "Doing intersection check of " << fudgeBox.toString() << " with seg " << p1.toString() << " to " << p2.toString() );
- {
- GeoHash h( "0000" );
- h.move( 0 , 1 );
- GEOHEQ( h , "0001" );
- h.move( 0 , -1 );
- GEOHEQ( h , "0000" );
+ // We need to check whether or not this segment intersects our error box
+ if( fudge > 0 &&
+ // Points not too far below box
+ fudgeBox._min._y <= std::max( p1._y, p2._y ) &&
+ // Points not too far above box
+ fudgeBox._max._y >= std::min( p1._y, p2._y ) &&
+ // Points not too far to left of box
+ fudgeBox._min._x <= std::max( p1._x, p2._x ) &&
+ // Points not too far to right of box
+ fudgeBox._max._x >= std::min( p1._x, p2._x ) ) {
- h.init( "0001" );
- h.move( 0 , 1 );
- GEOHEQ( h , "0100" );
- h.move( 0 , -1 );
- GEOHEQ( h , "0001" );
+ GEODEBUG( "Doing detailed check" );
+ // If our box contains one or more of these points, we need to do an exact check.
+ if( fudgeBox.inside(p1) ) {
+ GEODEBUG( "Point 1 inside" );
+ return 0;
+ }
+ if( fudgeBox.inside(p2) ) {
+ GEODEBUG( "Point 2 inside" );
+ return 0;
+ }
- h.init( "0000" );
- h.move( 1 , 0 );
- GEOHEQ( h , "0010" );
- }
+ // Do intersection check for vertical sides
+ if ( p1._y != p2._y ) {
- {
- Box b( 5 , 5 , 2 );
- assert( "(5,5) -->> (7,7)" == b.toString() );
- }
+ double invSlope = ( p2._x - p1._x ) / ( p2._y - p1._y );
- {
- GeoHash a = g.hash( 1 , 1 );
- GeoHash b = g.hash( 4 , 5 );
- assert( 5 == (int)(g.distance( a , b ) ) );
- a = g.hash( 50 , 50 );
- b = g.hash( 42 , 44 );
- assert( round(10) == round(g.distance( a , b )) );
- }
+ double xintersT = ( fudgeBox._max._y - p1._y ) * invSlope + p1._x;
+ if( fudgeBox._min._x <= xintersT && fudgeBox._max._x >= xintersT ) {
+ GEODEBUG( "Top intersection @ " << xintersT );
+ return 0;
+ }
- {
- GeoHash x("0000");
- assert( 0 == x.getHash() );
- x.init( 0 , 1 , 32 );
- GEOHEQ( x , "0000000000000000000000000000000000000000000000000000000000000001" )
+ double xintersB = ( fudgeBox._min._y - p1._y ) * invSlope + p1._x;
+ if( fudgeBox._min._x <= xintersB && fudgeBox._max._x >= xintersB ) {
+ GEODEBUG( "Bottom intersection @ " << xintersB );
+ return 0;
+ }
- assert( GeoHash( "1100").hasPrefix( GeoHash( "11" ) ) );
- assert( ! GeoHash( "1000").hasPrefix( GeoHash( "11" ) ) );
- }
+ }
- {
- GeoHash x("1010");
- GEOHEQ( x , "1010" );
- GeoHash y = x + "01";
- GEOHEQ( y , "101001" );
- }
+ // Do intersection check for horizontal sides
+ if( p1._x != p2._x ) {
- {
+ double slope = ( p2._y - p1._y ) / ( p2._x - p1._x );
- GeoHash a = g.hash( 5 , 5 );
- GeoHash b = g.hash( 5 , 7 );
- GeoHash c = g.hash( 100 , 100 );
- /*
- cout << "a: " << a << endl;
- cout << "b: " << b << endl;
- cout << "c: " << c << endl;
+ double yintersR = ( p1._x - fudgeBox._max._x ) * slope + p1._y;
+ if( fudgeBox._min._y <= yintersR && fudgeBox._max._y >= yintersR ) {
+ GEODEBUG( "Right intersection @ " << yintersR );
+ return 0;
+ }
- cout << "a: " << a.toStringHex1() << endl;
- cout << "b: " << b.toStringHex1() << endl;
- cout << "c: " << c.toStringHex1() << endl;
- */
- BSONObj oa = a.wrap();
- BSONObj ob = b.wrap();
- BSONObj oc = c.wrap();
- /*
- cout << "a: " << oa.hexDump() << endl;
- cout << "b: " << ob.hexDump() << endl;
- cout << "c: " << oc.hexDump() << endl;
- */
- assert( oa.woCompare( ob ) < 0 );
- assert( oa.woCompare( oc ) < 0 );
+ double yintersL = ( p1._x - fudgeBox._min._x ) * slope + p1._y;
+ if( fudgeBox._min._y <= yintersL && fudgeBox._max._y >= yintersL ) {
+ GEODEBUG( "Left intersection @ " << yintersL );
+ return 0;
+ }
- }
+ }
- {
- GeoHash x( "000000" );
- x.move( -1 , 0 );
- GEOHEQ( x , "101010" );
- x.move( 1 , -1 );
- GEOHEQ( x , "010101" );
- x.move( 0 , 1 );
- GEOHEQ( x , "000000" );
- }
+ }
+ else if( fudge == 0 ){
- {
- GeoHash prefix( "110011000000" );
- GeoHash entry( "1100110000011100000111000001110000011100000111000001000000000000" );
- assert( ! entry.hasPrefix( prefix ) );
+ // If this is an exact vertex, we won't intersect, so check this
+ if( p._y == p1._y && p._x == p1._x ) return 1;
+ else if( p._y == p2._y && p._x == p2._x ) return 1;
- entry = GeoHash("1100110000001100000111000001110000011100000111000001000000000000");
- assert( entry.toString().find( prefix.toString() ) == 0 );
- assert( entry.hasPrefix( GeoHash( "1100" ) ) );
- assert( entry.hasPrefix( prefix ) );
+ // If this is a horizontal line we won't intersect, so check this
+ if( p1._y == p2._y && p._y == p1._y ){
+ // Check that the x-coord lies in the line
+ if( p._x >= std::min( p1._x, p2._x ) && p._x <= std::max( p1._x, p2._x ) ) return 1;
+ }
+
+ }
+
+ // Normal intersection test.
+ // TODO: Invert these for clearer logic?
+ if ( p._y > std::min( p1._y, p2._y ) ) {
+ if ( p._y <= std::max( p1._y, p2._y ) ) {
+ if ( p._x <= std::max( p1._x, p2._x ) ) {
+ if ( p1._y != p2._y ) {
+ double xinters = (p._y-p1._y)*(p2._x-p1._x)/(p2._y-p1._y)+p1._x;
+ // Special case of point on vertical line
+ if ( p1._x == p2._x && p._x == p1._x ){
+
+ // Need special case for the vertical edges, for example:
+ // 1) \e pe/----->
+ // vs.
+ // 2) \ep---e/----->
+ //
+ // if we count exact as intersection, then 1 is in but 2 is out
+ // if we count exact as no-int then 1 is out but 2 is in.
+
+ return 1;
+ }
+ else if( p1._x == p2._x || p._x <= xinters ) {
+ counter++;
+ }
+ }
+ }
+ }
+ }
+
+ p1 = p2;
}
- {
- GeoHash a = g.hash( 50 , 50 );
- GeoHash b = g.hash( 48 , 54 );
- assert( round( 4.47214 ) == round( g.distance( a , b ) ) );
+ if ( counter % 2 == 0 ) {
+ return -1;
}
+ else {
+ return 1;
+ }
+ }
+ /**
+ * Calculate the centroid, or center of mass of the polygon object.
+ */
+ Point centroid( void ) {
- {
- Box b( Point( 29.762283 , -95.364271 ) , Point( 29.764283000000002 , -95.36227099999999 ) );
- assert( b.inside( 29.763 , -95.363 ) );
- assert( ! b.inside( 32.9570255 , -96.1082497 ) );
- assert( ! b.inside( 32.9570255 , -96.1082497 , .01 ) );
+ /* Centroid is cached, it won't change betwen points */
+ if ( _centroidCalculated ) {
+ return _centroid;
}
- {
- GeoHash a( "11001111" );
- assert( GeoHash( "11" ) == a.commonPrefix( GeoHash("11") ) );
- assert( GeoHash( "11" ) == a.commonPrefix( GeoHash("11110000") ) );
+ Point cent;
+ double signedArea = 0.0;
+ double area = 0.0; // Partial signed area
+
+ /// For all vertices except last
+ int i = 0;
+ for ( i = 0; i < size() - 1; ++i ) {
+ area = _points[i]._x * _points[i+1]._y - _points[i+1]._x * _points[i]._y ;
+ signedArea += area;
+ cent._x += ( _points[i]._x + _points[i+1]._x ) * area;
+ cent._y += ( _points[i]._y + _points[i+1]._y ) * area;
}
- {
- int N = 10000;
- {
- Timer t;
- for ( int i=0; i<N; i++ ) {
- unsigned x = (unsigned)rand();
- unsigned y = (unsigned)rand();
- GeoHash h( x , y );
- unsigned a,b;
- h.unhash_slow( a,b );
- assert( a == x );
- assert( b == y );
- }
- //cout << "slow: " << t.millis() << endl;
- }
+ // Do last vertex
+ area = _points[i]._x * _points[0]._y - _points[0]._x * _points[i]._y;
+ cent._x += ( _points[i]._x + _points[0]._x ) * area;
+ cent._y += ( _points[i]._y + _points[0]._y ) * area;
+ signedArea += area;
+ signedArea *= 0.5;
+ cent._x /= ( 6 * signedArea );
+ cent._y /= ( 6 * signedArea );
- {
- Timer t;
- for ( int i=0; i<N; i++ ) {
- unsigned x = (unsigned)rand();
- unsigned y = (unsigned)rand();
- GeoHash h( x , y );
- unsigned a,b;
- h.unhash_fast( a,b );
- assert( a == x );
- assert( b == y );
- }
- //cout << "fast: " << t.millis() << endl;
- }
+ _centroidCalculated = true;
+ _centroid = cent;
- }
+ return cent;
+ }
- {
- // see http://en.wikipedia.org/wiki/Great-circle_distance#Worked_example
+ Box bounds( void ) {
- {
- Point BNA (-86.67, 36.12);
- Point LAX (-118.40, 33.94);
+ // TODO: Cache this
- double dist1 = spheredist_deg(BNA, LAX);
- double dist2 = spheredist_deg(LAX, BNA);
+ _bounds._max = _points[0];
+ _bounds._min = _points[0];
- // target is 0.45306
- assert( 0.45305 <= dist1 && dist1 <= 0.45307 );
- assert( 0.45305 <= dist2 && dist2 <= 0.45307 );
- }
- {
- Point BNA (-1.5127, 0.6304);
- Point LAX (-2.0665, 0.5924);
+ for ( int i = 1; i < size(); i++ ) {
- double dist1 = spheredist_rad(BNA, LAX);
- double dist2 = spheredist_rad(LAX, BNA);
+ _bounds._max._x = max( _bounds._max._x, _points[i]._x );
+ _bounds._max._y = max( _bounds._max._y, _points[i]._y );
+ _bounds._min._x = min( _bounds._min._x, _points[i]._x );
+ _bounds._min._y = min( _bounds._min._y, _points[i]._y );
- // target is 0.45306
- assert( 0.45305 <= dist1 && dist1 <= 0.45307 );
- assert( 0.45305 <= dist2 && dist2 <= 0.45307 );
- }
- {
- Point JFK (-73.77694444, 40.63861111 );
- Point LAX (-118.40, 33.94);
+ }
- double dist = spheredist_deg(JFK, LAX) * EARTH_RADIUS_MILES;
- assert( dist > 2469 && dist < 2470 );
- }
+ return _bounds;
- {
- Point BNA (-86.67, 36.12);
- Point LAX (-118.40, 33.94);
- Point JFK (-73.77694444, 40.63861111 );
- assert( spheredist_deg(BNA, BNA) < 1e-6);
- assert( spheredist_deg(LAX, LAX) < 1e-6);
- assert( spheredist_deg(JFK, JFK) < 1e-6);
+ }
- Point zero (0, 0);
- Point antizero (0,-180);
+ private:
- // these were known to cause NaN
- assert( spheredist_deg(zero, zero) < 1e-6);
- assert( fabs(M_PI-spheredist_deg(zero, antizero)) < 1e-6);
- assert( fabs(M_PI-spheredist_deg(antizero, zero)) < 1e-6);
- }
- }
+ bool _centroidCalculated;
+ Point _centroid;
+
+ Box _bounds;
+
+ vector<Point> _points;
+ };
+
+ class Geo2dPlugin : public IndexPlugin {
+ public:
+ Geo2dPlugin() : IndexPlugin( GEO2DNAME ) {
}
- } geoUnitTest;
+
+ virtual IndexType* generate( const IndexSpec* spec ) const {
+ return new Geo2dType( this , spec );
+ }
+ } geo2dplugin;
+
+ void __forceLinkGeoPlugin() {
+ geo2dplugin.getName();
+ }
+
+
+
+ class GeoHopper;
class GeoPoint {
public:
- GeoPoint() {
+
+ GeoPoint() : _distance( -1 ), _exact( false )
+ {}
+
+ //// Distance not used ////
+
+ GeoPoint( const GeoKeyNode& node )
+ : _key( node._key ) , _loc( node.recordLoc ) , _o( node.recordLoc.obj() ), _distance( -1 ) , _exact( false ) {
}
- GeoPoint( const KeyNode& node , double distance )
- : _key( node.key ) , _loc( node.recordLoc ) , _o( node.recordLoc.obj() ) , _distance( distance ) {
+ //// Immediate initialization of distance ////
+
+ GeoPoint( const GeoKeyNode& node, double distance, bool exact )
+ : _key( node._key ) , _loc( node.recordLoc ) , _o( node.recordLoc.obj() ), _distance( distance ), _exact( exact ) {
}
- GeoPoint( const BSONObj& key , DiskLoc loc , double distance )
- : _key(key) , _loc(loc) , _o( loc.obj() ) , _distance( distance ) {
+ GeoPoint( const GeoPoint& pt, double distance, bool exact )
+ : _key( pt.key() ) , _loc( pt.loc() ) , _o( pt.obj() ), _distance( distance ), _exact( exact ) {
}
bool operator<( const GeoPoint& other ) const {
- return _distance < other._distance;
+ if( _distance != other._distance ) return _distance < other._distance;
+ if( _exact != other._exact ) return _exact < other._exact;
+ return _loc < other._loc;
}
- bool isEmpty() const {
+ double distance() const {
+ return _distance;
+ }
+
+ bool isExact() const {
+ return _exact;
+ }
+
+ BSONObj key() const {
+ return _key;
+ }
+
+ DiskLoc loc() const {
+ return _loc;
+ }
+
+ BSONObj obj() const {
+ return _o;
+ }
+
+ BSONObj pt() const {
+ return _pt;
+ }
+
+ bool isEmpty() {
return _o.isEmpty();
}
+ string toString() const {
+ return str::stream() << "Point from " << _o << " dist : " << _distance << ( _exact ? " (ex)" : " (app)" );
+ }
+
BSONObj _key;
DiskLoc _loc;
BSONObj _o;
+ BSONObj _pt;
+
double _distance;
+ bool _exact;
};
+ // GeoBrowse subclasses this
class GeoAccumulator {
public:
- GeoAccumulator( const Geo2dType * g , const BSONObj& filter )
- : _g(g) , _lookedAt(0) , _objectsLoaded(0) , _found(0) {
+ GeoAccumulator( const Geo2dType * g , const BSONObj& filter, bool uniqueDocs, bool needDistance )
+ : _g(g) ,
+ _keysChecked(0) ,
+ _lookedAt(0) ,
+ _matchesPerfd(0) ,
+ _objectsLoaded(0) ,
+ _pointsLoaded(0) ,
+ _found(0) ,
+ _uniqueDocs( uniqueDocs ) ,
+ _needDistance( needDistance )
+ {
if ( ! filter.isEmpty() ) {
_matcher.reset( new CoveredIndexMatcher( filter , g->keyPattern() ) );
+ GEODEBUG( "Matcher is now " << _matcher->docMatcher().toString() );
}
}
- virtual ~GeoAccumulator() {
- }
+ virtual ~GeoAccumulator() { }
+
+ /** Check if we've already looked at a key. ALSO marks as seen, anticipating a follow-up call
+ to add(). This is broken out to avoid some work extracting the key bson if it's an
+ already seen point.
+ */
+ private:
+ set< pair<DiskLoc,int> > _seen;
+ public:
+ bool seen(DiskLoc bucket, int pos) {
- virtual void add( const KeyNode& node ) {
- // when looking at other boxes, don't want to look at some object twice
- pair<set<DiskLoc>::iterator,bool> seenBefore = _seen.insert( node.recordLoc );
+ _keysChecked++;
+
+ pair< set<pair<DiskLoc,int> >::iterator, bool > seenBefore = _seen.insert( make_pair(bucket,pos) );
if ( ! seenBefore.second ) {
- GEODEBUG( "\t\t\t\t already seen : " << node.recordLoc.obj()["_id"] );
- return;
+ GEODEBUG( "\t\t\t\t already seen : " << bucket.toString() << ' ' << pos ); // node.key.toString() << " @ " << Point( _g, GeoHash( node.key.firstElement() ) ).toString() << " with " << node.recordLoc.obj()["_id"] );
+ return true;
}
+ return false;
+ }
+
+ enum KeyResult { BAD, BORDER, GOOD };
+
+ virtual void add( const GeoKeyNode& node ) {
+
+ GEODEBUG( "\t\t\t\t checking key " << node._key.toString() )
+
_lookedAt++;
- // distance check
- double d = 0;
- if ( ! checkDistance( GeoHash( node.key.firstElement() ) , d ) ) {
- GEODEBUG( "\t\t\t\t bad distance : " << node.recordLoc.obj() << "\t" << d );
+ ////
+ // Approximate distance check using key data
+ ////
+ double keyD = 0;
+ Point keyP( _g, GeoHash( node._key.firstElement(), _g->_bits ) );
+ KeyResult keyOk = approxKeyCheck( keyP, keyD );
+ if ( keyOk == BAD ) {
+ GEODEBUG( "\t\t\t\t bad distance : " << node.recordLoc.obj() << "\t" << keyD );
return;
}
- GEODEBUG( "\t\t\t\t good distance : " << node.recordLoc.obj() << "\t" << d );
+ GEODEBUG( "\t\t\t\t good distance : " << node.recordLoc.obj() << "\t" << keyD );
- // matcher
- MatchDetails details;
- if ( _matcher.get() ) {
- bool good = _matcher->matches( node.key , node.recordLoc , &details );
- if ( details.loadedObject )
- _objectsLoaded++;
+ ////
+ // Check for match using other key (and potentially doc) criteria
+ ////
+ // Remember match results for each object
+ map<DiskLoc, bool>::iterator match = _matched.find( node.recordLoc );
+ bool newDoc = match == _matched.end();
+ if( newDoc ) {
+
+ GEODEBUG( "\t\t\t\t matching new doc with " << (_matcher ? _matcher->docMatcher().toString() : "(empty)" ) );
+
+ // matcher
+ MatchDetails details;
+ if ( _matcher.get() ) {
+ bool good = _matcher->matchesWithSingleKeyIndex( node._key , node.recordLoc , &details );
+
+ _matchesPerfd++;
- if ( ! good ) {
- GEODEBUG( "\t\t\t\t didn't match : " << node.recordLoc.obj()["_id"] );
- return;
+ if ( details._loadedObject )
+ _objectsLoaded++;
+
+ if ( ! good ) {
+ GEODEBUG( "\t\t\t\t didn't match : " << node.recordLoc.obj()["_id"] );
+ _matched[ node.recordLoc ] = false;
+ return;
+ }
}
+
+ _matched[ node.recordLoc ] = true;
+
+ if ( ! details._loadedObject ) // don't double count
+ _objectsLoaded++;
+
+ }
+ else if( !((*match).second) ) {
+ GEODEBUG( "\t\t\t\t previously didn't match : " << node.recordLoc.obj()["_id"] );
+ return;
}
- if ( ! details.loadedObject ) // dont double count
- _objectsLoaded++;
+ ////
+ // Exact check with particular data fields
+ ////
+ // Can add multiple points
+ int diff = addSpecific( node , keyP, keyOk == BORDER, keyD, newDoc );
+ if( diff > 0 ) _found += diff;
+ else _found -= -diff;
- addSpecific( node , d );
- _found++;
}
- virtual void addSpecific( const KeyNode& node , double d ) = 0;
- virtual bool checkDistance( const GeoHash& node , double& d ) = 0;
+ virtual void getPointsFor( const BSONObj& key, const BSONObj& obj, vector< BSONObj >& locsForNode, bool allPoints = false ){
- long long found() const {
- return _found;
- }
+ // Find all the location objects that generated keys for this document
+ vector< BSONObj > locs;
+ _g->getKeys( obj, allPoints ? locsForNode : locs );
+ _pointsLoaded++;
- const Geo2dType * _g;
- set<DiskLoc> _seen;
- auto_ptr<CoveredIndexMatcher> _matcher;
+ if( allPoints ) return;
+ if( locs.size() == 1 ){
+ locsForNode.push_back( locs[0] );
+ return;
+ }
- long long _lookedAt;
- long long _objectsLoaded;
- long long _found;
- };
+ // Find the particular location we want
+ GeoHash keyHash( key.firstElement(), _g->_bits );
- class GeoHopper : public GeoAccumulator {
- public:
- typedef multiset<GeoPoint> Holder;
+ // log() << "Hash: " << node.key << " and " << keyHash.getHash() << " unique " << _uniqueDocs << endl;
+ for( vector< BSONObj >::iterator i = locs.begin(); i != locs.end(); ++i ) {
- GeoHopper( const Geo2dType * g , unsigned max , const Point& n , const BSONObj& filter = BSONObj() , double maxDistance = numeric_limits<double>::max() , GeoDistType type=GEO_PLAIN)
- : GeoAccumulator( g , filter ) , _max( max ) , _near( n ), _maxDistance( maxDistance ), _type( type ), _farthest(-1)
- {}
+ // Ignore all locations not hashed to the key's hash, since we may see
+ // those later
+ if( _g->_hash( *i ) != keyHash ) continue;
+
+ locsForNode.push_back( *i );
- virtual bool checkDistance( const GeoHash& h , double& d ) {
- switch (_type) {
- case GEO_PLAIN:
- d = _near.distance( Point(_g, h) );
- break;
- case GEO_SPHERE:
- d = spheredist_deg(_near, Point(_g, h));
- break;
- default:
- assert(0);
}
- bool good = d < _maxDistance && ( _points.size() < _max || d < farthest() );
- GEODEBUG( "\t\t\t\t\t\t\t checkDistance " << _near.toString() << "\t" << h << "\t" << d
- << " ok: " << good << " farthest: " << farthest() );
- return good;
+
}
- virtual void addSpecific( const KeyNode& node , double d ) {
- GEODEBUG( "\t\t" << GeoHash( node.key.firstElement() ) << "\t" << node.recordLoc.obj() << "\t" << d );
- _points.insert( GeoPoint( node.key , node.recordLoc , d ) );
- if ( _points.size() > _max ) {
- _points.erase( --_points.end() );
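+ // Hooks implemented by subclasses:
+ //  - addSpecific() records a matching point (possibly several per document)
+ //  - approxKeyCheck() classifies a key-derived point as GOOD, BORDER, or BAD
+ //  - exactDocCheck() verifies an actual document location against the search region
+ //  - expensiveExactCheck() signals that exact checks are costly, so approximate checks are preferred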
+ virtual int addSpecific( const GeoKeyNode& node, const Point& p , bool inBounds, double d, bool newDoc ) = 0;
+ virtual KeyResult approxKeyCheck( const Point& p , double& keyD ) = 0;
+ virtual bool exactDocCheck( const Point& p , double& d ) = 0;
+ virtual bool expensiveExactCheck(){ return false; }
- Holder::iterator i = _points.end();
- i--;
- _farthest = i->_distance;
- }
- else {
- if (d > _farthest)
- _farthest = d;
- }
- }
- double farthest() const {
- return _farthest;
+ long long found() const {
+ return _found;
}
+ const Geo2dType * _g;
+ map<DiskLoc, bool> _matched;
+ shared_ptr<CoveredIndexMatcher> _matcher;
+
+ long long _keysChecked;
+ long long _lookedAt;
+ long long _matchesPerfd;
+ long long _objectsLoaded;
+ long long _pointsLoaded;
+ long long _found;
+
+ bool _uniqueDocs;
+ bool _needDistance;
- unsigned _max;
- Point _near;
- Holder _points;
- double _maxDistance;
- GeoDistType _type;
- double _farthest;
};
struct BtreeLocation {
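+ // A position (bucket + pos) in the 2d index's btree, accessed through the
+ // version-independent IndexInterface.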
+ BtreeLocation() : ii(0) { }
+ IndexInterface *ii;
int pos;
bool found;
DiskLoc bucket;
@@ -818,11 +1045,13 @@ namespace mongo {
BSONObj key() {
if ( bucket.isNull() )
return BSONObj();
- return bucket.btree()->keyNode( pos ).key;
+ return ii->keyAt(bucket, pos);
+ //return bucket.btree<V>()->keyNode( pos ).key.toBson();
}
bool hasPrefix( const GeoHash& hash ) {
- BSONElement e = key().firstElement();
+ BSONObj k = key();
+ BSONElement e = k.firstElement();
if ( e.eoo() )
return false;
return GeoHash( e ).hasPrefix( hash );
@@ -832,7 +1061,7 @@ namespace mongo {
if ( bucket.isNull() )
return false;
- bucket = bucket.btree()->advance( bucket , pos , direction , "btreelocation" );
+ bucket = ii->advance( bucket , pos , direction , "btreelocation" );
if ( all )
return checkCur( totalFound , all );
@@ -844,9 +1073,15 @@ namespace mongo {
if ( bucket.isNull() )
return false;
- if ( bucket.btree()->isUsed(pos) ) {
+ if ( ii->isUsed(bucket, pos) ) {
totalFound++;
- all->add( bucket.btree()->keyNode( pos ) );
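+ // Only extract the key and record location when this btree slot hasn't been seen yet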
+ if( !all->seen(bucket, pos) ) {
+ BSONObj o;
+ DiskLoc recLoc;
+ ii->keyAt(bucket, pos, o, recLoc);
+ GeoKeyNode n(recLoc, o);
+ all->add(n);
+ }
}
else {
GEODEBUG( "\t\t\t\t not used: " << key() );
@@ -861,6 +1096,9 @@ namespace mongo {
return ss.str();
}
+ // Returns the min and max keys which bound a particular location.
+ // The only time these may be equal is when the key is exactly at the location
+ // itself; otherwise our expanding algorithm will fail.
static bool initial( const IndexDetails& id , const Geo2dType * spec ,
BtreeLocation& min , BtreeLocation& max ,
GeoHash start ,
@@ -868,211 +1106,33 @@ namespace mongo {
Ordering ordering = Ordering::make(spec->_order);
- min.bucket = id.head.btree()->locate( id , id.head , start.wrap() ,
- ordering , min.pos , min.found , minDiskLoc );
- if (hopper) min.checkCur( found , hopper );
- max = min;
+ IndexInterface *ii = &id.idxInterface();
+ min.ii = ii;
+ max.ii = ii;
- if ( min.bucket.isNull() || ( hopper && !(hopper->found()) ) ) {
- min.bucket = id.head.btree()->locate( id , id.head , start.wrap() ,
- ordering , min.pos , min.found , minDiskLoc , -1 );
- if (hopper) min.checkCur( found , hopper );
- }
+ min.bucket = ii->locate( id , id.head , start.wrap() ,
+ ordering , min.pos , min.found , minDiskLoc, -1 );
- return ! min.bucket.isNull() || ! max.bucket.isNull();
- }
- };
-
- class GeoSearch {
- public:
- GeoSearch( const Geo2dType * g , const GeoHash& n , int numWanted=100 , BSONObj filter=BSONObj() , double maxDistance = numeric_limits<double>::max() , GeoDistType type=GEO_PLAIN)
- : _spec( g ) ,_startPt(g,n), _start( n ) ,
- _numWanted( numWanted ) , _filter( filter ) , _maxDistance( maxDistance ) ,
- _hopper( new GeoHopper( g , numWanted , _startPt , filter , maxDistance, type ) ), _type(type) {
- assert( g->getDetails() );
- _nscanned = 0;
- _found = 0;
-
- if (type == GEO_PLAIN) {
- _scanDistance = maxDistance;
- }
- else if (type == GEO_SPHERE) {
- if (maxDistance == numeric_limits<double>::max()) {
- _scanDistance = maxDistance;
- }
- else {
- //TODO: consider splitting into x and y scan distances
- _scanDistance = computeXScanDistance(_startPt._y, rad2deg(maxDistance));
- }
- }
- else {
- assert(0);
- }
- }
-
- void exec() {
- const IndexDetails& id = *_spec->getDetails();
-
- const BtreeBucket * head = id.head.btree();
- assert( head );
- /*
- * Search algorithm
- * 1) use geohash prefix to find X items
- * 2) compute max distance from want to an item
- * 3) find optimal set of boxes that complete circle
- * 4) use regular btree cursors to scan those boxes
- */
-
- GeoHopper * hopper = _hopper.get();
-
- _prefix = _start;
- BtreeLocation min,max;
- {
- // 1 regular geo hash algorithm
-
-
- if ( ! BtreeLocation::initial( id , _spec , min , max , _start , _found , NULL ) )
- return;
-
- while ( !_prefix.constrains() || // if next pass would cover universe, just keep going
- ( _hopper->found() < _numWanted && _spec->sizeEdge( _prefix ) <= _scanDistance)) {
- GEODEBUG( _prefix << "\t" << _found << "\t DESC" );
- while ( min.hasPrefix(_prefix) && min.checkCur(_found, hopper) && min.advance(-1, _found, NULL) )
- _nscanned++;
- GEODEBUG( _prefix << "\t" << _found << "\t ASC" );
- while ( max.hasPrefix(_prefix) && max.checkCur(_found, hopper) && max.advance(+1, _found, NULL) )
- _nscanned++;
-
- if ( ! _prefix.constrains() ) {
- GEODEBUG( "done search w/o part 2" )
- return;
- }
-
- _alreadyScanned = Box(_spec, _prefix);
- _prefix = _prefix.up();
- }
- }
- GEODEBUG( "done part 1" );
- {
- // 2
- double farthest = hopper->farthest();
- GEODEBUGPRINT(hopper->farthest());
- if (hopper->found() < _numWanted) {
- // Not enough found in Phase 1
- farthest = _scanDistance;
- }
- else if (_type == GEO_SPHERE) {
- farthest = std::min(_scanDistance, computeXScanDistance(_startPt._y, rad2deg(farthest)));
- }
- GEODEBUGPRINT(farthest);
-
- Box want( _startPt._x - farthest , _startPt._y - farthest , farthest * 2 );
- GEODEBUGPRINT(want.toString());
-
- _prefix = _start;
- while (_prefix.constrains() && _spec->sizeEdge( _prefix ) < farthest ) {
- _prefix = _prefix.up();
- }
-
- PREFIXDEBUG(_prefix, _spec);
-
- if (_prefix.getBits() <= 1) {
- // TODO consider walking in $natural order
-
- while ( min.checkCur(_found, hopper) && min.advance(-1, _found, NULL) )
- _nscanned++;
- while ( max.checkCur(_found, hopper) && max.advance(+1, _found, NULL) )
- _nscanned++;
-
- GEODEBUG( "done search after scanning whole collection" )
- return;
- }
-
- if ( logLevel > 0 ) {
- log(1) << "want: " << want << " found:" << _found << " nscanned: " << _nscanned << " hash size:" << _spec->sizeEdge( _prefix )
- << " farthest: " << farthest << " using box: " << Box( _spec , _prefix ).toString() << endl;
- }
-
- for ( int x=-1; x<=1; x++ ) {
- for ( int y=-1; y<=1; y++ ) {
- GeoHash toscan = _prefix;
- toscan.move( x , y );
-
- // 3 & 4
- doBox( id , want , toscan );
- }
- }
- }
- GEODEBUG( "done search" )
-
- }
-
- void doBox( const IndexDetails& id , const Box& want , const GeoHash& toscan , int depth = 0 ) {
- Box testBox( _spec , toscan );
- if ( logLevel > 2 ) {
- cout << "\t";
- for ( int i=0; i<depth; i++ )
- cout << "\t";
- cout << " doBox: " << testBox.toString() << "\t" << toscan.toString() << " scanned so far: " << _nscanned << endl;
- }
- else {
- GEODEBUGPRINT(testBox.toString());
- }
-
- if (_alreadyScanned.contains(testBox, _spec->_error)) {
- GEODEBUG("skipping box: already scanned");
- return; // been here, done this
- }
-
- double intPer = testBox.intersects( want );
-
- if ( intPer <= 0 ) {
- GEODEBUG("skipping box: not in want");
- return;
- }
-
- bool goDeeper = intPer < .5 && depth < 2;
+ if (hopper) min.checkCur( found , hopper );
- long long myscanned = 0;
+ // TODO: Might be able to avoid doing a full lookup in some cases here,
+ // but would add complexity and we're hitting pretty much the exact same data.
+ // Cannot set this = min in general, however.
+ max.bucket = ii->locate( id , id.head , start.wrap() ,
+ ordering , max.pos , max.found , minDiskLoc, 1 );
- BtreeLocation loc;
- loc.bucket = id.head.btree()->locate( id , id.head , toscan.wrap() , Ordering::make(_spec->_order) ,
- loc.pos , loc.found , minDiskLoc );
- loc.checkCur( _found , _hopper.get() );
- while ( loc.hasPrefix( toscan ) && loc.advance( 1 , _found , _hopper.get() ) ) {
- _nscanned++;
- if ( ++myscanned > 100 && goDeeper ) {
- doBox( id , want , toscan + "00" , depth + 1);
- doBox( id , want , toscan + "01" , depth + 1);
- doBox( id , want , toscan + "10" , depth + 1);
- doBox( id , want , toscan + "11" , depth + 1);
- return;
- }
- }
+ if (hopper) max.checkCur( found , hopper );
+ return ! min.bucket.isNull() || ! max.bucket.isNull();
}
-
-
- const Geo2dType * _spec;
-
- Point _startPt;
- GeoHash _start;
- GeoHash _prefix;
- int _numWanted;
- BSONObj _filter;
- double _maxDistance;
- double _scanDistance;
- shared_ptr<GeoHopper> _hopper;
-
- long long _nscanned;
- int _found;
- GeoDistType _type;
-
- Box _alreadyScanned;
};
+
class GeoCursorBase : public Cursor {
public:
+
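+ // Shared empty matcher, returned when a geo cursor has no query filter of its own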
+ static const shared_ptr< CoveredIndexMatcher > emptyMatcher;
+
GeoCursorBase( const Geo2dType * spec )
: _spec( spec ), _id( _spec->getDetails() ) {
@@ -1106,68 +1166,34 @@ namespace mongo {
const IndexDetails * _id;
};
- class GeoSearchCursor : public GeoCursorBase {
- public:
- GeoSearchCursor( shared_ptr<GeoSearch> s )
- : GeoCursorBase( s->_spec ) ,
- _s( s ) , _cur( s->_hopper->_points.begin() ) , _end( s->_hopper->_points.end() ), _nscanned() {
- if ( _cur != _end ) {
- ++_nscanned;
- }
- }
-
- virtual ~GeoSearchCursor() {}
-
- virtual bool ok() {
- return _cur != _end;
- }
+ const shared_ptr< CoveredIndexMatcher > GeoCursorBase::emptyMatcher( new CoveredIndexMatcher( BSONObj(), BSONObj(), false ) );
- virtual Record* _current() { assert(ok()); return _cur->_loc.rec(); }
- virtual BSONObj current() { assert(ok()); return _cur->_o; }
- virtual DiskLoc currLoc() { assert(ok()); return _cur->_loc; }
- virtual bool advance() {
- if( ok() ){
- _cur++;
- incNscanned();
- return ok();
- }
- return false;
- }
- virtual BSONObj currKey() const { return _cur->_key; }
-
- virtual string toString() {
- return "GeoSearchCursor";
- }
-
-
- virtual BSONObj prettyStartKey() const {
- return BSON( _s->_spec->_geo << _s->_prefix.toString() );
- }
- virtual BSONObj prettyEndKey() const {
- GeoHash temp = _s->_prefix;
- temp.move( 1 , 1 );
- return BSON( _s->_spec->_geo << temp.toString() );
- }
+ // TODO: Pull out the cursor bit from the browse, have GeoBrowse as field of cursor to clean up
+ // this hierarchy a bit. Also probably useful to look at whether GeoAccumulator can be a member instead
+ // of a superclass.
+ class GeoBrowse : public GeoCursorBase , public GeoAccumulator {
+ public:
- virtual long long nscanned() { return _nscanned; }
+ // The maximum number of points to check (and add) per fillStack() pass when expanding a box
+ static const int maxPointsHeuristic = 300;
- virtual CoveredIndexMatcher *matcher() const {
- return _s->_hopper->_matcher.get();
- }
+ // Expand states
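+ //   START begins a new box expansion, DOING_EXPAND scans the btree within the current
+ //   prefix, DONE_NEIGHBOR moves on to the neighboring boxes, and DONE ends the search.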
+ enum State {
+ START ,
+ DOING_EXPAND ,
+ DONE_NEIGHBOR ,
+ DONE
+ } _state;
- shared_ptr<GeoSearch> _s;
- GeoHopper::Holder::iterator _cur;
- GeoHopper::Holder::iterator _end;
+ GeoBrowse( const Geo2dType * g , string type , BSONObj filter = BSONObj(), bool uniqueDocs = true, bool needDistance = false )
+ : GeoCursorBase( g ), GeoAccumulator( g , filter, uniqueDocs, needDistance ) ,
+ _type( type ) , _filter( filter ) , _firstCall(true), _nscanned(), _centerPrefix(0, 0, 0) {
- void incNscanned() { if ( ok() ) { ++_nscanned; } }
- long long _nscanned;
- };
+ // Set up the initial expand state
+ _state = START;
+ _neighbor = -1;
+ _foundInExp = 0;
- class GeoBrowse : public GeoCursorBase , public GeoAccumulator {
- public:
- GeoBrowse( const Geo2dType * g , string type , BSONObj filter = BSONObj() )
- : GeoCursorBase( g ) ,GeoAccumulator( g , filter ) ,
- _type( type ) , _filter( filter ) , _firstCall(true), _nscanned() {
}
virtual string toString() {
@@ -1177,7 +1203,7 @@ namespace mongo {
virtual bool ok() {
bool first = _firstCall;
if ( _firstCall ) {
- fillStack();
+ fillStack( maxPointsHeuristic );
_firstCall = false;
}
if ( ! _cur.isEmpty() || _stack.size() ) {
@@ -1188,7 +1214,7 @@ namespace mongo {
}
while ( moreToDo() ) {
- fillStack();
+ fillStack( maxPointsHeuristic );
if ( ! _cur.isEmpty() ) {
if ( first ) {
++_nscanned;
@@ -1214,7 +1240,7 @@ namespace mongo {
return false;
while ( _cur.isEmpty() && moreToDo() )
- fillStack();
+ fillStack( maxPointsHeuristic );
return ! _cur.isEmpty() && ++_nscanned;
}
@@ -1223,18 +1249,308 @@ namespace mongo {
virtual DiskLoc currLoc() { assert(ok()); return _cur._loc; }
virtual BSONObj currKey() const { return _cur._key; }
- virtual CoveredIndexMatcher *matcher() const {
- return _matcher.get();
+ virtual CoveredIndexMatcher* matcher() const {
+ if( _matcher.get() ) return _matcher.get();
+ else return GeoCursorBase::emptyMatcher.get();
+ }
+
+ virtual shared_ptr< CoveredIndexMatcher > matcherPtr() const {
+ if( _matcher.get() ) return _matcher;
+ else return GeoCursorBase::emptyMatcher;
+ }
+
+ // Are we finished getting points?
+ virtual bool moreToDo() {
+ return _state != DONE;
}
- virtual bool moreToDo() = 0;
- virtual void fillStack() = 0;
+ virtual bool supportGetMore() { return true; }
+
+ // Fills the stack, but only checks a maximum number of maxToCheck points at a time.
+ // Further calls to this function will continue the expand/check neighbors algorithm.
+ virtual void fillStack( int maxToCheck, int maxToAdd = -1, bool onlyExpand = false ) {
+
+#ifdef GEODEBUGGING
+ log() << "Filling stack with maximum of " << maxToCheck << ", state : " << (int) _state << endl;
+#endif
+
+ if( maxToAdd < 0 ) maxToAdd = maxToCheck;
+ int maxFound = _foundInExp + maxToCheck;
+ assert( maxToCheck > 0 );
+ assert( maxFound > 0 );
+ assert( _found <= 0x7fffffff ); // conversion to int
+ int maxAdded = static_cast<int>(_found) + maxToAdd;
+ assert( maxAdded >= 0 ); // overflow check
+
+ bool isNeighbor = _centerPrefix.constrains();
+
+ // Starting a box expansion
+ if ( _state == START ) {
+
+ // Get the very first hash point, if required
+ if( ! isNeighbor )
+ _prefix = expandStartHash();
+
+ GEODEBUG( "initializing btree" );
+
+#ifdef GEODEBUGGING
+ log() << "Initializing from b-tree with hash of " << _prefix << " @ " << Box( _g, _prefix ) << endl;
+#endif
+
+ if ( ! BtreeLocation::initial( *_id , _spec , _min , _max , _prefix , _foundInExp , this ) )
+ _state = isNeighbor ? DONE_NEIGHBOR : DONE;
+ else {
+ _state = DOING_EXPAND;
+ _lastPrefix.reset();
+ }
+
+ GEODEBUG( (_state == DONE_NEIGHBOR || _state == DONE ? "not initialized" : "initialized") );
+
+ }
+
+ // Doing the actual box expansion
+ if ( _state == DOING_EXPAND ) {
+
+ while ( true ) {
+
+ GEODEBUG( "box prefix [" << _prefix << "]" );
+#ifdef GEODEBUGGING
+ if( _prefix.constrains() ) {
+ log() << "current expand box : " << Box( _g, _prefix ).toString() << endl;
+ }
+ else {
+ log() << "max expand box." << endl;
+ }
+#endif
+
+ GEODEBUG( "expanding box points... ");
+
+ // Record the prefix we're actively exploring...
+ _expPrefix.reset( new GeoHash( _prefix ) );
+
+ // Find points inside this prefix
+ while ( _min.hasPrefix( _prefix ) && _min.advance( -1 , _foundInExp , this ) && _foundInExp < maxFound && _found < maxAdded );
+ while ( _max.hasPrefix( _prefix ) && _max.advance( 1 , _foundInExp , this ) && _foundInExp < maxFound && _found < maxAdded );
+
+#ifdef GEODEBUGGING
+
+ log() << "finished expand, checked : " << ( maxToCheck - ( maxFound - _foundInExp ) )
+ << " found : " << ( maxToAdd - ( maxAdded - _found ) )
+ << " max : " << maxToCheck << " / " << maxToAdd << endl;
+
+#endif
+
+ GEODEBUG( "finished expand, found : " << ( maxToAdd - ( maxAdded - _found ) ) );
+ if( _foundInExp >= maxFound || _found >= maxAdded ) return;
+
+ // We've searched this prefix fully, remember
+ _lastPrefix.reset( new GeoHash( _prefix ));
+
+ // If we've searched the entire space, we're finished.
+ if ( ! _prefix.constrains() ) {
+ GEODEBUG( "box exhausted" );
+ _state = DONE;
+ notePrefix();
+ return;
+ }
+
+ // If we won't fit in the box, and we're not doing a sub-scan, increase the size
+ if ( ! fitsInBox( _g->sizeEdge( _prefix ) ) && _fringe.size() <= 1 ) {
+
+ // If we're still not expanded bigger than the box size, expand again
+ // TODO: Is there an advantage to scanning prior to expanding?
+ _prefix = _prefix.up();
+ continue;
+
+ }
+
+ // We're done and our size is large enough
+ _state = DONE_NEIGHBOR;
+
+ // Go to the next sub-box, if applicable
+ if( _fringe.size() > 0 ) _fringe.pop_back();
+ // Go to the next neighbor if this was the last sub-search
+ if( _fringe.size() == 0 ) _neighbor++;
+
+ break;
+
+ }
+
+ notePrefix();
+ }
+
+ // If we're only expanding this box, don't move on to the neighbors
+ if( onlyExpand ) return;
+
+ // If we're done expanding the current box...
+ if( _state == DONE_NEIGHBOR ) {
+
+ // Iterate to the next neighbor
+ // The loop is useful for cases where we want to skip over boxes entirely;
+ // otherwise the recursion advances the neighbor index.
+ for ( ; _neighbor < 9; _neighbor++ ) {
+
+ // If we have no fringe for the neighbor, make sure we have the default fringe
+ if( _fringe.size() == 0 ) _fringe.push_back( "" );
+
+ if( ! isNeighbor ) {
+ _centerPrefix = _prefix;
+ _centerBox = Box( _g, _centerPrefix );
+ isNeighbor = true;
+ }
+
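+ // Map the neighbor index (0..8) onto (i,j) offsets in [-1,1] around the center box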
+ int i = (_neighbor / 3) - 1;
+ int j = (_neighbor % 3) - 1;
+
+ if ( ( i == 0 && j == 0 ) ||
+ ( i < 0 && _centerBox._min._x <= _g->_min ) ||
+ ( j < 0 && _centerBox._min._y <= _g->_min ) ||
+ ( i > 0 && _centerBox._max._x >= _g->_max ) ||
+ ( j > 0 && _centerBox._max._y >= _g->_max ) ) {
+ continue; // main box or wrapped edge
+ // TODO: We may want to enable wrapping in future, probably best as layer on top of
+ // this search.
+ }
+
+ // Make sure we've got a reasonable center
+ assert( _centerPrefix.constrains() );
+
+ GeoHash _neighborPrefix = _centerPrefix;
+ _neighborPrefix.move( i, j );
+
+ GEODEBUG( "moving to " << i << " , " << j << " fringe : " << _fringe.size() );
+ PREFIXDEBUG( _centerPrefix, _g );
+ PREFIXDEBUG( _neighborPrefix , _g );
+ while( _fringe.size() > 0 ) {
+
+ _prefix = _neighborPrefix + _fringe.back();
+ Box cur( _g , _prefix );
+
+ PREFIXDEBUG( _prefix, _g );
+
+ double intAmt = intersectsBox( cur );
+
+ // No intersection
+ if( intAmt <= 0 ) {
+ GEODEBUG( "skipping box" << cur.toString() );
+ _fringe.pop_back();
+ continue;
+ }
+ // Large intersection, refine search
+ else if( intAmt > 0.5 && _prefix.canRefine() && _fringe.back().size() < 4 /* two bits */ ) {
+
+ GEODEBUG( "Adding to fringe: " << _fringe.back() << " curr prefix : " << _prefix << " bits : " << _prefix.getBits() );
+
+ // log() << "Diving to level : " << ( _fringe.back().size() / 2 + 1 ) << endl;
+
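+ // Split this fringe entry into its four child quadrants (two more geohash bits per level)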
+ string lastSuffix = _fringe.back();
+ _fringe.pop_back();
+ _fringe.push_back( lastSuffix + "00" );
+ _fringe.push_back( lastSuffix + "01" );
+ _fringe.push_back( lastSuffix + "11" );
+ _fringe.push_back( lastSuffix + "10" );
+
+ continue;
+ }
+
+ // Restart our search from a diff box.
+ _state = START;
+
+ assert( ! onlyExpand );
+
+ assert( _found <= 0x7fffffff );
+ fillStack( maxFound - _foundInExp, maxAdded - static_cast<int>(_found) );
+
+ // When we return from the recursive fillStack call, we'll either have checked enough points or
+ // be entirely done. Max recurse depth is < 8 * 16.
+
+ // If we're maxed out on points, return
+ if( _foundInExp >= maxFound || _found >= maxAdded ) {
+ // Make sure we'll come back to add more points
+ assert( _state == DOING_EXPAND );
+ return;
+ }
+
+ // Otherwise we must be finished to return
+ assert( _state == DONE );
+ return;
+
+ }
+
+ }
+
+ // Finished with neighbors
+ _state = DONE;
+ }
- virtual void addSpecific( const KeyNode& node , double d ) {
- if ( _cur.isEmpty() )
- _cur = GeoPoint( node , d );
- else
- _stack.push_back( GeoPoint( node , d ) );
+ }
+
+ // The initial geo hash box for our first expansion
+ virtual GeoHash expandStartHash() = 0;
+
+ // Whether the current box width is big enough for our search area
+ virtual bool fitsInBox( double width ) = 0;
+
+ // The amount the current box overlaps our search area
+ virtual double intersectsBox( Box& cur ) = 0;
+
+ virtual int addSpecific( const GeoKeyNode& node , const Point& keyP , bool onBounds , double keyD , bool newDoc ) {
+
+ int found = 0;
+
+ // We need to handle every possible point in this method, even those not in the key value,
+ // to avoid having to track which hashes we've already seen.
+ if( ! newDoc ){
+ // log() << "Already handled doc!" << endl;
+ return 0;
+ }
+
+ if( _uniqueDocs && ! onBounds ) {
+ // log() << "Added ind to " << _type << endl;
+ _stack.push_front( GeoPoint( node ) );
+ found++;
+ }
+ else {
+ // We now handle every possible point in the document, even those not in the key value,
+ // since we're iterating through them anyway; this saves us from having to track the hashes
+ // we've seen per document.
+
+ // If we're filtering by hash, get the original
+ bool expensiveExact = expensiveExactCheck();
+
+ vector< BSONObj > locs;
+ getPointsFor( node._key, node.recordLoc.obj(), locs, true );
+ for( vector< BSONObj >::iterator i = locs.begin(); i != locs.end(); ++i ){
+
+ double d = -1;
+ Point p( *i );
+
+ // We can avoid exact document checks by redoing approx checks,
+ // if the exact checks are more expensive.
+ bool needExact = true;
+ if( expensiveExact ){
+ assert( false );
+ KeyResult result = approxKeyCheck( p, d );
+ if( result == BAD ) continue;
+ else if( result == GOOD ) needExact = false;
+ }
+
+ if( ! needExact || exactDocCheck( p, d ) ){
+ // log() << "Added mult to " << _type << endl;
+ _stack.push_front( GeoPoint( node ) );
+ found++;
+ // If returning unique, just exit after first point is added
+ if( _uniqueDocs ) break;
+ }
+ }
+ }
+
+ if ( _cur.isEmpty() && _stack.size() > 0 ){
+ _cur = _stack.front();
+ _stack.pop_front();
+ }
+
+ return found;
}
virtual long long nscanned() {
@@ -1244,6 +1560,35 @@ namespace mongo {
return _nscanned;
}
+ virtual void explainDetails( BSONObjBuilder& b ){
+ b << "keysChecked" << _keysChecked;
+ b << "lookedAt" << _lookedAt;
+ b << "matchesPerfd" << _matchesPerfd;
+ b << "objectsLoaded" << _objectsLoaded;
+ b << "pointsLoaded" << _pointsLoaded;
+ }
+
+ virtual BSONObj prettyIndexBounds() const {
+
+ vector<GeoHash>::const_iterator i = _expPrefixes.end();
+ if( _expPrefixes.size() > 0 && *(--i) != *( _expPrefix.get() ) )
+ _expPrefixes.push_back( *( _expPrefix.get() ) );
+
+ BSONObjBuilder bob;
+ BSONArrayBuilder bab;
+ for( i = _expPrefixes.begin(); i != _expPrefixes.end(); ++i ){
+ bab << Box( _g, *i ).toBSON();
+ }
+ bob << _g->_geo << bab.arr();
+
+ return bob.obj();
+
+ }
+
+ void notePrefix() {
+ _expPrefixes.push_back( _prefix );
+ }
+
string _type;
BSONObj _filter;
list<GeoPoint> _stack;
@@ -1253,189 +1598,695 @@ namespace mongo {
long long _nscanned;
+ // The current box we're expanding (-1 is first/center box)
+ int _neighbor;
+
+ // The points we've found so far
+ // TODO: Long long?
+ int _foundInExp;
+
+ // The current hash prefix we're expanding and the center-box hash prefix
+ GeoHash _prefix;
+ shared_ptr<GeoHash> _lastPrefix;
+ GeoHash _centerPrefix;
+ list<string> _fringe;
+ int recurseDepth;
+ Box _centerBox;
+
+ // Start and end of our search range in the current box
+ BtreeLocation _min;
+ BtreeLocation _max;
+
+ shared_ptr<GeoHash> _expPrefix;
+ mutable vector<GeoHash> _expPrefixes;
+
};
- class GeoCircleBrowse : public GeoBrowse {
+
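+ // Accumulates the closest _max points to a fixed center point; used to drive $near searches.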
+ class GeoHopper : public GeoBrowse {
public:
+ typedef multiset<GeoPoint> Holder;
- enum State {
- START ,
- DOING_EXPAND ,
- DOING_AROUND ,
- DONE
- } _state;
+ GeoHopper( const Geo2dType * g , unsigned max , const Point& n , const BSONObj& filter = BSONObj() , double maxDistance = numeric_limits<double>::max() , GeoDistType type=GEO_PLAIN, bool uniqueDocs = false, bool needDistance = true )
+ : GeoBrowse( g, "search", filter, uniqueDocs, needDistance ), _max( max ) , _near( n ), _maxDistance( maxDistance ), _type( type ), _distError( type == GEO_PLAIN ? g->_error : g->_errorSphere ), _farthest(0)
+ {}
- GeoCircleBrowse( const Geo2dType * g , const BSONObj& circle , BSONObj filter = BSONObj() , const string& type="$center")
- : GeoBrowse( g , "circle" , filter ) {
+ virtual KeyResult approxKeyCheck( const Point& p, double& d ) {
- uassert( 13060 , "$center needs 2 fields (middle,max distance)" , circle.nFields() == 2 );
- BSONObjIterator i(circle);
- BSONElement center = i.next();
- _start = g->_tohash(center);
- _startPt = Point(center);
- _prefix = _start;
- _maxDistance = i.next().numberDouble();
- uassert( 13061 , "need a max distance > 0 " , _maxDistance > 0 );
- _maxDistance += g->_error;
+ // Always check the approximate distance first, since it lets us avoid examining
+ // the rest of the object when the key alone is decisive
- _state = START;
- _found = 0;
+ switch (_type) {
+ case GEO_PLAIN:
+ d = _near.distance( p );
+ break;
+ case GEO_SPHERE:
+ checkEarthBounds( p );
+ d = spheredist_deg( _near, p );
+ break;
+ default: assert( false );
+ }
+ assert( d >= 0 );
- if (type == "$center") {
- _type = GEO_PLAIN;
- _xScanDistance = _maxDistance;
- _yScanDistance = _maxDistance;
+ GEODEBUG( "\t\t\t\t\t\t\t checkDistance " << _near.toString()
+ << "\t" << p.toString() << "\t" << d
+ << " farthest: " << farthest() );
+
+ // If we still need more points, the border is the max search distance; otherwise it's the current farthest point
+ double borderDist = ( _points.size() < _max ? _maxDistance : farthest() );
+
+ if( d >= borderDist - 2 * _distError && d <= borderDist + 2 * _distError ) return BORDER;
+ else return d < borderDist ? GOOD : BAD;
+
+ }
+
+ virtual bool exactDocCheck( const Point& p, double& d ){
+
+ bool within = false;
+
+ // Get the appropriate distance for the type
+ switch ( _type ) {
+ case GEO_PLAIN:
+ d = _near.distance( p );
+ within = _near.distanceWithin( p, _maxDistance );
+ break;
+ case GEO_SPHERE:
+ checkEarthBounds( p );
+ d = spheredist_deg( _near, p );
+ within = ( d <= _maxDistance );
+ break;
+ default: assert( false );
}
- else if (type == "$centerSphere") {
- uassert(13461, "Spherical MaxDistance > PI. Are you sure you are using radians?", _maxDistance < M_PI);
- _type = GEO_SPHERE;
- _yScanDistance = rad2deg(_maxDistance);
- _xScanDistance = computeXScanDistance(_startPt._y, _yScanDistance);
+ return within;
+ }
- uassert(13462, "Spherical distance would require wrapping, which isn't implemented yet",
- (_startPt._x + _xScanDistance < 180) && (_startPt._x - _xScanDistance > -180) &&
- (_startPt._y + _yScanDistance < 90) && (_startPt._y - _yScanDistance > -90));
+ // Always in distance units, whether radians or normal
+ double farthest() const {
+ return _farthest;
+ }
+
+ virtual int addSpecific( const GeoKeyNode& node, const Point& keyP, bool onBounds, double keyD, bool newDoc ) {
- GEODEBUGPRINT(_maxDistance);
- GEODEBUGPRINT(_xScanDistance);
- GEODEBUGPRINT(_yScanDistance);
+ // Unique documents
+
+ GeoPoint newPoint( node, keyD, false );
+
+ int prevSize = _points.size();
+
+ // STEP 1 : Remove old duplicate points from the set if needed
+ if( _uniqueDocs ){
+
+ // Lookup old point with same doc
+ map< DiskLoc , Holder::iterator >::iterator oldPointIt = _seenPts.find( newPoint.loc() );
+
+ if( oldPointIt != _seenPts.end() ){
+ const GeoPoint& oldPoint = *(oldPointIt->second);
+ // We don't need to care if we've already seen this same approx pt or better,
+ // or we've already gone to disk once for the point
+ if( oldPoint < newPoint ){
+ GEODEBUG( "\t\tOld point closer than new point" );
+ return 0;
+ }
+ GEODEBUG( "\t\tErasing old point " << oldPointIt->first.obj() );
+ _points.erase( oldPointIt->second );
+ }
}
- else {
- uassert(13460, "invalid $center query type: " + type, false);
+
+ Holder::iterator newIt = _points.insert( newPoint );
+ if( _uniqueDocs ) _seenPts[ newPoint.loc() ] = newIt;
+
+ GEODEBUG( "\t\tInserted new point " << newPoint.toString() << " approx : " << keyD );
+
+ assert( _max > 0 );
+
+ Holder::iterator lastPtIt = _points.end();
+ lastPtIt--;
+ _farthest = lastPtIt->distance() + 2 * _distError;
+
+ return _points.size() - prevSize;
+
+ }
+
+ // Removes extra points from end of _points set.
+ // The check can be a bit costly if we have lots of exact points near borders,
+ // so we only do this every once in a while.
+ void processExtraPoints(){
+
+ if( _points.size() == 0 ) return;
+
+ int prevSize = _points.size();
+
+ // Erase all points from the set with a position >= _max *and*
+ // whose distance isn't close to the _max - 1 position distance
+
+ int numToErase = _points.size() - _max;
+ if( numToErase < 0 ) numToErase = 0;
+
+ // Get the first point definitely in the _points array
+ Holder::iterator startErase = _points.end();
+ for( int i = 0; i < numToErase + 1; i++ ) startErase--;
+ _farthest = startErase->distance() + 2 * _distError;
+
+ GEODEBUG( "\t\tPotentially erasing " << numToErase << " points, " << " size : " << _points.size() << " max : " << _max << " dist : " << startErase->distance() << " farthest dist : " << _farthest << " from error : " << _distError );
+
+ startErase++;
+ while( numToErase > 0 && startErase->distance() <= _farthest ){
+ GEODEBUG( "\t\tNot erasing point " << startErase->toString() );
+ numToErase--;
+ startErase++;
+ assert( startErase != _points.end() || numToErase == 0 );
}
- ok();
+ if( _uniqueDocs ){
+ for( Holder::iterator i = startErase; i != _points.end(); ++i )
+ _seenPts.erase( i->loc() );
+ }
+
+ _points.erase( startErase, _points.end() );
+
+ int diff = _points.size() - prevSize;
+ if( diff > 0 ) _found += diff;
+ else _found -= -diff;
+
}
- virtual bool moreToDo() {
- return _state != DONE;
+ unsigned _max;
+ Point _near;
+ Holder _points;
+ double _maxDistance;
+ GeoDistType _type;
+ double _distError;
+ double _farthest;
+
+ map< DiskLoc , Holder::iterator > _seenPts;
+
+ };
+
+
+
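+ // Runs a full $near search: expand outward from the start hash until enough points are found,
+ // then scan the bounding box of the farthest candidate to complete the result set.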
+ class GeoSearch : public GeoHopper {
+ public:
+ GeoSearch( const Geo2dType * g , const Point& startPt , int numWanted=100 , BSONObj filter=BSONObj() , double maxDistance = numeric_limits<double>::max() , GeoDistType type=GEO_PLAIN, bool uniqueDocs = false, bool needDistance = false )
+ : GeoHopper( g , numWanted , startPt , filter , maxDistance, type, uniqueDocs, needDistance ),
+ _start( g->hash( startPt._x, startPt._y ) ),
+ // TODO: Remove numWanted...
+ _numWanted( numWanted ),
+ _type(type)
+ {
+
+ assert( g->getDetails() );
+ _nscanned = 0;
+ _found = 0;
+
+ if( _maxDistance < 0 ){
+ _scanDistance = numeric_limits<double>::max();
+ }
+ else if (type == GEO_PLAIN) {
+ _scanDistance = maxDistance + _spec->_error;
+ }
+ else if (type == GEO_SPHERE) {
+ checkEarthBounds( startPt );
+ // TODO: consider splitting into x and y scan distances
+ _scanDistance = computeXScanDistance( startPt._y, rad2deg( _maxDistance ) + _spec->_error );
+ }
+
+ assert( _scanDistance > 0 );
+
}
- virtual void fillStack() {
+ void exec() {
- if ( _state == START ) {
- if ( ! BtreeLocation::initial( *_id , _spec , _min , _max ,
- _prefix , _found , this ) ) {
- _state = DONE;
- return;
+ if( _numWanted == 0 ) return;
+
+ /*
+ * Search algorithm
+ * 1) use geohash prefix to find X items
+ * 2) compute max distance from want to an item
+ * 3) find optimal set of boxes that complete circle
+ * 4) use regular btree cursors to scan those boxes
+ */
+
+#ifdef GEODEBUGGING
+
+ log() << "start near search for " << _numWanted << " points near " << _near << " (max dist " << _maxDistance << ")" << endl;
+
+#endif
+
+ // Part 1
+ {
+ do {
+ long long f = found();
+ assert( f <= 0x7fffffff );
+ fillStack( maxPointsHeuristic, _numWanted - static_cast<int>(f) , true );
+ processExtraPoints();
+ } while( _state != DONE && _state != DONE_NEIGHBOR &&
+ found() < _numWanted &&
+ (! _prefix.constrains() || _g->sizeEdge( _prefix ) <= _scanDistance ) );
+
+ // If we couldn't scan or scanned everything, we're done
+ if( _state == DONE ){
+ expandEndPoints();
+ return;
+ }
+ }
+
+#ifdef GEODEBUGGING
+
+ log() << "part 1 of near search completed, found " << found() << " points (out of " << _foundInExp << " scanned)"
+ << " in expanded region " << _prefix << " @ " << Box( _g, _prefix )
+ << " with furthest distance " << farthest() << endl;
+
+#endif
+
+ // Part 2
+ {
+
+ // Find farthest distance for completion scan
+ double farDist = farthest();
+ if( found() < _numWanted ) {
+ // Not enough found in Phase 1
+ farDist = _scanDistance;
}
- _state = DOING_EXPAND;
- }
+ else if ( _type == GEO_PLAIN ) {
+ // Enough found, but need to search neighbor boxes
+ farDist += _spec->_error;
+ }
+ else if ( _type == GEO_SPHERE ) {
+ // Enough found, but need to search neighbor boxes
+ farDist = std::min( _scanDistance, computeXScanDistance( _near._y, rad2deg( farDist ) ) + 2 * _spec->_error );
+ }
+ assert( farDist >= 0 );
+ GEODEBUGPRINT( farDist );
+ // Find the box that includes all the points we need to return
+ _want = Box( _near._x - farDist , _near._y - farDist , farDist * 2 );
+ GEODEBUGPRINT( _want.toString() );
- if ( _state == DOING_AROUND ) {
- // TODO could rework and return rather than looping
- for (int i=-1; i<=1; i++) {
- for (int j=-1; j<=1; j++) {
- if (i == 0 && j == 0)
- continue; // main box
+ // log() << "Found : " << found() << " wanted : " << _numWanted << " Far distance : " << farDist << " box : " << _want << endl;
- GeoHash newBox = _prefix;
- newBox.move(i, j);
+ // Remember the far distance for further scans
+ _scanDistance = farDist;
- PREFIXDEBUG(newBox, _g);
- if (needToCheckBox(newBox)) {
- // TODO consider splitting into quadrants
- getPointsForPrefix(newBox);
- }
- else {
- GEODEBUG("skipping box");
- }
- }
+ // Reset the search, our distances have probably changed
+ if( _state == DONE_NEIGHBOR ){
+ _state = DOING_EXPAND;
+ _neighbor = -1;
}
- _state = DONE;
+#ifdef GEODEBUGGING
+
+ log() << "resetting search with start at " << _start << " (edge length " << _g->sizeEdge( _start ) << ")" << endl;
+
+#endif
+
+ // Do regular search in the full region
+ do {
+ fillStack( maxPointsHeuristic );
+ processExtraPoints();
+ }
+ while( _state != DONE );
+
+ }
+
+ GEODEBUG( "done near search with " << _points.size() << " points " );
+
+ expandEndPoints();
+
+ }
+
+ void addExactPoints( const GeoPoint& pt, Holder& points, bool force ){
+ int before, after;
+ addExactPoints( pt, points, before, after, force );
+ }
+
+ void addExactPoints( const GeoPoint& pt, Holder& points, int& before, int& after, bool force ){
+
+ before = 0;
+ after = 0;
+
+ GEODEBUG( "Adding exact points for " << pt.toString() );
+
+ if( pt.isExact() ){
+ if( force ) points.insert( pt );
return;
}
- if (_state == DOING_EXPAND) {
- GEODEBUG( "circle prefix [" << _prefix << "]" );
- PREFIXDEBUG(_prefix, _g);
+ vector<BSONObj> locs;
+ getPointsFor( pt.key(), pt.obj(), locs, _uniqueDocs );
+
+ GeoPoint nearestPt( pt, -1, true );
+
+ for( vector<BSONObj>::iterator i = locs.begin(); i != locs.end(); i++ ){
- while ( _min.hasPrefix( _prefix ) && _min.advance( -1 , _found , this ) );
- while ( _max.hasPrefix( _prefix ) && _max.advance( 1 , _found , this ) );
+ Point loc( *i );
- if ( ! _prefix.constrains() ) {
- GEODEBUG( "\t exhausted the btree" );
- _state = DONE;
- return;
+ double d;
+ if( ! exactDocCheck( loc, d ) ) continue;
+
+ if( _uniqueDocs && ( nearestPt.distance() < 0 || d < nearestPt.distance() ) ){
+ nearestPt._distance = d;
+ nearestPt._pt = *i;
+ continue;
+ }
+ else if( ! _uniqueDocs ){
+ GeoPoint exactPt( pt, d, true );
+ exactPt._pt = *i;
+ GEODEBUG( "Inserting exact pt " << exactPt.toString() << " for " << pt.toString() << " exact : " << d << " is less? " << ( exactPt < pt ) << " bits : " << _g->_bits );
+ points.insert( exactPt );
+ exactPt < pt ? before++ : after++;
}
- Point ll (_g, _prefix);
- GeoHash trHash = _prefix;
- trHash.move( 1 , 1 );
- Point tr (_g, trHash);
- double sideLen = fabs(tr._x - ll._x);
+ }
+
+ if( _uniqueDocs && nearestPt.distance() >= 0 ){
+ GEODEBUG( "Inserting unique exact pt " << nearestPt.toString() << " for " << pt.toString() << " exact : " << nearestPt.distance() << " is less? " << ( nearestPt < pt ) << " bits : " << _g->_bits );
+ points.insert( nearestPt );
+ if( nearestPt < pt ) before++;
+ else after++;
+ }
+
+ }
+
+ // TODO: Refactor this back into the holder class, and allow it to run periodically when we are seeing a lot of points
+ void expandEndPoints( bool finish = true ){
+
+ processExtraPoints();
+
+ // All points in the set *could* be within maxDistance
+
+ // Step 1 : Trim points to max size
+ // TODO: This check will do little for now, but it is a skeleton for future work on
+ // incremental $near searches
+ if( _max > 0 ){
+
+ int numToErase = _points.size() - _max;
+
+ if( numToErase > 0 ){
+
+ Holder tested;
+
+ // Work backward through all points we're not sure belong in the set
+ Holder::iterator maybePointIt = _points.end();
+ maybePointIt--;
+ double approxMin = maybePointIt->distance() - 2 * _distError;
+
+ GEODEBUG( "\t\tNeed to erase " << numToErase << " max : " << _max << " min dist " << approxMin << " error : " << _distError << " starting from : " << (*maybePointIt).toString() );
+
+ // Replace each uncertain trailing point with its exact point(s), collected in 'tested'
+ int erased = 0;
+ while( _points.size() > 0 && ( maybePointIt->distance() >= approxMin || erased < numToErase ) ){
+
+ Holder::iterator current = maybePointIt--;
+
+ addExactPoints( *current, tested, true );
+ _points.erase( current );
+ erased++;
+
+ if( tested.size() )
+ approxMin = tested.begin()->distance() - 2 * _distError;
- if (sideLen > std::max(_xScanDistance, _yScanDistance)) { // circle must be contained by surrounding squares
- if ( (ll._x + _xScanDistance < _startPt._x && ll._y + _yScanDistance < _startPt._y) &&
- (tr._x - _xScanDistance > _startPt._x && tr._y - _yScanDistance > _startPt._y) ) {
- GEODEBUG("square fully contains circle");
- _state = DONE;
}
- else if (_prefix.getBits() > 1) {
- GEODEBUG("checking surrounding squares");
- _state = DOING_AROUND;
+
+ GEODEBUG( "\t\tEnding search at point " << ( _points.size() == 0 ? "(beginning)" : maybePointIt->toString() ) );
+
+ int numToAddBack = erased - numToErase;
+ assert( numToAddBack >= 0 );
+
+ GEODEBUG( "\t\tNum tested valid : " << tested.size() << " erased : " << erased << " added back : " << numToAddBack );
+
+#ifdef GEODEBUGGING
+ for( Holder::iterator it = tested.begin(); it != tested.end(); it++ ){
+ log() << "Tested Point: " << *it << endl;
}
- else {
- GEODEBUG("using simple search");
- _prefix = _prefix.up();
+#endif
+ Holder::iterator testedIt = tested.begin();
+ for( int i = 0; i < numToAddBack && testedIt != tested.end(); i++ ){
+ _points.insert( *testedIt );
+ testedIt++;
}
}
- else {
- _prefix = _prefix.up();
+ }
+
+#ifdef GEODEBUGGING
+ for( Holder::iterator it = _points.begin(); it != _points.end(); it++ ){
+ log() << "Point: " << *it << endl;
+ }
+#endif
+ // We've now trimmed first set of unneeded points
+
+ GEODEBUG( "\t\t Start expanding, num points : " << _points.size() << " max : " << _max );
+
+ // Step 2: iterate through all points and add as needed
+
+ unsigned expandedPoints = 0;
+ Holder::iterator it = _points.begin();
+ double expandWindowEnd = -1;
+ while( it != _points.end() ){
+ const GeoPoint& currPt = *it;
+
+ // TODO: If one point is exact, maybe not 2 * _distError
+
+ // See if we're in an expand window
+ bool inWindow = currPt.distance() <= expandWindowEnd;
+ // If we're not, and we're done with points, break
+ if( ! inWindow && expandedPoints >= _max ) break;
+
+ bool expandApprox = ! currPt.isExact() && ( ! _uniqueDocs || ( finish && _needDistance ) || inWindow );
+
+ if( expandApprox ){
+
+ // Add new point(s)
+ // These will only be added in a radius of 2 * _distError around the current point,
+ // so should not affect previously valid points.
+ int before, after;
+ addExactPoints( currPt, _points, before, after, false );
+ expandedPoints += before;
+
+ if( _max > 0 && expandedPoints < _max )
+ expandWindowEnd = currPt.distance() + 2 * _distError;
+
+ // Iterate to the next point
+ Holder::iterator current = it++;
+ // Erase the current point
+ _points.erase( current );
+
+ }
+ else{
+ expandedPoints++;
+ it++;
}
+ }
- return;
+ GEODEBUG( "\t\tFinished expanding, num points : " << _points.size() << " max : " << _max );
+
+ // Finish
+ // TODO: Don't really need to trim?
+ for( ; expandedPoints > _max; expandedPoints-- ) it--;
+ _points.erase( it, _points.end() );
+
+#ifdef GEODEBUGGING
+ for( Holder::iterator it = _points.begin(); it != _points.end(); it++ ){
+ log() << "Point: " << *it << endl;
}
+#endif
+ }
- /* Clients are expected to use moreToDo before calling
- * fillStack, so DONE is checked for there. If any more
- * State values are defined, you should handle them
- * here. */
- assert(0);
+ virtual GeoHash expandStartHash(){
+ return _start;
}
- bool needToCheckBox(const GeoHash& prefix) {
- Point ll (_g, prefix);
- if (fabs(ll._x - _startPt._x) <= _xScanDistance) return true;
- if (fabs(ll._y - _startPt._y) <= _yScanDistance) return true;
+ // Whether the current box width is big enough for our search area
+ virtual bool fitsInBox( double width ){
+ return width >= _scanDistance;
+ }
+
+ // How much the current box overlaps our search area
+ virtual double intersectsBox( Box& cur ){
+ return cur.intersects( _want );
+ }
+
+ GeoHash _start;
+ int _numWanted;
+ double _scanDistance;
+
+ long long _nscanned;
+ int _found;
+ GeoDistType _type;
+
+ Box _want;
+ };
+
+ class GeoSearchCursor : public GeoCursorBase {
+ public:
+
+ GeoSearchCursor( shared_ptr<GeoSearch> s )
+ : GeoCursorBase( s->_spec ) ,
+ _s( s ) , _cur( s->_points.begin() ) , _end( s->_points.end() ), _nscanned() {
+ if ( _cur != _end ) {
+ ++_nscanned;
+ }
+ }
- GeoHash trHash = prefix;
- trHash.move( 1 , 1 );
- Point tr (_g, trHash);
+ virtual ~GeoSearchCursor() {}
- if (fabs(tr._x - _startPt._x) <= _xScanDistance) return true;
- if (fabs(tr._y - _startPt._y) <= _yScanDistance) return true;
+ virtual bool ok() {
+ return _cur != _end;
+ }
+ virtual Record* _current() { assert(ok()); return _cur->_loc.rec(); }
+ virtual BSONObj current() { assert(ok()); return _cur->_o; }
+ virtual DiskLoc currLoc() { assert(ok()); return _cur->_loc; }
+ virtual bool advance() {
+ if( ok() ){
+ _cur++;
+ incNscanned();
+ return ok();
+ }
return false;
}
+ virtual BSONObj currKey() const { return _cur->_key; }
- void getPointsForPrefix(const GeoHash& prefix) {
- if ( ! BtreeLocation::initial( *_id , _spec , _min , _max , prefix , _found , this ) ) {
- return;
+ virtual string toString() {
+ return "GeoSearchCursor";
+ }
+
+
+ virtual BSONObj prettyStartKey() const {
+ return BSON( _s->_g->_geo << _s->_prefix.toString() );
+ }
+ virtual BSONObj prettyEndKey() const {
+ GeoHash temp = _s->_prefix;
+ temp.move( 1 , 1 );
+ return BSON( _s->_g->_geo << temp.toString() );
+ }
+
+ virtual long long nscanned() { return _nscanned; }
+
+ virtual CoveredIndexMatcher* matcher() const {
+ if( _s->_matcher.get() ) return _s->_matcher.get();
+ else return emptyMatcher.get();
+ }
+
+ virtual shared_ptr< CoveredIndexMatcher > matcherPtr() const {
+ if( _s->_matcher.get() ) return _s->_matcher;
+ else return emptyMatcher;
+ }
+
+ shared_ptr<GeoSearch> _s;
+ GeoHopper::Holder::iterator _cur;
+ GeoHopper::Holder::iterator _end;
+
+ void incNscanned() { if ( ok() ) { ++_nscanned; } }
+ long long _nscanned;
+ };
+
+ class GeoCircleBrowse : public GeoBrowse {
+ public:
+
+ GeoCircleBrowse( const Geo2dType * g , const BSONObj& circle , BSONObj filter = BSONObj() , const string& type="$center", bool uniqueDocs = true )
+ : GeoBrowse( g , "circle" , filter, uniqueDocs ) {
+
+ uassert( 13060 , "$center needs 2 fields (middle,max distance)" , circle.nFields() == 2 );
+
+ BSONObjIterator i(circle);
+ BSONElement center = i.next();
+
+ uassert( 13656 , "the first field of $center object must be a location object" , center.isABSONObj() );
+
+ // Get geohash and exact center point
+ // TODO: For wrapping search, may be useful to allow center points outside-of-bounds here.
+ // Calculating the nearest point as a hash start inside the region would then be required.
+ _start = g->_tohash(center);
+ _startPt = Point(center);
+
+ _maxDistance = i.next().numberDouble();
+ uassert( 13061 , "need a max distance >= 0 " , _maxDistance >= 0 );
+
+ if (type == "$center") {
+ // Look in box with bounds of maxDistance in either direction
+ _type = GEO_PLAIN;
+ _xScanDistance = _maxDistance + _g->_error;
+ _yScanDistance = _maxDistance + _g->_error;
+ }
+ else if (type == "$centerSphere") {
+ // Same, but compute maxDistance using spherical transform
+
+ uassert(13461, "Spherical MaxDistance > PI. Are you sure you are using radians?", _maxDistance < M_PI);
+ checkEarthBounds( _startPt );
+
+ _type = GEO_SPHERE;
+ _yScanDistance = rad2deg( _maxDistance ) + _g->_error;
+ _xScanDistance = computeXScanDistance(_startPt._y, _yScanDistance);
+
+ uassert(13462, "Spherical distance would require wrapping, which isn't implemented yet",
+ (_startPt._x + _xScanDistance < 180) && (_startPt._x - _xScanDistance > -180) &&
+ (_startPt._y + _yScanDistance < 90) && (_startPt._y - _yScanDistance > -90));
+ }
+ else {
+ uassert(13460, "invalid $center query type: " + type, false);
}
- while ( _min.hasPrefix( prefix ) && _min.advance( -1 , _found , this ) );
- while ( _max.hasPrefix( prefix ) && _max.advance( 1 , _found , this ) );
+ // Bounding box includes fudge factor.
+ // TODO: Is this correct, since fudge factor may be spherically transformed?
+ _bBox._min = Point( _startPt._x - _xScanDistance, _startPt._y - _yScanDistance );
+ _bBox._max = Point( _startPt._x + _xScanDistance, _startPt._y + _yScanDistance );
+
+ GEODEBUG( "Bounding box for circle query : " << _bBox.toString() << " (max distance : " << _maxDistance << ")" << " starting from " << _startPt.toString() );
+
+ ok();
}
+ virtual GeoHash expandStartHash() {
+ return _start;
+ }
+
+ virtual bool fitsInBox( double width ) {
+ return width >= std::max(_xScanDistance, _yScanDistance);
+ }
- virtual bool checkDistance( const GeoHash& h , double& d ) {
+ virtual double intersectsBox( Box& cur ) {
+ return cur.intersects( _bBox );
+ }
+
+ virtual KeyResult approxKeyCheck( const Point& p, double& d ) {
+
+ // Inexact hash distance checks.
+ double error = 0;
switch (_type) {
case GEO_PLAIN:
- d = _g->distance( _start , h );
+ d = _startPt.distance( p );
+ error = _g->_error;
+ break;
+ case GEO_SPHERE: {
+ checkEarthBounds( p );
+ d = spheredist_deg( _startPt, p );
+ error = _g->_errorSphere;
+ break;
+ }
+ default: assert( false );
+ }
+
+ // If our distance is in the error bounds...
+ if( d >= _maxDistance - error && d <= _maxDistance + error ) return BORDER;
+ return d > _maxDistance ? BAD : GOOD;
+ }
+
+ virtual bool exactDocCheck( const Point& p, double& d ){
+
+ switch (_type) {
+ case GEO_PLAIN: {
+ if( _startPt.distanceWithin( p, _maxDistance ) ) return true;
break;
+ }
case GEO_SPHERE:
- d = spheredist_deg(_startPt, Point(_g, h));
+ checkEarthBounds( p );
+ if( spheredist_deg( _startPt , p ) <= _maxDistance ) return true;
break;
- default:
- assert(0);
+ default: assert( false );
}
- GEODEBUG( "\t " << h << "\t" << d );
- return d <= _maxDistance;
+ return false;
}
GeoDistType _type;
@@ -1444,153 +2295,158 @@ namespace mongo {
double _maxDistance; // user input
double _xScanDistance; // affected by GeoDistType
double _yScanDistance; // affected by GeoDistType
-
- int _found;
-
- GeoHash _prefix;
- BtreeLocation _min;
- BtreeLocation _max;
+ Box _bBox;
};
class GeoBoxBrowse : public GeoBrowse {
public:
- enum State {
- START ,
- DOING_EXPAND ,
- DONE
- } _state;
-
- GeoBoxBrowse( const Geo2dType * g , const BSONObj& box , BSONObj filter = BSONObj() )
- : GeoBrowse( g , "box" , filter ) {
+ GeoBoxBrowse( const Geo2dType * g , const BSONObj& box , BSONObj filter = BSONObj(), bool uniqueDocs = true )
+ : GeoBrowse( g , "box" , filter, uniqueDocs ) {
uassert( 13063 , "$box needs 2 fields (bottomLeft,topRight)" , box.nFields() == 2 );
+
+ // Initialize an *exact* box from the given obj.
BSONObjIterator i(box);
- _bl = g->_tohash( i.next() );
- _tr = g->_tohash( i.next() );
+ _want._min = Point( i.next() );
+ _want._max = Point( i.next() );
- _want._min = Point( _g , _bl );
- _want._max = Point( _g , _tr );
+ _wantRegion = _want;
+ _wantRegion.fudge( g ); // Need to make sure we're checking regions within error bounds of where we want
+ fixBox( g, _wantRegion );
+ fixBox( g, _want );
uassert( 13064 , "need an area > 0 " , _want.area() > 0 );
- _state = START;
- _found = 0;
-
Point center = _want.center();
- _prefix = _g->hash( center._x , center._y );
+ _start = _g->hash( center._x , center._y );
GEODEBUG( "center : " << center.toString() << "\t" << _prefix );
- {
- GeoHash a(0LL,32);
- GeoHash b(0LL,32);
- b.move(1,1);
- _fudge = _g->distance(a,b);
- }
-
- _wantLen = _fudge + std::max((_want._max._x - _want._min._x), (_want._max._y - _want._min._y));
+ _fudge = _g->_error;
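+ // A box edge of at least half the larger side of the wanted box (plus error) is sufficient,
+ // since the 3x3 neighbor scan around the center covers the remainder.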
+ _wantLen = _fudge +
+ std::max( ( _want._max._x - _want._min._x ) ,
+ ( _want._max._y - _want._min._y ) ) / 2;
ok();
}
- virtual bool moreToDo() {
- return _state != DONE;
+ void fixBox( const Geo2dType* g, Box& box ) {
+ if( box._min._x > box._max._x )
+ swap( box._min._x, box._max._x );
+ if( box._min._y > box._max._y )
+ swap( box._min._y, box._max._y );
+
+ double gMin = g->_min;
+ double gMax = g->_max;
+
+ if( box._min._x < gMin ) box._min._x = gMin;
+ if( box._min._y < gMin ) box._min._y = gMin;
+ if( box._max._x > gMax) box._max._x = gMax;
+ if( box._max._y > gMax ) box._max._y = gMax;
}
- virtual void fillStack() {
- if ( _state == START ) {
+ void swap( double& a, double& b ) {
+ double swap = a;
+ a = b;
+ b = swap;
+ }
- if ( ! BtreeLocation::initial( *_id , _spec , _min , _max ,
- _prefix , _found , this ) ) {
- _state = DONE;
- return;
- }
- _state = DOING_EXPAND;
- }
+ virtual GeoHash expandStartHash() {
+ return _start;
+ }
- if ( _state == DOING_EXPAND ) {
- int started = _found;
- while ( started == _found || _state == DONE ) {
- GEODEBUG( "box prefix [" << _prefix << "]" );
- while ( _min.hasPrefix( _prefix ) && _min.advance( -1 , _found , this ) );
- while ( _max.hasPrefix( _prefix ) && _max.advance( 1 , _found , this ) );
+ virtual bool fitsInBox( double width ) {
+ return width >= _wantLen;
+ }
- if ( _state == DONE )
- return;
+ virtual double intersectsBox( Box& cur ) {
+ return cur.intersects( _wantRegion );
+ }
- if ( ! _prefix.constrains() ) {
- GEODEBUG( "box exhausted" );
- _state = DONE;
- return;
- }
+ virtual KeyResult approxKeyCheck( const Point& p, double& d ) {
+ if( _want.onBoundary( p, _fudge ) ) return BORDER;
+ else return _want.inside( p, _fudge ) ? GOOD : BAD;
- if (_g->sizeEdge(_prefix) < _wantLen) {
- _prefix = _prefix.up();
- }
- else {
- for (int i=-1; i<=1; i++) {
- for (int j=-1; j<=1; j++) {
+ }
+
+ virtual bool exactDocCheck( const Point& p, double& d ){
+ return _want.inside( p );
+ }
- if (i == 0 && j == 0)
- continue; // main box
+ Box _want;
+ Box _wantRegion;
+ double _wantLen;
+ double _fudge;
- GeoHash newBox = _prefix;
- newBox.move(i, j);
+ GeoHash _start;
- PREFIXDEBUG(newBox, _g);
+ };
- Box cur( _g , newBox );
- if (_want.intersects(cur)) {
- // TODO consider splitting into quadrants
- getPointsForPrefix(newBox);
- }
- else {
- GEODEBUG("skipping box");
- }
- }
- }
- _state = DONE;
- }
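+ // $within : { $polygon : ... } search, expanding over boxes that overlap the polygon's bounding box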
+ class GeoPolygonBrowse : public GeoBrowse {
+ public:
- }
- return;
+ GeoPolygonBrowse( const Geo2dType* g , const BSONObj& polyPoints ,
+ BSONObj filter = BSONObj(), bool uniqueDocs = true ) : GeoBrowse( g , "polygon" , filter, uniqueDocs ) {
+
+ GEODEBUG( "In Polygon" )
+
+ BSONObjIterator i( polyPoints );
+ BSONElement first = i.next();
+ _poly.add( Point( first ) );
+
+ while ( i.more() ) {
+ _poly.add( Point( i.next() ) );
}
+ uassert( 14030, "polygon must be defined by three points or more", _poly.size() >= 3 );
+
+ _bounds = _poly.bounds();
+ _bounds.fudge( g ); // We need to check regions within the error bounds of these bounds
+ _bounds.truncate( g ); // We don't need to look anywhere outside the space
+
+ _maxDim = _g->_error + _bounds.maxDim() / 2;
+
+ ok();
}
- void getPointsForPrefix(const GeoHash& prefix) {
- if ( ! BtreeLocation::initial( *_id , _spec , _min , _max , prefix , _found , this ) ) {
- return;
- }
+ // The initial geo hash box for our first expansion
+ virtual GeoHash expandStartHash() {
+ return _g->hash( _bounds.center() );
+ }
- while ( _min.hasPrefix( prefix ) && _min.advance( -1 , _found , this ) );
- while ( _max.hasPrefix( prefix ) && _max.advance( 1 , _found , this ) );
+ // Whether the current box width is big enough for our search area
+ virtual bool fitsInBox( double width ) {
+ return _maxDim <= width;
}
- virtual bool checkDistance( const GeoHash& h , double& d ) {
- bool res = _want.inside( Point( _g , h ) , _fudge );
- GEODEBUG( "\t want : " << _want.toString()
- << " point: " << Point( _g , h ).toString()
- << " in : " << res );
- return res;
+ // Whether the current box overlaps our search area
+ virtual double intersectsBox( Box& cur ) {
+ return cur.intersects( _bounds );
}
- GeoHash _bl;
- GeoHash _tr;
- Box _want;
- double _wantLen;
+ virtual KeyResult approxKeyCheck( const Point& p, double& d ) {
- int _found;
+ int in = _poly.contains( p, _g->_error );
- GeoHash _prefix;
- BtreeLocation _min;
- BtreeLocation _max;
+ if( in == 0 ) return BORDER;
+ else return in > 0 ? GOOD : BAD;
- double _fudge;
- };
+ }
+
+ virtual bool exactDocCheck( const Point& p, double& d ){
+ return _poly.contains( p );
+ }
+
+ private:
+ Polygon _poly;
+ Box _bounds;
+ double _maxDim;
+
+ GeoHash _start;
+ };
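Similarly, a minimal sketch of a $within { $polygon : ... } query that would reach GeoPolygonBrowse, using the three-vertex minimum enforced by uassert 14030 above; the field name "loc" and the vertices are invented for illustration.

    // Hypothetical triangle query on a 2d field "loc".
    BSONObj polyQuery = BSON( "loc" << BSON( "$within"
                             << BSON( "$polygon" << BSON_ARRAY( BSON_ARRAY( 0 << 0 )
                                                              << BSON_ARRAY( 0 << 5 )
                                                              << BSON_ARRAY( 5 << 5 ) ) ) ) );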
shared_ptr<Cursor> Geo2dType::newCursor( const BSONObj& query , const BSONObj& order , int numWanted ) const {
if ( numWanted < 0 )
@@ -1605,66 +2461,92 @@ namespace mongo {
if ( _geo != e.fieldName() )
continue;
- if ( e.type() != Object )
- continue;
+ if ( e.type() == Array ) {
+ // If we get an array query, assume it is a location, and do a $within { $center : [[x, y], 0] } search
+ shared_ptr<Cursor> c( new GeoCircleBrowse( this , BSON( "0" << e.embeddedObjectUserCheck() << "1" << 0 ), query.filterFieldsUndotted( BSON( _geo << "" ), false ), "$center", true ) );
+ return c;
+ }
+ else if ( e.type() == Object ) {
- switch ( e.embeddedObject().firstElement().getGtLtOp() ) {
- case BSONObj::opNEAR: {
- BSONObj n = e.embeddedObject();
- e = n.firstElement();
+ // TODO: Filter out _geo : { $special... } field so it doesn't get matched accidentally,
+ // if matcher changes
- const char* suffix = e.fieldName() + 5; // strlen("$near") == 5;
- GeoDistType type;
- if (suffix[0] == '\0') {
- type = GEO_PLAIN;
- }
- else if (strcmp(suffix, "Sphere") == 0) {
- type = GEO_SPHERE;
- }
- else {
- uassert(13464, string("invalid $near search type: ") + e.fieldName(), false);
- type = GEO_PLAIN; // prevents uninitialized warning
- }
+ switch ( e.embeddedObject().firstElement().getGtLtOp() ) {
+ case BSONObj::opNEAR: {
+ BSONObj n = e.embeddedObject();
+ e = n.firstElement();
- double maxDistance = numeric_limits<double>::max();
- if ( e.isABSONObj() && e.embeddedObject().nFields() > 2 ) {
- BSONObjIterator i(e.embeddedObject());
- i.next();
- i.next();
- BSONElement e = i.next();
- if ( e.isNumber() )
- maxDistance = e.numberDouble();
- }
- {
- BSONElement e = n["$maxDistance"];
- if ( e.isNumber() )
- maxDistance = e.numberDouble();
- }
- shared_ptr<GeoSearch> s( new GeoSearch( this , _tohash(e) , numWanted , query , maxDistance, type ) );
- s->exec();
- shared_ptr<Cursor> c;
- c.reset( new GeoSearchCursor( s ) );
- return c;
- }
- case BSONObj::opWITHIN: {
- e = e.embeddedObject().firstElement();
- uassert( 13057 , "$within has to take an object or array" , e.isABSONObj() );
- e = e.embeddedObject().firstElement();
- string type = e.fieldName();
- if ( startsWith(type, "$center") ) {
- uassert( 13059 , "$center has to take an object or array" , e.isABSONObj() );
- shared_ptr<Cursor> c( new GeoCircleBrowse( this , e.embeddedObjectUserCheck() , query , type) );
+ const char* suffix = e.fieldName() + 5; // strlen("$near") == 5;
+ GeoDistType type;
+ if (suffix[0] == '\0') {
+ type = GEO_PLAIN;
+ }
+ else if (strcmp(suffix, "Sphere") == 0) {
+ type = GEO_SPHERE;
+ }
+ else {
+ uassert(13464, string("invalid $near search type: ") + e.fieldName(), false);
+ type = GEO_PLAIN; // prevents uninitialized warning
+ }
+
+ double maxDistance = numeric_limits<double>::max();
+ if ( e.isABSONObj() && e.embeddedObject().nFields() > 2 ) {
+ BSONObjIterator i(e.embeddedObject());
+ i.next();
+ i.next();
+ BSONElement e = i.next();
+ if ( e.isNumber() )
+ maxDistance = e.numberDouble();
+ }
+ {
+ BSONElement e = n["$maxDistance"];
+ if ( e.isNumber() )
+ maxDistance = e.numberDouble();
+ }
+
+ bool uniqueDocs = false;
+ if( ! n["$uniqueDocs"].eoo() ) uniqueDocs = n["$uniqueDocs"].trueValue();
+
+ shared_ptr<GeoSearch> s( new GeoSearch( this , Point( e ) , numWanted , query , maxDistance, type, uniqueDocs ) );
+ s->exec();
+ shared_ptr<Cursor> c;
+ c.reset( new GeoSearchCursor( s ) );
return c;
}
- else if ( type == "$box" ) {
- uassert( 13065 , "$box has to take an object or array" , e.isABSONObj() );
- shared_ptr<Cursor> c( new GeoBoxBrowse( this , e.embeddedObjectUserCheck() , query ) );
+ case BSONObj::opWITHIN: {
+
+ e = e.embeddedObject().firstElement();
+ uassert( 13057 , "$within has to take an object or array" , e.isABSONObj() );
+
+ BSONObj context = e.embeddedObject();
+ e = e.embeddedObject().firstElement();
+ string type = e.fieldName();
+
+ bool uniqueDocs = true;
+ if( ! context["$uniqueDocs"].eoo() ) uniqueDocs = context["$uniqueDocs"].trueValue();
+
+ if ( startsWith(type, "$center") ) {
+ uassert( 13059 , "$center has to take an object or array" , e.isABSONObj() );
+ shared_ptr<Cursor> c( new GeoCircleBrowse( this , e.embeddedObjectUserCheck() , query , type, uniqueDocs ) );
+ return c;
+ }
+ else if ( type == "$box" ) {
+ uassert( 13065 , "$box has to take an object or array" , e.isABSONObj() );
+ shared_ptr<Cursor> c( new GeoBoxBrowse( this , e.embeddedObjectUserCheck() , query, uniqueDocs ) );
+ return c;
+ }
+ else if ( startsWith( type, "$poly" ) ) {
+ uassert( 14029 , "$polygon has to take an object or array" , e.isABSONObj() );
+ shared_ptr<Cursor> c( new GeoPolygonBrowse( this , e.embeddedObjectUserCheck() , query, uniqueDocs ) );
+ return c;
+ }
+ throw UserException( 13058 , (string)"unknown $within type: " + type );
+ }
+ default:
+ // Otherwise... assume the object defines a point, and we want to do a zero-radius $within $center
+ shared_ptr<Cursor> c( new GeoCircleBrowse( this , BSON( "0" << e.embeddedObjectUserCheck() << "1" << 0 ), query.filterFieldsUndotted( BSON( _geo << "" ), false ) ) );
return c;
}
- throw UserException( 13058 , (string)"unknown $with type: " + type );
- }
- default:
- break;
}
}
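The two fall-through branches above (a bare array value, or an object that is not a recognized $-operator) are both treated as a point and turned into a zero-radius $center search. A sketch of the equivalence, with the field name "loc" and coordinates assumed:

    // { loc : [ x , y ] } behaves like { loc : { $within : { $center : [ [ x , y ] , 0 ] } } }
    BSONObj shorthand  = BSON( "loc" << BSON_ARRAY( -73.97 << 40.77 ) );
    BSONObj equivalent = BSON( "loc" << BSON( "$within"
                              << BSON( "$center" << BSON_ARRAY( BSON_ARRAY( -73.97 << 40.77 )
                                                              << 0 ) ) ) );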
@@ -1682,7 +2564,7 @@ namespace mongo {
bool slaveOk() const { return true; }
void help(stringstream& h) const { h << "http://www.mongodb.org/display/DOCS/Geospatial+Indexing#GeospatialIndexing-geoNearCommand"; }
bool slaveOverrideOk() { return true; }
- bool run(const string& dbname, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
string ns = dbname + "." + cmdObj.firstElement().valuestr();
NamespaceDetails * d = nsdetails( ns.c_str() );
@@ -1713,12 +2595,20 @@ namespace mongo {
assert( &id == g->getDetails() );
int numWanted = 100;
- if ( cmdObj["num"].isNumber() )
+ if ( cmdObj["num"].isNumber() ) {
numWanted = cmdObj["num"].numberInt();
+ assert( numWanted >= 0 );
+ }
+
+ bool uniqueDocs = false;
+ if( ! cmdObj["uniqueDocs"].eoo() ) uniqueDocs = cmdObj["uniqueDocs"].trueValue();
+
+ bool includeLocs = false;
+ if( ! cmdObj["includeLocs"].eoo() ) includeLocs = cmdObj["includeLocs"].trueValue();
uassert(13046, "'near' param missing/invalid", !cmdObj["near"].eoo());
- const GeoHash n = g->_tohash( cmdObj["near"] );
- result.append( "near" , n.toString() );
+ const Point n( cmdObj["near"] );
+ result.append( "near" , g->_tohash( cmdObj["near"] ).toString() );
BSONObj filter;
if ( cmdObj["query"].type() == Object )
@@ -1732,7 +2622,7 @@ namespace mongo {
if ( cmdObj["spherical"].trueValue() )
type = GEO_SPHERE;
- GeoSearch gs( g , n , numWanted , filter , maxDistance , type);
+ GeoSearch gs( g , n , numWanted , filter , maxDistance , type, uniqueDocs, true );
if ( cmdObj["start"].type() == String) {
GeoHash start ((string) cmdObj["start"].valuestr());
@@ -1747,17 +2637,17 @@ namespace mongo {
double totalDistance = 0;
-
BSONObjBuilder arr( result.subarrayStart( "results" ) );
int x = 0;
- for ( GeoHopper::Holder::iterator i=gs._hopper->_points.begin(); i!=gs._hopper->_points.end(); i++ ) {
- const GeoPoint& p = *i;
+ for ( GeoHopper::Holder::iterator i=gs._points.begin(); i!=gs._points.end(); i++ ) {
- double dis = distanceMultiplier * p._distance;
+ const GeoPoint& p = *i;
+ double dis = distanceMultiplier * p.distance();
totalDistance += dis;
BSONObjBuilder bb( arr.subobjStart( BSONObjBuilder::numStr( x++ ) ) );
bb.append( "dis" , dis );
+ if( includeLocs ) bb.append( "loc" , p._pt );
bb.append( "obj" , p._o );
bb.done();
}
@@ -1766,10 +2656,10 @@ namespace mongo {
BSONObjBuilder stats( result.subobjStart( "stats" ) );
stats.append( "time" , cc().curop()->elapsedMillis() );
stats.appendNumber( "btreelocs" , gs._nscanned );
- stats.appendNumber( "nscanned" , gs._hopper->_lookedAt );
- stats.appendNumber( "objectsLoaded" , gs._hopper->_objectsLoaded );
+ stats.appendNumber( "nscanned" , gs._lookedAt );
+ stats.appendNumber( "objectsLoaded" , gs._objectsLoaded );
stats.append( "avgDistance" , totalDistance / x );
- stats.append( "maxDistance" , gs._hopper->farthest() );
+ stats.append( "maxDistance" , gs.farthest() );
stats.done();
return true;
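A hedged example of a geoNear command object that exercises the newly parsed options above; the collection name "places" and the coordinates are made up, only the option names come from the patch:

    BSONObj cmd = BSON( "geoNear" << "places"
                        << "near" << BSON_ARRAY( -73.97 << 40.77 )
                        << "num" << 10
                        << "spherical" << true
                        << "uniqueDocs" << true
                        << "includeLocs" << true );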
@@ -1783,7 +2673,7 @@ namespace mongo {
virtual LockType locktype() const { return READ; }
bool slaveOk() const { return true; }
bool slaveOverrideOk() { return true; }
- bool run(const string& dbname, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
string ns = dbname + "." + cmdObj.firstElement().valuestr();
NamespaceDetails * d = nsdetails( ns.c_str() );
@@ -1819,7 +2709,8 @@ namespace mongo {
int max = 100000;
- BtreeCursor c( d , geoIdx , id , BSONObj() , BSONObj() , true , 1 );
+ auto_ptr<BtreeCursor> bc( BtreeCursor::make( d , geoIdx , id , BSONObj() , BSONObj() , true , 1 ) );
+ BtreeCursor &c = *bc;
while ( c.ok() && max-- ) {
GeoHash h( c.currKey().firstElement() );
int len;
@@ -1837,4 +2728,248 @@ namespace mongo {
} geoWalkCmd;
+ struct GeoUnitTest : public UnitTest {
+
+ int round( double d ) {
+ return (int)(.5+(d*1000));
+ }
+
+#define GEOHEQ(a,b) if ( a.toString() != b ){ cout << "[" << a.toString() << "] != [" << b << "]" << endl; assert( a == GeoHash(b) ); }
+
+ void run() {
+ assert( ! GeoHash::isBitSet( 0 , 0 ) );
+ assert( ! GeoHash::isBitSet( 0 , 31 ) );
+ assert( GeoHash::isBitSet( 1 , 31 ) );
+
+ IndexSpec i( BSON( "loc" << "2d" ) );
+ Geo2dType g( &geo2dplugin , &i );
+ {
+ double x = 73.01212;
+ double y = 41.352964;
+ BSONObj in = BSON( "x" << x << "y" << y );
+ GeoHash h = g._hash( in );
+ BSONObj out = g._unhash( h );
+ assert( round(x) == round( out["x"].number() ) );
+ assert( round(y) == round( out["y"].number() ) );
+ assert( round( in["x"].number() ) == round( out["x"].number() ) );
+ assert( round( in["y"].number() ) == round( out["y"].number() ) );
+ }
+
+ {
+ double x = -73.01212;
+ double y = 41.352964;
+ BSONObj in = BSON( "x" << x << "y" << y );
+ GeoHash h = g._hash( in );
+ BSONObj out = g._unhash( h );
+ assert( round(x) == round( out["x"].number() ) );
+ assert( round(y) == round( out["y"].number() ) );
+ assert( round( in["x"].number() ) == round( out["x"].number() ) );
+ assert( round( in["y"].number() ) == round( out["y"].number() ) );
+ }
+
+ {
+ GeoHash h( "0000" );
+ h.move( 0 , 1 );
+ GEOHEQ( h , "0001" );
+ h.move( 0 , -1 );
+ GEOHEQ( h , "0000" );
+
+ h.init( "0001" );
+ h.move( 0 , 1 );
+ GEOHEQ( h , "0100" );
+ h.move( 0 , -1 );
+ GEOHEQ( h , "0001" );
+
+
+ h.init( "0000" );
+ h.move( 1 , 0 );
+ GEOHEQ( h , "0010" );
+ }
+
+ {
+ Box b( 5 , 5 , 2 );
+ assert( "(5,5) -->> (7,7)" == b.toString() );
+ }
+
+ {
+ GeoHash a = g.hash( 1 , 1 );
+ GeoHash b = g.hash( 4 , 5 );
+ assert( 5 == (int)(g.distance( a , b ) ) );
+ a = g.hash( 50 , 50 );
+ b = g.hash( 42 , 44 );
+ assert( round(10) == round(g.distance( a , b )) );
+ }
+
+ {
+ GeoHash x("0000");
+ assert( 0 == x.getHash() );
+ x.init( 0 , 1 , 32 );
+ GEOHEQ( x , "0000000000000000000000000000000000000000000000000000000000000001" )
+
+ assert( GeoHash( "1100").hasPrefix( GeoHash( "11" ) ) );
+ assert( ! GeoHash( "1000").hasPrefix( GeoHash( "11" ) ) );
+ }
+
+ {
+ GeoHash x("1010");
+ GEOHEQ( x , "1010" );
+ GeoHash y = x + "01";
+ GEOHEQ( y , "101001" );
+ }
+
+ {
+
+ GeoHash a = g.hash( 5 , 5 );
+ GeoHash b = g.hash( 5 , 7 );
+ GeoHash c = g.hash( 100 , 100 );
+ /*
+ cout << "a: " << a << endl;
+ cout << "b: " << b << endl;
+ cout << "c: " << c << endl;
+
+ cout << "a: " << a.toStringHex1() << endl;
+ cout << "b: " << b.toStringHex1() << endl;
+ cout << "c: " << c.toStringHex1() << endl;
+ */
+ BSONObj oa = a.wrap();
+ BSONObj ob = b.wrap();
+ BSONObj oc = c.wrap();
+ /*
+ cout << "a: " << oa.hexDump() << endl;
+ cout << "b: " << ob.hexDump() << endl;
+ cout << "c: " << oc.hexDump() << endl;
+ */
+ assert( oa.woCompare( ob ) < 0 );
+ assert( oa.woCompare( oc ) < 0 );
+
+ }
+
+ {
+ GeoHash x( "000000" );
+ x.move( -1 , 0 );
+ GEOHEQ( x , "101010" );
+ x.move( 1 , -1 );
+ GEOHEQ( x , "010101" );
+ x.move( 0 , 1 );
+ GEOHEQ( x , "000000" );
+ }
+
+ {
+ GeoHash prefix( "110011000000" );
+ GeoHash entry( "1100110000011100000111000001110000011100000111000001000000000000" );
+ assert( ! entry.hasPrefix( prefix ) );
+
+ entry = GeoHash("1100110000001100000111000001110000011100000111000001000000000000");
+ assert( entry.toString().find( prefix.toString() ) == 0 );
+ assert( entry.hasPrefix( GeoHash( "1100" ) ) );
+ assert( entry.hasPrefix( prefix ) );
+ }
+
+ {
+ GeoHash a = g.hash( 50 , 50 );
+ GeoHash b = g.hash( 48 , 54 );
+ assert( round( 4.47214 ) == round( g.distance( a , b ) ) );
+ }
+
+
+ {
+ Box b( Point( 29.762283 , -95.364271 ) , Point( 29.764283000000002 , -95.36227099999999 ) );
+ assert( b.inside( 29.763 , -95.363 ) );
+ assert( ! b.inside( 32.9570255 , -96.1082497 ) );
+ assert( ! b.inside( 32.9570255 , -96.1082497 , .01 ) );
+ }
+
+ {
+ GeoHash a( "11001111" );
+ assert( GeoHash( "11" ) == a.commonPrefix( GeoHash("11") ) );
+ assert( GeoHash( "11" ) == a.commonPrefix( GeoHash("11110000") ) );
+ }
+
+ {
+ int N = 10000;
+ {
+ Timer t;
+ for ( int i=0; i<N; i++ ) {
+ unsigned x = (unsigned)rand();
+ unsigned y = (unsigned)rand();
+ GeoHash h( x , y );
+ unsigned a,b;
+ h.unhash_slow( a,b );
+ assert( a == x );
+ assert( b == y );
+ }
+ //cout << "slow: " << t.millis() << endl;
+ }
+
+ {
+ Timer t;
+ for ( int i=0; i<N; i++ ) {
+ unsigned x = (unsigned)rand();
+ unsigned y = (unsigned)rand();
+ GeoHash h( x , y );
+ unsigned a,b;
+ h.unhash_fast( a,b );
+ assert( a == x );
+ assert( b == y );
+ }
+ //cout << "fast: " << t.millis() << endl;
+ }
+
+ }
+
+ {
+ // see http://en.wikipedia.org/wiki/Great-circle_distance#Worked_example
+
+ {
+ Point BNA (-86.67, 36.12);
+ Point LAX (-118.40, 33.94);
+
+ double dist1 = spheredist_deg(BNA, LAX);
+ double dist2 = spheredist_deg(LAX, BNA);
+
+ // target is 0.45306
+ assert( 0.45305 <= dist1 && dist1 <= 0.45307 );
+ assert( 0.45305 <= dist2 && dist2 <= 0.45307 );
+ }
+ {
+ Point BNA (-1.5127, 0.6304);
+ Point LAX (-2.0665, 0.5924);
+
+ double dist1 = spheredist_rad(BNA, LAX);
+ double dist2 = spheredist_rad(LAX, BNA);
+
+ // target is 0.45306
+ assert( 0.45305 <= dist1 && dist1 <= 0.45307 );
+ assert( 0.45305 <= dist2 && dist2 <= 0.45307 );
+ }
+ {
+ Point JFK (-73.77694444, 40.63861111 );
+ Point LAX (-118.40, 33.94);
+
+ double dist = spheredist_deg(JFK, LAX) * EARTH_RADIUS_MILES;
+ assert( dist > 2469 && dist < 2470 );
+ }
+
+ {
+ Point BNA (-86.67, 36.12);
+ Point LAX (-118.40, 33.94);
+ Point JFK (-73.77694444, 40.63861111 );
+ assert( spheredist_deg(BNA, BNA) < 1e-6);
+ assert( spheredist_deg(LAX, LAX) < 1e-6);
+ assert( spheredist_deg(JFK, JFK) < 1e-6);
+
+ Point zero (0, 0);
+ Point antizero (0,-180);
+
+ // these were known to cause NaN
+ assert( spheredist_deg(zero, zero) < 1e-6);
+ assert( fabs(M_PI-spheredist_deg(zero, antizero)) < 1e-6);
+ assert( fabs(M_PI-spheredist_deg(antizero, zero)) < 1e-6);
+ }
+ }
+ }
+ } geoUnitTest;
+
+
}
+
diff --git a/db/geo/core.h b/db/geo/core.h
index 602b513..b779978 100644
--- a/db/geo/core.h
+++ b/db/geo/core.h
@@ -59,6 +59,7 @@ namespace mongo {
class GeoHash {
public:
+
GeoHash()
: _hash(0),_bits(0) {
}
@@ -71,6 +72,14 @@ namespace mongo {
init( hash );
}
+ static GeoHash makeFromBinData(const char *bindata, unsigned bits) {
+ GeoHash h;
+ h._bits = bits;
+ h._copy( (char*)&h._hash , bindata );
+ h._fix();
+ return h;
+ }
+
explicit GeoHash( const BSONElement& e , unsigned bits=32 ) {
_bits = bits;
if ( e.type() == BinData ) {
@@ -80,7 +89,7 @@ namespace mongo {
_bits = bits;
}
else {
- cout << "GeoHash cons e : " << e << endl;
+ cout << "GeoHash bad element: " << e << endl;
uassert(13047,"wrong type for geo index. if you're using a pre-release version, need to rebuild index",0);
}
_fix();
@@ -214,6 +223,10 @@ namespace mongo {
return _bits > 0;
}
+ bool canRefine() const {
+ return _bits < 32;
+ }
+
void move( int x , int y ) {
assert( _bits );
_move( 0 , x );
@@ -265,10 +278,19 @@ namespace mongo {
return *this;
}
- bool operator==(const GeoHash& h ) {
+ bool operator==(const GeoHash& h ) const {
return _hash == h._hash && _bits == h._bits;
}
+ bool operator!=(const GeoHash& h ) const {
+ return !( *this == h );
+ }
+
+ bool operator<(const GeoHash& h ) const {
+ if( _hash != h._hash ) return _hash < h._hash;
+ return _bits < h._bits;
+ }
+
GeoHash& operator+=( const char * s ) {
unsigned pos = _bits * 2;
_bits += strlen(s) / 2;
@@ -289,6 +311,10 @@ namespace mongo {
return n;
}
+ GeoHash operator+( string s ) const {
+ return operator+( s.c_str() );
+ }
+
void _fix() {
static long long FULL = 0xFFFFFFFFFFFFFFFFLL;
long long mask = FULL << ( 64 - ( _bits * 2 ) );
@@ -322,7 +348,7 @@ namespace mongo {
private:
- void _copy( char * dst , const char * src ) const {
+ static void _copy( char * dst , const char * src ) {
for ( unsigned a=0; a<8; a++ ) {
dst[a] = src[7-a];
}
@@ -378,9 +404,61 @@ namespace mongo {
double distance( const Point& p ) const {
double a = _x - p._x;
double b = _y - p._y;
+
+ // Avoid numerical error if possible...
+ if( a == 0 ) return abs( _y - p._y );
+ if( b == 0 ) return abs( _x - p._x );
+
return sqrt( ( a * a ) + ( b * b ) );
}
+ /**
+ * Distance method that compares x or y coords when other direction is zero,
+ * avoids numerical error when distances are very close to radius but axis-aligned.
+ *
+ * An example of the problem is:
+     * (52.0 - 51.9999) - 0.0001 = 3.31965e-15, so 52.0 - 51.9999 > 0.0001 in double arithmetic,
+     * but:
+     * 51.9999 + 0.0001 <= 52.0
+ *
+     * This avoids some (but not all!) surprising results in $center queries where points are
+ * ( radius + center.x, center.y ) or vice-versa.
+ */
+ bool distanceWithin( const Point& p, double radius ) const {
+ double a = _x - p._x;
+ double b = _y - p._y;
+
+ if( a == 0 ) {
+ //
+                // Note: For some unknown reason, when a 32-bit g++ optimizes this call, the sum is
+ // calculated imprecisely. We need to force the compiler to always evaluate it correctly,
+ // hence the weirdness.
+ //
+ // On some 32-bit linux machines, removing the volatile keyword or calculating the sum inline
+ // will make certain geo tests fail. Of course this check will force volatile for all 32-bit systems,
+ // not just affected systems.
+ if( sizeof(void*) <= 4 ){
+ volatile double sum = _y > p._y ? p._y + radius : _y + radius;
+ return _y > p._y ? sum >= _y : sum >= p._y;
+ }
+ else {
+ // Original math, correct for most systems
+ return _y > p._y ? p._y + radius >= _y : _y + radius >= p._y;
+ }
+ }
+ if( b == 0 ) {
+ if( sizeof(void*) <= 4 ){
+ volatile double sum = _x > p._x ? p._x + radius : _x + radius;
+ return _x > p._x ? sum >= _x : sum >= p._x;
+ }
+ else {
+ return _x > p._x ? p._x + radius >= _x : _x + radius >= p._x;
+ }
+ }
+
+ return sqrt( ( a * a ) + ( b * b ) ) <= radius;
+ }
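A minimal sketch, using the values from the comment above, of why callers compare with distanceWithin() rather than testing distance() <= radius for axis-aligned boundary points; the variable names are illustrative only:

    Point center( 51.9999 , 10.0 );
    Point edge( 52.0 , 10.0 );                        // exactly radius away along x only
    double radius = 0.0001;
    bool naive = center.distance( edge ) <= radius;   // may be false due to rounding
    bool safe  = center.distanceWithin( edge , radius ); // compares via addition, per the comment above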
+
string toString() const {
StringBuilder buf(32);
buf << "(" << _x << "," << _y << ")";
@@ -396,6 +474,12 @@ namespace mongo {
extern const double EARTH_RADIUS_KM;
extern const double EARTH_RADIUS_MILES;
+ // Technically lat/long bounds, not really tied to earth radius.
+ inline void checkEarthBounds( Point p ) {
+ uassert( 14808, str::stream() << "point " << p.toString() << " must be in earth-like bounds of long : [-180, 180), lat : [-90, 90] ",
+ p._x >= -180 && p._x < 180 && p._y >= -90 && p._y <= 90 );
+ }
+
inline double deg2rad(double deg) { return deg * (M_PI/180); }
inline double rad2deg(double rad) { return rad * (180/M_PI); }
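A short illustrative sketch (coordinates borrowed from the unit test in 2d.cpp above) combining these helpers: validate the points, then scale the great-circle distance by the extern radius constant; running this outside the server is an assumption.

    Point BNA( -86.67 , 36.12 );
    Point LAX( -118.40 , 33.94 );
    checkEarthBounds( BNA );
    checkEarthBounds( LAX );
    double km = spheredist_deg( BNA , LAX ) * EARTH_RADIUS_KM;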
diff --git a/db/geo/haystack.cpp b/db/geo/haystack.cpp
index 7f278ca..a5dd478 100644
--- a/db/geo/haystack.cpp
+++ b/db/geo/haystack.cpp
@@ -119,7 +119,7 @@ namespace mongo {
return ss.str();
}
- void _add( const BSONObj& obj, const string& root , const BSONElement& e , BSONObjSetDefaultOrder& keys ) const {
+ void _add( const BSONObj& obj, const string& root , const BSONElement& e , BSONObjSet& keys ) const {
BSONObjBuilder buf;
buf.append( "" , root );
if ( e.eoo() )
@@ -132,7 +132,7 @@ namespace mongo {
keys.insert( key );
}
- void getKeys( const BSONObj &obj, BSONObjSetDefaultOrder &keys ) const {
+ void getKeys( const BSONObj &obj, BSONObjSet &keys ) const {
BSONElement loc = obj.getFieldDotted( _geo );
if ( loc.eoo() )
@@ -207,15 +207,15 @@ namespace mongo {
GEOQUADDEBUG( "KEY: " << key );
set<DiskLoc> thisPass;
- BtreeCursor cursor( nsd , idxNo , *getDetails() , key , key , true , 1 );
- while ( cursor.ok() ) {
- pair<set<DiskLoc>::iterator, bool> p = thisPass.insert( cursor.currLoc() );
+ scoped_ptr<BtreeCursor> cursor( BtreeCursor::make( nsd , idxNo , *getDetails() , key , key , true , 1 ) );
+ while ( cursor->ok() ) {
+ pair<set<DiskLoc>::iterator, bool> p = thisPass.insert( cursor->currLoc() );
if ( p.second ) {
- hopper.got( cursor.currLoc() );
- GEOQUADDEBUG( "\t" << cursor.current() );
+ hopper.got( cursor->currLoc() );
+ GEOQUADDEBUG( "\t" << cursor->current() );
btreeMatches++;
}
- cursor.advance();
+ cursor->advance();
}
}
@@ -264,7 +264,7 @@ namespace mongo {
virtual LockType locktype() const { return READ; }
bool slaveOk() const { return true; }
bool slaveOverrideOk() const { return true; }
- bool run(const string& dbname , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ bool run(const string& dbname , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
string ns = dbname + "." + cmdObj.firstElement().valuestr();
diff --git a/db/index.cpp b/db/index.cpp
index 218ecb3..67a0d44 100644
--- a/db/index.cpp
+++ b/db/index.cpp
@@ -1,4 +1,4 @@
-// index.cpp
+/** @file index.cpp */
/**
* Copyright (C) 2008 10gen Inc.
@@ -20,12 +20,86 @@
#include "namespace-inl.h"
#include "index.h"
#include "btree.h"
-#include "query.h"
#include "background.h"
#include "repl/rs.h"
+#include "ops/delete.h"
+
namespace mongo {
+ template< class V >
+ class IndexInterfaceImpl : public IndexInterface {
+ public:
+ typedef typename V::KeyOwned KeyOwned;
+ virtual int keyCompare(const BSONObj& l,const BSONObj& r, const Ordering &ordering);
+
+/* virtual DiskLoc locate(const IndexDetails &idx , const DiskLoc& thisLoc, const BSONObj& key, const Ordering &order,
+ int& pos, bool& found, const DiskLoc &recordLoc, int direction) {
+ return thisLoc.btree<V>()->locate(idx, thisLoc, key, order, pos, found, recordLoc, direction);
+ }
+ */
+ virtual long long fullValidate(const DiskLoc& thisLoc, const BSONObj &order) {
+ return thisLoc.btree<V>()->fullValidate(thisLoc, order);
+ }
+ virtual DiskLoc findSingle(const IndexDetails &indexdetails , const DiskLoc& thisLoc, const BSONObj& key) const {
+ return thisLoc.btree<V>()->findSingle(indexdetails,thisLoc,key);
+ }
+ virtual bool unindex(const DiskLoc thisLoc, IndexDetails& id, const BSONObj& key, const DiskLoc recordLoc) const {
+ return thisLoc.btree<V>()->unindex(thisLoc, id, key, recordLoc);
+ }
+ virtual int bt_insert(const DiskLoc thisLoc, const DiskLoc recordLoc,
+ const BSONObj& key, const Ordering &order, bool dupsAllowed,
+ IndexDetails& idx, bool toplevel = true) const {
+ return thisLoc.btree<V>()->bt_insert(thisLoc, recordLoc, key, order, dupsAllowed, idx, toplevel);
+ }
+ virtual DiskLoc addBucket(const IndexDetails& id) {
+ return BtreeBucket<V>::addBucket(id);
+ }
+ virtual void uassertIfDups(IndexDetails& idx, vector<BSONObj*>& addedKeys, DiskLoc head, DiskLoc self, const Ordering& ordering) {
+ const BtreeBucket<V> *h = head.btree<V>();
+ for( vector<BSONObj*>::iterator i = addedKeys.begin(); i != addedKeys.end(); i++ ) {
+ KeyOwned k(**i);
+ bool dup = h->wouldCreateDup(idx, head, k, ordering, self);
+ uassert( 11001 , h->dupKeyError( idx , k ) , !dup);
+ }
+ }
+
+ // for geo:
+ virtual bool isUsed(DiskLoc thisLoc, int pos) { return thisLoc.btree<V>()->isUsed(pos); }
+ virtual void keyAt(DiskLoc thisLoc, int pos, BSONObj& key, DiskLoc& recordLoc) {
+ typename BtreeBucket<V>::KeyNode kn = thisLoc.btree<V>()->keyNode(pos);
+ key = kn.key.toBson();
+ recordLoc = kn.recordLoc;
+ }
+ virtual BSONObj keyAt(DiskLoc thisLoc, int pos) {
+ return thisLoc.btree<V>()->keyAt(pos).toBson();
+ }
+ virtual DiskLoc locate(const IndexDetails &idx , const DiskLoc& thisLoc, const BSONObj& key, const Ordering &order,
+ int& pos, bool& found, const DiskLoc &recordLoc, int direction=1) {
+ return thisLoc.btree<V>()->locate(idx, thisLoc, key, order, pos, found, recordLoc, direction);
+ }
+ virtual DiskLoc advance(const DiskLoc& thisLoc, int& keyOfs, int direction, const char *caller) {
+ return thisLoc.btree<V>()->advance(thisLoc,keyOfs,direction,caller);
+ }
+ };
+
+ int oldCompare(const BSONObj& l,const BSONObj& r, const Ordering &o); // key.cpp
+
+ template <>
+ int IndexInterfaceImpl< V0 >::keyCompare(const BSONObj& l, const BSONObj& r, const Ordering &ordering) {
+ return oldCompare(l, r, ordering);
+ }
+
+ template <>
+ int IndexInterfaceImpl< V1 >::keyCompare(const BSONObj& l, const BSONObj& r, const Ordering &ordering) {
+ return l.woCompare(r, ordering, /*considerfieldname*/false);
+ }
+
+ IndexInterfaceImpl<V0> iii_v0;
+ IndexInterfaceImpl<V1> iii_v1;
+
+ IndexInterface *IndexDetails::iis[] = { &iii_v0, &iii_v1 };
+
int removeFromSysIndexes(const char *ns, const char *idxName) {
string system_indexes = cc().database()->name + ".system.indexes";
BSONObjBuilder b;
@@ -66,7 +140,7 @@ namespace mongo {
}
const IndexSpec& IndexDetails::getSpec() const {
- scoped_lock lk(NamespaceDetailsTransient::_qcMutex);
+ SimpleMutex::scoped_lock lk(NamespaceDetailsTransient::_qcMutex);
return NamespaceDetailsTransient::get_inlock( info.obj()["ns"].valuestr() ).getIndexSpec( this );
}
@@ -104,13 +178,15 @@ namespace mongo {
}
}
- void IndexDetails::getKeysFromObject( const BSONObj& obj, BSONObjSetDefaultOrder& keys) const {
+ void IndexDetails::getKeysFromObject( const BSONObj& obj, BSONObjSet& keys) const {
getSpec().getKeys( obj, keys );
}
- void setDifference(BSONObjSetDefaultOrder &l, BSONObjSetDefaultOrder &r, vector<BSONObj*> &diff) {
- BSONObjSetDefaultOrder::iterator i = l.begin();
- BSONObjSetDefaultOrder::iterator j = r.begin();
+ void setDifference(BSONObjSet &l, BSONObjSet &r, vector<BSONObj*> &diff) {
+ // l and r must use the same ordering spec.
+ verify( 14819, l.key_comp().order() == r.key_comp().order() );
+ BSONObjSet::iterator i = l.begin();
+ BSONObjSet::iterator j = r.begin();
while ( 1 ) {
if ( i == l.end() )
break;
@@ -189,7 +265,6 @@ namespace mongo {
uassert(10097, "bad table to index name on add index attempt",
cc().database()->name == nsToDatabase(sourceNS.c_str()));
-
BSONObj key = io.getObjectField("key");
uassert(12524, "index key pattern too large", key.objsize() <= 2048);
if( !validKeyPattern(key) ) {
@@ -260,30 +335,53 @@ namespace mongo {
string pluginName = IndexPlugin::findPluginName( key );
IndexPlugin * plugin = pluginName.size() ? IndexPlugin::get( pluginName ) : 0;
- if ( plugin ) {
- fixedIndexObject = plugin->adjustIndexSpec( io );
- }
- else if ( io["v"].eoo() ) {
- // add "v" if it doesn't exist
- // if it does - leave whatever value was there
- // this is for testing and replication
- BSONObjBuilder b( io.objsize() + 32 );
- b.appendElements( io );
- b.append( "v" , 0 );
+
+ {
+ BSONObj o = io;
+ if ( plugin ) {
+ o = plugin->adjustIndexSpec(o);
+ }
+ BSONObjBuilder b;
+ int v = DefaultIndexVersionNumber;
+ if( !o["v"].eoo() ) {
+ double vv = o["v"].Number();
+                // note: one day we may be able to fresh-build fewer index versions than we can actually use;
+                // isASupportedIndexVersionNumber() is what we can use
+ uassert(14803, str::stream() << "this version of mongod cannot build new indexes of version number " << vv,
+ vv == 0 || vv == 1);
+ v = (int) vv;
+ }
+                // the idea is to put the fields we use most often earlier in the object
+ b.append("v", v);
+ b.append(o["key"]);
+ if( o["unique"].trueValue() )
+ b.appendBool("unique", true); // normalize to bool true in case was int 1 or something...
+ b.append(o["ns"]);
+
+ {
+                    // append the remaining fields, stripping _id and the fields already added above
+ BSONObjIterator i(o);
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ string s = e.fieldName();
+ if( s != "_id" && s != "v" && s != "ns" && s != "unique" && s != "key" )
+ b.append(e);
+ }
+ }
+
fixedIndexObject = b.obj();
}
return true;
}
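A hypothetical before/after of the normalization above, with invented names and values, to show what the reordering and type cleanup do:

    // in  : { name: "x_1", ns: "test.foo", key: { x: 1 }, unique: 1 }
    // out : { v: 1, key: { x: 1 }, unique: true, ns: "test.foo", name: "x_1" }
    // "v" is added or validated, frequently read fields move to the front,
    // "unique" is normalized to a bool, and any "_id" field is dropped.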
-
void IndexSpec::reset( const IndexDetails * details ) {
_details = details;
reset( details->info );
}
- void IndexSpec::reset( const DiskLoc& loc ) {
- info = loc.obj();
+ void IndexSpec::reset( const BSONObj& _info ) {
+ info = _info;
keyPattern = info["key"].embeddedObjectUserCheck();
if ( keyPattern.objsize() == 0 ) {
out() << info.toString() << endl;
diff --git a/db/index.h b/db/index.h
index d13bd1d..54b0639 100644
--- a/db/index.h
+++ b/db/index.h
@@ -22,9 +22,34 @@
#include "diskloc.h"
#include "jsobj.h"
#include "indexkey.h"
+#include "key.h"
namespace mongo {
+ class IndexInterface {
+ protected:
+ virtual ~IndexInterface() { }
+ public:
+ virtual int keyCompare(const BSONObj& l,const BSONObj& r, const Ordering &ordering) = 0;
+ virtual long long fullValidate(const DiskLoc& thisLoc, const BSONObj &order) = 0;
+ virtual DiskLoc findSingle(const IndexDetails &indexdetails , const DiskLoc& thisLoc, const BSONObj& key) const = 0;
+ virtual bool unindex(const DiskLoc thisLoc, IndexDetails& id, const BSONObj& key, const DiskLoc recordLoc) const = 0;
+ virtual int bt_insert(const DiskLoc thisLoc, const DiskLoc recordLoc,
+ const BSONObj& key, const Ordering &order, bool dupsAllowed,
+ IndexDetails& idx, bool toplevel = true) const = 0;
+ virtual DiskLoc addBucket(const IndexDetails&) = 0;
+ virtual void uassertIfDups(IndexDetails& idx, vector<BSONObj*>& addedKeys, DiskLoc head,
+ DiskLoc self, const Ordering& ordering) = 0;
+
+ // these are for geo
+ virtual bool isUsed(DiskLoc thisLoc, int pos) = 0;
+ virtual void keyAt(DiskLoc thisLoc, int pos, BSONObj&, DiskLoc& recordLoc) = 0;
+ virtual BSONObj keyAt(DiskLoc thisLoc, int pos) = 0;
+ virtual DiskLoc locate(const IndexDetails &idx , const DiskLoc& thisLoc, const BSONObj& key, const Ordering &order,
+ int& pos, bool& found, const DiskLoc &recordLoc, int direction=1) = 0;
+ virtual DiskLoc advance(const DiskLoc& thisLoc, int& keyOfs, int direction, const char *caller) = 0;
+ };
+
/* Details about a particular index. There is one of these effectively for each object in
system.namespaces (although this also includes the head pointer, which is not in that
collection).
@@ -45,7 +70,7 @@ namespace mongo {
/* Location of index info object. Format:
{ name:"nameofindex", ns:"parentnsname", key: {keypattobject}
- [, unique: <bool>, background: <bool>]
+ [, unique: <bool>, background: <bool>, v:<version>]
}
This object is in the system.indexes collection. Note that since we
@@ -68,7 +93,7 @@ namespace mongo {
only when it's a "multikey" array.
keys will be left empty if key not found in the object.
*/
- void getKeysFromObject( const BSONObj& obj, BSONObjSetDefaultOrder& keys) const;
+ void getKeysFromObject( const BSONObj& obj, BSONObjSet& keys) const;
/* get the key pattern for this object.
e.g., { lastname:1, firstname:1 }
@@ -86,7 +111,6 @@ namespace mongo {
/* true if the specified key is in the index */
bool hasKey(const BSONObj& key);
- bool wouldCreateDup(const BSONObj& key, DiskLoc self);
// returns name of this index's storage area
// database.table.$index
@@ -126,6 +150,21 @@ namespace mongo {
return io.getStringField("ns");
}
+ static int versionForIndexObj( const BSONObj &obj ) {
+ BSONElement e = obj["v"];
+ if( e.type() == NumberInt )
+ return e._numberInt();
+ // should normally be an int. this is for backward compatibility
+ int v = e.numberInt();
+ uassert(14802, "index v field should be Integer type", v == 0);
+ return v;
+ }
+
+ int version() const {
+ return versionForIndexObj( info.obj() );
+ }
+
+ /** @return true if index has unique constraint */
bool unique() const {
BSONObj io = info.obj();
return io["unique"].trueValue() ||
@@ -133,33 +172,43 @@ namespace mongo {
isIdIndex();
}
- /* if set, when building index, if any duplicates, drop the duplicating object */
+ /** return true if dropDups was set when building index (if any duplicates, dropdups drops the duplicating objects) */
bool dropDups() const {
return info.obj().getBoolField( "dropDups" );
}
- /* delete this index. does NOT clean up the system catalog
- (system.indexes or system.namespaces) -- only NamespaceIndex.
+ /** delete this index. does NOT clean up the system catalog
+ (system.indexes or system.namespaces) -- only NamespaceIndex.
*/
void kill_idx();
const IndexSpec& getSpec() const;
- void checkVersion() const {
- // TODO: cache?
- massert( 13658 ,
- str::stream() << "using a newer index version: " << info.obj() << " v: " << info.obj().getIntField("v" ) ,
- info.obj().getIntField("v") <= 0 );
- }
-
string toString() const {
return info.obj().toString();
}
+
+        /** @return true if the index version is supported, i.e. we can use the index, including adding new keys.
+            it does not necessarily mean we can build an index of that version: we may not maintain the ability
+            to build indexes in old formats in the future.
+ */
+ static bool isASupportedIndexVersionNumber(int v) { return (v&1)==v; } // v == 0 || v == 1
+
+        /** @return the interface for this index, which varies with the index version.
+ used for backward compatibility of index versions/formats.
+ */
+ IndexInterface& idxInterface() const {
+ int v = version();
+ dassert( isASupportedIndexVersionNumber(v) );
+ return *iis[v&1];
+ }
+
+ static IndexInterface *iis[];
};
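A minimal sketch of how callers are expected to go through the version-dispatched interface; the wrapper function is an assumption, only idxInterface(), Ordering::make() and bt_insert() come from the tree:

    static int insertKeyViaInterface( IndexDetails& idx , const DiskLoc& recordLoc , const BSONObj& key ) {
        // picks iii_v0 or iii_v1 based on the index's "v" field
        IndexInterface& ii = idx.idxInterface();
        const Ordering ordering = Ordering::make( idx.keyPattern() );
        return ii.bt_insert( idx.head , recordLoc , key , ordering , /*dupsAllowed*/ true , idx );
    }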
struct IndexChanges { /*on an update*/
- BSONObjSetDefaultOrder oldkeys;
- BSONObjSetDefaultOrder newkeys;
+ BSONObjSet oldkeys;
+ BSONObjSet newkeys;
vector<BSONObj*> removed; // these keys were removed as part of the change
vector<BSONObj*> added; // these keys were added as part of the change
@@ -169,10 +218,8 @@ namespace mongo {
void dupCheck(IndexDetails& idx, DiskLoc curObjLoc) {
if( added.empty() || !idx.unique() )
return;
- for( vector<BSONObj*>::iterator i = added.begin(); i != added.end(); i++ ) {
- bool dup = idx.wouldCreateDup(**i, curObjLoc);
- uassert( 11001 , "E11001 duplicate key on update", !dup);
- }
+ const Ordering ordering = Ordering::make(idx.keyPattern());
+ idx.idxInterface().uassertIfDups(idx, added, idx.head, curObjLoc, ordering); // "E11001 duplicate key on update"
}
};
diff --git a/db/indexkey.cpp b/db/indexkey.cpp
index 34f30fa..6d6fcc5 100644
--- a/db/indexkey.cpp
+++ b/db/indexkey.cpp
@@ -20,11 +20,17 @@
#include "namespace-inl.h"
#include "index.h"
#include "btree.h"
-#include "query.h"
+#include "ops/query.h"
#include "background.h"
+#include "../util/text.h"
namespace mongo {
+    /** index key generation version: 0 = old (<= v1.8), 1 = current */
+ const int DefaultIndexVersionNumber = 1;
+
map<string,IndexPlugin*> * IndexPlugin::_plugins;
IndexType::IndexType( const IndexPlugin * plugin , const IndexSpec * spec )
@@ -100,6 +106,14 @@ namespace mongo {
}
{
+ // _undefinedElt
+ BSONObjBuilder b;
+ b.appendUndefined( "" );
+ _undefinedObj = b.obj();
+ _undefinedElt = _undefinedObj.firstElement();
+ }
+
+ {
// handle plugins
string pluginName = IndexPlugin::findPluginName( keyPattern );
if ( pluginName.size() ) {
@@ -116,131 +130,289 @@ namespace mongo {
_finishedInit = true;
}
-
- void IndexSpec::getKeys( const BSONObj &obj, BSONObjSetDefaultOrder &keys ) const {
- if ( _indexType.get() ) {
- _indexType->getKeys( obj , keys );
- return;
- }
- vector<const char*> fieldNames( _fieldNames );
- vector<BSONElement> fixed( _fixed );
- _getKeys( fieldNames , fixed , obj, keys );
- if ( keys.empty() && ! _sparse )
- keys.insert( _nullKey );
+ void assertParallelArrays( const char *first, const char *second ) {
+ stringstream ss;
+ ss << "cannot index parallel arrays [" << first << "] [" << second << "]";
+ uasserted( 10088 , ss.str() );
}
-
- void IndexSpec::_getKeys( vector<const char*> fieldNames , vector<BSONElement> fixed , const BSONObj &obj, BSONObjSetDefaultOrder &keys ) const {
- BSONElement arrElt;
- unsigned arrIdx = ~0;
- int numNotFound = 0;
-
- for( unsigned i = 0; i < fieldNames.size(); ++i ) {
- if ( *fieldNames[ i ] == '\0' )
- continue;
-
- BSONElement e = obj.getFieldDottedOrArray( fieldNames[ i ] );
-
- if ( e.eoo() ) {
- e = _nullElt; // no matching field
- numNotFound++;
+
+ class KeyGeneratorV0 {
+ public:
+ KeyGeneratorV0( const IndexSpec &spec ) : _spec( spec ) {}
+
+ void getKeys( const BSONObj &obj, BSONObjSet &keys ) const {
+ if ( _spec._indexType.get() ) { //plugin (eg geo)
+ _spec._indexType->getKeys( obj , keys );
+ return;
}
-
- if ( e.type() != Array )
- fieldNames[ i ] = ""; // no matching field or non-array match
-
- if ( *fieldNames[ i ] == '\0' )
- fixed[ i ] = e; // no need for further object expansion (though array expansion still possible)
-
- if ( e.type() == Array && arrElt.eoo() ) { // we only expand arrays on a single path -- track the path here
- arrIdx = i;
- arrElt = e;
+ vector<const char*> fieldNames( _spec._fieldNames );
+ vector<BSONElement> fixed( _spec._fixed );
+ _getKeys( fieldNames , fixed , obj, keys );
+ if ( keys.empty() && ! _spec._sparse )
+ keys.insert( _spec._nullKey );
+ }
+
+ private:
+ void _getKeys( vector<const char*> fieldNames , vector<BSONElement> fixed , const BSONObj &obj, BSONObjSet &keys ) const {
+ BSONElement arrElt;
+ unsigned arrIdx = ~0;
+ int numNotFound = 0;
+
+ for( unsigned i = 0; i < fieldNames.size(); ++i ) {
+ if ( *fieldNames[ i ] == '\0' )
+ continue;
+
+ BSONElement e = obj.getFieldDottedOrArray( fieldNames[ i ] );
+
+ if ( e.eoo() ) {
+ e = _spec._nullElt; // no matching field
+ numNotFound++;
+ }
+
+ if ( e.type() != Array )
+ fieldNames[ i ] = ""; // no matching field or non-array match
+
+ if ( *fieldNames[ i ] == '\0' )
+ fixed[ i ] = e; // no need for further object expansion (though array expansion still possible)
+
+ if ( e.type() == Array && arrElt.eoo() ) { // we only expand arrays on a single path -- track the path here
+ arrIdx = i;
+ arrElt = e;
+ }
+
+ // enforce single array path here
+ if ( e.type() == Array && e.rawdata() != arrElt.rawdata() ) {
+ assertParallelArrays( e.fieldName(), arrElt.fieldName() );
+ }
}
-
- // enforce single array path here
- if ( e.type() == Array && e.rawdata() != arrElt.rawdata() ) {
- stringstream ss;
- ss << "cannot index parallel arrays [" << e.fieldName() << "] [" << arrElt.fieldName() << "]";
- uasserted( 10088 , ss.str() );
+
+ bool allFound = true; // have we found elements for all field names in the key spec?
+ for( vector<const char*>::const_iterator i = fieldNames.begin(); i != fieldNames.end(); ++i ) {
+ if ( **i != '\0' ) {
+ allFound = false;
+ break;
+ }
}
- }
-
- bool allFound = true; // have we found elements for all field names in the key spec?
- for( vector<const char*>::const_iterator i = fieldNames.begin(); i != fieldNames.end(); ++i ) {
- if ( **i != '\0' ) {
- allFound = false;
- break;
+
+ if ( _spec._sparse && numNotFound == _spec._nFields ) {
+ // we didn't find any fields
+ // so we're not going to index this document
+ return;
}
- }
-
- if ( _sparse && numNotFound == _nFields ) {
- // we didn't find any fields
- // so we're not going to index this document
- return;
- }
-
- bool insertArrayNull = false;
-
- if ( allFound ) {
- if ( arrElt.eoo() ) {
- // no terminal array element to expand
- BSONObjBuilder b(_sizeTracker);
- for( vector< BSONElement >::iterator i = fixed.begin(); i != fixed.end(); ++i )
- b.appendAs( *i, "" );
- keys.insert( b.obj() );
+
+ bool insertArrayNull = false;
+
+ if ( allFound ) {
+ if ( arrElt.eoo() ) {
+ // no terminal array element to expand
+ BSONObjBuilder b(_spec._sizeTracker);
+ for( vector< BSONElement >::iterator i = fixed.begin(); i != fixed.end(); ++i )
+ b.appendAs( *i, "" );
+ keys.insert( b.obj() );
+ }
+ else {
+ // terminal array element to expand, so generate all keys
+ BSONObjIterator i( arrElt.embeddedObject() );
+ if ( i.more() ) {
+ while( i.more() ) {
+ BSONObjBuilder b(_spec._sizeTracker);
+ for( unsigned j = 0; j < fixed.size(); ++j ) {
+ if ( j == arrIdx )
+ b.appendAs( i.next(), "" );
+ else
+ b.appendAs( fixed[ j ], "" );
+ }
+ keys.insert( b.obj() );
+ }
+ }
+ else if ( fixed.size() > 1 ) {
+ insertArrayNull = true;
+ }
+ }
}
else {
- // terminal array element to expand, so generate all keys
+ // nonterminal array element to expand, so recurse
+ assert( !arrElt.eoo() );
BSONObjIterator i( arrElt.embeddedObject() );
if ( i.more() ) {
while( i.more() ) {
- BSONObjBuilder b(_sizeTracker);
- for( unsigned j = 0; j < fixed.size(); ++j ) {
- if ( j == arrIdx )
- b.appendAs( i.next(), "" );
- else
- b.appendAs( fixed[ j ], "" );
+ BSONElement e = i.next();
+ if ( e.type() == Object ) {
+ _getKeys( fieldNames, fixed, e.embeddedObject(), keys );
}
- keys.insert( b.obj() );
}
}
- else if ( fixed.size() > 1 ) {
+ else {
insertArrayNull = true;
}
}
- }
- else {
- // nonterminal array element to expand, so recurse
- assert( !arrElt.eoo() );
- BSONObjIterator i( arrElt.embeddedObject() );
- if ( i.more() ) {
- while( i.more() ) {
- BSONElement e = i.next();
- if ( e.type() == Object ) {
- _getKeys( fieldNames, fixed, e.embeddedObject(), keys );
+
+ if ( insertArrayNull ) {
+ // x : [] - need to insert undefined
+ BSONObjBuilder b(_spec._sizeTracker);
+ for( unsigned j = 0; j < fixed.size(); ++j ) {
+ if ( j == arrIdx ) {
+ b.appendUndefined( "" );
+ }
+ else {
+ BSONElement e = fixed[j];
+ if ( e.eoo() )
+ b.appendNull( "" );
+ else
+ b.appendAs( e , "" );
}
}
+ keys.insert( b.obj() );
}
- else {
- insertArrayNull = true;
+ }
+
+ const IndexSpec &_spec;
+ };
+
+ class KeyGeneratorV1 {
+ public:
+ KeyGeneratorV1( const IndexSpec &spec ) : _spec( spec ) {}
+
+ void getKeys( const BSONObj &obj, BSONObjSet &keys ) const {
+ if ( _spec._indexType.get() ) { //plugin (eg geo)
+ _spec._indexType->getKeys( obj , keys );
+ return;
+ }
+ vector<const char*> fieldNames( _spec._fieldNames );
+ vector<BSONElement> fixed( _spec._fixed );
+ _getKeys( fieldNames , fixed , obj, keys );
+ if ( keys.empty() && ! _spec._sparse )
+ keys.insert( _spec._nullKey );
+ }
+
+ private:
+ /**
+ * @param arrayNestedArray - set if the returned element is an array nested directly within arr.
+ */
+ BSONElement extractNextElement( const BSONObj &obj, const BSONObj &arr, const char *&field, bool &arrayNestedArray ) const {
+ string firstField = mongoutils::str::before( field, '.' );
+ bool haveObjField = !obj.getField( firstField ).eoo();
+ BSONElement arrField = arr.getField( firstField );
+ bool haveArrField = !arrField.eoo();
+
+ // An index component field name cannot exist in both a document array and one of that array's children.
+ uassert( 15855 , "Parallel references while expanding indexed field in array", !haveObjField || !haveArrField );
+
+ arrayNestedArray = false;
+ if ( haveObjField ) {
+ return obj.getFieldDottedOrArray( field );
+ }
+ else if ( haveArrField ) {
+ if ( arrField.type() == Array ) {
+ arrayNestedArray = true;
+ }
+ return arr.getFieldDottedOrArray( field );
}
+ return BSONElement();
}
-
- if ( insertArrayNull ) {
- // x : [] - need to insert undefined
- BSONObjBuilder b(_sizeTracker);
- for( unsigned j = 0; j < fixed.size(); ++j ) {
- if ( j == arrIdx ) {
- b.appendUndefined( "" );
+
+ void _getKeysArrEltFixed( vector<const char*> &fieldNames , vector<BSONElement> &fixed , const BSONElement &arrEntry, BSONObjSet &keys, int numNotFound, const BSONElement &arrObjElt, const set< unsigned > &arrIdxs, bool mayExpandArrayUnembedded ) const {
+ // set up any terminal array values
+ for( set<unsigned>::const_iterator j = arrIdxs.begin(); j != arrIdxs.end(); ++j ) {
+ if ( *fieldNames[ *j ] == '\0' ) {
+ fixed[ *j ] = mayExpandArrayUnembedded ? arrEntry : arrObjElt;
+ }
+ }
+ // recurse
+ _getKeys( fieldNames, fixed, ( arrEntry.type() == Object ) ? arrEntry.embeddedObject() : BSONObj(), keys, numNotFound, arrObjElt.embeddedObject() );
+ }
+
+ /**
+ * @param fieldNames - fields to index, may be postfixes in recursive calls
+ * @param fixed - values that have already been identified for their index fields
+ * @param obj - object from which keys should be extracted, based on names in fieldNames
+ * @param keys - set where index keys are written
+ * @param numNotFound - number of index fields that have already been identified as missing
+ * @param array - array from which keys should be extracted, based on names in fieldNames
+ * If obj and array are both nonempty, obj will be one of the elements of array.
+ */
+ void _getKeys( vector<const char*> fieldNames , vector<BSONElement> fixed , const BSONObj &obj, BSONObjSet &keys, int numNotFound = 0, const BSONObj &array = BSONObj() ) const {
+ BSONElement arrElt;
+ set<unsigned> arrIdxs;
+ bool mayExpandArrayUnembedded = true;
+ for( unsigned i = 0; i < fieldNames.size(); ++i ) {
+ if ( *fieldNames[ i ] == '\0' ) {
+ continue;
+ }
+
+ bool arrayNestedArray;
+ // Extract element matching fieldName[ i ] from object xor array.
+ BSONElement e = extractNextElement( obj, array, fieldNames[ i ], arrayNestedArray );
+
+ if ( e.eoo() ) {
+ // if field not present, set to null
+ fixed[ i ] = _spec._nullElt;
+ // done expanding this field name
+ fieldNames[ i ] = "";
+ numNotFound++;
+ }
+ else if ( e.type() == Array ) {
+ arrIdxs.insert( i );
+ if ( arrElt.eoo() ) {
+ // we only expand arrays on a single path -- track the path here
+ arrElt = e;
+ }
+ else if ( e.rawdata() != arrElt.rawdata() ) {
+ // enforce single array path here
+ assertParallelArrays( e.fieldName(), arrElt.fieldName() );
+ }
+ if ( arrayNestedArray ) {
+ mayExpandArrayUnembedded = false;
+ }
}
else {
- BSONElement e = fixed[j];
- if ( e.eoo() )
- b.appendNull( "" );
- else
- b.appendAs( e , "" );
+ // not an array - no need for further expansion
+ fixed[ i ] = e;
}
}
- keys.insert( b.obj() );
+
+ if ( arrElt.eoo() ) {
+ // No array, so generate a single key.
+ if ( _spec._sparse && numNotFound == _spec._nFields ) {
+ return;
+ }
+ BSONObjBuilder b(_spec._sizeTracker);
+ for( vector< BSONElement >::iterator i = fixed.begin(); i != fixed.end(); ++i ) {
+ b.appendAs( *i, "" );
+ }
+ keys.insert( b.obj() );
+ }
+ else if ( arrElt.embeddedObject().firstElement().eoo() ) {
+ // Empty array, so set matching fields to undefined.
+ _getKeysArrEltFixed( fieldNames, fixed, _spec._undefinedElt, keys, numNotFound, arrElt, arrIdxs, true );
+ }
+ else {
+ // Non empty array that can be expanded, so generate a key for each member.
+ BSONObj arrObj = arrElt.embeddedObject();
+ BSONObjIterator i( arrObj );
+ while( i.more() ) {
+ _getKeysArrEltFixed( fieldNames, fixed, i.next(), keys, numNotFound, arrElt, arrIdxs, mayExpandArrayUnembedded );
+ }
+ }
+ }
+
+ const IndexSpec &_spec;
+ };
+
+ void IndexSpec::getKeys( const BSONObj &obj, BSONObjSet &keys ) const {
+ switch( indexVersion() ) {
+ case 0: {
+ KeyGeneratorV0 g( *this );
+ g.getKeys( obj, keys );
+ break;
+ }
+ case 1: {
+ KeyGeneratorV1 g( *this );
+ g.getKeys( obj, keys );
+ break;
+ }
+ default:
+ massert( 15869, "Invalid index version for key generation.", false );
}
}
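An illustrative sketch of the expansion implemented by the generators above, for an index on { "a.b" : 1 }; driving IndexSpec::getKeys() standalone like this is an assumption, and the expected keys follow from the code:

    //   { a : [ { b : 1 } , { b : 2 } ] }  ->  keys { "" : 1 } and { "" : 2 }
    //   { a : [] } with index { a : 1 }    ->  key  { "" : undefined }   (V1 generator)
    IndexSpec spec( BSON( "a.b" << 1 ) );
    BSONObjSet keys;
    spec.getKeys( BSON( "a" << BSON_ARRAY( BSON( "b" << 1 ) << BSON( "b" << 2 ) ) ) , keys );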
@@ -275,6 +447,13 @@ namespace mongo {
IndexSuitability IndexType::suitability( const BSONObj& query , const BSONObj& order ) const {
return _spec->_suitability( query , order );
}
+
+ int IndexSpec::indexVersion() const {
+ if ( !info.hasField( "v" ) ) {
+ return DefaultIndexVersionNumber;
+ }
+ return IndexDetails::versionForIndexObj( info );
+ }
bool IndexType::scanAndOrderRequired( const BSONObj& query , const BSONObj& order ) const {
return ! order.isEmpty();
diff --git a/db/indexkey.h b/db/indexkey.h
index be73171..c04cd63 100644
--- a/db/indexkey.h
+++ b/db/indexkey.h
@@ -25,6 +25,8 @@
namespace mongo {
+ extern const int DefaultIndexVersionNumber;
+
class Cursor;
class IndexSpec;
class IndexType; // TODO: this name sucks
@@ -44,7 +46,7 @@ namespace mongo {
IndexType( const IndexPlugin * plugin , const IndexSpec * spec );
virtual ~IndexType();
- virtual void getKeys( const BSONObj &obj, BSONObjSetDefaultOrder &keys ) const = 0;
+ virtual void getKeys( const BSONObj &obj, BSONObjSet &keys ) const = 0;
virtual shared_ptr<Cursor> newCursor( const BSONObj& query , const BSONObj& order , int numWanted ) const = 0;
/** optional op : changes query to match what's in the index */
@@ -122,7 +124,7 @@ namespace mongo {
: _details(0) , _finishedInit(false) {
}
- IndexSpec( const BSONObj& k , const BSONObj& m = BSONObj() )
+ explicit IndexSpec( const BSONObj& k , const BSONObj& m = BSONObj() )
: keyPattern(k) , info(m) , _details(0) , _finishedInit(false) {
_init();
}
@@ -131,14 +133,15 @@ namespace mongo {
this is a DiscLoc of an IndexDetails info
should have a key field
*/
- IndexSpec( const DiskLoc& loc ) {
+ explicit IndexSpec( const DiskLoc& loc ) {
reset( loc );
}
- void reset( const DiskLoc& loc );
+ void reset( const BSONObj& info );
+ void reset( const DiskLoc& infoLoc ) { reset(infoLoc.obj()); }
void reset( const IndexDetails * details );
- void getKeys( const BSONObj &obj, BSONObjSetDefaultOrder &keys ) const;
+ void getKeys( const BSONObj &obj, BSONObjSet &keys ) const;
BSONElement missingField() const { return _nullElt; }
@@ -160,33 +163,33 @@ namespace mongo {
protected:
+ int indexVersion() const;
+
IndexSuitability _suitability( const BSONObj& query , const BSONObj& order ) const ;
- void _getKeys( vector<const char*> fieldNames , vector<BSONElement> fixed , const BSONObj &obj, BSONObjSetDefaultOrder &keys ) const;
-
BSONSizeTracker _sizeTracker;
-
vector<const char*> _fieldNames;
vector<BSONElement> _fixed;
BSONObj _nullKey; // a full key with all fields null
-
BSONObj _nullObj; // only used for _nullElt
BSONElement _nullElt; // jstNull
+ BSONObj _undefinedObj; // only used for _undefinedElt
+ BSONElement _undefinedElt; // undefined
+
int _nFields; // number of fields in the index
bool _sparse; // if the index is sparse
-
shared_ptr<IndexType> _indexType;
-
const IndexDetails * _details;
void _init();
+ friend class IndexType;
+ friend class KeyGeneratorV0;
+ friend class KeyGeneratorV1;
public:
bool _finishedInit;
-
- friend class IndexType;
};
diff --git a/db/instance.cpp b/db/instance.cpp
index bb2d9a5..6727867 100644
--- a/db/instance.cpp
+++ b/db/instance.cpp
@@ -19,7 +19,6 @@
#include "pch.h"
#include "db.h"
-#include "query.h"
#include "introspect.h"
#include "repl.h"
#include "dbmessage.h"
@@ -27,7 +26,7 @@
#include "lasterror.h"
#include "security.h"
#include "json.h"
-#include "replpair.h"
+#include "replutil.h"
#include "../s/d_logic.h"
#include "../util/file_allocator.h"
#include "../util/goodies.h"
@@ -39,6 +38,9 @@
#include "background.h"
#include "dur_journal.h"
#include "dur_recover.h"
+#include "ops/update.h"
+#include "ops/delete.h"
+#include "ops/query.h"
namespace mongo {
@@ -56,8 +58,6 @@ namespace mongo {
string dbExecCommand;
- char *appsrvPath = NULL;
-
DiagLog _diaglog;
bool useCursors = true;
@@ -73,14 +73,12 @@ namespace mongo {
KillCurrentOp killCurrentOp;
int lockFile = 0;
-#ifdef WIN32
+#ifdef _WIN32
HANDLE lockFileHandle;
#endif
// see FSyncCommand:
- unsigned lockedForWriting;
- mongo::mutex lockedForWritingMutex("lockedForWriting");
- bool unlockRequested = false;
+ extern bool lockedForWriting;
void inProgCmd( Message &m, DbResponse &dbresponse ) {
BSONObjBuilder b;
@@ -113,7 +111,7 @@ namespace mongo {
unsigned x = lockedForWriting;
if( x ) {
b.append("fsyncLock", x);
- b.append("info", "use db.$cmd.sys.unlock.findOne() to terminate the fsync write/snapshot lock");
+ b.append("info", "use db.fsyncUnlock() to terminate the fsync write/snapshot lock");
}
}
@@ -144,16 +142,20 @@ namespace mongo {
replyToQuery(0, m, dbresponse, obj);
}
+ void unlockFsyncAndWait();
void unlockFsync(const char *ns, Message& m, DbResponse &dbresponse) {
BSONObj obj;
- if( ! cc().isAdmin() || strncmp(ns, "admin.", 6) != 0 ) {
+ if ( ! cc().isAdmin() ) { // checks auth
obj = fromjson("{\"err\":\"unauthorized\"}");
}
+ else if (strncmp(ns, "admin.", 6) != 0 ) {
+ obj = fromjson("{\"err\":\"unauthorized - this command must be run against the admin DB\"}");
+ }
else {
if( lockedForWriting ) {
log() << "command: unlock requested" << endl;
- obj = fromjson("{ok:1,\"info\":\"unlock requested\"}");
- unlockRequested = true;
+ obj = fromjson("{ok:1,\"info\":\"unlock completed\"}");
+ unlockFsyncAndWait();
}
else {
obj = fromjson("{ok:0,\"errmsg\":\"not locked\"}");
@@ -178,7 +180,7 @@ namespace mongo {
}
catch ( AssertionException& e ) {
ok = false;
- op.debug().str << " exception ";
+ op.debug().exceptionInfo = e.getInfo();
LOGSOME {
log() << "assertion " << e.toString() << " ns:" << q.ns << " query:" <<
(q.query.valid() ? q.query.toString() : "query object is corrupt") << endl;
@@ -210,9 +212,7 @@ namespace mongo {
resp->setData( msgdata, true );
}
- if ( op.shouldDBProfile( 0 ) ) {
- op.debug().str << " bytes:" << resp->header()->dataLen();
- }
+ op.debug().responseLength = resp->header()->dataLen();
dbresponse.response = resp.release();
dbresponse.responseTo = responseTo;
@@ -220,8 +220,17 @@ namespace mongo {
return ok;
}
+ void (*reportEventToSystem)(const char *msg) = 0;
+
+ void mongoAbort(const char *msg) {
+ if( reportEventToSystem )
+ reportEventToSystem(msg);
+ rawOut(msg);
+ ::abort();
+ }
+
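A hedged sketch of how an embedder might use the new reportEventToSystem hook; the handler name and its body are invented, only the function pointer and rawOut() come from the patch:

    static void logFatalToSystem( const char* msg ) {
        // e.g. forward to syslog or the Windows event log; here we just reuse rawOut()
        rawOut( msg );
    }
    // during startup:
    //     reportEventToSystem = logFatalToSystem;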
// Returns false when request includes 'end'
- void assembleResponse( Message &m, DbResponse &dbresponse, const SockAddr &client ) {
+ void assembleResponse( Message &m, DbResponse &dbresponse, const HostAndPort& remote ) {
// before we lock...
int op = m.operation();
@@ -268,11 +277,10 @@ namespace mongo {
currentOpP = nestedOp.get();
}
CurOp& currentOp = *currentOpP;
- currentOp.reset(client,op);
+ currentOp.reset(remote,op);
OpDebug& debug = currentOp.debug();
- StringBuilder& ss = debug.str;
- ss << opToString( op ) << " ";
+ debug.op = op;
int logThreshold = cmdLine.slowMS;
bool log = logLevel >= 1;
@@ -291,7 +299,7 @@ namespace mongo {
char *p = m.singleData()->_data;
int len = strlen(p);
if ( len > 400 )
- out() << curTimeMillis() % 10000 <<
+ out() << curTimeMillis64() % 10000 <<
" long msg received, len:" << len << endl;
Message *resp = new Message();
@@ -324,7 +332,6 @@ namespace mongo {
else if ( op == dbKillCursors ) {
currentOp.ensureStarted();
logThreshold = 10;
- ss << "killcursors ";
receivedKillCursors(m);
}
else {
@@ -335,11 +342,11 @@ namespace mongo {
}
catch ( UserException& ue ) {
tlog(3) << " Caught Assertion in " << opToString(op) << ", continuing " << ue.toString() << endl;
- ss << " exception " << ue.toString();
+ debug.exceptionInfo = ue.getInfo();
}
catch ( AssertionException& e ) {
tlog(3) << " Caught Assertion in " << opToString(op) << ", continuing " << e.toString() << endl;
- ss << " exception " << e.toString();
+ debug.exceptionInfo = e.getInfo();
log = true;
}
}
@@ -350,12 +357,12 @@ namespace mongo {
//DEV log = true;
if ( log || ms > logThreshold ) {
- if( logLevel < 3 && op == dbGetMore && strstr(ns, ".oplog.") && ms < 3000 && !log ) {
+ if( logLevel < 3 && op == dbGetMore && strstr(ns, ".oplog.") && ms < 4300 && !log ) {
/* it's normal for getMore on the oplog to be slow because of use of awaitdata flag. */
}
else {
- ss << ' ' << ms << "ms";
- mongo::tlog() << ss.str() << endl;
+ debug.executionTime = ms;
+ mongo::tlog() << debug << endl;
}
}
@@ -367,15 +374,16 @@ namespace mongo {
else {
writelock lk;
if ( dbHolder.isLoaded( nsToDatabase( currentOp.getNS() ) , dbpath ) ) {
- Client::Context c( currentOp.getNS() );
- profile(ss.str().c_str(), ms);
+ Client::Context cx( currentOp.getNS() );
+ profile(c , currentOp );
}
else {
mongo::log() << "note: not profiling because db went away - probably a close on: " << currentOp.getNS() << endl;
}
}
}
-
+
+ debug.reset();
} /* assembleResponse() */
void receivedKillCursors(Message& m) {
@@ -383,9 +391,10 @@ namespace mongo {
x++; // reserved
int n = *x++;
- assert( m.dataSize() == 8 + ( 8 * n ) );
- uassert( 13004 , "sent 0 cursors to kill" , n >= 1 );
+ uassert( 13659 , "sent 0 cursors to kill" , n != 0 );
+ massert( 13658 , str::stream() << "bad kill cursors size: " << m.dataSize() , m.dataSize() == 8 + ( 8 * n ) );
+ uassert( 13004 , str::stream() << "sent negative cursors to kill: " << n , n >= 1 );
if ( n > 2000 ) {
log( n < 30000 ? LL_WARNING : LL_ERROR ) << "receivedKillCursors, n=" << n << endl;
assert( n < 30000 );
@@ -432,9 +441,7 @@ namespace mongo {
void receivedUpdate(Message& m, CurOp& op) {
DbMessage d(m);
const char *ns = d.getns();
- assert(*ns);
- uassert( 10054 , "not master", isMasterNs( ns ) );
- op.debug().str << ns << ' ';
+ op.debug().ns = ns;
int flags = d.pullInt();
BSONObj query = d.nextJsObj();
@@ -447,18 +454,15 @@ namespace mongo {
bool upsert = flags & UpdateOption_Upsert;
bool multi = flags & UpdateOption_Multi;
bool broadcast = flags & UpdateOption_Broadcast;
- {
- string s = query.toString();
- /* todo: we shouldn't do all this ss stuff when we don't need it, it will slow us down.
- instead, let's just story the query BSON in the debug object, and it can toString()
- lazily
- */
- op.debug().str << " query: " << s;
- op.setQuery(query);
- }
+
+ op.debug().query = query;
+ op.setQuery(query);
writelock lk;
+ // writelock is used to synchronize stepdowns w/ writes
+ uassert( 10054 , "not master", isMasterNs( ns ) );
+
// if this ever moves to outside of lock, need to adjust check Client::Context::_finishInit
if ( ! broadcast && handlePossibleShardedMessage( m , 0 ) )
return;
@@ -472,21 +476,21 @@ namespace mongo {
void receivedDelete(Message& m, CurOp& op) {
DbMessage d(m);
const char *ns = d.getns();
- assert(*ns);
- uassert( 10056 , "not master", isMasterNs( ns ) );
- op.debug().str << ns << ' ';
+ op.debug().ns = ns;
int flags = d.pullInt();
bool justOne = flags & RemoveOption_JustOne;
bool broadcast = flags & RemoveOption_Broadcast;
assert( d.moreJSObjs() );
BSONObj pattern = d.nextJsObj();
- {
- string s = pattern.toString();
- op.debug().str << " query: " << s;
- op.setQuery(pattern);
- }
+
+ op.debug().query = pattern;
+ op.setQuery(pattern);
writelock lk(ns);
+
+ // writelock is used to synchronize stepdowns w/ writes
+ uassert( 10056 , "not master", isMasterNs( ns ) );
+
// if this ever moves to outside of lock, need to adjust check Client::Context::_finishInit
if ( ! broadcast && handlePossibleShardedMessage( m , 0 ) )
return;
@@ -500,7 +504,6 @@ namespace mongo {
QueryResult* emptyMoreResult(long long);
bool receivedGetMore(DbResponse& dbresponse, Message& m, CurOp& curop ) {
- StringBuilder& ss = curop.debug().str;
bool ok = true;
DbMessage d(m);
@@ -509,9 +512,9 @@ namespace mongo {
int ntoreturn = d.pullInt();
long long cursorid = d.pullInt64();
- ss << ns << " cid:" << cursorid;
- if( ntoreturn )
- ss << " ntoreturn:" << ntoreturn;
+ curop.debug().ns = ns;
+ curop.debug().ntoreturn = ntoreturn;
+ curop.debug().cursorid = cursorid;
time_t start = 0;
int pass = 0;
@@ -523,7 +526,13 @@ namespace mongo {
Client::Context ctx(ns);
msgdata = processGetMore(ns, ntoreturn, cursorid, curop, pass, exhaust);
}
- catch ( GetMoreWaitException& ) {
+ catch ( AssertionException& e ) {
+ exhaust = false;
+ curop.debug().exceptionInfo = e.getInfo();
+ msgdata = emptyMoreResult(cursorid);
+ ok = false;
+ }
+ if (msgdata == 0) {
exhaust = false;
massert(13073, "shutting down", !inShutdown() );
if( pass == 0 ) {
@@ -544,64 +553,89 @@ namespace mongo {
sleepmillis(2);
continue;
}
- catch ( AssertionException& e ) {
- exhaust = false;
- ss << " exception " << e.toString();
- msgdata = emptyMoreResult(cursorid);
- ok = false;
- }
break;
};
Message *resp = new Message();
resp->setData(msgdata, true);
- ss << " bytes:" << resp->header()->dataLen();
- ss << " nreturned:" << msgdata->nReturned;
+ curop.debug().responseLength = resp->header()->dataLen();
+ curop.debug().nreturned = msgdata->nReturned;
+
dbresponse.response = resp;
dbresponse.responseTo = m.header()->id;
+
if( exhaust ) {
- ss << " exhaust ";
+ curop.debug().exhaust = true;
dbresponse.exhaust = ns;
}
+
return ok;
}
+ void checkAndInsert(const char *ns, /*modifies*/BSONObj& js) {
+ uassert( 10059 , "object to insert too large", js.objsize() <= BSONObjMaxUserSize);
+ {
+ // check no $ modifiers. note we only check top level. (scanning deep would be quite expensive)
+ BSONObjIterator i( js );
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ uassert( 13511 , "document to insert can't have $ fields" , e.fieldName()[0] != '$' );
+ }
+ }
+ theDataFileMgr.insertWithObjMod(ns, js, false); // js may be modified in the call to add an _id field.
+ logOp("i", ns, js);
+ }
+
+ NOINLINE_DECL void insertMulti(DbMessage& d, const char *ns, const BSONObj& _js) {
+ const bool keepGoing = d.reservedField() & InsertOption_ContinueOnError;
+ int n = 0;
+ BSONObj js(_js);
+ while( 1 ) {
+ try {
+ checkAndInsert(ns, js);
+ ++n;
+ getDur().commitIfNeeded();
+ } catch (const UserException&) {
+ if (!keepGoing || !d.moreJSObjs()){
+ globalOpCounters.incInsertInWriteLock(n);
+ throw;
+ }
+ // otherwise ignore and keep going
+ }
+ if( !d.moreJSObjs() )
+ break;
+ js = d.nextJsObj(); // TODO: refactor to do objcheck outside of writelock
+ }
+ }
+
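checkAndInsert() above rejects documents whose top-level field names start with '$' (deeper levels are deliberately not scanned), and insertMulti() implements ContinueOnError batches: a failed document either aborts the batch, after counting the inserts that did succeed, or is skipped when the flag is set. A standalone sketch of the same top-level check, assuming only the headers and library under bson/ rather than server internals:

    #include <iostream>
    #include "bson/bson.h"

    // returns true if any top-level field name begins with '$' (the condition checkAndInsert rejects)
    bool hasTopLevelDollarField(const mongo::BSONObj& doc) {
        mongo::BSONObjIterator it(doc);
        while (it.more()) {
            if (it.next().fieldName()[0] == '$')
                return true;
        }
        return false;
    }

    int main() {
        mongo::BSONObj ok  = BSON("x" << 1);
        mongo::BSONObj bad = BSON("$set" << BSON("x" << 1));
        std::cout << hasTopLevelDollarField(ok) << " " << hasTopLevelDollarField(bad) << std::endl;  // prints: 0 1
    }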
void receivedInsert(Message& m, CurOp& op) {
DbMessage d(m);
const char *ns = d.getns();
- assert(*ns);
- uassert( 10058 , "not master", isMasterNs( ns ) );
- op.debug().str << ns;
+ op.debug().ns = ns;
+
+ if( !d.moreJSObjs() ) {
+ // strange. should we complain?
+ return;
+ }
+ BSONObj js = d.nextJsObj();
writelock lk(ns);
+ // writelock is used to synchronize stepdowns w/ writes
+ uassert( 10058 , "not master", isMasterNs(ns) );
+
if ( handlePossibleShardedMessage( m , 0 ) )
return;
Client::Context ctx(ns);
- int n = 0;
- while ( d.moreJSObjs() ) {
- BSONObj js = d.nextJsObj();
- uassert( 10059 , "object to insert too large", js.objsize() <= BSONObjMaxUserSize);
- {
- // check no $ modifiers
- BSONObjIterator i( js );
- while ( i.more() ) {
- BSONElement e = i.next();
- uassert( 13511 , "object to insert can't have $ modifiers" , e.fieldName()[0] != '$' );
- }
- }
-
- theDataFileMgr.insertWithObjMod(ns, js, false);
- logOp("i", ns, js);
-
- if( ++n % 4 == 0 ) {
- // if we are inserting quite a few, we may need to commit along the way
- getDur().commitIfNeeded();
- }
+ if( d.moreJSObjs() ) {
+ insertMulti(d, ns, js);
+ return;
}
- globalOpCounters.incInsertInWriteLock(n);
+
+ checkAndInsert(ns, js);
+ globalOpCounters.incInsertInWriteLock(1);
}
void getDatabaseNames( vector< string > &names , const string& usePath ) {
@@ -648,7 +682,7 @@ namespace mongo {
if ( lastError._get() )
lastError.startRequest( toSend, lastError._get() );
DbResponse dbResponse;
- assembleResponse( toSend, dbResponse );
+ assembleResponse( toSend, dbResponse , _clientHost );
assert( dbResponse.response );
dbResponse.response->concat(); // can get rid of this if we make response handling smarter
response = *dbResponse.response;
@@ -656,11 +690,11 @@ namespace mongo {
return true;
}
- void DBDirectClient::say( Message &toSend ) {
+ void DBDirectClient::say( Message &toSend, bool isRetry ) {
if ( lastError._get() )
lastError.startRequest( toSend, lastError._get() );
DbResponse dbResponse;
- assembleResponse( toSend, dbResponse );
+ assembleResponse( toSend, dbResponse , _clientHost );
getDur().commitIfNeeded();
}
@@ -678,6 +712,8 @@ namespace mongo {
ClientCursor::erase( id );
}
+ HostAndPort DBDirectClient::_clientHost = HostAndPort( "0.0.0.0" , 0 );
+
unsigned long long DBDirectClient::count(const string &ns, const BSONObj& query, int options, int limit, int skip ) {
readlock lk( ns );
string errmsg;
@@ -749,7 +785,7 @@ namespace mongo {
}
if( --n <= 0 ) {
log() << "shutdown: couldn't acquire write lock, aborting" << endl;
- abort();
+ mongoAbort("couldn't acquire write lock");
}
log() << "shutdown: waiting for write lock..." << endl;
}
@@ -760,11 +796,10 @@ namespace mongo {
log() << "shutdown: closing all files..." << endl;
stringstream ss3;
MemoryMappedFile::closeAllFiles( ss3 );
- rawOut( ss3.str() );
+ log() << ss3.str() << endl;
if( cmdLine.dur ) {
- log() << "shutdown: journalCleanup..." << endl;
- dur::journalCleanup();
+ dur::journalCleanup(true);
}
#if !defined(__sunos__)
@@ -773,9 +808,9 @@ namespace mongo {
/* This ought to be an unlink(), but Eliot says the last
time that was attempted, there was a race condition
with acquirePathLock(). */
-#ifdef WIN32
+#ifdef _WIN32
if( _chsize( lockFile , 0 ) )
- log() << "couldn't remove fs lock " << getLastError() << endl;
+ log() << "couldn't remove fs lock " << WSAGetLastError() << endl;
CloseHandle(lockFileHandle);
#else
if( ftruncate( lockFile , 0 ) )
@@ -786,8 +821,17 @@ namespace mongo {
#endif
}
+ void exitCleanly( ExitCode code ) {
+ killCurrentOp.killAll();
+ {
+ dblock lk;
+ log() << "now exiting" << endl;
+ dbexit( code );
+ }
+ }
+
/* not using log() herein in case we are already locked */
- void dbexit( ExitCode rc, const char *why, bool tryToGetLock ) {
+ NOINLINE_DECL void dbexit( ExitCode rc, const char *why, bool tryToGetLock ) {
auto_ptr<writelocktry> wlt;
if ( tryToGetLock ) {
@@ -840,14 +884,14 @@ namespace mongo {
ss << getpid() << endl;
string s = ss.str();
const char * data = s.c_str();
-#ifdef WIN32
+#ifdef _WIN32
assert ( _write( fd, data, strlen( data ) ) );
#else
assert ( write( fd, data, strlen( data ) ) );
#endif
}
- void acquirePathLock() {
+ void acquirePathLock(bool doingRepair) {
string name = ( boost::filesystem::path( dbpath ) / "mongod.lock" ).native_file_string();
bool oldFile = false;
@@ -856,7 +900,7 @@ namespace mongo {
oldFile = true;
}
-#ifdef WIN32
+#ifdef _WIN32
lockFileHandle = CreateFileA( name.c_str(), GENERIC_READ | GENERIC_WRITE,
0 /* do not allow anyone else access */, NULL,
OPEN_ALWAYS /* success if fh can open */, 0, NULL );
@@ -867,13 +911,15 @@ namespace mongo {
FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM,
NULL, code, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
(LPSTR)&msg, 0, NULL);
- uasserted( 13627 , msg );
+ string m = msg;
+ str::stripTrailing(m, "\r\n");
+ uasserted( 13627 , str::stream() << "Unable to create/open lock file: " << name << ' ' << m << " Is a mongod instance already running?" );
}
lockFile = _open_osfhandle((intptr_t)lockFileHandle, 0);
#else
lockFile = open( name.c_str(), O_RDWR | O_CREAT , S_IRWXU | S_IRWXG | S_IRWXO );
if( lockFile <= 0 ) {
- uasserted( 10309 , str::stream() << "Unable to create / open lock file for lockfilepath: " << name << ' ' << errnoWithDescription());
+ uasserted( 10309 , str::stream() << "Unable to create/open lock file: " << name << ' ' << errnoWithDescription() << " Is a mongod instance already running?" );
}
if (flock( lockFile, LOCK_EX | LOCK_NB ) != 0) {
close ( lockFile );
@@ -913,17 +959,18 @@ namespace mongo {
}
}
else {
- errmsg = str::stream()
- << "************** \n"
- << "old lock file: " << name << ". probably means unclean shutdown\n"
- << "recommend removing file and running --repair\n"
- << "see: http://dochub.mongodb.org/core/repair for more information\n"
- << "*************";
+ if (!dur::haveJournalFiles() && !doingRepair) {
+ errmsg = str::stream()
+ << "************** \n"
+ << "Unclean shutdown detected.\n"
+ << "Please visit http://dochub.mongodb.org/core/repair for recovery instructions.\n"
+ << "*************";
+ }
}
if (!errmsg.empty()) {
cout << errmsg << endl;
-#ifdef WIN32
+#ifdef _WIN32
CloseHandle( lockFileHandle );
#else
close ( lockFile );
@@ -936,14 +983,13 @@ namespace mongo {
// Not related to lock file, but this is where we handle unclean shutdown
if( !cmdLine.dur && dur::haveJournalFiles() ) {
cout << "**************" << endl;
- cout << "Error: journal files are present in journal directory, yet starting without --dur enabled." << endl;
+ cout << "Error: journal files are present in journal directory, yet starting without --journal enabled." << endl;
cout << "It is recommended that you start with journaling enabled so that recovery may occur." << endl;
- cout << "Alternatively (not recommended), you can backup everything, then delete the journal files, and run --repair" << endl;
cout << "**************" << endl;
- uasserted(13597, "can't start without --dur enabled when journal/ files are present");
+ uasserted(13597, "can't start without --journal enabled when journal/ files are present");
}
-#ifdef WIN32
+#ifdef _WIN32
uassert( 13625, "Unable to truncate lock file", _chsize(lockFile, 0) == 0);
writePid( lockFile );
_commit( lockFile );
@@ -951,20 +997,21 @@ namespace mongo {
uassert( 13342, "Unable to truncate lock file", ftruncate(lockFile, 0) == 0);
writePid( lockFile );
fsync( lockFile );
+ flushMyDirectory(name);
#endif
}
#else
- void acquirePathLock() {
+ void acquirePathLock(bool) {
// TODO - it is very bad that the code above does not run here.
// Not related to lock file, but this is where we handle unclean shutdown
if( !cmdLine.dur && dur::haveJournalFiles() ) {
cout << "**************" << endl;
- cout << "Error: journal files are present in journal directory, yet starting without --dur enabled." << endl;
+ cout << "Error: journal files are present in journal directory, yet starting without --journal enabled." << endl;
cout << "It is recommended that you start with journaling enabled so that recovery may occur." << endl;
cout << "Alternatively (not recommended), you can backup everything, then delete the journal files, and run --repair" << endl;
cout << "**************" << endl;
- uasserted(13618, "can't start without --dur enabled when journal/ files are present");
+ uasserted(13618, "can't start without --journal enabled when journal/ files are present");
}
}
#endif
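acquirePathLock() guards the data files by taking an exclusive, non-blocking advisory lock on dbpath/mongod.lock; a second mongod pointed at the same dbpath fails immediately instead of corrupting files, and with doingRepair or journal files present an old lock file is no longer treated as fatal. A reduced sketch of the same flock()-based single-instance guard, POSIX only, with the error handling collapsed to a message:

    #include <cstdio>
    #include <cstdlib>
    #include <fcntl.h>
    #include <sys/file.h>
    #include <unistd.h>

    // take (or die trying) an exclusive advisory lock on a lock file
    int acquireLockOrDie(const char *path) {
        int fd = open(path, O_RDWR | O_CREAT, 0644);
        if (fd < 0) { perror("open lock file"); exit(1); }
        if (flock(fd, LOCK_EX | LOCK_NB) != 0) {          // non-blocking: fail fast if already held
            fprintf(stderr, "another instance appears to be running\n");
            exit(1);
        }
        return fd;  // keep the descriptor open; the lock is released when the process exits
    }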
diff --git a/db/instance.h b/db/instance.h
index 2516aec..422c77d 100644
--- a/db/instance.h
+++ b/db/instance.h
@@ -103,7 +103,7 @@ namespace mongo {
~DbResponse() { delete response; }
};
- void assembleResponse( Message &m, DbResponse &dbresponse, const SockAddr &client = unknownAddress );
+ void assembleResponse( Message &m, DbResponse &dbresponse, const HostAndPort &client );
void getDatabaseNames( vector< string > &names , const string& usePath = dbpath );
@@ -130,7 +130,7 @@ namespace mongo {
return "localhost"; // TODO: should this have the port?
}
virtual bool call( Message &toSend, Message &response, bool assertOk=true , string * actualServer = 0 );
- virtual void say( Message &toSend );
+ virtual void say( Message &toSend, bool isRetry = false );
virtual void sayPiggyBack( Message &toSend ) {
// don't need to piggy back when connected locally
return say( toSend );
@@ -145,13 +145,19 @@ namespace mongo {
virtual unsigned long long count(const string &ns, const BSONObj& query = BSONObj(), int options=0, int limit=0, int skip=0 );
virtual ConnectionString::ConnectionType type() const { return ConnectionString::MASTER; }
+
+ double getSoTimeout() const { return 0; }
+
+ virtual bool lazySupported() const { return true; }
+ private:
+ static HostAndPort _clientHost;
};
extern int lockFile;
-#ifdef WIN32
+#ifdef _WIN32
extern HANDLE lockFileHandle;
#endif
- void acquirePathLock();
+ void acquirePathLock(bool doingRepair=false); // if doingRepair=true don't consider unclean shutdown an error
void maybeCreatePidFile();
} // namespace mongo
diff --git a/db/introspect.cpp b/db/introspect.cpp
index cee0da8..7e1d19c 100644
--- a/db/introspect.cpp
+++ b/db/introspect.cpp
@@ -23,17 +23,66 @@
#include "pdfile.h"
#include "jsobj.h"
#include "pdfile.h"
+#include "curop.h"
namespace mongo {
- void profile( const char *str, int millis) {
- BSONObjBuilder b;
+ BufBuilder profileBufBuilder; // reused, instead of allocated every time - avoids a malloc/free cycle
+
+ void profile( const Client& c , CurOp& currentOp ) {
+ assertInWriteLock();
+
+ Database *db = c.database();
+ DEV assert( db );
+ const char *ns = db->profileName.c_str();
+
+ // build object
+ profileBufBuilder.reset();
+ BSONObjBuilder b(profileBufBuilder);
b.appendDate("ts", jsTime());
- b.append("info", str);
- b.append("millis", (double) millis);
+ currentOp.debug().append( currentOp , b );
+
+ b.append("client", c.clientAddress() );
+
+ if ( c.getAuthenticationInfo() )
+ b.append( "user" , c.getAuthenticationInfo()->getUser( nsToDatabase( ns ) ) );
+
BSONObj p = b.done();
- theDataFileMgr.insert(cc().database()->profileName.c_str(),
- p.objdata(), p.objsize(), true);
+
+ if (p.objsize() > 100*1024){
+ string small = p.toString(/*isArray*/false, /*full*/false);
+
+ warning() << "can't add full line to system.profile: " << small;
+
+ // rebuild with limited info
+ BSONObjBuilder b(profileBufBuilder);
+ b.appendDate("ts", jsTime());
+ b.append("client", c.clientAddress() );
+ if ( c.getAuthenticationInfo() )
+ b.append( "user" , c.getAuthenticationInfo()->getUser( nsToDatabase( ns ) ) );
+
+ b.append("err", "profile line too large (max is 100KB)");
+ if (small.size() < 100*1024){ // should be much smaller but if not don't break anything
+ b.append("abbreviated", small);
+ }
+
+ p = b.done();
+ }
+
+ // write: not replicated
+ NamespaceDetails *d = db->namespaceIndex.details(ns);
+ if( d ) {
+ int len = p.objsize();
+ Record *r = theDataFileMgr.fast_oplog_insert(d, ns, len);
+ memcpy(getDur().writingPtr(r->data, len), p.objdata(), len);
+ }
+ else {
+ static time_t last;
+ if( time(0) > last+10 ) {
+ log() << "profile: warning ns " << ns << " does not exist" << endl;
+ last = time(0);
+ }
+ }
}
} // namespace mongo
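The rewritten profile() above reuses one module-level BufBuilder, appends the per-operation debug fields plus client and user, and refuses to store documents over 100KB, writing an abbreviated stub instead. A sketch of that size-capping step in isolation, assuming only the bson headers and library; capProfileDoc and its message are illustrative names, and the 100*1024 limit mirrors the code above:

    #include <string>
    #include "bson/bson.h"

    // fall back to an abbreviated stub when a profile document exceeds the size limit
    mongo::BSONObj capProfileDoc(const mongo::BSONObj& full, int maxBytes = 100 * 1024) {
        if (full.objsize() <= maxBytes)
            return full;                                   // small enough: store as-is
        std::string small = full.toString(false, false);   // compact rendering of the oversized doc
        mongo::BSONObjBuilder b;
        b.append("err", "profile line too large");
        if ((int)small.size() < maxBytes)                  // keep an abbreviated copy only if it fits
            b.append("abbreviated", small);
        return b.obj();
    }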
diff --git a/db/introspect.h b/db/introspect.h
index 3f6ef60..209eeac 100644
--- a/db/introspect.h
+++ b/db/introspect.h
@@ -29,7 +29,6 @@ namespace mongo {
do when database->profile is set
*/
- void profile(const char *str,
- int millis);
+ void profile( const Client& c , CurOp& currentOp );
} // namespace mongo
diff --git a/db/jsobj.cpp b/db/jsobj.cpp
index 25ab8a8..dcb7744 100644
--- a/db/jsobj.cpp
+++ b/db/jsobj.cpp
@@ -27,6 +27,7 @@
#include <limits>
#include "../util/unittest.h"
#include "../util/embedded_builder.h"
+#include "../util/stringutils.h"
#include "json.h"
#include "jsobjmanipulator.h"
#include "../util/optime.h"
@@ -44,7 +45,7 @@ BOOST_STATIC_ASSERT( sizeof(mongo::OID) == 12 );
namespace mongo {
- BSONElement nullElement;
+ BSONElement eooElement;
GENOIDLabeler GENOID;
@@ -53,52 +54,11 @@ namespace mongo {
MinKeyLabeler MINKEY;
MaxKeyLabeler MAXKEY;
- string escape( string s , bool escape_slash=false) {
- StringBuilder ret;
- for ( string::iterator i = s.begin(); i != s.end(); ++i ) {
- switch ( *i ) {
- case '"':
- ret << "\\\"";
- break;
- case '\\':
- ret << "\\\\";
- break;
- case '/':
- ret << (escape_slash ? "\\/" : "/");
- break;
- case '\b':
- ret << "\\b";
- break;
- case '\f':
- ret << "\\f";
- break;
- case '\n':
- ret << "\\n";
- break;
- case '\r':
- ret << "\\r";
- break;
- case '\t':
- ret << "\\t";
- break;
- default:
- if ( *i >= 0 && *i <= 0x1f ) {
- //TODO: these should be utf16 code-units not bytes
- char c = *i;
- ret << "\\u00" << toHexLower(&c, 1);
- }
- else {
- ret << *i;
- }
- }
- }
- return ret.str();
- }
-
- string BSONElement::jsonString( JsonStringFormat format, bool includeFieldNames, int pretty ) const {
+ // needs to move to bson/, but it has a dependency on base64, so move that to bson/util/ first.
+ inline string BSONElement::jsonString( JsonStringFormat format, bool includeFieldNames, int pretty ) const {
BSONType t = type();
if ( t == Undefined )
- return "";
+ return "undefined";
stringstream s;
if ( includeFieldNames )
@@ -142,19 +102,28 @@ namespace mongo {
s << "[ ";
BSONObjIterator i( embeddedObject() );
BSONElement e = i.next();
- if ( !e.eoo() )
+ if ( !e.eoo() ) {
+ int count = 0;
while ( 1 ) {
if( pretty ) {
s << '\n';
for( int x = 0; x < pretty; x++ )
s << " ";
}
- s << e.jsonString( format, false, pretty?pretty+1:0 );
- e = i.next();
+
+ if (strtol(e.fieldName(), 0, 10) > count) {
+ s << "undefined";
+ }
+ else {
+ s << e.jsonString( format, false, pretty?pretty+1:0 );
+ e = i.next();
+ }
+ count++;
if ( e.eoo() )
break;
s << ", ";
}
+ }
s << " ]";
break;
}
@@ -250,7 +219,6 @@ namespace mongo {
}
}
-
case Code:
s << _asCode();
break;
@@ -328,124 +296,6 @@ namespace mongo {
return def;
}
- /* wo = "well ordered" */
- int BSONElement::woCompare( const BSONElement &e,
- bool considerFieldName ) const {
- int lt = (int) canonicalType();
- int rt = (int) e.canonicalType();
- int x = lt - rt;
- if( x != 0 && (!isNumber() || !e.isNumber()) )
- return x;
- if ( considerFieldName ) {
- x = strcmp(fieldName(), e.fieldName());
- if ( x != 0 )
- return x;
- }
- x = compareElementValues(*this, e);
- return x;
- }
-
- /* must be same type when called, unless both sides are #s
- */
- int compareElementValues(const BSONElement& l, const BSONElement& r) {
- int f;
- double x;
-
- switch ( l.type() ) {
- case EOO:
- case Undefined:
- case jstNULL:
- case MaxKey:
- case MinKey:
- f = l.canonicalType() - r.canonicalType();
- if ( f<0 ) return -1;
- return f==0 ? 0 : 1;
- case Bool:
- return *l.value() - *r.value();
- case Timestamp:
- case Date:
- if ( l.date() < r.date() )
- return -1;
- return l.date() == r.date() ? 0 : 1;
- case NumberLong:
- if( r.type() == NumberLong ) {
- long long L = l._numberLong();
- long long R = r._numberLong();
- if( L < R ) return -1;
- if( L == R ) return 0;
- return 1;
- }
- // else fall through
- case NumberInt:
- case NumberDouble: {
- double left = l.number();
- double right = r.number();
- bool lNan = !( left <= numeric_limits< double >::max() &&
- left >= -numeric_limits< double >::max() );
- bool rNan = !( right <= numeric_limits< double >::max() &&
- right >= -numeric_limits< double >::max() );
- if ( lNan ) {
- if ( rNan ) {
- return 0;
- }
- else {
- return -1;
- }
- }
- else if ( rNan ) {
- return 1;
- }
- x = left - right;
- if ( x < 0 ) return -1;
- return x == 0 ? 0 : 1;
- }
- case jstOID:
- return memcmp(l.value(), r.value(), 12);
- case Code:
- case Symbol:
- case String:
- /* todo: utf version */
- return strcmp(l.valuestr(), r.valuestr());
- case Object:
- case Array:
- return l.embeddedObject().woCompare( r.embeddedObject() );
- case DBRef: {
- int lsz = l.valuesize();
- int rsz = r.valuesize();
- if ( lsz - rsz != 0 ) return lsz - rsz;
- return memcmp(l.value(), r.value(), lsz);
- }
- case BinData: {
- int lsz = l.objsize(); // our bin data size in bytes, not including the subtype byte
- int rsz = r.objsize();
- if ( lsz - rsz != 0 ) return lsz - rsz;
- return memcmp(l.value()+4, r.value()+4, lsz+1);
- }
- case RegEx: {
- int c = strcmp(l.regex(), r.regex());
- if ( c )
- return c;
- return strcmp(l.regexFlags(), r.regexFlags());
- }
- case CodeWScope : {
- f = l.canonicalType() - r.canonicalType();
- if ( f )
- return f;
- f = strcmp( l.codeWScopeCode() , r.codeWScopeCode() );
- if ( f )
- return f;
- f = strcmp( l.codeWScopeScopeData() , r.codeWScopeScopeData() );
- if ( f )
- return f;
- return 0;
- }
- default:
- out() << "compareElementValues: bad type " << (int) l.type() << endl;
- assert(false);
- }
- return -1;
- }
-
/* Matcher --------------------------------------*/
// If the element is something like:
@@ -658,6 +508,12 @@ namespace mongo {
}
BSONObj staticNull = fromjson( "{'':null}" );
+ BSONObj makeUndefined() {
+ BSONObjBuilder b;
+ b.appendUndefined( "" );
+ return b.obj();
+ }
+ BSONObj staticUndefined = makeUndefined();
/* well ordered compare */
int BSONObj::woSortOrder(const BSONObj& other, const BSONObj& sortKey , bool useDotted ) const {
@@ -690,17 +546,19 @@ namespace mongo {
return -1;
}
- void BSONObj::getFieldsDotted(const StringData& name, BSONElementSet &ret ) const {
- BSONElement e = getField( name );
+ template <typename BSONElementColl>
+ void _getFieldsDotted( const BSONObj* obj, const StringData& name, BSONElementColl &ret, bool expandLastArray ) {
+ BSONElement e = obj->getField( name );
+
if ( e.eoo() ) {
const char *p = strchr(name.data(), '.');
if ( p ) {
string left(name.data(), p-name.data());
const char* next = p+1;
- BSONElement e = getField( left.c_str() );
+ BSONElement e = obj->getField( left.c_str() );
if (e.type() == Object) {
- e.embeddedObject().getFieldsDotted(next, ret);
+ e.embeddedObject().getFieldsDotted(next, ret, expandLastArray );
}
else if (e.type() == Array) {
bool allDigits = false;
@@ -711,14 +569,14 @@ namespace mongo {
allDigits = (*temp == '.' || *temp == '\0');
}
if (allDigits) {
- e.embeddedObject().getFieldsDotted(next, ret);
+ e.embeddedObject().getFieldsDotted(next, ret, expandLastArray );
}
else {
BSONObjIterator i(e.embeddedObject());
while ( i.more() ) {
BSONElement e2 = i.next();
if (e2.type() == Object || e2.type() == Array)
- e2.embeddedObject().getFieldsDotted(next, ret);
+ e2.embeddedObject().getFieldsDotted(next, ret, expandLastArray );
}
}
}
@@ -728,7 +586,7 @@ namespace mongo {
}
}
else {
- if (e.type() == Array) {
+ if (e.type() == Array && expandLastArray) {
BSONObjIterator i(e.embeddedObject());
while ( i.more() )
ret.insert(i.next());
@@ -739,9 +597,16 @@ namespace mongo {
}
}
+ void BSONObj::getFieldsDotted(const StringData& name, BSONElementSet &ret, bool expandLastArray ) const {
+ _getFieldsDotted( this, name, ret, expandLastArray );
+ }
+ void BSONObj::getFieldsDotted(const StringData& name, BSONElementMSet &ret, bool expandLastArray ) const {
+ _getFieldsDotted( this, name, ret, expandLastArray );
+ }
+
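The templated _getFieldsDotted() walks a dotted path, recursing through embedded objects and arrays, and the new expandLastArray flag controls whether an array at the end of the path is inserted element-by-element or as a single array value; the BSONElementMSet overload additionally keeps duplicates. A small usage sketch against the signature added here, assuming the program links the library that provides these definitions:

    #include <iostream>
    #include "bson/bson.h"

    int main() {
        // { a: [ { b: 1 }, { b: 2 } ] }
        mongo::BSONObj doc = BSON("a" << BSON_ARRAY(BSON("b" << 1) << BSON("b" << 2)));
        mongo::BSONElementSet vals;
        doc.getFieldsDotted("a.b", vals, true);     // expand arrays along and at the end of the path
        std::cout << vals.size() << std::endl;      // 2 -- both embedded 'b' values are collected
    }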
BSONElement BSONObj::getFieldDottedOrArray(const char *&name) const {
const char *p = strchr(name, '.');
-
+
BSONElement sub;
if ( p ) {
@@ -754,13 +619,13 @@ namespace mongo {
}
if ( sub.eoo() )
- return nullElement;
- else if ( sub.type() == Array || name[0] == '\0')
+ return eooElement;
+ else if ( sub.type() == Array || name[0] == '\0' )
return sub;
else if ( sub.type() == Object )
return sub.embeddedObject().getFieldDottedOrArray( name );
else
- return nullElement;
+ return eooElement;
}
/**
@@ -837,21 +702,6 @@ namespace mongo {
return BSONElement();
}
- int BSONObj::getIntField(const char *name) const {
- BSONElement e = getField(name);
- return e.isNumber() ? (int) e.number() : INT_MIN;
- }
-
- bool BSONObj::getBoolField(const char *name) const {
- BSONElement e = getField(name);
- return e.type() == Bool ? e.boolean() : false;
- }
-
- const char * BSONObj::getStringField(const char *name) const {
- BSONElement e = getField(name);
- return e.type() == String ? e.valuestr() : "";
- }
-
/* grab names of all the fields in this object */
int BSONObj::getFieldNames(set<string>& fields) const {
int n = 0;
@@ -897,8 +747,7 @@ namespace mongo {
}
if ( n ) {
- int len;
- init( b.decouple(len), true );
+ *this = b.obj();
}
return n;
@@ -997,22 +846,6 @@ namespace mongo {
}
}
- string BSONObj::hexDump() const {
- stringstream ss;
- const char *d = objdata();
- int size = objsize();
- for( int i = 0; i < size; ++i ) {
- ss.width( 2 );
- ss.fill( '0' );
- ss << hex << (unsigned)(unsigned char)( d[ i ] ) << dec;
- if ( ( d[ i ] >= '0' && d[ i ] <= '9' ) || ( d[ i ] >= 'A' && d[ i ] <= 'z' ) )
- ss << '\'' << d[ i ] << '\'';
- if ( i != size - 1 )
- ss << ' ';
- }
- return ss.str();
- }
-
void nested2dotted(BSONObjBuilder& b, const BSONObj& obj, const string& base) {
BSONObjIterator it(obj);
while (it.more()) {
@@ -1092,7 +925,7 @@ namespace mongo {
c.appendRegex("x", "goo");
BSONObj p = c.done();
- assert( !o.woEqual( p ) );
+ assert( !o.binaryEqual( p ) );
assert( o.woCompare( p ) < 0 );
}
@@ -1197,7 +1030,7 @@ namespace mongo {
BSONObj a = A.done();
BSONObj b = B.done();
BSONObj c = C.done();
- assert( !a.woEqual( b ) ); // comments on operator==
+ assert( !a.binaryEqual( b ) ); // comments on operator==
int cmp = a.woCompare(b);
assert( cmp == 0 );
cmp = a.woCompare(c);
@@ -1215,105 +1048,154 @@ namespace mongo {
Labeler::Label NE( "$ne" );
Labeler::Label SIZE( "$size" );
- void BSONElementManipulator::initTimestamp() {
- massert( 10332 , "Expected CurrentTime type", _element.type() == Timestamp );
- unsigned long long &timestamp = *( reinterpret_cast< unsigned long long* >( value() ) );
- if ( timestamp == 0 )
- timestamp = OpTime::now().asDate();
- }
-
void BSONObjBuilder::appendMinForType( const StringData& fieldName , int t ) {
switch ( t ) {
- case MinKey: appendMinKey( fieldName ); return;
- case MaxKey: appendMinKey( fieldName ); return;
+
+ // Shared canonical types
case NumberInt:
case NumberDouble:
case NumberLong:
append( fieldName , - numeric_limits<double>::max() ); return;
+ case Symbol:
+ case String:
+ append( fieldName , "" ); return;
+ case Date:
+ // min varies with V0 and V1 indexes, so we go one type lower.
+ appendBool(fieldName, true);
+ //appendDate( fieldName , numeric_limits<long long>::min() );
+ return;
+ case Timestamp: // TODO integrate with Date SERVER-3304
+ appendTimestamp( fieldName , 0 ); return;
+ case Undefined: // shared with EOO
+ appendUndefined( fieldName ); return;
+
+ // Separate canonical types
+ case MinKey:
+ appendMinKey( fieldName ); return;
+ case MaxKey:
+ appendMaxKey( fieldName ); return;
case jstOID: {
OID o;
memset(&o, 0, sizeof(o));
appendOID( fieldName , &o);
return;
}
- case Bool: appendBool( fieldName , false); return;
- case Date: appendDate( fieldName , 0); return;
- case jstNULL: appendNull( fieldName ); return;
- case Symbol:
- case String: append( fieldName , "" ); return;
- case Object: append( fieldName , BSONObj() ); return;
+ case Bool:
+ appendBool( fieldName , false); return;
+ case jstNULL:
+ appendNull( fieldName ); return;
+ case Object:
+ append( fieldName , BSONObj() ); return;
case Array:
appendArray( fieldName , BSONObj() ); return;
case BinData:
- appendBinData( fieldName , 0 , Function , (const char *) 0 ); return;
- case Undefined:
- appendUndefined( fieldName ); return;
- case RegEx: appendRegex( fieldName , "" ); return;
+ appendBinData( fieldName , 0 , BinDataGeneral , (const char *) 0 ); return;
+ case RegEx:
+ appendRegex( fieldName , "" ); return;
case DBRef: {
OID o;
memset(&o, 0, sizeof(o));
appendDBRef( fieldName , "" , o );
return;
}
- case Code: appendCode( fieldName , "" ); return;
- case CodeWScope: appendCodeWScope( fieldName , "" , BSONObj() ); return;
- case Timestamp: appendTimestamp( fieldName , 0); return;
-
+ case Code:
+ appendCode( fieldName , "" ); return;
+ case CodeWScope:
+ appendCodeWScope( fieldName , "" , BSONObj() ); return;
};
- log() << "type not support for appendMinElementForType: " << t << endl;
+ log() << "type not supported for appendMinElementForType: " << t << endl;
uassert( 10061 , "type not supported for appendMinElementForType" , false );
}
void BSONObjBuilder::appendMaxForType( const StringData& fieldName , int t ) {
switch ( t ) {
- case MinKey: appendMaxKey( fieldName ); break;
- case MaxKey: appendMaxKey( fieldName ); break;
+
+ // Shared canonical types
case NumberInt:
case NumberDouble:
case NumberLong:
- append( fieldName , numeric_limits<double>::max() );
- break;
- case BinData:
- appendMinForType( fieldName , jstOID );
- break;
+ append( fieldName , numeric_limits<double>::max() ); return;
+ case Symbol:
+ case String:
+ appendMinForType( fieldName, Object ); return;
+ case Date:
+ appendDate( fieldName , numeric_limits<long long>::max() ); return;
+ case Timestamp: // TODO integrate with Date SERVER-3304
+ appendTimestamp( fieldName , numeric_limits<unsigned long long>::max() ); return;
+ case Undefined: // shared with EOO
+ appendUndefined( fieldName ); return;
+
+ // Separate canonical types
+ case MinKey:
+ appendMinKey( fieldName ); return;
+ case MaxKey:
+ appendMaxKey( fieldName ); return;
case jstOID: {
OID o;
memset(&o, 0xFF, sizeof(o));
appendOID( fieldName , &o);
- break;
+ return;
}
- case Undefined:
+ case Bool:
+ appendBool( fieldName , true ); return;
case jstNULL:
- appendMinForType( fieldName , NumberInt );
- case Bool: appendBool( fieldName , true); break;
- case Date: appendDate( fieldName , 0xFFFFFFFFFFFFFFFFLL ); break;
- case Symbol:
- case String: append( fieldName , BSONObj() ); break;
+ appendNull( fieldName ); return;
+ case Object:
+ appendMinForType( fieldName, Array ); return;
+ case Array:
+ appendMinForType( fieldName, BinData ); return;
+ case BinData:
+ appendMinForType( fieldName, jstOID ); return;
+ case RegEx:
+ appendMinForType( fieldName, DBRef ); return;
+ case DBRef:
+ appendMinForType( fieldName, Code ); return;
case Code:
+ appendMinForType( fieldName, CodeWScope ); return;
case CodeWScope:
- appendCodeWScope( fieldName , "ZZZ" , BSONObj() ); break;
- case Timestamp:
- appendTimestamp( fieldName , numeric_limits<unsigned long long>::max() ); break;
- default:
- appendMinForType( fieldName , t + 1 );
+ // This upper bound may change if a new bson type is added.
+ appendMinForType( fieldName , MaxKey ); return;
}
+ log() << "type not supported for appendMaxElementForType: " << t << endl;
+ uassert( 14853 , "type not supported for appendMaxElementForType" , false );
+ }
+
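appendMinForType()/appendMaxForType() build the smallest and largest values that sort within a given BSON type, following the canonical-type ordering; for types that share a canonical class, the maximum is simply the minimum of the next class up. A sketch of how such bounds might be used to cover all values of one type during an index scan (boundsForType is an illustrative helper, not an API added by this patch):

    #include <utility>
    #include "bson/bson.h"

    // illustrative helper: [min,max] single-field bound objects covering every value of type t
    std::pair<mongo::BSONObj, mongo::BSONObj> boundsForType(int t) {
        mongo::BSONObjBuilder lo, hi;
        lo.appendMinForType("", t);   // smallest value sorting within type t
        hi.appendMaxForType("", t);   // upper bound; may borrow the minimum of the next canonical type
        return std::make_pair(lo.obj(), hi.obj());
    }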
+ int BSONElementFieldSorter( const void * a , const void * b ) {
+ const char * x = *((const char**)a);
+ const char * y = *((const char**)b);
+ x++; y++;
+ return lexNumCmp( x , y );
}
- const string BSONObjBuilder::numStrs[] = {
- "0", "1", "2", "3", "4", "5", "6", "7", "8", "9",
- "10", "11", "12", "13", "14", "15", "16", "17", "18", "19",
- "20", "21", "22", "23", "24", "25", "26", "27", "28", "29",
- "30", "31", "32", "33", "34", "35", "36", "37", "38", "39",
- "40", "41", "42", "43", "44", "45", "46", "47", "48", "49",
- "50", "51", "52", "53", "54", "55", "56", "57", "58", "59",
- "60", "61", "62", "63", "64", "65", "66", "67", "68", "69",
- "70", "71", "72", "73", "74", "75", "76", "77", "78", "79",
- "80", "81", "82", "83", "84", "85", "86", "87", "88", "89",
- "90", "91", "92", "93", "94", "95", "96", "97", "98", "99",
- };
+ bool fieldsMatch(const BSONObj& lhs, const BSONObj& rhs) {
+ BSONObjIterator l(lhs);
+ BSONObjIterator r(rhs);
+
+ while (l.more() && r.more()){
+ if (strcmp(l.next().fieldName(), r.next().fieldName())) {
+ return false;
+ }
+ }
+
+ return !(l.more() || r.more()); // false if lhs and rhs have diff nFields()
+ }
+
+ BSONObjIteratorSorted::BSONObjIteratorSorted( const BSONObj& o ) {
+ _nfields = o.nFields();
+ _fields = new const char*[_nfields];
+ int x = 0;
+ BSONObjIterator i( o );
+ while ( i.more() ) {
+ _fields[x++] = i.next().rawdata();
+ assert( _fields[x-1] );
+ }
+ assert( x == _nfields );
+ qsort( _fields , _nfields , sizeof(char*) , BSONElementFieldSorter );
+ _cur = 0;
+ }
bool BSONObjBuilder::appendAsNumber( const StringData& fieldName , const string& data ) {
- if ( data.size() == 0 || data == "-")
+ if ( data.size() == 0 || data == "-" || data == ".")
return false;
unsigned int pos=0;
@@ -1355,63 +1237,6 @@ namespace mongo {
catch(bad_lexical_cast &) {
return false;
}
-
- }
-
- void BSONObjBuilder::appendKeys( const BSONObj& keyPattern , const BSONObj& values ) {
- BSONObjIterator i(keyPattern);
- BSONObjIterator j(values);
-
- while ( i.more() && j.more() ) {
- appendAs( j.next() , i.next().fieldName() );
- }
-
- assert( ! i.more() );
- assert( ! j.more() );
- }
-
- int BSONElementFieldSorter( const void * a , const void * b ) {
- const char * x = *((const char**)a);
- const char * y = *((const char**)b);
- x++; y++;
- return lexNumCmp( x , y );
- }
-
- BSONObjIteratorSorted::BSONObjIteratorSorted( const BSONObj& o ) {
- _nfields = o.nFields();
- _fields = new const char*[_nfields];
- int x = 0;
- BSONObjIterator i( o );
- while ( i.more() ) {
- _fields[x++] = i.next().rawdata();
- assert( _fields[x-1] );
- }
- assert( x == _nfields );
- qsort( _fields , _nfields , sizeof(char*) , BSONElementFieldSorter );
- _cur = 0;
- }
-
- /** transform a BSON array into a vector of BSONElements.
- we match array # positions with their vector position, and ignore
- any fields with non-numeric field names.
- */
- vector<BSONElement> BSONElement::Array() const {
- chk(mongo::Array);
- vector<BSONElement> v;
- BSONObjIterator i(Obj());
- while( i.more() ) {
- BSONElement e = i.next();
- const char *f = e.fieldName();
- try {
- unsigned u = stringToNum(f);
- assert( u < 1000000 );
- if( u >= v.size() )
- v.resize(u+1);
- v[u] = e;
- }
- catch(unsigned) { }
- }
- return v;
}
} // namespace mongo
diff --git a/db/json.cpp b/db/json.cpp
index 4a6fad8..b89ff32 100644
--- a/db/json.cpp
+++ b/db/json.cpp
@@ -258,8 +258,12 @@ namespace mongo {
struct numberValue {
numberValue( ObjectBuilder &_b ) : b( _b ) {}
- void operator() ( double d ) const {
- b.back()->append( b.fieldName(), d );
+ void operator() ( const char *start, const char *end ) const {
+ // We re-parse the numeric string here because spirit parsing of strings
+ // to doubles produces different results from strtod in some cases and
+ // we want to use strtod to ensure consistency with other string to
+ // double conversions in our code.
+ b.back()->append( b.fieldName(), strtod( start, 0 ) );
}
ObjectBuilder &b;
};
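numberValue now receives the raw character range matched by the grammar and re-parses it with strtod(), so JSON numbers convert exactly the same way as every other string-to-double path in the code; spirit's own real-number conversion can differ in the last bit for some inputs. A minimal illustration of the conversion step (the surrounding grammar guarantees the text is a well-formed number, so strtod can stop on its own):

    #include <cstdio>
    #include <cstdlib>

    double parseJsonNumber(const char *start, const char *end) {
        (void)end;               // strtod stops at the first non-numeric character on its own
        return strtod(start, 0);
    }

    int main() {
        const char *txt = "3.14159265358979,";   // trailing comma as it would appear mid-document
        printf("%.17g\n", parseJsonNumber(txt, txt + 16));
    }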
@@ -315,6 +319,14 @@ namespace mongo {
ObjectBuilder &b;
};
+ struct undefinedValue {
+ undefinedValue( ObjectBuilder &_b ) : b( _b ) {}
+ void operator() ( const char *start, const char *end ) const {
+ b.back()->appendUndefined( b.fieldName() );
+ }
+ ObjectBuilder &b;
+ };
+
struct dbrefNS {
dbrefNS( ObjectBuilder &_b ) : b( _b ) {}
void operator() ( const char *start, const char *end ) const {
@@ -454,12 +466,13 @@ namespace mongo {
elements = list_p(value, ch_p(',')[arrayNext( self.b )]);
value =
str[ stringEnd( self.b ) ] |
- number |
+ number[ numberValue( self.b ) ] |
integer |
array[ arrayEnd( self.b ) ] |
lexeme_d[ str_p( "true" ) ][ trueValue( self.b ) ] |
lexeme_d[ str_p( "false" ) ][ falseValue( self.b ) ] |
lexeme_d[ str_p( "null" ) ][ nullValue( self.b ) ] |
+ lexeme_d[ str_p( "undefined" ) ][ undefinedValue( self.b ) ] |
singleQuoteStr[ stringEnd( self.b ) ] |
date[ dateEnd( self.b ) ] |
oid[ oidEnd( self.b ) ] |
@@ -501,7 +514,7 @@ namespace mongo {
// real_p accepts numbers with nonsignificant zero prefixes, which
// aren't allowed in JSON. Oh well.
- number = strict_real_p[ numberValue( self.b ) ];
+ number = strict_real_p;
static int_parser<long long, 10, 1, numeric_limits<long long>::digits10 + 1> long_long_p;
integer = long_long_p[ intValue(self.b) ];
diff --git a/db/key.cpp b/db/key.cpp
new file mode 100644
index 0000000..011eea1
--- /dev/null
+++ b/db/key.cpp
@@ -0,0 +1,671 @@
+// @file key.cpp
+
+/**
+* Copyright (C) 2011 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "key.h"
+#include "../util/unittest.h"
+
+namespace mongo {
+
+ extern const Ordering nullOrdering = Ordering::make(BSONObj());
+
+ // KeyBson is for V0 (version #0) indexes
+
+ int oldCompare(const BSONObj& l,const BSONObj& r, const Ordering &o);
+
+ // "old" = pre signed dates & such; i.e. btree V0
+ /* must be same canon type when called */
+ int oldCompareElementValues(const BSONElement& l, const BSONElement& r) {
+ dassert( l.canonicalType() == r.canonicalType() );
+ int f;
+ double x;
+
+ switch ( l.type() ) {
+ case EOO:
+ case Undefined: // EOO and Undefined are same canonicalType
+ case jstNULL:
+ case MaxKey:
+ case MinKey:
+ return 0;
+ case Bool:
+ return *l.value() - *r.value();
+ case Timestamp:
+ case Date:
+ // unsigned dates for old version
+ if ( l.date() < r.date() )
+ return -1;
+ return l.date() == r.date() ? 0 : 1;
+ case NumberLong:
+ if( r.type() == NumberLong ) {
+ long long L = l._numberLong();
+ long long R = r._numberLong();
+ if( L < R ) return -1;
+ if( L == R ) return 0;
+ return 1;
+ }
+ // else fall through
+ case NumberInt:
+ case NumberDouble: {
+ double left = l.number();
+ double right = r.number();
+ bool lNan = !( left <= numeric_limits< double >::max() &&
+ left >= -numeric_limits< double >::max() );
+ bool rNan = !( right <= numeric_limits< double >::max() &&
+ right >= -numeric_limits< double >::max() );
+ if ( lNan ) {
+ if ( rNan ) {
+ return 0;
+ }
+ else {
+ return -1;
+ }
+ }
+ else if ( rNan ) {
+ return 1;
+ }
+ x = left - right;
+ if ( x < 0 ) return -1;
+ return x == 0 ? 0 : 1;
+ }
+ case jstOID:
+ return memcmp(l.value(), r.value(), 12);
+ case Code:
+ case Symbol:
+ case String:
+ // nulls not allowed in the middle of strings in the old version
+ return strcmp(l.valuestr(), r.valuestr());
+ case Object:
+ case Array:
+ return oldCompare(l.embeddedObject(), r.embeddedObject(), nullOrdering);
+ case DBRef: {
+ int lsz = l.valuesize();
+ int rsz = r.valuesize();
+ if ( lsz - rsz != 0 ) return lsz - rsz;
+ return memcmp(l.value(), r.value(), lsz);
+ }
+ case BinData: {
+ int lsz = l.objsize(); // our bin data size in bytes, not including the subtype byte
+ int rsz = r.objsize();
+ if ( lsz - rsz != 0 ) return lsz - rsz;
+ return memcmp(l.value()+4, r.value()+4, lsz+1);
+ }
+ case RegEx: {
+ int c = strcmp(l.regex(), r.regex());
+ if ( c )
+ return c;
+ return strcmp(l.regexFlags(), r.regexFlags());
+ }
+ case CodeWScope : {
+ f = l.canonicalType() - r.canonicalType();
+ if ( f )
+ return f;
+ f = strcmp( l.codeWScopeCode() , r.codeWScopeCode() );
+ if ( f )
+ return f;
+ f = strcmp( l.codeWScopeScopeData() , r.codeWScopeScopeData() );
+ if ( f )
+ return f;
+ return 0;
+ }
+ default:
+ out() << "oldCompareElementValues: bad type " << (int) l.type() << endl;
+ assert(false);
+ }
+ return -1;
+ }
+
+ int oldElemCompare(const BSONElement&l , const BSONElement& r) {
+ int lt = (int) l.canonicalType();
+ int rt = (int) r.canonicalType();
+ int x = lt - rt;
+ if( x )
+ return x;
+ return oldCompareElementValues(l, r);
+ }
+
+ // pre signed dates & such
+ int oldCompare(const BSONObj& l,const BSONObj& r, const Ordering &o) {
+ BSONObjIterator i(l);
+ BSONObjIterator j(r);
+ unsigned mask = 1;
+ while ( 1 ) {
+ // so far, equal...
+
+ BSONElement l = i.next();
+ BSONElement r = j.next();
+ if ( l.eoo() )
+ return r.eoo() ? 0 : -1;
+ if ( r.eoo() )
+ return 1;
+
+ int x;
+ {
+ x = oldElemCompare(l, r);
+ if( o.descending(mask) )
+ x = -x;
+ }
+ if ( x != 0 )
+ return x;
+ mask <<= 1;
+ }
+ return -1;
+ }
+
+ /* old style compares:
+ - dates are unsigned
+ - strings may not contain embedded nulls
+ */
+ int KeyBson::woCompare(const KeyBson& r, const Ordering &o) const {
+ return oldCompare(_o, r._o, o);
+ }
+
+ // woEqual could be made faster than woCompare but this is for backward compatibility so not worth a big effort
+ bool KeyBson::woEqual(const KeyBson& r) const {
+ return oldCompare(_o, r._o, nullOrdering) == 0;
+ }
+
+ // [ ][HASMORE][x][y][canontype_4bits]
+ enum CanonicalsEtc {
+ cminkey=1,
+ cnull=2,
+ cdouble=4,
+ cstring=6,
+ cbindata=7,
+ coid=8,
+ cfalse=10,
+ ctrue=11,
+ cdate=12,
+ cmaxkey=14,
+ cCANONTYPEMASK = 0xf,
+ cY = 0x10,
+ cint = cY | cdouble,
+ cX = 0x20,
+ clong = cX | cdouble,
+ cHASMORE = 0x40,
+ cNOTUSED = 0x80 // but see IsBSON sentinel - this bit not usable without great care
+ };
+
+ // bindata bson type
+ const unsigned BinDataLenMask = 0xf0; // lengths are powers of 2 of this value
+ const unsigned BinDataTypeMask = 0x0f; // 0-7 as you would expect, 8-15 are 128+value. see BinDataType.
+ const int BinDataLenMax = 32;
+ const int BinDataLengthToCode[] = {
+ 0x00, 0x10, 0x20, 0x30, 0x40, 0x50, 0x60, 0x70,
+ 0x80, -1/*9*/, 0x90/*10*/, -1/*11*/, 0xa0/*12*/, -1/*13*/, 0xb0/*14*/, -1/*15*/,
+ 0xc0/*16*/, -1, -1, -1, 0xd0/*20*/, -1, -1, -1,
+ 0xe0/*24*/, -1, -1, -1, -1, -1, -1, -1,
+ 0xf0/*32*/
+ };
+ const int BinDataCodeToLength[] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 20, 24, 32
+ };
+
+ int binDataCodeToLength(int codeByte) {
+ return BinDataCodeToLength[codeByte >> 4];
+ }
+
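In compact format every key element is one header byte -- the 4-bit canonical type plus the cHASMORE continuation flag -- followed by a type-specific payload: nothing for null, booleans, MinKey and MaxKey; eight bytes for numbers (ints and longs are widened to doubles) and dates; twelve for an OID; a length byte plus the characters for short strings; and a combined length/subtype byte plus the bytes for small BinData. A hand-worked size example, computed from the tables above rather than produced by the code:

    key value ( "ab", 3 ):
      "ab" -> 1 header (cstring|cHASMORE) + 1 length byte + 2 characters  = 4 bytes
      3    -> 1 header (cint)             + 8 bytes (stored as a double)  = 9 bytes
      compact KeyV1 total: 13 bytes
      the same values as a traditional BSON key object: 4 (size) + 9 (string element)
      + 6 (int element) + 1 (EOO) = 20 bytes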
+ /** The object cannot be represented in compact format, so store it in traditional bson format
+ with a leading sentinel byte IsBSON to indicate it is in that format.
+
+ Given that the KeyV1Owned constructor already grabbed a bufbuilder, we reuse it here
+ so that we don't have to do an extra malloc.
+ */
+ void KeyV1Owned::traditional(const BSONObj& obj) {
+ b.reset();
+ b.appendUChar(IsBSON);
+ b.appendBuf(obj.objdata(), obj.objsize());
+ _keyData = (const unsigned char *) b.buf();
+ }
+
+ // fromBSON to Key format
+ KeyV1Owned::KeyV1Owned(const BSONObj& obj) {
+ BSONObj::iterator i(obj);
+ unsigned char bits = 0;
+ while( 1 ) {
+ BSONElement e = i.next();
+ if( i.more() )
+ bits |= cHASMORE;
+ switch( e.type() ) {
+ case MinKey:
+ b.appendUChar(cminkey|bits);
+ break;
+ case jstNULL:
+ b.appendUChar(cnull|bits);
+ break;
+ case MaxKey:
+ b.appendUChar(cmaxkey|bits);
+ break;
+ case Bool:
+ b.appendUChar( (e.boolean()?ctrue:cfalse) | bits );
+ break;
+ case jstOID:
+ b.appendUChar(coid|bits);
+ b.appendBuf(&e.__oid(), sizeof(OID));
+ break;
+ case BinData:
+ {
+ int t = e.binDataType();
+ // 0-7 and 0x80 to 0x87 are supported by Key
+ if( (t & 0x78) == 0 && t != ByteArrayDeprecated ) {
+ int len;
+ const char * d = e.binData(len);
+ if( len <= BinDataLenMax ) {
+ int code = BinDataLengthToCode[len];
+ if( code >= 0 ) {
+ if( t >= 128 )
+ t = (t-128) | 0x08;
+ dassert( (code&t) == 0 );
+ b.appendUChar( cbindata|bits );
+ b.appendUChar( code | t );
+ b.appendBuf(d, len);
+ break;
+ }
+ }
+ }
+ traditional(obj);
+ return;
+ }
+ case Date:
+ b.appendUChar(cdate|bits);
+ b.appendStruct(e.date());
+ break;
+ case String:
+ {
+ b.appendUChar(cstring|bits);
+ // note we do not store the terminating null, to save space.
+ unsigned x = (unsigned) e.valuestrsize() - 1;
+ if( x > 255 ) {
+ traditional(obj);
+ return;
+ }
+ b.appendUChar(x);
+ b.appendBuf(e.valuestr(), x);
+ break;
+ }
+ case NumberInt:
+ b.appendUChar(cint|bits);
+ b.appendNum((double) e._numberInt());
+ break;
+ case NumberLong:
+ {
+ long long n = e._numberLong();
+ long long m = 2LL << 52;
+ DEV {
+ long long d = m-1;
+ assert( ((long long) ((double) -d)) == -d );
+ }
+ if( n >= m || n <= -m ) {
+ // can't represent exactly as a double
+ traditional(obj);
+ return;
+ }
+ b.appendUChar(clong|bits);
+ b.appendNum((double) n);
+ break;
+ }
+ case NumberDouble:
+ {
+ double d = e._numberDouble();
+ if( isNaN(d) ) {
+ traditional(obj);
+ return;
+ }
+ b.appendUChar(cdouble|bits);
+ b.appendNum(d);
+ break;
+ }
+ default:
+ // if other types involved, store as traditional BSON
+ traditional(obj);
+ return;
+ }
+ if( !i.more() )
+ break;
+ bits = 0;
+ }
+ _keyData = (const unsigned char *) b.buf();
+ dassert( b.len() == dataSize() ); // check datasize method is correct
+ dassert( (*_keyData & cNOTUSED) == 0 );
+ }
+
+ BSONObj KeyV1::toBson() const {
+ assert( _keyData != 0 );
+ if( !isCompactFormat() )
+ return bson();
+
+ BSONObjBuilder b(512);
+ const unsigned char *p = _keyData;
+ while( 1 ) {
+ unsigned bits = *p++;
+
+ switch( bits & 0x3f ) {
+ case cminkey: b.appendMinKey(""); break;
+ case cnull: b.appendNull(""); break;
+ case cfalse: b.appendBool("", false); break;
+ case ctrue: b.appendBool("", true); break;
+ case cmaxkey:
+ b.appendMaxKey("");
+ break;
+ case cstring:
+ {
+ unsigned sz = *p++;
+ // we build the element ourselves as we have to null-terminate it
+ BufBuilder &bb = b.bb();
+ bb.appendNum((char) String);
+ bb.appendUChar(0); // fieldname ""
+ bb.appendNum(sz+1);
+ bb.appendBuf(p, sz);
+ bb.appendUChar(0); // null char at end of string
+ p += sz;
+ break;
+ }
+ case coid:
+ b.appendOID("", (OID *) p);
+ p += sizeof(OID);
+ break;
+ case cbindata:
+ {
+ int len = binDataCodeToLength(*p);
+ int subtype = (*p) & BinDataTypeMask;
+ if( subtype & 0x8 ) {
+ subtype = (subtype & 0x7) | 0x80;
+ }
+ b.appendBinData("", len, (BinDataType) subtype, ++p);
+ p += len;
+ break;
+ }
+ case cdate:
+ b.appendDate("", (Date_t&) *p);
+ p += 8;
+ break;
+ case cdouble:
+ b.append("", (double&) *p);
+ p += sizeof(double);
+ break;
+ case cint:
+ b.append("", (int) ((double&) *p));
+ p += sizeof(double);
+ break;
+ case clong:
+ b.append("", (long long) ((double&) *p));
+ p += sizeof(double);
+ break;
+ default:
+ assert(false);
+ }
+
+ if( (bits & cHASMORE) == 0 )
+ break;
+ }
+ return b.obj();
+ }
+
+ static int compare(const unsigned char *&l, const unsigned char *&r) {
+ int lt = (*l & cCANONTYPEMASK);
+ int rt = (*r & cCANONTYPEMASK);
+ int x = lt - rt;
+ if( x )
+ return x;
+
+ l++; r++;
+
+ // same type
+ switch( lt ) {
+ case cdouble:
+ {
+ double L = *((double *) l);
+ double R = *((double *) r);
+ if( L < R )
+ return -1;
+ if( L != R )
+ return 1;
+ l += 8; r += 8;
+ break;
+ }
+ case cstring:
+ {
+ int lsz = *l;
+ int rsz = *r;
+ int common = min(lsz, rsz);
+ l++; r++; // skip the size byte
+ // use memcmp as we (will) allow zeros in UTF8 strings
+ int res = memcmp(l, r, common);
+ if( res )
+ return res;
+ // longer string is the greater one
+ int diff = lsz-rsz;
+ if( diff )
+ return diff;
+ l += lsz; r += lsz;
+ break;
+ }
+ case cbindata:
+ {
+ int L = *l;
+ int R = *r;
+ int llen = binDataCodeToLength(L);
+ int diff = L-R; // checks length and subtype simultaneously
+ if( diff ) {
+ // unfortunately the nibbles are ordered backwards for comparing subtype and length in one check (we could bit-swap...)
+ int rlen = binDataCodeToLength(R);
+ if( llen != rlen )
+ return llen - rlen;
+ return diff;
+ }
+ // same length, same type
+ l++; r++;
+ int res = memcmp(l, r, llen);
+ if( res )
+ return res;
+ l += llen; r += llen;
+ break;
+ }
+ case cdate:
+ {
+ long long L = *((long long *) l);
+ long long R = *((long long *) r);
+ if( L < R )
+ return -1;
+ if( L > R )
+ return 1;
+ l += 8; r += 8;
+ break;
+ }
+ case coid:
+ {
+ int res = memcmp(l, r, sizeof(OID));
+ if( res )
+ return res;
+ l += 12; r += 12;
+ break;
+ }
+ default:
+ // all the others are a match -- e.g. null == null
+ ;
+ }
+
+ return 0;
+ }
+
+ // at least one of this and right are traditional BSON format
+ int NOINLINE_DECL KeyV1::compareHybrid(const KeyV1& right, const Ordering& order) const {
+ BSONObj L = toBson();
+ BSONObj R = right.toBson();
+ return L.woCompare(R, order, /*considerfieldname*/false);
+ }
+
+ int KeyV1::woCompare(const KeyV1& right, const Ordering &order) const {
+ const unsigned char *l = _keyData;
+ const unsigned char *r = right._keyData;
+
+ if( (*l|*r) == IsBSON ) // only can do this if cNOTUSED maintained
+ return compareHybrid(right, order);
+
+ unsigned mask = 1;
+ while( 1 ) {
+ char lval = *l;
+ char rval = *r;
+ {
+ int x = compare(l, r); // updates l and r pointers
+ if( x ) {
+ if( order.descending(mask) )
+ x = -x;
+ return x;
+ }
+ }
+
+ {
+ int x = ((int)(lval & cHASMORE)) - ((int)(rval & cHASMORE));
+ if( x )
+ return x;
+ if( (lval & cHASMORE) == 0 )
+ break;
+ }
+
+ mask <<= 1;
+ }
+
+ return 0;
+ }
+
+ static unsigned sizes[] = {
+ 0,
+ 1, //cminkey=1,
+ 1, //cnull=2,
+ 0,
+ 9, //cdouble=4,
+ 0,
+ 0, //cstring=6,
+ 0,
+ 13, //coid=8,
+ 0,
+ 1, //cfalse=10,
+ 1, //ctrue=11,
+ 9, //cdate=12,
+ 0,
+ 1, //cmaxkey=14,
+ 0
+ };
+
+ inline unsigned sizeOfElement(const unsigned char *p) {
+ unsigned type = *p & cCANONTYPEMASK;
+ unsigned sz = sizes[type];
+ if( sz == 0 ) {
+ if( type == cstring ) {
+ sz = ((unsigned) p[1]) + 2;
+ }
+ else {
+ assert( type == cbindata );
+ sz = binDataCodeToLength(p[1]) + 2;
+ }
+ }
+ return sz;
+ }
+
+ int KeyV1::dataSize() const {
+ const unsigned char *p = _keyData;
+ if( !isCompactFormat() ) {
+ return bson().objsize() + 1;
+ }
+
+ bool more;
+ do {
+ unsigned z = sizeOfElement(p);
+ more = (*p & cHASMORE) != 0;
+ p += z;
+ } while( more );
+ return p - _keyData;
+ }
+
+ bool KeyV1::woEqual(const KeyV1& right) const {
+ const unsigned char *l = _keyData;
+ const unsigned char *r = right._keyData;
+
+ if( (*l|*r) == IsBSON ) {
+ return toBson().equal(right.toBson());
+ }
+
+ while( 1 ) {
+ char lval = *l;
+ char rval = *r;
+ if( (lval&(cCANONTYPEMASK|cHASMORE)) != (rval&(cCANONTYPEMASK|cHASMORE)) )
+ return false;
+ l++; r++;
+ switch( lval&cCANONTYPEMASK ) {
+ case coid:
+ if( *((unsigned*) l) != *((unsigned*) r) )
+ return false;
+ l += 4; r += 4;
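+ // deliberate fall-through into the cdate case to compare the remaining 8 bytes of the 12-byte OID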
+ case cdate:
+ if( *((unsigned long long *) l) != *((unsigned long long *) r) )
+ return false;
+ l += 8; r += 8;
+ break;
+ case cdouble:
+ if( *((double *) l) != *((double *) r) )
+ return false;
+ l += 8; r += 8;
+ break;
+ case cstring:
+ {
+ if( *l != *r )
+ return false; // not same length
+ unsigned sz = ((unsigned) *l) + 1;
+ if( memcmp(l, r, sz) )
+ return false;
+ l += sz; r += sz;
+ break;
+ }
+ case cbindata:
+ {
+ if( *l != *r )
+ return false; // len or subtype mismatch
+ int len = binDataCodeToLength(*l) + 1;
+ if( memcmp(l, r, len) )
+ return false;
+ l += len; r += len;
+ break;
+ }
+ case cminkey:
+ case cnull:
+ case cfalse:
+ case ctrue:
+ case cmaxkey:
+ break;
+ default:
+ assert(false);
+ }
+ if( (lval&cHASMORE) == 0 )
+ break;
+ }
+ return true;
+ }
+
+ struct CmpUnitTest : public UnitTest {
+ void run() {
+ char a[2];
+ char b[2];
+ a[0] = -3;
+ a[1] = 0;
+ b[0] = 3;
+ b[1] = 0;
+ assert( strcmp(a,b)>0 && memcmp(a,b,2)>0 );
+ }
+ } cunittest;
+
+}
diff --git a/db/key.h b/db/key.h
new file mode 100644
index 0000000..9a3495f
--- /dev/null
+++ b/db/key.h
@@ -0,0 +1,112 @@
+// @file key.h class(es) representing individual keys in a btree
+
+/**
+* Copyright (C) 2011 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+#include "jsobj.h"
+
+namespace mongo {
+
+ /** Key class for precomputing a small format index key that is denser than a traditional BSONObj.
+
+ KeyBson is a legacy wrapper implementation for old BSONObj style keys for v:0 indexes.
+
+ KeyV1 is the new implementation.
+ */
+ class KeyBson /* "KeyV0" */ {
+ public:
+ KeyBson() { }
+ explicit KeyBson(const char *keyData) : _o(keyData) { }
+ explicit KeyBson(const BSONObj& obj) : _o(obj) { }
+ int woCompare(const KeyBson& r, const Ordering &o) const;
+ BSONObj toBson() const { return _o; }
+ string toString() const { return _o.toString(); }
+ int dataSize() const { return _o.objsize(); }
+ const char * data() const { return _o.objdata(); }
+ BSONElement _firstElement() const { return _o.firstElement(); }
+ bool isCompactFormat() const { return false; }
+ bool woEqual(const KeyBson& r) const;
+ void assign(const KeyBson& rhs) { *this = rhs; }
+ private:
+ BSONObj _o;
+ };
+
+ class KeyV1Owned;
+
+ // corresponding to BtreeData_V1
+ class KeyV1 {
+ void operator=(const KeyV1&); // disallowed just to make people be careful as we don't own the buffer
+ KeyV1(const KeyV1Owned&); // disallowed: a KeyV1Owned will likely go out of scope while we still point into its buffer
+ public:
+ KeyV1() { _keyData = 0; }
+ ~KeyV1() { DEV _keyData = (const unsigned char *) 1; }
+
+ KeyV1(const KeyV1& rhs) : _keyData(rhs._keyData) {
+ dassert( _keyData > (const unsigned char *) 1 );
+ }
+
+ // explicit version of operator= to be safe
+ void assign(const KeyV1& rhs) {
+ _keyData = rhs._keyData;
+ }
+
+ /** @param keyData can be a buffer containing data in either BSON format, OR in KeyV1 format.
+ when BSON, we are just a wrapper
+ */
+ explicit KeyV1(const char *keyData) : _keyData((unsigned char *) keyData) { }
+
+ int woCompare(const KeyV1& r, const Ordering &o) const;
+ bool woEqual(const KeyV1& r) const;
+ BSONObj toBson() const;
+ string toString() const { return toBson().toString(); }
+
+ /** get the key data we want to store in the btree bucket */
+ const char * data() const { return (const char *) _keyData; }
+
+ /** @return size of data() */
+ int dataSize() const;
+
+ /** only used by geo, which always has bson keys */
+ BSONElement _firstElement() const { return bson().firstElement(); }
+ bool isCompactFormat() const { return *_keyData != IsBSON; }
+ protected:
+ enum { IsBSON = 0xff };
+ const unsigned char *_keyData;
+ BSONObj bson() const {
+ dassert( !isCompactFormat() );
+ return BSONObj((const char *) _keyData+1);
+ }
+ private:
+ int compareHybrid(const KeyV1& right, const Ordering& order) const;
+ };
+
+ class KeyV1Owned : public KeyV1 {
+ KeyV1Owned(const KeyV1Owned&); // not copyable -- StackBufBuilder is not copyable and that owns our buffer
+ void operator=(const KeyV1Owned&);
+ public:
+ /** @param obj a BSON object to be translated to KeyV1 format. If the object isn't
+ representable in KeyV1 format (which happens, intentionally, at times)
+ it will stay as bson herein.
+ */
+ KeyV1Owned(const BSONObj& obj);
+ private:
+ StackBufBuilder b;
+ void traditional(const BSONObj& obj); // store as traditional bson not as compact format
+ };
+
+};
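
For orientation, a minimal usage sketch of the new key classes above -- not part of the patch, and assuming the usual bson helpers (the BSON() macro and Ordering::make()) available elsewhere in the tree:

    // illustration only -- not part of the patch
    #include "db/key.h"

    using namespace mongo;

    void keyV1Sketch() {
        BSONObj a = BSON( "" << 1 );                 // index keys use empty field names
        BSONObj b = BSON( "" << 2 );

        KeyV1Owned ka( a );                          // translated to the compact KeyV1 format when representable
        KeyV1Owned kb( b );

        Ordering order = Ordering::make( BSON( "x" << 1 ) );   // ascending
        int cmp = ka.woCompare( kb, order );         // negative here: 1 sorts before 2
        bool compact = ka.isCompactFormat();         // true unless the object had to stay as plain bson
        BSONObj decoded = ka.toBson();               // decode back to BSON, e.g. for diagnostics
    }
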
diff --git a/db/lasterror.cpp b/db/lasterror.cpp
index 240c84b..4ed4dfb 100644
--- a/db/lasterror.cpp
+++ b/db/lasterror.cpp
@@ -18,7 +18,7 @@
#include "pch.h"
#include "../util/unittest.h"
-#include "../util/message.h"
+#include "../util/net/message.h"
#include "lasterror.h"
@@ -85,7 +85,7 @@ namespace mongo {
LastError * LastErrorHolder::disableForCommand() {
LastError *le = _get();
- assert( le );
+ uassert(13649, "no operation yet", le);
le->disabled = true;
le->nPrev--; // caller is a command that shouldn't count as an operation
return le;
diff --git a/db/matcher.cpp b/db/matcher.cpp
index 38e8e05..2b92d57 100644
--- a/db/matcher.cpp
+++ b/db/matcher.cpp
@@ -25,6 +25,7 @@
#include "diskloc.h"
#include "../scripting/engine.h"
#include "db.h"
+#include "queryutil.h"
#include "client.h"
#include "pdfile.h"
@@ -40,6 +41,8 @@ namespace {
options.set_multiline(true);
else if ( *flags == 'x' )
options.set_extended(true);
+ else if ( *flags == 's' )
+ options.set_dotall(true);
flags++;
}
return options;
@@ -61,8 +64,14 @@ namespace mongo {
}
~Where() {
- if ( scope.get() )
- scope->execSetup( "_mongo.readOnly = false;" , "make not read only" );
+ if ( scope.get() ){
+ try {
+ scope->execSetup( "_mongo.readOnly = false;" , "make not read only" );
+ }
+ catch( DBException& e ){
+ warning() << "javascript scope cleanup interrupted" << causedBy( e ) << endl;
+ }
+ }
if ( jsScope ) {
delete jsScope;
@@ -83,74 +92,77 @@ namespace mongo {
};
Matcher::~Matcher() {
- delete where;
- where = 0;
+ delete _where;
+ _where = 0;
}
- ElementMatcher::ElementMatcher( BSONElement _e , int _op, bool _isNot )
- : toMatch( _e ) , compareOp( _op ), isNot( _isNot ), subMatcherOnPrimitives(false) {
- if ( _op == BSONObj::opMOD ) {
- BSONObj o = _e.embeddedObject();
- mod = o["0"].numberInt();
- modm = o["1"].numberInt();
+ ElementMatcher::ElementMatcher( BSONElement e , int op, bool isNot )
+ : _toMatch( e ) , _compareOp( op ), _isNot( isNot ), _subMatcherOnPrimitives(false) {
+ if ( op == BSONObj::opMOD ) {
+ BSONObj o = e.embeddedObject();
+ _mod = o["0"].numberInt();
+ _modm = o["1"].numberInt();
- uassert( 10073 , "mod can't be 0" , mod );
+ uassert( 10073 , "mod can't be 0" , _mod );
}
- else if ( _op == BSONObj::opTYPE ) {
- type = (BSONType)(_e.numberInt());
+ else if ( op == BSONObj::opTYPE ) {
+ _type = (BSONType)(e.numberInt());
}
- else if ( _op == BSONObj::opELEM_MATCH ) {
- BSONElement m = _e;
+ else if ( op == BSONObj::opELEM_MATCH ) {
+ BSONElement m = e;
uassert( 12517 , "$elemMatch needs an Object" , m.type() == Object );
BSONObj x = m.embeddedObject();
if ( x.firstElement().getGtLtOp() == 0 ) {
- subMatcher.reset( new Matcher( x ) );
- subMatcherOnPrimitives = false;
+ _subMatcher.reset( new Matcher( x ) );
+ _subMatcherOnPrimitives = false;
}
else {
// meant to act on primitives
- subMatcher.reset( new Matcher( BSON( "" << x ) ) );
- subMatcherOnPrimitives = true;
+ _subMatcher.reset( new Matcher( BSON( "" << x ) ) );
+ _subMatcherOnPrimitives = true;
}
}
}
- ElementMatcher::ElementMatcher( BSONElement _e , int _op , const BSONObj& array, bool _isNot )
- : toMatch( _e ) , compareOp( _op ), isNot( _isNot ), subMatcherOnPrimitives(false) {
+ ElementMatcher::ElementMatcher( BSONElement e , int op , const BSONObj& array, bool isNot )
+ : _toMatch( e ) , _compareOp( op ), _isNot( isNot ), _subMatcherOnPrimitives(false) {
- myset.reset( new set<BSONElement,element_lt>() );
+ _myset.reset( new set<BSONElement,element_lt>() );
BSONObjIterator i( array );
while ( i.more() ) {
BSONElement ie = i.next();
- if ( _op == BSONObj::opALL && ie.type() == Object && ie.embeddedObject().firstElement().getGtLtOp() == BSONObj::opELEM_MATCH ) {
+ if ( op == BSONObj::opALL && ie.type() == Object && ie.embeddedObject().firstElement().getGtLtOp() == BSONObj::opELEM_MATCH ) {
shared_ptr<Matcher> s;
s.reset( new Matcher( ie.embeddedObject().firstElement().embeddedObjectUserCheck() ) );
- allMatchers.push_back( s );
+ _allMatchers.push_back( s );
}
else if ( ie.type() == RegEx ) {
- if ( !myregex.get() ) {
- myregex.reset( new vector< RegexMatcher >() );
+ if ( !_myregex.get() ) {
+ _myregex.reset( new vector< RegexMatcher >() );
}
- myregex->push_back( RegexMatcher() );
- RegexMatcher &rm = myregex->back();
- rm.re.reset( new pcrecpp::RE( ie.regex(), flags2options( ie.regexFlags() ) ) );
- rm.fieldName = 0; // no need for field name
- rm.regex = ie.regex();
- rm.flags = ie.regexFlags();
- rm.isNot = false;
+ _myregex->push_back( RegexMatcher() );
+ RegexMatcher &rm = _myregex->back();
+ rm._re.reset( new pcrecpp::RE( ie.regex(), flags2options( ie.regexFlags() ) ) );
+ rm._fieldName = 0; // no need for field name
+ rm._regex = ie.regex();
+ rm._flags = ie.regexFlags();
+ rm._isNot = false;
bool purePrefix;
- string prefix = simpleRegex(rm.regex, rm.flags, &purePrefix);
+ string prefix = simpleRegex(rm._regex, rm._flags, &purePrefix);
if (purePrefix)
- rm.prefix = prefix;
+ rm._prefix = prefix;
}
else {
- myset->insert(ie);
+ uassert( 15882, "$elemMatch not allowed within $in",
+ ie.type() != Object ||
+ ie.embeddedObject().firstElement().getGtLtOp() != BSONObj::opELEM_MATCH );
+ _myset->insert(ie);
}
}
- if ( allMatchers.size() ) {
- uassert( 13020 , "with $all, can't mix $elemMatch and others" , myset->size() == 0 && !myregex.get());
+ if ( _allMatchers.size() ) {
+ uassert( 13020 , "with $all, can't mix $elemMatch and others" , _myset->size() == 0 && !_myregex.get());
}
}
@@ -158,23 +170,23 @@ namespace mongo {
void Matcher::addRegex(const char *fieldName, const char *regex, const char *flags, bool isNot) {
- if ( nRegex >= 4 ) {
+ if ( _nRegex >= 4 ) {
out() << "ERROR: too many regexes in query" << endl;
}
else {
- RegexMatcher& rm = regexs[nRegex];
- rm.re.reset( new pcrecpp::RE(regex, flags2options(flags)) );
- rm.fieldName = fieldName;
- rm.regex = regex;
- rm.flags = flags;
- rm.isNot = isNot;
- nRegex++;
+ RegexMatcher& rm = _regexs[_nRegex];
+ rm._re.reset( new pcrecpp::RE(regex, flags2options(flags)) );
+ rm._fieldName = fieldName;
+ rm._regex = regex;
+ rm._flags = flags;
+ rm._isNot = isNot;
+ _nRegex++;
if (!isNot) { //TODO something smarter
bool purePrefix;
string prefix = simpleRegex(regex, flags, &purePrefix);
if (purePrefix)
- rm.prefix = prefix;
+ rm._prefix = prefix;
}
}
}
@@ -201,7 +213,7 @@ namespace mongo {
break;
}
case BSONObj::NE: {
- haveNeg = true;
+ _haveNeg = true;
shared_ptr< BSONObjBuilder > b( new BSONObjBuilder() );
_builders.push_back( b );
b->appendAs(fe, e.fieldName());
@@ -209,15 +221,22 @@ namespace mongo {
break;
}
case BSONObj::opALL:
- all = true;
- case BSONObj::opIN:
+ _all = true;
+ case BSONObj::opIN: {
uassert( 13276 , "$in needs an array" , fe.isABSONObj() );
- basics.push_back( ElementMatcher( e , op , fe.embeddedObject(), isNot ) );
+ _basics.push_back( ElementMatcher( e , op , fe.embeddedObject(), isNot ) );
+ BSONObjIterator i( fe.embeddedObject() );
+ while( i.more() ) {
+ if ( i.next().type() == Array ) {
+ _hasArray = true;
+ }
+ }
break;
+ }
case BSONObj::NIN:
uassert( 13277 , "$nin needs an array" , fe.isABSONObj() );
- haveNeg = true;
- basics.push_back( ElementMatcher( e , op , fe.embeddedObject(), isNot ) );
+ _haveNeg = true;
+ _basics.push_back( ElementMatcher( e , op , fe.embeddedObject(), isNot ) );
break;
case BSONObj::opMOD:
case BSONObj::opTYPE:
@@ -226,7 +245,7 @@ namespace mongo {
_builders.push_back( b );
b->appendAs(fe, e.fieldName());
// these are types where ElementMatcher has all the info
- basics.push_back( ElementMatcher( b->done().firstElement() , op, isNot ) );
+ _basics.push_back( ElementMatcher( b->done().firstElement() , op, isNot ) );
break;
}
case BSONObj::opSIZE: {
@@ -234,7 +253,7 @@ namespace mongo {
_builders.push_back( b );
b->appendAs(fe, e.fieldName());
addBasic(b->done().firstElement(), BSONObj::opSIZE, isNot);
- haveSize = true;
+ _haveSize = true;
break;
}
case BSONObj::opEXISTS: {
@@ -270,99 +289,93 @@ namespace mongo {
return true;
}
- void Matcher::parseOr( const BSONElement &e, bool subMatcher, list< shared_ptr< Matcher > > &matchers ) {
- uassert( 13090, "nested $or/$nor not allowed", !subMatcher );
- uassert( 13086, "$or/$nor must be a nonempty array", e.type() == Array && e.embeddedObject().nFields() > 0 );
+ void Matcher::parseExtractedClause( const BSONElement &e, list< shared_ptr< Matcher > > &matchers ) {
+ uassert( 13086, "$and/$or/$nor must be a nonempty array", e.type() == Array && e.embeddedObject().nFields() > 0 );
BSONObjIterator j( e.embeddedObject() );
while( j.more() ) {
BSONElement f = j.next();
- uassert( 13087, "$or/$nor match element must be an object", f.type() == Object );
- // until SERVER-109 this is never a covered index match, so don't constrain index key for $or matchers
+ uassert( 13087, "$and/$or/$nor match element must be an object", f.type() == Object );
matchers.push_back( shared_ptr< Matcher >( new Matcher( f.embeddedObject(), true ) ) );
}
}
- bool Matcher::parseOrNor( const BSONElement &e, bool subMatcher ) {
+ bool Matcher::parseClause( const BSONElement &e ) {
const char *ef = e.fieldName();
if ( ef[ 0 ] != '$' )
return false;
- if ( ef[ 1 ] == 'o' && ef[ 2 ] == 'r' && ef[ 3 ] == 0 ) {
- parseOr( e, subMatcher, _orMatchers );
+ if ( ef[ 1 ] == 'a' && ef[ 2 ] == 'n' && ef[ 3 ] == 'd' ) {
+ parseExtractedClause( e, _andMatchers );
+ }
+ else if ( ef[ 1 ] == 'o' && ef[ 2 ] == 'r' && ef[ 3 ] == 0 ) {
+ parseExtractedClause( e, _orMatchers );
}
else if ( ef[ 1 ] == 'n' && ef[ 2 ] == 'o' && ef[ 3 ] == 'r' && ef[ 4 ] == 0 ) {
- parseOr( e, subMatcher, _norMatchers );
+ parseExtractedClause( e, _norMatchers );
}
else {
return false;
}
return true;
}
-
- /* _jsobj - the query pattern
- */
- Matcher::Matcher(const BSONObj &_jsobj, bool subMatcher) :
- where(0), jsobj(_jsobj), haveSize(), all(), hasArray(0), haveNeg(), _atomic(false), nRegex(0) {
-
- BSONObjIterator i(jsobj);
- while ( i.more() ) {
- BSONElement e = i.next();
+
+ void Matcher::parseMatchExpressionElement( const BSONElement &e, bool nested ) {
+
+ uassert( 13629 , "can't have undefined in a query expression" , e.type() != Undefined );
+
+ if ( parseClause( e ) ) {
+ return;
+ }
+
+ if ( ( e.type() == CodeWScope || e.type() == Code || e.type() == String ) && strcmp(e.fieldName(), "$where")==0 ) {
+ // $where: function()...
+ uassert( 10066 , "$where may only appear once in query", _where == 0 );
+ uassert( 10067 , "$where query, but no script engine", globalScriptEngine );
+ massert( 13089 , "no current client needed for $where" , haveClient() );
+ _where = new Where();
+ _where->scope = globalScriptEngine->getPooledScope( cc().ns() );
+ _where->scope->localConnect( cc().database()->name.c_str() );
- uassert( 13629 , "can't have undefined in a query expression" , e.type() != Undefined );
-
- if ( parseOrNor( e, subMatcher ) ) {
- continue;
- }
-
- if ( ( e.type() == CodeWScope || e.type() == Code || e.type() == String ) && strcmp(e.fieldName(), "$where")==0 ) {
- // $where: function()...
- uassert( 10066 , "$where occurs twice?", where == 0 );
- uassert( 10067 , "$where query, but no script engine", globalScriptEngine );
- massert( 13089 , "no current client needed for $where" , haveClient() );
- where = new Where();
- where->scope = globalScriptEngine->getPooledScope( cc().ns() );
- where->scope->localConnect( cc().database()->name.c_str() );
-
- if ( e.type() == CodeWScope ) {
- where->setFunc( e.codeWScopeCode() );
- where->jsScope = new BSONObj( e.codeWScopeScopeData() , 0 );
- }
- else {
- const char *code = e.valuestr();
- where->setFunc(code);
- }
-
- where->scope->execSetup( "_mongo.readOnly = true;" , "make read only" );
-
- continue;
+ if ( e.type() == CodeWScope ) {
+ _where->setFunc( e.codeWScopeCode() );
+ _where->jsScope = new BSONObj( e.codeWScopeScopeData() );
}
-
- if ( e.type() == RegEx ) {
- addRegex( e.fieldName(), e.regex(), e.regexFlags() );
- continue;
+ else {
+ const char *code = e.valuestr();
+ _where->setFunc(code);
}
-
- // greater than / less than...
- // e.g., e == { a : { $gt : 3 } }
- // or
- // { a : { $in : [1,2,3] } }
- if ( e.type() == Object ) {
- // support {$regex:"a|b", $options:"imx"}
- const char* regex = NULL;
- const char* flags = "";
-
- // e.g., fe == { $gt : 3 }
- BSONObjIterator j(e.embeddedObject());
- bool isOperator = false;
- while ( j.more() ) {
- BSONElement fe = j.next();
- const char *fn = fe.fieldName();
-
- if ( fn[0] == '$' && fn[1] ) {
- isOperator = true;
-
- if ( fn[1] == 'n' && fn[2] == 'o' && fn[3] == 't' && fn[4] == 0 ) {
- haveNeg = true;
- switch( fe.type() ) {
+
+ _where->scope->execSetup( "_mongo.readOnly = true;" , "make read only" );
+
+ return;
+ }
+
+ if ( e.type() == RegEx ) {
+ addRegex( e.fieldName(), e.regex(), e.regexFlags() );
+ return;
+ }
+
+ // greater than / less than...
+ // e.g., e == { a : { $gt : 3 } }
+ // or
+ // { a : { $in : [1,2,3] } }
+ if ( e.type() == Object ) {
+ // support {$regex:"a|b", $options:"imx"}
+ const char* regex = NULL;
+ const char* flags = "";
+
+ // e.g., fe == { $gt : 3 }
+ BSONObjIterator j(e.embeddedObject());
+ bool isOperator = false;
+ while ( j.more() ) {
+ BSONElement fe = j.next();
+ const char *fn = fe.fieldName();
+
+ if ( fn[0] == '$' && fn[1] ) {
+ isOperator = true;
+
+ if ( fn[1] == 'n' && fn[2] == 'o' && fn[3] == 't' && fn[4] == 0 ) {
+ _haveNeg = true;
+ switch( fe.type() ) {
case Object: {
BSONObjIterator k( fe.embeddedObject() );
uassert( 13030, "$not cannot be empty", k.more() );
@@ -376,65 +389,98 @@ namespace mongo {
break;
default:
uassert( 13031, "invalid use of $not", false );
- }
- }
- else {
- if ( !addOp( e, fe, false, regex, flags ) ) {
- isOperator = false;
- break;
- }
}
}
else {
- isOperator = false;
- break;
+ if ( !addOp( e, fe, false, regex, flags ) ) {
+ isOperator = false;
+ break;
+ }
}
}
- if (regex) {
- addRegex(e.fieldName(), regex, flags);
+ else {
+ isOperator = false;
+ break;
}
- if ( isOperator )
- continue;
- }
-
- if ( e.type() == Array ) {
- hasArray = true;
}
- else if( strcmp(e.fieldName(), "$atomic") == 0 ) {
- _atomic = e.trueValue();
- continue;
+ if (regex) {
+ addRegex(e.fieldName(), regex, flags);
}
+ if ( isOperator )
+ return;
+ }
+
+ if ( e.type() == Array ) {
+ _hasArray = true;
+ }
+ else if( strcmp(e.fieldName(), "$atomic") == 0 ) {
+ uassert( 14844, "$atomic specifier must be a top level field", !nested );
+ _atomic = e.trueValue();
+ return;
+ }
+
+ // normal, simple case e.g. { a : "foo" }
+ addBasic(e, BSONObj::Equality, false);
+ }
+
+ /* _jsobj - the query pattern
+ */
+ Matcher::Matcher(const BSONObj &jsobj, bool nested) :
+ _where(0), _jsobj(jsobj), _haveSize(), _all(), _hasArray(0), _haveNeg(), _atomic(false), _nRegex(0) {
- // normal, simple case e.g. { a : "foo" }
- addBasic(e, BSONObj::Equality, false);
+ BSONObjIterator i(_jsobj);
+ while ( i.more() ) {
+ parseMatchExpressionElement( i.next(), nested );
}
}
- Matcher::Matcher( const Matcher &other, const BSONObj &key ) :
- where(0), constrainIndexKey_( key ), haveSize(), all(), hasArray(0), haveNeg(), _atomic(false), nRegex(0) {
- // do not include fields which would make keyMatch() false
- for( vector< ElementMatcher >::const_iterator i = other.basics.begin(); i != other.basics.end(); ++i ) {
- if ( key.hasField( i->toMatch.fieldName() ) ) {
- switch( i->compareOp ) {
+ Matcher::Matcher( const Matcher &docMatcher, const BSONObj &key ) :
+ _where(0), _constrainIndexKey( key ), _haveSize(), _all(), _hasArray(0), _haveNeg(), _atomic(false), _nRegex(0) {
+ // Filter out match components that will provide an incorrect result
+ // given a key from a single key index.
+ for( vector< ElementMatcher >::const_iterator i = docMatcher._basics.begin(); i != docMatcher._basics.end(); ++i ) {
+ if ( key.hasField( i->_toMatch.fieldName() ) ) {
+ switch( i->_compareOp ) {
case BSONObj::opSIZE:
case BSONObj::opALL:
case BSONObj::NE:
case BSONObj::NIN:
+ case BSONObj::opEXISTS: // We can't match on index in this case.
+ case BSONObj::opTYPE: // For $type:10 (null), a null key could be a missing field or a null value field.
+ break;
+ case BSONObj::opIN: {
+ bool inContainsArray = false;
+ for( set<BSONElement,element_lt>::const_iterator j = i->_myset->begin(); j != i->_myset->end(); ++j ) {
+ if ( j->type() == Array ) {
+ inContainsArray = true;
+ break;
+ }
+ }
+ // Can't match an array to its first indexed element.
+ if ( !i->_isNot && !inContainsArray ) {
+ _basics.push_back( *i );
+ }
break;
+ }
default: {
- if ( !i->isNot && i->toMatch.type() != Array ) {
- basics.push_back( *i );
+ // Can't match an array to its first indexed element.
+ if ( !i->_isNot && i->_toMatch.type() != Array ) {
+ _basics.push_back( *i );
}
}
}
}
}
- for( int i = 0; i < other.nRegex; ++i ) {
- if ( !other.regexs[ i ].isNot && key.hasField( other.regexs[ i ].fieldName ) ) {
- regexs[ nRegex++ ] = other.regexs[ i ];
+ for( int i = 0; i < docMatcher._nRegex; ++i ) {
+ if ( !docMatcher._regexs[ i ]._isNot && key.hasField( docMatcher._regexs[ i ]._fieldName ) ) {
+ _regexs[ _nRegex++ ] = docMatcher._regexs[ i ];
}
}
- for( list< shared_ptr< Matcher > >::const_iterator i = other._orMatchers.begin(); i != other._orMatchers.end(); ++i ) {
+        // Recursively filter match components for the $and and $or matchers.
+ for( list< shared_ptr< Matcher > >::const_iterator i = docMatcher._andMatchers.begin(); i != docMatcher._andMatchers.end(); ++i ) {
+ _andMatchers.push_back( shared_ptr< Matcher >( new Matcher( **i, key ) ) );
+ }
+ for( list< shared_ptr< Matcher > >::const_iterator i = docMatcher._orMatchers.begin(); i != docMatcher._orMatchers.end(); ++i ) {
_orMatchers.push_back( shared_ptr< Matcher >( new Matcher( **i, key ) ) );
}
}
@@ -443,12 +489,12 @@ namespace mongo {
switch (e.type()) {
case String:
case Symbol:
- if (rm.prefix.empty())
- return rm.re->PartialMatch(e.valuestr());
+ if (rm._prefix.empty())
+ return rm._re->PartialMatch(e.valuestr());
else
- return !strncmp(e.valuestr(), rm.prefix.c_str(), rm.prefix.size());
+ return !strncmp(e.valuestr(), rm._prefix.c_str(), rm._prefix.size());
case RegEx:
- return !strcmp(rm.regex, e.regex()) && !strcmp(rm.flags, e.regexFlags());
+ return !strcmp(rm._regex, e.regex()) && !strcmp(rm._flags, e.regexFlags());
default:
return false;
}
@@ -463,11 +509,11 @@ namespace mongo {
if ( op == BSONObj::opIN ) {
// { $in : [1,2,3] }
- int count = bm.myset->count(l);
+ int count = bm._myset->count(l);
if ( count )
return count;
- if ( bm.myregex.get() ) {
- for( vector<RegexMatcher>::const_iterator i = bm.myregex->begin(); i != bm.myregex->end(); ++i ) {
+ if ( bm._myregex.get() ) {
+ for( vector<RegexMatcher>::const_iterator i = bm._myregex->begin(); i != bm._myregex->end(); ++i ) {
if ( regexMatches( *i, l ) ) {
return true;
}
@@ -493,11 +539,11 @@ namespace mongo {
if ( ! l.isNumber() )
return false;
- return l.numberLong() % bm.mod == bm.modm;
+ return l.numberLong() % bm._mod == bm._modm;
}
if ( op == BSONObj::opTYPE ) {
- return bm.type == l.type();
+ return bm._type == l.type();
}
/* check LT, GTE, ... */
@@ -512,16 +558,14 @@ namespace mongo {
int Matcher::matchesNe(const char *fieldName, const BSONElement &toMatch, const BSONObj &obj, const ElementMatcher& bm , MatchDetails * details ) {
int ret = matchesDotted( fieldName, toMatch, obj, BSONObj::Equality, bm , false , details );
- if ( bm.toMatch.type() != jstNULL )
+ if ( bm._toMatch.type() != jstNULL )
return ( ret <= 0 ) ? 1 : 0;
else
return -ret;
}
- int retMissing( const ElementMatcher &bm ) {
- if ( bm.compareOp != BSONObj::opEXISTS )
- return 0;
- return bm.toMatch.boolean() ? -1 : 1;
+ int retExistsFound( const ElementMatcher &bm ) {
+ return bm._toMatch.trueValue() ? 1 : -1;
}
/* Check if a particular field matches.
@@ -547,11 +591,11 @@ namespace mongo {
DEBUGMATCHER( "\t matchesDotted : " << fieldName << " hasDetails: " << ( details ? "yes" : "no" ) );
if ( compareOp == BSONObj::opALL ) {
- if ( em.allMatchers.size() ) {
+ if ( em._allMatchers.size() ) {
BSONElement e = obj.getFieldDotted( fieldName );
uassert( 13021 , "$all/$elemMatch needs to be applied to array" , e.type() == Array );
- for ( unsigned i=0; i<em.allMatchers.size(); i++ ) {
+ for ( unsigned i=0; i<em._allMatchers.size(); i++ ) {
bool found = false;
BSONObjIterator x( e.embeddedObject() );
while ( x.more() ) {
@@ -559,7 +603,7 @@ namespace mongo {
if ( f.type() != Object )
continue;
- if ( em.allMatchers[i]->matches( f.embeddedObject() ) ) {
+ if ( em._allMatchers[i]->matches( f.embeddedObject() ) ) {
found = true;
break;
}
@@ -572,13 +616,13 @@ namespace mongo {
return 1;
}
- if ( em.myset->size() == 0 && !em.myregex.get() )
+ if ( em._myset->size() == 0 && !em._myregex.get() )
return -1; // is this desired?
BSONElementSet myValues;
obj.getFieldsDotted( fieldName , myValues );
- for( set< BSONElement, element_lt >::const_iterator i = em.myset->begin(); i != em.myset->end(); ++i ) {
+ for( set< BSONElement, element_lt >::const_iterator i = em._myset->begin(); i != em._myset->end(); ++i ) {
// ignore nulls
if ( i->type() == jstNULL )
continue;
@@ -587,10 +631,10 @@ namespace mongo {
return -1;
}
- if ( !em.myregex.get() )
+ if ( !em._myregex.get() )
return 1;
- for( vector< RegexMatcher >::const_iterator i = em.myregex->begin(); i != em.myregex->end(); ++i ) {
+ for( vector< RegexMatcher >::const_iterator i = em._myregex->begin(); i != em._myregex->end(); ++i ) {
bool match = false;
for( BSONElementSet::const_iterator j = myValues.begin(); j != myValues.end(); ++j ) {
if ( regexMatches( *i, *j ) ) {
@@ -608,15 +652,15 @@ namespace mongo {
if ( compareOp == BSONObj::NE )
return matchesNe( fieldName, toMatch, obj, em , details );
if ( compareOp == BSONObj::NIN ) {
- for( set<BSONElement,element_lt>::const_iterator i = em.myset->begin(); i != em.myset->end(); ++i ) {
+ for( set<BSONElement,element_lt>::const_iterator i = em._myset->begin(); i != em._myset->end(); ++i ) {
int ret = matchesNe( fieldName, *i, obj, em , details );
if ( ret != 1 )
return ret;
}
- if ( em.myregex.get() ) {
+ if ( em._myregex.get() ) {
BSONElementSet s;
obj.getFieldsDotted( fieldName, s );
- for( vector<RegexMatcher>::const_iterator i = em.myregex->begin(); i != em.myregex->end(); ++i ) {
+ for( vector<RegexMatcher>::const_iterator i = em._myregex->begin(); i != em._myregex->end(); ++i ) {
for( BSONElementSet::const_iterator j = s.begin(); j != s.end(); ++j ) {
if ( regexMatches( *i, *j ) ) {
return -1;
@@ -628,13 +672,13 @@ namespace mongo {
}
BSONElement e;
- bool indexed = !constrainIndexKey_.isEmpty();
+ bool indexed = !_constrainIndexKey.isEmpty();
if ( indexed ) {
- e = obj.getFieldUsingIndexNames(fieldName, constrainIndexKey_);
+ e = obj.getFieldUsingIndexNames(fieldName, _constrainIndexKey);
if( e.eoo() ) {
cout << "obj: " << obj << endl;
cout << "fieldName: " << fieldName << endl;
- cout << "constrainIndexKey_: " << constrainIndexKey_ << endl;
+ cout << "_constrainIndexKey: " << _constrainIndexKey << endl;
assert( !e.eoo() );
}
}
@@ -655,6 +699,7 @@ namespace mongo {
}
}
+ // An array was encountered while scanning for components of the field name.
if ( isArr ) {
DEBUGMATCHER( "\t\t isArr 1 : obj : " << obj );
BSONObjIterator ai(obj);
@@ -662,11 +707,16 @@ namespace mongo {
while ( ai.moreWithEOO() ) {
BSONElement z = ai.next();
- if( strcmp(z.fieldName(),fieldName) == 0 && valuesMatch(z, toMatch, compareOp, em) ) {
- // "field.<n>" array notation was used
- if ( details )
- details->elemMatchKey = z.fieldName();
- return 1;
+ if( strcmp(z.fieldName(),fieldName) == 0 ) {
+ if ( compareOp == BSONObj::opEXISTS ) {
+ return retExistsFound( em );
+ }
+ if (valuesMatch(z, toMatch, compareOp, em) ) {
+ // "field.<n>" array notation was used
+ if ( details )
+ details->_elemMatchKey = z.fieldName();
+ return 1;
+ }
}
if ( z.type() == Object ) {
@@ -674,7 +724,7 @@ namespace mongo {
int cmp = matchesDotted(fieldName, toMatch, eo, compareOp, em, false, details );
if ( cmp > 0 ) {
if ( details )
- details->elemMatchKey = z.fieldName();
+ details->_elemMatchKey = z.fieldName();
return 1;
}
else if ( cmp < 0 ) {
@@ -682,11 +732,12 @@ namespace mongo {
}
}
}
- return found ? -1 : retMissing( em );
+ return found ? -1 : 0;
}
if( p ) {
- return retMissing( em );
+            // Left portion of the field name was not found or had the wrong type.
+ return 0;
}
else {
e = obj.getField(fieldName);
@@ -694,7 +745,11 @@ namespace mongo {
}
if ( compareOp == BSONObj::opEXISTS ) {
- return ( e.eoo() ^ ( toMatch.boolean() ^ em.isNot ) ) ? 1 : -1;
+ if( e.eoo() ) {
+ return 0;
+ } else {
+ return retExistsFound( em );
+ }
}
else if ( ( e.type() != Array || indexed || compareOp == BSONObj::opSIZE ) &&
valuesMatch(e, toMatch, compareOp, em ) ) {
@@ -708,16 +763,16 @@ namespace mongo {
if ( compareOp == BSONObj::opELEM_MATCH ) {
if ( z.type() == Object ) {
- if ( em.subMatcher->matches( z.embeddedObject() ) ) {
+ if ( em._subMatcher->matches( z.embeddedObject() ) ) {
if ( details )
- details->elemMatchKey = z.fieldName();
+ details->_elemMatchKey = z.fieldName();
return 1;
}
}
- else if ( em.subMatcherOnPrimitives ) {
- if ( z.type() && em.subMatcher->matches( z.wrap( "" ) ) ) {
+ else if ( em._subMatcherOnPrimitives ) {
+ if ( z.type() && em._subMatcher->matches( z.wrap( "" ) ) ) {
if ( details )
- details->elemMatchKey = z.fieldName();
+ details->_elemMatchKey = z.fieldName();
return 1;
}
}
@@ -725,21 +780,22 @@ namespace mongo {
else {
if ( valuesMatch( z, toMatch, compareOp, em) ) {
if ( details )
- details->elemMatchKey = z.fieldName();
+ details->_elemMatchKey = z.fieldName();
return 1;
}
}
}
+ // match an entire array to itself
if ( compareOp == BSONObj::Equality && e.woCompare( toMatch , false ) == 0 ) {
- // match an entire array to itself
return 1;
}
-
+ if ( compareOp == BSONObj::opIN && valuesMatch( e, toMatch, compareOp, em ) ) {
+ return 1;
+ }
}
else if ( e.eoo() ) {
- // 0 indicates "missing element"
return 0;
}
return -1;
@@ -754,56 +810,89 @@ namespace mongo {
could be slow sometimes. */
// check normal non-regex cases:
- for ( unsigned i = 0; i < basics.size(); i++ ) {
- ElementMatcher& bm = basics[i];
- BSONElement& m = bm.toMatch;
+ for ( unsigned i = 0; i < _basics.size(); i++ ) {
+ ElementMatcher& bm = _basics[i];
+ BSONElement& m = bm._toMatch;
// -1=mismatch. 0=missing element. 1=match
- int cmp = matchesDotted(m.fieldName(), m, jsobj, bm.compareOp, bm , false , details );
- if ( bm.compareOp != BSONObj::opEXISTS && bm.isNot )
+ int cmp = matchesDotted(m.fieldName(), m, jsobj, bm._compareOp, bm , false , details );
+ if ( cmp == 0 && bm._compareOp == BSONObj::opEXISTS ) {
+            // If the field is missing, the comparison result is the opposite of the $exists spec.
+ cmp = -retExistsFound(bm);
+ }
+ if ( bm._isNot )
cmp = -cmp;
if ( cmp < 0 )
return false;
if ( cmp == 0 ) {
/* missing is ok iff we were looking for null */
- if ( m.type() == jstNULL || m.type() == Undefined || ( bm.compareOp == BSONObj::opIN && bm.myset->count( staticNull.firstElement() ) > 0 ) ) {
- if ( ( bm.compareOp == BSONObj::NE ) ^ bm.isNot ) {
+ if ( m.type() == jstNULL || m.type() == Undefined || ( bm._compareOp == BSONObj::opIN && bm._myset->count( staticNull.firstElement() ) > 0 ) ) {
+ if ( ( bm._compareOp == BSONObj::NE ) ^ bm._isNot ) {
return false;
}
}
else {
- if ( !bm.isNot ) {
+ if ( !bm._isNot ) {
return false;
}
}
}
}
- for ( int r = 0; r < nRegex; r++ ) {
- RegexMatcher& rm = regexs[r];
+ for ( int r = 0; r < _nRegex; r++ ) {
+ RegexMatcher& rm = _regexs[r];
BSONElementSet s;
- if ( !constrainIndexKey_.isEmpty() ) {
- BSONElement e = jsobj.getFieldUsingIndexNames(rm.fieldName, constrainIndexKey_);
- if ( !e.eoo() )
+ if ( !_constrainIndexKey.isEmpty() ) {
+ BSONElement e = jsobj.getFieldUsingIndexNames(rm._fieldName, _constrainIndexKey);
+
+ // Should only have keys nested one deep here, for geo-indices
+ // TODO: future indices may nest deeper?
+ if( e.type() == Array ){
+ BSONObjIterator i( e.Obj() );
+ while( i.more() ){
+ s.insert( i.next() );
+ }
+ }
+ else if ( !e.eoo() )
s.insert( e );
+
}
else {
- jsobj.getFieldsDotted( rm.fieldName, s );
+ jsobj.getFieldsDotted( rm._fieldName, s );
}
bool match = false;
for( BSONElementSet::const_iterator i = s.begin(); i != s.end(); ++i )
if ( regexMatches(rm, *i) )
match = true;
- if ( !match ^ rm.isNot )
+ if ( !match ^ rm._isNot )
return false;
}
+ if ( _orDedupConstraints.size() > 0 ) {
+ for( vector< shared_ptr< FieldRangeVector > >::const_iterator i = _orDedupConstraints.begin();
+ i != _orDedupConstraints.end(); ++i ) {
+ if ( (*i)->matches( jsobj ) ) {
+ return false;
+ }
+ }
+ }
+
+ if ( _andMatchers.size() > 0 ) {
+ for( list< shared_ptr< Matcher > >::const_iterator i = _andMatchers.begin();
+ i != _andMatchers.end(); ++i ) {
+ // SERVER-3192 Track field matched using details the same as for
+ // top level fields, at least for now.
+ if ( !(*i)->matches( jsobj, details ) ) {
+ return false;
+ }
+ }
+ }
+
if ( _orMatchers.size() > 0 ) {
bool match = false;
for( list< shared_ptr< Matcher > >::const_iterator i = _orMatchers.begin();
i != _orMatchers.end(); ++i ) {
// SERVER-205 don't submit details - we don't want to track field
- // matched within $or, and at this point we've already loaded the
- // whole document
+ // matched within $or
if ( (*i)->matches( jsobj ) ) {
match = true;
break;
@@ -818,40 +907,30 @@ namespace mongo {
for( list< shared_ptr< Matcher > >::const_iterator i = _norMatchers.begin();
i != _norMatchers.end(); ++i ) {
// SERVER-205 don't submit details - we don't want to track field
- // matched within $nor, and at this point we've already loaded the
- // whole document
+ // matched within $nor
if ( (*i)->matches( jsobj ) ) {
return false;
}
}
}
- for( vector< shared_ptr< FieldRangeVector > >::const_iterator i = _orConstraints.begin();
- i != _orConstraints.end(); ++i ) {
- if ( (*i)->matches( jsobj ) ) {
- return false;
- }
- }
-
- if ( where ) {
- if ( where->func == 0 ) {
+ if ( _where ) {
+ if ( _where->func == 0 ) {
uassert( 10070 , "$where compile error", false);
return false; // didn't compile
}
- if ( where->jsScope ) {
- where->scope->init( where->jsScope );
+ if ( _where->jsScope ) {
+ _where->scope->init( _where->jsScope );
}
- where->scope->setThis( const_cast< BSONObj * >( &jsobj ) );
- where->scope->setObject( "obj", const_cast< BSONObj & >( jsobj ) );
- where->scope->setBoolean( "fullObject" , true ); // this is a hack b/c fullObject used to be relevant
+ _where->scope->setObject( "obj", const_cast< BSONObj & >( jsobj ) );
+ _where->scope->setBoolean( "fullObject" , true ); // this is a hack b/c fullObject used to be relevant
- int err = where->scope->invoke( where->func , BSONObj() , 1000 * 60 , false );
- where->scope->setThis( 0 );
+ int err = _where->scope->invoke( _where->func , 0, &jsobj , 1000 * 60 , false );
if ( err == -3 ) { // INVOKE_ERROR
stringstream ss;
ss << "error on invocation of $where function:\n"
- << where->scope->getError();
+ << _where->scope->getError();
uassert( 10071 , ss.str(), false);
return false;
}
@@ -859,38 +938,45 @@ namespace mongo {
uassert( 10072 , "unknown error in invocation of $where function", false);
return false;
}
- return where->scope->getBoolean( "return" ) != 0;
+ return _where->scope->getBoolean( "return" ) != 0;
}
return true;
}
- bool Matcher::hasType( BSONObj::MatchType type ) const {
- for ( unsigned i=0; i<basics.size() ; i++ )
- if ( basics[i].compareOp == type )
- return true;
- return false;
- }
-
- bool Matcher::sameCriteriaCount( const Matcher &other ) const {
- if ( !( basics.size() == other.basics.size() && nRegex == other.nRegex && !where == !other.where ) ) {
+ bool Matcher::keyMatch( const Matcher &docMatcher ) const {
+        // Quickly rule out certain cases that can never be key matches.
+ if ( docMatcher._all
+ || docMatcher._haveSize
+             || docMatcher._hasArray // We can't match an array to its first indexed element using keyMatch()
+ || docMatcher._haveNeg ) {
+ return false;
+ }
+
+ // Check that all match components are available in the index matcher.
+ if ( !( _basics.size() == docMatcher._basics.size() && _nRegex == docMatcher._nRegex && !docMatcher._where ) ) {
+ return false;
+ }
+ if ( _andMatchers.size() != docMatcher._andMatchers.size() ) {
return false;
}
- if ( _norMatchers.size() != other._norMatchers.size() ) {
+ if ( _orMatchers.size() != docMatcher._orMatchers.size() ) {
return false;
}
- if ( _orMatchers.size() != other._orMatchers.size() ) {
+ if ( docMatcher._norMatchers.size() > 0 ) {
return false;
}
- if ( _orConstraints.size() != other._orConstraints.size() ) {
+ if ( docMatcher._orDedupConstraints.size() > 0 ) {
return false;
}
+
+ // Recursively check that all submatchers support key match.
{
- list< shared_ptr< Matcher > >::const_iterator i = _norMatchers.begin();
- list< shared_ptr< Matcher > >::const_iterator j = other._norMatchers.begin();
- while( i != _norMatchers.end() ) {
- if ( !(*i)->sameCriteriaCount( **j ) ) {
+ list< shared_ptr< Matcher > >::const_iterator i = _andMatchers.begin();
+ list< shared_ptr< Matcher > >::const_iterator j = docMatcher._andMatchers.begin();
+ while( i != _andMatchers.end() ) {
+ if ( !(*i)->keyMatch( **j ) ) {
return false;
}
++i; ++j;
@@ -898,14 +984,16 @@ namespace mongo {
}
{
list< shared_ptr< Matcher > >::const_iterator i = _orMatchers.begin();
- list< shared_ptr< Matcher > >::const_iterator j = other._orMatchers.begin();
+ list< shared_ptr< Matcher > >::const_iterator j = docMatcher._orMatchers.begin();
while( i != _orMatchers.end() ) {
- if ( !(*i)->sameCriteriaCount( **j ) ) {
+ if ( !(*i)->keyMatch( **j ) ) {
return false;
}
++i; ++j;
}
}
+        // $nor matchers and $or dedup constraints aren't created for index matchers,
+        // so there is no need to check them here.
return true;
}
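
As a reading aid for the parser rework above -- not part of the patch -- a sketch of the query shapes now handled, via the public Matcher API; the BSON()/BSON_ARRAY() builder macros are assumed:

    // illustration only -- not part of the patch
    #include "db/matcher.h"

    using namespace mongo;

    void matcherSketch() {
        // top-level $and, routed through parseClause()/parseExtractedClause()
        Matcher andMatcher( BSON( "$and" << BSON_ARRAY( BSON( "a" << 1 )
                                                     << BSON( "b" << BSON( "$gt" << 2 ) ) ) ) );
        bool hit  = andMatcher.matches( BSON( "a" << 1 << "b" << 5 ) );   // true
        bool miss = andMatcher.matches( BSON( "a" << 1 << "b" << 1 ) );   // false

        // $exists now goes through retExistsFound(): a missing field satisfies $exists:false
        Matcher existsMatcher( BSON( "a" << BSON( "$exists" << false ) ) );
        bool absent  = existsMatcher.matches( BSON( "b" << 1 ) );         // true
        bool present = existsMatcher.matches( BSON( "a" << 1 ) );         // false
    }
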
diff --git a/db/matcher.h b/db/matcher.h
index d242df6..82ef5cc 100644
--- a/db/matcher.h
+++ b/db/matcher.h
@@ -21,7 +21,7 @@
#pragma once
#include "jsobj.h"
-#include <pcrecpp.h>
+#include "pcrecpp.h"
namespace mongo {
@@ -32,13 +32,13 @@ namespace mongo {
class RegexMatcher {
public:
- const char *fieldName;
- const char *regex;
- const char *flags;
- string prefix;
- shared_ptr< pcrecpp::RE > re;
- bool isNot;
- RegexMatcher() : isNot() {}
+ const char *_fieldName;
+ const char *_regex;
+ const char *_flags;
+ string _prefix;
+ shared_ptr< pcrecpp::RE > _re;
+ bool _isNot;
+ RegexMatcher() : _isNot() {}
};
struct element_lt {
@@ -57,27 +57,27 @@ namespace mongo {
ElementMatcher() {
}
- ElementMatcher( BSONElement _e , int _op, bool _isNot );
+ ElementMatcher( BSONElement e , int op, bool isNot );
- ElementMatcher( BSONElement _e , int _op , const BSONObj& array, bool _isNot );
+ ElementMatcher( BSONElement e , int op , const BSONObj& array, bool isNot );
~ElementMatcher() { }
- BSONElement toMatch;
- int compareOp;
- bool isNot;
- shared_ptr< set<BSONElement,element_lt> > myset;
- shared_ptr< vector<RegexMatcher> > myregex;
+ BSONElement _toMatch;
+ int _compareOp;
+ bool _isNot;
+ shared_ptr< set<BSONElement,element_lt> > _myset;
+ shared_ptr< vector<RegexMatcher> > _myregex;
// these are for specific operators
- int mod;
- int modm;
- BSONType type;
+ int _mod;
+ int _modm;
+ BSONType _type;
- shared_ptr<Matcher> subMatcher;
- bool subMatcherOnPrimitives ;
+ shared_ptr<Matcher> _subMatcher;
+ bool _subMatcherOnPrimitives ;
- vector< shared_ptr<Matcher> > allMatchers;
+ vector< shared_ptr<Matcher> > _allMatchers;
};
class Where; // used for $where javascript eval
@@ -89,19 +89,19 @@ namespace mongo {
}
void reset() {
- loadedObject = false;
- elemMatchKey = 0;
+ _loadedObject = false;
+ _elemMatchKey = 0;
}
string toString() const {
stringstream ss;
- ss << "loadedObject: " << loadedObject << " ";
- ss << "elemMatchKey: " << ( elemMatchKey ? elemMatchKey : "NULL" ) << " ";
+ ss << "loadedObject: " << _loadedObject << " ";
+ ss << "elemMatchKey: " << ( _elemMatchKey ? _elemMatchKey : "NULL" ) << " ";
return ss.str();
}
- bool loadedObject;
- const char * elemMatchKey; // warning, this may go out of scope if matched object does
+ bool _loadedObject;
+ const char * _elemMatchKey; // warning, this may go out of scope if matched object does
};
/* Match BSON objects against a query pattern.
@@ -134,45 +134,44 @@ namespace mongo {
return op <= BSONObj::LTE ? -1 : 1;
}
- Matcher(const BSONObj &pattern, bool subMatcher = false);
+ Matcher(const BSONObj &pattern, bool nested=false);
~Matcher();
bool matches(const BSONObj& j, MatchDetails * details = 0 );
- // fast rough check to see if we must load the real doc - we also
- // compare field counts against covereed index matcher; for $or clauses
- // we just compare field counts
- bool keyMatch() const { return !all && !haveSize && !hasArray && !haveNeg; }
-
bool atomic() const { return _atomic; }
- bool hasType( BSONObj::MatchType type ) const;
-
string toString() const {
- return jsobj.toString();
+ return _jsobj.toString();
}
- void addOrConstraint( const shared_ptr< FieldRangeVector > &frv ) {
- _orConstraints.push_back( frv );
+ void addOrDedupConstraint( const shared_ptr< FieldRangeVector > &frv ) {
+ _orDedupConstraints.push_back( frv );
}
void popOrClause() {
_orMatchers.pop_front();
}
- bool sameCriteriaCount( const Matcher &other ) const;
-
+ /**
+ * @return true if this key matcher will return the same true/false
+ * value as the provided doc matcher.
+ */
+ bool keyMatch( const Matcher &docMatcher ) const;
+
private:
- // Only specify constrainIndexKey if matches() will be called with
- // index keys having empty string field names.
- Matcher( const Matcher &other, const BSONObj &constrainIndexKey );
+ /**
+ * Generate a matcher for the provided index key format using the
+ * provided full doc matcher.
+ */
+ Matcher( const Matcher &docMatcher, const BSONObj &constrainIndexKey );
void addBasic(const BSONElement &e, int c, bool isNot) {
// TODO May want to selectively ignore these element types based on op type.
if ( e.type() == MinKey || e.type() == MaxKey )
return;
- basics.push_back( ElementMatcher( e , c, isNot ) );
+ _basics.push_back( ElementMatcher( e , c, isNot ) );
}
void addRegex(const char *fieldName, const char *regex, const char *flags, bool isNot = false);
@@ -180,17 +179,19 @@ namespace mongo {
int valuesMatch(const BSONElement& l, const BSONElement& r, int op, const ElementMatcher& bm);
- bool parseOrNor( const BSONElement &e, bool subMatcher );
- void parseOr( const BSONElement &e, bool subMatcher, list< shared_ptr< Matcher > > &matchers );
+ bool parseClause( const BSONElement &e );
+ void parseExtractedClause( const BSONElement &e, list< shared_ptr< Matcher > > &matchers );
- Where *where; // set if query uses $where
- BSONObj jsobj; // the query pattern. e.g., { name: "joe" }
- BSONObj constrainIndexKey_;
- vector<ElementMatcher> basics;
- bool haveSize;
- bool all;
- bool hasArray;
- bool haveNeg;
+ void parseMatchExpressionElement( const BSONElement &e, bool nested );
+
+ Where *_where; // set if query uses $where
+ BSONObj _jsobj; // the query pattern. e.g., { name: "joe" }
+ BSONObj _constrainIndexKey;
+ vector<ElementMatcher> _basics;
+ bool _haveSize;
+ bool _all;
+ bool _hasArray;
+ bool _haveNeg;
/* $atomic - if true, a multi document operation (some removes, updates)
should be done atomically. in that case, we do not yield -
@@ -199,14 +200,15 @@ namespace mongo {
*/
bool _atomic;
- RegexMatcher regexs[4];
- int nRegex;
+ RegexMatcher _regexs[4];
+ int _nRegex;
// so we delete the mem when we're done:
vector< shared_ptr< BSONObjBuilder > > _builders;
+ list< shared_ptr< Matcher > > _andMatchers;
list< shared_ptr< Matcher > > _orMatchers;
list< shared_ptr< Matcher > > _norMatchers;
- vector< shared_ptr< FieldRangeVector > > _orConstraints;
+ vector< shared_ptr< FieldRangeVector > > _orDedupConstraints;
friend class CoveredIndexMatcher;
};
@@ -216,7 +218,13 @@ namespace mongo {
public:
CoveredIndexMatcher(const BSONObj &pattern, const BSONObj &indexKeyPattern , bool alwaysUseRecord=false );
bool matches(const BSONObj &o) { return _docMatcher->matches( o ); }
- bool matches(const BSONObj &key, const DiskLoc &recLoc , MatchDetails * details = 0 , bool keyUsable = true );
+ bool matchesWithSingleKeyIndex(const BSONObj &key, const DiskLoc &recLoc , MatchDetails * details = 0 ) {
+ return matches( key, recLoc, details, true );
+ }
+ /**
+ * This is the preferred method for matching against a cursor, as it
+ * can handle both multi and single key cursors.
+ */
bool matchesCurrent( Cursor * cursor , MatchDetails * details = 0 );
bool needRecord() { return _needRecord; }
@@ -224,7 +232,7 @@ namespace mongo {
// once this is called, shouldn't use this matcher for matching any more
void advanceOrClause( const shared_ptr< FieldRangeVector > &frv ) {
- _docMatcher->addOrConstraint( frv );
+ _docMatcher->addOrDedupConstraint( frv );
// TODO this is not yet optimal. Since we could skip an entire
// or clause (if a match is impossible) between calls to advanceOrClause()
// we may not pop all the clauses we can.
@@ -234,15 +242,17 @@ namespace mongo {
CoveredIndexMatcher *nextClauseMatcher( const BSONObj &indexKeyPattern, bool alwaysUseRecord=false ) {
return new CoveredIndexMatcher( _docMatcher, indexKeyPattern, alwaysUseRecord );
}
+
+ string toString() const;
+
private:
+ bool matches(const BSONObj &key, const DiskLoc &recLoc , MatchDetails * details = 0 , bool keyUsable = true );
CoveredIndexMatcher(const shared_ptr< Matcher > &docMatcher, const BSONObj &indexKeyPattern , bool alwaysUseRecord=false );
void init( bool alwaysUseRecord );
shared_ptr< Matcher > _docMatcher;
Matcher _keyMatcher;
bool _needRecord; // if the key itself isn't good enough to determine a positive match
- bool _needRecordReject; // if the key itself isn't good enough to determine a negative match
- bool _useRecordOnly;
};
} // namespace mongo
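
A small sketch of MatchDetails reporting with the renamed members above -- not part of the patch; the "0" elemMatchKey is simply the matching array element's field name:

    // illustration only -- not part of the patch
    Matcher em( BSON( "a" << BSON( "$elemMatch" << BSON( "b" << 1 ) ) ) );
    MatchDetails details;
    details.reset();
    if ( em.matches( BSON( "a" << BSON_ARRAY( BSON( "b" << 1 ) ) ), &details ) ) {
        // prints the renamed members, e.g. "loadedObject: 0 elemMatchKey: 0"
        cout << details.toString() << endl;
    }
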
diff --git a/db/matcher_covered.cpp b/db/matcher_covered.cpp
index 18892be..52164f5 100644
--- a/db/matcher_covered.cpp
+++ b/db/matcher_covered.cpp
@@ -46,22 +46,24 @@ namespace mongo {
void CoveredIndexMatcher::init( bool alwaysUseRecord ) {
_needRecord =
alwaysUseRecord ||
- ! ( _docMatcher->keyMatch() &&
- _keyMatcher.sameCriteriaCount( *_docMatcher ) );
-
- _needRecordReject = _keyMatcher.hasType( BSONObj::opEXISTS );
+ !_keyMatcher.keyMatch( *_docMatcher );
}
bool CoveredIndexMatcher::matchesCurrent( Cursor * cursor , MatchDetails * details ) {
// bool keyUsable = ! cursor->isMultiKey() && check for $orish like conditions in matcher SERVER-1264
- return matches( cursor->currKey() , cursor->currLoc() , details );
+ return matches( cursor->currKey() , cursor->currLoc() , details ,
+                        !cursor->indexKeyPattern().isEmpty() // not an unindexed (basic) cursor
+                        && !cursor->isMultiKey() // not a multikey index cursor
+ );
}
bool CoveredIndexMatcher::matches(const BSONObj &key, const DiskLoc &recLoc , MatchDetails * details , bool keyUsable ) {
+ dassert( key.isValid() );
+
if ( details )
details->reset();
- if ( _needRecordReject == false && keyUsable ) {
+ if ( keyUsable ) {
if ( !_keyMatcher.matches(key, details ) ) {
return false;
@@ -74,10 +76,24 @@ namespace mongo {
}
if ( details )
- details->loadedObject = true;
+ details->_loadedObject = true;
return _docMatcher->matches(recLoc.obj() , details );
}
-
+ string CoveredIndexMatcher::toString() const {
+ StringBuilder buf;
+ buf << "(CoveredIndexMatcher ";
+
+ if ( _needRecord )
+ buf << "needRecord ";
+
+ buf << "keyMatcher: " << _keyMatcher.toString() << " ";
+
+ if ( _docMatcher )
+ buf << "docMatcher: " << _docMatcher->toString() << " ";
+
+ buf << ")";
+ return buf.str();
+ }
}
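
For context -- not part of the patch -- a sketch of how a caller drives the reworked CoveredIndexMatcher; the null DiskLoc() is only a placeholder, since for this simple equality the new keyMatch() check leaves _needRecord false and the record is never fetched:

    // illustration only -- not part of the patch
    CoveredIndexMatcher cim( BSON( "a" << 5 ),       // query pattern
                             BSON( "a" << 1 ) );     // index key pattern
    bool onKey = cim.matchesWithSingleKeyIndex( BSON( "" << 5 ), DiskLoc() );   // true, answered from the key alone

    // With a cursor in hand, matchesCurrent() is preferred: it checks
    // indexKeyPattern().isEmpty() and isMultiKey() before trusting the key.
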
diff --git a/db/modules/mms.cpp b/db/modules/mms.cpp
index b180262..40abb39 100644
--- a/db/modules/mms.cpp
+++ b/db/modules/mms.cpp
@@ -20,7 +20,7 @@
#include "../db.h"
#include "../instance.h"
#include "../module.h"
-#include "../../util/httpclient.h"
+#include "../../util/net/httpclient.h"
#include "../../util/background.h"
#include "../commands.h"
@@ -142,7 +142,7 @@ namespace mongo {
string errmsg;
BSONObjBuilder sub;
- if ( ! c->run( "admin.$cmd" , co , errmsg , sub , false ) )
+ if ( ! c->run( "admin.$cmd" , co , 0 , errmsg , sub , false ) )
postData.append( cmd , errmsg );
else
postData.append( cmd , sub.obj() );
diff --git a/db/mongommf.cpp b/db/mongommf.cpp
index 5ae573d..7c77ef8 100644
--- a/db/mongommf.cpp
+++ b/db/mongommf.cpp
@@ -53,7 +53,7 @@ namespace mongo {
break;
size_t viewStart = (size_t) x.first;
- size_t viewEnd = viewStart + mmf->length();
+ size_t viewEnd = (size_t) (viewStart + mmf->length());
if( viewEnd <= chunkStart )
break;
@@ -68,7 +68,7 @@ namespace mongo {
bool ok = VirtualProtect((void*)protectStart, protectSize, PAGE_WRITECOPY, &old);
if( !ok ) {
DWORD e = GetLastError();
- log() << "VirtualProtect failed " << chunkno << hex << protectStart << ' ' << protectSize << ' ' << errnoWithDescription(e) << endl;
+ log() << "VirtualProtect failed (mcw) " << mmf->filename() << ' ' << chunkno << hex << protectStart << ' ' << protectSize << ' ' << errnoWithDescription(e) << endl;
assert(false);
}
}
@@ -76,95 +76,16 @@ namespace mongo {
writable.set(chunkno);
}
- __declspec(noinline) void makeChunkWritableOld(size_t chunkno) {
- scoped_lock lk(mapViewMutex);
-
- if( writable.get(chunkno) )
- return;
-
- size_t loc = chunkno * MemoryMappedFile::ChunkSize;
- void *Loc = (void*) loc;
- size_t ofs;
- MongoMMF *mmf = privateViews.find( (void *) (loc), ofs );
- MemoryMappedFile *f = (MemoryMappedFile*) mmf;
- assert(f);
-
- size_t len = MemoryMappedFile::ChunkSize;
- assert( mmf->getView() <= Loc );
- if( ofs + len > f->length() ) {
- // at the very end of the map
- len = f->length() - ofs;
- }
- else {
- ;
- }
-
- // todo: check this goes away on remap
- DWORD old;
- bool ok = VirtualProtect(Loc, len, PAGE_WRITECOPY, &old);
- if( !ok ) {
- DWORD e = GetLastError();
- log() << "VirtualProtect failed " << Loc << ' ' << len << ' ' << errnoWithDescription(e) << endl;
- assert(false);
- }
-
- writable.set(chunkno);
- }
-
- // align so that there is only one map per chunksize so our bitset works right
- void* mapaligned(HANDLE h, unsigned long long _len) {
- void *loc = 0;
- int n = 0;
- while( 1 ) {
- n++;
- void *m = MapViewOfFileEx(h, FILE_MAP_READ, 0, 0, 0, loc);
- if( m == 0 ) {
- DWORD e = GetLastError();
- if( n == 0 ) {
- // if first fails, it isn't going to work
- log() << "mapaligned errno: " << e << endl;
- break;
- }
- if( debug && n == 1 ) {
- log() << "mapaligned info e:" << e << " at n=1" << endl;
- }
- if( n > 98 ) {
- log() << "couldn't align mapped view of file len:" << _len/1024.0/1024.0 << "MB errno:" << e << endl;
- break;
- }
- loc = (void*) (((size_t)loc)+MemoryMappedFile::ChunkSize);
- continue;
- }
-
- size_t x = (size_t) m;
- if( x % MemoryMappedFile::ChunkSize == 0 ) {
- void *end = (void*) (x+_len);
- DEV log() << "mapaligned " << m << '-' << end << " len:" << _len << endl;
- return m;
- }
-
- UnmapViewOfFile(m);
- x = ((x+MemoryMappedFile::ChunkSize-1) / MemoryMappedFile::ChunkSize) * MemoryMappedFile::ChunkSize;
- loc = (void*) x;
- if( n % 20 == 0 ) {
- log() << "warning mapaligned n=20" << endl;
- }
- if( n > 100 ) {
- log() << "couldn't align mapped view of file len:" << _len/1024.0/1024.0 << "MB" << endl;
- break;
- }
- }
- return 0;
- }
-
void* MemoryMappedFile::createPrivateMap() {
assert( maphandle );
scoped_lock lk(mapViewMutex);
- //void *p = mapaligned(maphandle, len);
void *p = MapViewOfFile(maphandle, FILE_MAP_READ, 0, 0, 0);
if ( p == 0 ) {
DWORD e = GetLastError();
- log() << "createPrivateMap failed " << filename() << " " << errnoWithDescription(e) << endl;
+ log() << "createPrivateMap failed " << filename() << " " <<
+ errnoWithDescription(e) << " filelen:" << len <<
+ ((sizeof(void*) == 4 ) ? " (32 bit build)" : "") <<
+ endl;
}
else {
clearWritableBits(p);
@@ -180,7 +101,17 @@ namespace mongo {
scoped_lock lk(mapViewMutex);
clearWritableBits(oldPrivateAddr);
-
+#if 1
+ // https://jira.mongodb.org/browse/SERVER-2942
+ DWORD old;
+ bool ok = VirtualProtect(oldPrivateAddr, (SIZE_T) len, PAGE_READONLY, &old);
+ if( !ok ) {
+ DWORD e = GetLastError();
+ log() << "VirtualProtect failed in remapPrivateView " << filename() << hex << oldPrivateAddr << ' ' << len << ' ' << errnoWithDescription(e) << endl;
+ assert(false);
+ }
+ return oldPrivateAddr;
+#else
if( !UnmapViewOfFile(oldPrivateAddr) ) {
DWORD e = GetLastError();
log() << "UnMapViewOfFile failed " << filename() << ' ' << errnoWithDescription(e) << endl;
@@ -199,6 +130,7 @@ namespace mongo {
}
assert(p == oldPrivateAddr);
return p;
+#endif
}
#endif
@@ -351,7 +283,7 @@ namespace mongo {
if( cmdLine.dur ) {
_view_private = createPrivateMap();
if( _view_private == 0 ) {
- massert( 13636 , "createPrivateMap failed (look in log for error)" , false );
+ msgasserted(13636, str::stream() << "file " << filename() << " open/create failed in createPrivateMap (look in log for more information)");
}
privateViews.add(_view_private, this); // note that testIntent builds use this, even though it points to view_write then...
}
@@ -376,14 +308,12 @@ namespace mongo {
}
/*virtual*/ void MongoMMF::close() {
- {
- if( cmdLine.dur && _view_write/*actually was opened*/ ) {
- if( debug )
- log() << "closingFileNotication:" << filename() << endl;
- dur::closingFileNotification();
- }
- privateViews.remove(_view_private);
+ if( cmdLine.dur && _view_write/*actually was opened*/ ) {
+ dur::closingFileNotification();
}
+
+ RWLockRecursive::Exclusive lk(mmmutex);
+ privateViews.remove(_view_private);
_view_write = _view_private = 0;
MemoryMappedFile::close();
}
diff --git a/db/mongommf.h b/db/mongommf.h
index 5da46fc..0c4e8e4 100644
--- a/db/mongommf.h
+++ b/db/mongommf.h
@@ -27,6 +27,9 @@ namespace mongo {
not this.
*/
class MongoMMF : private MemoryMappedFile {
+ protected:
+ virtual void* viewForFlushing() { return _view_write; }
+
public:
MongoMMF();
virtual ~MongoMMF();
@@ -72,7 +75,7 @@ namespace mongo {
fileSuffixNo() is 3
if the suffix is "ns", fileSuffixNo -1
*/
- RelativePath relativePath() const {
+ const RelativePath& relativePath() const {
DEV assert( !_p._p.empty() );
return _p;
}
diff --git a/db/namespace-inl.h b/db/namespace-inl.h
index a777ff8..a621a22 100644
--- a/db/namespace-inl.h
+++ b/db/namespace-inl.h
@@ -71,21 +71,23 @@ namespace mongo {
}
inline IndexDetails& NamespaceDetails::idx(int idxNo, bool missingExpected ) {
- if( idxNo < NIndexesBase )
- return _indexes[idxNo];
+ if( idxNo < NIndexesBase ) {
+ IndexDetails& id = _indexes[idxNo];
+ return id;
+ }
Extra *e = extra();
if ( ! e ) {
if ( missingExpected )
throw MsgAssertionException( 13283 , "Missing Extra" );
- massert(13282, "missing Extra", e);
+ massert(14045, "missing Extra", e);
}
int i = idxNo - NIndexesBase;
if( i >= NIndexesExtra ) {
e = e->next(this);
if ( ! e ) {
if ( missingExpected )
- throw MsgAssertionException( 13283 , "missing extra" );
- massert(13283, "missing Extra", e);
+ throw MsgAssertionException( 14823 , "missing extra" );
+ massert(14824, "missing Extra", e);
}
i -= NIndexesExtra;
}
diff --git a/db/namespace.cpp b/db/namespace.cpp
index 0cb0e74..2bc7409 100644
--- a/db/namespace.cpp
+++ b/db/namespace.cpp
@@ -25,9 +25,11 @@
#include "btree.h"
#include <algorithm>
#include <list>
-#include "query.h"
#include "queryutil.h"
#include "json.h"
+#include "ops/delete.h"
+#include "ops/query.h"
+
namespace mongo {
@@ -91,7 +93,7 @@ namespace mongo {
boost::filesystem::path dir( dir_ );
dir /= database_;
if ( !boost::filesystem::exists( dir ) )
- BOOST_CHECK_EXCEPTION( boost::filesystem::create_directory( dir ) );
+ MONGO_BOOST_CHECK_EXCEPTION_WITH_MSG( boost::filesystem::create_directory( dir ), "create dir for db " );
}
unsigned lenForNewNsFiles = 16 * 1024 * 1024;
@@ -99,7 +101,7 @@ namespace mongo {
#if defined(_DEBUG)
void NamespaceDetails::dump(const Namespace& k) {
if( !cmdLine.dur )
- cout << "ns offsets which follow will not display correctly with --dur disabled" << endl;
+ cout << "ns offsets which follow will not display correctly with --journal disabled" << endl;
size_t ofs = 1; // 1 is sentinel that the find call below failed
privateViews.find(this, /*out*/ofs);
@@ -253,7 +255,11 @@ namespace mongo {
}
}
- // lenToAlloc is WITH header
+    /** allocate space for a new record from the deleted lists.
+        @param lenToAlloc requested record length, INCLUDING the record header
+        @param extentLoc OUT the location of the extent the returned record belongs to
+        @return a null DiskLoc if there is no room; the caller should then allocate a new extent and retry
+ */
DiskLoc NamespaceDetails::alloc(const char *ns, int lenToAlloc, DiskLoc& extentLoc) {
lenToAlloc = (lenToAlloc + 3) & 0xfffffffc;
DiskLoc loc = _alloc(ns, lenToAlloc);
@@ -568,8 +574,8 @@ namespace mongo {
/* ------------------------------------------------------------------------- */
- mongo::mutex NamespaceDetailsTransient::_qcMutex("qc");
- mongo::mutex NamespaceDetailsTransient::_isMutex("is");
+ SimpleMutex NamespaceDetailsTransient::_qcMutex("qc");
+ SimpleMutex NamespaceDetailsTransient::_isMutex("is");
map< string, shared_ptr< NamespaceDetailsTransient > > NamespaceDetailsTransient::_map;
typedef map< string, shared_ptr< NamespaceDetailsTransient > >::iterator ouriter;
@@ -627,7 +633,7 @@ namespace mongo {
options: { capped : ..., size : ... }
*/
void addNewNamespaceToCatalog(const char *ns, const BSONObj *options = 0) {
- log(1) << "New namespace: " << ns << '\n';
+ LOG(1) << "New namespace: " << ns << endl;
if ( strstr(ns, "system.namespaces") ) {
// system.namespaces holds all the others, so it is not explicitly listed in the catalog.
// TODO: fix above should not be strstr!
@@ -643,6 +649,9 @@ namespace mongo {
char database[256];
nsToDatabase(ns, database);
string s = database;
+ if( cmdLine.configsvr && (s != "config" && s != "admin") ) {
+ uasserted(14037, "can't create user databases on a --configsvr instance");
+ }
s += ".system.namespaces";
theDataFileMgr.insert(s.c_str(), j.objdata(), j.objsize(), true);
}
@@ -711,14 +720,14 @@ namespace mongo {
newIndexSpecB << "ns" << to;
}
BSONObj newIndexSpec = newIndexSpecB.done();
- DiskLoc newIndexSpecLoc = theDataFileMgr.insert( s.c_str(), newIndexSpec.objdata(), newIndexSpec.objsize(), true, BSONElement(), false );
+ DiskLoc newIndexSpecLoc = theDataFileMgr.insert( s.c_str(), newIndexSpec.objdata(), newIndexSpec.objsize(), true, false );
int indexI = details->findIndexByName( oldIndexSpec.getStringField( "name" ) );
IndexDetails &indexDetails = details->idx(indexI);
string oldIndexNs = indexDetails.indexNamespace();
indexDetails.info = newIndexSpecLoc;
string newIndexNs = indexDetails.indexNamespace();
- BtreeBucket::renameIndexNamespace( oldIndexNs.c_str(), newIndexNs.c_str() );
+ renameIndexNamespace( oldIndexNs.c_str(), newIndexNs.c_str() );
deleteObjects( s.c_str(), oldIndexSpec.getOwned(), true, false, true );
}
}
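
A caller-side sketch of the alloc() contract documented above -- not part of the patch; d, ns and lenWithHeader are placeholders for the collection's NamespaceDetails, namespace string and header-inclusive record length:

    // illustration only -- not part of the patch
    DiskLoc extentLoc;
    DiskLoc loc = d->alloc( ns, lenWithHeader, extentLoc );
    if ( loc.isNull() ) {
        // nothing big enough on the deleted lists: grow the collection by a
        // new extent, then retry alloc()
    }
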
diff --git a/db/namespace.h b/db/namespace.h
index ef3d04e..3dfb3f3 100644
--- a/db/namespace.h
+++ b/db/namespace.h
@@ -20,7 +20,7 @@
#include "../pch.h"
#include "jsobj.h"
-#include "queryutil.h"
+#include "querypattern.h"
#include "diskloc.h"
#include "../util/hashtab.h"
#include "mongommf.h"
@@ -44,6 +44,21 @@ namespace mongo {
NamespaceString( const string& ns ) { init(ns.c_str()); }
string ns() const { return db + '.' + coll; }
bool isSystem() const { return strncmp(coll.c_str(), "system.", 7) == 0; }
+
+ /**
+         * @return true if ns is 'normal'. A '$' appears in namespaces holding index data, which do not contain BSON objects in their records.
+         * The one special case is the local.oplog.$main ns -- naming it that way was a mistake.
+ */
+ static bool normal(const char* ns) {
+ const char *p = strchr(ns, '$');
+ if( p == 0 )
+ return true;
+ return strcmp( ns, "local.oplog.$main" ) == 0;
+ }
+
+ static bool special(const char *ns) {
+ return !normal(ns) || strstr(ns, ".system.");
+ }
private:
void init(const char *ns) {
const char *p = strchr(ns, '.');
@@ -67,6 +82,9 @@ namespace mongo {
bool operator==(const char *r) const { return strcmp(buf, r) == 0; }
bool operator==(const Namespace& r) const { return strcmp(buf, r.buf) == 0; }
int hash() const; // value returned is always > 0
+
+ size_t size() const { return strlen( buf ); }
+
string toString() const { return (string) buf; }
operator string() const { return (string) buf; }
@@ -93,8 +111,8 @@ namespace mongo {
namespace mongo {
- /** @return true if a client can modify this namespace
- things like *.system.users
+ /** @return true if a client can modify this namespace even though it is under ".system."
+ For example <dbname>.system.users is ok for regular clients to update.
@param write used when .system.js
*/
bool legalClientSystemNS( const string& ns , bool write );
@@ -154,7 +172,7 @@ namespace mongo {
unsigned long long reservedA;
long long extraOffset; // where the $extra info is located (bytes relative to this)
public:
- int indexBuildInProgress; // 1 if in prog
+ int indexBuildInProgress; // 1 if in prog
unsigned reservedB;
// ofs 424 (8)
struct Capped2 {
@@ -302,13 +320,17 @@ namespace mongo {
void paddingFits() {
double x = paddingFactor - 0.01;
- if ( x >= 1.0 )
- getDur().setNoJournal(&paddingFactor, &x, sizeof(x));
+ if ( x >= 1.0 ) {
+ *getDur().writing(&paddingFactor) = x;
+ //getDur().setNoJournal(&paddingFactor, &x, sizeof(x));
+ }
}
void paddingTooSmall() {
double x = paddingFactor + 0.6;
- if ( x <= 2.0 )
- getDur().setNoJournal(&paddingFactor, &x, sizeof(x));
+ if ( x <= 2.0 ) {
+ *getDur().writing(&paddingFactor) = x;
+ //getDur().setNoJournal(&paddingFactor, &x, sizeof(x));
+ }
}
// @return offset in indexes[]
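
The two padding hunks above replace the removed getDur().setNoJournal() call with the usual write-intent idiom: getDur().writing(p) registers the address with the durability layer and returns a pointer through which the change is journaled. A minimal sketch of that idiom, mirroring paddingFits() (the Example struct is a stand-in for NamespaceDetails):

    struct Example { double paddingFactor; };

    void adjustPadding( Example *d ) {
        double x = d->paddingFactor - 0.01;
        if ( x >= 1.0 ) {
            // declare intent and write through the returned pointer so the
            // change is captured by the journal / private-view machinery
            *mongo::getDur().writing( &d->paddingFactor ) = x;
        }
    }
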
@@ -337,6 +359,10 @@ namespace mongo {
return -1;
}
+ bool haveIdIndex() {
+ return (flags & NamespaceDetails::Flag_HaveIdIndex) || findIdIndex() >= 0;
+ }
+
/* return which "deleted bucket" for this size object */
static int bucket(int n) {
for ( int i = 0; i < Buckets; i++ )
@@ -412,9 +438,12 @@ namespace mongo {
static std::map< string, shared_ptr< NamespaceDetailsTransient > > _map;
public:
NamespaceDetailsTransient(const char *ns) : _ns(ns), _keysComputed(false), _qcWriteCount() { }
+ private:
/* _get() is not threadsafe -- see get_inlock() comments */
static NamespaceDetailsTransient& _get(const char *ns);
- /* use get_w() when doing write operations */
+ public:
+ /* use get_w() when doing write operations. this is safe as there is only 1 write op and it's exclusive to everything else.
+ for reads you must lock and then use get_inlock() instead. */
static NamespaceDetailsTransient& get_w(const char *ns) {
DEV assertInWriteLock();
return _get(ns);
@@ -427,6 +456,26 @@ namespace mongo {
static void clearForPrefix(const char *prefix);
static void eraseForPrefix(const char *prefix);
+ /**
+ * @return a cursor interface to the query optimizer. The implementation may
+ * utilize a single query plan or interleave results from multiple query
+ * plans before settling on a single query plan. Note that the schema of
+ * currKey() documents, the matcher(), and the isMultiKey() nature of the
+ * cursor may change over the course of iteration.
+ *
+ * @param order - If no index exists that satisfies this sort order, an
+ * empty shared_ptr will be returned.
+ *
+ * The returned cursor may @throw inside of advance() or recoverFromYield() in
+ * certain error cases, for example if a capped overrun occurred during a yield.
+ * This indicates that the cursor was unable to perform a complete scan.
+ *
+ * This is a work in progress. Partial list of features not yet implemented:
+ * - modification of scanned documents
+ * - covered indexes
+ */
+ static shared_ptr<Cursor> getCursor( const char *ns, const BSONObj &query, const BSONObj &order = BSONObj() );
+
/* indexKeys() cache ---------------------------------------------------- */
/* assumed to be in write lock for this */
private:
@@ -447,12 +496,12 @@ namespace mongo {
/* IndexSpec caching */
private:
map<const IndexDetails*,IndexSpec> _indexSpecs;
- static mongo::mutex _isMutex;
+ static SimpleMutex _isMutex;
public:
const IndexSpec& getIndexSpec( const IndexDetails * details ) {
IndexSpec& spec = _indexSpecs[details];
if ( ! spec._finishedInit ) {
- scoped_lock lk(_isMutex);
+ SimpleMutex::scoped_lock lk(_isMutex);
if ( ! spec._finishedInit ) {
spec.reset( details );
assert( spec._finishedInit );
@@ -466,7 +515,7 @@ namespace mongo {
int _qcWriteCount;
map< QueryPattern, pair< BSONObj, long long > > _qcCache;
public:
- static mongo::mutex _qcMutex;
+ static SimpleMutex _qcMutex;
/* you must be in the qcMutex when calling this (and using the returned val): */
static NamespaceDetailsTransient& get_inlock(const char *ns) {
return _get(ns);
@@ -479,7 +528,7 @@ namespace mongo {
void notifyOfWriteOp() {
if ( _qcCache.empty() )
return;
- if ( ++_qcWriteCount >= 100 )
+ if ( ++_qcWriteCount >= 1000 )
clearQueryCache();
}
BSONObj indexForPattern( const QueryPattern &pattern ) {
@@ -564,6 +613,8 @@ namespace mongo {
boost::filesystem::path path() const;
+ unsigned long long fileLength() const { return f.length(); }
+
private:
void maybeMkdir() const;
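
For illustration, a small sketch of how the NamespaceString::normal()/special() helpers added in the db/namespace.h hunk above classify names (the sample namespace strings are invented for the example):

    #include <cassert>

    void namespaceClassificationExample() {
        using mongo::NamespaceString;
        assert( NamespaceString::normal( "test.foo" ) );              // ordinary collection
        assert( !NamespaceString::special( "test.foo" ) );
        assert( !NamespaceString::normal( "test.foo.$x_1" ) );        // '$' names hold index data
        assert( NamespaceString::normal( "local.oplog.$main" ) );     // the one '$' exception
        assert( NamespaceString::special( "test.system.indexes" ) );  // .system. is special
    }
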
diff --git a/db/nonce.cpp b/db/nonce.cpp
index 6f35c79..379e88f 100644
--- a/db/nonce.cpp
+++ b/db/nonce.cpp
@@ -23,7 +23,9 @@ extern int do_md5_test(void);
namespace mongo {
- BOOST_STATIC_ASSERT( sizeof(nonce) == 8 );
+ BOOST_STATIC_ASSERT( sizeof(nonce64) == 8 );
+
+ static Security security; // needs to be static so _initialized is preset to false (see getNonceDuringInit below)
Security::Security() {
static int n;
@@ -31,7 +33,7 @@ namespace mongo {
init();
}
- void Security::init() {
+ NOINLINE_DECL void Security::init() {
if( _initialized ) return;
_initialized = true;
@@ -39,7 +41,7 @@ namespace mongo {
_devrandom = new ifstream("/dev/urandom", ios::binary|ios::in);
massert( 10353 , "can't open dev/urandom", _devrandom->is_open() );
#elif defined(_WIN32)
- srand(curTimeMicros());
+ srand(curTimeMicros()); // perhaps not relevant for rand_s but we might want elsewhere anyway
#else
srandomdev();
#endif
@@ -50,21 +52,12 @@ namespace mongo {
#endif
}
- nonce Security::getNonce() {
- static mongo::mutex m("getNonce");
- scoped_lock lk(m);
-
- if ( ! _initialized )
- init();
-
- /* question/todo: /dev/random works on OS X. is it better
- to use that than random() / srandom()?
- */
-
- nonce n;
+ nonce64 Security::__getNonce() {
+ dassert( _initialized );
+ nonce64 n;
#if defined(__linux__) || defined(__sunos__) || defined(__APPLE__)
_devrandom->read((char*)&n, sizeof(n));
- massert( 10355 , "devrandom failed", !_devrandom->fail());
+ massert(10355 , "devrandom failed", !_devrandom->fail());
#elif defined(_WIN32)
unsigned a=0, b=0;
assert( rand_s(&a) == 0 );
@@ -75,9 +68,28 @@ namespace mongo {
#endif
return n;
}
- unsigned getRandomNumber() { return (unsigned) security.getNonce(); }
- bool Security::_initialized;
- Security security;
+ SimpleMutex nonceMutex("nonce");
+ nonce64 Security::_getNonce() {
+ // note: the mutex is intentionally not a function-local static, as gcc would mutex-protect its initialization, which costs time
+ SimpleMutex::scoped_lock lk(nonceMutex);
+ if( !_initialized )
+ init();
+ return __getNonce();
+ }
+
+ nonce64 Security::getNonceDuringInit() {
+ // the mutex might not be inited yet. init phase should be one thread anyway (hopefully we don't spawn threads therein)
+ if( !security._initialized )
+ security.init();
+ return security.__getNonce();
+ }
+
+ nonce64 Security::getNonce() {
+ return security._getNonce();
+ }
+
+ // the name warns callers that this might be a little slow (see code above)
+ unsigned goodRandomNumberSlow() { return (unsigned) Security::getNonce(); }
} // namespace mongo
diff --git a/db/nonce.h b/db/nonce.h
index 21592ab..d6a147a 100644
--- a/db/nonce.h
+++ b/db/nonce.h
@@ -1,4 +1,4 @@
-// nonce.h
+// @file nonce.h
/* Copyright 2009 10gen Inc.
*
@@ -19,24 +19,18 @@
namespace mongo {
- typedef unsigned long long nonce;
+ typedef unsigned long long nonce64;
struct Security {
Security();
-
- nonce getNonce();
-
- /** safe during global var initialization */
- nonce getNonceInitSafe() {
- init();
- return getNonce();
- }
+ static nonce64 getNonce();
+ static nonce64 getNonceDuringInit(); // use this version during global var constructors
private:
+ nonce64 _getNonce();
+ nonce64 __getNonce();
ifstream *_devrandom;
- static bool _initialized;
+ bool _initialized;
void init(); // can call more than once
};
- extern Security security;
-
} // namespace mongo
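
A brief sketch of how callers use the reworked Security interface above: getNonce() for normal runtime use, and getNonceDuringInit() for code that runs during global constructors, before the nonce mutex is guaranteed to exist (the surrounding function and struct are hypothetical):

    namespace mongo {

        // normal runtime path: a static call, mutex-protected internally
        nonce64 makeChallenge() {
            return Security::getNonce();
        }

        // during static initialization, use the init-safe variant instead
        struct SomeGlobal {
            nonce64 seed;
            SomeGlobal() : seed( Security::getNonceDuringInit() ) { }
        };
    }
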
diff --git a/db/oplog.cpp b/db/oplog.cpp
index 1557cbd..dc9db76 100644
--- a/db/oplog.cpp
+++ b/db/oplog.cpp
@@ -23,6 +23,12 @@
#include "commands.h"
#include "repl/rs.h"
#include "stats/counters.h"
+#include "../util/file.h"
+#include "../util/unittest.h"
+#include "queryoptimizer.h"
+#include "ops/update.h"
+#include "ops/delete.h"
+#include "ops/query.h"
namespace mongo {
@@ -113,10 +119,12 @@ namespace mongo {
*b = EOO;
}
+ // global is safe as we are in write lock. we put the static outside the function to avoid the implicit mutex
+ // the compiler would use if inside the function. the reason this is static is to avoid a malloc/free for this
+ // on every logop call.
+ static BufBuilder logopbufbuilder(8*1024);
static void _logOpRS(const char *opstr, const char *ns, const char *logNS, const BSONObj& obj, BSONObj *o2, bool *bb ) {
DEV assertInWriteLock();
- // ^- static is safe as we are in write lock
- static BufBuilder bufbuilder(8*1024);
if ( strncmp(ns, "local.", 6) == 0 ) {
if ( strncmp(ns, "local.slaves", 12) == 0 )
@@ -125,7 +133,6 @@ namespace mongo {
}
const OpTime ts = OpTime::now();
-
long long hashNew;
if( theReplSet ) {
massert(13312, "replSet error : logOp() but not primary?", theReplSet->box.getState().primary());
@@ -141,12 +148,10 @@ namespace mongo {
instead we do a single copy to the destination position in the memory mapped file.
*/
- bufbuilder.reset();
- BSONObjBuilder b(bufbuilder);
-
+ logopbufbuilder.reset();
+ BSONObjBuilder b(logopbufbuilder);
b.appendTimestamp("ts", ts.asDate());
b.append("h", hashNew);
-
b.append("op", opstr);
b.append("ns", ns);
if ( bb )
@@ -361,7 +366,7 @@ namespace mongo {
sz = (256-64) * 1000 * 1000;
#else
sz = 990.0 * 1000 * 1000;
- boost::intmax_t free = freeSpace(); //-1 if call not supported.
+ boost::intmax_t free = File::freeSpace(dbpath); //-1 if call not supported.
double fivePct = free * 0.05;
if ( fivePct > sz )
sz = fivePct;
@@ -389,11 +394,151 @@ namespace mongo {
// -------------------------------------
- struct TestOpTime {
- TestOpTime() {
+ FindingStartCursor::FindingStartCursor( const QueryPlan & qp ) :
+ _qp( qp ),
+ _findingStart( true ),
+ _findingStartMode(),
+ _findingStartTimer( 0 )
+ { init(); }
+
+ void FindingStartCursor::next() {
+ if ( !_findingStartCursor || !_findingStartCursor->ok() ) {
+ _findingStart = false;
+ _c = _qp.newCursor(); // on error, start from beginning
+ destroyClientCursor();
+ return;
+ }
+ switch( _findingStartMode ) {
+ // Initial mode: scan backwards from end of collection
+ case Initial: {
+ if ( !_matcher->matchesCurrent( _findingStartCursor->c() ) ) {
+ _findingStart = false; // found first record out of query range, so scan normally
+ _c = _qp.newCursor( _findingStartCursor->currLoc() );
+ destroyClientCursor();
+ return;
+ }
+ _findingStartCursor->advance();
+ RARELY {
+ if ( _findingStartTimer.seconds() >= __findingStartInitialTimeout ) {
+ // If we've scanned enough, switch to find extent mode.
+ createClientCursor( extentFirstLoc( _findingStartCursor->currLoc() ) );
+ _findingStartMode = FindExtent;
+ return;
+ }
+ }
+ return;
+ }
+ // FindExtent mode: moving backwards through extents, check first
+ // document of each extent.
+ case FindExtent: {
+ if ( !_matcher->matchesCurrent( _findingStartCursor->c() ) ) {
+ _findingStartMode = InExtent;
+ return;
+ }
+ DiskLoc prev = prevExtentFirstLoc( _findingStartCursor->currLoc() );
+ if ( prev.isNull() ) { // hit beginning, so start scanning from here
+ createClientCursor();
+ _findingStartMode = InExtent;
+ return;
+ }
+ // There might be a more efficient implementation than creating new cursor & client cursor each time,
+ // not worrying about that for now
+ createClientCursor( prev );
+ return;
+ }
+ // InExtent mode: once an extent is chosen, find starting doc in the extent.
+ case InExtent: {
+ if ( _matcher->matchesCurrent( _findingStartCursor->c() ) ) {
+ _findingStart = false; // found first record in query range, so scan normally
+ _c = _qp.newCursor( _findingStartCursor->currLoc() );
+ destroyClientCursor();
+ return;
+ }
+ _findingStartCursor->advance();
+ return;
+ }
+ default: {
+ massert( 14038, "invalid _findingStartMode", false );
+ }
+ }
+ }
+
+ DiskLoc FindingStartCursor::extentFirstLoc( const DiskLoc &rec ) {
+ Extent *e = rec.rec()->myExtent( rec );
+ if ( !_qp.nsd()->capLooped() || ( e->myLoc != _qp.nsd()->capExtent ) )
+ return e->firstRecord;
+ // Likely we are on the fresh side of capExtent, so return first fresh record.
+ // If we are on the stale side of capExtent, then the collection is small and it
+ // doesn't matter if we start the extent scan with capFirstNewRecord.
+ return _qp.nsd()->capFirstNewRecord;
+ }
+
+ void wassertExtentNonempty( const Extent *e ) {
+ // TODO ensure this requirement is clearly enforced, or fix.
+ wassert( !e->firstRecord.isNull() );
+ }
+
+ DiskLoc FindingStartCursor::prevExtentFirstLoc( const DiskLoc &rec ) {
+ Extent *e = rec.rec()->myExtent( rec );
+ if ( _qp.nsd()->capLooped() ) {
+ if ( e->xprev.isNull() ) {
+ e = _qp.nsd()->lastExtent.ext();
+ }
+ else {
+ e = e->xprev.ext();
+ }
+ if ( e->myLoc != _qp.nsd()->capExtent ) {
+ wassertExtentNonempty( e );
+ return e->firstRecord;
+ }
+ }
+ else {
+ if ( !e->xprev.isNull() ) {
+ e = e->xprev.ext();
+ wassertExtentNonempty( e );
+ return e->firstRecord;
+ }
+ }
+ return DiskLoc(); // reached beginning of collection
+ }
+
+ void FindingStartCursor::createClientCursor( const DiskLoc &startLoc ) {
+ shared_ptr<Cursor> c = _qp.newCursor( startLoc );
+ _findingStartCursor.reset( new ClientCursor(QueryOption_NoCursorTimeout, c, _qp.ns()) );
+ }
+
+ bool FindingStartCursor::firstDocMatchesOrEmpty() const {
+ shared_ptr<Cursor> c = _qp.newCursor();
+ return !c->ok() || _matcher->matchesCurrent( c.get() );
+ }
+
+ void FindingStartCursor::init() {
+ BSONElement tsElt = _qp.originalQuery()[ "ts" ];
+ massert( 13044, "no ts field in query", !tsElt.eoo() );
+ BSONObjBuilder b;
+ b.append( tsElt );
+ BSONObj tsQuery = b.obj();
+ _matcher.reset(new CoveredIndexMatcher(tsQuery, _qp.indexKey()));
+ if ( firstDocMatchesOrEmpty() ) {
+ _c = _qp.newCursor();
+ _findingStart = false;
+ return;
+ }
+ // Use a ClientCursor here so we can release db mutex while scanning
+ // oplog (can take quite a while with large oplogs).
+ shared_ptr<Cursor> c = _qp.newReverseCursor();
+ _findingStartCursor.reset( new ClientCursor(QueryOption_NoCursorTimeout, c, _qp.ns(), BSONObj()) );
+ _findingStartTimer.reset();
+ _findingStartMode = Initial;
+ }
+
+ // -------------------------------------
+
+ struct TestOpTime : public UnitTest {
+ void run() {
OpTime t;
for ( int i = 0; i < 10; i++ ) {
- OpTime s = OpTime::now();
+ OpTime s = OpTime::now_inlock();
assert( s != t );
t = s;
}
@@ -481,18 +626,23 @@ namespace mongo {
}
void applyOperation_inlock(const BSONObj& op , bool fromRepl ) {
+ assertInWriteLock();
+ LOG(6) << "applying op: " << op << endl;
+
OpCounters * opCounters = fromRepl ? &replOpCounters : &globalOpCounters;
- if( logLevel >= 6 )
- log() << "applying op: " << op << endl;
+ const char *names[] = { "o", "ns", "op", "b" };
+ BSONElement fields[4];
+ op.getFields(4, names, fields);
- assertInWriteLock();
+ BSONObj o;
+ if( fields[0].isABSONObj() )
+ o = fields[0].embeddedObject();
+
+ const char *ns = fields[1].valuestrsafe();
- OpDebug debug;
- BSONObj o = op.getObjectField("o");
- const char *ns = op.getStringField("ns");
// operation type -- see logOp() comments for types
- const char *opType = op.getStringField("op");
+ const char *opType = fields[2].valuestrsafe();
if ( *opType == 'i' ) {
opCounters->gotInsert();
@@ -505,57 +655,53 @@ namespace mongo {
}
else {
// do upserts for inserts as we might get replayed more than once
+ OpDebug debug;
BSONElement _id;
if( !o.getObjectID(_id) ) {
/* No _id. This will be very slow. */
Timer t;
- updateObjects(ns, o, o, true, false, false , debug );
+ updateObjects(ns, o, o, true, false, false, debug );
if( t.millis() >= 2 ) {
RARELY OCCASIONALLY log() << "warning, repl doing slow updates (no _id field) for " << ns << endl;
}
}
else {
- BSONObjBuilder b;
- b.append(_id);
-
/* erh 10/16/2009 - this is probably not relevant any more since its auto-created, but not worth removing */
RARELY ensureHaveIdIndex(ns); // otherwise updates will be slow
/* todo : it may be better to do an insert here, and then catch the dup key exception and do update
then. very few upserts will not be inserts...
*/
+ BSONObjBuilder b;
+ b.append(_id);
updateObjects(ns, o, b.done(), true, false, false , debug );
}
}
}
else if ( *opType == 'u' ) {
opCounters->gotUpdate();
-
RARELY ensureHaveIdIndex(ns); // otherwise updates will be super slow
- updateObjects(ns, o, op.getObjectField("o2"), /*upsert*/ op.getBoolField("b"), /*multi*/ false, /*logop*/ false , debug );
+ OpDebug debug;
+ updateObjects(ns, o, op.getObjectField("o2"), /*upsert*/ fields[3].booleanSafe(), /*multi*/ false, /*logop*/ false , debug );
}
else if ( *opType == 'd' ) {
opCounters->gotDelete();
-
if ( opType[1] == 0 )
- deleteObjects(ns, o, op.getBoolField("b"));
+ deleteObjects(ns, o, /*justOne*/ fields[3].booleanSafe());
else
assert( opType[1] == 'b' ); // "db" advertisement
}
- else if ( *opType == 'n' ) {
- // no op
- }
else if ( *opType == 'c' ) {
opCounters->gotCommand();
-
BufBuilder bb;
BSONObjBuilder ob;
_runCommands(ns, o, bb, ob, true, 0);
}
+ else if ( *opType == 'n' ) {
+ // no op
+ }
else {
- stringstream ss;
- ss << "unknown opType [" << opType << "]";
- throw MsgAssertionException( 13141 , ss.str() );
+ throw MsgAssertionException( 14825 , ErrorMsg("error in applyOperation : unknown opType ", *opType) );
}
}
@@ -566,9 +712,9 @@ namespace mongo {
virtual LockType locktype() const { return WRITE; }
ApplyOpsCmd() : Command( "applyOps" ) {}
virtual void help( stringstream &help ) const {
- help << "examples: { applyOps : [ ] , preCondition : [ { ns : ... , q : ... , res : ... } ] }";
+ help << "internal (sharding)\n{ applyOps : [ ] , preCondition : [ { ns : ... , q : ... , res : ... } ] }";
}
- virtual bool run(const string& dbname, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ virtual bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
if ( cmdObj.firstElement().type() != Array ) {
errmsg = "ops has to be an array";
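
The applyOperation_inlock() rewrite above pulls the o/ns/op/b fields out of an oplog entry in a single pass with BSONObj::getFields() instead of separate getObjectField()/getStringField() lookups. A minimal sketch of the same pattern, with the field names copied from the hunk (the function itself is hypothetical):

    void extractOplogFields( const mongo::BSONObj &op ) {
        const char *names[] = { "o", "ns", "op", "b" };
        mongo::BSONElement fields[4];
        op.getFields( 4, names, fields );                 // one scan of the object

        mongo::BSONObj o;
        if ( fields[0].isABSONObj() )
            o = fields[0].embeddedObject();               // document being applied
        const char *ns     = fields[1].valuestrsafe();    // target namespace
        const char *opType = fields[2].valuestrsafe();    // 'i', 'u', 'd', 'c', or 'n'
        bool flag          = fields[3].booleanSafe();     // upsert / justOne, depending on opType
        // ... dispatch on *opType as applyOperation_inlock() does ...
        (void)ns; (void)opType; (void)flag; (void)o;      // silence unused warnings in this sketch
    }
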
diff --git a/db/oplog.h b/db/oplog.h
index d9073ab..2f2b286 100644
--- a/db/oplog.h
+++ b/db/oplog.h
@@ -26,8 +26,7 @@
#include "pdfile.h"
#include "db.h"
#include "dbhelpers.h"
-#include "query.h"
-#include "queryoptimizer.h"
+#include "clientcursor.h"
#include "../client/dbclient.h"
#include "../util/optime.h"
#include "../util/timer.h"
@@ -64,82 +63,41 @@ namespace mongo {
extern int __findingStartInitialTimeout; // configurable for testing
+ class QueryPlan;
+
+ /** Implements an optimized procedure for finding the first op in the oplog. */
class FindingStartCursor {
public:
- FindingStartCursor( const QueryPlan & qp ) :
- _qp( qp ),
- _findingStart( true ),
- _findingStartMode(),
- _findingStartTimer( 0 )
- { init(); }
+
+ /**
+ * The cursor will attempt to find the first op in the oplog matching the
+ * 'ts' field of the qp's query.
+ */
+ FindingStartCursor( const QueryPlan & qp );
+
+ /** @return true if the first matching op in the oplog has been found. */
bool done() const { return !_findingStart; }
- shared_ptr<Cursor> cRelease() { return _c; }
- void next() {
- if ( !_findingStartCursor || !_findingStartCursor->ok() ) {
- _findingStart = false;
- _c = _qp.newCursor(); // on error, start from beginning
- destroyClientCursor();
- return;
- }
- switch( _findingStartMode ) {
- case Initial: {
- if ( !_matcher->matches( _findingStartCursor->currKey(), _findingStartCursor->currLoc() ) ) {
- _findingStart = false; // found first record out of query range, so scan normally
- _c = _qp.newCursor( _findingStartCursor->currLoc() );
- destroyClientCursor();
- return;
- }
- _findingStartCursor->advance();
- RARELY {
- if ( _findingStartTimer.seconds() >= __findingStartInitialTimeout ) {
- createClientCursor( startLoc( _findingStartCursor->currLoc() ) );
- _findingStartMode = FindExtent;
- return;
- }
- }
- return;
- }
- case FindExtent: {
- if ( !_matcher->matches( _findingStartCursor->currKey(), _findingStartCursor->currLoc() ) ) {
- _findingStartMode = InExtent;
- return;
- }
- DiskLoc prev = prevLoc( _findingStartCursor->currLoc() );
- if ( prev.isNull() ) { // hit beginning, so start scanning from here
- createClientCursor();
- _findingStartMode = InExtent;
- return;
- }
- // There might be a more efficient implementation than creating new cursor & client cursor each time,
- // not worrying about that for now
- createClientCursor( prev );
- return;
- }
- case InExtent: {
- if ( _matcher->matches( _findingStartCursor->currKey(), _findingStartCursor->currLoc() ) ) {
- _findingStart = false; // found first record in query range, so scan normally
- _c = _qp.newCursor( _findingStartCursor->currLoc() );
- destroyClientCursor();
- return;
- }
- _findingStartCursor->advance();
- return;
- }
- default: {
- massert( 12600, "invalid _findingStartMode", false );
- }
- }
- }
+
+ /** @return cursor pointing to the first matching op, if done(). */
+ shared_ptr<Cursor> cursor() { verify( 14835, done() ); return _c; }
+
+ /** Iterate the cursor, to continue trying to find matching op. */
+ void next();
+
+ /** Yield cursor, if not done(). */
bool prepareToYield() {
if ( _findingStartCursor ) {
return _findingStartCursor->prepareToYield( _yieldData );
}
- return true;
+ return false;
}
+
+ /** Recover from cursor yield. */
void recoverFromYield() {
if ( _findingStartCursor ) {
if ( !ClientCursor::recoverFromYield( _yieldData ) ) {
_findingStartCursor.reset( 0 );
+ msgassertedNoTrace( 15889, "FindingStartCursor::recoverFromYield() failed to recover" );
}
}
}
@@ -153,56 +111,15 @@ namespace mongo {
ClientCursor::CleanupPointer _findingStartCursor;
shared_ptr<Cursor> _c;
ClientCursor::YieldData _yieldData;
- DiskLoc startLoc( const DiskLoc &rec ) {
- Extent *e = rec.rec()->myExtent( rec );
- if ( !_qp.nsd()->capLooped() || ( e->myLoc != _qp.nsd()->capExtent ) )
- return e->firstRecord;
- // Likely we are on the fresh side of capExtent, so return first fresh record.
- // If we are on the stale side of capExtent, then the collection is small and it
- // doesn't matter if we start the extent scan with capFirstNewRecord.
- return _qp.nsd()->capFirstNewRecord;
- }
+ DiskLoc extentFirstLoc( const DiskLoc &rec );
- // should never have an empty extent in the oplog, so don't worry about that case
- DiskLoc prevLoc( const DiskLoc &rec ) {
- Extent *e = rec.rec()->myExtent( rec );
- if ( _qp.nsd()->capLooped() ) {
- if ( e->xprev.isNull() )
- e = _qp.nsd()->lastExtent.ext();
- else
- e = e->xprev.ext();
- if ( e->myLoc != _qp.nsd()->capExtent )
- return e->firstRecord;
- }
- else {
- if ( !e->xprev.isNull() ) {
- e = e->xprev.ext();
- return e->firstRecord;
- }
- }
- return DiskLoc(); // reached beginning of collection
- }
- void createClientCursor( const DiskLoc &startLoc = DiskLoc() ) {
- shared_ptr<Cursor> c = _qp.newCursor( startLoc );
- _findingStartCursor.reset( new ClientCursor(QueryOption_NoCursorTimeout, c, _qp.ns()) );
- }
+ DiskLoc prevExtentFirstLoc( const DiskLoc &rec );
+ void createClientCursor( const DiskLoc &startLoc = DiskLoc() );
void destroyClientCursor() {
_findingStartCursor.reset( 0 );
}
- void init() {
- // Use a ClientCursor here so we can release db mutex while scanning
- // oplog (can take quite a while with large oplogs).
- shared_ptr<Cursor> c = _qp.newReverseCursor();
- _findingStartCursor.reset( new ClientCursor(QueryOption_NoCursorTimeout, c, _qp.ns(), BSONObj()) );
- _findingStartTimer.reset();
- _findingStartMode = Initial;
- BSONElement tsElt = _qp.originalQuery()[ "ts" ];
- massert( 13044, "no ts field in query", !tsElt.eoo() );
- BSONObjBuilder b;
- b.append( tsElt );
- BSONObj tsQuery = b.obj();
- _matcher.reset(new CoveredIndexMatcher(tsQuery, _qp.indexKey()));
- }
+ void init();
+ bool firstDocMatchesOrEmpty() const;
};
void pretouchOperation(const BSONObj& op);
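
For reference, a rough sketch of the intended calling pattern for the FindingStartCursor interface declared above: drive next() until done(), optionally yielding around each step, then take the positioned cursor (the wrapper function is hypothetical):

    shared_ptr<Cursor> findOplogStart( const QueryPlan &qp ) {
        FindingStartCursor fsc( qp );       // the plan's query must contain a "ts" field
        while ( !fsc.done() ) {
            fsc.next();                     // walks Initial -> FindExtent -> InExtent
            if ( fsc.prepareToYield() ) {   // only succeeds while a ClientCursor is held
                // ... release and reacquire the db lock here ...
                fsc.recoverFromYield();
            }
        }
        return fsc.cursor();                // positioned at the first matching op
    }
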
diff --git a/db/oplogreader.h b/db/oplogreader.h
index 54c90d9..01f76f4 100644
--- a/db/oplogreader.h
+++ b/db/oplogreader.h
@@ -12,8 +12,8 @@ namespace mongo {
still fairly awkward but a start.
*/
class OplogReader {
- auto_ptr<DBClientConnection> _conn;
- auto_ptr<DBClientCursor> cursor;
+ shared_ptr<DBClientConnection> _conn;
+ shared_ptr<DBClientCursor> cursor;
public:
OplogReader() {
@@ -40,6 +40,9 @@ namespace mongo {
/* ok to call if already connected */
bool connect(string hostname);
+ bool connect(const BSONObj& rid, const int from, const string& to);
+
+
void tailCheck() {
if( cursor.get() && cursor->isDead() ) {
log() << "repl: old cursor isDead, will initiate a new one" << endl;
@@ -51,25 +54,39 @@ namespace mongo {
void query(const char *ns, const BSONObj& query) {
assert( !haveCursor() );
- cursor = _conn->query(ns, query, 0, 0, 0, QueryOption_SlaveOk);
+ cursor.reset( _conn->query(ns, query, 0, 0, 0, QueryOption_SlaveOk).release() );
+ }
+
+ void queryGTE(const char *ns, OpTime t) {
+ BSONObjBuilder q;
+ q.appendDate("$gte", t.asDate());
+ BSONObjBuilder q2;
+ q2.append("ts", q.done());
+ query(ns, q2.done());
}
- void tailingQuery(const char *ns, const BSONObj& query) {
+ void tailingQuery(const char *ns, const BSONObj& query, const BSONObj* fields=0) {
assert( !haveCursor() );
log(2) << "repl: " << ns << ".find(" << query.toString() << ')' << endl;
- cursor = _conn->query( ns, query, 0, 0, 0,
- QueryOption_CursorTailable | QueryOption_SlaveOk | QueryOption_OplogReplay |
- /* TODO: slaveok maybe shouldn't use? */
- QueryOption_AwaitData
- );
+ cursor.reset( _conn->query( ns, query, 0, 0, fields,
+ QueryOption_CursorTailable | QueryOption_SlaveOk | QueryOption_OplogReplay |
+ /* TODO: slaveok maybe shouldn't use? */
+ QueryOption_AwaitData
+ ).release() );
}
- void tailingQueryGTE(const char *ns, OpTime t) {
+ void tailingQueryGTE(const char *ns, OpTime t, const BSONObj* fields=0) {
BSONObjBuilder q;
q.appendDate("$gte", t.asDate());
BSONObjBuilder query;
query.append("ts", q.done());
- tailingQuery(ns, query.done());
+ tailingQuery(ns, query.done(), fields);
+ }
+
+ /* Do a tailing query, but only send the ts field back. */
+ void ghostQueryGTE(const char *ns, OpTime t) {
+ const BSONObj fields = BSON("ts" << 1 << "_id" << 0);
+ return tailingQueryGTE(ns, t, &fields);
}
bool more() {
@@ -93,13 +110,13 @@ namespace mongo {
BSONObj nextSafe() { return cursor->nextSafe(); }
- BSONObj next() {
- return cursor->next();
- }
+ BSONObj next() { return cursor->next(); }
- void putBack(BSONObj op) {
- cursor->putBack(op);
- }
+ void putBack(BSONObj op) { cursor->putBack(op); }
+
+ private:
+ bool commonConnect(const string& hostName);
+ bool passthroughHandshake(const BSONObj& rid, const int f);
};
}
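
A small usage sketch of the OplogReader helpers above: tailingQueryGTE() with the new optional field selector, and ghostQueryGTE() when only timestamps are needed (the host string and calling function are made up for the example):

    void tailOplogFrom( const mongo::OpTime &last ) {
        mongo::OplogReader reader;
        if ( !reader.connect( "localhost:27017" ) )         // hypothetical host
            return;
        reader.tailingQueryGTE( "local.oplog.rs", last );   // full op documents
        // reader.ghostQueryGTE( "local.oplog.rs", last );  // ts-only variant
        while ( reader.more() ) {
            mongo::BSONObj op = reader.nextSafe();
            // ... apply or record op ...
        }
    }
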
diff --git a/db/ops/delete.cpp b/db/ops/delete.cpp
new file mode 100644
index 0000000..3009047
--- /dev/null
+++ b/db/ops/delete.cpp
@@ -0,0 +1,242 @@
+// delete.cpp
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "delete.h"
+#include "../queryoptimizer.h"
+#include "../oplog.h"
+
+namespace mongo {
+
+ // Just try to identify best plan.
+ class DeleteOp : public MultiCursor::CursorOp {
+ public:
+ DeleteOp( bool justOne, int& bestCount, int orClauseIndex = -1 ) :
+ justOne_( justOne ),
+ count_(),
+ bestCount_( bestCount ),
+ _nscanned(),
+ _orClauseIndex( orClauseIndex ) {
+ }
+ virtual void _init() {
+ c_ = qp().newCursor();
+ }
+ virtual bool prepareToYield() {
+ if ( _orClauseIndex > 0 ) {
+ return false;
+ }
+ if ( ! _cc ) {
+ _cc.reset( new ClientCursor( QueryOption_NoCursorTimeout , c_ , qp().ns() ) );
+ }
+ return _cc->prepareToYield( _yieldData );
+ }
+ virtual void recoverFromYield() {
+ if ( !ClientCursor::recoverFromYield( _yieldData ) ) {
+ _cc.reset();
+ c_.reset();
+ massert( 13340, "cursor dropped during delete", false );
+ }
+ }
+ virtual long long nscanned() {
+ return c_.get() ? c_->nscanned() : _nscanned;
+ }
+ virtual void next() {
+ if ( !c_->ok() ) {
+ setComplete();
+ return;
+ }
+
+ DiskLoc rloc = c_->currLoc();
+
+ if ( matcher( c_ )->matchesCurrent(c_.get()) ) {
+ if ( !c_->getsetdup(rloc) )
+ ++count_;
+ }
+
+ c_->advance();
+ _nscanned = c_->nscanned();
+
+ if ( _orClauseIndex > 0 && _nscanned >= 100 ) {
+ setComplete();
+ return;
+ }
+
+ if ( count_ > bestCount_ )
+ bestCount_ = count_;
+
+ if ( count_ > 0 ) {
+ if ( justOne_ )
+ setComplete();
+ else if ( _nscanned >= 100 && count_ == bestCount_ )
+ setComplete();
+ }
+ }
+ virtual bool mayRecordPlan() const { return !justOne_; }
+ virtual QueryOp *_createChild() const {
+ bestCount_ = 0; // should be safe to reset this in contexts where createChild() is called
+ return new DeleteOp( justOne_, bestCount_, _orClauseIndex + 1 );
+ }
+ virtual shared_ptr<Cursor> newCursor() const { return qp().newCursor(); }
+ private:
+ bool justOne_;
+ int count_;
+ int &bestCount_;
+ long long _nscanned;
+ shared_ptr<Cursor> c_;
+ ClientCursor::CleanupPointer _cc;
+ ClientCursor::YieldData _yieldData;
+ // Avoid yielding in the MultiPlanScanner when not the first $or clause - just a temporary implementation for now. SERVER-3555
+ int _orClauseIndex;
+ };
+
+ /* ns: namespace, e.g. <database>.<collection>
+ pattern: the "where" clause / criteria
+ justOne: stop after 1 match
+ god: allow access to system namespaces, and don't yield
+ */
+ long long deleteObjects(const char *ns, BSONObj pattern, bool justOneOrig, bool logop, bool god, RemoveSaver * rs ) {
+ if( !god ) {
+ if ( strstr(ns, ".system.") ) {
+ /* note a delete from system.indexes would corrupt the db
+ if done here, as there are pointers into those objects in
+ NamespaceDetails.
+ */
+ uassert(12050, "cannot delete from system namespace", legalClientSystemNS( ns , true ) );
+ }
+ if ( strchr( ns , '$' ) ) {
+ log() << "cannot delete from collection with reserved $ in name: " << ns << endl;
+ uassert( 10100 , "cannot delete from collection with reserved $ in name", strchr(ns, '$') == 0 );
+ }
+ }
+
+ {
+ NamespaceDetails *d = nsdetails( ns );
+ if ( ! d )
+ return 0;
+ uassert( 10101 , "can't remove from a capped collection" , ! d->capped );
+ }
+
+ long long nDeleted = 0;
+
+ int best = 0;
+ shared_ptr< MultiCursor::CursorOp > opPtr( new DeleteOp( justOneOrig, best ) );
+ shared_ptr< MultiCursor > creal( new MultiCursor( ns, pattern, BSONObj(), opPtr, !god ) );
+
+ if( !creal->ok() )
+ return nDeleted;
+
+ shared_ptr< Cursor > cPtr = creal;
+ auto_ptr<ClientCursor> cc( new ClientCursor( QueryOption_NoCursorTimeout, cPtr, ns) );
+ cc->setDoingDeletes( true );
+
+ CursorId id = cc->cursorid();
+
+ bool justOne = justOneOrig;
+ bool canYield = !god && !creal->matcher()->docMatcher().atomic();
+
+ do {
+ // TODO: we can generalize this I believe
+ //
+ bool willNeedRecord = creal->matcher()->needRecord() || pattern.isEmpty() || isSimpleIdQuery( pattern );
+ if ( ! willNeedRecord ) {
+ // TODO: this is a total hack right now
+ // check if the index fully encompasses the query
+
+ if ( pattern.nFields() == 1 &&
+ str::equals( pattern.firstElement().fieldName() , creal->indexKeyPattern().firstElement().fieldName() ) )
+ willNeedRecord = true;
+ }
+
+ if ( canYield && ! cc->yieldSometimes( willNeedRecord ? ClientCursor::WillNeed : ClientCursor::MaybeCovered ) ) {
+ cc.release(); // has already been deleted elsewhere
+ // TODO should we assert or something?
+ break;
+ }
+ if ( !cc->ok() ) {
+ break; // if we yielded, could have hit the end
+ }
+
+ // this way we can avoid calling updateLocation() every time (expensive)
+ // as well as handle some other nuances
+ cc->setDoingDeletes( true );
+
+ DiskLoc rloc = cc->currLoc();
+ BSONObj key = cc->currKey();
+
+ // NOTE Calling advance() may change the matcher, so it's important
+ // to try to match first.
+ bool match = creal->matcher()->matchesCurrent(creal.get());
+
+ if ( ! cc->advance() )
+ justOne = true;
+
+ if ( ! match )
+ continue;
+
+ assert( !cc->c()->getsetdup(rloc) ); // can't be a dup, we deleted it!
+
+ if ( !justOne ) {
+ /* NOTE: this is SLOW. noteLocation() was designed to be called across getMore
+ blocks; here we might call it millions of times, which would be bad.
+ */
+ cc->c()->noteLocation();
+ }
+
+ if ( logop ) {
+ BSONElement e;
+ if( BSONObj( rloc.rec() ).getObjectID( e ) ) {
+ BSONObjBuilder b;
+ b.append( e );
+ bool replJustOne = true;
+ logOp( "d", ns, b.done(), 0, &replJustOne );
+ }
+ else {
+ problem() << "deleted object without id, not logging" << endl;
+ }
+ }
+
+ if ( rs )
+ rs->goingToDelete( rloc.obj() /*cc->c->current()*/ );
+
+ theDataFileMgr.deleteRecord(ns, rloc.rec(), rloc);
+ nDeleted++;
+ if ( justOne ) {
+ break;
+ }
+ cc->c()->checkLocation();
+
+ if( !god )
+ getDur().commitIfNeeded();
+
+ if( debug && god && nDeleted == 100 )
+ log() << "warning high number of deletes with god=true which could use significant memory" << endl;
+ }
+ while ( cc->ok() );
+
+ if ( cc.get() && ClientCursor::find( id , false ) == 0 ) {
+ // TODO: remove this and the id declaration above if this doesn't trigger
+ // if it does, then i'm very confused (ERH 06/2011)
+ error() << "this should be impossible" << endl;
+ printStackTrace();
+ cc.release();
+ }
+
+ return nDeleted;
+ }
+
+}
diff --git a/db/ops/delete.h b/db/ops/delete.h
new file mode 100644
index 0000000..a74b7a6
--- /dev/null
+++ b/db/ops/delete.h
@@ -0,0 +1,33 @@
+// delete.h
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+#include "../../pch.h"
+#include "../jsobj.h"
+#include "../clientcursor.h"
+
+namespace mongo {
+
+ class RemoveSaver;
+
+ // If justOne is true, deletedId is set to the id of the deleted object.
+ long long deleteObjects(const char *ns, BSONObj pattern, bool justOne, bool logop = false, bool god=false, RemoveSaver * rs=0);
+
+
+}
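
For reference, a minimal sketch of calling the deleteObjects() entry point declared above (the namespace and criteria are invented; logop/god/rs keep their header defaults unless noted):

    long long removeMatchingDocs() {
        // delete every document in test.foo matching { x: 1 }, logging each delete
        mongo::BSONObj pattern = BSON( "x" << 1 );
        return mongo::deleteObjects( "test.foo", pattern,
                                     /*justOne*/ false, /*logop*/ true );
    }
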
diff --git a/db/query.cpp b/db/ops/query.cpp
index 671e714..cf4dc98 100644
--- a/db/query.cpp
+++ b/db/ops/query.cpp
@@ -18,24 +18,25 @@
#include "pch.h"
#include "query.h"
-#include "pdfile.h"
-#include "jsobjmanipulator.h"
-#include "../bson/util/builder.h"
+#include "../pdfile.h"
+#include "../jsobjmanipulator.h"
+#include "../../bson/util/builder.h"
#include <time.h>
-#include "introspect.h"
-#include "btree.h"
-#include "../util/lruishmap.h"
-#include "json.h"
-#include "repl.h"
-#include "replpair.h"
-#include "scanandorder.h"
-#include "security.h"
-#include "curop-inl.h"
-#include "commands.h"
-#include "queryoptimizer.h"
-#include "lasterror.h"
-#include "../s/d_logic.h"
-#include "repl_block.h"
+#include "../introspect.h"
+#include "../btree.h"
+#include "../../util/lruishmap.h"
+#include "../json.h"
+#include "../repl.h"
+#include "../replutil.h"
+#include "../scanandorder.h"
+#include "../security.h"
+#include "../curop-inl.h"
+#include "../commands.h"
+#include "../queryoptimizer.h"
+#include "../lasterror.h"
+#include "../../s/d_logic.h"
+#include "../repl_block.h"
+#include "../../server.h"
namespace mongo {
@@ -50,204 +51,14 @@ namespace mongo {
extern bool useCursors;
extern bool useHints;
- // Just try to identify best plan.
- class DeleteOp : public MultiCursor::CursorOp {
- public:
- DeleteOp( bool justOne, int& bestCount ) :
- justOne_( justOne ),
- count_(),
- bestCount_( bestCount ),
- _nscanned() {
- }
- virtual void _init() {
- c_ = qp().newCursor();
- }
- virtual bool prepareToYield() {
- if ( ! _cc ) {
- _cc.reset( new ClientCursor( QueryOption_NoCursorTimeout , c_ , qp().ns() ) );
- }
- return _cc->prepareToYield( _yieldData );
- }
- virtual void recoverFromYield() {
- if ( !ClientCursor::recoverFromYield( _yieldData ) ) {
- _cc.reset();
- c_.reset();
- massert( 13340, "cursor dropped during delete", false );
- }
- }
- virtual long long nscanned() {
- return c_.get() ? c_->nscanned() : _nscanned;
- }
- virtual void next() {
- if ( !c_->ok() ) {
- setComplete();
- return;
- }
-
- DiskLoc rloc = c_->currLoc();
-
- if ( matcher()->matches(c_->currKey(), rloc ) ) {
- if ( !c_->getsetdup(rloc) )
- ++count_;
- }
-
- c_->advance();
- _nscanned = c_->nscanned();
- if ( count_ > bestCount_ )
- bestCount_ = count_;
-
- if ( count_ > 0 ) {
- if ( justOne_ )
- setComplete();
- else if ( _nscanned >= 100 && count_ == bestCount_ )
- setComplete();
- }
- }
- virtual bool mayRecordPlan() const { return !justOne_; }
- virtual QueryOp *_createChild() const {
- bestCount_ = 0; // should be safe to reset this in contexts where createChild() is called
- return new DeleteOp( justOne_, bestCount_ );
- }
- virtual shared_ptr<Cursor> newCursor() const { return qp().newCursor(); }
- private:
- bool justOne_;
- int count_;
- int &bestCount_;
- long long _nscanned;
- shared_ptr<Cursor> c_;
- ClientCursor::CleanupPointer _cc;
- ClientCursor::YieldData _yieldData;
- };
-
- /* ns: namespace, e.g. <database>.<collection>
- pattern: the "where" clause / criteria
- justOne: stop after 1 match
- god: allow access to system namespaces, and don't yield
- */
- long long deleteObjects(const char *ns, BSONObj pattern, bool justOneOrig, bool logop, bool god, RemoveSaver * rs ) {
- if( !god ) {
- if ( strstr(ns, ".system.") ) {
- /* note a delete from system.indexes would corrupt the db
- if done here, as there are pointers into those objects in
- NamespaceDetails.
- */
- uassert(12050, "cannot delete from system namespace", legalClientSystemNS( ns , true ) );
- }
- if ( strchr( ns , '$' ) ) {
- log() << "cannot delete from collection with reserved $ in name: " << ns << endl;
- uassert( 10100 , "cannot delete from collection with reserved $ in name", strchr(ns, '$') == 0 );
- }
- }
-
- NamespaceDetails *d = nsdetails( ns );
- if ( ! d )
- return 0;
- uassert( 10101 , "can't remove from a capped collection" , ! d->capped );
-
- long long nDeleted = 0;
-
- int best = 0;
- shared_ptr< MultiCursor::CursorOp > opPtr( new DeleteOp( justOneOrig, best ) );
- shared_ptr< MultiCursor > creal( new MultiCursor( ns, pattern, BSONObj(), opPtr, !god ) );
-
- if( !creal->ok() )
- return nDeleted;
-
- shared_ptr< Cursor > cPtr = creal;
- auto_ptr<ClientCursor> cc( new ClientCursor( QueryOption_NoCursorTimeout, cPtr, ns) );
- cc->setDoingDeletes( true );
-
- CursorId id = cc->cursorid();
-
- bool justOne = justOneOrig;
- bool canYield = !god && !creal->matcher()->docMatcher().atomic();
-
- do {
- if ( canYield && ! cc->yieldSometimes() ) {
- cc.release(); // has already been deleted elsewhere
- // TODO should we assert or something?
- break;
- }
- if ( !cc->ok() ) {
- break; // if we yielded, could have hit the end
- }
-
- // this way we can avoid calling updateLocation() every time (expensive)
- // as well as some other nuances handled
- cc->setDoingDeletes( true );
-
- DiskLoc rloc = cc->currLoc();
- BSONObj key = cc->currKey();
-
- // NOTE Calling advance() may change the matcher, so it's important
- // to try to match first.
- bool match = creal->matcher()->matches( key , rloc );
-
- if ( ! cc->advance() )
- justOne = true;
-
- if ( ! match )
- continue;
-
- assert( !cc->c()->getsetdup(rloc) ); // can't be a dup, we deleted it!
-
- if ( !justOne ) {
- /* NOTE: this is SLOW. this is not good, noteLocation() was designed to be called across getMore
- blocks. here we might call millions of times which would be bad.
- */
- cc->c()->noteLocation();
- }
-
- if ( logop ) {
- BSONElement e;
- if( BSONObj( rloc.rec() ).getObjectID( e ) ) {
- BSONObjBuilder b;
- b.append( e );
- bool replJustOne = true;
- logOp( "d", ns, b.done(), 0, &replJustOne );
- }
- else {
- problem() << "deleted object without id, not logging" << endl;
- }
- }
-
- if ( rs )
- rs->goingToDelete( rloc.obj() /*cc->c->current()*/ );
-
- theDataFileMgr.deleteRecord(ns, rloc.rec(), rloc);
- nDeleted++;
- if ( justOne ) {
- break;
- }
- cc->c()->checkLocation();
-
- if( !god )
- getDur().commitIfNeeded();
-
- if( debug && god && nDeleted == 100 )
- log() << "warning high number of deletes with god=true which could use significant memory" << endl;
- }
- while ( cc->ok() );
-
- if ( cc.get() && ClientCursor::find( id , false ) == 0 ) {
- cc.release();
- }
-
- return nDeleted;
- }
-
- int otherTraceLevel = 0;
-
- int initialExtentSize(int len);
-
bool runCommands(const char *ns, BSONObj& jsobj, CurOp& curop, BufBuilder &b, BSONObjBuilder& anObjBuilder, bool fromRepl, int queryOptions) {
try {
return _runCommands(ns, jsobj, b, anObjBuilder, fromRepl, queryOptions);
}
catch ( AssertionException& e ) {
e.getInfo().append( anObjBuilder , "assertion" , "assertionCode" );
+ curop.debug().exceptionInfo = e.getInfo();
}
- curop.debug().str << " assertion ";
anObjBuilder.append("errmsg", "db assertion failure");
anObjBuilder.append("ok", 0.0);
BSONObj x = anObjBuilder.done();
@@ -255,8 +66,6 @@ namespace mongo {
return true;
}
- int nCaught = 0;
-
BSONObj id_obj = fromjson("{\"_id\":1}");
BSONObj empty_obj = fromjson("{}");
@@ -273,6 +82,7 @@ namespace mongo {
qr->startingFrom = 0;
qr->len = b.len();
qr->setOperation(opReply);
+ qr->initializeResultFlags();
qr->nReturned = 0;
b.decouple();
return qr;
@@ -283,35 +93,29 @@ namespace mongo {
ClientCursor::Pointer p(cursorid);
ClientCursor *cc = p.c();
- int bufSize = 512;
- if ( cc ) {
- bufSize += sizeof( QueryResult );
- bufSize += MaxBytesToReturnToClientAtOnce;
- }
+ int bufSize = 512 + sizeof( QueryResult ) + MaxBytesToReturnToClientAtOnce;
BufBuilder b( bufSize );
-
b.skip(sizeof(QueryResult));
-
int resultFlags = ResultFlag_AwaitCapable;
int start = 0;
int n = 0;
- if ( !cc ) {
+ if ( unlikely(!cc) ) {
log() << "getMore: cursorid not found " << ns << " " << cursorid << endl;
cursorid = 0;
resultFlags = ResultFlag_CursorNotFound;
}
else {
+ // check that the ns has not been spoofed to differ from the one the cursor was originally created with
+ uassert(14833, "auth error", str::equals(ns, cc->ns().c_str()));
+
if ( pass == 0 )
cc->updateSlaveLocation( curop );
int queryOptions = cc->queryOptions();
-
- if( pass == 0 ) {
- StringBuilder& ss = curop.debug().str;
- ss << " getMore: " << cc->query().toString() << " ";
- }
+
+ curop.debug().query = cc->query();
start = cc->pos();
Cursor *c = cc->c();
@@ -322,6 +126,9 @@ namespace mongo {
if ( cc->modifiedKeys() == false && cc->isMultiKey() == false && cc->fields )
keyFieldsOnly.reset( cc->fields->checkKey( cc->indexKeyPattern() ) );
+ // This manager may be stale, but it's the state of chunking when the cursor was created.
+ ShardChunkManagerPtr manager = cc->getChunkManager();
+
while ( 1 ) {
if ( !c->ok() ) {
if ( c->tailable() ) {
@@ -333,7 +140,7 @@ namespace mongo {
continue;
if( n == 0 && (queryOptions & QueryOption_AwaitData) && pass < 1000 ) {
- throw GetMoreWaitException();
+ return 0;
}
break;
@@ -345,15 +152,13 @@ namespace mongo {
cc = 0;
break;
}
+
// in some cases (clone collection) there won't be a matcher
- if ( c->matcher() && !c->matcher()->matches(c->currKey(), c->currLoc() ) ) {
+ if ( c->matcher() && !c->matcher()->matchesCurrent( c ) ) {
}
- /*
- TODO
- else if ( _chunkMatcher && ! _chunkMatcher->belongsToMe( c->currKey(), c->currLoc() ) ){
- cout << "TEMP skipping un-owned chunk: " << c->current() << endl;
+ else if ( manager && ! manager->belongsToMe( cc ) ){
+ LOG(2) << "cursor skipping document in un-owned chunk: " << c->current() << endl;
}
- */
else {
if( c->getsetdup(c->currLoc()) ) {
//out() << " but it's a dup \n";
@@ -380,7 +185,7 @@ namespace mongo {
}
c->advance();
- if ( ! cc->yieldSometimes() ) {
+ if ( ! cc->yieldSometimes( ClientCursor::MaybeCovered ) ) {
ClientCursor::erase(cursorid);
cursorid = 0;
cc = 0;
@@ -422,7 +227,7 @@ namespace mongo {
virtual void _init() {
_c = qp().newCursor();
_capped = _c->capped();
- if ( qp().exactKeyMatch() && ! matcher()->needRecord() ) {
+ if ( qp().exactKeyMatch() && ! matcher( _c )->needRecord() ) {
_query = qp().simplifiedQuery( qp().indexKey() );
_bc = dynamic_cast< BtreeCursor* >( _c.get() );
_bc->forgetEndKey();
@@ -452,6 +257,9 @@ namespace mongo {
if ( _capped ) {
msgassertedNoTrace( 13337, str::stream() << "capped cursor overrun during count: " << _ns );
}
+ else if ( qp().mustAssertOnYieldFailure() ) {
+ msgassertedNoTrace( 15891, str::stream() << "CountOp::recoverFromYield() failed to recover: " << _ns );
+ }
else {
// we don't fail query since we're fine with returning partial data if collection dropped
}
@@ -467,7 +275,7 @@ namespace mongo {
_nscanned = _c->nscanned();
if ( _bc ) {
if ( _firstMatch.isEmpty() ) {
- _firstMatch = _bc->currKeyNode().key.copy();
+ _firstMatch = _bc->currKey().getOwned();
// if not match
if ( _query.woCompare( _firstMatch, BSONObj(), false ) ) {
setComplete();
@@ -476,7 +284,7 @@ namespace mongo {
_gotOne();
}
else {
- if ( ! _firstMatch.woEqual( _bc->currKeyNode().key ) ) {
+ if ( ! _firstMatch.equal( _bc->currKey() ) ) {
setComplete();
return;
}
@@ -484,7 +292,7 @@ namespace mongo {
}
}
else {
- if ( !matcher()->matches(_c->currKey(), _c->currLoc() ) ) {
+ if ( !matcher( _c )->matchesCurrent( _c.get() ) ) {
}
else if( !_c->getsetdup(_c->currLoc()) ) {
_gotOne();
@@ -610,6 +418,8 @@ namespace mongo {
*_b << "indexBounds" << c->prettyIndexBounds();
+ c->explainDetails( *_b );
+
if ( !hint ) {
*_b << "allPlans" << _a->arr();
}
@@ -689,7 +499,7 @@ namespace mongo {
if ( qp().scanAndOrderRequired() ) {
_inMemSort = true;
- _so.reset( new ScanAndOrder( _pq.getSkip() , _pq.getNumToReturn() , _pq.getOrder() ) );
+ _so.reset( new ScanAndOrder( _pq.getSkip() , _pq.getNumToReturn() , _pq.getOrder(), qp().multikeyFrs() ) );
}
if ( _pq.isExplain() ) {
@@ -728,6 +538,9 @@ namespace mongo {
if ( _capped ) {
msgassertedNoTrace( 13338, str::stream() << "capped cursor overrun during query: " << _pq.ns() );
}
+ else if ( qp().mustAssertOnYieldFailure() ) {
+ msgassertedNoTrace( 15890, str::stream() << "UserQueryOp::recoverFromYield() failed to recover: " << _pq.ns() );
+ }
else {
// we don't fail query since we're fine with returning partial data if collection dropped
@@ -746,13 +559,13 @@ namespace mongo {
virtual void next() {
if ( _findingStartCursor.get() ) {
+ if ( !_findingStartCursor->done() ) {
+ _findingStartCursor->next();
+ }
if ( _findingStartCursor->done() ) {
- _c = _findingStartCursor->cRelease();
+ _c = _findingStartCursor->cursor();
_findingStartCursor.reset( 0 );
}
- else {
- _findingStartCursor->next();
- }
_capped = true;
return;
}
@@ -774,15 +587,15 @@ namespace mongo {
}
_nscanned = _c->nscanned();
- if ( !matcher()->matches(_c->currKey(), _c->currLoc() , &_details ) ) {
+ if ( !matcher( _c )->matchesCurrent(_c.get() , &_details ) ) {
// not a match, continue onward
- if ( _details.loadedObject )
+ if ( _details._loadedObject )
_nscannedObjects++;
}
else {
_nscannedObjects++;
DiskLoc cl = _c->currLoc();
- if ( _chunkManager && ! _chunkManager->belongsToMe( cl.obj() ) ) {
+ if ( _chunkManager && ! _chunkManager->belongsToMe( cl.obj() ) ) { // TODO: should make this covered at some point
_nChunkSkips++;
// log() << "TEMP skipping un-owned chunk: " << _c->current() << endl;
}
@@ -938,6 +751,9 @@ namespace mongo {
cc->slaveReadTill( _slaveReadTill );
}
+
+ ShardChunkManagerPtr getChunkManager(){ return _chunkManager; }
+
private:
BufBuilder _buf;
const ParsedQuery& _pq;
@@ -981,7 +797,6 @@ namespace mongo {
@return points to ns if exhaust mode. 0=normal mode
*/
const char *runQuery(Message& m, QueryMessage& q, CurOp& curop, Message &result) {
- StringBuilder& ss = curop.debug().str;
shared_ptr<ParsedQuery> pq_shared( new ParsedQuery(q) );
ParsedQuery& pq( *pq_shared );
int ntoskip = q.ntoskip;
@@ -990,15 +805,10 @@ namespace mongo {
const char *ns = q.ns;
if( logLevel >= 2 )
- log() << "query: " << ns << jsobj << endl;
-
- ss << ns;
- {
- // only say ntoreturn if nonzero.
- int n = pq.getNumToReturn();
- if( n )
- ss << " ntoreturn:" << n;
- }
+ log() << "runQuery called " << ns << " " << jsobj << endl;
+
+ curop.debug().ns = ns;
+ curop.debug().ntoreturn = pq.getNumToReturn();
curop.setQuery(jsobj);
if ( pq.couldBeCommand() ) {
@@ -1006,15 +816,16 @@ namespace mongo {
bb.skip(sizeof(QueryResult));
BSONObjBuilder cmdResBuf;
if ( runCommands(ns, jsobj, curop, bb, cmdResBuf, false, queryOptions) ) {
- ss << " command: ";
- jsobj.toString( ss );
+ curop.debug().iscommand = true;
+ curop.debug().query = jsobj;
curop.markCommand();
+
auto_ptr< QueryResult > qr;
qr.reset( (QueryResult *) bb.buf() );
bb.decouple();
qr->setResultFlagsToOk();
qr->len = bb.len();
- ss << " reslen:" << bb.len();
+ curop.debug().responseLength = bb.len();
qr->setOperation(opReply);
qr->cursorId = 0;
qr->startingFrom = 0;
@@ -1090,6 +901,7 @@ namespace mongo {
}
if ( ! (explain || pq.showDiskLoc()) && isSimpleIdQuery( query ) && !pq.hasOption( QueryOption_CursorTailable ) ) {
+
bool nsFound = false;
bool indexFound = false;
@@ -1099,8 +911,8 @@ namespace mongo {
if ( nsFound == false || indexFound == true ) {
BufBuilder bb(sizeof(QueryResult)+resObject.objsize()+32);
bb.skip(sizeof(QueryResult));
-
- ss << " idhack ";
+
+ curop.debug().idhack = true;
if ( found ) {
n = 1;
fillQueryResultFromObj( bb , pq.getFields() , resObject );
@@ -1110,13 +922,14 @@ namespace mongo {
bb.decouple();
qr->setResultFlagsToOk();
qr->len = bb.len();
- ss << " reslen:" << bb.len();
+
+ curop.debug().responseLength = bb.len();
qr->setOperation(opReply);
qr->cursorId = 0;
qr->startingFrom = 0;
qr->nReturned = n;
result.setData( qr.release(), true );
- return false;
+ return NULL;
}
}
@@ -1147,8 +960,8 @@ namespace mongo {
}
n = dqo.n();
long long nscanned = dqo.totalNscanned();
- if ( dqo.scanAndOrderRequired() )
- ss << " scanAndOrder ";
+ curop.debug().scanAndOrder = dqo.scanAndOrderRequired();
+
shared_ptr<Cursor> cursor = dqo.cursor();
if( logLevel >= 5 )
log() << " used cursor: " << cursor.get() << endl;
@@ -1159,13 +972,16 @@ namespace mongo {
bool moreClauses = mps->mayRunMore();
if ( moreClauses ) {
// this MultiCursor will use a dumb NoOp to advance(), so no need to specify mayYield
- shared_ptr< Cursor > multi( new MultiCursor( mps, cursor, dqo.matcher(), dqo ) );
+ shared_ptr< Cursor > multi( new MultiCursor( mps, cursor, dqo.matcher( cursor ), dqo ) );
cc = new ClientCursor(queryOptions, multi, ns, jsobj.getOwned());
}
else {
- if( ! cursor->matcher() ) cursor->setMatcher( dqo.matcher() );
+ if( ! cursor->matcher() ) cursor->setMatcher( dqo.matcher( cursor ) );
cc = new ClientCursor( queryOptions, cursor, ns, jsobj.getOwned() );
}
+
+ cc->setChunkManager( dqo.getChunkManager() );
+
cursorid = cc->cursorid();
DEV tlog(2) << "query has more, cursorid: " << cursorid << endl;
cc->setPos( n );
@@ -1177,7 +993,7 @@ namespace mongo {
DEV tlog() << "query has no more but tailable, cursorid: " << cursorid << endl;
if( queryOptions & QueryOption_Exhaust ) {
exhaust = ns;
- ss << " exhaust ";
+ curop.debug().exhaust = true;
}
dqo.finishForOplogReplay(cc);
}
@@ -1186,7 +1002,7 @@ namespace mongo {
qr->cursorId = cursorid;
qr->setResultFlagsToOk();
// qr->len is updated automatically by appendData()
- ss << " reslen:" << qr->len;
+ curop.debug().responseLength = qr->len;
qr->setOperation(opReply);
qr->startingFrom = 0;
qr->nReturned = n;
@@ -1194,14 +1010,10 @@ namespace mongo {
int duration = curop.elapsedMillis();
bool dbprofile = curop.shouldDBProfile( duration );
if ( dbprofile || duration >= cmdLine.slowMS ) {
- ss << " nscanned:" << nscanned << ' ';
- if ( ntoskip )
- ss << " ntoskip:" << ntoskip;
- if ( dbprofile )
- ss << " \nquery: ";
- ss << jsobj.toString() << ' ';
+ curop.debug().nscanned = (int) nscanned;
+ curop.debug().ntoskip = ntoskip;
}
- ss << " nreturned:" << n;
+ curop.debug().nreturned = n;
return exhaust;
}
diff --git a/db/query.h b/db/ops/query.h
index 5de7ced..ada2e90 100644
--- a/db/query.h
+++ b/db/ops/query.h
@@ -18,98 +18,22 @@
#pragma once
-#include "../pch.h"
-#include "../util/message.h"
-#include "dbmessage.h"
-#include "jsobj.h"
-#include "diskloc.h"
-#include "projection.h"
-
-/* db request message format
-
- unsigned opid; // arbitary; will be echoed back
- byte operation;
- int options;
-
- then for:
-
- dbInsert:
- string collection;
- a series of JSObjects
- dbDelete:
- string collection;
- int flags=0; // 1=DeleteSingle
- JSObject query;
- dbUpdate:
- string collection;
- int flags; // 1=upsert
- JSObject query;
- JSObject objectToUpdate;
- objectToUpdate may include { $inc: <field> } or { $set: ... }, see struct Mod.
- dbQuery:
- string collection;
- int nToSkip;
- int nToReturn; // how many you want back as the beginning of the cursor data (0=no limit)
- // greater than zero is simply a hint on how many objects to send back per "cursor batch".
- // a negative number indicates a hard limit.
- JSObject query;
- [JSObject fieldsToReturn]
- dbGetMore:
- string collection; // redundant, might use for security.
- int nToReturn;
- int64 cursorID;
- dbKillCursors=2007:
- int n;
- int64 cursorIDs[n];
-
- Note that on Update, there is only one object, which is different
- from insert where you can pass a list of objects to insert in the db.
- Note that the update field layout is very similar layout to Query.
-*/
+#include "../../pch.h"
+#include "../../util/net/message.h"
+#include "../dbmessage.h"
+#include "../jsobj.h"
+#include "../diskloc.h"
+#include "../projection.h"
// struct QueryOptions, QueryResult, QueryResultFlags in:
-#include "../client/dbclient.h"
+#include "../../client/dbclient.h"
namespace mongo {
extern const int MaxBytesToReturnToClientAtOnce;
- // for an existing query (ie a ClientCursor), send back additional information.
- struct GetMoreWaitException { };
-
QueryResult* processGetMore(const char *ns, int ntoreturn, long long cursorid , CurOp& op, int pass, bool& exhaust);
- struct UpdateResult {
- bool existing; // if existing objects were modified
- bool mod; // was this a $ mod
- long long num; // how many objects touched
- OID upserted; // if something was upserted, the new _id of the object
-
- UpdateResult( bool e, bool m, unsigned long long n , const BSONObj& upsertedObject = BSONObj() )
- : existing(e) , mod(m), num(n) {
- upserted.clear();
-
- BSONElement id = upsertedObject["_id"];
- if ( ! e && n == 1 && id.type() == jstOID ) {
- upserted = id.OID();
- }
- }
-
- };
-
- class RemoveSaver;
-
- /* returns true if an existing object was updated, false if no existing object was found.
- multi - update multiple objects - mostly useful with things like $set
- god - allow access to system namespaces
- */
- UpdateResult updateObjects(const char *ns, const BSONObj& updateobj, BSONObj pattern, bool upsert, bool multi , bool logop , OpDebug& debug );
- UpdateResult _updateObjects(bool god, const char *ns, const BSONObj& updateobj, BSONObj pattern,
- bool upsert, bool multi , bool logop , OpDebug& debug , RemoveSaver * rs = 0 );
-
- // If justOne is true, deletedId is set to the id of the deleted object.
- long long deleteObjects(const char *ns, BSONObj pattern, bool justOne, bool logop = false, bool god=false, RemoveSaver * rs=0);
-
long long runCount(const char *ns, const BSONObj& cmd, string& err);
const char * runQuery(Message& m, QueryMessage& q, CurOp& curop, Message &result);
@@ -142,7 +66,7 @@ namespace mongo {
* includes fields from the query message, both possible query levels
* parses everything up front
*/
- class ParsedQuery {
+ class ParsedQuery : boost::noncopyable {
public:
ParsedQuery( QueryMessage& qm )
: _ns( qm.ns ) , _ntoskip( qm.ntoskip ) , _ntoreturn( qm.ntoreturn ) , _options( qm.queryOptions ) {
@@ -155,8 +79,6 @@ namespace mongo {
initFields( fields );
}
- ~ParsedQuery() {}
-
const char * ns() const { return _ns; }
bool isLocalDB() const { return strncmp(_ns, "local.", 6) == 0; }
@@ -170,7 +92,6 @@ namespace mongo {
int getOptions() const { return _options; }
bool hasOption( int x ) const { return x & _options; }
-
bool isExplain() const { return _explain; }
bool isSnapshot() const { return _snapshot; }
bool returnKey() const { return _returnKey; }
@@ -262,27 +183,33 @@ namespace mongo {
_order = transformOrderFromArrayFormat( _order );
}
else {
- uassert(13513, "sort must be an object or array", 0);
+ uasserted(13513, "sort must be an object or array");
}
+ continue;
}
- else if ( strcmp( "$explain" , name ) == 0 )
- _explain = e.trueValue();
- else if ( strcmp( "$snapshot" , name ) == 0 )
- _snapshot = e.trueValue();
- else if ( strcmp( "$min" , name ) == 0 )
- _min = e.embeddedObject();
- else if ( strcmp( "$max" , name ) == 0 )
- _max = e.embeddedObject();
- else if ( strcmp( "$hint" , name ) == 0 )
- _hint = e;
- else if ( strcmp( "$returnKey" , name ) == 0 )
- _returnKey = e.trueValue();
- else if ( strcmp( "$maxScan" , name ) == 0 )
- _maxScan = e.numberInt();
- else if ( strcmp( "$showDiskLoc" , name ) == 0 )
- _showDiskLoc = e.trueValue();
-
+ if( *name == '$' ) {
+ name++;
+ if ( strcmp( "explain" , name ) == 0 )
+ _explain = e.trueValue();
+ else if ( strcmp( "snapshot" , name ) == 0 )
+ _snapshot = e.trueValue();
+ else if ( strcmp( "min" , name ) == 0 )
+ _min = e.embeddedObject();
+ else if ( strcmp( "max" , name ) == 0 )
+ _max = e.embeddedObject();
+ else if ( strcmp( "hint" , name ) == 0 )
+ _hint = e;
+ else if ( strcmp( "returnKey" , name ) == 0 )
+ _returnKey = e.trueValue();
+ else if ( strcmp( "maxScan" , name ) == 0 )
+ _maxScan = e.numberInt();
+ else if ( strcmp( "showDiskLoc" , name ) == 0 )
+ _showDiskLoc = e.trueValue();
+ else if ( strcmp( "comment" , name ) == 0 ) {
+ ; // no-op
+ }
+ }
}
if ( _snapshot ) {
@@ -299,20 +226,14 @@ namespace mongo {
_fields->init( fields );
}
- ParsedQuery( const ParsedQuery& other ) {
- assert(0);
- }
-
- const char* _ns;
- int _ntoskip;
+ const char * const _ns;
+ const int _ntoskip;
int _ntoreturn;
- int _options;
-
BSONObj _filter;
+ BSONObj _order;
+ const int _options;
shared_ptr< Projection > _fields;
-
bool _wantMore;
-
bool _explain;
bool _snapshot;
bool _returnKey;
@@ -320,11 +241,10 @@ namespace mongo {
BSONObj _min;
BSONObj _max;
BSONElement _hint;
- BSONObj _order;
int _maxScan;
};
} // namespace mongo
-#include "clientcursor.h"
+
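
For orientation, the reworked ParsedQuery loop above skips the leading '$' once and then dispatches on the bare modifier name, which is also what lets it quietly accept $comment. A standalone sketch of that dispatch shape follows; the function name and printf output are illustrative only, not the server's types.

    #include <cstdio>
    #include <cstring>

    // Illustrative stand-in for the '$'-modifier dispatch in the parsing loop above:
    // skip the '$' once, then compare the remainder against the known modifier names.
    void handleTopLevelField(const char* name, bool value) {
        if (*name != '$')
            return;                                    // ordinary query field, left alone
        ++name;
        if      (strcmp(name, "explain")     == 0) printf("explain=%d\n", value);
        else if (strcmp(name, "snapshot")    == 0) printf("snapshot=%d\n", value);
        else if (strcmp(name, "returnKey")   == 0) printf("returnKey=%d\n", value);
        else if (strcmp(name, "showDiskLoc") == 0) printf("showDiskLoc=%d\n", value);
        else if (strcmp(name, "comment")     == 0) { /* accepted and ignored */ }
    }

    int main() {
        handleTopLevelField("$explain", true);         // recognized modifier
        handleTopLevelField("$comment", true);         // no-op, like the new branch above
        handleTopLevelField("name",     true);         // not a modifier
        return 0;
    }
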
diff --git a/db/update.cpp b/db/ops/update.cpp
index 8dc6c85..fd9798a 100644
--- a/db/update.cpp
+++ b/db/ops/update.cpp
@@ -18,12 +18,13 @@
#include "pch.h"
#include "query.h"
-#include "pdfile.h"
-#include "jsobjmanipulator.h"
-#include "queryoptimizer.h"
-#include "repl.h"
+#include "../pdfile.h"
+#include "../jsobjmanipulator.h"
+#include "../queryoptimizer.h"
+#include "../repl.h"
+#include "../btree.h"
+#include "../../util/stringutils.h"
#include "update.h"
-#include "btree.h"
//#define DEBUGUPDATE(x) cout << x << endl;
#define DEBUGUPDATE(x)
@@ -284,7 +285,7 @@ namespace mongo {
case BIT: {
uassert( 10136 , "$bit needs an array" , elt.type() == Object );
uassert( 10137 , "$bit can only be applied to numbers" , in.isNumber() );
- uassert( 10138 , "$bit can't use a double" , in.type() != NumberDouble );
+ uassert( 10138 , "$bit cannot update a value of type double" , in.type() != NumberDouble );
int x = in.numberInt();
long long y = in.numberLong();
@@ -293,23 +294,22 @@ namespace mongo {
while ( it.more() ) {
BSONElement e = it.next();
uassert( 10139 , "$bit field must be number" , e.isNumber() );
- if ( strcmp( e.fieldName() , "and" ) == 0 ) {
+ if ( str::equals(e.fieldName(), "and") ) {
switch( in.type() ) {
case NumberInt: x = x&e.numberInt(); break;
case NumberLong: y = y&e.numberLong(); break;
default: assert( 0 );
}
}
- else if ( strcmp( e.fieldName() , "or" ) == 0 ) {
+ else if ( str::equals(e.fieldName(), "or") ) {
switch( in.type() ) {
case NumberInt: x = x|e.numberInt(); break;
case NumberLong: y = y|e.numberLong(); break;
default: assert( 0 );
}
}
-
else {
- throw UserException( 9016, (string)"unknown bit mod:" + e.fieldName() );
+ uasserted(9016, str::stream() << "unknown $bit operation: " << e.fieldName());
}
}
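
For reference, a tiny standalone illustration of what the $bit branches above compute for a 32-bit stored value; the operands are made up, and sub-fields apply in the order they appear in the modifier document (doubles are rejected, as the uassert above shows).

    #include <cstdint>
    #include <cassert>

    // Illustrative only: {$bit: {x: {and: 0x0f, or: 0x100}}} applied to x == 0xabcd.
    int main() {
        int32_t x = 0xabcd;
        x = x & 0x0f;      // "and" sub-field
        x = x | 0x100;     // "or" sub-field, applied after "and" in document order
        assert(x == 0x10d);
        return 0;
    }
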
@@ -407,7 +407,7 @@ namespace mongo {
if ( mss->amIInPlacePossible( e.isNumber() ) ) {
// check more typing info here
if ( m.elt.type() != e.type() ) {
- // if i'm incrememnting with a double, then the storage has to be a double
+ // if i'm incrementing with a double, then the storage has to be a double
mss->amIInPlacePossible( m.elt.type() != NumberDouble );
}
@@ -509,7 +509,7 @@ namespace mongo {
}
if ( m->op == Mod::RENAME_FROM ) {
- DEBUGUPDATE( "\t\t\t\t\t appendForOpLog RENAME_FROM fielName:" << m->fieldName );
+ DEBUGUPDATE( "\t\t\t\t\t appendForOpLog RENAME_FROM fieldName:" << m->fieldName );
BSONObjBuilder bb( b.subobjStart( "$unset" ) );
bb.append( m->fieldName, 1 );
bb.done();
@@ -517,7 +517,7 @@ namespace mongo {
}
if ( m->op == Mod::RENAME_TO ) {
- DEBUGUPDATE( "\t\t\t\t\t appendForOpLog RENAME_TO fielName:" << m->fieldName );
+ DEBUGUPDATE( "\t\t\t\t\t appendForOpLog RENAME_TO fieldName:" << m->fieldName );
BSONObjBuilder bb( b.subobjStart( "$set" ) );
bb.appendAs( newVal, m->fieldName );
return;
@@ -570,13 +570,18 @@ namespace mongo {
switch ( m.m->op ) {
case Mod::UNSET:
- case Mod::PULL:
- case Mod::PULL_ALL:
case Mod::ADDTOSET:
case Mod::RENAME_FROM:
case Mod::RENAME_TO:
// this should have been handled by prepare
break;
+ case Mod::PULL:
+ case Mod::PULL_ALL:
+ // this should have been handled by prepare
+ break;
+ case Mod::POP:
+ assert( m.old.eoo() || ( m.old.isABSONObj() && m.old.Obj().isEmpty() ) );
+ break;
// [dm] the BSONElementManipulator statements below are for replication (correct?)
case Mod::INC:
if ( isOnDisk )
@@ -658,7 +663,7 @@ namespace mongo {
switch ( cmp ) {
- case LEFT_SUBFIELD: { // Mod is embeddeed under this element
+ case LEFT_SUBFIELD: { // Mod is embedded under this element
uassert( 10145 , str::stream() << "LEFT_SUBFIELD only supports Object: " << field << " not: " << e.type() , e.type() == Object || e.type() == Array );
if ( onedownseen.count( e.fieldName() ) == 0 ) {
onedownseen.insert( e.fieldName() );
@@ -739,6 +744,10 @@ namespace mongo {
return ss.str();
}
+ bool ModSetState::FieldCmp::operator()( const string &l, const string &r ) const {
+ return lexNumCmp( l.c_str(), r.c_str() ) < 0;
+ }
+
BSONObj ModSet::createNewFromQuery( const BSONObj& query ) {
BSONObj newObj;
@@ -751,7 +760,7 @@ namespace mongo {
if ( e.fieldName()[0] == '$' ) // for $atomic and anything else we add
continue;
- if ( e.type() == Object && e.embeddedObject().firstElement().fieldName()[0] == '$' ) {
+ if ( e.type() == Object && e.embeddedObject().firstElementFieldName()[0] == '$' ) {
// this means this is a $gt type filter, so don't make part of the new object
continue;
}
@@ -793,7 +802,7 @@ namespace mongo {
BSONElement e = it.next();
const char *fn = e.fieldName();
- uassert( 10147 , "Invalid modifier specified" + string( fn ), e.type() == Object );
+ uassert( 10147 , "Invalid modifier specified: " + string( fn ), e.type() == Object );
BSONObj j = e.embeddedObject();
DEBUGUPDATE( "\t" << j );
@@ -892,7 +901,10 @@ namespace mongo {
class UpdateOp : public MultiCursor::CursorOp {
public:
- UpdateOp( bool hasPositionalField ) : _nscanned(), _hasPositionalField( hasPositionalField ) {}
+ UpdateOp( bool hasPositionalField, int orClauseIndex = -1 ) :
+ _nscanned(),
+ _hasPositionalField( hasPositionalField ),
+ _orClauseIndex( orClauseIndex ) {}
virtual void _init() {
_c = qp().newCursor();
if ( ! _c->ok() ) {
@@ -900,6 +912,9 @@ namespace mongo {
}
}
virtual bool prepareToYield() {
+ if ( _orClauseIndex > 0 ) {
+ return false;
+ }
if ( ! _cc ) {
_cc.reset( new ClientCursor( QueryOption_NoCursorTimeout , _c , qp().ns() ) );
}
@@ -921,7 +936,11 @@ namespace mongo {
return;
}
_nscanned = _c->nscanned();
- if ( matcher()->matches(_c->currKey(), _c->currLoc(), &_details ) ) {
+ if ( _orClauseIndex > 0 && _nscanned >= 100 ) {
+ setComplete();
+ return;
+ }
+ if ( matcher( _c )->matchesCurrent(_c.get(), &_details ) ) {
setComplete();
return;
}
@@ -930,7 +949,7 @@ namespace mongo {
virtual bool mayRecordPlan() const { return false; }
virtual QueryOp *_createChild() const {
- return new UpdateOp( _hasPositionalField );
+ return new UpdateOp( _hasPositionalField, _orClauseIndex + 1 );
}
// already scanned to the first match, so return _c
virtual shared_ptr< Cursor > newCursor() const { return _c; }
@@ -942,6 +961,8 @@ namespace mongo {
MatchDetails _details;
ClientCursor::CleanupPointer _cc;
ClientCursor::YieldData _yieldData;
+ // Avoid yielding in the MultiPlanScanner when not the first $or clause - just a temporary implementation for now. SERVER-3555
+ int _orClauseIndex;
};
static void checkTooLarge(const BSONObj& newObj) {
@@ -958,19 +979,47 @@ namespace mongo {
NamespaceDetailsTransient *nsdt,
bool god, const char *ns,
const BSONObj& updateobj, BSONObj patternOrig, bool logop, OpDebug& debug) {
+
DiskLoc loc;
{
IndexDetails& i = d->idx(idIdxNo);
- BSONObj key = i.getKeyFromQuery( patternOrig );
- loc = i.head.btree()->findSingle(i, i.head, key);
+ BSONObj key = i.getKeyFromQuery( patternOrig );
+ loc = i.idxInterface().findSingle(i, i.head, key);
if( loc.isNull() ) {
// no upsert support in _updateById yet, so we are done.
return UpdateResult(0, 0, 0);
}
}
-
Record *r = loc.rec();
+ if ( ! r->likelyInPhysicalMemory() ) {
+ {
+ auto_ptr<RWLockRecursive::Shared> lk( new RWLockRecursive::Shared( MongoFile::mmmutex) );
+ dbtempreleasewritelock t;
+ r->touch();
+ lk.reset(0); // we have to release mmmutex before we can re-acquire dbmutex
+ }
+
+ {
+ // we need to re-find in case something changed
+ d = nsdetails( ns );
+ if ( ! d ) {
+ // dropped
+ return UpdateResult(0, 0, 0);
+ }
+ nsdt = &NamespaceDetailsTransient::get_w(ns);
+ IndexDetails& i = d->idx(idIdxNo);
+ BSONObj key = i.getKeyFromQuery( patternOrig );
+ loc = i.idxInterface().findSingle(i, i.head, key);
+ if( loc.isNull() ) {
+ // no upsert support in _updateById yet, so we are done.
+ return UpdateResult(0, 0, 0);
+ }
+
+ r = loc.rec();
+ }
+ }
+
/* look for $inc etc. note as listed here, all fields to inc must be this type, you can't set some
regular ones at the moment. */
if ( isOperatorUpdate ) {
@@ -980,8 +1029,6 @@ namespace mongo {
if( mss->canApplyInPlace() ) {
mss->applyModsInPlace(true);
DEBUGUPDATE( "\t\t\t updateById doing in place update" );
- /*if ( profile )
- ss << " fastmod "; */
}
else {
BSONObj newObj = mss->createNewFromMods();
@@ -1027,19 +1074,16 @@ namespace mongo {
DEBUGUPDATE( "update: " << ns << " update: " << updateobj << " query: " << patternOrig << " upsert: " << upsert << " multi: " << multi );
Client& client = cc();
int profile = client.database()->profile;
- StringBuilder& ss = debug.str;
+
+ debug.updateobj = updateobj;
- if ( logLevel > 2 )
- ss << " update: " << updateobj.toString();
-
- /* idea with these here it to make them loop invariant for multi updates, and thus be a bit faster for that case */
- /* NOTE: when yield() is added herein, these must be refreshed after each call to yield! */
+ // the idea with these here is to make them loop invariant for multi updates, and thus be a bit faster for that case
+ // The pointers may be left invalid on a failed or terminal yield recovery.
NamespaceDetails *d = nsdetails(ns); // can be null if an upsert...
NamespaceDetailsTransient *nsdt = &NamespaceDetailsTransient::get_w(ns);
- /* end note */
auto_ptr<ModSet> mods;
- bool isOperatorUpdate = updateobj.firstElement().fieldName()[0] == '$';
+ bool isOperatorUpdate = updateobj.firstElementFieldName()[0] == '$';
int modsIsIndexed = false; // really the # of indexes
if ( isOperatorUpdate ) {
if( d && d->indexBuildInProgress ) {
@@ -1053,208 +1097,248 @@ namespace mongo {
modsIsIndexed = mods->isIndexed();
}
- if( !upsert && !multi && isSimpleIdQuery(patternOrig) && d && !modsIsIndexed ) {
+ if( !multi && isSimpleIdQuery(patternOrig) && d && !modsIsIndexed ) {
int idxNo = d->findIdIndex();
if( idxNo >= 0 ) {
- ss << " byid ";
- return _updateById(isOperatorUpdate, idxNo, mods.get(), profile, d, nsdt, god, ns, updateobj, patternOrig, logop, debug);
+ debug.idhack = true;
+ UpdateResult result = _updateById(isOperatorUpdate, idxNo, mods.get(), profile, d, nsdt, god, ns, updateobj, patternOrig, logop, debug);
+ if ( result.existing || ! upsert ) {
+ return result;
+ }
+ else if ( upsert && ! isOperatorUpdate && ! logop) {
+ // this handles repl inserts
+ checkNoMods( updateobj );
+ debug.upsert = true;
+ BSONObj no = updateobj;
+ theDataFileMgr.insertWithObjMod(ns, no, god);
+ return UpdateResult( 0 , 0 , 1 , no );
+ }
}
}
- set<DiskLoc> seenObjects;
-
int numModded = 0;
long long nscanned = 0;
- MatchDetails details;
shared_ptr< MultiCursor::CursorOp > opPtr( new UpdateOp( mods.get() && mods->hasDynamicArray() ) );
shared_ptr< MultiCursor > c( new MultiCursor( ns, patternOrig, BSONObj(), opPtr, true ) );
- auto_ptr<ClientCursor> cc;
-
- while ( c->ok() ) {
- nscanned++;
-
- bool atomic = c->matcher()->docMatcher().atomic();
+ d = nsdetails(ns);
+ nsdt = &NamespaceDetailsTransient::get_w(ns);
- // May have already matched in UpdateOp, but do again to get details set correctly
- if ( ! c->matcher()->matches( c->currKey(), c->currLoc(), &details ) ) {
- c->advance();
+ if( c->ok() ) {
+ set<DiskLoc> seenObjects;
+ MatchDetails details;
+ auto_ptr<ClientCursor> cc;
+ do {
+ nscanned++;
- if ( nscanned % 256 == 0 && ! atomic ) {
+ bool atomic = c->matcher()->docMatcher().atomic();
+
+ if ( !atomic ) {
+ // *****************
if ( cc.get() == 0 ) {
shared_ptr< Cursor > cPtr = c;
cc.reset( new ClientCursor( QueryOption_NoCursorTimeout , cPtr , ns ) );
}
- if ( ! cc->yield() ) {
+
+ bool didYield;
+ if ( ! cc->yieldSometimes( ClientCursor::WillNeed, &didYield ) ) {
cc.release();
- // TODO should we assert or something?
break;
}
if ( !c->ok() ) {
break;
}
+
+ if ( didYield ) {
+ d = nsdetails(ns);
+ nsdt = &NamespaceDetailsTransient::get_w(ns);
+ }
+ // *****************
}
- continue;
- }
- Record *r = c->_current();
- DiskLoc loc = c->currLoc();
+ // May have already matched in UpdateOp, but do again to get details set correctly
+ if ( ! c->matcher()->matchesCurrent( c.get(), &details ) ) {
+ c->advance();
- // TODO Maybe this is unnecessary since we have seenObjects
- if ( c->getsetdup( loc ) ) {
- c->advance();
- continue;
- }
-
- BSONObj js(r);
-
- BSONObj pattern = patternOrig;
-
- if ( logop ) {
- BSONObjBuilder idPattern;
- BSONElement id;
- // NOTE: If the matching object lacks an id, we'll log
- // with the original pattern. This isn't replay-safe.
- // It might make sense to suppress the log instead
- // if there's no id.
- if ( js.getObjectID( id ) ) {
- idPattern.append( id );
- pattern = idPattern.obj();
- }
- else {
- uassert( 10157 , "multi-update requires all modified objects to have an _id" , ! multi );
+ if ( nscanned % 256 == 0 && ! atomic ) {
+ if ( cc.get() == 0 ) {
+ shared_ptr< Cursor > cPtr = c;
+ cc.reset( new ClientCursor( QueryOption_NoCursorTimeout , cPtr , ns ) );
+ }
+ if ( ! cc->yield() ) {
+ cc.release();
+ // TODO should we assert or something?
+ break;
+ }
+ if ( !c->ok() ) {
+ break;
+ }
+ d = nsdetails(ns);
+ nsdt = &NamespaceDetailsTransient::get_w(ns);
+ }
+ continue;
}
- }
-
- if ( profile )
- ss << " nscanned:" << nscanned;
- /* look for $inc etc. note as listed here, all fields to inc must be this type, you can't set some
- regular ones at the moment. */
- if ( isOperatorUpdate ) {
+ Record *r = c->_current();
+ DiskLoc loc = c->currLoc();
- if ( multi ) {
- c->advance(); // go to next record in case this one moves
- if ( seenObjects.count( loc ) )
- continue;
+ // TODO Maybe this is unnecessary since we have seenObjects
+ if ( c->getsetdup( loc ) ) {
+ c->advance();
+ continue;
}
- const BSONObj& onDisk = loc.obj();
+ BSONObj js(r);
- ModSet * useMods = mods.get();
- bool forceRewrite = false;
+ BSONObj pattern = patternOrig;
- auto_ptr<ModSet> mymodset;
- if ( details.elemMatchKey && mods->hasDynamicArray() ) {
- useMods = mods->fixDynamicArray( details.elemMatchKey );
- mymodset.reset( useMods );
- forceRewrite = true;
+ if ( logop ) {
+ BSONObjBuilder idPattern;
+ BSONElement id;
+ // NOTE: If the matching object lacks an id, we'll log
+ // with the original pattern. This isn't replay-safe.
+ // It might make sense to suppress the log instead
+ // if there's no id.
+ if ( js.getObjectID( id ) ) {
+ idPattern.append( id );
+ pattern = idPattern.obj();
+ }
+ else {
+ uassert( 10157 , "multi-update requires all modified objects to have an _id" , ! multi );
+ }
}
- auto_ptr<ModSetState> mss = useMods->prepare( onDisk );
+ if ( profile && !multi )
+ debug.nscanned = (int) nscanned;
- bool indexHack = multi && ( modsIsIndexed || ! mss->canApplyInPlace() );
+ /* look for $inc etc. note as listed here, all fields to inc must be this type, you can't set some
+ regular ones at the moment. */
+ if ( isOperatorUpdate ) {
- if ( indexHack ) {
- if ( cc.get() )
- cc->updateLocation();
- else
- c->noteLocation();
- }
+ if ( multi ) {
+ c->advance(); // go to next record in case this one moves
+ if ( seenObjects.count( loc ) )
+ continue;
+ }
- if ( modsIsIndexed <= 0 && mss->canApplyInPlace() ) {
- mss->applyModsInPlace( true );// const_cast<BSONObj&>(onDisk) );
+ const BSONObj& onDisk = loc.obj();
- DEBUGUPDATE( "\t\t\t doing in place update" );
- if ( profile )
- ss << " fastmod ";
+ ModSet * useMods = mods.get();
+ bool forceRewrite = false;
- if ( modsIsIndexed ) {
- seenObjects.insert( loc );
- }
- }
- else {
- if ( rs )
- rs->goingToDelete( onDisk );
-
- BSONObj newObj = mss->createNewFromMods();
- checkTooLarge(newObj);
- DiskLoc newLoc = theDataFileMgr.updateRecord(ns, d, nsdt, r, loc , newObj.objdata(), newObj.objsize(), debug);
- if ( newLoc != loc || modsIsIndexed ) {
- // object moved, need to make sure we don' get again
- seenObjects.insert( newLoc );
+ auto_ptr<ModSet> mymodset;
+ if ( details._elemMatchKey && mods->hasDynamicArray() ) {
+ useMods = mods->fixDynamicArray( details._elemMatchKey );
+ mymodset.reset( useMods );
+ forceRewrite = true;
}
- }
+ auto_ptr<ModSetState> mss = useMods->prepare( onDisk );
- if ( logop ) {
- DEV assert( mods->size() );
+ bool indexHack = multi && ( modsIsIndexed || ! mss->canApplyInPlace() );
- if ( mss->haveArrayDepMod() ) {
- BSONObjBuilder patternBuilder;
- patternBuilder.appendElements( pattern );
- mss->appendSizeSpecForArrayDepMods( patternBuilder );
- pattern = patternBuilder.obj();
+ if ( indexHack ) {
+ if ( cc.get() )
+ cc->updateLocation();
+ else
+ c->noteLocation();
}
- if ( forceRewrite || mss->needOpLogRewrite() ) {
- DEBUGUPDATE( "\t rewrite update: " << mss->getOpLogRewrite() );
- logOp("u", ns, mss->getOpLogRewrite() , &pattern );
+ if ( modsIsIndexed <= 0 && mss->canApplyInPlace() ) {
+ mss->applyModsInPlace( true );// const_cast<BSONObj&>(onDisk) );
+
+ DEBUGUPDATE( "\t\t\t doing in place update" );
+ if ( profile && !multi )
+ debug.fastmod = true;
+
+ if ( modsIsIndexed ) {
+ seenObjects.insert( loc );
+ }
}
else {
- logOp("u", ns, updateobj, &pattern );
- }
- }
- numModded++;
- if ( ! multi )
- return UpdateResult( 1 , 1 , numModded );
- if ( indexHack )
- c->checkLocation();
+ if ( rs )
+ rs->goingToDelete( onDisk );
+
+ BSONObj newObj = mss->createNewFromMods();
+ checkTooLarge(newObj);
+ DiskLoc newLoc = theDataFileMgr.updateRecord(ns, d, nsdt, r, loc , newObj.objdata(), newObj.objsize(), debug);
+ if ( newLoc != loc || modsIsIndexed ) {
+ // object moved, need to make sure we don't get it again
+ seenObjects.insert( newLoc );
+ }
- if ( nscanned % 64 == 0 && ! atomic ) {
- if ( cc.get() == 0 ) {
- shared_ptr< Cursor > cPtr = c;
- cc.reset( new ClientCursor( QueryOption_NoCursorTimeout , cPtr , ns ) );
}
- if ( ! cc->yield() ) {
- cc.release();
- break;
+
+ if ( logop ) {
+ DEV assert( mods->size() );
+
+ if ( mss->haveArrayDepMod() ) {
+ BSONObjBuilder patternBuilder;
+ patternBuilder.appendElements( pattern );
+ mss->appendSizeSpecForArrayDepMods( patternBuilder );
+ pattern = patternBuilder.obj();
+ }
+
+ if ( forceRewrite || mss->needOpLogRewrite() ) {
+ DEBUGUPDATE( "\t rewrite update: " << mss->getOpLogRewrite() );
+ logOp("u", ns, mss->getOpLogRewrite() , &pattern );
+ }
+ else {
+ logOp("u", ns, updateobj, &pattern );
+ }
}
- if ( !c->ok() ) {
- break;
+ numModded++;
+ if ( ! multi )
+ return UpdateResult( 1 , 1 , numModded );
+ if ( indexHack )
+ c->checkLocation();
+
+ if ( nscanned % 64 == 0 && ! atomic ) {
+ if ( cc.get() == 0 ) {
+ shared_ptr< Cursor > cPtr = c;
+ cc.reset( new ClientCursor( QueryOption_NoCursorTimeout , cPtr , ns ) );
+ }
+ if ( ! cc->yield() ) {
+ cc.release();
+ break;
+ }
+ if ( !c->ok() ) {
+ break;
+ }
+ d = nsdetails(ns);
+ nsdt = &NamespaceDetailsTransient::get_w(ns);
}
- }
- getDur().commitIfNeeded();
+ getDur().commitIfNeeded();
- continue;
- }
+ continue;
+ }
- uassert( 10158 , "multi update only works with $ operators" , ! multi );
+ uassert( 10158 , "multi update only works with $ operators" , ! multi );
- BSONElementManipulator::lookForTimestamps( updateobj );
- checkNoMods( updateobj );
- theDataFileMgr.updateRecord(ns, d, nsdt, r, loc , updateobj.objdata(), updateobj.objsize(), debug, god);
- if ( logop ) {
- DEV if( god ) log() << "REALLY??" << endl; // god doesn't get logged, this would be bad.
- logOp("u", ns, updateobj, &pattern );
- }
- return UpdateResult( 1 , 0 , 1 );
- }
+ BSONElementManipulator::lookForTimestamps( updateobj );
+ checkNoMods( updateobj );
+ theDataFileMgr.updateRecord(ns, d, nsdt, r, loc , updateobj.objdata(), updateobj.objsize(), debug, god);
+ if ( logop ) {
+ DEV wassert( !god ); // god doesn't get logged, this would be bad.
+ logOp("u", ns, updateobj, &pattern );
+ }
+ return UpdateResult( 1 , 0 , 1 );
+ } while ( c->ok() );
+ } // endif
if ( numModded )
return UpdateResult( 1 , 1 , numModded );
-
if ( profile )
- ss << " nscanned:" << nscanned;
+ debug.nscanned = (int) nscanned;
if ( upsert ) {
- if ( updateobj.firstElement().fieldName()[0] == '$' ) {
+ if ( updateobj.firstElementFieldName()[0] == '$' ) {
/* upsert of an $inc. build a default */
BSONObj newObj = mods->createNewFromQuery( patternOrig );
- if ( profile )
- ss << " fastmodinsert ";
+ checkNoMods( newObj );
+ debug.fastmodinsert = true;
theDataFileMgr.insertWithObjMod(ns, newObj, god);
if ( logop )
logOp( "i", ns, newObj );
@@ -1263,8 +1347,7 @@ namespace mongo {
}
uassert( 10159 , "multi update only works with $ operators" , ! multi );
checkNoMods( updateobj );
- if ( profile )
- ss << " upsert ";
+ debug.upsert = true;
BSONObj no = updateobj;
theDataFileMgr.insertWithObjMod(ns, no, god);
if ( logop )
diff --git a/db/update.h b/db/ops/update.h
index d8396b5..de5805a 100644
--- a/db/update.h
+++ b/db/ops/update.h
@@ -16,13 +16,48 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include "../pch.h"
-#include "jsobj.h"
-#include "../util/embedded_builder.h"
-#include "matcher.h"
+#include "../../pch.h"
+#include "../jsobj.h"
+#include "../../util/embedded_builder.h"
+#include "../matcher.h"
namespace mongo {
+ // ---------- public -------------
+
+ struct UpdateResult {
+ bool existing; // if existing objects were modified
+ bool mod; // was this a $ mod
+ long long num; // how many objects touched
+ OID upserted; // if something was upserted, the new _id of the object
+
+ UpdateResult( bool e, bool m, unsigned long long n , const BSONObj& upsertedObject = BSONObj() )
+ : existing(e) , mod(m), num(n) {
+ upserted.clear();
+
+ BSONElement id = upsertedObject["_id"];
+ if ( ! e && n == 1 && id.type() == jstOID ) {
+ upserted = id.OID();
+ }
+ }
+
+ };
+
+
+ class RemoveSaver;
+
+ /* returns true if an existing object was updated, false if no existing object was found.
+ multi - update multiple objects - mostly useful with things like $set
+ god - allow access to system namespaces
+ */
+ UpdateResult updateObjects(const char *ns, const BSONObj& updateobj, BSONObj pattern, bool upsert, bool multi , bool logop , OpDebug& debug );
+ UpdateResult _updateObjects(bool god, const char *ns, const BSONObj& updateobj, BSONObj pattern,
+ bool upsert, bool multi , bool logop , OpDebug& debug , RemoveSaver * rs = 0 );
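+
+ A hypothetical caller sketch of the public entry point declared above (not part of this patch); it assumes the BSON() builder macro is available via jsobj.h and that OpDebug comes from the usual curop header.
+
+     #include "update.h"        // the header above: UpdateResult, updateObjects()
+
+     namespace mongo {
+         // assumption: OpDebug is the per-op debug/profiling object from db/curop.h
+         void exampleUpsertIncrement( OpDebug& debug ) {
+             BSONObj pattern  = BSON( "_id" << 42 );
+             BSONObj modifier = BSON( "$inc" << BSON( "hits" << 1 ) );
+             UpdateResult res = updateObjects( "test.counters", modifier, pattern,
+                                               /*upsert*/ true, /*multi*/ false,
+                                               /*logop*/ true, debug );
+             if ( !res.existing && res.num == 1 ) {
+                 // nothing matched, so the upsert inserted a document; res.upserted
+                 // carries the new _id only when that _id is an ObjectId (not the
+                 // case here, since the pattern pins _id to the integer 42)
+             }
+         }
+     }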
+
+
+
+ // ---------- private -------------
+
class ModState;
class ModSetState;
@@ -507,9 +542,7 @@ namespace mongo {
*/
class ModSetState : boost::noncopyable {
struct FieldCmp {
- bool operator()( const string &l, const string &r ) const {
- return lexNumCmp( l.c_str(), r.c_str() ) < 0;
- }
+ bool operator()( const string &l, const string &r ) const;
};
typedef map<string,ModState,FieldCmp> ModStateHolder;
const BSONObj& _obj;
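
The comparator body now lives in update.cpp and defers to lexNumCmp from util/stringutils.h (hence the new include there). Below is a rough standalone approximation of the ordering it gives ModStateHolder keys -- digit runs compare numerically, so a positional path like "a.2" sorts before "a.10"; the real lexNumCmp handles more cases than this sketch.

    #include <cctype>
    #include <cstdlib>
    #include <cstring>
    #include <cassert>

    // Approximation only: compare digit runs as numbers, everything else as text.
    int lexNumCmpSketch(const char* l, const char* r) {
        while (*l && *r) {
            if (isdigit((unsigned char)*l) && isdigit((unsigned char)*r)) {
                char *lend, *rend;
                long ln = strtol(l, &lend, 10);
                long rn = strtol(r, &rend, 10);
                if (ln != rn) return ln < rn ? -1 : 1;
                l = lend; r = rend;
            }
            else {
                if (*l != *r) return *l < *r ? -1 : 1;
                ++l; ++r;
            }
        }
        if (*l == *r) return 0;
        return *l ? 1 : -1;          // shorter string sorts first
    }

    int main() {
        assert(lexNumCmpSketch("a.2", "a.10") < 0);   // numeric-aware ordering
        assert(strcmp("a.2", "a.10") > 0);            // plain strcmp disagrees
        return 0;
    }
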
diff --git a/db/pdfile.cpp b/db/pdfile.cpp
index 2aedfd4..ac7731a 100644
--- a/db/pdfile.cpp
+++ b/db/pdfile.cpp
@@ -30,10 +30,11 @@ _ disallow system* manipulations from the database.
#include "../util/hashtab.h"
#include "../util/file_allocator.h"
#include "../util/processinfo.h"
+#include "../util/file.h"
#include "btree.h"
+#include "btreebuilder.h"
#include <algorithm>
#include <list>
-#include "query.h"
#include "repl.h"
#include "dbhelpers.h"
#include "namespace-inl.h"
@@ -41,9 +42,27 @@ _ disallow system* manipulations from the database.
#include "extsort.h"
#include "curop-inl.h"
#include "background.h"
+#include "compact.h"
+#include "ops/delete.h"
+#include "instance.h"
+#include "replutil.h"
namespace mongo {
+ BOOST_STATIC_ASSERT( sizeof(Extent)-4 == 48+128 );
+ BOOST_STATIC_ASSERT( sizeof(DataFileHeader)-4 == 8192 );
+
+ bool isValidNS( const StringData& ns ) {
+ // TODO: should check for invalid characters
+
+ const char * x = strchr( ns.data() , '.' );
+ if ( ! x )
+ return false;
+
+ x++;
+ return *x > 0;
+ }
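+
+ A standalone restatement of the isValidNS() check above (plain char* instead of StringData), with the cases it accepts and rejects; as the TODO notes, it only verifies the db/collection split, not character validity.
+
+     #include <cstring>
+     #include <cassert>
+
+     // Sketch of the same check: a '.' must be present and something must follow it.
+     bool isValidNSSketch(const char* ns) {
+         const char* x = strchr(ns, '.');
+         if (!x) return false;       // no db/collection separator
+         ++x;
+         return *x != '\0';          // collection part must be non-empty
+     }
+
+     int main() {
+         assert(  isValidNSSketch("test.foo") );
+         assert( !isValidNSSketch("test.") );
+         assert( !isValidNSSketch("test") );
+         return 0;
+     }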
+
bool inDBRepair = false;
struct doingRepair {
doingRepair() {
@@ -86,7 +105,7 @@ namespace mongo {
}
BackgroundOperation::~BackgroundOperation() {
- assertInWriteLock();
+ wassert( dbMutex.isWriteLocked() );
dbsInProg[_ns.db]--;
nsInProg.erase(_ns.ns());
}
@@ -114,7 +133,6 @@ namespace mongo {
DatabaseHolder dbHolder;
int MAGIC = 0x1000;
- extern int otherTraceLevel;
void addNewNamespaceToCatalog(const char *ns, const BSONObj *options = 0);
void ensureIdIndexForNewNs(const char *ns) {
if ( ( strstr( ns, ".system." ) == 0 || legalClientSystemNS( ns , false ) ) &&
@@ -157,7 +175,7 @@ namespace mongo {
void _deleteDataFiles(const char *database) {
if ( directoryperdb ) {
FileAllocator::get()->waitUntilFinished();
- BOOST_CHECK_EXCEPTION( boost::filesystem::remove_all( boost::filesystem::path( dbpath ) / database ) );
+ MONGO_BOOST_CHECK_EXCEPTION_WITH_MSG( boost::filesystem::remove_all( boost::filesystem::path( dbpath ) / database ), "delete data files with a directoryperdb" );
return;
}
class : public FileOp {
@@ -206,11 +224,11 @@ namespace mongo {
}
}
- uassert( 10083 , "invalid size spec", size > 0 );
+ uassert( 10083 , "create collection invalid size spec", size > 0 );
bool newCapped = false;
int mx = 0;
- if( options.getBoolField("capped") ) {
+ if( options["capped"].trueValue() ) {
newCapped = true;
BSONElement e = options.getField("max");
if ( e.isNumber() ) {
@@ -232,7 +250,7 @@ namespace mongo {
// $nExtents is just for testing - always allocate new extents
// rather than reuse existing extents so we have some predictability
// in the extent size used by our tests
- database->suitableFile( (int) size, false )->createExtent( ns, (int) size, newCapped );
+ database->suitableFile( ns, (int) size, false, false )->createExtent( ns, (int) size, newCapped );
}
}
else if ( int( e.number() ) > 0 ) {
@@ -244,7 +262,7 @@ namespace mongo {
// $nExtents is just for testing - always allocate new extents
// rather than reuse existing extents so we have some predictability
// in the extent size used by our tests
- database->suitableFile( (int) size, false )->createExtent( ns, (int) size, newCapped );
+ database->suitableFile( ns, (int) size, false, false )->createExtent( ns, (int) size, newCapped );
}
}
else {
@@ -256,7 +274,7 @@ namespace mongo {
desiredExtentSize = Extent::minSize();
}
desiredExtentSize &= 0xffffff00;
- Extent *e = database->allocExtent( ns, desiredExtentSize, newCapped );
+ Extent *e = database->allocExtent( ns, desiredExtentSize, newCapped, true );
size -= e->length;
}
}
@@ -325,13 +343,13 @@ namespace mongo {
}
}
- void MongoDataFile::badOfs2(int ofs) const {
+ NOINLINE_DECL void MongoDataFile::badOfs2(int ofs) const {
stringstream ss;
ss << "bad offset:" << ofs << " accessing file: " << mmf.filename() << " - consider repairing database";
uasserted(13441, ss.str());
}
- void MongoDataFile::badOfs(int ofs) const {
+ NOINLINE_DECL void MongoDataFile::badOfs(int ofs) const {
stringstream ss;
ss << "bad offset:" << ofs << " accessing file: " << mmf.filename() << " - consider repairing database";
uasserted(13440, ss.str());
@@ -339,39 +357,17 @@ namespace mongo {
int MongoDataFile::defaultSize( const char *filename ) const {
int size;
-
if ( fileNo <= 4 )
size = (64*1024*1024) << fileNo;
else
size = 0x7ff00000;
-
if ( cmdLine.smallfiles ) {
size = size >> 2;
}
-
-
return size;
}
void MongoDataFile::open( const char *filename, int minSize, bool preallocateOnly ) {
- {
- /* check quotas
- very simple temporary implementation for now
- */
- if ( cmdLine.quota && fileNo > cmdLine.quotaFiles && !MMF::exists(filename) ) {
- /* todo: if we were adding / changing keys in an index did we do some
- work previously that needs cleaning up? Possible. We should
- check code like that and have it catch the exception and do
- something reasonable.
- */
- string s = "db disk space quota exceeded ";
- Database *database = cc().database();
- if ( database )
- s += database->name;
- uasserted(12501,s);
- }
- }
-
long size = defaultSize( filename );
while ( size < minSize ) {
if ( size < maxSize() / 2 )
@@ -438,13 +434,20 @@ namespace mongo {
}
Extent* MongoDataFile::createExtent(const char *ns, int approxSize, bool newCapped, int loops) {
+ {
+ // make sizes align with VM page size
+ int newSize = (approxSize + 0xfff) & 0xfffff000;
+ assert( newSize >= 0 );
+ if( newSize < Extent::maxSize() )
+ approxSize = newSize;
+ }
massert( 10357 , "shutdown in progress", ! inShutdown() );
massert( 10358 , "bad new extent size", approxSize >= Extent::minSize() && approxSize <= Extent::maxSize() );
massert( 10359 , "header==0 on new extent: 32 bit mmap space exceeded?", header() ); // null if file open failed
- int ExtentSize = approxSize <= header()->unusedLength ? approxSize : header()->unusedLength;
+ int ExtentSize = min(header()->unusedLength, approxSize);
DiskLoc loc;
if ( ExtentSize < Extent::minSize() ) {
- /* not there could be a lot of looping here is db just started and
+ /* note there could be a lot of looping here if db just started and
no files are open yet. we might want to do something about that. */
if ( loops > 8 ) {
assert( loops < 10000 );
@@ -455,12 +458,12 @@ namespace mongo {
}
int offset = header()->unused.getOfs();
- DataFileHeader *h = getDur().writing(header());
- h->unused.set( fileNo, offset + ExtentSize );
- h->unusedLength -= ExtentSize;
+ DataFileHeader *h = header();
+ h->unused.writing().set( fileNo, offset + ExtentSize );
+ getDur().writingInt(h->unusedLength) = h->unusedLength - ExtentSize;
loc.set(fileNo, offset);
Extent *e = _getExtent(loc);
- DiskLoc emptyLoc = getDur().writing(e)->init(ns, ExtentSize, fileNo, offset);
+ DiskLoc emptyLoc = getDur().writing(e)->init(ns, ExtentSize, fileNo, offset, newCapped);
addNewExtentToNamespace(ns, e, loc, emptyLoc, newCapped);
@@ -484,11 +487,15 @@ namespace mongo {
low = (int) (approxSize * 0.8);
high = (int) (approxSize * 1.4);
}
- if( high < 0 ) high = approxSize;
+ if( high <= 0 ) {
+ // overflowed
+ high = max(approxSize, Extent::maxSize());
+ }
int n = 0;
Extent *best = 0;
int bestDiff = 0x7fffffff;
{
+ Timer t;
DiskLoc L = f->firstExtent;
while( !L.isNull() ) {
Extent * e = L.ext();
@@ -497,16 +504,35 @@ namespace mongo {
if( diff < bestDiff ) {
bestDiff = diff;
best = e;
- if( diff == 0 )
+ if( ((double) diff) / approxSize < 0.1 ) {
+ // close enough
break;
+ }
+ if( t.seconds() >= 2 ) {
+ // have spent lots of time in write lock, and we are in [low,high], so close enough
+ // could come into play if extent freelist is very long
+ break;
+ }
+ }
+ else {
+ OCCASIONALLY {
+ if( high < 64 * 1024 && t.seconds() >= 2 ) {
+ // be less picky if it is taking a long time
+ high = 64 * 1024;
+ }
+ }
}
}
L = e->xnext;
++n;
-
+ }
+ if( t.seconds() >= 10 ) {
+ log() << "warning: slow scan in allocFromFreeList (in write lock)" << endl;
}
}
- OCCASIONALLY if( n > 512 ) log() << "warning: newExtent " << n << " scanned\n";
+
+ if( n > 128 ) log( n < 512 ) << "warning: newExtent " << n << " scanned\n";
+
if( best ) {
Extent *e = best;
// remove from the free list
@@ -521,7 +547,7 @@ namespace mongo {
// use it
OCCASIONALLY if( n > 512 ) log() << "warning: newExtent " << n << " scanned\n";
- DiskLoc emptyLoc = e->reuse(ns);
+ DiskLoc emptyLoc = e->reuse(ns, capped);
addNewExtentToNamespace(ns, e, e->myLoc, emptyLoc, capped);
return e;
}
@@ -533,24 +559,43 @@ namespace mongo {
/*---------------------------------------------------------------------*/
- DiskLoc Extent::reuse(const char *nsname) {
- return getDur().writing(this)->_reuse(nsname);
- }
- DiskLoc Extent::_reuse(const char *nsname) {
- log(3) << "reset extent was:" << nsDiagnostic.toString() << " now:" << nsname << '\n';
- massert( 10360 , "Extent::reset bad magic value", magic == 0x41424344 );
+ void Extent::markEmpty() {
xnext.Null();
xprev.Null();
- nsDiagnostic = nsname;
firstRecord.Null();
lastRecord.Null();
+ }
- DiskLoc emptyLoc = myLoc;
- emptyLoc.inc( (int) (_extentData-(char*)this) );
+ DiskLoc Extent::reuse(const char *nsname, bool capped) {
+ return getDur().writing(this)->_reuse(nsname, capped);
+ }
- int delRecLength = length - (_extentData - (char *) this);
+ void getEmptyLoc(const char *ns, const DiskLoc extentLoc, int extentLength, bool capped, /*out*/DiskLoc& emptyLoc, /*out*/int& delRecLength) {
+ emptyLoc = extentLoc;
+ emptyLoc.inc( Extent::HeaderSize() );
+ delRecLength = extentLength - Extent::HeaderSize();
+ if( delRecLength >= 32*1024 && str::contains(ns, '$') && !capped ) {
+ // probably an index. so skip forward to keep its records page aligned
+ int& ofs = emptyLoc.GETOFS();
+ int newOfs = (ofs + 0xfff) & ~0xfff;
+ delRecLength -= (newOfs-ofs);
+ dassert( delRecLength > 0 );
+ ofs = newOfs;
+ }
+ }
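+
+ In isolation, the alignment step above rounds the first record offset up to the next 4KB boundary (the same mask trick createExtent now applies to extent sizes) and shrinks the deleted-record length by the gap skipped; a sketch with made-up numbers:
+
+     #include <cassert>
+
+     int main() {
+         int ofs = 0x1234;                     // example offset inside the extent
+         int delRecLength = 100 * 1024;
+         int newOfs = (ofs + 0xfff) & ~0xfff;  // next multiple of 4096 (0x2000 here)
+         delRecLength -= (newOfs - ofs);
+         assert(newOfs == 0x2000);
+         assert(newOfs % 4096 == 0);
+         assert(delRecLength == 100 * 1024 - (0x2000 - 0x1234));
+         return 0;
+     }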
- DeletedRecord *empty = DataFileMgr::makeDeletedRecord(emptyLoc, delRecLength);//(DeletedRecord *) getRecord(emptyLoc);
+ DiskLoc Extent::_reuse(const char *nsname, bool capped) {
+ LOG(3) << "reset extent was:" << nsDiagnostic.toString() << " now:" << nsname << '\n';
+ massert( 10360 , "Extent::reset bad magic value", magic == 0x41424344 );
+ nsDiagnostic = nsname;
+ markEmpty();
+
+ DiskLoc emptyLoc;
+ int delRecLength;
+ getEmptyLoc(nsname, myLoc, length, capped, emptyLoc, delRecLength);
+
+ // todo: some dup code here and below in Extent::init
+ DeletedRecord *empty = DataFileMgr::makeDeletedRecord(emptyLoc, delRecLength);
empty = getDur().writing(empty);
empty->lengthWithHeaders = delRecLength;
empty->extentOfs = myLoc.getOfs();
@@ -560,7 +605,7 @@ namespace mongo {
}
/* assumes already zeroed -- insufficient for block 'reuse' perhaps */
- DiskLoc Extent::init(const char *nsname, int _length, int _fileNo, int _offset) {
+ DiskLoc Extent::init(const char *nsname, int _length, int _fileNo, int _offset, bool capped) {
magic = 0x41424344;
myLoc.set(_fileNo, _offset);
xnext.Null();
@@ -570,12 +615,12 @@ namespace mongo {
firstRecord.Null();
lastRecord.Null();
- DiskLoc emptyLoc = myLoc;
- emptyLoc.inc( (int) (_extentData-(char*)this) );
+ DiskLoc emptyLoc;
+ int delRecLength;
+ getEmptyLoc(nsname, myLoc, _length, capped, emptyLoc, delRecLength);
- int l = _length - (_extentData - (char *) this);
- DeletedRecord *empty = getDur().writing( DataFileMgr::makeDeletedRecord(emptyLoc, l) );
- empty->lengthWithHeaders = l;
+ DeletedRecord *empty = getDur().writing( DataFileMgr::makeDeletedRecord(emptyLoc, delRecLength) );
+ empty->lengthWithHeaders = delRecLength;
empty->extentOfs = myLoc.getOfs();
return emptyLoc;
}
@@ -673,7 +718,7 @@ namespace mongo {
/* todo: if extent is empty, free it for reuse elsewhere.
that is a bit complicated have to clean up the freelists.
*/
- RARELY out() << "info DFM::findAll(): extent " << loc.toString() << " was empty, skipping ahead " << ns << endl;
+ RARELY out() << "info DFM::findAll(): extent " << loc.toString() << " was empty, skipping ahead. ns:" << ns << endl;
// find a nonempty extent
// it might be nice to free the whole extent here! but have to clean up free recs then.
e = e->getNextExtent();
@@ -713,7 +758,7 @@ namespace mongo {
void printFreeList() {
string s = cc().database()->name + ".$freelist";
- log() << "dump freelist " << s << '\n';
+ log() << "dump freelist " << s << endl;
NamespaceDetails *freeExtents = nsdetails(s.c_str());
if( freeExtents == 0 ) {
log() << " freeExtents==0" << endl;
@@ -722,11 +767,48 @@ namespace mongo {
DiskLoc a = freeExtents->firstExtent;
while( !a.isNull() ) {
Extent *e = a.ext();
- log() << " " << a.toString() << " len:" << e->length << " prev:" << e->xprev.toString() << '\n';
+ log() << " extent " << a.toString() << " len:" << e->length << " prev:" << e->xprev.toString() << endl;
a = e->xnext;
}
- log() << " end freelist" << endl;
+ log() << "end freelist" << endl;
+ }
+
+ /** free a list of extents that are no longer in use. this is a double linked list of extents
+ (could be just one in the list)
+ */
+ void freeExtents(DiskLoc firstExt, DiskLoc lastExt) {
+ {
+ assert( !firstExt.isNull() && !lastExt.isNull() );
+ Extent *f = firstExt.ext();
+ Extent *l = lastExt.ext();
+ assert( f->xprev.isNull() );
+ assert( l->xnext.isNull() );
+ assert( f==l || !f->xnext.isNull() );
+ assert( f==l || !l->xprev.isNull() );
+ }
+
+ string s = cc().database()->name + ".$freelist";
+ NamespaceDetails *freeExtents = nsdetails(s.c_str());
+ if( freeExtents == 0 ) {
+ string err;
+ _userCreateNS(s.c_str(), BSONObj(), err, 0); // todo: this actually allocates an extent, which is bad!
+ freeExtents = nsdetails(s.c_str());
+ massert( 10361 , "can't create .$freelist", freeExtents);
+ }
+ if( freeExtents->firstExtent.isNull() ) {
+ freeExtents->firstExtent.writing() = firstExt;
+ freeExtents->lastExtent.writing() = lastExt;
+ }
+ else {
+ DiskLoc a = freeExtents->firstExtent;
+ assert( a.ext()->xprev.isNull() );
+ getDur().writingDiskLoc( a.ext()->xprev ) = lastExt;
+ getDur().writingDiskLoc( lastExt.ext()->xnext ) = a;
+ getDur().writingDiskLoc( freeExtents->firstExtent ) = firstExt;
+ }
+
+ //printFreeList();
}
/* drop a collection/namespace */
@@ -755,27 +837,9 @@ namespace mongo {
// free extents
if( !d->firstExtent.isNull() ) {
- string s = cc().database()->name + ".$freelist";
- NamespaceDetails *freeExtents = nsdetails(s.c_str());
- if( freeExtents == 0 ) {
- string err;
- _userCreateNS(s.c_str(), BSONObj(), err, 0);
- freeExtents = nsdetails(s.c_str());
- massert( 10361 , "can't create .$freelist", freeExtents);
- }
- if( freeExtents->firstExtent.isNull() ) {
- freeExtents->firstExtent.writing() = d->firstExtent;
- freeExtents->lastExtent.writing() = d->lastExtent;
- }
- else {
- DiskLoc a = freeExtents->firstExtent;
- assert( a.ext()->xprev.isNull() );
- getDur().writingDiskLoc( a.ext()->xprev ) = d->lastExtent;
- getDur().writingDiskLoc( d->lastExtent.ext()->xnext ) = a;
- getDur().writingDiskLoc( freeExtents->firstExtent ) = d->firstExtent;
- getDur().writingDiskLoc( d->firstExtent ).setInvalid();
- getDur().writingDiskLoc( d->lastExtent ).setInvalid();
- }
+ freeExtents(d->firstExtent, d->lastExtent);
+ getDur().writingDiskLoc( d->firstExtent ).setInvalid();
+ getDur().writingDiskLoc( d->lastExtent ).setInvalid();
}
// remove from the catalog hashtable
@@ -810,22 +874,17 @@ namespace mongo {
dropNS(name);
}
- int nUnindexes = 0;
-
/* unindex all keys in index for this record. */
static void _unindexRecord(IndexDetails& id, BSONObj& obj, const DiskLoc& dl, bool logMissing = true) {
- BSONObjSetDefaultOrder keys;
+ BSONObjSet keys;
id.getKeysFromObject(obj, keys);
- for ( BSONObjSetDefaultOrder::iterator i=keys.begin(); i != keys.end(); i++ ) {
+ IndexInterface& ii = id.idxInterface();
+ for ( BSONObjSet::iterator i=keys.begin(); i != keys.end(); i++ ) {
BSONObj j = *i;
- if ( otherTraceLevel >= 5 ) {
- out() << "_unindexRecord() " << obj.toString();
- out() << "\n unindex:" << j.toString() << endl;
- }
- nUnindexes++;
+
bool ok = false;
try {
- ok = id.head.btree()->unindex(id.head, id, j, dl);
+ ok = ii.unindex(id.head, id, j, dl);
}
catch (AssertionException& e) {
problem() << "Assertion failure: _unindex failed " << id.indexNamespace() << endl;
@@ -837,7 +896,7 @@ namespace mongo {
}
if ( !ok && logMissing ) {
- out() << "unindex failed (key too big?) " << id.indexNamespace() << '\n';
+ log() << "unindex failed (key too big?) " << id.indexNamespace() << " key: " << j << " " << obj["_id"] << endl;
}
}
}
@@ -910,7 +969,7 @@ namespace mongo {
}
}
- void DataFileMgr::deleteRecord(const char *ns, Record *todelete, const DiskLoc& dl, bool cappedOK, bool noWarn) {
+ void DataFileMgr::deleteRecord(const char *ns, Record *todelete, const DiskLoc& dl, bool cappedOK, bool noWarn, bool doLog ) {
dassert( todelete == dl.rec() );
NamespaceDetails* d = nsdetails(ns);
@@ -919,6 +978,14 @@ namespace mongo {
uassert( 10089 , "can't remove from a capped collection" , 0 );
return;
}
+
+ BSONObj toDelete;
+ if ( doLog ) {
+ BSONElement e = dl.obj()["_id"];
+ if ( e.type() ) {
+ toDelete = e.wrap();
+ }
+ }
/* check if any cursors point to us. if so, advance them. */
ClientCursor::aboutToDelete(dl);
@@ -927,6 +994,10 @@ namespace mongo {
_deleteRecord(d, ns, todelete, dl);
NamespaceDetailsTransient::get_w( ns ).notifyOfWriteOp();
+
+ if ( ! toDelete.isEmpty() ) {
+ logOp( "d" , ns , toDelete );
+ }
}
@@ -938,7 +1009,7 @@ namespace mongo {
NamespaceDetailsTransient *nsdt,
Record *toupdate, const DiskLoc& dl,
const char *_buf, int _len, OpDebug& debug, bool god) {
- StringBuilder& ss = debug.str;
+
dassert( toupdate == dl.rec() );
BSONObj objOld(toupdate);
@@ -972,8 +1043,7 @@ namespace mongo {
// doesn't fit. reallocate -----------------------------------------------------
uassert( 10003 , "failing update: objects in a capped ns cannot grow", !(d && d->capped));
d->paddingTooSmall();
- if ( cc().database()->profile )
- ss << " moved ";
+ debug.moved = true;
deleteRecord(ns, toupdate, dl);
return insert(ns, objNew.objdata(), objNew.objsize(), god);
}
@@ -987,12 +1057,17 @@ namespace mongo {
int z = d->nIndexesBeingBuilt();
for ( int x = 0; x < z; x++ ) {
IndexDetails& idx = d->idx(x);
+ IndexInterface& ii = idx.idxInterface();
for ( unsigned i = 0; i < changes[x].removed.size(); i++ ) {
try {
- idx.head.btree()->unindex(idx.head, idx, *changes[x].removed[i], dl);
+ bool found = ii.unindex(idx.head, idx, *changes[x].removed[i], dl);
+ if ( ! found ) {
+ RARELY warning() << "ns: " << ns << " couldn't unindex key: " << *changes[x].removed[i]
+ << " for doc: " << objOld["_id"] << endl;
+ }
}
catch (AssertionException&) {
- ss << " exception update unindex ";
+ debug.extra << " exception update unindex ";
problem() << " caught assertion update unindex " << idx.indexNamespace() << endl;
}
}
@@ -1003,18 +1078,18 @@ namespace mongo {
for ( unsigned i = 0; i < changes[x].added.size(); i++ ) {
try {
/* we did the dupCheck() above. so we don't have to worry about it here. */
- idx.head.btree()->bt_insert(
+ ii.bt_insert(
idx.head,
dl, *changes[x].added[i], ordering, /*dupsAllowed*/true, idx);
}
catch (AssertionException& e) {
- ss << " exception update index ";
- problem() << " caught assertion update index " << idx.indexNamespace() << " " << e << endl;
+ debug.extra << " exception update index ";
+ problem() << " caught assertion update index " << idx.indexNamespace() << " " << e << " " << objNew["_id"] << endl;
}
}
}
- if( keyUpdates && cc().database()->profile )
- ss << '\n' << keyUpdates << " key updates ";
+
+ debug.keyUpdates = keyUpdates;
}
// update in place
@@ -1047,19 +1122,21 @@ namespace mongo {
/* add keys to index idxNo for a new record */
static inline void _indexRecord(NamespaceDetails *d, int idxNo, BSONObj& obj, DiskLoc recordLoc, bool dupsAllowed) {
IndexDetails& idx = d->idx(idxNo);
- BSONObjSetDefaultOrder keys;
+ BSONObjSet keys;
idx.getKeysFromObject(obj, keys);
+ if( keys.empty() )
+ return;
BSONObj order = idx.keyPattern();
+ IndexInterface& ii = idx.idxInterface();
Ordering ordering = Ordering::make(order);
int n = 0;
- for ( BSONObjSetDefaultOrder::iterator i=keys.begin(); i != keys.end(); i++ ) {
+ for ( BSONObjSet::iterator i=keys.begin(); i != keys.end(); i++ ) {
if( ++n == 2 ) {
d->setIndexIsMultikey(idxNo);
}
assert( !recordLoc.isNull() );
try {
- idx.head.btree()->bt_insert(idx.head, recordLoc,
- *i, ordering, dupsAllowed, idx);
+ ii.bt_insert(idx.head, recordLoc, *i, ordering, dupsAllowed, idx);
}
catch (AssertionException& e) {
if( e.getCode() == 10287 && idxNo == d->nIndexes ) {
@@ -1070,17 +1147,18 @@ namespace mongo {
// dup key exception, presumably.
throw;
}
- problem() << " caught assertion _indexRecord " << idx.indexNamespace() << endl;
+ problem() << " caught assertion _indexRecord " << idx.indexNamespace() << " " << obj["_id"] << endl;
}
}
}
+#if 0
void testSorting() {
BSONObjBuilder b;
b.appendNull("");
BSONObj x = b.obj();
- BSONObjExternalSorter sorter;
+ BSONObjExternalSorter sorter(*IndexDetails::iis[1]);
sorter.add(x, DiskLoc(3,7));
sorter.add(x, DiskLoc(4,7));
@@ -1098,6 +1176,62 @@ namespace mongo {
cout<<"SORTER next:" << d.first.toString() << endl;*/
}
}
+#endif
+
+ SortPhaseOne *precalced = 0;
+
+ template< class V >
+ void buildBottomUpPhases2And3(bool dupsAllowed, IndexDetails& idx, BSONObjExternalSorter& sorter,
+ bool dropDups, list<DiskLoc> &dupsToDrop, CurOp * op, SortPhaseOne *phase1, ProgressMeterHolder &pm,
+ Timer& t
+ )
+ {
+ BtreeBuilder<V> btBuilder(dupsAllowed, idx);
+ BSONObj keyLast;
+ auto_ptr<BSONObjExternalSorter::Iterator> i = sorter.iterator();
+ assert( pm == op->setMessage( "index: (2/3) btree bottom up" , phase1->nkeys , 10 ) );
+ while( i->more() ) {
+ RARELY killCurrentOp.checkForInterrupt();
+ BSONObjExternalSorter::Data d = i->next();
+
+ try {
+ if ( !dupsAllowed && dropDups ) {
+ LastError::Disabled led( lastError.get() );
+ btBuilder.addKey(d.first, d.second);
+ }
+ else {
+ btBuilder.addKey(d.first, d.second);
+ }
+ }
+ catch( AssertionException& e ) {
+ if ( dupsAllowed ) {
+ // unknown exception??
+ throw;
+ }
+
+ if( e.interrupted() ) {
+ killCurrentOp.checkForInterrupt();
+ }
+
+ if ( ! dropDups )
+ throw;
+
+ /* we could queue these on disk, but normally there are very few dups, so instead we
+ keep in ram and have a limit.
+ */
+ dupsToDrop.push_back(d.second);
+ uassert( 10092 , "too many dups on index build with dropDups=true", dupsToDrop.size() < 1000000 );
+ }
+ pm.hit();
+ }
+ pm.finished();
+ op->setMessage( "index: (3/3) btree-middle" );
+ log(t.seconds() > 10 ? 0 : 1 ) << "\t done building bottom layer, going to commit" << endl;
+ btBuilder.commit();
+ if ( btBuilder.getn() != phase1->nkeys && ! dropDups ) {
+ warning() << "not all entries were added to the index, probably some keys were too large" << endl;
+ }
+ }
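+
+ Boiled down, the dropDups path above works on keys that arrive pre-sorted from the external sorter: a colliding key does not abort the build; its location is queued in dupsToDrop and the document is deleted afterwards. A minimal sketch with toy data (here dups are spotted by comparing adjacent sorted keys, standing in for the failed unique insert):
+
+     #include <algorithm>
+     #include <cassert>
+     #include <string>
+     #include <utility>
+     #include <vector>
+
+     int main() {
+         // (key, record location) pairs, already sorted by key
+         std::vector<std::pair<std::string,int> > keyed =
+             { {"a",1}, {"b",2}, {"b",3}, {"c",4} };
+         std::vector<int> dupsToDrop;
+         for (size_t i = 0; i < keyed.size(); ++i) {
+             if (i && keyed[i].first == keyed[i-1].first)
+                 dupsToDrop.push_back(keyed[i].second); // would fail the unique insert
+             // else: btBuilder.addKey(...) equivalent
+         }
+         assert(dupsToDrop.size() == 1 && dupsToDrop[0] == 3);
+         return 0;
+     }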
// throws DBException
unsigned long long fastBuildIndex(const char *ns, NamespaceDetails *d, IndexDetails& idx, int idxNo) {
@@ -1116,39 +1250,36 @@ namespace mongo {
if ( logLevel > 1 ) printMemInfo( "before index start" );
/* get and sort all the keys ----- */
- unsigned long long n = 0;
- shared_ptr<Cursor> c = theDataFileMgr.findAll(ns);
- BSONObjExternalSorter sorter(order);
- sorter.hintNumObjects( d->stats.nrecords );
- unsigned long long nkeys = 0;
ProgressMeterHolder pm( op->setMessage( "index: (1/3) external sort" , d->stats.nrecords , 10 ) );
- while ( c->ok() ) {
- BSONObj o = c->current();
- DiskLoc loc = c->currLoc();
-
- BSONObjSetDefaultOrder keys;
- idx.getKeysFromObject(o, keys);
- int k = 0;
- for ( BSONObjSetDefaultOrder::iterator i=keys.begin(); i != keys.end(); i++ ) {
- if( ++k == 2 ) {
- d->setIndexIsMultikey(idxNo);
+ SortPhaseOne _ours;
+ SortPhaseOne *phase1 = precalced;
+ if( phase1 == 0 ) {
+ phase1 = &_ours;
+ SortPhaseOne& p1 = *phase1;
+ shared_ptr<Cursor> c = theDataFileMgr.findAll(ns);
+ p1.sorter.reset( new BSONObjExternalSorter(idx.idxInterface(), order) );
+ p1.sorter->hintNumObjects( d->stats.nrecords );
+ const IndexSpec& spec = idx.getSpec();
+ while ( c->ok() ) {
+ BSONObj o = c->current();
+ DiskLoc loc = c->currLoc();
+ p1.addKeys(spec, o, loc);
+ c->advance();
+ pm.hit();
+ if ( logLevel > 1 && p1.n % 10000 == 0 ) {
+ printMemInfo( "\t iterating objects" );
}
- sorter.add(*i, loc);
- nkeys++;
- }
+ };
+ }
+ pm.finished();
- c->advance();
- n++;
- pm.hit();
- if ( logLevel > 1 && n % 10000 == 0 ) {
- printMemInfo( "\t iterating objects" );
- }
+ BSONObjExternalSorter& sorter = *(phase1->sorter);
- };
- pm.finished();
+ if( phase1->multi )
+ d->setIndexIsMultikey(idxNo);
if ( logLevel > 1 ) printMemInfo( "before final sort" );
- sorter.sort();
+ phase1->sorter->sort();
if ( logLevel > 1 ) printMemInfo( "after final sort" );
log(t.seconds() > 5 ? 0 : 1) << "\t external sort used : " << sorter.numFiles() << " files " << " in " << t.seconds() << " secs" << endl;
@@ -1156,55 +1287,21 @@ namespace mongo {
list<DiskLoc> dupsToDrop;
/* build index --- */
- {
- BtreeBuilder btBuilder(dupsAllowed, idx);
- BSONObj keyLast;
- auto_ptr<BSONObjExternalSorter::Iterator> i = sorter.iterator();
- assert( pm == op->setMessage( "index: (2/3) btree bottom up" , nkeys , 10 ) );
- while( i->more() ) {
- RARELY killCurrentOp.checkForInterrupt();
- BSONObjExternalSorter::Data d = i->next();
-
- try {
- btBuilder.addKey(d.first, d.second);
- }
- catch( AssertionException& e ) {
- if ( dupsAllowed ) {
- // unknow exception??
- throw;
- }
-
- if( e.interrupted() )
- throw;
-
- if ( ! dropDups )
- throw;
-
- /* we could queue these on disk, but normally there are very few dups, so instead we
- keep in ram and have a limit.
- */
- dupsToDrop.push_back(d.second);
- uassert( 10092 , "too may dups on index build with dropDups=true", dupsToDrop.size() < 1000000 );
- }
- pm.hit();
- }
- pm.finished();
- op->setMessage( "index: (3/3) btree-middle" );
- log(t.seconds() > 10 ? 0 : 1 ) << "\t done building bottom layer, going to commit" << endl;
- btBuilder.commit();
- if ( btBuilder.getn() != nkeys && ! dropDups ) {
- warning() << "not all entries were added to the index, probably some keys were too large" << endl;
- }
- }
+ if( idx.version() == 0 )
+ buildBottomUpPhases2And3<V0>(dupsAllowed, idx, sorter, dropDups, dupsToDrop, op, phase1, pm, t);
+ else if( idx.version() == 1 )
+ buildBottomUpPhases2And3<V1>(dupsAllowed, idx, sorter, dropDups, dupsToDrop, op, phase1, pm, t);
+ else
+ assert(false);
log(1) << "\t fastBuildIndex dupsToDrop:" << dupsToDrop.size() << endl;
for( list<DiskLoc>::iterator i = dupsToDrop.begin(); i != dupsToDrop.end(); i++ ){
- theDataFileMgr.deleteRecord( ns, i->rec(), *i, false, true );
+ theDataFileMgr.deleteRecord( ns, i->rec(), *i, false /* cappedOk */ , true /* noWarn */ , isMaster( ns ) /* logOp */ );
getDur().commitIfNeeded();
}
- return n;
+ return phase1->n;
}
class BackgroundIndexBuildJob : public BackgroundOperation {
@@ -1226,18 +1323,27 @@ namespace mongo {
while ( cc->ok() ) {
BSONObj js = cc->current();
try {
- _indexRecord(d, idxNo, js, cc->currLoc(), dupsAllowed);
+ {
+ if ( !dupsAllowed && dropDups ) {
+ LastError::Disabled led( lastError.get() );
+ _indexRecord(d, idxNo, js, cc->currLoc(), dupsAllowed);
+ }
+ else {
+ _indexRecord(d, idxNo, js, cc->currLoc(), dupsAllowed);
+ }
+ }
cc->advance();
}
catch( AssertionException& e ) {
- if( e.interrupted() )
- throw;
+ if( e.interrupted() ) {
+ killCurrentOp.checkForInterrupt();
+ }
if ( dropDups ) {
DiskLoc toDelete = cc->currLoc();
bool ok = cc->advance();
cc->updateLocation();
- theDataFileMgr.deleteRecord( ns, toDelete.rec(), toDelete, false, true );
+ theDataFileMgr.deleteRecord( ns, toDelete.rec(), toDelete, false, true , true );
if( ClientCursor::find(id, false) == 0 ) {
cc.release();
if( !ok ) {
@@ -1259,7 +1365,10 @@ namespace mongo {
getDur().commitIfNeeded();
- if ( n % 128 == 0 && !cc->yield() ) {
+ if ( cc->yieldSometimes( ClientCursor::WillNeed ) ) {
+ progress.setTotalWhileRunning( d->stats.nrecords );
+ }
+ else {
cc.release();
uasserted(12584, "cursor gone during bg index");
break;
@@ -1292,7 +1401,7 @@ namespace mongo {
prep(ns.c_str(), d);
assert( idxNo == d->nIndexes );
try {
- idx.head.writing() = BtreeBucket::addBucket(idx);
+ idx.head.writing() = idx.idxInterface().addBucket(idx);
n = addExistingToIndex(ns.c_str(), d, idx, idxNo);
}
catch(...) {
@@ -1336,18 +1445,18 @@ namespace mongo {
// throws DBException
static void buildAnIndex(string ns, NamespaceDetails *d, IndexDetails& idx, int idxNo, bool background) {
- tlog() << "building new index on " << idx.keyPattern() << " for " << ns << ( background ? " background" : "" ) << endl;
+ tlog() << "build index " << ns << ' ' << idx.keyPattern() << ( background ? " background" : "" ) << endl;
Timer t;
unsigned long long n;
- if( background ) {
- log(2) << "buildAnIndex: background=true\n";
- }
-
assert( !BackgroundOperation::inProgForNs(ns.c_str()) ); // should have been checked earlier, better not be...
assert( d->indexBuildInProgress == 0 );
assertInWriteLock();
RecoverableIndexState recoverable( d );
+
+ // Build index spec here in case the collection is empty and the index details are invalid
+ idx.getSpec();
+
if( inDBRepair || !background ) {
n = fastBuildIndex(ns.c_str(), d, idx, idxNo);
assert( !idx.head.isNull() );
@@ -1356,7 +1465,7 @@ namespace mongo {
BackgroundIndexBuildJob j(ns.c_str());
n = j.go(ns, d, idx, idxNo);
}
- tlog() << "done for " << n << " records " << t.millis() / 1000.0 << "secs" << endl;
+ tlog() << "build index done " << n << " records " << t.millis() / 1000.0 << " secs" << endl;
}
/* add keys to indexes for a new record */
@@ -1436,17 +1545,16 @@ namespace mongo {
logOp( "i", ns, tmp );
}
+ /** @param o the object to insert. can be modified to add _id and thus be an in/out param
+ */
DiskLoc DataFileMgr::insertWithObjMod(const char *ns, BSONObj &o, bool god) {
- DiskLoc loc = insert( ns, o.objdata(), o.objsize(), god );
- if ( !loc.isNull() )
+ bool addedID = false;
+ DiskLoc loc = insert( ns, o.objdata(), o.objsize(), god, true, &addedID );
+ if( addedID && !loc.isNull() )
o = BSONObj( loc.rec() );
return loc;
}
- void DataFileMgr::insertNoReturnVal(const char *ns, BSONObj o, bool god) {
- insert( ns, o.objdata(), o.objsize(), god );
- }
-
bool prepareToBuildIndex(const BSONObj& io, bool god, string& sourceNS, NamespaceDetails *&sourceCollection, BSONObj& fixedIndexObject );
// We are now doing two btree scans for all unique indexes (one here, and one when we've
@@ -1457,55 +1565,186 @@ namespace mongo {
for ( int idxNo = 0; idxNo < d->nIndexes; idxNo++ ) {
if( d->idx(idxNo).unique() ) {
IndexDetails& idx = d->idx(idxNo);
- BSONObjSetDefaultOrder keys;
+ BSONObjSet keys;
idx.getKeysFromObject(obj, keys);
BSONObj order = idx.keyPattern();
- for ( BSONObjSetDefaultOrder::iterator i=keys.begin(); i != keys.end(); i++ ) {
+ IndexInterface& ii = idx.idxInterface();
+ for ( BSONObjSet::iterator i=keys.begin(); i != keys.end(); i++ ) {
+ // WARNING: findSingle may not be compound index safe. this may need to change. see notes in
+ // findSingle code.
uassert( 12582, "duplicate key insert for unique index of capped collection",
- idx.head.btree()->findSingle(idx, idx.head, *i ).isNull() );
+ ii.findSingle(idx, idx.head, *i ).isNull() );
}
}
}
}
- /* note: if god==true, you may pass in obuf of NULL and then populate the returned DiskLoc
- after the call -- that will prevent a double buffer copy in some cases (btree.cpp).
+ /** add a record to the end of the linked list chain within this extent.
+ require: you must have already declared write intent for the record header.
*/
- DiskLoc DataFileMgr::insert(const char *ns, const void *obuf, int len, bool god, const BSONElement &writeId, bool mayAddIndex) {
- bool wouldAddIndex = false;
- massert( 10093 , "cannot insert into reserved $ collection", god || isANormalNSName( ns ) );
- uassert( 10094 , str::stream() << "invalid ns: " << ns , isValidNS( ns ) );
- const char *sys = strstr(ns, "system.");
- if ( sys ) {
- uassert( 10095 , "attempt to insert in reserved database name 'system'", sys != ns);
- if ( strstr(ns, ".system.") ) {
- // later:check for dba-type permissions here if have that at some point separate
- if ( strstr(ns, ".system.indexes" ) )
- wouldAddIndex = true;
- else if ( legalClientSystemNS( ns , true ) )
- ;
- else if ( !god ) {
- out() << "ERROR: attempt to insert in system namespace " << ns << endl;
- return DiskLoc();
+ void addRecordToRecListInExtent(Record *r, DiskLoc loc) {
+ dassert( loc.rec() == r );
+ Extent *e = r->myExtent(loc);
+ if ( e->lastRecord.isNull() ) {
+ Extent::FL *fl = getDur().writing(e->fl());
+ fl->firstRecord = fl->lastRecord = loc;
+ r->prevOfs = r->nextOfs = DiskLoc::NullOfs;
+ }
+ else {
+ Record *oldlast = e->lastRecord.rec();
+ r->prevOfs = e->lastRecord.getOfs();
+ r->nextOfs = DiskLoc::NullOfs;
+ getDur().writingInt(oldlast->nextOfs) = loc.getOfs();
+ getDur().writingDiskLoc(e->lastRecord) = loc;
+ }
+ }
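+
+ The record chain that addRecordToRecListInExtent() maintains, reduced to plain indices: each record links to its neighbours by offset within the extent, with -1 standing in for DiskLoc::NullOfs. A toy sketch, not the on-disk layout:
+
+     #include <cassert>
+     #include <vector>
+
+     struct Rec { int prevOfs = -1, nextOfs = -1; };
+     struct Ext { int firstRecord = -1, lastRecord = -1; std::vector<Rec> recs; };
+
+     void appendRecord(Ext& e, int loc) {
+         if (e.lastRecord == -1) {                 // empty extent: record is both ends
+             e.firstRecord = e.lastRecord = loc;
+         }
+         else {                                    // append after the current last record
+             e.recs[loc].prevOfs = e.lastRecord;
+             e.recs[e.lastRecord].nextOfs = loc;
+             e.lastRecord = loc;
+         }
+     }
+
+     int main() {
+         Ext e; e.recs.resize(3);
+         appendRecord(e, 0); appendRecord(e, 1); appendRecord(e, 2);
+         assert(e.firstRecord == 0 && e.lastRecord == 2);
+         assert(e.recs[1].prevOfs == 0 && e.recs[1].nextOfs == 2);
+         return 0;
+     }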
+
+ NOINLINE_DECL DiskLoc outOfSpace(const char *ns, NamespaceDetails *d, int lenWHdr, bool god, DiskLoc extentLoc) {
+ DiskLoc loc;
+ if ( d->capped == 0 ) { // size capped doesn't grow
+ log(1) << "allocating new extent for " << ns << " padding:" << d->paddingFactor << " lenWHdr: " << lenWHdr << endl;
+ cc().database()->allocExtent(ns, Extent::followupSize(lenWHdr, d->lastExtentSize), false, !god);
+ loc = d->alloc(ns, lenWHdr, extentLoc);
+ if ( loc.isNull() ) {
+ log() << "warning: alloc() failed after allocating new extent. lenWHdr: " << lenWHdr << " last extent size:" << d->lastExtentSize << "; trying again\n";
+ for ( int z=0; z<10 && lenWHdr > d->lastExtentSize; z++ ) {
+ log() << "try #" << z << endl;
+ cc().database()->allocExtent(ns, Extent::followupSize(lenWHdr, d->lastExtentSize), false, !god);
+ loc = d->alloc(ns, lenWHdr, extentLoc);
+ if ( ! loc.isNull() )
+ break;
}
}
- else
- sys = 0;
}
+ return loc;
+ }
+
+ /** used by insert and also compact
+ * @return null loc if out of space
+ */
+ DiskLoc allocateSpaceForANewRecord(const char *ns, NamespaceDetails *d, int lenWHdr, bool god) {
+ DiskLoc extentLoc;
+ DiskLoc loc = d->alloc(ns, lenWHdr, extentLoc);
+ if ( loc.isNull() ) {
+ loc = outOfSpace(ns, d, lenWHdr, god, extentLoc);
+ }
+ return loc;
+ }
+
+ bool NOINLINE_DECL insert_checkSys(const char *sys, const char *ns, bool& wouldAddIndex, const void *obuf, bool god) {
+ uassert( 10095 , "attempt to insert in reserved database name 'system'", sys != ns);
+ if ( strstr(ns, ".system.") ) {
+ // later:check for dba-type permissions here if have that at some point separate
+ if ( strstr(ns, ".system.indexes" ) )
+ wouldAddIndex = true;
+ else if ( legalClientSystemNS( ns , true ) ) {
+ if ( obuf && strstr( ns , ".system.users" ) ) {
+ BSONObj t( reinterpret_cast<const char *>( obuf ) );
+ uassert( 14051 , "system.user entry needs 'user' field to be a string" , t["user"].type() == String );
+ uassert( 14052 , "system.user entry needs 'pwd' field to be a string" , t["pwd"].type() == String );
+ uassert( 14053 , "system.user entry needs 'user' field to be non-empty" , t["user"].String().size() );
+ uassert( 14054 , "system.user entry needs 'pwd' field to be non-empty" , t["pwd"].String().size() );
+ }
+ }
+ else if ( !god ) {
+ // todo this should probably uassert rather than doing this:
+ log() << "ERROR: attempt to insert in system namespace " << ns << endl;
+ return false;
+ }
+ }
+ return true;
+ }
+
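For reference, the new uasserts above (14051-14054) require a system.users document to carry non-empty string "user" and "pwd" fields. A minimal sketch of a document that passes these checks (illustrative only, not part of this changeset; the namespace and caller are hypothetical):

    BSONObj userDoc = BSON( "user" << "alice" << "pwd" << "913f4963b22d91b2a0208b3b8ba9a74c" );
    // conn.insert( "mydb.system.users", userDoc );   // hypothetical caller; any other shape now fails the checks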
+ NOINLINE_DECL NamespaceDetails* insert_newNamespace(const char *ns, int len, bool god) {
+ addNewNamespaceToCatalog(ns);
+ /* todo: shouldn't be in the namespace catalog until after the allocations here work.
+ also if this is an addIndex, those checks should happen before this!
+ */
+ // This may create first file in the database.
+ int ies = Extent::initialSize(len);
+ if( str::contains(ns, '$') && len + Record::HeaderSize >= BtreeData_V1::BucketSize - 256 && len + Record::HeaderSize <= BtreeData_V1::BucketSize + 256 ) {
+ // probably an index. so we pick a value here for the first extent instead of using initialExtentSize() which is more
+ // for user collections. TODO: we could look at the # of records in the parent collection to be smarter here.
+ ies = (32+4) * 1024;
+ }
+ cc().database()->allocExtent(ns, ies, false, false);
+ NamespaceDetails *d = nsdetails(ns);
+ if ( !god )
+ ensureIdIndexForNewNs(ns);
+ return d;
+ }
+
+ void NOINLINE_DECL insert_makeIndex(NamespaceDetails *tableToIndex, const string& tabletoidxns, const DiskLoc& loc) {
+ uassert( 13143 , "can't create index on system.indexes" , tabletoidxns.find( ".system.indexes" ) == string::npos );
+
+ BSONObj info = loc.obj();
+ bool background = info["background"].trueValue();
+ if( background && cc().isSyncThread() ) {
+ /* don't do background indexing on slaves. there are nuances. this could be added later
+ but requires more code.
+ */
+ log() << "info: indexing in foreground on this replica; was a background index build on the primary" << endl;
+ background = false;
+ }
+
+ int idxNo = tableToIndex->nIndexes;
+ IndexDetails& idx = tableToIndex->addIndex(tabletoidxns.c_str(), !background); // clear transient info caches so they refresh; increments nIndexes
+ getDur().writingDiskLoc(idx.info) = loc;
+ try {
+ buildAnIndex(tabletoidxns, tableToIndex, idx, idxNo, background);
+ }
+ catch( DBException& e ) {
+ // save our error msg string as an exception or dropIndexes will overwrite our message
+ LastError *le = lastError.get();
+ int savecode = 0;
+ string saveerrmsg;
+ if ( le ) {
+ savecode = le->code;
+ saveerrmsg = le->msg;
+ }
+ else {
+ savecode = e.getCode();
+ saveerrmsg = e.what();
+ }
+
+ // roll back this index
+ string name = idx.indexName();
+ BSONObjBuilder b;
+ string errmsg;
+ bool ok = dropIndexes(tableToIndex, tabletoidxns.c_str(), name.c_str(), errmsg, b, true);
+ if( !ok ) {
+ log() << "failed to drop index after a unique key error building it: " << errmsg << ' ' << tabletoidxns << ' ' << name << endl;
+ }
+
+ assert( le && !saveerrmsg.empty() );
+ raiseError(savecode,saveerrmsg.c_str());
+ throw;
+ }
+ }
+
+ /* if god==true, you may pass in obuf of NULL and then populate the returned DiskLoc
+ after the call -- that will prevent a double buffer copy in some cases (btree.cpp).
+
+ @param mayAddIndex almost always true, except for invocation from rename namespace command.
+ @param addedID if not null, set to true if an _id element is added. the caller must set it to false
+ before calling if it is used.
+ */
+
+ DiskLoc DataFileMgr::insert(const char *ns, const void *obuf, int len, bool god, bool mayAddIndex, bool *addedID) {
+ bool wouldAddIndex = false;
+ massert( 10093 , "cannot insert into reserved $ collection", god || NamespaceString::normal( ns ) );
+ uassert( 10094 , str::stream() << "invalid ns: " << ns , isValidNS( ns ) );
+ {
+ const char *sys = strstr(ns, "system.");
+ if ( sys && !insert_checkSys(sys, ns, wouldAddIndex, obuf, god) )
+ return DiskLoc();
+ }
bool addIndex = wouldAddIndex && mayAddIndex;
NamespaceDetails *d = nsdetails(ns);
if ( d == 0 ) {
- addNewNamespaceToCatalog(ns);
- /* todo: shouldn't be in the namespace catalog until after the allocations here work.
- also if this is an addIndex, those checks should happen before this!
- */
- // This may create first file in the database.
- cc().database()->allocExtent(ns, Extent::initialSize(len), false);
- d = nsdetails(ns);
- if ( !god )
- ensureIdIndexForNewNs(ns);
+ d = insert_newNamespace(ns, len, god);
}
d->paddingFits();
@@ -1516,18 +1755,18 @@ namespace mongo {
if ( addIndex ) {
assert( obuf );
BSONObj io((const char *) obuf);
- if( !prepareToBuildIndex(io, god, tabletoidxns, tableToIndex, fixedIndexObject ) )
+ if( !prepareToBuildIndex(io, god, tabletoidxns, tableToIndex, fixedIndexObject ) ) {
+ // prepare creates _id itself, or this indicates to fail the build silently (such
+ // as if index already exists)
return DiskLoc();
-
+ }
if ( ! fixedIndexObject.isEmpty() ) {
obuf = fixedIndexObject.objdata();
len = fixedIndexObject.objsize();
}
-
}
- const BSONElement *newId = &writeId;
- int addID = 0;
+ int addID = 0; // 0 if not adding _id; otherwise the object's length before the _id element is added
if( !god ) {
/* Check if we have an _id field. If we don't, we'll add it.
Note that btree buckets which we insert aren't BSONObj's, but in that case god==true.
@@ -1535,20 +1774,18 @@ namespace mongo {
BSONObj io((const char *) obuf);
BSONElement idField = io.getField( "_id" );
uassert( 10099 , "_id cannot be an array", idField.type() != Array );
- if( idField.eoo() && !wouldAddIndex && strstr(ns, ".local.") == 0 ) {
+ // we don't add _id for capped collections as they don't have an _id index
+ if( idField.eoo() && !wouldAddIndex && strstr(ns, ".local.") == 0 && d->haveIdIndex() ) {
+ if( addedID )
+ *addedID = true;
addID = len;
- if ( writeId.eoo() ) {
- // Very likely we'll add this elt, so little harm in init'ing here.
- idToInsert_.oid.init();
- newId = &idToInsert;
- }
- len += newId->size();
+ idToInsert_.oid.init();
+ len += idToInsert.size();
}
BSONElementManipulator::lookForTimestamps( io );
}
- DiskLoc extentLoc;
int lenWHdr = len + Record::HeaderSize;
lenWHdr = (int) (lenWHdr * d->paddingFactor);
if ( lenWHdr == 0 ) {
@@ -1564,29 +1801,11 @@ namespace mongo {
checkNoIndexConflicts( d, BSONObj( reinterpret_cast<const char *>( obuf ) ) );
}
- DiskLoc loc = d->alloc(ns, lenWHdr, extentLoc);
+ DiskLoc loc = allocateSpaceForANewRecord(ns, d, lenWHdr, god);
if ( loc.isNull() ) {
- // out of space
- if ( d->capped == 0 ) { // size capped doesn't grow
- log(1) << "allocating new extent for " << ns << " padding:" << d->paddingFactor << " lenWHdr: " << lenWHdr << endl;
- cc().database()->allocExtent(ns, Extent::followupSize(lenWHdr, d->lastExtentSize), false);
- loc = d->alloc(ns, lenWHdr, extentLoc);
- if ( loc.isNull() ) {
- log() << "WARNING: alloc() failed after allocating new extent. lenWHdr: " << lenWHdr << " last extent size:" << d->lastExtentSize << "; trying again\n";
- for ( int zzz=0; zzz<10 && lenWHdr > d->lastExtentSize; zzz++ ) {
- log() << "try #" << zzz << endl;
- cc().database()->allocExtent(ns, Extent::followupSize(len, d->lastExtentSize), false);
- loc = d->alloc(ns, lenWHdr, extentLoc);
- if ( ! loc.isNull() )
- break;
- }
- }
- }
- if ( loc.isNull() ) {
- log() << "insert: couldn't alloc space for object ns:" << ns << " capped:" << d->capped << endl;
- assert(d->capped);
- return DiskLoc();
- }
+ log() << "insert: couldn't alloc space for object ns:" << ns << " capped:" << d->capped << endl;
+ assert(d->capped);
+ return DiskLoc();
}
Record *r = loc.rec();
@@ -1595,31 +1814,17 @@ namespace mongo {
r = (Record*) getDur().writingPtr(r, lenWHdr);
if( addID ) {
/* a little effort was made here to avoid a double copy when we add an ID */
- ((int&)*r->data) = *((int*) obuf) + newId->size();
- memcpy(r->data+4, newId->rawdata(), newId->size());
- memcpy(r->data+4+newId->size(), ((char *)obuf)+4, addID-4);
+ ((int&)*r->data) = *((int*) obuf) + idToInsert.size();
+ memcpy(r->data+4, idToInsert.rawdata(), idToInsert.size());
+ memcpy(r->data+4+idToInsert.size(), ((char *)obuf)+4, addID-4);
}
else {
- if( obuf )
+ if( obuf ) // obuf can be null from internal callers
memcpy(r->data, obuf, len);
}
}
- {
- Extent *e = r->myExtent(loc);
- if ( e->lastRecord.isNull() ) {
- Extent::FL *fl = getDur().writing(e->fl());
- fl->firstRecord = fl->lastRecord = loc;
- r->prevOfs = r->nextOfs = DiskLoc::NullOfs;
- }
- else {
- Record *oldlast = e->lastRecord.rec();
- r->prevOfs = e->lastRecord.getOfs();
- r->nextOfs = DiskLoc::NullOfs;
- getDur().writingInt(oldlast->nextOfs) = loc.getOfs();
- getDur().writingDiskLoc(e->lastRecord) = loc;
- }
- }
+ addRecordToRecListInExtent(r, loc);
/* durability todo : this could be a bit annoying / slow to record constantly */
{
@@ -1628,56 +1833,12 @@ namespace mongo {
s->nrecords++;
}
- // we don't bother clearing those stats for the god tables - also god is true when adidng a btree bucket
+ // we don't bother resetting query optimizer stats for the god tables - also god is true when adding a btree bucket
if ( !god )
NamespaceDetailsTransient::get_w( ns ).notifyOfWriteOp();
if ( tableToIndex ) {
- uassert( 13143 , "can't create index on system.indexes" , tabletoidxns.find( ".system.indexes" ) == string::npos );
-
- BSONObj info = loc.obj();
- bool background = info["background"].trueValue();
- if( background && cc().isSyncThread() ) {
- /* don't do background indexing on slaves. there are nuances. this could be added later
- but requires more code.
- */
- log() << "info: indexing in foreground on this replica; was a background index build on the primary" << endl;
- background = false;
- }
-
- int idxNo = tableToIndex->nIndexes;
- IndexDetails& idx = tableToIndex->addIndex(tabletoidxns.c_str(), !background); // clear transient info caches so they refresh; increments nIndexes
- getDur().writingDiskLoc(idx.info) = loc;
- try {
- buildAnIndex(tabletoidxns, tableToIndex, idx, idxNo, background);
- }
- catch( DBException& e ) {
- // save our error msg string as an exception or dropIndexes will overwrite our message
- LastError *le = lastError.get();
- int savecode = 0;
- string saveerrmsg;
- if ( le ) {
- savecode = le->code;
- saveerrmsg = le->msg;
- }
- else {
- savecode = e.getCode();
- saveerrmsg = e.what();
- }
-
- // roll back this index
- string name = idx.indexName();
- BSONObjBuilder b;
- string errmsg;
- bool ok = dropIndexes(tableToIndex, tabletoidxns.c_str(), name.c_str(), errmsg, b, true);
- if( !ok ) {
- log() << "failed to drop index after a unique key error building it: " << errmsg << ' ' << tabletoidxns << ' ' << name << endl;
- }
-
- assert( le && !saveerrmsg.empty() );
- raiseError(savecode,saveerrmsg.c_str());
- throw;
- }
+ insert_makeIndex(tableToIndex, tabletoidxns, loc);
}
/* add this record to our indexes */
@@ -1703,7 +1864,6 @@ namespace mongo {
}
}
- // out() << " inserted at loc:" << hex << loc.getOfs() << " lenwhdr:" << hex << lenWHdr << dec << ' ' << ns << endl;
return loc;
}
@@ -1718,10 +1878,7 @@ namespace mongo {
DiskLoc extentLoc;
int lenWHdr = len + Record::HeaderSize;
DiskLoc loc = d->alloc(ns, lenWHdr, extentLoc);
- if ( loc.isNull() ) {
- assert(false);
- return 0;
- }
+ assert( !loc.isNull() );
Record *r = loc.rec();
assert( r->lengthWithHeaders >= lenWHdr );
@@ -1782,6 +1939,15 @@ namespace mongo {
BackgroundOperation::assertNoBgOpInProgForDb(d->name.c_str());
+ dbMutex.assertWriteLocked();
+
+ // Not sure we need this here, so removed. If we do, we need to move it down
+ // into the other calls, both (1) because they could be called from elsewhere and
+ // (2) to keep the lock order right - groupcommitmutex must be locked before
+ // mmmutex (if both are locked).
+ //
+ // RWLockRecursive::Exclusive lk(MongoFile::mmmutex);
+
getDur().syncDataAndTruncateJournal();
Database::closeDatabase( d->name.c_str(), d->path );
@@ -1889,21 +2055,6 @@ namespace mongo {
return sa.size();
}
-#if !defined(_WIN32)
-} // namespace mongo
-#include <sys/statvfs.h>
-namespace mongo {
-#endif
- boost::intmax_t freeSpace ( const string &path ) {
-#if !defined(_WIN32)
- struct statvfs info;
- assert( !statvfs( path.c_str() , &info ) );
- return boost::intmax_t( info.f_bavail ) * info.f_frsize;
-#else
- return -1;
-#endif
- }
-
bool repairDatabase( string dbNameS , string &errmsg,
bool preserveClonedFilesOnFailure, bool backupOriginalFiles ) {
doingRepair dr;
@@ -1923,7 +2074,7 @@ namespace mongo {
getDur().syncDataAndTruncateJournal(); // Must be done before and after repair
boost::intmax_t totalSize = dbSize( dbName );
- boost::intmax_t freeSize = freeSpace( repairpath );
+ boost::intmax_t freeSize = File::freeSpace(repairpath);
if ( freeSize > -1 && freeSize < totalSize ) {
stringstream ss;
ss << "Cannot repair database " << dbName << " having size: " << totalSize
@@ -1946,12 +2097,15 @@ namespace mongo {
assert( ctx.justCreated() );
res = cloneFrom(localhost.c_str(), errmsg, dbName,
- /*logForReplication=*/false, /*slaveok*/false, /*replauth*/false, /*snapshot*/false);
+ /*logForReplication=*/false, /*slaveok*/false, /*replauth*/false,
+ /*snapshot*/false, /*mayYield*/false, /*mayBeInterrupted*/true);
Database::closeDatabase( dbName, reservedPathString.c_str() );
}
if ( !res ) {
- problem() << "clone failed for " << dbName << " with error: " << errmsg << endl;
+ errmsg = str::stream() << "clone failed for " << dbName << " with error: " << errmsg;
+ problem() << errmsg << endl;
+
if ( !preserveClonedFilesOnFailure )
BOOST_CHECK_EXCEPTION( boost::filesystem::remove_all( reservedPath ) );
@@ -1996,7 +2150,7 @@ namespace mongo {
bool ok = false;
BOOST_CHECK_EXCEPTION( ok = fo.apply( q ) );
if ( ok )
- log(2) << fo.op() << " file " << q.string() << '\n';
+ log(2) << fo.op() << " file " << q.string() << endl;
int i = 0;
int extra = 10; // should not be necessary, this is defensive in case there are missing files
while ( 1 ) {
@@ -2060,16 +2214,4 @@ namespace mongo {
return true;
}
- bool isValidNS( const StringData& ns ) {
- // TODO: should check for invalid characters
-
- const char * x = strchr( ns.data() , '.' );
- if ( ! x )
- return false;
-
- x++;
- return *x > 0;
- }
-
-
} // namespace mongo
diff --git a/db/pdfile.h b/db/pdfile.h
index 91f4877..64dba68 100644
--- a/db/pdfile.h
+++ b/db/pdfile.h
@@ -52,9 +52,6 @@ namespace mongo {
bool userCreateNS(const char *ns, BSONObj j, string& err, bool logForReplication, bool *deferIdIndex = 0);
shared_ptr<Cursor> findTableScan(const char *ns, const BSONObj& order, const DiskLoc &startLoc=DiskLoc());
- // -1 if library unavailable.
- boost::intmax_t freeSpace( const string &path = dbpath );
-
bool isValidNS( const StringData& ns );
/*---------------------------------------------------------------------*/
@@ -123,13 +120,16 @@ namespace mongo {
// The object o may be updated if modified on insert.
void insertAndLog( const char *ns, const BSONObj &o, bool god = false );
- /** @param obj both and in and out param -- insert can sometimes modify an object (such as add _id). */
- DiskLoc insertWithObjMod(const char *ns, BSONObj &o, bool god = false);
+ /** insert will add an _id to the object if not present. if you would like to see the final object
+ after such an addition, use this method.
+ @param o both an in and out param
+ */
+ DiskLoc insertWithObjMod(const char *ns, BSONObj & /*out*/o, bool god = false);
/** @param obj in value only for this version. */
void insertNoReturnVal(const char *ns, BSONObj o, bool god = false);
- DiskLoc insert(const char *ns, const void *buf, int len, bool god = false, const BSONElement &writeId = BSONElement(), bool mayAddIndex = true);
+ DiskLoc insert(const char *ns, const void *buf, int len, bool god = false, bool mayAddIndex = true, bool *addedID = 0);
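As an aside, a hedged usage sketch of the reworked insert entry points (illustrative only; assumes the usual theDataFileMgr global and an example namespace "test.foo"):

    // See the generated _id via insertWithObjMod: the object is an in/out param.
    BSONObj doc = BSON( "x" << 1 );                        // no _id supplied
    DiskLoc loc = theDataFileMgr.insertWithObjMod( "test.foo", doc );
    // doc now contains the _id element that was added, if any

    // Or ask the raw insert whether an _id was added.
    bool addedID = false;                                   // must start out false
    theDataFileMgr.insert( "test.foo", doc.objdata(), doc.objsize(),
                           /*god*/false, /*mayAddIndex*/true, &addedID );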
static shared_ptr<Cursor> findAll(const char *ns, const DiskLoc &startLoc = DiskLoc());
/* special version of insert for transaction logging -- streamlined a bit.
@@ -142,7 +142,7 @@ namespace mongo {
static Record* getRecord(const DiskLoc& dl);
static DeletedRecord* makeDeletedRecord(const DiskLoc& dl, int len);
- void deleteRecord(const char *ns, Record *todelete, const DiskLoc& dl, bool cappedOK = false, bool noWarn = false);
+ void deleteRecord(const char *ns, Record *todelete, const DiskLoc& dl, bool cappedOK = false, bool noWarn = false, bool logOp=false);
/* does not clean up indexes, etc. : just deletes the record in the pdfile. use deleteRecord() to unindex */
void _deleteRecord(NamespaceDetails *d, const char *ns, Record *todelete, const DiskLoc& dl);
@@ -160,6 +160,9 @@ namespace mongo {
int lengthWithHeaders;
int extentOfs;
DiskLoc nextDeleted;
+ DiskLoc myExtentLoc(const DiskLoc& myLoc) const {
+ return DiskLoc(myLoc.a(), extentOfs);
+ }
Extent* myExtent(const DiskLoc& myLoc) {
return DataFileMgr::getExtent(DiskLoc(myLoc.a(), extentOfs));
}
@@ -174,7 +177,7 @@ namespace mongo {
(11:04:29 AM) dm10gen: we can do this as we know the record's address, and it has the same fileNo
(11:04:33 AM) dm10gen: see class DiskLoc for more info
(11:04:43 AM) dm10gen: so that is how Record::myExtent() works
- (11:04:53 AM) dm10gen: on an alloc(), when we build a new Record, we must popular its extentOfs then
+ (11:04:53 AM) dm10gen: on an alloc(), when we build a new Record, we must populate its extentOfs then
*/
class Record {
public:
@@ -204,11 +207,43 @@ namespace mongo {
DiskLoc getNext(const DiskLoc& myLoc);
DiskLoc getPrev(const DiskLoc& myLoc);
+ DiskLoc nextInExtent(const DiskLoc& myLoc) {
+ if ( nextOfs == DiskLoc::NullOfs )
+ return DiskLoc();
+ assert( nextOfs );
+ return DiskLoc(myLoc.a(), nextOfs);
+ }
+
struct NP {
int nextOfs;
int prevOfs;
};
NP* np() { return (NP*) &nextOfs; }
+
+ // ---------------------
+ // memory cache
+ // ---------------------
+
+ /**
+ * touches the data so that it is in physical memory
+ * @param entireRecrd if false, only the header and first byte are touched;
+ * if true, the entire record is touched
+ * */
+ void touch( bool entireRecrd = false );
+
+ /**
+ * @return whether this record is likely in physical memory
+ * it's not guaranteed because it's possible it gets swapped out in a very unlucky window
+ */
+ bool likelyInPhysicalMemory();
+
+ /**
+ * tell the cache this Record was accessed
+ * @return this, for simple chaining
+ */
+ Record* accessed();
+
+ static bool MemoryTrackingEnabled;
};
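A brief sketch of how a caller might use the new memory-cache helpers on Record (illustrative only, not from this changeset):

    Record *r = loc.rec();
    r->touch( /*entireRecrd*/ true );            // fault the whole record into physical memory
    if( r->likelyInPhysicalMemory() ) {
        BSONObj o( r->accessed() );              // accessed() notes the read for the tracker and returns r
        // ... use o without expecting a page fault
    }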
/* extents are datafile regions where all the records within the region
@@ -240,6 +275,12 @@ namespace mongo {
length >= 0 && !myLoc.isNull();
}
+ BSONObj dump() {
+ return BSON( "loc" << myLoc.toString() << "xnext" << xnext.toString() << "xprev" << xprev.toString()
+ << "nsdiag" << nsDiagnostic.toString()
+ << "size" << length << "firstRecord" << firstRecord.toString() << "lastRecord" << lastRecord.toString());
+ }
+
void dump(iostream& s) {
s << " loc:" << myLoc.toString() << " xnext:" << xnext.toString() << " xprev:" << xprev.toString() << '\n';
s << " nsdiag:" << nsDiagnostic.toString() << '\n';
@@ -250,10 +291,10 @@ namespace mongo {
Returns a DeletedRecord location which is the data in the extent ready for us.
Caller will need to add that to the freelist structure in namespacedetail.
*/
- DiskLoc init(const char *nsname, int _length, int _fileNo, int _offset);
+ DiskLoc init(const char *nsname, int _length, int _fileNo, int _offset, bool capped);
/* like init(), but for a reuse case */
- DiskLoc reuse(const char *nsname);
+ DiskLoc reuse(const char *nsname, bool newUseIsAsCapped);
bool isOk() const { return magic == 0x41424344; }
void assertOk() const { assert(isOk()); }
@@ -279,8 +320,8 @@ namespace mongo {
*/
static int followupSize(int len, int lastExtentLen);
- /**
- * @param len lengt of record we need
+ /** get a suggested size for the first extent in a namespace
+ * @param len length of record we need to insert
*/
static int initialSize(int len);
@@ -292,8 +333,11 @@ namespace mongo {
this helper is for that -- for use with getDur().writing() method
*/
FL* fl() { return (FL*) &firstRecord; }
+
+ /** caller must declare write intent first */
+ void markEmpty();
private:
- DiskLoc _reuse(const char *nsname);
+ DiskLoc _reuse(const char *nsname, bool newUseIsAsCapped); // recycle an extent and reuse it for a different ns
};
/* a datafile - i.e. the "dbname.<#>" files :
@@ -318,7 +362,7 @@ namespace mongo {
int unusedLength;
char reserved[8192 - 4*4 - 8];
- char data[4];
+ char data[4]; // first extent starts here
enum { HeaderSize = 8192 };
@@ -414,7 +458,7 @@ namespace mongo {
return DataFileMgr::getRecord(*this);
}
inline BSONObj DiskLoc::obj() const {
- return BSONObj(rec());
+ return BSONObj(rec()->accessed());
}
inline DeletedRecord* DiskLoc::drec() const {
assert( _a != -1 );
@@ -423,9 +467,12 @@ namespace mongo {
inline Extent* DiskLoc::ext() const {
return DataFileMgr::getExtent(*this);
}
- inline const BtreeBucket* DiskLoc::btree() const {
+
+ template< class V >
+ inline
+ const BtreeBucket<V> * DiskLoc::btree() const {
assert( _a != -1 );
- return (const BtreeBucket *) rec()->data;
+ return (const BtreeBucket<V> *) rec()->data;
}
} // namespace mongo
@@ -478,19 +525,8 @@ namespace mongo {
bool dropIndexes( NamespaceDetails *d, const char *ns, const char *name, string &errmsg, BSONObjBuilder &anObjBuilder, bool maydeleteIdIndex );
-
- /**
- * @return true if ns is 'normal'. $ used for collections holding index data, which do not contain BSON objects in their records.
- * special case for the local.oplog.$main ns -- naming it as such was a mistake.
- */
- inline bool isANormalNSName( const char* ns ) {
- if ( strchr( ns , '$' ) == 0 )
- return true;
- return strcmp( ns, "local.oplog.$main" ) == 0;
- }
-
inline BSONObj::BSONObj(const Record *r) {
- init(r->data, false);
+ init(r->data);
}
} // namespace mongo
diff --git a/db/projection.cpp b/db/projection.cpp
index 3dcfef7..d07e565 100644
--- a/db/projection.cpp
+++ b/db/projection.cpp
@@ -61,7 +61,7 @@ namespace mongo {
}
}
else {
- uassert(13097, string("Unsupported projection option: ") + obj.firstElement().fieldName(), false);
+ uassert(13097, string("Unsupported projection option: ") + obj.firstElementFieldName(), false);
}
}
diff --git a/db/projection.h b/db/projection.h
index fd3b856..b5e0a0c 100644
--- a/db/projection.h
+++ b/db/projection.h
@@ -94,6 +94,8 @@ namespace mongo {
*/
KeyOnly* checkKey( const BSONObj& keyPattern ) const;
+ bool includeID() const { return _includeID; }
+
private:
/**
diff --git a/db/queryoptimizer.cpp b/db/queryoptimizer.cpp
index 4eb2a99..692e9fd 100644
--- a/db/queryoptimizer.cpp
+++ b/db/queryoptimizer.cpp
@@ -1,4 +1,4 @@
-/* queryoptimizer.cpp */
+// @file queryoptimizer.cpp
/**
* Copyright (C) 2008 10gen Inc.
@@ -24,7 +24,6 @@
#include "queryoptimizer.h"
#include "cmdline.h"
#include "clientcursor.h"
-#include <queue>
//#define DEBUGQO(x) cout << x << endl;
#define DEBUGQO(x)
@@ -53,9 +52,10 @@ namespace mongo {
QueryPlan::QueryPlan(
NamespaceDetails *d, int idxNo,
- const FieldRangeSet &fbs, const FieldRangeSet &originalFrs, const BSONObj &originalQuery, const BSONObj &order, const BSONObj &startKey, const BSONObj &endKey , string special ) :
+ const FieldRangeSetPair &frsp, const FieldRangeSetPair *originalFrsp, const BSONObj &originalQuery, const BSONObj &order, bool mustAssertOnYieldFailure, const BSONObj &startKey, const BSONObj &endKey , string special ) :
_d(d), _idxNo(idxNo),
- _fbs( fbs ),
+ _frs( frsp.frsForIndex( _d, _idxNo ) ),
+ _frsMulti( frsp.frsForIndex( _d, -1 ) ),
_originalQuery( originalQuery ),
_order( order ),
_index( 0 ),
@@ -65,36 +65,44 @@ namespace mongo {
_direction( 0 ),
_endKeyInclusive( endKey.isEmpty() ),
_unhelpful( false ),
+ _impossible( false ),
_special( special ),
_type(0),
- _startOrEndSpec( !startKey.isEmpty() || !endKey.isEmpty() ) {
+ _startOrEndSpec( !startKey.isEmpty() || !endKey.isEmpty() ),
+ _mustAssertOnYieldFailure( mustAssertOnYieldFailure ) {
- if ( !_fbs.matchPossible() ) {
- _unhelpful = true;
+ BSONObj idxKey = _idxNo < 0 ? BSONObj() : d->idx( _idxNo ).keyPattern();
+
+ if ( !_frs.matchPossibleForIndex( idxKey ) ) {
+ _impossible = true;
_scanAndOrderRequired = false;
return;
}
-
- if( _idxNo >= 0 ) {
- _index = &d->idx(_idxNo);
- }
- else {
- // full table scan case
- if ( _order.isEmpty() || !strcmp( _order.firstElement().fieldName(), "$natural" ) )
+
+ if ( willScanTable() ) {
+ if ( _order.isEmpty() || !strcmp( _order.firstElementFieldName(), "$natural" ) )
_scanAndOrderRequired = false;
- return;
+ return;
}
+
+ _index = &d->idx(_idxNo);
+
+ // If the parsing or index indicates this is a special query, don't continue the processing
+ if ( _special.size() ||
+ ( _index->getSpec().getType() && _index->getSpec().getType()->suitability( originalQuery, order ) != USELESS ) ) {
+
+ if( _special.size() ) _optimal = true;
- if ( _special.size() ) {
- _optimal = true;
_type = _index->getSpec().getType();
+ if( !_special.size() ) _special = _index->getSpec().getType()->getPlugin()->getName();
+
massert( 13040 , (string)"no type for special: " + _special , _type );
// hopefully safe to use original query in these contexts - don't think we can mix special with $or clause separation yet
_scanAndOrderRequired = _type->scanAndOrderRequired( _originalQuery , order );
return;
}
- BSONObj idxKey = _index->keyPattern();
+ const IndexSpec &idxSpec = _index->getSpec();
BSONObjIterator o( order );
BSONObjIterator k( idxKey );
if ( !o.moreWithEOO() )
@@ -114,7 +122,7 @@ namespace mongo {
goto doneCheckOrder;
if ( strcmp( oe.fieldName(), ke.fieldName() ) == 0 )
break;
- if ( !fbs.range( ke.fieldName() ).equality() )
+ if ( !_frs.range( ke.fieldName() ).equality() )
goto doneCheckOrder;
}
int d = elementDirection( oe ) == elementDirection( ke ) ? 1 : -1;
@@ -130,41 +138,46 @@ doneCheckOrder:
int exactIndexedQueryCount = 0;
int optimalIndexedQueryCount = 0;
bool stillOptimalIndexedQueryCount = true;
- set< string > orderFieldsUnindexed;
+ set<string> orderFieldsUnindexed;
order.getFieldNames( orderFieldsUnindexed );
while( i.moreWithEOO() ) {
BSONElement e = i.next();
if ( e.eoo() )
break;
- const FieldRange &fb = fbs.range( e.fieldName() );
+ const FieldRange &fr = _frs.range( e.fieldName() );
if ( stillOptimalIndexedQueryCount ) {
- if ( fb.nontrivial() )
+ if ( fr.nontrivial() )
++optimalIndexedQueryCount;
- if ( !fb.equality() )
+ if ( !fr.equality() )
stillOptimalIndexedQueryCount = false;
}
else {
- if ( fb.nontrivial() )
+ if ( fr.nontrivial() )
optimalIndexedQueryCount = -1;
}
- if ( fb.equality() ) {
- BSONElement e = fb.max();
+ if ( fr.equality() ) {
+ BSONElement e = fr.max();
if ( !e.isNumber() && !e.mayEncapsulate() && e.type() != RegEx )
++exactIndexedQueryCount;
}
orderFieldsUnindexed.erase( e.fieldName() );
}
if ( !_scanAndOrderRequired &&
- ( optimalIndexedQueryCount == fbs.nNontrivialRanges() ) )
+ ( optimalIndexedQueryCount == _frs.nNontrivialRanges() ) )
_optimal = true;
- if ( exactIndexedQueryCount == fbs.nNontrivialRanges() &&
+ if ( exactIndexedQueryCount == _frs.nNontrivialRanges() &&
orderFieldsUnindexed.size() == 0 &&
- exactIndexedQueryCount == _index->keyPattern().nFields() &&
+ exactIndexedQueryCount == idxKey.nFields() &&
exactIndexedQueryCount == _originalQuery.nFields() ) {
_exactKeyMatch = true;
}
- _frv.reset( new FieldRangeVector( fbs, idxKey, _direction ) );
- _originalFrv.reset( new FieldRangeVector( originalFrs, idxKey, _direction ) );
+ _frv.reset( new FieldRangeVector( _frs, idxSpec, _direction ) );
+ if ( originalFrsp ) {
+ _originalFrv.reset( new FieldRangeVector( originalFrsp->frsForIndex( _d, _idxNo ), idxSpec, _direction ) );
+ }
+ else {
+ _originalFrv = _frv;
+ }
if ( _startOrEndSpec ) {
BSONObj newStart, newEnd;
if ( !startKey.isEmpty() )
@@ -178,7 +191,7 @@ doneCheckOrder:
}
if ( ( _scanAndOrderRequired || _order.isEmpty() ) &&
- !fbs.range( idxKey.firstElement().fieldName() ).nontrivial() ) {
+ !_frs.range( idxKey.firstElementFieldName() ).nontrivial() ) {
_unhelpful = true;
}
}
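To make the plan classification above concrete, a small worked example (illustrative only):

    // With index { a: 1, b: 1 }, query { a: 5, b: { $gt: 3 } } and sort { b: 1 }:
    // the index field preceding "b" in the order check ("a") is an equality, so
    // _scanAndOrderRequired stays false; both ranges are nontrivial, so
    // optimalIndexedQueryCount == nNontrivialRanges() and the plan is _optimal.
    // The "b" range is not an equality, so _exactKeyMatch is not set.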
@@ -190,39 +203,57 @@ doneCheckOrder:
return _type->newCursor( _originalQuery , _order , numWanted );
}
- if ( !_fbs.matchPossible() ) {
- if ( _fbs.nNontrivialRanges() )
- checkTableScanAllowed( _fbs.ns() );
+ if ( _impossible ) {
+ // TODO We might want to allow this dummy table scan even in no table
+ // scan mode, since it won't scan anything.
+ if ( _frs.nNontrivialRanges() )
+ checkTableScanAllowed( _frs.ns() );
return shared_ptr<Cursor>( new BasicCursor( DiskLoc() ) );
}
- if ( !_index ) {
- if ( _fbs.nNontrivialRanges() )
- checkTableScanAllowed( _fbs.ns() );
- return findTableScan( _fbs.ns(), _order, startLoc );
- }
+ if ( willScanTable() ) {
+ if ( _frs.nNontrivialRanges() ) {
+ checkTableScanAllowed( _frs.ns() );
+
+ // if we are doing a table scan on _id
+ // and it's a capped collection
+ // we warn, as it's a common user error
+ // .system. and local collections are exempt
+ if ( _d && _d->capped && _frs.range( "_id" ).nontrivial() ) {
+ if ( cc().isSyncThread() ||
+ str::contains( _frs.ns() , ".system." ) ||
+ str::startsWith( _frs.ns() , "local." ) ) {
+ // ok
+ }
+ else {
+ warning() << "_id query on capped collection without an _id index, performance will be poor collection: " << _frs.ns() << endl;
+ //uassert( 14820, str::stream() << "doing _id query on a capped collection without an index is not allowed: " << _frs.ns() ,
+ }
+ }
+ }
+ return findTableScan( _frs.ns(), _order, startLoc );
+ }
+
massert( 10363 , "newCursor() with start location not implemented for indexed plans", startLoc.isNull() );
if ( _startOrEndSpec ) {
// we are sure to spec _endKeyInclusive
- return shared_ptr<Cursor>( new BtreeCursor( _d, _idxNo, *_index, _startKey, _endKey, _endKeyInclusive, _direction >= 0 ? 1 : -1 ) );
+ return shared_ptr<Cursor>( BtreeCursor::make( _d, _idxNo, *_index, _startKey, _endKey, _endKeyInclusive, _direction >= 0 ? 1 : -1 ) );
}
else if ( _index->getSpec().getType() ) {
- return shared_ptr<Cursor>( new BtreeCursor( _d, _idxNo, *_index, _frv->startKey(), _frv->endKey(), true, _direction >= 0 ? 1 : -1 ) );
+ return shared_ptr<Cursor>( BtreeCursor::make( _d, _idxNo, *_index, _frv->startKey(), _frv->endKey(), true, _direction >= 0 ? 1 : -1 ) );
}
else {
- return shared_ptr<Cursor>( new BtreeCursor( _d, _idxNo, *_index, _frv, _direction >= 0 ? 1 : -1 ) );
+ return shared_ptr<Cursor>( BtreeCursor::make( _d, _idxNo, *_index, _frv, _direction >= 0 ? 1 : -1 ) );
}
}
shared_ptr<Cursor> QueryPlan::newReverseCursor() const {
- if ( !_fbs.matchPossible() )
- return shared_ptr<Cursor>( new BasicCursor( DiskLoc() ) );
- if ( !_index ) {
+ if ( willScanTable() ) {
int orderSpec = _order.getIntField( "$natural" );
if ( orderSpec == INT_MIN )
orderSpec = 1;
- return findTableScan( _fbs.ns(), BSON( "$natural" << -orderSpec ) );
+ return findTableScan( _frs.ns(), BSON( "$natural" << -orderSpec ) );
}
massert( 10364 , "newReverseCursor() not implemented for indexed plans", false );
return shared_ptr<Cursor>();
@@ -235,23 +266,51 @@ doneCheckOrder:
}
void QueryPlan::registerSelf( long long nScanned ) const {
- if ( _fbs.matchPossible() ) {
- scoped_lock lk(NamespaceDetailsTransient::_qcMutex);
- NamespaceDetailsTransient::get_inlock( ns() ).registerIndexForPattern( _fbs.pattern( _order ), indexKey(), nScanned );
+ // FIXME SERVER-2864 Otherwise no query pattern can be generated.
+ if ( _frs.matchPossible() ) {
+ SimpleMutex::scoped_lock lk(NamespaceDetailsTransient::_qcMutex);
+ NamespaceDetailsTransient::get_inlock( ns() ).registerIndexForPattern( _frs.pattern( _order ), indexKey(), nScanned );
}
}
+
+ /**
+ * @return a copy of the inheriting class, which will be run with its own
+ * query plan. If multiple plan sets are required for an $or query, the
+ * QueryOp of the winning plan from a given set will be cloned to generate
+ * QueryOps for the subsequent plan set. This function should only be called
+ * after the query op has completed executing.
+ */
+ QueryOp *QueryOp::createChild() {
+ if( _orConstraint.get() ) {
+ _matcher->advanceOrClause( _orConstraint );
+ _orConstraint.reset();
+ }
+ QueryOp *ret = _createChild();
+ ret->_oldMatcher = _matcher;
+ return ret;
+ }
bool QueryPlan::isMultiKey() const {
if ( _idxNo < 0 )
return false;
return _d->isMultikey( _idxNo );
}
+
+ void QueryOp::init() {
+ if ( _oldMatcher.get() ) {
+ _matcher.reset( _oldMatcher->nextClauseMatcher( qp().indexKey() ) );
+ }
+ else {
+ _matcher.reset( new CoveredIndexMatcher( qp().originalQuery(), qp().indexKey(), alwaysUseRecord() ) );
+ }
+ _init();
+ }
- QueryPlanSet::QueryPlanSet( const char *ns, auto_ptr< FieldRangeSet > frs, auto_ptr< FieldRangeSet > originalFrs, const BSONObj &originalQuery, const BSONObj &order, const BSONElement *hint, bool honorRecordedPlan, const BSONObj &min, const BSONObj &max, bool bestGuessOnly, bool mayYield ) :
+ QueryPlanSet::QueryPlanSet( const char *ns, auto_ptr<FieldRangeSetPair> frsp, auto_ptr<FieldRangeSetPair> originalFrsp, const BSONObj &originalQuery, const BSONObj &order, bool mustAssertOnYieldFailure, const BSONElement *hint, bool honorRecordedPlan, const BSONObj &min, const BSONObj &max, bool bestGuessOnly, bool mayYield ) :
_ns(ns),
_originalQuery( originalQuery ),
- _fbs( frs ),
- _originalFrs( originalFrs ),
+ _frsp( frsp ),
+ _originalFrsp( originalFrsp ),
_mayRecordPlan( true ),
_usingPrerecordedPlan( false ),
_hint( BSONObj() ),
@@ -262,7 +321,8 @@ doneCheckOrder:
_max( max.getOwned() ),
_bestGuessOnly( bestGuessOnly ),
_mayYield( mayYield ),
- _yieldSometimesTracker( 256, 20 ) {
+ _yieldSometimesTracker( 256, 20 ),
+ _mustAssertOnYieldFailure( mustAssertOnYieldFailure ) {
if ( hint && !hint->eoo() ) {
_hint = hint->wrap();
}
@@ -289,10 +349,10 @@ doneCheckOrder:
string errmsg;
BSONObj keyPattern = id.keyPattern();
// This reformats _min and _max to be used for index lookup.
- massert( 10365 , errmsg, indexDetailsForRange( _fbs->ns(), errmsg, _min, _max, keyPattern ) );
+ massert( 10365 , errmsg, indexDetailsForRange( _frsp->ns(), errmsg, _min, _max, keyPattern ) );
}
NamespaceDetails *d = nsdetails(_ns);
- _plans.push_back( QueryPlanPtr( new QueryPlan( d, d->idxNo(id), *_fbs, *_originalFrs, _originalQuery, _order, _min, _max ) ) );
+ _plans.push_back( QueryPlanPtr( new QueryPlan( d, d->idxNo(id), *_frsp, _originalFrsp.get(), _originalQuery, _order, _mustAssertOnYieldFailure, _min, _max ) ) );
}
// returns an IndexDetails * for a hint, 0 if hint is $natural.
@@ -312,7 +372,7 @@ doneCheckOrder:
else if( hint.type() == Object ) {
BSONObj hintobj = hint.embeddedObject();
uassert( 10112 , "bad hint", !hintobj.isEmpty() );
- if ( !strcmp( hintobj.firstElement().fieldName(), "$natural" ) ) {
+ if ( !strcmp( hintobj.firstElementFieldName(), "$natural" ) ) {
return 0;
}
NamespaceDetails::IndexIterator i = d->ii();
@@ -329,15 +389,16 @@ doneCheckOrder:
void QueryPlanSet::init() {
DEBUGQO( "QueryPlanSet::init " << ns << "\t" << _originalQuery );
+ _runner.reset();
_plans.clear();
_mayRecordPlan = true;
_usingPrerecordedPlan = false;
- const char *ns = _fbs->ns();
+ const char *ns = _frsp->ns();
NamespaceDetails *d = nsdetails( ns );
- if ( !d || !_fbs->matchPossible() ) {
+ if ( !d || !_frsp->matchPossible() ) {
// Table scan plan, when no matches are possible
- _plans.push_back( QueryPlanPtr( new QueryPlan( d, -1, *_fbs, *_originalFrs, _originalQuery, _order ) ) );
+ _plans.push_back( QueryPlanPtr( new QueryPlan( d, -1, *_frsp, _originalFrsp.get(), _originalQuery, _order, _mustAssertOnYieldFailure ) ) );
return;
}
@@ -351,7 +412,7 @@ doneCheckOrder:
else {
massert( 10366 , "natural order cannot be specified with $min/$max", _min.isEmpty() && _max.isEmpty() );
// Table scan plan
- _plans.push_back( QueryPlanPtr( new QueryPlan( d, -1, *_fbs, *_originalFrs, _originalQuery, _order ) ) );
+ _plans.push_back( QueryPlanPtr( new QueryPlan( d, -1, *_frsp, _originalFrsp.get(), _originalQuery, _order, _mustAssertOnYieldFailure ) ) );
}
return;
}
@@ -361,7 +422,7 @@ doneCheckOrder:
BSONObj keyPattern;
IndexDetails *idx = indexDetailsForRange( ns, errmsg, _min, _max, keyPattern );
massert( 10367 , errmsg, idx );
- _plans.push_back( QueryPlanPtr( new QueryPlan( d, d->idxNo(*idx), *_fbs, *_originalFrs, _originalQuery, _order, _min, _max ) ) );
+ _plans.push_back( QueryPlanPtr( new QueryPlan( d, d->idxNo(*idx), *_frsp, _originalFrsp.get(), _originalQuery, _order, _mustAssertOnYieldFailure, _min, _max ) ) );
return;
}
@@ -370,19 +431,19 @@ doneCheckOrder:
if ( idx >= 0 ) {
_usingPrerecordedPlan = true;
_mayRecordPlan = false;
- _plans.push_back( QueryPlanPtr( new QueryPlan( d , idx , *_fbs , *_fbs , _originalQuery, _order ) ) );
+ _plans.push_back( QueryPlanPtr( new QueryPlan( d , idx , *_frsp , _originalFrsp.get() , _originalQuery, _order, _mustAssertOnYieldFailure ) ) );
return;
}
}
if ( _originalQuery.isEmpty() && _order.isEmpty() ) {
- _plans.push_back( QueryPlanPtr( new QueryPlan( d, -1, *_fbs, *_originalFrs, _originalQuery, _order ) ) );
+ _plans.push_back( QueryPlanPtr( new QueryPlan( d, -1, *_frsp, _originalFrsp.get(), _originalQuery, _order, _mustAssertOnYieldFailure ) ) );
return;
}
- DEBUGQO( "\t special : " << _fbs->getSpecial() );
- if ( _fbs->getSpecial().size() ) {
- _special = _fbs->getSpecial();
+ DEBUGQO( "\t special : " << _frsp->getSpecial() );
+ if ( _frsp->getSpecial().size() ) {
+ _special = _frsp->getSpecial();
NamespaceDetails::IndexIterator i = d->ii();
while( i.more() ) {
int j = i.pos();
@@ -391,8 +452,8 @@ doneCheckOrder:
if ( spec.getTypeName() == _special && spec.suitability( _originalQuery , _order ) ) {
_usingPrerecordedPlan = true;
_mayRecordPlan = false;
- _plans.push_back( QueryPlanPtr( new QueryPlan( d , j , *_fbs , *_fbs , _originalQuery, _order ,
- BSONObj() , BSONObj() , _special ) ) );
+ _plans.push_back( QueryPlanPtr( new QueryPlan( d , j , *_frsp , _originalFrsp.get() , _originalQuery, _order ,
+ _mustAssertOnYieldFailure , BSONObj() , BSONObj() , _special ) ) );
return;
}
}
@@ -400,15 +461,15 @@ doneCheckOrder:
}
if ( _honorRecordedPlan ) {
- scoped_lock lk(NamespaceDetailsTransient::_qcMutex);
- NamespaceDetailsTransient& nsd = NamespaceDetailsTransient::get_inlock( ns );
- BSONObj bestIndex = nsd.indexForPattern( _fbs->pattern( _order ) );
+ pair< BSONObj, long long > best = QueryUtilIndexed::bestIndexForPatterns( *_frsp, _order );
+ BSONObj bestIndex = best.first;
+ long long oldNScanned = best.second;
if ( !bestIndex.isEmpty() ) {
QueryPlanPtr p;
- _oldNScanned = nsd.nScannedForPattern( _fbs->pattern( _order ) );
- if ( !strcmp( bestIndex.firstElement().fieldName(), "$natural" ) ) {
+ _oldNScanned = oldNScanned;
+ if ( !strcmp( bestIndex.firstElementFieldName(), "$natural" ) ) {
// Table scan plan
- p.reset( new QueryPlan( d, -1, *_fbs, *_originalFrs, _originalQuery, _order ) );
+ p.reset( new QueryPlan( d, -1, *_frsp, _originalFrsp.get(), _originalQuery, _order, _mustAssertOnYieldFailure ) );
}
NamespaceDetails::IndexIterator i = d->ii();
@@ -416,7 +477,7 @@ doneCheckOrder:
int j = i.pos();
IndexDetails& ii = i.next();
if( ii.keyPattern().woCompare(bestIndex) == 0 ) {
- p.reset( new QueryPlan( d, j, *_fbs, *_originalFrs, _originalQuery, _order ) );
+ p.reset( new QueryPlan( d, j, *_frsp, _originalFrsp.get(), _originalQuery, _order, _mustAssertOnYieldFailure ) );
}
}
@@ -434,67 +495,111 @@ doneCheckOrder:
}
void QueryPlanSet::addOtherPlans( bool checkFirst ) {
- const char *ns = _fbs->ns();
+ const char *ns = _frsp->ns();
NamespaceDetails *d = nsdetails( ns );
if ( !d )
return;
// If table scan is optimal or natural order requested or tailable cursor requested
- if ( !_fbs->matchPossible() || ( _fbs->nNontrivialRanges() == 0 && _order.isEmpty() ) ||
- ( !_order.isEmpty() && !strcmp( _order.firstElement().fieldName(), "$natural" ) ) ) {
+ if ( !_frsp->matchPossible() || ( _frsp->noNontrivialRanges() && _order.isEmpty() ) ||
+ ( !_order.isEmpty() && !strcmp( _order.firstElementFieldName(), "$natural" ) ) ) {
// Table scan plan
- addPlan( QueryPlanPtr( new QueryPlan( d, -1, *_fbs, *_originalFrs, _originalQuery, _order ) ), checkFirst );
+ addPlan( QueryPlanPtr( new QueryPlan( d, -1, *_frsp, _originalFrsp.get(), _originalQuery, _order, _mustAssertOnYieldFailure ) ), checkFirst );
return;
}
bool normalQuery = _hint.isEmpty() && _min.isEmpty() && _max.isEmpty();
PlanSet plans;
+ QueryPlanPtr optimalPlan;
for( int i = 0; i < d->nIndexes; ++i ) {
- IndexDetails& id = d->idx(i);
- const IndexSpec& spec = id.getSpec();
- IndexSuitability suitability = HELPFUL;
if ( normalQuery ) {
- suitability = spec.suitability( _fbs->simplifiedQuery() , _order );
- if ( suitability == USELESS )
+ BSONObj keyPattern = d->idx( i ).keyPattern();
+ if ( !_frsp->matchPossibleForIndex( d, i, keyPattern ) ) {
+ // If no match is possible, only generate a trivial plan that won't
+ // scan any documents.
+ QueryPlanPtr p( new QueryPlan( d, i, *_frsp, _originalFrsp.get(), _originalQuery, _order, _mustAssertOnYieldFailure ) );
+ addPlan( p, checkFirst );
+ return;
+ }
+ if ( !QueryUtilIndexed::indexUseful( *_frsp, d, i, _order ) ) {
continue;
+ }
}
- QueryPlanPtr p( new QueryPlan( d, i, *_fbs, *_originalFrs, _originalQuery, _order ) );
+ QueryPlanPtr p( new QueryPlan( d, i, *_frsp, _originalFrsp.get(), _originalQuery, _order, _mustAssertOnYieldFailure ) );
if ( p->optimal() ) {
- addPlan( p, checkFirst );
- return;
+ if ( !optimalPlan.get() ) {
+ optimalPlan = p;
+ }
}
else if ( !p->unhelpful() ) {
plans.push_back( p );
}
}
+ if ( optimalPlan.get() ) {
+ addPlan( optimalPlan, checkFirst );
+ return;
+ }
for( PlanSet::iterator i = plans.begin(); i != plans.end(); ++i )
addPlan( *i, checkFirst );
// Table scan plan
- addPlan( QueryPlanPtr( new QueryPlan( d, -1, *_fbs, *_originalFrs, _originalQuery, _order ) ), checkFirst );
+ addPlan( QueryPlanPtr( new QueryPlan( d, -1, *_frsp, _originalFrsp.get(), _originalQuery, _order, _mustAssertOnYieldFailure ) ), checkFirst );
}
- shared_ptr< QueryOp > QueryPlanSet::runOp( QueryOp &op ) {
+ shared_ptr<QueryOp> QueryPlanSet::runOp( QueryOp &op ) {
if ( _usingPrerecordedPlan ) {
Runner r( *this, op );
- shared_ptr< QueryOp > res = r.run();
- // _plans.size() > 1 if addOtherPlans was called in Runner::run().
+ shared_ptr<QueryOp> res = r.runUntilFirstCompletes();
+ // _plans.size() > 1 if addOtherPlans was called in Runner::runUntilFirstCompletes().
if ( _bestGuessOnly || res->complete() || _plans.size() > 1 )
return res;
- {
- scoped_lock lk(NamespaceDetailsTransient::_qcMutex);
- NamespaceDetailsTransient::get_inlock( _fbs->ns() ).registerIndexForPattern( _fbs->pattern( _order ), BSONObj(), 0 );
- }
+ // Retry with all candidate plans.
+ QueryUtilIndexed::clearIndexesForPatterns( *_frsp, _order );
init();
}
Runner r( *this, op );
- return r.run();
+ return r.runUntilFirstCompletes();
+ }
+
+ shared_ptr<QueryOp> QueryPlanSet::nextOp( QueryOp &originalOp, bool retried ) {
+ if ( !_runner ) {
+ _runner.reset( new Runner( *this, originalOp ) );
+ shared_ptr<QueryOp> op = _runner->init();
+ if ( op->complete() ) {
+ return op;
+ }
+ }
+ shared_ptr<QueryOp> op = _runner->nextNonError();
+ if ( !op->error() ) {
+ return op;
+ }
+ if ( !_usingPrerecordedPlan || _bestGuessOnly || _plans.size() > 1 ) {
+ return op;
+ }
+
+ // Avoid an infinite loop here
+ uassert( 15878, str::stream() << "query plans not successful even with no constraints, potentially due to additional sort", ! retried );
+
+ // Retry with all candidate plans.
+ QueryUtilIndexed::clearIndexesForPatterns( *_frsp, _order );
+ init();
+ return nextOp( originalOp, true );
}
+ bool QueryPlanSet::prepareToYield() {
+ return _runner ? _runner->prepareToYield() : true;
+ }
+
+ void QueryPlanSet::recoverFromYield() {
+ if ( _runner ) {
+ _runner->recoverFromYield();
+ }
+ }
+
BSONObj QueryPlanSet::explain() const {
- vector< BSONObj > arr;
+ vector<BSONObj> arr;
for( PlanSet::const_iterator i = _plans.begin(); i != _plans.end(); ++i ) {
shared_ptr<Cursor> c = (*i)->newCursor();
BSONObjBuilder explain;
@@ -515,17 +620,16 @@ doneCheckOrder:
return _plans[i];
}
- stringstream ss;
- ss << "best guess plan requested, but scan and order required:";
- ss << " query: " << _fbs->simplifiedQuery();
- ss << " order: " << _order;
- ss << " choices: ";
- for ( unsigned i=0; i<_plans.size(); i++ ) {
- ss << _plans[i]->indexKey() << " ";
- }
+ warning() << "best guess query plan requested, but scan and order are required for all plans "
+ << " query: " << _originalQuery
+ << " order: " << _order
+ << " choices: ";
+
+ for ( unsigned i=0; i<_plans.size(); i++ )
+ warning() << _plans[i]->indexKey() << " ";
+ warning() << endl;
- string s = ss.str();
- msgassertedNoTrace( 13284, s.c_str() );
+ return QueryPlanPtr();
}
return _plans[0];
}
@@ -535,101 +639,134 @@ doneCheckOrder:
_plans( plans ) {
}
- void QueryPlanSet::Runner::mayYield( const vector< shared_ptr< QueryOp > > &ops ) {
- if ( _plans._mayYield ) {
- if ( _plans._yieldSometimesTracker.ping() ) {
- int micros = ClientCursor::yieldSuggest();
- if ( micros > 0 ) {
- for( vector< shared_ptr< QueryOp > >::const_iterator i = ops.begin(); i != ops.end(); ++i ) {
- if ( !prepareToYield( **i ) ) {
- return;
- }
- }
- ClientCursor::staticYield( micros , _plans._ns );
- for( vector< shared_ptr< QueryOp > >::const_iterator i = ops.begin(); i != ops.end(); ++i ) {
- recoverFromYield( **i );
- }
- }
+ bool QueryPlanSet::Runner::prepareToYield() {
+ for( vector<shared_ptr<QueryOp> >::const_iterator i = _ops.begin(); i != _ops.end(); ++i ) {
+ if ( !prepareToYieldOp( **i ) ) {
+ return false;
}
}
+ return true;
}
- struct OpHolder {
- OpHolder( const shared_ptr< QueryOp > &op ) : _op( op ), _offset() {}
- shared_ptr< QueryOp > _op;
- long long _offset;
- bool operator<( const OpHolder &other ) const {
- return _op->nscanned() + _offset > other._op->nscanned() + other._offset;
- }
- };
+ void QueryPlanSet::Runner::recoverFromYield() {
+ for( vector<shared_ptr<QueryOp> >::const_iterator i = _ops.begin(); i != _ops.end(); ++i ) {
+ recoverFromYieldOp( **i );
+ }
+ }
+
+ void QueryPlanSet::Runner::mayYield() {
+ if ( ! _plans._mayYield )
+ return;
+
+ if ( ! _plans._yieldSometimesTracker.ping() )
+ return;
+
+ int micros = ClientCursor::yieldSuggest();
+ if ( micros <= 0 )
+ return;
+
+ if ( !prepareToYield() )
+ return;
+
+ ClientCursor::staticYield( micros , _plans._ns , 0 );
+ recoverFromYield();
+ }
- shared_ptr< QueryOp > QueryPlanSet::Runner::run() {
+ shared_ptr<QueryOp> QueryPlanSet::Runner::init() {
massert( 10369 , "no plans", _plans._plans.size() > 0 );
-
- vector< shared_ptr< QueryOp > > ops;
+
if ( _plans._bestGuessOnly ) {
- shared_ptr< QueryOp > op( _op.createChild() );
+ shared_ptr<QueryOp> op( _op.createChild() );
op->setQueryPlan( _plans.getBestGuess().get() );
- ops.push_back( op );
+ _ops.push_back( op );
}
else {
if ( _plans._plans.size() > 1 )
log(1) << " running multiple plans" << endl;
for( PlanSet::iterator i = _plans._plans.begin(); i != _plans._plans.end(); ++i ) {
- shared_ptr< QueryOp > op( _op.createChild() );
+ shared_ptr<QueryOp> op( _op.createChild() );
op->setQueryPlan( i->get() );
- ops.push_back( op );
+ _ops.push_back( op );
}
}
-
- for( vector< shared_ptr< QueryOp > >::iterator i = ops.begin(); i != ops.end(); ++i ) {
+
+ // Initialize ops.
+ for( vector<shared_ptr<QueryOp> >::iterator i = _ops.begin(); i != _ops.end(); ++i ) {
initOp( **i );
if ( (*i)->complete() )
return *i;
}
-
- std::priority_queue< OpHolder > queue;
- for( vector< shared_ptr< QueryOp > >::iterator i = ops.begin(); i != ops.end(); ++i ) {
+
+ // Put runnable ops in the priority queue.
+ for( vector<shared_ptr<QueryOp> >::iterator i = _ops.begin(); i != _ops.end(); ++i ) {
if ( !(*i)->error() ) {
- queue.push( *i );
+ _queue.push( *i );
}
}
-
- while( !queue.empty() ) {
- mayYield( ops );
- OpHolder holder = queue.top();
- queue.pop();
- QueryOp &op = *holder._op;
- nextOp( op );
- if ( op.complete() ) {
- if ( _plans._mayRecordPlan && op.mayRecordPlan() ) {
- op.qp().registerSelf( op.nscanned() );
- }
- return holder._op;
- }
- if ( op.error() ) {
- continue;
- }
- queue.push( holder );
- if ( !_plans._bestGuessOnly && _plans._usingPrerecordedPlan && op.nscanned() > _plans._oldNScanned * 10 && _plans._special.empty() ) {
- holder._offset = -op.nscanned();
- _plans.addOtherPlans( true );
- PlanSet::iterator i = _plans._plans.begin();
- ++i;
- for( ; i != _plans._plans.end(); ++i ) {
- shared_ptr< QueryOp > op( _op.createChild() );
- op->setQueryPlan( i->get() );
- ops.push_back( op );
- initOp( *op );
- if ( op->complete() )
- return op;
- queue.push( op );
- }
- _plans._mayRecordPlan = true;
- _plans._usingPrerecordedPlan = false;
+
+ return *_ops.begin();
+ }
+
+ shared_ptr<QueryOp> QueryPlanSet::Runner::nextNonError() {
+ if ( _queue.empty() ) {
+ return *_ops.begin();
+ }
+ shared_ptr<QueryOp> ret;
+ do {
+ ret = next();
+ } while( ret->error() && !_queue.empty() );
+ return ret;
+ }
+
+ shared_ptr<QueryOp> QueryPlanSet::Runner::next() {
+ mayYield();
+ dassert( !_queue.empty() );
+ OpHolder holder = _queue.pop();
+ QueryOp &op = *holder._op;
+ nextOp( op );
+ if ( op.complete() ) {
+ if ( _plans._mayRecordPlan && op.mayRecordPlan() ) {
+ op.qp().registerSelf( op.nscanned() );
}
+ return holder._op;
+ }
+ if ( op.error() ) {
+ return holder._op;
}
- return ops[ 0 ];
+ _queue.push( holder );
+ if ( !_plans._bestGuessOnly && _plans._usingPrerecordedPlan && op.nscanned() > _plans._oldNScanned * 10 && _plans._special.empty() ) {
+ holder._offset = -op.nscanned();
+ _plans.addOtherPlans( /* avoid duplicating the initial plan */ true );
+ PlanSet::iterator i = _plans._plans.begin();
+ ++i;
+ for( ; i != _plans._plans.end(); ++i ) {
+ shared_ptr<QueryOp> op( _op.createChild() );
+ op->setQueryPlan( i->get() );
+ _ops.push_back( op );
+ initOp( *op );
+ if ( op->complete() )
+ return op;
+ _queue.push( op );
+ }
+ _plans._mayRecordPlan = true;
+ _plans._usingPrerecordedPlan = false;
+ }
+ return holder._op;
+ }
+
+ shared_ptr<QueryOp> QueryPlanSet::Runner::runUntilFirstCompletes() {
+ shared_ptr<QueryOp> potentialFinisher = init();
+ if ( potentialFinisher->complete() ) {
+ return potentialFinisher;
+ }
+
+ while( !_queue.empty() ) {
+ shared_ptr<QueryOp> potentialFinisher = next();
+ if ( potentialFinisher->complete() ) {
+ return potentialFinisher;
+ }
+ }
+ return _ops[ 0 ];
}
#define GUARD_OP_EXCEPTION( op, expression ) \
@@ -655,22 +792,46 @@ doneCheckOrder:
GUARD_OP_EXCEPTION( op, if ( !op.error() ) { op.next(); } );
}
- bool QueryPlanSet::Runner::prepareToYield( QueryOp &op ) {
+ bool QueryPlanSet::Runner::prepareToYieldOp( QueryOp &op ) {
GUARD_OP_EXCEPTION( op,
if ( op.error() ) {
- return true;
- }
- else {
- return op.prepareToYield();
+ return true;
+ }
+ else {
+ return op.prepareToYield();
} );
return true;
}
- void QueryPlanSet::Runner::recoverFromYield( QueryOp &op ) {
+ void QueryPlanSet::Runner::recoverFromYieldOp( QueryOp &op ) {
GUARD_OP_EXCEPTION( op, if ( !op.error() ) { op.recoverFromYield(); } );
}
-
+ /**
+ * NOTE on our $or implementation: In our current qo implementation we don't
+ * keep statistics on our data, but we can conceptualize the problem of
+ * selecting an index when statistics exist for all index ranges. The
+ * d-hitting set problem on k sets and n elements can be reduced to the
+ * problem of index selection on k $or clauses and n index ranges (where
+ * d is the max number of indexes, and the number of ranges n is unbounded).
+ * In light of the fact that d-hitting set is NP-complete, and we don't even
+ * track statistics (so cost calculations are expensive) our first
+ * implementation uses the following greedy approach: We take one $or clause
+ * at a time and treat each as a separate query for index selection purposes.
+ * But if an index range is scanned for a particular $or clause, we eliminate
+ * that range from all subsequent clauses. One could imagine an opposite
+ * implementation where we select indexes based on the union of index ranges
+ * for all $or clauses, but this can have much poorer worst case behavior.
+ * (An index range that suits one $or clause may not suit another, and this
+ * is worse than the typical case of index range choice staleness because
+ * with $or the clauses may likely be logically distinct.) The greedy
+ * implementation won't do any worse than all the $or clauses individually,
+ * and it can often do better. In the first cut we are intentionally using
+ * QueryPattern tracking to record successful plans on $or clauses for use by
+ * subsequent $or clauses, even though there may be a significant aggregate
+ * $nor component that would not be represented in QueryPattern.
+ */
+
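A concrete illustration of the greedy, clause-at-a-time approach described above (hypothetical example):

    // With an index on { a: 1 } and the query
    //   { $or: [ { a: { $gt: 0, $lt: 10 } }, { a: { $gt: 5, $lt: 20 } } ] }
    // the first clause is planned on its own and scans the index range (0, 10).
    // That range is then eliminated from the second clause, which is left to scan
    // only the remaining (10, 20) portion rather than its full (5, 20) range.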
MultiPlanScanner::MultiPlanScanner( const char *ns,
const BSONObj &query,
const BSONObj &order,
@@ -683,24 +844,29 @@ doneCheckOrder:
_ns( ns ),
_or( !query.getField( "$or" ).eoo() ),
_query( query.getOwned() ),
- _fros( ns, _query ),
_i(),
_honorRecordedPlan( honorRecordedPlan ),
_bestGuessOnly( bestGuessOnly ),
_hint( ( hint && !hint->eoo() ) ? hint->wrap() : BSONObj() ),
_mayYield( mayYield ),
_tableScanned() {
- if ( !order.isEmpty() || !min.isEmpty() || !max.isEmpty() || !_fros.getSpecial().empty() ) {
+ if ( !order.isEmpty() || !min.isEmpty() || !max.isEmpty() ) {
_or = false;
}
- if ( _or && uselessOr( _hint.firstElement() ) ) {
- _or = false;
+ if ( _or ) {
+ // Only construct an OrRangeGenerator if we may handle $or clauses.
+ _org.reset( new OrRangeGenerator( ns, _query ) );
+ if ( !_org->getSpecial().empty() ) {
+ _or = false;
+ }
+ else if ( uselessOr( _hint.firstElement() ) ) {
+ _or = false;
+ }
}
// if _or == false, don't use or clauses for index selection
if ( !_or ) {
- auto_ptr< FieldRangeSet > frs( new FieldRangeSet( ns, _query ) );
- auto_ptr< FieldRangeSet > oldFrs( new FieldRangeSet( *frs ) );
- _currentQps.reset( new QueryPlanSet( ns, frs, oldFrs, _query, order, hint, honorRecordedPlan, min, max, _bestGuessOnly, _mayYield ) );
+ auto_ptr<FieldRangeSetPair> frsp( new FieldRangeSetPair( ns, _query, true ) );
+ _currentQps.reset( new QueryPlanSet( ns, frsp, auto_ptr<FieldRangeSetPair>(), _query, order, false, hint, honorRecordedPlan, min, max, _bestGuessOnly, _mayYield ) );
}
else {
BSONElement e = _query.getField( "$or" );
@@ -708,71 +874,168 @@ doneCheckOrder:
}
}
- shared_ptr< QueryOp > MultiPlanScanner::runOpOnce( QueryOp &op ) {
- massert( 13271, "can't run more ops", mayRunMore() );
+ shared_ptr<QueryOp> MultiPlanScanner::runOpOnce( QueryOp &op ) {
+ assertMayRunMore();
if ( !_or ) {
++_i;
return _currentQps->runOp( op );
}
++_i;
- auto_ptr< FieldRangeSet > frs( _fros.topFrs() );
- auto_ptr< FieldRangeSet > originalFrs( _fros.topFrsOriginal() );
+ auto_ptr<FieldRangeSetPair> frsp( _org->topFrsp() );
+ auto_ptr<FieldRangeSetPair> originalFrsp( _org->topFrspOriginal() );
BSONElement hintElt = _hint.firstElement();
- _currentQps.reset( new QueryPlanSet( _ns, frs, originalFrs, _query, BSONObj(), &hintElt, _honorRecordedPlan, BSONObj(), BSONObj(), _bestGuessOnly, _mayYield ) );
- shared_ptr< QueryOp > ret( _currentQps->runOp( op ) );
+ _currentQps.reset( new QueryPlanSet( _ns, frsp, originalFrsp, _query, BSONObj(), true, &hintElt, _honorRecordedPlan, BSONObj(), BSONObj(), _bestGuessOnly, _mayYield ) );
+ shared_ptr<QueryOp> ret( _currentQps->runOp( op ) );
+ if ( ! ret->complete() )
+ throw MsgAssertionException( ret->exception() );
if ( ret->qp().willScanTable() ) {
_tableScanned = true;
+ } else {
+ // If the full table was scanned, don't bother popping the last or clause.
+ _org->popOrClause( ret->qp().nsd(), ret->qp().idxNo(), ret->qp().indexed() ? ret->qp().indexKey() : BSONObj() );
}
- _fros.popOrClause( ret->qp().indexed() ? ret->qp().indexKey() : BSONObj() );
return ret;
}
- shared_ptr< QueryOp > MultiPlanScanner::runOp( QueryOp &op ) {
- shared_ptr< QueryOp > ret = runOpOnce( op );
+ shared_ptr<QueryOp> MultiPlanScanner::runOp( QueryOp &op ) {
+ shared_ptr<QueryOp> ret = runOpOnce( op );
while( !ret->stopRequested() && mayRunMore() ) {
ret = runOpOnce( *ret );
}
return ret;
}
+
+ shared_ptr<QueryOp> MultiPlanScanner::nextOpHandleEndOfClause() {
+ shared_ptr<QueryOp> op = _currentQps->nextOp( *_baseOp );
+ if ( !op->complete() ) {
+ return op;
+ }
+ if ( op->qp().willScanTable() ) {
+ _tableScanned = true;
+ } else {
+ _org->popOrClause( op->qp().nsd(), op->qp().idxNo(), op->qp().indexed() ? op->qp().indexKey() : BSONObj() );
+ }
+ return op;
+ }
+
+ shared_ptr<QueryOp> MultiPlanScanner::nextOpBeginningClause() {
+ assertMayRunMore();
+ shared_ptr<QueryOp> op;
+ while( mayRunMore() ) {
+ ++_i;
+ auto_ptr<FieldRangeSetPair> frsp( _org->topFrsp() );
+ auto_ptr<FieldRangeSetPair> originalFrsp( _org->topFrspOriginal() );
+ BSONElement hintElt = _hint.firstElement();
+ _currentQps.reset( new QueryPlanSet( _ns, frsp, originalFrsp, _query, BSONObj(), true, &hintElt, _honorRecordedPlan, BSONObj(), BSONObj(), _bestGuessOnly, _mayYield ) );
+ op = nextOpHandleEndOfClause();
+ if ( !op->complete() ) {
+ return op;
+ }
+ _baseOp = op;
+ }
+ return op;
+ }
+
+ shared_ptr<QueryOp> MultiPlanScanner::nextOp() {
+ if ( !_or ) {
+ if ( _i == 0 ) {
+ assertMayRunMore();
+ ++_i;
+ }
+ return _currentQps->nextOp( *_baseOp );
+ }
+ if ( _i == 0 ) {
+ return nextOpBeginningClause();
+ }
+ shared_ptr<QueryOp> op = nextOpHandleEndOfClause();
+ if ( !op->complete() ) {
+ return op;
+ }
+ if ( !op->stopRequested() && mayRunMore() ) {
+ // Finished scanning the clause, but stop hasn't been requested.
+ // Start scanning the next clause.
+ _baseOp = op;
+ return nextOpBeginningClause();
+ }
+ return op;
+ }
+
+ bool MultiPlanScanner::prepareToYield() {
+ return _currentQps.get() ? _currentQps->prepareToYield() : true;
+ }
+
+ void MultiPlanScanner::recoverFromYield() {
+ if ( _currentQps.get() ) {
+ _currentQps->recoverFromYield();
+ }
+ }
+
+ shared_ptr<Cursor> MultiPlanScanner::singleCursor() const {
+ if ( _or || _currentQps->nPlans() != 1 || _currentQps->firstPlan()->scanAndOrderRequired() ) {
+ return shared_ptr<Cursor>();
+ }
+ // If there is only one plan and it does not require an in memory
+ // sort, we do not expect its cursor op to throw an exception and
+ // so do not need a QueryOptimizerCursor to handle this case.
+ return _currentQps->firstPlan()->newCursor();
+ }
bool MultiPlanScanner::uselessOr( const BSONElement &hint ) const {
NamespaceDetails *nsd = nsdetails( _ns );
if ( !nsd ) {
return true;
}
- IndexDetails *id = 0;
if ( !hint.eoo() ) {
IndexDetails *id = parseHint( hint, nsd );
if ( !id ) {
return true;
}
+ return QueryUtilIndexed::uselessOr( *_org, nsd, nsd->idxNo( *id ) );
}
- vector< BSONObj > ret;
- _fros.allClausesSimplified( ret );
- for( vector< BSONObj >::const_iterator i = ret.begin(); i != ret.end(); ++i ) {
- if ( id ) {
- if ( id->getSpec().suitability( *i, BSONObj() ) == USELESS ) {
- return true;
- }
- }
- else {
- bool useful = false;
- NamespaceDetails::IndexIterator j = nsd->ii();
- while( j.more() ) {
- IndexDetails &id = j.next();
- if ( id.getSpec().suitability( *i, BSONObj() ) != USELESS ) {
- useful = true;
- break;
- }
- }
- if ( !useful ) {
- return true;
- }
+ return QueryUtilIndexed::uselessOr( *_org, nsd, -1 );
+ }
+
+ MultiCursor::MultiCursor( const char *ns, const BSONObj &pattern, const BSONObj &order, shared_ptr<CursorOp> op, bool mayYield )
+ : _mps( new MultiPlanScanner( ns, pattern, order, 0, true, BSONObj(), BSONObj(), !op.get(), mayYield ) ), _nscanned() {
+ if ( op.get() ) {
+ _op = op;
+ }
+ else {
+ _op.reset( new NoOp() );
+ }
+ if ( _mps->mayRunMore() ) {
+ nextClause();
+ if ( !ok() ) {
+ advance();
}
}
- return false;
+ else {
+ _c.reset( new BasicCursor( DiskLoc() ) );
+ }
+ }
+
+ MultiCursor::MultiCursor( auto_ptr<MultiPlanScanner> mps, const shared_ptr<Cursor> &c, const shared_ptr<CoveredIndexMatcher> &matcher, const QueryOp &op, long long nscanned )
+ : _op( new NoOp( op ) ), _c( c ), _mps( mps ), _matcher( matcher ), _nscanned( nscanned ) {
+ _mps->setBestGuessOnly();
+ _mps->mayYield( false ); // with a NoOp, there's no need to yield in QueryPlanSet
+ if ( !ok() ) {
+ // would have been advanced by UserQueryOp if possible
+ advance();
+ }
}
-
+
+ void MultiCursor::nextClause() {
+ if ( _nscanned >= 0 && _c.get() ) {
+ _nscanned += _c->nscanned();
+ }
+ shared_ptr<CursorOp> best = _mps->runOpOnce( *_op );
+ if ( ! best->complete() )
+ throw MsgAssertionException( best->exception() );
+ _c = best->newCursor();
+ _matcher = best->matcher( _c );
+ _op = best;
+ }
+
bool indexWorks( const BSONObj &idxPattern, const BSONObj &sampleKey, int direction, int firstSignificantField ) {
BSONObjIterator p( idxPattern );
BSONObjIterator k( sampleKey );
@@ -816,7 +1079,7 @@ doneCheckOrder:
return b.obj();
}
- pair< int, int > keyAudit( const BSONObj &min, const BSONObj &max ) {
+ pair<int,int> keyAudit( const BSONObj &min, const BSONObj &max ) {
int direction = 0;
int firstSignificantField = 0;
BSONObjIterator i( min );
@@ -841,7 +1104,7 @@ doneCheckOrder:
return make_pair( direction, firstSignificantField );
}
- pair< int, int > flexibleKeyAudit( const BSONObj &min, const BSONObj &max ) {
+ pair<int,int> flexibleKeyAudit( const BSONObj &min, const BSONObj &max ) {
if ( min.isEmpty() || max.isEmpty() ) {
return make_pair( 1, -1 );
}
@@ -865,7 +1128,7 @@ doneCheckOrder:
return 0;
}
- pair< int, int > ret = flexibleKeyAudit( min, max );
+ pair<int,int> ret = flexibleKeyAudit( min, max );
if ( ret == make_pair( -1, -1 ) ) {
errmsg = "min and max keys do not share pattern";
return 0;
@@ -924,5 +1187,115 @@ doneCheckOrder:
return id;
}
+
+ bool isSimpleIdQuery( const BSONObj& query ) {
+ BSONObjIterator i(query);
+
+ if( !i.more() )
+ return false;
+ BSONElement e = i.next();
+
+ if( i.more() )
+ return false;
+
+ if( strcmp("_id", e.fieldName()) != 0 )
+ return false;
+
+ if ( e.isSimpleType() ) // e.g. not something like { _id : { $gt : ...
+ return true;
+
+ if ( e.type() == Object )
+ return e.Obj().firstElementFieldName()[0] != '$';
+
+ return false;
+ }
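    // Worked examples of the predicate above (illustrative values):
    //   { _id: 123 }           -> true   (single field, simple _id equality)
    //   { _id: { a: 1 } }      -> true   (subobject whose first field is not an operator)
    //   { _id: { $gt: 5 } }    -> false  (first field of the subobject starts with '$')
    //   { _id: 1, x: 2 }       -> false  (more than one top-level field)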
+
+ shared_ptr<Cursor> bestGuessCursor( const char *ns, const BSONObj &query, const BSONObj &sort ) {
+ if( !query.getField( "$or" ).eoo() ) {
+ return shared_ptr<Cursor>( new MultiCursor( ns, query, sort ) );
+ }
+ else {
+ auto_ptr<FieldRangeSetPair> frsp( new FieldRangeSetPair( ns, query, true ) );
+ auto_ptr<FieldRangeSetPair> origFrsp( new FieldRangeSetPair( *frsp ) );
+
+ QueryPlanSet qps( ns, frsp, origFrsp, query, sort, false );
+ QueryPlanSet::QueryPlanPtr qpp = qps.getBestGuess();
+ if( ! qpp.get() ) return shared_ptr<Cursor>();
+
+ shared_ptr<Cursor> ret = qpp->newCursor();
+
+ // If we don't already have a matcher, supply one.
+ if ( !query.isEmpty() && ! ret->matcher() ) {
+ shared_ptr<CoveredIndexMatcher> matcher( new CoveredIndexMatcher( query, ret->indexKeyPattern() ) );
+ ret->setMatcher( matcher );
+ }
+ return ret;
+ }
+ }
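    // Caller-side sketch (namespace and field names are hypothetical). The empty-pointer
    // case must be handled, since no cursor is produced when the requested sort is not
    // supported by any index.
    static shared_ptr<Cursor> exampleBestGuess() {
        shared_ptr<Cursor> c = bestGuessCursor( "test.example", BSON( "a" << 1 ), BSON( "b" << 1 ) );
        if ( !c.get() ) {
            // fall back to an unsorted query or report the unsupported sort
        }
        return c;
    }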
+
+ bool QueryUtilIndexed::indexUseful( const FieldRangeSetPair &frsp, NamespaceDetails *d, int idxNo, const BSONObj &order ) {
+ DEV frsp.assertValidIndex( d, idxNo );
+ BSONObj keyPattern = d->idx( idxNo ).keyPattern();
+ if ( !frsp.matchPossibleForIndex( d, idxNo, keyPattern ) ) {
+            // No matches are possible using this index, so the index is trivially useful:
+            // a plan over it can return the provably empty result set without scanning.
+ return true;
+ }
+ return d->idx( idxNo ).getSpec().suitability( frsp.simplifiedQueryForIndex( d, idxNo, keyPattern ), order ) != USELESS;
+ }
+
+ void QueryUtilIndexed::clearIndexesForPatterns( const FieldRangeSetPair &frsp, const BSONObj &order ) {
+ SimpleMutex::scoped_lock lk(NamespaceDetailsTransient::_qcMutex);
+ NamespaceDetailsTransient& nsd = NamespaceDetailsTransient::get_inlock( frsp.ns() );
+ nsd.registerIndexForPattern( frsp._singleKey.pattern( order ), BSONObj(), 0 );
+ nsd.registerIndexForPattern( frsp._multiKey.pattern( order ), BSONObj(), 0 );
+ }
+
+ pair< BSONObj, long long > QueryUtilIndexed::bestIndexForPatterns( const FieldRangeSetPair &frsp, const BSONObj &order ) {
+ SimpleMutex::scoped_lock lk(NamespaceDetailsTransient::_qcMutex);
+ NamespaceDetailsTransient& nsd = NamespaceDetailsTransient::get_inlock( frsp.ns() );
+ // TODO Maybe it would make sense to return the index with the lowest
+ // nscanned if there are two possibilities.
+ if ( frsp._singleKey.matchPossible() ) {
+ QueryPattern pattern = frsp._singleKey.pattern( order );
+ BSONObj oldIdx = nsd.indexForPattern( pattern );
+ if ( !oldIdx.isEmpty() ) {
+ long long oldNScanned = nsd.nScannedForPattern( pattern );
+ return make_pair( oldIdx, oldNScanned );
+ }
+ }
+ if ( frsp._multiKey.matchPossible() ) {
+ QueryPattern pattern = frsp._multiKey.pattern( order );
+ BSONObj oldIdx = nsd.indexForPattern( pattern );
+ if ( !oldIdx.isEmpty() ) {
+ long long oldNScanned = nsd.nScannedForPattern( pattern );
+ return make_pair( oldIdx, oldNScanned );
+ }
+ }
+ return make_pair( BSONObj(), 0 );
+ }
+
+ bool QueryUtilIndexed::uselessOr( const OrRangeGenerator &org, NamespaceDetails *d, int hintIdx ) {
+ for( list<FieldRangeSetPair>::const_iterator i = org._originalOrSets.begin(); i != org._originalOrSets.end(); ++i ) {
+ if ( hintIdx != -1 ) {
+ if ( !indexUseful( *i, d, hintIdx, BSONObj() ) ) {
+ return true;
+ }
+ }
+ else {
+ bool useful = false;
+ for( int j = 0; j < d->nIndexes; ++j ) {
+ if ( indexUseful( *i, d, j, BSONObj() ) ) {
+ useful = true;
+ break;
+ }
+ }
+ if ( !useful ) {
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+
} // namespace mongo
diff --git a/db/queryoptimizer.h b/db/queryoptimizer.h
index ebd264e..fea6c0b 100644
--- a/db/queryoptimizer.h
+++ b/db/queryoptimizer.h
@@ -1,4 +1,4 @@
-/* queryoptimizer.h */
+// @file queryoptimizer.h
/**
* Copyright (C) 2008 10gen Inc.
@@ -22,58 +22,79 @@
#include "jsobj.h"
#include "queryutil.h"
#include "matcher.h"
-#include "../util/message.h"
+#include "../util/net/listen.h"
+#include <queue>
namespace mongo {
class IndexDetails;
class IndexType;
+ class ElapsedTracker;
+ /** A plan for executing a query using the given index spec and FieldRangeSet. */
class QueryPlan : boost::noncopyable {
public:
+ /**
+ * @param originalFrsp - original constraints for this query clause. If null, frsp will be used instead.
+ */
QueryPlan(NamespaceDetails *d,
int idxNo, // -1 = no index
- const FieldRangeSet &fbs,
- const FieldRangeSet &originalFrs,
+ const FieldRangeSetPair &frsp,
+ const FieldRangeSetPair *originalFrsp,
const BSONObj &originalQuery,
const BSONObj &order,
+ bool mustAssertOnYieldFailure = true,
const BSONObj &startKey = BSONObj(),
- const BSONObj &endKey = BSONObj() ,
+ const BSONObj &endKey = BSONObj(),
string special="" );
- /* If true, no other index can do better. */
+ /** @return true iff no other plans should be considered. */
bool optimal() const { return _optimal; }
- /* ScanAndOrder processing will be required if true */
+        /** @return true iff this plan should not be considered at all. */
+ bool unhelpful() const { return _unhelpful; }
+ /** @return true iff ScanAndOrder processing will be required for result set. */
bool scanAndOrderRequired() const { return _scanAndOrderRequired; }
- /* When true, the index we are using has keys such that it can completely resolve the
- query expression to match by itself without ever checking the main object.
+ /**
+ * @return true iff the index we are using has keys such that it can completely resolve the
+ * query expression to match by itself without ever checking the main object.
*/
bool exactKeyMatch() const { return _exactKeyMatch; }
- /* If true, the startKey and endKey are unhelpful and the index order doesn't match the
- requested sort order */
- bool unhelpful() const { return _unhelpful; }
- int direction() const { return _direction; }
+ /** @return true iff this QueryPlan would perform an unindexed scan. */
+ bool willScanTable() const { return _idxNo < 0 && !_impossible; }
+
+ /** @return a new cursor based on this QueryPlan's index and FieldRangeSet. */
shared_ptr<Cursor> newCursor( const DiskLoc &startLoc = DiskLoc() , int numWanted=0 ) const;
+ /** @return a new reverse cursor if this is an unindexed plan. */
shared_ptr<Cursor> newReverseCursor() const;
+ /** Register this plan as a winner for its QueryPattern, with specified 'nscanned'. */
+ void registerSelf( long long nScanned ) const;
+
+ int direction() const { return _direction; }
BSONObj indexKey() const;
bool indexed() const { return _index; }
- bool willScanTable() const { return !_index && _fbs.matchPossible(); }
- const char *ns() const { return _fbs.ns(); }
+ int idxNo() const { return _idxNo; }
+ const char *ns() const { return _frs.ns(); }
NamespaceDetails *nsd() const { return _d; }
BSONObj originalQuery() const { return _originalQuery; }
- BSONObj simplifiedQuery( const BSONObj& fields = BSONObj() ) const { return _fbs.simplifiedQuery( fields ); }
- const FieldRange &range( const char *fieldName ) const { return _fbs.range( fieldName ); }
- void registerSelf( long long nScanned ) const;
- shared_ptr< FieldRangeVector > originalFrv() const { return _originalFrv; }
- // just for testing
- shared_ptr< FieldRangeVector > frv() const { return _frv; }
+ BSONObj simplifiedQuery( const BSONObj& fields = BSONObj() ) const { return _frs.simplifiedQuery( fields ); }
+ const FieldRange &range( const char *fieldName ) const { return _frs.range( fieldName ); }
+ shared_ptr<FieldRangeVector> originalFrv() const { return _originalFrv; }
+
+ const FieldRangeSet &multikeyFrs() const { return _frsMulti; }
+
+ bool mustAssertOnYieldFailure() const { return _mustAssertOnYieldFailure; }
+
+ /** just for testing */
+
+ shared_ptr<FieldRangeVector> frv() const { return _frv; }
bool isMultiKey() const;
private:
NamespaceDetails * _d;
int _idxNo;
- const FieldRangeSet &_fbs;
+ const FieldRangeSet &_frs;
+ const FieldRangeSet &_frsMulti;
const BSONObj &_originalQuery;
const BSONObj &_order;
const IndexDetails * _index;
@@ -81,86 +102,104 @@ namespace mongo {
bool _scanAndOrderRequired;
bool _exactKeyMatch;
int _direction;
- shared_ptr< FieldRangeVector > _frv;
- shared_ptr< FieldRangeVector > _originalFrv;
+ shared_ptr<FieldRangeVector> _frv;
+ shared_ptr<FieldRangeVector> _originalFrv;
BSONObj _startKey;
BSONObj _endKey;
bool _endKeyInclusive;
bool _unhelpful;
+ bool _impossible;
string _special;
IndexType * _type;
bool _startOrEndSpec;
+ bool _mustAssertOnYieldFailure;
};
- // Inherit from this interface to implement a new query operation.
- // The query optimizer will clone the QueryOp that is provided, giving
- // each clone its own query plan.
+ /**
+ * Inherit from this interface to implement a new query operation.
+ * The query optimizer will clone the QueryOp that is provided, giving
+ * each clone its own query plan.
+ *
+ * Normal sequence of events:
+ * 1) A new QueryOp is generated using createChild().
+ * 2) A QueryPlan is assigned to this QueryOp with setQueryPlan().
+ * 3) _init() is called on the QueryPlan.
+ * 4) next() is called repeatedly, with nscanned() checked after each call.
+ * 5) In one of these calls to next(), setComplete() is called.
+ * 6) The QueryPattern for the QueryPlan may be recorded as a winner.
+ */
class QueryOp {
public:
QueryOp() : _complete(), _stopRequested(), _qp(), _error() {}
- // Used when handing off from one QueryOp type to another
+ /** Used when handing off from one QueryOp to another. */
QueryOp( const QueryOp &other ) :
_complete(), _stopRequested(), _qp(), _error(), _matcher( other._matcher ),
_orConstraint( other._orConstraint ) {}
virtual ~QueryOp() {}
- /** these gets called after a query plan is set */
- void init() {
- if ( _oldMatcher.get() ) {
- _matcher.reset( _oldMatcher->nextClauseMatcher( qp().indexKey() ) );
- }
- else {
- _matcher.reset( new CoveredIndexMatcher( qp().originalQuery(), qp().indexKey(), alwaysUseRecord() ) );
- }
- _init();
- }
+ /** @return QueryPlan assigned to this QueryOp by the query optimizer. */
+ const QueryPlan &qp() const { return *_qp; }
+
+ /** Advance to next potential matching document (eg using a cursor). */
virtual void next() = 0;
-
- virtual bool mayRecordPlan() const = 0;
-
+ /**
+ * @return current 'nscanned' metric for this QueryOp. Used to compare
+ * cost to other QueryOps.
+ */
+ virtual long long nscanned() = 0;
+ /** Take any steps necessary before the db mutex is yielded. */
virtual bool prepareToYield() { massert( 13335, "yield not supported", false ); return false; }
+ /** Recover once the db mutex is regained. */
virtual void recoverFromYield() { massert( 13336, "yield not supported", false ); }
+
+ /**
+ * @return true iff the QueryPlan for this QueryOp may be registered
+ * as a winning plan.
+ */
+ virtual bool mayRecordPlan() const = 0;
- virtual long long nscanned() = 0;
-
- /** @return a copy of the inheriting class, which will be run with its own
- query plan. If multiple plan sets are required for an $or query,
- the QueryOp of the winning plan from a given set will be cloned
- to generate QueryOps for the subsequent plan set. This function
- should only be called after the query op has completed executing.
- */
- QueryOp *createChild() {
- if( _orConstraint.get() ) {
- _matcher->advanceOrClause( _orConstraint );
- _orConstraint.reset();
- }
- QueryOp *ret = _createChild();
- ret->_oldMatcher = _matcher;
- return ret;
- }
+ /** @return true iff the implementation called setComplete() or setStop(). */
bool complete() const { return _complete; }
- bool error() const { return _error; }
+        /** @return true iff the implementation called setStop(). */
bool stopRequested() const { return _stopRequested; }
+ /** @return true iff the implementation threw an exception. */
+ bool error() const { return _error; }
+ /** @return the exception thrown by implementation if one was thrown. */
ExceptionInfo exception() const { return _exception; }
- const QueryPlan &qp() const { return *_qp; }
- // To be called by QueryPlanSet::Runner only.
- void setQueryPlan( const QueryPlan *qp ) { _qp = qp; }
+
+ /** To be called by QueryPlanSet::Runner only. */
+
+ QueryOp *createChild();
+ void setQueryPlan( const QueryPlan *qp ) { _qp = qp; assert( _qp != NULL ); }
+ void init();
void setException( const DBException &e ) {
_error = true;
_exception = e.getInfo();
}
- shared_ptr< CoveredIndexMatcher > matcher() const { return _matcher; }
+
+ shared_ptr<CoveredIndexMatcher> matcher( const shared_ptr<Cursor>& c ) const {
+ return matcher( c.get() );
+ }
+ shared_ptr<CoveredIndexMatcher> matcher( Cursor* c ) const {
+ if( ! c ) return _matcher;
+ return c->matcher() ? c->matcherPtr() : _matcher;
+ }
+
protected:
+ /** Call if all results have been found. */
void setComplete() {
_orConstraint = qp().originalFrv();
_complete = true;
}
+ /** Call if the scan is complete even if not all results have been found. */
void setStop() { setComplete(); _stopRequested = true; }
+ /** Handle initialization after a QueryPlan has been set. */
virtual void _init() = 0;
+ /** @return a copy of the inheriting class, which will be run with its own query plan. */
virtual QueryOp *_createChild() const = 0;
virtual bool alwaysUseRecord() const { return false; }
@@ -171,42 +210,98 @@ namespace mongo {
ExceptionInfo _exception;
const QueryPlan *_qp;
bool _error;
- shared_ptr< CoveredIndexMatcher > _matcher;
- shared_ptr< CoveredIndexMatcher > _oldMatcher;
- shared_ptr< FieldRangeVector > _orConstraint;
+ shared_ptr<CoveredIndexMatcher> _matcher;
+ shared_ptr<CoveredIndexMatcher> _oldMatcher;
+ shared_ptr<FieldRangeVector> _orConstraint;
};
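    // A minimal QueryOp implementation sketch following the sequence documented above
    // (the class name is hypothetical): _createChild() supports step 1, _init() is
    // step 3, next() is step 4, and setComplete() is step 5.
    class CountingOp : public QueryOp {
    public:
        CountingOp() : _n() {}
        virtual void next() {
            if ( !_c->ok() ) { setComplete(); return; }
            if ( matcher( _c )->matchesCurrent( _c.get() ) ) ++_n;
            _c->advance();
        }
        virtual long long nscanned() { return _c ? _c->nscanned() : 0; }
        virtual bool mayRecordPlan() const { return complete(); }
    protected:
        virtual void _init() { _c = qp().newCursor(); }
        virtual QueryOp *_createChild() const { return new CountingOp(); }
    private:
        shared_ptr<Cursor> _c;
        long long _n;
    };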
- // Set of candidate query plans for a particular query. Used for running
- // a QueryOp on these plans.
+    // Temporary. Unlike a regular STL priority queue, this class still works when the
+    // result of T::operator< can change over time. It is slow (linear-scan pop), but if
+    // v.size() is always very small that is fine, and it may even beat a smarter
+    // implementation that does more memory allocations.
+ template<class T>
+ class our_priority_queue : boost::noncopyable {
+ vector<T> v;
+ public:
+ our_priority_queue() {
+ v.reserve(4);
+ }
+ int size() const { return v.size(); }
+ bool empty() const { return v.empty(); }
+ void push(const T & x) {
+ v.push_back(x);
+ }
+ T pop() {
+ size_t t = 0;
+ for( size_t i = 1; i < v.size(); i++ ) {
+ if( v[t] < v[i] )
+ t = i;
+ }
+ T ret = v[t];
+ v.erase(v.begin()+t);
+ return ret;
+ }
+ };
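    // Usage note: pop() linearly scans for the greatest element under operator<,
    // re-evaluating each comparison, which is why a comparison whose result changes
    // over time is tolerated. Illustrative values:
    //
    //     our_priority_queue<int> q;
    //     q.push( 3 ); q.push( 7 ); q.push( 5 );
    //     int top = q.pop();   // 7: the largest element under operator<
    //
    // The Runner below pairs this with OpHolder, whose operator< is deliberately
    // inverted so that pop() yields the op with the fewest documents scanned so far.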
+
+ /**
+     * A set of candidate query plans for a query. This class can return a best guess plan or run a
+ * QueryOp on all the plans.
+ */
class QueryPlanSet {
public:
- typedef boost::shared_ptr< QueryPlan > QueryPlanPtr;
- typedef vector< QueryPlanPtr > PlanSet;
+ typedef boost::shared_ptr<QueryPlan> QueryPlanPtr;
+ typedef vector<QueryPlanPtr> PlanSet;
+ /**
+ * @param originalFrsp - original constraints for this query clause; if null, frsp will be used.
+ */
QueryPlanSet( const char *ns,
- auto_ptr< FieldRangeSet > frs,
- auto_ptr< FieldRangeSet > originalFrs,
+ auto_ptr<FieldRangeSetPair> frsp,
+ auto_ptr<FieldRangeSetPair> originalFrsp,
const BSONObj &originalQuery,
const BSONObj &order,
+ bool mustAssertOnYieldFailure = true,
const BSONElement *hint = 0,
bool honorRecordedPlan = true,
const BSONObj &min = BSONObj(),
const BSONObj &max = BSONObj(),
bool bestGuessOnly = false,
bool mayYield = false);
+
+ /** @return number of candidate plans. */
int nPlans() const { return _plans.size(); }
- shared_ptr< QueryOp > runOp( QueryOp &op );
- template< class T >
- shared_ptr< T > runOp( T &op ) {
- return dynamic_pointer_cast< T >( runOp( static_cast< QueryOp& >( op ) ) );
+
+ /**
+ * Clone op for each query plan, and @return the first cloned op to call
+ * setComplete() or setStop().
+ */
+
+ shared_ptr<QueryOp> runOp( QueryOp &op );
+ template<class T>
+ shared_ptr<T> runOp( T &op ) {
+ return dynamic_pointer_cast<T>( runOp( static_cast<QueryOp&>( op ) ) );
}
+
+ /** Initialize or iterate a runner generated from @param originalOp. */
+ shared_ptr<QueryOp> nextOp( QueryOp &originalOp, bool retried = false );
+
+ /** Yield the runner member. */
+
+ bool prepareToYield();
+ void recoverFromYield();
+
+ QueryPlanPtr firstPlan() const { return _plans[ 0 ]; }
+
+ /** @return metadata about cursors and index bounds for all plans, suitable for explain output. */
BSONObj explain() const;
+ /** @return true iff a plan is selected based on previous success of this plan. */
bool usingPrerecordedPlan() const { return _usingPrerecordedPlan; }
+ /** @return a single plan that may work well for the specified query. */
QueryPlanPtr getBestGuess() const;
+
//for testing
- const FieldRangeSet &fbs() const { return *_fbs; }
- const FieldRangeSet &originalFrs() const { return *_originalFrs; }
+ const FieldRangeSetPair &frsp() const { return *_frsp; }
+ const FieldRangeSetPair *originalFrsp() const { return _originalFrsp.get(); }
bool modifiedKeys() const;
bool hasMultiKey() const;
@@ -219,22 +314,55 @@ namespace mongo {
}
void init();
void addHint( IndexDetails &id );
- struct Runner {
+ class Runner {
+ public:
Runner( QueryPlanSet &plans, QueryOp &op );
- shared_ptr< QueryOp > run();
- void mayYield( const vector< shared_ptr< QueryOp > > &ops );
+
+ /**
+ * Iterate interactively through candidate documents on all plans.
+ * QueryOp objects are returned at each interleaved step.
+ */
+
+ /** @return a plan that has completed, otherwise an arbitrary plan. */
+ shared_ptr<QueryOp> init();
+ /**
+ * Move the Runner forward one iteration, and @return the plan for
+ * this iteration.
+ */
+ shared_ptr<QueryOp> next();
+ /** @return next non error op if there is one, otherwise an error op. */
+ shared_ptr<QueryOp> nextNonError();
+
+ bool prepareToYield();
+ void recoverFromYield();
+
+ /** Run until first op completes. */
+ shared_ptr<QueryOp> runUntilFirstCompletes();
+
+ void mayYield();
QueryOp &_op;
QueryPlanSet &_plans;
static void initOp( QueryOp &op );
static void nextOp( QueryOp &op );
- static bool prepareToYield( QueryOp &op );
- static void recoverFromYield( QueryOp &op );
+ static bool prepareToYieldOp( QueryOp &op );
+ static void recoverFromYieldOp( QueryOp &op );
+ private:
+ vector<shared_ptr<QueryOp> > _ops;
+ struct OpHolder {
+ OpHolder( const shared_ptr<QueryOp> &op ) : _op( op ), _offset() {}
+ shared_ptr<QueryOp> _op;
+ long long _offset;
+ bool operator<( const OpHolder &other ) const {
+ return _op->nscanned() + _offset > other._op->nscanned() + other._offset;
+ }
+ };
+ our_priority_queue<OpHolder> _queue;
};
const char *_ns;
BSONObj _originalQuery;
- auto_ptr< FieldRangeSet > _fbs;
- auto_ptr< FieldRangeSet > _originalFrs;
+ auto_ptr<FieldRangeSetPair> _frsp;
+ auto_ptr<FieldRangeSetPair> _originalFrsp;
PlanSet _plans;
bool _mayRecordPlan;
bool _usingPrerecordedPlan;
@@ -248,31 +376,11 @@ namespace mongo {
bool _bestGuessOnly;
bool _mayYield;
ElapsedTracker _yieldSometimesTracker;
+ shared_ptr<Runner> _runner;
+ bool _mustAssertOnYieldFailure;
};
- // Handles $or type queries by generating a QueryPlanSet for each $or clause
- // NOTE on our $or implementation: In our current qo implementation we don't
- // keep statistics on our data, but we can conceptualize the problem of
- // selecting an index when statistics exist for all index ranges. The
- // d-hitting set problem on k sets and n elements can be reduced to the
- // problem of index selection on k $or clauses and n index ranges (where
- // d is the max number of indexes, and the number of ranges n is unbounded).
- // In light of the fact that d-hitting set is np complete, and we don't even
- // track statistics (so cost calculations are expensive) our first
- // implementation uses the following greedy approach: We take one $or clause
- // at a time and treat each as a separate query for index selection purposes.
- // But if an index range is scanned for a particular $or clause, we eliminate
- // that range from all subsequent clauses. One could imagine an opposite
- // implementation where we select indexes based on the union of index ranges
- // for all $or clauses, but this can have much poorer worst case behavior.
- // (An index range that suits one $or clause may not suit another, and this
- // is worse than the typical case of index range choice staleness because
- // with $or the clauses may likely be logically distinct.) The greedy
- // implementation won't do any worse than all the $or clauses individually,
- // and it can often do better. In the first cut we are intentionally using
- // QueryPattern tracking to record successful plans on $or clauses for use by
- // subsequent $or clauses, even though there may be a significant aggregate
- // $nor component that would not be represented in QueryPattern.
+ /** Handles $or type queries by generating a QueryPlanSet for each $or clause. */
class MultiPlanScanner {
public:
MultiPlanScanner( const char *ns,
@@ -284,23 +392,54 @@ namespace mongo {
const BSONObj &max = BSONObj(),
bool bestGuessOnly = false,
bool mayYield = false);
- shared_ptr< QueryOp > runOp( QueryOp &op );
- template< class T >
- shared_ptr< T > runOp( T &op ) {
- return dynamic_pointer_cast< T >( runOp( static_cast< QueryOp& >( op ) ) );
+
+ /**
+ * Clone op for each query plan of a single $or clause, and @return the first cloned op
+ * to call setComplete() or setStop().
+ */
+
+ shared_ptr<QueryOp> runOpOnce( QueryOp &op );
+ template<class T>
+ shared_ptr<T> runOpOnce( T &op ) {
+ return dynamic_pointer_cast<T>( runOpOnce( static_cast<QueryOp&>( op ) ) );
}
- shared_ptr< QueryOp > runOpOnce( QueryOp &op );
- template< class T >
- shared_ptr< T > runOpOnce( T &op ) {
- return dynamic_pointer_cast< T >( runOpOnce( static_cast< QueryOp& >( op ) ) );
+
+ /**
+ * For each $or clause, calls runOpOnce on the child QueryOp cloned from the winning QueryOp
+ * of the previous $or clause (or from the supplied 'op' for the first $or clause).
+ */
+
+ shared_ptr<QueryOp> runOp( QueryOp &op );
+ template<class T>
+ shared_ptr<T> runOp( T &op ) {
+ return dynamic_pointer_cast<T>( runOp( static_cast<QueryOp&>( op ) ) );
}
- bool mayRunMore() const { return _or ? ( !_tableScanned && !_fros.orFinished() ) : _i == 0; }
+
+ /** Initialize or iterate a runner generated from @param originalOp. */
+
+ void initialOp( const shared_ptr<QueryOp> &originalOp ) { _baseOp = originalOp; }
+ shared_ptr<QueryOp> nextOp();
+
+ /** Yield the runner member. */
+
+ bool prepareToYield();
+ void recoverFromYield();
+
+ /**
+ * @return a single simple cursor if the scanner would run a single cursor
+ * for this query, otherwise return an empty shared_ptr.
+ */
+ shared_ptr<Cursor> singleCursor() const;
+
+ /** @return true iff more $or clauses need to be scanned. */
+ bool mayRunMore() const { return _or ? ( !_tableScanned && !_org->orFinished() ) : _i == 0; }
+ /** @return non-$or version of explain output. */
BSONObj oldExplain() const { assertNotOr(); return _currentQps->explain(); }
- // just report this when only one query op
- bool usingPrerecordedPlan() const {
- return !_or && _currentQps->usingPrerecordedPlan();
- }
+ /** @return true iff this is not a $or query and a plan is selected based on previous success of this plan. */
+ bool usingPrerecordedPlan() const { return !_or && _currentQps->usingPrerecordedPlan(); }
+ /** Don't attempt to scan multiple plans, just use the best guess. */
void setBestGuessOnly() { _bestGuessOnly = true; }
+        /** Set whether yielding is allowed while running each QueryPlan. */
void mayYield( bool val ) { _mayYield = val; }
bool modifiedKeys() const { return _currentQps->modifiedKeys(); }
bool hasMultiKey() const { return _currentQps->hasMultiKey(); }
@@ -309,57 +448,46 @@ namespace mongo {
void assertNotOr() const {
massert( 13266, "not implemented for $or query", !_or );
}
+ void assertMayRunMore() const {
+ massert( 13271, "can't run more ops", mayRunMore() );
+ }
+ shared_ptr<QueryOp> nextOpBeginningClause();
+ shared_ptr<QueryOp> nextOpHandleEndOfClause();
bool uselessOr( const BSONElement &hint ) const;
const char * _ns;
bool _or;
BSONObj _query;
- FieldRangeOrSet _fros;
- auto_ptr< QueryPlanSet > _currentQps;
+ shared_ptr<OrRangeGenerator> _org; // May be null in certain non $or query cases.
+ auto_ptr<QueryPlanSet> _currentQps;
int _i;
bool _honorRecordedPlan;
bool _bestGuessOnly;
BSONObj _hint;
bool _mayYield;
bool _tableScanned;
+ shared_ptr<QueryOp> _baseOp;
};
+ /** Provides a cursor interface for certain limited uses of a MultiPlanScanner. */
class MultiCursor : public Cursor {
public:
class CursorOp : public QueryOp {
public:
CursorOp() {}
CursorOp( const QueryOp &other ) : QueryOp( other ) {}
- virtual shared_ptr< Cursor > newCursor() const = 0;
+ virtual shared_ptr<Cursor> newCursor() const = 0;
};
- // takes ownership of 'op'
- MultiCursor( const char *ns, const BSONObj &pattern, const BSONObj &order, shared_ptr< CursorOp > op = shared_ptr< CursorOp >(), bool mayYield = false )
- : _mps( new MultiPlanScanner( ns, pattern, order, 0, true, BSONObj(), BSONObj(), !op.get(), mayYield ) ), _nscanned() {
- if ( op.get() ) {
- _op = op;
- }
- else {
- _op.reset( new NoOp() );
- }
- if ( _mps->mayRunMore() ) {
- nextClause();
- if ( !ok() ) {
- advance();
- }
- }
- else {
- _c.reset( new BasicCursor( DiskLoc() ) );
- }
- }
- // used to handoff a query to a getMore()
- MultiCursor( auto_ptr< MultiPlanScanner > mps, const shared_ptr< Cursor > &c, const shared_ptr< CoveredIndexMatcher > &matcher, const QueryOp &op )
- : _op( new NoOp( op ) ), _c( c ), _mps( mps ), _matcher( matcher ), _nscanned( -1 ) {
- _mps->setBestGuessOnly();
- _mps->mayYield( false ); // with a NoOp, there's no need to yield in QueryPlanSet
- if ( !ok() ) {
- // would have been advanced by UserQueryOp if possible
- advance();
- }
- }
+ /** takes ownership of 'op' */
+ MultiCursor( const char *ns, const BSONObj &pattern, const BSONObj &order, shared_ptr<CursorOp> op = shared_ptr<CursorOp>(), bool mayYield = false );
+ /**
+         * Used:
+         * 1. To hand off a query to a getMore()
+         * 2. To hand off a QueryOptimizerCursor
+         * @param nscanned is an optional initial value; if not supplied, nscanned()
+         * will always return -1
+ */
+ MultiCursor( auto_ptr<MultiPlanScanner> mps, const shared_ptr<Cursor> &c, const shared_ptr<CoveredIndexMatcher> &matcher, const QueryOp &op, long long nscanned = -1 );
+
virtual bool ok() { return _c->ok(); }
virtual Record* _current() { return _c->_current(); }
virtual BSONObj current() { return _c->current(); }
@@ -373,31 +501,30 @@ namespace mongo {
}
virtual BSONObj currKey() const { return _c->currKey(); }
virtual DiskLoc refLoc() { return _c->refLoc(); }
- virtual void noteLocation() {
- _c->noteLocation();
- }
- virtual void checkLocation() {
- _c->checkLocation();
- }
+ virtual void noteLocation() { _c->noteLocation(); }
+ virtual void checkLocation() { _c->checkLocation(); }
virtual bool supportGetMore() { return true; }
virtual bool supportYields() { return _c->supportYields(); }
+ virtual BSONObj indexKeyPattern() { return _c->indexKeyPattern(); }
- // with update we could potentially get the same document on multiple
- // indexes, but update appears to already handle this with seenObjects
- // so we don't have to do anything special here.
- virtual bool getsetdup(DiskLoc loc) {
- return _c->getsetdup( loc );
- }
+ /**
+ * with update we could potentially get the same document on multiple
+ * indexes, but update appears to already handle this with seenObjects
+ * so we don't have to do anything special here.
+ */
+ virtual bool getsetdup(DiskLoc loc) { return _c->getsetdup( loc ); }
virtual bool modifiedKeys() const { return _mps->modifiedKeys(); }
virtual bool isMultiKey() const { return _mps->hasMultiKey(); }
- virtual CoveredIndexMatcher *matcher() const { return _matcher.get(); }
- // return -1 if we're a getmore handoff
+ virtual shared_ptr< CoveredIndexMatcher > matcherPtr() const { return _matcher; }
+ virtual CoveredIndexMatcher* matcher() const { return _matcher.get(); }
+
+ /** return -1 if we're a getmore handoff */
virtual long long nscanned() { return _nscanned >= 0 ? _nscanned + _c->nscanned() : _nscanned; }
- // just for testing
- shared_ptr< Cursor > sub_c() const { return _c; }
+ /** just for testing */
+ shared_ptr<Cursor> sub_c() const { return _c; }
private:
class NoOp : public CursorOp {
public:
@@ -407,55 +534,45 @@ namespace mongo {
virtual void next() {}
virtual bool mayRecordPlan() const { return false; }
virtual QueryOp *_createChild() const { return new NoOp(); }
- virtual shared_ptr< Cursor > newCursor() const { return qp().newCursor(); }
+ virtual shared_ptr<Cursor> newCursor() const { return qp().newCursor(); }
virtual long long nscanned() { assert( false ); return 0; }
};
- void nextClause() {
- if ( _nscanned >= 0 && _c.get() ) {
- _nscanned += _c->nscanned();
- }
- shared_ptr< CursorOp > best = _mps->runOpOnce( *_op );
- if ( ! best->complete() )
- throw MsgAssertionException( best->exception() );
- _c = best->newCursor();
- _matcher = best->matcher();
- _op = best;
- }
- shared_ptr< CursorOp > _op;
- shared_ptr< Cursor > _c;
- auto_ptr< MultiPlanScanner > _mps;
- shared_ptr< CoveredIndexMatcher > _matcher;
+ void nextClause();
+ shared_ptr<CursorOp> _op;
+ shared_ptr<Cursor> _c;
+ auto_ptr<MultiPlanScanner> _mps;
+ shared_ptr<CoveredIndexMatcher> _matcher;
long long _nscanned;
};
- // NOTE min, max, and keyPattern will be updated to be consistent with the selected index.
+ /** NOTE min, max, and keyPattern will be updated to be consistent with the selected index. */
IndexDetails *indexDetailsForRange( const char *ns, string &errmsg, BSONObj &min, BSONObj &max, BSONObj &keyPattern );
- inline bool isSimpleIdQuery( const BSONObj& query ) {
- BSONObjIterator i(query);
- if( !i.more() ) return false;
- BSONElement e = i.next();
- if( i.more() ) return false;
- if( strcmp("_id", e.fieldName()) != 0 ) return false;
- return e.isSimpleType(); // e.g. not something like { _id : { $gt : ...
- }
-
- // matcher() will always work on the returned cursor
- inline shared_ptr< Cursor > bestGuessCursor( const char *ns, const BSONObj &query, const BSONObj &sort ) {
- if( !query.getField( "$or" ).eoo() ) {
- return shared_ptr< Cursor >( new MultiCursor( ns, query, sort ) );
- }
- else {
- auto_ptr< FieldRangeSet > frs( new FieldRangeSet( ns, query ) );
- auto_ptr< FieldRangeSet > origFrs( new FieldRangeSet( *frs ) );
- shared_ptr< Cursor > ret = QueryPlanSet( ns, frs, origFrs, query, sort ).getBestGuess()->newCursor();
- // If we don't already have a matcher, supply one.
- if ( !query.isEmpty() && ! ret->matcher() ) {
- shared_ptr< CoveredIndexMatcher > matcher( new CoveredIndexMatcher( query, ret->indexKeyPattern() ) );
- ret->setMatcher( matcher );
- }
- return ret;
- }
- }
-
+ bool isSimpleIdQuery( const BSONObj& query );
+
+ /**
+ * @return a single cursor that may work well for the given query.
+     * It is possible that no cursor is returned, if the sort is not supported by an index. Clients that
+     * are not sure a suitable index exists are responsible for checking for an empty return value and
+     * defaulting to an unsorted query in that case.
+ */
+ shared_ptr<Cursor> bestGuessCursor( const char *ns, const BSONObj &query, const BSONObj &sort );
+
+ /**
+ * Add-on functionality for queryutil classes requiring access to indexing
+ * functionality not currently linked to mongos.
+ * TODO Clean this up a bit, possibly with separate sharded and non sharded
+ * implementations for the appropriate queryutil classes or by pulling index
+ * related functionality into separate wrapper classes.
+ */
+ struct QueryUtilIndexed {
+ /** @return true if the index may be useful according to its KeySpec. */
+ static bool indexUseful( const FieldRangeSetPair &frsp, NamespaceDetails *d, int idxNo, const BSONObj &order );
+ /** Clear any indexes recorded as the best for either the single or multi key pattern. */
+ static void clearIndexesForPatterns( const FieldRangeSetPair &frsp, const BSONObj &order );
+ /** Return a recorded best index for the single or multi key pattern. */
+ static pair< BSONObj, long long > bestIndexForPatterns( const FieldRangeSetPair &frsp, const BSONObj &order );
+ static bool uselessOr( const OrRangeGenerator& org, NamespaceDetails *d, int hintIdx );
+ };
+
} // namespace mongo
diff --git a/db/queryoptimizercursor.cpp b/db/queryoptimizercursor.cpp
new file mode 100644
index 0000000..9260889
--- /dev/null
+++ b/db/queryoptimizercursor.cpp
@@ -0,0 +1,387 @@
+// @file queryoptimizercursor.cpp
+
+/**
+ * Copyright (C) 2011 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "queryoptimizer.h"
+#include "pdfile.h"
+#include "clientcursor.h"
+#include "btree.h"
+
+namespace mongo {
+
+ static const int OutOfOrderDocumentsAssertionCode = 14810;
+
+ /**
+ * A QueryOp implementation utilized by the QueryOptimizerCursor
+ */
+ class QueryOptimizerCursorOp : public QueryOp {
+ public:
+ /**
+ * @param aggregateNscanned - shared int counting total nscanned for
+ * query ops for all cursors.
+ */
+ QueryOptimizerCursorOp( long long &aggregateNscanned ) : _matchCount(), _mustAdvance(), _nscanned(), _aggregateNscanned( aggregateNscanned ) {}
+
+ virtual void _init() {
+ if ( qp().scanAndOrderRequired() ) {
+ throw MsgAssertionException( OutOfOrderDocumentsAssertionCode, "order spec cannot be satisfied with index" );
+ }
+ _c = qp().newCursor();
+ _capped = _c->capped();
+ mayAdvance();
+ }
+
+ virtual long long nscanned() {
+ return _c ? _c->nscanned() : _nscanned;
+ }
+
+ virtual bool prepareToYield() {
+ if ( _c && !_cc ) {
+ _cc.reset( new ClientCursor( QueryOption_NoCursorTimeout , _c , qp().ns() ) );
+ }
+ if ( _cc ) {
+ _posBeforeYield = currLoc();
+ return _cc->prepareToYield( _yieldData );
+ }
+ // no active cursor - ok to yield
+ return true;
+ }
+
+ virtual void recoverFromYield() {
+ if ( _cc && !ClientCursor::recoverFromYield( _yieldData ) ) {
+ _c.reset();
+ _cc.reset();
+
+ if ( _capped ) {
+ msgassertedNoTrace( 13338, str::stream() << "capped cursor overrun: " << qp().ns() );
+ }
+ else if ( qp().mustAssertOnYieldFailure() ) {
+ msgassertedNoTrace( 15892, str::stream() << "QueryOptimizerCursorOp::recoverFromYield() failed to recover" );
+ }
+ else {
+ // we don't fail query since we're fine with returning partial data if collection dropped
+ // also, see SERVER-2454
+ }
+ }
+ else {
+ if ( _posBeforeYield != currLoc() ) {
+                    // If the yield advanced our position, the next call to next() will be a no-op.
+ _mustAdvance = false;
+ }
+ }
+ }
+
+ virtual void next() {
+ mayAdvance();
+
+ if ( _matchCount >= 101 ) {
+ // This is equivalent to the default condition for switching from
+ // a query to a getMore.
+ setStop();
+ return;
+ }
+ if ( !_c || !_c->ok() ) {
+ setComplete();
+ return;
+ }
+
+ if ( matcher( _c )->matchesCurrent( _c.get() ) && !_c->getsetdup( _c->currLoc() ) ) {
+ ++_matchCount;
+ }
+ _mustAdvance = true;
+ }
+ virtual QueryOp *_createChild() const {
+ QueryOptimizerCursorOp *ret = new QueryOptimizerCursorOp( _aggregateNscanned );
+ ret->_matchCount = _matchCount;
+ return ret;
+ }
+ DiskLoc currLoc() const { return _c ? _c->currLoc() : DiskLoc(); }
+ BSONObj currKey() const { return _c ? _c->currKey() : BSONObj(); }
+ virtual bool mayRecordPlan() const {
+ return complete() && !stopRequested();
+ }
+ shared_ptr<Cursor> cursor() const { return _c; }
+ private:
+        void mayAdvance() {
+            if ( !_c ) {
+                // The cursor may have been reset by a failed yield recovery.
+                return;
+            }
+            if ( _mustAdvance ) {
+                _c->advance();
+                _mustAdvance = false;
+            }
+            _aggregateNscanned += ( _c->nscanned() - _nscanned );
+            _nscanned = _c->nscanned();
+        }
+ int _matchCount;
+ bool _mustAdvance;
+ long long _nscanned;
+ bool _capped;
+ shared_ptr<Cursor> _c;
+ ClientCursor::CleanupPointer _cc;
+ DiskLoc _posBeforeYield;
+ ClientCursor::YieldData _yieldData;
+ long long &_aggregateNscanned;
+ };
+
+ /**
+ * This cursor runs a MultiPlanScanner iteratively and returns results from
+ * the scanner's cursors as they become available. Once the scanner chooses
+ * a single plan, this cursor becomes a simple wrapper around that single
+ * plan's cursor (called the 'takeover' cursor).
+ */
+ class QueryOptimizerCursor : public Cursor {
+ public:
+ QueryOptimizerCursor( auto_ptr<MultiPlanScanner> &mps ) :
+ _mps( mps ),
+ _originalOp( new QueryOptimizerCursorOp( _nscanned ) ),
+ _currOp(),
+ _nscanned() {
+ _mps->initialOp( _originalOp );
+ shared_ptr<QueryOp> op = _mps->nextOp();
+ rethrowOnError( op );
+ if ( !op->complete() ) {
+ _currOp = dynamic_cast<QueryOptimizerCursorOp*>( op.get() );
+ }
+ }
+
+ virtual bool ok() { return _takeover ? _takeover->ok() : !currLoc().isNull(); }
+ virtual Record* _current() {
+ if ( _takeover ) {
+ return _takeover->_current();
+ }
+ assertOk();
+ return currLoc().rec();
+ }
+ virtual BSONObj current() {
+ if ( _takeover ) {
+ return _takeover->current();
+ }
+ assertOk();
+ return currLoc().obj();
+ }
+ virtual DiskLoc currLoc() { return _takeover ? _takeover->currLoc() : _currLoc(); }
+ DiskLoc _currLoc() const {
+ verify( 14826, !_takeover );
+ if ( _currOp ) {
+ return _currOp->currLoc();
+ }
+ return DiskLoc();
+ }
+ virtual bool advance() {
+ if ( _takeover ) {
+ return _takeover->advance();
+ }
+
+ // Ok to advance if currOp in an error state due to failed yield recovery.
+ // This may be the case when advance() is called by recoverFromYield().
+ if ( !( _currOp && _currOp->error() ) && !ok() ) {
+ return false;
+ }
+
+ _currOp = 0;
+ shared_ptr<QueryOp> op = _mps->nextOp();
+ rethrowOnError( op );
+
+ QueryOptimizerCursorOp *qocop = dynamic_cast<QueryOptimizerCursorOp*>( op.get() );
+ if ( !op->complete() ) {
+ // 'qocop' will be valid until we call _mps->nextOp() again.
+ _currOp = qocop;
+ }
+ else if ( op->stopRequested() ) {
+ if ( qocop->cursor() ) {
+ _takeover.reset( new MultiCursor( _mps,
+ qocop->cursor(),
+ op->matcher( qocop->cursor() ),
+ *op,
+ _nscanned - qocop->cursor()->nscanned() ) );
+ }
+ }
+
+ return ok();
+ }
+ virtual BSONObj currKey() const {
+ if ( _takeover ) {
+ return _takeover->currKey();
+ }
+ assertOk();
+ return _currOp->currKey();
+ }
+
+ /** This cursor will be ignored for yielding by the client cursor implementation. */
+ virtual DiskLoc refLoc() { return _takeover ? _takeover->refLoc() : DiskLoc(); }
+
+ virtual BSONObj indexKeyPattern() {
+ if ( _takeover ) {
+ return _takeover->indexKeyPattern();
+ }
+ assertOk();
+ return _currOp->cursor()->indexKeyPattern();
+ }
+
+ virtual bool supportGetMore() { return false; }
+
+ virtual bool supportYields() { return _takeover ? _takeover->supportYields() : true; }
+ virtual bool prepareToYield() {
+ if ( _takeover ) {
+ return _takeover->prepareToYield();
+ }
+ else if ( _currOp ) {
+ return _mps->prepareToYield();
+ }
+ else {
+ return true;
+ }
+ }
+ virtual void recoverFromYield() {
+ if ( _takeover ) {
+ _takeover->recoverFromYield();
+ return;
+ }
+ if ( _currOp ) {
+ _mps->recoverFromYield();
+ if ( _currOp->error() ) {
+ // See if we can advance to a non error op.
+ advance();
+ }
+ }
+ }
+
+ virtual string toString() { return "QueryOptimizerCursor"; }
+
+ virtual bool getsetdup(DiskLoc loc) {
+ if ( _takeover ) {
+ if ( getdupInternal( loc ) ) {
+ return true;
+ }
+ return _takeover->getsetdup( loc );
+ }
+ assertOk();
+ return getsetdupInternal( loc );
+ }
+
+        /** Matcher needs to know if the cursor being forwarded to is multikey. */
+ virtual bool isMultiKey() const {
+ if ( _takeover ) {
+ return _takeover->isMultiKey();
+ }
+ assertOk();
+ return _currOp->cursor()->isMultiKey();
+ }
+
+ virtual bool modifiedKeys() const { return true; }
+
+ virtual long long nscanned() { return _takeover ? _takeover->nscanned() : _nscanned; }
+
+ /** @return the matcher for the takeover cursor or current active op. */
+ virtual shared_ptr< CoveredIndexMatcher > matcherPtr() const {
+ if ( _takeover ) {
+ return _takeover->matcherPtr();
+ }
+ assertOk();
+ return _currOp->matcher( _currOp->cursor() );
+ }
+
+ /** @return the matcher for the takeover cursor or current active op. */
+ virtual CoveredIndexMatcher* matcher() const {
+ if ( _takeover ) {
+ return _takeover->matcher();
+ }
+ assertOk();
+ return _currOp->matcher( _currOp->cursor() ).get();
+ }
+
+ private:
+ void rethrowOnError( const shared_ptr< QueryOp > &op ) {
+ // If all plans have erred out, assert.
+ if ( op->error() ) {
+ throw MsgAssertionException( op->exception() );
+ }
+ }
+
+ void assertOk() const {
+ massert( 14809, "Invalid access for cursor that is not ok()", !_currLoc().isNull() );
+ }
+
+ /** Insert and check for dups before takeover occurs */
+ bool getsetdupInternal(const DiskLoc &loc) {
+ pair<set<DiskLoc>::iterator, bool> p = _dups.insert(loc);
+ return !p.second;
+ }
+
+ /** Just check for dups - after takeover occurs */
+ bool getdupInternal(const DiskLoc &loc) {
+ return _dups.count( loc ) > 0;
+ }
+
+ auto_ptr<MultiPlanScanner> _mps;
+ shared_ptr<QueryOptimizerCursorOp> _originalOp;
+ QueryOptimizerCursorOp *_currOp;
+ set<DiskLoc> _dups;
+ shared_ptr<Cursor> _takeover;
+ long long _nscanned;
+ };
+
+ shared_ptr<Cursor> newQueryOptimizerCursor( auto_ptr<MultiPlanScanner> mps ) {
+ try {
+ return shared_ptr<Cursor>( new QueryOptimizerCursor( mps ) );
+ } catch( const AssertionException &e ) {
+ if ( e.getCode() == OutOfOrderDocumentsAssertionCode ) {
+ // If no indexes follow the requested sort order, return an
+ // empty pointer.
+ return shared_ptr<Cursor>();
+ }
+ throw;
+ }
+        // unreachable: the try block returns and the catch either returns or rethrows
+        return shared_ptr<Cursor>();
+ }
+
+ shared_ptr<Cursor> NamespaceDetailsTransient::getCursor( const char *ns, const BSONObj &query, const BSONObj &order ) {
+ if ( query.isEmpty() && order.isEmpty() ) {
+ // TODO This will not use a covered index.
+ return theDataFileMgr.findAll( ns );
+ }
+ if ( isSimpleIdQuery( query ) ) {
+ Database *database = cc().database();
+ assert( database );
+ NamespaceDetails *d = database->namespaceIndex.details(ns);
+ if ( d ) {
+ int idxNo = d->findIdIndex();
+ if ( idxNo >= 0 ) {
+ IndexDetails& i = d->idx( idxNo );
+ BSONObj key = i.getKeyFromQuery( query );
+ return shared_ptr<Cursor>( BtreeCursor::make( d, idxNo, i, key, key, true, 1 ) );
+ }
+ }
+ }
+ auto_ptr<MultiPlanScanner> mps( new MultiPlanScanner( ns, query, order ) ); // mayYield == false
+ shared_ptr<Cursor> single = mps->singleCursor();
+ if ( single ) {
+ if ( !query.isEmpty() && !single->matcher() ) {
+ shared_ptr<CoveredIndexMatcher> matcher( new CoveredIndexMatcher( query, single->indexKeyPattern() ) );
+ single->setMatcher( matcher );
+ }
+ return single;
+ }
+ return newQueryOptimizerCursor( mps );
+ }
+
+ /** This interface just available for testing. */
+ shared_ptr<Cursor> newQueryOptimizerCursor( const char *ns, const BSONObj &query, const BSONObj &order ) {
+ auto_ptr<MultiPlanScanner> mps( new MultiPlanScanner( ns, query, order ) ); // mayYield == false
+ return newQueryOptimizerCursor( mps );
+ }
+
+} // namespace mongo
diff --git a/db/querypattern.cpp b/db/querypattern.cpp
new file mode 100644
index 0000000..589182d
--- /dev/null
+++ b/db/querypattern.cpp
@@ -0,0 +1,54 @@
+// @file querypattern.cpp - Query pattern matching for selecting similar plans given similar queries.
+
+/* Copyright 2011 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "querypattern.h"
+
+namespace mongo {
+
+ /** for testing only - speed unimportant */
+ bool QueryPattern::operator==( const QueryPattern &other ) const {
+ bool less = operator<( other );
+ bool more = other.operator<( *this );
+ assert( !( less && more ) );
+ return !( less || more );
+ }
+
+ /** for testing only - speed unimportant */
+ bool QueryPattern::operator!=( const QueryPattern &other ) const {
+ return !operator==( other );
+ }
+
+ void QueryPattern::setSort( const BSONObj sort ) {
+ _sort = normalizeSort( sort );
+ }
+
+ BSONObj QueryPattern::normalizeSort( const BSONObj &spec ) {
+ if ( spec.isEmpty() )
+ return spec;
+ int direction = ( spec.firstElement().number() >= 0 ) ? 1 : -1;
+ BSONObjIterator i( spec );
+ BSONObjBuilder b;
+ while( i.moreWithEOO() ) {
+ BSONElement e = i.next();
+ if ( e.eoo() )
+ break;
+ b.append( e.fieldName(), direction * ( ( e.number() >= 0 ) ? -1 : 1 ) );
+ }
+ return b.obj();
+ }
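    // Worked example of the normalization above (illustrative values):
    //   normalizeSort( BSON( "a" << 1 << "b" << -1 ) )  ->  { a: -1, b: 1 }
    //   normalizeSort( BSON( "a" << -1 << "b" << 1 ) )  ->  { a: -1, b: 1 }
    // A sort spec and its exact reverse therefore normalize to the same object, since
    // either ordering can be produced by walking a single index forward or backward.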
+
+} // namespace mongo
diff --git a/db/querypattern.h b/db/querypattern.h
new file mode 100644
index 0000000..d87cc64
--- /dev/null
+++ b/db/querypattern.h
@@ -0,0 +1,76 @@
+// @file querypattern.h - Query pattern matching for selecting similar plans given similar queries.
+
+/* Copyright 2011 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "jsobj.h"
+
+namespace mongo {
+
+ /**
+ * Implements query pattern matching, used to determine if a query is
+ * similar to an earlier query and should use the same plan.
+ *
+ * Two queries will generate the same QueryPattern, and therefore match each
+ * other, if their fields have the same Types and they have the same sort
+ * spec.
+ */
+ class QueryPattern {
+ public:
+ friend class FieldRangeSet;
+ enum Type {
+ Equality,
+ LowerBound,
+ UpperBound,
+ UpperAndLowerBound
+ };
+ bool operator<( const QueryPattern &other ) const;
+ /** for testing only */
+ bool operator==( const QueryPattern &other ) const;
+ /** for testing only */
+ bool operator!=( const QueryPattern &other ) const;
+ private:
+ QueryPattern() {}
+ void setSort( const BSONObj sort );
+ static BSONObj normalizeSort( const BSONObj &spec );
+ map<string,Type> _fieldTypes;
+ BSONObj _sort;
+ };
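    // Illustrative examples, assuming typical pattern generation by FieldRangeSet
    // (a friend of this class):
    //   { a: 1 }          and { a: 2 }          -> equal patterns (Equality on "a")
    //   { a: { $gt: 1 } } and { a: { $gt: 9 } } -> equal patterns (LowerBound on "a")
    //   { a: 1 }          and { a: { $gt: 1 } } -> different patterns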
+
+ inline bool QueryPattern::operator<( const QueryPattern &other ) const {
+ map<string,Type>::const_iterator i = _fieldTypes.begin();
+ map<string,Type>::const_iterator j = other._fieldTypes.begin();
+ while( i != _fieldTypes.end() ) {
+ if ( j == other._fieldTypes.end() )
+ return false;
+ if ( i->first < j->first )
+ return true;
+ else if ( i->first > j->first )
+ return false;
+ if ( i->second < j->second )
+ return true;
+ else if ( i->second > j->second )
+ return false;
+ ++i;
+ ++j;
+ }
+ if ( j != other._fieldTypes.end() )
+ return true;
+ return _sort.woCompare( other._sort ) < 0;
+ }
+
+} // namespace mongo
diff --git a/db/queryutil-inl.h b/db/queryutil-inl.h
new file mode 100644
index 0000000..d0fc212
--- /dev/null
+++ b/db/queryutil-inl.h
@@ -0,0 +1,153 @@
+// @file queryutil-inl.h - Inline definitions for frequently called queryutil.h functions
+
+/* Copyright 2011 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+namespace mongo {
+
+ inline bool FieldInterval::equality() const {
+ if ( _cachedEquality == -1 ) {
+ _cachedEquality = ( _lower._inclusive && _upper._inclusive && _lower._bound.woCompare( _upper._bound, false ) == 0 );
+ }
+ return _cachedEquality;
+ }
+
+ inline bool FieldRange::equality() const {
+ return
+ !empty() &&
+ min().woCompare( max(), false ) == 0 &&
+ maxInclusive() &&
+ minInclusive();
+ }
+
+ inline bool FieldRange::inQuery() const {
+ if ( equality() ) {
+ return true;
+ }
+ for( vector<FieldInterval>::const_iterator i = _intervals.begin(); i != _intervals.end(); ++i ) {
+ if ( !i->equality() ) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ /**
+ * TODO Assumes intervals are contiguous and minKey/maxKey will not be
+ * matched against.
+ */
+ inline bool FieldRange::nontrivial() const {
+ return
+ ! empty() &&
+ ( _intervals.size() != 1 ||
+ minKey.firstElement().woCompare( min(), false ) != 0 ||
+ maxKey.firstElement().woCompare( max(), false ) != 0 );
+ }
+
+ inline const FieldRange &FieldRangeSet::range( const char *fieldName ) const {
+ map<string,FieldRange>::const_iterator f = _ranges.find( fieldName );
+ if ( f == _ranges.end() )
+ return trivialRange();
+ return f->second;
+ }
+
+ inline FieldRange &FieldRangeSet::range( const char *fieldName ) {
+ map<string,FieldRange>::iterator f = _ranges.find( fieldName );
+ if ( f == _ranges.end() ) {
+ _ranges.insert( make_pair( string( fieldName ), trivialRange() ) );
+ return _ranges.find( fieldName )->second;
+ }
+ return f->second;
+ }
+
+ inline int FieldRangeSet::nNontrivialRanges() const {
+ int count = 0;
+ for( map<string,FieldRange>::const_iterator i = _ranges.begin(); i != _ranges.end(); ++i ) {
+ if ( i->second.nontrivial() )
+ ++count;
+ }
+ return count;
+ }
+
+ inline bool FieldRangeSet::matchPossible() const {
+ for( map<string,FieldRange>::const_iterator i = _ranges.begin(); i != _ranges.end(); ++i ) {
+ if ( i->second.empty() ) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ inline bool FieldRangeSet::matchPossibleForIndex( const BSONObj &keyPattern ) const {
+ if ( !_singleKey ) {
+ return matchPossible();
+ }
+ BSONObjIterator i( keyPattern );
+ while( i.more() ) {
+ BSONElement e = i.next();
+ if ( e.fieldName() == string( "$natural" ) ) {
+ return true;
+ }
+ if ( range( e.fieldName() ).empty() ) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ inline long long FieldRangeVector::size() {
+ long long ret = 1;
+ for( vector<FieldRange>::const_iterator i = _ranges.begin(); i != _ranges.end(); ++i ) {
+ ret *= i->intervals().size();
+ }
+ return ret;
+ }
+
+ inline FieldRangeSetPair *OrRangeGenerator::topFrsp() const {
+ FieldRangeSetPair *ret = new FieldRangeSetPair( _baseSet );
+ if (_orSets.size()) {
+ *ret &= _orSets.front();
+ }
+ return ret;
+ }
+
+ inline FieldRangeSetPair *OrRangeGenerator::topFrspOriginal() const {
+ FieldRangeSetPair *ret = new FieldRangeSetPair( _baseSet );
+ if (_originalOrSets.size()) {
+ *ret &= _originalOrSets.front();
+ }
+ return ret;
+ }
+
+ inline bool FieldRangeSetPair::matchPossibleForIndex( NamespaceDetails *d, int idxNo, const BSONObj &keyPattern ) const {
+ assertValidIndexOrNoIndex( d, idxNo );
+ if ( !matchPossible() ) {
+ return false;
+ }
+ if ( idxNo < 0 ) {
+ // multi key matchPossible() is true, so return true.
+ return true;
+ }
+ return frsForIndex( d, idxNo ).matchPossibleForIndex( keyPattern );
+ }
+
+ inline void FieldRangeSetPair::assertValidIndexOrNoIndex( const NamespaceDetails *d, int idxNo ) const {
+ massert( 14049, "FieldRangeSetPair invalid index specified", idxNo >= -1 );
+ if ( idxNo >= 0 ) {
+ assertValidIndex( d, idxNo );
+ }
+ }
+
+} // namespace mongo
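The inline helpers above boil down to a simple rule: a query cannot match if any constrained field has an empty set of intervals, and for a single key index only the fields named in the key pattern need to be consulted. A minimal standalone sketch of that rule follows (simplified, hypothetical types for illustration only; these are not the FieldRange/FieldRangeSet classes added by this patch):

    // Illustrative sketch only; the types here are hypothetical simplifications.
    #include <cstddef>
    #include <iostream>
    #include <map>
    #include <string>
    #include <vector>

    // One interval of allowed integer values, standing in for a FieldInterval.
    struct Interval { int lower; int upper; };

    // A field's constraint; an empty vector means no value can match.
    typedef std::vector<Interval> Range;

    // A match is impossible as soon as any constrained field has an empty range.
    bool matchPossible( const std::map<std::string, Range> &ranges ) {
        for ( std::map<std::string, Range>::const_iterator i = ranges.begin();
              i != ranges.end(); ++i ) {
            if ( i->second.empty() )
                return false;
        }
        return true;
    }

    // For a single key index, only the fields named in the key pattern matter.
    bool matchPossibleForIndex( const std::map<std::string, Range> &ranges,
                                const std::vector<std::string> &keyPattern ) {
        for ( size_t k = 0; k < keyPattern.size(); ++k ) {
            std::map<std::string, Range>::const_iterator f = ranges.find( keyPattern[ k ] );
            if ( f != ranges.end() && f->second.empty() )
                return false;
        }
        return true;
    }

    int main() {
        std::map<std::string, Range> ranges;
        Interval one = { 1, 1 };
        ranges[ "a" ].push_back( one );  // a == 1
        ranges[ "b" ];                   // b has contradictory constraints: empty range
        std::vector<std::string> keyPattern( 1, "a" );
        std::cout << matchPossible( ranges ) << " "               // 0, because "b" is empty
                  << matchPossibleForIndex( ranges, keyPattern )  // 1, the index only covers "a"
                  << std::endl;
        return 0;
    }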
diff --git a/db/queryutil.cpp b/db/queryutil.cpp
index 1cd750b..717eac8 100644
--- a/db/queryutil.cpp
+++ b/db/queryutil.cpp
@@ -1,4 +1,4 @@
-// queryutil.cpp
+// @file queryutil.cpp
/* Copyright 2009 10gen Inc.
*
@@ -24,9 +24,11 @@
#include "../util/unittest.h"
#include "dbmessage.h"
#include "indexkey.h"
+#include "../util/mongoutils/str.h"
namespace mongo {
extern BSONObj staticNull;
+ extern BSONObj staticUndefined;
/** returns a string that when used as a matcher, would match a super set of regex()
returns "" for complex regular expressions
@@ -78,21 +80,39 @@ namespace mongo {
r = r.substr( 0 , r.size() - 1 );
return r; //breaking here fails with /^a?/
}
+ else if (c == '|') {
+ // whole match so far is optional. Nothing we can do here.
+ return string();
+ }
else if (c == '\\') {
- // slash followed by non-alphanumeric represents the following char
c = *(regex++);
- if ((c >= 'A' && c <= 'Z') ||
+ if (c == 'Q'){
+ // \Q...\E quotes everything inside
+ while (*regex) {
+ c = (*regex++);
+ if (c == '\\' && (*regex == 'E')){
+ regex++; //skip the 'E'
+ break; // go back to start of outer loop
+ }
+ else {
+ ss << c; // character should match itself
+ }
+ }
+ }
+ else if ((c >= 'A' && c <= 'Z') ||
(c >= 'a' && c <= 'z') ||
(c >= '0' && c <= '0') ||
(c == '\0')) {
+ // don't know what to do with these
r = ss.str();
break;
}
else {
+ // slash followed by non-alphanumeric represents the following char
ss << c;
}
}
- else if (strchr("^$.[|()+{", c)) {
+ else if (strchr("^$.[()+{", c)) {
// list of "metacharacters" from man pcrepattern
r = ss.str();
break;
@@ -136,42 +156,63 @@ namespace mongo {
}
- FieldRange::FieldRange( const BSONElement &e, bool isNot, bool optimize ) {
+ FieldRange::FieldRange( const BSONElement &e, bool singleKey, bool isNot, bool optimize )
+ : _singleKey( singleKey ) {
+ int op = e.getGtLtOp();
+
// NOTE with $not, we could potentially form a complementary set of intervals.
- if ( !isNot && !e.eoo() && e.type() != RegEx && e.getGtLtOp() == BSONObj::opIN ) {
- set< BSONElement, element_lt > vals;
- vector< FieldRange > regexes;
+ if ( !isNot && !e.eoo() && e.type() != RegEx && op == BSONObj::opIN ) {
+ set<BSONElement,element_lt> vals;
+ vector<FieldRange> regexes;
uassert( 12580 , "invalid query" , e.isABSONObj() );
BSONObjIterator i( e.embeddedObject() );
while( i.more() ) {
BSONElement ie = i.next();
+ uassert( 15881, "$elemMatch not allowed within $in",
+ ie.type() != Object ||
+ ie.embeddedObject().firstElement().getGtLtOp() != BSONObj::opELEM_MATCH );
if ( ie.type() == RegEx ) {
- regexes.push_back( FieldRange( ie, false, optimize ) );
+ regexes.push_back( FieldRange( ie, singleKey, false, optimize ) );
}
else {
- vals.insert( ie );
+ // A document array may be indexed by its first element, by undefined
+ // if it is empty, or as a full array if it is embedded within another
+ // array.
+ vals.insert( ie );
+ if ( ie.type() == Array ) {
+ BSONElement temp = ie.embeddedObject().firstElement();
+ if ( temp.eoo() ) {
+ temp = staticUndefined.firstElement();
+ }
+ vals.insert( temp );
+ }
}
}
- for( set< BSONElement, element_lt >::const_iterator i = vals.begin(); i != vals.end(); ++i )
+ for( set<BSONElement,element_lt>::const_iterator i = vals.begin(); i != vals.end(); ++i )
_intervals.push_back( FieldInterval(*i) );
- for( vector< FieldRange >::const_iterator i = regexes.begin(); i != regexes.end(); ++i )
+ for( vector<FieldRange>::const_iterator i = regexes.begin(); i != regexes.end(); ++i )
*this |= *i;
return;
}
- if ( e.type() == Array && e.getGtLtOp() == BSONObj::Equality ) {
+ // A document array may be indexed by its first element, by undefined
+ // if it is empty, or as a full array if it is embedded within another
+ // array.
+ if ( e.type() == Array && op == BSONObj::Equality ) {
_intervals.push_back( FieldInterval(e) );
-
- const BSONElement& temp = e.embeddedObject().firstElement();
- if ( ! temp.eoo() ) {
- if ( temp < e )
- _intervals.insert( _intervals.begin() , temp );
- else
- _intervals.push_back( FieldInterval(temp) );
+ BSONElement temp = e.embeddedObject().firstElement();
+ if ( temp.eoo() ) {
+ temp = staticUndefined.firstElement();
+ }
+ if ( temp < e ) {
+ _intervals.insert( _intervals.begin() , temp );
+ }
+ else {
+ _intervals.push_back( FieldInterval(temp) );
}
return;
@@ -190,7 +231,12 @@ namespace mongo {
if ( e.eoo() )
return;
- int op = e.getGtLtOp();
+
+ bool existsSpec = false;
+ if ( op == BSONObj::opEXISTS ) {
+ existsSpec = e.trueValue();
+ }
+
if ( e.type() == RegEx
|| (e.type() == Object && !e.embeddedObject()["$regex"].eoo())
) {
@@ -254,6 +300,9 @@ namespace mongo {
case BSONObj::GTE:
op = BSONObj::LT;
break;
+ case BSONObj::opEXISTS:
+ existsSpec = !existsSpec;
+ break;
default: // otherwise doesn't matter
break;
}
@@ -286,7 +335,7 @@ namespace mongo {
lower = e;
break;
case BSONObj::opALL: {
- massert( 10370 , "$all requires array", e.type() == Array );
+ uassert( 10370 , "$all requires array", e.type() == Array );
BSONObjIterator i( e.embeddedObject() );
bool bound = false;
while ( i.more() ) {
@@ -356,6 +405,13 @@ namespace mongo {
case BSONObj::opWITHIN:
_special = "2d";
break;
+ case BSONObj::opEXISTS: {
+ if ( !existsSpec ) {
+ lower = upper = staticNull.firstElement();
+ }
+ optimize = false;
+ break;
+ }
default:
break;
}
@@ -367,6 +423,8 @@ namespace mongo {
upper = addObj( b.obj() ).firstElement();
}
else if ( lower.type() == MinKey && upper.type() != MaxKey && upper.isSimpleType() ) { // TODO: get rid of isSimpleType
+ if( upper.type() == Date )
+ lowerInclusive = false;
BSONObjBuilder b;
b.appendMinForType( upper.fieldName() , upper.type() );
lower = addObj( b.obj() ).firstElement();
@@ -375,9 +433,9 @@ namespace mongo {
}
- void FieldRange::finishOperation( const vector< FieldInterval > &newIntervals, const FieldRange &other ) {
+ void FieldRange::finishOperation( const vector<FieldInterval> &newIntervals, const FieldRange &other ) {
_intervals = newIntervals;
- for( vector< BSONObj >::const_iterator i = other._objData.begin(); i != other._objData.end(); ++i )
+ for( vector<BSONObj>::const_iterator i = other._objData.begin(); i != other._objData.end(); ++i )
_objData.push_back( *i );
if ( _special.size() == 0 && other._special.size() )
_special = other._special;
@@ -407,9 +465,15 @@ namespace mongo {
}
const FieldRange &FieldRange::operator&=( const FieldRange &other ) {
- vector< FieldInterval > newIntervals;
- vector< FieldInterval >::const_iterator i = _intervals.begin();
- vector< FieldInterval >::const_iterator j = other._intervals.begin();
+ if ( !_singleKey && nontrivial() ) {
+ if ( other <= *this ) {
+ *this = other;
+ }
+ return *this;
+ }
+ vector<FieldInterval> newIntervals;
+ vector<FieldInterval>::const_iterator i = _intervals.begin();
+ vector<FieldInterval>::const_iterator j = other._intervals.begin();
while( i != _intervals.end() && j != other._intervals.end() ) {
FieldInterval overlap;
if ( fieldIntervalOverlap( *i, *j, overlap ) ) {
@@ -426,7 +490,7 @@ namespace mongo {
return *this;
}
- void handleInterval( const FieldInterval &lower, FieldBound &low, FieldBound &high, vector< FieldInterval > &newIntervals ) {
+ void handleInterval( const FieldInterval &lower, FieldBound &low, FieldBound &high, vector<FieldInterval> &newIntervals ) {
if ( low._bound.eoo() ) {
low = lower._lower; high = lower._upper;
}
@@ -446,11 +510,11 @@ namespace mongo {
}
const FieldRange &FieldRange::operator|=( const FieldRange &other ) {
- vector< FieldInterval > newIntervals;
+ vector<FieldInterval> newIntervals;
FieldBound low;
FieldBound high;
- vector< FieldInterval >::const_iterator i = _intervals.begin();
- vector< FieldInterval >::const_iterator j = other._intervals.begin();
+ vector<FieldInterval>::const_iterator i = _intervals.begin();
+ vector<FieldInterval>::const_iterator j = other._intervals.begin();
while( i != _intervals.end() && j != other._intervals.end() ) {
int cmp = i->_lower._bound.woCompare( j->_lower._bound, false );
if ( ( cmp == 0 && i->_lower._inclusive ) || cmp < 0 ) {
@@ -479,9 +543,9 @@ namespace mongo {
}
const FieldRange &FieldRange::operator-=( const FieldRange &other ) {
- vector< FieldInterval > newIntervals;
- vector< FieldInterval >::iterator i = _intervals.begin();
- vector< FieldInterval >::const_iterator j = other._intervals.begin();
+ vector<FieldInterval> newIntervals;
+ vector<FieldInterval>::iterator i = _intervals.begin();
+ vector<FieldInterval>::const_iterator j = other._intervals.begin();
while( i != _intervals.end() && j != other._intervals.end() ) {
int cmp = i->_lower._bound.woCompare( j->_lower._bound, false );
if ( cmp < 0 ||
@@ -543,20 +607,60 @@ namespace mongo {
}
// TODO write a proper implementation that doesn't do a full copy
- bool FieldRange::operator<=( const FieldRange &other ) {
+ bool FieldRange::operator<=( const FieldRange &other ) const {
FieldRange temp = *this;
temp -= other;
return temp.empty();
}
+ void FieldRange::setExclusiveBounds() {
+ for( vector<FieldInterval>::iterator i = _intervals.begin(); i != _intervals.end(); ++i ) {
+ i->_lower._inclusive = false;
+ i->_upper._inclusive = false;
+ }
+ }
+
+ void FieldRange::reverse( FieldRange &ret ) const {
+ assert( _special.empty() );
+ ret._intervals.clear();
+ ret._objData = _objData;
+ for( vector<FieldInterval>::const_reverse_iterator i = _intervals.rbegin(); i != _intervals.rend(); ++i ) {
+ FieldInterval fi;
+ fi._lower = i->_upper;
+ fi._upper = i->_lower;
+ ret._intervals.push_back( fi );
+ }
+ }
+
BSONObj FieldRange::addObj( const BSONObj &o ) {
_objData.push_back( o );
return o;
}
+ string FieldInterval::toString() const {
+ StringBuilder buf;
+ buf << ( _lower._inclusive ? "[" : "(" );
+ buf << _lower._bound;
+ buf << " , ";
+ buf << _upper._bound;
+ buf << ( _upper._inclusive ? "]" : ")" );
+ return buf.str();
+ }
+
+ string FieldRange::toString() const {
+ StringBuilder buf;
+ buf << "(FieldRange special: " << _special << " singleKey: " << _special << " intervals: ";
+ for( vector<FieldInterval>::const_iterator i = _intervals.begin(); i != _intervals.end(); ++i ) {
+ buf << i->toString();
+ }
+
+ buf << ")";
+ return buf.str();
+ }
+
string FieldRangeSet::getSpecial() const {
string s = "";
- for ( map<string,FieldRange>::iterator i=_ranges.begin(); i!=_ranges.end(); i++ ) {
+ for ( map<string,FieldRange>::const_iterator i=_ranges.begin(); i!=_ranges.end(); i++ ) {
if ( i->second.getSpecial().size() == 0 )
continue;
uassert( 13033 , "can't have 2 special fields" , s.size() == 0 );
@@ -565,12 +669,111 @@ namespace mongo {
return s;
}
+ /**
+ * Btree scanning for a multidimensional key range will yield a
+ * multidimensional box. The idea here is that if an 'other'
+ * multidimensional box contains the current box we don't have to scan
+ * the current box. If the 'other' box contains the current box in
+ * all dimensions but one, we can safely subtract the values of 'other'
+ * along that one dimension from the values for the current box on the
+ * same dimension. In other situations, subtracting the 'other'
+ * box from the current box yields a result that is not a box (but
+ * rather can be expressed as a union of boxes). We don't support
+ * such splitting currently in calculating index ranges. Note that
+ * where I have said 'box' above, I actually mean sets of boxes because
+ * a field range can consist of multiple intervals.
+ */
+ const FieldRangeSet &FieldRangeSet::operator-=( const FieldRangeSet &other ) {
+ int nUnincluded = 0;
+ string unincludedKey;
+ map<string,FieldRange>::iterator i = _ranges.begin();
+ map<string,FieldRange>::const_iterator j = other._ranges.begin();
+ while( nUnincluded < 2 && i != _ranges.end() && j != other._ranges.end() ) {
+ int cmp = i->first.compare( j->first );
+ if ( cmp == 0 ) {
+ if ( i->second <= j->second ) {
+ // nothing
+ }
+ else {
+ ++nUnincluded;
+ unincludedKey = i->first;
+ }
+ ++i;
+ ++j;
+ }
+ else if ( cmp < 0 ) {
+ ++i;
+ }
+ else {
+ // other has a bound we don't, nothing can be done
+ return *this;
+ }
+ }
+ if ( j != other._ranges.end() ) {
+ // other has a bound we don't, nothing can be done
+ return *this;
+ }
+ if ( nUnincluded > 1 ) {
+ return *this;
+ }
+ if ( nUnincluded == 0 ) {
+ makeEmpty();
+ return *this;
+ }
+ // nUnincluded == 1
+ range( unincludedKey.c_str() ) -= other.range( unincludedKey.c_str() );
+ appendQueries( other );
+ return *this;
+ }
+
+ const FieldRangeSet &FieldRangeSet::operator&=( const FieldRangeSet &other ) {
+ map<string,FieldRange>::iterator i = _ranges.begin();
+ map<string,FieldRange>::const_iterator j = other._ranges.begin();
+ while( i != _ranges.end() && j != other._ranges.end() ) {
+ int cmp = i->first.compare( j->first );
+ if ( cmp == 0 ) {
+ // Same field name, so find range intersection.
+ i->second &= j->second;
+ ++i;
+ ++j;
+ }
+ else if ( cmp < 0 ) {
+ // Field present in *this.
+ ++i;
+ }
+ else {
+ // Field not present in *this, so add it.
+ range( j->first.c_str() ) = j->second;
+ ++j;
+ }
+ }
+ while( j != other._ranges.end() ) {
+ // Field not present in *this, add it.
+ range( j->first.c_str() ) = j->second;
+ ++j;
+ }
+ appendQueries( other );
+ return *this;
+ }
+
+ void FieldRangeSet::appendQueries( const FieldRangeSet &other ) {
+ for( vector<BSONObj>::const_iterator i = other._queries.begin(); i != other._queries.end(); ++i ) {
+ _queries.push_back( *i );
+ }
+ }
+
+ void FieldRangeSet::makeEmpty() {
+ for( map<string,FieldRange>::iterator i = _ranges.begin(); i != _ranges.end(); ++i ) {
+ i->second.makeEmpty();
+ }
+ }
+
void FieldRangeSet::processOpElement( const char *fieldName, const BSONElement &f, bool isNot, bool optimize ) {
BSONElement g = f;
int op2 = g.getGtLtOp();
if ( op2 == BSONObj::opALL ) {
BSONElement h = g;
- massert( 13050 , "$all requires array", h.type() == Array );
+ uassert( 13050 , "$all requires array", h.type() == Array );
BSONObjIterator i( h.embeddedObject() );
if( i.more() ) {
BSONElement x = i.next();
@@ -590,29 +793,56 @@ namespace mongo {
int op3 = getGtLtOp( h );
if ( op3 == BSONObj::Equality ) {
- _ranges[ fullname ] &= FieldRange( h , isNot , optimize );
+ range( fullname.c_str() ) &= FieldRange( h , _singleKey , isNot , optimize );
}
else {
BSONObjIterator l( h.embeddedObject() );
while ( l.more() ) {
- _ranges[ fullname ] &= FieldRange( l.next() , isNot , optimize );
+ range( fullname.c_str() ) &= FieldRange( l.next() , _singleKey , isNot , optimize );
}
}
}
}
else {
- _ranges[ fieldName ] &= FieldRange( f , isNot , optimize );
+ range( fieldName ) &= FieldRange( f , _singleKey , isNot , optimize );
}
}
void FieldRangeSet::processQueryField( const BSONElement &e, bool optimize ) {
+ if ( e.fieldName()[ 0 ] == '$' ) {
+ if ( strcmp( e.fieldName(), "$and" ) == 0 ) {
+ uassert( 14816 , "$and expression must be a nonempty array" , e.type() == Array && e.embeddedObject().nFields() > 0 );
+ BSONObjIterator i( e.embeddedObject() );
+ while( i.more() ) {
+ BSONElement e = i.next();
+ uassert( 14817 , "$and elements must be objects" , e.type() == Object );
+ BSONObjIterator j( e.embeddedObject() );
+ while( j.more() ) {
+ processQueryField( j.next(), optimize );
+ }
+ }
+ }
+
+ if ( strcmp( e.fieldName(), "$where" ) == 0 ) {
+ return;
+ }
+
+ if ( strcmp( e.fieldName(), "$or" ) == 0 ) {
+ return;
+ }
+
+ if ( strcmp( e.fieldName(), "$nor" ) == 0 ) {
+ return;
+ }
+ }
+
bool equality = ( getGtLtOp( e ) == BSONObj::Equality );
if ( equality && e.type() == Object ) {
- equality = ( strcmp( e.embeddedObject().firstElement().fieldName(), "$not" ) != 0 );
+ equality = ( strcmp( e.embeddedObject().firstElementFieldName(), "$not" ) != 0 );
}
if ( equality || ( e.type() == Object && !e.embeddedObject()[ "$regex" ].eoo() ) ) {
- _ranges[ e.fieldName() ] &= FieldRange( e , false , optimize );
+ range( e.fieldName() ) &= FieldRange( e , _singleKey , false , optimize );
}
if ( !equality ) {
BSONObjIterator j( e.embeddedObject() );
@@ -643,93 +873,97 @@ namespace mongo {
}
}
- FieldRangeSet::FieldRangeSet( const char *ns, const BSONObj &query , bool optimize )
- : _ns( ns ), _queries( 1, query.getOwned() ) {
+ FieldRangeSet::FieldRangeSet( const char *ns, const BSONObj &query, bool singleKey, bool optimize )
+ : _ns( ns ), _queries( 1, query.getOwned() ), _singleKey( singleKey ) {
BSONObjIterator i( _queries[ 0 ] );
while( i.more() ) {
+ processQueryField( i.next(), optimize );
+ }
+ }
+
+ FieldRangeVector::FieldRangeVector( const FieldRangeSet &frs, const IndexSpec &indexSpec, int direction )
+ :_indexSpec( indexSpec ), _direction( direction >= 0 ? 1 : -1 ) {
+ _queries = frs._queries;
+ BSONObjIterator i( _indexSpec.keyPattern );
+ set< string > baseObjectNontrivialPrefixes;
+ while( i.more() ) {
BSONElement e = i.next();
- // e could be x:1 or x:{$gt:1}
-
- if ( strcmp( e.fieldName(), "$where" ) == 0 ) {
- continue;
+ const FieldRange *range = &frs.range( e.fieldName() );
+ if ( !frs.singleKey() ) {
+ string prefix = str::before( e.fieldName(), '.' );
+ if ( baseObjectNontrivialPrefixes.count( prefix ) > 0 ) {
+ // A field with the same parent field has already been
+ // constrained, and with a multikey index we cannot
+ // constrain this field.
+ range = &frs.trivialRange();
+ } else {
+ if ( range->nontrivial() ) {
+ baseObjectNontrivialPrefixes.insert( prefix );
+ }
+ }
}
-
- if ( strcmp( e.fieldName(), "$or" ) == 0 ) {
- continue;
+ int number = (int) e.number(); // returns 0.0 if not numeric
+ bool forward = ( ( number >= 0 ? 1 : -1 ) * ( direction >= 0 ? 1 : -1 ) > 0 );
+ if ( forward ) {
+ _ranges.push_back( *range );
}
-
- if ( strcmp( e.fieldName(), "$nor" ) == 0 ) {
- continue;
+ else {
+ _ranges.push_back( FieldRange( BSONObj().firstElement(), frs.singleKey(), false, true ) );
+ range->reverse( _ranges.back() );
}
+ assert( !_ranges.back().empty() );
+ }
+ uassert( 13385, "combinatorial limit of $in partitioning of result set exceeded", size() < 1000000 );
+ }
- processQueryField( e, optimize );
+ BSONObj FieldRangeVector::startKey() const {
+ BSONObjBuilder b;
+ for( vector<FieldRange>::const_iterator i = _ranges.begin(); i != _ranges.end(); ++i ) {
+ const FieldInterval &fi = i->intervals().front();
+ b.appendAs( fi._lower._bound, "" );
}
+ return b.obj();
}
- FieldRangeOrSet::FieldRangeOrSet( const char *ns, const BSONObj &query , bool optimize )
- : _baseSet( ns, query, optimize ), _orFound() {
-
- BSONObjIterator i( _baseSet._queries[ 0 ] );
-
- while( i.more() ) {
- BSONElement e = i.next();
- if ( strcmp( e.fieldName(), "$or" ) == 0 ) {
- massert( 13262, "$or requires nonempty array", e.type() == Array && e.embeddedObject().nFields() > 0 );
- BSONObjIterator j( e.embeddedObject() );
- while( j.more() ) {
- BSONElement f = j.next();
- massert( 13263, "$or array must contain objects", f.type() == Object );
- _orSets.push_back( FieldRangeSet( ns, f.embeddedObject(), optimize ) );
- massert( 13291, "$or may not contain 'special' query", _orSets.back().getSpecial().empty() );
- _originalOrSets.push_back( _orSets.back() );
- }
- _orFound = true;
- continue;
- }
+ BSONObj FieldRangeVector::endKey() const {
+ BSONObjBuilder b;
+ for( vector<FieldRange>::const_iterator i = _ranges.begin(); i != _ranges.end(); ++i ) {
+ const FieldInterval &fi = i->intervals().back();
+ b.appendAs( fi._upper._bound, "" );
}
+ return b.obj();
}
- void FieldRangeOrSet::popOrClause( const BSONObj &indexSpec ) {
- massert( 13274, "no or clause to pop", !orFinished() );
- auto_ptr< FieldRangeSet > holder;
- FieldRangeSet *toDiff = &_originalOrSets.front();
- if ( toDiff->matchPossible() && !indexSpec.isEmpty() ) {
- holder.reset( toDiff->subset( indexSpec ) );
- toDiff = holder.get();
- }
- list< FieldRangeSet >::iterator i = _orSets.begin();
- list< FieldRangeSet >::iterator j = _originalOrSets.begin();
- ++i;
- ++j;
- while( i != _orSets.end() ) {
- *i -= *toDiff;
- if( !i->matchPossible() ) {
- i = _orSets.erase( i );
- j = _originalOrSets.erase( j );
- }
- else {
- ++i;
- ++j;
- }
+ BSONObj FieldRangeVector::obj() const {
+ BSONObjBuilder b;
+ BSONObjIterator k( _indexSpec.keyPattern );
+ for( int i = 0; i < (int)_ranges.size(); ++i ) {
+ BSONArrayBuilder a( b.subarrayStart( k.next().fieldName() ) );
+ for( vector<FieldInterval>::const_iterator j = _ranges[ i ].intervals().begin();
+ j != _ranges[ i ].intervals().end(); ++j ) {
+ a << BSONArray( BSON_ARRAY( j->_lower._bound << j->_upper._bound ).clientReadable() );
+ }
+ a.done();
}
- _oldOrSets.push_front( _orSets.front() );
- _orSets.pop_front();
- _originalOrSets.pop_front();
+ return b.obj();
}
-
- FieldRange *FieldRangeSet::trivialRange_ = 0;
- FieldRange &FieldRangeSet::trivialRange() {
- if ( trivialRange_ == 0 )
- trivialRange_ = new FieldRange();
- return *trivialRange_;
+
+ FieldRange *FieldRangeSet::__singleKeyTrivialRange = 0;
+ FieldRange *FieldRangeSet::__multiKeyTrivialRange = 0;
+ const FieldRange &FieldRangeSet::trivialRange() const {
+ FieldRange *&ret = _singleKey ? __singleKeyTrivialRange : __multiKeyTrivialRange;
+ if ( ret == 0 ) {
+ ret = new FieldRange( BSONObj().firstElement(), _singleKey, false, true );
+ }
+ return *ret;
}
BSONObj FieldRangeSet::simplifiedQuery( const BSONObj &_fields ) const {
BSONObj fields = _fields;
if ( fields.isEmpty() ) {
BSONObjBuilder b;
- for( map< string, FieldRange >::const_iterator i = _ranges.begin(); i != _ranges.end(); ++i ) {
+ for( map<string,FieldRange>::const_iterator i = _ranges.begin(); i != _ranges.end(); ++i ) {
b.append( i->first, 1 );
}
fields = b.obj();
@@ -739,17 +973,17 @@ namespace mongo {
while( i.more() ) {
BSONElement e = i.next();
const char *name = e.fieldName();
- const FieldRange &range = _ranges[ name ];
- assert( !range.empty() );
- if ( range.equality() )
- b.appendAs( range.min(), name );
- else if ( range.nontrivial() ) {
+ const FieldRange &eRange = range( name );
+ assert( !eRange.empty() );
+ if ( eRange.equality() )
+ b.appendAs( eRange.min(), name );
+ else if ( eRange.nontrivial() ) {
BSONObj o;
BSONObjBuilder c;
- if ( range.min().type() != MinKey )
- c.appendAs( range.min(), range.minInclusive() ? "$gte" : "$gt" );
- if ( range.max().type() != MaxKey )
- c.appendAs( range.max(), range.maxInclusive() ? "$lte" : "$lt" );
+ if ( eRange.min().type() != MinKey )
+ c.appendAs( eRange.min(), eRange.minInclusive() ? "$gte" : "$gt" );
+ if ( eRange.max().type() != MaxKey )
+ c.appendAs( eRange.max(), eRange.maxInclusive() ? "$lte" : "$lt" );
o = c.obj();
b.append( name, o );
}
@@ -759,7 +993,7 @@ namespace mongo {
QueryPattern FieldRangeSet::pattern( const BSONObj &sort ) const {
QueryPattern qp;
- for( map< string, FieldRange >::const_iterator i = _ranges.begin(); i != _ranges.end(); ++i ) {
+ for( map<string,FieldRange>::const_iterator i = _ranges.begin(); i != _ranges.end(); ++i ) {
assert( !i->second.empty() );
if ( i->second.equality() ) {
qp._fieldTypes[ i->first ] = QueryPattern::Equality;
@@ -781,9 +1015,9 @@ namespace mongo {
// TODO get rid of this
BoundList FieldRangeSet::indexBounds( const BSONObj &keyPattern, int direction ) const {
- typedef vector< pair< shared_ptr< BSONObjBuilder >, shared_ptr< BSONObjBuilder > > > BoundBuilders;
+ typedef vector<pair<shared_ptr<BSONObjBuilder>, shared_ptr<BSONObjBuilder> > > BoundBuilders;
BoundBuilders builders;
- builders.push_back( make_pair( shared_ptr< BSONObjBuilder >( new BSONObjBuilder() ), shared_ptr< BSONObjBuilder >( new BSONObjBuilder() ) ) );
+ builders.push_back( make_pair( shared_ptr<BSONObjBuilder>( new BSONObjBuilder() ), shared_ptr<BSONObjBuilder>( new BSONObjBuilder() ) ) );
BSONObjIterator i( keyPattern );
bool ineq = false; // until ineq is true, we are just dealing with equality and $in bounds
while( i.more() ) {
@@ -803,16 +1037,16 @@ namespace mongo {
ineq = true;
}
BoundBuilders newBuilders;
- const vector< FieldInterval > &intervals = fr.intervals();
+ const vector<FieldInterval> &intervals = fr.intervals();
for( BoundBuilders::const_iterator i = builders.begin(); i != builders.end(); ++i ) {
BSONObj first = i->first->obj();
BSONObj second = i->second->obj();
const unsigned maxCombinations = 4000000;
if ( forward ) {
- for( vector< FieldInterval >::const_iterator j = intervals.begin(); j != intervals.end(); ++j ) {
+ for( vector<FieldInterval>::const_iterator j = intervals.begin(); j != intervals.end(); ++j ) {
uassert( 13303, "combinatorial limit of $in partitioning of result set exceeded", newBuilders.size() < maxCombinations );
- newBuilders.push_back( make_pair( shared_ptr< BSONObjBuilder >( new BSONObjBuilder() ), shared_ptr< BSONObjBuilder >( new BSONObjBuilder() ) ) );
+ newBuilders.push_back( make_pair( shared_ptr<BSONObjBuilder>( new BSONObjBuilder() ), shared_ptr<BSONObjBuilder>( new BSONObjBuilder() ) ) );
newBuilders.back().first->appendElements( first );
newBuilders.back().second->appendElements( second );
newBuilders.back().first->appendAs( j->_lower._bound, "" );
@@ -820,9 +1054,9 @@ namespace mongo {
}
}
else {
- for( vector< FieldInterval >::const_reverse_iterator j = intervals.rbegin(); j != intervals.rend(); ++j ) {
+ for( vector<FieldInterval>::const_reverse_iterator j = intervals.rbegin(); j != intervals.rend(); ++j ) {
uassert( 13304, "combinatorial limit of $in partitioning of result set exceeded", newBuilders.size() < maxCombinations );
- newBuilders.push_back( make_pair( shared_ptr< BSONObjBuilder >( new BSONObjBuilder() ), shared_ptr< BSONObjBuilder >( new BSONObjBuilder() ) ) );
+ newBuilders.push_back( make_pair( shared_ptr<BSONObjBuilder>( new BSONObjBuilder() ), shared_ptr<BSONObjBuilder>( new BSONObjBuilder() ) ) );
newBuilders.back().first->appendElements( first );
newBuilders.back().second->appendElements( second );
newBuilders.back().first->appendAs( j->_upper._bound, "" );
@@ -847,18 +1081,52 @@ namespace mongo {
}
FieldRangeSet *FieldRangeSet::subset( const BSONObj &fields ) const {
- FieldRangeSet *ret = new FieldRangeSet( _ns, BSONObj() );
+ FieldRangeSet *ret = new FieldRangeSet( _ns, BSONObj(), _singleKey, true );
BSONObjIterator i( fields );
while( i.more() ) {
BSONElement e = i.next();
- if ( _ranges[ e.fieldName() ].nontrivial() ) {
- ret->_ranges[ e.fieldName() ] = _ranges[ e.fieldName() ];
+ if ( range( e.fieldName() ).nontrivial() ) {
+ ret->range( e.fieldName() ) = range( e.fieldName() );
}
}
ret->_queries = _queries;
return ret;
}
+
+ bool FieldRangeSetPair::noNontrivialRanges() const {
+ return _singleKey.matchPossible() && _singleKey.nNontrivialRanges() == 0 &&
+ _multiKey.matchPossible() && _multiKey.nNontrivialRanges() == 0;
+ }
+
+ FieldRangeSetPair &FieldRangeSetPair::operator&=( const FieldRangeSetPair &other ) {
+ _singleKey &= other._singleKey;
+ _multiKey &= other._multiKey;
+ return *this;
+ }
+ FieldRangeSetPair &FieldRangeSetPair::operator-=( const FieldRangeSet &scanned ) {
+ _singleKey -= scanned;
+ _multiKey -= scanned;
+ return *this;
+ }
+
+ BSONObj FieldRangeSetPair::simplifiedQueryForIndex( NamespaceDetails *d, int idxNo, const BSONObj &keyPattern ) const {
+ return frsForIndex( d, idxNo ).simplifiedQuery( keyPattern );
+ }
+
+ void FieldRangeSetPair::assertValidIndex( const NamespaceDetails *d, int idxNo ) const {
+ massert( 14048, "FieldRangeSetPair invalid index specified", idxNo >= 0 && idxNo < d->nIndexes );
+ }
+
+ const FieldRangeSet &FieldRangeSetPair::frsForIndex( const NamespaceDetails* nsd, int idxNo ) const {
+ assertValidIndexOrNoIndex( nsd, idxNo );
+ if ( idxNo < 0 ) {
+ // An unindexed cursor cannot have a "single key" constraint.
+ return _multiKey;
+ }
+ return nsd->isMultikey( idxNo ) ? _multiKey : _singleKey;
+ }
+
bool FieldRangeVector::matchesElement( const BSONElement &e, int i, bool forward ) const {
bool eq;
int l = matchingLowElement( e, i, forward, eq );
@@ -913,41 +1181,52 @@ namespace mongo {
return l;
}
- bool FieldRangeVector::matches( const BSONObj &obj ) const {
- if ( !_indexSpec.get() ) {
- _indexSpec.reset( new IndexSpec( _keyPattern ) );
+ bool FieldRangeVector::matchesKey( const BSONObj &key ) const {
+ BSONObjIterator j( key );
+ BSONObjIterator k( _indexSpec.keyPattern );
+ for( int l = 0; l < (int)_ranges.size(); ++l ) {
+ int number = (int) k.next().number();
+ bool forward = ( number >= 0 ? 1 : -1 ) * ( _direction >= 0 ? 1 : -1 ) > 0;
+ if ( !matchesElement( j.next(), l, forward ) ) {
+ return false;
+ }
}
+ return true;
+ }
+
+ bool FieldRangeVector::matches( const BSONObj &obj ) const {
// TODO The representation of matching keys could potentially be optimized
// more for the case at hand. (For example, we can potentially consider
// fields individually instead of constructing several bson objects using
// multikey arrays.) But getKeys() canonically defines the key set for a
// given object and for now we are using it as is.
- BSONObjSetDefaultOrder keys;
- _indexSpec->getKeys( obj, keys );
- for( BSONObjSetDefaultOrder::const_iterator i = keys.begin(); i != keys.end(); ++i ) {
- BSONObjIterator j( *i );
- BSONObjIterator k( _keyPattern );
- bool match = true;
- for( int l = 0; l < (int)_ranges.size(); ++l ) {
- int number = (int) k.next().number();
- bool forward = ( number >= 0 ? 1 : -1 ) * ( _direction >= 0 ? 1 : -1 ) > 0;
- if ( !matchesElement( j.next(), l, forward ) ) {
- match = false;
- break;
- }
- }
- if ( match ) {
- // The *i key matched a valid range for every element.
- return true;
+ BSONObjSet keys;
+ _indexSpec.getKeys( obj, keys );
+ for( BSONObjSet::const_iterator i = keys.begin(); i != keys.end(); ++i ) {
+ if ( matchesKey( *i ) ) {
+ return true;
}
}
return false;
}
+ BSONObj FieldRangeVector::firstMatch( const BSONObj &obj ) const {
+ // NOTE Only works in forward direction.
+ assert( _direction >= 0 );
+ BSONObjSet keys( BSONObjCmp( _indexSpec.keyPattern ) );
+ _indexSpec.getKeys( obj, keys );
+ for( BSONObjSet::const_iterator i = keys.begin(); i != keys.end(); ++i ) {
+ if ( matchesKey( *i ) ) {
+ return *i;
+ }
+ }
+ return BSONObj();
+ }
+
// TODO optimize more
- int FieldRangeVector::Iterator::advance( const BSONObj &curr ) {
+ int FieldRangeVectorIterator::advance( const BSONObj &curr ) {
BSONObjIterator j( curr );
- BSONObjIterator o( _v._keyPattern );
+ BSONObjIterator o( _v._indexSpec.keyPattern );
// track first field for which we are not at the end of the valid values,
// since we may need to advance from the key prefix ending with this field
int latestNonEndpoint = -1;
@@ -1085,13 +1364,109 @@ namespace mongo {
return -1;
}
- void FieldRangeVector::Iterator::prepDive() {
+ void FieldRangeVectorIterator::prepDive() {
for( int j = 0; j < (int)_i.size(); ++j ) {
_cmp[ j ] = &_v._ranges[ j ].intervals().front()._lower._bound;
_inc[ j ] = _v._ranges[ j ].intervals().front()._lower._inclusive;
}
}
+ BSONObj FieldRangeVectorIterator::startKey() {
+ BSONObjBuilder b;
+ for( unsigned int i = 0; i < _i.size(); ++i ) {
+ const FieldInterval &fi = _v._ranges[ i ].intervals()[ _i[ i ] ];
+ b.appendAs( fi._lower._bound, "" );
+ }
+ return b.obj();
+ }
+
+ // temp
+ BSONObj FieldRangeVectorIterator::endKey() {
+ BSONObjBuilder b;
+ for( unsigned int i = 0; i < _i.size(); ++i ) {
+ const FieldInterval &fi = _v._ranges[ i ].intervals()[ _i[ i ] ];
+ b.appendAs( fi._upper._bound, "" );
+ }
+ return b.obj();
+ }
+
+ OrRangeGenerator::OrRangeGenerator( const char *ns, const BSONObj &query , bool optimize )
+ : _baseSet( ns, query, optimize ), _orFound() {
+
+ BSONObjIterator i( _baseSet.originalQuery() );
+
+ while( i.more() ) {
+ BSONElement e = i.next();
+ if ( strcmp( e.fieldName(), "$or" ) == 0 ) {
+ uassert( 13262, "$or requires nonempty array", e.type() == Array && e.embeddedObject().nFields() > 0 );
+ BSONObjIterator j( e.embeddedObject() );
+ while( j.more() ) {
+ BSONElement f = j.next();
+ uassert( 13263, "$or array must contain objects", f.type() == Object );
+ _orSets.push_back( FieldRangeSetPair( ns, f.embeddedObject(), optimize ) );
+ uassert( 13291, "$or may not contain 'special' query", _orSets.back().getSpecial().empty() );
+ _originalOrSets.push_back( _orSets.back() );
+ }
+ _orFound = true;
+ continue;
+ }
+ }
+ }
+
+ void OrRangeGenerator::assertMayPopOrClause() {
+ massert( 13274, "no or clause to pop", !orFinished() );
+ }
+
+ void OrRangeGenerator::popOrClause( NamespaceDetails *nsd, int idxNo, const BSONObj &keyPattern ) {
+ assertMayPopOrClause();
+ auto_ptr<FieldRangeSet> holder;
+ const FieldRangeSet *toDiff = &_originalOrSets.front().frsForIndex( nsd, idxNo );
+ BSONObj indexSpec = keyPattern;
+ if ( !indexSpec.isEmpty() && toDiff->matchPossibleForIndex( indexSpec ) ) {
+ holder.reset( toDiff->subset( indexSpec ) );
+ toDiff = holder.get();
+ }
+ popOrClause( toDiff, nsd, idxNo, keyPattern );
+ }
+
+ void OrRangeGenerator::popOrClauseSingleKey() {
+ assertMayPopOrClause();
+ FieldRangeSet *toDiff = &_originalOrSets.front()._singleKey;
+ popOrClause( toDiff );
+ }
+
+ /**
+ * Removes the top or clause, which would have been recently scanned, and
+ * removes the field ranges it covers from all subsequent or clauses. As a
+ * side effect, this function may invalidate the return values of topFrsp()
+ * calls made before this function was called.
+ * @param keyPattern - Key pattern of the index that was used to satisfy the last or
+ * clause. Used to determine the range of keys that were scanned. If
+ * empty we do not constrain the previous clause's ranges using index keys,
+ * which may reduce opportunities for range elimination.
+ */
+ void OrRangeGenerator::popOrClause( const FieldRangeSet *toDiff, NamespaceDetails *d, int idxNo, const BSONObj &keyPattern ) {
+ list<FieldRangeSetPair>::iterator i = _orSets.begin();
+ list<FieldRangeSetPair>::iterator j = _originalOrSets.begin();
+ ++i;
+ ++j;
+ while( i != _orSets.end() ) {
+ *i -= *toDiff;
+ // Check if match is possible at all, and if it is possible for the recently scanned index.
+ if( !i->matchPossible() || ( d && !i->matchPossibleForIndex( d, idxNo, keyPattern ) ) ) {
+ i = _orSets.erase( i );
+ j = _originalOrSets.erase( j );
+ }
+ else {
+ ++i;
+ ++j;
+ }
+ }
+ _oldOrSets.push_front( _orSets.front() );
+ _orSets.pop_front();
+ _originalOrSets.pop_front();
+ }
+
struct SimpleRegexUnitTest : UnitTest {
void run() {
{
@@ -1148,6 +1523,16 @@ namespace mongo {
BSONObj o = b.done();
assert( simpleRegex(o.firstElement()) == "foo #" );
}
+ {
+ assert( simpleRegex("^\\Qasdf\\E", "", NULL) == "asdf" );
+ assert( simpleRegex("^\\Qasdf\\E.*", "", NULL) == "asdf" );
+ assert( simpleRegex("^\\Qasdf", "", NULL) == "asdf" ); // PCRE supports this
+ assert( simpleRegex("^\\Qasdf\\\\E", "", NULL) == "asdf\\" );
+ assert( simpleRegex("^\\Qas.*df\\E", "", NULL) == "as.*df" );
+ assert( simpleRegex("^\\Qas\\Q[df\\E", "", NULL) == "as\\Q[df" );
+ assert( simpleRegex("^\\Qas\\E\\\\E\\Q$df\\E", "", NULL) == "as\\E$df" ); // quoted string containing \E
+ }
+
}
} simple_regex_unittest;
@@ -1173,36 +1558,5 @@ namespace mongo {
return num;
}
- string debugString( Message& m ) {
- stringstream ss;
- ss << "op: " << opToString( m.operation() ) << " len: " << m.size();
- if ( m.operation() >= 2000 && m.operation() < 2100 ) {
- DbMessage d(m);
- ss << " ns: " << d.getns();
- switch ( m.operation() ) {
- case dbUpdate: {
- int flags = d.pullInt();
- BSONObj q = d.nextJsObj();
- BSONObj o = d.nextJsObj();
- ss << " flags: " << flags << " query: " << q << " update: " << o;
- break;
- }
- case dbInsert:
- ss << d.nextJsObj();
- break;
- case dbDelete: {
- int flags = d.pullInt();
- BSONObj q = d.nextJsObj();
- ss << " flags: " << flags << " query: " << q;
- break;
- }
- default:
- ss << " CANNOT HANDLE YET";
- }
-
-
- }
- return ss.str();
- }
} // namespace mongo
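The comment above FieldRangeSet::operator-=() describes range subtraction in terms of boxes. The following standalone sketch illustrates that rule with one integer interval per field (simplified, hypothetical types; not the patch's interval vectors, which can also split a single dimension into several intervals): if the scanned box covers this box on every dimension the result is empty, if it covers every dimension but one the box is trimmed along that single dimension, and otherwise the box is left unchanged as a conservative superset.

    // Illustrative sketch only; the types here are hypothetical simplifications.
    #include <iostream>
    #include <map>
    #include <string>

    // A closed integer interval standing in for a field's range; empty when lower > upper.
    struct Interval {
        int lower, upper;
        bool subsetOf( const Interval &o ) const { return o.lower <= lower && upper <= o.upper; }
    };

    typedef std::map<std::string, Interval> Box;

    // Subtract the already scanned box 'other' from 'box', following the
    // "contained in all dimensions but at most one" rule.
    void subtractBox( Box &box, const Box &other ) {
        int nUnincluded = 0;
        std::string unincludedKey;
        for ( Box::const_iterator j = other.begin(); j != other.end(); ++j ) {
            Box::iterator i = box.find( j->first );
            if ( i == box.end() )
                return;              // 'other' constrains a dimension we don't; give up
            if ( !i->second.subsetOf( j->second ) ) {
                if ( ++nUnincluded > 1 )
                    return;          // the difference would not be a box; keep the superset
                unincludedKey = i->first;
            }
        }
        if ( nUnincluded == 0 ) {    // 'other' covers everything that 'box' covers
            for ( Box::iterator i = box.begin(); i != box.end(); ++i ) {
                i->second.lower = 1;
                i->second.upper = 0; // make every range empty
            }
            return;
        }
        // Exactly one uncovered dimension: trim the box along it only.
        Interval &r = box[ unincludedKey ];
        const Interval &o = other.find( unincludedKey )->second;
        if ( o.lower <= r.lower && o.upper >= r.lower )
            r.lower = o.upper + 1;   // scanned range overlaps the low end
        else if ( o.upper >= r.upper && o.lower <= r.upper )
            r.upper = o.lower - 1;   // scanned range overlaps the high end
        // A scanned range strictly inside r would split it; keep the superset here.
    }

    int main() {
        Box box, scanned;
        Interval x = { 0, 10 }, y = { 0, 10 };
        box[ "x" ] = x;  box[ "y" ] = y;
        Interval sx = { 0, 10 }, sy = { 0, 4 };
        scanned[ "x" ] = sx;  scanned[ "y" ] = sy;   // covers x fully, y only partially
        subtractBox( box, scanned );
        std::cout << box[ "y" ].lower << ".." << box[ "y" ].upper << std::endl;  // prints 5..10
        return 0;
    }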
diff --git a/db/queryutil.h b/db/queryutil.h
index 2746695..104cde2 100644
--- a/db/queryutil.h
+++ b/db/queryutil.h
@@ -1,4 +1,4 @@
-// queryutil.h
+// @file queryutil.h - Utility classes representing ranges of valid BSONElement values for a query.
/* Copyright 2009 10gen Inc.
*
@@ -18,9 +18,14 @@
#pragma once
#include "jsobj.h"
+#include "indexkey.h"
namespace mongo {
+ /**
+ * One side of an interval of valid BSONElements, specified by a value and a
+ * boolean indicating whether the interval includes the value.
+ */
struct FieldBound {
BSONElement _bound;
bool _inclusive;
@@ -31,6 +36,7 @@ namespace mongo {
void flipInclusive() { _inclusive = !_inclusive; }
};
+ /** A closed interval composed of a lower and an upper FieldBound. */
struct FieldInterval {
FieldInterval() : _cachedEquality( -1 ) {}
FieldInterval( const BSONElement& e ) : _cachedEquality( -1 ) {
@@ -39,381 +45,270 @@ namespace mongo {
}
FieldBound _lower;
FieldBound _upper;
+ /** @return true iff at least one element can be contained in the interval. */
bool strictValid() const {
int cmp = _lower._bound.woCompare( _upper._bound, false );
return ( cmp < 0 || ( cmp == 0 && _lower._inclusive && _upper._inclusive ) );
}
- bool equality() const {
- if ( _cachedEquality == -1 ) {
- _cachedEquality = ( _lower._inclusive && _upper._inclusive && _lower._bound.woCompare( _upper._bound, false ) == 0 );
- }
- return _cachedEquality;
- }
+ /** @return true iff the interval is an equality constraint. */
+ bool equality() const;
mutable int _cachedEquality;
+
+ string toString() const;
};
- // range of a field's value that may be determined from query -- used to
- // determine index limits
+ /**
+ * An ordered list of FieldIntervals expressing constraints on valid
+ * BSONElement values for a field.
+ */
class FieldRange {
public:
- FieldRange( const BSONElement &e = BSONObj().firstElement() , bool isNot=false , bool optimize=true );
+ FieldRange( const BSONElement &e , bool singleKey , bool isNot=false , bool optimize=true );
+
+ /** @return Range intersection with 'other'. */
const FieldRange &operator&=( const FieldRange &other );
+ /** @return Range union with 'other'. */
const FieldRange &operator|=( const FieldRange &other );
+ /** @return Range of elements included in 'this' but not 'other'. */
const FieldRange &operator-=( const FieldRange &other );
- // true iff other includes this
- bool operator<=( const FieldRange &other );
+ /** @return true iff this range is a subset of 'other'. */
+ bool operator<=( const FieldRange &other ) const;
+
+ /**
+ * If there are any valid values for this range, the extreme values can
+ * be extracted.
+ */
+
BSONElement min() const { assert( !empty() ); return _intervals[ 0 ]._lower._bound; }
BSONElement max() const { assert( !empty() ); return _intervals[ _intervals.size() - 1 ]._upper._bound; }
bool minInclusive() const { assert( !empty() ); return _intervals[ 0 ]._lower._inclusive; }
bool maxInclusive() const { assert( !empty() ); return _intervals[ _intervals.size() - 1 ]._upper._inclusive; }
- bool equality() const {
- return
- !empty() &&
- min().woCompare( max(), false ) == 0 &&
- maxInclusive() &&
- minInclusive();
- }
- bool inQuery() const {
- if ( equality() ) {
- return true;
- }
- for( vector< FieldInterval >::const_iterator i = _intervals.begin(); i != _intervals.end(); ++i ) {
- if ( !i->equality() ) {
- return false;
- }
- }
- return true;
- }
- bool nontrivial() const {
- return
- ! empty() &&
- ( _intervals.size() != 1 ||
- minKey.firstElement().woCompare( min(), false ) != 0 ||
- maxKey.firstElement().woCompare( max(), false ) != 0 );
- }
+
+ /** @return true iff this range expresses a single equality interval. */
+ bool equality() const;
+ /** @return true if all the intervals for this range are equalities */
+ bool inQuery() const;
+ /** @return true iff this range does not include every BSONElement */
+ bool nontrivial() const;
+ /** @return true iff this range matches no BSONElements. */
bool empty() const { return _intervals.empty(); }
+
+ /** Empty the range so it matches no BSONElements. */
void makeEmpty() { _intervals.clear(); }
- const vector< FieldInterval > &intervals() const { return _intervals; }
+ const vector<FieldInterval> &intervals() const { return _intervals; }
string getSpecial() const { return _special; }
- void setExclusiveBounds() {
- for( vector< FieldInterval >::iterator i = _intervals.begin(); i != _intervals.end(); ++i ) {
- i->_lower._inclusive = false;
- i->_upper._inclusive = false;
- }
- }
- // constructs a range which is the reverse of the current one
- // note - the resulting intervals may not be strictValid()
- void reverse( FieldRange &ret ) const {
- assert( _special.empty() );
- ret._intervals.clear();
- ret._objData = _objData;
- for( vector< FieldInterval >::const_reverse_iterator i = _intervals.rbegin(); i != _intervals.rend(); ++i ) {
- FieldInterval fi;
- fi._lower = i->_upper;
- fi._upper = i->_lower;
- ret._intervals.push_back( fi );
- }
- }
+ /** Make component intervals noninclusive. */
+ void setExclusiveBounds();
+ /**
+ * Constructs a range where all FieldIntervals and FieldBounds are in
+ * the opposite order of the current range.
+ * NOTE the resulting intervals might not be strictValid().
+ */
+ void reverse( FieldRange &ret ) const;
+
+ string toString() const;
private:
BSONObj addObj( const BSONObj &o );
- void finishOperation( const vector< FieldInterval > &newIntervals, const FieldRange &other );
- vector< FieldInterval > _intervals;
- vector< BSONObj > _objData;
+ void finishOperation( const vector<FieldInterval> &newIntervals, const FieldRange &other );
+ vector<FieldInterval> _intervals;
+ // Owns memory for our BSONElements.
+ vector<BSONObj> _objData;
string _special;
+ bool _singleKey;
};
- // implements query pattern matching, used to determine if a query is
- // similar to an earlier query and should use the same plan
- class QueryPattern {
- public:
- friend class FieldRangeSet;
- enum Type {
- Equality,
- LowerBound,
- UpperBound,
- UpperAndLowerBound
- };
- // for testing only, speed unimportant
- bool operator==( const QueryPattern &other ) const {
- bool less = operator<( other );
- bool more = other.operator<( *this );
- assert( !( less && more ) );
- return !( less || more );
- }
- bool operator!=( const QueryPattern &other ) const {
- return !operator==( other );
- }
- bool operator<( const QueryPattern &other ) const {
- map< string, Type >::const_iterator i = _fieldTypes.begin();
- map< string, Type >::const_iterator j = other._fieldTypes.begin();
- while( i != _fieldTypes.end() ) {
- if ( j == other._fieldTypes.end() )
- return false;
- if ( i->first < j->first )
- return true;
- else if ( i->first > j->first )
- return false;
- if ( i->second < j->second )
- return true;
- else if ( i->second > j->second )
- return false;
- ++i;
- ++j;
- }
- if ( j != other._fieldTypes.end() )
- return true;
- return _sort.woCompare( other._sort ) < 0;
- }
- private:
- QueryPattern() {}
- void setSort( const BSONObj sort ) {
- _sort = normalizeSort( sort );
- }
- BSONObj static normalizeSort( const BSONObj &spec ) {
- if ( spec.isEmpty() )
- return spec;
- int direction = ( spec.firstElement().number() >= 0 ) ? 1 : -1;
- BSONObjIterator i( spec );
- BSONObjBuilder b;
- while( i.moreWithEOO() ) {
- BSONElement e = i.next();
- if ( e.eoo() )
- break;
- b.append( e.fieldName(), direction * ( ( e.number() >= 0 ) ? -1 : 1 ) );
- }
- return b.obj();
- }
- map< string, Type > _fieldTypes;
- BSONObj _sort;
- };
-
- // a BoundList contains intervals specified by inclusive start
- // and end bounds. The intervals should be nonoverlapping and occur in
- // the specified direction of traversal. For example, given a simple index {i:1}
- // and direction +1, one valid BoundList is: (1, 2); (4, 6). The same BoundList
- // would be valid for index {i:-1} with direction -1.
- typedef vector< pair< BSONObj, BSONObj > > BoundList;
+ /**
+ * A BoundList contains intervals specified by inclusive start
+ * and end bounds. The intervals should be nonoverlapping and occur in
+ * the specified direction of traversal. For example, given a simple index {i:1}
+ * and direction +1, one valid BoundList is: (1, 2); (4, 6). The same BoundList
+ * would be valid for index {i:-1} with direction -1.
+ */
+ typedef vector<pair<BSONObj,BSONObj> > BoundList;
- // ranges of fields' value that may be determined from query -- used to
- // determine index limits
+ class QueryPattern;
+
+ /**
+ * A set of FieldRanges determined from constraints on the fields of a query,
+ * that may be used to determine index bounds.
+ */
class FieldRangeSet {
public:
- friend class FieldRangeOrSet;
+ friend class OrRangeGenerator;
friend class FieldRangeVector;
- FieldRangeSet( const char *ns, const BSONObj &query , bool optimize=true );
+ FieldRangeSet( const char *ns, const BSONObj &query , bool singleKey , bool optimize=true );
+
+ /** @return true if there is a nontrivial range for the given field. */
bool hasRange( const char *fieldName ) const {
- map< string, FieldRange >::const_iterator f = _ranges.find( fieldName );
+ map<string, FieldRange>::const_iterator f = _ranges.find( fieldName );
return f != _ranges.end();
}
- const FieldRange &range( const char *fieldName ) const {
- map< string, FieldRange >::const_iterator f = _ranges.find( fieldName );
- if ( f == _ranges.end() )
- return trivialRange();
- return f->second;
- }
- FieldRange &range( const char *fieldName ) {
- map< string, FieldRange >::iterator f = _ranges.find( fieldName );
- if ( f == _ranges.end() )
- return trivialRange();
- return f->second;
- }
- int nNontrivialRanges() const {
- int count = 0;
- for( map< string, FieldRange >::const_iterator i = _ranges.begin(); i != _ranges.end(); ++i ) {
- if ( i->second.nontrivial() )
- ++count;
- }
- return count;
- }
+ /** @return range for the given field. */
+ const FieldRange &range( const char *fieldName ) const;
+ /** @return range for the given field. */
+ FieldRange &range( const char *fieldName );
+ /** @return the number of nontrivial ranges. */
+ int nNontrivialRanges() const;
+ /**
+ * @return true if a match could be possible on every field. Generally this
+ * is not useful information for a single key FieldRangeSet and
+ * matchPossibleForIndex() should be used instead.
+ */
+ bool matchPossible() const;
+ /**
+ * @return true if a match could be possible given the value of _singleKey
+ * and index key 'keyPattern'.
+ * @param keyPattern May be {} or {$natural:1} for a non index scan.
+ */
+ bool matchPossibleForIndex( const BSONObj &keyPattern ) const;
+
const char *ns() const { return _ns; }
- // if fields is specified, order fields of returned object to match those of 'fields'
+
+ /**
+ * @return a simplified query from the extreme values of the nontrivial
+ * fields.
+ * @param fields If specified, the fields of the returned object are
+ * ordered to match those of 'fields'.
+ */
BSONObj simplifiedQuery( const BSONObj &fields = BSONObj() ) const;
- bool matchPossible() const {
- for( map< string, FieldRange >::const_iterator i = _ranges.begin(); i != _ranges.end(); ++i )
- if ( i->second.empty() )
- return false;
- return true;
- }
+
QueryPattern pattern( const BSONObj &sort = BSONObj() ) const;
string getSpecial() const;
- // Btree scanning for a multidimentional key range will yield a
- // multidimensional box. The idea here is that if an 'other'
- // multidimensional box contains the current box we don't have to scan
- // the current box. If the 'other' box contains the current box in
- // all dimensions but one, we can safely subtract the values of 'other'
- // along that one dimension from the values for the current box on the
- // same dimension. In other situations, subtracting the 'other'
- // box from the current box yields a result that is not a box (but
- // rather can be expressed as a union of boxes). We don't support
- // such splitting currently in calculating index ranges. Note that
- // where I have said 'box' above, I actually mean sets of boxes because
- // a field range can consist of multiple intervals.
- const FieldRangeSet &operator-=( const FieldRangeSet &other ) {
- int nUnincluded = 0;
- string unincludedKey;
- map< string, FieldRange >::iterator i = _ranges.begin();
- map< string, FieldRange >::const_iterator j = other._ranges.begin();
- while( nUnincluded < 2 && i != _ranges.end() && j != other._ranges.end() ) {
- int cmp = i->first.compare( j->first );
- if ( cmp == 0 ) {
- if ( i->second <= j->second ) {
- // nothing
- }
- else {
- ++nUnincluded;
- unincludedKey = i->first;
- }
- ++i;
- ++j;
- }
- else if ( cmp < 0 ) {
- ++i;
- }
- else {
- // other has a bound we don't, nothing can be done
- return *this;
- }
- }
- if ( j != other._ranges.end() ) {
- // other has a bound we don't, nothing can be done
- return *this;
- }
- if ( nUnincluded > 1 ) {
- return *this;
- }
- if ( nUnincluded == 0 ) {
- makeEmpty();
- return *this;
- }
- // nUnincluded == 1
- _ranges[ unincludedKey ] -= other._ranges[ unincludedKey ];
- appendQueries( other );
- return *this;
- }
- const FieldRangeSet &operator&=( const FieldRangeSet &other ) {
- map< string, FieldRange >::iterator i = _ranges.begin();
- map< string, FieldRange >::const_iterator j = other._ranges.begin();
- while( i != _ranges.end() && j != other._ranges.end() ) {
- int cmp = i->first.compare( j->first );
- if ( cmp == 0 ) {
- i->second &= j->second;
- ++i;
- ++j;
- }
- else if ( cmp < 0 ) {
- ++i;
- }
- else {
- _ranges[ j->first ] = j->second;
- ++j;
- }
- }
- while( j != other._ranges.end() ) {
- _ranges[ j->first ] = j->second;
- ++j;
- }
- appendQueries( other );
- return *this;
- }
- // TODO get rid of this
+
+ /**
+ * @return a FieldRangeSet approximation of the documents in 'this' but
+ * not in 'other'. The approximation will be a superset of the documents
+ * in 'this' but not 'other'.
+ */
+ const FieldRangeSet &operator-=( const FieldRangeSet &other );
+ /** @return intersection of 'this' with 'other'. */
+ const FieldRangeSet &operator&=( const FieldRangeSet &other );
+
+ /**
+ * @return an ordered list of bounds generated using an index key pattern
+ * and traversal direction.
+ *
+ * NOTE This function is deprecated in the query optimizer and is currently
+ * only used by the sharding code.
+ */
BoundList indexBounds( const BSONObj &keyPattern, int direction ) const;
/**
- * @param return - A new FieldRangeSet based on this FieldRangeSet, but with only
+ * @return - A new FieldRangeSet based on this FieldRangeSet, but with only
* a subset of the fields.
* @param fields - Only fields which are represented as field names in this object
* will be included in the returned FieldRangeSet.
*/
FieldRangeSet *subset( const BSONObj &fields ) const;
+
+ bool singleKey() const { return _singleKey; }
+
+ BSONObj originalQuery() const { return _queries[ 0 ]; }
private:
- void appendQueries( const FieldRangeSet &other ) {
- for( vector< BSONObj >::const_iterator i = other._queries.begin(); i != other._queries.end(); ++i ) {
- _queries.push_back( *i );
- }
- }
- void makeEmpty() {
- for( map< string, FieldRange >::iterator i = _ranges.begin(); i != _ranges.end(); ++i ) {
- i->second.makeEmpty();
- }
- }
+ void appendQueries( const FieldRangeSet &other );
+ void makeEmpty();
void processQueryField( const BSONElement &e, bool optimize );
void processOpElement( const char *fieldName, const BSONElement &f, bool isNot, bool optimize );
- static FieldRange *trivialRange_;
- static FieldRange &trivialRange();
- mutable map< string, FieldRange > _ranges;
+ static FieldRange *__singleKeyTrivialRange;
+ static FieldRange *__multiKeyTrivialRange;
+ const FieldRange &trivialRange() const;
+ map<string,FieldRange> _ranges;
const char *_ns;
- // make sure memory for FieldRange BSONElements is owned
- vector< BSONObj > _queries;
+ // Owns memory for FieldRange BSONElements.
+ vector<BSONObj> _queries;
+ bool _singleKey;
};
+ class NamespaceDetails;
+
+ /**
+ * A pair of FieldRangeSets, one representing constraints for single key
+ * indexes and the other representing constraints for multi key indexes and
+ * unindexed scans. In several member functions the caller is asked to
+ * supply an index so that the implementation may utilize the proper
+ * FieldRangeSet and return results that are appropriate with respect to that
+ * supplied index.
+ */
+ class FieldRangeSetPair {
+ public:
+ FieldRangeSetPair( const char *ns, const BSONObj &query, bool optimize=true )
+ :_singleKey( ns, query, true, optimize ), _multiKey( ns, query, false, optimize ) {}
+
+ /**
+ * @return the appropriate single or multi key FieldRangeSet for the specified index.
+ * @param idxNo -1 for non index scan.
+ */
+ const FieldRangeSet &frsForIndex( const NamespaceDetails* nsd, int idxNo ) const;
+
+ /** @return a field range in the single key FieldRangeSet. */
+ const FieldRange &singleKeyRange( const char *fieldName ) const {
+ return _singleKey.range( fieldName );
+ }
+ /** @return true if the range limits are equivalent to an empty query. */
+ bool noNontrivialRanges() const;
+ /** @return false if a match is impossible regardless of index. */
+ bool matchPossible() const { return _multiKey.matchPossible(); }
+ /**
+ * @return false if a match is impossible on the specified index.
+ * @param idxNo -1 for non index scan.
+ */
+ bool matchPossibleForIndex( NamespaceDetails *d, int idxNo, const BSONObj &keyPattern ) const;
+
+ const char *ns() const { return _singleKey.ns(); }
+
+ string getSpecial() const { return _singleKey.getSpecial(); }
+
+ /** Intersect with another FieldRangeSetPair. */
+ FieldRangeSetPair &operator&=( const FieldRangeSetPair &other );
+ /**
+ * Subtract a FieldRangeSet, generally one expressing a range that has
+ * already been scanned.
+ */
+ FieldRangeSetPair &operator-=( const FieldRangeSet &scanned );
+
+ BoundList singleKeyIndexBounds( const BSONObj &keyPattern, int direction ) const {
+ return _singleKey.indexBounds( keyPattern, direction );
+ }
+
+ BSONObj originalQuery() const { return _singleKey.originalQuery(); }
+
+ private:
+ FieldRangeSetPair( const FieldRangeSet &singleKey, const FieldRangeSet &multiKey )
+ :_singleKey( singleKey ), _multiKey( multiKey ) {}
+ void assertValidIndex( const NamespaceDetails *d, int idxNo ) const;
+ void assertValidIndexOrNoIndex( const NamespaceDetails *d, int idxNo ) const;
+ /** matchPossibleForIndex() must be true. */
+ BSONObj simplifiedQueryForIndex( NamespaceDetails *d, int idxNo, const BSONObj &keyPattern ) const;
+ FieldRangeSet _singleKey;
+ FieldRangeSet _multiKey;
+ friend class OrRangeGenerator;
+ friend struct QueryUtilIndexed;
+ };
+
class IndexSpec;
/**
- * This class manages the ranges of valid element values for each field in
- * an ordered list of signed fields corresponding to an index specification.
+ * An ordered list of fields and their FieldRanges, corresponding to valid
+ * index keys for a given index spec.
*/
class FieldRangeVector {
public:
/**
* @param frs The valid ranges for all fields, as defined by the query spec
- * @prarm keyPattern The index key pattern
+ * @param indexSpec The index spec (key pattern and info)
* @param direction The direction of index traversal
*/
- FieldRangeVector( const FieldRangeSet &frs, const BSONObj &keyPattern, int direction )
- :_keyPattern( keyPattern ), _direction( direction >= 0 ? 1 : -1 ) {
- _queries = frs._queries;
- BSONObjIterator i( _keyPattern );
- while( i.more() ) {
- BSONElement e = i.next();
- int number = (int) e.number(); // returns 0.0 if not numeric
- bool forward = ( ( number >= 0 ? 1 : -1 ) * ( direction >= 0 ? 1 : -1 ) > 0 );
- if ( forward ) {
- _ranges.push_back( frs.range( e.fieldName() ) );
- }
- else {
- _ranges.push_back( FieldRange() );
- frs.range( e.fieldName() ).reverse( _ranges.back() );
- }
- assert( !_ranges.back().empty() );
- }
- uassert( 13385, "combinatorial limit of $in partitioning of result set exceeded", size() < 1000000 );
- }
- long long size() {
- long long ret = 1;
- for( vector< FieldRange >::const_iterator i = _ranges.begin(); i != _ranges.end(); ++i ) {
- ret *= i->intervals().size();
- }
- return ret;
- }
- BSONObj startKey() const {
- BSONObjBuilder b;
- for( vector< FieldRange >::const_iterator i = _ranges.begin(); i != _ranges.end(); ++i ) {
- const FieldInterval &fi = i->intervals().front();
- b.appendAs( fi._lower._bound, "" );
- }
- return b.obj();
- }
- BSONObj endKey() const {
- BSONObjBuilder b;
- for( vector< FieldRange >::const_iterator i = _ranges.begin(); i != _ranges.end(); ++i ) {
- const FieldInterval &fi = i->intervals().back();
- b.appendAs( fi._upper._bound, "" );
- }
- return b.obj();
- }
- BSONObj obj() const {
- BSONObjBuilder b;
- BSONObjIterator k( _keyPattern );
- for( int i = 0; i < (int)_ranges.size(); ++i ) {
- BSONArrayBuilder a( b.subarrayStart( k.next().fieldName() ) );
- for( vector< FieldInterval >::const_iterator j = _ranges[ i ].intervals().begin();
- j != _ranges[ i ].intervals().end(); ++j ) {
- a << BSONArray( BSON_ARRAY( j->_lower._bound << j->_upper._bound ).clientReadable() );
- }
- a.done();
- }
- return b.obj();
- }
+ FieldRangeVector( const FieldRangeSet &frs, const IndexSpec &indexSpec, int direction );
+
+ /** @return the number of index ranges represented by 'this' */
+ long long size();
+ /** @return starting point for an index traversal. */
+ BSONObj startKey() const;
+ /** @return end point for an index traversal. */
+ BSONObj endKey() const;
+ /** @return a client readable representation of 'this' */
+ BSONObj obj() const;
+
/**
* @return true iff the provided document matches valid ranges on all
* of this FieldRangeVector's fields, which is the case iff this document
@@ -421,144 +316,109 @@ namespace mongo {
* FieldRangeVector. This function is used for $or clause deduping.
*/
bool matches( const BSONObj &obj ) const;
- class Iterator {
- public:
- Iterator( const FieldRangeVector &v ) : _v( v ), _i( _v._ranges.size(), -1 ), _cmp( _v._ranges.size(), 0 ), _inc( _v._ranges.size(), false ), _after() {
- }
- static BSONObj minObject() {
- BSONObjBuilder b;
- b.appendMinKey( "" );
- return b.obj();
- }
- static BSONObj maxObject() {
- BSONObjBuilder b;
- b.appendMaxKey( "" );
- return b.obj();
- }
- bool advance() {
- int i = _i.size() - 1;
- while( i >= 0 && _i[ i ] >= ( (int)_v._ranges[ i ].intervals().size() - 1 ) ) {
- --i;
- }
- if( i >= 0 ) {
- _i[ i ]++;
- for( unsigned j = i + 1; j < _i.size(); ++j ) {
- _i[ j ] = 0;
- }
- }
- else {
- _i[ 0 ] = _v._ranges[ 0 ].intervals().size();
- }
- return ok();
- }
- // return value
- // -2 end of iteration
- // -1 no skipping
- // >= 0 skip parameter
- int advance( const BSONObj &curr );
- const vector< const BSONElement * > &cmp() const { return _cmp; }
- const vector< bool > &inc() const { return _inc; }
- bool after() const { return _after; }
- void prepDive();
- void setZero( int i ) {
- for( int j = i; j < (int)_i.size(); ++j ) {
- _i[ j ] = 0;
- }
- }
- void setMinus( int i ) {
- for( int j = i; j < (int)_i.size(); ++j ) {
- _i[ j ] = -1;
- }
- }
- bool ok() {
- return _i[ 0 ] < (int)_v._ranges[ 0 ].intervals().size();
- }
- BSONObj startKey() {
- BSONObjBuilder b;
- for( int unsigned i = 0; i < _i.size(); ++i ) {
- const FieldInterval &fi = _v._ranges[ i ].intervals()[ _i[ i ] ];
- b.appendAs( fi._lower._bound, "" );
- }
- return b.obj();
- }
- // temp
- BSONObj endKey() {
- BSONObjBuilder b;
- for( int unsigned i = 0; i < _i.size(); ++i ) {
- const FieldInterval &fi = _v._ranges[ i ].intervals()[ _i[ i ] ];
- b.appendAs( fi._upper._bound, "" );
- }
- return b.obj();
- }
- // check
- private:
- const FieldRangeVector &_v;
- vector< int > _i;
- vector< const BSONElement* > _cmp;
- vector< bool > _inc;
- bool _after;
- };
+
+ /**
+ * @return first key of 'obj' that would be encountered by a forward
+ * index scan using this FieldRangeVector, BSONObj() if no such key.
+ */
+ BSONObj firstMatch( const BSONObj &obj ) const;
+
private:
int matchingLowElement( const BSONElement &e, int i, bool direction, bool &lowEquality ) const;
bool matchesElement( const BSONElement &e, int i, bool direction ) const;
- vector< FieldRange > _ranges;
- BSONObj _keyPattern;
+ bool matchesKey( const BSONObj &key ) const;
+ vector<FieldRange> _ranges;
+ const IndexSpec &_indexSpec;
int _direction;
- vector< BSONObj > _queries; // make sure mem owned
- // This IndexSpec is lazily constructed directly from _keyPattern if needed.
- mutable shared_ptr< IndexSpec > _indexSpec;
+ vector<BSONObj> _queries; // make sure mem owned
+ friend class FieldRangeVectorIterator;
};
-
- // generages FieldRangeSet objects, accounting for or clauses
- class FieldRangeOrSet {
+
+ /**
+ * Helper class for iterating through an ordered representation of keys
+ * to find those keys that match a specified FieldRangeVector.
+ */
+ class FieldRangeVectorIterator {
public:
- FieldRangeOrSet( const char *ns, const BSONObj &query , bool optimize=true );
- // if there's a useless or clause, we won't use or ranges to help with scanning
- bool orFinished() const { return _orFound && _orSets.empty(); }
- /**
- * Removes the top or clause, which would have been recently scanned, and
- * removes the field ranges it covers from all subsequent or clauses. As a
- * side effect, this function may invalidate the return values of topFrs()
- * calls made before this function was called.
- * @param indexSpec - Keys of the index that was used to satisfy the last or
- * clause. Used to determine the range of keys that were scanned. If
- * empty we do not constrain the previous clause's ranges using index keys,
- * which may reduce opportunities for range elimination.
- */
- void popOrClause( const BSONObj &indexSpec = BSONObj() );
- FieldRangeSet *topFrs() const {
- FieldRangeSet *ret = new FieldRangeSet( _baseSet );
- if (_orSets.size()) {
- *ret &= _orSets.front();
- }
- return ret;
+ FieldRangeVectorIterator( const FieldRangeVector &v ) : _v( v ), _i( _v._ranges.size(), -1 ), _cmp( _v._ranges.size(), 0 ), _inc( _v._ranges.size(), false ), _after() {
}
- // while the original bounds are looser, they are composed of fewer
- // ranges and it is faster to do operations with them; when they can be
- // used instead of more precise bounds, they should
- FieldRangeSet *topFrsOriginal() const {
- FieldRangeSet *ret = new FieldRangeSet( _baseSet );
- if (_originalOrSets.size()) {
- *ret &= _originalOrSets.front();
- }
- return ret;
+ static BSONObj minObject() {
+ BSONObjBuilder b; b.appendMinKey( "" );
+ return b.obj();
}
- void allClausesSimplified( vector< BSONObj > &ret ) const {
- for( list< FieldRangeSet >::const_iterator i = _orSets.begin(); i != _orSets.end(); ++i ) {
- if ( i->matchPossible() ) {
- ret.push_back( i->simplifiedQuery() );
- }
- }
+ static BSONObj maxObject() {
+ BSONObjBuilder b; b.appendMaxKey( "" );
+ return b.obj();
}
+ /**
+ * @return Suggested advance method, based on current key.
+ * -2 Iteration is complete, no need to advance.
+ * -1 Advance to the next key, without skipping.
+ * >=0 Skip parameter. If @return is r, skip to the key comprised
+ * of the first r elements of curr followed by the (r+1)th and
+ * remaining elements of cmp() (with inclusivity specified by
+ * the (r+1)th and remaining elements of inc()). If after() is
+ * true, skip past this key not to it.
+ */
+ int advance( const BSONObj &curr );
+ const vector<const BSONElement *> &cmp() const { return _cmp; }
+ const vector<bool> &inc() const { return _inc; }
+ bool after() const { return _after; }
+ void prepDive();
+ void setZero( int i ) { for( int j = i; j < (int)_i.size(); ++j ) _i[ j ] = 0; }
+ void setMinus( int i ) { for( int j = i; j < (int)_i.size(); ++j ) _i[ j ] = -1; }
+ bool ok() { return _i[ 0 ] < (int)_v._ranges[ 0 ].intervals().size(); }
+ BSONObj startKey();
+ // temp
+ BSONObj endKey();
+ private:
+ const FieldRangeVector &_v;
+ vector<int> _i;
+ vector<const BSONElement*> _cmp;
+ vector<bool> _inc;
+ bool _after;
+ };
+
+ /**
+ * As we iterate through $or clauses this class generates a FieldRangeSetPair
+ * for the current $or clause, in some cases by excluding ranges that were
+ * included in a previous clause.
+ */
+ class OrRangeGenerator {
+ public:
+ OrRangeGenerator( const char *ns, const BSONObj &query , bool optimize=true );
+
+ /**
+ * @return true iff we are done scanning $or clauses. If there's a
+ * useless $or clause, we won't use $or index ranges to help with scanning.
+ */
+ bool orFinished() const { return _orFound && _orSets.empty(); }
+ /** Iterates to the next $or clause by removing the current $or clause. */
+ void popOrClause( NamespaceDetails *nsd, int idxNo, const BSONObj &keyPattern );
+ void popOrClauseSingleKey();
+ /** @return FieldRangeSetPair for the current $or clause. */
+ FieldRangeSetPair *topFrsp() const;
+ /**
+ * @return original FieldRangeSetPair for the current $or clause. While the
+ * original bounds are looser, they are composed of fewer ranges and it
+ * is faster to do operations with them; when they can be used instead of
+ * more precise bounds, they should.
+ */
+ FieldRangeSetPair *topFrspOriginal() const;
+
string getSpecial() const { return _baseSet.getSpecial(); }
bool moreOrClauses() const { return !_orSets.empty(); }
private:
- FieldRangeSet _baseSet;
- list< FieldRangeSet > _orSets;
- list< FieldRangeSet > _originalOrSets;
- list< FieldRangeSet > _oldOrSets; // make sure memory is owned
+ void assertMayPopOrClause();
+ void popOrClause( const FieldRangeSet *toDiff, NamespaceDetails *d = 0, int idxNo = -1, const BSONObj &keyPattern = BSONObj() );
+ FieldRangeSetPair _baseSet;
+ list<FieldRangeSetPair> _orSets;
+ list<FieldRangeSetPair> _originalOrSets;
+ // ensure memory is owned
+ list<FieldRangeSetPair> _oldOrSets;
bool _orFound;
+ friend struct QueryUtilIndexed;
};
/** returns a string that when used as a matcher, would match a super set of regex()
@@ -575,3 +435,5 @@ namespace mongo {
long long applySkipLimit( long long num , const BSONObj& cmd );
} // namespace mongo
+
+#include "queryutil-inl.h"
diff --git a/db/record.cpp b/db/record.cpp
new file mode 100644
index 0000000..51dc520
--- /dev/null
+++ b/db/record.cpp
@@ -0,0 +1,230 @@
+// record.cpp
+
+#include "pch.h"
+#include "pdfile.h"
+#include "../util/processinfo.h"
+#include "../util/net/listen.h"
+
+namespace mongo {
+
+ namespace ps {
+
+ enum State {
+ In , Out, Unk
+ };
+
+ enum Constants {
+ SliceSize = 65536 ,
+ MaxChain = 20 , // intentionally very low
+ NumSlices = 10 ,
+ RotateTimeSecs = 90
+ };
+
+ int hash( size_t region ) {
+ return
+ abs( ( ( 7 + (int)(region & 0xFFFF) )
+ * ( 11 + (int)( ( region >> 16 ) & 0xFFFF ) )
+#if defined(_WIN64) || defined(__amd64__)
+ * ( 13 + (int)( ( region >> 32 ) & 0xFFFF ) )
+ * ( 17 + (int)( ( region >> 48 ) & 0xFFFF ) )
+#endif
+ ) % SliceSize );
+ }
+
+
+ /**
+ * simple hash map for region -> status
+ * this constitutes a single slice of time
+ * it does chaining, but very short chains
+ */
+ class Slice {
+
+ struct Entry {
+ size_t region;
+ unsigned long long value;
+ };
+
+ public:
+
+ Slice() {
+ reset();
+ }
+
+ void reset() {
+ memset( _data , 0 , SliceSize * sizeof(Entry) );
+ }
+
+ State get( int regionHash , size_t region , short offset ) {
+ DEV assert( hash( region ) == regionHash );
+
+ Entry * e = _get( regionHash , region , false );
+ if ( ! e )
+ return Unk;
+
+ return ( e->value & ( ((unsigned long long)1) << offset ) ) ? In : Out;
+ }
+
+ /**
+ * @return true if added, false if full
+ */
+ bool in( int regionHash , size_t region , short offset ) {
+ DEV assert( hash( region ) == regionHash );
+
+ Entry * e = _get( regionHash , region , true );
+ if ( ! e )
+ return false;
+
+ e->value |= ((unsigned long long)1) << offset;
+ return true;
+ }
+
+ private:
+
+ Entry* _get( int start , size_t region , bool add ) {
+ for ( int i=0; i<MaxChain; i++ ) {
+
+ int bucket = ( start + i ) % SliceSize;
+
+ if ( _data[bucket].region == 0 ) {
+ if ( ! add )
+ return 0;
+
+ _data[bucket].region = region;
+ return &_data[bucket];
+ }
+
+ if ( _data[bucket].region == region ) {
+ return &_data[bucket];
+ }
+ }
+ return 0;
+ }
+
+ Entry _data[SliceSize];
+ };
+
+
+ /**
+ * this contains many slices of time
+ * the idea is that you record memory status in the current time slice,
+ * and after a certain period of time it rolls off so we check again
+ */
+ class Rolling {
+
+ public:
+ Rolling() {
+ _curSlice = 0;
+ _lastRotate = Listener::getElapsedTimeMillis();
+ }
+
+
+ /**
+ * after this call, we assume the page is in ram
+ * @param doHalf if this is a known good access, only check half of the slices, so the page is re-recorded in the current slice sooner
+ * @return whether we know the page is in ram
+ */
+ bool access( size_t region , short offset , bool doHalf ) {
+ int regionHash = hash(region);
+
+ scoped_spinlock lk( _lock );
+
+ static int rarely_count = 0;
+ if ( rarely_count++ % 2048 == 0 ) {
+ long long now = Listener::getElapsedTimeMillis();
+ RARELY if ( now == 0 ) {
+ tlog() << "warning Listener::getElapsedTimeMillis returning 0ms" << endl;
+ }
+
+ if ( now - _lastRotate > ( 1000 * RotateTimeSecs ) ) {
+ _rotate();
+ }
+ }
+
+ for ( int i=0; i<NumSlices / ( doHalf ? 2 : 1 ); i++ ) {
+ int pos = (_curSlice+i)%NumSlices;
+ State s = _slices[pos].get( regionHash , region , offset );
+
+ if ( s == In )
+ return true;
+
+ if ( s == Out ) {
+ _slices[pos].in( regionHash , region , offset );
+ return false;
+ }
+ }
+
+ // we weren't in any slice
+ // so add to cur
+ if ( ! _slices[_curSlice].in( regionHash , region , offset ) ) {
+ _rotate();
+ _slices[_curSlice].in( regionHash , region , offset );
+ }
+ return false;
+ }
+
+ private:
+
+ void _rotate() {
+ _curSlice = ( _curSlice + 1 ) % NumSlices;
+ _slices[_curSlice].reset();
+ _lastRotate = Listener::getElapsedTimeMillis();
+ }
+
+ int _curSlice;
+ long long _lastRotate;
+ Slice _slices[NumSlices];
+
+ SpinLock _lock;
+ } rolling;
+
+ }
+
+ bool Record::MemoryTrackingEnabled = true;
+
+
+ volatile int __record_touch_dummy = 1; // this is used to make sure the compiler doesn't get too smart on us
+ void Record::touch( bool entireRecrd ) {
+
+ if ( lengthWithHeaders > HeaderSize ) { // this also makes sure lengthWithHeaders is in memory
+ char * addr = data;
+ char * end = data + netLength();
+ for ( ; addr <= end ; addr += 2048 ) {
+ __record_touch_dummy += addr[0];
+
+ break; // TODO: remove this, pending SERVER-3711
+
+ if ( ! entireRecrd )
+ break;
+ }
+ }
+
+ }
+
+ bool Record::likelyInPhysicalMemory() {
+ if ( ! MemoryTrackingEnabled )
+ return true;
+
+ static bool blockSupported = ProcessInfo::blockCheckSupported();
+
+ const size_t page = (size_t)data >> 12;
+ const size_t region = page >> 6;
+ const size_t offset = page & 0x3f;
+
+ if ( ps::rolling.access( region , offset , false ) )
+ return true;
+
+ if ( ! blockSupported )
+ return false;
+ return ProcessInfo::blockInMemory( data );
+ }
+
+ Record* Record::accessed() {
+ const size_t page = (size_t)data >> 12;
+ const size_t region = page >> 6;
+ const size_t offset = page & 0x3f;
+
+ ps::rolling.access( region , offset , true );
+ return this;
+ }
+
+}
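
A standalone sketch of the address arithmetic used by Record::likelyInPhysicalMemory() and Record::accessed() above, illustrative only and not part of this patch: an address maps to a 4KB page (>> 12), pages are grouped 64 to a region (>> 6), and the page's position within its region (& 0x3f) picks one bit of a 64-bit per-region mask, the same layout Slice::Entry::value uses. The printed values depend on where the buffer happens to land in memory.

#include <cstdio>
#include <cstddef>

int main() {
    char buf[ 4096 * 3 ];
    const char *data = buf + 5000;                  // an address somewhere inside the buffer

    const size_t page   = (size_t)data >> 12;       // 4KB page number
    const size_t region = page >> 6;                // 64 consecutive pages per region
    const size_t offset = page & 0x3f;              // this page's bit index within its region

    unsigned long long mask = 0;                    // per-region bitmap, like Slice::Entry::value
    mask |= ((unsigned long long)1) << offset;      // record the page as seen (cf. Slice::in)
    bool seen = ( mask & ( ((unsigned long long)1) << offset ) ) != 0;   // query it back (cf. Slice::get)

    printf( "page=%llu region=%llu offset=%llu seen=%d\n",
            (unsigned long long)page, (unsigned long long)region,
            (unsigned long long)offset, (int)seen );
    return 0;
}
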
diff --git a/db/repl.cpp b/db/repl.cpp
index b14034d..a18d725 100644
--- a/db/repl.cpp
+++ b/db/repl.cpp
@@ -26,27 +26,30 @@
local.sources - indicates what sources we pull from as a "slave", and the last update of each
local.oplog.$main - our op log as "master"
local.dbinfo.<dbname> - no longer used???
- local.pair.startup - can contain a special value indicating for a pair that we have the master copy.
+ local.pair.startup - [deprecated] can contain a special value indicating for a pair that we have the master copy.
used when replacing other half of the pair which has permanently failed.
- local.pair.sync - { initialsynccomplete: 1 }
+ local.pair.sync - [deprecated] { initialsynccomplete: 1 }
*/
#include "pch.h"
#include "jsobj.h"
#include "../util/goodies.h"
#include "repl.h"
-#include "../util/message.h"
+#include "../util/net/message.h"
#include "../util/background.h"
#include "../client/dbclient.h"
#include "../client/connpool.h"
#include "pdfile.h"
-#include "query.h"
+#include "ops/query.h"
#include "db.h"
#include "commands.h"
#include "security.h"
#include "cmdline.h"
#include "repl_block.h"
#include "repl/rs.h"
+#include "replutil.h"
+#include "repl/connections.h"
+#include "ops/update.h"
namespace mongo {
@@ -57,11 +60,6 @@ namespace mongo {
volatile int syncing = 0;
static volatile int relinquishSyncingSome = 0;
- /* if true replace our peer in a replication pair -- don't worry about if his
- local.oplog.$main is empty.
- */
- bool replacePeer = false;
-
/* "dead" means something really bad happened like replication falling completely out of sync.
when non-null, we are dead and the string is informational
*/
@@ -69,23 +67,10 @@ namespace mongo {
time_t lastForcedResync = 0;
- IdTracker &idTracker = *( new IdTracker() );
-
} // namespace mongo
-#include "replpair.h"
-
namespace mongo {
- PairSync *pairSync = new PairSync();
- bool getInitialSyncCompleted() {
- return pairSync->initialSyncCompleted();
- }
-
- /* --- ReplPair -------------------------------- */
-
- ReplPair *replPair = 0;
-
/* output by the web console */
const char *replInfo = "";
struct ReplInfo {
@@ -97,116 +82,6 @@ namespace mongo {
}
};
- void ReplPair::setMaster(int n, const char *_comment ) {
- if ( n == State_Master && !getInitialSyncCompleted() )
- return;
- info = _comment;
- if ( n != state && !cmdLine.quiet )
- tlog() << "pair: setting master=" << n << " was " << state << endl;
- state = n;
- }
-
- /* peer unreachable, try our arbiter */
- void ReplPair::arbitrate() {
- ReplInfo r("arbitrate");
-
- if ( arbHost == "-" ) {
- // no arbiter. we are up, let's assume partner is down and network is not partitioned.
- setMasterLocked(State_Master, "remote unreachable");
- return;
- }
-
- auto_ptr<DBClientConnection> conn( newClientConnection() );
- string errmsg;
- if ( !conn->connect(arbHost.c_str(), errmsg) ) {
- tlog() << "repl: cantconn arbiter " << errmsg << endl;
- setMasterLocked(State_CantArb, "can't connect to arb");
- return;
- }
-
- negotiate( conn.get(), "arbiter" );
- }
-
- /* --------------------------------------------- */
-
- class CmdReplacePeer : public Command {
- public:
- virtual bool slaveOk() const {
- return true;
- }
- virtual bool adminOnly() const {
- return true;
- }
- virtual LockType locktype() const { return WRITE; }
- void help(stringstream&h) const { h << "replace a node in a replica pair"; }
- CmdReplacePeer() : Command("replacePeer", false, "replacepeer") { }
- virtual bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
- if ( replPair == 0 ) {
- errmsg = "not paired";
- return false;
- }
- if ( !getInitialSyncCompleted() ) {
- errmsg = "not caught up cannot replace peer";
- return false;
- }
- if ( syncing < 0 ) {
- errmsg = "replacepeer already invoked";
- return false;
- }
- Timer t;
- while ( 1 ) {
- if ( syncing == 0 || t.millis() > 30000 )
- break;
- {
- dbtemprelease t;
- relinquishSyncingSome = 1;
- sleepmillis(1);
- }
- }
- if ( syncing ) {
- assert( syncing > 0 );
- errmsg = "timeout waiting for sync() to finish";
- return false;
- }
- {
- ReplSource::SourceVector sources;
- ReplSource::loadAll(sources);
- if ( sources.size() != 1 ) {
- errmsg = "local.sources.count() != 1, cannot replace peer";
- return false;
- }
- }
- {
- Helpers::emptyCollection("local.sources");
- BSONObj o = fromjson("{\"replacepeer\":1}");
- Helpers::putSingleton("local.pair.startup", o);
- }
- syncing = -1;
- replAllDead = "replacepeer invoked -- adjust local.sources hostname then restart this db process";
- result.append("info", "adjust local.sources hostname; db restart now required");
- return true;
- }
- } cmdReplacePeer;
-
- class CmdForceDead : public Command {
- public:
- virtual bool slaveOk() const {
- return true;
- }
- virtual bool adminOnly() const {
- return true;
- }
- virtual void help(stringstream& h) const { h << "internal"; }
- virtual LockType locktype() const { return WRITE; }
- CmdForceDead() : Command("forcedead") { }
- virtual bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
- replAllDead = "replication forced to stop by 'forcedead' command";
- log() << "*********************************************************\n";
- log() << "received 'forcedead' command, replication forced to stop" << endl;
- return true;
- }
- } cmdForceDead;
-
/* operator requested resynchronization of replication (on the slave). { resync : 1 } */
class CmdResync : public Command {
public:
@@ -220,7 +95,7 @@ namespace mongo {
virtual LockType locktype() const { return WRITE; }
void help(stringstream&h) const { h << "resync (from scratch) an out of date replica slave.\nhttp://www.mongodb.org/display/DOCS/Master+Slave"; }
CmdResync() : Command("resync") { }
- virtual bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ virtual bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
if( cmdLine.usingReplSets() ) {
errmsg = "resync command not currently supported with replica sets. See RS102 info in the mongodb documentations";
result.append("info", "http://www.mongodb.org/display/DOCS/Resyncing+a+Very+Stale+Replica+Set+Member");
@@ -265,7 +140,7 @@ namespace mongo {
} cmdResync;
bool anyReplEnabled() {
- return replPair || replSettings.slave || replSettings.master || theReplSet;
+ return replSettings.slave || replSettings.master || theReplSet;
}
bool replAuthenticate(DBClientBase *conn);
@@ -276,7 +151,7 @@ namespace mongo {
if( theReplSet == 0 ) {
result.append("ismaster", false);
result.append("secondary", false);
- result.append("info", ReplSet::startupStatusMsg);
+ result.append("info", ReplSet::startupStatusMsg.get());
result.append( "isreplicaset" , true );
return;
}
@@ -287,21 +162,9 @@ namespace mongo {
if ( replAllDead ) {
result.append("ismaster", 0);
- if( authed ) {
- if ( replPair )
- result.append("remote", replPair->remote);
- }
string s = string("dead: ") + replAllDead;
result.append("info", s);
}
- else if ( replPair ) {
- result.append("ismaster", replPair->state);
- if( authed ) {
- result.append("remote", replPair->remote);
- if ( !replPair->info.empty() )
- result.append("info", replPair->info.toString());
- }
- }
else {
result.appendBool("ismaster", _isMaster() );
}
@@ -369,7 +232,7 @@ namespace mongo {
}
virtual LockType locktype() const { return NONE; }
CmdIsMaster() : Command("isMaster", true, "ismaster") { }
- virtual bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool /*fromRepl*/) {
+ virtual bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool /*fromRepl*/) {
/* currently request to arbiter is (somewhat arbitrarily) an ismaster request that is not
authenticated.
we allow unauthenticated ismaster but we aren't as verbose informationally if
@@ -383,159 +246,11 @@ namespace mongo {
}
} cmdismaster;
- class CmdIsInitialSyncComplete : public Command {
- public:
- virtual bool requiresAuth() { return false; }
- virtual bool slaveOk() const {
- return true;
- }
- virtual LockType locktype() const { return NONE; }
- CmdIsInitialSyncComplete() : Command( "isinitialsynccomplete" ) {}
- virtual bool run(const string&, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool /*fromRepl*/) {
- result.appendBool( "initialsynccomplete", getInitialSyncCompleted() );
- return true;
- }
- } cmdisinitialsynccomplete;
-
- /* negotiate who is master
-
- -1=not set (probably means we just booted)
- 0=was slave
- 1=was master
-
- remote,local -> new remote,local
- !1,1 -> 0,1
- 1,!1 -> 1,0
- -1,-1 -> dominant->1, nondom->0
- 0,0 -> dominant->1, nondom->0
- 1,1 -> dominant->1, nondom->0
-
- { negotiatemaster:1, i_was:<state>, your_name:<hostname> }
- returns:
- { ok:1, you_are:..., i_am:... }
- */
- class CmdNegotiateMaster : public Command {
- public:
- CmdNegotiateMaster() : Command("negotiatemaster") { }
- virtual bool slaveOk() const {
- return true;
- }
- virtual bool adminOnly() const {
- return true;
- }
- virtual LockType locktype() const { return WRITE; }
- virtual bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
- if ( replPair == 0 ) {
- massert( 10383 , "Another mongod instance believes incorrectly that this node is its peer", !cmdObj.getBoolField( "fromArbiter" ) );
- // assume that we are an arbiter and should forward the request
- string host = cmdObj.getStringField("your_name");
- int port = cmdObj.getIntField( "your_port" );
- if ( port == INT_MIN ) {
- errmsg = "no port specified";
- problem() << errmsg << endl;
- return false;
- }
- stringstream ss;
- ss << host << ":" << port;
- string remote = ss.str();
- BSONObj ret;
- {
- dbtemprelease t;
- auto_ptr<DBClientConnection> conn( new DBClientConnection() );
- if ( !conn->connect( remote.c_str(), errmsg ) ) {
- result.append( "you_are", ReplPair::State_Master );
- return true;
- }
- BSONObjBuilder forwardCommand;
- forwardCommand.appendElements( cmdObj );
- forwardCommand.appendBool( "fromArbiter", true );
- ret = conn->findOne( "admin.$cmd", forwardCommand.done() );
- }
- BSONObjIterator i( ret );
- while( i.moreWithEOO() ) {
- BSONElement e = i.next();
- if ( e.eoo() )
- break;
- if ( e.fieldName() != string( "ok" ) )
- result.append( e );
- }
- return ret["ok"].trueValue();
- }
-
- int was = cmdObj.getIntField("i_was");
- string myname = cmdObj.getStringField("your_name");
- if ( myname.empty() || was < -3 ) {
- errmsg = "your_name/i_was not specified";
- return false;
- }
-
- int N = ReplPair::State_Negotiating;
- int M = ReplPair::State_Master;
- int S = ReplPair::State_Slave;
-
- if ( !replPair->dominant( myname ) ) {
- result.append( "you_are", N );
- result.append( "i_am", replPair->state );
- return true;
- }
-
- int me, you;
- if ( !getInitialSyncCompleted() || ( replPair->state != M && was == M ) ) {
- me=S;
- you=M;
- }
- else {
- me=M;
- you=S;
- }
- replPair->setMaster( me, "CmdNegotiateMaster::run()" );
-
- result.append("you_are", you);
- result.append("i_am", me);
-
- return true;
- }
- } cmdnegotiatemaster;
-
- int ReplPair::negotiate(DBClientConnection *conn, string method) {
- BSONObjBuilder b;
- b.append("negotiatemaster",1);
- b.append("i_was", state);
- b.append("your_name", remoteHost);
- b.append("your_port", remotePort);
- BSONObj cmd = b.done();
- BSONObj res = conn->findOne("admin.$cmd", cmd);
- if ( ! res["ok"].trueValue() ) {
- string message = method + " negotiate failed";
- problem() << message << ": " << res.toString() << '\n';
- setMasterLocked(State_Confused, message.c_str());
- return State_Confused;
- }
- int x = res.getIntField("you_are");
- int remote = res.getIntField("i_am");
- // State_Negotiating means the remote node is not dominant and cannot
- // choose who is master.
- if ( x != State_Slave && x != State_Master && x != State_Negotiating ) {
- problem() << method << " negotiate: bad you_are value " << res.toString() << endl;
- }
- else if ( x != State_Negotiating ) {
- string message = method + " negotiation";
- setMasterLocked(x, message.c_str());
- }
- return remote;
- }
-
- /* --------------------------------------------------------------*/
-
ReplSource::ReplSource() {
- replacing = false;
nClonedThisPass = 0;
- paired = false;
}
ReplSource::ReplSource(BSONObj o) : nClonedThisPass(0) {
- replacing = false;
- paired = false;
only = o.getStringField("only");
hostName = o.getStringField("host");
_sourceName = o.getStringField("source");
@@ -569,8 +284,6 @@ namespace mongo {
incompleteCloneDbs.insert( e.fieldName() );
}
}
-
- _lastSavedLocalTs = OpTime( o.getField( "localLogTs" ).date() );
}
/* Turn our C++ Source object into a BSONObj */
@@ -583,8 +296,6 @@ namespace mongo {
if ( !syncedTo.isNull() )
b.appendTimestamp("syncedTo", syncedTo.asDate());
- b.appendTimestamp("localLogTs", _lastSavedLocalTs.asDate());
-
BSONObjBuilder dbsNextPassBuilder;
int n = 0;
for ( set<string>::iterator i = addDbNextPass.begin(); i != addDbNextPass.end(); i++ ) {
@@ -625,16 +336,6 @@ namespace mongo {
assert( ! res.mod );
assert( res.num == 1 );
}
-
- if ( replacing ) {
- /* if we were in "replace" mode, we now have synced up with the replacement,
- so turn that off.
- */
- replacing = false;
- wassert( replacePeer );
- replacePeer = false;
- Helpers::emptyCollection("local.pair.startup");
- }
}
static void addSourceToList(ReplSource::SourceVector &v, ReplSource& s, ReplSource::SourceVector &old) {
@@ -660,8 +361,6 @@ namespace mongo {
SourceVector old = v;
v.clear();
- bool gotPairWith = false;
-
if ( !cmdLine.source.empty() ) {
// --source <host> specified.
// check that no items are in sources other than that
@@ -705,71 +404,21 @@ namespace mongo {
}
}
- if ( replPair ) {
- const string &remote = replPair->remote;
- // --pairwith host specified.
- if ( replSettings.fastsync ) {
- Helpers::emptyCollection( "local.sources" ); // ignore saved sources
- }
- // check that no items are in sources other than that
- // add if missing
- shared_ptr<Cursor> c = findTableScan("local.sources", BSONObj());
- int n = 0;
- while ( c->ok() ) {
- n++;
- ReplSource tmp(c->current());
- if ( tmp.hostName != remote ) {
- log() << "pairwith " << remote << " != " << tmp.hostName << " from local.sources collection" << endl;
- log() << "terminating after 30 seconds" << endl;
- sleepsecs(30);
- dbexit( EXIT_REPLICATION_ERROR );
- }
- c->advance();
- }
- uassert( 10122 , "local.sources collection corrupt?", n<2 );
- if ( n == 0 ) {
- // source missing. add.
- ReplSource s;
- s.hostName = remote;
- s.save();
- }
- }
-
shared_ptr<Cursor> c = findTableScan("local.sources", BSONObj());
while ( c->ok() ) {
ReplSource tmp(c->current());
- if ( replPair && tmp.hostName == replPair->remote && tmp.sourceName() == "main" ) {
- gotPairWith = true;
- tmp.paired = true;
- if ( replacePeer ) {
- // peer was replaced -- start back at the beginning.
- tmp.syncedTo = OpTime();
- tmp.replacing = true;
- }
- }
- if ( ( !replPair && tmp.syncedTo.isNull() ) ||
- ( replPair && replSettings.fastsync ) ) {
+ if ( tmp.syncedTo.isNull() ) {
DBDirectClient c;
if ( c.exists( "local.oplog.$main" ) ) {
BSONObj op = c.findOne( "local.oplog.$main", QUERY( "op" << NE << "n" ).sort( BSON( "$natural" << -1 ) ) );
if ( !op.isEmpty() ) {
tmp.syncedTo = op[ "ts" ].date();
- tmp._lastSavedLocalTs = op[ "ts" ].date();
}
}
}
addSourceToList(v, tmp, old);
c->advance();
}
-
- if ( !gotPairWith && replPair ) {
- /* add the --pairwith server */
- shared_ptr< ReplSource > s( new ReplSource() );
- s->paired = true;
- s->hostName = replPair->remote;
- s->replacing = replacePeer;
- v.push_back(s);
- }
}
BSONObj opTimeQuery = fromjson("{\"getoptime\":1}");
@@ -789,6 +438,7 @@ namespace mongo {
SourceVector sources;
ReplSource::loadAll(sources);
for( SourceVector::iterator i = sources.begin(); i != sources.end(); ++i ) {
+ log() << requester << " forcing resync from " << (*i)->hostName << endl;
(*i)->forceResync( requester );
}
replAllDead = 0;
@@ -798,7 +448,9 @@ namespace mongo {
BSONObj info;
{
dbtemprelease t;
- oplogReader.connect(hostName);
+ if (!oplogReader.connect(hostName)) {
+ msgassertedNoTrace( 14051 , "unable to connect to resync");
+ }
/* todo use getDatabaseNames() method here */
bool ok = oplogReader.conn()->runCommand( "admin", BSON( "listDatabases" << 1 ), info );
massert( 10385 , "Unable to get database list", ok );
@@ -830,22 +482,132 @@ namespace mongo {
}
/* grab initial copy of a database from the master */
- bool ReplSource::resync(string db) {
+ void ReplSource::resync(string db) {
string dummyNs = resyncDrop( db.c_str(), "internal" );
Client::Context ctx( dummyNs );
{
log() << "resync: cloning database " << db << " to get an initial copy" << endl;
ReplInfo r("resync: cloning a database");
string errmsg;
- bool ok = cloneFrom(hostName.c_str(), errmsg, cc().database()->name, false, /*slaveok*/ true, /*replauth*/ true, /*snapshot*/false);
+ int errCode = 0;
+ bool ok = cloneFrom(hostName.c_str(), errmsg, cc().database()->name, false, /*slaveok*/ true, /*replauth*/ true, /*snapshot*/false, /*mayYield*/true, /*mayBeInterrupted*/false, &errCode);
if ( !ok ) {
- problem() << "resync of " << db << " from " << hostName << " failed " << errmsg << endl;
- throw SyncException();
+ if ( errCode == DatabaseDifferCaseCode ) {
+ resyncDrop( db.c_str(), "internal" );
+ log() << "resync: database " << db << " not valid on the master due to a name conflict, dropping." << endl;
+ return;
+ }
+ else {
+ problem() << "resync of " << db << " from " << hostName << " failed " << errmsg << endl;
+ throw SyncException();
+ }
}
}
log() << "resync: done with initial clone for db: " << db << endl;
+ return;
+ }
+
+ DatabaseIgnorer ___databaseIgnorer;
+
+ void DatabaseIgnorer::doIgnoreUntilAfter( const string &db, const OpTime &futureOplogTime ) {
+ if ( futureOplogTime > _ignores[ db ] ) {
+ _ignores[ db ] = futureOplogTime;
+ }
+ }
+
+ bool DatabaseIgnorer::ignoreAt( const string &db, const OpTime &currentOplogTime ) {
+ if ( _ignores[ db ].isNull() ) {
+ return false;
+ }
+ if ( _ignores[ db ] >= currentOplogTime ) {
+ return true;
+ } else {
+ // The ignore state has expired, so clear it.
+ _ignores.erase( db );
+ return false;
+ }
+ }
+
+ bool ReplSource::handleDuplicateDbName( const BSONObj &op, const char *ns, const char *db ) {
+ if ( dbHolder.isLoaded( ns, dbpath ) ) {
+ // Database is already present.
+ return true;
+ }
+ BSONElement ts = op.getField( "ts" );
+ if ( ( ts.type() == Date || ts.type() == Timestamp ) && ___databaseIgnorer.ignoreAt( db, ts.date() ) ) {
+ // Database is ignored due to a previous indication that it is
+ // missing from master after optime "ts".
+ return false;
+ }
+ if ( Database::duplicateUncasedName( db, dbpath ).empty() ) {
+ // No duplicate database names are present.
+ return true;
+ }
+
+ OpTime lastTime;
+ bool dbOk = false;
+ {
+ dbtemprelease release;
+
+ // We always log an operation after executing it (never before), so
+ // a database list will always be valid as of an oplog entry generated
+ // before it was retrieved.
+
+ BSONObj last = oplogReader.findOne( this->ns().c_str(), Query().sort( BSON( "$natural" << -1 ) ) );
+ if ( !last.isEmpty() ) {
+ BSONElement ts = last.getField( "ts" );
+ massert( 14032, "Invalid 'ts' in remote log", ts.type() == Date || ts.type() == Timestamp );
+ lastTime = OpTime( ts.date() );
+ }
+
+ BSONObj info;
+ bool ok = oplogReader.conn()->runCommand( "admin", BSON( "listDatabases" << 1 ), info );
+ massert( 14033, "Unable to get database list", ok );
+ BSONObjIterator i( info.getField( "databases" ).embeddedObject() );
+ while( i.more() ) {
+ BSONElement e = i.next();
+
+ const char * name = e.embeddedObject().getField( "name" ).valuestr();
+ if ( strcasecmp( name, db ) != 0 )
+ continue;
+
+ if ( strcmp( name, db ) == 0 ) {
+ // The db exists on master, still need to check that no conflicts exist there.
+ dbOk = true;
+ continue;
+ }
+
+ // The master has a db name that conflicts with the requested name.
+ dbOk = false;
+ break;
+ }
+ }
+
+ if ( !dbOk ) {
+ ___databaseIgnorer.doIgnoreUntilAfter( db, lastTime );
+ incompleteCloneDbs.erase(db);
+ addDbNextPass.erase(db);
+ return false;
+ }
+
+ // Check for duplicates again, since we released the lock above.
+ set< string > duplicates;
+ Database::duplicateUncasedName( db, dbpath, &duplicates );
+
+ // The database is present on the master and no conflicting databases
+ // are present on the master. Drop any local conflicts.
+ for( set< string >::const_iterator i = duplicates.begin(); i != duplicates.end(); ++i ) {
+ ___databaseIgnorer.doIgnoreUntilAfter( *i, lastTime );
+ incompleteCloneDbs.erase(*i);
+ addDbNextPass.erase(*i);
+ Client::Context ctx(*i);
+ dropDatabase(*i);
+ }
+
+ massert( 14034, "Duplicate database names present after attempting to delete duplicates",
+ Database::duplicateUncasedName( db, dbpath ).empty() );
return true;
}
@@ -869,7 +631,7 @@ namespace mongo {
@param alreadyLocked caller already put us in write lock if true
*/
- void ReplSource::sync_pullOpLog_applyOperation(BSONObj& op, OpTime *localLogTail, bool alreadyLocked) {
+ void ReplSource::sync_pullOpLog_applyOperation(BSONObj& op, bool alreadyLocked) {
if( logLevel >= 6 ) // op.tostring is expensive so doing this check explicitly
log(6) << "processing op: " << op << endl;
@@ -936,17 +698,16 @@ namespace mongo {
scoped_ptr<writelock> lk( alreadyLocked ? 0 : new writelock() );
- if ( localLogTail && replPair && replPair->state == ReplPair::State_Master ) {
- updateSetsWithLocalOps( *localLogTail, true ); // allow unlocking
- updateSetsWithLocalOps( *localLogTail, false ); // don't allow unlocking or conversion to db backed storage
- }
-
if ( replAllDead ) {
// hmmm why is this check here and not at top of this function? does it get set between top and here?
log() << "replAllDead, throwing SyncException: " << replAllDead << endl;
throw SyncException();
}
+ if ( !handleDuplicateDbName( op, ns, clientName ) ) {
+ return;
+ }
+
Client::Context ctx( ns );
ctx.getClient()->curop()->reset();
@@ -988,78 +749,11 @@ namespace mongo {
save();
}
else {
- bool mod;
- if ( replPair && replPair->state == ReplPair::State_Master ) {
- BSONObj id = idForOp( op, mod );
- if ( !idTracker.haveId( ns, id ) ) {
- applyOperation( op );
- }
- else if ( idTracker.haveModId( ns, id ) ) {
- log( 6 ) << "skipping operation matching mod id object " << op << endl;
- BSONObj existing;
- if ( Helpers::findOne( ns, id, existing ) )
- logOp( "i", ns, existing );
- }
- else {
- log( 6 ) << "skipping operation matching changed id object " << op << endl;
- }
- }
- else {
- applyOperation( op );
- }
+ applyOperation( op );
addDbNextPass.erase( clientName );
}
}
- BSONObj ReplSource::idForOp( const BSONObj &op, bool &mod ) {
- mod = false;
- const char *opType = op.getStringField( "op" );
- BSONObj o = op.getObjectField( "o" );
- switch( opType[ 0 ] ) {
- case 'i': {
- BSONObjBuilder idBuilder;
- BSONElement id;
- if ( !o.getObjectID( id ) )
- return BSONObj();
- idBuilder.append( id );
- return idBuilder.obj();
- }
- case 'u': {
- BSONObj o2 = op.getObjectField( "o2" );
- if ( strcmp( o2.firstElement().fieldName(), "_id" ) != 0 )
- return BSONObj();
- if ( o.firstElement().fieldName()[ 0 ] == '$' )
- mod = true;
- return o2;
- }
- case 'd': {
- if ( opType[ 1 ] != '\0' )
- return BSONObj(); // skip "db" op type
- return o;
- }
- default:
- break;
- }
- return BSONObj();
- }
-
- void ReplSource::updateSetsWithOp( const BSONObj &op, bool mayUnlock ) {
- if ( mayUnlock ) {
- idTracker.mayUpgradeStorage();
- }
- bool mod;
- BSONObj id = idForOp( op, mod );
- if ( !id.isEmpty() ) {
- const char *ns = op.getStringField( "ns" );
- // Since our range of local ops may not be the same as our peer's
- // range of unapplied ops, it is always necessary to rewrite objects
- // to the oplog after a mod update.
- if ( mod )
- idTracker.haveModId( ns, id, true );
- idTracker.haveId( ns, id, true );
- }
- }
-
void ReplSource::syncToTailOfRemoteLog() {
string _ns = ns();
BSONObjBuilder b;
@@ -1074,65 +768,6 @@ namespace mongo {
}
}
- OpTime ReplSource::nextLastSavedLocalTs() const {
- Client::Context ctx( "local.oplog.$main" );
- shared_ptr<Cursor> c = findTableScan( "local.oplog.$main", BSON( "$natural" << -1 ) );
- if ( c->ok() )
- return OpTime( c->current().getField( "ts" ).date() );
- return OpTime();
- }
-
- void ReplSource::setLastSavedLocalTs( const OpTime &nextLocalTs ) {
- _lastSavedLocalTs = nextLocalTs;
- log( 3 ) << "updated _lastSavedLocalTs to: " << _lastSavedLocalTs << endl;
- }
-
- void ReplSource::resetSlave() {
- log() << "**********************************************************\n";
- log() << "Sending forcedead command to slave to stop its replication\n";
- log() << "Host: " << hostName << " paired: " << paired << endl;
- massert( 10387 , "request to kill slave replication failed",
- oplogReader.conn()->simpleCommand( "admin", 0, "forcedead" ) );
- syncToTailOfRemoteLog();
- {
- dblock lk;
- setLastSavedLocalTs( nextLastSavedLocalTs() );
- save();
- oplogReader.resetCursor();
- }
- }
-
- bool ReplSource::updateSetsWithLocalOps( OpTime &localLogTail, bool mayUnlock ) {
- Client::Context ctx( "local.oplog.$main" );
- shared_ptr<Cursor> localLog = findTableScan( "local.oplog.$main", BSON( "$natural" << -1 ) );
- OpTime newTail;
- for( ; localLog->ok(); localLog->advance() ) {
- BSONObj op = localLog->current();
- OpTime ts( localLog->current().getField( "ts" ).date() );
- if ( newTail.isNull() ) {
- newTail = ts;
- }
- if ( !( localLogTail < ts ) )
- break;
- updateSetsWithOp( op, mayUnlock );
- if ( mayUnlock ) {
- RARELY {
- dbtemprelease t;
- }
- }
- }
- if ( !localLogTail.isNull() && !localLog->ok() ) {
- // local log filled up
- idTracker.reset();
- dbtemprelease t;
- resetSlave();
- massert( 10388 , "local master log filled, forcing slave resync", false );
- }
- if ( !newTail.isNull() )
- localLogTail = newTail;
- return true;
- }
-
extern unsigned replApplyBatchSize;
/* slave: pull some data from the master's oplog
@@ -1149,12 +784,6 @@ namespace mongo {
bool tailing = true;
oplogReader.tailCheck();
- if ( replPair && replPair->state == ReplPair::State_Master ) {
- dblock lk;
- idTracker.reset();
- }
- OpTime localLogTail = _lastSavedLocalTs;
-
bool initial = syncedTo.isNull();
if ( !oplogReader.haveCursor() || initial ) {
@@ -1215,7 +844,7 @@ namespace mongo {
b.append("ns", *i + '.');
b.append("op", "db");
BSONObj op = b.done();
- sync_pullOpLog_applyOperation(op, 0, false);
+ sync_pullOpLog_applyOperation(op, false);
}
}
@@ -1231,13 +860,6 @@ namespace mongo {
}
{
dblock lk;
- OpTime nextLastSaved = nextLastSavedLocalTs();
- {
- dbtemprelease t;
- if ( !oplogReader.more() ) {
- setLastSavedLocalTs( nextLastSaved );
- }
- }
save();
}
return okResultCode;
@@ -1266,19 +888,6 @@ namespace mongo {
}
}
- if ( replPair && replPair->state == ReplPair::State_Master ) {
-
- OpTime next( ts.date() );
- if ( !tailing && !initial && next != syncedTo ) {
- log() << "remote slave log filled, forcing slave resync" << endl;
- resetSlave();
- return 1;
- }
-
- dblock lk;
- updateSetsWithLocalOps( localLogTail, true );
- }
-
nextOpTime = OpTime( ts.date() );
log(2) << "repl: first op time received: " << nextOpTime.toString() << '\n';
if ( initial ) {
@@ -1320,37 +929,21 @@ namespace mongo {
int n = 0;
time_t saveLast = time(0);
while ( 1 ) {
- /* from a.s.:
- I think the idea here is that we can establish a sync point between the local op log and the remote log with the following steps:
-
- 1) identify most recent op in local log -- call it O
- 2) ask "does nextOpTime reflect the tail of the remote op log?" (in other words, is more() false?) - If yes, all subsequent ops after nextOpTime in the remote log must have occurred after O. If no, we can't establish a sync point.
-
- Note that we can't do step (2) followed by step (1) because if we do so ops may be added to both machines between steps (2) and (1) and we can't establish a sync point. (In particular, between (2) and (1) an op may be added to the remote log before a different op is added to the local log. In this case, the newest remote op will have occurred after nextOpTime but before O.)
-
- Now, for performance reasons we don't want to have to identify the most recent op in the local log every time we call c->more() because in performance sensitive situations more() will be true most of the time. So we do:
-
- 0) more()?
- 1) find most recent op in local log
- 2) more()?
- */
bool moreInitialSyncsPending = !addDbNextPass.empty() && n; // we need "&& n" to assure we actually process at least one op to get a sync point recorded in the first place.
if ( moreInitialSyncsPending || !oplogReader.more() ) {
dblock lk;
- OpTime nextLastSaved = nextLastSavedLocalTs();
+
+ // NOTE aaron 2011-03-29 This block may be unnecessary, but I'm leaving it in place to avoid changing timing behavior.
{
dbtemprelease t;
if ( !moreInitialSyncsPending && oplogReader.more() ) {
- if ( getInitialSyncCompleted() ) { // if initial sync hasn't completed, break out of loop so we can set to completed or clone more dbs
- continue;
- }
- }
- else {
- setLastSavedLocalTs( nextLastSaved );
+ continue;
}
+ // otherwise, break out of loop so we can set to completed or clone more dbs
}
+
if( oplogReader.awaitCapable() && tailing )
okResultCode = 0; // don't sleep
syncedTo = nextOpTime;
@@ -1415,7 +1008,7 @@ namespace mongo {
return okResultCode;
}
- sync_pullOpLog_applyOperation(op, &localLogTail, !justOne);
+ sync_pullOpLog_applyOperation(op, !justOne);
n++;
if( --b == 0 )
@@ -1438,6 +1031,9 @@ namespace mongo {
BSONObj userReplQuery = fromjson("{\"user\":\"repl\"}");
bool replAuthenticate(DBClientBase *conn) {
+ if( noauth ) {
+ return true;
+ }
if( ! cc().isAdmin() ) {
log() << "replauthenticate: requires admin permissions, failing\n";
return false;
@@ -1458,7 +1054,7 @@ namespace mongo {
// try the first user in local
!Helpers::getSingleton("local.system.users", user) ) {
log() << "replauthenticate: no user in local.system.users to use for authentication\n";
- return noauth;
+ return false;
}
}
u = user.getStringField("user");
@@ -1477,13 +1073,24 @@ namespace mongo {
bool replHandshake(DBClientConnection *conn) {
+ string myname = getHostName();
+
BSONObj me;
{
+
dblock l;
// local.me is an identifier for a server for getLastError w:2+
- if ( ! Helpers::getSingleton( "local.me" , me ) ) {
+ if ( ! Helpers::getSingleton( "local.me" , me ) ||
+ ! me.hasField("host") ||
+ me["host"].String() != myname ) {
+
+ // clean out local.me
+ Helpers::emptyCollection("local.me");
+
+ // repopulate
BSONObjBuilder b;
b.appendOID( "_id" , 0 , true );
+ b.append( "host", myname );
me = b.obj();
Helpers::putSingleton( "local.me" , me );
}
@@ -1491,6 +1098,9 @@ namespace mongo {
BSONObjBuilder cmd;
cmd.appendAs( me["_id"] , "handshake" );
+ if (theReplSet) {
+ cmd.append("member", theReplSet->selfId());
+ }
BSONObj res;
bool ok = conn->runCommand( "admin" , cmd.obj() , res );
@@ -1499,14 +1109,13 @@ namespace mongo {
return true;
}
- bool OplogReader::connect(string hostName) {
+ bool OplogReader::commonConnect(const string& hostName) {
if( conn() == 0 ) {
- _conn = auto_ptr<DBClientConnection>(new DBClientConnection( false, 0, replPair ? 20 : 0 /* tcp timeout */));
+ _conn = shared_ptr<DBClientConnection>(new DBClientConnection( false, 0, 0 /* tcp timeout */));
string errmsg;
ReplInfo r("trying to connect to sync source");
if ( !_conn->connect(hostName.c_str(), errmsg) ||
- (!noauth && !replAuthenticate(_conn.get())) ||
- !replHandshake(_conn.get()) ) {
+ (!noauth && !replAuthenticate(_conn.get())) ) {
resetConnection();
log() << "repl: " << errmsg << endl;
return false;
@@ -1514,6 +1123,37 @@ namespace mongo {
}
return true;
}
+
+ bool OplogReader::connect(string hostName) {
+ if (conn() != 0) {
+ return true;
+ }
+
+ if (commonConnect(hostName)) {
+ return replHandshake(_conn.get());
+ }
+ return false;
+ }
+
+ bool OplogReader::connect(const BSONObj& rid, const int from, const string& to) {
+ if (conn() != 0) {
+ return true;
+ }
+ if (commonConnect(to)) {
+ log() << "handshake between " << from << " and " << to << endl;
+ return passthroughHandshake(rid, from);
+ }
+ return false;
+ }
+
+ bool OplogReader::passthroughHandshake(const BSONObj& rid, const int f) {
+ BSONObjBuilder cmd;
+ cmd.appendAs( rid["_id"], "handshake" );
+ cmd.append( "member" , f );
+
+ BSONObj res;
+ return conn()->runCommand( "admin" , cmd.obj() , res );
+ }
/* note: not yet in mutex at this point.
returns >= 0 if ok. return -1 if you want to reconnect.
@@ -1541,22 +1181,9 @@ namespace mongo {
if ( !oplogReader.connect(hostName) ) {
log(4) << "repl: can't connect to sync source" << endl;
- if ( replPair && paired ) {
- assert( startsWith(hostName.c_str(), replPair->remoteHost.c_str()) );
- replPair->arbitrate();
- }
return -1;
}
- if ( paired ) {
- int remote = replPair->negotiate(oplogReader.conn(), "direct");
- int nMasters = ( remote == ReplPair::State_Master ) + ( replPair->state == ReplPair::State_Master );
- if ( getInitialSyncCompleted() && nMasters != 1 ) {
- log() << ( nMasters == 0 ? "no master" : "two masters" ) << ", deferring oplog pull" << endl;
- return 1;
- }
- }
-
/*
// get current mtime at the server.
BSONObj o = conn->findOne("admin.$cmd", opTimeQuery);
@@ -1619,9 +1246,6 @@ namespace mongo {
}
else
sleepAdvice = res;
- if ( res >= 0 && !moreToSync /*&& !s->syncedTo.isNull()*/ ) {
- pairSync->setInitialSyncCompletedLocking();
- }
}
catch ( const SyncException& ) {
log() << "caught SyncException" << endl;
@@ -1662,8 +1286,11 @@ namespace mongo {
{
dblock lk;
if ( replAllDead ) {
- if ( !replSettings.autoresync || !ReplSource::throttledForceResyncDead( "auto" ) )
+ // throttledForceResyncDead can throw
+ if ( !replSettings.autoresync || !ReplSource::throttledForceResyncDead( "auto" ) ) {
+ log() << "all sources dead: " << replAllDead << ", sleeping for 5 seconds" << endl;
break;
+ }
}
assert( syncing == 0 ); // i.e., there is only one sync thread running. we will want to change/fix this.
syncing++;
@@ -1697,7 +1324,7 @@ namespace mongo {
if ( s ) {
stringstream ss;
- ss << "repl: sleep " << s << "sec before next pass";
+ ss << "repl: sleep " << s << " sec before next pass";
string msg = ss.str();
if ( ! cmdLine.quiet )
log() << msg << endl;
@@ -1707,8 +1334,6 @@ namespace mongo {
}
}
- int debug_stop_repl = 0;
-
static void replMasterThread() {
sleepsecs(4);
Client::initThread("replmaster");
@@ -1725,7 +1350,7 @@ namespace mongo {
if ( lk.got() ) {
toSleep = 10;
- cc().getAuthenticationInfo()->authorize("admin");
+ replLocalAuth();
try {
logKeepalive();
@@ -1749,21 +1374,12 @@ namespace mongo {
{
dblock lk;
- cc().getAuthenticationInfo()->authorize("admin");
-
- BSONObj obj;
- if ( Helpers::getSingleton("local.pair.startup", obj) ) {
- // should be: {replacepeer:1}
- replacePeer = true;
- pairSync->setInitialSyncCompleted(); // we are the half that has all the data
- }
+ replLocalAuth();
}
while ( 1 ) {
try {
replMain();
- if ( debug_stop_repl )
- break;
sleepsecs(5);
}
catch ( AssertionException& ) {
@@ -1771,6 +1387,15 @@ namespace mongo {
problem() << "Assertion in replSlaveThread(): sleeping 5 minutes before retry" << endl;
sleepsecs(300);
}
+ catch ( DBException& e ) {
+ problem() << "exception in replSlaveThread(): " << e.what()
+ << ", sleeping 5 minutes before retry" << endl;
+ sleepsecs(300);
+ }
+ catch ( ... ) {
+ problem() << "error in replSlaveThread(): sleeping 5 minutes before retry" << endl;
+ sleepsecs(300);
+ }
}
}
@@ -1783,15 +1408,21 @@ namespace mongo {
void newRepl();
void oldRepl();
+ void startReplSets(ReplSetCmdline*);
void startReplication() {
/* if we are going to be a replica set, we aren't doing other forms of replication. */
if( !cmdLine._replSet.empty() ) {
- if( replSettings.slave || replSettings.master || replPair ) {
+ if( replSettings.slave || replSettings.master ) {
log() << "***" << endl;
log() << "ERROR: can't use --slave or --master replication options with --replSet" << endl;
log() << "***" << endl;
}
newRepl();
+
+ replSet = true;
+ ReplSetCmdline *replSetCmdline = new ReplSetCmdline(cmdLine._replSet);
+ boost::thread t( boost::bind( &startReplSets, replSetCmdline) );
+
return;
}
@@ -1802,28 +1433,22 @@ namespace mongo {
*/
//boost::thread tempt(tempThread);
- if( !replSettings.slave && !replSettings.master && !replPair )
+ if( !replSettings.slave && !replSettings.master )
return;
{
dblock lk;
- cc().getAuthenticationInfo()->authorize("admin");
- pairSync->init();
+ replLocalAuth();
}
- if ( replSettings.slave || replPair ) {
- if ( replSettings.slave ) {
- assert( replSettings.slave == SimpleSlave );
- log(1) << "slave=true" << endl;
- }
- else
- replSettings.slave = ReplPairSlave;
+ if ( replSettings.slave ) {
+ assert( replSettings.slave == SimpleSlave );
+ log(1) << "slave=true" << endl;
boost::thread repl_thread(replSlaveThread);
}
- if ( replSettings.master || replPair ) {
- if ( replSettings.master )
- log(1) << "master=true" << endl;
+ if ( replSettings.master ) {
+ log(1) << "master=true" << endl;
replSettings.master = true;
createOplog();
boost::thread t(replMasterThread);
@@ -1833,11 +1458,6 @@ namespace mongo {
sleepmillis( 50 );
}
- /* called from main at server startup */
- void pairWith(const char *remoteEnd, const char *arb) {
- replPair = new ReplPair(remoteEnd, arb);
- }
-
void testPretouch() {
int nthr = min(8, 8);
nthr = max(nthr, 1);
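
DatabaseIgnorer, introduced earlier in this file, keeps a per-database "ignore until" optime and clears it lazily the first time it is queried past that point. A minimal standalone sketch of those semantics, illustrative only and not part of this patch, with plain integers standing in for OpTime (0 playing the role of a null optime):

#include <cstdio>
#include <map>
#include <string>

class MiniIgnorer {
public:
    void doIgnoreUntilAfter( const std::string &db, unsigned long long futureTime ) {
        if ( futureTime > _ignores[ db ] )
            _ignores[ db ] = futureTime;
    }
    bool ignoreAt( const std::string &db, unsigned long long currentTime ) {
        if ( _ignores[ db ] == 0 )
            return false;               // no ignore state recorded
        if ( _ignores[ db ] >= currentTime )
            return true;                // still within the ignored window
        _ignores.erase( db );           // ignore state has expired; clear it
        return false;
    }
private:
    std::map<std::string, unsigned long long> _ignores;
};

int main() {
    MiniIgnorer ignorer;
    ignorer.doIgnoreUntilAfter( "foo", 100 );
    printf( "%d\n", (int)ignorer.ignoreAt( "foo", 50 ) );    // 1: still ignored
    printf( "%d\n", (int)ignorer.ignoreAt( "foo", 101 ) );   // 0: past the limit, entry cleared
    printf( "%d\n", (int)ignorer.ignoreAt( "foo", 50 ) );    // 0: state was cleared above
    return 0;
}
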
diff --git a/db/repl.h b/db/repl.h
index 45036fa..9791f14 100644
--- a/db/repl.h
+++ b/db/repl.h
@@ -30,49 +30,44 @@
#include "pdfile.h"
#include "db.h"
#include "dbhelpers.h"
-#include "query.h"
-#include "queryoptimizer.h"
#include "../client/dbclient.h"
#include "../util/optime.h"
#include "oplog.h"
#include "../util/concurrency/thread_pool.h"
#include "oplogreader.h"
+#include "cloner.h"
namespace mongo {
- /* replication slave? (possibly with slave or repl pair nonmaster)
+ /* replication slave? (possibly with slave)
--slave cmd line setting -> SimpleSlave
*/
- typedef enum { NotSlave=0, SimpleSlave, ReplPairSlave } SlaveTypes;
+ typedef enum { NotSlave=0, SimpleSlave } SlaveTypes;
class ReplSettings {
public:
SlaveTypes slave;
- /* true means we are master and doing replication. if we are not writing to oplog (no --master or repl pairing),
- this won't be true.
- */
+ /** true means we are master and doing replication. If we are not writing to the oplog, this won't be true. */
bool master;
- int opIdMem;
-
bool fastsync;
bool autoresync;
int slavedelay;
+ set<string> discoveredSeeds;
+ BSONObj reconfig;
+
ReplSettings()
- : slave(NotSlave) , master(false) , opIdMem(100000000) , fastsync() , autoresync(false), slavedelay() {
+ : slave(NotSlave) , master(false) , fastsync() , autoresync(false), slavedelay(), discoveredSeeds() {
}
};
extern ReplSettings replSettings;
- bool cloneFrom(const char *masterHost, string& errmsg, const string& fromdb, bool logForReplication,
- bool slaveOk, bool useReplAuth, bool snapshot);
-
/* A replication exception */
class SyncException : public DBException {
public:
@@ -84,18 +79,18 @@ namespace mongo {
Can be a group of things to replicate for several databases.
- { host: ..., source: ..., only: ..., syncedTo: ..., localLogTs: ..., dbsNextPass: { ... }, incompleteCloneDbs: { ... } }
+ { host: ..., source: ..., only: ..., syncedTo: ..., dbsNextPass: { ... }, incompleteCloneDbs: { ... } }
'source' defaults to 'main'; support for multiple source names is
not done (always use main for now).
*/
class ReplSource {
- auto_ptr<ThreadPool> tp;
+ shared_ptr<ThreadPool> tp;
- bool resync(string db);
+ void resync(string db);
/** @param alreadyLocked caller already put us in write lock if true */
- void sync_pullOpLog_applyOperation(BSONObj& op, OpTime *localLogTail, bool alreadyLocked);
+ void sync_pullOpLog_applyOperation(BSONObj& op, bool alreadyLocked);
/* pull some operations from the master's oplog, and apply them.
calls sync_pullOpLog_applyOperation
@@ -115,28 +110,23 @@ namespace mongo {
// returns the dummy ns used to do the drop
string resyncDrop( const char *db, const char *requester );
- // returns possibly unowned id spec for the operation.
- static BSONObj idForOp( const BSONObj &op, bool &mod );
- static void updateSetsWithOp( const BSONObj &op, bool mayUpdateStorage );
// call without the db mutex
void syncToTailOfRemoteLog();
- // call with the db mutex
- OpTime nextLastSavedLocalTs() const;
- void setLastSavedLocalTs( const OpTime &nextLocalTs );
- // call without the db mutex
- void resetSlave();
- // call with the db mutex
- // returns false if the slave has been reset
- bool updateSetsWithLocalOps( OpTime &localLogTail, bool mayUnlock );
string ns() const { return string( "local.oplog.$" ) + sourceName(); }
unsigned _sleepAdviceTime;
+ /**
+ * If 'db' is a new database and its name would conflict with that of
+ * an existing database, synchronize these database names with the
+ * master.
+ * @return true iff an op with the specified ns may be applied.
+ */
+ bool handleDuplicateDbName( const BSONObj &op, const char *ns, const char *db );
+
public:
OplogReader oplogReader;
static void applyOperation(const BSONObj& op);
- bool replacing; // in "replace mode" -- see CmdReplacePeer
- bool paired; // --pair in use
string hostName; // ip addr or hostname plus optionally, ":<port>"
string _sourceName; // a logical source name.
string sourceName() const { return _sourceName.empty() ? "main" : _sourceName; }
@@ -145,14 +135,6 @@ namespace mongo {
/* the last time point we have already synced up to (in the remote/master's oplog). */
OpTime syncedTo;
- /* This is for repl pairs.
- _lastSavedLocalTs is the most recent point in the local log that we know is consistent
- with the remote log ( ie say the local op log has entries ABCDE and the remote op log
- has ABCXY, then _lastSavedLocalTs won't be greater than C until we have reconciled
- the DE-XY difference.)
- */
- OpTime _lastSavedLocalTs;
-
int nClonedThisPass;
typedef vector< shared_ptr< ReplSource > > SourceVector;
@@ -186,148 +168,24 @@ namespace mongo {
void forceResync( const char *requester );
};
- // class for managing a set of ids in memory
- class MemIds {
- public:
- MemIds() : size_() {}
- friend class IdTracker;
- void reset() {
- imp_.clear();
- size_ = 0;
- }
- bool get( const char *ns, const BSONObj &id ) { return imp_[ ns ].count( id ); }
- void set( const char *ns, const BSONObj &id, bool val ) {
- if ( val ) {
- if ( imp_[ ns ].insert( id.getOwned() ).second ) {
- size_ += id.objsize() + sizeof( BSONObj );
- }
- }
- else {
- if ( imp_[ ns ].erase( id ) == 1 ) {
- size_ -= id.objsize() + sizeof( BSONObj );
- }
- }
- }
- long long roughSize() const {
- return size_;
- }
- private:
- typedef map< string, BSONObjSetDefaultOrder > IdSets;
- IdSets imp_;
- long long size_;
- };
-
- // class for managing a set of ids in a db collection
- // All functions must be called with db mutex held
- class DbIds {
- public:
- DbIds( const string & name ) : impl_( name, BSON( "ns" << 1 << "id" << 1 ) ) {}
- void reset() {
- impl_.reset();
- }
- bool get( const char *ns, const BSONObj &id ) {
- return impl_.get( key( ns, id ) );
- }
- void set( const char *ns, const BSONObj &id, bool val ) {
- impl_.set( key( ns, id ), val );
- }
- private:
- static BSONObj key( const char *ns, const BSONObj &id ) {
- BSONObjBuilder b;
- b << "ns" << ns;
- // rename _id to id since there may be duplicates
- b.appendAs( id.firstElement(), "id" );
- return b.obj();
- }
- DbSet impl_;
- };
+ bool anyReplEnabled();
+ void appendReplicationInfo( BSONObjBuilder& result , bool authed , int level = 0 );
- // class for tracking ids and mod ids, in memory or on disk
- // All functions must be called with db mutex held
- // Kind of sloppy class structure, for now just want to keep the in mem
- // version speedy.
- // see http://www.mongodb.org/display/DOCS/Pairing+Internals
- class IdTracker {
+ /**
+ * Helper class used to set and query an ignore state for a named database.
+ * The ignore state will expire after a specified OpTime.
+ */
+ class DatabaseIgnorer {
public:
- IdTracker() :
- dbIds_( "local.temp.replIds" ),
- dbModIds_( "local.temp.replModIds" ),
- inMem_( true ),
- maxMem_( replSettings.opIdMem ) {
- }
- void reset( int maxMem = replSettings.opIdMem ) {
- memIds_.reset();
- memModIds_.reset();
- dbIds_.reset();
- dbModIds_.reset();
- maxMem_ = maxMem;
- inMem_ = true;
- }
- bool haveId( const char *ns, const BSONObj &id ) {
- if ( inMem_ )
- return get( memIds_, ns, id );
- else
- return get( dbIds_, ns, id );
- }
- bool haveModId( const char *ns, const BSONObj &id ) {
- if ( inMem_ )
- return get( memModIds_, ns, id );
- else
- return get( dbModIds_, ns, id );
- }
- void haveId( const char *ns, const BSONObj &id, bool val ) {
- if ( inMem_ )
- set( memIds_, ns, id, val );
- else
- set( dbIds_, ns, id, val );
- }
- void haveModId( const char *ns, const BSONObj &id, bool val ) {
- if ( inMem_ )
- set( memModIds_, ns, id, val );
- else
- set( dbModIds_, ns, id, val );
- }
- // will release the db mutex
- void mayUpgradeStorage() {
- if ( !inMem_ || memIds_.roughSize() + memModIds_.roughSize() <= maxMem_ )
- return;
- log() << "saving master modified id information to collection" << endl;
- upgrade( memIds_, dbIds_ );
- upgrade( memModIds_, dbModIds_ );
- memIds_.reset();
- memModIds_.reset();
- inMem_ = false;
- }
- bool inMem() const { return inMem_; }
+ /** Indicate that operations for 'db' should be ignored until after 'futureOplogTime' */
+ void doIgnoreUntilAfter( const string &db, const OpTime &futureOplogTime );
+ /**
+ * Query ignore state of 'db'; if 'currentOplogTime' is after the ignore
+ * limit, the ignore state will be cleared.
+ */
+ bool ignoreAt( const string &db, const OpTime &currentOplogTime );
private:
- template< class T >
- bool get( T &ids, const char *ns, const BSONObj &id ) {
- return ids.get( ns, id );
- }
- template< class T >
- void set( T &ids, const char *ns, const BSONObj &id, bool val ) {
- ids.set( ns, id, val );
- }
- void upgrade( MemIds &a, DbIds &b ) {
- for( MemIds::IdSets::const_iterator i = a.imp_.begin(); i != a.imp_.end(); ++i ) {
- for( BSONObjSetDefaultOrder::const_iterator j = i->second.begin(); j != i->second.end(); ++j ) {
- set( b, i->first.c_str(), *j, true );
- RARELY {
- dbtemprelease t;
- }
- }
- }
- }
- MemIds memIds_;
- MemIds memModIds_;
- DbIds dbIds_;
- DbIds dbModIds_;
- bool inMem_;
- int maxMem_;
+ map< string, OpTime > _ignores;
};
- bool anyReplEnabled();
- void appendReplicationInfo( BSONObjBuilder& result , bool authed , int level = 0 );
-
-
} // namespace mongo
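The hunk above only declares DatabaseIgnorer's interface; as a reading aid, here is a minimal sketch of how the declared map-based state could satisfy the documented semantics (a sketch only -- the committed definition lives elsewhere in the repl sources and may differ, e.g. in whether the comparison against the limit is strict):

    // Sketch only -- not the committed implementation.
    void DatabaseIgnorer::doIgnoreUntilAfter( const string &db, const OpTime &futureOplogTime ) {
        // keep the later of any existing limit and the new one
        if ( _ignores[ db ] < futureOplogTime ) {
            _ignores[ db ] = futureOplogTime;
        }
    }
    bool DatabaseIgnorer::ignoreAt( const string &db, const OpTime &currentOplogTime ) {
        map< string, OpTime >::iterator i = _ignores.find( db );
        if ( i == _ignores.end() )
            return false;
        if ( i->second < currentOplogTime ) {
            // the ignore limit has passed: clear the state, as documented above
            _ignores.erase( i );
            return false;
        }
        return true;  // still inside the ignore window
    }
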
diff --git a/db/repl/connections.h b/db/repl/connections.h
index 7e7bfe5..78cfb30 100644
--- a/db/repl/connections.h
+++ b/db/repl/connections.h
@@ -20,7 +20,7 @@
#include <map>
#include "../../client/dbclient.h"
-#include "../security_key.h"
+#include "../security_common.h"
namespace mongo {
@@ -44,13 +44,14 @@ namespace mongo {
public:
/** throws assertions if connect failure etc. */
ScopedConn(string hostport);
- ~ScopedConn();
+ ~ScopedConn() {
+ // conLock releases...
+ }
/* If we were to run a query and not exhaust the cursor, future use of the connection would be problematic.
   So what we do here is wrap known safe methods and not allow cursor-style queries at all.  This makes
ScopedConn limited in functionality but very safe. More non-cursor wrappers can be added here if needed.
*/
-
bool runCommand(const string &dbname, const BSONObj& cmd, BSONObj &info, int options=0) {
return conn()->runCommand(dbname, cmd, info, options);
}
@@ -108,12 +109,4 @@ namespace mongo {
}
}
- inline ScopedConn::~ScopedConn() {
- // conLock releases...
- }
-
- /*inline DBClientConnection* ScopedConn::operator->() {
- return &x->cc;
- }*/
-
}
diff --git a/db/repl/consensus.cpp b/db/repl/consensus.cpp
index dadb22e..fd18cdc 100644
--- a/db/repl/consensus.cpp
+++ b/db/repl/consensus.cpp
@@ -25,7 +25,49 @@ namespace mongo {
public:
CmdReplSetFresh() : ReplSetCommand("replSetFresh") { }
private:
- virtual bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+
+ bool shouldVeto(const BSONObj& cmdObj, string& errmsg) {
+ unsigned id = cmdObj["id"].Int();
+ const Member* primary = theReplSet->box.getPrimary();
+ const Member* hopeful = theReplSet->findById(id);
+ const Member *highestPriority = theReplSet->getMostElectable();
+
+ if( !hopeful ) {
+ errmsg = str::stream() << "replSet couldn't find member with id " << id;
+ return true;
+ }
+ else if( theReplSet->isPrimary() && theReplSet->lastOpTimeWritten >= hopeful->hbinfo().opTime ) {
+ // hbinfo is not updated, so we have to check the primary's last optime separately
+ errmsg = str::stream() << "I am already primary, " << hopeful->fullName() <<
+ " can try again once I've stepped down";
+ return true;
+ }
+ else if( primary && primary->hbinfo().opTime >= hopeful->hbinfo().opTime ) {
+ // other members might be aware of more up-to-date nodes
+ errmsg = str::stream() << hopeful->fullName() << " is trying to elect itself but " <<
+ primary->fullName() << " is already primary and more up-to-date";
+ return true;
+ }
+ else if( highestPriority && highestPriority->config().priority > hopeful->config().priority) {
+ errmsg = str::stream() << hopeful->fullName() << " has lower priority than " << highestPriority->fullName();
+ return true;
+ }
+
+ // don't veto older versions
+ if (cmdObj["id"].eoo()) {
+ // they won't be looking for the veto field
+ return false;
+ }
+
+ if (!hopeful || !theReplSet->isElectable(id) ||
+ (highestPriority && highestPriority->config().priority > hopeful->config().priority)) {
+ return true;
+ }
+
+ return false;
+ }
+
+ virtual bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
if( !check(errmsg, result) )
return false;
@@ -43,11 +85,15 @@ namespace mongo {
result.append("info", "config version stale");
weAreFresher = true;
}
- else if( opTime < theReplSet->lastOpTimeWritten ) {
+ // check not only our own optime, but any other member we can reach
+ else if( opTime < theReplSet->lastOpTimeWritten ||
+ opTime < theReplSet->lastOtherOpTime()) {
weAreFresher = true;
}
result.appendDate("opTime", theReplSet->lastOpTimeWritten.asDate());
result.append("fresher", weAreFresher);
+ result.append("veto", shouldVeto(cmdObj, errmsg));
+
return true;
}
} cmdReplSetFresh;
@@ -56,11 +102,9 @@ namespace mongo {
public:
CmdReplSetElect() : ReplSetCommand("replSetElect") { }
private:
- virtual bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ virtual bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
if( !check(errmsg, result) )
return false;
- //task::lam f = boost::bind(&Consensus::electCmdReceived, &theReplSet->elect, cmdObj, &result);
- //theReplSet->mgr->call(f);
theReplSet->elect.electCmdReceived(cmdObj, &result);
return true;
}
@@ -91,6 +135,10 @@ namespace mongo {
if( dt < T )
vUp += m->config().votes;
}
+
+ // the manager will handle calling stepdown if another node should be
+ // primary due to priority
+
return !( vUp * 2 > totalVotes() );
}
@@ -98,17 +146,19 @@ namespace mongo {
const time_t LeaseTime = 30;
+ mutex Consensus::lyMutex("ly");
+
unsigned Consensus::yea(unsigned memberId) { /* throws VoteException */
- Atomic<LastYea>::tran t(ly);
- LastYea &ly = t.ref();
+ mutex::scoped_lock lk(lyMutex);
+ LastYea &L = this->ly.ref(lk);
time_t now = time(0);
- if( ly.when + LeaseTime >= now && ly.who != memberId ) {
- log(1) << "replSet not voting yea for " << memberId <<
- " voted for " << ly.who << ' ' << now-ly.when << " secs ago" << rsLog;
+ if( L.when + LeaseTime >= now && L.who != memberId ) {
+ LOG(1) << "replSet not voting yea for " << memberId <<
+ " voted for " << L.who << ' ' << now-L.when << " secs ago" << rsLog;
throw VoteException();
}
- ly.when = now;
- ly.who = memberId;
+ L.when = now;
+ L.who = memberId;
return rs._self->config().votes;
}
@@ -116,8 +166,8 @@ namespace mongo {
place instead of leaving it for a long time.
*/
void Consensus::electionFailed(unsigned meid) {
- Atomic<LastYea>::tran t(ly);
- LastYea &L = t.ref();
+ mutex::scoped_lock lk(lyMutex);
+ LastYea &L = ly.ref(lk);
        DEV assert( L.who == meid ); // this may not always hold, so be aware, but adding for now as a quick sanity test
if( L.who == meid )
L.when = 0;
@@ -127,7 +177,7 @@ namespace mongo {
void Consensus::electCmdReceived(BSONObj cmd, BSONObjBuilder* _b) {
BSONObjBuilder& b = *_b;
DEV log() << "replSet received elect msg " << cmd.toString() << rsLog;
- else log(2) << "replSet received elect msg " << cmd.toString() << rsLog;
+ else LOG(2) << "replSet received elect msg " << cmd.toString() << rsLog;
string set = cmd["set"].String();
unsigned whoid = cmd["whoid"].Int();
int cfgver = cmd["cfgver"].Int();
@@ -136,22 +186,22 @@ namespace mongo {
const Member* primary = rs.box.getPrimary();
const Member* hopeful = rs.findById(whoid);
+ const Member* highestPriority = rs.getMostElectable();
int vote = 0;
if( set != rs.name() ) {
log() << "replSet error received an elect request for '" << set << "' but our set name is '" << rs.name() << "'" << rsLog;
-
}
else if( myver < cfgver ) {
// we are stale. don't vote
}
else if( myver > cfgver ) {
// they are stale!
- log() << "replSet info got stale version # during election" << rsLog;
+ log() << "replSet electCmdReceived info got stale version # during election" << rsLog;
vote = -10000;
}
else if( !hopeful ) {
- log() << "couldn't find member with id " << whoid << rsLog;
+ log() << "replSet electCmdReceived couldn't find member with id " << whoid << rsLog;
vote = -10000;
}
else if( primary && primary == rs._self && rs.lastOpTimeWritten >= hopeful->hbinfo().opTime ) {
@@ -166,14 +216,19 @@ namespace mongo {
primary->fullName() << " is already primary and more up-to-date" << rsLog;
vote = -10000;
}
+ else if( highestPriority && highestPriority->config().priority > hopeful->config().priority) {
+ log() << hopeful->fullName() << " has lower priority than " << highestPriority->fullName();
+ vote = -10000;
+ }
else {
try {
vote = yea(whoid);
+ dassert( hopeful->id() == whoid );
rs.relinquish();
- log() << "replSet info voting yea for " << whoid << rsLog;
+ log() << "replSet info voting yea for " << hopeful->fullName() << " (" << whoid << ')' << rsLog;
}
catch(VoteException&) {
- log() << "replSet voting no already voted for another" << rsLog;
+ log() << "replSet voting no for " << hopeful->fullName() << " already voted for another" << rsLog;
}
}
@@ -212,7 +267,8 @@ namespace mongo {
"set" << rs.name() <<
"opTime" << Date_t(ord.asDate()) <<
"who" << rs._self->fullName() <<
- "cfgver" << rs._cfg->version );
+ "cfgver" << rs._cfg->version <<
+ "id" << rs._self->id());
list<Target> L;
int ver;
/* the following queries arbiters, even though they are never fresh. wonder if that makes sense.
@@ -228,19 +284,33 @@ namespace mongo {
for( list<Target>::iterator i = L.begin(); i != L.end(); i++ ) {
if( i->ok ) {
nok++;
- if( i->result["fresher"].trueValue() )
+ if( i->result["fresher"].trueValue() ) {
+ log() << "not electing self, we are not freshest" << rsLog;
return false;
+ }
OpTime remoteOrd( i->result["opTime"].Date() );
if( remoteOrd == ord )
nTies++;
assert( remoteOrd <= ord );
+
+ if( i->result["veto"].trueValue() ) {
+ BSONElement msg = i->result["errmsg"];
+ if (!msg.eoo()) {
+ log() << "not electing self, " << i->toHost << " would veto with '" <<
+ msg.String() << "'" << rsLog;
+ }
+ else {
+ log() << "not electing self, " << i->toHost << " would veto" << rsLog;
+ }
+ return false;
+ }
}
else {
DEV log() << "replSet freshest returns " << i->result.toString() << rsLog;
allUp = false;
}
}
- log(1) << "replSet dev we are freshest of up nodes, nok:" << nok << " nTies:" << nTies << rsLog;
+ LOG(1) << "replSet dev we are freshest of up nodes, nok:" << nok << " nTies:" << nTies << rsLog;
assert( ord <= theReplSet->lastOpTimeWritten ); // <= as this may change while we are working...
return true;
}
@@ -267,7 +337,6 @@ namespace mongo {
bool allUp;
int nTies;
if( !weAreFreshest(allUp, nTies) ) {
- log() << "replSet info not electing self, we are not freshest" << rsLog;
return;
}
@@ -324,7 +393,6 @@ namespace mongo {
multiCommand(electCmd, L);
{
- RSBase::lock lk(&rs);
for( list<Target>::iterator i = L.begin(); i != L.end(); i++ ) {
DEV log() << "replSet elect res: " << i->result.toString() << rsLog;
if( i->ok ) {
diff --git a/db/repl/health.cpp b/db/repl/health.cpp
index 762ca90..711b457 100644
--- a/db/repl/health.cpp
+++ b/db/repl/health.cpp
@@ -32,7 +32,6 @@
#include "../dbhelpers.h"
namespace mongo {
-
/* decls for connections.h */
ScopedConn::M& ScopedConn::_map = *(new ScopedConn::M());
mutex ScopedConn::mapMutex("ScopedConn::mapMutex");
@@ -43,9 +42,9 @@ namespace mongo {
using namespace mongoutils::html;
using namespace bson;
- static RamLog _rsLog;
- Tee *rsLog = &_rsLog;
- extern bool replSetBlind;
+ static RamLog * _rsLog = new RamLog( "rs" );
+ Tee *rsLog = _rsLog;
+ extern bool replSetBlind; // for testing
string ago(time_t t) {
if( t == 0 ) return "";
@@ -126,19 +125,6 @@ namespace mongo {
return "";
}
- string MemberState::toString() const {
- if( s == MemberState::RS_STARTUP ) return "STARTUP";
- if( s == MemberState::RS_PRIMARY ) return "PRIMARY";
- if( s == MemberState::RS_SECONDARY ) return "SECONDARY";
- if( s == MemberState::RS_RECOVERING ) return "RECOVERING";
- if( s == MemberState::RS_FATAL ) return "FATAL";
- if( s == MemberState::RS_STARTUP2 ) return "STARTUP2";
- if( s == MemberState::RS_ARBITER ) return "ARBITER";
- if( s == MemberState::RS_DOWN ) return "DOWN";
- if( s == MemberState::RS_ROLLBACK ) return "ROLLBACK";
- return "";
- }
-
extern time_t started;
// oplogdiags in web ui
@@ -208,8 +194,8 @@ namespace mongo {
ss << "<style type=\"text/css\" media=\"screen\">"
"table { font-size:75% }\n"
-// "th { background-color:#bbb; color:#000 }\n"
-// "td,th { padding:.25em }\n"
+ // "th { background-color:#bbb; color:#000 }\n"
+ // "td,th { padding:.25em }\n"
"</style>\n";
ss << table(h, true);
@@ -306,6 +292,8 @@ namespace mongo {
myMinValid = "exception fetching minvalid";
}
+ const Member *_self = this->_self;
+ assert(_self);
{
stringstream s;
/* self row */
@@ -340,20 +328,40 @@ namespace mongo {
void fillRsLog(stringstream& s) {
- _rsLog.toHTML( s );
+ _rsLog->toHTML( s );
}
const Member* ReplSetImpl::findById(unsigned id) const {
- if( id == _self->id() ) return _self;
+ if( _self && id == _self->id() ) return _self;
+
for( Member *m = head(); m; m = m->next() )
if( m->id() == id )
return m;
return 0;
}
+
+ const OpTime ReplSetImpl::lastOtherOpTime() const {
+ OpTime closest(0,0);
+
+ for( Member *m = _members.head(); m; m=m->next() ) {
+ if (!m->hbinfo().up()) {
+ continue;
+ }
+
+ if (m->hbinfo().opTime > closest) {
+ closest = m->hbinfo().opTime;
+ }
+ }
+
+ return closest;
+ }
void ReplSetImpl::_summarizeStatus(BSONObjBuilder& b) const {
vector<BSONObj> v;
+ const Member *_self = this->_self;
+ assert( _self );
+
// add self
{
BSONObjBuilder bb;
@@ -390,6 +398,7 @@ namespace mongo {
bb.appendTimestamp("optime", m->hbinfo().opTime.asDate());
bb.appendDate("optimeDate", m->hbinfo().opTime.getSecs() * 1000LL);
bb.appendTimeT("lastHeartbeat", m->hbinfo().lastHeartbeat);
+ bb.append("pingMs", m->hbinfo().ping);
string s = m->lhb();
if( !s.empty() )
bb.append("errmsg", s);
@@ -400,6 +409,10 @@ namespace mongo {
b.append("set", name());
b.appendTimeT("date", time(0));
b.append("myState", box.getState().s);
+ const Member *syncTarget = _currentSyncTarget;
+ if (syncTarget) {
+ b.append("syncingTo", syncTarget->fullName());
+ }
b.append("members", v);
if( replSetBlind )
b.append("blind",true); // to avoid confusion if set...normally never set except for testing.
diff --git a/db/repl/health.h b/db/repl/health.h
index a32db00..55cca93 100644
--- a/db/repl/health.h
+++ b/db/repl/health.h
@@ -24,11 +24,11 @@ namespace mongo {
bool requestHeartbeat(string setname, string fromHost, string memberFullName, BSONObj& result, int myConfigVersion, int& theirConfigVersion, bool checkEmpty = false);
struct HealthOptions {
- HealthOptions() {
- heartbeatSleepMillis = 2000;
- heartbeatTimeoutMillis = 10000;
- heartbeatConnRetries = 2;
- }
+ HealthOptions() :
+ heartbeatSleepMillis(2000),
+ heartbeatTimeoutMillis( 10000 ),
+ heartbeatConnRetries(2)
+ { }
bool isDefault() const { return *this == HealthOptions(); }
@@ -43,8 +43,8 @@ namespace mongo {
}
bool operator==(const HealthOptions& r) const {
- return heartbeatSleepMillis==r.heartbeatSleepMillis && heartbeatTimeoutMillis==r.heartbeatTimeoutMillis && heartbeatConnRetries==heartbeatConnRetries;
+ return heartbeatSleepMillis==r.heartbeatSleepMillis && heartbeatTimeoutMillis==r.heartbeatTimeoutMillis && heartbeatConnRetries==r.heartbeatConnRetries;
}
};
-
+
}
diff --git a/db/repl/heartbeat.cpp b/db/repl/heartbeat.cpp
index 3972466..7d3f78c 100644
--- a/db/repl/heartbeat.cpp
+++ b/db/repl/heartbeat.cpp
@@ -30,15 +30,16 @@
#include "connections.h"
#include "../../util/unittest.h"
#include "../instance.h"
+#include "../repl.h"
namespace mongo {
using namespace bson;
extern bool replSetBlind;
+ extern ReplSettings replSettings;
- // hacky
- string *discoveredSeed = 0;
+ unsigned int HeartbeatInfo::numPings;
long long HeartbeatInfo::timeDown() const {
if( up() ) return 0;
@@ -52,7 +53,7 @@ namespace mongo {
public:
virtual bool adminOnly() const { return false; }
CmdReplSetHeartbeat() : ReplSetCommand("replSetHeartbeat") { }
- virtual bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ virtual bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
if( replSetBlind )
return false;
@@ -63,9 +64,13 @@ namespace mongo {
return false;
}
+ if (!checkAuth(errmsg, result)) {
+ return false;
+ }
+
/* we want to keep heartbeat connections open when relinquishing primary. tag them here. */
{
- MessagingPort *mp = cc().port();
+ AbstractMessagingPort *mp = cc().port();
if( mp )
mp->tag |= 1;
}
@@ -78,8 +83,8 @@ namespace mongo {
string s = string(cmdObj.getStringField("replSetHeartbeat"));
if( cmdLine.ourSetName() != s ) {
errmsg = "repl set names do not match";
- log() << "cmdline: " << cmdLine._replSet << endl;
- log() << "s: " << s << endl;
+ log() << "replSet set names do not match, our cmdline: " << cmdLine._replSet << rsLog;
+ log() << "replSet s: " << s << rsLog;
result.append("mismatch", true);
return false;
}
@@ -91,8 +96,8 @@ namespace mongo {
}
if( theReplSet == 0 ) {
string from( cmdObj.getStringField("from") );
- if( !from.empty() && discoveredSeed == 0 ) {
- discoveredSeed = new string(from);
+ if( !from.empty() ) {
+ replSettings.discoveredSeeds.insert(from);
}
errmsg = "still initializing";
return false;
@@ -105,6 +110,7 @@ namespace mongo {
}
result.append("set", theReplSet->name());
result.append("state", theReplSet->state().s);
+ result.append("e", theReplSet->iAmElectable());
result.append("hbmsg", theReplSet->hbmsg());
result.append("time", (long long) time(0));
result.appendDate("opTime", theReplSet->lastOpTimeWritten.asDate());
@@ -144,10 +150,10 @@ namespace mongo {
public:
ReplSetHealthPollTask(const HostAndPort& hh, const HeartbeatInfo& mm) : h(hh), m(mm) { }
- string name() const { return "ReplSetHealthPollTask"; }
+ string name() const { return "rsHealthPoll"; }
void doWork() {
if ( !theReplSet ) {
- log(2) << "theReplSet not initialized yet, skipping health poll this round" << rsLog;
+ LOG(2) << "replSet not initialized yet, skipping health poll this round" << rsLog;
return;
}
@@ -157,11 +163,22 @@ namespace mongo {
BSONObj info;
int theirConfigVersion = -10000;
- time_t before = time(0);
+ Timer timer;
bool ok = requestHeartbeat(theReplSet->name(), theReplSet->selfFullName(), h.toString(), info, theReplSet->config().version, theirConfigVersion);
- time_t after = mem.lastHeartbeat = time(0); // we set this on any response - we don't get this far if couldn't connect because exception is thrown
+ mem.ping = (unsigned int)timer.millis();
+
+ time_t before = timer.startTime() / 1000000;
+ // we set this on any response - we don't get this far if we
+ // couldn't connect, because an exception is thrown
+ time_t after = mem.lastHeartbeat = before + (mem.ping / 1000);
+
+ // weight new ping with old pings
+ // on the first ping, just use the ping value
+ if (old.ping != 0) {
+ mem.ping = (unsigned int)((old.ping * .8) + (mem.ping * .2));
+ }
if ( info["time"].isNumber() ) {
long long t = info["time"].numberLong();
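            // Aside: the 80/20 weighting a few lines up is just an exponential moving
            // average of the heartbeat round-trip time. A standalone illustration
            // (hypothetical helper, not part of this patch):
            //
            //   static unsigned smoothedPing( unsigned oldPing , unsigned newPing ) {
            //       if ( oldPing == 0 )
            //           return newPing;                               // first sample: use it as-is
            //       return (unsigned)( ( oldPing * .8 ) + ( newPing * .2 ) );
            //   }
            //
            //   e.g. smoothedPing( 100 , 40 ) == 88; repeated 40ms samples converge toward 40.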
@@ -183,8 +200,10 @@ namespace mongo {
mem.hbstate = MemberState(state.Int());
}
if( ok ) {
+ HeartbeatInfo::numPings++;
+
if( mem.upSince == 0 ) {
- log() << "replSet info " << h.toString() << " is up" << rsLog;
+ log() << "replSet info member " << h.toString() << " is up" << rsLog;
mem.upSince = mem.lastHeartbeat;
}
mem.health = 1.0;
@@ -192,6 +211,30 @@ namespace mongo {
if( info.hasElement("opTime") )
mem.opTime = info["opTime"].Date();
+ // see if this member is in the electable set
+ if( info["e"].eoo() ) {
+ // for backwards compatibility
+ const Member *member = theReplSet->findById(mem.id());
+ if (member && member->config().potentiallyHot()) {
+ theReplSet->addToElectable(mem.id());
+ }
+ else {
+ theReplSet->rmFromElectable(mem.id());
+ }
+ }
+ // add this server to the electable set if it is within 10
+ // seconds of the latest optime we know of
+ else if( info["e"].trueValue() &&
+ mem.opTime >= theReplSet->lastOpTimeWritten.getSecs() - 10) {
+ unsigned lastOp = theReplSet->lastOtherOpTime().getSecs();
+ if (lastOp > 0 && mem.opTime >= lastOp - 10) {
+ theReplSet->addToElectable(mem.id());
+ }
+ }
+ else {
+ theReplSet->rmFromElectable(mem.id());
+ }
+
be cfg = info["config"];
if( cfg.ok() ) {
// received a new config
@@ -208,7 +251,7 @@ namespace mongo {
down(mem, e.what());
}
catch(...) {
- down(mem, "something unusual went wrong");
+ down(mem, "replSet unexpected exception in ReplSetHealthPollTask");
}
m = mem;
@@ -219,7 +262,7 @@ namespace mongo {
bool changed = mem.changed(old);
if( changed ) {
if( old.hbstate != mem.hbstate )
- log() << "replSet member " << h.toString() << ' ' << mem.hbstate.toString() << rsLog;
+ log() << "replSet member " << h.toString() << " is now in state " << mem.hbstate.toString() << rsLog;
}
if( changed || now-last>4 ) {
last = now;
@@ -230,12 +273,15 @@ namespace mongo {
private:
void down(HeartbeatInfo& mem, string msg) {
mem.health = 0.0;
+ mem.ping = 0;
if( mem.upSince || mem.downSince == 0 ) {
mem.upSince = 0;
mem.downSince = jsTime();
+ mem.hbstate = MemberState::RS_DOWN;
log() << "replSet info " << h.toString() << " is down (or slow to respond): " << msg << rsLog;
}
mem.lastHeartbeatMsg = msg;
+ theReplSet->rmFromElectable(mem.id());
}
};
@@ -262,18 +308,13 @@ namespace mongo {
*/
void ReplSetImpl::startThreads() {
task::fork(mgr);
-
- /*Member* m = _members.head();
- while( m ) {
- ReplSetHealthPollTask *task = new ReplSetHealthPollTask(m->h(), m->hbinfo());
- healthTasks.insert(task);
- task::repeat(shared_ptr<task::Task>(task), 2000);
- m = m->next();
- }*/
-
mgr->send( boost::bind(&Manager::msgCheckNewState, theReplSet->mgr) );
boost::thread t(startSyncThread);
+
+ task::fork(ghost);
+
+ // member heartbeats are started in ReplSetImpl::initFromConfig
}
}
diff --git a/db/repl/manager.cpp b/db/repl/manager.cpp
index d2e0764..3c4c0eb 100644
--- a/db/repl/manager.cpp
+++ b/db/repl/manager.cpp
@@ -19,6 +19,7 @@
#include "pch.h"
#include "rs.h"
+#include "connections.h"
#include "../client.h"
namespace mongo {
@@ -50,7 +51,7 @@ namespace mongo {
}
Manager::Manager(ReplSetImpl *_rs) :
- task::Server("rs Manager"), rs(_rs), busyWithElectSelf(false), _primary(NOPRIMARY) {
+ task::Server("rsMgr"), rs(_rs), busyWithElectSelf(false), _primary(NOPRIMARY) {
}
Manager::~Manager() {
@@ -63,10 +64,8 @@ namespace mongo {
}
void Manager::starting() {
- Client::initThread("rs Manager");
- if (!noauth) {
- cc().getAuthenticationInfo()->authorize("local");
- }
+ Client::initThread("rsMgr");
+ replLocalAuth();
}
void Manager::noteARemoteIsPrimary(const Member *m) {
@@ -81,6 +80,45 @@ namespace mongo {
}
}
+ void Manager::checkElectableSet() {
+ unsigned otherOp = rs->lastOtherOpTime().getSecs();
+
+ // make sure the electable set is up-to-date
+ if (rs->elect.aMajoritySeemsToBeUp() &&
+ rs->iAmPotentiallyHot() &&
+ (otherOp == 0 || rs->lastOpTimeWritten.getSecs() >= otherOp - 10)) {
+ theReplSet->addToElectable(rs->selfId());
+ }
+ else {
+ theReplSet->rmFromElectable(rs->selfId());
+ }
+
+ // check if we should ask the primary (possibly ourselves) to step down
+ const Member *highestPriority = theReplSet->getMostElectable();
+ const Member *primary = rs->box.getPrimary();
+
+ if (primary && highestPriority &&
+ highestPriority->config().priority > primary->config().priority) {
+ log() << "stepping down " << primary->fullName() << endl;
+
+ if (primary->h().isSelf()) {
+ // replSetStepDown tries to acquire the same lock
+ // msgCheckNewState takes, so we can't call replSetStepDown on
+ // ourselves.
+ rs->relinquish();
+ }
+ else {
+ BSONObj cmd = BSON( "replSetStepDown" << 1 );
+ ScopedConn conn(primary->fullName());
+ BSONObj result;
+ if (!conn.runCommand("admin", cmd, result, 0)) {
+ log() << "stepping down " << primary->fullName()
+ << " failed: " << result << endl;
+ }
+ }
+ }
+ }
+
/** called as the health threads get new results */
void Manager::msgCheckNewState() {
{
@@ -90,7 +128,9 @@ namespace mongo {
RSBase::lock lk(rs);
if( busyWithElectSelf ) return;
-
+
+ checkElectableSet();
+
const Member *p = rs->box.getPrimary();
if( p && p != rs->_self ) {
if( !p->hbinfo().up() ||
@@ -154,7 +194,7 @@ namespace mongo {
}
if( rs->elect.shouldRelinquish() ) {
- log() << "replSet can't see a majority of the set, relinquishing primary" << rsLog;
+ log() << "can't see a majority of the set, relinquishing primary" << rsLog;
rs->relinquish();
}
@@ -163,9 +203,7 @@ namespace mongo {
if( !rs->iAmPotentiallyHot() ) // if not we never try to be primary
return;
-
- /* TODO : CHECK PRIORITY HERE. can't be elected if priority zero. */
-
+
/* no one seems to be primary. shall we try to elect ourself? */
if( !rs->elect.aMajoritySeemsToBeUp() ) {
static time_t last;
@@ -178,6 +216,10 @@ namespace mongo {
return;
}
+ if( !rs->iAmElectable() ) {
+ return;
+ }
+
busyWithElectSelf = true; // don't try to do further elections & such while we are already working on one.
}
try {
diff --git a/db/repl/multicmd.h b/db/repl/multicmd.h
index df7c4e5..99dabea 100644
--- a/db/repl/multicmd.h
+++ b/db/repl/multicmd.h
@@ -53,16 +53,16 @@ namespace mongo {
};
inline void multiCommand(BSONObj cmd, list<Target>& L) {
- list<BackgroundJob *> jobs;
+ list< shared_ptr<BackgroundJob> > jobs;
for( list<Target>::iterator i = L.begin(); i != L.end(); i++ ) {
Target& d = *i;
_MultiCommandJob *j = new _MultiCommandJob(cmd, d);
+ jobs.push_back( shared_ptr<BackgroundJob>(j) );
j->go();
- jobs.push_back(j);
}
- for( list<BackgroundJob*>::iterator i = jobs.begin(); i != jobs.end(); i++ ) {
+ for( list< shared_ptr<BackgroundJob> >::iterator i = jobs.begin(); i != jobs.end(); i++ ) {
(*i)->wait();
}
}
diff --git a/db/repl/replset_commands.cpp b/db/repl/replset_commands.cpp
index 1d110ac..68dab7e 100644
--- a/db/repl/replset_commands.cpp
+++ b/db/repl/replset_commands.cpp
@@ -17,6 +17,7 @@
#include "pch.h"
#include "../cmdline.h"
#include "../commands.h"
+#include "../repl.h"
#include "health.h"
#include "rs.h"
#include "rs_config.h"
@@ -28,7 +29,7 @@ using namespace bson;
namespace mongo {
- void checkMembersUpForConfigChange(const ReplSetConfig& cfg, bool initial);
+ void checkMembersUpForConfigChange(const ReplSetConfig& cfg, BSONObjBuilder& result, bool initial);
/* commands in other files:
replSetHeartbeat - health.cpp
@@ -44,14 +45,18 @@ namespace mongo {
help << "Just for regression tests.\n";
}
CmdReplSetTest() : ReplSetCommand("replSetTest") { }
- virtual bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ virtual bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
log() << "replSet replSetTest command received: " << cmdObj.toString() << rsLog;
+
+ if (!checkAuth(errmsg, result)) {
+ return false;
+ }
+
if( cmdObj.hasElement("forceInitialSyncFailure") ) {
replSetForceInitialSyncFailure = (unsigned) cmdObj["forceInitialSyncFailure"].Number();
return true;
}
- // may not need this, but if removed check all tests still work:
if( !check(errmsg, result) )
return false;
@@ -63,7 +68,10 @@ namespace mongo {
}
} cmdReplSetTest;
- /** get rollback id */
+ /** get rollback id. used to check if a rollback happened during some interval of time.
+ as consumed, the rollback id is not in any particular order, it simply changes on each rollback.
+ @see incRBID()
+ */
class CmdReplSetGetRBID : public ReplSetCommand {
public:
/* todo: ideally this should only change on rollbacks NOT on mongod restarts also. fix... */
@@ -72,9 +80,11 @@ namespace mongo {
help << "internal";
}
CmdReplSetGetRBID() : ReplSetCommand("replSetGetRBID") {
- rbid = (int) curTimeMillis();
+ // this is ok but micros or combo with some rand() and/or 64 bits might be better --
+ // imagine a restart and a clock correction simultaneously (very unlikely but possible...)
+ rbid = (int) curTimeMillis64();
}
- virtual bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ virtual bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
if( !check(errmsg, result) )
return false;
result.append("rbid",rbid);
@@ -102,7 +112,7 @@ namespace mongo {
help << "\nhttp://www.mongodb.org/display/DOCS/Replica+Set+Commands";
}
CmdReplSetGetStatus() : ReplSetCommand("replSetGetStatus", true) { }
- virtual bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ virtual bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
if ( cmdObj["forShell"].trueValue() )
lastError.disableForCommand();
@@ -122,20 +132,38 @@ namespace mongo {
help << "\nhttp://www.mongodb.org/display/DOCS/Replica+Set+Commands";
}
CmdReplSetReconfig() : ReplSetCommand("replSetReconfig"), mutex("rsreconfig") { }
- virtual bool run(const string& a, BSONObj& b, string& errmsg, BSONObjBuilder& c, bool d) {
+ virtual bool run(const string& a, BSONObj& b, int e, string& errmsg, BSONObjBuilder& c, bool d) {
try {
rwlock_try_write lk(mutex);
- return _run(a,b,errmsg,c,d);
+ return _run(a,b,e,errmsg,c,d);
}
catch(rwlock_try_write::exception&) { }
errmsg = "a replSetReconfig is already in progress";
return false;
}
private:
- bool _run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
- if( !check(errmsg, result) )
+ bool _run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ if ( !checkAuth(errmsg, result) ) {
return false;
- if( !theReplSet->box.getState().primary() ) {
+ }
+
+ if( cmdObj["replSetReconfig"].type() != Object ) {
+ errmsg = "no configuration specified";
+ return false;
+ }
+
+ bool force = cmdObj.hasField("force") && cmdObj["force"].trueValue();
+ if( force && !theReplSet ) {
+ replSettings.reconfig = cmdObj["replSetReconfig"].Obj().getOwned();
+ result.append("msg", "will try this config momentarily, try running rs.conf() again in a few seconds");
+ return true;
+ }
+
+ if ( !check(errmsg, result) ) {
+ return false;
+ }
+
+ if( !force && !theReplSet->box.getState().primary() ) {
errmsg = "replSetReconfig command must be sent to the current replica set primary.";
return false;
}
@@ -152,18 +180,8 @@ namespace mongo {
}
}
- if( cmdObj["replSetReconfig"].type() != Object ) {
- errmsg = "no configuration specified";
- return false;
- }
-
- /** TODO
- Support changes when a majority, but not all, members of a set are up.
- Determine what changes should not be allowed as they would cause erroneous states.
- What should be possible when a majority is not up?
- */
try {
- ReplSetConfig newConfig(cmdObj["replSetReconfig"].Obj());
+ ReplSetConfig newConfig(cmdObj["replSetReconfig"].Obj(), force);
log() << "replSet replSetReconfig config object parses ok, " << newConfig.members.size() << " members specified" << rsLog;
@@ -171,12 +189,12 @@ namespace mongo {
return false;
}
- checkMembersUpForConfigChange(newConfig,false);
+ checkMembersUpForConfigChange(newConfig, result, false);
log() << "replSet replSetReconfig [2]" << rsLog;
theReplSet->haveNewConfig(newConfig, true);
- ReplSet::startupStatusMsg = "replSetReconfig'd";
+ ReplSet::startupStatusMsg.set("replSetReconfig'd");
}
catch( DBException& e ) {
log() << "replSet replSetReconfig exception: " << e.what() << rsLog;
@@ -199,7 +217,7 @@ namespace mongo {
}
CmdReplSetFreeze() : ReplSetCommand("replSetFreeze") { }
- virtual bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ virtual bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
if( !check(errmsg, result) )
return false;
int secs = (int) cmdObj.firstElement().numberInt();
@@ -223,13 +241,38 @@ namespace mongo {
}
CmdReplSetStepDown() : ReplSetCommand("replSetStepDown") { }
- virtual bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ virtual bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
if( !check(errmsg, result) )
return false;
if( !theReplSet->box.getState().primary() ) {
errmsg = "not primary so can't step down";
return false;
}
+
+ bool force = cmdObj.hasField("force") && cmdObj["force"].trueValue();
+
+ // only step down if there is another node synced to within 10
+ // seconds of this node
+ if (!force) {
+ long long int lastOp = (long long int)theReplSet->lastOpTimeWritten.getSecs();
+ long long int closest = (long long int)theReplSet->lastOtherOpTime().getSecs();
+
+ long long int diff = lastOp - closest;
+ result.append("closest", closest);
+ result.append("difference", diff);
+
+ if (diff < 0) {
+ // not our problem, but we'll wait until things settle down
+ errmsg = "someone is ahead of the primary?";
+ return false;
+ }
+
+ if (diff > 10) {
+ errmsg = "no secondaries within 10 seconds of my optime";
+ return false;
+ }
+ }
+
int secs = (int) cmdObj.firstElement().numberInt();
if( secs == 0 )
secs = 60;
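The non-forced step-down guard added above reduces to a lag comparison in seconds; restated as a self-contained sketch (hypothetical helper, not part of the patch):

    // okToStepDown: lastOp = primary's last applied optime (secs),
    // closest = freshest reachable secondary's optime (secs).
    static bool okToStepDown( long long lastOp , long long closest , string& errmsg ) {
        long long diff = lastOp - closest;
        if ( diff < 0 ) {               // a secondary looks ahead of the primary; wait for things to settle
            errmsg = "someone is ahead of the primary?";
            return false;
        }
        if ( diff > 10 ) {              // no secondary within 10 seconds of our optime
            errmsg = "no secondaries within 10 seconds of my optime";
            return false;
        }
        return true;
    }
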
diff --git a/db/repl/rs.cpp b/db/repl/rs.cpp
index bbfb057..1fbbc10 100644
--- a/db/repl/rs.cpp
+++ b/db/repl/rs.cpp
@@ -16,7 +16,7 @@
#include "pch.h"
#include "../cmdline.h"
-#include "../../util/sock.h"
+#include "../../util/net/sock.h"
#include "../client.h"
#include "../../client/dbclient.h"
#include "../dbhelpers.h"
@@ -24,14 +24,20 @@
#include "rs.h"
#include "connections.h"
#include "../repl.h"
+#include "../instance.h"
-namespace mongo {
+using namespace std;
+namespace mongo {
+
using namespace bson;
bool replSet = false;
ReplSet *theReplSet = 0;
- extern string *discoveredSeed;
+
+ bool isCurrentlyAReplSetPrimary() {
+ return theReplSet && theReplSet->isPrimary();
+ }
void ReplSetImpl::sethbmsg(string s, int logLevel) {
static time_t lastLogged;
@@ -57,21 +63,71 @@ namespace mongo {
}
void ReplSetImpl::assumePrimary() {
+ LOG(2) << "replSet assuming primary" << endl;
assert( iAmPotentiallyHot() );
writelock lk("admin."); // so we are synchronized with _logOp()
- box.setSelfPrimary(_self);
- //log() << "replSet PRIMARY" << rsLog; // self (" << _self->id() << ") is now primary" << rsLog;
+
+ // Make sure that new OpTimes are higher than existing ones even with clock skew
+ DBDirectClient c;
+ BSONObj lastOp = c.findOne( "local.oplog.rs", Query().sort(reverseNaturalObj), NULL, QueryOption_SlaveOk );
+ if ( !lastOp.isEmpty() ) {
+ OpTime::setLast( lastOp[ "ts" ].date() );
+ }
+
+ changeState(MemberState::RS_PRIMARY);
}
void ReplSetImpl::changeState(MemberState s) { box.change(s, _self); }
+ void ReplSetImpl::setMaintenanceMode(const bool inc) {
+ lock lk(this);
+
+ if (inc) {
+ log() << "replSet going into maintenance mode (" << _maintenanceMode << " other tasks)" << rsLog;
+
+ _maintenanceMode++;
+ changeState(MemberState::RS_RECOVERING);
+ }
+ else {
+ _maintenanceMode--;
+ // no need to change state, syncTail will try to go live as a secondary soon
+
+ log() << "leaving maintenance mode (" << _maintenanceMode << " other tasks)" << rsLog;
+ }
+ }
+
+ Member* ReplSetImpl::getMostElectable() {
+ lock lk(this);
+
+ Member *max = 0;
+
+ for (set<unsigned>::iterator it = _electableSet.begin(); it != _electableSet.end(); it++) {
+ const Member *temp = findById(*it);
+ if (!temp) {
+ log() << "couldn't find member: " << *it << endl;
+ _electableSet.erase(*it);
+ continue;
+ }
+ if (!max || max->config().priority < temp->config().priority) {
+ max = (Member*)temp;
+ }
+ }
+
+ return max;
+ }
+
const bool closeOnRelinquish = true;
void ReplSetImpl::relinquish() {
+ LOG(2) << "replSet attempting to relinquish" << endl;
if( box.getState().primary() ) {
- log() << "replSet relinquishing primary state" << rsLog;
- changeState(MemberState::RS_SECONDARY);
-
+ {
+ writelock lk("admin."); // so we are synchronized with _logOp()
+
+ log() << "replSet relinquishing primary state" << rsLog;
+ changeState(MemberState::RS_SECONDARY);
+ }
+
if( closeOnRelinquish ) {
                /* close sockets that were talking to us so they don't blithely send many writes that will fail
with "not master" (of course client could check result code, but in case they are not)
@@ -173,6 +229,8 @@ namespace mongo {
}
void ReplSetImpl::_fillIsMaster(BSONObjBuilder& b) {
+ lock lk(this);
+
const StateBox::SP sp = box.get();
bool isp = sp.state.primary();
b.append("setName", name());
@@ -203,9 +261,13 @@ namespace mongo {
if( m )
b.append("primary", m->h().toString());
}
+ else {
+ b.append("primary", _self->fullName());
+ }
+
if( myConfig().arbiterOnly )
b.append("arbiterOnly", true);
- if( myConfig().priority == 0 )
+ if( myConfig().priority == 0 && !myConfig().arbiterOnly)
b.append("passive", true);
if( myConfig().slaveDelay )
b.append("slaveDelay", myConfig().slaveDelay);
@@ -213,6 +275,13 @@ namespace mongo {
b.append("hidden", true);
if( !myConfig().buildIndexes )
b.append("buildIndexes", false);
+ if( !myConfig().tags.empty() ) {
+ BSONObjBuilder a;
+ for( map<string,string>::const_iterator i = myConfig().tags.begin(); i != myConfig().tags.end(); i++ )
+ a.append((*i).first, (*i).second);
+ b.append("tags", a.done());
+ }
+ b.append("me", myConfig().h.toString());
}
/** @param cfgString <setname>/<seedhost1>,<seedhost2> */
@@ -259,19 +328,22 @@ namespace mongo {
}
ReplSetImpl::ReplSetImpl(ReplSetCmdline& replSetCmdline) : elect(this),
+ _currentSyncTarget(0),
+ _hbmsgTime(0),
_self(0),
- mgr( new Manager(this) ) {
+ _maintenanceMode(0),
+ mgr( new Manager(this) ),
+ ghost( new GhostSync(this) ) {
+
_cfg = 0;
memset(_hbmsg, 0, sizeof(_hbmsg));
- *_hbmsg = '.'; // temp...just to see
+ strcpy( _hbmsg , "initial startup" );
lastH = 0;
changeState(MemberState::RS_STARTUP);
_seeds = &replSetCmdline.seeds;
- //for( vector<HostAndPort>::iterator i = seeds->begin(); i != seeds->end(); i++ )
- // addMemberIfMissing(*i);
- log(1) << "replSet beginning startup..." << rsLog;
+ LOG(1) << "replSet beginning startup..." << rsLog;
loadConfig();
@@ -282,7 +354,7 @@ namespace mongo {
for( set<HostAndPort>::iterator i = replSetCmdline.seedSet.begin(); i != replSetCmdline.seedSet.end(); i++ ) {
if( i->isSelf() ) {
if( sss == 1 )
- log(1) << "replSet warning self is listed in the seed list and there are no other seeds listed did you intend that?" << rsLog;
+ LOG(1) << "replSet warning self is listed in the seed list and there are no other seeds listed did you intend that?" << rsLog;
}
else
log() << "replSet warning command line seed " << i->toString() << " is not present in the current repl set config" << rsLog;
@@ -291,14 +363,13 @@ namespace mongo {
void newReplUp();
- void ReplSetImpl::loadLastOpTimeWritten() {
- //assert( lastOpTimeWritten.isNull() );
+ void ReplSetImpl::loadLastOpTimeWritten(bool quiet) {
readlock lk(rsoplog);
BSONObj o;
if( Helpers::getLast(rsoplog, o) ) {
lastH = o["h"].numberLong();
lastOpTimeWritten = o["ts"]._opTime();
- uassert(13290, "bad replSet oplog entry?", !lastOpTimeWritten.isNull());
+ uassert(13290, "bad replSet oplog entry?", quiet || !lastOpTimeWritten.isNull());
}
}
@@ -326,7 +397,10 @@ namespace mongo {
extern BSONObj *getLastErrorDefault;
void ReplSetImpl::setSelfTo(Member *m) {
+ // already locked in initFromConfig
_self = m;
+ _id = m->id();
+ _config = m->config();
if( m ) _buildIndexes = m->config().buildIndexes;
else _buildIndexes = true;
}
@@ -345,29 +419,32 @@ namespace mongo {
getLastErrorDefault = new BSONObj( c.getLastErrorDefaults );
}
- list<const ReplSetConfig::MemberCfg*> newOnes;
+ list<ReplSetConfig::MemberCfg*> newOnes;
+ // additive short-cuts the new config setup. If we are just adding a
+ // node/nodes and nothing else is changing, this is additive. If it's
+ // not a reconfig, we're not adding anything
bool additive = reconf;
{
unsigned nfound = 0;
int me = 0;
for( vector<ReplSetConfig::MemberCfg>::iterator i = c.members.begin(); i != c.members.end(); i++ ) {
- const ReplSetConfig::MemberCfg& m = *i;
+
+ ReplSetConfig::MemberCfg& m = *i;
if( m.h.isSelf() ) {
- nfound++;
me++;
- if( !reconf || (_self && _self->id() == (unsigned) m._id) )
- ;
- else {
- log() << "replSet " << _self->id() << ' ' << m._id << rsLog;
+ }
+
+ if( reconf ) {
+ if (m.h.isSelf() && (!_self || (int)_self->id() != m._id)) {
+ log() << "self doesn't match: " << m._id << rsLog;
assert(false);
}
- }
- else if( reconf ) {
+
const Member *old = findById(m._id);
if( old ) {
nfound++;
assert( (int) old->id() == m._id );
- if( old->config() == m ) {
+ if( old->config() != m ) {
additive = false;
}
}
@@ -375,23 +452,21 @@ namespace mongo {
newOnes.push_back(&m);
}
}
-
- // change timeout settings, if necessary
- ScopedConn conn(m.h.toString());
- conn.setTimeout(c.ho.heartbeatTimeoutMillis/1000.0);
}
if( me == 0 ) {
- // initial startup with fastsync
- if (!reconf && replSettings.fastsync) {
- return false;
- }
- // log() << "replSet config : " << _cfg->toString() << rsLog;
+ _members.orphanAll();
+ // hbs must continue to pick up new config
+ // stop sync thread
+ box.set(MemberState::RS_STARTUP, 0);
+
+ // go into holding pattern
log() << "replSet error self not present in the repl set configuration:" << rsLog;
log() << c.toString() << rsLog;
- uasserted(13497, "replSet error self not present in the configuration");
+ return false;
}
uassert( 13302, "replSet error self appears twice in the repl set configuration", me<=1 );
+ // if we found different members than the original config, reload everything
if( reconf && config().members.size() != nfound )
additive = false;
}
@@ -402,10 +477,11 @@ namespace mongo {
_name = _cfg->_id;
assert( !_name.empty() );
+ // this is a shortcut for simple changes
if( additive ) {
log() << "replSet info : additive change to configuration" << rsLog;
- for( list<const ReplSetConfig::MemberCfg*>::const_iterator i = newOnes.begin(); i != newOnes.end(); i++ ) {
- const ReplSetConfig::MemberCfg* m = *i;
+ for( list<ReplSetConfig::MemberCfg*>::const_iterator i = newOnes.begin(); i != newOnes.end(); i++ ) {
+ ReplSetConfig::MemberCfg *m = *i;
Member *mi = new Member(m->h, m->_id, m, false);
/** we will indicate that new members are up() initially so that we don't relinquish our
@@ -417,6 +493,11 @@ namespace mongo {
_members.push(mi);
startHealthTaskFor(mi);
}
+
+ // if we aren't creating new members, we may have to update the
+ // groups for the current ones
+ _cfg->updateMembers(_members);
+
return true;
}
@@ -433,21 +514,21 @@ namespace mongo {
}
forgetPrimary();
- bool iWasArbiterOnly = _self ? iAmArbiterOnly() : false;
- setSelfTo(0);
+ // not setting _self to 0 as other threads use _self w/o locking
+ int me = 0;
+
+ // For logging
+ string members = "";
+
for( vector<ReplSetConfig::MemberCfg>::iterator i = _cfg->members.begin(); i != _cfg->members.end(); i++ ) {
- const ReplSetConfig::MemberCfg& m = *i;
+ ReplSetConfig::MemberCfg& m = *i;
Member *mi;
+ members += ( members == "" ? "" : ", " ) + m.h.toString();
if( m.h.isSelf() ) {
- assert( _self == 0 );
+ assert( me++ == 0 );
mi = new Member(m.h, m._id, &m, true);
setSelfTo(mi);
- // if the arbiter status changed
- if (iWasArbiterOnly ^ iAmArbiterOnly()) {
- _changeArbiterState();
- }
-
if( (int)mi->id() == oldPrimaryId )
box.setSelfPrimary(mi);
}
@@ -459,38 +540,12 @@ namespace mongo {
box.setOtherPrimary(mi);
}
}
- return true;
- }
- void startSyncThread();
-
- void ReplSetImpl::_changeArbiterState() {
- if (iAmArbiterOnly()) {
- changeState(MemberState::RS_ARBITER);
-
- // if there is an oplog, free it
- // not sure if this is necessary, maybe just leave the oplog and let
- // the user delete it if they want the space?
- writelock lk(rsoplog);
- Client::Context c(rsoplog);
- NamespaceDetails *d = nsdetails(rsoplog);
- if (d) {
- string errmsg;
- bob res;
- dropCollection(rsoplog, errmsg, res);
-
- // clear last op time to force initial sync (if the arbiter
- // becomes a "normal" server again)
- lastOpTimeWritten = OpTime();
- }
+ if( me == 0 ){
+ log() << "replSet warning did not detect own host in full reconfig, members " << members << " config: " << c << rsLog;
}
- else {
- changeState(MemberState::RS_RECOVERING);
- // oplog will be allocated when sync begins
- /* TODO : could this cause two sync threads to exist (race condition)? */
- boost::thread t(startSyncThread);
- }
+ return true;
}
// Our own config must be the first one.
@@ -514,7 +569,6 @@ namespace mongo {
if( highest->version > myVersion && highest->version >= 0 ) {
log() << "replSet got config version " << highest->version << " from a remote, saving locally" << rsLog;
- writelock lk("admin.");
highest->saveConfigLocally(BSONObj());
}
return true;
@@ -523,7 +577,7 @@ namespace mongo {
void ReplSetImpl::loadConfig() {
while( 1 ) {
startupStatus = LOADINGCONFIG;
- startupStatusMsg = "loading " + rsConfigNs + " config (LOADINGCONFIG)";
+ startupStatusMsg.set("loading " + rsConfigNs + " config (LOADINGCONFIG)");
try {
vector<ReplSetConfig> configs;
try {
@@ -531,7 +585,6 @@ namespace mongo {
}
catch(DBException& e) {
log() << "replSet exception loading our local replset configuration object : " << e.toString() << rsLog;
- throw;
}
for( vector<HostAndPort>::const_iterator i = _seeds->begin(); i != _seeds->end(); i++ ) {
try {
@@ -542,12 +595,25 @@ namespace mongo {
}
}
- if( discoveredSeed ) {
+ if( replSettings.discoveredSeeds.size() > 0 ) {
+ for (set<string>::iterator i = replSettings.discoveredSeeds.begin(); i != replSettings.discoveredSeeds.end(); i++) {
+ try {
+ configs.push_back( ReplSetConfig(HostAndPort(*i)) );
+ }
+ catch( DBException& ) {
+ log(1) << "replSet exception trying to load config from discovered seed " << *i << rsLog;
+ replSettings.discoveredSeeds.erase(*i);
+ }
+ }
+ }
+
+ if (!replSettings.reconfig.isEmpty()) {
try {
- configs.push_back( ReplSetConfig(HostAndPort(*discoveredSeed)) );
+ configs.push_back(ReplSetConfig(replSettings.reconfig, true));
}
- catch( DBException& ) {
- log(1) << "replSet exception trying to load config from discovered seed " << *discoveredSeed << rsLog;
+ catch( DBException& re) {
+ log() << "couldn't load reconfig: " << re.what() << endl;
+ replSettings.reconfig = BSONObj();
}
}
@@ -563,17 +629,17 @@ namespace mongo {
if( nempty == (int) configs.size() ) {
startupStatus = EMPTYCONFIG;
- startupStatusMsg = "can't get " + rsConfigNs + " config from self or any seed (EMPTYCONFIG)";
+ startupStatusMsg.set("can't get " + rsConfigNs + " config from self or any seed (EMPTYCONFIG)");
log() << "replSet can't get " << rsConfigNs << " config from self or any seed (EMPTYCONFIG)" << rsLog;
static unsigned once;
if( ++once == 1 )
log() << "replSet info you may need to run replSetInitiate -- rs.initiate() in the shell -- if that is not already done" << rsLog;
if( _seeds->size() == 0 )
- log(1) << "replSet info no seed hosts were specified on the --replSet command line" << rsLog;
+ LOG(1) << "replSet info no seed hosts were specified on the --replSet command line" << rsLog;
}
else {
startupStatus = EMPTYUNREACHABLE;
- startupStatusMsg = "can't currently get " + rsConfigNs + " config from self or any seed (EMPTYUNREACHABLE)";
+ startupStatusMsg.set("can't currently get " + rsConfigNs + " config from self or any seed (EMPTYUNREACHABLE)");
log() << "replSet can't get " << rsConfigNs << " config from self or any seed (yet)" << rsLog;
}
@@ -589,7 +655,7 @@ namespace mongo {
}
catch(DBException& e) {
startupStatus = BADCONFIG;
- startupStatusMsg = "replSet error loading set config (BADCONFIG)";
+ startupStatusMsg.set("replSet error loading set config (BADCONFIG)");
log() << "replSet error loading configurations " << e.toString() << rsLog;
log() << "replSet error replication will not start" << rsLog;
sethbmsg("error loading set config");
@@ -598,27 +664,26 @@ namespace mongo {
}
break;
}
- startupStatusMsg = "? started";
+ startupStatusMsg.set("? started");
startupStatus = STARTED;
}
void ReplSetImpl::_fatal() {
- //lock l(this);
box.set(MemberState::RS_FATAL, 0);
- //sethbmsg("fatal error");
log() << "replSet error fatal, stopping replication" << rsLog;
}
void ReplSet::haveNewConfig(ReplSetConfig& newConfig, bool addComment) {
- lock l(this); // convention is to lock replset before taking the db rwlock
- writelock lk("");
bo comment;
if( addComment )
comment = BSON( "msg" << "Reconfig set" << "version" << newConfig.version );
+
newConfig.saveConfigLocally(comment);
+
try {
- initFromConfig(newConfig, true);
- log() << "replSet replSetReconfig new config saved locally" << rsLog;
+ if (initFromConfig(newConfig, true)) {
+ log() << "replSet replSetReconfig new config saved locally" << rsLog;
+ }
}
catch(DBException& e) {
if( e.getCode() == 13497 /* removed from set */ ) {
@@ -652,16 +717,14 @@ namespace mongo {
terminates.
*/
void startReplSets(ReplSetCmdline *replSetCmdline) {
- Client::initThread("startReplSets");
+ Client::initThread("rsStart");
try {
assert( theReplSet == 0 );
if( replSetCmdline == 0 ) {
assert(!replSet);
return;
}
- if( !noauth ) {
- cc().getAuthenticationInfo()->authorize("local");
- }
+ replLocalAuth();
(theReplSet = new ReplSet(*replSetCmdline))->go();
}
catch(std::exception& e) {
@@ -672,6 +735,13 @@ namespace mongo {
cc().shutdown();
}
+ void replLocalAuth() {
+ if ( noauth )
+ return;
+ cc().getAuthenticationInfo()->authorize("local","_repl");
+ }
+
+
}
namespace boost {
diff --git a/db/repl/rs.h b/db/repl/rs.h
index ea9aef1..61041a6 100644
--- a/db/repl/rs.h
+++ b/db/repl/rs.h
@@ -21,13 +21,26 @@
#include "../../util/concurrency/list.h"
#include "../../util/concurrency/value.h"
#include "../../util/concurrency/msg.h"
-#include "../../util/hostandport.h"
+#include "../../util/net/hostandport.h"
#include "../commands.h"
+#include "../oplogreader.h"
#include "rs_exception.h"
#include "rs_optime.h"
#include "rs_member.h"
#include "rs_config.h"
+/**
+ * Order of Events
+ *
+ * On startup, if the --replSet option is present, startReplSets is called.
+ * startReplSets forks off a new thread for replica set activities. It creates
+ * the global theReplSet variable and calls go() on it.
+ *
+ * theReplSet's constructor changes the replica set's state to RS_STARTUP,
+ * starts the replica set manager, and loads the config (if the replica set
+ * has been initialized).
+ */
+
namespace mongo {
struct HowToFixUp;
@@ -41,11 +54,15 @@ namespace mongo {
/* member of a replica set */
class Member : public List1<Member>::Base {
+ private:
        ~Member(); // intentionally unimplemented as it should never be called -- see List1<>::Base.
+ Member(const Member&);
public:
- Member(HostAndPort h, unsigned ord, const ReplSetConfig::MemberCfg *c, bool self);
+ Member(HostAndPort h, unsigned ord, ReplSetConfig::MemberCfg *c, bool self);
string fullName() const { return h().toString(); }
const ReplSetConfig::MemberCfg& config() const { return _config; }
+ ReplSetConfig::MemberCfg& configw() { return _config; }
const HeartbeatInfo& hbinfo() const { return _hbinfo; }
HeartbeatInfo& get_hbinfo() { return _hbinfo; }
string lhb() const { return _hbinfo.lastHeartbeatMsg; }
@@ -58,7 +75,7 @@ namespace mongo {
private:
friend class ReplSetImpl;
- const ReplSetConfig::MemberCfg _config;
+ ReplSetConfig::MemberCfg _config;
const HostAndPort _h;
HeartbeatInfo _hbinfo;
};
@@ -75,6 +92,7 @@ namespace mongo {
const Member* findOtherPrimary(bool& two);
void noteARemoteIsPrimary(const Member *);
+ void checkElectableSet();
virtual void starting();
public:
Manager(ReplSetImpl *rs);
@@ -83,6 +101,47 @@ namespace mongo {
void msgCheckNewState();
};
+ class GhostSync : public task::Server {
+ struct GhostSlave {
+ GhostSlave() : last(0), slave(0), init(false) {}
+ OplogReader reader;
+ OpTime last;
+ Member* slave;
+ bool init;
+ };
+ /**
+ * This is a cache of ghost slaves
+ */
+ typedef map<mongo::OID,GhostSlave> MAP;
+ MAP _ghostCache;
+ RWLock _lock; // protects _ghostCache
+ ReplSetImpl *rs;
+ virtual void starting();
+ public:
+ GhostSync(ReplSetImpl *_rs) : task::Server("rsGhostSync"), _lock("GhostSync"), rs(_rs) {}
+ ~GhostSync() {
+ log() << "~GhostSync() called" << rsLog;
+ }
+
+ /**
+ * Replica sets can sync in a hierarchical fashion, which throws off w
+ * calculation on the master. percolate() faux-syncs from an upstream
+ * node so that the primary will know what the slaves are up to.
+ *
+ * We can't just directly sync to the primary because it could be
+ * unreachable, e.g., S1--->S2--->S3--->P. S2 should ghost sync from S3
+ * and S3 can ghost sync from the primary.
+ *
+ * Say we have an S1--->S2--->P situation and this node is S2. rid
+ * would refer to S1. S2 would create a ghost slave of S1 and connect
+ * it to P (_currentSyncTarget). Then it would use this connection to
+ * pretend to be S1, replicating off of P.
+ */
+ void percolate(const BSONObj& rid, const OpTime& last);
+ void associateSlave(const BSONObj& rid, const int memberId);
+ void updateSlave(const mongo::OID& id, const OpTime& last);
+ };
+
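+ // A sketch of the cache bookkeeping implied by the declarations above (illustrative
+ // only -- the committed GhostSync methods are defined elsewhere and may differ): look
+ // up the ghost slave under the RWLock guarding _ghostCache and record its latest
+ // optime; the map itself is not resized here, so a read lock is assumed to suffice.
+ //
+ //   void GhostSync::updateSlave(const mongo::OID& id, const OpTime& last) {
+ //       rwlock lk( _lock , false );
+ //       MAP::iterator i = _ghostCache.find( id );
+ //       if ( i == _ghostCache.end() ) {
+ //           log() << "replSet ghost slave " << id << " not in cache" << rsLog;
+ //           return;
+ //       }
+ //       i->second.last = last;
+ //   }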
struct Target;
class Consensus {
@@ -92,7 +151,8 @@ namespace mongo {
time_t when;
unsigned who;
};
- Atomic<LastYea> ly;
+ static mutex lyMutex;
+ Guarded<LastYea,lyMutex> ly;
unsigned yea(unsigned memberId); // throws VoteException
void electionFailed(unsigned meid);
void _electSelf();
@@ -117,7 +177,12 @@ namespace mongo {
void multiCommand(BSONObj cmd, list<Target>& L);
};
- /** most operations on a ReplSet object should be done while locked. that logic implemented here. */
+ /**
+ * Most operations on a ReplSet object should be done while locked. That
+ * logic is implemented here.
+ *
+ * Order of locking: lock the replica set, then take a rwlock.
+ */
class RSBase : boost::noncopyable {
public:
const unsigned magic;
@@ -133,6 +198,7 @@ namespace mongo {
log() << "replSet ~RSBase called" << rsLog;
}
+ public:
class lock {
RSBase& rsbase;
auto_ptr<scoped_lock> sl;
@@ -156,7 +222,6 @@ namespace mongo {
}
};
- public:
/* for asserts */
bool locked() const { return _locked != 0; }
@@ -178,13 +243,19 @@ namespace mongo {
const Member *primary;
};
const SP get() {
- scoped_lock lk(m);
+ rwlock lk(m, false);
return sp;
}
- MemberState getState() const { return sp.state; }
- const Member* getPrimary() const { return sp.primary; }
+ MemberState getState() const {
+ rwlock lk(m, false);
+ return sp.state;
+ }
+ const Member* getPrimary() const {
+ rwlock lk(m, false);
+ return sp.primary;
+ }
void change(MemberState s, const Member *self) {
- scoped_lock lk(m);
+ rwlock lk(m, true);
if( sp.state != s ) {
log() << "replSet " << s.toString() << rsLog;
}
@@ -198,24 +269,25 @@ namespace mongo {
}
}
void set(MemberState s, const Member *p) {
- scoped_lock lk(m);
- sp.state = s; sp.primary = p;
+ rwlock lk(m, true);
+ sp.state = s;
+ sp.primary = p;
}
void setSelfPrimary(const Member *self) { change(MemberState::RS_PRIMARY, self); }
void setOtherPrimary(const Member *mem) {
- scoped_lock lk(m);
+ rwlock lk(m, true);
assert( !sp.state.primary() );
sp.primary = mem;
}
void noteRemoteIsPrimary(const Member *remote) {
- scoped_lock lk(m);
+ rwlock lk(m, true);
if( !sp.state.secondary() && !sp.state.fatal() )
sp.state = MemberState::RS_RECOVERING;
sp.primary = remote;
}
StateBox() : m("StateBox") { }
private:
- mongo::mutex m;
+ RWLock m;
SP sp;
};
@@ -267,10 +339,17 @@ namespace mongo {
bool _freeze(int secs);
private:
void assumePrimary();
- void loadLastOpTimeWritten();
+ void loadLastOpTimeWritten(bool quiet=false);
void changeState(MemberState s);
+
+ /**
+ * Find the closest member (using ping time) with a higher latest optime.
+ */
const Member* getMemberToSyncTo();
- void _changeArbiterState();
+ Member* _currentSyncTarget;
+
+ // set of electable members' _ids
+ set<unsigned> _electableSet;
protected:
// "heartbeat message"
// sent in requestHeartbeat respond in field "hbm"
@@ -278,8 +357,54 @@ namespace mongo {
time_t _hbmsgTime; // when it was logged
public:
void sethbmsg(string s, int logLevel = 0);
+
+ /**
+ * Election with Priorities
+ *
+ * Each node (n) keeps a set of nodes that could be elected primary.
+ * Each node in this set:
+ *
+ * 1. can connect to a majority of the set
+ * 2. has a priority greater than 0
+ * 3. has an optime within 10 seconds of the most up-to-date node
+ * that n can reach
+ *
+ * If a node fails to meet one or more of these criteria, it is removed
+ * from the list. This list is updated whenever the node receives a
+ * heartbeat.
+ *
+         * When a node sends an "am I freshest?" query, the node receiving the
+         * query checks its electable list to make sure that no one else is
+         * electable AND higher priority. If this check passes, the node
+         * returns an "ok" response; if not, it vetoes.
+ *
+ * If a node is primary and there is another node with higher priority
+ * on the electable list (i.e., it must be synced to within 10 seconds
+ * of the current primary), the node (or nodes) with connections to both
+ * the primary and the secondary with higher priority will issue
+ * replSetStepDown requests to the primary to allow the higher-priority
+ * node to take over.
+ */
+ void addToElectable(const unsigned m) { lock lk(this); _electableSet.insert(m); }
+ void rmFromElectable(const unsigned m) { lock lk(this); _electableSet.erase(m); }
+ bool iAmElectable() { lock lk(this); return _electableSet.find(_self->id()) != _electableSet.end(); }
+ bool isElectable(const unsigned id) { lock lk(this); return _electableSet.find(id) != _electableSet.end(); }
+ Member* getMostElectable();
protected:
- bool initFromConfig(ReplSetConfig& c, bool reconf=false); // true if ok; throws if config really bad; false if config doesn't include self
+ /**
+ * Load a new config as the replica set's main config.
+ *
+         * If there is a "simple" change (just adding a node), this takes a
+         * shortcut when applying the config. Returns true if the config was
+         * changed. Returns false if the config doesn't include this node.
+         * Throws an exception if
+ * something goes very wrong.
+ *
+ * Behavior to note:
+ * - locks this
+ * - intentionally leaks the old _cfg and any old _members (if the
+ * change isn't strictly additive)
+ */
+ bool initFromConfig(ReplSetConfig& c, bool reconf=false);
void _fillIsMaster(BSONObjBuilder&);
void _fillIsMasterHost(const Member*, vector<string>&, vector<string>&, vector<string>&);
const ReplSetConfig& config() { return *_cfg; }
@@ -301,27 +426,48 @@ namespace mongo {
const vector<HostAndPort> *_seeds;
ReplSetConfig *_cfg;
- /** load our configuration from admin.replset. try seed machines too.
- @return true if ok; throws if config really bad; false if config doesn't include self
- */
+ /**
+         * Finds the configuration with the highest version number and
+         * attempts to load it.
+ */
bool _loadConfigFinish(vector<ReplSetConfig>& v);
+ /**
+ * Gather all possible configs (from command line seeds, our own config
+ * doc, and any hosts listed therein) and try to initiate from the most
+ * recent config we find.
+ */
void loadConfig();
list<HostAndPort> memberHostnames() const;
- const ReplSetConfig::MemberCfg& myConfig() const { return _self->config(); }
+ const ReplSetConfig::MemberCfg& myConfig() const { return _config; }
bool iAmArbiterOnly() const { return myConfig().arbiterOnly; }
- bool iAmPotentiallyHot() const { return myConfig().potentiallyHot(); }
+ bool iAmPotentiallyHot() const {
+ return myConfig().potentiallyHot() && // not an arbiter
+ elect.steppedDown <= time(0) && // not stepped down/frozen
+ state() == MemberState::RS_SECONDARY; // not stale
+ }
protected:
Member *_self;
bool _buildIndexes; // = _self->config().buildIndexes
void setSelfTo(Member *); // use this as it sets buildIndexes var
private:
- List1<Member> _members; /* all members of the set EXCEPT self. */
+ List1<Member> _members; // all members of the set EXCEPT _self.
+ ReplSetConfig::MemberCfg _config; // config of _self
+ unsigned _id; // _id of _self
+ int _maintenanceMode; // if we should stay in recovering state
public:
- unsigned selfId() const { return _self->id(); }
+ // this is called from within a writelock in logOpRS
+ unsigned selfId() const { return _id; }
Manager *mgr;
-
+ GhostSync *ghost;
+ /**
+ * This forces a secondary to go into recovering state and stay there
+     * until this is called again with "false". Multiple threads can call
+     * this; the node leaves maintenance mode only once every caller has
+     * called it again with false.
+ */
+ void setMaintenanceMode(const bool inc);
private:
Member* head() const { return _members.head(); }
public:
@@ -334,6 +480,7 @@ namespace mongo {
friend class CmdReplSetElect;
friend class Member;
friend class Manager;
+ friend class GhostSync;
friend class Consensus;
private:
@@ -352,6 +499,7 @@ namespace mongo {
bool _isStale(OplogReader& r, const string& hn);
public:
void syncThread();
+ const OpTime lastOtherOpTime() const;
};
class ReplSet : public ReplSetImpl {
@@ -365,7 +513,7 @@ namespace mongo {
bool freeze(int secs) { return _freeze(secs); }
string selfFullName() {
- lock lk(this);
+ assert( _self );
return _self->fullName();
}
@@ -385,12 +533,20 @@ namespace mongo {
void summarizeStatus(BSONObjBuilder& b) const { _summarizeStatus(b); }
void fillIsMaster(BSONObjBuilder& b) { _fillIsMaster(b); }
- /* we have a new config (reconfig) - apply it.
- @param comment write a no-op comment to the oplog about it. only makes sense if one is primary and initiating the reconf.
- */
+ /**
+ * We have a new config (reconfig) - apply it.
+ * @param comment write a no-op comment to the oplog about it. only
+ * makes sense if one is primary and initiating the reconf.
+ *
+ * The slaves are updated when they get a heartbeat indicating the new
+ * config. The comment is a no-op.
+ */
void haveNewConfig(ReplSetConfig& c, bool comment);
- /* if we delete old configs, this needs to assure locking. currently we don't so it is ok. */
+ /**
+     * Pointer assignment isn't necessarily atomic, so this needs to ensure
+ * locking, even though we don't delete old configs.
+ */
const ReplSetConfig& getConfig() { return config(); }
bool lockedByMe() { return RSBase::lockedByMe(); }
@@ -402,9 +558,10 @@ namespace mongo {
}
};
- /** base class for repl set commands. checks basic things such as in rs mode before the command
- does its real work
- */
+ /**
+     * Base class for repl set commands. Checks basic things, such as whether
+     * we're in rs mode, before the command does its real work.
+ */
class ReplSetCommand : public Command {
protected:
ReplSetCommand(const char * s, bool show=false) : Command(s, show) { }
@@ -413,26 +570,53 @@ namespace mongo {
virtual bool logTheOp() { return false; }
virtual LockType locktype() const { return NONE; }
virtual void help( stringstream &help ) const { help << "internal"; }
+
+ /**
+ * Some replica set commands call this and then call check(). This is
+ * intentional, as they might do things before theReplSet is initialized
+ * that still need to be checked for auth.
+ */
+ bool checkAuth(string& errmsg, BSONObjBuilder& result) {
+ if( !noauth && adminOnly() ) {
+ AuthenticationInfo *ai = cc().getAuthenticationInfo();
+ if (!ai->isAuthorizedForLock("admin", locktype())) {
+ errmsg = "replSet command unauthorized";
+ return false;
+ }
+ }
+ return true;
+ }
+
bool check(string& errmsg, BSONObjBuilder& result) {
if( !replSet ) {
errmsg = "not running with --replSet";
return false;
}
+
if( theReplSet == 0 ) {
result.append("startupStatus", ReplSet::startupStatus);
+ string s;
errmsg = ReplSet::startupStatusMsg.empty() ? "replset unknown error 2" : ReplSet::startupStatusMsg.get();
if( ReplSet::startupStatus == 3 )
result.append("info", "run rs.initiate(...) if not yet done for the set");
return false;
}
- return true;
+
+ return checkAuth(errmsg, result);
}
};
+ /**
+ * does local authentication
+ * directly authorizes against AuthenticationInfo
+ */
+ void replLocalAuth();
+
/** inlines ----------------- */
- inline Member::Member(HostAndPort h, unsigned ord, const ReplSetConfig::MemberCfg *c, bool self) :
+ inline Member::Member(HostAndPort h, unsigned ord, ReplSetConfig::MemberCfg *c, bool self) :
_config(*c), _h(h), _hbinfo(ord) {
+ assert(c);
if( self )
_hbinfo.health = 1.0;
}
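
The "Election with Priorities" comment added to rs.h above describes, in prose, how each node maintains the set of members it considers electable. The following standalone sketch (not part of this patch; MemberInfo and all field names are illustrative assumptions) shows one way criteria 2 and 3 of that list could be evaluated; criterion 1 (connectivity to a majority) is omitted for brevity.

    // Standalone illustration, assuming plain std containers rather than the
    // ReplSetImpl internals: recompute which members are "electable" per
    // criteria 2 and 3 of the comment block in rs.h.
    #include <map>
    #include <set>

    struct MemberInfo {
        double    priority;    // from the replica set config
        long long optimeSecs;  // latest optime (seconds) seen via heartbeats
        bool      reachable;   // did the last heartbeat succeed?
    };

    // Members with priority > 0 whose optime is within 10 seconds of the
    // freshest reachable member.
    std::set<int> computeElectableSet(const std::map<int, MemberInfo>& members) {
        long long freshest = 0;
        for (std::map<int, MemberInfo>::const_iterator it = members.begin();
             it != members.end(); ++it) {
            if (it->second.reachable && it->second.optimeSecs > freshest)
                freshest = it->second.optimeSecs;
        }
        std::set<int> electable;
        for (std::map<int, MemberInfo>::const_iterator it = members.begin();
             it != members.end(); ++it) {
            const MemberInfo& m = it->second;
            if (m.reachable && m.priority > 0 && freshest - m.optimeSecs <= 10)
                electable.insert(it->first);
        }
        return electable;
    }
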
diff --git a/db/repl/rs_config.cpp b/db/repl/rs_config.cpp
index 2341fe9..13352b1 100644
--- a/db/repl/rs_config.cpp
+++ b/db/repl/rs_config.cpp
@@ -20,10 +20,11 @@
#include "rs.h"
#include "../../client/dbclient.h"
#include "../../client/syncclusterconnection.h"
-#include "../../util/hostandport.h"
+#include "../../util/net/hostandport.h"
#include "../dbhelpers.h"
#include "connections.h"
#include "../oplog.h"
+#include "../instance.h"
using namespace bson;
@@ -36,7 +37,7 @@ namespace mongo {
while( i.more() ) {
BSONElement e = i.next();
if( !fields.count( e.fieldName() ) ) {
- uasserted(13434, str::stream() << "unexpected field '" << e.fieldName() << "'in object");
+ uasserted(13434, str::stream() << "unexpected field '" << e.fieldName() << "' in object");
}
}
}
@@ -63,27 +64,14 @@ namespace mongo {
//rather than above, do a logOp()? probably
BSONObj o = asBson();
Helpers::putSingletonGod(rsConfigNs.c_str(), o, false/*logOp=false; local db so would work regardless...*/);
- if( !comment.isEmpty() )
+ if( !comment.isEmpty() && (!theReplSet || theReplSet->isPrimary()) )
logOpInitiate(comment);
cx.db()->flushFiles(true);
}
- DEV log() << "replSet saveConfigLocally done" << rsLog;
+ log() << "replSet saveConfigLocally done" << rsLog;
}
- /*static*/
- /*void ReplSetConfig::receivedNewConfig(BSONObj cfg) {
- if( theReplSet )
- return; // this is for initial setup only, so far. todo
-
- ReplSetConfig c(cfg);
-
- writelock lk("admin.");
- if( theReplSet )
- return;
- c.saveConfigLocally(bo());
- }*/
-
bo ReplSetConfig::MemberCfg::asBson() const {
bob b;
b << "_id" << _id;
@@ -95,36 +83,52 @@ namespace mongo {
if( hidden ) b << "hidden" << hidden;
if( !buildIndexes ) b << "buildIndexes" << buildIndexes;
if( !tags.empty() ) {
- BSONArrayBuilder a;
- for( set<string>::const_iterator i = tags.begin(); i != tags.end(); i++ )
- a.append(*i);
- b.appendArray("tags", a.done());
- }
- if( !initialSync.isEmpty() ) {
- b << "initialSync" << initialSync;
+ BSONObjBuilder a;
+ for( map<string,string>::const_iterator i = tags.begin(); i != tags.end(); i++ )
+ a.append((*i).first, (*i).second);
+ b.append("tags", a.done());
}
return b.obj();
}
+ void ReplSetConfig::updateMembers(List1<Member> &dest) {
+ for (vector<MemberCfg>::iterator source = members.begin(); source < members.end(); source++) {
+ for( Member *d = dest.head(); d; d = d->next() ) {
+ if (d->fullName() == (*source).h.toString()) {
+ d->configw().groupsw() = (*source).groups();
+ }
+ }
+ }
+ }
+
bo ReplSetConfig::asBson() const {
bob b;
b.append("_id", _id).append("version", version);
- if( !ho.isDefault() || !getLastErrorDefaults.isEmpty() ) {
- bob settings;
- if( !ho.isDefault() )
- settings << "heartbeatConnRetries " << ho.heartbeatConnRetries <<
- "heartbeatSleep" << ho.heartbeatSleepMillis / 1000.0 <<
- "heartbeatTimeout" << ho.heartbeatTimeoutMillis / 1000.0;
- if( !getLastErrorDefaults.isEmpty() )
- settings << "getLastErrorDefaults" << getLastErrorDefaults;
- b << "settings" << settings.obj();
- }
BSONArrayBuilder a;
for( unsigned i = 0; i < members.size(); i++ )
a.append( members[i].asBson() );
b.append("members", a.arr());
+ if( !ho.isDefault() || !getLastErrorDefaults.isEmpty() || !rules.empty()) {
+ bob settings;
+ if( !rules.empty() ) {
+ bob modes;
+ for (map<string,TagRule*>::const_iterator it = rules.begin(); it != rules.end(); it++) {
+ bob clauses;
+ vector<TagClause*> r = (*it).second->clauses;
+ for (vector<TagClause*>::iterator it2 = r.begin(); it2 < r.end(); it2++) {
+ clauses << (*it2)->name << (*it2)->target;
+ }
+ modes << (*it).first << clauses.obj();
+ }
+ settings << "getLastErrorModes" << modes.obj();
+ }
+ if( !getLastErrorDefaults.isEmpty() )
+ settings << "getLastErrorDefaults" << getLastErrorDefaults;
+ b << "settings" << settings.obj();
+ }
+
return b.obj();
}
@@ -135,38 +139,87 @@ namespace mongo {
void ReplSetConfig::MemberCfg::check() const {
mchk(_id >= 0 && _id <= 255);
mchk(priority >= 0 && priority <= 1000);
- mchk(votes >= 0 && votes <= 100);
- uassert(13419, "this version of mongod only supports priorities 0 and 1", priority == 0 || priority == 1);
+ mchk(votes <= 100); // votes >= 0 because it is unsigned
+ uassert(13419, "priorities must be between 0.0 and 100.0", priority >= 0.0 && priority <= 100.0);
uassert(13437, "slaveDelay requires priority be zero", slaveDelay == 0 || priority == 0);
uassert(13438, "bad slaveDelay value", slaveDelay >= 0 && slaveDelay <= 3600 * 24 * 366);
uassert(13439, "priority must be 0 when hidden=true", priority == 0 || !hidden);
uassert(13477, "priority must be 0 when buildIndexes=false", buildIndexes || priority == 0);
+ }
+/*
+ string ReplSetConfig::TagSubgroup::toString() const {
+ bool first = true;
+ string result = "\""+name+"\": [";
+ for (set<const MemberCfg*>::const_iterator i = m.begin(); i != m.end(); i++) {
+ if (!first) {
+ result += ", ";
+ }
+ first = false;
+ result += (*i)->h.toString();
+ }
+ return result+"]";
+ }
+ */
+ string ReplSetConfig::TagClause::toString() const {
+ string result = name+": {";
+ for (map<string,TagSubgroup*>::const_iterator i = subgroups.begin(); i != subgroups.end(); i++) {
+//TEMP? result += (*i).second->toString()+", ";
+ }
+ result += "TagClause toString TEMPORARILY DISABLED";
+ return result + "}";
+ }
- if (!initialSync.isEmpty()) {
- static const string legal[] = {"state", "name", "_id","optime"};
- static const set<string> legals(legal, legal + 4);
- assertOnlyHas(initialSync, legals);
+ string ReplSetConfig::TagRule::toString() const {
+ string result = "{";
+ for (vector<TagClause*>::const_iterator it = clauses.begin(); it < clauses.end(); it++) {
+ result += ((TagClause*)(*it))->toString()+",";
+ }
+ return result+"}";
+ }
- if (initialSync.hasElement("state")) {
- uassert(13525, "initialSync source state must be 1 or 2",
- initialSync["state"].isNumber() &&
- (initialSync["state"].Number() == 1 ||
- initialSync["state"].Number() == 2));
- }
- if (initialSync.hasElement("name")) {
- uassert(13526, "initialSync source name must be a string",
- initialSync["name"].type() == mongo::String);
+ void ReplSetConfig::TagSubgroup::updateLast(const OpTime& op) {
+ if (last < op) {
+ last = op;
+
+ for (vector<TagClause*>::iterator it = clauses.begin(); it < clauses.end(); it++) {
+ (*it)->updateLast(op);
}
- if (initialSync.hasElement("_id")) {
- uassert(13527, "initialSync source _id must be a number",
- initialSync["_id"].isNumber());
+ }
+ }
+
+ void ReplSetConfig::TagClause::updateLast(const OpTime& op) {
+ if (last >= op) {
+ return;
+ }
+
+        // count how many subgroups have reached this op; the clause needs at least actualTarget of them
+ int count = 0;
+ map<string,TagSubgroup*>::iterator it;
+ for (it = subgroups.begin(); it != subgroups.end(); it++) {
+ if ((*it).second->last >= op) {
+ count++;
}
- if (initialSync.hasElement("optime")) {
- uassert(13528, "initialSync source optime must be a timestamp",
- initialSync["optime"].type() == mongo::Timestamp ||
- initialSync["optime"].type() == mongo::Date);
+ }
+
+ if (count >= actualTarget) {
+ last = op;
+ rule->updateLast(op);
+ }
+ }
+
+ void ReplSetConfig::TagRule::updateLast(const OpTime& op) {
+ OpTime *earliest = (OpTime*)&op;
+ vector<TagClause*>::iterator it;
+
+ for (it = clauses.begin(); it < clauses.end(); it++) {
+ if ((*it)->last < *earliest) {
+ earliest = &(*it)->last;
}
}
+
+        // a rule is simply the AND of its clauses, so the rule is only as far
+        // along as its most-behind clause
+ last = *earliest;
}
/** @param o old config
@@ -184,18 +237,28 @@ namespace mongo {
if someone had some intermediate config this node doesnt have, that could be
necessary. but then how did we become primary? so perhaps we are fine as-is.
*/
- if( o.version + 1 != n.version ) {
- errmsg = "version number wrong";
+ if( o.version >= n.version ) {
+ errmsg = str::stream() << "version number must increase, old: "
+ << o.version << " new: " << n.version;
return false;
}
map<HostAndPort,const ReplSetConfig::MemberCfg*> old;
+ bool isLocalHost = false;
for( vector<ReplSetConfig::MemberCfg>::const_iterator i = o.members.begin(); i != o.members.end(); i++ ) {
+ if (i->h.isLocalHost()) {
+ isLocalHost = true;
+ }
old[i->h] = &(*i);
}
int me = 0;
for( vector<ReplSetConfig::MemberCfg>::const_iterator i = n.members.begin(); i != n.members.end(); i++ ) {
const ReplSetConfig::MemberCfg& m = *i;
+ if ( (isLocalHost && !m.h.isLocalHost()) || (!isLocalHost && m.h.isLocalHost())) {
+ log() << "reconfig error, cannot switch between localhost and hostnames: "
+ << m.h.toString() << rsLog;
+ uasserted(13645, "hosts cannot switch between localhost and hostname");
+ }
if( old.count(m.h) ) {
const ReplSetConfig::MemberCfg& oldCfg = *old[m.h];
if( oldCfg._id != m._id ) {
@@ -212,6 +275,7 @@ namespace mongo {
log() << "replSet reconfig error with member: " << m.h.toString() << " arbiterOnly cannot change. remove and readd the member instead " << rsLog;
uasserted(13510, "arbiterOnly may not change for members");
}
+ uassert(14827, "arbiters cannot have tags", !m.arbiterOnly || m.tags.size() == 0 );
}
if( m.h.isSelf() )
me++;
@@ -250,6 +314,122 @@ namespace mongo {
}
}
+ void ReplSetConfig::_populateTagMap(map<string,TagClause> &tagMap) {
+ // create subgroups for each server corresponding to each of
+ // its tags. E.g.:
+ //
+ // A is tagged with {"server" : "A", "dc" : "ny"}
+ // B is tagged with {"server" : "B", "dc" : "ny"}
+ //
+ // At the end of this step, tagMap will contain:
+ //
+ // "server" => {"A" : [A], "B" : [B]}
+ // "dc" => {"ny" : [A,B]}
+
+ for (unsigned i=0; i<members.size(); i++) {
+ MemberCfg member = members[i];
+
+ for (map<string,string>::iterator tag = member.tags.begin(); tag != member.tags.end(); tag++) {
+ string label = (*tag).first;
+ string value = (*tag).second;
+
+ TagClause& clause = tagMap[label];
+ clause.name = label;
+
+ TagSubgroup* subgroup;
+ // search for "ny" in "dc"'s clause
+ if (clause.subgroups.find(value) == clause.subgroups.end()) {
+ clause.subgroups[value] = subgroup = new TagSubgroup(value);
+ }
+ else {
+ subgroup = clause.subgroups[value];
+ }
+
+ subgroup->m.insert(&members[i]);
+ }
+ }
+ }
+
+ void ReplSetConfig::parseRules(const BSONObj& modes) {
+ map<string,TagClause> tagMap;
+ _populateTagMap(tagMap);
+
+ for (BSONObj::iterator i = modes.begin(); i.more(); ) {
+ unsigned int primaryOnly = 0;
+
+ // ruleName : {dc : 2, m : 3}
+ BSONElement rule = i.next();
+ uassert(14046, "getLastErrorMode rules must be objects", rule.type() == mongo::Object);
+
+ TagRule* r = new TagRule();
+
+ BSONObj clauseObj = rule.Obj();
+ for (BSONObj::iterator c = clauseObj.begin(); c.more(); ) {
+ BSONElement clauseElem = c.next();
+ uassert(14829, "getLastErrorMode criteria must be numeric", clauseElem.isNumber());
+
+ // get the clause, e.g., "x.y" : 3
+ const char *criteria = clauseElem.fieldName();
+ int value = clauseElem.numberInt();
+ uassert(14828, str::stream() << "getLastErrorMode criteria must be greater than 0: " << clauseElem, value > 0);
+
+ TagClause* node = new TagClause(tagMap[criteria]);
+
+ int numGroups = node->subgroups.size();
+ uassert(14831, str::stream() << "mode " << clauseObj << " requires "
+ << value << " tagged with " << criteria << ", but only "
+ << numGroups << " with this tag were found", numGroups >= value);
+
+ node->name = criteria;
+ node->target = value;
+ // if any subgroups contain "me", we can decrease the target
+ node->actualTarget = node->target;
+
+ // then we want to add pointers between clause & subgroup
+ for (map<string,TagSubgroup*>::iterator sgs = node->subgroups.begin();
+ sgs != node->subgroups.end(); sgs++) {
+ bool foundMe = false;
+ (*sgs).second->clauses.push_back(node);
+
+ // if this subgroup contains the primary, it's automatically always up-to-date
+ for( set<MemberCfg*>::const_iterator cfg = (*sgs).second->m.begin();
+ cfg != (*sgs).second->m.end();
+ cfg++)
+ {
+ if ((*cfg)->h.isSelf()) {
+ node->actualTarget--;
+ foundMe = true;
+ }
+ }
+
+ for (set<MemberCfg *>::iterator cfg = (*sgs).second->m.begin();
+ !foundMe && cfg != (*sgs).second->m.end(); cfg++) {
+ (*cfg)->groupsw().insert((*sgs).second);
+ }
+ }
+
+ // if all of the members of this clause involve the primary, it's always up-to-date
+ if (node->actualTarget == 0) {
+ node->last = OpTime(INT_MAX, INT_MAX);
+ primaryOnly++;
+ }
+
+ // this is a valid clause, so we want to add it to its rule
+ node->rule = r;
+ r->clauses.push_back(node);
+ }
+
+ // if all of the clauses are satisfied by the primary, this rule is trivially true
+ if (primaryOnly == r->clauses.size()) {
+ r->last = OpTime(INT_MAX, INT_MAX);
+ }
+
+ // if we got here, this is a valid rule
+ LOG(1) << "replSet new rule " << rule.fieldName() << ": " << r->toString() << rsLog;
+ rules[rule.fieldName()] = r;
+ }
+ }
+
void ReplSetConfig::from(BSONObj o) {
static const string legal[] = {"_id","version", "members","settings"};
static const set<string> legals(legal, legal + 4);
@@ -262,19 +442,6 @@ namespace mongo {
uassert(13115, "bad " + rsConfigNs + " config: version", version > 0);
}
- if( o["settings"].ok() ) {
- BSONObj settings = o["settings"].Obj();
- if( settings["heartbeatConnRetries "].ok() )
- ho.heartbeatConnRetries = settings["heartbeatConnRetries "].numberInt();
- if( settings["heartbeatSleep"].ok() )
- ho.heartbeatSleepMillis = (unsigned) (settings["heartbeatSleep"].Number() * 1000);
- if( settings["heartbeatTimeout"].ok() )
- ho.heartbeatTimeoutMillis = (unsigned) (settings["heartbeatTimeout"].Number() * 1000);
- ho.check();
- try { getLastErrorDefaults = settings["getLastErrorDefaults"].Obj().copy(); }
- catch(...) { }
- }
-
set<string> hosts;
set<int> ords;
vector<BSONElement> members;
@@ -292,7 +459,7 @@ namespace mongo {
try {
static const string legal[] = {
"_id","votes","priority","host", "hidden","slaveDelay",
- "arbiterOnly","buildIndexes","tags","initialSync"
+ "arbiterOnly","buildIndexes","tags","initialSync" // deprecated
};
static const set<string> legals(legal, legal + 10);
assertOnlyHas(mobj, legals);
@@ -304,10 +471,12 @@ namespace mongo {
/* TODO: use of string exceptions may be problematic for reconfig case! */
throw "_id must be numeric";
}
- string s;
try {
- s = mobj["host"].String();
+ string s = mobj["host"].String();
m.h = HostAndPort(s);
+ if (!m.h.hasPort()) {
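+                // presumably: make an implicit default port explicit so host strings compare consistently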
+ m.h.setPort(m.h.port());
+ }
}
catch(...) {
throw string("bad or missing host field? ") + mobj.toString();
@@ -325,12 +494,10 @@ namespace mongo {
if( mobj.hasElement("votes") )
m.votes = (unsigned) mobj["votes"].Number();
if( mobj.hasElement("tags") ) {
- vector<BSONElement> v = mobj["tags"].Array();
- for( unsigned i = 0; i < v.size(); i++ )
- m.tags.insert( v[i].String() );
- }
- if( mobj.hasElement("initialSync")) {
- m.initialSync = mobj["initialSync"].Obj().getOwned();
+ const BSONObj &t = mobj["tags"].Obj();
+ for (BSONObj::iterator c = t.begin(); c.more(); c.next()) {
+ m.tags[(*c).fieldName()] = (*c).String();
+ }
}
m.check();
}
@@ -356,22 +523,38 @@ namespace mongo {
}
uassert(13393, "can't use localhost in repl set member names except when using it for all members", localhosts == 0 || localhosts == members.size());
uassert(13117, "bad " + rsConfigNs + " config", !_id.empty());
+
+ if( o["settings"].ok() ) {
+ BSONObj settings = o["settings"].Obj();
+ if( settings["getLastErrorModes"].ok() ) {
+ parseRules(settings["getLastErrorModes"].Obj());
+ }
+ ho.check();
+ try { getLastErrorDefaults = settings["getLastErrorDefaults"].Obj().copy(); }
+ catch(...) { }
+ }
}
static inline void configAssert(bool expr) {
uassert(13122, "bad repl set config?", expr);
}
- ReplSetConfig::ReplSetConfig(BSONObj cfg) {
+ ReplSetConfig::ReplSetConfig(BSONObj cfg, bool force) {
+ _constructed = false;
clear();
from(cfg);
- configAssert( version < 0 /*unspecified*/ || (version >= 1 && version <= 5000) );
+ if( force ) {
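+            // presumably: bump the version by a large random amount so a forced config wins any version comparison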
+ version += rand() % 100000 + 10000;
+ }
+ configAssert( version < 0 /*unspecified*/ || (version >= 1) );
if( version < 1 )
version = 1;
_ok = true;
+ _constructed = true;
}
ReplSetConfig::ReplSetConfig(const HostAndPort& h) {
+ _constructed = false;
clear();
int level = 2;
DEV level = 0;
@@ -447,6 +630,7 @@ namespace mongo {
checkRsConfig();
_ok = true;
log(level) << "replSet load config ok from " << (h.isSelf() ? "self" : h.toString()) << rsLog;
+ _constructed = true;
}
}
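
The _populateTagMap() comments in rs_config.cpp above explain how each member's tags are folded into label -> value -> servers subgroups before getLastErrorModes rules are parsed. A standalone sketch of that grouping (std types and names are assumptions standing in for MemberCfg/TagClause/TagSubgroup), run against the five-server example from the comments:

    // Standalone illustration of the tag-map idea used by _populateTagMap():
    // label -> value -> set of servers carrying that tag value.
    #include <iostream>
    #include <map>
    #include <set>
    #include <string>

    typedef std::map<std::string, std::string> Tags;      // e.g. {"dc" : "ny"}
    typedef std::map<std::string,
                     std::map<std::string, std::set<std::string> > > TagMap;

    TagMap populateTagMap(const std::map<std::string, Tags>& servers) {
        TagMap tagMap;
        for (std::map<std::string, Tags>::const_iterator s = servers.begin();
             s != servers.end(); ++s)
            for (Tags::const_iterator t = s->second.begin();
                 t != s->second.end(); ++t)
                tagMap[t->first][t->second].insert(s->first);
        return tagMap;
    }

    int main() {
        std::map<std::string, Tags> servers;
        servers["A"]["dc"] = "ny"; servers["B"]["dc"] = "ny";
        servers["C"]["dc"] = "ny"; servers["D"]["dc"] = "sf";
        servers["E"]["dc"] = "sf";

        TagMap m = populateTagMap(servers);
        // "dc" has two subgroups, ny -> {A,B,C} and sf -> {D,E}, so a
        // getLastErrorModes clause such as {dc : 2} passes the subgroup-count check.
        std::cout << "dc subgroups: " << m["dc"].size() << std::endl;  // 2
        return 0;
    }
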
diff --git a/db/repl/rs_config.h b/db/repl/rs_config.h
index 7d43fe6..f69052a 100644
--- a/db/repl/rs_config.h
+++ b/db/repl/rs_config.h
@@ -20,26 +20,37 @@
#pragma once
-#include "../../util/hostandport.h"
+#include "../../util/net/hostandport.h"
+#include "../../util/concurrency/race.h"
#include "health.h"
namespace mongo {
-
- /* singleton config object is stored here */
+ class Member;
const string rsConfigNs = "local.system.replset";
class ReplSetConfig {
enum { EMPTYCONFIG = -2 };
+ struct TagSubgroup;
public:
- /* if something is misconfigured, throws an exception.
- if couldn't be queried or is just blank, ok() will be false.
- */
+ /**
+ * This contacts the given host and tries to get a config from them.
+         * This contacts the given host and tries to get a config from it.
+         *
+         * This sends a test heartbeat to the host and, if all goes well and the
+         * host has a more recent config, fetches the config and loads it (see
+         * from()).
+         *
+         * If it's contacting itself, it skips the heartbeat (for obvious
+         * reasons). If something is misconfigured, throws an exception. If the
+ */
ReplSetConfig(const HostAndPort& h);
- ReplSetConfig(BSONObj cfg);
+ ReplSetConfig(BSONObj cfg, bool force=false);
bool ok() const { return _ok; }
+ struct TagRule;
+
struct MemberCfg {
MemberCfg() : _id(-1), votes(1), priority(1.0), arbiterOnly(false), slaveDelay(0), hidden(false), buildIndexes(true) { }
int _id; /* ordinal */
@@ -50,12 +61,24 @@ namespace mongo {
int slaveDelay; /* seconds. int rather than unsigned for convenient to/front bson conversion. */
bool hidden; /* if set, don't advertise to drives in isMaster. for non-primaries (priority 0) */
bool buildIndexes; /* if false, do not create any non-_id indexes */
- set<string> tags; /* tagging for data center, rack, etc. */
- BSONObj initialSync; /* directions for initial sync source */
-
+ map<string,string> tags; /* tagging for data center, rack, etc. */
+ private:
+ set<TagSubgroup*> _groups; // the subgroups this member belongs to
+ public:
+ const set<TagSubgroup*>& groups() const {
+ return _groups;
+ }
+ set<TagSubgroup*>& groupsw() {
+ return _groups;
+ }
void check() const; /* check validity, assert if not. */
BSONObj asBson() const;
bool potentiallyHot() const { return !arbiterOnly && priority > 0; }
+ void updateGroups(const OpTime& last) {
+ for (set<TagSubgroup*>::iterator it = _groups.begin(); it != _groups.end(); it++) {
+ ((TagSubgroup*)(*it))->updateLast(last);
+ }
+ }
bool operator==(const MemberCfg& r) const {
return _id==r._id && votes == r.votes && h == r.h && priority == r.priority &&
arbiterOnly == r.arbiterOnly && slaveDelay == r.slaveDelay && hidden == r.hidden &&
@@ -70,6 +93,7 @@ namespace mongo {
HealthOptions ho;
string md5;
BSONObj getLastErrorDefaults;
+ map<string,TagRule*> rules;
list<HostAndPort> otherMemberHostnames() const; // except self
@@ -88,12 +112,112 @@ namespace mongo {
void saveConfigLocally(BSONObj comment); // to local db
string saveConfigEverywhere(); // returns textual info on what happened
+ /**
+ * Update members' groups when the config changes but members stay the same.
+ */
+ void updateMembers(List1<Member> &dest);
+
BSONObj asBson() const;
+ bool _constructed;
private:
bool _ok;
void from(BSONObj);
void clear();
+
+ struct TagClause;
+
+ /**
+ * This is a logical grouping of servers. It is pointed to by a set of
+ * servers with a certain tag.
+ *
+ * For example, suppose servers A, B, and C have the tag "dc" : "nyc". If we
+ * have a rule {"dc" : 2}, then we want A _or_ B _or_ C to have the
+         * write for one of the "dc" criteria to be fulfilled, so all three will
+ * point to this subgroup. When one of their oplog-tailing cursors is
+ * updated, this subgroup is updated.
+ */
+ struct TagSubgroup : boost::noncopyable {
+ ~TagSubgroup(); // never called; not defined
+ TagSubgroup(string nm) : name(nm) { }
+ const string name;
+ OpTime last;
+ vector<TagClause*> clauses;
+
+ // this probably won't actually point to valid members after the
+ // subgroup is created, as initFromConfig() makes a copy of the
+ // config
+ set<MemberCfg*> m;
+
+ void updateLast(const OpTime& op);
+
+ //string toString() const;
+
+ /**
+ * If two tags have the same name, they should compare as equal so
+ * that members don't have to update two identical groups on writes.
+ */
+ bool operator() (TagSubgroup& lhs, TagSubgroup& rhs) const {
+ return lhs.name < rhs.name;
+ }
+ };
+
+ /**
+ * An argument in a rule. For example, if we had the rule {dc : 2,
+ * machines : 3}, "dc" : 2 and "machines" : 3 would be two TagClauses.
+ *
+ * Each tag clause has a set of associated subgroups. For example, if
+ * we had "dc" : 2, our subgroups might be "nyc", "sf", and "hk".
+ */
+ struct TagClause {
+ OpTime last;
+ map<string,TagSubgroup*> subgroups;
+ TagRule *rule;
+ string name;
+ /**
+             * If we get a clause like {machines : 3} and this server is
+ * tagged with "machines", then it's really {machines : 2}, as we
+ * will always be up-to-date. So, target would be 3 and
+ * actualTarget would be 2, in that example.
+ */
+ int target;
+ int actualTarget;
+
+ void updateLast(const OpTime& op);
+ string toString() const;
+ };
+
+ /**
+ * Parses getLastErrorModes.
+ */
+ void parseRules(const BSONObj& modes);
+
+ /**
+ * Create a hash containing every possible clause that could be used in a
+ * rule and the servers related to that clause.
+ *
+ * For example, suppose we have the following servers:
+ * A {"dc" : "ny", "ny" : "rk1"}
+ * B {"dc" : "ny", "ny" : "rk1"}
+ * C {"dc" : "ny", "ny" : "rk2"}
+ * D {"dc" : "sf", "sf" : "rk1"}
+ * E {"dc" : "sf", "sf" : "rk2"}
+ *
+ * This would give us the possible criteria:
+ * "dc" -> {A, B, C},{D, E}
+ * "ny" -> {A, B},{C}
+ * "sf" -> {D},{E}
+ */
+ void _populateTagMap(map<string,TagClause> &tagMap);
+
+ public:
+ struct TagRule {
+ vector<TagClause*> clauses;
+ OpTime last;
+
+ void updateLast(const OpTime& op);
+ string toString() const;
+ };
};
}
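
The TagClause and TagRule comments in rs_config.h above describe how optimes propagate upward: a clause such as {dc : 2} has reached an optime once at least `target` of its subgroups have, and a rule, being the AND of its clauses, sits at the optime of its most-behind clause. A minimal standalone sketch of that arithmetic (plain integers stand in for OpTime; the function names are assumptions):

    // Standalone illustration of TagClause/TagRule "last" propagation.
    #include <algorithm>
    #include <iostream>
    #include <vector>

    // Optime a clause has reached: the target-th freshest subgroup bounds it.
    // Assumes 1 <= target <= subgroupLast.size(), which parseRules() enforces.
    long long clauseLast(std::vector<long long> subgroupLast, int target) {
        std::sort(subgroupLast.begin(), subgroupLast.end());
        return subgroupLast[subgroupLast.size() - target];
    }

    // A rule is only as far along as its most-behind clause.
    long long ruleLast(const std::vector<long long>& clauseLasts) {
        return *std::min_element(clauseLasts.begin(), clauseLasts.end());
    }

    int main() {
        // {dc : 2} with subgroup optimes ny=5, sf=3        -> clause at 3
        std::vector<long long> dc;   dc.push_back(5);  dc.push_back(3);
        // {rack : 3} with rack optimes 7, 4, 2             -> clause at 2
        std::vector<long long> rack; rack.push_back(7); rack.push_back(4);
        rack.push_back(2);

        std::vector<long long> clauses;
        clauses.push_back(clauseLast(dc, 2));
        clauses.push_back(clauseLast(rack, 3));
        std::cout << "rule last = " << ruleLast(clauses) << std::endl;  // 2
        return 0;
    }
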
diff --git a/db/repl/rs_initialsync.cpp b/db/repl/rs_initialsync.cpp
index 5a54059..101b03a 100644
--- a/db/repl/rs_initialsync.cpp
+++ b/db/repl/rs_initialsync.cpp
@@ -34,7 +34,7 @@ namespace mongo {
// add try/catch with sleep
- void isyncassert(const char *msg, bool expr) {
+ void isyncassert(const string& msg, bool expr) {
if( !expr ) {
string m = str::stream() << "initial sync " << msg;
theReplSet->sethbmsg(m, 0);
@@ -57,20 +57,15 @@ namespace mongo {
}
}
- bool cloneFrom(const char *masterHost, string& errmsg, const string& fromdb, bool logForReplication,
- bool slaveOk, bool useReplAuth, bool snapshot);
-
/* todo : progress metering to sethbmsg. */
static bool clone(const char *master, string db) {
string err;
return cloneFrom(master, err, db, false,
- /* slave_ok */ true, true, false);
+ /* slave_ok */ true, true, false, /*mayYield*/true, /*mayBeInterrupted*/false);
}
void _logOpObjRS(const BSONObj& op);
- bool copyCollectionFromRemote(const string& host, const string& ns, const BSONObj& query, string &errmsg, bool logforrepl);
-
static void emptyOplog() {
writelock lk(rsoplog);
Client::Context ctx(rsoplog);
@@ -80,104 +75,47 @@ namespace mongo {
if( d && d->stats.nrecords == 0 )
return; // already empty, ok.
- log(1) << "replSet empty oplog" << rsLog;
+ LOG(1) << "replSet empty oplog" << rsLog;
d->emptyCappedCollection(rsoplog);
-
- /*
- string errmsg;
- bob res;
- dropCollection(rsoplog, errmsg, res);
- log() << "replSet recreated oplog so it is empty. todo optimize this..." << rsLog;
- createOplog();*/
-
- // TEMP: restart to recreate empty oplog
- //log() << "replSet FATAL error during initial sync. mongod restart required." << rsLog;
- //dbexit( EXIT_CLEAN );
-
- /*
- writelock lk(rsoplog);
- Client::Context c(rsoplog, dbpath, 0, doauth/false);
- NamespaceDetails *oplogDetails = nsdetails(rsoplog);
- uassert(13412, str::stream() << "replSet error " << rsoplog << " is missing", oplogDetails != 0);
- oplogDetails->cappedTruncateAfter(rsoplog, h.commonPointOurDiskloc, false);
- */
}
- /**
- * Choose a member to sync from.
- *
- * The initalSync option is an object with 1 k/v pair:
- *
- * "state" : 1|2
- * "name" : "host"
- * "_id" : N
- * "optime" : t
- *
- * All except optime are exact matches. "optime" will find a secondary with
- * an optime >= to the optime given.
- */
const Member* ReplSetImpl::getMemberToSyncTo() {
- BSONObj sync = myConfig().initialSync;
- bool secondaryOnly = false, isOpTime = false;
- char *name = 0;
- int id = -1;
- OpTime optime;
-
- StateBox::SP sp = box.get();
- assert( !sp.state.primary() ); // wouldn't make sense if we were.
-
- // if it exists, we've already checked that these fields are valid in
- // rs_config.cpp
- if ( !sync.isEmpty() ) {
- if (sync.hasElement("state")) {
- if (sync["state"].Number() == 1) {
- if (sp.primary) {
- sethbmsg( str::stream() << "syncing to primary: " << sp.primary->fullName(), 0);
- return const_cast<Member*>(sp.primary);
- }
- else {
- sethbmsg("couldn't clone from primary");
- return NULL;
- }
- }
- else {
- secondaryOnly = true;
- }
- }
- if (sync.hasElement("name")) {
- name = (char*)sync["name"].valuestr();
- }
- if (sync.hasElement("_id")) {
- id = (int)sync["_id"].Number();
- }
- if (sync.hasElement("optime")) {
- isOpTime = true;
- optime = sync["optime"]._opTime();
+ Member *closest = 0;
+
+ // wait for 2N pings before choosing a sync target
+ if (_cfg) {
+ int needMorePings = config().members.size()*2 - HeartbeatInfo::numPings;
+
+ if (needMorePings > 0) {
+ OCCASIONALLY log() << "waiting for " << needMorePings << " pings from other members before syncing" << endl;
+ return NULL;
}
}
- for( Member *m = head(); m; m = m->next() ) {
- if (!m->hbinfo().up() ||
- (m->state() != MemberState::RS_SECONDARY &&
- m->state() != MemberState::RS_PRIMARY) ||
- (secondaryOnly && m->state() != MemberState::RS_SECONDARY) ||
- (id != -1 && (int)m->id() != id) ||
- (name != 0 && strcmp(name, m->fullName().c_str()) != 0) ||
- (isOpTime && optime >= m->hbinfo().opTime)) {
- continue;
+ // find the member with the lowest ping time that has more data than me
+ for (Member *m = _members.head(); m; m = m->next()) {
+ if (m->hbinfo().up() &&
+ (m->state() == MemberState::RS_PRIMARY ||
+ (m->state() == MemberState::RS_SECONDARY && m->hbinfo().opTime > lastOpTimeWritten)) &&
+ (!closest || m->hbinfo().ping < closest->hbinfo().ping)) {
+ closest = m;
}
+ }
+
+ {
+ lock lk(this);
- sethbmsg( str::stream() << "syncing to: " << m->fullName(), 0);
- return const_cast<Member*>(m);
+ if (!closest) {
+ _currentSyncTarget = NULL;
+ return NULL;
+ }
+
+ _currentSyncTarget = closest;
}
- sethbmsg( str::stream() << "couldn't find a member matching the sync criteria: " <<
- "\nstate? " << (secondaryOnly ? "2" : "none") <<
- "\nname? " << (name ? name : "none") <<
- "\n_id? " << id <<
- "\noptime? " << optime.toStringPretty() );
+ sethbmsg( str::stream() << "syncing to: " << closest->fullName(), 0);
- return NULL;
+ return const_cast<Member*>(closest);
}
/**
@@ -186,6 +124,12 @@ namespace mongo {
void ReplSetImpl::_syncDoInitialSync() {
sethbmsg("initial sync pending",0);
+ // if this is the first node, it may have already become primary
+ if ( box.getState().primary() ) {
+ sethbmsg("I'm already primary, no need for initial sync",0);
+ return;
+ }
+
const Member *source = getMemberToSyncTo();
if (!source) {
sethbmsg("initial sync need a member to be primary or secondary to do our initial sync", 0);
@@ -252,13 +196,14 @@ namespace mongo {
/* apply relevant portion of the oplog
*/
{
- sethbmsg("initial sync initial oplog application");
- isyncassert( "initial sync source must remain readable throughout our initial sync [2]", source->state().readable() );
+ isyncassert( str::stream() << "initial sync source must remain readable throughout our initial sync [2] state now: " << source->state().toString() , source->state().readable() );
if( ! initialSyncOplogApplication(source, /*applyGTE*/startingTS, /*minValid*/mvoptime) ) { // note we assume here that this call does not throw
log() << "replSet initial sync failed during applyoplog" << rsLog;
emptyOplog(); // otherwise we'll be up!
+
lastOpTimeWritten = OpTime();
lastH = 0;
+
log() << "replSet cleaning up [1]" << rsLog;
{
writelock lk("local.");
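
The rewritten getMemberToSyncTo() above drops the old initialSync options and simply picks the reachable member with the lowest ping time among those that are ahead of this node (or the primary). A standalone sketch of that selection rule, with an assumed Candidate struct standing in for Member/HeartbeatInfo:

    // Standalone illustration of "sync from the closest member that has more
    // data than me"; the Candidate type and its fields are assumptions.
    #include <string>
    #include <vector>

    struct Candidate {
        std::string host;
        bool up;            // heartbeat is healthy
        bool primary;       // in RS_PRIMARY state
        bool secondary;     // in RS_SECONDARY state
        long long optime;   // latest optime reported by it
        unsigned ping;      // round-trip heartbeat time, milliseconds
    };

    // Index of the chosen sync source, or -1 if no member qualifies.
    int chooseSyncSource(const std::vector<Candidate>& members, long long myOptime) {
        int closest = -1;
        for (size_t i = 0; i < members.size(); ++i) {
            const Candidate& m = members[i];
            bool ahead = m.primary || (m.secondary && m.optime > myOptime);
            if (!m.up || !ahead)
                continue;
            if (closest == -1 || m.ping < members[(size_t)closest].ping)
                closest = (int)i;
        }
        return closest;
    }
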
diff --git a/db/repl/rs_initiate.cpp b/db/repl/rs_initiate.cpp
index cf1941f..3d998a8 100644
--- a/db/repl/rs_initiate.cpp
+++ b/db/repl/rs_initiate.cpp
@@ -37,8 +37,8 @@ namespace mongo {
throws
@param initial true when initiating
*/
- void checkMembersUpForConfigChange(const ReplSetConfig& cfg, bool initial) {
- int failures = 0;
+ void checkMembersUpForConfigChange(const ReplSetConfig& cfg, BSONObjBuilder& result, bool initial) {
+ int failures = 0, allVotes = 0, allowableFailures = 0;
int me = 0;
stringstream selfs;
for( vector<ReplSetConfig::MemberCfg>::const_iterator i = cfg.members.begin(); i != cfg.members.end(); i++ ) {
@@ -51,7 +51,10 @@ namespace mongo {
uasserted(13420, "initiation and reconfiguration of a replica set must be sent to a node that can become primary");
}
}
+ allVotes += i->votes;
}
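+        // e.g. allVotes == 5: a majority is 3, so up to 2 votes' worth of members may be down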
+ allowableFailures = allVotes - (allVotes/2 + 1);
+
uassert(13278, "bad config: isSelf is true for multiple hosts: " + selfs.str(), me <= 1); // dups?
if( me != 1 ) {
stringstream ss;
@@ -61,6 +64,7 @@ namespace mongo {
uasserted(13279, ss.str());
}
+ vector<string> down;
for( vector<ReplSetConfig::MemberCfg>::const_iterator i = cfg.members.begin(); i != cfg.members.end(); i++ ) {
// we know we're up
if (i->h.isSelf()) {
@@ -100,27 +104,27 @@ namespace mongo {
}
}
if( !ok && !res["rs"].trueValue() ) {
+ down.push_back(i->h.toString());
+
if( !res.isEmpty() ) {
/* strange. got a response, but not "ok". log it. */
log() << "replSet warning " << i->h.toString() << " replied: " << res.toString() << rsLog;
}
bool allowFailure = false;
- failures++;
- if( res.isEmpty() && !initial && failures == 1 ) {
- /* for now we are only allowing 1 node to be down on a reconfig. this can be made to be a minority
- trying to keep change small as release is near.
- */
+ failures += i->votes;
+ if( !initial && failures <= allowableFailures ) {
const Member* m = theReplSet->findById( i->_id );
if( m ) {
- // ok, so this was an existing member (wouldn't make sense to add to config a new member that is down)
assert( m->h().toString() == i->h.toString() );
- allowFailure = true;
}
+ // it's okay if the down member isn't part of the config,
+ // we might be adding a new member that isn't up yet
+ allowFailure = true;
}
if( !allowFailure ) {
- string msg = string("need members up to initiate, not ok : ") + i->h.toString();
+ string msg = string("need all members up to initiate, not ok : ") + i->h.toString();
if( !initial )
msg = string("need most members up to reconfigure, not ok : ") + i->h.toString();
uasserted(13144, msg);
@@ -133,6 +137,9 @@ namespace mongo {
!hasData || i->h.isSelf());
}
}
+ if (down.size() > 0) {
+ result.append("down", down);
+ }
}
class CmdReplSetInitiate : public ReplSetCommand {
@@ -143,7 +150,7 @@ namespace mongo {
h << "Initiate/christen a replica set.";
h << "\nhttp://www.mongodb.org/display/DOCS/Replica+Set+Commands";
}
- virtual bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ virtual bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
log() << "replSet replSetInitiate admin command received from client" << rsLog;
if( !replSet ) {
@@ -179,7 +186,7 @@ namespace mongo {
if( ReplSet::startupStatus == ReplSet::BADCONFIG ) {
errmsg = "server already in BADCONFIG state (check logs); not initiating";
- result.append("info", ReplSet::startupStatusMsg);
+ result.append("info", ReplSet::startupStatusMsg.get());
return false;
}
if( ReplSet::startupStatus != ReplSet::EMPTYCONFIG ) {
@@ -204,6 +211,7 @@ namespace mongo {
b.append("_id", name);
bob members;
members.append("0", BSON( "_id" << 0 << "host" << HostAndPort::Me().toString() ));
+ result.append("me", HostAndPort::Me().toString());
for( unsigned i = 0; i < seeds.size(); i++ )
members.append(bob::numStr(i+1), BSON( "_id" << i+1 << "host" << seeds[i].toString()));
b.appendArray("members", members.obj());
@@ -226,7 +234,7 @@ namespace mongo {
log() << "replSet replSetInitiate config object parses ok, " << newConfig.members.size() << " members specified" << rsLog;
- checkMembersUpForConfigChange(newConfig, true);
+ checkMembersUpForConfigChange(newConfig, result, true);
log() << "replSet replSetInitiate all members seem up" << rsLog;
@@ -238,7 +246,7 @@ namespace mongo {
log() << "replSet replSetInitiate config now saved locally. Should come online in about a minute." << rsLog;
result.append("info", "Config now saved locally. Should come online in about a minute.");
ReplSet::startupStatus = ReplSet::SOON;
- ReplSet::startupStatusMsg = "Received replSetInitiate - should come online shortly.";
+ ReplSet::startupStatusMsg.set("Received replSetInitiate - should come online shortly.");
}
catch( DBException& e ) {
log() << "replSet replSetInitiate exception: " << e.what() << rsLog;
@@ -248,6 +256,11 @@ namespace mongo {
errmsg = string("couldn't initiate : ") + e.what();
return false;
}
+ catch( string& e2 ) {
+ log() << e2 << rsLog;
+ errmsg = e2;
+ return false;
+ }
return true;
}
diff --git a/db/repl/rs_member.h b/db/repl/rs_member.h
index b685c04..d60bb52 100644
--- a/db/repl/rs_member.h
+++ b/db/repl/rs_member.h
@@ -49,6 +49,7 @@ namespace mongo {
MemberState(MS ms = RS_UNKNOWN) : s(ms) { }
explicit MemberState(int ms) : s((MS) ms) { }
+ bool startup() const { return s == RS_STARTUP; }
bool primary() const { return s == RS_PRIMARY; }
bool secondary() const { return s == RS_SECONDARY; }
bool recovering() const { return s == RS_RECOVERING; }
@@ -79,6 +80,8 @@ namespace mongo {
DiagStr lastHeartbeatMsg;
OpTime opTime;
int skew;
+ unsigned int ping; // milliseconds
+ static unsigned int numPings;
bool up() const { return health > 0; }
@@ -104,4 +107,20 @@ namespace mongo {
hbstate != old.hbstate;
}
+ inline string MemberState::toString() const {
+ switch ( s ) {
+ case RS_STARTUP: return "STARTUP";
+ case RS_PRIMARY: return "PRIMARY";
+ case RS_SECONDARY: return "SECONDARY";
+ case RS_RECOVERING: return "RECOVERING";
+ case RS_FATAL: return "FATAL";
+ case RS_STARTUP2: return "STARTUP2";
+ case RS_ARBITER: return "ARBITER";
+ case RS_DOWN: return "DOWN";
+ case RS_ROLLBACK: return "ROLLBACK";
+ case RS_UNKNOWN: return "UNKNOWN";
+ }
+ return "";
+ }
+
}
diff --git a/db/repl/rs_rollback.cpp b/db/repl/rs_rollback.cpp
index 0b4cc28..f012e65 100644
--- a/db/repl/rs_rollback.cpp
+++ b/db/repl/rs_rollback.cpp
@@ -20,7 +20,10 @@
#include "../../client/dbclient.h"
#include "rs.h"
#include "../repl.h"
-#include "../query.h"
+#include "../ops/query.h"
+#include "../cloner.h"
+#include "../ops/update.h"
+#include "../ops/delete.h"
/* Scenarios
@@ -62,7 +65,6 @@ namespace mongo {
using namespace bson;
- bool copyCollectionFromRemote(const string& host, const string& ns, const BSONObj& query, string& errmsg, bool logforrepl);
void incRBID();
class rsfatal : public std::exception {
@@ -227,9 +229,9 @@ namespace mongo {
log() << "replSet info rollback our last optime: " << ourTime.toStringPretty() << rsLog;
log() << "replSet info rollback their last optime: " << theirTime.toStringPretty() << rsLog;
log() << "replSet info rollback diff in end of log times: " << diff << " seconds" << rsLog;
- if( diff > 3600 ) {
+ if( diff > 1800 ) {
log() << "replSet rollback too long a time period for a rollback." << rsLog;
- throw "error not willing to roll back more than one hour of data";
+ throw "error not willing to roll back more than 30 minutes of data";
}
}
@@ -339,7 +341,7 @@ namespace mongo {
{
/* TODO : slow. lots of round trips. */
n++;
- bo good= them->findOne(d.ns, d._id.wrap()).getOwned();
+ bo good= them->findOne(d.ns, d._id.wrap(), NULL, QueryOption_SlaveOk).getOwned();
totSize += good.objsize();
uassert( 13410, "replSet too much data to roll back", totSize < 300 * 1024 * 1024 );
@@ -393,7 +395,7 @@ namespace mongo {
dropCollection(ns, errmsg, res);
{
dbtemprelease r;
- bool ok = copyCollectionFromRemote(them->getServerAddress(), ns, bo(), errmsg, false);
+ bool ok = copyCollectionFromRemote(them->getServerAddress(), ns, bo(), errmsg, false, true, false);
if( !ok ) {
log() << "replSet rollback error resyncing collection " << ns << ' ' << errmsg << rsLog;
throw "rollback error resyncing rollection [1]";
@@ -572,7 +574,7 @@ namespace mongo {
sethbmsg("rollback 6");
// clean up oplog
- log(2) << "replSet rollback truncate oplog after " << h.commonPoint.toStringPretty() << rsLog;
+ LOG(2) << "replSet rollback truncate oplog after " << h.commonPoint.toStringPretty() << rsLog;
// todo: fatal error if this throws?
oplogDetails->cappedTruncateAfter(rsoplog, h.commonPointOurDiskloc, false);
@@ -607,26 +609,20 @@ namespace mongo {
return 2;
}
- if( box.getState().secondary() ) {
+ if( state().secondary() ) {
/* by doing this, we will not service reads (return an error as we aren't in secondary staate.
that perhaps is moot becasue of the write lock above, but that write lock probably gets deferred
or removed or yielded later anyway.
also, this is better for status reporting - we know what is happening.
*/
- box.change(MemberState::RS_ROLLBACK, _self);
+ changeState(MemberState::RS_ROLLBACK);
}
HowToFixUp how;
sethbmsg("rollback 1");
{
r.resetCursor();
- /*DBClientConnection us(false, 0, 0);
- string errmsg;
- if( !us.connect(HostAndPort::me().toString(),errmsg) ) {
- sethbmsg("rollback connect to self failure" + errmsg);
- return;
- }*/
sethbmsg("rollback 2 FindCommonPoint");
try {
@@ -668,7 +664,7 @@ namespace mongo {
/* success - leave "ROLLBACK" state
can go to SECONDARY once minvalid is achieved
*/
- box.change(MemberState::RS_RECOVERING, _self);
+ changeState(MemberState::RS_RECOVERING);
}
return 0;
diff --git a/db/repl/rs_sync.cpp b/db/repl/rs_sync.cpp
index 8d06fcc..b29328b 100644
--- a/db/repl/rs_sync.cpp
+++ b/db/repl/rs_sync.cpp
@@ -20,28 +20,28 @@
#include "rs.h"
#include "../repl.h"
#include "connections.h"
+
namespace mongo {
using namespace bson;
extern unsigned replSetForceInitialSyncFailure;
+ void NOINLINE_DECL blank(const BSONObj& o) {
+ if( *o.getStringField("op") != 'n' ) {
+ log() << "replSet skipping bad op in oplog: " << o.toString() << rsLog;
+ }
+ }
+
/* apply the log op that is in param o */
void ReplSetImpl::syncApply(const BSONObj &o) {
- char db[MaxDatabaseNameLen];
const char *ns = o.getStringField("ns");
- nsToDatabase(ns, db);
-
if ( *ns == '.' || *ns == 0 ) {
- if( *o.getStringField("op") == 'n' )
- return;
- log() << "replSet skipping bad op in oplog: " << o.toString() << endl;
+ blank(o);
return;
}
Client::Context ctx(ns);
ctx.getClient()->curop()->reset();
-
- /* todo : if this asserts, do we want to ignore or not? */
applyOperation_inlock(o);
}
@@ -63,15 +63,11 @@ namespace mongo {
return false;
}
- {
- BSONObjBuilder q;
- q.appendDate("$gte", applyGTE.asDate());
- BSONObjBuilder query;
- query.append("ts", q.done());
- BSONObj queryObj = query.done();
- r.query(rsoplog, queryObj);
+ r.tailingQueryGTE( rsoplog, applyGTE );
+ if ( !r.haveCursor() ) {
+ log() << "replSet initial sync oplog query error" << rsLog;
+ return false;
}
- assert( r.haveCursor() );
{
if( !r.more() ) {
@@ -83,7 +79,7 @@ namespace mongo {
OpTime t = op["ts"]._opTime();
r.putBack(op);
- if( op.firstElement().fieldName() == string("$err") ) {
+ if( op.firstElementFieldName() == string("$err") ) {
log() << "replSet initial sync error querying " << rsoplog << " on " << hn << " : " << op.toString() << rsLog;
return false;
}
@@ -95,6 +91,9 @@ namespace mongo {
log() << "replSet initial sync but received a first optime of " << t << " from " << hn << rsLog;
return false;
}
+
+ sethbmsg(str::stream() << "initial oplog application from " << hn << " starting at "
+ << t.toStringPretty() << " to " << minValid.toStringPretty());
}
}
catch(DBException& e) {
@@ -107,6 +106,7 @@ namespace mongo {
// todo : use exhaust
OpTime ts;
+ time_t start = time(0);
unsigned long long n = 0;
while( 1 ) {
try {
@@ -139,18 +139,35 @@ namespace mongo {
}
_logOpObjRS(o); /* with repl sets we write the ops to our oplog too */
}
- if( ++n % 100000 == 0 ) {
- // simple progress metering
- log() << "replSet initialSyncOplogApplication " << n << rsLog;
+
+ if ( ++n % 1000 == 0 ) {
+ time_t now = time(0);
+ if (now - start > 10) {
+ // simple progress metering
+ log() << "replSet initialSyncOplogApplication applied " << n << " operations, synced to "
+ << ts.toStringPretty() << rsLog;
+ start = now;
+ }
}
getDur().commitIfNeeded();
}
catch (DBException& e) {
+ // skip duplicate key exceptions
if( e.getCode() == 11000 || e.getCode() == 11001 ) {
- // skip duplicate key exceptions
continue;
}
+
+ // handle cursor not found (just requery)
+ if( e.getCode() == 13127 ) {
+ r.resetCursor();
+ r.tailingQueryGTE(rsoplog, ts);
+ if( r.haveCursor() ) {
+ continue;
+ }
+ }
+
+ // TODO: handle server restart
if( ts <= minValid ) {
// didn't make it far enough
@@ -171,6 +188,16 @@ namespace mongo {
*/
bool ReplSetImpl::tryToGoLiveAsASecondary(OpTime& /*out*/ minvalid) {
bool golive = false;
+
+ {
+ lock lk( this );
+
+ if (_maintenanceMode > 0) {
+ // we're not actually going live
+ return true;
+ }
+ }
+
{
readlock lk("local.replset.minvalid");
BSONObj mv;
@@ -190,35 +217,35 @@ namespace mongo {
return golive;
}
- /**
- * Checks if the oplog given is too far ahead to read from.
- *
- * @param r the oplog
- * @param hn the hostname (for log messages)
- *
- * @return if we are stale compared to the oplog on hn
- */
bool ReplSetImpl::_isStale(OplogReader& r, const string& hn) {
BSONObj remoteOldestOp = r.findOne(rsoplog, Query());
OpTime ts = remoteOldestOp["ts"]._opTime();
DEV log() << "replSet remoteOldestOp: " << ts.toStringLong() << rsLog;
- else log(3) << "replSet remoteOldestOp: " << ts.toStringLong() << rsLog;
+ else LOG(3) << "replSet remoteOldestOp: " << ts.toStringLong() << rsLog;
DEV {
- // debugging sync1.js...
log() << "replSet lastOpTimeWritten: " << lastOpTimeWritten.toStringLong() << rsLog;
log() << "replSet our state: " << state().toString() << rsLog;
}
- if( lastOpTimeWritten < ts ) {
- log() << "replSet error RS102 too stale to catch up, at least from " << hn << rsLog;
- log() << "replSet our last optime : " << lastOpTimeWritten.toStringLong() << rsLog;
- log() << "replSet oldest at " << hn << " : " << ts.toStringLong() << rsLog;
- log() << "replSet See http://www.mongodb.org/display/DOCS/Resyncing+a+Very+Stale+Replica+Set+Member" << rsLog;
- sethbmsg("error RS102 too stale to catch up");
- changeState(MemberState::RS_RECOVERING);
- sleepsecs(120);
- return true;
- }
- return false;
+ if( lastOpTimeWritten >= ts ) {
+ return false;
+ }
+
+ // we're stale
+ log() << "replSet error RS102 too stale to catch up, at least from " << hn << rsLog;
+ log() << "replSet our last optime : " << lastOpTimeWritten.toStringLong() << rsLog;
+ log() << "replSet oldest at " << hn << " : " << ts.toStringLong() << rsLog;
+ log() << "replSet See http://www.mongodb.org/display/DOCS/Resyncing+a+Very+Stale+Replica+Set+Member" << rsLog;
+
+ // reset minvalid so that we can't become primary prematurely
+ {
+ writelock lk("local.replset.minvalid");
+ Helpers::putSingleton("local.replset.minvalid", remoteOldestOp);
+ }
+
+ sethbmsg("error RS102 too stale to catch up");
+ changeState(MemberState::RS_RECOVERING);
+ sleepsecs(120);
+ return true;
}
/**
@@ -234,7 +261,7 @@ namespace mongo {
assert(r.conn() == 0);
if( !r.connect(hn) ) {
- log(2) << "replSet can't connect to " << hn << " to read operations" << rsLog;
+ LOG(2) << "replSet can't connect to " << hn << " to read operations" << rsLog;
r.resetConnection();
return false;
}
@@ -250,8 +277,11 @@ namespace mongo {
// todo : locking vis a vis the mgr...
OplogReader r;
string hn;
+ const Member *target = 0;
- const Member *target = box.getPrimary();
+ // if we cannot reach the master but someone else is more up-to-date
+ // than we are, sync from them.
+ target = getMemberToSyncTo();
if (target != 0) {
hn = target->h().toString();
if (!_getOplogReader(r, hn)) {
@@ -260,32 +290,21 @@ namespace mongo {
target = 0;
}
}
-
- // if we cannot reach the master but someone else is more up-to-date
- // than we are, sync from them.
- if( target == 0 ) {
- for(Member *m = head(); m; m=m->next()) {
- hn = m->h().toString();
- if (m->hbinfo().up() && m->state().readable() &&
- (m->hbinfo().opTime > lastOpTimeWritten) &&
- m->config().slaveDelay == 0 &&
- _getOplogReader(r, hn)) {
- target = m;
- break;
- }
- }
-
- // no server found
- if (target == 0) {
- // if there is no one to sync from
- OpTime minvalid;
- tryToGoLiveAsASecondary(minvalid);
- return;
- }
+
+ // no server found
+ if (target == 0) {
+ // if there is no one to sync from
+ OpTime minvalid;
+ tryToGoLiveAsASecondary(minvalid);
+ return;
}
-
+
r.tailingQueryGTE(rsoplog, lastOpTimeWritten);
- assert( r.haveCursor() );
+ // if target cut connections between connecting and querying (for
+ // example, because it stepped down) we might not have a cursor
+ if ( !r.haveCursor() ) {
+ return;
+ }
uassert(1000, "replSet source for syncing doesn't seem to be await capable -- is it an older version of mongodb?", r.awaitCapable() );
@@ -314,22 +333,14 @@ namespace mongo {
sleepsecs(2);
}
return;
- /*
- log() << "replSet syncTail error querying oplog >= " << lastOpTimeWritten.toString() << " from " << hn << rsLog;
- try {
- log() << "replSet " << hn << " last op: " << r.getLastOp(rsoplog).toString() << rsLog;
- }
- catch(...) { }
- sleepsecs(1);
- return;*/
}
BSONObj o = r.nextSafe();
OpTime ts = o["ts"]._opTime();
long long h = o["h"].numberLong();
if( ts != lastOpTimeWritten || h != lastH ) {
- log() << "replSet our last op time written: " << lastOpTimeWritten.toStringPretty() << endl;
- log() << "replset source's GTE: " << ts.toStringPretty() << endl;
+ log() << "replSet our last op time written: " << lastOpTimeWritten.toStringPretty() << rsLog;
+ log() << "replset source's GTE: " << ts.toStringPretty() << rsLog;
syncRollback(r);
return;
}
@@ -362,15 +373,8 @@ namespace mongo {
/* todo: too stale capability */
}
- {
- const Member *primary = box.getPrimary();
-
- if( !target->hbinfo().hbstate.readable() ||
- // if we are not syncing from the primary, return (if
- // it's up) so that we can try accessing it again
- (target != primary && primary != 0)) {
- return;
- }
+ if( !target->hbinfo().hbstate.readable() ) {
+ return;
}
}
if( !r.more() )
@@ -389,20 +393,22 @@ namespace mongo {
long long sleeptime = sd - lag;
if( sleeptime > 0 ) {
uassert(12000, "rs slaveDelay differential too big check clocks and systems", sleeptime < 0x40000000);
- log() << "replSet temp slavedelay sleep:" << sleeptime << rsLog;
if( sleeptime < 60 ) {
sleepsecs((int) sleeptime);
}
else {
+ log() << "replSet slavedelay sleep long time: " << sleeptime << rsLog;
// sleep(hours) would prevent reconfigs from taking effect & such!
long long waitUntil = b + sleeptime;
while( 1 ) {
sleepsecs(6);
if( time(0) >= waitUntil )
break;
+
if( !target->hbinfo().hbstate.readable() ) {
break;
}
+
if( myConfig().slaveDelay != sd ) // reconf
break;
}
@@ -411,7 +417,7 @@ namespace mongo {
}
- {
+ try {
writelock lk("");
/* if we have become primary, we don't want to apply things from elsewhere
@@ -423,16 +429,22 @@ namespace mongo {
}
syncApply(o);
- _logOpObjRS(o); /* with repl sets we write the ops to our oplog too: */
+ _logOpObjRS(o); // with repl sets we write the ops to our oplog too
+ }
+ catch (DBException& e) {
+ sethbmsg(str::stream() << "syncTail: " << e.toString() << ", syncing: " << o);
+ sleepsecs(30);
+ return;
}
}
}
r.tailCheck();
if( !r.haveCursor() ) {
- log(1) << "replSet end syncTail pass with " << hn << rsLog;
+ LOG(1) << "replSet end syncTail pass with " << hn << rsLog;
// TODO : reuse our connection to the primary.
return;
}
+
if( !target->hbinfo().hbstate.readable() ) {
return;
}
@@ -446,7 +458,7 @@ namespace mongo {
sleepsecs(1);
return;
}
- if( sp.state.fatal() ) {
+ if( sp.state.fatal() || sp.state.startup() ) {
sleepsecs(5);
return;
}
@@ -462,32 +474,23 @@ namespace mongo {
}
void ReplSetImpl::syncThread() {
- /* test here was to force a receive timeout
- ScopedConn c("localhost");
- bo info;
- try {
- log() << "this is temp" << endl;
- c.runCommand("admin", BSON("sleep"<<120), info);
- log() << info.toString() << endl;
- c.runCommand("admin", BSON("sleep"<<120), info);
- log() << "temp" << endl;
- }
- catch( DBException& e ) {
- log() << e.toString() << endl;
- c.runCommand("admin", BSON("sleep"<<120), info);
- log() << "temp" << endl;
- }
- */
-
while( 1 ) {
- if( myConfig().arbiterOnly )
+ // After a reconfig, we may not be in the replica set anymore, so
+ // check that we are in the set (and not an arbiter) before
+ // trying to sync with other replicas.
+ if( ! _self ) {
+ log() << "replSet warning did not detect own host and port, not syncing, config: " << theReplSet->config() << rsLog;
+ return;
+ }
+ if( myConfig().arbiterOnly ) {
return;
+ }
try {
_syncThread();
}
catch(DBException& e) {
- sethbmsg("syncThread: " + e.toString());
+ sethbmsg(str::stream() << "syncThread: " << e.toString());
sleepsecs(10);
}
catch(...) {
@@ -501,7 +504,9 @@ namespace mongo {
are no heartbeat threads, so we do it here to be sure. this is relevant if the singleton
member has done a stepDown() and needs to come back up.
*/
- OCCASIONALLY mgr->send( boost::bind(&Manager::msgCheckNewState, theReplSet->mgr) );
+ OCCASIONALLY {
+ mgr->send( boost::bind(&Manager::msgCheckNewState, theReplSet->mgr) );
+ }
}
}
@@ -513,13 +518,115 @@ namespace mongo {
}
n++;
- Client::initThread("replica set sync");
- cc().iAmSyncThread();
- if (!noauth) {
- cc().getAuthenticationInfo()->authorize("local");
- }
+ Client::initThread("rsSync");
+ cc().iAmSyncThread(); // for isSyncThread() (not used much; used in the secondary create-index code)
+ replLocalAuth();
theReplSet->syncThread();
cc().shutdown();
}
+ void GhostSync::starting() {
+ Client::initThread("rsGhostSync");
+ replLocalAuth();
+ }
+
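+ // map a slave's replica id (rid) to its Member so that updateSlave() and
+ // percolate() can look it up later in the ghost cache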
+ void GhostSync::associateSlave(const BSONObj& id, const int memberId) {
+ const OID rid = id["_id"].OID();
+ rwlock lk( _lock , true );
+ GhostSlave &slave = _ghostCache[rid];
+ if (slave.init) {
+ LOG(1) << "tracking " << slave.slave->h().toString() << " as " << rid << rsLog;
+ return;
+ }
+
+ slave.slave = (Member*)rs->findById(memberId);
+ if (slave.slave != 0) {
+ slave.init = true;
+ }
+ else {
+ log() << "replset couldn't find a slave with id " << memberId
+ << ", not tracking " << rid << rsLog;
+ }
+ }
+
+ void GhostSync::updateSlave(const mongo::OID& rid, const OpTime& last) {
+ rwlock lk( _lock , false );
+ MAP::iterator i = _ghostCache.find( rid );
+ if ( i == _ghostCache.end() ) {
+ OCCASIONALLY warning() << "couldn't update slave " << rid << " no entry" << rsLog;
+ return;
+ }
+
+ GhostSlave& slave = i->second;
+ if (!slave.init) {
+ OCCASIONALLY log() << "couldn't update slave " << rid << " not init" << rsLog;
+ return;
+ }
+
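+ // record the slave's progress in its member's tag groups so tag-based
+ // getLastError modes (see opReplicatedEnough) can see it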
+ ((ReplSetConfig::MemberCfg)slave.slave->config()).updateGroups(last);
+ }
+
+ void GhostSync::percolate(const BSONObj& id, const OpTime& last) {
+ const OID rid = id["_id"].OID();
+ GhostSlave* slave;
+ {
+ rwlock lk( _lock , false );
+
+ MAP::iterator i = _ghostCache.find( rid );
+ if ( i == _ghostCache.end() ) {
+ OCCASIONALLY log() << "couldn't percolate slave " << rid << " no entry" << rsLog;
+ return;
+ }
+
+ slave = &(i->second);
+ if (!slave->init) {
+ OCCASIONALLY log() << "couldn't percolate slave " << rid << " not init" << rsLog;
+ return;
+ }
+ }
+
+ assert(slave->slave);
+
+ const Member *target = rs->_currentSyncTarget;
+ if (!target || rs->box.getState().primary()
+ // we are currently syncing from someone who's syncing from us
+ // the target might end up with a new Member object, but slave->slave never
+ // changes, so we compare the names instead
+ || target == slave->slave || target->fullName() == slave->slave->fullName()) {
+ LOG(1) << "replica set ghost target no good" << endl;
+ return;
+ }
+
+ try {
+ if (!slave->reader.haveCursor()) {
+ if (!slave->reader.connect(id, slave->slave->id(), target->fullName())) {
+ // error message logged in OplogReader::connect
+ return;
+ }
+ slave->reader.ghostQueryGTE(rsoplog, last);
+ }
+
+ LOG(1) << "replSet last: " << slave->last.toString() << " to " << last.toString() << rsLog;
+ if (slave->last > last) {
+ return;
+ }
+
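+ // advance along the sync target's oplog until we reach the slave's
+ // reported optime, so the target sees this slave's progress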
+ while (slave->last <= last) {
+ if (!slave->reader.more()) {
+ // we'll be back
+ return;
+ }
+
+ BSONObj o = slave->reader.nextSafe();
+ slave->last = o["ts"]._opTime();
+ }
+ LOG(2) << "now last is " << slave->last.toString() << rsLog;
+ }
+ catch (DBException& e) {
+ // we'll be back
+ LOG(2) << "replSet ghost sync error: " << e.what() << " for "
+ << slave->slave->fullName() << rsLog;
+ slave->reader.resetConnection();
+ }
+ }
}
diff --git a/db/repl_block.cpp b/db/repl_block.cpp
index 05be343..dcac121 100644
--- a/db/repl_block.cpp
+++ b/db/repl_block.cpp
@@ -24,7 +24,7 @@
#include "../util/background.h"
#include "../util/mongoutils/str.h"
#include "../client/dbclient.h"
-#include "replpair.h"
+#include "replutil.h"
//#define REPLDEBUG(x) log() << "replBlock: " << x << endl;
#define REPLDEBUG(x)
@@ -41,7 +41,7 @@ namespace mongo {
struct Ident {
- Ident(BSONObj r,string h,string n) {
+ Ident(const BSONObj& r, const string& h, const string& n) {
BSONObjBuilder b;
b.appendElements( r );
b.append( "host" , h );
@@ -50,7 +50,7 @@ namespace mongo {
}
bool operator<( const Ident& other ) const {
- return obj.woCompare( other.obj ) < 0;
+ return obj["_id"].OID() < other.obj["_id"].OID();
}
BSONObj obj;
@@ -122,6 +122,11 @@ namespace mongo {
Ident ident(rid,host,ns);
Info& i = _slaves[ ident ];
+
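+ // on a primary, also let GhostSync track this slave's optime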
+ if (theReplSet && theReplSet->isPrimary()) {
+ theReplSet->ghost->updateSlave(ident.obj["_id"].OID(), last);
+ }
+
if ( i.loc ) {
if( i.owned )
i.loc[0] = last;
@@ -153,11 +158,34 @@ namespace mongo {
}
- bool opReplicatedEnough( OpTime op , int w ) {
+ bool opReplicatedEnough( OpTime op , BSONElement w ) {
RARELY {
REPLDEBUG( "looking for : " << op << " w=" << w );
}
+ if (w.isNumber()) {
+ return replicatedToNum(op, w.numberInt());
+ }
+
+ if (!theReplSet) {
+ return false;
+ }
+
+ string wStr = w.String();
+ if (wStr == "majority") {
+ // use the entire set, including arbiters, to prevent writing
+ // to a majority of the set but not a majority of voters
+ return replicatedToNum(op, theReplSet->config().members.size()/2+1);
+ }
+
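+ // otherwise w names a getLastError mode from the replica set config;
+ // each rule tracks the last optime replicated to its tag set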
+ map<string,ReplSetConfig::TagRule*>::const_iterator it = theReplSet->config().rules.find(wStr);
+ uassert(14830, str::stream() << "unrecognized getLastError mode: " << wStr,
+ it != theReplSet->config().rules.end());
+
+ return op <= (*it).second->last;
+ }
+
+ bool replicatedToNum(OpTime& op, int w) {
if ( w <= 1 || ! _isMaster() )
return true;
@@ -203,12 +231,23 @@ namespace mongo {
return;
slaveTracking.update( rid , curop.getRemoteString( false ) , ns , lastOp );
+
+ if (theReplSet && !theReplSet->isPrimary()) {
+ // we don't know the slave's port, so we make the replica set keep
+ // a map of rids to slaves
+ log(2) << "percolating " << lastOp.toString() << " from " << rid << endl;
+ theReplSet->ghost->send( boost::bind(&GhostSync::percolate, theReplSet->ghost, rid, lastOp) );
+ }
}
- bool opReplicatedEnough( OpTime op , int w ) {
+ bool opReplicatedEnough( OpTime op , BSONElement w ) {
return slaveTracking.opReplicatedEnough( op , w );
}
+ bool opReplicatedEnough( OpTime op , int w ) {
+ return slaveTracking.replicatedToNum( op , w );
+ }
+
void resetSlaveCache() {
slaveTracking.reset();
}
diff --git a/db/repl_block.h b/db/repl_block.h
index 978932d..bb74dee 100644
--- a/db/repl_block.h
+++ b/db/repl_block.h
@@ -32,6 +32,7 @@ namespace mongo {
/** @return true if op has made it to w servers */
bool opReplicatedEnough( OpTime op , int w );
+ bool opReplicatedEnough( OpTime op , BSONElement w );
void resetSlaveCache();
unsigned getSlaveCount();
diff --git a/db/replpair.h b/db/replpair.h
deleted file mode 100644
index a551308..0000000
--- a/db/replpair.h
+++ /dev/null
@@ -1,238 +0,0 @@
-/**
-* Copyright (C) 2008 10gen Inc.
-*
-* This program is free software: you can redistribute it and/or modify
-* it under the terms of the GNU Affero General Public License, version 3,
-* as published by the Free Software Foundation.
-*
-* This program is distributed in the hope that it will be useful,
-* but WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-* GNU Affero General Public License for more details.
-*
-* You should have received a copy of the GNU Affero General Public License
-* along with this program. If not, see <http://www.gnu.org/licenses/>.
-*/
-
-#pragma once
-
-#include "db.h"
-#include "dbhelpers.h"
-#include "json.h"
-#include "../client/dbclient.h"
-#include "repl.h"
-#include "cmdline.h"
-#include "repl/rs.h"
-
-namespace mongo {
-
- extern const char *replAllDead;
-
- /* ReplPair is a pair of db servers replicating to one another and cooperating.
-
- Only one member of the pair is active at a time; so this is a smart master/slave
- configuration basically.
-
- You may read from the slave at anytime though (if you don't mind the slight lag).
-
- todo: Could be extended to be more than a pair, thus the name 'Set' -- for example,
- a set of 3...
- */
-
- class ReplPair {
- public:
- enum ReplState {
- State_CantArb = -3,
- State_Confused = -2,
- State_Negotiating = -1,
- State_Slave = 0,
- State_Master = 1
- };
-
- int state;
- ThreadSafeString info; // commentary about our current state
- string arbHost; // "-" for no arbiter. "host[:port]"
- int remotePort;
- string remoteHost;
- string remote; // host:port if port specified.
- // int date; // -1 not yet set; 0=slave; 1=master
-
- string getInfo() {
- stringstream ss;
- ss << " state: ";
- if ( state == 1 ) ss << "1 State_Master ";
- else if ( state == 0 ) ss << "0 State_Slave";
- else
- ss << "<b>" << state << "</b>";
- ss << '\n';
- ss << " info: " << info << '\n';
- ss << " arbhost: " << arbHost << '\n';
- ss << " remote: " << remoteHost << ':' << remotePort << '\n';
-// ss << " date: " << date << '\n';
- return ss.str();
- }
-
- ReplPair(const char *remoteEnd, const char *arbiter);
- virtual ~ReplPair() {}
-
- bool dominant(const string& myname) {
- if ( myname == remoteHost )
- return cmdLine.port > remotePort;
- return myname > remoteHost;
- }
-
- void setMasterLocked( int n, const char *_comment = "" ) {
- dblock p;
- setMaster( n, _comment );
- }
-
- void setMaster(int n, const char *_comment = "");
-
- /* negotiate with our peer who is master; returns state of peer */
- int negotiate(DBClientConnection *conn, string method);
-
- /* peer unreachable, try our arbitrator */
- void arbitrate();
-
- virtual
- DBClientConnection *newClientConnection() const {
- return new DBClientConnection();
- }
- };
-
- extern ReplPair *replPair;
-
- /* note we always return true for the "local" namespace.
-
- we should not allow most operations when not the master
- also we report not master if we are "dead".
-
- See also CmdIsMaster.
-
- If 'client' is not specified, the current client is used.
- */
- inline bool _isMaster() {
- if( replSet ) {
- if( theReplSet )
- return theReplSet->isPrimary();
- return false;
- }
-
- if( ! replSettings.slave )
- return true;
-
- if ( replAllDead )
- return false;
-
- if ( replPair ) {
- if( replPair->state == ReplPair::State_Master )
- return true;
- }
- else {
- if( replSettings.master ) {
- // if running with --master --slave, allow. note that master is also true
- // for repl pairs so the check for replPair above is important.
- return true;
- }
- }
-
- if ( cc().isGod() )
- return true;
-
- return false;
- }
- inline bool isMaster(const char *client = 0) {
- if( _isMaster() )
- return true;
- if ( !client ) {
- Database *database = cc().database();
- assert( database );
- client = database->name.c_str();
- }
- return strcmp( client, "local" ) == 0;
- }
-
- inline void notMasterUnless(bool expr) {
- uassert( 10107 , "not master" , expr );
- }
-
- /* we allow queries to SimpleSlave's -- but not to the slave (nonmaster) member of a replica pair
- so that queries to a pair are realtime consistent as much as possible. use setSlaveOk() to
- query the nonmaster member of a replica pair.
- */
- inline void replVerifyReadsOk(ParsedQuery& pq) {
- if( replSet ) {
- /* todo: speed up the secondary case. as written here there are 2 mutex entries, it can b 1. */
- if( isMaster() ) return;
- uassert(13435, "not master and slaveok=false", pq.hasOption(QueryOption_SlaveOk));
- uassert(13436, "not master or secondary, can't read", theReplSet && theReplSet->isSecondary() );
- }
- else {
- notMasterUnless(isMaster() || pq.hasOption(QueryOption_SlaveOk) || replSettings.slave == SimpleSlave );
- }
- }
-
- inline bool isMasterNs( const char *ns ) {
- char cl[ 256 ];
- nsToDatabase( ns, cl );
- return isMaster( cl );
- }
-
- inline ReplPair::ReplPair(const char *remoteEnd, const char *arb) {
- state = -1;
- remote = remoteEnd;
- remotePort = CmdLine::DefaultDBPort;
- remoteHost = remoteEnd;
- const char *p = strchr(remoteEnd, ':');
- if ( p ) {
- remoteHost = string(remoteEnd, p-remoteEnd);
- remotePort = atoi(p+1);
- uassert( 10125 , "bad port #", remotePort > 0 && remotePort < 0x10000 );
- if ( remotePort == CmdLine::DefaultDBPort )
- remote = remoteHost; // don't include ":27017" as it is default; in case ran in diff ways over time to normalizke the hostname format in sources collection
- }
-
- uassert( 10126 , "arbiter parm is missing, use '-' for none", arb);
- arbHost = arb;
- uassert( 10127 , "arbiter parm is empty", !arbHost.empty());
- }
-
- /* This is set to true if we have EVER been up to date -- this way a new pair member
- which is a replacement won't go online as master until we have initially fully synced.
- */
- class PairSync {
- int initialsynccomplete;
- public:
- PairSync() {
- initialsynccomplete = -1;
- }
-
- /* call before using the class. from dbmutex */
- void init() {
- BSONObj o;
- initialsynccomplete = 0;
- if ( Helpers::getSingleton("local.pair.sync", o) )
- initialsynccomplete = 1;
- }
-
- bool initialSyncCompleted() {
- return initialsynccomplete != 0;
- }
-
- void setInitialSyncCompleted() {
- BSONObj o = fromjson("{\"initialsynccomplete\":1}");
- Helpers::putSingleton("local.pair.sync", o);
- initialsynccomplete = 1;
- tlog() << "pair: initial sync complete" << endl;
- }
-
- void setInitialSyncCompletedLocking() {
- if ( initialsynccomplete == 1 )
- return;
- dblock lk;
- setInitialSyncCompleted();
- }
- };
-
-
-} // namespace mongo
diff --git a/db/replutil.h b/db/replutil.h
new file mode 100644
index 0000000..f2bea23
--- /dev/null
+++ b/db/replutil.h
@@ -0,0 +1,98 @@
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+#include "db.h"
+#include "dbhelpers.h"
+#include "json.h"
+#include "../client/dbclient.h"
+#include "repl.h"
+#include "cmdline.h"
+#include "repl/rs.h"
+#include "ops/query.h"
+
+namespace mongo {
+
+ extern const char *replAllDead;
+
+ /* note we always return true for the "local" namespace.
+
+ we should not allow most operations when not the master
+ also we report not master if we are "dead".
+
+ See also CmdIsMaster.
+
+ If 'client' is not specified, the current client is used.
+ */
+ inline bool _isMaster() {
+ if( replSet ) {
+ if( theReplSet )
+ return theReplSet->isPrimary();
+ return false;
+ }
+
+ if( ! replSettings.slave )
+ return true;
+
+ if ( replAllDead )
+ return false;
+
+ if( replSettings.master ) {
+ // if running with --master --slave, allow.
+ return true;
+ }
+
+ if ( cc().isGod() )
+ return true;
+
+ return false;
+ }
+ inline bool isMaster(const char *client = 0) {
+ if( _isMaster() )
+ return true;
+ if ( !client ) {
+ Database *database = cc().database();
+ assert( database );
+ client = database->name.c_str();
+ }
+ return strcmp( client, "local" ) == 0;
+ }
+
+ inline void notMasterUnless(bool expr) {
+ uassert( 10107 , "not master" , expr );
+ }
+
+ /** we allow queries to SimpleSlave's */
+ inline void replVerifyReadsOk(ParsedQuery& pq) {
+ if( replSet ) {
+ /* todo: speed up the secondary case. as written here there are 2 mutex entries, it can be 1. */
+ if( isMaster() ) return;
+ uassert(13435, "not master and slaveok=false", pq.hasOption(QueryOption_SlaveOk));
+ uassert(13436, "not master or secondary, can't read", theReplSet && theReplSet->isSecondary() );
+ }
+ else {
+ notMasterUnless(isMaster() || pq.hasOption(QueryOption_SlaveOk) || replSettings.slave == SimpleSlave );
+ }
+ }
+
+ inline bool isMasterNs( const char *ns ) {
+ char cl[ 256 ];
+ nsToDatabase( ns, cl );
+ return isMaster( cl );
+ }
+
+} // namespace mongo
diff --git a/db/restapi.cpp b/db/restapi.cpp
index 7460c94..b29521e 100644
--- a/db/restapi.cpp
+++ b/db/restapi.cpp
@@ -18,14 +18,14 @@
*/
#include "pch.h"
-#include "../util/miniwebserver.h"
+#include "../util/net/miniwebserver.h"
#include "../util/mongoutils/html.h"
#include "../util/md5.hpp"
#include "instance.h"
#include "dbwebserver.h"
#include "dbhelpers.h"
#include "repl.h"
-#include "replpair.h"
+#include "replutil.h"
#include "clientcursor.h"
#include "background.h"
@@ -279,14 +279,6 @@ namespace mongo {
else {
ss << "\nmaster: " << replSettings.master << '\n';
ss << "slave: " << replSettings.slave << '\n';
- if ( replPair ) {
- ss << "replpair:\n";
- ss << replPair->getInfo();
- }
- bool seemCaughtUp = getInitialSyncCompleted();
- if ( !seemCaughtUp ) ss << "<b>";
- ss << "initialSyncCompleted: " << seemCaughtUp;
- if ( !seemCaughtUp ) ss << "</b>";
ss << '\n';
}
diff --git a/db/scanandorder.cpp b/db/scanandorder.cpp
new file mode 100644
index 0000000..efa9c8d
--- /dev/null
+++ b/db/scanandorder.cpp
@@ -0,0 +1,93 @@
+/* scanandorder.cpp
+ Order results (that aren't already indexed and in order).
+*/
+
+/**
+ * Copyright (C) 2008 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "scanandorder.h"
+
+namespace mongo {
+
+ const unsigned ScanAndOrder::MaxScanAndOrderBytes = 32 * 1024 * 1024;
+
+ void ScanAndOrder::_add(BSONObj& k, BSONObj o, DiskLoc* loc) {
+ if (!loc) {
+ _best.insert(make_pair(k.getOwned(),o.getOwned()));
+ }
+ else {
+ BSONObjBuilder b;
+ b.appendElements(o);
+ b.append("$diskLoc", loc->toBSONObj());
+ _best.insert(make_pair(k.getOwned(), b.obj().getOwned()));
+ }
+ }
+
+ void ScanAndOrder::_addIfBetter(BSONObj& k, BSONObj o, BestMap::iterator i, DiskLoc* loc) {
+ /* todo : we don't correct _approxSize here. */
+ const BSONObj& worstBestKey = i->first;
+ int c = worstBestKey.woCompare(k, _order._spec.keyPattern);
+ if ( c > 0 ) {
+ // k is better, 'upgrade'
+ _best.erase(i);
+ _add(k, o, loc);
+ }
+ }
+
+
+ void ScanAndOrder::add(BSONObj o, DiskLoc* loc) {
+ assert( o.isValid() );
+ BSONObj k = _order.getKeyFromObject(o);
+ if ( k.isEmpty() ) {
+ return;
+ }
+ if ( (int) _best.size() < _limit ) {
+ _approxSize += k.objsize();
+ _approxSize += o.objsize();
+
+ /* note : adjust when bson return limit adjusts. note this limit should be a bit higher. */
+ uassert( 10128 , "too much data for sort() with no index. add an index or specify a smaller limit", _approxSize < MaxScanAndOrderBytes );
+
+ _add(k, o, loc);
+ return;
+ }
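+ // the result set is full: compare against the current worst (last) entry
+ // and keep the new object only if its key sorts ahead of it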
+ BestMap::iterator i;
+ assert( _best.end() != _best.begin() );
+ i = _best.end();
+ i--;
+ _addIfBetter(k, o, i, loc);
+ }
+
+
+ void ScanAndOrder::fill(BufBuilder& b, Projection *filter, int& nout ) const {
+ int n = 0;
+ int nFilled = 0;
+ for ( BestMap::const_iterator i = _best.begin(); i != _best.end(); i++ ) {
+ n++;
+ if ( n <= _startFrom )
+ continue;
+ const BSONObj& o = i->second;
+ fillQueryResultFromObj(b, filter, o);
+ nFilled++;
+ if ( nFilled >= _limit )
+ break;
+ uassert( 10129 , "too much data for sort() with no index", b.len() < (int)MaxScanAndOrderBytes ); // appserver limit
+ }
+ nout = nFilled;
+ }
+
+} // namespace mongo
diff --git a/db/scanandorder.h b/db/scanandorder.h
index 4c491fa..33e76f6 100644
--- a/db/scanandorder.h
+++ b/db/scanandorder.h
@@ -20,27 +20,33 @@
#pragma once
+#include "indexkey.h"
+#include "queryutil.h"
+#include "projection.h"
+
namespace mongo {
/* todo:
- _ handle compound keys with differing directions. we don't handle this yet: neither here nor in indexes i think!!!
_ limit amount of data
*/
- /* see also IndexDetails::getKeysFromObject, which needs some merging with this. */
-
class KeyType : boost::noncopyable {
public:
- BSONObj pattern; // e.g., { ts : -1 }
+ IndexSpec _spec;
+ FieldRangeVector _keyCutter;
public:
- KeyType(BSONObj _keyPattern) {
- pattern = _keyPattern;
- assert( !pattern.isEmpty() );
+ KeyType(BSONObj pattern, const FieldRangeSet &frs):
+ _spec((assert(!pattern.isEmpty()),pattern)),
+ _keyCutter(frs, _spec, 1) {
}
- // returns the key value for o
+ /**
+ * @return first key of the object that would be encountered while
+ * scanning index with keySpec 'pattern' using constraints 'frs', or
+ * BSONObj() if no such key.
+ */
BSONObj getKeyFromObject(BSONObj o) {
- return o.extractFields(pattern,true);
+ return _keyCutter.firstMatch(o);
}
};
@@ -71,88 +77,34 @@ namespace mongo {
typedef multimap<BSONObj,BSONObj,BSONObjCmp> BestMap;
class ScanAndOrder {
- BestMap best; // key -> full object
- int startFrom;
- int limit; // max to send back.
- KeyType order;
- unsigned approxSize;
-
- void _add(BSONObj& k, BSONObj o, DiskLoc* loc) {
- if (!loc) {
- best.insert(make_pair(k.getOwned(),o.getOwned()));
- }
- else {
- BSONObjBuilder b;
- b.appendElements(o);
- b.append("$diskLoc", loc->toBSONObj());
- best.insert(make_pair(k.getOwned(), b.obj().getOwned()));
- }
- }
-
- void _addIfBetter(BSONObj& k, BSONObj o, BestMap::iterator i, DiskLoc* loc) {
- /* todo : we don't correct approxSize here. */
- const BSONObj& worstBestKey = i->first;
- int c = worstBestKey.woCompare(k, order.pattern);
- if ( c > 0 ) {
- // k is better, 'upgrade'
- best.erase(i);
- _add(k, o, loc);
- }
- }
-
public:
- ScanAndOrder(int _startFrom, int _limit, BSONObj _order) :
- best( BSONObjCmp( _order ) ),
- startFrom(_startFrom), order(_order) {
- limit = _limit > 0 ? _limit + startFrom : 0x7fffffff;
- approxSize = 0;
- }
+ static const unsigned MaxScanAndOrderBytes;
- int size() const {
- return best.size();
+ ScanAndOrder(int startFrom, int limit, BSONObj order, const FieldRangeSet &frs) :
+ _best( BSONObjCmp( order ) ),
+ _startFrom(startFrom), _order(order, frs) {
+ _limit = limit > 0 ? limit + _startFrom : 0x7fffffff;
+ _approxSize = 0;
}
- void add(BSONObj o, DiskLoc* loc) {
- assert( o.isValid() );
- BSONObj k = order.getKeyFromObject(o);
- if ( (int) best.size() < limit ) {
- approxSize += k.objsize();
- approxSize += o.objsize();
-
- /* note : adjust when bson return limit adjusts. note this limit should be a bit higher. */
- uassert( 10128 , "too much data for sort() with no index. add an index or specify a smaller limit", approxSize < 32 * 1024 * 1024 );
-
- _add(k, o, loc);
- return;
- }
- BestMap::iterator i;
- assert( best.end() != best.begin() );
- i = best.end();
- i--;
- _addIfBetter(k, o, i, loc);
- }
+ int size() const { return _best.size(); }
- void _fill(BufBuilder& b, Projection *filter, int& nout, BestMap::iterator begin, BestMap::iterator end) {
- int n = 0;
- int nFilled = 0;
- for ( BestMap::iterator i = begin; i != end; i++ ) {
- n++;
- if ( n <= startFrom )
- continue;
- BSONObj& o = i->second;
- fillQueryResultFromObj(b, filter, o);
- nFilled++;
- if ( nFilled >= limit )
- break;
- uassert( 10129 , "too much data for sort() with no index", b.len() < 4000000 ); // appserver limit
- }
- nout = nFilled;
- }
+ void add(BSONObj o, DiskLoc* loc);
/* scanning complete. stick the query result in b for n objects. */
- void fill(BufBuilder& b, Projection *filter, int& nout) {
- _fill(b, filter, nout, best.begin(), best.end());
- }
+ void fill(BufBuilder& b, Projection *filter, int& nout ) const;
+
+ private:
+
+ void _add(BSONObj& k, BSONObj o, DiskLoc* loc);
+
+ void _addIfBetter(BSONObj& k, BSONObj o, BestMap::iterator i, DiskLoc* loc);
+
+ BestMap _best; // key -> full object
+ int _startFrom;
+ int _limit; // max to send back.
+ KeyType _order;
+ unsigned _approxSize;
};
diff --git a/db/security.cpp b/db/security.cpp
index 1ec4218..ae14770 100644
--- a/db/security.cpp
+++ b/db/security.cpp
@@ -18,29 +18,42 @@
#include "pch.h"
#include "security.h"
+#include "security_common.h"
#include "instance.h"
#include "client.h"
#include "curop-inl.h"
#include "db.h"
#include "dbhelpers.h"
-namespace mongo {
+// this is the _mongod only_ implementation of security.h
- int AuthenticationInfo::warned = 0;
+namespace mongo {
- void AuthenticationInfo::print() {
+ bool AuthenticationInfo::_warned = false;
+ /*
+ void AuthenticationInfo::print() const {
cout << "AuthenticationInfo: " << this << '\n';
- for ( map<string,Auth>::iterator i=m.begin(); i!=m.end(); i++ ) {
+ for ( MA::const_iterator i=_dbs.begin(); i!=_dbs.end(); i++ ) {
cout << "\t" << i->first << "\t" << i->second.level << '\n';
}
cout << "END" << endl;
}
+ */
+
+ string AuthenticationInfo::getUser( const string& dbname ) const {
+ scoped_spinlock lk(_lock);
+
+ MA::const_iterator i = _dbs.find(dbname);
+ if ( i == _dbs.end() )
+ return "";
+
+ return i->second.user;
+ }
- bool AuthenticationInfo::_isAuthorizedSpecialChecks( const string& dbname ) {
- if ( cc().isGod() ) {
+ bool AuthenticationInfo::_isAuthorizedSpecialChecks( const string& dbname ) const {
+ if ( cc().isGod() )
return true;
- }
if ( isLocalHost ) {
atleastreadlock l("");
@@ -48,15 +61,58 @@ namespace mongo {
Client::Context c("admin.system.users");
BSONObj result;
if( ! Helpers::getSingleton("admin.system.users", result) ) {
- if( warned == 0 ) {
- warned++;
+ if( ! _warned ) {
+ // you could get a few of these in a race, but that's ok
+ _warned = true;
log() << "note: no users configured in admin.system.users, allowing localhost access" << endl;
}
return true;
}
}
+
return false;
}
+ bool CmdAuthenticate::getUserObj(const string& dbname, const string& user, BSONObj& userObj, string& pwd) {
+ if (user == internalSecurity.user) {
+ uassert(15889, "key file must be used to log in with internal user", cmdLine.keyFile);
+ pwd = internalSecurity.pwd;
+ }
+ else {
+ // static BSONObj userPattern = fromjson("{\"user\":1}");
+ string systemUsers = dbname + ".system.users";
+ // OCCASIONALLY Helpers::ensureIndex(systemUsers.c_str(), userPattern, false, "user_1");
+ {
+ BSONObjBuilder b;
+ b << "user" << user;
+ BSONObj query = b.done();
+ if( !Helpers::findOne(systemUsers.c_str(), query, userObj) ) {
+ log() << "auth: couldn't find user " << user << ", " << systemUsers << endl;
+ return false;
+ }
+ }
+
+ pwd = userObj.getStringField("pwd");
+ }
+ return true;
+ }
+
+ void CmdAuthenticate::authenticate(const string& dbname, const string& user, const bool readOnly) {
+ AuthenticationInfo *ai = cc().getAuthenticationInfo();
+
+ if ( readOnly ) {
+ ai->authorizeReadOnly( cc().database()->name.c_str() , user );
+ }
+ else {
+ ai->authorize( cc().database()->name.c_str() , user );
+ }
+ }
+
+ bool CmdLogout::run(const string& dbname , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ AuthenticationInfo *ai = cc().getAuthenticationInfo();
+ ai->logout(dbname);
+ return true;
+ }
+
} // namespace mongo
diff --git a/db/security.h b/db/security.h
index 2b947c1..2937ef2 100644..100755
--- a/db/security.h
+++ b/db/security.h
@@ -20,53 +20,84 @@
#include "nonce.h"
#include "concurrency.h"
-#include "security_key.h"
+#include "security_common.h"
+#include "../util/concurrency/spin_lock.h"
+
+// this is used by both mongos and mongod
namespace mongo {
- /* for a particular db */
+ /*
+ * for a particular db
+ * levels
+ * 0 : none
+ * 1 : read
+ * 2 : write
+ */
struct Auth {
Auth() { level = 0; }
int level;
+ string user;
};
class AuthenticationInfo : boost::noncopyable {
- mongo::mutex _lock;
- map<string, Auth> m; // dbname -> auth
- static int warned;
public:
bool isLocalHost;
- AuthenticationInfo() : _lock("AuthenticationInfo") { isLocalHost = false; }
- ~AuthenticationInfo() {
- }
+
+ AuthenticationInfo(){ isLocalHost = false; }
+ ~AuthenticationInfo() {}
+
+ // -- modifiers ----
+
void logout(const string& dbname ) {
- scoped_lock lk(_lock);
- m.erase(dbname);
+ scoped_spinlock lk(_lock);
+ _dbs.erase(dbname);
+ }
+ void authorize(const string& dbname , const string& user ) {
+ scoped_spinlock lk(_lock);
+ _dbs[dbname].level = 2;
+ _dbs[dbname].user = user;
+ }
+ void authorizeReadOnly(const string& dbname , const string& user ) {
+ scoped_spinlock lk(_lock);
+ _dbs[dbname].level = 1;
+ _dbs[dbname].user = user;
+ }
+
+ // -- accessors ---
+
+ bool isAuthorized(const string& dbname) const {
+ return _isAuthorized( dbname, 2 );
}
- void authorize(const string& dbname ) {
- scoped_lock lk(_lock);
- m[dbname].level = 2;
+
+ bool isAuthorizedReads(const string& dbname) const {
+ return _isAuthorized( dbname, 1 );
}
- void authorizeReadOnly(const string& dbname) {
- scoped_lock lk(_lock);
- m[dbname].level = 1;
+
+ bool isAuthorizedForLock(const string& dbname, int lockType ) const {
+ return _isAuthorized( dbname , lockType > 0 ? 2 : 1 );
}
- bool isAuthorized(const string& dbname) { return _isAuthorized( dbname, 2 ); }
- bool isAuthorizedReads(const string& dbname) { return _isAuthorized( dbname, 1 ); }
- bool isAuthorizedForLock(const string& dbname, int lockType ) { return _isAuthorized( dbname , lockType > 0 ? 2 : 1 ); }
- void print();
+ string getUser( const string& dbname ) const;
+
+ void print() const;
protected:
- bool _isAuthorized(const string& dbname, int level) {
- if( m[dbname].level >= level ) return true;
- if( noauth ) return true;
- if( m["admin"].level >= level ) return true;
- if( m["local"].level >= level ) return true;
- return _isAuthorizedSpecialChecks( dbname );
- }
+ /** takes a lock */
+ bool _isAuthorized(const string& dbname, int level) const;
+
+ bool _isAuthorizedSingle_inlock(const string& dbname, int level) const;
+
+ /** cannot call this locked */
+ bool _isAuthorizedSpecialChecks( const string& dbname ) const ;
+
+ private:
+ mutable SpinLock _lock;
+
+ typedef map<string,Auth> MA;
+ MA _dbs; // dbname -> auth
- bool _isAuthorizedSpecialChecks( const string& dbname );
+ static bool _warned;
};
} // namespace mongo
diff --git a/db/security_commands.cpp b/db/security_commands.cpp
index 67605aa..2db9680 100644
--- a/db/security_commands.cpp
+++ b/db/security_commands.cpp
@@ -39,12 +39,12 @@ namespace mongo {
getnonce sends nonce to client
- client then sends { authenticate:1, nonce:<nonce_str>, user:<username>, key:<key> }
+ client then sends { authenticate:1, nonce64:<nonce_str>, user:<username>, key:<key> }
where <key> is md5(<nonce_str><username><pwd_digest_str>) as a string
*/
- boost::thread_specific_ptr<nonce> lastNonce;
+ boost::thread_specific_ptr<nonce64> lastNonce;
class CmdGetNonce : public Command {
public:
@@ -56,8 +56,8 @@ namespace mongo {
void help(stringstream& h) const { h << "internal"; }
virtual LockType locktype() const { return NONE; }
CmdGetNonce() : Command("getnonce") {}
- bool run(const string&, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
- nonce *n = new nonce(security.getNonce());
+ bool run(const string&, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ nonce64 *n = new nonce64(Security::getNonce());
stringstream ss;
ss << hex << *n;
result.append("nonce", ss.str() );
@@ -66,129 +66,78 @@ namespace mongo {
}
} cmdGetNonce;
- class CmdLogout : public Command {
- public:
- virtual bool logTheOp() {
- return false;
- }
- virtual bool slaveOk() const {
- return true;
- }
- void help(stringstream& h) const { h << "de-authenticate"; }
- virtual LockType locktype() const { return NONE; }
- CmdLogout() : Command("logout") {}
- bool run(const string& dbname , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
- AuthenticationInfo *ai = cc().getAuthenticationInfo();
- ai->logout(dbname);
- return true;
- }
- } cmdLogout;
+ CmdLogout cmdLogout;
- class CmdAuthenticate : public Command {
- public:
- virtual bool requiresAuth() { return false; }
- virtual bool logTheOp() {
+ bool CmdAuthenticate::run(const string& dbname , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ log() << " authenticate: " << cmdObj << endl;
+
+ string user = cmdObj.getStringField("user");
+ string key = cmdObj.getStringField("key");
+ string received_nonce = cmdObj.getStringField("nonce");
+
+ if( user.empty() || key.empty() || received_nonce.empty() ) {
+ log() << "field missing/wrong type in received authenticate command "
+ << dbname
+ << endl;
+ errmsg = "auth fails";
+ sleepmillis(10);
return false;
}
- virtual bool slaveOk() const {
- return true;
- }
- virtual LockType locktype() const { return WRITE; }
- virtual void help(stringstream& ss) const { ss << "internal"; }
- CmdAuthenticate() : Command("authenticate") {}
- bool run(const string& dbname , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
- log(1) << " authenticate: " << cmdObj << endl;
-
- string user = cmdObj.getStringField("user");
- string key = cmdObj.getStringField("key");
- string received_nonce = cmdObj.getStringField("nonce");
-
- if( user.empty() || key.empty() || received_nonce.empty() ) {
- log() << "field missing/wrong type in received authenticate command "
- << dbname
- << endl;
- errmsg = "auth fails";
- sleepmillis(10);
- return false;
- }
-
- stringstream digestBuilder;
-
- {
- bool reject = false;
- nonce *ln = lastNonce.release();
- if ( ln == 0 ) {
- reject = true;
- log(1) << "auth: no lastNonce" << endl;
- }
- else {
- digestBuilder << hex << *ln;
- reject = digestBuilder.str() != received_nonce;
- if ( reject ) log(1) << "auth: different lastNonce" << endl;
- }
-
- if ( reject ) {
- log() << "auth: bad nonce received or getnonce not called. could be a driver bug or a security attack. db:" << cc().database()->name << endl;
- errmsg = "auth fails";
- sleepmillis(30);
- return false;
- }
- }
- BSONObj userObj;
- string pwd;
+ stringstream digestBuilder;
- if (user == internalSecurity.user) {
- pwd = internalSecurity.pwd;
+ {
+ bool reject = false;
+ nonce64 *ln = lastNonce.release();
+ if ( ln == 0 ) {
+ reject = true;
+ log(1) << "auth: no lastNonce" << endl;
}
else {
- static BSONObj userPattern = fromjson("{\"user\":1}");
- string systemUsers = dbname + ".system.users";
- OCCASIONALLY Helpers::ensureIndex(systemUsers.c_str(), userPattern, false, "user_1");
- {
- BSONObjBuilder b;
- b << "user" << user;
- BSONObj query = b.done();
- if( !Helpers::findOne(systemUsers.c_str(), query, userObj) ) {
- log() << "auth: couldn't find user " << user << ", " << systemUsers << endl;
- errmsg = "auth fails";
- return false;
- }
- }
-
- pwd = userObj.getStringField("pwd");
+ digestBuilder << hex << *ln;
+ reject = digestBuilder.str() != received_nonce;
+ if ( reject ) log(1) << "auth: different lastNonce" << endl;
}
-
- md5digest d;
- {
- digestBuilder << user << pwd;
- string done = digestBuilder.str();
-
- md5_state_t st;
- md5_init(&st);
- md5_append(&st, (const md5_byte_t *) done.c_str(), done.size());
- md5_finish(&st, d);
- }
-
- string computed = digestToString( d );
-
- if ( key != computed ) {
- log() << "auth: key mismatch " << user << ", ns:" << dbname << endl;
+ if ( reject ) {
+ log() << "auth: bad nonce received or getnonce not called. could be a driver bug or a security attack. db:" << dbname << endl;
errmsg = "auth fails";
+ sleepmillis(30);
return false;
}
+ }
- AuthenticationInfo *ai = cc().getAuthenticationInfo();
+ BSONObj userObj;
+ string pwd;
+ if (!getUserObj(dbname, user, userObj, pwd)) {
+ errmsg = "auth fails";
+ return false;
+ }
- if ( userObj[ "readOnly" ].isBoolean() && userObj[ "readOnly" ].boolean() ) {
- ai->authorizeReadOnly( cc().database()->name.c_str() );
- }
- else {
- ai->authorize( cc().database()->name.c_str() );
- }
- return true;
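+ // recompute md5(nonce + username + pwd digest) and compare it with the
+ // key the client sent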
+ md5digest d;
+ {
+ digestBuilder << user << pwd;
+ string done = digestBuilder.str();
+
+ md5_state_t st;
+ md5_init(&st);
+ md5_append(&st, (const md5_byte_t *) done.c_str(), done.size());
+ md5_finish(&st, d);
+ }
+
+ string computed = digestToString( d );
+
+ if ( key != computed ) {
+ log() << "auth: key mismatch " << user << ", ns:" << dbname << endl;
+ errmsg = "auth fails";
+ return false;
}
- } cmdAuthenticate;
+
+ authenticate(dbname, user, userObj[ "readOnly" ].isBoolean() && userObj[ "readOnly" ].boolean());
+
+ return true;
+ }
+
+ CmdAuthenticate cmdAuthenticate;
} // namespace mongo
diff --git a/db/security_key.cpp b/db/security_common.cpp
index 1ea7021..04cea99 100644
--- a/db/security_key.cpp
+++ b/db/security_common.cpp
@@ -1,4 +1,4 @@
-// security_key.cpp
+// security_common.cpp
/*
* Copyright (C) 2010 10gen Inc.
*
@@ -22,8 +22,12 @@
*/
#include "pch.h"
-#include "security_key.h"
+#include "security.h"
+#include "security_common.h"
#include "../client/dbclient.h"
+#include "commands.h"
+#include "nonce.h"
+#include "../util/md5.hpp"
#include <sys/stat.h>
@@ -41,7 +45,7 @@ namespace mongo {
return false;
}
-#if !defined(WIN32)
+#if !defined(_WIN32)
// check permissions: must be X00, where X is >= 4
if ((stats.st_mode & (S_IRWXG|S_IRWXO)) != 0) {
log() << "permissions on " << filename << " are too open" << endl;
@@ -102,4 +106,29 @@ namespace mongo {
return true;
}
+
+ bool AuthenticationInfo::_isAuthorized(const string& dbname, int level) const {
+ {
+ scoped_spinlock lk(_lock);
+
+ if ( _isAuthorizedSingle_inlock( dbname , level ) )
+ return true;
+
+ if ( noauth )
+ return true;
+
+ if ( _isAuthorizedSingle_inlock( "admin" , level ) )
+ return true;
+
+ if ( _isAuthorizedSingle_inlock( "local" , level ) )
+ return true;
+ }
+ return _isAuthorizedSpecialChecks( dbname );
+ }
+
+ bool AuthenticationInfo::_isAuthorizedSingle_inlock(const string& dbname, int level) const {
+ MA::const_iterator i = _dbs.find(dbname);
+ return i != _dbs.end() && i->second.level >= level;
+ }
+
} // namespace mongo
diff --git a/db/security_common.h b/db/security_common.h
new file mode 100644
index 0000000..2f2565f
--- /dev/null
+++ b/db/security_common.h
@@ -0,0 +1,83 @@
+// security_common.h
+
+/**
+* Copyright (C) 2009 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+#include "commands.h"
+#include "concurrency.h"
+#include "../util/concurrency/spin_lock.h"
+
+namespace mongo {
+
+ /**
+ * Internal secret key info.
+ */
+ struct AuthInfo {
+ AuthInfo() {
+ user = "__system";
+ }
+ string user;
+ string pwd;
+ };
+
+ // --noauth cmd line option
+ extern bool noauth;
+ extern AuthInfo internalSecurity;
+
+ /**
+ * This method checks the validity of filename as a security key, hashes its
+ * contents, and stores it in the internalSecurity variable. Prints an
+ * error message to the logs if there's an error.
+ * @param filename the file containing the key
+ * @return if the key was successfully stored
+ */
+ bool setUpSecurityKey(const string& filename);
+
+ class CmdAuthenticate : public Command {
+ public:
+ virtual bool requiresAuth() { return false; }
+ virtual bool logTheOp() {
+ return false;
+ }
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual LockType locktype() const { return READ; }
+ virtual void help(stringstream& ss) const { ss << "internal"; }
+ CmdAuthenticate() : Command("authenticate") {}
+ bool run(const string& dbname , BSONObj& cmdObj, int options, string& errmsg, BSONObjBuilder& result, bool fromRepl);
+ private:
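+ // look up the user's document and its stored pwd digest for dbname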
+ bool getUserObj(const string& dbname, const string& user, BSONObj& userObj, string& pwd);
+ void authenticate(const string& dbname, const string& user, const bool readOnly);
+ };
+
+ class CmdLogout : public Command {
+ public:
+ virtual bool logTheOp() {
+ return false;
+ }
+ virtual bool slaveOk() const {
+ return true;
+ }
+ void help(stringstream& h) const { h << "de-authenticate"; }
+ virtual LockType locktype() const { return NONE; }
+ CmdLogout() : Command("logout") {}
+ bool run(const string& dbname , BSONObj& cmdObj, int options, string& errmsg, BSONObjBuilder& result, bool fromRepl);
+ };
+
+} // namespace mongo
diff --git a/db/security_key.h b/db/security_key.h
deleted file mode 100644
index 86f1307..0000000
--- a/db/security_key.h
+++ /dev/null
@@ -1,47 +0,0 @@
-// security_key.h
-
-/**
-* Copyright (C) 2009 10gen Inc.
-*
-* This program is free software: you can redistribute it and/or modify
-* it under the terms of the GNU Affero General Public License, version 3,
-* as published by the Free Software Foundation.
-*
-* This program is distributed in the hope that it will be useful,
-* but WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-* GNU Affero General Public License for more details.
-*
-* You should have received a copy of the GNU Affero General Public License
-* along with this program. If not, see <http://www.gnu.org/licenses/>.
-*/
-
-#pragma once
-
-namespace mongo {
-
- /**
- * Internal secret key info.
- */
- struct AuthInfo {
- AuthInfo() {
- user = "__system";
- }
- string user;
- string pwd;
- };
-
- // --noauth cmd line option
- extern bool noauth;
- extern AuthInfo internalSecurity;
-
- /**
- * This method checks the validity of filename as a security key, hashes its
- * contents, and stores it in the internalSecurity variable. Prints an
- * error message to the logs if there's an error.
- * @param filename the file containing the key
- * @return if the key was successfully stored
- */
- bool setUpSecurityKey(const string& filename);
-
-} // namespace mongo
diff --git a/db/stats/counters.h b/db/stats/counters.h
index b5cad85..d514a0f 100644
--- a/db/stats/counters.h
+++ b/db/stats/counters.h
@@ -19,7 +19,7 @@
#include "../../pch.h"
#include "../jsobj.h"
-#include "../../util/message.h"
+#include "../../util/net/message.h"
#include "../../util/processinfo.h"
#include "../../util/concurrency/spin_lock.h"
diff --git a/db/stats/snapshots.cpp b/db/stats/snapshots.cpp
index a81568d..ca5491b 100644
--- a/db/stats/snapshots.cpp
+++ b/db/stats/snapshots.cpp
@@ -38,19 +38,21 @@ namespace mongo {
: _older( older ) , _newer( newer ) {
assert( _newer._created > _older._created );
_elapsed = _newer._created - _older._created;
-
}
Top::CollectionData SnapshotDelta::globalUsageDiff() {
return Top::CollectionData( _older._globalUsage , _newer._globalUsage );
}
Top::UsageMap SnapshotDelta::collectionUsageDiff() {
+ assert( _newer._created > _older._created );
Top::UsageMap u;
for ( Top::UsageMap::const_iterator i=_newer._usage.begin(); i != _newer._usage.end(); i++ ) {
Top::UsageMap::const_iterator j = _older._usage.find(i->first);
if (j != _older._usage.end())
u[i->first] = Top::CollectionData( j->second , i->second );
+ else
+ u[i->first] = i->second;
}
return u;
}
@@ -112,14 +114,10 @@ namespace mongo {
try {
const SnapshotData* s = statsSnapshots.takeSnapshot();
- if ( prev ) {
+ if ( prev && cmdLine.cpu ) {
unsigned long long elapsed = s->_created - prev->_created;
-
- if ( cmdLine.cpu ) {
- SnapshotDelta d( *prev , *s );
- log() << "cpu: elapsed:" << (elapsed/1000) <<" writelock: " << (int)(100*d.percentWriteLocked()) << "%" << endl;
- }
-
+ SnapshotDelta d( *prev , *s );
+ log() << "cpu: elapsed:" << (elapsed/1000) <<" writelock: " << (int)(100*d.percentWriteLocked()) << "%" << endl;
}
prev = s;
diff --git a/db/stats/top.cpp b/db/stats/top.cpp
index 77aef0d..f5b6ee4 100644
--- a/db/stats/top.cpp
+++ b/db/stats/top.cpp
@@ -18,16 +18,15 @@
#include "pch.h"
#include "top.h"
-#include "../../util/message.h"
+#include "../../util/net/message.h"
#include "../commands.h"
namespace mongo {
Top::UsageData::UsageData( const UsageData& older , const UsageData& newer ) {
// this won't be 100% accurate on rollovers and drop(), but at least it won't be negative
- time = (newer.time > older.time) ? (newer.time - older.time) : newer.time;
- count = (newer.count > older.count) ? (newer.count - older.count) : newer.count;
-
+ time = (newer.time >= older.time) ? (newer.time - older.time) : newer.time;
+ count = (newer.count >= older.count) ? (newer.count - older.count) : newer.count;
}
Top::CollectionData::CollectionData( const CollectionData& older , const CollectionData& newer )
@@ -155,11 +154,12 @@ namespace mongo {
virtual bool slaveOk() const { return true; }
virtual bool adminOnly() const { return true; }
virtual LockType locktype() const { return READ; }
- virtual void help( stringstream& help ) const { help << "usage by collection"; }
+ virtual void help( stringstream& help ) const { help << "usage by collection, in micros "; }
- virtual bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ virtual bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
{
BSONObjBuilder b( result.subobjStart( "totals" ) );
+ b.append( "note" , "all times in microseconds" );
Top::global.append( b );
b.done();
}
diff --git a/dbtests/basictests.cpp b/dbtests/basictests.cpp
index 3e0eecd..80bd7d7 100644
--- a/dbtests/basictests.cpp
+++ b/dbtests/basictests.cpp
@@ -25,6 +25,9 @@
#include "../util/text.h"
#include "../util/queue.h"
#include "../util/paths.h"
+#include "../util/stringutils.h"
+#include "../util/compress.h"
+#include "../db/db.h"
namespace BasicTests {
@@ -195,12 +198,16 @@ namespace BasicTests {
int matches = 0;
for( int p = 0; p < 3; p++ ) {
sleepsecs( 1 );
- int sec = t.seconds();
+ int sec = (t.millis() + 2)/1000;
if( sec == 1 )
matches++;
+ else
+ log() << "temp millis: " << t.millis() << endl;
ASSERT( sec >= 0 && sec <= 2 );
t.reset();
}
+ if ( matches < 2 )
+ log() << "matches:" << matches << endl;
ASSERT( matches >= 2 );
sleepmicros( 1527123 );
@@ -222,7 +229,7 @@ namespace BasicTests {
{
int x = t.millis();
if ( x < 1000 || x > 2500 ) {
- cout << "sleeptest x: " << x << endl;
+ cout << "sleeptest finds sleep accuracy to be not great. x: " << x << endl;
ASSERT( x >= 1000 );
ASSERT( x <= 20000 );
}
@@ -399,6 +406,27 @@ namespace BasicTests {
ASSERT_EQUALS( -1, lexNumCmp( "a", "0a"));
ASSERT_EQUALS( -1, lexNumCmp( "000a", "001a"));
ASSERT_EQUALS( 0, lexNumCmp( "010a", "0010a"));
+
+ ASSERT_EQUALS( -1 , lexNumCmp( "a0" , "a00" ) );
+ ASSERT_EQUALS( 0 , lexNumCmp( "a.0" , "a.00" ) );
+ ASSERT_EQUALS( -1 , lexNumCmp( "a.b.c.d0" , "a.b.c.d00" ) );
+ ASSERT_EQUALS( 1 , lexNumCmp( "a.b.c.0.y" , "a.b.c.00.x" ) );
+
+ ASSERT_EQUALS( -1, lexNumCmp( "a", "a-" ) );
+ ASSERT_EQUALS( 1, lexNumCmp( "a-", "a" ) );
+ ASSERT_EQUALS( 0, lexNumCmp( "a-", "a-" ) );
+
+ ASSERT_EQUALS( -1, lexNumCmp( "a", "a-c" ) );
+ ASSERT_EQUALS( 1, lexNumCmp( "a-c", "a" ) );
+ ASSERT_EQUALS( 0, lexNumCmp( "a-c", "a-c" ) );
+
+ ASSERT_EQUALS( 1, lexNumCmp( "a-c.t", "a.t" ) );
+ ASSERT_EQUALS( -1, lexNumCmp( "a.t", "a-c.t" ) );
+ ASSERT_EQUALS( 0, lexNumCmp( "a-c.t", "a-c.t" ) );
+
+ ASSERT_EQUALS( 1, lexNumCmp( "ac.t", "a.t" ) );
+ ASSERT_EQUALS( -1, lexNumCmp( "a.t", "ac.t" ) );
+ ASSERT_EQUALS( 0, lexNumCmp( "ac.t", "ac.t" ) );
}
};
@@ -409,16 +437,16 @@ namespace BasicTests {
ASSERT( ! Database::validDBName( "foo/bar" ) );
ASSERT( ! Database::validDBName( "foo.bar" ) );
- ASSERT( isANormalNSName( "asdads" ) );
- ASSERT( ! isANormalNSName( "asda$ds" ) );
- ASSERT( isANormalNSName( "local.oplog.$main" ) );
+ ASSERT( NamespaceString::normal( "asdads" ) );
+ ASSERT( ! NamespaceString::normal( "asda$ds" ) );
+ ASSERT( NamespaceString::normal( "local.oplog.$main" ) );
}
};
class DatabaseOwnsNS {
public:
void run() {
-
+ dblock lk;
bool isNew = false;
// this leaks as ~Database is private
// if that changes, should put this on the stack
@@ -584,6 +612,40 @@ namespace BasicTests {
}
};
+ class CmdLineParseConfigTest {
+ public:
+ void run() {
+ stringstream ss1;
+ istringstream iss1("");
+ CmdLine::parseConfigFile( iss1, ss1 );
+ stringstream ss2;
+ istringstream iss2("password=\'foo bar baz\'");
+ CmdLine::parseConfigFile( iss2, ss2 );
+ stringstream ss3;
+ istringstream iss3("\t this = false \n#that = true\n #another = whocares\n\n other = monkeys ");
+ CmdLine::parseConfigFile( iss3, ss3 );
+
+ ASSERT( ss1.str().compare("\n") == 0 );
+ ASSERT( ss2.str().compare("password=\'foo bar baz\'\n\n") == 0 );
+ ASSERT( ss3.str().compare("\n other = monkeys \n\n") == 0 );
+ }
+ };
+
+ struct CompressionTest1 {
+ void run() {
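+ // round-trip: compress the buffer, then uncompress and check it matches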
+ const char * c = "this is a test";
+ std::string s;
+ size_t len = compress(c, strlen(c)+1, &s);
+ assert( len > 0 );
+
+ std::string out;
+ bool ok = uncompress(s.c_str(), s.size(), &out);
+ assert(ok);
+ assert( strcmp(out.c_str(), c) == 0 );
+ }
+ } ctest1;
+
+
class All : public Suite {
public:
All() : Suite( "basic" ) {
@@ -620,6 +682,9 @@ namespace BasicTests {
add< HostAndPortTests >();
add< RelativePathTest >();
+ add< CmdLineParseConfigTest >();
+
+ add< CompressionTest1 >();
}
} myall;
diff --git a/dbtests/btreetests.cpp b/dbtests/btreetests.cpp
index 4da7375..44c5474 100644
--- a/dbtests/btreetests.cpp
+++ b/dbtests/btreetests.cpp
@@ -24,1688 +24,26 @@
#include "dbtests.h"
-namespace BtreeTests {
-
- const char* ns() {
- return "unittests.btreetests";
- }
-
- // dummy, valid record loc
- const DiskLoc recordLoc() {
- return DiskLoc( 0, 2 );
- }
-
- class Ensure {
- public:
- Ensure() {
- _c.ensureIndex( ns(), BSON( "a" << 1 ), false, "testIndex" );
- }
- ~Ensure() {
- _c.dropIndexes( ns() );
- }
- private:
- DBDirectClient _c;
- };
-
- class Base : public Ensure {
- public:
- Base() :
- _context( ns() ) {
- {
- bool f = false;
- assert( f = true );
- massert( 10402 , "assert is misdefined", f);
- }
- }
- virtual ~Base() {}
- static string bigNumString( long long n, int len = 800 ) {
- char sub[17];
- sprintf( sub, "%.16llx", n );
- string val( len, ' ' );
- for( int i = 0; i < len; ++i ) {
- val[ i ] = sub[ i % 16 ];
- }
- return val;
- }
- protected:
- const BtreeBucket* bt() {
- return id().head.btree();
- }
- DiskLoc dl() {
- return id().head;
- }
- IndexDetails& id() {
- NamespaceDetails *nsd = nsdetails( ns() );
- assert( nsd );
- return nsd->idx( 1 );
- }
- void checkValid( int nKeys ) {
- ASSERT( bt() );
- ASSERT( bt()->isHead() );
- bt()->assertValid( order(), true );
- ASSERT_EQUALS( nKeys, bt()->fullValidate( dl(), order(), 0, true ) );
- }
- void dump() {
- bt()->dumpTree( dl(), order() );
- }
- void insert( BSONObj &key ) {
- bt()->bt_insert( dl(), recordLoc(), key, Ordering::make(order()), true, id(), true );
- getDur().commitIfNeeded();
- }
- bool unindex( BSONObj &key ) {
- getDur().commitIfNeeded();
- return bt()->unindex( dl(), id(), key, recordLoc() );
- }
- static BSONObj simpleKey( char c, int n = 1 ) {
- BSONObjBuilder builder;
- string val( n, c );
- builder.append( "a", val );
- return builder.obj();
- }
- void locate( BSONObj &key, int expectedPos,
- bool expectedFound, const DiskLoc &expectedLocation,
- int direction = 1 ) {
- int pos;
- bool found;
- DiskLoc location =
- bt()->locate( id(), dl(), key, Ordering::make(order()), pos, found, recordLoc(), direction );
- ASSERT_EQUALS( expectedFound, found );
- ASSERT( location == expectedLocation );
- ASSERT_EQUALS( expectedPos, pos );
- }
- bool present( BSONObj &key, int direction ) {
- int pos;
- bool found;
- bt()->locate( id(), dl(), key, Ordering::make(order()), pos, found, recordLoc(), direction );
- return found;
- }
- BSONObj order() {
- return id().keyPattern();
- }
- const BtreeBucket *child( const BtreeBucket *b, int i ) {
- assert( i <= b->nKeys() );
- DiskLoc d;
- if ( i == b->nKeys() ) {
- d = b->getNextChild();
- }
- else {
- d = const_cast< DiskLoc& >( b->keyNode( i ).prevChildBucket );
- }
- assert( !d.isNull() );
- return d.btree();
- }
- void checkKey( char i ) {
- stringstream ss;
- ss << i;
- checkKey( ss.str() );
- }
- void checkKey( const string &k ) {
- BSONObj key = BSON( "" << k );
-// log() << "key: " << key << endl;
- ASSERT( present( key, 1 ) );
- ASSERT( present( key, -1 ) );
- }
- private:
- dblock lk_;
- Client::Context _context;
- };
-
- class Create : public Base {
- public:
- void run() {
- checkValid( 0 );
- }
- };
-
- class SimpleInsertDelete : public Base {
- public:
- void run() {
- BSONObj key = simpleKey( 'z' );
- insert( key );
-
- checkValid( 1 );
- locate( key, 0, true, dl() );
-
- unindex( key );
-
- checkValid( 0 );
- locate( key, 0, false, DiskLoc() );
- }
- };
-
- class SplitUnevenBucketBase : public Base {
- public:
- virtual ~SplitUnevenBucketBase() {}
- void run() {
- for ( int i = 0; i < 10; ++i ) {
- BSONObj shortKey = simpleKey( shortToken( i ), 1 );
- insert( shortKey );
- BSONObj longKey = simpleKey( longToken( i ), 800 );
- insert( longKey );
- }
- checkValid( 20 );
- ASSERT_EQUALS( 1, bt()->nKeys() );
- checkSplit();
- }
- protected:
- virtual char shortToken( int i ) const = 0;
- virtual char longToken( int i ) const = 0;
- static char leftToken( int i ) {
- return 'a' + i;
- }
- static char rightToken( int i ) {
- return 'z' - i;
- }
- virtual void checkSplit() = 0;
- };
-
- class SplitRightHeavyBucket : public SplitUnevenBucketBase {
- private:
- virtual char shortToken( int i ) const {
- return leftToken( i );
- }
- virtual char longToken( int i ) const {
- return rightToken( i );
- }
- virtual void checkSplit() {
- ASSERT_EQUALS( 15, child( bt(), 0 )->nKeys() );
- ASSERT_EQUALS( 4, child( bt(), 1 )->nKeys() );
- }
- };
-
- class SplitLeftHeavyBucket : public SplitUnevenBucketBase {
- private:
- virtual char shortToken( int i ) const {
- return rightToken( i );
- }
- virtual char longToken( int i ) const {
- return leftToken( i );
- }
- virtual void checkSplit() {
- ASSERT_EQUALS( 4, child( bt(), 0 )->nKeys() );
- ASSERT_EQUALS( 15, child( bt(), 1 )->nKeys() );
- }
- };
-
- class MissingLocate : public Base {
- public:
- void run() {
- for ( int i = 0; i < 3; ++i ) {
- BSONObj k = simpleKey( 'b' + 2 * i );
- insert( k );
- }
-
- locate( 1, 'a', 'b', dl() );
- locate( 1, 'c', 'd', dl() );
- locate( 1, 'e', 'f', dl() );
- locate( 1, 'g', 'g' + 1, DiskLoc() ); // of course, 'h' isn't in the index.
-
- // old behavior
- // locate( -1, 'a', 'b', dl() );
- // locate( -1, 'c', 'd', dl() );
- // locate( -1, 'e', 'f', dl() );
- // locate( -1, 'g', 'f', dl() );
-
- locate( -1, 'a', 'a' - 1, DiskLoc() ); // of course, 'a' - 1 isn't in the index
- locate( -1, 'c', 'b', dl() );
- locate( -1, 'e', 'd', dl() );
- locate( -1, 'g', 'f', dl() );
- }
- private:
- void locate( int direction, char token, char expectedMatch,
- DiskLoc expectedLocation ) {
- BSONObj k = simpleKey( token );
- int expectedPos = ( expectedMatch - 'b' ) / 2;
- Base::locate( k, expectedPos, false, expectedLocation, direction );
- }
- };
-
- class MissingLocateMultiBucket : public Base {
- public:
- void run() {
- for ( int i = 0; i < 8; ++i ) {
- insert( i );
- }
- insert( 9 );
- insert( 8 );
-// dump();
- BSONObj straddle = key( 'i' );
- locate( straddle, 0, false, dl(), 1 );
- straddle = key( 'k' );
- locate( straddle, 0, false, dl(), -1 );
- }
- private:
- BSONObj key( char c ) {
- return simpleKey( c, 800 );
- }
- void insert( int i ) {
- BSONObj k = key( 'b' + 2 * i );
- Base::insert( k );
- }
- };
-
- class SERVER983 : public Base {
- public:
- void run() {
- for ( int i = 0; i < 10; ++i ) {
- insert( i );
- }
-// dump();
- BSONObj straddle = key( 'o' );
- locate( straddle, 0, false, dl(), 1 );
- straddle = key( 'q' );
- locate( straddle, 0, false, dl(), -1 );
- }
- private:
- BSONObj key( char c ) {
- return simpleKey( c, 800 );
- }
- void insert( int i ) {
- BSONObj k = key( 'b' + 2 * i );
- Base::insert( k );
- }
- };
-
- class DontReuseUnused : public Base {
- public:
- void run() {
- for ( int i = 0; i < 10; ++i ) {
- insert( i );
- }
-// dump();
- BSONObj root = key( 'p' );
- unindex( root );
- Base::insert( root );
- locate( root, 0, true, bt()->getNextChild(), 1 );
- }
- private:
- BSONObj key( char c ) {
- return simpleKey( c, 800 );
- }
- void insert( int i ) {
- BSONObj k = key( 'b' + 2 * i );
- Base::insert( k );
- }
- };
-
- class PackUnused : public Base {
- public:
- void run() {
- for ( long long i = 0; i < 1000000; i += 1000 ) {
- insert( i );
- }
-// dump();
- string orig, after;
- {
- stringstream ss;
- bt()->shape( ss );
- orig = ss.str();
- }
- vector< string > toDel;
- vector< string > other;
- BSONObjBuilder start;
- start.appendMinKey( "a" );
- BSONObjBuilder end;
- end.appendMaxKey( "a" );
- auto_ptr< BtreeCursor > c( new BtreeCursor( nsdetails( ns() ), 1, id(), start.done(), end.done(), false, 1 ) );
- while( c->ok() ) {
- if ( !c->currKeyNode().prevChildBucket.isNull() ) {
- toDel.push_back( c->currKey().firstElement().valuestr() );
- }
- else {
- other.push_back( c->currKey().firstElement().valuestr() );
- }
- c->advance();
- }
- ASSERT( toDel.size() > 0 );
- for( vector< string >::const_iterator i = toDel.begin(); i != toDel.end(); ++i ) {
- BSONObj o = BSON( "a" << *i );
- unindex( o );
- }
- ASSERT( other.size() > 0 );
- for( vector< string >::const_iterator i = other.begin(); i != other.end(); ++i ) {
- BSONObj o = BSON( "a" << *i );
- unindex( o );
- }
-
- int unused = 0;
- ASSERT_EQUALS( 0, bt()->fullValidate( dl(), order(), &unused, true ) );
-
- for ( long long i = 50000; i < 50100; ++i ) {
- insert( i );
- }
-
- int unused2 = 0;
- ASSERT_EQUALS( 100, bt()->fullValidate( dl(), order(), &unused2, true ) );
-
-// log() << "old unused: " << unused << ", new unused: " << unused2 << endl;
-//
- ASSERT( unused2 <= unused );
- }
- protected:
- void insert( long long n ) {
- string val = bigNumString( n );
- BSONObj k = BSON( "a" << val );
- Base::insert( k );
- }
- };
-
- class DontDropReferenceKey : public PackUnused {
- public:
- void run() {
-                // with 80 keys, the root node is full
- for ( long long i = 0; i < 80; i += 1 ) {
- insert( i );
- }
-
- BSONObjBuilder start;
- start.appendMinKey( "a" );
- BSONObjBuilder end;
- end.appendMaxKey( "a" );
- BSONObj l = bt()->keyNode( 0 ).key;
- string toInsert;
- auto_ptr< BtreeCursor > c( new BtreeCursor( nsdetails( ns() ), 1, id(), start.done(), end.done(), false, 1 ) );
- while( c->ok() ) {
- if ( c->currKey().woCompare( l ) > 0 ) {
- toInsert = c->currKey().firstElement().valuestr();
- break;
- }
- c->advance();
- }
- // too much work to try to make this happen through inserts and deletes
- // we are intentionally manipulating the btree bucket directly here
- getDur().writingDiskLoc( const_cast< DiskLoc& >( bt()->keyNode( 1 ).prevChildBucket ) ) = DiskLoc();
- getDur().writingInt( const_cast< DiskLoc& >( bt()->keyNode( 1 ).recordLoc ).GETOFS() ) |= 1; // make unused
- BSONObj k = BSON( "a" << toInsert );
- Base::insert( k );
- }
- };
-
- class MergeBuckets : public Base {
- public:
- virtual ~MergeBuckets() {}
- void run() {
- for ( int i = 0; i < 10; ++i ) {
- insert( i );
- }
-// dump();
- string ns = id().indexNamespace();
- ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
- int expectedCount = 10 - unindexKeys();
-// dump();
- ASSERT_EQUALS( 1, nsdetails( ns.c_str() )->stats.nrecords );
- int unused = 0;
- ASSERT_EQUALS( expectedCount, bt()->fullValidate( dl(), order(), &unused, true ) );
- ASSERT_EQUALS( 0, unused );
- }
- protected:
- BSONObj key( char c ) {
- return simpleKey( c, 800 );
- }
- void insert( int i ) {
- BSONObj k = key( 'b' + 2 * i );
- Base::insert( k );
- }
- virtual int unindexKeys() = 0;
- };
-
- class MergeBucketsLeft : public MergeBuckets {
- virtual int unindexKeys() {
- BSONObj k = key( 'b' );
- unindex( k );
- k = key( 'b' + 2 );
- unindex( k );
- k = key( 'b' + 4 );
- unindex( k );
- k = key( 'b' + 6 );
- unindex( k );
- return 4;
- }
- };
-
- class MergeBucketsRight : public MergeBuckets {
- virtual int unindexKeys() {
- BSONObj k = key( 'b' + 2 * 9 );
- unindex( k );
- return 1;
- }
- };
-
- // deleting from head won't coalesce yet
-// class MergeBucketsHead : public MergeBuckets {
-// virtual BSONObj unindexKey() { return key( 'p' ); }
-// };
-
- class MergeBucketsDontReplaceHead : public Base {
- public:
- void run() {
- for ( int i = 0; i < 18; ++i ) {
- insert( i );
- }
- // dump();
- string ns = id().indexNamespace();
- ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
- BSONObj k = key( 'a' + 17 );
- unindex( k );
- ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
- int unused = 0;
- ASSERT_EQUALS( 17, bt()->fullValidate( dl(), order(), &unused, true ) );
- ASSERT_EQUALS( 0, unused );
- }
- private:
- BSONObj key( char c ) {
- return simpleKey( c, 800 );
- }
- void insert( int i ) {
- BSONObj k = key( 'a' + i );
- Base::insert( k );
- }
- };
-
- // Tool to construct custom trees for tests.
- class ArtificialTree : public BtreeBucket {
- public:
- void push( const BSONObj &key, const DiskLoc &child ) {
- pushBack( dummyDiskLoc(), key, Ordering::make( BSON( "a" << 1 ) ), child );
- }
- void setNext( const DiskLoc &child ) {
- nextChild = child;
- }
- static DiskLoc make( IndexDetails &id ) {
- DiskLoc ret = addBucket( id );
- is( ret )->init();
- getDur().commitIfNeeded();
- return ret;
- }
- static ArtificialTree *is( const DiskLoc &l ) {
- return static_cast< ArtificialTree * >( l.btreemod() );
- }
- static DiskLoc makeTree( const string &spec, IndexDetails &id ) {
- return makeTree( fromjson( spec ), id );
- }
- static DiskLoc makeTree( const BSONObj &spec, IndexDetails &id ) {
- DiskLoc node = make( id );
- ArtificialTree *n = ArtificialTree::is( node );
- BSONObjIterator i( spec );
- while( i.more() ) {
- BSONElement e = i.next();
- DiskLoc child;
- if ( e.type() == Object ) {
- child = makeTree( e.embeddedObject(), id );
- }
- if ( e.fieldName() == string( "_" ) ) {
- n->setNext( child );
- }
- else {
- n->push( BSON( "" << expectedKey( e.fieldName() ) ), child );
- }
- }
- n->fixParentPtrs( node );
- return node;
- }
- static void setTree( const string &spec, IndexDetails &id ) {
- set( makeTree( spec, id ), id );
- }
- static void set( const DiskLoc &l, IndexDetails &id ) {
- ArtificialTree::is( id.head )->deallocBucket( id.head, id );
- getDur().writingDiskLoc(id.head) = l;
- }
- static string expectedKey( const char *spec ) {
- if ( spec[ 0 ] != '$' ) {
- return spec;
- }
- char *endPtr;
- // parsing a long long is a pain, so just allow shorter keys for now
- unsigned long long num = strtol( spec + 1, &endPtr, 16 );
- int len = 800;
- if( *endPtr == '$' ) {
- len = strtol( endPtr + 1, 0, 16 );
- }
- return Base::bigNumString( num, len );
- }
- static void checkStructure( const BSONObj &spec, const IndexDetails &id, const DiskLoc node ) {
- ArtificialTree *n = ArtificialTree::is( node );
- BSONObjIterator j( spec );
- for( int i = 0; i < n->n; ++i ) {
- ASSERT( j.more() );
- BSONElement e = j.next();
- KeyNode kn = n->keyNode( i );
- string expected = expectedKey( e.fieldName() );
- ASSERT( present( id, BSON( "" << expected ), 1 ) );
- ASSERT( present( id, BSON( "" << expected ), -1 ) );
- ASSERT_EQUALS( expected, kn.key.firstElement().valuestr() );
- if ( kn.prevChildBucket.isNull() ) {
- ASSERT( e.type() == jstNULL );
- }
- else {
- ASSERT( e.type() == Object );
- checkStructure( e.embeddedObject(), id, kn.prevChildBucket );
- }
- }
- if ( n->nextChild.isNull() ) {
- // maybe should allow '_' field with null value?
- ASSERT( !j.more() );
- }
- else {
- BSONElement e = j.next();
- ASSERT_EQUALS( string( "_" ), e.fieldName() );
- ASSERT( e.type() == Object );
- checkStructure( e.embeddedObject(), id, n->nextChild );
- }
- ASSERT( !j.more() );
- }
- static void checkStructure( const string &spec, const IndexDetails &id ) {
- checkStructure( fromjson( spec ), id, id.head );
- }
- static bool present( const IndexDetails &id, const BSONObj &key, int direction ) {
- int pos;
- bool found;
- id.head.btree()->locate( id, id.head, key, Ordering::make(id.keyPattern()), pos, found, recordLoc(), direction );
- return found;
- }
- int headerSize() const { return BtreeBucket::headerSize(); }
- int packedDataSize( int pos ) const { return BtreeBucket::packedDataSize( pos ); }
- void fixParentPtrs( const DiskLoc &thisLoc ) { BtreeBucket::fixParentPtrs( thisLoc ); }
- void forcePack() {
- topSize += emptySize;
- emptySize = 0;
- setNotPacked();
- }
- private:
- DiskLoc dummyDiskLoc() const { return DiskLoc( 0, 2 ); }
- };
-
- /**
- * We could probably refactor the following tests, but it's easier to debug
- * them in the present state.
- */
-
- class MergeBucketsDelInternal : public Base {
- public:
- void run() {
- ArtificialTree::setTree( "{d:{b:{a:null},bb:null,_:{c:null}},_:{f:{e:null},_:{g:null}}}", id() );
-// dump();
- string ns = id().indexNamespace();
- ASSERT_EQUALS( 8, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 7, nsdetails( ns.c_str() )->stats.nrecords );
-
- BSONObj k = BSON( "" << "bb" );
- assert( unindex( k ) );
-// dump();
- ASSERT_EQUALS( 7, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 5, nsdetails( ns.c_str() )->stats.nrecords );
- ArtificialTree::checkStructure( "{b:{a:null},d:{c:null},f:{e:null},_:{g:null}}", id() );
- }
- };
-
- class MergeBucketsRightNull : public Base {
- public:
- void run() {
- ArtificialTree::setTree( "{d:{b:{a:null},bb:null,cc:{c:null}},_:{f:{e:null},h:{g:null}}}", id() );
-// dump();
- string ns = id().indexNamespace();
- ASSERT_EQUALS( 10, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 7, nsdetails( ns.c_str() )->stats.nrecords );
-
- BSONObj k = BSON( "" << "bb" );
- assert( unindex( k ) );
-// dump();
- ASSERT_EQUALS( 9, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 5, nsdetails( ns.c_str() )->stats.nrecords );
- ArtificialTree::checkStructure( "{b:{a:null},cc:{c:null},d:null,f:{e:null},h:{g:null}}", id() );
- }
- };
-
- // not yet handling this case
- class DontMergeSingleBucket : public Base {
- public:
- void run() {
- ArtificialTree::setTree( "{d:{b:{a:null},c:null}}", id() );
-// dump();
- string ns = id().indexNamespace();
- ASSERT_EQUALS( 4, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
- BSONObj k = BSON( "" << "c" );
- assert( unindex( k ) );
-// dump();
- ASSERT_EQUALS( 3, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
- ArtificialTree::checkStructure( "{d:{b:{a:null}}}", id() );
- }
- };
-
- class ParentMergeNonRightToLeft : public Base {
- public:
- void run() {
- ArtificialTree::setTree( "{d:{b:{a:null},bb:null,cc:{c:null}},i:{f:{e:null},h:{g:null}}}", id() );
-// dump();
- string ns = id().indexNamespace();
- ASSERT_EQUALS( 11, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 7, nsdetails( ns.c_str() )->stats.nrecords );
-
- BSONObj k = BSON( "" << "bb" );
- assert( unindex( k ) );
-// dump();
- ASSERT_EQUALS( 10, bt()->fullValidate( dl(), order(), 0, true ) );
- // child does not currently replace parent in this case
- ASSERT_EQUALS( 6, nsdetails( ns.c_str() )->stats.nrecords );
- ArtificialTree::checkStructure( "{i:{b:{a:null},cc:{c:null},d:null,f:{e:null},h:{g:null}}}", id() );
- }
- };
-
- class ParentMergeNonRightToRight : public Base {
- public:
- void run() {
- ArtificialTree::setTree( "{d:{b:{a:null},cc:{c:null}},i:{f:{e:null},ff:null,h:{g:null}}}", id() );
-// dump();
- string ns = id().indexNamespace();
- ASSERT_EQUALS( 11, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 7, nsdetails( ns.c_str() )->stats.nrecords );
-
- BSONObj k = BSON( "" << "ff" );
- assert( unindex( k ) );
-// dump();
- ASSERT_EQUALS( 10, bt()->fullValidate( dl(), order(), 0, true ) );
- // child does not currently replace parent in this case
- ASSERT_EQUALS( 6, nsdetails( ns.c_str() )->stats.nrecords );
- ArtificialTree::checkStructure( "{i:{b:{a:null},cc:{c:null},d:null,f:{e:null},h:{g:null}}}", id() );
- }
- };
-
- class CantMergeRightNoMerge : public Base {
- public:
- void run() {
- ArtificialTree::setTree( "{d:{b:{a:null},bb:null,cc:{c:null}},dd:null,_:{f:{e:null},h:{g:null}}}", id() );
-// dump();
- string ns = id().indexNamespace();
- ASSERT_EQUALS( 11, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 7, nsdetails( ns.c_str() )->stats.nrecords );
-
- BSONObj k = BSON( "" << "bb" );
- assert( unindex( k ) );
-// dump();
- ASSERT_EQUALS( 10, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 7, nsdetails( ns.c_str() )->stats.nrecords );
- ArtificialTree::checkStructure( "{d:{b:{a:null},cc:{c:null}},dd:null,_:{f:{e:null},h:{g:null}}}", id() );
- }
- };
-
- class CantMergeLeftNoMerge : public Base {
- public:
- void run() {
- ArtificialTree::setTree( "{c:{b:{a:null}},d:null,_:{f:{e:null},g:null}}", id() );
-// dump();
- string ns = id().indexNamespace();
- ASSERT_EQUALS( 7, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 5, nsdetails( ns.c_str() )->stats.nrecords );
-
- BSONObj k = BSON( "" << "g" );
- assert( unindex( k ) );
-// dump();
- ASSERT_EQUALS( 6, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 5, nsdetails( ns.c_str() )->stats.nrecords );
- ArtificialTree::checkStructure( "{c:{b:{a:null}},d:null,_:{f:{e:null}}}", id() );
- }
- };
-
- class MergeOption : public Base {
- public:
- void run() {
- ArtificialTree::setTree( "{c:{b:{a:null}},f:{e:{d:null},ee:null},_:{h:{g:null}}}", id() );
-// dump();
- string ns = id().indexNamespace();
- ASSERT_EQUALS( 9, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 7, nsdetails( ns.c_str() )->stats.nrecords );
-
- BSONObj k = BSON( "" << "ee" );
- assert( unindex( k ) );
-// dump();
- ASSERT_EQUALS( 8, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 6, nsdetails( ns.c_str() )->stats.nrecords );
- ArtificialTree::checkStructure( "{c:{b:{a:null}},_:{e:{d:null},f:null,h:{g:null}}}", id() );
- }
- };
-
- class ForceMergeLeft : public Base {
- public:
- void run() {
- ArtificialTree::setTree( "{c:{b:{a:null}},f:{e:{d:null},ee:null},ff:null,_:{h:{g:null}}}", id() );
-// dump();
- string ns = id().indexNamespace();
- ASSERT_EQUALS( 10, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 7, nsdetails( ns.c_str() )->stats.nrecords );
-
- BSONObj k = BSON( "" << "ee" );
- assert( unindex( k ) );
-// dump();
- ASSERT_EQUALS( 9, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 6, nsdetails( ns.c_str() )->stats.nrecords );
- ArtificialTree::checkStructure( "{f:{b:{a:null},c:null,e:{d:null}},ff:null,_:{h:{g:null}}}", id() );
- }
- };
-
- class ForceMergeRight : public Base {
- public:
- void run() {
- ArtificialTree::setTree( "{c:{b:{a:null}},cc:null,f:{e:{d:null},ee:null},_:{h:{g:null}}}", id() );
-// dump();
- string ns = id().indexNamespace();
- ASSERT_EQUALS( 10, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 7, nsdetails( ns.c_str() )->stats.nrecords );
-
- BSONObj k = BSON( "" << "ee" );
- assert( unindex( k ) );
-// dump();
- ASSERT_EQUALS( 9, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 6, nsdetails( ns.c_str() )->stats.nrecords );
- ArtificialTree::checkStructure( "{c:{b:{a:null}},cc:null,_:{e:{d:null},f:null,h:{g:null}}}", id() );
- }
- };
-
- class RecursiveMerge : public Base {
- public:
- void run() {
- ArtificialTree::setTree( "{h:{e:{b:{a:null},c:null,d:null},g:{f:null}},j:{i:null}}", id() );
-// dump();
- string ns = id().indexNamespace();
- ASSERT_EQUALS( 10, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 6, nsdetails( ns.c_str() )->stats.nrecords );
-
- BSONObj k = BSON( "" << "c" );
- assert( unindex( k ) );
-// dump();
- ASSERT_EQUALS( 9, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
- // height is not currently reduced in this case
- ArtificialTree::checkStructure( "{j:{g:{b:{a:null},d:null,e:null,f:null},h:null,i:null}}", id() );
- }
- };
-
- class RecursiveMergeRightBucket : public Base {
- public:
- void run() {
- ArtificialTree::setTree( "{h:{e:{b:{a:null},c:null,d:null},g:{f:null}},_:{i:null}}", id() );
-// dump();
- string ns = id().indexNamespace();
- ASSERT_EQUALS( 9, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 6, nsdetails( ns.c_str() )->stats.nrecords );
-
- BSONObj k = BSON( "" << "c" );
- assert( unindex( k ) );
-// dump();
- ASSERT_EQUALS( 8, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
- ArtificialTree::checkStructure( "{g:{b:{a:null},d:null,e:null,f:null},h:null,i:null}", id() );
- }
- };
-
- class RecursiveMergeDoubleRightBucket : public Base {
- public:
- void run() {
- ArtificialTree::setTree( "{h:{e:{b:{a:null},c:null,d:null},_:{f:null}},_:{i:null}}", id() );
-// dump();
- string ns = id().indexNamespace();
- ASSERT_EQUALS( 8, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 6, nsdetails( ns.c_str() )->stats.nrecords );
-
- BSONObj k = BSON( "" << "c" );
- assert( unindex( k ) );
-// dump();
- ASSERT_EQUALS( 7, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
- // no recursion currently in this case
- ArtificialTree::checkStructure( "{h:{b:{a:null},d:null,e:null,f:null},_:{i:null}}", id() );
- }
- };
-
- class MergeSizeBase : public Base {
- public:
- MergeSizeBase() : _count() {}
- virtual ~MergeSizeBase() {}
- void run() {
- typedef ArtificialTree A;
- A::set( A::make( id() ), id() );
- A* root = A::is( dl() );
- DiskLoc left = A::make( id() );
- root->push( biggestKey( 'm' ), left );
- _count = 1;
- A* l = A::is( left );
- DiskLoc right = A::make( id() );
- root->setNext( right );
- A* r = A::is( right );
- root->fixParentPtrs( dl() );
-
- ASSERT_EQUALS( bigSize(), bigSize() / 2 * 2 );
- fillToExactSize( l, leftSize(), 'a' );
- fillToExactSize( r, rightSize(), 'n' );
- ASSERT( leftAdditional() <= 2 );
- if ( leftAdditional() >= 2 ) {
- l->push( bigKey( 'k' ), DiskLoc() );
- }
- if ( leftAdditional() >= 1 ) {
- l->push( bigKey( 'l' ), DiskLoc() );
- }
- ASSERT( rightAdditional() <= 2 );
- if ( rightAdditional() >= 2 ) {
- r->push( bigKey( 'y' ), DiskLoc() );
- }
- if ( rightAdditional() >= 1 ) {
- r->push( bigKey( 'z' ), DiskLoc() );
- }
- _count += leftAdditional() + rightAdditional();
-
-// dump();
-
- initCheck();
- string ns = id().indexNamespace();
- const char *keys = delKeys();
- for( const char *i = keys; *i; ++i ) {
- int unused = 0;
- ASSERT_EQUALS( _count, bt()->fullValidate( dl(), order(), &unused, true ) );
- ASSERT_EQUALS( 0, unused );
- ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
- BSONObj k = bigKey( *i );
- unindex( k );
-// dump();
- --_count;
- }
-
-// dump();
-
- int unused = 0;
- ASSERT_EQUALS( _count, bt()->fullValidate( dl(), order(), &unused, true ) );
- ASSERT_EQUALS( 0, unused );
- validate();
- if ( !merge() ) {
- ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
- }
- else {
- ASSERT_EQUALS( 1, nsdetails( ns.c_str() )->stats.nrecords );
- }
- }
- protected:
- virtual int leftAdditional() const { return 2; }
- virtual int rightAdditional() const { return 2; }
- virtual void initCheck() {}
- virtual void validate() {}
- virtual int leftSize() const = 0;
- virtual int rightSize() const = 0;
- virtual const char * delKeys() const { return "klyz"; }
- virtual bool merge() const { return true; }
- void fillToExactSize( ArtificialTree *t, int targetSize, char startKey ) {
- int size = 0;
- while( size < targetSize ) {
- int space = targetSize - size;
- int nextSize = space - sizeof( _KeyNode );
- assert( nextSize > 0 );
- BSONObj newKey = key( startKey++, nextSize );
- t->push( newKey, DiskLoc() );
- size += newKey.objsize() + sizeof( _KeyNode );
- _count += 1;
- }
- ASSERT_EQUALS( t->packedDataSize( 0 ), targetSize );
- }
- static BSONObj key( char a, int size ) {
- if ( size >= bigSize() ) {
- return bigKey( a );
- }
- return simpleKey( a, size - ( bigSize() - 801 ) );
- }
- static BSONObj bigKey( char a ) {
- return simpleKey( a, 801 );
- }
- static BSONObj biggestKey( char a ) {
- int size = BtreeBucket::getKeyMax() - bigSize() + 801;
- return simpleKey( a, size );
- }
- static int bigSize() {
- return bigKey( 'a' ).objsize();
- }
- static int biggestSize() {
- return biggestKey( 'a' ).objsize();
- }
- int _count;
- };
-
- class MergeSizeJustRightRight : public MergeSizeBase {
- protected:
- virtual int rightSize() const { return BtreeBucket::getLowWaterMark() - 1; }
- virtual int leftSize() const { return BtreeBucket::bodySize() - biggestSize() - sizeof( _KeyNode ) - ( BtreeBucket::getLowWaterMark() - 1 ); }
- };
-
- class MergeSizeJustRightLeft : public MergeSizeBase {
- protected:
- virtual int leftSize() const { return BtreeBucket::getLowWaterMark() - 1; }
- virtual int rightSize() const { return BtreeBucket::bodySize() - biggestSize() - sizeof( _KeyNode ) - ( BtreeBucket::getLowWaterMark() - 1 ); }
- virtual const char * delKeys() const { return "yzkl"; }
- };
-
- class MergeSizeRight : public MergeSizeJustRightRight {
- virtual int rightSize() const { return MergeSizeJustRightRight::rightSize() - 1; }
- virtual int leftSize() const { return MergeSizeJustRightRight::leftSize() + 1; }
- };
-
- class MergeSizeLeft : public MergeSizeJustRightLeft {
- virtual int rightSize() const { return MergeSizeJustRightLeft::rightSize() + 1; }
- virtual int leftSize() const { return MergeSizeJustRightLeft::leftSize() - 1; }
- };
-
- class NoMergeBelowMarkRight : public MergeSizeJustRightRight {
- virtual int rightSize() const { return MergeSizeJustRightRight::rightSize() + 1; }
- virtual int leftSize() const { return MergeSizeJustRightRight::leftSize() - 1; }
- virtual bool merge() const { return false; }
- };
-
- class NoMergeBelowMarkLeft : public MergeSizeJustRightLeft {
- virtual int rightSize() const { return MergeSizeJustRightLeft::rightSize() - 1; }
- virtual int leftSize() const { return MergeSizeJustRightLeft::leftSize() + 1; }
- virtual bool merge() const { return false; }
- };
-
- class MergeSizeRightTooBig : public MergeSizeJustRightLeft {
- virtual int rightSize() const { return MergeSizeJustRightLeft::rightSize() + 1; }
- virtual bool merge() const { return false; }
- };
-
- class MergeSizeLeftTooBig : public MergeSizeJustRightRight {
- virtual int leftSize() const { return MergeSizeJustRightRight::leftSize() + 1; }
- virtual bool merge() const { return false; }
- };
-
- class BalanceOneLeftToRight : public Base {
- public:
- void run() {
- string ns = id().indexNamespace();
- ArtificialTree::setTree( "{$10:{$1:null,$2:null,$3:null,$4:null,$5:null,$6:null},b:{$20:null,$30:null,$40:null,$50:null,a:null},_:{c:null}}", id() );
- ASSERT_EQUALS( 14, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
- BSONObj k = BSON( "" << bigNumString( 0x40 ) );
-// dump();
- ASSERT( unindex( k ) );
-// dump();
- ASSERT_EQUALS( 13, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
- ArtificialTree::checkStructure( "{$6:{$1:null,$2:null,$3:null,$4:null,$5:null},b:{$10:null,$20:null,$30:null,$50:null,a:null},_:{c:null}}", id() );
- }
- };
-
- class BalanceOneRightToLeft : public Base {
- public:
- void run() {
- string ns = id().indexNamespace();
- ArtificialTree::setTree( "{$10:{$1:null,$2:null,$3:null,$4:null},b:{$20:null,$30:null,$40:null,$50:null,$60:null,$70:null},_:{c:null}}", id() );
- ASSERT_EQUALS( 13, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
- BSONObj k = BSON( "" << bigNumString( 0x3 ) );
-// dump();
- ASSERT( unindex( k ) );
-// dump();
- ASSERT_EQUALS( 12, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
- ArtificialTree::checkStructure( "{$20:{$1:null,$2:null,$4:null,$10:null},b:{$30:null,$40:null,$50:null,$60:null,$70:null},_:{c:null}}", id() );
- }
- };
-
- class BalanceThreeLeftToRight : public Base {
- public:
- void run() {
- string ns = id().indexNamespace();
- ArtificialTree::setTree( "{$20:{$1:{$0:null},$3:{$2:null},$5:{$4:null},$7:{$6:null},$9:{$8:null},$11:{$10:null},$13:{$12:null},_:{$14:null}},b:{$30:null,$40:{$35:null},$50:{$45:null}},_:{c:null}}", id() );
- ASSERT_EQUALS( 23, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 14, nsdetails( ns.c_str() )->stats.nrecords );
- BSONObj k = BSON( "" << bigNumString( 0x30 ) );
- // dump();
- ASSERT( unindex( k ) );
- // dump();
- ASSERT_EQUALS( 22, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 14, nsdetails( ns.c_str() )->stats.nrecords );
- ArtificialTree::checkStructure( "{$9:{$1:{$0:null},$3:{$2:null},$5:{$4:null},$7:{$6:null},_:{$8:null}},b:{$11:{$10:null},$13:{$12:null},$20:{$14:null},$40:{$35:null},$50:{$45:null}},_:{c:null}}", id() );
- }
- };
-
- class BalanceThreeRightToLeft : public Base {
- public:
- void run() {
- string ns = id().indexNamespace();
- ArtificialTree::setTree( "{$20:{$1:{$0:null},$3:{$2:null},$5:null,_:{$14:null}},b:{$30:{$25:null},$40:{$35:null},$50:{$45:null},$60:{$55:null},$70:{$65:null},$80:{$75:null},$90:{$85:null},$100:{$95:null}},_:{c:null}}", id() );
- ASSERT_EQUALS( 25, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 15, nsdetails( ns.c_str() )->stats.nrecords );
- BSONObj k = BSON( "" << bigNumString( 0x5 ) );
-// dump();
- ASSERT( unindex( k ) );
-// dump();
- ASSERT_EQUALS( 24, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 15, nsdetails( ns.c_str() )->stats.nrecords );
- ArtificialTree::checkStructure( "{$50:{$1:{$0:null},$3:{$2:null},$20:{$14:null},$30:{$25:null},$40:{$35:null},_:{$45:null}},b:{$60:{$55:null},$70:{$65:null},$80:{$75:null},$90:{$85:null},$100:{$95:null}},_:{c:null}}", id() );
- }
- };
-
- class BalanceSingleParentKey : public Base {
- public:
- void run() {
- string ns = id().indexNamespace();
- ArtificialTree::setTree( "{$10:{$1:null,$2:null,$3:null,$4:null,$5:null,$6:null},_:{$20:null,$30:null,$40:null,$50:null,a:null}}", id() );
- ASSERT_EQUALS( 12, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
- BSONObj k = BSON( "" << bigNumString( 0x40 ) );
-// dump();
- ASSERT( unindex( k ) );
-// dump();
- ASSERT_EQUALS( 11, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
- ArtificialTree::checkStructure( "{$6:{$1:null,$2:null,$3:null,$4:null,$5:null},_:{$10:null,$20:null,$30:null,$50:null,a:null}}", id() );
- }
- };
-
- class PackEmpty : public Base {
- public:
- void run() {
- string ns = id().indexNamespace();
- ArtificialTree::setTree( "{a:null}", id() );
- BSONObj k = BSON( "" << "a" );
- ASSERT( unindex( k ) );
- ArtificialTree *t = ArtificialTree::is( dl() );
- t->forcePack();
- Tester::checkEmpty( t, id() );
- }
- class Tester : public ArtificialTree {
- public:
- static void checkEmpty( ArtificialTree *a, const IndexDetails &id ) {
- Tester *t = static_cast< Tester * >( a );
- ASSERT_EQUALS( 0, t->n );
- ASSERT( !( t->flags & Packed ) );
- Ordering o = Ordering::make( id.keyPattern() );
- int zero = 0;
- t->_packReadyForMod( o, zero );
- ASSERT_EQUALS( 0, t->n );
- ASSERT_EQUALS( 0, t->topSize );
- ASSERT_EQUALS( BtreeBucket::bodySize(), t->emptySize );
- ASSERT( t->flags & Packed );
- }
- };
- };
-
- class PackedDataSizeEmpty : public Base {
- public:
- void run() {
- string ns = id().indexNamespace();
- ArtificialTree::setTree( "{a:null}", id() );
- BSONObj k = BSON( "" << "a" );
- ASSERT( unindex( k ) );
- ArtificialTree *t = ArtificialTree::is( dl() );
- t->forcePack();
- Tester::checkEmpty( t, id() );
- }
- class Tester : public ArtificialTree {
- public:
- static void checkEmpty( ArtificialTree *a, const IndexDetails &id ) {
- Tester *t = static_cast< Tester * >( a );
- ASSERT_EQUALS( 0, t->n );
- ASSERT( !( t->flags & Packed ) );
- int zero = 0;
- ASSERT_EQUALS( 0, t->packedDataSize( zero ) );
- ASSERT( !( t->flags & Packed ) );
- }
- };
- };
-
- class BalanceSingleParentKeyPackParent : public Base {
- public:
- void run() {
- string ns = id().indexNamespace();
- ArtificialTree::setTree( "{$10:{$1:null,$2:null,$3:null,$4:null,$5:null,$6:null},_:{$20:null,$30:null,$40:null,$50:null,a:null}}", id() );
- ASSERT_EQUALS( 12, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
- // force parent pack
- ArtificialTree::is( dl() )->forcePack();
- BSONObj k = BSON( "" << bigNumString( 0x40 ) );
-// dump();
- ASSERT( unindex( k ) );
-// dump();
- ASSERT_EQUALS( 11, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
- ArtificialTree::checkStructure( "{$6:{$1:null,$2:null,$3:null,$4:null,$5:null},_:{$10:null,$20:null,$30:null,$50:null,a:null}}", id() );
- }
- };
-
- class BalanceSplitParent : public Base {
- public:
- void run() {
- string ns = id().indexNamespace();
- ArtificialTree::setTree( "{$10$10:{$1:null,$2:null,$3:null,$4:null},$100:{$20:null,$30:null,$40:null,$50:null,$60:null,$70:null,$80:null},$200:null,$300:null,$400:null,$500:null,$600:null,$700:null,$800:null,$900:null,_:{c:null}}", id() );
- ASSERT_EQUALS( 22, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
- BSONObj k = BSON( "" << bigNumString( 0x3 ) );
-// dump();
- ASSERT( unindex( k ) );
-// dump();
- ASSERT_EQUALS( 21, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 6, nsdetails( ns.c_str() )->stats.nrecords );
- ArtificialTree::checkStructure( "{$500:{$30:{$1:null,$2:null,$4:null,$10$10:null,$20:null},$100:{$40:null,$50:null,$60:null,$70:null,$80:null},$200:null,$300:null,$400:null},_:{$600:null,$700:null,$800:null,$900:null,_:{c:null}}}", id() );
- }
- };
-
- class RebalancedSeparatorBase : public Base {
- public:
- void run() {
- ArtificialTree::setTree( treeSpec(), id() );
- modTree();
- Tester::checkSeparator( id(), expectedSeparator() );
- }
- virtual string treeSpec() const = 0;
- virtual int expectedSeparator() const = 0;
- virtual void modTree() {}
- struct Tester : public ArtificialTree {
- static void checkSeparator( const IndexDetails& id, int expected ) {
- ASSERT_EQUALS( expected, static_cast< Tester * >( id.head.btreemod() )->rebalancedSeparatorPos( id.head, 0 ) );
- }
- };
- };
-
- class EvenRebalanceLeft : public RebalancedSeparatorBase {
- virtual string treeSpec() const { return "{$7:{$1:null,$2$31f:null,$3:null,$4$31f:null,$5:null,$6:null},_:{$8:null,$9:null,$10$31e:null}}"; }
- virtual int expectedSeparator() const { return 4; }
- };
-
- class EvenRebalanceLeftCusp : public RebalancedSeparatorBase {
- virtual string treeSpec() const { return "{$6:{$1:null,$2$31f:null,$3:null,$4$31f:null,$5:null},_:{$7:null,$8:null,$9$31e:null,$10:null}}"; }
- virtual int expectedSeparator() const { return 4; }
- };
-
- class EvenRebalanceRight : public RebalancedSeparatorBase {
- virtual string treeSpec() const { return "{$3:{$1:null,$2$31f:null},_:{$4$31f:null,$5:null,$6:null,$7:null,$8$31e:null,$9:null,$10:null}}"; }
- virtual int expectedSeparator() const { return 4; }
- };
-
- class EvenRebalanceRightCusp : public RebalancedSeparatorBase {
- virtual string treeSpec() const { return "{$4$31f:{$1:null,$2$31f:null,$3:null},_:{$5:null,$6:null,$7$31e:null,$8:null,$9:null,$10:null}}"; }
- virtual int expectedSeparator() const { return 4; }
- };
-
- class EvenRebalanceCenter : public RebalancedSeparatorBase {
- virtual string treeSpec() const { return "{$5:{$1:null,$2$31f:null,$3:null,$4$31f:null},_:{$6:null,$7$31e:null,$8:null,$9:null,$10:null}}"; }
- virtual int expectedSeparator() const { return 4; }
- };
-
- class OddRebalanceLeft : public RebalancedSeparatorBase {
- virtual string treeSpec() const { return "{$6$31f:{$1:null,$2:null,$3:null,$4:null,$5:null},_:{$7:null,$8:null,$9:null,$10:null}}"; }
- virtual int expectedSeparator() const { return 4; }
- };
-
- class OddRebalanceRight : public RebalancedSeparatorBase {
- virtual string treeSpec() const { return "{$4:{$1:null,$2:null,$3:null},_:{$5:null,$6:null,$7:null,$8$31f:null,$9:null,$10:null}}"; }
- virtual int expectedSeparator() const { return 4; }
- };
-
- class OddRebalanceCenter : public RebalancedSeparatorBase {
- virtual string treeSpec() const { return "{$5:{$1:null,$2:null,$3:null,$4:null},_:{$6:null,$7:null,$8:null,$9:null,$10$31f:null}}"; }
- virtual int expectedSeparator() const { return 4; }
- };
-
- class RebalanceEmptyRight : public RebalancedSeparatorBase {
- virtual string treeSpec() const { return "{$a:{$1:null,$2:null,$3:null,$4:null,$5:null,$6:null,$7:null,$8:null,$9:null},_:{$b:null}}"; }
- virtual void modTree() {
- BSONObj k = BSON( "" << bigNumString( 0xb ) );
- ASSERT( unindex( k ) );
- }
- virtual int expectedSeparator() const { return 4; }
- };
-
- class RebalanceEmptyLeft : public RebalancedSeparatorBase {
- virtual string treeSpec() const { return "{$a:{$1:null},_:{$11:null,$12:null,$13:null,$14:null,$15:null,$16:null,$17:null,$18:null,$19:null}}"; }
- virtual void modTree() {
- BSONObj k = BSON( "" << bigNumString( 0x1 ) );
- ASSERT( unindex( k ) );
- }
- virtual int expectedSeparator() const { return 4; }
- };
-
- class NoMoveAtLowWaterMarkRight : public MergeSizeJustRightRight {
- virtual int rightSize() const { return MergeSizeJustRightRight::rightSize() + 1; }
- virtual void initCheck() { _oldTop = bt()->keyNode( 0 ).key; }
- virtual void validate() { ASSERT_EQUALS( _oldTop, bt()->keyNode( 0 ).key ); }
- virtual bool merge() const { return false; }
- protected:
- BSONObj _oldTop;
- };
-
- class MoveBelowLowWaterMarkRight : public NoMoveAtLowWaterMarkRight {
- virtual int rightSize() const { return MergeSizeJustRightRight::rightSize(); }
- virtual int leftSize() const { return MergeSizeJustRightRight::leftSize() + 1; }
- // different top means we rebalanced
- virtual void validate() { ASSERT( !( _oldTop == bt()->keyNode( 0 ).key ) ); }
- };
-
- class NoMoveAtLowWaterMarkLeft : public MergeSizeJustRightLeft {
- virtual int leftSize() const { return MergeSizeJustRightLeft::leftSize() + 1; }
- virtual void initCheck() { _oldTop = bt()->keyNode( 0 ).key; }
- virtual void validate() { ASSERT_EQUALS( _oldTop, bt()->keyNode( 0 ).key ); }
- virtual bool merge() const { return false; }
- protected:
- BSONObj _oldTop;
- };
-
- class MoveBelowLowWaterMarkLeft : public NoMoveAtLowWaterMarkLeft {
- virtual int leftSize() const { return MergeSizeJustRightLeft::leftSize(); }
- virtual int rightSize() const { return MergeSizeJustRightLeft::rightSize() + 1; }
- // different top means we rebalanced
- virtual void validate() { ASSERT( !( _oldTop == bt()->keyNode( 0 ).key ) ); }
- };
-
- class PreferBalanceLeft : public Base {
- public:
- void run() {
- string ns = id().indexNamespace();
- ArtificialTree::setTree( "{$10:{$1:null,$2:null,$3:null,$4:null,$5:null,$6:null},$20:{$11:null,$12:null,$13:null,$14:null},_:{$30:null}}", id() );
- ASSERT_EQUALS( 13, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
- BSONObj k = BSON( "" << bigNumString( 0x12 ) );
-// dump();
- ASSERT( unindex( k ) );
-// dump();
- ASSERT_EQUALS( 12, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
- ArtificialTree::checkStructure( "{$5:{$1:null,$2:null,$3:null,$4:null},$20:{$6:null,$10:null,$11:null,$13:null,$14:null},_:{$30:null}}", id() );
- }
- };
-
- class PreferBalanceRight : public Base {
- public:
- void run() {
- string ns = id().indexNamespace();
- ArtificialTree::setTree( "{$10:{$1:null},$20:{$11:null,$12:null,$13:null,$14:null},_:{$31:null,$32:null,$33:null,$34:null,$35:null,$36:null}}", id() );
- ASSERT_EQUALS( 13, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
- BSONObj k = BSON( "" << bigNumString( 0x12 ) );
- // dump();
- ASSERT( unindex( k ) );
- // dump();
- ASSERT_EQUALS( 12, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
- ArtificialTree::checkStructure( "{$10:{$1:null},$31:{$11:null,$13:null,$14:null,$20:null},_:{$32:null,$33:null,$34:null,$35:null,$36:null}}", id() );
- }
- };
-
- class RecursiveMergeThenBalance : public Base {
- public:
- void run() {
- string ns = id().indexNamespace();
- ArtificialTree::setTree( "{$10:{$5:{$1:null,$2:null},$8:{$6:null,$7:null}},_:{$20:null,$30:null,$40:null,$50:null,$60:null,$70:null,$80:null,$90:null}}", id() );
- ASSERT_EQUALS( 15, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 5, nsdetails( ns.c_str() )->stats.nrecords );
- BSONObj k = BSON( "" << bigNumString( 0x7 ) );
- // dump();
- ASSERT( unindex( k ) );
- // dump();
- ASSERT_EQUALS( 14, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
- ArtificialTree::checkStructure( "{$40:{$8:{$1:null,$2:null,$5:null,$6:null},$10:null,$20:null,$30:null},_:{$50:null,$60:null,$70:null,$80:null,$90:null}}", id() );
- }
- };
-
- class MergeRightEmpty : public MergeSizeBase {
- protected:
- virtual int rightAdditional() const { return 1; }
- virtual int leftAdditional() const { return 1; }
- virtual const char * delKeys() const { return "lz"; }
- virtual int rightSize() const { return 0; }
- virtual int leftSize() const { return BtreeBucket::bodySize() - biggestSize() - sizeof( _KeyNode ); }
- };
-
- class MergeMinRightEmpty : public MergeSizeBase {
- protected:
- virtual int rightAdditional() const { return 1; }
- virtual int leftAdditional() const { return 0; }
- virtual const char * delKeys() const { return "z"; }
- virtual int rightSize() const { return 0; }
- virtual int leftSize() const { return bigSize() + sizeof( _KeyNode ); }
- };
-
- class MergeLeftEmpty : public MergeSizeBase {
- protected:
- virtual int rightAdditional() const { return 1; }
- virtual int leftAdditional() const { return 1; }
- virtual const char * delKeys() const { return "zl"; }
- virtual int leftSize() const { return 0; }
- virtual int rightSize() const { return BtreeBucket::bodySize() - biggestSize() - sizeof( _KeyNode ); }
- };
-
- class MergeMinLeftEmpty : public MergeSizeBase {
- protected:
- virtual int leftAdditional() const { return 1; }
- virtual int rightAdditional() const { return 0; }
- virtual const char * delKeys() const { return "l"; }
- virtual int leftSize() const { return 0; }
- virtual int rightSize() const { return bigSize() + sizeof( _KeyNode ); }
- };
-
- class BalanceRightEmpty : public MergeRightEmpty {
- protected:
- virtual int leftSize() const { return BtreeBucket::bodySize() - biggestSize() - sizeof( _KeyNode ) + 1; }
- virtual bool merge() const { return false; }
- virtual void initCheck() { _oldTop = bt()->keyNode( 0 ).key; }
- virtual void validate() { ASSERT( !( _oldTop == bt()->keyNode( 0 ).key ) ); }
- private:
- BSONObj _oldTop;
- };
-
- class BalanceLeftEmpty : public MergeLeftEmpty {
- protected:
- virtual int rightSize() const { return BtreeBucket::bodySize() - biggestSize() - sizeof( _KeyNode ) + 1; }
- virtual bool merge() const { return false; }
- virtual void initCheck() { _oldTop = bt()->keyNode( 0 ).key; }
- virtual void validate() { ASSERT( !( _oldTop == bt()->keyNode( 0 ).key ) ); }
- private:
- BSONObj _oldTop;
- };
-
- class DelEmptyNoNeighbors : public Base {
- public:
- void run() {
- string ns = id().indexNamespace();
- ArtificialTree::setTree( "{b:{a:null}}", id() );
- ASSERT_EQUALS( 2, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 2, nsdetails( ns.c_str() )->stats.nrecords );
- BSONObj k = BSON( "" << "a" );
- // dump();
- ASSERT( unindex( k ) );
- // dump();
- ASSERT_EQUALS( 1, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 1, nsdetails( ns.c_str() )->stats.nrecords );
- ArtificialTree::checkStructure( "{b:null}", id() );
- }
- };
-
- class DelEmptyEmptyNeighbors : public Base {
- public:
- void run() {
- string ns = id().indexNamespace();
- ArtificialTree::setTree( "{a:null,c:{b:null},d:null}", id() );
- ASSERT_EQUALS( 4, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 2, nsdetails( ns.c_str() )->stats.nrecords );
- BSONObj k = BSON( "" << "b" );
- // dump();
- ASSERT( unindex( k ) );
- // dump();
- ASSERT_EQUALS( 3, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 1, nsdetails( ns.c_str() )->stats.nrecords );
- ArtificialTree::checkStructure( "{a:null,c:null,d:null}", id() );
- }
- };
-
- class DelInternal : public Base {
- public:
- void run() {
- string ns = id().indexNamespace();
- ArtificialTree::setTree( "{a:null,c:{b:null},d:null}", id() );
- int unused = 0;
- ASSERT_EQUALS( 4, bt()->fullValidate( dl(), order(), &unused, true ) );
- ASSERT_EQUALS( 0, unused );
- ASSERT_EQUALS( 2, nsdetails( ns.c_str() )->stats.nrecords );
- BSONObj k = BSON( "" << "c" );
-// dump();
- ASSERT( unindex( k ) );
-// dump();
- ASSERT_EQUALS( 3, bt()->fullValidate( dl(), order(), &unused, true ) );
- ASSERT_EQUALS( 0, unused );
- ASSERT_EQUALS( 1, nsdetails( ns.c_str() )->stats.nrecords );
- ArtificialTree::checkStructure( "{a:null,b:null,d:null}", id() );
- }
- };
-
- class DelInternalReplaceWithUnused : public Base {
- public:
- void run() {
- string ns = id().indexNamespace();
- ArtificialTree::setTree( "{a:null,c:{b:null},d:null}", id() );
- getDur().writingInt( const_cast< DiskLoc& >( bt()->keyNode( 1 ).prevChildBucket.btree()->keyNode( 0 ).recordLoc ).GETOFS() ) |= 1; // make unused
- int unused = 0;
- ASSERT_EQUALS( 3, bt()->fullValidate( dl(), order(), &unused, true ) );
- ASSERT_EQUALS( 1, unused );
- ASSERT_EQUALS( 2, nsdetails( ns.c_str() )->stats.nrecords );
- BSONObj k = BSON( "" << "c" );
-// dump();
- ASSERT( unindex( k ) );
-// dump();
- unused = 0;
- ASSERT_EQUALS( 2, bt()->fullValidate( dl(), order(), &unused, true ) );
- ASSERT_EQUALS( 1, unused );
- ASSERT_EQUALS( 1, nsdetails( ns.c_str() )->stats.nrecords );
- // doesn't discriminate between used and unused
- ArtificialTree::checkStructure( "{a:null,b:null,d:null}", id() );
- }
- };
-
- class DelInternalReplaceRight : public Base {
- public:
- void run() {
- string ns = id().indexNamespace();
- ArtificialTree::setTree( "{a:null,_:{b:null}}", id() );
- int unused = 0;
- ASSERT_EQUALS( 2, bt()->fullValidate( dl(), order(), &unused, true ) );
- ASSERT_EQUALS( 0, unused );
- ASSERT_EQUALS( 2, nsdetails( ns.c_str() )->stats.nrecords );
- BSONObj k = BSON( "" << "a" );
-// dump();
- ASSERT( unindex( k ) );
-// dump();
- unused = 0;
- ASSERT_EQUALS( 1, bt()->fullValidate( dl(), order(), &unused, true ) );
- ASSERT_EQUALS( 0, unused );
- ASSERT_EQUALS( 1, nsdetails( ns.c_str() )->stats.nrecords );
- ArtificialTree::checkStructure( "{b:null}", id() );
- }
- };
-
- class DelInternalPromoteKey : public Base {
- public:
- void run() {
- string ns = id().indexNamespace();
- ArtificialTree::setTree( "{a:null,y:{d:{c:{b:null}},_:{e:null}},z:null}", id() );
- int unused = 0;
- ASSERT_EQUALS( 7, bt()->fullValidate( dl(), order(), &unused, true ) );
- ASSERT_EQUALS( 0, unused );
- ASSERT_EQUALS( 5, nsdetails( ns.c_str() )->stats.nrecords );
- BSONObj k = BSON( "" << "y" );
-// dump();
- ASSERT( unindex( k ) );
-// dump();
- unused = 0;
- ASSERT_EQUALS( 6, bt()->fullValidate( dl(), order(), &unused, true ) );
- ASSERT_EQUALS( 0, unused );
- ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
- ArtificialTree::checkStructure( "{a:null,e:{c:{b:null},d:null},z:null}", id() );
- }
- };
-
- class DelInternalPromoteRightKey : public Base {
- public:
- void run() {
- string ns = id().indexNamespace();
- ArtificialTree::setTree( "{a:null,_:{e:{c:null},_:{f:null}}}", id() );
- int unused = 0;
- ASSERT_EQUALS( 4, bt()->fullValidate( dl(), order(), &unused, true ) );
- ASSERT_EQUALS( 0, unused );
- ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
- BSONObj k = BSON( "" << "a" );
-// dump();
- ASSERT( unindex( k ) );
-// dump();
- unused = 0;
- ASSERT_EQUALS( 3, bt()->fullValidate( dl(), order(), &unused, true ) );
- ASSERT_EQUALS( 0, unused );
- ASSERT_EQUALS( 2, nsdetails( ns.c_str() )->stats.nrecords );
- ArtificialTree::checkStructure( "{c:null,_:{e:null,f:null}}", id() );
- }
- };
-
- class DelInternalReplacementPrevNonNull : public Base {
- public:
- void run() {
- string ns = id().indexNamespace();
- ArtificialTree::setTree( "{a:null,d:{c:{b:null}},e:null}", id() );
- int unused = 0;
- ASSERT_EQUALS( 5, bt()->fullValidate( dl(), order(), &unused, true ) );
- ASSERT_EQUALS( 0, unused );
- ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
- BSONObj k = BSON( "" << "d" );
- // dump();
- ASSERT( unindex( k ) );
- // dump();
- ASSERT_EQUALS( 4, bt()->fullValidate( dl(), order(), &unused, true ) );
- ASSERT_EQUALS( 1, unused );
- ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
- ArtificialTree::checkStructure( "{a:null,d:{c:{b:null}},e:null}", id() );
- ASSERT( bt()->keyNode( 1 ).recordLoc.getOfs() & 1 ); // check 'unused' key
- }
- };
-
- class DelInternalReplacementNextNonNull : public Base {
- public:
- void run() {
- string ns = id().indexNamespace();
- ArtificialTree::setTree( "{a:null,_:{c:null,_:{d:null}}}", id() );
- int unused = 0;
- ASSERT_EQUALS( 3, bt()->fullValidate( dl(), order(), &unused, true ) );
- ASSERT_EQUALS( 0, unused );
- ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
- BSONObj k = BSON( "" << "a" );
- // dump();
- ASSERT( unindex( k ) );
- // dump();
- ASSERT_EQUALS( 2, bt()->fullValidate( dl(), order(), &unused, true ) );
- ASSERT_EQUALS( 1, unused );
- ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
- ArtificialTree::checkStructure( "{a:null,_:{c:null,_:{d:null}}}", id() );
- ASSERT( bt()->keyNode( 0 ).recordLoc.getOfs() & 1 ); // check 'unused' key
- }
- };
-
- class DelInternalSplitPromoteLeft : public Base {
- public:
- void run() {
- string ns = id().indexNamespace();
- ArtificialTree::setTree( "{$10:null,$20:null,$30$10:{$25:{$23:null},_:{$27:null}},$40:null,$50:null,$60:null,$70:null,$80:null,$90:null,$100:null}", id() );
- int unused = 0;
- ASSERT_EQUALS( 13, bt()->fullValidate( dl(), order(), &unused, true ) );
- ASSERT_EQUALS( 0, unused );
- ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
- BSONObj k = BSON( "" << bigNumString( 0x30, 0x10 ) );
-// dump();
- ASSERT( unindex( k ) );
-// dump();
- ASSERT_EQUALS( 12, bt()->fullValidate( dl(), order(), &unused, true ) );
- ASSERT_EQUALS( 0, unused );
- ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
- ArtificialTree::checkStructure( "{$60:{$10:null,$20:null,$27:{$23:null,$25:null},$40:null,$50:null},_:{$70:null,$80:null,$90:null,$100:null}}", id() );
- }
- };
-
- class DelInternalSplitPromoteRight : public Base {
- public:
- void run() {
- string ns = id().indexNamespace();
- ArtificialTree::setTree( "{$10:null,$20:null,$30:null,$40:null,$50:null,$60:null,$70:null,$80:null,$90:null,$100$10:{$95:{$93:null},_:{$97:null}}}", id() );
- int unused = 0;
- ASSERT_EQUALS( 13, bt()->fullValidate( dl(), order(), &unused, true ) );
- ASSERT_EQUALS( 0, unused );
- ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
- BSONObj k = BSON( "" << bigNumString( 0x100, 0x10 ) );
-// dump();
- ASSERT( unindex( k ) );
-// dump();
- ASSERT_EQUALS( 12, bt()->fullValidate( dl(), order(), &unused, true ) );
- ASSERT_EQUALS( 0, unused );
- ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
- ArtificialTree::checkStructure( "{$80:{$10:null,$20:null,$30:null,$40:null,$50:null,$60:null,$70:null},_:{$90:null,$97:{$93:null,$95:null}}}", id() );
- }
- };
-
- class All : public Suite {
- public:
- All() : Suite( "btree" ) {
- }
+#define BtreeBucket BtreeBucket<V0>
+#define btree btree<V0>
+#define btreemod btreemod<V0>
+#define testName "btree"
+#define BTVERSION 0
+namespace BtreeTests0 {
+ #include "btreetests.inl"
+}
- void setupTests() {
- add< Create >();
- add< SimpleInsertDelete >();
- add< SplitRightHeavyBucket >();
- add< SplitLeftHeavyBucket >();
- add< MissingLocate >();
- add< MissingLocateMultiBucket >();
- add< SERVER983 >();
- add< DontReuseUnused >();
- add< PackUnused >();
- add< DontDropReferenceKey >();
- add< MergeBucketsLeft >();
- add< MergeBucketsRight >();
-// add< MergeBucketsHead >();
- add< MergeBucketsDontReplaceHead >();
- add< MergeBucketsDelInternal >();
- add< MergeBucketsRightNull >();
- add< DontMergeSingleBucket >();
- add< ParentMergeNonRightToLeft >();
- add< ParentMergeNonRightToRight >();
- add< CantMergeRightNoMerge >();
- add< CantMergeLeftNoMerge >();
- add< MergeOption >();
- add< ForceMergeLeft >();
- add< ForceMergeRight >();
- add< RecursiveMerge >();
- add< RecursiveMergeRightBucket >();
- add< RecursiveMergeDoubleRightBucket >();
- add< MergeSizeJustRightRight >();
- add< MergeSizeJustRightLeft >();
- add< MergeSizeRight >();
- add< MergeSizeLeft >();
- add< NoMergeBelowMarkRight >();
- add< NoMergeBelowMarkLeft >();
- add< MergeSizeRightTooBig >();
- add< MergeSizeLeftTooBig >();
- add< BalanceOneLeftToRight >();
- add< BalanceOneRightToLeft >();
- add< BalanceThreeLeftToRight >();
- add< BalanceThreeRightToLeft >();
- add< BalanceSingleParentKey >();
- add< PackEmpty >();
- add< PackedDataSizeEmpty >();
- add< BalanceSingleParentKeyPackParent >();
- add< BalanceSplitParent >();
- add< EvenRebalanceLeft >();
- add< EvenRebalanceLeftCusp >();
- add< EvenRebalanceRight >();
- add< EvenRebalanceRightCusp >();
- add< EvenRebalanceCenter >();
- add< OddRebalanceLeft >();
- add< OddRebalanceRight >();
- add< OddRebalanceCenter >();
- add< RebalanceEmptyRight >();
- add< RebalanceEmptyLeft >();
- add< NoMoveAtLowWaterMarkRight >();
- add< MoveBelowLowWaterMarkRight >();
- add< NoMoveAtLowWaterMarkLeft >();
- add< MoveBelowLowWaterMarkLeft >();
- add< PreferBalanceLeft >();
- add< PreferBalanceRight >();
- add< RecursiveMergeThenBalance >();
- add< MergeRightEmpty >();
- add< MergeMinRightEmpty >();
- add< MergeLeftEmpty >();
- add< MergeMinLeftEmpty >();
- add< BalanceRightEmpty >();
- add< BalanceLeftEmpty >();
- add< DelEmptyNoNeighbors >();
- add< DelEmptyEmptyNeighbors >();
- add< DelInternal >();
- add< DelInternalReplaceWithUnused >();
- add< DelInternalReplaceRight >();
- add< DelInternalPromoteKey >();
- add< DelInternalPromoteRightKey >();
- add< DelInternalReplacementPrevNonNull >();
- add< DelInternalReplacementNextNonNull >();
- add< DelInternalSplitPromoteLeft >();
- add< DelInternalSplitPromoteRight >();
- }
- } myall;
+#undef BtreeBucket
+#undef btree
+#undef btreemod
+#define BtreeBucket BtreeBucket<V1>
+#define btree btree<V1>
+#define btreemod btreemod<V1>
+#undef testName
+#define testName "btree1"
+#undef BTVERSION
+#define BTVERSION 1
+namespace BtreeTests1 {
+ #include "btreetests.inl"
}
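
The hunk above compiles the entire test body twice: the macros map the unversioned BtreeBucket/btree/btreemod names onto the V0 and V1 template instantiations, and each #include of btreetests.inl is wrapped in its own namespace so the two expansions do not collide. Below is a minimal, self-contained sketch of that pattern under stated assumptions: Bucket<Version>, SHARED_TEST_BODY, and the main() driver are hypothetical stand-ins rather than the actual MongoDB types, and a macro body replaces the .inl include only so the sketch fits in one translation unit.

    #include <cassert>
    #include <string>

    // Hypothetical stand-in for the real BtreeBucket<V0>/BtreeBucket<V1> templates.
    template <int Version>
    struct Bucket {
        static int version() { return Version; }
    };

    // Shared test body. In the real tree this lives in dbtests/btreetests.inl and is
    // pulled in with #include; a macro is used here only to keep the sketch in one file.
    #define SHARED_TEST_BODY                                   \
        inline void run() {                                    \
            assert( BUCKET::version() == BTVERSION );          \
            std::string name( testName );  /* suite name */    \
            (void)name;                                        \
        }

    #define BUCKET Bucket<0>
    #define BTVERSION 0
    #define testName "btree"
    namespace BtreeTests0 { SHARED_TEST_BODY }
    #undef BUCKET
    #undef BTVERSION
    #undef testName

    #define BUCKET Bucket<1>
    #define BTVERSION 1
    #define testName "btree1"
    namespace BtreeTests1 { SHARED_TEST_BODY }
    #undef BUCKET
    #undef BTVERSION
    #undef testName

    int main() {
        BtreeTests0::run();   // body compiled against the version-0 bucket
        BtreeTests1::run();   // same body, recompiled against version 1
        return 0;
    }

The design choice mirrored here is that the test source stays byte-for-byte identical for both on-disk index versions; only the surrounding macro definitions and namespace change, so any divergence in behavior between V0 and V1 shows up as a failure in one suite but not the other.
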
diff --git a/dbtests/btreetests.inl b/dbtests/btreetests.inl
new file mode 100644
index 0000000..ed9f0ea
--- /dev/null
+++ b/dbtests/btreetests.inl
@@ -0,0 +1,1702 @@
+ typedef BtreeBucket::_KeyNode _KeyNode;
+
+ const char* ns() {
+ return "unittests.btreetests";
+ }
+
+ // dummy, valid record loc
+ const DiskLoc recordLoc() {
+ return DiskLoc( 0, 2 );
+ }
+
+ class Ensure {
+ public:
+ Ensure() {
+ _c.ensureIndex( ns(), BSON( "a" << 1 ), false, "testIndex",
+                false, // with two index versions in play, it is unclear whether cache=true would interfere, so pass false
+ false, BTVERSION);
+ }
+ ~Ensure() {
+ _c.dropCollection( ns() );
+ //_c.dropIndexes( ns() );
+ }
+ private:
+ DBDirectClient _c;
+ };
+
+ class Base : public Ensure {
+ public:
+ Base() :
+ _context( ns() ) {
+ {
+ bool f = false;
+ assert( f = true );
+ massert( 10402 , "assert is misdefined", f);
+ }
+ }
+ virtual ~Base() {}
+ static string bigNumString( long long n, int len = 800 ) {
+ char sub[17];
+ sprintf( sub, "%.16llx", n );
+ string val( len, ' ' );
+ for( int i = 0; i < len; ++i ) {
+ val[ i ] = sub[ i % 16 ];
+ }
+ return val;
+ }
+ protected:
+ const BtreeBucket* bt() {
+ return id().head.btree();
+ }
+ DiskLoc dl() {
+ return id().head;
+ }
+ IndexDetails& id() {
+ NamespaceDetails *nsd = nsdetails( ns() );
+ assert( nsd );
+ return nsd->idx( 1 );
+ }
+ void checkValid( int nKeys ) {
+ ASSERT( bt() );
+ ASSERT( bt()->isHead() );
+ bt()->assertValid( order(), true );
+ ASSERT_EQUALS( nKeys, bt()->fullValidate( dl(), order(), 0, true ) );
+ }
+ void dump() {
+ bt()->dumpTree( dl(), order() );
+ }
+ void insert( BSONObj &key ) {
+ const BtreeBucket *b = bt();
+ b->bt_insert( dl(), recordLoc(), key, Ordering::make(order()), true, id(), true );
+ getDur().commitIfNeeded();
+ }
+ bool unindex( BSONObj &key ) {
+ getDur().commitIfNeeded();
+ return bt()->unindex( dl(), id(), key, recordLoc() );
+ }
+ static BSONObj simpleKey( char c, int n = 1 ) {
+ BSONObjBuilder builder;
+ string val( n, c );
+ builder.append( "a", val );
+ return builder.obj();
+ }
+ void locate( BSONObj &key, int expectedPos,
+ bool expectedFound, const DiskLoc &expectedLocation,
+ int direction = 1 ) {
+ int pos;
+ bool found;
+ DiskLoc location =
+ bt()->locate( id(), dl(), key, Ordering::make(order()), pos, found, recordLoc(), direction );
+ ASSERT_EQUALS( expectedFound, found );
+ ASSERT( location == expectedLocation );
+ ASSERT_EQUALS( expectedPos, pos );
+ }
+ bool present( BSONObj &key, int direction ) {
+ int pos;
+ bool found;
+ bt()->locate( id(), dl(), key, Ordering::make(order()), pos, found, recordLoc(), direction );
+ return found;
+ }
+ BSONObj order() {
+ return id().keyPattern();
+ }
+ const BtreeBucket *child( const BtreeBucket *b, int i ) {
+ assert( i <= b->nKeys() );
+ DiskLoc d;
+ if ( i == b->nKeys() ) {
+ d = b->getNextChild();
+ }
+ else {
+ d = b->keyNode( i ).prevChildBucket;
+ }
+ assert( !d.isNull() );
+ return d.btree();
+ }
+ void checkKey( char i ) {
+ stringstream ss;
+ ss << i;
+ checkKey( ss.str() );
+ }
+ void checkKey( const string &k ) {
+ BSONObj key = BSON( "" << k );
+// log() << "key: " << key << endl;
+ ASSERT( present( key, 1 ) );
+ ASSERT( present( key, -1 ) );
+ }
+ private:
+ dblock lk_;
+ Client::Context _context;
+ };
+
+ class Create : public Base {
+ public:
+ void run() {
+ checkValid( 0 );
+ }
+ };
+
+ class SimpleInsertDelete : public Base {
+ public:
+ void run() {
+ BSONObj key = simpleKey( 'z' );
+ insert( key );
+
+ checkValid( 1 );
+ locate( key, 0, true, dl() );
+
+ unindex( key );
+
+ checkValid( 0 );
+ locate( key, 0, false, DiskLoc() );
+ }
+ };
+
+ class SplitUnevenBucketBase : public Base {
+ public:
+ virtual ~SplitUnevenBucketBase() {}
+ void run() {
+ for ( int i = 0; i < 10; ++i ) {
+ BSONObj shortKey = simpleKey( shortToken( i ), 1 );
+ insert( shortKey );
+ BSONObj longKey = simpleKey( longToken( i ), 800 );
+ insert( longKey );
+ }
+ checkValid( 20 );
+ ASSERT_EQUALS( 1, bt()->nKeys() );
+ checkSplit();
+ }
+ protected:
+ virtual char shortToken( int i ) const = 0;
+ virtual char longToken( int i ) const = 0;
+ static char leftToken( int i ) {
+ return 'a' + i;
+ }
+ static char rightToken( int i ) {
+ return 'z' - i;
+ }
+ virtual void checkSplit() = 0;
+ };
+
+ class SplitRightHeavyBucket : public SplitUnevenBucketBase {
+ private:
+ virtual char shortToken( int i ) const {
+ return leftToken( i );
+ }
+ virtual char longToken( int i ) const {
+ return rightToken( i );
+ }
+ virtual void checkSplit() {
+ ASSERT_EQUALS( 15, child( bt(), 0 )->nKeys() );
+ ASSERT_EQUALS( 4, child( bt(), 1 )->nKeys() );
+ }
+ };
+
+ class SplitLeftHeavyBucket : public SplitUnevenBucketBase {
+ private:
+ virtual char shortToken( int i ) const {
+ return rightToken( i );
+ }
+ virtual char longToken( int i ) const {
+ return leftToken( i );
+ }
+ virtual void checkSplit() {
+ ASSERT_EQUALS( 4, child( bt(), 0 )->nKeys() );
+ ASSERT_EQUALS( 15, child( bt(), 1 )->nKeys() );
+ }
+ };
+
+ class MissingLocate : public Base {
+ public:
+ void run() {
+ for ( int i = 0; i < 3; ++i ) {
+ BSONObj k = simpleKey( 'b' + 2 * i );
+ insert( k );
+ }
+
+ locate( 1, 'a', 'b', dl() );
+ locate( 1, 'c', 'd', dl() );
+ locate( 1, 'e', 'f', dl() );
+ locate( 1, 'g', 'g' + 1, DiskLoc() ); // of course, 'h' isn't in the index.
+
+ // old behavior
+ // locate( -1, 'a', 'b', dl() );
+ // locate( -1, 'c', 'd', dl() );
+ // locate( -1, 'e', 'f', dl() );
+ // locate( -1, 'g', 'f', dl() );
+
+ locate( -1, 'a', 'a' - 1, DiskLoc() ); // of course, 'a' - 1 isn't in the index
+ locate( -1, 'c', 'b', dl() );
+ locate( -1, 'e', 'd', dl() );
+ locate( -1, 'g', 'f', dl() );
+ }
+ private:
+ void locate( int direction, char token, char expectedMatch,
+ DiskLoc expectedLocation ) {
+ BSONObj k = simpleKey( token );
+ int expectedPos = ( expectedMatch - 'b' ) / 2;
+ Base::locate( k, expectedPos, false, expectedLocation, direction );
+ }
+ };
+
+ class MissingLocateMultiBucket : public Base {
+ public:
+ void run() {
+ for ( int i = 0; i < 8; ++i ) {
+ insert( i );
+ }
+ insert( 9 );
+ insert( 8 );
+// dump();
+ BSONObj straddle = key( 'i' );
+ locate( straddle, 0, false, dl(), 1 );
+ straddle = key( 'k' );
+ locate( straddle, 0, false, dl(), -1 );
+ }
+ private:
+ BSONObj key( char c ) {
+ return simpleKey( c, 800 );
+ }
+ void insert( int i ) {
+ BSONObj k = key( 'b' + 2 * i );
+ Base::insert( k );
+ }
+ };
+
+ class SERVER983 : public Base {
+ public:
+ void run() {
+ for ( int i = 0; i < 10; ++i ) {
+ insert( i );
+ }
+// dump();
+ BSONObj straddle = key( 'o' );
+ locate( straddle, 0, false, dl(), 1 );
+ straddle = key( 'q' );
+ locate( straddle, 0, false, dl(), -1 );
+ }
+ private:
+ BSONObj key( char c ) {
+ return simpleKey( c, 800 );
+ }
+ void insert( int i ) {
+ BSONObj k = key( 'b' + 2 * i );
+ Base::insert( k );
+ }
+ };
+
+ class DontReuseUnused : public Base {
+ public:
+ void run() {
+ for ( int i = 0; i < 10; ++i ) {
+ insert( i );
+ }
+// dump();
+ BSONObj root = key( 'p' );
+ unindex( root );
+ Base::insert( root );
+ locate( root, 0, true, bt()->getNextChild(), 1 );
+ }
+ private:
+ BSONObj key( char c ) {
+ return simpleKey( c, 800 );
+ }
+ void insert( int i ) {
+ BSONObj k = key( 'b' + 2 * i );
+ Base::insert( k );
+ }
+ };
+
+ class PackUnused : public Base {
+ public:
+ void run() {
+ for ( long long i = 0; i < 1000000; i += 1000 ) {
+ insert( i );
+ }
+ string orig, after;
+ {
+ stringstream ss;
+ bt()->shape( ss );
+ orig = ss.str();
+ }
+ vector< string > toDel;
+ vector< string > other;
+ BSONObjBuilder start;
+ start.appendMinKey( "a" );
+ BSONObjBuilder end;
+ end.appendMaxKey( "a" );
+ auto_ptr< BtreeCursor > c( BtreeCursor::make( nsdetails( ns() ), 1, id(), start.done(), end.done(), false, 1 ) );
+ while( c->ok() ) {
+ if ( c->curKeyHasChild() ) {
+ toDel.push_back( c->currKey().firstElement().valuestr() );
+ }
+ else {
+ other.push_back( c->currKey().firstElement().valuestr() );
+ }
+ c->advance();
+ }
+ ASSERT( toDel.size() > 0 );
+ for( vector< string >::const_iterator i = toDel.begin(); i != toDel.end(); ++i ) {
+ BSONObj o = BSON( "a" << *i );
+ unindex( o );
+ }
+ ASSERT( other.size() > 0 );
+ for( vector< string >::const_iterator i = other.begin(); i != other.end(); ++i ) {
+ BSONObj o = BSON( "a" << *i );
+ unindex( o );
+ }
+
+ long long unused = 0;
+ ASSERT_EQUALS( 0, bt()->fullValidate( dl(), order(), &unused, true ) );
+
+ for ( long long i = 50000; i < 50100; ++i ) {
+ insert( i );
+ }
+
+ long long unused2 = 0;
+ ASSERT_EQUALS( 100, bt()->fullValidate( dl(), order(), &unused2, true ) );
+
+// log() << "old unused: " << unused << ", new unused: " << unused2 << endl;
+//
+ ASSERT( unused2 <= unused );
+ }
+ protected:
+ void insert( long long n ) {
+ string val = bigNumString( n );
+ BSONObj k = BSON( "a" << val );
+ Base::insert( k );
+ }
+ };
+
+ class DontDropReferenceKey : public PackUnused {
+ public:
+ void run() {
+ // with 80 keys the root node is full
+ for ( long long i = 0; i < 80; i += 1 ) {
+ insert( i );
+ }
+
+ BSONObjBuilder start;
+ start.appendMinKey( "a" );
+ BSONObjBuilder end;
+ end.appendMaxKey( "a" );
+ BSONObj l = bt()->keyNode( 0 ).key.toBson();
+ string toInsert;
+ auto_ptr< BtreeCursor > c( BtreeCursor::make( nsdetails( ns() ), 1, id(), start.done(), end.done(), false, 1 ) );
+ while( c->ok() ) {
+ if ( c->currKey().woCompare( l ) > 0 ) {
+ toInsert = c->currKey().firstElement().valuestr();
+ break;
+ }
+ c->advance();
+ }
+ // too much work to try to make this happen through inserts and deletes
+ // we are intentionally manipulating the btree bucket directly here
+ BtreeBucket::Loc* L = const_cast< BtreeBucket::Loc* >( &bt()->keyNode( 1 ).prevChildBucket );
+ getDur().writing(L)->Null();
+ getDur().writingInt( const_cast< BtreeBucket::Loc& >( bt()->keyNode( 1 ).recordLoc ).GETOFS() ) |= 1; // make unused
+ BSONObj k = BSON( "a" << toInsert );
+ Base::insert( k );
+ }
+ };
+
+ class MergeBuckets : public Base {
+ public:
+ virtual ~MergeBuckets() {}
+ void run() {
+ for ( int i = 0; i < 10; ++i ) {
+ insert( i );
+ }
+// dump();
+ string ns = id().indexNamespace();
+ ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
+ int expectedCount = 10 - unindexKeys();
+// dump();
+ ASSERT_EQUALS( 1, nsdetails( ns.c_str() )->stats.nrecords );
+ long long unused = 0;
+ ASSERT_EQUALS( expectedCount, bt()->fullValidate( dl(), order(), &unused, true ) );
+ ASSERT_EQUALS( 0, unused );
+ }
+ protected:
+ BSONObj key( char c ) {
+ return simpleKey( c, 800 );
+ }
+ void insert( int i ) {
+ BSONObj k = key( 'b' + 2 * i );
+ Base::insert( k );
+ }
+ virtual int unindexKeys() = 0;
+ };
+
+ class MergeBucketsLeft : public MergeBuckets {
+ virtual int unindexKeys() {
+ BSONObj k = key( 'b' );
+ unindex( k );
+ k = key( 'b' + 2 );
+ unindex( k );
+ k = key( 'b' + 4 );
+ unindex( k );
+ k = key( 'b' + 6 );
+ unindex( k );
+ return 4;
+ }
+ };
+
+ class MergeBucketsRight : public MergeBuckets {
+ virtual int unindexKeys() {
+ BSONObj k = key( 'b' + 2 * 9 );
+ unindex( k );
+ return 1;
+ }
+ };
+
+ // deleting from head won't coalesce yet
+// class MergeBucketsHead : public MergeBuckets {
+// virtual BSONObj unindexKey() { return key( 'p' ); }
+// };
+
+ class MergeBucketsDontReplaceHead : public Base {
+ public:
+ void run() {
+ for ( int i = 0; i < 18; ++i ) {
+ insert( i );
+ }
+ // dump();
+ string ns = id().indexNamespace();
+ ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
+ BSONObj k = key( 'a' + 17 );
+ unindex( k );
+ ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
+ long long unused = 0;
+ ASSERT_EQUALS( 17, bt()->fullValidate( dl(), order(), &unused, true ) );
+ ASSERT_EQUALS( 0, unused );
+ }
+ private:
+ BSONObj key( char c ) {
+ return simpleKey( c, 800 );
+ }
+ void insert( int i ) {
+ BSONObj k = key( 'a' + i );
+ Base::insert( k );
+ }
+ };
+
+ // Tool to construct custom trees for tests.
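+ // makeTree()/checkStructure() below use a small JSON spec language: each
+ // field name is a key in the bucket; an Object value is that key's left
+ // (prev) child subtree, a null value means no left child, and the special
+ // "_" field names the bucket's rightmost (next) child. Field names of the
+ // form $<hex> (optionally $<hex>$<len>) expand to padded number strings via
+ // expectedKey() and Base::bigNumString().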
+ class ArtificialTree : public BtreeBucket {
+ public:
+ void push( const BSONObj &key, const DiskLoc &child ) {
+ KeyOwned k(key);
+ pushBack( dummyDiskLoc(), k, Ordering::make( BSON( "a" << 1 ) ), child );
+ }
+ void setNext( const DiskLoc &child ) {
+ nextChild = child;
+ }
+ static DiskLoc make( IndexDetails &id ) {
+ DiskLoc ret = addBucket( id );
+ is( ret )->init();
+ getDur().commitIfNeeded();
+ return ret;
+ }
+ static ArtificialTree *is( const DiskLoc &l ) {
+ return static_cast< ArtificialTree * >( l.btreemod() );
+ }
+ static DiskLoc makeTree( const string &spec, IndexDetails &id ) {
+ return makeTree( fromjson( spec ), id );
+ }
+ static DiskLoc makeTree( const BSONObj &spec, IndexDetails &id ) {
+ DiskLoc node = make( id );
+ ArtificialTree *n = ArtificialTree::is( node );
+ BSONObjIterator i( spec );
+ while( i.more() ) {
+ BSONElement e = i.next();
+ DiskLoc child;
+ if ( e.type() == Object ) {
+ child = makeTree( e.embeddedObject(), id );
+ }
+ if ( e.fieldName() == string( "_" ) ) {
+ n->setNext( child );
+ }
+ else {
+ n->push( BSON( "" << expectedKey( e.fieldName() ) ), child );
+ }
+ }
+ n->fixParentPtrs( node );
+ return node;
+ }
+ static void setTree( const string &spec, IndexDetails &id ) {
+ set( makeTree( spec, id ), id );
+ }
+ static void set( const DiskLoc &l, IndexDetails &id ) {
+ ArtificialTree::is( id.head )->deallocBucket( id.head, id );
+ getDur().writingDiskLoc(id.head) = l;
+ }
+ static string expectedKey( const char *spec ) {
+ if ( spec[ 0 ] != '$' ) {
+ return spec;
+ }
+ char *endPtr;
+ // parsing a long long is a pain, so just allow shorter keys for now
+ unsigned long long num = strtol( spec + 1, &endPtr, 16 );
+ int len = 800;
+ if( *endPtr == '$' ) {
+ len = strtol( endPtr + 1, 0, 16 );
+ }
+ return Base::bigNumString( num, len );
+ }
+ static void checkStructure( const BSONObj &spec, const IndexDetails &id, const DiskLoc node ) {
+ ArtificialTree *n = ArtificialTree::is( node );
+ BSONObjIterator j( spec );
+ for( int i = 0; i < n->n; ++i ) {
+ ASSERT( j.more() );
+ BSONElement e = j.next();
+ KeyNode kn = n->keyNode( i );
+ string expected = expectedKey( e.fieldName() );
+ ASSERT( present( id, BSON( "" << expected ), 1 ) );
+ ASSERT( present( id, BSON( "" << expected ), -1 ) );
+ ASSERT_EQUALS( expected, kn.key.toBson().firstElement().valuestr() );
+ if ( kn.prevChildBucket.isNull() ) {
+ ASSERT( e.type() == jstNULL );
+ }
+ else {
+ ASSERT( e.type() == Object );
+ checkStructure( e.embeddedObject(), id, kn.prevChildBucket );
+ }
+ }
+ if ( n->nextChild.isNull() ) {
+ // maybe should allow '_' field with null value?
+ ASSERT( !j.more() );
+ }
+ else {
+ BSONElement e = j.next();
+ ASSERT_EQUALS( string( "_" ), e.fieldName() );
+ ASSERT( e.type() == Object );
+ checkStructure( e.embeddedObject(), id, n->nextChild );
+ }
+ ASSERT( !j.more() );
+ }
+ static void checkStructure( const string &spec, const IndexDetails &id ) {
+ checkStructure( fromjson( spec ), id, id.head );
+ }
+ static bool present( const IndexDetails &id, const BSONObj &key, int direction ) {
+ int pos;
+ bool found;
+ id.head.btree()->locate( id, id.head, key, Ordering::make(id.keyPattern()), pos, found, recordLoc(), direction );
+ return found;
+ }
+ int headerSize() const { return BtreeBucket::headerSize(); }
+ int packedDataSize( int pos ) const { return BtreeBucket::packedDataSize( pos ); }
+ void fixParentPtrs( const DiskLoc &thisLoc ) { BtreeBucket::fixParentPtrs( thisLoc ); }
+ void forcePack() {
+ topSize += emptySize;
+ emptySize = 0;
+ setNotPacked();
+ }
+ private:
+ DiskLoc dummyDiskLoc() const { return DiskLoc( 0, 2 ); }
+ };
+
+ /**
+ * We could probably refactor the following tests, but it's easier to debug
+ * them in the present state.
+ */
+
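+ // Each case below builds a known tree shape with ArtificialTree::setTree(),
+ // deletes a single key via unindex(), and then verifies the key count, the
+ // bucket (record) count, and the resulting shape with checkStructure().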
+ class MergeBucketsDelInternal : public Base {
+ public:
+ void run() {
+ ArtificialTree::setTree( "{d:{b:{a:null},bb:null,_:{c:null}},_:{f:{e:null},_:{g:null}}}", id() );
+// dump();
+ string ns = id().indexNamespace();
+ ASSERT_EQUALS( 8, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 7, nsdetails( ns.c_str() )->stats.nrecords );
+
+ BSONObj k = BSON( "" << "bb" );
+ assert( unindex( k ) );
+// dump();
+ ASSERT_EQUALS( 7, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 5, nsdetails( ns.c_str() )->stats.nrecords );
+ ArtificialTree::checkStructure( "{b:{a:null},d:{c:null},f:{e:null},_:{g:null}}", id() );
+ }
+ };
+
+ class MergeBucketsRightNull : public Base {
+ public:
+ void run() {
+ ArtificialTree::setTree( "{d:{b:{a:null},bb:null,cc:{c:null}},_:{f:{e:null},h:{g:null}}}", id() );
+// dump();
+ string ns = id().indexNamespace();
+ ASSERT_EQUALS( 10, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 7, nsdetails( ns.c_str() )->stats.nrecords );
+
+ BSONObj k = BSON( "" << "bb" );
+ assert( unindex( k ) );
+// dump();
+ ASSERT_EQUALS( 9, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 5, nsdetails( ns.c_str() )->stats.nrecords );
+ ArtificialTree::checkStructure( "{b:{a:null},cc:{c:null},d:null,f:{e:null},h:{g:null}}", id() );
+ }
+ };
+
+ // not yet handling this case
+ class DontMergeSingleBucket : public Base {
+ public:
+ void run() {
+ ArtificialTree::setTree( "{d:{b:{a:null},c:null}}", id() );
+// dump();
+ string ns = id().indexNamespace();
+ ASSERT_EQUALS( 4, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
+ BSONObj k = BSON( "" << "c" );
+ assert( unindex( k ) );
+// dump();
+ ASSERT_EQUALS( 3, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
+ ArtificialTree::checkStructure( "{d:{b:{a:null}}}", id() );
+ }
+ };
+
+ class ParentMergeNonRightToLeft : public Base {
+ public:
+ void run() {
+ ArtificialTree::setTree( "{d:{b:{a:null},bb:null,cc:{c:null}},i:{f:{e:null},h:{g:null}}}", id() );
+// dump();
+ string ns = id().indexNamespace();
+ ASSERT_EQUALS( 11, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 7, nsdetails( ns.c_str() )->stats.nrecords );
+
+ BSONObj k = BSON( "" << "bb" );
+ assert( unindex( k ) );
+// dump();
+ ASSERT_EQUALS( 10, bt()->fullValidate( dl(), order(), 0, true ) );
+ // child does not currently replace parent in this case
+ ASSERT_EQUALS( 6, nsdetails( ns.c_str() )->stats.nrecords );
+ ArtificialTree::checkStructure( "{i:{b:{a:null},cc:{c:null},d:null,f:{e:null},h:{g:null}}}", id() );
+ }
+ };
+
+ class ParentMergeNonRightToRight : public Base {
+ public:
+ void run() {
+ ArtificialTree::setTree( "{d:{b:{a:null},cc:{c:null}},i:{f:{e:null},ff:null,h:{g:null}}}", id() );
+// dump();
+ string ns = id().indexNamespace();
+ ASSERT_EQUALS( 11, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 7, nsdetails( ns.c_str() )->stats.nrecords );
+
+ BSONObj k = BSON( "" << "ff" );
+ assert( unindex( k ) );
+// dump();
+ ASSERT_EQUALS( 10, bt()->fullValidate( dl(), order(), 0, true ) );
+ // child does not currently replace parent in this case
+ ASSERT_EQUALS( 6, nsdetails( ns.c_str() )->stats.nrecords );
+ ArtificialTree::checkStructure( "{i:{b:{a:null},cc:{c:null},d:null,f:{e:null},h:{g:null}}}", id() );
+ }
+ };
+
+ class CantMergeRightNoMerge : public Base {
+ public:
+ void run() {
+ ArtificialTree::setTree( "{d:{b:{a:null},bb:null,cc:{c:null}},dd:null,_:{f:{e:null},h:{g:null}}}", id() );
+// dump();
+ string ns = id().indexNamespace();
+ ASSERT_EQUALS( 11, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 7, nsdetails( ns.c_str() )->stats.nrecords );
+
+ BSONObj k = BSON( "" << "bb" );
+ assert( unindex( k ) );
+// dump();
+ ASSERT_EQUALS( 10, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 7, nsdetails( ns.c_str() )->stats.nrecords );
+ ArtificialTree::checkStructure( "{d:{b:{a:null},cc:{c:null}},dd:null,_:{f:{e:null},h:{g:null}}}", id() );
+ }
+ };
+
+ class CantMergeLeftNoMerge : public Base {
+ public:
+ void run() {
+ ArtificialTree::setTree( "{c:{b:{a:null}},d:null,_:{f:{e:null},g:null}}", id() );
+// dump();
+ string ns = id().indexNamespace();
+ ASSERT_EQUALS( 7, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 5, nsdetails( ns.c_str() )->stats.nrecords );
+
+ BSONObj k = BSON( "" << "g" );
+ assert( unindex( k ) );
+// dump();
+ ASSERT_EQUALS( 6, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 5, nsdetails( ns.c_str() )->stats.nrecords );
+ ArtificialTree::checkStructure( "{c:{b:{a:null}},d:null,_:{f:{e:null}}}", id() );
+ }
+ };
+
+ class MergeOption : public Base {
+ public:
+ void run() {
+ ArtificialTree::setTree( "{c:{b:{a:null}},f:{e:{d:null},ee:null},_:{h:{g:null}}}", id() );
+// dump();
+ string ns = id().indexNamespace();
+ ASSERT_EQUALS( 9, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 7, nsdetails( ns.c_str() )->stats.nrecords );
+
+ BSONObj k = BSON( "" << "ee" );
+ assert( unindex( k ) );
+// dump();
+ ASSERT_EQUALS( 8, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 6, nsdetails( ns.c_str() )->stats.nrecords );
+ ArtificialTree::checkStructure( "{c:{b:{a:null}},_:{e:{d:null},f:null,h:{g:null}}}", id() );
+ }
+ };
+
+ class ForceMergeLeft : public Base {
+ public:
+ void run() {
+ ArtificialTree::setTree( "{c:{b:{a:null}},f:{e:{d:null},ee:null},ff:null,_:{h:{g:null}}}", id() );
+// dump();
+ string ns = id().indexNamespace();
+ ASSERT_EQUALS( 10, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 7, nsdetails( ns.c_str() )->stats.nrecords );
+
+ BSONObj k = BSON( "" << "ee" );
+ assert( unindex( k ) );
+// dump();
+ ASSERT_EQUALS( 9, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 6, nsdetails( ns.c_str() )->stats.nrecords );
+ ArtificialTree::checkStructure( "{f:{b:{a:null},c:null,e:{d:null}},ff:null,_:{h:{g:null}}}", id() );
+ }
+ };
+
+ class ForceMergeRight : public Base {
+ public:
+ void run() {
+ ArtificialTree::setTree( "{c:{b:{a:null}},cc:null,f:{e:{d:null},ee:null},_:{h:{g:null}}}", id() );
+// dump();
+ string ns = id().indexNamespace();
+ ASSERT_EQUALS( 10, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 7, nsdetails( ns.c_str() )->stats.nrecords );
+
+ BSONObj k = BSON( "" << "ee" );
+ assert( unindex( k ) );
+// dump();
+ ASSERT_EQUALS( 9, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 6, nsdetails( ns.c_str() )->stats.nrecords );
+ ArtificialTree::checkStructure( "{c:{b:{a:null}},cc:null,_:{e:{d:null},f:null,h:{g:null}}}", id() );
+ }
+ };
+
+ class RecursiveMerge : public Base {
+ public:
+ void run() {
+ ArtificialTree::setTree( "{h:{e:{b:{a:null},c:null,d:null},g:{f:null}},j:{i:null}}", id() );
+// dump();
+ string ns = id().indexNamespace();
+ ASSERT_EQUALS( 10, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 6, nsdetails( ns.c_str() )->stats.nrecords );
+
+ BSONObj k = BSON( "" << "c" );
+ assert( unindex( k ) );
+// dump();
+ ASSERT_EQUALS( 9, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
+ // height is not currently reduced in this case
+ ArtificialTree::checkStructure( "{j:{g:{b:{a:null},d:null,e:null,f:null},h:null,i:null}}", id() );
+ }
+ };
+
+ class RecursiveMergeRightBucket : public Base {
+ public:
+ void run() {
+ ArtificialTree::setTree( "{h:{e:{b:{a:null},c:null,d:null},g:{f:null}},_:{i:null}}", id() );
+// dump();
+ string ns = id().indexNamespace();
+ ASSERT_EQUALS( 9, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 6, nsdetails( ns.c_str() )->stats.nrecords );
+
+ BSONObj k = BSON( "" << "c" );
+ assert( unindex( k ) );
+// dump();
+ ASSERT_EQUALS( 8, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
+ ArtificialTree::checkStructure( "{g:{b:{a:null},d:null,e:null,f:null},h:null,i:null}", id() );
+ }
+ };
+
+ class RecursiveMergeDoubleRightBucket : public Base {
+ public:
+ void run() {
+ ArtificialTree::setTree( "{h:{e:{b:{a:null},c:null,d:null},_:{f:null}},_:{i:null}}", id() );
+ string ns = id().indexNamespace();
+ ASSERT_EQUALS( 8, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 6, nsdetails( ns.c_str() )->stats.nrecords );
+ BSONObj k = BSON( "" << "c" );
+ assert( unindex( k ) );
+ long long keyCount = bt()->fullValidate( dl(), order(), 0, true );
+ ASSERT_EQUALS( 7, keyCount );
+ ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
+ // no recursion currently in this case
+ ArtificialTree::checkStructure( "{h:{b:{a:null},d:null,e:null,f:null},_:{i:null}}", id() );
+ }
+ };
+
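+ // MergeSizeBase builds a root with a single separator key and two children
+ // filled to exact packed byte sizes, then deletes delKeys() one by one.
+ // Subclasses tune leftSize()/rightSize() around BtreeBucket::lowWaterMark()
+ // and bodySize() to probe the byte boundary at which the final delete
+ // triggers a merge (1 bucket left) versus no merge (still 3 buckets).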
+ class MergeSizeBase : public Base {
+ public:
+ MergeSizeBase() : _count() {}
+ virtual ~MergeSizeBase() {}
+ void run() {
+ typedef ArtificialTree A;
+ A::set( A::make( id() ), id() );
+ A* root = A::is( dl() );
+ DiskLoc left = A::make( id() );
+ root->push( biggestKey( 'm' ), left );
+ _count = 1;
+ A* l = A::is( left );
+ DiskLoc right = A::make( id() );
+ root->setNext( right );
+ A* r = A::is( right );
+ root->fixParentPtrs( dl() );
+
+ //ASSERT_EQUALS( bigSize(), bigSize() / 2 * 2 );
+ fillToExactSize( l, leftSize(), 'a' );
+ fillToExactSize( r, rightSize(), 'n' );
+ ASSERT( leftAdditional() <= 2 );
+ if ( leftAdditional() >= 2 ) {
+ l->push( bigKey( 'k' ), DiskLoc() );
+ }
+ if ( leftAdditional() >= 1 ) {
+ l->push( bigKey( 'l' ), DiskLoc() );
+ }
+ ASSERT( rightAdditional() <= 2 );
+ if ( rightAdditional() >= 2 ) {
+ r->push( bigKey( 'y' ), DiskLoc() );
+ }
+ if ( rightAdditional() >= 1 ) {
+ r->push( bigKey( 'z' ), DiskLoc() );
+ }
+ _count += leftAdditional() + rightAdditional();
+
+// dump();
+
+ initCheck();
+ string ns = id().indexNamespace();
+ const char *keys = delKeys();
+ for( const char *i = keys; *i; ++i ) {
+ long long unused = 0;
+ ASSERT_EQUALS( _count, bt()->fullValidate( dl(), order(), &unused, true ) );
+ ASSERT_EQUALS( 0, unused );
+ ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
+ BSONObj k = bigKey( *i );
+ unindex( k );
+// dump();
+ --_count;
+ }
+
+// dump();
+
+ long long unused = 0;
+ ASSERT_EQUALS( _count, bt()->fullValidate( dl(), order(), &unused, true ) );
+ ASSERT_EQUALS( 0, unused );
+ validate();
+ if ( !merge() ) {
+ ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
+ }
+ else {
+ ASSERT_EQUALS( 1, nsdetails( ns.c_str() )->stats.nrecords );
+ }
+ }
+ protected:
+ virtual int leftAdditional() const { return 2; }
+ virtual int rightAdditional() const { return 2; }
+ virtual void initCheck() {}
+ virtual void validate() {}
+ virtual int leftSize() const = 0;
+ virtual int rightSize() const = 0;
+ virtual const char * delKeys() const { return "klyz"; }
+ virtual bool merge() const { return true; }
+ void fillToExactSize( ArtificialTree *t, int targetSize, char startKey ) {
+ int size = 0;
+ while( size < targetSize ) {
+ int space = targetSize - size;
+ int nextSize = space - sizeof( _KeyNode );
+ assert( nextSize > 0 );
+ BSONObj newKey = key( startKey++, nextSize );
+ t->push( newKey, DiskLoc() );
+ size += BtreeBucket::KeyOwned(newKey).dataSize() + sizeof( _KeyNode );
+ _count += 1;
+ }
+ if( t->packedDataSize( 0 ) != targetSize ) {
+ ASSERT_EQUALS( t->packedDataSize( 0 ), targetSize );
+ }
+ }
+ static BSONObj key( char a, int size ) {
+ if ( size >= bigSize() ) {
+ return bigKey( a );
+ }
+ return simpleKey( a, size - ( bigSize() - 801 ) );
+ }
+ static BSONObj bigKey( char a ) {
+ return simpleKey( a, 801 );
+ }
+ static BSONObj biggestKey( char a ) {
+ int size = BtreeBucket::getKeyMax() - bigSize() + 801;
+ return simpleKey( a, size );
+ }
+ static int bigSize() {
+ return BtreeBucket::KeyOwned(bigKey( 'a' )).dataSize();
+ }
+ static int biggestSize() {
+ return BtreeBucket::KeyOwned(biggestKey( 'a' )).dataSize();
+ }
+ int _count;
+ };
+
+ class MergeSizeJustRightRight : public MergeSizeBase {
+ protected:
+ virtual int rightSize() const { return BtreeBucket::lowWaterMark() - 1; }
+ virtual int leftSize() const { return BtreeBucket::bodySize() - biggestSize() - sizeof( _KeyNode ) - ( BtreeBucket::lowWaterMark() - 1 ); }
+ };
+
+ class MergeSizeJustRightLeft : public MergeSizeBase {
+ protected:
+ virtual int leftSize() const { return BtreeBucket::lowWaterMark() - 1; }
+ virtual int rightSize() const { return BtreeBucket::bodySize() - biggestSize() - sizeof( _KeyNode ) - ( BtreeBucket::lowWaterMark() - 1 ); }
+ virtual const char * delKeys() const { return "yzkl"; }
+ };
+
+ class MergeSizeRight : public MergeSizeJustRightRight {
+ virtual int rightSize() const { return MergeSizeJustRightRight::rightSize() - 1; }
+ virtual int leftSize() const { return MergeSizeJustRightRight::leftSize() + 1; }
+ };
+
+ class MergeSizeLeft : public MergeSizeJustRightLeft {
+ virtual int rightSize() const { return MergeSizeJustRightLeft::rightSize() + 1; }
+ virtual int leftSize() const { return MergeSizeJustRightLeft::leftSize() - 1; }
+ };
+
+ class NoMergeBelowMarkRight : public MergeSizeJustRightRight {
+ virtual int rightSize() const { return MergeSizeJustRightRight::rightSize() + 1; }
+ virtual int leftSize() const { return MergeSizeJustRightRight::leftSize() - 1; }
+ virtual bool merge() const { return false; }
+ };
+
+ class NoMergeBelowMarkLeft : public MergeSizeJustRightLeft {
+ virtual int rightSize() const { return MergeSizeJustRightLeft::rightSize() - 1; }
+ virtual int leftSize() const { return MergeSizeJustRightLeft::leftSize() + 1; }
+ virtual bool merge() const { return false; }
+ };
+
+ class MergeSizeRightTooBig : public MergeSizeJustRightLeft {
+ virtual int rightSize() const { return MergeSizeJustRightLeft::rightSize() + 1; }
+ virtual bool merge() const { return false; }
+ };
+
+ class MergeSizeLeftTooBig : public MergeSizeJustRightRight {
+ virtual int leftSize() const { return MergeSizeJustRightRight::leftSize() + 1; }
+ virtual bool merge() const { return false; }
+ };
+
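+ // In the Balance* cases below, deleting a key leaves a bucket under-full;
+ // rather than merging with a sibling, keys are shifted from the fuller
+ // sibling through the shared parent key (in BalanceSplitParent this even
+ // forces the parent to split). Compare the before/after tree specs.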
+ class BalanceOneLeftToRight : public Base {
+ public:
+ void run() {
+ string ns = id().indexNamespace();
+ ArtificialTree::setTree( "{$10:{$1:null,$2:null,$3:null,$4:null,$5:null,$6:null},b:{$20:null,$30:null,$40:null,$50:null,a:null},_:{c:null}}", id() );
+ ASSERT_EQUALS( 14, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
+ BSONObj k = BSON( "" << bigNumString( 0x40 ) );
+// dump();
+ ASSERT( unindex( k ) );
+// dump();
+ ASSERT_EQUALS( 13, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
+ ArtificialTree::checkStructure( "{$6:{$1:null,$2:null,$3:null,$4:null,$5:null},b:{$10:null,$20:null,$30:null,$50:null,a:null},_:{c:null}}", id() );
+ }
+ };
+
+ class BalanceOneRightToLeft : public Base {
+ public:
+ void run() {
+ string ns = id().indexNamespace();
+ ArtificialTree::setTree( "{$10:{$1:null,$2:null,$3:null,$4:null},b:{$20:null,$30:null,$40:null,$50:null,$60:null,$70:null},_:{c:null}}", id() );
+ ASSERT_EQUALS( 13, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
+ BSONObj k = BSON( "" << bigNumString( 0x3 ) );
+// dump();
+ ASSERT( unindex( k ) );
+// dump();
+ ASSERT_EQUALS( 12, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
+ ArtificialTree::checkStructure( "{$20:{$1:null,$2:null,$4:null,$10:null},b:{$30:null,$40:null,$50:null,$60:null,$70:null},_:{c:null}}", id() );
+ }
+ };
+
+ class BalanceThreeLeftToRight : public Base {
+ public:
+ void run() {
+ string ns = id().indexNamespace();
+ ArtificialTree::setTree( "{$20:{$1:{$0:null},$3:{$2:null},$5:{$4:null},$7:{$6:null},$9:{$8:null},$11:{$10:null},$13:{$12:null},_:{$14:null}},b:{$30:null,$40:{$35:null},$50:{$45:null}},_:{c:null}}", id() );
+ ASSERT_EQUALS( 23, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 14, nsdetails( ns.c_str() )->stats.nrecords );
+ BSONObj k = BSON( "" << bigNumString( 0x30 ) );
+ // dump();
+ ASSERT( unindex( k ) );
+ // dump();
+ ASSERT_EQUALS( 22, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 14, nsdetails( ns.c_str() )->stats.nrecords );
+ ArtificialTree::checkStructure( "{$9:{$1:{$0:null},$3:{$2:null},$5:{$4:null},$7:{$6:null},_:{$8:null}},b:{$11:{$10:null},$13:{$12:null},$20:{$14:null},$40:{$35:null},$50:{$45:null}},_:{c:null}}", id() );
+ }
+ };
+
+ class BalanceThreeRightToLeft : public Base {
+ public:
+ void run() {
+ string ns = id().indexNamespace();
+ ArtificialTree::setTree( "{$20:{$1:{$0:null},$3:{$2:null},$5:null,_:{$14:null}},b:{$30:{$25:null},$40:{$35:null},$50:{$45:null},$60:{$55:null},$70:{$65:null},$80:{$75:null},$90:{$85:null},$100:{$95:null}},_:{c:null}}", id() );
+ ASSERT_EQUALS( 25, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 15, nsdetails( ns.c_str() )->stats.nrecords );
+ BSONObj k = BSON( "" << bigNumString( 0x5 ) );
+// dump();
+ ASSERT( unindex( k ) );
+// dump();
+ ASSERT_EQUALS( 24, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 15, nsdetails( ns.c_str() )->stats.nrecords );
+ ArtificialTree::checkStructure( "{$50:{$1:{$0:null},$3:{$2:null},$20:{$14:null},$30:{$25:null},$40:{$35:null},_:{$45:null}},b:{$60:{$55:null},$70:{$65:null},$80:{$75:null},$90:{$85:null},$100:{$95:null}},_:{c:null}}", id() );
+ }
+ };
+
+ class BalanceSingleParentKey : public Base {
+ public:
+ void run() {
+ string ns = id().indexNamespace();
+ ArtificialTree::setTree( "{$10:{$1:null,$2:null,$3:null,$4:null,$5:null,$6:null},_:{$20:null,$30:null,$40:null,$50:null,a:null}}", id() );
+ ASSERT_EQUALS( 12, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
+ BSONObj k = BSON( "" << bigNumString( 0x40 ) );
+// dump();
+ ASSERT( unindex( k ) );
+// dump();
+ ASSERT_EQUALS( 11, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
+ ArtificialTree::checkStructure( "{$6:{$1:null,$2:null,$3:null,$4:null,$5:null},_:{$10:null,$20:null,$30:null,$50:null,a:null}}", id() );
+ }
+ };
+
+ class PackEmpty : public Base {
+ public:
+ void run() {
+ string ns = id().indexNamespace();
+ ArtificialTree::setTree( "{a:null}", id() );
+ BSONObj k = BSON( "" << "a" );
+ ASSERT( unindex( k ) );
+ ArtificialTree *t = ArtificialTree::is( dl() );
+ t->forcePack();
+ Tester::checkEmpty( t, id() );
+ }
+ class Tester : public ArtificialTree {
+ public:
+ static void checkEmpty( ArtificialTree *a, const IndexDetails &id ) {
+ Tester *t = static_cast< Tester * >( a );
+ ASSERT_EQUALS( 0, t->n );
+ ASSERT( !( t->flags & Packed ) );
+ Ordering o = Ordering::make( id.keyPattern() );
+ int zero = 0;
+ t->_packReadyForMod( o, zero );
+ ASSERT_EQUALS( 0, t->n );
+ ASSERT_EQUALS( 0, t->topSize );
+ ASSERT_EQUALS( BtreeBucket::bodySize(), t->emptySize );
+ ASSERT( t->flags & Packed );
+ }
+ };
+ };
+
+ class PackedDataSizeEmpty : public Base {
+ public:
+ void run() {
+ string ns = id().indexNamespace();
+ ArtificialTree::setTree( "{a:null}", id() );
+ BSONObj k = BSON( "" << "a" );
+ ASSERT( unindex( k ) );
+ ArtificialTree *t = ArtificialTree::is( dl() );
+ t->forcePack();
+ Tester::checkEmpty( t, id() );
+ }
+ class Tester : public ArtificialTree {
+ public:
+ static void checkEmpty( ArtificialTree *a, const IndexDetails &id ) {
+ Tester *t = static_cast< Tester * >( a );
+ ASSERT_EQUALS( 0, t->n );
+ ASSERT( !( t->flags & Packed ) );
+ int zero = 0;
+ ASSERT_EQUALS( 0, t->packedDataSize( zero ) );
+ ASSERT( !( t->flags & Packed ) );
+ }
+ };
+ };
+
+ class BalanceSingleParentKeyPackParent : public Base {
+ public:
+ void run() {
+ string ns = id().indexNamespace();
+ ArtificialTree::setTree( "{$10:{$1:null,$2:null,$3:null,$4:null,$5:null,$6:null},_:{$20:null,$30:null,$40:null,$50:null,a:null}}", id() );
+ ASSERT_EQUALS( 12, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
+ // put the parent bucket into an unpacked state so the balancing step has to repack it
+ ArtificialTree::is( dl() )->forcePack();
+ BSONObj k = BSON( "" << bigNumString( 0x40 ) );
+// dump();
+ ASSERT( unindex( k ) );
+// dump();
+ ASSERT_EQUALS( 11, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
+ ArtificialTree::checkStructure( "{$6:{$1:null,$2:null,$3:null,$4:null,$5:null},_:{$10:null,$20:null,$30:null,$50:null,a:null}}", id() );
+ }
+ };
+
+ class BalanceSplitParent : public Base {
+ public:
+ void run() {
+ string ns = id().indexNamespace();
+ ArtificialTree::setTree( "{$10$10:{$1:null,$2:null,$3:null,$4:null},$100:{$20:null,$30:null,$40:null,$50:null,$60:null,$70:null,$80:null},$200:null,$300:null,$400:null,$500:null,$600:null,$700:null,$800:null,$900:null,_:{c:null}}", id() );
+ ASSERT_EQUALS( 22, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
+ BSONObj k = BSON( "" << bigNumString( 0x3 ) );
+// dump();
+ ASSERT( unindex( k ) );
+// dump();
+ ASSERT_EQUALS( 21, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 6, nsdetails( ns.c_str() )->stats.nrecords );
+ ArtificialTree::checkStructure( "{$500:{$30:{$1:null,$2:null,$4:null,$10$10:null,$20:null},$100:{$40:null,$50:null,$60:null,$70:null,$80:null},$200:null,$300:null,$400:null},_:{$600:null,$700:null,$800:null,$900:null,_:{c:null}}}", id() );
+ }
+ };
+
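+ // These cases exercise rebalancedSeparatorPos(): given the root's two
+ // children (some keys padded to 0x31f characters via the $n$31f spec
+ // syntax), they check which key position would become the new separator if
+ // the children's keys were redistributed.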
+ class RebalancedSeparatorBase : public Base {
+ public:
+ void run() {
+ ArtificialTree::setTree( treeSpec(), id() );
+ modTree();
+ Tester::checkSeparator( id(), expectedSeparator() );
+ }
+ virtual string treeSpec() const = 0;
+ virtual int expectedSeparator() const = 0;
+ virtual void modTree() {}
+ struct Tester : public ArtificialTree {
+ static void checkSeparator( const IndexDetails& id, int expected ) {
+ ASSERT_EQUALS( expected, static_cast< Tester * >( id.head.btreemod() )->rebalancedSeparatorPos( id.head, 0 ) );
+ }
+ };
+ };
+
+ class EvenRebalanceLeft : public RebalancedSeparatorBase {
+ virtual string treeSpec() const { return "{$7:{$1:null,$2$31f:null,$3:null,$4$31f:null,$5:null,$6:null},_:{$8:null,$9:null,$10$31e:null}}"; }
+ virtual int expectedSeparator() const { return 4; }
+ };
+
+ class EvenRebalanceLeftCusp : public RebalancedSeparatorBase {
+ virtual string treeSpec() const { return "{$6:{$1:null,$2$31f:null,$3:null,$4$31f:null,$5:null},_:{$7:null,$8:null,$9$31e:null,$10:null}}"; }
+ virtual int expectedSeparator() const { return 4; }
+ };
+
+ class EvenRebalanceRight : public RebalancedSeparatorBase {
+ virtual string treeSpec() const { return "{$3:{$1:null,$2$31f:null},_:{$4$31f:null,$5:null,$6:null,$7:null,$8$31e:null,$9:null,$10:null}}"; }
+ virtual int expectedSeparator() const { return 4; }
+ };
+
+ class EvenRebalanceRightCusp : public RebalancedSeparatorBase {
+ virtual string treeSpec() const { return "{$4$31f:{$1:null,$2$31f:null,$3:null},_:{$5:null,$6:null,$7$31e:null,$8:null,$9:null,$10:null}}"; }
+ virtual int expectedSeparator() const { return 4; }
+ };
+
+ class EvenRebalanceCenter : public RebalancedSeparatorBase {
+ virtual string treeSpec() const { return "{$5:{$1:null,$2$31f:null,$3:null,$4$31f:null},_:{$6:null,$7$31e:null,$8:null,$9:null,$10:null}}"; }
+ virtual int expectedSeparator() const { return 4; }
+ };
+
+ class OddRebalanceLeft : public RebalancedSeparatorBase {
+ virtual string treeSpec() const { return "{$6$31f:{$1:null,$2:null,$3:null,$4:null,$5:null},_:{$7:null,$8:null,$9:null,$10:null}}"; }
+ virtual int expectedSeparator() const { return 4; }
+ };
+
+ class OddRebalanceRight : public RebalancedSeparatorBase {
+ virtual string treeSpec() const { return "{$4:{$1:null,$2:null,$3:null},_:{$5:null,$6:null,$7:null,$8$31f:null,$9:null,$10:null}}"; }
+ virtual int expectedSeparator() const { return 4; }
+ };
+
+ class OddRebalanceCenter : public RebalancedSeparatorBase {
+ virtual string treeSpec() const { return "{$5:{$1:null,$2:null,$3:null,$4:null},_:{$6:null,$7:null,$8:null,$9:null,$10$31f:null}}"; }
+ virtual int expectedSeparator() const { return 4; }
+ };
+
+ class RebalanceEmptyRight : public RebalancedSeparatorBase {
+ virtual string treeSpec() const { return "{$a:{$1:null,$2:null,$3:null,$4:null,$5:null,$6:null,$7:null,$8:null,$9:null},_:{$b:null}}"; }
+ virtual void modTree() {
+ BSONObj k = BSON( "" << bigNumString( 0xb ) );
+ ASSERT( unindex( k ) );
+ }
+ virtual int expectedSeparator() const { return 4; }
+ };
+
+ class RebalanceEmptyLeft : public RebalancedSeparatorBase {
+ virtual string treeSpec() const { return "{$a:{$1:null},_:{$11:null,$12:null,$13:null,$14:null,$15:null,$16:null,$17:null,$18:null,$19:null}}"; }
+ virtual void modTree() {
+ BSONObj k = BSON( "" << bigNumString( 0x1 ) );
+ ASSERT( unindex( k ) );
+ }
+ virtual int expectedSeparator() const { return 4; }
+ };
+
+ class NoMoveAtLowWaterMarkRight : public MergeSizeJustRightRight {
+ virtual int rightSize() const { return MergeSizeJustRightRight::rightSize() + 1; }
+ virtual void initCheck() { _oldTop = bt()->keyNode( 0 ).key.toBson(); }
+ virtual void validate() { ASSERT_EQUALS( _oldTop, bt()->keyNode( 0 ).key.toBson() ); }
+ virtual bool merge() const { return false; }
+ protected:
+ BSONObj _oldTop;
+ };
+
+ class MoveBelowLowWaterMarkRight : public NoMoveAtLowWaterMarkRight {
+ virtual int rightSize() const { return MergeSizeJustRightRight::rightSize(); }
+ virtual int leftSize() const { return MergeSizeJustRightRight::leftSize() + 1; }
+ // different top means we rebalanced
+ virtual void validate() { ASSERT( !( _oldTop == bt()->keyNode( 0 ).key.toBson() ) ); }
+ };
+
+ class NoMoveAtLowWaterMarkLeft : public MergeSizeJustRightLeft {
+ virtual int leftSize() const { return MergeSizeJustRightLeft::leftSize() + 1; }
+ virtual void initCheck() { _oldTop = bt()->keyNode( 0 ).key.toBson(); }
+ virtual void validate() { ASSERT_EQUALS( _oldTop, bt()->keyNode( 0 ).key.toBson() ); }
+ virtual bool merge() const { return false; }
+ protected:
+ BSONObj _oldTop;
+ };
+
+ class MoveBelowLowWaterMarkLeft : public NoMoveAtLowWaterMarkLeft {
+ virtual int leftSize() const { return MergeSizeJustRightLeft::leftSize(); }
+ virtual int rightSize() const { return MergeSizeJustRightLeft::rightSize() + 1; }
+ // different top means we rebalanced
+ virtual void validate() { ASSERT( !( _oldTop == bt()->keyNode( 0 ).key.toBson() ) ); }
+ };
+
+ class PreferBalanceLeft : public Base {
+ public:
+ void run() {
+ string ns = id().indexNamespace();
+ ArtificialTree::setTree( "{$10:{$1:null,$2:null,$3:null,$4:null,$5:null,$6:null},$20:{$11:null,$12:null,$13:null,$14:null},_:{$30:null}}", id() );
+ ASSERT_EQUALS( 13, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
+ BSONObj k = BSON( "" << bigNumString( 0x12 ) );
+// dump();
+ ASSERT( unindex( k ) );
+// dump();
+ ASSERT_EQUALS( 12, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
+ ArtificialTree::checkStructure( "{$5:{$1:null,$2:null,$3:null,$4:null},$20:{$6:null,$10:null,$11:null,$13:null,$14:null},_:{$30:null}}", id() );
+ }
+ };
+
+ class PreferBalanceRight : public Base {
+ public:
+ void run() {
+ string ns = id().indexNamespace();
+ ArtificialTree::setTree( "{$10:{$1:null},$20:{$11:null,$12:null,$13:null,$14:null},_:{$31:null,$32:null,$33:null,$34:null,$35:null,$36:null}}", id() );
+ ASSERT_EQUALS( 13, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
+ BSONObj k = BSON( "" << bigNumString( 0x12 ) );
+ // dump();
+ ASSERT( unindex( k ) );
+ // dump();
+ ASSERT_EQUALS( 12, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
+ ArtificialTree::checkStructure( "{$10:{$1:null},$31:{$11:null,$13:null,$14:null,$20:null},_:{$32:null,$33:null,$34:null,$35:null,$36:null}}", id() );
+ }
+ };
+
+ class RecursiveMergeThenBalance : public Base {
+ public:
+ void run() {
+ string ns = id().indexNamespace();
+ ArtificialTree::setTree( "{$10:{$5:{$1:null,$2:null},$8:{$6:null,$7:null}},_:{$20:null,$30:null,$40:null,$50:null,$60:null,$70:null,$80:null,$90:null}}", id() );
+ ASSERT_EQUALS( 15, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 5, nsdetails( ns.c_str() )->stats.nrecords );
+ BSONObj k = BSON( "" << bigNumString( 0x7 ) );
+ // dump();
+ ASSERT( unindex( k ) );
+ // dump();
+ ASSERT_EQUALS( 14, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
+ ArtificialTree::checkStructure( "{$40:{$8:{$1:null,$2:null,$5:null,$6:null},$10:null,$20:null,$30:null},_:{$50:null,$60:null,$70:null,$80:null,$90:null}}", id() );
+ }
+ };
+
+ class MergeRightEmpty : public MergeSizeBase {
+ protected:
+ virtual int rightAdditional() const { return 1; }
+ virtual int leftAdditional() const { return 1; }
+ virtual const char * delKeys() const { return "lz"; }
+ virtual int rightSize() const { return 0; }
+ virtual int leftSize() const { return BtreeBucket::bodySize() - biggestSize() - sizeof( _KeyNode ); }
+ };
+
+ class MergeMinRightEmpty : public MergeSizeBase {
+ protected:
+ virtual int rightAdditional() const { return 1; }
+ virtual int leftAdditional() const { return 0; }
+ virtual const char * delKeys() const { return "z"; }
+ virtual int rightSize() const { return 0; }
+ virtual int leftSize() const { return bigSize() + sizeof( _KeyNode ); }
+ };
+
+ class MergeLeftEmpty : public MergeSizeBase {
+ protected:
+ virtual int rightAdditional() const { return 1; }
+ virtual int leftAdditional() const { return 1; }
+ virtual const char * delKeys() const { return "zl"; }
+ virtual int leftSize() const { return 0; }
+ virtual int rightSize() const { return BtreeBucket::bodySize() - biggestSize() - sizeof( _KeyNode ); }
+ };
+
+ class MergeMinLeftEmpty : public MergeSizeBase {
+ protected:
+ virtual int leftAdditional() const { return 1; }
+ virtual int rightAdditional() const { return 0; }
+ virtual const char * delKeys() const { return "l"; }
+ virtual int leftSize() const { return 0; }
+ virtual int rightSize() const { return bigSize() + sizeof( _KeyNode ); }
+ };
+
+ class BalanceRightEmpty : public MergeRightEmpty {
+ protected:
+ virtual int leftSize() const { return BtreeBucket::bodySize() - biggestSize() - sizeof( _KeyNode ) + 1; }
+ virtual bool merge() const { return false; }
+ virtual void initCheck() { _oldTop = bt()->keyNode( 0 ).key.toBson(); }
+ virtual void validate() { ASSERT( !( _oldTop == bt()->keyNode( 0 ).key.toBson() ) ); }
+ private:
+ BSONObj _oldTop;
+ };
+
+ class BalanceLeftEmpty : public MergeLeftEmpty {
+ protected:
+ virtual int rightSize() const { return BtreeBucket::bodySize() - biggestSize() - sizeof( _KeyNode ) + 1; }
+ virtual bool merge() const { return false; }
+ virtual void initCheck() { _oldTop = bt()->keyNode( 0 ).key.toBson(); }
+ virtual void validate() { ASSERT( !( _oldTop == bt()->keyNode( 0 ).key.toBson() ) ); }
+ private:
+ BSONObj _oldTop;
+ };
+
+ class DelEmptyNoNeighbors : public Base {
+ public:
+ void run() {
+ string ns = id().indexNamespace();
+ ArtificialTree::setTree( "{b:{a:null}}", id() );
+ ASSERT_EQUALS( 2, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 2, nsdetails( ns.c_str() )->stats.nrecords );
+ BSONObj k = BSON( "" << "a" );
+ // dump();
+ ASSERT( unindex( k ) );
+ // dump();
+ ASSERT_EQUALS( 1, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 1, nsdetails( ns.c_str() )->stats.nrecords );
+ ArtificialTree::checkStructure( "{b:null}", id() );
+ }
+ };
+
+ class DelEmptyEmptyNeighbors : public Base {
+ public:
+ void run() {
+ string ns = id().indexNamespace();
+ ArtificialTree::setTree( "{a:null,c:{b:null},d:null}", id() );
+ ASSERT_EQUALS( 4, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 2, nsdetails( ns.c_str() )->stats.nrecords );
+ BSONObj k = BSON( "" << "b" );
+ // dump();
+ ASSERT( unindex( k ) );
+ // dump();
+ ASSERT_EQUALS( 3, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 1, nsdetails( ns.c_str() )->stats.nrecords );
+ ArtificialTree::checkStructure( "{a:null,c:null,d:null}", id() );
+ }
+ };
+
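+ // The DelInternal* cases delete a key that lives in an interior bucket.
+ // The btree either promotes an adjacent key from a child as a replacement
+ // or, when no suitable replacement is available, leaves the deleted key in
+ // place marked unused (low bit of its recordLoc offset set).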
+ class DelInternal : public Base {
+ public:
+ void run() {
+ string ns = id().indexNamespace();
+ ArtificialTree::setTree( "{a:null,c:{b:null},d:null}", id() );
+ long long unused = 0;
+ ASSERT_EQUALS( 4, bt()->fullValidate( dl(), order(), &unused, true ) );
+ ASSERT_EQUALS( 0, unused );
+ ASSERT_EQUALS( 2, nsdetails( ns.c_str() )->stats.nrecords );
+ BSONObj k = BSON( "" << "c" );
+// dump();
+ ASSERT( unindex( k ) );
+// dump();
+ ASSERT_EQUALS( 3, bt()->fullValidate( dl(), order(), &unused, true ) );
+ ASSERT_EQUALS( 0, unused );
+ ASSERT_EQUALS( 1, nsdetails( ns.c_str() )->stats.nrecords );
+ ArtificialTree::checkStructure( "{a:null,b:null,d:null}", id() );
+ }
+ };
+
+ class DelInternalReplaceWithUnused : public Base {
+ public:
+ void run() {
+ string ns = id().indexNamespace();
+ ArtificialTree::setTree( "{a:null,c:{b:null},d:null}", id() );
+ getDur().writingInt( const_cast< BtreeBucket::Loc& >( bt()->keyNode( 1 ).prevChildBucket.btree()->keyNode( 0 ).recordLoc ).GETOFS() ) |= 1; // make unused
+ long long unused = 0;
+ ASSERT_EQUALS( 3, bt()->fullValidate( dl(), order(), &unused, true ) );
+ ASSERT_EQUALS( 1, unused );
+ ASSERT_EQUALS( 2, nsdetails( ns.c_str() )->stats.nrecords );
+ BSONObj k = BSON( "" << "c" );
+// dump();
+ ASSERT( unindex( k ) );
+// dump();
+ unused = 0;
+ ASSERT_EQUALS( 2, bt()->fullValidate( dl(), order(), &unused, true ) );
+ ASSERT_EQUALS( 1, unused );
+ ASSERT_EQUALS( 1, nsdetails( ns.c_str() )->stats.nrecords );
+ // checkStructure() doesn't discriminate between used and unused keys
+ ArtificialTree::checkStructure( "{a:null,b:null,d:null}", id() );
+ }
+ };
+
+ class DelInternalReplaceRight : public Base {
+ public:
+ void run() {
+ string ns = id().indexNamespace();
+ ArtificialTree::setTree( "{a:null,_:{b:null}}", id() );
+ long long unused = 0;
+ ASSERT_EQUALS( 2, bt()->fullValidate( dl(), order(), &unused, true ) );
+ ASSERT_EQUALS( 0, unused );
+ ASSERT_EQUALS( 2, nsdetails( ns.c_str() )->stats.nrecords );
+ BSONObj k = BSON( "" << "a" );
+// dump();
+ ASSERT( unindex( k ) );
+// dump();
+ unused = 0;
+ ASSERT_EQUALS( 1, bt()->fullValidate( dl(), order(), &unused, true ) );
+ ASSERT_EQUALS( 0, unused );
+ ASSERT_EQUALS( 1, nsdetails( ns.c_str() )->stats.nrecords );
+ ArtificialTree::checkStructure( "{b:null}", id() );
+ }
+ };
+
+ class DelInternalPromoteKey : public Base {
+ public:
+ void run() {
+ string ns = id().indexNamespace();
+ ArtificialTree::setTree( "{a:null,y:{d:{c:{b:null}},_:{e:null}},z:null}", id() );
+ long long unused = 0;
+ ASSERT_EQUALS( 7, bt()->fullValidate( dl(), order(), &unused, true ) );
+ ASSERT_EQUALS( 0, unused );
+ ASSERT_EQUALS( 5, nsdetails( ns.c_str() )->stats.nrecords );
+ BSONObj k = BSON( "" << "y" );
+// dump();
+ ASSERT( unindex( k ) );
+// dump();
+ unused = 0;
+ ASSERT_EQUALS( 6, bt()->fullValidate( dl(), order(), &unused, true ) );
+ ASSERT_EQUALS( 0, unused );
+ ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
+ ArtificialTree::checkStructure( "{a:null,e:{c:{b:null},d:null},z:null}", id() );
+ }
+ };
+
+ class DelInternalPromoteRightKey : public Base {
+ public:
+ void run() {
+ string ns = id().indexNamespace();
+ ArtificialTree::setTree( "{a:null,_:{e:{c:null},_:{f:null}}}", id() );
+ long long unused = 0;
+ ASSERT_EQUALS( 4, bt()->fullValidate( dl(), order(), &unused, true ) );
+ ASSERT_EQUALS( 0, unused );
+ ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
+ BSONObj k = BSON( "" << "a" );
+// dump();
+ ASSERT( unindex( k ) );
+// dump();
+ unused = 0;
+ ASSERT_EQUALS( 3, bt()->fullValidate( dl(), order(), &unused, true ) );
+ ASSERT_EQUALS( 0, unused );
+ ASSERT_EQUALS( 2, nsdetails( ns.c_str() )->stats.nrecords );
+ ArtificialTree::checkStructure( "{c:null,_:{e:null,f:null}}", id() );
+ }
+ };
+
+ class DelInternalReplacementPrevNonNull : public Base {
+ public:
+ void run() {
+ string ns = id().indexNamespace();
+ ArtificialTree::setTree( "{a:null,d:{c:{b:null}},e:null}", id() );
+ long long unused = 0;
+ ASSERT_EQUALS( 5, bt()->fullValidate( dl(), order(), &unused, true ) );
+ ASSERT_EQUALS( 0, unused );
+ ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
+ BSONObj k = BSON( "" << "d" );
+ // dump();
+ ASSERT( unindex( k ) );
+ // dump();
+ ASSERT_EQUALS( 4, bt()->fullValidate( dl(), order(), &unused, true ) );
+ ASSERT_EQUALS( 1, unused );
+ ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
+ ArtificialTree::checkStructure( "{a:null,d:{c:{b:null}},e:null}", id() );
+ ASSERT( bt()->keyNode( 1 ).recordLoc.getOfs() & 1 ); // check 'unused' key
+ }
+ };
+
+ class DelInternalReplacementNextNonNull : public Base {
+ public:
+ void run() {
+ string ns = id().indexNamespace();
+ ArtificialTree::setTree( "{a:null,_:{c:null,_:{d:null}}}", id() );
+ long long unused = 0;
+ ASSERT_EQUALS( 3, bt()->fullValidate( dl(), order(), &unused, true ) );
+ ASSERT_EQUALS( 0, unused );
+ ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
+ BSONObj k = BSON( "" << "a" );
+ // dump();
+ ASSERT( unindex( k ) );
+ // dump();
+ ASSERT_EQUALS( 2, bt()->fullValidate( dl(), order(), &unused, true ) );
+ ASSERT_EQUALS( 1, unused );
+ ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
+ ArtificialTree::checkStructure( "{a:null,_:{c:null,_:{d:null}}}", id() );
+ ASSERT( bt()->keyNode( 0 ).recordLoc.getOfs() & 1 ); // check 'unused' key
+ }
+ };
+
+ class DelInternalSplitPromoteLeft : public Base {
+ public:
+ void run() {
+ string ns = id().indexNamespace();
+ ArtificialTree::setTree( "{$10:null,$20:null,$30$10:{$25:{$23:null},_:{$27:null}},$40:null,$50:null,$60:null,$70:null,$80:null,$90:null,$100:null}", id() );
+ long long unused = 0;
+ ASSERT_EQUALS( 13, bt()->fullValidate( dl(), order(), &unused, true ) );
+ ASSERT_EQUALS( 0, unused );
+ ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
+ BSONObj k = BSON( "" << bigNumString( 0x30, 0x10 ) );
+// dump();
+ ASSERT( unindex( k ) );
+// dump();
+ ASSERT_EQUALS( 12, bt()->fullValidate( dl(), order(), &unused, true ) );
+ ASSERT_EQUALS( 0, unused );
+ ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
+ ArtificialTree::checkStructure( "{$60:{$10:null,$20:null,$27:{$23:null,$25:null},$40:null,$50:null},_:{$70:null,$80:null,$90:null,$100:null}}", id() );
+ }
+ };
+
+ class DelInternalSplitPromoteRight : public Base {
+ public:
+ void run() {
+ string ns = id().indexNamespace();
+ ArtificialTree::setTree( "{$10:null,$20:null,$30:null,$40:null,$50:null,$60:null,$70:null,$80:null,$90:null,$100$10:{$95:{$93:null},_:{$97:null}}}", id() );
+ long long unused = 0;
+ ASSERT_EQUALS( 13, bt()->fullValidate( dl(), order(), &unused, true ) );
+ ASSERT_EQUALS( 0, unused );
+ ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
+ BSONObj k = BSON( "" << bigNumString( 0x100, 0x10 ) );
+// dump();
+ ASSERT( unindex( k ) );
+// dump();
+ ASSERT_EQUALS( 12, bt()->fullValidate( dl(), order(), &unused, true ) );
+ ASSERT_EQUALS( 0, unused );
+ ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
+ ArtificialTree::checkStructure( "{$80:{$10:null,$20:null,$30:null,$40:null,$50:null,$60:null,$70:null},_:{$90:null,$97:{$93:null,$95:null}}}", id() );
+ }
+ };
+
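+ // Regression check: 0.0 and -0.0 must be treated as the same index key, so
+ // updating b:1.0 to b:-0.0 should not effectively duplicate the zero key in
+ // the unique index (only one document should match b:0.0 afterwards).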
+ class SignedZeroDuplication : public Base {
+ public:
+ void run() {
+ ASSERT_EQUALS( 0.0, -0.0 );
+ DBDirectClient c;
+ c.ensureIndex( ns(), BSON( "b" << 1 ), true );
+ c.insert( ns(), BSON( "b" << 0.0 ) );
+ c.insert( ns(), BSON( "b" << 1.0 ) );
+ c.update( ns(), BSON( "b" << 1.0 ), BSON( "b" << -0.0 ) );
+ ASSERT_EQUALS( 1U, c.count( ns(), BSON( "b" << 0.0 ) ) );
+ }
+ };
+
+ class All : public Suite {
+ public:
+ All() : Suite( testName ) {
+ }
+
+ void setupTests() {
+ add< Create >();
+ add< SimpleInsertDelete >();
+ add< SplitRightHeavyBucket >();
+ add< SplitLeftHeavyBucket >();
+ add< MissingLocate >();
+ add< MissingLocateMultiBucket >();
+ add< SERVER983 >();
+ add< DontReuseUnused >();
+ add< PackUnused >();
+ add< DontDropReferenceKey >();
+ add< MergeBucketsLeft >();
+ add< MergeBucketsRight >();
+// add< MergeBucketsHead >();
+ add< MergeBucketsDontReplaceHead >();
+ add< MergeBucketsDelInternal >();
+ add< MergeBucketsRightNull >();
+ add< DontMergeSingleBucket >();
+ add< ParentMergeNonRightToLeft >();
+ add< ParentMergeNonRightToRight >();
+ add< CantMergeRightNoMerge >();
+ add< CantMergeLeftNoMerge >();
+ add< MergeOption >();
+ add< ForceMergeLeft >();
+ add< ForceMergeRight >();
+ add< RecursiveMerge >();
+ add< RecursiveMergeRightBucket >();
+ add< RecursiveMergeDoubleRightBucket >();
+ add< MergeSizeJustRightRight >();
+ add< MergeSizeJustRightLeft >();
+ add< MergeSizeRight >();
+ add< MergeSizeLeft >();
+ add< NoMergeBelowMarkRight >();
+ add< NoMergeBelowMarkLeft >();
+ add< MergeSizeRightTooBig >();
+ add< MergeSizeLeftTooBig >();
+ add< BalanceOneLeftToRight >();
+ add< BalanceOneRightToLeft >();
+ add< BalanceThreeLeftToRight >();
+ add< BalanceThreeRightToLeft >();
+ add< BalanceSingleParentKey >();
+ add< PackEmpty >();
+ add< PackedDataSizeEmpty >();
+ add< BalanceSingleParentKeyPackParent >();
+ add< BalanceSplitParent >();
+ add< EvenRebalanceLeft >();
+ add< EvenRebalanceLeftCusp >();
+ add< EvenRebalanceRight >();
+ add< EvenRebalanceRightCusp >();
+ add< EvenRebalanceCenter >();
+ add< OddRebalanceLeft >();
+ add< OddRebalanceRight >();
+ add< OddRebalanceCenter >();
+ add< RebalanceEmptyRight >();
+ add< RebalanceEmptyLeft >();
+ add< NoMoveAtLowWaterMarkRight >();
+ add< MoveBelowLowWaterMarkRight >();
+ add< NoMoveAtLowWaterMarkLeft >();
+ add< MoveBelowLowWaterMarkLeft >();
+ add< PreferBalanceLeft >();
+ add< PreferBalanceRight >();
+ add< RecursiveMergeThenBalance >();
+ add< MergeRightEmpty >();
+ add< MergeMinRightEmpty >();
+ add< MergeLeftEmpty >();
+ add< MergeMinLeftEmpty >();
+ add< BalanceRightEmpty >();
+ add< BalanceLeftEmpty >();
+ add< DelEmptyNoNeighbors >();
+ add< DelEmptyEmptyNeighbors >();
+ add< DelInternal >();
+ add< DelInternalReplaceWithUnused >();
+ add< DelInternalReplaceRight >();
+ add< DelInternalPromoteKey >();
+ add< DelInternalPromoteRightKey >();
+ add< DelInternalReplacementPrevNonNull >();
+ add< DelInternalReplacementNextNonNull >();
+ add< DelInternalSplitPromoteLeft >();
+ add< DelInternalSplitPromoteRight >();
+ add< SignedZeroDuplication >();
+ }
+ } myall;
diff --git a/dbtests/clienttests.cpp b/dbtests/clienttests.cpp
index f51b765..a64894b 100644
--- a/dbtests/clienttests.cpp
+++ b/dbtests/clienttests.cpp
@@ -156,7 +156,7 @@ namespace ClientTests {
public:
Create() : Base( "Create" ) {}
void run() {
- db.createCollection( "unittests.clienttests.create", 0, true );
+ db.createCollection( "unittests.clienttests.create", 4096, true );
BSONObj info;
ASSERT( db.runCommand( "unittests", BSON( "collstats" << "clienttests.create" ), info ) );
}
diff --git a/dbtests/cursortests.cpp b/dbtests/cursortests.cpp
index ddd7b03..cf66186 100644
--- a/dbtests/cursortests.cpp
+++ b/dbtests/cursortests.cpp
@@ -18,10 +18,10 @@
*/
#include "pch.h"
-#include "../db/db.h"
#include "../db/clientcursor.h"
#include "../db/instance.h"
#include "../db/btree.h"
+#include "../db/queryutil.h"
#include "dbtests.h"
namespace CursorTests {
@@ -33,11 +33,12 @@ namespace CursorTests {
class Base {
protected:
+ static const char *ns() { return "unittests.cursortests.Base"; }
FieldRangeVector *vec( int *vals, int len, int direction = 1 ) {
- FieldRangeSet s( "", BSON( "a" << 1 ) );
+ FieldRangeSet s( "", BSON( "a" << 1 ), true );
for( int i = 0; i < len; i += 2 ) {
_objs.push_back( BSON( "a" << BSON( "$gte" << vals[ i ] << "$lte" << vals[ i + 1 ] ) ) );
- FieldRangeSet s2( "", _objs.back() );
+ FieldRangeSet s2( "", _objs.back(), true );
if ( i == 0 ) {
s.range( "a" ) = s2.range( "a" );
}
@@ -45,8 +46,11 @@ namespace CursorTests {
s.range( "a" ) |= s2.range( "a" );
}
}
- return new FieldRangeVector( s, BSON( "a" << 1 ), direction );
+ // orphan idxSpec for this test
+ IndexSpec *idxSpec = new IndexSpec( BSON( "a" << 1 ) );
+ return new FieldRangeVector( s, *idxSpec, direction );
}
+ DBDirectClient _c;
private:
vector< BSONObj > _objs;
};
@@ -65,7 +69,8 @@ namespace CursorTests {
int v[] = { 1, 2, 4, 6 };
boost::shared_ptr< FieldRangeVector > frv( vec( v, 4 ) );
Client::Context ctx( ns );
- BtreeCursor c( nsdetails( ns ), 1, nsdetails( ns )->idx(1), frv, 1 );
+ scoped_ptr<BtreeCursor> _c( BtreeCursor::make( nsdetails( ns ), 1, nsdetails( ns )->idx(1), frv, 1 ) );
+ BtreeCursor &c = *_c.get();
ASSERT_EQUALS( "BtreeCursor a_1 multi", c.toString() );
double expected[] = { 1, 2, 4, 5, 6 };
for( int i = 0; i < 5; ++i ) {
@@ -93,7 +98,8 @@ namespace CursorTests {
int v[] = { -50, 2, 40, 60, 109, 200 };
boost::shared_ptr< FieldRangeVector > frv( vec( v, 6 ) );
Client::Context ctx( ns );
- BtreeCursor c( nsdetails( ns ), 1, nsdetails( ns )->idx(1), frv, 1 );
+ scoped_ptr<BtreeCursor> _c( BtreeCursor::make(nsdetails( ns ), 1, nsdetails( ns )->idx(1), frv, 1 ) );
+ BtreeCursor &c = *_c.get();
ASSERT_EQUALS( "BtreeCursor a_1 multi", c.toString() );
double expected[] = { 0, 1, 2, 109 };
for( int i = 0; i < 4; ++i ) {
@@ -119,7 +125,8 @@ namespace CursorTests {
int v[] = { 1, 2, 4, 6 };
boost::shared_ptr< FieldRangeVector > frv( vec( v, 4, -1 ) );
Client::Context ctx( ns );
- BtreeCursor c( nsdetails( ns ), 1, nsdetails( ns )->idx(1), frv, -1 );
+ scoped_ptr<BtreeCursor> _c( BtreeCursor::make( nsdetails( ns ), 1, nsdetails( ns )->idx(1), frv, -1 ) );
+ BtreeCursor& c = *_c.get();
ASSERT_EQUALS( "BtreeCursor a_1 reverse multi", c.toString() );
double expected[] = { 6, 5, 4, 2, 1 };
for( int i = 0; i < 5; ++i ) {
@@ -144,16 +151,23 @@ namespace CursorTests {
_c.insert( ns(), o );
}
void check( const BSONObj &spec ) {
- _c.ensureIndex( ns(), idx() );
+ {
+ BSONObj keypat = idx();
+ //cout << keypat.toString() << endl;
+ _c.ensureIndex( ns(), idx() );
+ }
+
Client::Context ctx( ns() );
- FieldRangeSet frs( ns(), spec );
- boost::shared_ptr< FieldRangeVector > frv( new FieldRangeVector( frs, idx(), direction() ) );
- BtreeCursor c( nsdetails( ns() ), 1, nsdetails( ns() )->idx( 1 ), frv, direction() );
+ FieldRangeSet frs( ns(), spec, true );
+ // orphan spec for this test.
+ IndexSpec *idxSpec = new IndexSpec( idx() );
+ boost::shared_ptr< FieldRangeVector > frv( new FieldRangeVector( frs, *idxSpec, direction() ) );
+ scoped_ptr<BtreeCursor> c( BtreeCursor::make( nsdetails( ns() ), 1, nsdetails( ns() )->idx( 1 ), frv, direction() ) );
Matcher m( spec );
int count = 0;
- while( c.ok() ) {
- ASSERT( m.matches( c.current() ) );
- c.advance();
+ while( c->ok() ) {
+ ASSERT( m.matches( c->current() ) );
+ c->advance();
++count;
}
int expectedCount = 0;
@@ -246,6 +260,29 @@ namespace CursorTests {
}
virtual BSONObj idx() const { return BSON( "a" << 1 << "b" << 1 ); }
};
+
+ class AbortImplicitScan : public Base {
+ public:
+ void run() {
+ dblock lk;
+ IndexSpec idx( BSON( "a" << 1 << "b" << 1 ) );
+ _c.ensureIndex( ns(), idx.keyPattern );
+ for( int i = 0; i < 300; ++i ) {
+ _c.insert( ns(), BSON( "a" << i << "b" << 5 ) );
+ }
+ FieldRangeSet frs( ns(), BSON( "b" << 3 ), true );
+ boost::shared_ptr<FieldRangeVector> frv( new FieldRangeVector( frs, idx, 1 ) );
+ Client::Context ctx( ns() );
+ scoped_ptr<BtreeCursor> c( BtreeCursor::make( nsdetails( ns() ), 1, nsdetails( ns() )->idx(1), frv, 1 ) );
+ int initialNscanned = c->nscanned();
+ ASSERT( initialNscanned < 200 );
+ ASSERT( c->ok() );
+ c->advance();
+ ASSERT( c->nscanned() > initialNscanned );
+ ASSERT( c->nscanned() < 200 );
+ ASSERT( c->ok() );
+ }
+ };
} // namespace BtreeCursorTests
@@ -262,6 +299,7 @@ namespace CursorTests {
add< BtreeCursorTests::EqIn >();
add< BtreeCursorTests::RangeEq >();
add< BtreeCursorTests::RangeIn >();
+ add< BtreeCursorTests::AbortImplicitScan >();
}
} myall;
} // namespace CursorTests
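
Note: across the cursor tests, BtreeCursor is no longer constructed on the stack; cursors come from the BtreeCursor::make() factory and are held in a scoped_ptr. FieldRangeSet gains a trailing bool argument, and FieldRangeVector is now built from an IndexSpec rather than a bare key pattern. A minimal sketch of the new construction pattern (illustrative only, using calls that appear in the tests above; include paths and setup such as Client::Context/dblock are indicative):

    // Sketch, not part of the patch.
    #include <boost/scoped_ptr.hpp>
    #include <boost/shared_ptr.hpp>
    #include "../db/btree.h"
    #include "../db/queryutil.h"
    using namespace mongo;

    void scanRange(const char* ns) {
        // trailing bool is the new FieldRangeSet argument introduced by this patch
        FieldRangeSet frs(ns, BSON("a" << BSON("$gte" << 1 << "$lte" << 6)), true);
        IndexSpec spec(BSON("a" << 1));
        boost::shared_ptr<FieldRangeVector> frv(new FieldRangeVector(frs, spec, 1));
        // the real tests also establish a Client::Context and hold the db lock here
        boost::scoped_ptr<BtreeCursor> c(BtreeCursor::make(nsdetails(ns), 1,
                                                           nsdetails(ns)->idx(1), frv, 1));
        while (c->ok())
            c->advance();
    }
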
diff --git a/dbtests/dbtests.cpp b/dbtests/dbtests.cpp
index 8ede08d..ac44edf 100644
--- a/dbtests/dbtests.cpp
+++ b/dbtests/dbtests.cpp
@@ -19,8 +19,11 @@
#include "pch.h"
#include "dbtests.h"
+#include "../util/unittest.h"
int main( int argc, char** argv ) {
static StaticObserver StaticObserver;
+ doPreServerStatupInits();
+ UnitTest::runTests();
return Suite::run(argc, argv, "/tmp/unittest");
}
diff --git a/dbtests/directclienttests.cpp b/dbtests/directclienttests.cpp
index 204bf92..860eb7e 100644
--- a/dbtests/directclienttests.cpp
+++ b/dbtests/directclienttests.cpp
@@ -18,12 +18,12 @@
*/
#include "pch.h"
-#include "../db/query.h"
+#include "../db/ops/query.h"
#include "../db/db.h"
#include "../db/instance.h"
#include "../db/json.h"
#include "../db/lasterror.h"
-#include "../db/update.h"
+#include "../db/ops/update.h"
#include "../util/timer.h"
#include "dbtests.h"
@@ -58,9 +58,9 @@ namespace DirectClientTests {
if( pass ) {
BSONObj info;
BSONObj cmd = BSON( "captrunc" << "b" << "n" << 1 << "inc" << true );
- cout << cmd.toString() << endl;
+ //cout << cmd.toString() << endl;
bool ok = client().runCommand("a", cmd, info);
- cout << info.toString() << endl;
+ //cout << info.toString() << endl;
assert(ok);
}
@@ -69,12 +69,35 @@ namespace DirectClientTests {
}
};
+ class InsertMany : ClientBase {
+ public:
+ virtual void run(){
+ vector<BSONObj> objs;
+ objs.push_back(BSON("_id" << 1));
+ objs.push_back(BSON("_id" << 1));
+ objs.push_back(BSON("_id" << 2));
+
+
+ client().dropCollection(ns);
+ client().insert(ns, objs);
+ ASSERT_EQUALS(client().getLastErrorDetailed()["code"].numberInt(), 11000);
+ ASSERT_EQUALS((int)client().count(ns), 1);
+
+ client().dropCollection(ns);
+ client().insert(ns, objs, InsertOption_ContinueOnError);
+ ASSERT_EQUALS(client().getLastErrorDetailed()["code"].numberInt(), 11000);
+ ASSERT_EQUALS((int)client().count(ns), 2);
+ }
+
+ };
+
class All : public Suite {
public:
All() : Suite( "directclient" ) {
}
void setupTests() {
add< Capped >();
+ add< InsertMany >();
}
} myall;
}
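
Note: the new InsertMany test pins down batch-insert error handling. With a plain insert, the batch stops at the first duplicate _id (getLastError code 11000) and only one document is stored; with InsertOption_ContinueOnError the duplicate-key error is still reported but the remaining valid documents are inserted. A sketch of the same call pattern (illustrative only, mirroring the test above):

    // Sketch, not part of the patch.
    #include <vector>
    #include "../client/dbclient.h"
    using namespace mongo;

    void continueOnErrorExample(DBClientBase& c, const char* ns) {
        std::vector<BSONObj> objs;
        objs.push_back(BSON("_id" << 1));
        objs.push_back(BSON("_id" << 1));   // duplicate key
        objs.push_back(BSON("_id" << 2));

        c.dropCollection(ns);
        c.insert(ns, objs);                                 // stops at the dup: count == 1
        c.dropCollection(ns);
        c.insert(ns, objs, InsertOption_ContinueOnError);   // keeps going: count == 2
    }
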
diff --git a/dbtests/framework.cpp b/dbtests/framework.cpp
index c92c8d6..95ed8b3 100644
--- a/dbtests/framework.cpp
+++ b/dbtests/framework.cpp
@@ -26,6 +26,7 @@
#include "framework.h"
#include "../util/file_allocator.h"
#include "../db/dur.h"
+#include "../util/background.h"
#ifndef _WIN32
#include <cxxabi.h>
@@ -78,7 +79,12 @@ namespace mongo {
Result * Result::cur = 0;
+ int minutesRunning = 0; // reset to 0 each time a new test starts
+ mutex minutesRunningMutex("minutesRunningMutex");
+ string currentTestName;
+
Result * Suite::run( const string& filter ) {
+ // set tlogLevel to -1 to suppress tlog() output in a test program
tlogLevel = -1;
log(1) << "\t about to setupTests" << endl;
@@ -107,6 +113,12 @@ namespace mongo {
stringstream err;
err << tc->getName() << "\t";
+ {
+ scoped_lock lk(minutesRunningMutex);
+ minutesRunning = 0;
+ currentTestName = tc->getName();
+ }
+
try {
tc->run();
passes = true;
@@ -146,6 +158,30 @@ namespace mongo {
<< options << "suite: run the specified test suite(s) only" << endl;
}
+ class TestWatchDog : public BackgroundJob {
+ public:
+ virtual string name() const { return "TestWatchDog"; }
+ virtual void run(){
+
+ while (true) {
+ sleepsecs(60);
+
+ scoped_lock lk(minutesRunningMutex);
+ minutesRunning++; //reset to 0 when new test starts
+
+ if (minutesRunning > 30){
+ log() << currentTestName << " has been running for more than 30 minutes. aborting." << endl;
+ ::abort();
+ }
+ else if (minutesRunning > 1){
+ warning() << currentTestName << " has been running for more than " << minutesRunning-1 << " minutes." << endl;
+ }
+ }
+ }
+ };
+
+ unsigned perfHist = 1;
+
int Suite::run( int argc , char** argv , string default_dbpath ) {
unsigned long long seed = time( 0 );
string dbpathSpec;
@@ -168,10 +204,12 @@ namespace mongo {
("dur", "enable journaling")
("nodur", "disable journaling (currently the default)")
("seed", po::value<unsigned long long>(&seed), "random number seed")
+ ("perfHist", po::value<unsigned>(&perfHist), "number of back runs of perf stats to display")
;
hidden_options.add_options()
("suites", po::value< vector<string> >(), "test suites to run")
+ ("nopreallocj", "disable journal prealloc")
;
positional_options.add("suites", -1);
@@ -201,13 +239,19 @@ namespace mongo {
return EXIT_CLEAN;
}
+ bool nodur = false;
if( params.count("nodur") ) {
+ nodur = true;
cmdLine.dur = false;
}
if( params.count("dur") || cmdLine.dur ) {
cmdLine.dur = true;
}
+ if( params.count("nopreallocj") ) {
+ cmdLine.preallocj = false;
+ }
+
if (params.count("debug") || params.count("verbose") ) {
logLevel = 1;
}
@@ -255,8 +299,18 @@ namespace mongo {
srand( (unsigned) seed );
printGitVersion();
printSysInfo();
+ DEV log() << "_DEBUG build" << endl;
+ if( sizeof(void*)==4 )
+ log() << "32bit" << endl;
log() << "random seed: " << seed << endl;
+ if( time(0) % 3 == 0 && !nodur ) {
+ cmdLine.dur = true;
+ log() << "****************" << endl;
+ log() << "running with journaling enabled to test that. dbtests will do this occasionally even if --dur is not specified." << endl;
+ log() << "****************" << endl;
+ }
+
FileAllocator::get()->start();
vector<string> suites;
@@ -272,10 +326,14 @@ namespace mongo {
dur::startup();
if( debug && cmdLine.dur ) {
- cout << "setting cmdLine.durOptions=8" << endl;
- cmdLine.durOptions = 8;
+ log() << "_DEBUG: automatically enabling cmdLine.durOptions=8 (DurParanoid)" << endl;
+ // this line was previously commented out, perhaps because it was too slow; re-enabled here:
+ cmdLine.durOptions |= 8;
}
+ TestWatchDog twd;
+ twd.go();
+
int ret = run(suites,filter);
#if !defined(_WIN32) && !defined(__sunos__)
@@ -315,8 +373,6 @@ namespace mongo {
Logstream::get().flush();
cout << "**************************************************" << endl;
- cout << "**************************************************" << endl;
- cout << "**************************************************" << endl;
int rc = 0;
@@ -386,4 +442,5 @@ namespace mongo {
}
void setupSignals( bool inFork ) {}
+
}
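
Note: the framework now runs a per-test watchdog. minutesRunning is zeroed under minutesRunningMutex whenever a test starts, a background thread increments it once a minute, a warning is logged after the first minute, and the whole test binary aborts once a single test has run for more than 30 minutes. The same pattern, sketched with standard C++ threads instead of the in-tree BackgroundJob class (illustrative only):

    // Sketch, not part of the patch; the patch itself uses mongo::BackgroundJob.
    #include <atomic>
    #include <chrono>
    #include <cstdlib>
    #include <iostream>
    #include <thread>

    std::atomic<int> minutesRunning(0);   // zeroed by the test runner when a new test starts

    void watchdogLoop() {
        for (;;) {
            std::this_thread::sleep_for(std::chrono::minutes(1));
            int m = ++minutesRunning;
            if (m > 30) {
                std::cerr << "current test has run for more than 30 minutes, aborting" << std::endl;
                std::abort();
            } else if (m > 1) {
                std::cerr << "current test has run for more than " << m - 1 << " minutes" << std::endl;
            }
        }
    }
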
diff --git a/dbtests/jsobjtests.cpp b/dbtests/jsobjtests.cpp
index 6804d71..82baaaa 100644
--- a/dbtests/jsobjtests.cpp
+++ b/dbtests/jsobjtests.cpp
@@ -18,23 +18,93 @@
*/
#include "pch.h"
+#include "../bson/util/builder.h"
#include "../db/jsobj.h"
#include "../db/jsobjmanipulator.h"
#include "../db/json.h"
#include "../db/repl.h"
#include "../db/extsort.h"
-
#include "dbtests.h"
#include "../util/mongoutils/checksum.h"
+#include "../db/key.h"
+#include "../db/btree.h"
namespace JsobjTests {
+
+ IndexInterface& indexInterfaceForTheseTests = (time(0)%2) ? *IndexDetails::iis[0] : *IndexDetails::iis[1];
+
+ void keyTest(const BSONObj& o, bool mustBeCompact = false) {
+ static KeyV1Owned *kLast;
+ static BSONObj last;
+
+ KeyV1Owned *key = new KeyV1Owned(o);
+ KeyV1Owned& k = *key;
+
+ ASSERT( !mustBeCompact || k.isCompactFormat() );
+
+ BSONObj x = k.toBson();
+ int res = o.woCompare(x, BSONObj(), /*considerfieldname*/false);
+ if( res ) {
+ cout << o.toString() << endl;
+ k.toBson();
+ cout << x.toString() << endl;
+ o.woCompare(x, BSONObj(), /*considerfieldname*/false);
+ ASSERT( res == 0 );
+ }
+ ASSERT( k.woEqual(k) );
+ ASSERT( !k.isCompactFormat() || k.dataSize() < o.objsize() );
+
+ {
+ // check BSONObj::equal. this part not a KeyV1 test.
+ int res = o.woCompare(last);
+ ASSERT( (res==0) == o.equal(last) );
+ }
+
+ if( kLast ) {
+ int r1 = o.woCompare(last, BSONObj(), false);
+ int r2 = k.woCompare(*kLast, Ordering::make(BSONObj()));
+ bool ok = (r1<0 && r2<0) || (r1>0&&r2>0) || r1==r2;
+ if( !ok ) {
+ cout << "r1r2 " << r1 << ' ' << r2 << endl;
+ cout << "o:" << o.toString() << endl;
+ cout << "last:" << last.toString() << endl;
+ cout << "k:" << k.toString() << endl;
+ cout << "kLast:" << kLast->toString() << endl;
+ int r3 = k.woCompare(*kLast, Ordering::make(BSONObj()));
+ cout << r3 << endl;
+ }
+ ASSERT(ok);
+ if( k.isCompactFormat() && kLast->isCompactFormat() ) { // only check when both keys are in compact format, since BSON woEqual is (or was, as of May 2011) broken
+ if( k.woEqual(*kLast) != (r2 == 0) ) { // check woEqual matches
+ cout << r2 << endl;
+ cout << k.toString() << endl;
+ cout << kLast->toString() << endl;
+ k.woEqual(*kLast);
+ ASSERT(false);
+ }
+ }
+ }
+
+ delete kLast;
+ kLast = key;
+ last = o.getOwned();
+ }
+
class BufBuilderBasic {
public:
void run() {
- BufBuilder b( 0 );
- b.appendStr( "foo" );
- ASSERT_EQUALS( 4, b.len() );
- ASSERT( strcmp( "foo", b.buf() ) == 0 );
+ {
+ BufBuilder b( 0 );
+ b.appendStr( "foo" );
+ ASSERT_EQUALS( 4, b.len() );
+ ASSERT( strcmp( "foo", b.buf() ) == 0 );
+ }
+ {
+ mongo::StackBufBuilder b;
+ b.appendStr( "foo" );
+ ASSERT_EQUALS( 4, b.len() );
+ ASSERT( strcmp( "foo", b.buf() ) == 0 );
+ }
}
};
@@ -42,6 +112,9 @@ namespace JsobjTests {
public:
void run() {
ASSERT_EQUALS( 1, BSONElement().size() );
+
+ BSONObj x;
+ ASSERT_EQUALS( 1, x.firstElement().size() );
}
};
@@ -172,16 +245,36 @@ namespace JsobjTests {
ASSERT( BSON( "" << "b" << "" << "b" ).woCompare( BSON( "" << "c" ) , key ) < 0 );
{
+ // test a big key
+ string x(2000, 'z');
+ BSONObj o = BSON( "q" << x );
+ keyTest(o, false);
+ }
+ {
+ string y(200, 'w');
+ BSONObjBuilder b;
+ for( int i = 0; i < 10; i++ ) {
+ b.append("x", y);
+ }
+ keyTest(b.obj(), true);
+ }
+ {
+ double nan = numeric_limits<double>::quiet_NaN();
+ BSONObj o = BSON( "y" << nan );
+ keyTest(o);
+ }
+
+ {
BSONObjBuilder b;
b.append( "" , "c" );
b.appendNull( "" );
BSONObj o = b.obj();
+ keyTest(o);
ASSERT( o.woSortOrder( BSON( "" << "b" << "" << "h" ) , key ) > 0 );
ASSERT( BSON( "" << "b" << "" << "h" ).woSortOrder( o , key ) < 0 );
}
-
ASSERT( BSON( "" << "a" ).woCompare( BSON( "" << "a" << "" << "c" ) ) < 0 );
{
BSONObjBuilder b;
@@ -196,6 +289,13 @@ namespace JsobjTests {
class TimestampTest : public Base {
public:
void run() {
+ Client *c = currentClient.get();
+ if( c == 0 ) {
+ Client::initThread("pretouchN");
+ c = &cc();
+ }
+ writelock lk(""); // for initTimestamp
+
BSONObjBuilder b;
b.appendTimestamp( "a" );
BSONObj o = b.done();
@@ -236,21 +336,34 @@ namespace JsobjTests {
double inf = numeric_limits< double >::infinity();
double nan = numeric_limits< double >::quiet_NaN();
double nan2 = numeric_limits< double >::signaling_NaN();
+ ASSERT( isNaN(nan) );
+ ASSERT( isNaN(nan2) );
+ ASSERT( !isNaN(inf) );
ASSERT( BSON( "a" << inf ).woCompare( BSON( "a" << inf ) ) == 0 );
- ASSERT( BSON( "a" << inf ).woCompare( BSON( "a" << 1 ) ) < 0 );
- ASSERT( BSON( "a" << 1 ).woCompare( BSON( "a" << inf ) ) > 0 );
+ ASSERT( BSON( "a" << inf ).woCompare( BSON( "a" << 1 ) ) > 0 );
+ ASSERT( BSON( "a" << 1 ).woCompare( BSON( "a" << inf ) ) < 0 );
ASSERT( BSON( "a" << nan ).woCompare( BSON( "a" << nan ) ) == 0 );
ASSERT( BSON( "a" << nan ).woCompare( BSON( "a" << 1 ) ) < 0 );
+
+ ASSERT( BSON( "a" << nan ).woCompare( BSON( "a" << 5000000000LL ) ) < 0 );
+
+ {
+ KeyV1Owned a( BSON( "a" << nan ) );
+ KeyV1Owned b( BSON( "a" << 1 ) );
+ Ordering o = Ordering::make(BSON("a"<<1));
+ ASSERT( a.woCompare(b, o) < 0 );
+ }
+
ASSERT( BSON( "a" << 1 ).woCompare( BSON( "a" << nan ) ) > 0 );
ASSERT( BSON( "a" << nan2 ).woCompare( BSON( "a" << nan2 ) ) == 0 );
ASSERT( BSON( "a" << nan2 ).woCompare( BSON( "a" << 1 ) ) < 0 );
ASSERT( BSON( "a" << 1 ).woCompare( BSON( "a" << nan2 ) ) > 0 );
- ASSERT( BSON( "a" << inf ).woCompare( BSON( "a" << nan ) ) == 0 );
- ASSERT( BSON( "a" << inf ).woCompare( BSON( "a" << nan2 ) ) == 0 );
+ ASSERT( BSON( "a" << inf ).woCompare( BSON( "a" << nan ) ) > 0 );
+ ASSERT( BSON( "a" << inf ).woCompare( BSON( "a" << nan2 ) ) > 0 );
ASSERT( BSON( "a" << nan ).woCompare( BSON( "a" << nan2 ) ) == 0 );
}
};
@@ -322,6 +435,14 @@ namespace JsobjTests {
struct AppendIntOrLL {
void run() {
const long long billion = 1000*1000*1000;
+
+ {
+ BSONObjBuilder b;
+ b.appendIntOrLL("L4", -4*billion);
+ keyTest(b.obj());
+ keyTest( BSON("" << billion) );
+ }
+
BSONObjBuilder b;
b.appendIntOrLL("i1", 1);
b.appendIntOrLL("i2", -1);
@@ -336,6 +457,7 @@ namespace JsobjTests {
b.appendIntOrLL("L6", -16*billion);
BSONObj o = b.obj();
+ keyTest(o);
ASSERT(o["i1"].type() == NumberInt);
ASSERT(o["i1"].number() == 1);
@@ -371,6 +493,7 @@ namespace JsobjTests {
b.appendNumber( "e" , 1024LL*1024*1024*1024*1024*1024 );
BSONObj o = b.obj();
+ keyTest(o);
ASSERT( o["a"].type() == NumberInt );
ASSERT( o["b"].type() == NumberDouble );
@@ -386,7 +509,144 @@ namespace JsobjTests {
void run() {
string spec = "{ a: [ \"a\", \"b\" ] }";
ASSERT_EQUALS( spec, fromjson( spec ).toString() );
- }
+
+ BSONObj x = BSON( "a" << "astring" << "b" << "str" );
+ keyTest(x);
+ keyTest(x);
+ BSONObj y = BSON( "a" << "astring" << "b" << "stra" );
+ keyTest(y);
+ y = BSON( "a" << "" );
+ keyTest(y);
+
+ keyTest( BSON("abc" << true ) );
+ keyTest( BSON("abc" << false ) );
+ keyTest( BSON("abc" << false << "b" << true ) );
+
+ Date_t now = jsTime();
+ keyTest( BSON("" << now << "" << 3 << "" << jstNULL << "" << true) );
+ keyTest( BSON("" << now << "" << 3 << "" << BSONObj() << "" << true) );
+
+ {
+ {
+ // check signed dates with new key format
+ KeyV1Owned a( BSONObjBuilder().appendDate("", -50).obj() );
+ KeyV1Owned b( BSONObjBuilder().appendDate("", 50).obj() );
+ ASSERT( a.woCompare(b, Ordering::make(BSONObj())) < 0 );
+ }
+ {
+ // backward compatibility
+ KeyBson a( BSONObjBuilder().appendDate("", -50).obj() );
+ KeyBson b( BSONObjBuilder().appendDate("", 50).obj() );
+ ASSERT( a.woCompare(b, Ordering::make(BSONObj())) > 0 );
+ }
+ {
+ // this is an uncompactable key (it cannot use the compact key format):
+ BSONObj uc1 = BSONObjBuilder().appendDate("", -50).appendCode("", "abc").obj();
+ BSONObj uc2 = BSONObjBuilder().appendDate("", 55).appendCode("", "abc").obj();
+ ASSERT( uc1.woCompare(uc2, Ordering::make(BSONObj())) < 0 );
+ {
+ KeyV1Owned a(uc1);
+ KeyV1Owned b(uc2);
+ ASSERT( !a.isCompactFormat() );
+ ASSERT( a.woCompare(b, Ordering::make(BSONObj())) < 0 );
+ }
+ {
+ KeyBson a(uc1);
+ KeyBson b(uc2);
+ ASSERT( !a.isCompactFormat() );
+ ASSERT( a.woCompare(b, Ordering::make(BSONObj())) > 0 );
+ }
+ }
+ }
+
+ {
+ BSONObjBuilder b;
+ b.appendBinData("f", 8, (BinDataType) 1, "aaaabbbb");
+ b.appendBinData("e", 3, (BinDataType) 1, "aaa");
+ b.appendBinData("b", 1, (BinDataType) 1, "x");
+ BSONObj o = b.obj();
+ keyTest( o, true );
+ }
+
+ {
+ // check (non)equality
+ BSONObj a = BSONObjBuilder().appendBinData("", 8, (BinDataType) 1, "abcdefgh").obj();
+ BSONObj b = BSONObjBuilder().appendBinData("", 8, (BinDataType) 1, "abcdefgj").obj();
+ ASSERT( !a.equal(b) );
+ int res_ab = a.woCompare(b);
+ ASSERT( res_ab != 0 );
+ keyTest( a, true );
+ keyTest( b, true );
+
+ // check subtypes do not equal
+ BSONObj c = BSONObjBuilder().appendBinData("", 8, (BinDataType) 4, "abcdefgh").obj();
+ BSONObj d = BSONObjBuilder().appendBinData("", 8, (BinDataType) 0x81, "abcdefgh").obj();
+ ASSERT( !a.equal(c) );
+ int res_ac = a.woCompare(c);
+ ASSERT( res_ac != 0 );
+ keyTest( c, true );
+ ASSERT( !a.equal(d) );
+ int res_ad = a.woCompare(d);
+ ASSERT( res_ad != 0 );
+ keyTest( d, true );
+
+ KeyV1Owned A(a);
+ KeyV1Owned B(b);
+ KeyV1Owned C(c);
+ KeyV1Owned D(d);
+ ASSERT( !A.woEqual(B) );
+ ASSERT( A.woCompare(B, Ordering::make(BSONObj())) < 0 && res_ab < 0 );
+ ASSERT( !A.woEqual(C) );
+ ASSERT( A.woCompare(C, Ordering::make(BSONObj())) < 0 && res_ac < 0 );
+ ASSERT( !A.woEqual(D) );
+ ASSERT( A.woCompare(D, Ordering::make(BSONObj())) < 0 && res_ad < 0 );
+ }
+
+ {
+ BSONObjBuilder b;
+ b.appendBinData("f", 33, (BinDataType) 1, "123456789012345678901234567890123");
+ BSONObj o = b.obj();
+ keyTest( o, false );
+ }
+
+ {
+ for( int i = 1; i <= 3; i++ ) {
+ for( int j = 1; j <= 3; j++ ) {
+ BSONObjBuilder b;
+ b.appendBinData("f", i, (BinDataType) j, "abc");
+ BSONObj o = b.obj();
+ keyTest( o, j != ByteArrayDeprecated );
+ }
+ }
+ }
+
+ {
+ BSONObjBuilder b;
+ b.appendBinData("f", 1, (BinDataType) 133, "a");
+ BSONObj o = b.obj();
+ keyTest( o, true );
+ }
+
+ {
+ BSONObjBuilder b;
+ b.append("AA", 3);
+ b.appendBinData("f", 0, (BinDataType) 0, "");
+ b.appendBinData("e", 3, (BinDataType) 7, "aaa");
+ b.appendBinData("b", 1, (BinDataType) 128, "x");
+ b.append("z", 3);
+ b.appendBinData("bb", 0, (BinDataType) 129, "x");
+ BSONObj o = b.obj();
+ keyTest( o, true );
+ }
+
+ {
+ // a length of 9 is not supported in the compact format, so test a non-compact case here.
+ BSONObjBuilder b;
+ b.appendBinData("f", 9, (BinDataType) 0, "aaaabbbbc");
+ BSONObj o = b.obj();
+ keyTest( o );
+ }
+ }
};
class ToStringNumber {
@@ -405,6 +665,8 @@ namespace JsobjTests {
b.append( "g" , -123.456 );
BSONObj x = b.obj();
+ keyTest(x);
+
ASSERT_EQUALS( "4", x["a"].toString( false , true ) );
ASSERT_EQUALS( "5.0", x["b"].toString( false , true ) );
ASSERT_EQUALS( "6", x["c"].toString( false , true ) );
@@ -421,11 +683,44 @@ namespace JsobjTests {
class NullString {
public:
void run() {
+ {
+ BSONObjBuilder b;
+ const char x[] = {'a', 0, 'b', 0};
+ b.append("field", x, 4);
+ b.append("z", true);
+ BSONObj B = b.obj();
+ //cout << B.toString() << endl;
+
+ BSONObjBuilder a;
+ const char xx[] = {'a', 0, 'c', 0};
+ a.append("field", xx, 4);
+ a.append("z", true);
+ BSONObj A = a.obj();
+
+ BSONObjBuilder c;
+ const char xxx[] = {'a', 0, 'c', 0, 0};
+ c.append("field", xxx, 5);
+ c.append("z", true);
+ BSONObj C = c.obj();
+
+ // test that nulls are ok within bson strings
+ ASSERT( !(A == B) );
+ ASSERT( A > B );
+
+ ASSERT( !(B == C) );
+ ASSERT( C > B );
+
+ // check iteration is ok
+ ASSERT( B["z"].Bool() && A["z"].Bool() && C["z"].Bool() );
+ }
+
BSONObjBuilder b;
b.append("a", "a\0b", 4);
- b.append("b", string("a\0b", 3));
+ string z("a\0b", 3);
+ b.append("b", z);
b.appendAs(b.asTempObj()["a"], "c");
BSONObj o = b.obj();
+ keyTest(o);
stringstream ss;
ss << 'a' << '\0' << 'b';
@@ -438,6 +733,7 @@ namespace JsobjTests {
ASSERT_EQUALS(o["c"].valuestrsize(), 3+1);
ASSERT_EQUALS(o["c"].str(), ss.str());
+
}
};
@@ -479,6 +775,7 @@ namespace JsobjTests {
ASSERT_EQUALS( 2 , o.getFieldDotted( "b.a" ).numberInt() );
ASSERT_EQUALS( 3 , o.getFieldDotted( "c.0.a" ).numberInt() );
ASSERT_EQUALS( 4 , o.getFieldDotted( "c.1.a" ).numberInt() );
+ keyTest(o);
}
};
@@ -796,6 +1093,7 @@ namespace JsobjTests {
b.appendOID( "b" , 0 , false );
b.appendOID( "c" , 0 , true );
BSONObj o = b.obj();
+ keyTest(o);
ASSERT( o["a"].__oid().str() == "000000000000000000000000" );
ASSERT( o["b"].__oid().str() == "000000000000000000000000" );
@@ -1074,11 +1372,10 @@ namespace JsobjTests {
stringstream ss;
ss << "type: " << t;
string s = ss.str();
- massert( 10403 , s , min( t ).woCompare( max( t ) ) < 0 );
- massert( 10404 , s , max( t ).woCompare( min( t ) ) > 0 );
- massert( 10405 , s , min( t ).woCompare( min( t ) ) == 0 );
- massert( 10406 , s , max( t ).woCompare( max( t ) ) == 0 );
- massert( 10407 , s , abs( min( t ).firstElement().canonicalType() - max( t ).firstElement().canonicalType() ) <= 10 );
+ ASSERT( min( t ).woCompare( max( t ) ) <= 0 );
+ ASSERT( max( t ).woCompare( min( t ) ) >= 0 );
+ ASSERT( min( t ).woCompare( min( t ) ) == 0 );
+ ASSERT( max( t ).woCompare( max( t ) ) == 0 );
}
}
};
@@ -1091,7 +1388,7 @@ namespace JsobjTests {
assert( BSON( "b" << 11 ).woCompare( x.extractFields( BSON( "b" << 1 ) ) ) == 0 );
assert( x.woCompare( x.extractFields( BSON( "a" << 1 << "b" << 1 ) ) ) == 0 );
- assert( (string)"a" == x.extractFields( BSON( "a" << 1 << "c" << 1 ) ).firstElement().fieldName() );
+ assert( (string)"a" == x.extractFields( BSON( "a" << 1 << "c" << 1 ) ).firstElementFieldName() );
}
};
@@ -1147,7 +1444,8 @@ namespace JsobjTests {
class Basic1 {
public:
void run() {
- BSONObjExternalSorter sorter;
+ BSONObjExternalSorter sorter(indexInterfaceForTheseTests);
+
sorter.add( BSON( "x" << 10 ) , 5 , 1);
sorter.add( BSON( "x" << 2 ) , 3 , 1 );
sorter.add( BSON( "x" << 5 ) , 6 , 1 );
@@ -1179,7 +1477,7 @@ namespace JsobjTests {
class Basic2 {
public:
void run() {
- BSONObjExternalSorter sorter( BSONObj() , 10 );
+ BSONObjExternalSorter sorter( indexInterfaceForTheseTests, BSONObj() , 10 );
sorter.add( BSON( "x" << 10 ) , 5 , 11 );
sorter.add( BSON( "x" << 2 ) , 3 , 1 );
sorter.add( BSON( "x" << 5 ) , 6 , 1 );
@@ -1212,7 +1510,7 @@ namespace JsobjTests {
class Basic3 {
public:
void run() {
- BSONObjExternalSorter sorter( BSONObj() , 10 );
+ BSONObjExternalSorter sorter( indexInterfaceForTheseTests, BSONObj() , 10 );
sorter.sort();
auto_ptr<BSONObjExternalSorter::Iterator> i = sorter.iterator();
@@ -1225,7 +1523,7 @@ namespace JsobjTests {
class ByDiskLock {
public:
void run() {
- BSONObjExternalSorter sorter;
+ BSONObjExternalSorter sorter(indexInterfaceForTheseTests);
sorter.add( BSON( "x" << 10 ) , 5 , 4);
sorter.add( BSON( "x" << 2 ) , 3 , 0 );
sorter.add( BSON( "x" << 5 ) , 6 , 2 );
@@ -1259,7 +1557,7 @@ namespace JsobjTests {
class Big1 {
public:
void run() {
- BSONObjExternalSorter sorter( BSONObj() , 2000 );
+ BSONObjExternalSorter sorter( indexInterfaceForTheseTests, BSONObj() , 2000 );
for ( int i=0; i<10000; i++ ) {
sorter.add( BSON( "x" << rand() % 10000 ) , 5 , i );
}
@@ -1284,7 +1582,7 @@ namespace JsobjTests {
public:
void run() {
const int total = 100000;
- BSONObjExternalSorter sorter( BSONObj() , total * 2 );
+ BSONObjExternalSorter sorter( indexInterfaceForTheseTests, BSONObj() , total * 2 );
for ( int i=0; i<total; i++ ) {
sorter.add( BSON( "a" << "b" ) , 5 , i );
}
@@ -1314,7 +1612,7 @@ namespace JsobjTests {
b.appendNull("");
BSONObj x = b.obj();
- BSONObjExternalSorter sorter;
+ BSONObjExternalSorter sorter(indexInterfaceForTheseTests);
sorter.add(x, DiskLoc(3,7));
sorter.add(x, DiskLoc(4,7));
sorter.add(x, DiskLoc(2,7));
@@ -1422,6 +1720,8 @@ namespace JsobjTests {
ASSERT_EQUALS(obj, arr);
BSONObj o = BSON( "obj" << obj << "arr" << arr << "arr2" << BSONArray(obj) );
+ keyTest(o);
+
ASSERT_EQUALS(o["obj"].type(), Object);
ASSERT_EQUALS(o["arr"].type(), Array);
ASSERT_EQUALS(o["arr2"].type(), Array);
@@ -1488,8 +1788,8 @@ namespace JsobjTests {
while ( j.more() )
l += strlen( j.next().fieldName() );
}
- unsigned long long tm = t.micros();
- cout << "time: " << tm << endl;
+ //unsigned long long tm = t.micros();
+ //cout << "time: " << tm << endl;
}
}
@@ -1528,7 +1828,7 @@ namespace JsobjTests {
{
char * crap = (char*)malloc( x.objsize() );
memcpy( crap , x.objdata() , x.objsize() );
- BSONObj y( crap , false );
+ BSONObj y( crap );
ASSERT_EQUALS( x , y );
free( crap );
}
@@ -1540,7 +1840,7 @@ namespace JsobjTests {
foo[0] = 123123123;
int state = 0;
try {
- BSONObj y( crap , false );
+ BSONObj y( crap );
state = 1;
}
catch ( std::exception& e ) {
@@ -1562,7 +1862,7 @@ namespace JsobjTests {
BSONElement a = x["a"];
BSONElement b = x["b"];
BSONElement c = x["c"];
- cout << "c: " << c << endl;
+ //cout << "c: " << c << endl;
ASSERT( a.woCompare( b ) != 0 );
ASSERT( a.woCompare( b , false ) == 0 );
@@ -1607,6 +1907,7 @@ namespace JsobjTests {
void run() {
BSONObj x = BSON( "a" << BSON( "b" << 1 ) );
BSONObj y = BSON( "a" << BSON( "b" << 1.0 ) );
+ keyTest(x); keyTest(y);
ASSERT_EQUALS( x , y );
ASSERT_EQUALS( 0 , x.woCompare( y ) );
}
@@ -1736,27 +2037,27 @@ namespace JsobjTests {
<< "asdasdasdas" << "asldkasldjasldjasldjlasjdlasjdlasdasdasdasdasdasdasd" );
{
- Timer t;
+ //Timer t;
for ( int i=0; i<N; i++ )
x.md5();
- int millis = t.millis();
- cout << "md5 : " << millis << endl;
+ //int millis = t.millis();
+ //cout << "md5 : " << millis << endl;
}
{
- Timer t;
+ //Timer t;
for ( int i=0; i<N; i++ )
x.toString();
- int millis = t.millis();
- cout << "toString : " << millis << endl;
+ //int millis = t.millis();
+ //cout << "toString : " << millis << endl;
}
{
- Timer t;
+ //Timer t;
for ( int i=0; i<N; i++ )
checksum( x.objdata() , x.objsize() );
- int millis = t.millis();
- cout << "checksum : " << millis << endl;
+ //int millis = t.millis();
+ //cout << "checksum : " << millis << endl;
}
}
@@ -1770,6 +2071,7 @@ namespace JsobjTests {
void setupTests() {
add< BufBuilderBasic >();
add< BSONElementBasic >();
+ add< BSONObjTests::NullString >();
add< BSONObjTests::Create >();
add< BSONObjTests::WoCompareBasic >();
add< BSONObjTests::NumericCompareBasic >();
@@ -1786,7 +2088,6 @@ namespace JsobjTests {
add< BSONObjTests::AppendNumber >();
add< BSONObjTests::ToStringArray >();
add< BSONObjTests::ToStringNumber >();
- add< BSONObjTests::NullString >();
add< BSONObjTests::AppendAs >();
add< BSONObjTests::ArrayAppendAs >();
add< BSONObjTests::GetField >();
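
Note: most of the jsobjtests changes feed objects through the new keyTest() helper above, which round-trips a BSONObj through KeyV1Owned, checks that compact-format keys are smaller than the original object, and verifies that key comparisons stay consistent with BSONObj::woCompare. The infinity/NaN assertions were also corrected so that +inf now sorts above finite numbers and above NaN instead of comparing equal. A minimal sketch of the consistency check the helper performs (illustrative only, using only types that appear above):

    // Sketch, not part of the patch.
    #include "../db/jsobj.h"
    #include "../db/key.h"
    using namespace mongo;

    // true when KeyV1 ordering agrees with BSONObj::woCompare for a and b
    bool keyOrderAgrees(const BSONObj& a, const BSONObj& b) {
        KeyV1Owned ka(a), kb(b);
        int rBson = a.woCompare(b, BSONObj(), /*considerFieldName=*/false);
        int rKey  = ka.woCompare(kb, Ordering::make(BSONObj()));
        return (rBson < 0 && rKey < 0) || (rBson > 0 && rKey > 0) || rBson == rKey;
    }
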
diff --git a/dbtests/jsontests.cpp b/dbtests/jsontests.cpp
index b630523..efaee44 100644
--- a/dbtests/jsontests.cpp
+++ b/dbtests/jsontests.cpp
@@ -386,7 +386,8 @@ namespace JsonTests {
b.appendMaxKey( "u" );
BSONObj o = b.obj();
- cout << o.jsonString() << endl;
+ o.jsonString();
+ //cout << o.jsonString() << endl;
}
};
@@ -499,16 +500,21 @@ namespace JsonTests {
}
};
- class FancyNumber {
- public:
- virtual ~FancyNumber() {}
- void run() {
- ASSERT_EQUALS( int( 1000000 * bson().firstElement().number() ),
- int( 1000000 * fromjson( json() ).firstElement().number() ) );
+ class RealNumber : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append( "a", strtod( "0.7", 0 ) );
+ return b.obj();
}
+ virtual string json() const {
+ return "{ \"a\" : 0.7 }";
+ }
+ };
+
+ class FancyNumber : public Base {
virtual BSONObj bson() const {
BSONObjBuilder b;
- b.append( "a", -4.4433e-2 );
+ b.append( "a", strtod( "-4.4433e-2", 0 ) );
return b.obj();
}
virtual string json() const {
@@ -1124,6 +1130,7 @@ namespace JsonTests {
add< FromJsonTests::ReservedFieldName >();
add< FromJsonTests::OkDollarFieldName >();
add< FromJsonTests::SingleNumber >();
+ add< FromJsonTests::RealNumber >();
add< FromJsonTests::FancyNumber >();
add< FromJsonTests::TwoElements >();
add< FromJsonTests::Subobject >();
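
Note: RealNumber is added and FancyNumber is rewritten so that the expected BSON value is built with strtod() on the same text the JSON parser reads, which lets the test compare doubles exactly instead of to within one part in a million. A small standalone illustration of the idea (illustrative only):

    // Sketch, not part of the patch.
    #include <cstdio>
    #include <cstdlib>

    int main() {
        // Deriving the expected value from the same text the parser sees avoids
        // depending on the compiler's own literal-to-double conversion.
        double fromText = std::strtod("-4.4433e-2", 0);
        double literal  = -4.4433e-2;   // may or may not round identically
        std::printf("strtod=%.17g literal=%.17g equal=%d\n",
                    fromText, literal, fromText == literal);
        return 0;
    }
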
diff --git a/dbtests/jstests.cpp b/dbtests/jstests.cpp
index c33b200..f4be230 100644
--- a/dbtests/jstests.cpp
+++ b/dbtests/jstests.cpp
@@ -106,24 +106,25 @@ namespace JSTests {
void run() {
Scope * s = globalScriptEngine->newScope();
- s->invoke( "x=5;" , BSONObj() );
+ s->invoke( "x=5;" , 0, 0 );
ASSERT( 5 == s->getNumber( "x" ) );
- s->invoke( "return 17;" , BSONObj() );
+ s->invoke( "return 17;" , 0, 0 );
ASSERT( 17 == s->getNumber( "return" ) );
- s->invoke( "function(){ return 17; }" , BSONObj() );
+ s->invoke( "function(){ return 17; }" , 0, 0 );
ASSERT( 17 == s->getNumber( "return" ) );
s->setNumber( "x" , 1.76 );
- s->invoke( "return x == 1.76; " , BSONObj() );
+ s->invoke( "return x == 1.76; " , 0, 0 );
ASSERT( s->getBoolean( "return" ) );
s->setNumber( "x" , 1.76 );
- s->invoke( "return x == 1.79; " , BSONObj() );
+ s->invoke( "return x == 1.79; " , 0, 0 );
ASSERT( ! s->getBoolean( "return" ) );
- s->invoke( "function( z ){ return 5 + z; }" , BSON( "" << 11 ) );
+ BSONObj obj = BSON( "" << 11.0 );
+ s->invoke( "function( z ){ return 5 + z; }" , &obj, 0 );
ASSERT_EQUALS( 16 , s->getNumber( "return" ) );
delete s;
@@ -135,52 +136,51 @@ namespace JSTests {
void run() {
Scope * s = globalScriptEngine->newScope();
- BSONObj o = BSON( "x" << 17 << "y" << "eliot" << "z" << "sara" );
+ BSONObj o = BSON( "x" << 17.0 << "y" << "eliot" << "z" << "sara" );
s->setObject( "blah" , o );
- s->invoke( "return blah.x;" , BSONObj() );
+ s->invoke( "return blah.x;" , 0, 0 );
ASSERT_EQUALS( 17 , s->getNumber( "return" ) );
- s->invoke( "return blah.y;" , BSONObj() );
+ s->invoke( "return blah.y;" , 0, 0 );
ASSERT_EQUALS( "eliot" , s->getString( "return" ) );
- s->setThis( & o );
- s->invoke( "return this.z;" , BSONObj() );
+ s->invoke( "return this.z;" , 0, &o );
ASSERT_EQUALS( "sara" , s->getString( "return" ) );
- s->invoke( "return this.z == 'sara';" , BSONObj() );
+ s->invoke( "return this.z == 'sara';" , 0, &o );
ASSERT_EQUALS( true , s->getBoolean( "return" ) );
- s->invoke( "this.z == 'sara';" , BSONObj() );
+ s->invoke( "this.z == 'sara';" , 0, &o );
ASSERT_EQUALS( true , s->getBoolean( "return" ) );
- s->invoke( "this.z == 'asara';" , BSONObj() );
+ s->invoke( "this.z == 'asara';" , 0, &o );
ASSERT_EQUALS( false , s->getBoolean( "return" ) );
- s->invoke( "return this.x == 17;" , BSONObj() );
+ s->invoke( "return this.x == 17;" , 0, &o );
ASSERT_EQUALS( true , s->getBoolean( "return" ) );
- s->invoke( "return this.x == 18;" , BSONObj() );
+ s->invoke( "return this.x == 18;" , 0, &o );
ASSERT_EQUALS( false , s->getBoolean( "return" ) );
- s->invoke( "function(){ return this.x == 17; }" , BSONObj() );
+ s->invoke( "function(){ return this.x == 17; }" , 0, &o );
ASSERT_EQUALS( true , s->getBoolean( "return" ) );
- s->invoke( "function(){ return this.x == 18; }" , BSONObj() );
+ s->invoke( "function(){ return this.x == 18; }" , 0, &o );
ASSERT_EQUALS( false , s->getBoolean( "return" ) );
- s->invoke( "function (){ return this.x == 17; }" , BSONObj() );
+ s->invoke( "function (){ return this.x == 17; }" , 0, &o );
ASSERT_EQUALS( true , s->getBoolean( "return" ) );
- s->invoke( "function z(){ return this.x == 18; }" , BSONObj() );
+ s->invoke( "function z(){ return this.x == 18; }" , 0, &o );
ASSERT_EQUALS( false , s->getBoolean( "return" ) );
- s->invoke( "function (){ this.x == 17; }" , BSONObj() );
+ s->invoke( "function (){ this.x == 17; }" , 0, &o );
ASSERT_EQUALS( false , s->getBoolean( "return" ) );
- s->invoke( "function z(){ this.x == 18; }" , BSONObj() );
+ s->invoke( "function z(){ this.x == 18; }" , 0, &o );
ASSERT_EQUALS( false , s->getBoolean( "return" ) );
- s->invoke( "x = 5; for( ; x <10; x++){ a = 1; }" , BSONObj() );
+ s->invoke( "x = 5; for( ; x <10; x++){ a = 1; }" , 0, &o );
ASSERT_EQUALS( 10 , s->getNumber( "x" ) );
delete s;
@@ -192,12 +192,12 @@ namespace JSTests {
void run() {
Scope * s = globalScriptEngine->newScope();
- s->invoke( "z = { num : 1 };" , BSONObj() );
+ s->invoke( "z = { num : 1 };" , 0, 0 );
BSONObj out = s->getObject( "z" );
ASSERT_EQUALS( 1 , out["num"].number() );
ASSERT_EQUALS( 1 , out.nFields() );
- s->invoke( "z = { x : 'eliot' };" , BSONObj() );
+ s->invoke( "z = { x : 'eliot' };" , 0, 0 );
out = s->getObject( "z" );
ASSERT_EQUALS( (string)"eliot" , out["x"].valuestr() );
ASSERT_EQUALS( 1 , out.nFields() );
@@ -219,7 +219,7 @@ namespace JSTests {
s->localConnect( "blah" );
- s->invoke( "z = { _id : new ObjectId() , a : 123 };" , BSONObj() );
+ s->invoke( "z = { _id : new ObjectId() , a : 123 };" , 0, 0 );
BSONObj out = s->getObject( "z" );
ASSERT_EQUALS( 123 , out["a"].number() );
ASSERT_EQUALS( jstOID , out["_id"].type() );
@@ -228,13 +228,13 @@ namespace JSTests {
s->setObject( "a" , out );
- s->invoke( "y = { _id : a._id , a : 124 };" , BSONObj() );
+ s->invoke( "y = { _id : a._id , a : 124 };" , 0, 0 );
out = s->getObject( "y" );
ASSERT_EQUALS( 124 , out["a"].number() );
ASSERT_EQUALS( jstOID , out["_id"].type() );
ASSERT_EQUALS( out["_id"].__oid().str() , save.str() );
- s->invoke( "y = { _id : new ObjectId( a._id ) , a : 125 };" , BSONObj() );
+ s->invoke( "y = { _id : new ObjectId( a._id ) , a : 125 };" , 0, 0 );
out = s->getObject( "y" );
ASSERT_EQUALS( 125 , out["a"].number() );
ASSERT_EQUALS( jstOID , out["_id"].type() );
@@ -274,15 +274,15 @@ namespace JSTests {
BSONObj o = BSON( "x" << 17 << "y" << "eliot" << "z" << "sara" << "zz" << BSONObj() );
s->setObject( "blah" , o , true );
- s->invoke( "blah.y = 'e'", BSONObj() );
+ s->invoke( "blah.y = 'e'", 0, 0 );
BSONObj out = s->getObject( "blah" );
ASSERT( strlen( out["y"].valuestr() ) > 1 );
- s->invoke( "blah.a = 19;" , BSONObj() );
+ s->invoke( "blah.a = 19;" , 0, 0 );
out = s->getObject( "blah" );
ASSERT( out["a"].eoo() );
- s->invoke( "blah.zz.a = 19;" , BSONObj() );
+ s->invoke( "blah.zz.a = 19;" , 0, 0 );
out = s->getObject( "blah" );
ASSERT( out["zz"].embeddedObject()["a"].eoo() );
@@ -290,12 +290,12 @@ namespace JSTests {
out = s->getObject( "blah" );
ASSERT( out["zz"].embeddedObject()["a"].eoo() );
- s->invoke( "delete blah['x']" , BSONObj() );
+ s->invoke( "delete blah['x']" , 0, 0 );
out = s->getObject( "blah" );
ASSERT( !out["x"].eoo() );
// read-only object itself can be overwritten
- s->invoke( "blah = {}", BSONObj() );
+ s->invoke( "blah = {}", 0, 0 );
out = s->getObject( "blah" );
ASSERT( out.isEmpty() );
@@ -328,13 +328,13 @@ namespace JSTests {
}
s->setObject( "x" , o );
- s->invoke( "return x.d.getTime() != 12;" , BSONObj() );
+ s->invoke( "return x.d.getTime() != 12;" , 0, 0 );
ASSERT_EQUALS( true, s->getBoolean( "return" ) );
- s->invoke( "z = x.d.getTime();" , BSONObj() );
+ s->invoke( "z = x.d.getTime();" , 0, 0 );
ASSERT_EQUALS( 123456789 , s->getNumber( "z" ) );
- s->invoke( "z = { z : x.d }" , BSONObj() );
+ s->invoke( "z = { z : x.d }" , 0, 0 );
BSONObj out = s->getObject( "z" );
ASSERT( out["z"].type() == Date );
}
@@ -349,16 +349,16 @@ namespace JSTests {
}
s->setObject( "x" , o );
- s->invoke( "z = x.r.test( 'b' );" , BSONObj() );
+ s->invoke( "z = x.r.test( 'b' );" , 0, 0 );
ASSERT_EQUALS( false , s->getBoolean( "z" ) );
- s->invoke( "z = x.r.test( 'a' );" , BSONObj() );
+ s->invoke( "z = x.r.test( 'a' );" , 0, 0 );
ASSERT_EQUALS( true , s->getBoolean( "z" ) );
- s->invoke( "z = x.r.test( 'ba' );" , BSONObj() );
+ s->invoke( "z = x.r.test( 'ba' );" , 0, 0 );
ASSERT_EQUALS( false , s->getBoolean( "z" ) );
- s->invoke( "z = { a : x.r };" , BSONObj() );
+ s->invoke( "z = { a : x.r };" , 0, 0 );
BSONObj out = s->getObject("z");
ASSERT_EQUALS( (string)"^a" , out["a"].regex() );
@@ -402,7 +402,7 @@ namespace JSTests {
s->setObject( "z" , b.obj() );
- ASSERT( s->invoke( "y = { a : z.a , b : z.b , c : z.c , d: z.d }" , BSONObj() ) == 0 );
+ ASSERT( s->invoke( "y = { a : z.a , b : z.b , c : z.c , d: z.d }" , 0, 0 ) == 0 );
BSONObj out = s->getObject( "y" );
ASSERT_EQUALS( Timestamp , out["a"].type() );
@@ -436,7 +436,7 @@ namespace JSTests {
ASSERT_EQUALS( NumberDouble , o["b"].type() );
s->setObject( "z" , o );
- s->invoke( "return z" , BSONObj() );
+ s->invoke( "return z" , 0, 0 );
BSONObj out = s->getObject( "return" );
ASSERT_EQUALS( 5 , out["a"].number() );
ASSERT_EQUALS( 5.6 , out["b"].number() );
@@ -454,7 +454,7 @@ namespace JSTests {
}
s->setObject( "z" , o , false );
- s->invoke( "return z" , BSONObj() );
+ s->invoke( "return z" , 0, 0 );
out = s->getObject( "return" );
ASSERT_EQUALS( 5 , out["a"].number() );
ASSERT_EQUALS( 5.6 , out["b"].number() );
@@ -487,7 +487,7 @@ namespace JSTests {
ASSERT_EQUALS( NumberDouble , out["a"].embeddedObjectUserCheck()["0"].type() );
ASSERT_EQUALS( NumberInt , out["a"].embeddedObjectUserCheck()["1"].type() );
- s->invokeSafe( "z.z = 5;" , BSONObj() );
+ s->invokeSafe( "z.z = 5;" , 0, 0 );
out = s->getObject( "z" );
ASSERT_EQUALS( 5 , out["z"].number() );
ASSERT_EQUALS( NumberDouble , out["a"].embeddedObjectUserCheck()["0"].type() );
@@ -625,10 +625,10 @@ namespace JSTests {
for ( int i=5; i<100 ; i += 10 ) {
s->setObject( "a" , build(i) , false );
- s->invokeSafe( "tojson( a )" , BSONObj() );
+ s->invokeSafe( "tojson( a )" , 0, 0 );
s->setObject( "a" , build(5) , true );
- s->invokeSafe( "tojson( a )" , BSONObj() );
+ s->invokeSafe( "tojson( a )" , 0, 0 );
}
delete s;
@@ -715,9 +715,8 @@ namespace JSTests {
}
//cout << "ELIOT: " << b.jsonString() << endl;
- s->setThis( &b );
// its ok if this is handled by js, just can't create a c++ exception
- s->invoke( "x=this.x.length;" , BSONObj() );
+ s->invoke( "x=this.x.length;" , 0, &b );
}
};
@@ -735,7 +734,7 @@ namespace JSTests {
s->setObject( "foo" , b.obj() );
}
- s->invokeSafe( "out = {}; out.a = foo.a; foo.b(); foo.c();" , BSONObj() );
+ s->invokeSafe( "out = {}; out.a = foo.a; foo.b(); foo.c();" , 0, 0 );
BSONObj out = s->getObject( "out" );
ASSERT_EQUALS( 1 , out["a"].number() );
@@ -845,8 +844,8 @@ namespace JSTests {
s->setObject( "x" , in );
}
- s->invokeSafe( "myb = x.b; print( myb ); printjson( myb );" , BSONObj() );
- s->invokeSafe( "y = { c : myb };" , BSONObj() );
+ s->invokeSafe( "myb = x.b; print( myb ); printjson( myb );" , 0, 0 );
+ s->invokeSafe( "y = { c : myb };" , 0, 0 );
BSONObj out = s->getObject( "y" );
ASSERT_EQUALS( BinData , out["c"].type() );
@@ -855,7 +854,7 @@ namespace JSTests {
ASSERT_EQUALS( 0 , in["b"].woCompare( out["c"] , false ) );
// check that BinData js class is utilized
- s->invokeSafe( "q = x.b.toString();", BSONObj() );
+ s->invokeSafe( "q = x.b.toString();", 0, 0 );
stringstream expected;
expected << "BinData(" << BinDataGeneral << ",\"" << base64 << "\")";
ASSERT_EQUALS( expected.str(), s->getString( "q" ) );
@@ -863,12 +862,12 @@ namespace JSTests {
stringstream scriptBuilder;
scriptBuilder << "z = { c : new BinData( " << BinDataGeneral << ", \"" << base64 << "\" ) };";
string script = scriptBuilder.str();
- s->invokeSafe( script.c_str(), BSONObj() );
+ s->invokeSafe( script.c_str(), 0, 0 );
out = s->getObject( "z" );
// pp( "out" , out["c"] );
ASSERT_EQUALS( 0 , in["b"].woCompare( out["c"] , false ) );
- s->invokeSafe( "a = { f: new BinData( 128, \"\" ) };", BSONObj() );
+ s->invokeSafe( "a = { f: new BinData( 128, \"\" ) };", 0, 0 );
out = s->getObject( "a" );
int len = -1;
out[ "f" ].binData( len );
@@ -896,19 +895,18 @@ namespace JSTests {
class Speed1 {
public:
void run() {
- BSONObj start = BSON( "x" << 5 );
+ BSONObj start = BSON( "x" << 5.0 );
BSONObj empty;
auto_ptr<Scope> s;
s.reset( globalScriptEngine->newScope() );
ScriptingFunction f = s->createFunction( "return this.x + 6;" );
- s->setThis( &start );
Timer t;
double n = 0;
for ( ; n < 100000; n++ ) {
- s->invoke( f , empty );
+ s->invoke( f , &empty, &start );
ASSERT_EQUALS( 11 , s->getNumber( "return" ) );
}
//cout << "speed1: " << ( n / t.millis() ) << " ops/ms" << endl;
@@ -921,23 +919,22 @@ namespace JSTests {
auto_ptr<Scope> s;
s.reset( globalScriptEngine->newScope() );
- s->invokeSafe( "x = 5;" , BSONObj() );
+ s->invokeSafe( "x = 5;" , 0, 0 );
{
BSONObjBuilder b;
s->append( b , "z" , "x" );
ASSERT_EQUALS( BSON( "z" << 5 ) , b.obj() );
}
- s->invokeSafe( "x = function(){ return 17; }" , BSONObj() );
+ s->invokeSafe( "x = function(){ return 17; }" , 0, 0 );
BSONObj temp;
{
BSONObjBuilder b;
s->append( b , "z" , "x" );
temp = b.obj();
- s->setThis( &temp );
}
- s->invokeSafe( "foo = this.z();" , BSONObj() );
+ s->invokeSafe( "foo = this.z();" , 0, &temp );
ASSERT_EQUALS( 17 , s->getNumber( "foo" ) );
}
};
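
Note: throughout the scripting tests, Scope::setThis() plus invoke(code, BSONObj()) is replaced by the three-argument form invoke(code, argsObject, thisObject), where a null pointer means "no arguments" or "no this". A minimal sketch of the new call shape (illustrative only, mirroring the calls above):

    // Sketch, not part of the patch; names follow the test code above.
    #include "../db/jsobj.h"
    #include "../scripting/engine.h"
    using namespace mongo;

    void invokeExample() {
        Scope* s = globalScriptEngine->newScope();
        BSONObj args = BSON("" << 11.0);
        BSONObj self = BSON("x" << 17.0);
        s->invoke("function( z ){ return 5 + z; }", &args, 0);  // arguments only
        s->invoke("return this.x == 17;", 0, &self);            // 'this' only
        delete s;
    }
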
diff --git a/dbtests/mockdbclient.h b/dbtests/mockdbclient.h
deleted file mode 100644
index fda0963..0000000
--- a/dbtests/mockdbclient.h
+++ /dev/null
@@ -1,97 +0,0 @@
-// mockdbclient.h - mocked out client for testing.
-
-/**
- * Copyright (C) 2008 10gen Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License, version 3,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#pragma once
-
-#include "../client/dbclient.h"
-#include "../db/commands.h"
-#include "../db/replpair.h"
-
-class MockDBClientConnection : public DBClientConnection {
-public:
- MockDBClientConnection() : connect_() {}
- virtual
- BSONObj findOne(const string &ns, const Query& query, const BSONObj *fieldsToReturn = 0, int queryOptions = 0) {
- return one_;
- }
- virtual
- bool connect(const char * serverHostname, string& errmsg) {
- return connect_;
- }
- virtual
- bool connect(const HostAndPort& , string& errmsg) {
- return connect_;
- }
- virtual
- bool isMaster(bool& isMaster, BSONObj *info=0) {
- return isMaster_;
- }
- void one( const BSONObj &one ) {
- one_ = one;
- }
- void connect( bool val ) {
- connect_ = val;
- }
- void setIsMaster( bool val ) {
- isMaster_ = val;
- }
-private:
- BSONObj one_;
- bool connect_;
- bool isMaster_;
-};
-
-class DirectDBClientConnection : public DBClientConnection {
-public:
- struct ConnectionCallback {
- virtual ~ConnectionCallback() {}
- virtual void beforeCommand() {}
- virtual void afterCommand() {}
- };
- DirectDBClientConnection( ReplPair *rp, ConnectionCallback *cc = 0 ) :
- rp_( rp ),
- cc_( cc ) {
- }
- virtual BSONObj findOne(const string &ns, const Query& query, const BSONObj *fieldsToReturn = 0, int queryOptions = 0) {
- BSONObj c = query.obj.copy();
- if ( cc_ ) cc_->beforeCommand();
- SetGlobalReplPair s( rp_ );
- BSONObjBuilder result;
- result.append( "ok", Command::runAgainstRegistered( "admin.$cmd", c, result ) ? 1.0 : 0.0 );
- if ( cc_ ) cc_->afterCommand();
- return result.obj();
- }
- virtual bool connect( const string &serverHostname, string& errmsg ) {
- return true;
- }
-private:
- ReplPair *rp_;
- ConnectionCallback *cc_;
- class SetGlobalReplPair {
- public:
- SetGlobalReplPair( ReplPair *rp ) {
- backup_ = replPair;
- replPair = rp;
- }
- ~SetGlobalReplPair() {
- replPair = backup_;
- }
- private:
- ReplPair *backup_;
- };
-};
diff --git a/dbtests/namespacetests.cpp b/dbtests/namespacetests.cpp
index c2be0b0..bbb8f5e 100644
--- a/dbtests/namespacetests.cpp
+++ b/dbtests/namespacetests.cpp
@@ -27,6 +27,9 @@
#include "dbtests.h"
namespace NamespaceTests {
+
+ const int MinExtentSize = 4096;
+
namespace IndexDetailsTests {
class Base {
dblock lk;
@@ -41,12 +44,13 @@ namespace NamespaceTests {
ASSERT( theDataFileMgr.findAll( ns() )->eof() );
}
protected:
- void create() {
+ void create( bool sparse = false ) {
NamespaceDetailsTransient::get_w( ns() ).deletedIndex();
BSONObjBuilder builder;
builder.append( "ns", ns() );
builder.append( "name", "testIndex" );
builder.append( "key", key() );
+ builder.append( "sparse", sparse );
BSONObj bobj = builder.done();
id_.info = theDataFileMgr.insert( ns(), bobj.objdata(), bobj.objsize() );
// head not needed for current tests
@@ -87,8 +91,8 @@ namespace NamespaceTests {
b.append( "c", 4 );
return b.obj();
}
- static void checkSize( int expected, const BSONObjSetDefaultOrder &objs ) {
- ASSERT_EQUALS( BSONObjSetDefaultOrder::size_type( expected ), objs.size() );
+ static void checkSize( int expected, const BSONObjSet &objs ) {
+ ASSERT_EQUALS( BSONObjSet::size_type( expected ), objs.size() );
}
static void assertEquals( const BSONObj &a, const BSONObj &b ) {
if ( a.woCompare( b ) != 0 ) {
@@ -125,7 +129,7 @@ namespace NamespaceTests {
b.append( "b", 4 );
b.append( "a", 5 );
e.append( "", 5 );
- BSONObjSetDefaultOrder keys;
+ BSONObjSet keys;
id().getKeysFromObject( b.done(), keys );
checkSize( 1, keys );
assertEquals( e.obj(), *keys.begin() );
@@ -141,7 +145,7 @@ namespace NamespaceTests {
a.append( "a", b.done() );
a.append( "c", "foo" );
e.append( "", 4 );
- BSONObjSetDefaultOrder keys;
+ BSONObjSet keys;
id().getKeysFromObject( a.done(), keys );
checkSize( 1, keys );
ASSERT_EQUALS( e.obj(), *keys.begin() );
@@ -159,11 +163,11 @@ namespace NamespaceTests {
BSONObjBuilder b;
b.append( "a", shortArray()) ;
- BSONObjSetDefaultOrder keys;
+ BSONObjSet keys;
id().getKeysFromObject( b.done(), keys );
checkSize( 3, keys );
int j = 1;
- for ( BSONObjSetDefaultOrder::iterator i = keys.begin(); i != keys.end(); ++i, ++j ) {
+ for ( BSONObjSet::iterator i = keys.begin(); i != keys.end(); ++i, ++j ) {
BSONObjBuilder b;
b.append( "", j );
assertEquals( b.obj(), *i );
@@ -179,11 +183,11 @@ namespace NamespaceTests {
b.append( "a", shortArray() );
b.append( "b", 2 );
- BSONObjSetDefaultOrder keys;
+ BSONObjSet keys;
id().getKeysFromObject( b.done(), keys );
checkSize( 3, keys );
int j = 1;
- for ( BSONObjSetDefaultOrder::iterator i = keys.begin(); i != keys.end(); ++i, ++j ) {
+ for ( BSONObjSet::iterator i = keys.begin(); i != keys.end(); ++i, ++j ) {
BSONObjBuilder b;
b.append( "", j );
b.append( "", 2 );
@@ -204,11 +208,11 @@ namespace NamespaceTests {
b.append( "first", 5 );
b.append( "a", shortArray()) ;
- BSONObjSetDefaultOrder keys;
+ BSONObjSet keys;
id().getKeysFromObject( b.done(), keys );
checkSize( 3, keys );
int j = 1;
- for ( BSONObjSetDefaultOrder::iterator i = keys.begin(); i != keys.end(); ++i, ++j ) {
+ for ( BSONObjSet::iterator i = keys.begin(); i != keys.end(); ++i, ++j ) {
BSONObjBuilder b;
b.append( "", 5 );
b.append( "", j );
@@ -233,11 +237,11 @@ namespace NamespaceTests {
BSONObjBuilder a;
a.append( "a", b.done() );
- BSONObjSetDefaultOrder keys;
+ BSONObjSet keys;
id().getKeysFromObject( a.done(), keys );
checkSize( 3, keys );
int j = 1;
- for ( BSONObjSetDefaultOrder::iterator i = keys.begin(); i != keys.end(); ++i, ++j ) {
+ for ( BSONObjSet::iterator i = keys.begin(); i != keys.end(); ++i, ++j ) {
BSONObjBuilder b;
b.append( "", j );
assertEquals( b.obj(), *i );
@@ -257,7 +261,7 @@ namespace NamespaceTests {
b.append( "a", shortArray() );
b.append( "b", shortArray() );
- BSONObjSetDefaultOrder keys;
+ BSONObjSet keys;
ASSERT_EXCEPTION( id().getKeysFromObject( b.done(), keys ),
UserException );
}
@@ -277,11 +281,11 @@ namespace NamespaceTests {
BSONObjBuilder b;
b.append( "a", elts );
- BSONObjSetDefaultOrder keys;
+ BSONObjSet keys;
id().getKeysFromObject( b.done(), keys );
checkSize( 3, keys );
int j = 1;
- for ( BSONObjSetDefaultOrder::iterator i = keys.begin(); i != keys.end(); ++i, ++j ) {
+ for ( BSONObjSet::iterator i = keys.begin(); i != keys.end(); ++i, ++j ) {
BSONObjBuilder b;
b.append( "", j );
assertEquals( b.obj(), *i );
@@ -304,11 +308,11 @@ namespace NamespaceTests {
b.append( "a", elts );
b.append( "d", 99 );
- BSONObjSetDefaultOrder keys;
+ BSONObjSet keys;
id().getKeysFromObject( b.done(), keys );
checkSize( 3, keys );
int j = 1;
- for ( BSONObjSetDefaultOrder::iterator i = keys.begin(); i != keys.end(); ++i, ++j ) {
+ for ( BSONObjSet::iterator i = keys.begin(); i != keys.end(); ++i, ++j ) {
BSONObjBuilder c;
c.append( "", j );
c.append( "", 99 );
@@ -336,12 +340,13 @@ namespace NamespaceTests {
elts.push_back( simpleBC( i ) );
BSONObjBuilder b;
b.append( "a", elts );
-
- BSONObjSetDefaultOrder keys;
- id().getKeysFromObject( b.done(), keys );
+ BSONObj obj = b.obj();
+
+ BSONObjSet keys;
+ id().getKeysFromObject( obj, keys );
checkSize( 4, keys );
- BSONObjSetDefaultOrder::iterator i = keys.begin();
- assertEquals( nullObj(), *i++ );
+ BSONObjSet::iterator i = keys.begin();
+ assertEquals( nullObj(), *i++ ); // see SERVER-3377
for ( int j = 1; j < 4; ++i, ++j ) {
BSONObjBuilder b;
b.append( "", j );
@@ -366,7 +371,7 @@ namespace NamespaceTests {
BSONObjBuilder b;
b.append( "a", elts );
- BSONObjSetDefaultOrder keys;
+ BSONObjSet keys;
id().getKeysFromObject( b.done(), keys );
checkSize( 1, keys );
assertEquals( nullObj(), *keys.begin() );
@@ -381,7 +386,7 @@ namespace NamespaceTests {
public:
void run() {
create();
- BSONObjSetDefaultOrder keys;
+ BSONObjSet keys;
id().getKeysFromObject( BSON( "b" << 1 ), keys );
checkSize( 1, keys );
assertEquals( nullObj(), *keys.begin() );
@@ -396,7 +401,7 @@ namespace NamespaceTests {
public:
void run() {
create();
- BSONObjSetDefaultOrder keys;
+ BSONObjSet keys;
id().getKeysFromObject( fromjson( "{a:[1,2]}" ), keys );
checkSize( 1, keys );
assertEquals( nullObj(), *keys.begin() );
@@ -413,14 +418,14 @@ namespace NamespaceTests {
create();
{
- BSONObjSetDefaultOrder keys;
+ BSONObjSet keys;
id().getKeysFromObject( fromjson( "{x:'a',y:'b'}" ) , keys );
checkSize( 1 , keys );
assertEquals( BSON( "" << "a" << "" << "b" ) , *keys.begin() );
}
{
- BSONObjSetDefaultOrder keys;
+ BSONObjSet keys;
id().getKeysFromObject( fromjson( "{x:'a'}" ) , keys );
checkSize( 1 , keys );
BSONObjBuilder b;
@@ -442,7 +447,7 @@ namespace NamespaceTests {
public:
void run() {
create();
- BSONObjSetDefaultOrder keys;
+ BSONObjSet keys;
id().getKeysFromObject( fromjson( "{a:[{b:[2]}]}" ), keys );
checkSize( 1, keys );
assertEquals( BSON( "" << 2 ), *keys.begin() );
@@ -457,7 +462,7 @@ namespace NamespaceTests {
public:
void run() {
create();
- BSONObjSetDefaultOrder keys;
+ BSONObjSet keys;
ASSERT_EXCEPTION( id().getKeysFromObject( fromjson( "{a:[{b:[1],c:[2]}]}" ), keys ),
UserException );
}
@@ -471,10 +476,10 @@ namespace NamespaceTests {
public:
void run() {
create();
- BSONObjSetDefaultOrder keys;
+ BSONObjSet keys;
id().getKeysFromObject( fromjson( "{a:[{b:1},{c:2}]}" ), keys );
checkSize( 2, keys );
- BSONObjSetDefaultOrder::iterator i = keys.begin();
+ BSONObjSet::iterator i = keys.begin();
{
BSONObjBuilder e;
e.appendNull( "" );
@@ -499,7 +504,7 @@ namespace NamespaceTests {
public:
void run() {
create();
- BSONObjSetDefaultOrder keys;
+ BSONObjSet keys;
id().getKeysFromObject( fromjson( "{a:[{b:1},{b:[1,2,3]}]}" ), keys );
checkSize( 3, keys );
}
@@ -514,7 +519,7 @@ namespace NamespaceTests {
void run() {
create();
- BSONObjSetDefaultOrder keys;
+ BSONObjSet keys;
id().getKeysFromObject( fromjson( "{a:[1,2]}" ), keys );
checkSize(2, keys );
keys.clear();
@@ -529,16 +534,56 @@ namespace NamespaceTests {
id().getKeysFromObject( fromjson( "{a:[]}" ), keys );
checkSize(1, keys );
+ ASSERT_EQUALS( Undefined, keys.begin()->firstElement().type() );
keys.clear();
}
};
+
+ class DoubleArray : Base {
+ public:
+ void run() {
+ create();
+
+ BSONObjSet keys;
+ id().getKeysFromObject( fromjson( "{a:[1,2]}" ), keys );
+ checkSize(2, keys );
+ BSONObjSet::const_iterator i = keys.begin();
+ ASSERT_EQUALS( BSON( "" << 1 << "" << 1 ), *i );
+ ++i;
+ ASSERT_EQUALS( BSON( "" << 2 << "" << 2 ), *i );
+ keys.clear();
+ }
+
+ protected:
+ BSONObj key() const {
+ return BSON( "a" << 1 << "a" << 1 );
+ }
+ };
+
+ class DoubleEmptyArray : Base {
+ public:
+ void run() {
+ create();
+
+ BSONObjSet keys;
+ id().getKeysFromObject( fromjson( "{a:[]}" ), keys );
+ checkSize(1, keys );
+ ASSERT_EQUALS( fromjson( "{'':undefined,'':undefined}" ), *keys.begin() );
+ keys.clear();
+ }
+
+ protected:
+ BSONObj key() const {
+ return BSON( "a" << 1 << "a" << 1 );
+ }
+ };
class MultiEmptyArray : Base {
public:
void run() {
create();
- BSONObjSetDefaultOrder keys;
+ BSONObjSet keys;
id().getKeysFromObject( fromjson( "{a:1,b:[1,2]}" ), keys );
checkSize(2, keys );
keys.clear();
@@ -555,7 +600,9 @@ namespace NamespaceTests {
id().getKeysFromObject( fromjson( "{a:1,b:[]}" ), keys );
checkSize(1, keys );
//cout << "YO : " << *(keys.begin()) << endl;
- ASSERT_EQUALS( NumberInt , keys.begin()->firstElement().type() );
+ BSONObjIterator i( *keys.begin() );
+ ASSERT_EQUALS( NumberInt , i.next().type() );
+ ASSERT_EQUALS( Undefined , i.next().type() );
keys.clear();
}
@@ -564,8 +611,313 @@ namespace NamespaceTests {
return aAndB();
}
};
+
+ class NestedEmptyArray : Base {
+ public:
+ void run() {
+ create();
+
+ BSONObjSet keys;
+ id().getKeysFromObject( fromjson( "{a:[]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':null}" ), *keys.begin() );
+ keys.clear();
+ }
+ protected:
+ BSONObj key() const { return BSON( "a.b" << 1 ); }
+ };
+
+ class MultiNestedEmptyArray : Base {
+ public:
+ void run() {
+ create();
+
+ BSONObjSet keys;
+ id().getKeysFromObject( fromjson( "{a:[]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':null,'':null}" ), *keys.begin() );
+ keys.clear();
+ }
+ protected:
+ BSONObj key() const { return BSON( "a.b" << 1 << "a.c" << 1 ); }
+ };
+
+ class UnevenNestedEmptyArray : public Base {
+ public:
+ void run() {
+ create();
+
+ BSONObjSet keys;
+ id().getKeysFromObject( fromjson( "{a:[]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':undefined,'':null}" ), *keys.begin() );
+ keys.clear();
+
+ id().getKeysFromObject( fromjson( "{a:[{b:1}]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':{b:1},'':1}" ), *keys.begin() );
+ keys.clear();
+
+ id().getKeysFromObject( fromjson( "{a:[{b:[]}]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':{b:[]},'':undefined}" ), *keys.begin() );
+ keys.clear();
+ }
+ protected:
+ BSONObj key() const { return BSON( "a" << 1 << "a.b" << 1 ); }
+ };
+
+ class ReverseUnevenNestedEmptyArray : public Base {
+ public:
+ void run() {
+ create();
+
+ BSONObjSet keys;
+ id().getKeysFromObject( fromjson( "{a:[]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':null,'':undefined}" ), *keys.begin() );
+ keys.clear();
+ }
+ protected:
+ BSONObj key() const { return BSON( "a.b" << 1 << "a" << 1 ); }
+ };
+
+ class SparseReverseUnevenNestedEmptyArray : public Base {
+ public:
+ void run() {
+ create( true );
+
+ BSONObjSet keys;
+ id().getKeysFromObject( fromjson( "{a:[]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':null,'':undefined}" ), *keys.begin() );
+ keys.clear();
+ }
+ protected:
+ BSONObj key() const { return BSON( "a.b" << 1 << "a" << 1 ); }
+ };
+
+ class SparseEmptyArray : public Base {
+ public:
+ void run() {
+ create( true );
+
+ BSONObjSet keys;
+ id().getKeysFromObject( fromjson( "{a:1}" ), keys );
+ checkSize( 0, keys );
+ keys.clear();
+
+ id().getKeysFromObject( fromjson( "{a:[]}" ), keys );
+ checkSize( 0, keys );
+ keys.clear();
+
+ id().getKeysFromObject( fromjson( "{a:[{c:1}]}" ), keys );
+ checkSize( 0, keys );
+ keys.clear();
+ }
+ protected:
+ BSONObj key() const { return BSON( "a.b" << 1 ); }
+ };
+
+ class SparseEmptyArraySecond : public Base {
+ public:
+ void run() {
+ create( true );
+
+ BSONObjSet keys;
+ id().getKeysFromObject( fromjson( "{a:1}" ), keys );
+ checkSize( 0, keys );
+ keys.clear();
+
+ id().getKeysFromObject( fromjson( "{a:[]}" ), keys );
+ checkSize( 0, keys );
+ keys.clear();
+
+ id().getKeysFromObject( fromjson( "{a:[{c:1}]}" ), keys );
+ checkSize( 0, keys );
+ keys.clear();
+ }
+ protected:
+ BSONObj key() const { return BSON( "z" << 1 << "a.b" << 1 ); }
+ };
+
+ class NonObjectMissingNestedField : public Base {
+ public:
+ void run() {
+ create();
+
+ BSONObjSet keys;
+ id().getKeysFromObject( fromjson( "{a:[]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':null}" ), *keys.begin() );
+ keys.clear();
+
+ id().getKeysFromObject( fromjson( "{a:[1]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':null}" ), *keys.begin() );
+ keys.clear();
+
+ id().getKeysFromObject( fromjson( "{a:[1,{b:1}]}" ), keys );
+ checkSize( 2, keys );
+ BSONObjSet::const_iterator c = keys.begin();
+ ASSERT_EQUALS( fromjson( "{'':null}" ), *c );
+ ++c;
+ ASSERT_EQUALS( fromjson( "{'':1}" ), *c );
+ keys.clear();
+ }
+ protected:
+ BSONObj key() const { return BSON( "a.b" << 1 ); }
+ };
+
+ class SparseNonObjectMissingNestedField : public Base {
+ public:
+ void run() {
+ create( true );
+
+ BSONObjSet keys;
+ id().getKeysFromObject( fromjson( "{a:[]}" ), keys );
+ checkSize( 0, keys );
+ keys.clear();
+
+ id().getKeysFromObject( fromjson( "{a:[1]}" ), keys );
+ checkSize( 0, keys );
+ keys.clear();
+
+ id().getKeysFromObject( fromjson( "{a:[1,{b:1}]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':1}" ), *keys.begin() );
+ keys.clear();
+ }
+ protected:
+ BSONObj key() const { return BSON( "a.b" << 1 ); }
+ };
+
+ class IndexedArrayIndex : public Base {
+ public:
+ void run() {
+ create();
+
+ BSONObjSet keys;
+ id().getKeysFromObject( fromjson( "{a:[1]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( BSON( "" << 1 ), *keys.begin() );
+ keys.clear();
+
+ id().getKeysFromObject( fromjson( "{a:[[1]]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':[1]}" ), *keys.begin() );
+ keys.clear();
+
+ id().getKeysFromObject( fromjson( "{a:[[]]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':undefined}" ), *keys.begin() );
+ keys.clear();
+
+ id().getKeysFromObject( fromjson( "{a:{'0':1}}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( BSON( "" << 1 ), *keys.begin() );
+ keys.clear();
+
+ ASSERT_EXCEPTION( id().getKeysFromObject( fromjson( "{a:[{'0':1}]}" ), keys ), UserException );
+
+ ASSERT_EXCEPTION( id().getKeysFromObject( fromjson( "{a:[1,{'0':2}]}" ), keys ), UserException );
+ }
+ protected:
+ BSONObj key() const { return BSON( "a.0" << 1 ); }
+ };
+
+ class DoubleIndexedArrayIndex : public Base {
+ public:
+ void run() {
+ create();
+
+ BSONObjSet keys;
+ id().getKeysFromObject( fromjson( "{a:[[1]]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':1}" ), *keys.begin() );
+ keys.clear();
+
+ id().getKeysFromObject( fromjson( "{a:[[]]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':null}" ), *keys.begin() );
+ keys.clear();
+
+ id().getKeysFromObject( fromjson( "{a:[]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':null}" ), *keys.begin() );
+ keys.clear();
+
+ id().getKeysFromObject( fromjson( "{a:[[[]]]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':undefined}" ), *keys.begin() );
+ keys.clear();
+ }
+ protected:
+ BSONObj key() const { return BSON( "a.0.0" << 1 ); }
+ };
+
+ class ObjectWithinArray : public Base {
+ public:
+ void run() {
+ create();
+
+ BSONObjSet keys;
+ id().getKeysFromObject( fromjson( "{a:[{b:1}]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':1}" ), *keys.begin() );
+ keys.clear();
+ id().getKeysFromObject( fromjson( "{a:[{b:[1]}]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':1}" ), *keys.begin() );
+ keys.clear();
+
+ id().getKeysFromObject( fromjson( "{a:[{b:[[1]]}]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':[1]}" ), *keys.begin() );
+ keys.clear();
+
+ id().getKeysFromObject( fromjson( "{a:[[{b:1}]]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':1}" ), *keys.begin() );
+ keys.clear();
+
+ id().getKeysFromObject( fromjson( "{a:[[{b:[1]}]]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':1}" ), *keys.begin() );
+ keys.clear();
+ id().getKeysFromObject( fromjson( "{a:[[{b:[[1]]}]]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':[1]}" ), *keys.begin() );
+ keys.clear();
+
+ id().getKeysFromObject( fromjson( "{a:[[{b:[]}]]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':undefined}" ), *keys.begin() );
+ keys.clear();
+ }
+ protected:
+ BSONObj key() const { return BSON( "a.0.b" << 1 ); }
+ };
+
+ class ArrayWithinObjectWithinArray : public Base {
+ public:
+ void run() {
+ create();
+
+ BSONObjSet keys;
+ id().getKeysFromObject( fromjson( "{a:[{b:[1]}]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':1}" ), *keys.begin() );
+ keys.clear();
+ }
+ protected:
+ BSONObj key() const { return BSON( "a.0.b.0" << 1 ); }
+ };
+
+ // also test numeric string field names
+
} // namespace IndexDetailsTests
namespace NamespaceDetailsTests {
@@ -622,9 +974,11 @@ namespace NamespaceTests {
NamespaceDetails *nsd() const {
return nsdetails( ns() )->writingWithExtra();
}
- static BSONObj bigObj() {
- string as( 187, 'a' );
+ static BSONObj bigObj(bool bGenID=false) {
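+ // when bGenID is true, prepend a freshly generated OID _id so each object is distinct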
BSONObjBuilder b;
+ if (bGenID)
+ b.appendOID("_id", 0, true);
+ string as( 187, 'a' );
b.append( "a", as );
return b.obj();
}
@@ -657,15 +1011,18 @@ namespace NamespaceTests {
public:
void run() {
create();
- BSONObj b = bigObj();
- DiskLoc l[ 6 ];
- for ( int i = 0; i < 6; ++i ) {
+ const int N = 20;
+ const int Q = 16; // these constants depend on the size of the bson object and on the extent size the system allocates
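+ // with the ~200-byte objects built by bigObj(true), roughly Q records fit in the capped extents before space is reused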
+ DiskLoc l[ N ];
+ for ( int i = 0; i < N; ++i ) {
+ BSONObj b = bigObj(true);
l[ i ] = theDataFileMgr.insert( ns(), b.objdata(), b.objsize() );
ASSERT( !l[ i ].isNull() );
- ASSERT_EQUALS( 1 + i % 2, nRecords() );
- if ( i > 1 )
- ASSERT( l[ i ] == l[ i - 2 ] );
+ ASSERT( nRecords() <= Q );
+ //ASSERT_EQUALS( 1 + i % 2, nRecords() );
+ if ( i >= Q )
+ ASSERT( l[ i ] == l[ i - Q ] );
}
}
};
@@ -682,14 +1039,15 @@ namespace NamespaceTests {
for ( int i = 0; i < 8; ++i ) {
l[ i ] = theDataFileMgr.insert( ns(), b.objdata(), b.objsize() );
ASSERT( !l[ i ].isNull() );
- ASSERT_EQUALS( i < 2 ? i + 1 : 3 + i % 2, nRecords() );
- if ( i > 3 )
- ASSERT( l[ i ] == l[ i - 4 ] );
+ //ASSERT_EQUALS( i < 2 ? i + 1 : 3 + i % 2, nRecords() );
+ //if ( i > 3 )
+ // ASSERT( l[ i ] == l[ i - 4 ] );
}
+ ASSERT( nRecords() == 8 );
// Too big
BSONObjBuilder bob;
- bob.append( "a", string( 787, 'a' ) );
+ bob.append( "a", string( MinExtentSize + 500, 'a' ) ); // min extent size is now 4096
BSONObj bigger = bob.done();
ASSERT( theDataFileMgr.insert( ns(), bigger.objdata(), bigger.objsize() ).isNull() );
ASSERT_EQUALS( 0, nRecords() );
@@ -710,16 +1068,24 @@ namespace NamespaceTests {
create();
ASSERT_EQUALS( 2, nExtents() );
- BSONObj b = bigObj();
+ BSONObj b = bigObj(true);
- DiskLoc l[ 8 ];
- for ( int i = 0; i < 8; ++i ) {
- l[ i ] = theDataFileMgr.insert( ns(), b.objdata(), b.objsize() );
- ASSERT( !l[ i ].isNull() );
- ASSERT_EQUALS( i < 2 ? i + 1 : 3 + i % 2, nRecords() );
+ int N = MinExtentSize / b.objsize() * nExtents() + 5;
+ int T = N - 4;
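+ // N: enough inserts to fill the extents and wrap; T: a record near the end, used below as the truncation point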
+
+ DiskLoc truncAt;
+ //DiskLoc l[ 8 ];
+ for ( int i = 0; i < N; ++i ) {
+ BSONObj bb = bigObj(true);
+ DiskLoc a = theDataFileMgr.insert( ns(), bb.objdata(), bb.objsize() );
+ if( T == i )
+ truncAt = a;
+ ASSERT( !a.isNull() );
+ /*ASSERT_EQUALS( i < 2 ? i + 1 : 3 + i % 2, nRecords() );
if ( i > 3 )
- ASSERT( l[ i ] == l[ i - 4 ] );
+ ASSERT( l[ i ] == l[ i - 4 ] );*/
}
+ ASSERT( nRecords() < N );
NamespaceDetails *nsd = nsdetails(ns());
@@ -736,10 +1102,8 @@ namespace NamespaceTests {
ASSERT( first != last ) ;
}
- DiskLoc d = l[6];
- long long n = nsd->stats.nrecords;
- nsd->cappedTruncateAfter(ns(), d, false);
- ASSERT_EQUALS( nsd->stats.nrecords , n-1 );
+ nsd->cappedTruncateAfter(ns(), truncAt, false);
+ ASSERT_EQUALS( nsd->stats.nrecords , 28 );
{
ForwardCappedCursor c(nsd);
@@ -753,7 +1117,8 @@ namespace NamespaceTests {
// Too big
BSONObjBuilder bob;
- bob.append( "a", string( 787, 'a' ) );
+ bob.appendOID("_id", 0, true);
+ bob.append( "a", string( MinExtentSize + 300, 'a' ) );
BSONObj bigger = bob.done();
ASSERT( theDataFileMgr.insert( ns(), bigger.objdata(), bigger.objsize() ).isNull() );
ASSERT_EQUALS( 0, nRecords() );
@@ -846,7 +1211,22 @@ namespace NamespaceTests {
add< IndexDetailsTests::AlternateMissing >();
add< IndexDetailsTests::MultiComplex >();
add< IndexDetailsTests::EmptyArray >();
+ add< IndexDetailsTests::DoubleArray >();
+ add< IndexDetailsTests::DoubleEmptyArray >();
add< IndexDetailsTests::MultiEmptyArray >();
+ add< IndexDetailsTests::NestedEmptyArray >();
+ add< IndexDetailsTests::MultiNestedEmptyArray >();
+ add< IndexDetailsTests::UnevenNestedEmptyArray >();
+ add< IndexDetailsTests::ReverseUnevenNestedEmptyArray >();
+ add< IndexDetailsTests::SparseReverseUnevenNestedEmptyArray >();
+ add< IndexDetailsTests::SparseEmptyArray >();
+ add< IndexDetailsTests::SparseEmptyArraySecond >();
+ add< IndexDetailsTests::NonObjectMissingNestedField >();
+ add< IndexDetailsTests::SparseNonObjectMissingNestedField >();
+ add< IndexDetailsTests::IndexedArrayIndex >();
+ add< IndexDetailsTests::DoubleIndexedArrayIndex >();
+ add< IndexDetailsTests::ObjectWithinArray >();
+ add< IndexDetailsTests::ArrayWithinObjectWithinArray >();
add< IndexDetailsTests::MissingField >();
add< IndexDetailsTests::SubobjectMissing >();
add< IndexDetailsTests::CompoundMissing >();
diff --git a/dbtests/pairingtests.cpp b/dbtests/pairingtests.cpp
deleted file mode 100644
index 9cca548..0000000
--- a/dbtests/pairingtests.cpp
+++ /dev/null
@@ -1,344 +0,0 @@
-// pairingtests.cpp : Pairing unit tests.
-//
-
-/**
- * Copyright (C) 2008 10gen Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License, version 3,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "pch.h"
-#include "../db/replpair.h"
-#include "dbtests.h"
-#include "mockdbclient.h"
-#include "../db/cmdline.h"
-
-namespace mongo {
- extern PairSync *pairSync;
-} // namespace mongo
-
-namespace PairingTests {
- class Base {
- protected:
- Base() {
- backup = pairSync;
- setSynced();
- }
- ~Base() {
- pairSync = backup;
- dblock lk;
- Helpers::emptyCollection( "local.pair.sync" );
- if ( pairSync->initialSyncCompleted() ) {
- // save to db
- pairSync->setInitialSyncCompleted();
- }
- }
- static void setSynced() {
- init();
- pairSync = synced;
- pairSync->setInitialSyncCompletedLocking();
- ASSERT( pairSync->initialSyncCompleted() );
- }
- static void setNotSynced() {
- init();
- pairSync = notSynced;
- ASSERT( !pairSync->initialSyncCompleted() );
- }
- static void flipSync() {
- if ( pairSync->initialSyncCompleted() )
- setNotSynced();
- else
- setSynced();
- }
- private:
- static void init() {
- dblock lk;
- Helpers::emptyCollection( "local.pair.sync" );
- if ( synced != 0 && notSynced != 0 )
- return;
- notSynced = new PairSync();
- notSynced->init();
- synced = new PairSync();
- synced->init();
- synced->setInitialSyncCompleted();
- Helpers::emptyCollection( "local.pair.sync" );
- }
- PairSync *backup;
- static PairSync *synced;
- static PairSync *notSynced;
- };
- PairSync *Base::synced = 0;
- PairSync *Base::notSynced = 0;
-
- namespace ReplPairTests {
- class Create : public Base {
- public:
- void run() {
- ReplPair rp1( "foo", "bar" );
- checkFields( rp1, "foo", "foo", CmdLine::DefaultDBPort, "bar" );
-
- ReplPair rp2( "foo:1", "bar" );
- checkFields( rp2, "foo:1", "foo", 1, "bar" );
-
- // FIXME Should we accept this input?
- ReplPair rp3( "", "bar" );
- checkFields( rp3, "", "", CmdLine::DefaultDBPort, "bar" );
-
- ASSERT_EXCEPTION( ReplPair( "foo:", "bar" ),
- UserException );
-
- ASSERT_EXCEPTION( ReplPair( "foo:0", "bar" ),
- UserException );
-
- ASSERT_EXCEPTION( ReplPair( "foo:10000000", "bar" ),
- UserException );
-
- ASSERT_EXCEPTION( ReplPair( "foo", "" ),
- UserException );
- }
- private:
- void checkFields( const ReplPair &rp,
- const string &remote,
- const string &remoteHost,
- int remotePort,
- const string &arbHost ) {
- ASSERT( rp.state == ReplPair::State_Negotiating );
- ASSERT_EQUALS( remote, rp.remote );
- ASSERT_EQUALS( remoteHost, rp.remoteHost );
- ASSERT_EQUALS( remotePort, rp.remotePort );
- ASSERT_EQUALS( arbHost, rp.arbHost );
- }
- };
-
- class Dominant : public Base {
- public:
- Dominant() : oldPort_( cmdLine.port ) {
- cmdLine.port = 10;
- }
- ~Dominant() {
- cmdLine.port = oldPort_;
- }
- void run() {
- ASSERT( ReplPair( "b:9", "-" ).dominant( "b" ) );
- ASSERT( !ReplPair( "b:10", "-" ).dominant( "b" ) );
- ASSERT( ReplPair( "b", "-" ).dominant( "c" ) );
- ASSERT( !ReplPair( "b", "-" ).dominant( "a" ) );
- }
- private:
- int oldPort_;
- };
-
- class SetMaster {
- public:
- void run() {
- ReplPair rp( "a", "b" );
- rp.setMaster( ReplPair::State_CantArb, "foo" );
- ASSERT( rp.state == ReplPair::State_CantArb );
- ASSERT_EQUALS( rp.info , "foo" );
- rp.setMaster( ReplPair::State_Confused, "foo" );
- ASSERT( rp.state == ReplPair::State_Confused );
- }
- };
-
- class Negotiate : public Base {
- public:
- void run() {
- ReplPair rp( "a", "b" );
- MockDBClientConnection cc;
-
- cc.one( res( 0, 0 ) );
- rp.negotiate( &cc, "dummy" );
- ASSERT( rp.state == ReplPair::State_Confused );
-
- rp.state = ReplPair::State_Negotiating;
- cc.one( res( 1, 2 ) );
- rp.negotiate( &cc, "dummy" );
- ASSERT( rp.state == ReplPair::State_Negotiating );
-
- cc.one( res( 1, ReplPair::State_Slave ) );
- rp.negotiate( &cc, "dummy" );
- ASSERT( rp.state == ReplPair::State_Slave );
-
- cc.one( res( 1, ReplPair::State_Master ) );
- rp.negotiate( &cc, "dummy" );
- ASSERT( rp.state == ReplPair::State_Master );
- }
- private:
- BSONObj res( int ok, int youAre ) {
- BSONObjBuilder b;
- b.append( "ok", ok );
- b.append( "you_are", youAre );
- return b.obj();
- }
- };
-
- class Arbitrate : public Base {
- public:
- void run() {
- ReplPair rp1( "a", "-" );
- rp1.arbitrate();
- ASSERT( rp1.state == ReplPair::State_Master );
-
- TestableReplPair rp2( false, BSONObj() );
- rp2.arbitrate();
- ASSERT( rp2.state == ReplPair::State_CantArb );
-
- TestableReplPair rp3( true, fromjson( "{ok:0}" ) );
- rp3.arbitrate();
- ASSERT_EQUALS( rp3.state , ReplPair::State_Confused );
-
- TestableReplPair rp4( true, fromjson( "{ok:1,you_are:1}" ) );
- rp4.arbitrate();
- ASSERT( rp4.state == ReplPair::State_Master );
-
- TestableReplPair rp5( true, fromjson( "{ok:1,you_are:0}" ) );
- rp5.arbitrate();
- ASSERT( rp5.state == ReplPair::State_Slave );
-
- TestableReplPair rp6( true, fromjson( "{ok:1,you_are:-1}" ) );
- rp6.arbitrate();
- // unchanged from initial value
- ASSERT( rp6.state == ReplPair::State_Negotiating );
- }
- private:
- class TestableReplPair : public ReplPair {
- public:
- TestableReplPair( bool connect, const BSONObj &one ) :
- ReplPair( "a", "z" ),
- connect_( connect ),
- one_( one ) {
- }
- virtual
- DBClientConnection *newClientConnection() const {
- MockDBClientConnection * c = new MockDBClientConnection();
- c->connect( connect_ );
- c->one( one_ );
- return c;
- }
- private:
- bool connect_;
- BSONObj one_;
- };
- };
- } // namespace ReplPairTests
-
- class DirectConnectBase : public Base {
- public:
- virtual ~DirectConnectBase() {}
- protected:
- void negotiate( ReplPair &a, ReplPair &b ) {
- auto_ptr< DBClientConnection > c( new DirectDBClientConnection( &b, cc() ) );
- a.negotiate( c.get(), "dummy" );
- }
- virtual DirectDBClientConnection::ConnectionCallback *cc() {
- return 0;
- }
- void checkNegotiation( const char *host1, const char *arb1, int state1, int newState1,
- const char *host2, const char *arb2, int state2, int newState2 ) {
- ReplPair one( host1, arb1 );
- one.state = state1;
- ReplPair two( host2, arb2 );
- two.state = state2;
- negotiate( one, two );
- ASSERT( one.state == newState1 );
- ASSERT( two.state == newState2 );
- }
- };
-
- class Negotiate : public DirectConnectBase {
- public:
- void run() {
- checkNegotiation( "a", "-", ReplPair::State_Negotiating, ReplPair::State_Negotiating,
- "b", "-", ReplPair::State_Negotiating, ReplPair::State_Negotiating );
- checkNegotiation( "b", "-", ReplPair::State_Negotiating, ReplPair::State_Slave,
- "a", "-", ReplPair::State_Negotiating, ReplPair::State_Master );
-
- checkNegotiation( "b", "-", ReplPair::State_Master, ReplPair::State_Master,
- "a", "-", ReplPair::State_Negotiating, ReplPair::State_Slave );
-
- // No change when negotiate() called on a.
- checkNegotiation( "a", "-", ReplPair::State_Master, ReplPair::State_Master,
- "b", "-", ReplPair::State_Master, ReplPair::State_Master );
- // Resolve Master - Master.
- checkNegotiation( "b", "-", ReplPair::State_Master, ReplPair::State_Slave,
- "a", "-", ReplPair::State_Master, ReplPair::State_Master );
-
- // FIXME Move from negotiating to master?
- checkNegotiation( "b", "-", ReplPair::State_Slave, ReplPair::State_Slave,
- "a", "-", ReplPair::State_Negotiating, ReplPair::State_Master );
- }
- };
-
- class NegotiateWithCatchup : public DirectConnectBase {
- public:
- void run() {
- // a caught up, b not
- setNotSynced();
- checkNegotiation( "b", "-", ReplPair::State_Negotiating, ReplPair::State_Slave,
- "a", "-", ReplPair::State_Negotiating, ReplPair::State_Master );
- // b caught up, a not
- setSynced();
- checkNegotiation( "b", "-", ReplPair::State_Negotiating, ReplPair::State_Master,
- "a", "-", ReplPair::State_Negotiating, ReplPair::State_Slave );
-
- // a caught up, b not
- setNotSynced();
- checkNegotiation( "b", "-", ReplPair::State_Slave, ReplPair::State_Slave,
- "a", "-", ReplPair::State_Negotiating, ReplPair::State_Master );
- // b caught up, a not
- setSynced();
- checkNegotiation( "b", "-", ReplPair::State_Slave, ReplPair::State_Master,
- "a", "-", ReplPair::State_Negotiating, ReplPair::State_Slave );
- }
- private:
- class NegateCatchup : public DirectDBClientConnection::ConnectionCallback {
- virtual void beforeCommand() {
- Base::flipSync();
- }
- virtual void afterCommand() {
- Base::flipSync();
- }
- };
- virtual DirectDBClientConnection::ConnectionCallback *cc() {
- return &cc_;
- }
- NegateCatchup cc_;
- };
-
- class NobodyCaughtUp : public DirectConnectBase {
- public:
- void run() {
- setNotSynced();
- checkNegotiation( "b", "-", ReplPair::State_Negotiating, ReplPair::State_Negotiating,
- "a", "-", ReplPair::State_Negotiating, ReplPair::State_Slave );
- }
- };
-
- class All : public Suite {
- public:
- All() : Suite( "pairing" ) {
- }
-
- void setupTests() {
- add< ReplPairTests::Create >();
- add< ReplPairTests::Dominant >();
- add< ReplPairTests::SetMaster >();
- add< ReplPairTests::Negotiate >();
- add< ReplPairTests::Arbitrate >();
- add< Negotiate >();
- add< NegotiateWithCatchup >();
- add< NobodyCaughtUp >();
- }
- } myall;
-} // namespace PairingTests
-
diff --git a/dbtests/pdfiletests.cpp b/dbtests/pdfiletests.cpp
index 2844fc4..26c837c 100644
--- a/dbtests/pdfiletests.cpp
+++ b/dbtests/pdfiletests.cpp
@@ -360,7 +360,7 @@ namespace PdfileTests {
if( n == 5 && sizeof(void*)==4 )
break;
MongoDataFile * f = d->addAFile( big , false );
- cout << f->length() << ' ' << n << endl;
+ //cout << f->length() << ' ' << n << endl;
if ( f->length() == l )
break;
l = f->length();
@@ -368,7 +368,7 @@ namespace PdfileTests {
int start = d->numFiles();
for ( int i=0; i<start; i++ )
- d->allocExtent( c1.c_str() , d->getFile( i )->getHeader()->unusedLength , false );
+ d->allocExtent( c1.c_str() , d->getFile( i )->getHeader()->unusedLength , false, false );
ASSERT_EQUALS( start , d->numFiles() );
{
diff --git a/dbtests/perf/perftest.cpp b/dbtests/perf/perftest.cpp
index ef03551..b6219f7 100644
--- a/dbtests/perf/perftest.cpp
+++ b/dbtests/perf/perftest.cpp
@@ -21,7 +21,7 @@
#include "../../client/dbclient.h"
#include "../../db/instance.h"
-#include "../../db/query.h"
+#include "../../db/ops/query.h"
#include "../../db/queryoptimizer.h"
#include "../../util/file_allocator.h"
@@ -330,6 +330,37 @@ namespace BSON {
BSONObj o_;
};
+ template <int LEN>
+ class Copy {
+ public:
+ Copy(){
+ // putting it in a subobject to force copy on getOwned
+ BSONObjBuilder outer;
+ BSONObjBuilder b (outer.subobjStart("inner"));
+ while (b.len() < LEN)
+ b.append(BSONObjBuilder::numStr(b.len()), b.len());
+ b.done();
+ _base = outer.obj();
+ }
+
+ void run() {
+ int iterations = 1000*1000;
+ while (iterations--){
+ BSONObj temp = copy(_base.firstElement().embeddedObject().getOwned());
+ }
+ }
+
+ private:
+ // noinline should force copying even when optimized
+ NOINLINE_DECL BSONObj copy(BSONObj x){
+ return x;
+ }
+
+ BSONObj _base;
+ };
+
+
+
class All : public RunnerSuite {
public:
All() : RunnerSuite( "bson" ) {}
@@ -338,6 +369,10 @@ namespace BSON {
add< ShopwikiParse >();
add< Json >();
add< ShopwikiJson >();
+ add< Copy<10> >();
+ add< Copy<100> >();
+ add< Copy<1000> >();
+ add< Copy<10*1000> >();
}
} all;
@@ -684,12 +719,43 @@ namespace Plan {
add< Query >();
}
} all;
-
} // namespace Plan
+namespace Misc {
+ class TimeMicros64 {
+ public:
+ void run() {
+ int iterations = 1000*1000;
+ while(iterations--){
+ curTimeMicros64();
+ }
+ }
+ };
+
+ class JSTime {
+ public:
+ void run() {
+ int iterations = 1000*1000;
+ while(iterations--){
+ jsTime();
+ }
+ }
+ };
+
+ class All : public RunnerSuite {
+ public:
+ All() : RunnerSuite("misc") {}
+ void setupTests() {
+ add< TimeMicros64 >();
+ add< JSTime >();
+ }
+ } all;
+}
+
int main( int argc, char **argv ) {
logLevel = -1;
client_ = new DBDirectClient();
return Suite::run(argc, argv, "/data/db/perftest");
}
+
diff --git a/dbtests/perftests.cpp b/dbtests/perftests.cpp
index 182595c..3d9b6ee 100644
--- a/dbtests/perftests.cpp
+++ b/dbtests/perftests.cpp
@@ -23,18 +23,33 @@
*/
#include "pch.h"
-#include "../db/query.h"
+#include "../db/ops/query.h"
#include "../db/db.h"
#include "../db/instance.h"
#include "../db/json.h"
#include "../db/lasterror.h"
-#include "../db/update.h"
+#include "../db/ops/update.h"
#include "../db/taskqueue.h"
#include "../util/timer.h"
#include "dbtests.h"
#include "../db/dur_stats.h"
+#include "../util/checksum.h"
+#include "../util/version.h"
+#include "../db/key.h"
+#include "../util/compress.h"
+
+using namespace bson;
+
+namespace mongo {
+ namespace regression {
+ extern unsigned perfHist;
+ }
+}
namespace PerfTests {
+
+ const bool profiling = false;
+
typedef DBDirectClient DBClientType;
//typedef DBClientConnection DBClientType;
@@ -92,29 +107,173 @@ namespace PerfTests {
};
int TaskQueueTest::tot;
- class CappedTest : public ClientBase {
- };
-
class B : public ClientBase {
string _ns;
protected:
const char *ns() { return _ns.c_str(); }
- virtual void prep() = 0;
+
+ // anything you want to do before being timed
+ virtual void prep() { }
virtual void timed() = 0;
// optional 2nd test phase to be timed separately
// return name of it
- virtual const char * timed2() { return 0; }
+ virtual string timed2() { return ""; }
virtual void post() { }
+
virtual string name() = 0;
- virtual unsigned long long expectation() = 0;
- virtual int howLongMillis() { return 5000; }
+ virtual unsigned long long expectation() { return 0; }
+ virtual int expectationTimeMillis() { return -1; }
+
+ // how long to run the test. 0 is a sentinel meaning: just run the timed() method once and time it.
+ virtual int howLongMillis() { return profiling ? 60000 : 5000; }
+
+ /* override to return false if your test's output doesn't need the durability stats */
+ virtual bool showDurStats() { return true; }
+
+ static DBClientConnection *conn;
+ static unsigned once;
+
public:
+ /* to have the timings recorded in the perf database, place its password
+ in ../../settings.py (or ./settings.py):
+ pstatspassword="<pwd>"
+ */
+ void connect() {
+ if( once )
+ return;
+ ++once;
+
+ // no writing to perf db if _DEBUG
+ DEV return;
+
+ const char *fn = "../../settings.py";
+ if( !exists(fn) ) {
+ if( exists("settings.py") )
+ fn = "settings.py";
+ else {
+ cout << "no ../../settings.py or ./settings.py file found. will not write perf stats to pstats db." << endl;
+ cout << "it is recommended this be enabled even on dev boxes" << endl;
+ return;
+ }
+ }
+
+ try {
+ if( conn == 0 ) {
+ MemoryMappedFile f;
+ const char *p = (const char *) f.mapWithOptions(fn, MongoFile::READONLY);
+ string pwd;
+
+ {
+ const char *q = str::after(p, "pstatspassword=\"");
+ if( *q == 0 ) {
+ cout << "info perftests.cpp: no pstatspassword= in settings.py" << endl;
+ return;
+ }
+ else {
+ pwd = str::before(q, '\"');
+ }
+ }
+
+ DBClientConnection *c = new DBClientConnection(false, 0, 10);
+ string err;
+ if( c->connect("perfdb.10gen.cc", err) ) {
+ if( !c->auth("perf", "perf", pwd, err) ) {
+ cout << "info: authentication with stats db failed: " << err << endl;
+ assert(false);
+ }
+ conn = c;
+ }
+ else {
+ cout << err << " (to log perfstats)" << endl;
+ }
+ }
+ }
+ catch(...) { }
+ }
+
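+ // number of timed() calls executed between elapsed-time checks in run()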
+ virtual unsigned batchSize() { return 50; }
+
void say(unsigned long long n, int ms, string s) {
- cout << setw(36) << left << s << ' ' << right << setw(7) << n*1000/ms << "/sec " << setw(4) << ms << "ms" << endl;
- cout << dur::stats.curr->_asObj().toString() << endl;
+ unsigned long long rps = n*1000/ms;
+ cout << "stats " << setw(33) << left << s << ' ' << right << setw(9) << rps << ' ' << right << setw(5) << ms << "ms ";
+ if( showDurStats() )
+ cout << dur::stats.curr->_asCSV();
+ cout << endl;
+
+ connect();
+
+ if( conn && !conn->isFailed() ) {
+ const char *ns = "perf.pstats";
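+ // perfHist is the number of previous results to fetch from the stats db for comparison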
+ if( perfHist ) {
+ static bool needver = true;
+ try {
+ // try to report rps from the last recorded run
+ Query q;
+ {
+ BSONObjBuilder b;
+ b.append("host",getHostName()).append("test",s).append("dur",cmdLine.dur);
+ DEV { b.append("info.DEBUG",true); }
+ else b.appendNull("info.DEBUG");
+ if( sizeof(int*) == 4 )
+ b.append("info.bits", 32);
+ else
+ b.appendNull("info.bits");
+ q = Query(b.obj()).sort("when",-1);
+ }
+ BSONObj fields = BSON( "rps" << 1 << "info" << 1 );
+ vector<BSONObj> v;
+ conn->findN(v, ns, q, perfHist, 0, &fields);
+ for( vector<BSONObj>::iterator i = v.begin(); i != v.end(); i++ ) {
+ BSONObj o = *i;
+ double lastrps = o["rps"].Number();
+ if( lastrps ) {
+ cout << "stats " << setw(33) << right << "new/old:" << ' ' << setw(9);
+ cout << fixed << setprecision(2) << rps / lastrps;
+ if( needver ) {
+ cout << " " << o.getFieldDotted("info.git").toString();
+ }
+ cout << '\n';
+ }
+ }
+ } catch(...) { }
+ cout.flush();
+ needver = false;
+ }
+ {
+ bob b;
+ b.append("host", getHostName());
+ b.appendTimeT("when", time(0));
+ b.append("test", s);
+ b.append("rps", (int) rps);
+ b.append("millis", ms);
+ b.appendBool("dur", cmdLine.dur);
+ if( showDurStats() && cmdLine.dur )
+ b.append("durStats", dur::stats.curr->_asObj());
+ {
+ bob inf;
+ inf.append("version", versionString);
+ if( sizeof(int*) == 4 ) inf.append("bits", 32);
+ DEV inf.append("DEBUG", true);
+#if defined(_WIN32)
+ inf.append("os", "win");
+#endif
+ inf.append("git", gitVersion());
+ inf.append("boost", BOOST_VERSION);
+ b.append("info", inf.obj());
+ }
+ BSONObj o = b.obj();
+ //cout << "inserting " << o.toString() << endl;
+ try {
+ conn->insert(ns, o);
+ }
+ catch ( std::exception& e ) {
+ warning() << "couldn't save perf results: " << e.what() << endl;
+ }
+ }
+ }
}
void run() {
_ns = string("perftest.") + name();
@@ -123,33 +282,54 @@ namespace PerfTests {
prep();
int hlm = howLongMillis();
+ DEV {
+ // don't run very long with _DEBUG - not very meaningful anyway on that build
+ hlm = min(hlm, 500);
+ }
dur::stats._intervalMicros = 0; // no auto rotate
dur::stats.curr->reset();
- Timer t;
+ mongo::Timer t;
unsigned long long n = 0;
- const unsigned Batch = 50;
- do {
- unsigned i;
- for( i = 0; i < Batch; i++ )
- timed();
- n += i;
+ const unsigned Batch = batchSize();
+
+ if( hlm == 0 ) {
+ // means just do once
+ timed();
+ }
+ else {
+ do {
+ unsigned i;
+ for( i = 0; i < Batch; i++ )
+ timed();
+ n += i;
+ } while( t.millis() < hlm );
}
- while( t.millis() < hlm );
+
client().getLastError(); // block until all ops are finished
int ms = t.millis();
+
say(n, ms, name());
- if( n < expectation() ) {
- cout << "\ntest " << name() << " seems slow n:" << n << " ops/sec but expect greater than:" << expectation() << endl;
- cout << endl;
+ int etm = expectationTimeMillis();
+ DEV {
+ }
+ else if( etm > 0 ) {
+ if( ms > etm*2 ) {
+ cout << "test " << name() << " seems slow expected ~" << etm << "ms" << endl;
+ }
+ }
+ else if( n < expectation() ) {
+ cout << "test " << name() << " seems slow n:" << n << " ops/sec but expect greater than:" << expectation() << endl;
}
+ post();
+
{
- const char *test2name = timed2();
- if( test2name ) {
+ string test2name = timed2();
+ if( test2name.size() != 0 ) {
dur::stats.curr->reset();
- Timer t;
+ mongo::Timer t;
unsigned long long n = 0;
while( 1 ) {
unsigned i;
@@ -166,12 +346,374 @@ namespace PerfTests {
}
};
+ DBClientConnection *B::conn;
+ unsigned B::once;
+
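+ // global sink incremented by trivial timed() bodies so the optimizer can't remove them entirely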
+ unsigned dontOptimizeOutHopefully;
+
+ class NonDurTest : public B {
+ public:
+ virtual int howLongMillis() { return 3000; }
+ virtual bool showDurStats() { return false; }
+ };
+
+ class BSONIter : public NonDurTest {
+ public:
+ int n;
+ bo b, sub;
+ string name() { return "BSONIter"; }
+ BSONIter() {
+ n = 0;
+ bo sub = bob().appendTimeT("t", time(0)).appendBool("abool", true).appendBinData("somebin", 3, BinDataGeneral, "abc").appendNull("anullone").obj();
+ b = BSON( "_id" << OID() << "x" << 3 << "yaaaaaa" << 3.00009 << "zz" << 1 << "q" << false << "obj" << sub << "zzzzzzz" << "a string a string" );
+ }
+ void timed() {
+ for( bo::iterator i = b.begin(); i.more(); )
+ if( i.next().fieldName() )
+ n++;
+ for( bo::iterator i = sub.begin(); i.more(); )
+ if( i.next().fieldName() )
+ n++;
+ }
+ };
+
+ class BSONGetFields1 : public NonDurTest {
+ public:
+ int n;
+ bo b, sub;
+ string name() { return "BSONGetFields1By1"; }
+ BSONGetFields1() {
+ n = 0;
+ bo sub = bob().appendTimeT("t", time(0)).appendBool("abool", true).appendBinData("somebin", 3, BinDataGeneral, "abc").appendNull("anullone").obj();
+ b = BSON( "_id" << OID() << "x" << 3 << "yaaaaaa" << 3.00009 << "zz" << 1 << "q" << false << "obj" << sub << "zzzzzzz" << "a string a string" );
+ }
+ void timed() {
+ if( b["x"].eoo() )
+ n++;
+ if( b["q"].eoo() )
+ n++;
+ if( b["zzz"].eoo() )
+ n++;
+ }
+ };
+
+ class BSONGetFields2 : public BSONGetFields1 {
+ public:
+ string name() { return "BSONGetFields"; }
+ void timed() {
+ static const char *names[] = { "x", "q", "zzz" };
+ BSONElement elements[3];
+ b.getFields(3, names, elements);
+ if( elements[0].eoo() )
+ n++;
+ if( elements[1].eoo() )
+ n++;
+ if( elements[2].eoo() )
+ n++;
+ }
+ };
+
+ class KeyTest : public B {
+ public:
+ KeyV1Owned a,b,c;
+ string name() { return "Key-woequal"; }
+ virtual int howLongMillis() { return 3000; }
+ KeyTest() :
+ a(BSON("a"<<1<<"b"<<3.0<<"c"<<"qqq")),
+ b(BSON("a"<<1<<"b"<<3.0<<"c"<<"qqq")),
+ c(BSON("a"<<1<<"b"<<3.0<<"c"<<"qqqb"))
+ {}
+ virtual bool showDurStats() { return false; }
+ void timed() {
+ assert( a.woEqual(b) );
+ assert( !a.woEqual(c) );
+ }
+ };
+
+ unsigned long long aaa;
+
+ class Timer : public B {
+ public:
+ string name() { return "Timer"; }
+ virtual int howLongMillis() { return 1000; }
+ virtual bool showDurStats() { return false; }
+ void timed() {
+ mongo::Timer t;
+ aaa += t.millis();
+ }
+ };
+
+ RWLock lk("testrw");
+ SimpleMutex m("simptst");
+ mongo::mutex mtest("mtest");
+ SpinLock s;
+
+ class mutexspeed : public B {
+ public:
+ string name() { return "mutex"; }
+ virtual int howLongMillis() { return 500; }
+ virtual bool showDurStats() { return false; }
+ void timed() {
+ mongo::mutex::scoped_lock lk(mtest);
+ }
+ };
+ class simplemutexspeed : public B {
+ public:
+ string name() { return "simplemutex"; }
+ virtual int howLongMillis() { return 500; }
+ virtual bool showDurStats() { return false; }
+ void timed() {
+ SimpleMutex::scoped_lock lk(m);
+ }
+ };
+ class spinlockspeed : public B {
+ public:
+ string name() { return "spinlock"; }
+ virtual int howLongMillis() { return 500; }
+ virtual bool showDurStats() { return false; }
+ void timed() {
+ mongo::scoped_spinlock lk(s);
+ }
+ };
+
+ class rlock : public B {
+ public:
+ string name() { return "rlock"; }
+ virtual int howLongMillis() { return 500; }
+ virtual bool showDurStats() { return false; }
+ void timed() {
+ lk.lock_shared();
+ lk.unlock_shared();
+ }
+ };
+ class wlock : public B {
+ public:
+ string name() { return "wlock"; }
+ virtual int howLongMillis() { return 500; }
+ virtual bool showDurStats() { return false; }
+ void timed() {
+ lk.lock();
+ lk.unlock();
+ }
+ };
+
+#if 0
+ class ulock : public B {
+ public:
+ string name() { return "ulock"; }
+ virtual int howLongMillis() { return 500; }
+ virtual bool showDurStats() { return false; }
+ void timed() {
+ lk.lockAsUpgradable();
+ lk.unlockFromUpgradable();
+ }
+ };
+#endif
+
+ class CTM : public B {
+ public:
+ CTM() : last(0), delts(0), n(0) { }
+ string name() { return "curTimeMillis64"; }
+ virtual int howLongMillis() { return 500; }
+ virtual bool showDurStats() { return false; }
+ unsigned long long last;
+ unsigned long long delts;
+ unsigned n;
+ void timed() {
+ unsigned long long x = curTimeMillis64();
+ aaa += x;
+ if( last ) {
+ unsigned long long delt = x-last;
+ if( delt ) {
+ delts += delt;
+ n++;
+ }
+ }
+ last = x;
+ }
+ void post() {
+ // we need to know if the timer is very coarse-grained - that could be relevant in some places
+ if( n )
+ cout << " avg timer granularity: " << ((double)delts)/n << "ms " << endl;
+ }
+ };
+
+ class Bldr : public B {
+ public:
+ int n;
+ string name() { return "BufBuilder"; }
+ Bldr() {
+ }
+ virtual int howLongMillis() { return 3000; }
+ virtual bool showDurStats() { return false; }
+ void timed() {
+ BufBuilder b;
+ b.appendNum(3);
+ b.appendUChar(' ');
+ b.appendStr("abcd");
+ n += b.len();
+ }
+ };
+
+ class StkBldr : public B {
+ public:
+ virtual int howLongMillis() { return 3000; }
+ int n;
+ string name() { return "StackBufBuilder"; }
+ virtual bool showDurStats() { return false; }
+ void timed() {
+ StackBufBuilder b;
+ b.appendNum(3);
+ b.appendUChar(' ');
+ b.appendStr("abcd");
+ n += b.len();
+ }
+ };
+
+ // if a test is this fast, it was optimized out
+ class Dummy : public B {
+ public:
+ Dummy() { }
+ virtual int howLongMillis() { return 3000; }
+ string name() { return "dummy"; }
+ void timed() {
+ dontOptimizeOutHopefully++;
+ }
+ unsigned long long expectation() { return 1000000; }
+ virtual bool showDurStats() { return false; }
+ };
+
+ // test thread-local storage access speed
+ class TLS : public B {
+ public:
+ TLS() { }
+ virtual int howLongMillis() { return 3000; }
+ string name() { return "thread-local-storage"; }
+ void timed() {
+ if( &cc() )
+ dontOptimizeOutHopefully++;
+ }
+ unsigned long long expectation() { return 1000000; }
+ virtual bool showDurStats() { return false; }
+ };
+
+ class Malloc : public B {
+ public:
+ Malloc() { }
+ virtual int howLongMillis() { return 4000; }
+ string name() { return "malloc"; }
+ void timed() {
+ char *p = new char[128];
+ if( dontOptimizeOutHopefully++ > 0 )
+ delete p;
+ }
+ unsigned long long expectation() { return 1000000; }
+ virtual bool showDurStats() { return false; }
+ };
+
+ class Compress : public B {
+ public:
+ const unsigned sz;
+ void *p;
+ Compress() : sz(1024*1024*100+3) { }
+ virtual unsigned batchSize() { return 1; }
+ string name() { return "compress"; }
+ virtual bool showDurStats() { return false; }
+ virtual int howLongMillis() { return 4000; }
+ unsigned long long expectation() { return 1000000; }
+ void prep() {
+ p = malloc(sz);
+ // this isn't a fair test since the data is mostly random, but we just want a rough perf check
+ static int last;
+ for (unsigned i = 0; i<sz; i++) {
+ int r = rand();
+ if( (r & 0x300) == 0x300 )
+ r = last;
+ ((char*)p)[i] = r;
+ last = r;
+ }
+ }
+ size_t last;
+ string res;
+ void timed() {
+ mongo::Timer t;
+ string out;
+ size_t len = compress((const char *) p, sz, &out);
+ bool ok = uncompress(out.c_str(), out.size(), &res);
+ ASSERT(ok);
+ static unsigned once;
+ if( once++ == 0 )
+ cout << "compress round trip " << sz/(1024.0*1024) / (t.millis()/1000.0) << "MB/sec\n";
+ //cout << len / (1024.0*1024) << " compressed" << endl;
+ (void)len; // suppress the unused-variable warning while the line above is commented out
+ }
+ void post() {
+ ASSERT( memcmp(res.c_str(), p, sz) == 0 );
+ free(p);
+ }
+ };
+
+ // test speed of checksum method
+ class ChecksumTest : public B {
+ public:
+ const unsigned sz;
+ ChecksumTest() : sz(1024*1024*100+3) { }
+ string name() { return "checksum"; }
+ virtual int howLongMillis() { return 2000; }
+ int expectationTimeMillis() { return 5000; }
+ virtual bool showDurStats() { return false; }
+ virtual unsigned batchSize() { return 1; }
+
+ void *p;
+
+ void prep() {
+ {
+ // the checksum code assumes 'standard' rollover on addition overflows. let's check that:
+ unsigned long long x = 0xffffffffffffffffULL;
+ ASSERT( x+2 == 1 );
+ }
+
+ p = malloc(sz);
+ for (unsigned i = 0; i<sz; i++)
+ ((char*)p)[i] = rand();
+ }
+
+ Checksum last;
+
+ void timed() {
+ static int i;
+ Checksum c;
+ c.gen(p, sz);
+ if( i == 0 )
+ last = c;
+ else if( i == 1 ) {
+ ASSERT( c == last );
+ }
+ }
+ void post() {
+ {
+ mongo::Checksum c;
+ c.gen(p, sz-1);
+ ASSERT( c != last );
+ ((char *&)p)[0]++; // check that the same data in a different order doesn't give the same checksum
+ ((char *&)p)[1]--;
+ c.gen(p, sz);
+ ASSERT( c != last );
+ ((char *&)p)[1]++; // check that the same data in a different order doesn't give the same checksum (different longwords case)
+ ((char *&)p)[8]--;
+ c.gen(p, sz);
+ ASSERT( c != last );
+ }
+ free(p);
+ }
+ };
+
class InsertDup : public B {
const BSONObj o;
public:
InsertDup() : o( BSON("_id" << 1) ) { } // dup keys
string name() {
- return "insert duplicate _ids";
+ return "insert-duplicate-_ids";
}
void prep() {
client().insert( ns(), o );
@@ -185,21 +727,32 @@ namespace PerfTests {
unsigned long long expectation() { return 1000; }
};
- class Insert1 : public InsertDup {
+ class Insert1 : public B {
const BSONObj x;
+ OID oid;
+ BSONObj query;
public:
- Insert1() : x( BSON("x" << 99) ) { }
- string name() { return "insert simple"; }
+ Insert1() : x( BSON("x" << 99) ) {
+ oid.init();
+ query = BSON("_id" << oid);
+ }
+ string name() { return "insert-simple"; }
void timed() {
client().insert( ns(), x );
}
+ string timed2() {
+ client().findOne(ns(), query);
+ return "findOne_by_id";
+ }
void post() {
- assert( client().count(ns()) > 100 );
+#if !defined(_DEBUG)
+ assert( client().count(ns()) > 50 );
+#endif
}
unsigned long long expectation() { return 1000; }
};
- class InsertBig : public InsertDup {
+ class InsertBig : public B {
BSONObj x;
virtual int howLongMillis() {
if( sizeof(void*) == 4 )
@@ -214,7 +767,7 @@ namespace PerfTests {
b.appendBinData("bin", 200000, (BinDataType) 129, buf);
x = b.obj();
}
- string name() { return "insert big"; }
+ string name() { return "insert-big"; }
void timed() {
client().insert( ns(), x );
}
@@ -223,7 +776,7 @@ namespace PerfTests {
class InsertRandom : public B {
public:
- string name() { return "random inserts"; }
+ string name() { return "random-inserts"; }
void prep() {
client().insert( ns(), BSONObj() );
client().ensureIndex(ns(), BSON("x"<<1));
@@ -233,8 +786,6 @@ namespace PerfTests {
BSONObj y = BSON("x" << x << "y" << rand() << "z" << 33);
client().insert(ns(), y);
}
- void post() {
- }
unsigned long long expectation() { return 1000; }
};
@@ -246,7 +797,7 @@ namespace PerfTests {
static int rand() {
return std::rand() & 0x7fff;
}
- string name() { return "random upserts"; }
+ virtual string name() { return "random-upserts"; }
void prep() {
client().insert( ns(), BSONObj() );
client().ensureIndex(ns(), BSON("x"<<1));
@@ -258,7 +809,7 @@ namespace PerfTests {
client().update(ns(), q, y, /*upsert*/true);
}
- const char * timed2() {
+ virtual string timed2() {
static BSONObj I = BSON( "$inc" << BSON( "y" << 1 ) );
// test some $inc's
@@ -267,23 +818,31 @@ namespace PerfTests {
BSONObj q = BSON("x" << x);
client().update(ns(), q, I);
- return "inc";
+ return name()+"-inc";
}
- void post() {
- }
unsigned long long expectation() { return 1000; }
};
template <typename T>
class MoreIndexes : public T {
public:
- string name() { return T::name() + " with more indexes"; }
+ string name() { return T::name() + "-with-more-indexes"; }
void prep() {
T::prep();
this->client().ensureIndex(this->ns(), BSON("y"<<1));
this->client().ensureIndex(this->ns(), BSON("z"<<1));
}
+
+ /*
+ virtual string timed2() {
+ string x = T::timed2();
+ if ( x.size() == 0 )
+ return x;
+
+ return x + "-with-more-indexes";
+ }
+ */
};
void t() {
@@ -310,11 +869,8 @@ namespace PerfTests {
class All : public Suite {
public:
- All() : Suite( "perf" )
- {
- }
- ~All() {
- }
+ All() : Suite( "perf" ) { }
+
Result * run( const string& filter ) {
boost::thread a(t);
Result * res = Suite::run(filter);
@@ -323,14 +879,41 @@ namespace PerfTests {
}
void setupTests() {
- add< TaskQueueTest >();
- add< InsertDup >();
- add< Insert1 >();
- add< InsertRandom >();
- add< MoreIndexes<InsertRandom> >();
- add< Update1 >();
- add< MoreIndexes<Update1> >();
- add< InsertBig >();
+ cout
+ << "stats test rps------ time-- "
+ << dur::stats.curr->_CSVHeader() << endl;
+ if( profiling ) {
+ add< Update1 >();
+ }
+ else {
+ add< Dummy >();
+ add< ChecksumTest >();
+ add< Compress >();
+ add< TLS >();
+ add< Malloc >();
+ add< Timer >();
+ add< rlock >();
+ add< wlock >();
+ //add< ulock >();
+ add< mutexspeed >();
+ add< simplemutexspeed >();
+ add< spinlockspeed >();
+ add< CTM >();
+ add< KeyTest >();
+ add< Bldr >();
+ add< StkBldr >();
+ add< BSONIter >();
+ add< BSONGetFields1 >();
+ add< BSONGetFields2 >();
+ add< TaskQueueTest >();
+ add< InsertDup >();
+ add< Insert1 >();
+ add< InsertRandom >();
+ add< MoreIndexes<InsertRandom> >();
+ add< Update1 >();
+ add< MoreIndexes<Update1> >();
+ add< InsertBig >();
+ }
}
} myall;
}
diff --git a/dbtests/queryoptimizertests.cpp b/dbtests/queryoptimizertests.cpp
index 2d6f752..38d631e 100644
--- a/dbtests/queryoptimizertests.cpp
+++ b/dbtests/queryoptimizertests.cpp
@@ -19,12 +19,13 @@
#include "pch.h"
#include "../db/queryoptimizer.h"
-#include "../db/db.h"
-#include "../db/dbhelpers.h"
+#include "../db/querypattern.h"
#include "../db/instance.h"
-#include "../db/query.h"
+#include "../db/ops/query.h"
+#include "../db/ops/delete.h"
#include "dbtests.h"
+
namespace mongo {
extern BSONObj id_obj;
void runQuery(Message& m, QueryMessage& q, Message &response ) {
@@ -36,704 +37,22 @@ namespace mongo {
Message response;
runQuery( m, q, response );
}
+ void __forceLinkGeoPlugin();
+ shared_ptr<Cursor> newQueryOptimizerCursor( const char *ns, const BSONObj &query, const BSONObj &order = BSONObj() );
} // namespace mongo
namespace QueryOptimizerTests {
- namespace FieldRangeTests {
- class Base {
- public:
- virtual ~Base() {}
- void run() {
- const FieldRangeSet s( "ns", query() );
- checkElt( lower(), s.range( "a" ).min() );
- checkElt( upper(), s.range( "a" ).max() );
- ASSERT_EQUALS( lowerInclusive(), s.range( "a" ).minInclusive() );
- ASSERT_EQUALS( upperInclusive(), s.range( "a" ).maxInclusive() );
- }
- protected:
- virtual BSONObj query() = 0;
- virtual BSONElement lower() { return minKey.firstElement(); }
- virtual bool lowerInclusive() { return true; }
- virtual BSONElement upper() { return maxKey.firstElement(); }
- virtual bool upperInclusive() { return true; }
- static void checkElt( BSONElement expected, BSONElement actual ) {
- if ( expected.woCompare( actual, false ) ) {
- log() << "expected: " << expected << ", got: " << actual;
- ASSERT( false );
- }
- }
- };
-
-
- class NumericBase : public Base {
- public:
- NumericBase() {
- o = BSON( "min" << -numeric_limits<double>::max() << "max" << numeric_limits<double>::max() );
- }
-
- virtual BSONElement lower() { return o["min"]; }
- virtual BSONElement upper() { return o["max"]; }
- private:
- BSONObj o;
- };
-
- class Empty : public Base {
- virtual BSONObj query() { return BSONObj(); }
- };
-
- class Eq : public Base {
- public:
- Eq() : o_( BSON( "a" << 1 ) ) {}
- virtual BSONObj query() { return o_; }
- virtual BSONElement lower() { return o_.firstElement(); }
- virtual BSONElement upper() { return o_.firstElement(); }
- BSONObj o_;
- };
-
- class DupEq : public Eq {
- public:
- virtual BSONObj query() { return BSON( "a" << 1 << "b" << 2 << "a" << 1 ); }
- };
-
- class Lt : public NumericBase {
- public:
- Lt() : o_( BSON( "-" << 1 ) ) {}
- virtual BSONObj query() { return BSON( "a" << LT << 1 ); }
- virtual BSONElement upper() { return o_.firstElement(); }
- virtual bool upperInclusive() { return false; }
- BSONObj o_;
- };
-
- class Lte : public Lt {
- virtual BSONObj query() { return BSON( "a" << LTE << 1 ); }
- virtual bool upperInclusive() { return true; }
- };
-
- class Gt : public NumericBase {
- public:
- Gt() : o_( BSON( "-" << 1 ) ) {}
- virtual BSONObj query() { return BSON( "a" << GT << 1 ); }
- virtual BSONElement lower() { return o_.firstElement(); }
- virtual bool lowerInclusive() { return false; }
- BSONObj o_;
- };
-
- class Gte : public Gt {
- virtual BSONObj query() { return BSON( "a" << GTE << 1 ); }
- virtual bool lowerInclusive() { return true; }
- };
-
- class TwoLt : public Lt {
- virtual BSONObj query() { return BSON( "a" << LT << 1 << LT << 5 ); }
- };
-
- class TwoGt : public Gt {
- virtual BSONObj query() { return BSON( "a" << GT << 0 << GT << 1 ); }
- };
-
- class EqGte : public Eq {
- virtual BSONObj query() { return BSON( "a" << 1 << "a" << GTE << 1 ); }
- };
-
- class EqGteInvalid {
- public:
- void run() {
- FieldRangeSet fbs( "ns", BSON( "a" << 1 << "a" << GTE << 2 ) );
- ASSERT( !fbs.matchPossible() );
- }
- };
-
- struct RegexBase : Base {
- void run() { //need to only look at first interval
- FieldRangeSet s( "ns", query() );
- checkElt( lower(), s.range( "a" ).intervals()[0]._lower._bound );
- checkElt( upper(), s.range( "a" ).intervals()[0]._upper._bound );
- ASSERT_EQUALS( lowerInclusive(), s.range( "a" ).intervals()[0]._lower._inclusive );
- ASSERT_EQUALS( upperInclusive(), s.range( "a" ).intervals()[0]._upper._inclusive );
- }
- };
-
- class Regex : public RegexBase {
- public:
- Regex() : o1_( BSON( "" << "abc" ) ), o2_( BSON( "" << "abd" ) ) {}
- virtual BSONObj query() {
- BSONObjBuilder b;
- b.appendRegex( "a", "^abc" );
- return b.obj();
- }
- virtual BSONElement lower() { return o1_.firstElement(); }
- virtual BSONElement upper() { return o2_.firstElement(); }
- virtual bool upperInclusive() { return false; }
- BSONObj o1_, o2_;
- };
-
- class RegexObj : public RegexBase {
- public:
- RegexObj() : o1_( BSON( "" << "abc" ) ), o2_( BSON( "" << "abd" ) ) {}
- virtual BSONObj query() { return BSON("a" << BSON("$regex" << "^abc")); }
- virtual BSONElement lower() { return o1_.firstElement(); }
- virtual BSONElement upper() { return o2_.firstElement(); }
- virtual bool upperInclusive() { return false; }
- BSONObj o1_, o2_;
- };
-
- class UnhelpfulRegex : public RegexBase {
- public:
- UnhelpfulRegex() {
- BSONObjBuilder b;
- b.appendMinForType("lower", String);
- b.appendMaxForType("upper", String);
- limits = b.obj();
- }
-
- virtual BSONObj query() {
- BSONObjBuilder b;
- b.appendRegex( "a", "abc" );
- return b.obj();
- }
- virtual BSONElement lower() { return limits["lower"]; }
- virtual BSONElement upper() { return limits["upper"]; }
- virtual bool upperInclusive() { return false; }
- BSONObj limits;
- };
-
- class In : public Base {
- public:
- In() : o1_( BSON( "-" << -3 ) ), o2_( BSON( "-" << 44 ) ) {}
- virtual BSONObj query() {
- vector< int > vals;
- vals.push_back( 4 );
- vals.push_back( 8 );
- vals.push_back( 44 );
- vals.push_back( -1 );
- vals.push_back( -3 );
- vals.push_back( 0 );
- BSONObjBuilder bb;
- bb.append( "$in", vals );
- BSONObjBuilder b;
- b.append( "a", bb.done() );
- return b.obj();
- }
- virtual BSONElement lower() { return o1_.firstElement(); }
- virtual BSONElement upper() { return o2_.firstElement(); }
- BSONObj o1_, o2_;
- };
-
- class Equality {
- public:
- void run() {
- FieldRangeSet s( "ns", BSON( "a" << 1 ) );
- ASSERT( s.range( "a" ).equality() );
- FieldRangeSet s2( "ns", BSON( "a" << GTE << 1 << LTE << 1 ) );
- ASSERT( s2.range( "a" ).equality() );
- FieldRangeSet s3( "ns", BSON( "a" << GT << 1 << LTE << 1 ) );
- ASSERT( !s3.range( "a" ).equality() );
- FieldRangeSet s4( "ns", BSON( "a" << GTE << 1 << LT << 1 ) );
- ASSERT( !s4.range( "a" ).equality() );
- FieldRangeSet s5( "ns", BSON( "a" << GTE << 1 << LTE << 1 << GT << 1 ) );
- ASSERT( !s5.range( "a" ).equality() );
- FieldRangeSet s6( "ns", BSON( "a" << GTE << 1 << LTE << 1 << LT << 1 ) );
- ASSERT( !s6.range( "a" ).equality() );
- }
- };
-
- class SimplifiedQuery {
- public:
- void run() {
- FieldRangeSet fbs( "ns", BSON( "a" << GT << 1 << GT << 5 << LT << 10 << "b" << 4 << "c" << LT << 4 << LT << 6 << "d" << GTE << 0 << GT << 0 << "e" << GTE << 0 << LTE << 10 ) );
- BSONObj simple = fbs.simplifiedQuery();
- cout << "simple: " << simple << endl;
- ASSERT( !simple.getObjectField( "a" ).woCompare( fromjson( "{$gt:5,$lt:10}" ) ) );
- ASSERT_EQUALS( 4, simple.getIntField( "b" ) );
- ASSERT( !simple.getObjectField( "c" ).woCompare( BSON("$gte" << -numeric_limits<double>::max() << "$lt" << 4 ) ) );
- ASSERT( !simple.getObjectField( "d" ).woCompare( BSON("$gt" << 0 << "$lte" << numeric_limits<double>::max() ) ) );
- ASSERT( !simple.getObjectField( "e" ).woCompare( fromjson( "{$gte:0,$lte:10}" ) ) );
- }
- };
-
- class QueryPatternTest {
- public:
- void run() {
- ASSERT( p( BSON( "a" << 1 ) ) == p( BSON( "a" << 1 ) ) );
- ASSERT( p( BSON( "a" << 1 ) ) == p( BSON( "a" << 5 ) ) );
- ASSERT( p( BSON( "a" << 1 ) ) != p( BSON( "b" << 1 ) ) );
- ASSERT( p( BSON( "a" << 1 ) ) != p( BSON( "a" << LTE << 1 ) ) );
- ASSERT( p( BSON( "a" << 1 ) ) != p( BSON( "a" << 1 << "b" << 2 ) ) );
- ASSERT( p( BSON( "a" << 1 << "b" << 3 ) ) != p( BSON( "a" << 1 ) ) );
- ASSERT( p( BSON( "a" << LT << 1 ) ) == p( BSON( "a" << LTE << 5 ) ) );
- ASSERT( p( BSON( "a" << LT << 1 << GTE << 0 ) ) == p( BSON( "a" << LTE << 5 << GTE << 0 ) ) );
- ASSERT( p( BSON( "a" << 1 ) ) < p( BSON( "a" << 1 << "b" << 1 ) ) );
- ASSERT( !( p( BSON( "a" << 1 << "b" << 1 ) ) < p( BSON( "a" << 1 ) ) ) );
- ASSERT( p( BSON( "a" << 1 ), BSON( "b" << 1 ) ) == p( BSON( "a" << 4 ), BSON( "b" << "a" ) ) );
- ASSERT( p( BSON( "a" << 1 ), BSON( "b" << 1 ) ) == p( BSON( "a" << 4 ), BSON( "b" << -1 ) ) );
- ASSERT( p( BSON( "a" << 1 ), BSON( "b" << 1 ) ) != p( BSON( "a" << 4 ), BSON( "c" << 1 ) ) );
- ASSERT( p( BSON( "a" << 1 ), BSON( "b" << 1 << "c" << -1 ) ) == p( BSON( "a" << 4 ), BSON( "b" << -1 << "c" << 1 ) ) );
- ASSERT( p( BSON( "a" << 1 ), BSON( "b" << 1 << "c" << 1 ) ) != p( BSON( "a" << 4 ), BSON( "b" << 1 ) ) );
- ASSERT( p( BSON( "a" << 1 ), BSON( "b" << 1 ) ) != p( BSON( "a" << 4 ), BSON( "b" << 1 << "c" << 1 ) ) );
- }
- private:
- static QueryPattern p( const BSONObj &query, const BSONObj &sort = BSONObj() ) {
- return FieldRangeSet( "", query ).pattern( sort );
- }
- };
-
- class NoWhere {
- public:
- void run() {
- ASSERT_EQUALS( 0, FieldRangeSet( "ns", BSON( "$where" << 1 ) ).nNontrivialRanges() );
- }
- };
-
- class Numeric {
- public:
- void run() {
- FieldRangeSet f( "", BSON( "a" << 1 ) );
- ASSERT( f.range( "a" ).min().woCompare( BSON( "a" << 2.0 ).firstElement() ) < 0 );
- ASSERT( f.range( "a" ).min().woCompare( BSON( "a" << 0.0 ).firstElement() ) > 0 );
- }
- };
-
- class InLowerBound {
- public:
- void run() {
- FieldRangeSet f( "", fromjson( "{a:{$gt:4,$in:[1,2,3,4,5,6]}}" ) );
- ASSERT( f.range( "a" ).min().woCompare( BSON( "a" << 5.0 ).firstElement(), false ) == 0 );
- ASSERT( f.range( "a" ).max().woCompare( BSON( "a" << 6.0 ).firstElement(), false ) == 0 );
- }
- };
-
- class InUpperBound {
- public:
- void run() {
- FieldRangeSet f( "", fromjson( "{a:{$lt:4,$in:[1,2,3,4,5,6]}}" ) );
- ASSERT( f.range( "a" ).min().woCompare( BSON( "a" << 1.0 ).firstElement(), false ) == 0 );
- ASSERT( f.range( "a" ).max().woCompare( BSON( "a" << 3.0 ).firstElement(), false ) == 0 );
- }
- };
-
- class UnionBound {
- public:
- void run() {
- FieldRangeSet frs( "", fromjson( "{a:{$gt:1,$lt:9},b:{$gt:9,$lt:12}}" ) );
- FieldRange ret = frs.range( "a" );
- ret |= frs.range( "b" );
- ASSERT_EQUALS( 2U, ret.intervals().size() );
- }
- };
-
- class MultiBound {
- public:
- void run() {
- FieldRangeSet frs1( "", fromjson( "{a:{$in:[1,3,5,7,9]}}" ) );
- FieldRangeSet frs2( "", fromjson( "{a:{$in:[2,3,5,8,9]}}" ) );
- FieldRange fr1 = frs1.range( "a" );
- FieldRange fr2 = frs2.range( "a" );
- fr1 &= fr2;
- ASSERT( fr1.min().woCompare( BSON( "a" << 3.0 ).firstElement(), false ) == 0 );
- ASSERT( fr1.max().woCompare( BSON( "a" << 9.0 ).firstElement(), false ) == 0 );
- vector< FieldInterval > intervals = fr1.intervals();
- vector< FieldInterval >::const_iterator j = intervals.begin();
- double expected[] = { 3, 5, 9 };
- for( int i = 0; i < 3; ++i, ++j ) {
- ASSERT_EQUALS( expected[ i ], j->_lower._bound.number() );
- ASSERT( j->_lower._inclusive );
- ASSERT( j->_lower == j->_upper );
- }
- ASSERT( j == intervals.end() );
- }
- };
-
- class DiffBase {
- public:
- virtual ~DiffBase() {}
- void run() {
- FieldRangeSet frs( "", fromjson( obj().toString() ) );
- FieldRange ret = frs.range( "a" );
- ret -= frs.range( "b" );
- check( ret );
- }
- protected:
- void check( const FieldRange &fr ) {
- vector< FieldInterval > fi = fr.intervals();
- ASSERT_EQUALS( len(), fi.size() );
- int i = 0;
- for( vector< FieldInterval >::const_iterator j = fi.begin(); j != fi.end(); ++j ) {
- ASSERT_EQUALS( nums()[ i ], j->_lower._bound.numberInt() );
- ASSERT_EQUALS( incs()[ i ], j->_lower._inclusive );
- ++i;
- ASSERT_EQUALS( nums()[ i ], j->_upper._bound.numberInt() );
- ASSERT_EQUALS( incs()[ i ], j->_upper._inclusive );
- ++i;
- }
- }
- virtual unsigned len() const = 0;
- virtual const int *nums() const = 0;
- virtual const bool *incs() const = 0;
- virtual BSONObj obj() const = 0;
- };
-
- class TwoRangeBase : public DiffBase {
- public:
- TwoRangeBase( string obj, int low, int high, bool lowI, bool highI )
- : _obj( obj ) {
- _n[ 0 ] = low;
- _n[ 1 ] = high;
- _b[ 0 ] = lowI;
- _b[ 1 ] = highI;
- }
- private:
- virtual unsigned len() const { return 1; }
- virtual const int *nums() const { return _n; }
- virtual const bool *incs() const { return _b; }
- virtual BSONObj obj() const { return fromjson( _obj ); }
- string _obj;
- int _n[ 2 ];
- bool _b[ 2 ];
- };
-
- struct Diff1 : public TwoRangeBase {
- Diff1() : TwoRangeBase( "{a:{$gt:1,$lt:2},b:{$gt:3,$lt:4}}", 1, 2, false, false ) {}
- };
-
- struct Diff2 : public TwoRangeBase {
- Diff2() : TwoRangeBase( "{a:{$gt:1,$lt:2},b:{$gt:2,$lt:4}}", 1, 2, false, false ) {}
- };
-
- struct Diff3 : public TwoRangeBase {
- Diff3() : TwoRangeBase( "{a:{$gt:1,$lte:2},b:{$gt:2,$lt:4}}", 1, 2, false, true ) {}
- };
-
- struct Diff4 : public TwoRangeBase {
- Diff4() : TwoRangeBase( "{a:{$gt:1,$lt:2},b:{$gte:2,$lt:4}}", 1, 2, false, false) {}
- };
-
- struct Diff5 : public TwoRangeBase {
- Diff5() : TwoRangeBase( "{a:{$gt:1,$lte:2},b:{$gte:2,$lt:4}}", 1, 2, false, false) {}
- };
-
- struct Diff6 : public TwoRangeBase {
- Diff6() : TwoRangeBase( "{a:{$gt:1,$lte:3},b:{$gte:2,$lt:4}}", 1, 2, false, false) {}
- };
-
- struct Diff7 : public TwoRangeBase {
- Diff7() : TwoRangeBase( "{a:{$gt:1,$lte:3},b:{$gt:2,$lt:4}}", 1, 2, false, true) {}
- };
-
- struct Diff8 : public TwoRangeBase {
- Diff8() : TwoRangeBase( "{a:{$gt:1,$lt:4},b:{$gt:2,$lt:4}}", 1, 2, false, true) {}
- };
-
- struct Diff9 : public TwoRangeBase {
- Diff9() : TwoRangeBase( "{a:{$gt:1,$lt:4},b:{$gt:2,$lte:4}}", 1, 2, false, true) {}
- };
-
- struct Diff10 : public TwoRangeBase {
- Diff10() : TwoRangeBase( "{a:{$gt:1,$lte:4},b:{$gt:2,$lte:4}}", 1, 2, false, true) {}
- };
-
- class SplitRangeBase : public DiffBase {
- public:
- SplitRangeBase( string obj, int low1, bool low1I, int high1, bool high1I, int low2, bool low2I, int high2, bool high2I )
- : _obj( obj ) {
- _n[ 0 ] = low1;
- _n[ 1 ] = high1;
- _n[ 2 ] = low2;
- _n[ 3 ] = high2;
- _b[ 0 ] = low1I;
- _b[ 1 ] = high1I;
- _b[ 2 ] = low2I;
- _b[ 3 ] = high2I;
- }
- private:
- virtual unsigned len() const { return 2; }
- virtual const int *nums() const { return _n; }
- virtual const bool *incs() const { return _b; }
- virtual BSONObj obj() const { return fromjson( _obj ); }
- string _obj;
- int _n[ 4 ];
- bool _b[ 4 ];
- };
-
- struct Diff11 : public SplitRangeBase {
- Diff11() : SplitRangeBase( "{a:{$gt:1,$lte:4},b:{$gt:2,$lt:4}}", 1, false, 2, true, 4, true, 4, true) {}
- };
-
- struct Diff12 : public SplitRangeBase {
- Diff12() : SplitRangeBase( "{a:{$gt:1,$lt:5},b:{$gt:2,$lt:4}}", 1, false, 2, true, 4, true, 5, false) {}
- };
-
- struct Diff13 : public TwoRangeBase {
- Diff13() : TwoRangeBase( "{a:{$gt:1,$lt:5},b:{$gt:1,$lt:4}}", 4, 5, true, false) {}
- };
-
- struct Diff14 : public SplitRangeBase {
- Diff14() : SplitRangeBase( "{a:{$gte:1,$lt:5},b:{$gt:1,$lt:4}}", 1, true, 1, true, 4, true, 5, false) {}
- };
-
- struct Diff15 : public TwoRangeBase {
- Diff15() : TwoRangeBase( "{a:{$gt:1,$lt:5},b:{$gte:1,$lt:4}}", 4, 5, true, false) {}
- };
-
- struct Diff16 : public TwoRangeBase {
- Diff16() : TwoRangeBase( "{a:{$gte:1,$lt:5},b:{$gte:1,$lt:4}}", 4, 5, true, false) {}
- };
-
- struct Diff17 : public TwoRangeBase {
- Diff17() : TwoRangeBase( "{a:{$gt:1,$lt:5},b:{$gt:0,$lt:4}}", 4, 5, true, false) {}
- };
-
- struct Diff18 : public TwoRangeBase {
- Diff18() : TwoRangeBase( "{a:{$gt:1,$lt:5},b:{$gt:0,$lte:4}}", 4, 5, false, false) {}
- };
-
- struct Diff19 : public TwoRangeBase {
- Diff19() : TwoRangeBase( "{a:{$gte:1,$lte:5},b:{$gte:0,$lte:1}}", 1, 5, false, true) {}
- };
-
- struct Diff20 : public TwoRangeBase {
- Diff20() : TwoRangeBase( "{a:{$gt:1,$lte:5},b:{$gte:0,$lte:1}}", 1, 5, false, true) {}
- };
-
- struct Diff21 : public TwoRangeBase {
- Diff21() : TwoRangeBase( "{a:{$gte:1,$lte:5},b:{$gte:0,$lt:1}}", 1, 5, true, true) {}
- };
-
- struct Diff22 : public TwoRangeBase {
- Diff22() : TwoRangeBase( "{a:{$gt:1,$lte:5},b:{$gte:0,$lt:1}}", 1, 5, false, true) {}
- };
-
- struct Diff23 : public TwoRangeBase {
- Diff23() : TwoRangeBase( "{a:{$gt:1,$lte:5},b:{$gte:0,$lt:0.5}}", 1, 5, false, true) {}
- };
-
- struct Diff24 : public TwoRangeBase {
- Diff24() : TwoRangeBase( "{a:{$gt:1,$lte:5},b:0}", 1, 5, false, true) {}
- };
-
- struct Diff25 : public TwoRangeBase {
- Diff25() : TwoRangeBase( "{a:{$gte:1,$lte:5},b:0}", 1, 5, true, true) {}
- };
-
- struct Diff26 : public TwoRangeBase {
- Diff26() : TwoRangeBase( "{a:{$gt:1,$lte:5},b:1}", 1, 5, false, true) {}
- };
-
- struct Diff27 : public TwoRangeBase {
- Diff27() : TwoRangeBase( "{a:{$gte:1,$lte:5},b:1}", 1, 5, false, true) {}
- };
-
- struct Diff28 : public SplitRangeBase {
- Diff28() : SplitRangeBase( "{a:{$gte:1,$lte:5},b:3}", 1, true, 3, false, 3, false, 5, true) {}
- };
-
- struct Diff29 : public TwoRangeBase {
- Diff29() : TwoRangeBase( "{a:{$gte:1,$lte:5},b:5}", 1, 5, true, false) {}
- };
-
- struct Diff30 : public TwoRangeBase {
- Diff30() : TwoRangeBase( "{a:{$gte:1,$lt:5},b:5}", 1, 5, true, false) {}
- };
-
- struct Diff31 : public TwoRangeBase {
- Diff31() : TwoRangeBase( "{a:{$gte:1,$lt:5},b:6}", 1, 5, true, false) {}
- };
-
- struct Diff32 : public TwoRangeBase {
- Diff32() : TwoRangeBase( "{a:{$gte:1,$lte:5},b:6}", 1, 5, true, true) {}
- };
-
- class EmptyBase : public DiffBase {
- public:
- EmptyBase( string obj )
- : _obj( obj ) {}
- private:
- virtual unsigned len() const { return 0; }
- virtual const int *nums() const { return 0; }
- virtual const bool *incs() const { return 0; }
- virtual BSONObj obj() const { return fromjson( _obj ); }
- string _obj;
- };
-
- struct Diff33 : public EmptyBase {
- Diff33() : EmptyBase( "{a:{$gte:1,$lte:5},b:{$gt:0,$lt:6}}" ) {}
- };
-
- struct Diff34 : public EmptyBase {
- Diff34() : EmptyBase( "{a:{$gte:1,$lte:5},b:{$gte:1,$lt:6}}" ) {}
- };
-
- struct Diff35 : public EmptyBase {
- Diff35() : EmptyBase( "{a:{$gt:1,$lte:5},b:{$gte:1,$lt:6}}" ) {}
- };
-
- struct Diff36 : public EmptyBase {
- Diff36() : EmptyBase( "{a:{$gt:1,$lte:5},b:{$gt:1,$lt:6}}" ) {}
- };
-
- struct Diff37 : public TwoRangeBase {
- Diff37() : TwoRangeBase( "{a:{$gte:1,$lte:5},b:{$gt:1,$lt:6}}", 1, 1, true, true ) {}
- };
-
- struct Diff38 : public EmptyBase {
- Diff38() : EmptyBase( "{a:{$gt:1,$lt:5},b:{$gt:0,$lt:5}}" ) {}
- };
-
- struct Diff39 : public EmptyBase {
- Diff39() : EmptyBase( "{a:{$gt:1,$lt:5},b:{$gt:0,$lte:5}}" ) {}
- };
-
- struct Diff40 : public EmptyBase {
- Diff40() : EmptyBase( "{a:{$gt:1,$lte:5},b:{$gt:0,$lte:5}}" ) {}
- };
-
- struct Diff41 : public TwoRangeBase {
- Diff41() : TwoRangeBase( "{a:{$gte:1,$lte:5},b:{$gt:0,$lt:5}}", 5, 5, true, true ) {}
- };
-
- struct Diff42 : public EmptyBase {
- Diff42() : EmptyBase( "{a:{$gt:1,$lt:5},b:{$gt:1,$lt:5}}" ) {}
- };
-
- struct Diff43 : public EmptyBase {
- Diff43() : EmptyBase( "{a:{$gt:1,$lt:5},b:{$gt:1,$lte:5}}" ) {}
- };
-
- struct Diff44 : public EmptyBase {
- Diff44() : EmptyBase( "{a:{$gt:1,$lt:5},b:{$gte:1,$lt:5}}" ) {}
- };
-
- struct Diff45 : public EmptyBase {
- Diff45() : EmptyBase( "{a:{$gt:1,$lt:5},b:{$gte:1,$lte:5}}" ) {}
- };
-
- struct Diff46 : public TwoRangeBase {
- Diff46() : TwoRangeBase( "{a:{$gt:1,$lte:5},b:{$gt:1,$lt:5}}", 5, 5, true, true ) {}
- };
-
- struct Diff47 : public EmptyBase {
- Diff47() : EmptyBase( "{a:{$gt:1,$lte:5},b:{$gt:1,$lte:5}}" ) {}
- };
-
- struct Diff48 : public TwoRangeBase {
- Diff48() : TwoRangeBase( "{a:{$gt:1,$lte:5},b:{$gte:1,$lt:5}}", 5, 5, true, true ) {}
- };
-
- struct Diff49 : public EmptyBase {
- Diff49() : EmptyBase( "{a:{$gt:1,$lte:5},b:{$gte:1,$lte:5}}" ) {}
- };
-
- struct Diff50 : public TwoRangeBase {
- Diff50() : TwoRangeBase( "{a:{$gte:1,$lt:5},b:{$gt:1,$lt:5}}", 1, 1, true, true ) {}
- };
-
- struct Diff51 : public TwoRangeBase {
- Diff51() : TwoRangeBase( "{a:{$gte:1,$lt:5},b:{$gt:1,$lte:5}}", 1, 1, true, true ) {}
- };
-
- struct Diff52 : public EmptyBase {
- Diff52() : EmptyBase( "{a:{$gte:1,$lt:5},b:{$gte:1,$lt:5}}" ) {}
- };
-
- struct Diff53 : public EmptyBase {
- Diff53() : EmptyBase( "{a:{$gte:1,$lt:5},b:{$gte:1,$lte:5}}" ) {}
- };
-
- struct Diff54 : public SplitRangeBase {
- Diff54() : SplitRangeBase( "{a:{$gte:1,$lte:5},b:{$gt:1,$lt:5}}", 1, true, 1, true, 5, true, 5, true ) {}
- };
-
- struct Diff55 : public TwoRangeBase {
- Diff55() : TwoRangeBase( "{a:{$gte:1,$lte:5},b:{$gt:1,$lte:5}}", 1, 1, true, true ) {}
- };
-
- struct Diff56 : public TwoRangeBase {
- Diff56() : TwoRangeBase( "{a:{$gte:1,$lte:5},b:{$gte:1,$lt:5}}", 5, 5, true, true ) {}
- };
-
- struct Diff57 : public EmptyBase {
- Diff57() : EmptyBase( "{a:{$gte:1,$lte:5},b:{$gte:1,$lte:5}}" ) {}
- };
-
- struct Diff58 : public TwoRangeBase {
- Diff58() : TwoRangeBase( "{a:1,b:{$gt:1,$lt:5}}", 1, 1, true, true ) {}
- };
-
- struct Diff59 : public EmptyBase {
- Diff59() : EmptyBase( "{a:1,b:{$gte:1,$lt:5}}" ) {}
- };
-
- struct Diff60 : public EmptyBase {
- Diff60() : EmptyBase( "{a:2,b:{$gte:1,$lt:5}}" ) {}
- };
-
- struct Diff61 : public EmptyBase {
- Diff61() : EmptyBase( "{a:5,b:{$gte:1,$lte:5}}" ) {}
- };
-
- struct Diff62 : public TwoRangeBase {
- Diff62() : TwoRangeBase( "{a:5,b:{$gt:1,$lt:5}}", 5, 5, true, true ) {}
- };
-
- struct Diff63 : public EmptyBase {
- Diff63() : EmptyBase( "{a:5,b:5}" ) {}
- };
-
- struct Diff64 : public TwoRangeBase {
- Diff64() : TwoRangeBase( "{a:{$gte:1,$lte:2},b:{$gt:0,$lte:1}}", 1, 2, false, true ) {}
- };
-
- class DiffMulti1 : public DiffBase {
- public:
- void run() {
- FieldRangeSet frs( "", fromjson( "{a:{$gt:1,$lt:9},b:{$gt:0,$lt:2},c:3,d:{$gt:4,$lt:5},e:{$gt:7,$lt:10}}" ) );
- FieldRange ret = frs.range( "a" );
- FieldRange other = frs.range( "b" );
- other |= frs.range( "c" );
- other |= frs.range( "d" );
- other |= frs.range( "e" );
- ret -= other;
- check( ret );
- }
- protected:
- virtual unsigned len() const { return 3; }
- virtual const int *nums() const { static int n[] = { 2, 3, 3, 4, 5, 7 }; return n; }
- virtual const bool *incs() const { static bool b[] = { true, false, false, true, true, true }; return b; }
- virtual BSONObj obj() const { return BSONObj(); }
- };
-
- class DiffMulti2 : public DiffBase {
- public:
- void run() {
- FieldRangeSet frs( "", fromjson( "{a:{$gt:1,$lt:9},b:{$gt:0,$lt:2},c:3,d:{$gt:4,$lt:5},e:{$gt:7,$lt:10}}" ) );
- FieldRange mask = frs.range( "a" );
- FieldRange ret = frs.range( "b" );
- ret |= frs.range( "c" );
- ret |= frs.range( "d" );
- ret |= frs.range( "e" );
- ret -= mask;
- check( ret );
- }
- protected:
- virtual unsigned len() const { return 2; }
- virtual const int *nums() const { static int n[] = { 0, 1, 9, 10 }; return n; }
- virtual const bool *incs() const { static bool b[] = { false, true, true, false }; return b; }
- virtual BSONObj obj() const { return BSONObj(); }
- };
-
- class SetIntersect {
- public:
- void run() {
- FieldRangeSet frs1( "", fromjson( "{b:{$in:[5,6]},c:7,d:{$in:[8,9]}}" ) );
- FieldRangeSet frs2( "", fromjson( "{a:1,b:5,c:{$in:[7,8]},d:{$in:[8,9]},e:10}" ) );
- frs1 &= frs2;
- ASSERT_EQUALS( fromjson( "{a:1,b:5,c:7,d:{$gte:8,$lte:9},e:10}" ), frs1.simplifiedQuery( BSONObj() ) );
- }
- };
- } // namespace FieldRangeTests
- namespace QueryPlanTests {
+ void dropCollection( const char *ns ) {
+ string errmsg;
+ BSONObjBuilder result;
+ dropCollection( ns, errmsg, result );
+ }
+
+ namespace QueryPlanTests {
+ using boost::shared_ptr;
class Base {
public:
Base() : _ctx( ns() ) , indexNum_( 0 ) {
@@ -743,8 +62,7 @@ namespace QueryOptimizerTests {
~Base() {
if ( !nsd() )
return;
- string s( ns() );
- dropNS( s );
+ dropCollection( ns() );
}
protected:
static const char *ns() { return "unittests.QueryPlanTests"; }
@@ -783,15 +101,15 @@ namespace QueryOptimizerTests {
// There's a limit of 10 indexes total; make sure not to exceed this in a given test.
#define INDEXNO(x) nsd()->idxNo( *this->index( BSON(x) ) )
#define INDEX(x) this->index( BSON(x) )
- auto_ptr< FieldRangeSet > FieldRangeSet_GLOBAL;
-#define FBS(x) ( FieldRangeSet_GLOBAL.reset( new FieldRangeSet( ns(), x ) ), *FieldRangeSet_GLOBAL )
- auto_ptr< FieldRangeSet > FieldRangeSet_GLOBAL2;
-#define FBS2(x) ( FieldRangeSet_GLOBAL2.reset( new FieldRangeSet( ns(), x ) ), *FieldRangeSet_GLOBAL2 )
+ auto_ptr< FieldRangeSetPair > FieldRangeSetPair_GLOBAL;
+#define FRSP(x) ( FieldRangeSetPair_GLOBAL.reset( new FieldRangeSetPair( ns(), x ) ), *FieldRangeSetPair_GLOBAL )
+ auto_ptr< FieldRangeSetPair > FieldRangeSetPair_GLOBAL2;
+#define FRSP2(x) ( FieldRangeSetPair_GLOBAL2.reset( new FieldRangeSetPair( ns(), x ) ), FieldRangeSetPair_GLOBAL2.get() )
class NoIndex : public Base {
public:
void run() {
- QueryPlan p( nsd(), -1, FBS( BSONObj() ), FBS2( BSONObj() ), BSONObj(), BSONObj() );
+ QueryPlan p( nsd(), -1, FRSP( BSONObj() ), FRSP2( BSONObj() ), BSONObj(), BSONObj() );
ASSERT( !p.optimal() );
ASSERT( !p.scanAndOrderRequired() );
ASSERT( !p.exactKeyMatch() );
@@ -808,13 +126,13 @@ namespace QueryOptimizerTests {
b2.appendMaxKey( "" );
BSONObj end = b2.obj();
- QueryPlan p( nsd(), INDEXNO( "a" << 1 ), FBS( BSONObj() ), FBS2( BSONObj() ), BSONObj(), BSON( "a" << 1 ) );
+ QueryPlan p( nsd(), INDEXNO( "a" << 1 ), FRSP( BSONObj() ), FRSP2( BSONObj() ), BSONObj(), BSON( "a" << 1 ) );
ASSERT( !p.scanAndOrderRequired() );
ASSERT( !startKey( p ).woCompare( start ) );
ASSERT( !endKey( p ).woCompare( end ) );
- QueryPlan p2( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FBS( BSONObj() ), FBS2( BSONObj() ), BSONObj(), BSON( "a" << 1 << "b" << 1 ) );
+ QueryPlan p2( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FRSP( BSONObj() ), FRSP2( BSONObj() ), BSONObj(), BSON( "a" << 1 << "b" << 1 ) );
ASSERT( !p2.scanAndOrderRequired() );
- QueryPlan p3( nsd(), INDEXNO( "a" << 1 ), FBS( BSONObj() ), FBS2( BSONObj() ), BSONObj(), BSON( "b" << 1 ) );
+ QueryPlan p3( nsd(), INDEXNO( "a" << 1 ), FRSP( BSONObj() ), FRSP2( BSONObj() ), BSONObj(), BSON( "b" << 1 ) );
ASSERT( p3.scanAndOrderRequired() );
ASSERT( !startKey( p3 ).woCompare( start ) );
ASSERT( !endKey( p3 ).woCompare( end ) );
@@ -824,7 +142,7 @@ namespace QueryOptimizerTests {
class MoreIndexThanNeeded : public Base {
public:
void run() {
- QueryPlan p( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FBS( BSONObj() ), FBS2( BSONObj() ), BSONObj(), BSON( "a" << 1 ) );
+ QueryPlan p( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FRSP( BSONObj() ), FRSP2( BSONObj() ), BSONObj(), BSON( "a" << 1 ) );
ASSERT( !p.scanAndOrderRequired() );
}
};
@@ -832,13 +150,13 @@ namespace QueryOptimizerTests {
class IndexSigns : public Base {
public:
void run() {
- QueryPlan p( nsd(), INDEXNO( "a" << 1 << "b" << -1 ) , FBS( BSONObj() ), FBS2( BSONObj() ), BSONObj(), BSON( "a" << 1 << "b" << -1 ) );
+ QueryPlan p( nsd(), INDEXNO( "a" << 1 << "b" << -1 ) , FRSP( BSONObj() ), FRSP2( BSONObj() ), BSONObj(), BSON( "a" << 1 << "b" << -1 ) );
ASSERT( !p.scanAndOrderRequired() );
ASSERT_EQUALS( 1, p.direction() );
- QueryPlan p2( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FBS( BSONObj() ), FBS2( BSONObj() ), BSONObj(), BSON( "a" << 1 << "b" << -1 ) );
+ QueryPlan p2( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FRSP( BSONObj() ), FRSP2( BSONObj() ), BSONObj(), BSON( "a" << 1 << "b" << -1 ) );
ASSERT( p2.scanAndOrderRequired() );
ASSERT_EQUALS( 0, p2.direction() );
- QueryPlan p3( nsd(), indexno( id_obj ), FBS( BSONObj() ), FBS2( BSONObj() ), BSONObj(), BSON( "_id" << 1 ) );
+ QueryPlan p3( nsd(), indexno( id_obj ), FRSP( BSONObj() ), FRSP2( BSONObj() ), BSONObj(), BSON( "_id" << 1 ) );
ASSERT( !p3.scanAndOrderRequired() );
ASSERT_EQUALS( 1, p3.direction() );
}
@@ -855,15 +173,15 @@ namespace QueryOptimizerTests {
b2.appendMaxKey( "" );
b2.appendMinKey( "" );
BSONObj end = b2.obj();
- QueryPlan p( nsd(), INDEXNO( "a" << -1 << "b" << 1 ),FBS( BSONObj() ), FBS2( BSONObj() ), BSONObj(), BSON( "a" << 1 << "b" << -1 ) );
+ QueryPlan p( nsd(), INDEXNO( "a" << -1 << "b" << 1 ),FRSP( BSONObj() ), FRSP2( BSONObj() ), BSONObj(), BSON( "a" << 1 << "b" << -1 ) );
ASSERT( !p.scanAndOrderRequired() );
ASSERT_EQUALS( -1, p.direction() );
ASSERT( !startKey( p ).woCompare( start ) );
ASSERT( !endKey( p ).woCompare( end ) );
- QueryPlan p2( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FBS( BSONObj() ), FBS2( BSONObj() ), BSONObj(), BSON( "a" << -1 << "b" << -1 ) );
+ QueryPlan p2( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FRSP( BSONObj() ), FRSP2( BSONObj() ), BSONObj(), BSON( "a" << -1 << "b" << -1 ) );
ASSERT( !p2.scanAndOrderRequired() );
ASSERT_EQUALS( -1, p2.direction() );
- QueryPlan p3( nsd(), INDEXNO( "a" << 1 << "b" << -1 ), FBS( BSONObj() ), FBS2( BSONObj() ), BSONObj(), BSON( "a" << -1 << "b" << -1 ) );
+ QueryPlan p3( nsd(), INDEXNO( "a" << 1 << "b" << -1 ), FRSP( BSONObj() ), FRSP2( BSONObj() ), BSONObj(), BSON( "a" << -1 << "b" << -1 ) );
ASSERT( p3.scanAndOrderRequired() );
ASSERT_EQUALS( 0, p3.direction() );
}
@@ -880,11 +198,11 @@ namespace QueryOptimizerTests {
b2.append( "", 3 );
b2.appendMaxKey( "" );
BSONObj end = b2.obj();
- QueryPlan p( nsd(), INDEXNO( "a" << -1 << "b" << 1 ), FBS( BSON( "a" << 3 ) ), FBS2( BSON( "a" << 3 ) ), BSON( "a" << 3 ), BSONObj() );
+ QueryPlan p( nsd(), INDEXNO( "a" << -1 << "b" << 1 ), FRSP( BSON( "a" << 3 ) ), FRSP2( BSON( "a" << 3 ) ), BSON( "a" << 3 ), BSONObj() );
ASSERT( !p.scanAndOrderRequired() );
ASSERT( !startKey( p ).woCompare( start ) );
ASSERT( !endKey( p ).woCompare( end ) );
- QueryPlan p2( nsd(), INDEXNO( "a" << -1 << "b" << 1 ), FBS( BSON( "a" << 3 ) ), FBS2( BSON( "a" << 3 ) ), BSON( "a" << 3 ), BSONObj() );
+ QueryPlan p2( nsd(), INDEXNO( "a" << -1 << "b" << 1 ), FRSP( BSON( "a" << 3 ) ), FRSP2( BSON( "a" << 3 ) ), BSON( "a" << 3 ), BSONObj() );
ASSERT( !p2.scanAndOrderRequired() );
ASSERT( !startKey( p ).woCompare( start ) );
ASSERT( !endKey( p ).woCompare( end ) );
@@ -894,11 +212,11 @@ namespace QueryOptimizerTests {
class EqualWithOrder : public Base {
public:
void run() {
- QueryPlan p( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FBS( BSON( "a" << 4 ) ), FBS2( BSON( "a" << 4 ) ), BSON( "a" << 4 ), BSON( "b" << 1 ) );
+ QueryPlan p( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FRSP( BSON( "a" << 4 ) ), FRSP2( BSON( "a" << 4 ) ), BSON( "a" << 4 ), BSON( "b" << 1 ) );
ASSERT( !p.scanAndOrderRequired() );
- QueryPlan p2( nsd(), INDEXNO( "a" << 1 << "b" << 1 << "c" << 1 ), FBS( BSON( "b" << 4 ) ), FBS2( BSON( "b" << 4 ) ), BSON( "b" << 4 ), BSON( "a" << 1 << "c" << 1 ) );
+ QueryPlan p2( nsd(), INDEXNO( "a" << 1 << "b" << 1 << "c" << 1 ), FRSP( BSON( "b" << 4 ) ), FRSP2( BSON( "b" << 4 ) ), BSON( "b" << 4 ), BSON( "a" << 1 << "c" << 1 ) );
ASSERT( !p2.scanAndOrderRequired() );
- QueryPlan p3( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FBS( BSON( "b" << 4 ) ), FBS2( BSON( "b" << 4 ) ), BSON( "b" << 4 ), BSON( "a" << 1 << "c" << 1 ) );
+ QueryPlan p3( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FRSP( BSON( "b" << 4 ) ), FRSP2( BSON( "b" << 4 ) ), BSON( "b" << 4 ), BSON( "a" << 1 << "c" << 1 ) );
ASSERT( p3.scanAndOrderRequired() );
}
};
@@ -906,23 +224,23 @@ namespace QueryOptimizerTests {
class Optimal : public Base {
public:
void run() {
- QueryPlan p( nsd(), INDEXNO( "a" << 1 ), FBS( BSONObj() ), FBS2( BSONObj() ), BSONObj(), BSON( "a" << 1 ) );
+ QueryPlan p( nsd(), INDEXNO( "a" << 1 ), FRSP( BSONObj() ), FRSP2( BSONObj() ), BSONObj(), BSON( "a" << 1 ) );
ASSERT( p.optimal() );
- QueryPlan p2( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FBS( BSONObj() ), FBS2( BSONObj() ), BSONObj(), BSON( "a" << 1 ) );
+ QueryPlan p2( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FRSP( BSONObj() ), FRSP2( BSONObj() ), BSONObj(), BSON( "a" << 1 ) );
ASSERT( p2.optimal() );
- QueryPlan p3( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FBS( BSON( "a" << 1 ) ), FBS2( BSON( "a" << 1 ) ), BSON( "a" << 1 ), BSON( "a" << 1 ) );
+ QueryPlan p3( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FRSP( BSON( "a" << 1 ) ), FRSP2( BSON( "a" << 1 ) ), BSON( "a" << 1 ), BSON( "a" << 1 ) );
ASSERT( p3.optimal() );
- QueryPlan p4( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FBS( BSON( "b" << 1 ) ), FBS2( BSON( "b" << 1 ) ), BSON( "b" << 1 ), BSON( "a" << 1 ) );
+ QueryPlan p4( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FRSP( BSON( "b" << 1 ) ), FRSP2( BSON( "b" << 1 ) ), BSON( "b" << 1 ), BSON( "a" << 1 ) );
ASSERT( !p4.optimal() );
- QueryPlan p5( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FBS( BSON( "a" << 1 ) ), FBS2( BSON( "a" << 1 ) ), BSON( "a" << 1 ), BSON( "b" << 1 ) );
+ QueryPlan p5( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FRSP( BSON( "a" << 1 ) ), FRSP2( BSON( "a" << 1 ) ), BSON( "a" << 1 ), BSON( "b" << 1 ) );
ASSERT( p5.optimal() );
- QueryPlan p6( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FBS( BSON( "b" << 1 ) ), FBS2( BSON( "b" << 1 ) ), BSON( "b" << 1 ), BSON( "b" << 1 ) );
+ QueryPlan p6( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FRSP( BSON( "b" << 1 ) ), FRSP2( BSON( "b" << 1 ) ), BSON( "b" << 1 ), BSON( "b" << 1 ) );
ASSERT( !p6.optimal() );
- QueryPlan p7( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FBS( BSON( "a" << 1 << "b" << 1 ) ), FBS2( BSON( "a" << 1 << "b" << 1 ) ), BSON( "a" << 1 << "b" << 1 ), BSON( "a" << 1 ) );
+ QueryPlan p7( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FRSP( BSON( "a" << 1 << "b" << 1 ) ), FRSP2( BSON( "a" << 1 << "b" << 1 ) ), BSON( "a" << 1 << "b" << 1 ), BSON( "a" << 1 ) );
ASSERT( p7.optimal() );
- QueryPlan p8( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FBS( BSON( "a" << 1 << "b" << LT << 1 ) ), FBS2( BSON( "a" << 1 << "b" << LT << 1 ) ), BSON( "a" << 1 << "b" << LT << 1 ), BSON( "a" << 1 ) );
+ QueryPlan p8( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FRSP( BSON( "a" << 1 << "b" << LT << 1 ) ), FRSP2( BSON( "a" << 1 << "b" << LT << 1 ) ), BSON( "a" << 1 << "b" << LT << 1 ), BSON( "a" << 1 ) );
ASSERT( p8.optimal() );
- QueryPlan p9( nsd(), INDEXNO( "a" << 1 << "b" << 1 << "c" << 1 ), FBS( BSON( "a" << 1 << "b" << LT << 1 ) ), FBS2( BSON( "a" << 1 << "b" << LT << 1 ) ), BSON( "a" << 1 << "b" << LT << 1 ), BSON( "a" << 1 ) );
+ QueryPlan p9( nsd(), INDEXNO( "a" << 1 << "b" << 1 << "c" << 1 ), FRSP( BSON( "a" << 1 << "b" << LT << 1 ) ), FRSP2( BSON( "a" << 1 << "b" << LT << 1 ) ), BSON( "a" << 1 << "b" << LT << 1 ), BSON( "a" << 1 ) );
ASSERT( p9.optimal() );
}
};
@@ -930,13 +248,13 @@ namespace QueryOptimizerTests {
class MoreOptimal : public Base {
public:
void run() {
- QueryPlan p10( nsd(), INDEXNO( "a" << 1 << "b" << 1 << "c" << 1 ), FBS( BSON( "a" << 1 ) ), FBS2( BSON( "a" << 1 ) ), BSON( "a" << 1 ), BSONObj() );
+ QueryPlan p10( nsd(), INDEXNO( "a" << 1 << "b" << 1 << "c" << 1 ), FRSP( BSON( "a" << 1 ) ), FRSP2( BSON( "a" << 1 ) ), BSON( "a" << 1 ), BSONObj() );
ASSERT( p10.optimal() );
- QueryPlan p11( nsd(), INDEXNO( "a" << 1 << "b" << 1 << "c" << 1 ), FBS( BSON( "a" << 1 << "b" << LT << 1 ) ), FBS2( BSON( "a" << 1 << "b" << LT << 1 ) ), BSON( "a" << 1 << "b" << LT << 1 ), BSONObj() );
+ QueryPlan p11( nsd(), INDEXNO( "a" << 1 << "b" << 1 << "c" << 1 ), FRSP( BSON( "a" << 1 << "b" << LT << 1 ) ), FRSP2( BSON( "a" << 1 << "b" << LT << 1 ) ), BSON( "a" << 1 << "b" << LT << 1 ), BSONObj() );
ASSERT( p11.optimal() );
- QueryPlan p12( nsd(), INDEXNO( "a" << 1 << "b" << 1 << "c" << 1 ), FBS( BSON( "a" << LT << 1 ) ), FBS2( BSON( "a" << LT << 1 ) ), BSON( "a" << LT << 1 ), BSONObj() );
+ QueryPlan p12( nsd(), INDEXNO( "a" << 1 << "b" << 1 << "c" << 1 ), FRSP( BSON( "a" << LT << 1 ) ), FRSP2( BSON( "a" << LT << 1 ) ), BSON( "a" << LT << 1 ), BSONObj() );
ASSERT( p12.optimal() );
- QueryPlan p13( nsd(), INDEXNO( "a" << 1 << "b" << 1 << "c" << 1 ), FBS( BSON( "a" << LT << 1 ) ), FBS2( BSON( "a" << LT << 1 ) ), BSON( "a" << LT << 1 ), BSON( "a" << 1 ) );
+ QueryPlan p13( nsd(), INDEXNO( "a" << 1 << "b" << 1 << "c" << 1 ), FRSP( BSON( "a" << LT << 1 ) ), FRSP2( BSON( "a" << LT << 1 ) ), BSON( "a" << LT << 1 ), BSON( "a" << 1 ) );
ASSERT( p13.optimal() );
}
};
@@ -944,23 +262,23 @@ namespace QueryOptimizerTests {
class KeyMatch : public Base {
public:
void run() {
- QueryPlan p( nsd(), INDEXNO( "a" << 1 ), FBS( BSONObj() ), FBS2( BSONObj() ), BSONObj(), BSON( "a" << 1 ) );
+ QueryPlan p( nsd(), INDEXNO( "a" << 1 ), FRSP( BSONObj() ), FRSP2( BSONObj() ), BSONObj(), BSON( "a" << 1 ) );
ASSERT( !p.exactKeyMatch() );
- QueryPlan p2( nsd(), INDEXNO( "b" << 1 << "a" << 1 ), FBS( BSONObj() ), FBS2( BSONObj() ), BSONObj(), BSON( "a" << 1 ) );
+ QueryPlan p2( nsd(), INDEXNO( "b" << 1 << "a" << 1 ), FRSP( BSONObj() ), FRSP2( BSONObj() ), BSONObj(), BSON( "a" << 1 ) );
ASSERT( !p2.exactKeyMatch() );
- QueryPlan p3( nsd(), INDEXNO( "b" << 1 << "a" << 1 ), FBS( BSON( "b" << "z" ) ), FBS2( BSON( "b" << "z" ) ), BSON( "b" << "z" ), BSON( "a" << 1 ) );
+ QueryPlan p3( nsd(), INDEXNO( "b" << 1 << "a" << 1 ), FRSP( BSON( "b" << "z" ) ), FRSP2( BSON( "b" << "z" ) ), BSON( "b" << "z" ), BSON( "a" << 1 ) );
ASSERT( !p3.exactKeyMatch() );
- QueryPlan p4( nsd(), INDEXNO( "b" << 1 << "a" << 1 << "c" << 1 ), FBS( BSON( "c" << "y" << "b" << "z" ) ), FBS2( BSON( "c" << "y" << "b" << "z" ) ), BSON( "c" << "y" << "b" << "z" ), BSON( "a" << 1 ) );
+ QueryPlan p4( nsd(), INDEXNO( "b" << 1 << "a" << 1 << "c" << 1 ), FRSP( BSON( "c" << "y" << "b" << "z" ) ), FRSP2( BSON( "c" << "y" << "b" << "z" ) ), BSON( "c" << "y" << "b" << "z" ), BSON( "a" << 1 ) );
ASSERT( !p4.exactKeyMatch() );
- QueryPlan p5( nsd(), INDEXNO( "b" << 1 << "a" << 1 << "c" << 1 ), FBS( BSON( "c" << "y" << "b" << "z" ) ), FBS2( BSON( "c" << "y" << "b" << "z" ) ), BSON( "c" << "y" << "b" << "z" ), BSONObj() );
+ QueryPlan p5( nsd(), INDEXNO( "b" << 1 << "a" << 1 << "c" << 1 ), FRSP( BSON( "c" << "y" << "b" << "z" ) ), FRSP2( BSON( "c" << "y" << "b" << "z" ) ), BSON( "c" << "y" << "b" << "z" ), BSONObj() );
ASSERT( !p5.exactKeyMatch() );
- QueryPlan p6( nsd(), INDEXNO( "b" << 1 << "a" << 1 << "c" << 1 ), FBS( BSON( "c" << LT << "y" << "b" << GT << "z" ) ), FBS2( BSON( "c" << LT << "y" << "b" << GT << "z" ) ), BSON( "c" << LT << "y" << "b" << GT << "z" ), BSONObj() );
+ QueryPlan p6( nsd(), INDEXNO( "b" << 1 << "a" << 1 << "c" << 1 ), FRSP( BSON( "c" << LT << "y" << "b" << GT << "z" ) ), FRSP2( BSON( "c" << LT << "y" << "b" << GT << "z" ) ), BSON( "c" << LT << "y" << "b" << GT << "z" ), BSONObj() );
ASSERT( !p6.exactKeyMatch() );
- QueryPlan p7( nsd(), INDEXNO( "b" << 1 ), FBS( BSONObj() ), FBS2( BSONObj() ), BSONObj(), BSON( "a" << 1 ) );
+ QueryPlan p7( nsd(), INDEXNO( "b" << 1 ), FRSP( BSONObj() ), FRSP2( BSONObj() ), BSONObj(), BSON( "a" << 1 ) );
ASSERT( !p7.exactKeyMatch() );
- QueryPlan p8( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FBS( BSON( "b" << "y" << "a" << "z" ) ), FBS2( BSON( "b" << "y" << "a" << "z" ) ), BSON( "b" << "y" << "a" << "z" ), BSONObj() );
+ QueryPlan p8( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FRSP( BSON( "b" << "y" << "a" << "z" ) ), FRSP2( BSON( "b" << "y" << "a" << "z" ) ), BSON( "b" << "y" << "a" << "z" ), BSONObj() );
ASSERT( p8.exactKeyMatch() );
- QueryPlan p9( nsd(), INDEXNO( "a" << 1 ), FBS( BSON( "a" << "z" ) ), FBS2( BSON( "a" << "z" ) ), BSON( "a" << "z" ), BSON( "a" << 1 ) );
+ QueryPlan p9( nsd(), INDEXNO( "a" << 1 ), FRSP( BSON( "a" << "z" ) ), FRSP2( BSON( "a" << "z" ) ), BSON( "a" << "z" ), BSON( "a" << 1 ) );
ASSERT( p9.exactKeyMatch() );
}
};
@@ -968,7 +286,7 @@ namespace QueryOptimizerTests {
class MoreKeyMatch : public Base {
public:
void run() {
- QueryPlan p( nsd(), INDEXNO( "a" << 1 ), FBS( BSON( "a" << "r" << "b" << NE << "q" ) ), FBS2( BSON( "a" << "r" << "b" << NE << "q" ) ), BSON( "a" << "r" << "b" << NE << "q" ), BSON( "a" << 1 ) );
+ QueryPlan p( nsd(), INDEXNO( "a" << 1 ), FRSP( BSON( "a" << "r" << "b" << NE << "q" ) ), FRSP2( BSON( "a" << "r" << "b" << NE << "q" ) ), BSON( "a" << "r" << "b" << NE << "q" ), BSON( "a" << 1 ) );
ASSERT( !p.exactKeyMatch() );
}
};
@@ -976,18 +294,18 @@ namespace QueryOptimizerTests {
class ExactKeyQueryTypes : public Base {
public:
void run() {
- QueryPlan p( nsd(), INDEXNO( "a" << 1 ), FBS( BSON( "a" << "b" ) ), FBS2( BSON( "a" << "b" ) ), BSON( "a" << "b" ), BSONObj() );
+ QueryPlan p( nsd(), INDEXNO( "a" << 1 ), FRSP( BSON( "a" << "b" ) ), FRSP2( BSON( "a" << "b" ) ), BSON( "a" << "b" ), BSONObj() );
ASSERT( p.exactKeyMatch() );
- QueryPlan p2( nsd(), INDEXNO( "a" << 1 ), FBS( BSON( "a" << 4 ) ), FBS2( BSON( "a" << 4 ) ), BSON( "a" << 4 ), BSONObj() );
+ QueryPlan p2( nsd(), INDEXNO( "a" << 1 ), FRSP( BSON( "a" << 4 ) ), FRSP2( BSON( "a" << 4 ) ), BSON( "a" << 4 ), BSONObj() );
ASSERT( !p2.exactKeyMatch() );
- QueryPlan p3( nsd(), INDEXNO( "a" << 1 ), FBS( BSON( "a" << BSON( "c" << "d" ) ) ), FBS2( BSON( "a" << BSON( "c" << "d" ) ) ), BSON( "a" << BSON( "c" << "d" ) ), BSONObj() );
+ QueryPlan p3( nsd(), INDEXNO( "a" << 1 ), FRSP( BSON( "a" << BSON( "c" << "d" ) ) ), FRSP2( BSON( "a" << BSON( "c" << "d" ) ) ), BSON( "a" << BSON( "c" << "d" ) ), BSONObj() );
ASSERT( !p3.exactKeyMatch() );
BSONObjBuilder b;
b.appendRegex( "a", "^ddd" );
BSONObj q = b.obj();
- QueryPlan p4( nsd(), INDEXNO( "a" << 1 ), FBS( q ), FBS2( q ), q, BSONObj() );
+ QueryPlan p4( nsd(), INDEXNO( "a" << 1 ), FRSP( q ), FRSP2( q ), q, BSONObj() );
ASSERT( !p4.exactKeyMatch() );
- QueryPlan p5( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FBS( BSON( "a" << "z" << "b" << 4 ) ), FBS2( BSON( "a" << "z" << "b" << 4 ) ), BSON( "a" << "z" << "b" << 4 ), BSONObj() );
+ QueryPlan p5( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FRSP( BSON( "a" << "z" << "b" << 4 ) ), FRSP2( BSON( "a" << "z" << "b" << 4 ) ), BSON( "a" << "z" << "b" << 4 ), BSONObj() );
ASSERT( !p5.exactKeyMatch() );
}
};
@@ -995,17 +313,17 @@ namespace QueryOptimizerTests {
class Unhelpful : public Base {
public:
void run() {
- QueryPlan p( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FBS( BSON( "b" << 1 ) ), FBS2( BSON( "b" << 1 ) ), BSON( "b" << 1 ), BSONObj() );
+ QueryPlan p( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FRSP( BSON( "b" << 1 ) ), FRSP2( BSON( "b" << 1 ) ), BSON( "b" << 1 ), BSONObj() );
ASSERT( !p.range( "a" ).nontrivial() );
ASSERT( p.unhelpful() );
- QueryPlan p2( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FBS( BSON( "b" << 1 << "c" << 1 ) ), FBS2( BSON( "b" << 1 << "c" << 1 ) ), BSON( "b" << 1 << "c" << 1 ), BSON( "a" << 1 ) );
+ QueryPlan p2( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FRSP( BSON( "b" << 1 << "c" << 1 ) ), FRSP2( BSON( "b" << 1 << "c" << 1 ) ), BSON( "b" << 1 << "c" << 1 ), BSON( "a" << 1 ) );
ASSERT( !p2.scanAndOrderRequired() );
ASSERT( !p2.range( "a" ).nontrivial() );
ASSERT( !p2.unhelpful() );
- QueryPlan p3( nsd(), INDEXNO( "b" << 1 ), FBS( BSON( "b" << 1 << "c" << 1 ) ), FBS2( BSON( "b" << 1 << "c" << 1 ) ), BSON( "b" << 1 << "c" << 1 ), BSONObj() );
+ QueryPlan p3( nsd(), INDEXNO( "b" << 1 ), FRSP( BSON( "b" << 1 << "c" << 1 ) ), FRSP2( BSON( "b" << 1 << "c" << 1 ) ), BSON( "b" << 1 << "c" << 1 ), BSONObj() );
ASSERT( p3.range( "b" ).nontrivial() );
ASSERT( !p3.unhelpful() );
- QueryPlan p4( nsd(), INDEXNO( "b" << 1 << "c" << 1 ), FBS( BSON( "c" << 1 << "d" << 1 ) ), FBS2( BSON( "c" << 1 << "d" << 1 ) ), BSON( "c" << 1 << "d" << 1 ), BSONObj() );
+ QueryPlan p4( nsd(), INDEXNO( "b" << 1 << "c" << 1 ), FRSP( BSON( "c" << 1 << "d" << 1 ) ), FRSP2( BSON( "c" << 1 << "d" << 1 ) ), BSON( "c" << 1 << "d" << 1 ), BSONObj() );
ASSERT( !p4.range( "b" ).nontrivial() );
ASSERT( p4.unhelpful() );
}
@@ -1023,9 +341,8 @@ namespace QueryOptimizerTests {
virtual ~Base() {
if ( !nsd() )
return;
- NamespaceDetailsTransient::_get( ns() ).clearQueryCache();
- string s( ns() );
- dropNS( s );
+ NamespaceDetailsTransient::get_inlock( ns() ).clearQueryCache();
+ dropCollection( ns() );
}
static void assembleRequest( const string &ns, BSONObj query, int nToReturn, int nToSkip, BSONObj *fieldsToReturn, int queryOptions, Message &toSend ) {
// see query.h for the protocol we are using here.
@@ -1051,9 +368,9 @@ namespace QueryOptimizerTests {
class NoIndexes : public Base {
public:
void run() {
- auto_ptr< FieldRangeSet > frs( new FieldRangeSet( ns(), BSON( "a" << 4 ) ) );
- auto_ptr< FieldRangeSet > frsOrig( new FieldRangeSet( *frs ) );
- QueryPlanSet s( ns(), frs, frsOrig, BSON( "a" << 4 ), BSON( "b" << 1 ) );
+ auto_ptr< FieldRangeSetPair > frsp( new FieldRangeSetPair( ns(), BSON( "a" << 4 ) ) );
+ auto_ptr< FieldRangeSetPair > frspOrig( new FieldRangeSetPair( *frsp ) );
+ QueryPlanSet s( ns(), frsp, frspOrig, BSON( "a" << 4 ), BSON( "b" << 1 ) );
ASSERT_EQUALS( 1, s.nPlans() );
}
};
@@ -1063,9 +380,9 @@ namespace QueryOptimizerTests {
void run() {
Helpers::ensureIndex( ns(), BSON( "a" << 1 ), false, "a_1" );
Helpers::ensureIndex( ns(), BSON( "a" << 1 ), false, "b_2" );
- auto_ptr< FieldRangeSet > frs( new FieldRangeSet( ns(), BSON( "a" << 4 ) ) );
- auto_ptr< FieldRangeSet > frsOrig( new FieldRangeSet( *frs ) );
- QueryPlanSet s( ns(), frs, frsOrig, BSON( "a" << 4 ), BSONObj() );
+ auto_ptr< FieldRangeSetPair > frsp( new FieldRangeSetPair( ns(), BSON( "a" << 4 ) ) );
+ auto_ptr< FieldRangeSetPair > frspOrig( new FieldRangeSetPair( *frsp ) );
+ QueryPlanSet s( ns(), frsp, frspOrig, BSON( "a" << 4 ), BSONObj() );
ASSERT_EQUALS( 1, s.nPlans() );
}
};
@@ -1075,9 +392,9 @@ namespace QueryOptimizerTests {
void run() {
Helpers::ensureIndex( ns(), BSON( "a" << 1 ), false, "a_1" );
Helpers::ensureIndex( ns(), BSON( "b" << 1 ), false, "b_1" );
- auto_ptr< FieldRangeSet > frs( new FieldRangeSet( ns(), BSON( "a" << 4 ) ) );
- auto_ptr< FieldRangeSet > frsOrig( new FieldRangeSet( *frs ) );
- QueryPlanSet s( ns(), frs, frsOrig, BSON( "a" << 4 ), BSON( "b" << 1 ) );
+ auto_ptr< FieldRangeSetPair > frsp( new FieldRangeSetPair( ns(), BSON( "a" << 4 ) ) );
+ auto_ptr< FieldRangeSetPair > frspOrig( new FieldRangeSetPair( *frsp ) );
+ QueryPlanSet s( ns(), frsp, frspOrig, BSON( "a" << 4 ), BSON( "b" << 1 ) );
ASSERT_EQUALS( 3, s.nPlans() );
}
};
@@ -1087,9 +404,9 @@ namespace QueryOptimizerTests {
void run() {
Helpers::ensureIndex( ns(), BSON( "a" << 1 ), false, "a_1" );
Helpers::ensureIndex( ns(), BSON( "b" << 1 ), false, "b_1" );
- auto_ptr< FieldRangeSet > frs( new FieldRangeSet( ns(), BSONObj() ) );
- auto_ptr< FieldRangeSet > frsOrig( new FieldRangeSet( *frs ) );
- QueryPlanSet s( ns(), frs, frsOrig, BSONObj(), BSONObj() );
+ auto_ptr< FieldRangeSetPair > frsp( new FieldRangeSetPair( ns(), BSONObj() ) );
+ auto_ptr< FieldRangeSetPair > frspOrig( new FieldRangeSetPair( *frsp ) );
+ QueryPlanSet s( ns(), frsp, frspOrig, BSONObj(), BSONObj() );
ASSERT_EQUALS( 1, s.nPlans() );
}
};
@@ -1101,9 +418,9 @@ namespace QueryOptimizerTests {
Helpers::ensureIndex( ns(), BSON( "b" << 1 ), false, "b_1" );
BSONObj b = BSON( "hint" << BSON( "a" << 1 ) );
BSONElement e = b.firstElement();
- auto_ptr< FieldRangeSet > frs( new FieldRangeSet( ns(), BSON( "a" << 1 ) ) );
- auto_ptr< FieldRangeSet > frsOrig( new FieldRangeSet( *frs ) );
- QueryPlanSet s( ns(), frs, frsOrig, BSON( "a" << 1 ), BSON( "b" << 1 ), &e );
+ auto_ptr< FieldRangeSetPair > frsp( new FieldRangeSetPair( ns(), BSON( "a" << 1 ) ) );
+ auto_ptr< FieldRangeSetPair > frspOrig( new FieldRangeSetPair( *frsp ) );
+ QueryPlanSet s( ns(), frsp, frspOrig, BSON( "a" << 1 ), BSON( "b" << 1 ), true, &e );
ASSERT_EQUALS( 1, s.nPlans() );
}
};
@@ -1115,9 +432,9 @@ namespace QueryOptimizerTests {
Helpers::ensureIndex( ns(), BSON( "b" << 1 ), false, "b_1" );
BSONObj b = BSON( "hint" << "a_1" );
BSONElement e = b.firstElement();
- auto_ptr< FieldRangeSet > frs( new FieldRangeSet( ns(), BSON( "a" << 1 ) ) );
- auto_ptr< FieldRangeSet > frsOrig( new FieldRangeSet( *frs ) );
- QueryPlanSet s( ns(), frs, frsOrig, BSON( "a" << 1 ), BSON( "b" << 1 ), &e );
+ auto_ptr< FieldRangeSetPair > frsp( new FieldRangeSetPair( ns(), BSON( "a" << 1 ) ) );
+ auto_ptr< FieldRangeSetPair > frspOrig( new FieldRangeSetPair( *frsp ) );
+ QueryPlanSet s( ns(), frsp, frspOrig, BSON( "a" << 1 ), BSON( "b" << 1 ), true, &e );
ASSERT_EQUALS( 1, s.nPlans() );
}
};
@@ -1129,9 +446,9 @@ namespace QueryOptimizerTests {
Helpers::ensureIndex( ns(), BSON( "b" << 1 ), false, "b_1" );
BSONObj b = BSON( "hint" << BSON( "$natural" << 1 ) );
BSONElement e = b.firstElement();
- auto_ptr< FieldRangeSet > frs( new FieldRangeSet( ns(), BSON( "a" << 1 ) ) );
- auto_ptr< FieldRangeSet > frsOrig( new FieldRangeSet( *frs ) );
- QueryPlanSet s( ns(), frs, frsOrig, BSON( "a" << 1 ), BSON( "b" << 1 ), &e );
+ auto_ptr< FieldRangeSetPair > frsp( new FieldRangeSetPair( ns(), BSON( "a" << 1 ) ) );
+ auto_ptr< FieldRangeSetPair > frspOrig( new FieldRangeSetPair( *frsp ) );
+ QueryPlanSet s( ns(), frsp, frspOrig, BSON( "a" << 1 ), BSON( "b" << 1 ), true, &e );
ASSERT_EQUALS( 1, s.nPlans() );
}
};
@@ -1141,9 +458,9 @@ namespace QueryOptimizerTests {
void run() {
Helpers::ensureIndex( ns(), BSON( "a" << 1 ), false, "a_1" );
Helpers::ensureIndex( ns(), BSON( "a" << 1 ), false, "b_2" );
- auto_ptr< FieldRangeSet > frs( new FieldRangeSet( ns(), BSON( "a" << 1 ) ) );
- auto_ptr< FieldRangeSet > frsOrig( new FieldRangeSet( *frs ) );
- QueryPlanSet s( ns(), frs, frsOrig, BSON( "a" << 1 ), BSON( "$natural" << 1 ) );
+ auto_ptr< FieldRangeSetPair > frsp( new FieldRangeSetPair( ns(), BSON( "a" << 1 ) ) );
+ auto_ptr< FieldRangeSetPair > frspOrig( new FieldRangeSetPair( *frsp ) );
+ QueryPlanSet s( ns(), frsp, frspOrig, BSON( "a" << 1 ), BSON( "$natural" << 1 ) );
ASSERT_EQUALS( 1, s.nPlans() );
}
};
@@ -1153,9 +470,9 @@ namespace QueryOptimizerTests {
void run() {
BSONObj b = BSON( "hint" << "a_1" );
BSONElement e = b.firstElement();
- auto_ptr< FieldRangeSet > frs( new FieldRangeSet( ns(), BSON( "a" << 1 ) ) );
- auto_ptr< FieldRangeSet > frsOrig( new FieldRangeSet( *frs ) );
- ASSERT_EXCEPTION( QueryPlanSet s( ns(), frs, frsOrig, BSON( "a" << 1 ), BSON( "b" << 1 ), &e ),
+ auto_ptr< FieldRangeSetPair > frsp( new FieldRangeSetPair( ns(), BSON( "a" << 1 ) ) );
+ auto_ptr< FieldRangeSetPair > frspOrig( new FieldRangeSetPair( *frsp ) );
+ ASSERT_EXCEPTION( QueryPlanSet s( ns(), frsp, frspOrig, BSON( "a" << 1 ), BSON( "b" << 1 ), true, &e ),
AssertionException );
}
};
@@ -1208,9 +525,9 @@ namespace QueryOptimizerTests {
void run() {
Helpers::ensureIndex( ns(), BSON( "a" << 1 ), false, "a_1" );
Helpers::ensureIndex( ns(), BSON( "b" << 1 ), false, "b_1" );
- auto_ptr< FieldRangeSet > frs( new FieldRangeSet( ns(), BSON( "a" << 1 << "c" << 2 ) ) );
- auto_ptr< FieldRangeSet > frsOrig( new FieldRangeSet( *frs ) );
- QueryPlanSet s( ns(), frs, frsOrig, BSON( "a" << 1 << "c" << 2 ), BSONObj() );
+ auto_ptr< FieldRangeSetPair > frsp( new FieldRangeSetPair( ns(), BSON( "a" << 1 << "c" << 2 ) ) );
+ auto_ptr< FieldRangeSetPair > frspOrig( new FieldRangeSetPair( *frsp ) );
+ QueryPlanSet s( ns(), frsp, frspOrig, BSON( "a" << 1 << "c" << 2 ), BSONObj() );
ASSERT_EQUALS( 2, s.nPlans() );
}
};
@@ -1220,9 +537,9 @@ namespace QueryOptimizerTests {
void run() {
Helpers::ensureIndex( ns(), BSON( "a" << 1 ), false, "a_1" );
Helpers::ensureIndex( ns(), BSON( "b" << 1 ), false, "b_1" );
- auto_ptr< FieldRangeSet > frs( new FieldRangeSet( ns(), BSON( "a" << 4 ) ) );
- auto_ptr< FieldRangeSet > frsOrig( new FieldRangeSet( *frs ) );
- QueryPlanSet s( ns(), frs, frsOrig, BSON( "a" << 4 ), BSON( "b" << 1 ) );
+ auto_ptr< FieldRangeSetPair > frsp( new FieldRangeSetPair( ns(), BSON( "a" << 4 ) ) );
+ auto_ptr< FieldRangeSetPair > frspOrig( new FieldRangeSetPair( *frsp ) );
+ QueryPlanSet s( ns(), frsp, frspOrig, BSON( "a" << 4 ), BSON( "b" << 1 ) );
ASSERT_EQUALS( 3, s.nPlans() );
bool threw = false;
auto_ptr< TestOp > t( new TestOp( true, threw ) );
@@ -1264,9 +581,9 @@ namespace QueryOptimizerTests {
void run() {
Helpers::ensureIndex( ns(), BSON( "a" << 1 ), false, "a_1" );
Helpers::ensureIndex( ns(), BSON( "b" << 1 ), false, "b_1" );
- auto_ptr< FieldRangeSet > frs( new FieldRangeSet( ns(), BSON( "a" << 4 ) ) );
- auto_ptr< FieldRangeSet > frsOrig( new FieldRangeSet( *frs ) );
- QueryPlanSet s( ns(), frs, frsOrig, BSON( "a" << 4 ), BSON( "b" << 1 ) );
+ auto_ptr< FieldRangeSetPair > frsp( new FieldRangeSetPair( ns(), BSON( "a" << 4 ) ) );
+ auto_ptr< FieldRangeSetPair > frspOrig( new FieldRangeSetPair( *frsp ) );
+ QueryPlanSet s( ns(), frsp, frspOrig, BSON( "a" << 4 ), BSON( "b" << 1 ) );
ASSERT_EQUALS( 3, s.nPlans() );
auto_ptr< TestOp > t( new TestOp() );
boost::shared_ptr< TestOp > done = s.runOp( *t );
@@ -1294,62 +611,71 @@ namespace QueryOptimizerTests {
void run() {
Helpers::ensureIndex( ns(), BSON( "a" << 1 ), false, "a_1" );
Helpers::ensureIndex( ns(), BSON( "b" << 1 ), false, "b_1" );
+ // No best plan - all must be tried.
nPlans( 3 );
runQuery();
+ // Best plan selected by query.
nPlans( 1 );
nPlans( 1 );
Helpers::ensureIndex( ns(), BSON( "c" << 1 ), false, "c_1" );
+ // Best plan cleared when new index added.
nPlans( 3 );
runQuery();
+ // Best plan selected by query.
nPlans( 1 );
{
DBDirectClient client;
- for( int i = 0; i < 34; ++i ) {
+ for( int i = 0; i < 334; ++i ) {
client.insert( ns(), BSON( "i" << i ) );
client.update( ns(), QUERY( "i" << i ), BSON( "i" << i + 1 ) );
client.remove( ns(), BSON( "i" << i + 1 ) );
}
}
+ // Best plan cleared by ~1000 writes.
nPlans( 3 );
- auto_ptr< FieldRangeSet > frs( new FieldRangeSet( ns(), BSON( "a" << 4 ) ) );
- auto_ptr< FieldRangeSet > frsOrig( new FieldRangeSet( *frs ) );
- QueryPlanSet s( ns(), frs, frsOrig, BSON( "a" << 4 ), BSON( "b" << 1 ) );
+ auto_ptr< FieldRangeSetPair > frsp( new FieldRangeSetPair( ns(), BSON( "a" << 4 ) ) );
+ auto_ptr< FieldRangeSetPair > frspOrig( new FieldRangeSetPair( *frsp ) );
+ QueryPlanSet s( ns(), frsp, frspOrig, BSON( "a" << 4 ), BSON( "b" << 1 ) );
NoRecordTestOp original;
s.runOp( original );
+ // NoRecordTestOp doesn't record a best plan (test cases where mayRecordPlan() is false).
nPlans( 3 );
BSONObj hint = fromjson( "{hint:{$natural:1}}" );
BSONElement hintElt = hint.firstElement();
- auto_ptr< FieldRangeSet > frs2( new FieldRangeSet( ns(), BSON( "a" << 4 ) ) );
- auto_ptr< FieldRangeSet > frsOrig2( new FieldRangeSet( *frs2 ) );
- QueryPlanSet s2( ns(), frs2, frsOrig2, BSON( "a" << 4 ), BSON( "b" << 1 ), &hintElt );
+ auto_ptr< FieldRangeSetPair > frsp2( new FieldRangeSetPair( ns(), BSON( "a" << 4 ) ) );
+ auto_ptr< FieldRangeSetPair > frspOrig2( new FieldRangeSetPair( *frsp2 ) );
+ QueryPlanSet s2( ns(), frsp2, frspOrig2, BSON( "a" << 4 ), BSON( "b" << 1 ), true, &hintElt );
TestOp newOriginal;
s2.runOp( newOriginal );
+ // No plan recorded when a hint is used.
nPlans( 3 );
- auto_ptr< FieldRangeSet > frs3( new FieldRangeSet( ns(), BSON( "a" << 4 ) ) );
- auto_ptr< FieldRangeSet > frsOrig3( new FieldRangeSet( *frs3 ) );
- QueryPlanSet s3( ns(), frs3, frsOrig3, BSON( "a" << 4 ), BSON( "b" << 1 << "c" << 1 ) );
+ auto_ptr< FieldRangeSetPair > frsp3( new FieldRangeSetPair( ns(), BSON( "a" << 4 ), true ) );
+ auto_ptr< FieldRangeSetPair > frspOrig3( new FieldRangeSetPair( *frsp3 ) );
+ QueryPlanSet s3( ns(), frsp3, frspOrig3, BSON( "a" << 4 ), BSON( "b" << 1 << "c" << 1 ) );
TestOp newerOriginal;
s3.runOp( newerOriginal );
+ // Plan recorded was for a different query pattern (different sort spec).
nPlans( 3 );
+ // Best plan still selected by query after all these other tests.
runQuery();
nPlans( 1 );
}
private:
void nPlans( int n ) {
- auto_ptr< FieldRangeSet > frs( new FieldRangeSet( ns(), BSON( "a" << 4 ) ) );
- auto_ptr< FieldRangeSet > frsOrig( new FieldRangeSet( *frs ) );
- QueryPlanSet s( ns(), frs, frsOrig, BSON( "a" << 4 ), BSON( "b" << 1 ) );
+ auto_ptr< FieldRangeSetPair > frsp( new FieldRangeSetPair( ns(), BSON( "a" << 4 ) ) );
+ auto_ptr< FieldRangeSetPair > frspOrig( new FieldRangeSetPair( *frsp ) );
+ QueryPlanSet s( ns(), frsp, frspOrig, BSON( "a" << 4 ), BSON( "b" << 1 ) );
ASSERT_EQUALS( n, s.nPlans() );
}
void runQuery() {
- auto_ptr< FieldRangeSet > frs( new FieldRangeSet( ns(), BSON( "a" << 4 ) ) );
- auto_ptr< FieldRangeSet > frsOrig( new FieldRangeSet( *frs ) );
- QueryPlanSet s( ns(), frs, frsOrig, BSON( "a" << 4 ), BSON( "b" << 1 ) );
+ auto_ptr< FieldRangeSetPair > frsp( new FieldRangeSetPair( ns(), BSON( "a" << 4 ) ) );
+ auto_ptr< FieldRangeSetPair > frspOrig( new FieldRangeSetPair( *frsp ) );
+ QueryPlanSet s( ns(), frsp, frspOrig, BSON( "a" << 4 ), BSON( "b" << 1 ) );
TestOp original;
s.runOp( original );
}
@@ -1376,17 +702,18 @@ namespace QueryOptimizerTests {
void run() {
Helpers::ensureIndex( ns(), BSON( "a" << 1 ), false, "a_1" );
- auto_ptr< FieldRangeSet > frs( new FieldRangeSet( ns(), BSON( "a" << 4 ) ) );
- auto_ptr< FieldRangeSet > frsOrig( new FieldRangeSet( *frs ) );
- QueryPlanSet s( ns(), frs, frsOrig, BSON( "a" << 4 ), BSON( "b" << 1 ) );
+ auto_ptr< FieldRangeSetPair > frsp( new FieldRangeSetPair( ns(), BSON( "a" << 4 ) ) );
+ auto_ptr< FieldRangeSetPair > frspOrig( new FieldRangeSetPair( *frsp ) );
+ QueryPlanSet s( ns(), frsp, frspOrig, BSON( "a" << 4 ), BSON( "b" << 1 ) );
ScanOnlyTestOp op;
s.runOp( op );
- ASSERT( fromjson( "{$natural:1}" ).woCompare( NamespaceDetailsTransient::_get( ns() ).indexForPattern( s.fbs().pattern( BSON( "b" << 1 ) ) ) ) == 0 );
- ASSERT_EQUALS( 1, NamespaceDetailsTransient::_get( ns() ).nScannedForPattern( s.fbs().pattern( BSON( "b" << 1 ) ) ) );
+ pair< BSONObj, long long > best = QueryUtilIndexed::bestIndexForPatterns( s.frsp(), BSON( "b" << 1 ) );
+ ASSERT( fromjson( "{$natural:1}" ).woCompare( best.first ) == 0 );
+ ASSERT_EQUALS( 1, best.second );
- auto_ptr< FieldRangeSet > frs2( new FieldRangeSet( ns(), BSON( "a" << 4 ) ) );
- auto_ptr< FieldRangeSet > frsOrig2( new FieldRangeSet( *frs2 ) );
- QueryPlanSet s2( ns(), frs2, frsOrig2, BSON( "a" << 4 ), BSON( "b" << 1 ) );
+ auto_ptr< FieldRangeSetPair > frsp2( new FieldRangeSetPair( ns(), BSON( "a" << 4 ) ) );
+ auto_ptr< FieldRangeSetPair > frspOrig2( new FieldRangeSetPair( *frsp2 ) );
+ QueryPlanSet s2( ns(), frsp2, frspOrig2, BSON( "a" << 4 ), BSON( "b" << 1 ) );
TestOp op2;
ASSERT( s2.runOp( op2 )->complete() );
}
@@ -1396,7 +723,7 @@ namespace QueryOptimizerTests {
TestOp() {}
virtual void _init() {}
virtual void next() {
- if ( qp().indexKey().firstElement().fieldName() == string( "$natural" ) )
+ if ( qp().indexKey().firstElementFieldName() == string( "$natural" ) )
massert( 10410 , "throw", false );
setComplete();
}
@@ -1442,8 +769,8 @@ namespace QueryOptimizerTests {
BSONObj one = BSON( "a" << 1 );
theDataFileMgr.insertWithObjMod( ns(), one );
deleteObjects( ns(), BSON( "a" << 1 ), false );
- ASSERT( BSON( "a" << 1 ).woCompare( NamespaceDetailsTransient::_get( ns() ).indexForPattern( FieldRangeSet( ns(), BSON( "a" << 1 ) ).pattern() ) ) == 0 );
- ASSERT_EQUALS( 1, NamespaceDetailsTransient::_get( ns() ).nScannedForPattern( FieldRangeSet( ns(), BSON( "a" << 1 ) ).pattern() ) );
+ ASSERT( BSON( "a" << 1 ).woCompare( NamespaceDetailsTransient::get_inlock( ns() ).indexForPattern( FieldRangeSet( ns(), BSON( "a" << 1 ), true ).pattern() ) ) == 0 );
+ ASSERT_EQUALS( 1, NamespaceDetailsTransient::get_inlock( ns() ).nScannedForPattern( FieldRangeSet( ns(), BSON( "a" << 1 ), true ).pattern() ) );
}
};
@@ -1498,7 +825,7 @@ namespace QueryOptimizerTests {
QueryMessage q(d);
runQuery( m, q);
}
- ASSERT( BSON( "$natural" << 1 ).woCompare( NamespaceDetailsTransient::_get( ns() ).indexForPattern( FieldRangeSet( ns(), BSON( "b" << 0 << "a" << GTE << 0 ) ).pattern() ) ) == 0 );
+ ASSERT( BSON( "$natural" << 1 ).woCompare( NamespaceDetailsTransient::get_inlock( ns() ).indexForPattern( FieldRangeSet( ns(), BSON( "b" << 0 << "a" << GTE << 0 ), true ).pattern() ) ) == 0 );
Message m2;
assembleRequest( ns(), QUERY( "b" << 99 << "a" << GTE << 0 ).obj, 2, 0, 0, 0, m2 );
@@ -1507,8 +834,8 @@ namespace QueryOptimizerTests {
QueryMessage q(d);
runQuery( m2, q);
}
- ASSERT( BSON( "a" << 1 ).woCompare( NamespaceDetailsTransient::_get( ns() ).indexForPattern( FieldRangeSet( ns(), BSON( "b" << 0 << "a" << GTE << 0 ) ).pattern() ) ) == 0 );
- ASSERT_EQUALS( 3, NamespaceDetailsTransient::_get( ns() ).nScannedForPattern( FieldRangeSet( ns(), BSON( "b" << 0 << "a" << GTE << 0 ) ).pattern() ) );
+ ASSERT( BSON( "a" << 1 ).woCompare( NamespaceDetailsTransient::get_inlock( ns() ).indexForPattern( FieldRangeSet( ns(), BSON( "b" << 0 << "a" << GTE << 0 ), true ).pattern() ) ) == 0 );
+ ASSERT_EQUALS( 3, NamespaceDetailsTransient::get_inlock( ns() ).nScannedForPattern( FieldRangeSet( ns(), BSON( "b" << 0 << "a" << GTE << 0 ), true ).pattern() ) );
}
};
@@ -1522,10 +849,10 @@ namespace QueryOptimizerTests {
}
BSONObj hint = fromjson( "{$hint:{a:1}}" );
BSONElement hintElt = hint.firstElement();
- auto_ptr< FieldRangeSet > frs( new FieldRangeSet( ns(), fromjson( "{a:{$in:[2,3,6,9,11]}}" ) ) );
- auto_ptr< FieldRangeSet > frsOrig( new FieldRangeSet( *frs ) );
- QueryPlanSet s( ns(), frs, frsOrig, fromjson( "{a:{$in:[2,3,6,9,11]}}" ), BSONObj(), &hintElt );
- QueryPlan qp( nsd(), 1, s.fbs(), s.originalFrs(), fromjson( "{a:{$in:[2,3,6,9,11]}}" ), BSONObj() );
+ auto_ptr< FieldRangeSetPair > frsp( new FieldRangeSetPair( ns(), fromjson( "{a:{$in:[2,3,6,9,11]}}" ) ) );
+ auto_ptr< FieldRangeSetPair > frspOrig( new FieldRangeSetPair( *frsp ) );
+ QueryPlanSet s( ns(), frsp, frspOrig, fromjson( "{a:{$in:[2,3,6,9,11]}}" ), BSONObj(), true, &hintElt );
+ QueryPlan qp( nsd(), 1, s.frsp(), s.originalFrsp(), fromjson( "{a:{$in:[2,3,6,9,11]}}" ), BSONObj() );
boost::shared_ptr<Cursor> c = qp.newCursor();
double expected[] = { 2, 3, 6, 9 };
for( int i = 0; i < 4; ++i, c->advance() ) {
@@ -1535,10 +862,10 @@ namespace QueryOptimizerTests {
// now check reverse
{
- auto_ptr< FieldRangeSet > frs( new FieldRangeSet( ns(), fromjson( "{a:{$in:[2,3,6,9,11]}}" ) ) );
- auto_ptr< FieldRangeSet > frsOrig( new FieldRangeSet( *frs ) );
- QueryPlanSet s( ns(), frs, frsOrig, fromjson( "{a:{$in:[2,3,6,9,11]}}" ), BSON( "a" << -1 ), &hintElt );
- QueryPlan qp( nsd(), 1, s.fbs(), s.originalFrs(), fromjson( "{a:{$in:[2,3,6,9,11]}}" ), BSON( "a" << -1 ) );
+ auto_ptr< FieldRangeSetPair > frsp( new FieldRangeSetPair( ns(), fromjson( "{a:{$in:[2,3,6,9,11]}}" ) ) );
+ auto_ptr< FieldRangeSetPair > frspOrig( new FieldRangeSetPair( *frsp ) );
+ QueryPlanSet s( ns(), frsp, frspOrig, fromjson( "{a:{$in:[2,3,6,9,11]}}" ), BSON( "a" << -1 ), true, &hintElt );
+ QueryPlan qp( nsd(), 1, s.frsp(), s.originalFrsp(), fromjson( "{a:{$in:[2,3,6,9,11]}}" ), BSON( "a" << -1 ) );
boost::shared_ptr<Cursor> c = qp.newCursor();
double expected[] = { 9, 6, 3, 2 };
for( int i = 0; i < 4; ++i, c->advance() ) {
@@ -1558,8 +885,8 @@ namespace QueryOptimizerTests {
theDataFileMgr.insertWithObjMod( ns(), temp );
}
BSONObj hint = fromjson( "{$hint:{a:1,b:1}}" );
- auto_ptr< FieldRangeSet > frs( new FieldRangeSet( ns(), fromjson( "{a:5,b:{$in:[2,3,6,9,11]}}" ) ) );
- QueryPlan qp( nsd(), 1, *frs, *frs, fromjson( "{a:5,b:{$in:[2,3,6,9,11]}}" ), BSONObj() );
+ auto_ptr< FieldRangeSetPair > frsp( new FieldRangeSetPair( ns(), fromjson( "{a:5,b:{$in:[2,3,6,9,11]}}" ) ) );
+ QueryPlan qp( nsd(), 1, *frsp, frsp.get(), fromjson( "{a:5,b:{$in:[2,3,6,9,11]}}" ), BSONObj() );
boost::shared_ptr<Cursor> c = qp.newCursor();
double expected[] = { 2, 3, 6, 9 };
ASSERT( c->ok() );
@@ -1580,8 +907,8 @@ namespace QueryOptimizerTests {
theDataFileMgr.insertWithObjMod( ns(), temp );
}
BSONObj hint = fromjson( "{$hint:{a:1,b:1}}" );
- auto_ptr< FieldRangeSet > frs( new FieldRangeSet( ns(), fromjson( "{a:{$gte:5},b:{$in:[2,3,6,9,11]}}" ) ) );
- QueryPlan qp( nsd(), 1, *frs, *frs, fromjson( "{a:{$gte:5},b:{$in:[2,3,6,9,11]}}" ), BSONObj() );
+ auto_ptr< FieldRangeSetPair > frsp( new FieldRangeSetPair( ns(), fromjson( "{a:{$gte:5},b:{$in:[2,3,6,9,11]}}" ) ) );
+ QueryPlan qp( nsd(), 1, *frsp, frsp.get(), fromjson( "{a:{$gte:5},b:{$in:[2,3,6,9,11]}}" ), BSONObj() );
boost::shared_ptr<Cursor> c = qp.newCursor();
int matches[] = { 2, 3, 6, 9 };
for( int i = 0; i < 4; ++i, c->advance() ) {
@@ -1603,10 +930,10 @@ namespace QueryOptimizerTests {
if ( !nsd() )
return;
string s( ns() );
- dropNS( s );
+ dropCollection( ns() );
}
protected:
- static const char *ns() { return "unittests.BaseTests"; }
+ static const char *ns() { return "unittests.QueryOptimizerTests"; }
static NamespaceDetails *nsd() { return nsdetails( ns() ); }
private:
dblock lk_;
@@ -1626,156 +953,1784 @@ namespace QueryOptimizerTests {
boost::shared_ptr< Cursor > c = bestGuessCursor( ns(), BSON( "b" << 1 ), BSON( "a" << 1 ) );
ASSERT_EQUALS( string( "a" ), c->indexKeyPattern().firstElement().fieldName() );
c = bestGuessCursor( ns(), BSON( "a" << 1 ), BSON( "b" << 1 ) );
- ASSERT_EQUALS( string( "b" ), c->indexKeyPattern().firstElement().fieldName() );
+ ASSERT_EQUALS( string( "b" ), c->indexKeyPattern().firstElementFieldName() );
boost::shared_ptr< MultiCursor > m = dynamic_pointer_cast< MultiCursor >( bestGuessCursor( ns(), fromjson( "{b:1,$or:[{z:1}]}" ), BSON( "a" << 1 ) ) );
ASSERT_EQUALS( string( "a" ), m->sub_c()->indexKeyPattern().firstElement().fieldName() );
m = dynamic_pointer_cast< MultiCursor >( bestGuessCursor( ns(), fromjson( "{a:1,$or:[{y:1}]}" ), BSON( "b" << 1 ) ) );
- ASSERT_EQUALS( string( "b" ), m->sub_c()->indexKeyPattern().firstElement().fieldName() );
+ ASSERT_EQUALS( string( "b" ), m->sub_c()->indexKeyPattern().firstElementFieldName() );
- FieldRangeSet frs( "ns", BSON( "a" << 1 ) );
+ FieldRangeSet frs( "ns", BSON( "a" << 1 ), true );
{
- scoped_lock lk(NamespaceDetailsTransient::_qcMutex);
+ SimpleMutex::scoped_lock lk(NamespaceDetailsTransient::_qcMutex);
NamespaceDetailsTransient::get_inlock( ns() ).registerIndexForPattern( frs.pattern( BSON( "b" << 1 ) ), BSON( "a" << 1 ), 0 );
}
m = dynamic_pointer_cast< MultiCursor >( bestGuessCursor( ns(), fromjson( "{a:1,$or:[{y:1}]}" ), BSON( "b" << 1 ) ) );
ASSERT_EQUALS( string( "b" ), m->sub_c()->indexKeyPattern().firstElement().fieldName() );
}
};
+
+ namespace QueryOptimizerCursorTests {
+
+ using boost::shared_ptr;
+
+ class Base {
+ public:
+ Base() {
+ dblock lk;
+ Client::Context ctx( ns() );
+ string err;
+ userCreateNS( ns(), BSONObj(), err, false );
+ dropCollection( ns() );
+ }
+ ~Base() {
+ cc().curop()->reset();
+ }
+ protected:
+ DBDirectClient _cli;
+ static const char *ns() { return "unittests.QueryOptimizerTests"; }
+ void setQueryOptimizerCursor( const BSONObj &query, const BSONObj &order = BSONObj() ) {
+ _c = newQueryOptimizerCursor( ns(), query, order );
+ if ( ok() && !mayReturnCurrent() ) {
+ advance();
+ }
+ }
+ bool ok() const { return _c->ok(); }
+ /** Handles matching and deduping. */
+ bool advance() {
+ while( _c->advance() && !mayReturnCurrent() );
+ return ok();
+ }
+ int itcount() {
+ int ret = 0;
+ while( ok() ) {
+ ++ret;
+ advance();
+ }
+ return ret;
+ }
+ BSONObj current() const { return _c->current(); }
+ bool mayReturnCurrent() {
+ return _c->matcher()->matchesCurrent( _c.get() ) && !_c->getsetdup( _c->currLoc() );
+ }
+ bool prepareToYield() const { return _c->prepareToYield(); }
+ void recoverFromYield() {
+ _c->recoverFromYield();
+ if ( ok() && !mayReturnCurrent() ) {
+ advance();
+ }
+ }
+ shared_ptr<Cursor> c() { return _c; }
+ long long nscanned() const { return _c->nscanned(); }
+ private:
+ shared_ptr<Cursor> _c;
+ };
+
+ /** No results for empty collection. */
+ class Empty : public Base {
+ public:
+ void run() {
+ dblock lk;
+ Client::Context ctx( ns() );
+ shared_ptr<Cursor> c = newQueryOptimizerCursor( ns(), BSONObj() );
+ ASSERT( !c->ok() );
+ ASSERT_EXCEPTION( c->_current(), AssertionException );
+ ASSERT_EXCEPTION( c->current(), AssertionException );
+ ASSERT( c->currLoc().isNull() );
+ ASSERT( !c->advance() );
+ ASSERT_EXCEPTION( c->currKey(), AssertionException );
+ ASSERT_EXCEPTION( c->getsetdup( DiskLoc() ), AssertionException );
+ ASSERT_EXCEPTION( c->isMultiKey(), AssertionException );
+ ASSERT_EXCEPTION( c->matcher(), AssertionException );
+ }
+ };
+
+ /** Simple table scan. */
+ class Unindexed : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 1 ) );
+ _cli.insert( ns(), BSON( "_id" << 2 ) );
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSONObj() );
+ ASSERT_EQUALS( 2, itcount() );
+ }
+ };
+
+ /** Basic test with two indexes and deduping requirement. */
+ class Basic : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 1 << "a" << 2 ) );
+ _cli.insert( ns(), BSON( "_id" << 2 << "a" << 1 ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "_id" << GT << 0 << "a" << GT << 0 ) );
+ ASSERT( ok() );
+ ASSERT_EQUALS( BSON( "_id" << 1 << "a" << 2 ), current() );
+ ASSERT( advance() );
+ ASSERT_EQUALS( BSON( "_id" << 2 << "a" << 1 ), current() );
+ ASSERT( !advance() );
+ ASSERT( !ok() );
+ }
+ };
+
+ class NoMatch : public Base {
+ public:
+ void run() {
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "_id" << GT << 5 << LT << 4 << "a" << GT << 0 ) );
+ ASSERT( !ok() );
+ }
+ };
+
+ /** Order of results indicates that interleaving is occurring. */
+ class Interleaved : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 1 << "a" << 2 ) );
+ _cli.insert( ns(), BSON( "_id" << 3 << "a" << 1 ) );
+ _cli.insert( ns(), BSON( "_id" << 2 << "a" << 2 ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "_id" << GT << 0 << "a" << GT << 0 ) );
+ ASSERT( ok() );
+ ASSERT_EQUALS( BSON( "_id" << 1 << "a" << 2 ), current() );
+ ASSERT( advance() );
+ ASSERT_EQUALS( BSON( "_id" << 3 << "a" << 1 ), current() );
+ ASSERT( advance() );
+ ASSERT_EQUALS( BSON( "_id" << 2 << "a" << 2 ), current() );
+ ASSERT( !advance() );
+ ASSERT( !ok() );
+ }
+ };
+
+ /** Some values on each index do not match. */
+ class NotMatch : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 0 << "a" << 10 ) );
+ _cli.insert( ns(), BSON( "_id" << 10 << "a" << 0 ) );
+ _cli.insert( ns(), BSON( "_id" << 11 << "a" << 12 ) );
+ _cli.insert( ns(), BSON( "_id" << 12 << "a" << 11 ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "_id" << GT << 5 << "a" << GT << 5 ) );
+ ASSERT( ok() );
+ ASSERT_EQUALS( BSON( "_id" << 11 << "a" << 12 ), current() );
+ ASSERT( advance() );
+ ASSERT_EQUALS( BSON( "_id" << 12 << "a" << 11 ), current() );
+ ASSERT( !advance() );
+ ASSERT( !ok() );
+ }
+ };
+
+ /** After the first 101 matches for a plan, we stop interleaving the plans. */
+ class StopInterleaving : public Base {
+ public:
+ void run() {
+ for( int i = 0; i < 101; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i << "a" << i ) );
+ }
+ for( int i = 101; i < 200; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i << "a" << (301-i) ) );
+ }
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "_id" << GT << -1 << "a" << GT << -1 ) );
+ for( int i = 0; i < 200; ++i ) {
+ ASSERT( ok() );
+ ASSERT_EQUALS( i, current().getIntField( "_id" ) );
+ advance();
+ }
+ ASSERT( !advance() );
+ ASSERT( !ok() );
+ }
+ };
+
+ /** Test correct deduping with the takeover cursor. */
+ class TakeoverWithDup : public Base {
+ public:
+ void run() {
+ for( int i = 0; i < 101; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i << "a" << i ) );
+ }
+ _cli.insert( ns(), BSON( "_id" << 500 << "a" << BSON_ARRAY( 0 << 300 ) ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "_id" << GT << -1 << "a" << GT << -1 ) );
+ ASSERT_EQUALS( 102, itcount() );
+ }
+ };
+
+ /** Test usage of matcher with takeover cursor. */
+ class TakeoverWithNonMatches : public Base {
+ public:
+ void run() {
+ for( int i = 0; i < 101; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i << "a" << i ) );
+ }
+ _cli.insert( ns(), BSON( "_id" << 101 << "a" << 600 ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "_id" << GT << -1 << "a" << LT << 500 ) );
+ ASSERT_EQUALS( 101, itcount() );
+ }
+ };
+
+ /** Check deduping of dups within just the takeover cursor. */
+ class TakeoverWithTakeoverDup : public Base {
+ public:
+ void run() {
+ for( int i = 0; i < 101; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i*2 << "a" << 0 ) );
+ _cli.insert( ns(), BSON( "_id" << i*2+1 << "a" << 1 ) );
+ }
+ _cli.insert( ns(), BSON( "_id" << 202 << "a" << BSON_ARRAY( 2 << 3 ) ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "_id" << GT << -1 << "a" << GT << 0) );
+ ASSERT_EQUALS( 102, itcount() );
+ }
+ };
+
+ /** Basic test with $or query. */
+ class BasicOr : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 0 << "a" << 0 ) );
+ _cli.insert( ns(), BSON( "_id" << 1 << "a" << 1 ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "$or" << BSON_ARRAY( BSON( "_id" << 0 ) << BSON( "a" << 1 ) ) ) );
+ ASSERT_EQUALS( BSON( "_id" << 0 << "a" << 0 ), current() );
+ ASSERT( advance() );
+ ASSERT_EQUALS( BSON( "_id" << 1 << "a" << 1 ), current() );
+ ASSERT( !advance() );
+ }
+ };
+
+ /** $or first clause empty. */
+ class OrFirstClauseEmpty : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 0 << "a" << 1 ) );
+ _cli.insert( ns(), BSON( "_id" << 1 << "a" << 1 ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "$or" << BSON_ARRAY( BSON( "_id" << -1 ) << BSON( "a" << 1 ) ) ) );
+ ASSERT_EQUALS( BSON( "_id" << 0 << "a" << 1 ), current() );
+ ASSERT( advance() );
+ ASSERT_EQUALS( BSON( "_id" << 1 << "a" << 1 ), current() );
+ ASSERT( !advance() );
+ }
+ };
+
+ /** $or second clause empty. */
+ class OrSecondClauseEmpty : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 0 << "a" << 1 ) );
+ _cli.insert( ns(), BSON( "_id" << 1 << "a" << 1 ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "$or" << BSON_ARRAY( BSON( "_id" << 0 ) << BSON( "_id" << -1 ) << BSON( "a" << 1 ) ) ) );
+ ASSERT_EQUALS( BSON( "_id" << 0 << "a" << 1 ), current() );
+ ASSERT( advance() );
+ ASSERT_EQUALS( BSON( "_id" << 1 << "a" << 1 ), current() );
+ ASSERT( !advance() );
+ }
+ };
+
+        /** $or with multiple empty clauses. */
+ class OrMultipleClausesEmpty : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 0 << "a" << 1 ) );
+ _cli.insert( ns(), BSON( "_id" << 1 << "a" << 1 ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "$or" << BSON_ARRAY( BSON( "_id" << 2 ) << BSON( "_id" << 4 ) << BSON( "_id" << 0 ) << BSON( "_id" << -1 ) << BSON( "_id" << 6 ) << BSON( "a" << 1 ) << BSON( "_id" << 9 ) ) ) );
+ ASSERT_EQUALS( BSON( "_id" << 0 << "a" << 1 ), current() );
+ ASSERT( advance() );
+ ASSERT_EQUALS( BSON( "_id" << 1 << "a" << 1 ), current() );
+ ASSERT( !advance() );
+ }
+ };
+
+        /** Check that takeover occurs at the proper match count with $or clauses. */
+ class TakeoverCountOr : public Base {
+ public:
+ void run() {
+ for( int i = 0; i < 60; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i << "a" << 0 ) );
+ }
+ for( int i = 60; i < 120; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i << "a" << 1 ) );
+ }
+ for( int i = 120; i < 150; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i << "a" << (200-i) ) );
+ }
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "$or" << BSON_ARRAY( BSON( "a" << 0 ) << BSON( "a" << 1 ) << BSON( "_id" << GTE << 120 << "a" << GT << 1 ) ) ) );
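+                // The first two clauses supply 120 matches, so takeover has occurred by the
+                // time the third clause is scanned.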
+ for( int i = 0; i < 120; ++i ) {
+ ASSERT( ok() );
+ advance();
+ }
+ // Expect to be scanning on _id index only.
+ for( int i = 120; i < 150; ++i ) {
+ ASSERT_EQUALS( i, current().getIntField( "_id" ) );
+ advance();
+ }
+ ASSERT( !ok() );
+ }
+ };
+
+ /** Takeover just at end of clause. */
+ class TakeoverEndOfOrClause : public Base {
+ public:
+ void run() {
+ for( int i = 0; i < 102; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i ) );
+ }
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "$or" << BSON_ARRAY( BSON( "_id" << LT << 101 ) << BSON( "_id" << 101 ) ) ) );
+ for( int i = 0; i < 102; ++i ) {
+ ASSERT_EQUALS( i, current().getIntField( "_id" ) );
+ advance();
+ }
+ ASSERT( !ok() );
+ }
+ };
+
+ class TakeoverBeforeEndOfOrClause : public Base {
+ public:
+ void run() {
+ for( int i = 0; i < 101; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i ) );
+ }
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "$or" << BSON_ARRAY( BSON( "_id" << LT << 100 ) << BSON( "_id" << 100 ) ) ) );
+ for( int i = 0; i < 101; ++i ) {
+ ASSERT_EQUALS( i, current().getIntField( "_id" ) );
+ advance();
+ }
+ ASSERT( !ok() );
+ }
+ };
+
+ class TakeoverAfterEndOfOrClause : public Base {
+ public:
+ void run() {
+ for( int i = 0; i < 103; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i ) );
+ }
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "$or" << BSON_ARRAY( BSON( "_id" << LT << 102 ) << BSON( "_id" << 102 ) ) ) );
+ for( int i = 0; i < 103; ++i ) {
+ ASSERT_EQUALS( i, current().getIntField( "_id" ) );
+ advance();
+ }
+ ASSERT( !ok() );
+ }
+ };
+
+ /** Test matching and deduping done manually by cursor client. */
+ class ManualMatchingDeduping : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 0 << "a" << 10 ) );
+ _cli.insert( ns(), BSON( "_id" << 10 << "a" << 0 ) );
+ _cli.insert( ns(), BSON( "_id" << 11 << "a" << 12 ) );
+ _cli.insert( ns(), BSON( "_id" << 12 << "a" << 11 ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ shared_ptr< Cursor > c = newQueryOptimizerCursor( ns(), BSON( "_id" << GT << 5 << "a" << GT << 5 ) );
+ ASSERT( c->ok() );
+
+ // _id 10 {_id:1}
+ ASSERT_EQUALS( 10, c->current().getIntField( "_id" ) );
+ ASSERT( !c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( c->advance() );
+
+ // _id 0 {a:1}
+ ASSERT_EQUALS( 0, c->current().getIntField( "_id" ) );
+ ASSERT( !c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( c->advance() );
+
+ // _id 0 {$natural:1}
+ ASSERT_EQUALS( 0, c->current().getIntField( "_id" ) );
+ ASSERT( !c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( c->advance() );
+
+ // _id 11 {_id:1}
+ ASSERT_EQUALS( BSON( "_id" << 11 << "a" << 12 ), c->current() );
+ ASSERT( c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( !c->getsetdup( c->currLoc() ) );
+ ASSERT( c->advance() );
+
+ // _id 12 {a:1}
+ ASSERT_EQUALS( BSON( "_id" << 12 << "a" << 11 ), c->current() );
+ ASSERT( c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( !c->getsetdup( c->currLoc() ) );
+ ASSERT( c->advance() );
+
+ // _id 10 {$natural:1}
+ ASSERT_EQUALS( 10, c->current().getIntField( "_id" ) );
+ ASSERT( !c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( c->advance() );
+
+ // _id 12 {_id:1}
+ ASSERT_EQUALS( BSON( "_id" << 12 << "a" << 11 ), c->current() );
+ ASSERT( c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( c->getsetdup( c->currLoc() ) );
+ ASSERT( c->advance() );
+
+ // _id 11 {a:1}
+ ASSERT_EQUALS( BSON( "_id" << 11 << "a" << 12 ), c->current() );
+ ASSERT( c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( c->getsetdup( c->currLoc() ) );
+ ASSERT( c->advance() );
+
+ // _id 11 {$natural:1}
+ ASSERT_EQUALS( 11, c->current().getIntField( "_id" ) );
+ ASSERT( c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( c->getsetdup( c->currLoc() ) );
+
+ // {_id:1} scan is complete.
+ ASSERT( !c->advance() );
+ ASSERT( !c->ok() );
+
+ // Scan the results again - this time the winning plan has been
+ // recorded.
+ c = newQueryOptimizerCursor( ns(), BSON( "_id" << GT << 5 << "a" << GT << 5 ) );
+ ASSERT( c->ok() );
+
+ // _id 10 {_id:1}
+ ASSERT_EQUALS( 10, c->current().getIntField( "_id" ) );
+ ASSERT( !c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( c->advance() );
+
+ // _id 11 {_id:1}
+ ASSERT_EQUALS( BSON( "_id" << 11 << "a" << 12 ), c->current() );
+ ASSERT( c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( !c->getsetdup( c->currLoc() ) );
+ ASSERT( c->advance() );
+
+ // _id 12 {_id:1}
+ ASSERT_EQUALS( BSON( "_id" << 12 << "a" << 11 ), c->current() );
+ ASSERT( c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( !c->getsetdup( c->currLoc() ) );
+
+ // {_id:1} scan complete
+ ASSERT( !c->advance() );
+ ASSERT( !c->ok() );
+ }
+ };
+
+        /** The current key must correspond to currLoc for matching to work correctly. */
+ class ManualMatchingUsingCurrKey : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << "a" ) );
+ _cli.insert( ns(), BSON( "_id" << "b" ) );
+ _cli.insert( ns(), BSON( "_id" << "ba" ) );
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ shared_ptr< Cursor > c = newQueryOptimizerCursor( ns(), fromjson( "{_id:/a/}" ) );
+ ASSERT( c->ok() );
+ // "a"
+ ASSERT( c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( !c->getsetdup( c->currLoc() ) );
+ ASSERT( c->advance() );
+ ASSERT( c->ok() );
+
+ // "b"
+ ASSERT( !c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( c->advance() );
+ ASSERT( c->ok() );
+
+ // "ba"
+ ASSERT( c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( !c->getsetdup( c->currLoc() ) );
+ ASSERT( !c->advance() );
+ }
+ };
+
+ /** Test matching and deduping done manually by cursor client. */
+ class ManualMatchingDedupingTakeover : public Base {
+ public:
+ void run() {
+ for( int i = 0; i < 150; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i << "a" << 0 ) );
+ }
+ _cli.insert( ns(), BSON( "_id" << 300 << "a" << 1 ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ shared_ptr< Cursor > c = newQueryOptimizerCursor( ns(), BSON( "$or" << BSON_ARRAY( BSON( "_id" << LT << 300 ) << BSON( "a" << 1 ) ) ) );
+ for( int i = 0; i < 151; ++i ) {
+ ASSERT( c->ok() );
+ ASSERT( c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( !c->getsetdup( c->currLoc() ) );
+ c->advance();
+ }
+ ASSERT( !c->ok() );
+ }
+ };
+
+ /** Test single key matching bounds. */
+ class Singlekey : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "a" << "10" ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ shared_ptr< Cursor > c = newQueryOptimizerCursor( ns(), BSON( "a" << GT << 1 << LT << 5 ) );
+ // Two sided bounds work.
+ ASSERT( !c->ok() );
+ }
+ };
+
+ /** Test multi key matching bounds. */
+ class Multikey : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "a" << BSON_ARRAY( 1 << 10 ) ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "a" << GT << 5 << LT << 3 ) );
+ // Multi key bounds work.
+ ASSERT( ok() );
+ }
+ };
+
+        /** Add other plans when the recorded one is performing worse than expected. */
+ class AddOtherPlans : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 0 << "a" << 0 << "b" << 0 ) );
+ _cli.insert( ns(), BSON( "_id" << 1 << "a" << 1 << "b" << 0 ) );
+ for( int i = 100; i < 150; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i << "a" << 100 << "b" << i ) );
+ }
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+ _cli.ensureIndex( ns(), BSON( "b" << 1 ) );
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ shared_ptr<Cursor> c = newQueryOptimizerCursor( ns(), BSON( "a" << 0 << "b" << 0 ) );
+
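+                // With no client-side deduping, the single matching document is returned once by each candidate plan.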
+ ASSERT_EQUALS( BSON( "_id" << 0 << "a" << 0 << "b" << 0 ), c->current() );
+ ASSERT( c->advance() );
+ ASSERT_EQUALS( BSON( "_id" << 0 << "a" << 0 << "b" << 0 ), c->current() );
+ ASSERT( c->advance() );
+                // $natural plan
+ ASSERT_EQUALS( BSON( "_id" << 0 << "a" << 0 << "b" << 0 ), c->current() );
+ ASSERT( !c->advance() );
+
+ c = newQueryOptimizerCursor( ns(), BSON( "a" << 100 << "b" << 149 ) );
+ // Try {a:1}, which was successful previously.
+ for( int i = 0; i < 11; ++i ) {
+ ASSERT( 149 != c->current().getIntField( "b" ) );
+ ASSERT( c->advance() );
+ }
+ // Now try {b:1} plan.
+ ASSERT_EQUALS( 149, c->current().getIntField( "b" ) );
+ ASSERT( c->advance() );
+ // {b:1} plan finished.
+ ASSERT( !c->advance() );
+ }
+ };
+
+ /** Check $or clause range elimination. */
+ class OrRangeElimination : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 1 ) );
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ shared_ptr<Cursor> c = newQueryOptimizerCursor( ns(), BSON( "$or" << BSON_ARRAY( BSON( "_id" << GT << 0 ) << BSON( "_id" << 1 ) ) ) );
+ ASSERT( c->ok() );
+ ASSERT( !c->advance() );
+ }
+ };
+
+ /** Check $or match deduping - in takeover cursor. */
+ class OrDedup : public Base {
+ public:
+ void run() {
+ for( int i = 0; i < 150; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i << "a" << i ) );
+ }
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ shared_ptr<Cursor> c = newQueryOptimizerCursor( ns(), BSON( "$or" << BSON_ARRAY( BSON( "_id" << LT << 140 ) << BSON( "_id" << 145 ) << BSON( "a" << 145 ) ) ) );
+
+ while( c->current().getIntField( "_id" ) < 140 ) {
+ ASSERT( c->advance() );
+ }
+ // Match from second $or clause.
+ ASSERT_EQUALS( 145, c->current().getIntField( "_id" ) );
+ ASSERT( c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( c->advance() );
+ // Match from third $or clause.
+ ASSERT_EQUALS( 145, c->current().getIntField( "_id" ) );
+ // $or deduping is handled by the matcher.
+ ASSERT( !c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( !c->advance() );
+ }
+ };
+
+        /** Standard deduping of duplicates produced by a multikey cursor. */
+ class EarlyDups : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "a" << BSON_ARRAY( 0 << 1 << 200 ) ) );
+ for( int i = 2; i < 150; ++i ) {
+ _cli.insert( ns(), BSON( "a" << i ) );
+ }
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "a" << GT << -1 ) );
+ ASSERT_EQUALS( 149, itcount() );
+ }
+ };
+
+        /** Pop an $or clause in the takeover cursor. */
+ class OrPopInTakeover : public Base {
+ public:
+ void run() {
+ for( int i = 0; i < 150; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i ) );
+ }
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ shared_ptr<Cursor> c = newQueryOptimizerCursor( ns(), BSON( "$or" << BSON_ARRAY( BSON( "_id" << LTE << 147 ) << BSON( "_id" << 148 ) << BSON( "_id" << 149 ) ) ) );
+ for( int i = 0; i < 150; ++i ) {
+ ASSERT( c->ok() );
+ ASSERT_EQUALS( i, c->current().getIntField( "_id" ) );
+ c->advance();
+ }
+ ASSERT( !c->ok() );
+ }
+ };
+
+        /** $or clause iteration is abandoned once a full collection scan is performed. */
+ class OrCollectionScanAbort : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 0 << "a" << BSON_ARRAY( 1 << 2 << 3 << 4 << 5 ) << "b" << 4 ) );
+ _cli.insert( ns(), BSON( "_id" << 1 << "a" << BSON_ARRAY( 6 << 7 << 8 << 9 << 10 ) << "b" << 4 ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ shared_ptr<Cursor> c = newQueryOptimizerCursor( ns(), BSON( "$or" << BSON_ARRAY( BSON( "a" << LT << 6 << "b" << 4 ) << BSON( "a" << GTE << 6 << "b" << 4 ) ) ) );
+
+ ASSERT( c->ok() );
+
+ // _id 0 on {a:1}
+ ASSERT_EQUALS( 0, c->current().getIntField( "_id" ) );
+ ASSERT( c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( !c->getsetdup( c->currLoc() ) );
+ c->advance();
+
+ // _id 0 on {$natural:1}
+ ASSERT_EQUALS( 0, c->current().getIntField( "_id" ) );
+ ASSERT( c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( c->getsetdup( c->currLoc() ) );
+ c->advance();
+
+ // _id 0 on {a:1}
+ ASSERT_EQUALS( 0, c->current().getIntField( "_id" ) );
+ ASSERT( c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( c->getsetdup( c->currLoc() ) );
+ c->advance();
+
+ // _id 1 on {$natural:1}
+ ASSERT_EQUALS( 1, c->current().getIntField( "_id" ) );
+ ASSERT( c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( !c->getsetdup( c->currLoc() ) );
+ c->advance();
+
+ // _id 0 on {a:1}
+ ASSERT_EQUALS( 0, c->current().getIntField( "_id" ) );
+ ASSERT( c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( c->getsetdup( c->currLoc() ) );
+ c->advance();
+
+ // {$natural:1} finished
+ ASSERT( !c->ok() );
+ }
+ };
+
+ /** Simple geo query. */
+ class Geo : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 0 << "loc" << BSON( "lon" << 30 << "lat" << 30 ) ) );
+ _cli.insert( ns(), BSON( "_id" << 1 << "loc" << BSON( "lon" << 31 << "lat" << 31 ) ) );
+ _cli.ensureIndex( ns(), BSON( "loc" << "2d" ) );
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "loc" << BSON( "$near" << BSON_ARRAY( 30 << 30 ) ) ) );
+ ASSERT( ok() );
+ ASSERT_EQUALS( 0, current().getIntField( "_id" ) );
+ ASSERT( advance() );
+ ASSERT_EQUALS( 1, current().getIntField( "_id" ) );
+ ASSERT( !advance() );
+ ASSERT( !ok() );
+ }
+ };
+
+        /** Yield the cursor with no intervening writes, then continue iteration. */
+ class YieldNoOp : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 1 ) );
+ _cli.insert( ns(), BSON( "_id" << 2 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "_id" << GT << 0 ) );
+ ASSERT_EQUALS( 1, current().getIntField( "_id" ) );
+ ASSERT( prepareToYield() );
+ }
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ recoverFromYield();
+ ASSERT( ok() );
+ ASSERT_EQUALS( 2, current().getIntField( "_id" ) );
+ ASSERT( !advance() );
+ ASSERT( !ok() );
+ ASSERT( prepareToYield() );
+ recoverFromYield();
+ }
+ }
+ };
+
+ /** Yield cursor and delete current entry. */
+ class YieldDelete : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 1 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "_id" << 1 ) );
+ ASSERT_EQUALS( 1, current().getIntField( "_id" ) );
+ ASSERT( prepareToYield() );
+ }
+
+ _cli.remove( ns(), BSON( "_id" << 1 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ recoverFromYield();
+ ASSERT( !ok() );
+ ASSERT( !advance() );
+ }
+ }
+ };
+
+ /** Yield cursor and delete current entry, then continue iteration. */
+ class YieldDeleteContinue : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 1 ) );
+ _cli.insert( ns(), BSON( "_id" << 2 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "_id" << GT << 0 ) );
+ ASSERT_EQUALS( 1, current().getIntField( "_id" ) );
+ ASSERT( prepareToYield() );
+ }
+
+ _cli.remove( ns(), BSON( "_id" << 1 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ recoverFromYield();
+ ASSERT( ok() );
+ ASSERT_EQUALS( 2, current().getIntField( "_id" ) );
+ ASSERT( !advance() );
+ ASSERT( !ok() );
+ }
+ }
+ };
+
+        /** Yield cursor and delete current entry, then continue iteration past additional entries. */
+ class YieldDeleteContinueFurther : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 1 ) );
+ _cli.insert( ns(), BSON( "_id" << 2 ) );
+ _cli.insert( ns(), BSON( "_id" << 3 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "_id" << GT << 0 ) );
+ ASSERT_EQUALS( 1, current().getIntField( "_id" ) );
+ ASSERT( prepareToYield() );
+ }
+
+ _cli.remove( ns(), BSON( "_id" << 1 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ recoverFromYield();
+ ASSERT( ok() );
+ ASSERT_EQUALS( 2, current().getIntField( "_id" ) );
+ ASSERT( advance() );
+ ASSERT_EQUALS( 3, current().getIntField( "_id" ) );
+ ASSERT( !advance() );
+ ASSERT( !ok() );
+ }
+ }
+ };
+
+ /** Yield and update current. */
+ class YieldUpdate : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "a" << 1 ) );
+ _cli.insert( ns(), BSON( "a" << 2 ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "a" << GT << 0 ) );
+ ASSERT_EQUALS( 1, current().getIntField( "a" ) );
+ ASSERT( prepareToYield() );
+ }
+
+ _cli.update( ns(), BSON( "a" << 1 ), BSON( "$set" << BSON( "a" << 3 ) ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ recoverFromYield();
+ ASSERT( ok() );
+ ASSERT_EQUALS( 2, current().getIntField( "a" ) );
+ ASSERT( !advance() );
+ ASSERT( !ok() );
+ }
+ }
+ };
+
+ /** Yield and drop collection. */
+ class YieldDrop : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 1 ) );
+ _cli.insert( ns(), BSON( "_id" << 2 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "_id" << GT << 0 ) );
+ ASSERT_EQUALS( 1, current().getIntField( "_id" ) );
+ ASSERT( prepareToYield() );
+ }
+
+ _cli.dropCollection( ns() );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ recoverFromYield();
+ ASSERT( !ok() );
+ }
+ }
+ };
+
+ /** Yield and drop collection with $or query. */
+ class YieldDropOr : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 1 ) );
+ _cli.insert( ns(), BSON( "_id" << 2 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "$or" << BSON_ARRAY( BSON( "_id" << 1 ) << BSON( "_id" << 2 ) ) ) );
+ ASSERT_EQUALS( 1, current().getIntField( "_id" ) );
+ ASSERT( prepareToYield() );
+ }
+
+ _cli.dropCollection( ns() );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ ASSERT_EXCEPTION( recoverFromYield(), MsgAssertionException );
+ ASSERT( !ok() );
+ }
+ }
+ };
+
+ /** Yield and overwrite current in capped collection. */
+ class YieldCappedOverwrite : public Base {
+ public:
+ void run() {
+ _cli.createCollection( ns(), 1000, true );
+ _cli.insert( ns(), BSON( "x" << 1 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "x" << GT << 0 ) );
+ ASSERT_EQUALS( 1, current().getIntField( "x" ) );
+ ASSERT( prepareToYield() );
+ }
+
+ int x = 2;
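+                // Insert until the capped collection wraps and the original {x:1} document is overwritten.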
+ while( _cli.count( ns(), BSON( "x" << 1 ) ) > 0 ) {
+ _cli.insert( ns(), BSON( "x" << x++ ) );
+ }
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ ASSERT_EXCEPTION( recoverFromYield(), MsgAssertionException );
+ ASSERT( !ok() );
+ }
+ }
+ };
+
+ /** Yield and drop unrelated index - see SERVER-2454. */
+ class YieldDropIndex : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 1 ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "_id" << 1 ) );
+ ASSERT_EQUALS( 1, current().getIntField( "_id" ) );
+ ASSERT( prepareToYield() );
+ }
+
+ _cli.dropIndex( ns(), BSON( "a" << 1 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ recoverFromYield();
+ ASSERT( !ok() );
+ }
+ }
+ };
+
+ /** Yielding with multiple plans active. */
+ class YieldMultiplePlansNoOp : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 1 << "a" << 2 ) );
+ _cli.insert( ns(), BSON( "_id" << 2 << "a" << 1 ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "_id" << GT << 0 << "a" << GT << 0 ) );
+ ASSERT_EQUALS( 1, current().getIntField( "_id" ) );
+ ASSERT( prepareToYield() );
+ }
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ recoverFromYield();
+ ASSERT( ok() );
+ ASSERT_EQUALS( 2, current().getIntField( "_id" ) );
+ ASSERT( !advance() );
+ ASSERT( !ok() );
+ }
+ }
+ };
+
+ /** Yielding with advance and multiple plans active. */
+ class YieldMultiplePlansAdvanceNoOp : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 1 << "a" << 2 ) );
+ _cli.insert( ns(), BSON( "_id" << 2 << "a" << 1 ) );
+ _cli.insert( ns(), BSON( "_id" << 3 << "a" << 3 ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "_id" << GT << 0 << "a" << GT << 0 ) );
+ ASSERT_EQUALS( 1, current().getIntField( "_id" ) );
+ advance();
+ ASSERT_EQUALS( 2, current().getIntField( "_id" ) );
+ ASSERT( prepareToYield() );
+ }
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ recoverFromYield();
+ ASSERT( ok() );
+ ASSERT_EQUALS( 3, current().getIntField( "_id" ) );
+ ASSERT( !advance() );
+ ASSERT( !ok() );
+ }
+ }
+ };
+
+ /** Yielding with delete and multiple plans active. */
+ class YieldMultiplePlansDelete : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 1 << "a" << 2 ) );
+ _cli.insert( ns(), BSON( "_id" << 2 << "a" << 1 ) );
+ _cli.insert( ns(), BSON( "_id" << 3 << "a" << 4 ) );
+ _cli.insert( ns(), BSON( "_id" << 4 << "a" << 3 ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "_id" << GT << 0 << "a" << GT << 0 ) );
+ ASSERT_EQUALS( 1, current().getIntField( "_id" ) );
+ advance();
+ ASSERT_EQUALS( 2, current().getIntField( "_id" ) );
+ ASSERT( prepareToYield() );
+ }
+
+ _cli.remove( ns(), BSON( "_id" << 2 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ c()->recoverFromYield();
+ ASSERT( ok() );
+ // index {a:1} active during yield
+ ASSERT_EQUALS( 1, current().getIntField( "_id" ) );
+ ASSERT( advance() );
+ ASSERT_EQUALS( 3, current().getIntField( "_id" ) );
+ ASSERT( advance() );
+ ASSERT_EQUALS( 4, current().getIntField( "_id" ) );
+ ASSERT( !advance() );
+ ASSERT( !ok() );
+ }
+ }
+ };
+
+ /** Yielding with multiple plans and capped overwrite. */
+ class YieldMultiplePlansCappedOverwrite : public Base {
+ public:
+ void run() {
+ _cli.createCollection( ns(), 1000, true );
+ _cli.insert( ns(), BSON( "_id" << 1 << "a" << 1 ) );
+ _cli.ensureIndex( ns(), BSON( "_id" << 1 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "_id" << GT << 0 << "a" << GT << 0 ) );
+ ASSERT_EQUALS( 1, current().getIntField( "_id" ) );
+ ASSERT( prepareToYield() );
+ }
+
+ int i = 1;
+ while( _cli.count( ns(), BSON( "_id" << 1 ) ) > 0 ) {
+ ++i;
+ _cli.insert( ns(), BSON( "_id" << i << "a" << i ) );
+ }
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ recoverFromYield();
+ ASSERT( ok() );
+ // {$natural:1} plan does not recover, {_id:1} plan does.
+ ASSERT( 1 < current().getIntField( "_id" ) );
+ }
+ }
+ };
+
+ /**
+ * Yielding with multiple plans and capped overwrite with unrecoverable cursor
+ * active at time of yield.
+ */
+ class YieldMultiplePlansCappedOverwriteManual : public Base {
+ public:
+ void run() {
+ _cli.createCollection( ns(), 1000, true );
+ _cli.insert( ns(), BSON( "a" << 1 << "b" << 1 ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+ shared_ptr<Cursor> c;
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ c = newQueryOptimizerCursor( ns(), BSON( "a" << GT << 0 << "b" << GT << 0 ) );
+ ASSERT_EQUALS( 1, c->current().getIntField( "a" ) );
+ ASSERT( !c->getsetdup( c->currLoc() ) );
+ c->advance();
+ ASSERT_EQUALS( 1, c->current().getIntField( "a" ) );
+ ASSERT( c->getsetdup( c->currLoc() ) );
+ ASSERT( c->prepareToYield() );
+ }
+
+ int i = 1;
+ while( _cli.count( ns(), BSON( "a" << 1 ) ) > 0 ) {
+ ++i;
+ _cli.insert( ns(), BSON( "a" << i << "b" << i ) );
+ }
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ c->recoverFromYield();
+ ASSERT( c->ok() );
+                // {$natural:1} plan does not recover, {a:1} plan does.
+ ASSERT( 1 < c->current().getIntField( "a" ) );
+ }
+ }
+ };
+
+ /**
+ * Yielding with multiple plans and capped overwrite with unrecoverable cursor
+             * inactive at time of yield.
+ */
+ class YieldMultiplePlansCappedOverwriteManual2 : public Base {
+ public:
+ void run() {
+ _cli.createCollection( ns(), 1000, true );
+ _cli.insert( ns(), BSON( "_id" << 1 << "a" << 1 ) );
+ _cli.ensureIndex( ns(), BSON( "_id" << 1 ) );
+
+ shared_ptr<Cursor> c;
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ c = newQueryOptimizerCursor( ns(), BSON( "_id" << GT << 0 << "a" << GT << 0 ) );
+ ASSERT_EQUALS( 1, c->current().getIntField( "_id" ) );
+ ASSERT( !c->getsetdup( c->currLoc() ) );
+ ASSERT( c->prepareToYield() );
+ }
+
+ int n = 1;
+ while( _cli.count( ns(), BSON( "_id" << 1 ) ) > 0 ) {
+ ++n;
+ _cli.insert( ns(), BSON( "_id" << n << "a" << n ) );
+ }
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ c->recoverFromYield();
+ ASSERT( c->ok() );
+ // {$natural:1} plan does not recover, {_id:1} plan does.
+ ASSERT( 1 < c->current().getIntField( "_id" ) );
+ ASSERT( !c->getsetdup( c->currLoc() ) );
+ int i = c->current().getIntField( "_id" );
+ ASSERT( c->advance() );
+ ASSERT( c->getsetdup( c->currLoc() ) );
+ while( i < n ) {
+ ASSERT( c->advance() );
+ ++i;
+ ASSERT_EQUALS( i, c->current().getIntField( "_id" ) );
+ }
+ }
+ }
+ };
+
+ /** Try and fail to yield a geo query. */
+ class TryYieldGeo : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 0 << "loc" << BSON( "lon" << 30 << "lat" << 30 ) ) );
+ _cli.ensureIndex( ns(), BSON( "loc" << "2d" ) );
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "loc" << BSON( "$near" << BSON_ARRAY( 50 << 50 ) ) ) );
+ ASSERT( ok() );
+ ASSERT_EQUALS( 0, current().getIntField( "_id" ) );
+ ASSERT( !prepareToYield() );
+ ASSERT( ok() );
+ ASSERT_EQUALS( 0, current().getIntField( "_id" ) );
+ ASSERT( !advance() );
+ ASSERT( !ok() );
+ }
+ };
+
+ /** Yield with takeover cursor. */
+ class YieldTakeover : public Base {
+ public:
+ void run() {
+ for( int i = 0; i < 150; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i << "a" << i ) );
+ }
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "_id" << GTE << 0 << "a" << GTE << 0 ) );
+ for( int i = 0; i < 120; ++i ) {
+ ASSERT( advance() );
+ }
+ ASSERT( ok() );
+ ASSERT_EQUALS( 120, current().getIntField( "_id" ) );
+ ASSERT( prepareToYield() );
+ }
+
+ _cli.remove( ns(), BSON( "_id" << 120 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ recoverFromYield();
+ ASSERT( ok() );
+ ASSERT_EQUALS( 121, current().getIntField( "_id" ) );
+ ASSERT( advance() );
+ ASSERT_EQUALS( 122, current().getIntField( "_id" ) );
+ }
+ }
+ };
+
+        /** Yield with BasicCursor takeover cursor. */
+ class YieldTakeoverBasic : public Base {
+ public:
+ void run() {
+ for( int i = 0; i < 150; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i << "a" << BSON_ARRAY( i << i+1 ) ) );
+ }
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+ auto_ptr<ClientCursor> cc;
+ auto_ptr<ClientCursor::YieldData> data( new ClientCursor::YieldData() );
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "b" << NE << 0 << "a" << GTE << 0 ) );
+ cc.reset( new ClientCursor( QueryOption_NoCursorTimeout, c(), ns() ) );
+ for( int i = 0; i < 120; ++i ) {
+ ASSERT( advance() );
+ }
+ ASSERT( ok() );
+ ASSERT_EQUALS( 120, current().getIntField( "_id" ) );
+ cc->prepareToYield( *data );
+ }
+ _cli.remove( ns(), BSON( "_id" << 120 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ ASSERT( ClientCursor::recoverFromYield( *data ) );
+ ASSERT( ok() );
+ ASSERT_EQUALS( 121, current().getIntField( "_id" ) );
+ ASSERT( advance() );
+ ASSERT_EQUALS( 122, current().getIntField( "_id" ) );
+ }
+ }
+ };
+
+ /** Yield with advance of inactive cursor. */
+ class YieldInactiveCursorAdvance : public Base {
+ public:
+ void run() {
+ for( int i = 0; i < 10; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i << "a" << 10 - i ) );
+ }
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "_id" << GT << 0 << "a" << GT << 0 ) );
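+                    // Plans interleave: {_id:1} yields ascending _ids (1, 2, 3, ...) while
+                    // {a:1} (a == 10 - _id) yields descending _ids (9, 8, 7, ...).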
+ ASSERT( ok() );
+ ASSERT_EQUALS( 1, current().getIntField( "_id" ) );
+ ASSERT( advance() );
+ ASSERT_EQUALS( 9, current().getIntField( "_id" ) );
+ ASSERT( advance() );
+ ASSERT_EQUALS( 2, current().getIntField( "_id" ) );
+ ASSERT( prepareToYield() );
+ }
+
+ _cli.remove( ns(), BSON( "_id" << 9 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ recoverFromYield();
+ ASSERT( ok() );
+ ASSERT_EQUALS( 8, current().getIntField( "_id" ) );
+ ASSERT( advance() );
+ ASSERT_EQUALS( 3, current().getIntField( "_id" ) );
+ ASSERT( advance() );
+ ASSERT_EQUALS( 7, current().getIntField( "_id" ) );
+ }
+ }
+ };
+
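+        /** Results are returned in the requested {_id:1} order. */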
+ class OrderId : public Base {
+ public:
+ void run() {
+ for( int i = 0; i < 10; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i ) );
+ }
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSONObj(), BSON( "_id" << 1 ) );
+
+ for( int i = 0; i < 10; ++i, advance() ) {
+ ASSERT( ok() );
+ ASSERT_EQUALS( i, current().getIntField( "_id" ) );
+ }
+ }
+ };
+
+ class OrderMultiIndex : public Base {
+ public:
+ void run() {
+ for( int i = 0; i < 10; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i << "a" << 1 ) );
+ }
+ _cli.ensureIndex( ns(), BSON( "_id" << 1 << "a" << 1 ) );
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "_id" << GTE << 0 << "a" << GTE << 0 ), BSON( "_id" << 1 ) );
+
+ for( int i = 0; i < 10; ++i, advance() ) {
+ ASSERT( ok() );
+ ASSERT_EQUALS( i, current().getIntField( "_id" ) );
+ }
+ }
+ };
+
+ class OrderReject : public Base {
+ public:
+ void run() {
+ for( int i = 0; i < 10; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i << "a" << i % 5 ) );
+ }
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "a" << GTE << 3 ), BSON( "_id" << 1 ) );
+
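+                // a == _id % 5, so only _id 3, 4, 8 and 9 satisfy a >= 3; they come back in the requested _id order.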
+ ASSERT( ok() );
+ ASSERT_EQUALS( 3, current().getIntField( "_id" ) );
+ ASSERT( advance() );
+ ASSERT_EQUALS( 4, current().getIntField( "_id" ) );
+ ASSERT( advance() );
+ ASSERT_EQUALS( 8, current().getIntField( "_id" ) );
+ ASSERT( advance() );
+ ASSERT_EQUALS( 9, current().getIntField( "_id" ) );
+ ASSERT( !advance() );
+ }
+ };
+
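+        /** A $natural order returns documents in insertion order. */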
+ class OrderNatural : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 5 ) );
+ _cli.insert( ns(), BSON( "_id" << 4 ) );
+ _cli.insert( ns(), BSON( "_id" << 6 ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "_id" << GT << 0 ), BSON( "$natural" << 1 ) );
+
+ ASSERT( ok() );
+ ASSERT_EQUALS( 5, current().getIntField( "_id" ) );
+ ASSERT( advance() );
+ ASSERT_EQUALS( 4, current().getIntField( "_id" ) );
+ ASSERT( advance() );
+ ASSERT_EQUALS( 6, current().getIntField( "_id" ) );
+ ASSERT( !advance() );
+ }
+ };
+
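+        /** No cursor is returned when no index can provide the requested order. */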
+ class OrderUnindexed : public Base {
+ public:
+ void run() {
+ dblock lk;
+ Client::Context ctx( ns() );
+ ASSERT( !newQueryOptimizerCursor( ns(), BSONObj(), BSON( "a" << 1 ) ).get() );
+ }
+ };
+
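+        /** A recorded plan that cannot provide the requested order is not used. */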
+ class RecordedOrderInvalid : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "a" << 1 << "b" << 1 ) );
+ _cli.insert( ns(), BSON( "a" << 2 << "b" << 2 ) );
+ _cli.insert( ns(), BSON( "a" << 3 << "b" << 3 ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+ _cli.ensureIndex( ns(), BSON( "b" << 1 ) );
+ ASSERT( _cli.query( ns(), QUERY( "a" << 2 ).sort( "b" ) )->more() );
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ shared_ptr<Cursor> c = newQueryOptimizerCursor( ns(), BSON( "a" << 2 ), BSON( "b" << 1 ) );
+ // Check that we are scanning {b:1} not {a:1}.
+ for( int i = 0; i < 3; ++i ) {
+ ASSERT( c->ok() );
+ c->advance();
+ }
+ ASSERT( !c->ok() );
+ }
+ };
+
+ class KillOp : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 1 << "b" << 1 ) );
+ _cli.insert( ns(), BSON( "_id" << 2 << "b" << 2 ) );
+ _cli.ensureIndex( ns(), BSON( "b" << 1 ) );
+
+ mongolock lk( false );
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "_id" << GT << 0 << "b" << GT << 0 ) );
+ ASSERT( ok() );
+ cc().curop()->kill();
+ // First advance() call throws, subsequent calls just fail.
+ ASSERT_EXCEPTION( advance(), MsgAssertionException );
+ ASSERT( !advance() );
+ }
+ };
+
+ class KillOpFirstClause : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 1 << "b" << 1 ) );
+ _cli.insert( ns(), BSON( "_id" << 2 << "b" << 2 ) );
+ _cli.ensureIndex( ns(), BSON( "b" << 1 ) );
+
+ mongolock lk( false );
+ Client::Context ctx( ns() );
+ shared_ptr<Cursor> c = newQueryOptimizerCursor( ns(), BSON( "$or" << BSON_ARRAY( BSON( "_id" << GT << 0 ) << BSON( "b" << GT << 0 ) ) ) );
+ ASSERT( c->ok() );
+ cc().curop()->kill();
+ // First advance() call throws, subsequent calls just fail.
+ ASSERT_EXCEPTION( c->advance(), MsgAssertionException );
+ ASSERT( !c->advance() );
+ }
+ };
+
+ class Nscanned : public Base {
+ public:
+ void run() {
+ for( int i = 0; i < 120; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i << "a" << i ) );
+ }
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ shared_ptr<Cursor> c = newQueryOptimizerCursor( ns(), BSON( "_id" << GTE << 0 << "a" << GTE << 0 ) );
+ ASSERT( c->ok() );
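+                // nscanned appears to aggregate records examined across the candidate plans,
+                // so two records have been scanned by the time the first result is returned.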
+ ASSERT_EQUALS( 2, c->nscanned() );
+ c->advance();
+ ASSERT( c->ok() );
+ ASSERT_EQUALS( 2, c->nscanned() );
+ c->advance();
+ for( int i = 3; i < 222; ++i ) {
+ ASSERT( c->ok() );
+ c->advance();
+ }
+ ASSERT( !c->ok() );
+ }
+ };
+
+ namespace GetCursor {
+
+ class Base : public QueryOptimizerCursorTests::Base {
+ public:
+ Base() {
+ // create collection
+ _cli.insert( ns(), BSON( "_id" << 5 ) );
+ }
+ virtual ~Base() {}
+ void run() {
+ dblock lk;
+ Client::Context ctx( ns() );
+ shared_ptr<Cursor> c = NamespaceDetailsTransient::getCursor( ns(), query(), order() );
+ string type = c->toString().substr( 0, expectedType().length() );
+ ASSERT_EQUALS( expectedType(), type );
+ check( c );
+ }
+ protected:
+ virtual string expectedType() const = 0;
+ virtual BSONObj query() const { return BSONObj(); }
+ virtual BSONObj order() const { return BSONObj(); }
+ virtual void check( const shared_ptr<Cursor> &c ) {
+ ASSERT( c->ok() );
+ ASSERT( !c->matcher() );
+ ASSERT_EQUALS( 5, c->current().getIntField( "_id" ) );
+ ASSERT( !c->advance() );
+ }
+ };
+
+ class NoConstraints : public Base {
+ string expectedType() const { return "BasicCursor"; }
+ };
+
+ class SimpleId : public Base {
+ public:
+ SimpleId() {
+ _cli.insert( ns(), BSON( "_id" << 0 ) );
+ _cli.insert( ns(), BSON( "_id" << 10 ) );
+ }
+ string expectedType() const { return "BtreeCursor _id_"; }
+ BSONObj query() const { return BSON( "_id" << 5 ); }
+ };
+
+ class OptimalIndex : public Base {
+ public:
+ OptimalIndex() {
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+ _cli.insert( ns(), BSON( "a" << 5 ) );
+ _cli.insert( ns(), BSON( "a" << 6 ) );
+ }
+ string expectedType() const { return "BtreeCursor a_1"; }
+ BSONObj query() const { return BSON( "a" << GTE << 5 ); }
+ void check( const shared_ptr<Cursor> &c ) {
+ ASSERT( c->ok() );
+ ASSERT( c->matcher() );
+ ASSERT_EQUALS( 5, c->current().getIntField( "a" ) );
+ ASSERT( c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( c->advance() );
+ ASSERT_EQUALS( 6, c->current().getIntField( "a" ) );
+ ASSERT( c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( !c->advance() );
+ }
+ };
+
+ class Geo : public Base {
+ public:
+ Geo() {
+ _cli.insert( ns(), BSON( "_id" << 44 << "loc" << BSON_ARRAY( 44 << 45 ) ) );
+ _cli.ensureIndex( ns(), BSON( "loc" << "2d" ) );
+ }
+ string expectedType() const { return "GeoSearchCursor"; }
+ BSONObj query() const { return fromjson( "{ loc : { $near : [50,50] } }" ); }
+ void check( const shared_ptr<Cursor> &c ) {
+ ASSERT( c->ok() );
+ ASSERT( c->matcher() );
+ ASSERT( c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT_EQUALS( 44, c->current().getIntField( "_id" ) );
+ ASSERT( !c->advance() );
+ }
+ };
+
+ class OutOfOrder : public QueryOptimizerCursorTests::Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 5 ) );
+ dblock lk;
+ Client::Context ctx( ns() );
+ shared_ptr<Cursor> c = NamespaceDetailsTransient::getCursor( ns(), BSONObj(), BSON( "b" << 1 ) );
+ ASSERT( !c );
+ }
+ };
+
+ class BestSavedOutOfOrder : public QueryOptimizerCursorTests::Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 5 << "b" << BSON_ARRAY( 1 << 2 << 3 << 4 << 5 ) ) );
+ _cli.insert( ns(), BSON( "_id" << 1 << "b" << 6 ) );
+ _cli.ensureIndex( ns(), BSON( "b" << 1 ) );
+ // record {_id:1} index for this query
+ ASSERT( _cli.query( ns(), QUERY( "_id" << GT << 0 << "b" << GT << 0 ).sort( "b" ) )->more() );
+ dblock lk;
+ Client::Context ctx( ns() );
+ shared_ptr<Cursor> c = NamespaceDetailsTransient::getCursor( ns(), BSON( "_id" << GT << 0 << "b" << GT << 0 ), BSON( "b" << 1 ) );
+ // {_id:1} requires scan and order, so {b:1} must be chosen.
+ ASSERT( c );
+ ASSERT_EQUALS( 5, c->current().getIntField( "_id" ) );
+ }
+ };
+
+ class MultiIndex : public Base {
+ public:
+ MultiIndex() {
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+ }
+ string expectedType() const { return "QueryOptimizerCursor"; }
+ BSONObj query() const { return BSON( "_id" << GT << 0 << "a" << GT << 0 ); }
+ void check( const shared_ptr<Cursor> &c ) {}
+ };
+
+ } // namespace GetCursor
+
+ } // namespace QueryOptimizerCursorTests
class All : public Suite {
public:
All() : Suite( "queryoptimizer" ) {}
void setupTests() {
- add< FieldRangeTests::Empty >();
- add< FieldRangeTests::Eq >();
- add< FieldRangeTests::DupEq >();
- add< FieldRangeTests::Lt >();
- add< FieldRangeTests::Lte >();
- add< FieldRangeTests::Gt >();
- add< FieldRangeTests::Gte >();
- add< FieldRangeTests::TwoLt >();
- add< FieldRangeTests::TwoGt >();
- add< FieldRangeTests::EqGte >();
- add< FieldRangeTests::EqGteInvalid >();
- add< FieldRangeTests::Regex >();
- add< FieldRangeTests::RegexObj >();
- add< FieldRangeTests::UnhelpfulRegex >();
- add< FieldRangeTests::In >();
- add< FieldRangeTests::Equality >();
- add< FieldRangeTests::SimplifiedQuery >();
- add< FieldRangeTests::QueryPatternTest >();
- add< FieldRangeTests::NoWhere >();
- add< FieldRangeTests::Numeric >();
- add< FieldRangeTests::InLowerBound >();
- add< FieldRangeTests::InUpperBound >();
- add< FieldRangeTests::UnionBound >();
- add< FieldRangeTests::MultiBound >();
- add< FieldRangeTests::Diff1 >();
- add< FieldRangeTests::Diff2 >();
- add< FieldRangeTests::Diff3 >();
- add< FieldRangeTests::Diff4 >();
- add< FieldRangeTests::Diff5 >();
- add< FieldRangeTests::Diff6 >();
- add< FieldRangeTests::Diff7 >();
- add< FieldRangeTests::Diff8 >();
- add< FieldRangeTests::Diff9 >();
- add< FieldRangeTests::Diff10 >();
- add< FieldRangeTests::Diff11 >();
- add< FieldRangeTests::Diff12 >();
- add< FieldRangeTests::Diff13 >();
- add< FieldRangeTests::Diff14 >();
- add< FieldRangeTests::Diff15 >();
- add< FieldRangeTests::Diff16 >();
- add< FieldRangeTests::Diff17 >();
- add< FieldRangeTests::Diff18 >();
- add< FieldRangeTests::Diff19 >();
- add< FieldRangeTests::Diff20 >();
- add< FieldRangeTests::Diff21 >();
- add< FieldRangeTests::Diff22 >();
- add< FieldRangeTests::Diff23 >();
- add< FieldRangeTests::Diff24 >();
- add< FieldRangeTests::Diff25 >();
- add< FieldRangeTests::Diff26 >();
- add< FieldRangeTests::Diff27 >();
- add< FieldRangeTests::Diff28 >();
- add< FieldRangeTests::Diff29 >();
- add< FieldRangeTests::Diff30 >();
- add< FieldRangeTests::Diff31 >();
- add< FieldRangeTests::Diff32 >();
- add< FieldRangeTests::Diff33 >();
- add< FieldRangeTests::Diff34 >();
- add< FieldRangeTests::Diff35 >();
- add< FieldRangeTests::Diff36 >();
- add< FieldRangeTests::Diff37 >();
- add< FieldRangeTests::Diff38 >();
- add< FieldRangeTests::Diff39 >();
- add< FieldRangeTests::Diff40 >();
- add< FieldRangeTests::Diff41 >();
- add< FieldRangeTests::Diff42 >();
- add< FieldRangeTests::Diff43 >();
- add< FieldRangeTests::Diff44 >();
- add< FieldRangeTests::Diff45 >();
- add< FieldRangeTests::Diff46 >();
- add< FieldRangeTests::Diff47 >();
- add< FieldRangeTests::Diff48 >();
- add< FieldRangeTests::Diff49 >();
- add< FieldRangeTests::Diff50 >();
- add< FieldRangeTests::Diff51 >();
- add< FieldRangeTests::Diff52 >();
- add< FieldRangeTests::Diff53 >();
- add< FieldRangeTests::Diff54 >();
- add< FieldRangeTests::Diff55 >();
- add< FieldRangeTests::Diff56 >();
- add< FieldRangeTests::Diff57 >();
- add< FieldRangeTests::Diff58 >();
- add< FieldRangeTests::Diff59 >();
- add< FieldRangeTests::Diff60 >();
- add< FieldRangeTests::Diff61 >();
- add< FieldRangeTests::Diff62 >();
- add< FieldRangeTests::Diff63 >();
- add< FieldRangeTests::Diff64 >();
- add< FieldRangeTests::DiffMulti1 >();
- add< FieldRangeTests::DiffMulti2 >();
- add< FieldRangeTests::SetIntersect >();
- add< QueryPlanTests::NoIndex >();
- add< QueryPlanTests::SimpleOrder >();
- add< QueryPlanTests::MoreIndexThanNeeded >();
- add< QueryPlanTests::IndexSigns >();
- add< QueryPlanTests::IndexReverse >();
- add< QueryPlanTests::NoOrder >();
- add< QueryPlanTests::EqualWithOrder >();
- add< QueryPlanTests::Optimal >();
- add< QueryPlanTests::MoreOptimal >();
- add< QueryPlanTests::KeyMatch >();
- add< QueryPlanTests::MoreKeyMatch >();
- add< QueryPlanTests::ExactKeyQueryTypes >();
- add< QueryPlanTests::Unhelpful >();
- add< QueryPlanSetTests::NoIndexes >();
- add< QueryPlanSetTests::Optimal >();
- add< QueryPlanSetTests::NoOptimal >();
- add< QueryPlanSetTests::NoSpec >();
- add< QueryPlanSetTests::HintSpec >();
- add< QueryPlanSetTests::HintName >();
- add< QueryPlanSetTests::NaturalHint >();
- add< QueryPlanSetTests::NaturalSort >();
- add< QueryPlanSetTests::BadHint >();
- add< QueryPlanSetTests::Count >();
- add< QueryPlanSetTests::QueryMissingNs >();
- add< QueryPlanSetTests::UnhelpfulIndex >();
- add< QueryPlanSetTests::SingleException >();
- add< QueryPlanSetTests::AllException >();
- add< QueryPlanSetTests::SaveGoodIndex >();
- add< QueryPlanSetTests::TryAllPlansOnErr >();
- add< QueryPlanSetTests::FindOne >();
- add< QueryPlanSetTests::Delete >();
- add< QueryPlanSetTests::DeleteOneScan >();
- add< QueryPlanSetTests::DeleteOneIndex >();
- add< QueryPlanSetTests::TryOtherPlansBeforeFinish >();
- add< QueryPlanSetTests::InQueryIntervals >();
- add< QueryPlanSetTests::EqualityThenIn >();
- add< QueryPlanSetTests::NotEqualityThenIn >();
- add< BestGuess >();
+ __forceLinkGeoPlugin();
+ add<QueryPlanTests::NoIndex>();
+ add<QueryPlanTests::SimpleOrder>();
+ add<QueryPlanTests::MoreIndexThanNeeded>();
+ add<QueryPlanTests::IndexSigns>();
+ add<QueryPlanTests::IndexReverse>();
+ add<QueryPlanTests::NoOrder>();
+ add<QueryPlanTests::EqualWithOrder>();
+ add<QueryPlanTests::Optimal>();
+ add<QueryPlanTests::MoreOptimal>();
+ add<QueryPlanTests::KeyMatch>();
+ add<QueryPlanTests::MoreKeyMatch>();
+ add<QueryPlanTests::ExactKeyQueryTypes>();
+ add<QueryPlanTests::Unhelpful>();
+ add<QueryPlanSetTests::NoIndexes>();
+ add<QueryPlanSetTests::Optimal>();
+ add<QueryPlanSetTests::NoOptimal>();
+ add<QueryPlanSetTests::NoSpec>();
+ add<QueryPlanSetTests::HintSpec>();
+ add<QueryPlanSetTests::HintName>();
+ add<QueryPlanSetTests::NaturalHint>();
+ add<QueryPlanSetTests::NaturalSort>();
+ add<QueryPlanSetTests::BadHint>();
+ add<QueryPlanSetTests::Count>();
+ add<QueryPlanSetTests::QueryMissingNs>();
+ add<QueryPlanSetTests::UnhelpfulIndex>();
+ add<QueryPlanSetTests::SingleException>();
+ add<QueryPlanSetTests::AllException>();
+ add<QueryPlanSetTests::SaveGoodIndex>();
+ add<QueryPlanSetTests::TryAllPlansOnErr>();
+ add<QueryPlanSetTests::FindOne>();
+ add<QueryPlanSetTests::Delete>();
+ add<QueryPlanSetTests::DeleteOneScan>();
+ add<QueryPlanSetTests::DeleteOneIndex>();
+ add<QueryPlanSetTests::TryOtherPlansBeforeFinish>();
+ add<QueryPlanSetTests::InQueryIntervals>();
+ add<QueryPlanSetTests::EqualityThenIn>();
+ add<QueryPlanSetTests::NotEqualityThenIn>();
+ add<BestGuess>();
+ add<QueryOptimizerCursorTests::Empty>();
+ add<QueryOptimizerCursorTests::Unindexed>();
+ add<QueryOptimizerCursorTests::Basic>();
+ add<QueryOptimizerCursorTests::NoMatch>();
+ add<QueryOptimizerCursorTests::Interleaved>();
+ add<QueryOptimizerCursorTests::NotMatch>();
+ add<QueryOptimizerCursorTests::StopInterleaving>();
+ add<QueryOptimizerCursorTests::TakeoverWithDup>();
+ add<QueryOptimizerCursorTests::TakeoverWithNonMatches>();
+ add<QueryOptimizerCursorTests::TakeoverWithTakeoverDup>();
+ add<QueryOptimizerCursorTests::BasicOr>();
+ add<QueryOptimizerCursorTests::OrFirstClauseEmpty>();
+ add<QueryOptimizerCursorTests::OrSecondClauseEmpty>();
+ add<QueryOptimizerCursorTests::OrMultipleClausesEmpty>();
+ add<QueryOptimizerCursorTests::TakeoverCountOr>();
+ add<QueryOptimizerCursorTests::TakeoverEndOfOrClause>();
+ add<QueryOptimizerCursorTests::TakeoverBeforeEndOfOrClause>();
+ add<QueryOptimizerCursorTests::TakeoverAfterEndOfOrClause>();
+ add<QueryOptimizerCursorTests::ManualMatchingDeduping>();
+ add<QueryOptimizerCursorTests::ManualMatchingUsingCurrKey>();
+ add<QueryOptimizerCursorTests::ManualMatchingDedupingTakeover>();
+ add<QueryOptimizerCursorTests::Singlekey>();
+ add<QueryOptimizerCursorTests::Multikey>();
+ add<QueryOptimizerCursorTests::AddOtherPlans>();
+ add<QueryOptimizerCursorTests::OrRangeElimination>();
+ add<QueryOptimizerCursorTests::OrDedup>();
+ add<QueryOptimizerCursorTests::EarlyDups>();
+ add<QueryOptimizerCursorTests::OrPopInTakeover>();
+ add<QueryOptimizerCursorTests::OrCollectionScanAbort>();
+ add<QueryOptimizerCursorTests::Geo>();
+ add<QueryOptimizerCursorTests::YieldNoOp>();
+ add<QueryOptimizerCursorTests::YieldDelete>();
+ add<QueryOptimizerCursorTests::YieldDeleteContinue>();
+ add<QueryOptimizerCursorTests::YieldDeleteContinueFurther>();
+ add<QueryOptimizerCursorTests::YieldUpdate>();
+ add<QueryOptimizerCursorTests::YieldDrop>();
+ add<QueryOptimizerCursorTests::YieldDropOr>();
+ add<QueryOptimizerCursorTests::YieldCappedOverwrite>();
+ add<QueryOptimizerCursorTests::YieldDropIndex>();
+ add<QueryOptimizerCursorTests::YieldMultiplePlansNoOp>();
+ add<QueryOptimizerCursorTests::YieldMultiplePlansAdvanceNoOp>();
+ add<QueryOptimizerCursorTests::YieldMultiplePlansDelete>();
+ add<QueryOptimizerCursorTests::YieldMultiplePlansCappedOverwrite>();
+ add<QueryOptimizerCursorTests::YieldMultiplePlansCappedOverwriteManual>();
+ add<QueryOptimizerCursorTests::YieldMultiplePlansCappedOverwriteManual2>();
+ add<QueryOptimizerCursorTests::TryYieldGeo>();
+ add<QueryOptimizerCursorTests::YieldTakeover>();
+ add<QueryOptimizerCursorTests::YieldTakeoverBasic>();
+ add<QueryOptimizerCursorTests::YieldInactiveCursorAdvance>();
+ add<QueryOptimizerCursorTests::OrderId>();
+ add<QueryOptimizerCursorTests::OrderMultiIndex>();
+ add<QueryOptimizerCursorTests::OrderReject>();
+ add<QueryOptimizerCursorTests::OrderNatural>();
+ add<QueryOptimizerCursorTests::OrderUnindexed>();
+ add<QueryOptimizerCursorTests::RecordedOrderInvalid>();
+ add<QueryOptimizerCursorTests::KillOp>();
+ add<QueryOptimizerCursorTests::KillOpFirstClause>();
+ add<QueryOptimizerCursorTests::Nscanned>();
+ add<QueryOptimizerCursorTests::GetCursor::NoConstraints>();
+ add<QueryOptimizerCursorTests::GetCursor::SimpleId>();
+ add<QueryOptimizerCursorTests::GetCursor::OptimalIndex>();
+ add<QueryOptimizerCursorTests::GetCursor::Geo>();
+ add<QueryOptimizerCursorTests::GetCursor::OutOfOrder>();
+ add<QueryOptimizerCursorTests::GetCursor::BestSavedOutOfOrder>();
+ add<QueryOptimizerCursorTests::GetCursor::MultiIndex>();
}
} myall;
diff --git a/dbtests/querytests.cpp b/dbtests/querytests.cpp
index d008e4d..694053b 100644
--- a/dbtests/querytests.cpp
+++ b/dbtests/querytests.cpp
@@ -18,9 +18,10 @@
*/
#include "pch.h"
-#include "../db/query.h"
+#include "../db/ops/query.h"
+#include "../db/dbhelpers.h"
+#include "../db/clientcursor.h"
-#include "../db/db.h"
#include "../db/instance.h"
#include "../db/json.h"
#include "../db/lasterror.h"
@@ -61,7 +62,7 @@ namespace QueryTests {
}
static void addIndex( const BSONObj &key ) {
BSONObjBuilder b;
- b.append( "name", key.firstElement().fieldName() );
+ b.append( "name", key.firstElementFieldName() );
b.append( "ns", ns() );
b.append( "key", key );
BSONObj o = b.done();
@@ -239,7 +240,7 @@ namespace QueryTests {
}
void run() {
const char *ns = "unittests.querytests.ReturnOneOfManyAndTail";
- client().createCollection( ns, 0, true );
+ client().createCollection( ns, 1024, true );
insert( ns, BSON( "a" << 0 ) );
insert( ns, BSON( "a" << 1 ) );
insert( ns, BSON( "a" << 2 ) );
@@ -258,7 +259,7 @@ namespace QueryTests {
}
void run() {
const char *ns = "unittests.querytests.TailNotAtEnd";
- client().createCollection( ns, 0, true );
+ client().createCollection( ns, 2047, true );
insert( ns, BSON( "a" << 0 ) );
insert( ns, BSON( "a" << 1 ) );
insert( ns, BSON( "a" << 2 ) );
@@ -283,7 +284,7 @@ namespace QueryTests {
}
void run() {
const char *ns = "unittests.querytests.EmptyTail";
- client().createCollection( ns, 0, true );
+ client().createCollection( ns, 1900, true );
auto_ptr< DBClientCursor > c = client().query( ns, Query().hint( BSON( "$natural" << 1 ) ), 2, 0, 0, QueryOption_CursorTailable );
ASSERT_EQUALS( 0, c->getCursorId() );
ASSERT( c->isDead() );
@@ -301,7 +302,7 @@ namespace QueryTests {
}
void run() {
const char *ns = "unittests.querytests.TailableDelete";
- client().createCollection( ns, 0, true, 2 );
+ client().createCollection( ns, 8192, true, 2 );
insert( ns, BSON( "a" << 0 ) );
insert( ns, BSON( "a" << 1 ) );
auto_ptr< DBClientCursor > c = client().query( ns, Query().hint( BSON( "$natural" << 1 ) ), 2, 0, 0, QueryOption_CursorTailable );
@@ -322,7 +323,7 @@ namespace QueryTests {
}
void run() {
const char *ns = "unittests.querytests.TailableInsertDelete";
- client().createCollection( ns, 0, true );
+ client().createCollection( ns, 1330, true );
insert( ns, BSON( "a" << 0 ) );
insert( ns, BSON( "a" << 1 ) );
auto_ptr< DBClientCursor > c = client().query( ns, Query().hint( BSON( "$natural" << 1 ) ), 2, 0, 0, QueryOption_CursorTailable );
@@ -356,23 +357,32 @@ namespace QueryTests {
~TailableQueryOnId() {
client().dropCollection( "unittests.querytests.TailableQueryOnId" );
}
+
+ void insertA(const char* ns, int a) {
+ BSONObjBuilder b;
+ b.appendOID("_id", 0, true);
+ b.appendOID("value", 0, true);
+ b.append("a", a);
+ insert(ns, b.obj());
+ }
+
void run() {
const char *ns = "unittests.querytests.TailableQueryOnId";
BSONObj info;
- client().runCommand( "unittests", BSON( "create" << "querytests.TailableQueryOnId" << "capped" << true << "autoIndexId" << true ), info );
- insert( ns, BSON( "a" << 0 ) );
- insert( ns, BSON( "a" << 1 ) );
+ client().runCommand( "unittests", BSON( "create" << "querytests.TailableQueryOnId" << "capped" << true << "size" << 8192 << "autoIndexId" << true ), info );
+ insertA( ns, 0 );
+ insertA( ns, 1 );
auto_ptr< DBClientCursor > c1 = client().query( ns, QUERY( "a" << GT << -1 ), 0, 0, 0, QueryOption_CursorTailable );
OID id;
id.init("000000000000000000000000");
- auto_ptr< DBClientCursor > c2 = client().query( ns, QUERY( "_id" << GT << id ), 0, 0, 0, QueryOption_CursorTailable );
+ auto_ptr< DBClientCursor > c2 = client().query( ns, QUERY( "value" << GT << id ), 0, 0, 0, QueryOption_CursorTailable );
c1->next();
c1->next();
ASSERT( !c1->more() );
c2->next();
c2->next();
ASSERT( !c2->more() );
- insert( ns, BSON( "a" << 2 ) );
+ insertA( ns, 2 );
ASSERT( c1->more() );
ASSERT_EQUALS( 2, c1->next().getIntField( "a" ) );
ASSERT( !c1->more() );
@@ -390,7 +400,6 @@ namespace QueryTests {
}
void run() {
const char *ns = "unittests.querytests.OplogReplayMode";
- insert( ns, BSON( "ts" << 3 ) );
insert( ns, BSON( "ts" << 0 ) );
insert( ns, BSON( "ts" << 1 ) );
insert( ns, BSON( "ts" << 2 ) );
@@ -398,6 +407,12 @@ namespace QueryTests {
ASSERT( c->more() );
ASSERT_EQUALS( 2, c->next().getIntField( "ts" ) );
ASSERT( !c->more() );
+
+ insert( ns, BSON( "ts" << 3 ) );
+ c = client().query( ns, QUERY( "ts" << GT << 1 ).hint( BSON( "$natural" << 1 ) ), 0, 0, 0, QueryOption_OplogReplay );
+ ASSERT( c->more() );
+ ASSERT_EQUALS( 2, c->next().getIntField( "ts" ) );
+ ASSERT( c->more() );
}
};
@@ -725,6 +740,90 @@ namespace QueryTests {
};
BSONObj MinMax::empty_;
+ class MatchCodeCodeWScope : public ClientBase {
+ public:
+ MatchCodeCodeWScope() : _ns( "unittests.querytests.MatchCodeCodeWScope" ) {}
+ ~MatchCodeCodeWScope() {
+ client().dropCollection( "unittests.querytests.MatchCodeCodeWScope" );
+ }
+ void run() {
+ checkMatch();
+ client().ensureIndex( _ns, BSON( "a" << 1 ) );
+ checkMatch();
+ // Use explain queries to check index bounds.
+ {
+ BSONObj explain = client().findOne( _ns, QUERY( "a" << BSON( "$type" << (int)Code ) ).explain() );
+ BSONObjBuilder lower;
+ lower.appendCode( "", "" );
+ BSONObjBuilder upper;
+ upper.appendCodeWScope( "", "", BSONObj() );
+ ASSERT( lower.done().firstElement().valuesEqual( explain[ "indexBounds" ].Obj()[ "a" ].Array()[ 0 ].Array()[ 0 ] ) );
+ ASSERT( upper.done().firstElement().valuesEqual( explain[ "indexBounds" ].Obj()[ "a" ].Array()[ 0 ].Array()[ 1 ] ) );
+ }
+ {
+ BSONObj explain = client().findOne( _ns, QUERY( "a" << BSON( "$type" << (int)CodeWScope ) ).explain() );
+ BSONObjBuilder lower;
+ lower.appendCodeWScope( "", "", BSONObj() );
+ // This upper bound may change if a new bson type is added.
+ BSONObjBuilder upper;
+ upper << "" << BSON( "$maxElement" << 1 );
+ ASSERT( lower.done().firstElement().valuesEqual( explain[ "indexBounds" ].Obj()[ "a" ].Array()[ 0 ].Array()[ 0 ] ) );
+ ASSERT( upper.done().firstElement().valuesEqual( explain[ "indexBounds" ].Obj()[ "a" ].Array()[ 0 ].Array()[ 1 ] ) );
+ }
+ }
+ private:
+ void checkMatch() {
+ client().remove( _ns, BSONObj() );
+
+ client().insert( _ns, code() );
+ client().insert( _ns, codeWScope() );
+
+ ASSERT_EQUALS( 1U, client().count( _ns, code() ) );
+ ASSERT_EQUALS( 1U, client().count( _ns, codeWScope() ) );
+
+ ASSERT_EQUALS( 1U, client().count( _ns, BSON( "a" << BSON( "$type" << (int)Code ) ) ) );
+ ASSERT_EQUALS( 1U, client().count( _ns, BSON( "a" << BSON( "$type" << (int)CodeWScope ) ) ) );
+ }
+ BSONObj code() const {
+ BSONObjBuilder codeBuilder;
+ codeBuilder.appendCode( "a", "return 1;" );
+ return codeBuilder.obj();
+ }
+ BSONObj codeWScope() const {
+ BSONObjBuilder codeWScopeBuilder;
+ codeWScopeBuilder.appendCodeWScope( "a", "return 1;", BSONObj() );
+ return codeWScopeBuilder.obj();
+ }
+ const char *_ns;
+ };
+
+ class MatchDBRefType : public ClientBase {
+ public:
+ MatchDBRefType() : _ns( "unittests.querytests.MatchDBRefType" ) {}
+ ~MatchDBRefType() {
+ client().dropCollection( "unittests.querytests.MatchDBRefType" );
+ }
+ void run() {
+ checkMatch();
+ client().ensureIndex( _ns, BSON( "a" << 1 ) );
+ checkMatch();
+ }
+ private:
+ void checkMatch() {
+ client().remove( _ns, BSONObj() );
+ client().insert( _ns, dbref() );
+ ASSERT_EQUALS( 1U, client().count( _ns, dbref() ) );
+ ASSERT_EQUALS( 1U, client().count( _ns, BSON( "a" << BSON( "$type" << (int)DBRef ) ) ) );
+ }
+ BSONObj dbref() const {
+ BSONObjBuilder b;
+ OID oid;
+ b.appendDBRef( "a", "ns", oid );
+ return b.obj();
+ }
+ const char *_ns;
+ };
+
class DirectLocking : public ClientBase {
public:
void run() {
@@ -850,10 +949,12 @@ namespace QueryTests {
writelock lk("");
Client::Context ctx( "unittests" );
+ // note that extents are always at least 4KB now - so this will get rounded up a bit.
ASSERT( userCreateNS( ns() , fromjson( "{ capped : true , size : 2000 }" ) , err , false ) );
- for ( int i=0; i<100; i++ ) {
+ for ( int i=0; i<200; i++ ) {
insertNext();
- ASSERT( count() < 45 );
+// cout << count() << endl;
+ ASSERT( count() < 90 );
}
int a = count();
@@ -870,7 +971,7 @@ namespace QueryTests {
insertNext();
ASSERT( c->more() );
- for ( int i=0; i<50; i++ ) {
+ for ( int i=0; i<90; i++ ) {
insertNext();
}
@@ -879,7 +980,10 @@ namespace QueryTests {
}
void insertNext() {
- insert( ns() , BSON( "i" << _n++ ) );
+ BSONObjBuilder b;
+ b.appendOID("_id", 0, true);
+ b.append("i", _n++);
+ insert( ns() , b.obj() );
}
int _n;
@@ -913,6 +1017,7 @@ namespace QueryTests {
unsigned long long slow , fast;
int n = 10000;
+ DEV n = 1000;
{
Timer t;
for ( int i=0; i<n; i++ ) {
@@ -986,7 +1091,7 @@ namespace QueryTests {
void run() {
BSONObj info;
- ASSERT( client().runCommand( "unittests", BSON( "create" << "querytests.findingstart" << "capped" << true << "size" << 1000 << "$nExtents" << 5 << "autoIndexId" << false ), info ) );
+ ASSERT( client().runCommand( "unittests", BSON( "create" << "querytests.findingstart" << "capped" << true << "$nExtents" << 5 << "autoIndexId" << false ), info ) );
int i = 0;
for( int oldCount = -1;
@@ -1003,6 +1108,7 @@ namespace QueryTests {
ASSERT( !next[ "ts" ].eoo() );
ASSERT_EQUALS( ( j > min ? j : min ), next[ "ts" ].numberInt() );
}
+ //cout << k << endl;
}
}
@@ -1023,7 +1129,7 @@ namespace QueryTests {
unsigned startNumCursors = ClientCursor::numCursors();
BSONObj info;
- ASSERT( client().runCommand( "unittests", BSON( "create" << "querytests.findingstart" << "capped" << true << "size" << 10000 << "$nExtents" << 5 << "autoIndexId" << false ), info ) );
+ ASSERT( client().runCommand( "unittests", BSON( "create" << "querytests.findingstart" << "capped" << true << "$nExtents" << 5 << "autoIndexId" << false ), info ) );
int i = 0;
for( ; i < 150; client().insert( ns(), BSON( "ts" << i++ ) ) );
@@ -1046,7 +1152,35 @@ namespace QueryTests {
private:
int _old;
};
+
+ /**
+ * Check OplogReplay mode where query timestamp is earlier than the earliest
+ * entry in the collection.
+ */
+ class FindingStartStale : public CollectionBase {
+ public:
+ FindingStartStale() : CollectionBase( "findingstart" ) {}
+
+ void run() {
+ unsigned startNumCursors = ClientCursor::numCursors();
+
+ BSONObj info;
+ ASSERT( client().runCommand( "unittests", BSON( "create" << "querytests.findingstart" << "capped" << true << "$nExtents" << 5 << "autoIndexId" << false ), info ) );
+
+ // Check OplogReplay mode with empty collection.
+ auto_ptr< DBClientCursor > c = client().query( ns(), QUERY( "ts" << GTE << 50 ), 0, 0, 0, QueryOption_OplogReplay );
+ ASSERT( !c->more() );
+ // Check with some docs in the collection.
+ for( int i = 100; i < 150; client().insert( ns(), BSON( "ts" << i++ ) ) );
+ c = client().query( ns(), QUERY( "ts" << GTE << 50 ), 0, 0, 0, QueryOption_OplogReplay );
+ ASSERT( c->more() );
+ ASSERT_EQUALS( 100, c->next()[ "ts" ].numberInt() );
+
+ // Check that no persistent cursors outlast our queries above.
+ ASSERT_EQUALS( startNumCursors, ClientCursor::numCursors() );
+ }
+ };
class WhatsMyUri : public CollectionBase {
public:
@@ -1217,6 +1351,7 @@ namespace QueryTests {
}
void setupTests() {
+ add< FindingStart >();
add< CountBasic >();
add< CountQuery >();
add< CountFields >();
@@ -1250,6 +1385,8 @@ namespace QueryTests {
add< IndexInsideArrayCorrect >();
add< SubobjArr >();
add< MinMax >();
+ add< MatchCodeCodeWScope >();
+ add< MatchDBRefType >();
add< DirectLocking >();
add< FastCountIn >();
add< EmbeddedArray >();
@@ -1258,8 +1395,8 @@ namespace QueryTests {
add< TailableCappedRaceCondition >();
add< HelperTest >();
add< HelperByIdTest >();
- add< FindingStart >();
add< FindingStartPartiallyFull >();
+ add< FindingStartStale >();
add< WhatsMyUri >();
add< parsedtests::basic1 >();
diff --git a/dbtests/queryutiltests.cpp b/dbtests/queryutiltests.cpp
new file mode 100644
index 0000000..e825b4f
--- /dev/null
+++ b/dbtests/queryutiltests.cpp
@@ -0,0 +1,989 @@
+// queryutiltests.cpp : query utility unit tests
+//
+
+/**
+ * Copyright (C) 2009 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "../db/queryutil.h"
+#include "../db/querypattern.h"
+#include "../db/instance.h"
+#include "../db/pdfile.h"
+#include "dbtests.h"
+
+namespace QueryUtilTests {
+
+ namespace FieldRangeTests {
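+        /** Base test class: builds a FieldRangeSet from query() and checks the bounds and inclusivity computed for field "a". */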
+ class Base {
+ public:
+ virtual ~Base() {}
+ void run() {
+ const FieldRangeSet s( "ns", query(), true );
+ checkElt( lower(), s.range( "a" ).min() );
+ checkElt( upper(), s.range( "a" ).max() );
+ ASSERT_EQUALS( lowerInclusive(), s.range( "a" ).minInclusive() );
+ ASSERT_EQUALS( upperInclusive(), s.range( "a" ).maxInclusive() );
+ }
+ protected:
+ virtual BSONObj query() = 0;
+ virtual BSONElement lower() { return minKey.firstElement(); }
+ virtual bool lowerInclusive() { return true; }
+ virtual BSONElement upper() { return maxKey.firstElement(); }
+ virtual bool upperInclusive() { return true; }
+ static void checkElt( BSONElement expected, BSONElement actual ) {
+ if ( expected.woCompare( actual, false ) ) {
+ log() << "expected: " << expected << ", got: " << actual;
+ ASSERT( false );
+ }
+ }
+ };
+
+
+ class NumericBase : public Base {
+ public:
+ NumericBase() {
+ o = BSON( "min" << -numeric_limits<double>::max() << "max" << numeric_limits<double>::max() );
+ }
+
+ virtual BSONElement lower() { return o["min"]; }
+ virtual BSONElement upper() { return o["max"]; }
+ private:
+ BSONObj o;
+ };
+
+ class Empty : public Base {
+ virtual BSONObj query() { return BSONObj(); }
+ };
+
+ class Eq : public Base {
+ public:
+ Eq() : o_( BSON( "a" << 1 ) ) {}
+ virtual BSONObj query() { return o_; }
+ virtual BSONElement lower() { return o_.firstElement(); }
+ virtual BSONElement upper() { return o_.firstElement(); }
+ BSONObj o_;
+ };
+
+ class DupEq : public Eq {
+ public:
+ virtual BSONObj query() { return BSON( "a" << 1 << "b" << 2 << "a" << 1 ); }
+ };
+
+ class Lt : public NumericBase {
+ public:
+ Lt() : o_( BSON( "-" << 1 ) ) {}
+ virtual BSONObj query() { return BSON( "a" << LT << 1 ); }
+ virtual BSONElement upper() { return o_.firstElement(); }
+ virtual bool upperInclusive() { return false; }
+ BSONObj o_;
+ };
+
+ class Lte : public Lt {
+ virtual BSONObj query() { return BSON( "a" << LTE << 1 ); }
+ virtual bool upperInclusive() { return true; }
+ };
+
+ class Gt : public NumericBase {
+ public:
+ Gt() : o_( BSON( "-" << 1 ) ) {}
+ virtual BSONObj query() { return BSON( "a" << GT << 1 ); }
+ virtual BSONElement lower() { return o_.firstElement(); }
+ virtual bool lowerInclusive() { return false; }
+ BSONObj o_;
+ };
+
+ class Gte : public Gt {
+ virtual BSONObj query() { return BSON( "a" << GTE << 1 ); }
+ virtual bool lowerInclusive() { return true; }
+ };
+
+ class TwoLt : public Lt {
+ virtual BSONObj query() { return BSON( "a" << LT << 1 << LT << 5 ); }
+ };
+
+ class TwoGt : public Gt {
+ virtual BSONObj query() { return BSON( "a" << GT << 0 << GT << 1 ); }
+ };
+
+ class EqGte : public Eq {
+ virtual BSONObj query() { return BSON( "a" << 1 << "a" << GTE << 1 ); }
+ };
+
+ class EqGteInvalid {
+ public:
+ void run() {
+ FieldRangeSet frs( "ns", BSON( "a" << 1 << "a" << GTE << 2 ), true );
+ ASSERT( !frs.matchPossible() );
+ }
+ };
+
+ struct RegexBase : Base {
+            void run() { // need to only look at the first interval
+ FieldRangeSet s( "ns", query(), true );
+ checkElt( lower(), s.range( "a" ).intervals()[0]._lower._bound );
+ checkElt( upper(), s.range( "a" ).intervals()[0]._upper._bound );
+ ASSERT_EQUALS( lowerInclusive(), s.range( "a" ).intervals()[0]._lower._inclusive );
+ ASSERT_EQUALS( upperInclusive(), s.range( "a" ).intervals()[0]._upper._inclusive );
+ }
+ };
+
+ class Regex : public RegexBase {
+ public:
+ Regex() : o1_( BSON( "" << "abc" ) ), o2_( BSON( "" << "abd" ) ) {}
+ virtual BSONObj query() {
+ BSONObjBuilder b;
+ b.appendRegex( "a", "^abc" );
+ return b.obj();
+ }
+ virtual BSONElement lower() { return o1_.firstElement(); }
+ virtual BSONElement upper() { return o2_.firstElement(); }
+ virtual bool upperInclusive() { return false; }
+ BSONObj o1_, o2_;
+ };
+
+ class RegexObj : public RegexBase {
+ public:
+ RegexObj() : o1_( BSON( "" << "abc" ) ), o2_( BSON( "" << "abd" ) ) {}
+ virtual BSONObj query() { return BSON("a" << BSON("$regex" << "^abc")); }
+ virtual BSONElement lower() { return o1_.firstElement(); }
+ virtual BSONElement upper() { return o2_.firstElement(); }
+ virtual bool upperInclusive() { return false; }
+ BSONObj o1_, o2_;
+ };
+
+ class UnhelpfulRegex : public RegexBase {
+ public:
+ UnhelpfulRegex() {
+ BSONObjBuilder b;
+ b.appendMinForType("lower", String);
+ b.appendMaxForType("upper", String);
+ limits = b.obj();
+ }
+
+ virtual BSONObj query() {
+ BSONObjBuilder b;
+ b.appendRegex( "a", "abc" );
+ return b.obj();
+ }
+ virtual BSONElement lower() { return limits["lower"]; }
+ virtual BSONElement upper() { return limits["upper"]; }
+ virtual bool upperInclusive() { return false; }
+ BSONObj limits;
+ };
+
+ class In : public Base {
+ public:
+ In() : o1_( BSON( "-" << -3 ) ), o2_( BSON( "-" << 44 ) ) {}
+ virtual BSONObj query() {
+ vector< int > vals;
+ vals.push_back( 4 );
+ vals.push_back( 8 );
+ vals.push_back( 44 );
+ vals.push_back( -1 );
+ vals.push_back( -3 );
+ vals.push_back( 0 );
+ BSONObjBuilder bb;
+ bb.append( "$in", vals );
+ BSONObjBuilder b;
+ b.append( "a", bb.done() );
+ return b.obj();
+ }
+ virtual BSONElement lower() { return o1_.firstElement(); }
+ virtual BSONElement upper() { return o2_.firstElement(); }
+ BSONObj o1_, o2_;
+ };
+
+ class Equality {
+ public:
+ void run() {
+ FieldRangeSet s( "ns", BSON( "a" << 1 ), true );
+ ASSERT( s.range( "a" ).equality() );
+ FieldRangeSet s2( "ns", BSON( "a" << GTE << 1 << LTE << 1 ), true );
+ ASSERT( s2.range( "a" ).equality() );
+ FieldRangeSet s3( "ns", BSON( "a" << GT << 1 << LTE << 1 ), true );
+ ASSERT( !s3.range( "a" ).equality() );
+ FieldRangeSet s4( "ns", BSON( "a" << GTE << 1 << LT << 1 ), true );
+ ASSERT( !s4.range( "a" ).equality() );
+ FieldRangeSet s5( "ns", BSON( "a" << GTE << 1 << LTE << 1 << GT << 1 ), true );
+ ASSERT( !s5.range( "a" ).equality() );
+ FieldRangeSet s6( "ns", BSON( "a" << GTE << 1 << LTE << 1 << LT << 1 ), true );
+ ASSERT( !s6.range( "a" ).equality() );
+ }
+ };
+
+ class SimplifiedQuery {
+ public:
+ void run() {
+ FieldRangeSet frs( "ns", BSON( "a" << GT << 1 << GT << 5 << LT << 10 << "b" << 4 << "c" << LT << 4 << LT << 6 << "d" << GTE << 0 << GT << 0 << "e" << GTE << 0 << LTE << 10 ), true );
+ BSONObj simple = frs.simplifiedQuery();
+ cout << "simple: " << simple << endl;
+ ASSERT( !simple.getObjectField( "a" ).woCompare( fromjson( "{$gt:5,$lt:10}" ) ) );
+ ASSERT_EQUALS( 4, simple.getIntField( "b" ) );
+ ASSERT( !simple.getObjectField( "c" ).woCompare( BSON("$gte" << -numeric_limits<double>::max() << "$lt" << 4 ) ) );
+ ASSERT( !simple.getObjectField( "d" ).woCompare( BSON("$gt" << 0 << "$lte" << numeric_limits<double>::max() ) ) );
+ ASSERT( !simple.getObjectField( "e" ).woCompare( fromjson( "{$gte:0,$lte:10}" ) ) );
+ }
+ };
+
+ class QueryPatternTest {
+ public:
+ void run() {
+ ASSERT( p( BSON( "a" << 1 ) ) == p( BSON( "a" << 1 ) ) );
+ ASSERT( p( BSON( "a" << 1 ) ) == p( BSON( "a" << 5 ) ) );
+ ASSERT( p( BSON( "a" << 1 ) ) != p( BSON( "b" << 1 ) ) );
+ ASSERT( p( BSON( "a" << 1 ) ) != p( BSON( "a" << LTE << 1 ) ) );
+ ASSERT( p( BSON( "a" << 1 ) ) != p( BSON( "a" << 1 << "b" << 2 ) ) );
+ ASSERT( p( BSON( "a" << 1 << "b" << 3 ) ) != p( BSON( "a" << 1 ) ) );
+ ASSERT( p( BSON( "a" << LT << 1 ) ) == p( BSON( "a" << LTE << 5 ) ) );
+ ASSERT( p( BSON( "a" << LT << 1 << GTE << 0 ) ) == p( BSON( "a" << LTE << 5 << GTE << 0 ) ) );
+ ASSERT( p( BSON( "a" << 1 ) ) < p( BSON( "a" << 1 << "b" << 1 ) ) );
+ ASSERT( !( p( BSON( "a" << 1 << "b" << 1 ) ) < p( BSON( "a" << 1 ) ) ) );
+ ASSERT( p( BSON( "a" << 1 ), BSON( "b" << 1 ) ) == p( BSON( "a" << 4 ), BSON( "b" << "a" ) ) );
+ ASSERT( p( BSON( "a" << 1 ), BSON( "b" << 1 ) ) == p( BSON( "a" << 4 ), BSON( "b" << -1 ) ) );
+ ASSERT( p( BSON( "a" << 1 ), BSON( "b" << 1 ) ) != p( BSON( "a" << 4 ), BSON( "c" << 1 ) ) );
+ ASSERT( p( BSON( "a" << 1 ), BSON( "b" << 1 << "c" << -1 ) ) == p( BSON( "a" << 4 ), BSON( "b" << -1 << "c" << 1 ) ) );
+ ASSERT( p( BSON( "a" << 1 ), BSON( "b" << 1 << "c" << 1 ) ) != p( BSON( "a" << 4 ), BSON( "b" << 1 ) ) );
+ ASSERT( p( BSON( "a" << 1 ), BSON( "b" << 1 ) ) != p( BSON( "a" << 4 ), BSON( "b" << 1 << "c" << 1 ) ) );
+ }
+ private:
+ static QueryPattern p( const BSONObj &query, const BSONObj &sort = BSONObj() ) {
+ return FieldRangeSet( "", query, true ).pattern( sort );
+ }
+ };
+
+ class NoWhere {
+ public:
+ void run() {
+ ASSERT_EQUALS( 0, FieldRangeSet( "ns", BSON( "$where" << 1 ), true ).nNontrivialRanges() );
+ }
+ };
+
+ class Numeric {
+ public:
+ void run() {
+ FieldRangeSet f( "", BSON( "a" << 1 ), true );
+ ASSERT( f.range( "a" ).min().woCompare( BSON( "a" << 2.0 ).firstElement() ) < 0 );
+ ASSERT( f.range( "a" ).min().woCompare( BSON( "a" << 0.0 ).firstElement() ) > 0 );
+ }
+ };
+
+ class InLowerBound {
+ public:
+ void run() {
+ FieldRangeSet f( "", fromjson( "{a:{$gt:4,$in:[1,2,3,4,5,6]}}" ), true );
+ ASSERT( f.range( "a" ).min().woCompare( BSON( "a" << 5.0 ).firstElement(), false ) == 0 );
+ ASSERT( f.range( "a" ).max().woCompare( BSON( "a" << 6.0 ).firstElement(), false ) == 0 );
+ }
+ };
+
+ class InUpperBound {
+ public:
+ void run() {
+ FieldRangeSet f( "", fromjson( "{a:{$lt:4,$in:[1,2,3,4,5,6]}}" ), true );
+ ASSERT( f.range( "a" ).min().woCompare( BSON( "a" << 1.0 ).firstElement(), false ) == 0 );
+ ASSERT( f.range( "a" ).max().woCompare( BSON( "a" << 3.0 ).firstElement(), false ) == 0 );
+ }
+ };
+
+ class UnionBound {
+ public:
+ void run() {
+ FieldRangeSet frs( "", fromjson( "{a:{$gt:1,$lt:9},b:{$gt:9,$lt:12}}" ), true );
+ FieldRange ret = frs.range( "a" );
+ ret |= frs.range( "b" );
+ ASSERT_EQUALS( 2U, ret.intervals().size() );
+ }
+ };
+
+ class MultiBound {
+ public:
+ void run() {
+ FieldRangeSet frs1( "", fromjson( "{a:{$in:[1,3,5,7,9]}}" ), true );
+ FieldRangeSet frs2( "", fromjson( "{a:{$in:[2,3,5,8,9]}}" ), true );
+ FieldRange fr1 = frs1.range( "a" );
+ FieldRange fr2 = frs2.range( "a" );
+ fr1 &= fr2;
+ ASSERT( fr1.min().woCompare( BSON( "a" << 3.0 ).firstElement(), false ) == 0 );
+ ASSERT( fr1.max().woCompare( BSON( "a" << 9.0 ).firstElement(), false ) == 0 );
+ vector< FieldInterval > intervals = fr1.intervals();
+ vector< FieldInterval >::const_iterator j = intervals.begin();
+ double expected[] = { 3, 5, 9 };
+ for( int i = 0; i < 3; ++i, ++j ) {
+ ASSERT_EQUALS( expected[ i ], j->_lower._bound.number() );
+ ASSERT( j->_lower._inclusive );
+ ASSERT( j->_lower == j->_upper );
+ }
+ ASSERT( j == intervals.end() );
+ }
+ };
+
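+        /** Base test class for range subtraction: subtracts the range for "b" from the range for "a" and checks the remaining intervals. */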
+ class DiffBase {
+ public:
+ virtual ~DiffBase() {}
+ void run() {
+ FieldRangeSet frs( "", fromjson( obj().toString() ), true );
+ FieldRange ret = frs.range( "a" );
+ ret -= frs.range( "b" );
+ check( ret );
+ }
+ protected:
+ void check( const FieldRange &fr ) {
+ vector< FieldInterval > fi = fr.intervals();
+ ASSERT_EQUALS( len(), fi.size() );
+ int i = 0;
+ for( vector< FieldInterval >::const_iterator j = fi.begin(); j != fi.end(); ++j ) {
+ ASSERT_EQUALS( nums()[ i ], j->_lower._bound.numberInt() );
+ ASSERT_EQUALS( incs()[ i ], j->_lower._inclusive );
+ ++i;
+ ASSERT_EQUALS( nums()[ i ], j->_upper._bound.numberInt() );
+ ASSERT_EQUALS( incs()[ i ], j->_upper._inclusive );
+ ++i;
+ }
+ }
+ virtual unsigned len() const = 0;
+ virtual const int *nums() const = 0;
+ virtual const bool *incs() const = 0;
+ virtual BSONObj obj() const = 0;
+ };
+
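+        /** Expects a single remaining interval with the given bounds and inclusivity flags. */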
+ class TwoRangeBase : public DiffBase {
+ public:
+ TwoRangeBase( string obj, int low, int high, bool lowI, bool highI )
+ : _obj( obj ) {
+ _n[ 0 ] = low;
+ _n[ 1 ] = high;
+ _b[ 0 ] = lowI;
+ _b[ 1 ] = highI;
+ }
+ private:
+ virtual unsigned len() const { return 1; }
+ virtual const int *nums() const { return _n; }
+ virtual const bool *incs() const { return _b; }
+ virtual BSONObj obj() const { return fromjson( _obj ); }
+ string _obj;
+ int _n[ 2 ];
+ bool _b[ 2 ];
+ };
+
+ struct Diff1 : public TwoRangeBase {
+ Diff1() : TwoRangeBase( "{a:{$gt:1,$lt:2},b:{$gt:3,$lt:4}}", 1, 2, false, false ) {}
+ };
+
+ struct Diff2 : public TwoRangeBase {
+ Diff2() : TwoRangeBase( "{a:{$gt:1,$lt:2},b:{$gt:2,$lt:4}}", 1, 2, false, false ) {}
+ };
+
+ struct Diff3 : public TwoRangeBase {
+ Diff3() : TwoRangeBase( "{a:{$gt:1,$lte:2},b:{$gt:2,$lt:4}}", 1, 2, false, true ) {}
+ };
+
+ struct Diff4 : public TwoRangeBase {
+ Diff4() : TwoRangeBase( "{a:{$gt:1,$lt:2},b:{$gte:2,$lt:4}}", 1, 2, false, false) {}
+ };
+
+ struct Diff5 : public TwoRangeBase {
+ Diff5() : TwoRangeBase( "{a:{$gt:1,$lte:2},b:{$gte:2,$lt:4}}", 1, 2, false, false) {}
+ };
+
+ struct Diff6 : public TwoRangeBase {
+ Diff6() : TwoRangeBase( "{a:{$gt:1,$lte:3},b:{$gte:2,$lt:4}}", 1, 2, false, false) {}
+ };
+
+ struct Diff7 : public TwoRangeBase {
+ Diff7() : TwoRangeBase( "{a:{$gt:1,$lte:3},b:{$gt:2,$lt:4}}", 1, 2, false, true) {}
+ };
+
+ struct Diff8 : public TwoRangeBase {
+ Diff8() : TwoRangeBase( "{a:{$gt:1,$lt:4},b:{$gt:2,$lt:4}}", 1, 2, false, true) {}
+ };
+
+ struct Diff9 : public TwoRangeBase {
+ Diff9() : TwoRangeBase( "{a:{$gt:1,$lt:4},b:{$gt:2,$lte:4}}", 1, 2, false, true) {}
+ };
+
+ struct Diff10 : public TwoRangeBase {
+ Diff10() : TwoRangeBase( "{a:{$gt:1,$lte:4},b:{$gt:2,$lte:4}}", 1, 2, false, true) {}
+ };
+
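+        /** Expects the subtraction to leave two remaining intervals. */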
+ class SplitRangeBase : public DiffBase {
+ public:
+ SplitRangeBase( string obj, int low1, bool low1I, int high1, bool high1I, int low2, bool low2I, int high2, bool high2I )
+ : _obj( obj ) {
+ _n[ 0 ] = low1;
+ _n[ 1 ] = high1;
+ _n[ 2 ] = low2;
+ _n[ 3 ] = high2;
+ _b[ 0 ] = low1I;
+ _b[ 1 ] = high1I;
+ _b[ 2 ] = low2I;
+ _b[ 3 ] = high2I;
+ }
+ private:
+ virtual unsigned len() const { return 2; }
+ virtual const int *nums() const { return _n; }
+ virtual const bool *incs() const { return _b; }
+ virtual BSONObj obj() const { return fromjson( _obj ); }
+ string _obj;
+ int _n[ 4 ];
+ bool _b[ 4 ];
+ };
+
+ struct Diff11 : public SplitRangeBase {
+ Diff11() : SplitRangeBase( "{a:{$gt:1,$lte:4},b:{$gt:2,$lt:4}}", 1, false, 2, true, 4, true, 4, true) {}
+ };
+
+ struct Diff12 : public SplitRangeBase {
+ Diff12() : SplitRangeBase( "{a:{$gt:1,$lt:5},b:{$gt:2,$lt:4}}", 1, false, 2, true, 4, true, 5, false) {}
+ };
+
+ struct Diff13 : public TwoRangeBase {
+ Diff13() : TwoRangeBase( "{a:{$gt:1,$lt:5},b:{$gt:1,$lt:4}}", 4, 5, true, false) {}
+ };
+
+ struct Diff14 : public SplitRangeBase {
+ Diff14() : SplitRangeBase( "{a:{$gte:1,$lt:5},b:{$gt:1,$lt:4}}", 1, true, 1, true, 4, true, 5, false) {}
+ };
+
+ struct Diff15 : public TwoRangeBase {
+ Diff15() : TwoRangeBase( "{a:{$gt:1,$lt:5},b:{$gte:1,$lt:4}}", 4, 5, true, false) {}
+ };
+
+ struct Diff16 : public TwoRangeBase {
+ Diff16() : TwoRangeBase( "{a:{$gte:1,$lt:5},b:{$gte:1,$lt:4}}", 4, 5, true, false) {}
+ };
+
+ struct Diff17 : public TwoRangeBase {
+ Diff17() : TwoRangeBase( "{a:{$gt:1,$lt:5},b:{$gt:0,$lt:4}}", 4, 5, true, false) {}
+ };
+
+ struct Diff18 : public TwoRangeBase {
+ Diff18() : TwoRangeBase( "{a:{$gt:1,$lt:5},b:{$gt:0,$lte:4}}", 4, 5, false, false) {}
+ };
+
+ struct Diff19 : public TwoRangeBase {
+ Diff19() : TwoRangeBase( "{a:{$gte:1,$lte:5},b:{$gte:0,$lte:1}}", 1, 5, false, true) {}
+ };
+
+ struct Diff20 : public TwoRangeBase {
+ Diff20() : TwoRangeBase( "{a:{$gt:1,$lte:5},b:{$gte:0,$lte:1}}", 1, 5, false, true) {}
+ };
+
+ struct Diff21 : public TwoRangeBase {
+ Diff21() : TwoRangeBase( "{a:{$gte:1,$lte:5},b:{$gte:0,$lt:1}}", 1, 5, true, true) {}
+ };
+
+ struct Diff22 : public TwoRangeBase {
+ Diff22() : TwoRangeBase( "{a:{$gt:1,$lte:5},b:{$gte:0,$lt:1}}", 1, 5, false, true) {}
+ };
+
+ struct Diff23 : public TwoRangeBase {
+ Diff23() : TwoRangeBase( "{a:{$gt:1,$lte:5},b:{$gte:0,$lt:0.5}}", 1, 5, false, true) {}
+ };
+
+ struct Diff24 : public TwoRangeBase {
+ Diff24() : TwoRangeBase( "{a:{$gt:1,$lte:5},b:0}", 1, 5, false, true) {}
+ };
+
+ struct Diff25 : public TwoRangeBase {
+ Diff25() : TwoRangeBase( "{a:{$gte:1,$lte:5},b:0}", 1, 5, true, true) {}
+ };
+
+ struct Diff26 : public TwoRangeBase {
+ Diff26() : TwoRangeBase( "{a:{$gt:1,$lte:5},b:1}", 1, 5, false, true) {}
+ };
+
+ struct Diff27 : public TwoRangeBase {
+ Diff27() : TwoRangeBase( "{a:{$gte:1,$lte:5},b:1}", 1, 5, false, true) {}
+ };
+
+ struct Diff28 : public SplitRangeBase {
+ Diff28() : SplitRangeBase( "{a:{$gte:1,$lte:5},b:3}", 1, true, 3, false, 3, false, 5, true) {}
+ };
+
+ struct Diff29 : public TwoRangeBase {
+ Diff29() : TwoRangeBase( "{a:{$gte:1,$lte:5},b:5}", 1, 5, true, false) {}
+ };
+
+ struct Diff30 : public TwoRangeBase {
+ Diff30() : TwoRangeBase( "{a:{$gte:1,$lt:5},b:5}", 1, 5, true, false) {}
+ };
+
+ struct Diff31 : public TwoRangeBase {
+ Diff31() : TwoRangeBase( "{a:{$gte:1,$lt:5},b:6}", 1, 5, true, false) {}
+ };
+
+ struct Diff32 : public TwoRangeBase {
+ Diff32() : TwoRangeBase( "{a:{$gte:1,$lte:5},b:6}", 1, 5, true, true) {}
+ };
+
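+        /** Expects the subtraction to leave no remaining intervals. */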
+ class EmptyBase : public DiffBase {
+ public:
+ EmptyBase( string obj )
+ : _obj( obj ) {}
+ private:
+ virtual unsigned len() const { return 0; }
+ virtual const int *nums() const { return 0; }
+ virtual const bool *incs() const { return 0; }
+ virtual BSONObj obj() const { return fromjson( _obj ); }
+ string _obj;
+ };
+
+ struct Diff33 : public EmptyBase {
+ Diff33() : EmptyBase( "{a:{$gte:1,$lte:5},b:{$gt:0,$lt:6}}" ) {}
+ };
+
+ struct Diff34 : public EmptyBase {
+ Diff34() : EmptyBase( "{a:{$gte:1,$lte:5},b:{$gte:1,$lt:6}}" ) {}
+ };
+
+ struct Diff35 : public EmptyBase {
+ Diff35() : EmptyBase( "{a:{$gt:1,$lte:5},b:{$gte:1,$lt:6}}" ) {}
+ };
+
+ struct Diff36 : public EmptyBase {
+ Diff36() : EmptyBase( "{a:{$gt:1,$lte:5},b:{$gt:1,$lt:6}}" ) {}
+ };
+
+ struct Diff37 : public TwoRangeBase {
+ Diff37() : TwoRangeBase( "{a:{$gte:1,$lte:5},b:{$gt:1,$lt:6}}", 1, 1, true, true ) {}
+ };
+
+ struct Diff38 : public EmptyBase {
+ Diff38() : EmptyBase( "{a:{$gt:1,$lt:5},b:{$gt:0,$lt:5}}" ) {}
+ };
+
+ struct Diff39 : public EmptyBase {
+ Diff39() : EmptyBase( "{a:{$gt:1,$lt:5},b:{$gt:0,$lte:5}}" ) {}
+ };
+
+ struct Diff40 : public EmptyBase {
+ Diff40() : EmptyBase( "{a:{$gt:1,$lte:5},b:{$gt:0,$lte:5}}" ) {}
+ };
+
+ struct Diff41 : public TwoRangeBase {
+ Diff41() : TwoRangeBase( "{a:{$gte:1,$lte:5},b:{$gt:0,$lt:5}}", 5, 5, true, true ) {}
+ };
+
+ struct Diff42 : public EmptyBase {
+ Diff42() : EmptyBase( "{a:{$gt:1,$lt:5},b:{$gt:1,$lt:5}}" ) {}
+ };
+
+ struct Diff43 : public EmptyBase {
+ Diff43() : EmptyBase( "{a:{$gt:1,$lt:5},b:{$gt:1,$lte:5}}" ) {}
+ };
+
+ struct Diff44 : public EmptyBase {
+ Diff44() : EmptyBase( "{a:{$gt:1,$lt:5},b:{$gte:1,$lt:5}}" ) {}
+ };
+
+ struct Diff45 : public EmptyBase {
+ Diff45() : EmptyBase( "{a:{$gt:1,$lt:5},b:{$gte:1,$lte:5}}" ) {}
+ };
+
+ struct Diff46 : public TwoRangeBase {
+ Diff46() : TwoRangeBase( "{a:{$gt:1,$lte:5},b:{$gt:1,$lt:5}}", 5, 5, true, true ) {}
+ };
+
+ struct Diff47 : public EmptyBase {
+ Diff47() : EmptyBase( "{a:{$gt:1,$lte:5},b:{$gt:1,$lte:5}}" ) {}
+ };
+
+ struct Diff48 : public TwoRangeBase {
+ Diff48() : TwoRangeBase( "{a:{$gt:1,$lte:5},b:{$gte:1,$lt:5}}", 5, 5, true, true ) {}
+ };
+
+ struct Diff49 : public EmptyBase {
+ Diff49() : EmptyBase( "{a:{$gt:1,$lte:5},b:{$gte:1,$lte:5}}" ) {}
+ };
+
+ struct Diff50 : public TwoRangeBase {
+ Diff50() : TwoRangeBase( "{a:{$gte:1,$lt:5},b:{$gt:1,$lt:5}}", 1, 1, true, true ) {}
+ };
+
+ struct Diff51 : public TwoRangeBase {
+ Diff51() : TwoRangeBase( "{a:{$gte:1,$lt:5},b:{$gt:1,$lte:5}}", 1, 1, true, true ) {}
+ };
+
+ struct Diff52 : public EmptyBase {
+ Diff52() : EmptyBase( "{a:{$gte:1,$lt:5},b:{$gte:1,$lt:5}}" ) {}
+ };
+
+ struct Diff53 : public EmptyBase {
+ Diff53() : EmptyBase( "{a:{$gte:1,$lt:5},b:{$gte:1,$lte:5}}" ) {}
+ };
+
+ struct Diff54 : public SplitRangeBase {
+ Diff54() : SplitRangeBase( "{a:{$gte:1,$lte:5},b:{$gt:1,$lt:5}}", 1, true, 1, true, 5, true, 5, true ) {}
+ };
+
+ struct Diff55 : public TwoRangeBase {
+ Diff55() : TwoRangeBase( "{a:{$gte:1,$lte:5},b:{$gt:1,$lte:5}}", 1, 1, true, true ) {}
+ };
+
+ struct Diff56 : public TwoRangeBase {
+ Diff56() : TwoRangeBase( "{a:{$gte:1,$lte:5},b:{$gte:1,$lt:5}}", 5, 5, true, true ) {}
+ };
+
+ struct Diff57 : public EmptyBase {
+ Diff57() : EmptyBase( "{a:{$gte:1,$lte:5},b:{$gte:1,$lte:5}}" ) {}
+ };
+
+ struct Diff58 : public TwoRangeBase {
+ Diff58() : TwoRangeBase( "{a:1,b:{$gt:1,$lt:5}}", 1, 1, true, true ) {}
+ };
+
+ struct Diff59 : public EmptyBase {
+ Diff59() : EmptyBase( "{a:1,b:{$gte:1,$lt:5}}" ) {}
+ };
+
+ struct Diff60 : public EmptyBase {
+ Diff60() : EmptyBase( "{a:2,b:{$gte:1,$lt:5}}" ) {}
+ };
+
+ struct Diff61 : public EmptyBase {
+ Diff61() : EmptyBase( "{a:5,b:{$gte:1,$lte:5}}" ) {}
+ };
+
+ struct Diff62 : public TwoRangeBase {
+ Diff62() : TwoRangeBase( "{a:5,b:{$gt:1,$lt:5}}", 5, 5, true, true ) {}
+ };
+
+ struct Diff63 : public EmptyBase {
+ Diff63() : EmptyBase( "{a:5,b:5}" ) {}
+ };
+
+ struct Diff64 : public TwoRangeBase {
+ Diff64() : TwoRangeBase( "{a:{$gte:1,$lte:2},b:{$gt:0,$lte:1}}", 1, 2, false, true ) {}
+ };
+
+ class DiffMulti1 : public DiffBase {
+ public:
+ void run() {
+ FieldRangeSet frs( "", fromjson( "{a:{$gt:1,$lt:9},b:{$gt:0,$lt:2},c:3,d:{$gt:4,$lt:5},e:{$gt:7,$lt:10}}" ), true );
+ FieldRange ret = frs.range( "a" );
+ FieldRange other = frs.range( "b" );
+ other |= frs.range( "c" );
+ other |= frs.range( "d" );
+ other |= frs.range( "e" );
+ ret -= other;
+ check( ret );
+ }
+ protected:
+ virtual unsigned len() const { return 3; }
+ virtual const int *nums() const { static int n[] = { 2, 3, 3, 4, 5, 7 }; return n; }
+ virtual const bool *incs() const { static bool b[] = { true, false, false, true, true, true }; return b; }
+ virtual BSONObj obj() const { return BSONObj(); }
+ };
+
+ class DiffMulti2 : public DiffBase {
+ public:
+ void run() {
+ FieldRangeSet frs( "", fromjson( "{a:{$gt:1,$lt:9},b:{$gt:0,$lt:2},c:3,d:{$gt:4,$lt:5},e:{$gt:7,$lt:10}}" ), true );
+ FieldRange mask = frs.range( "a" );
+ FieldRange ret = frs.range( "b" );
+ ret |= frs.range( "c" );
+ ret |= frs.range( "d" );
+ ret |= frs.range( "e" );
+ ret -= mask;
+ check( ret );
+ }
+ protected:
+ virtual unsigned len() const { return 2; }
+ virtual const int *nums() const { static int n[] = { 0, 1, 9, 10 }; return n; }
+ virtual const bool *incs() const { static bool b[] = { false, true, true, false }; return b; }
+ virtual BSONObj obj() const { return BSONObj(); }
+ };
+
+ } // namespace FieldRangeTests
+
+ namespace FieldRangeSetTests {
+
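+        /** Intersecting two FieldRangeSets combines the constraints for each field. */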
+ class Intersect {
+ public:
+ void run() {
+ FieldRangeSet frs1( "", fromjson( "{b:{$in:[5,6]},c:7,d:{$in:[8,9]}}" ), true );
+ FieldRangeSet frs2( "", fromjson( "{a:1,b:5,c:{$in:[7,8]},d:{$in:[8,9]},e:10}" ), true );
+ frs1 &= frs2;
+ ASSERT_EQUALS( fromjson( "{a:1,b:5,c:7,d:{$gte:8,$lte:9},e:10}" ), frs1.simplifiedQuery( BSONObj() ) );
+ }
+ };
+
+ class MultiKeyIntersect {
+ public:
+ void run() {
+ FieldRangeSet frs1( "", BSONObj(), false );
+ FieldRangeSet frs2( "", BSON( "a" << GT << 4 ), false );
+ FieldRangeSet frs3( "", BSON( "a" << LT << 6 ), false );
+ // An intersection with a trivial range is allowed.
+ frs1 &= frs2;
+ ASSERT_EQUALS( frs2.simplifiedQuery( BSONObj() ), frs1.simplifiedQuery( BSONObj() ) );
+ // An intersection with a nontrivial range is not allowed, as it might prevent a valid
+ // multikey match.
+ frs1 &= frs3;
+ ASSERT_EQUALS( frs2.simplifiedQuery( BSONObj() ), frs1.simplifiedQuery( BSONObj() ) );
+ // Now intersect with a fully contained range.
+ FieldRangeSet frs4( "", BSON( "a" << GT << 6 ), false );
+ frs1 &= frs4;
+ ASSERT_EQUALS( frs4.simplifiedQuery( BSONObj() ), frs1.simplifiedQuery( BSONObj() ) );
+ }
+ };
+
+ class MultiKeyDiff {
+ public:
+ void run() {
+ FieldRangeSet frs1( "", BSON( "a" << GT << 4 ), false );
+ FieldRangeSet frs2( "", BSON( "a" << GT << 6 ), false );
+ // Range subtraction is no different for multikey ranges.
+ frs1 -= frs2;
+ ASSERT_EQUALS( BSON( "a" << GT << 4 << LTE << 6 ), frs1.simplifiedQuery( BSONObj() ) );
+ }
+ };
+
+ class MatchPossible {
+ public:
+ void run() {
+ FieldRangeSet frs1( "", BSON( "a" << GT << 4 ), true );
+ ASSERT( frs1.matchPossible() );
+ // Conflicting constraints invalid for a single key set.
+ FieldRangeSet frs2( "", BSON( "a" << GT << 4 << LT << 2 ), true );
+ ASSERT( !frs2.matchPossible() );
+                // Conflicting constraints do not make a match impossible for a multi key set.
+ FieldRangeSet frs3( "", BSON( "a" << GT << 4 << LT << 2 ), false );
+ ASSERT( frs3.matchPossible() );
+ }
+ };
+
+ class MatchPossibleForIndex {
+ public:
+ void run() {
+                // Conflicting constraints do not make a match impossible for a multi key set.
+ FieldRangeSet frs1( "", BSON( "a" << GT << 4 << LT << 2 ), false );
+ ASSERT( frs1.matchPossibleForIndex( BSON( "a" << 1 ) ) );
+                // Conflicting constraints make a match impossible for a single key set.
+ FieldRangeSet frs2( "", BSON( "a" << GT << 4 << LT << 2 ), true );
+ ASSERT( !frs2.matchPossibleForIndex( BSON( "a" << 1 ) ) );
+ // If the index doesn't include the key, it is not single key invalid.
+ ASSERT( frs2.matchPossibleForIndex( BSON( "b" << 1 ) ) );
+                // If the key pattern is not a real index (e.g. $natural or empty), the set is not single key invalid.
+ ASSERT( frs2.matchPossibleForIndex( BSON( "$natural" << 1 ) ) );
+ ASSERT( frs2.matchPossibleForIndex( BSONObj() ) );
+ }
+ };
+
+ } // namespace FieldRangeSetTests
+
+ namespace FieldRangeSetPairTests {
+
+ class NoNontrivialRanges {
+ public:
+ void run() {
+ FieldRangeSetPair frsp1( "", BSONObj() );
+ ASSERT( frsp1.noNontrivialRanges() );
+ FieldRangeSetPair frsp2( "", BSON( "a" << 1 ) );
+ ASSERT( !frsp2.noNontrivialRanges() );
+ FieldRangeSetPair frsp3( "", BSON( "a" << GT << 1 ) );
+ ASSERT( !frsp3.noNontrivialRanges() );
+ // A single key invalid constraint is still nontrivial.
+ FieldRangeSetPair frsp4( "", BSON( "a" << GT << 1 << LT << 0 ) );
+ ASSERT( !frsp4.noNontrivialRanges() );
+ // Still nontrivial if multikey invalid.
+ frsp4 -= frsp4.frsForIndex( 0, -1 );
+ ASSERT( !frsp4.noNontrivialRanges() );
+ }
+ };
+
+ class MatchPossible {
+ public:
+ void run() {
+ // Match possible for simple query.
+ FieldRangeSetPair frsp1( "", BSON( "a" << 1 ) );
+ ASSERT( frsp1.matchPossible() );
+ // Match possible for single key invalid query.
+ FieldRangeSetPair frsp2( "", BSON( "a" << GT << 1 << LT << 0 ) );
+ ASSERT( frsp2.matchPossible() );
+ // Match not possible for multi key invalid query.
+ frsp1 -= frsp1.frsForIndex( 0, - 1 );
+ ASSERT( !frsp1.matchPossible() );
+ }
+ };
+
+ class IndexBase {
+ public:
+ IndexBase() : _ctx( ns() ) , indexNum_( 0 ) {
+ string err;
+ userCreateNS( ns(), BSONObj(), err, false );
+ }
+ ~IndexBase() {
+ if ( !nsd() )
+ return;
+ string s( ns() );
+ dropNS( s );
+ }
+ protected:
+ static const char *ns() { return "unittests.FieldRangeSetPairTests"; }
+ static NamespaceDetails *nsd() { return nsdetails( ns() ); }
+ IndexDetails *index( const BSONObj &key ) {
+ stringstream ss;
+ ss << indexNum_++;
+ string name = ss.str();
+ client_.resetIndexCache();
+ client_.ensureIndex( ns(), key, false, name.c_str() );
+ NamespaceDetails *d = nsd();
+ for( int i = 0; i < d->nIndexes; ++i ) {
+ if ( d->idx(i).keyPattern() == key /*indexName() == name*/ || ( d->idx(i).isIdIndex() && IndexDetails::isIdIndexPattern( key ) ) )
+ return &d->idx(i);
+ }
+ assert( false );
+ return 0;
+ }
+ int indexno( const BSONObj &key ) {
+ return nsd()->idxNo( *index(key) );
+ }
+ static DBDirectClient client_;
+ private:
+ dblock lk_;
+ Client::Context _ctx;
+ int indexNum_;
+ };
+ DBDirectClient IndexBase::client_;
+
+ class MatchPossibleForIndex : public IndexBase {
+ public:
+ void run() {
+ int a = indexno( BSON( "a" << 1 ) );
+ int b = indexno( BSON( "b" << 1 ) );
+ IndexBase::client_.insert( ns(), BSON( "a" << BSON_ARRAY( 1 << 2 ) << "b" << 1 ) );
+ // Valid ranges match possible for both indexes.
+ FieldRangeSetPair frsp1( ns(), BSON( "a" << GT << 1 << LT << 4 << "b" << GT << 1 << LT << 4 ) );
+ ASSERT( frsp1.matchPossibleForIndex( nsd(), a, BSON( "a" << 1 ) ) );
+ ASSERT( frsp1.matchPossibleForIndex( nsd(), b, BSON( "b" << 1 ) ) );
+ // Single key invalid range means match impossible for single key index.
+ FieldRangeSetPair frsp2( ns(), BSON( "a" << GT << 4 << LT << 1 << "b" << GT << 4 << LT << 1 ) );
+ ASSERT( frsp2.matchPossibleForIndex( nsd(), a, BSON( "a" << 1 ) ) );
+ ASSERT( !frsp2.matchPossibleForIndex( nsd(), b, BSON( "b" << 1 ) ) );
+ }
+ };
+
+ } // namespace FieldRangeSetPairTests
+
+ class All : public Suite {
+ public:
+ All() : Suite( "queryutil" ) {}
+
+ void setupTests() {
+ add< FieldRangeTests::Empty >();
+ add< FieldRangeTests::Eq >();
+ add< FieldRangeTests::DupEq >();
+ add< FieldRangeTests::Lt >();
+ add< FieldRangeTests::Lte >();
+ add< FieldRangeTests::Gt >();
+ add< FieldRangeTests::Gte >();
+ add< FieldRangeTests::TwoLt >();
+ add< FieldRangeTests::TwoGt >();
+ add< FieldRangeTests::EqGte >();
+ add< FieldRangeTests::EqGteInvalid >();
+ add< FieldRangeTests::Regex >();
+ add< FieldRangeTests::RegexObj >();
+ add< FieldRangeTests::UnhelpfulRegex >();
+ add< FieldRangeTests::In >();
+ add< FieldRangeTests::Equality >();
+ add< FieldRangeTests::SimplifiedQuery >();
+ add< FieldRangeTests::QueryPatternTest >();
+ add< FieldRangeTests::NoWhere >();
+ add< FieldRangeTests::Numeric >();
+ add< FieldRangeTests::InLowerBound >();
+ add< FieldRangeTests::InUpperBound >();
+ add< FieldRangeTests::UnionBound >();
+ add< FieldRangeTests::MultiBound >();
+ add< FieldRangeTests::Diff1 >();
+ add< FieldRangeTests::Diff2 >();
+ add< FieldRangeTests::Diff3 >();
+ add< FieldRangeTests::Diff4 >();
+ add< FieldRangeTests::Diff5 >();
+ add< FieldRangeTests::Diff6 >();
+ add< FieldRangeTests::Diff7 >();
+ add< FieldRangeTests::Diff8 >();
+ add< FieldRangeTests::Diff9 >();
+ add< FieldRangeTests::Diff10 >();
+ add< FieldRangeTests::Diff11 >();
+ add< FieldRangeTests::Diff12 >();
+ add< FieldRangeTests::Diff13 >();
+ add< FieldRangeTests::Diff14 >();
+ add< FieldRangeTests::Diff15 >();
+ add< FieldRangeTests::Diff16 >();
+ add< FieldRangeTests::Diff17 >();
+ add< FieldRangeTests::Diff18 >();
+ add< FieldRangeTests::Diff19 >();
+ add< FieldRangeTests::Diff20 >();
+ add< FieldRangeTests::Diff21 >();
+ add< FieldRangeTests::Diff22 >();
+ add< FieldRangeTests::Diff23 >();
+ add< FieldRangeTests::Diff24 >();
+ add< FieldRangeTests::Diff25 >();
+ add< FieldRangeTests::Diff26 >();
+ add< FieldRangeTests::Diff27 >();
+ add< FieldRangeTests::Diff28 >();
+ add< FieldRangeTests::Diff29 >();
+ add< FieldRangeTests::Diff30 >();
+ add< FieldRangeTests::Diff31 >();
+ add< FieldRangeTests::Diff32 >();
+ add< FieldRangeTests::Diff33 >();
+ add< FieldRangeTests::Diff34 >();
+ add< FieldRangeTests::Diff35 >();
+ add< FieldRangeTests::Diff36 >();
+ add< FieldRangeTests::Diff37 >();
+ add< FieldRangeTests::Diff38 >();
+ add< FieldRangeTests::Diff39 >();
+ add< FieldRangeTests::Diff40 >();
+ add< FieldRangeTests::Diff41 >();
+ add< FieldRangeTests::Diff42 >();
+ add< FieldRangeTests::Diff43 >();
+ add< FieldRangeTests::Diff44 >();
+ add< FieldRangeTests::Diff45 >();
+ add< FieldRangeTests::Diff46 >();
+ add< FieldRangeTests::Diff47 >();
+ add< FieldRangeTests::Diff48 >();
+ add< FieldRangeTests::Diff49 >();
+ add< FieldRangeTests::Diff50 >();
+ add< FieldRangeTests::Diff51 >();
+ add< FieldRangeTests::Diff52 >();
+ add< FieldRangeTests::Diff53 >();
+ add< FieldRangeTests::Diff54 >();
+ add< FieldRangeTests::Diff55 >();
+ add< FieldRangeTests::Diff56 >();
+ add< FieldRangeTests::Diff57 >();
+ add< FieldRangeTests::Diff58 >();
+ add< FieldRangeTests::Diff59 >();
+ add< FieldRangeTests::Diff60 >();
+ add< FieldRangeTests::Diff61 >();
+ add< FieldRangeTests::Diff62 >();
+ add< FieldRangeTests::Diff63 >();
+ add< FieldRangeTests::Diff64 >();
+ add< FieldRangeTests::DiffMulti1 >();
+ add< FieldRangeTests::DiffMulti2 >();
+ add< FieldRangeSetTests::Intersect >();
+ add< FieldRangeSetTests::MultiKeyIntersect >();
+ add< FieldRangeSetTests::MultiKeyDiff >();
+ add< FieldRangeSetTests::MatchPossible >();
+ add< FieldRangeSetTests::MatchPossibleForIndex >();
+ add< FieldRangeSetPairTests::NoNontrivialRanges >();
+ add< FieldRangeSetPairTests::MatchPossible >();
+ add< FieldRangeSetPairTests::MatchPossibleForIndex >();
+ }
+ } myall;
+
+} // namespace QueryUtilTests
+
diff --git a/dbtests/repltests.cpp b/dbtests/repltests.cpp
index c6ffba2..0b53d36 100644
--- a/dbtests/repltests.cpp
+++ b/dbtests/repltests.cpp
@@ -25,6 +25,8 @@
#include "../db/json.h"
#include "dbtests.h"
+#include "../db/oplog.h"
+#include "../db/queryoptimizer.h"
namespace mongo {
void createOplog();
@@ -1012,120 +1014,94 @@ namespace ReplTests {
ASSERT( !one( BSON( "_id" << 2 ) ).isEmpty() );
}
};
-
- class DbIdsTest {
+
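+        /** DatabaseIgnorer reports ignoreAt() true for optimes up to and including the target passed to doIgnoreUntilAfter(), after which the ignore state expires. */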
+ class DatabaseIgnorerBasic {
public:
void run() {
- Client::Context ctx( "unittests.repltest.DbIdsTest" );
-
- s_.reset( new DbIds( "local.temp.DbIdsTest" ) );
- s_->reset();
- check( false, false, false );
-
- s_->set( "a", BSON( "_id" << 4 ), true );
- check( true, false, false );
- s_->set( "a", BSON( "_id" << 4 ), false );
- check( false, false, false );
-
- s_->set( "b", BSON( "_id" << 4 ), true );
- check( false, true, false );
- s_->set( "b", BSON( "_id" << 4 ), false );
- check( false, false, false );
-
- s_->set( "a", BSON( "_id" << 5 ), true );
- check( false, false, true );
- s_->set( "a", BSON( "_id" << 5 ), false );
- check( false, false, false );
-
- s_->set( "a", BSON( "_id" << 4 ), true );
- s_->set( "b", BSON( "_id" << 4 ), true );
- s_->set( "a", BSON( "_id" << 5 ), true );
- check( true, true, true );
-
- s_->reset();
- check( false, false, false );
-
- s_->set( "a", BSON( "_id" << 4 ), true );
- s_->set( "a", BSON( "_id" << 4 ), true );
- check( true, false, false );
- s_->set( "a", BSON( "_id" << 4 ), false );
- check( false, false, false );
- }
- private:
- void check( bool one, bool two, bool three ) {
- ASSERT_EQUALS( one, s_->get( "a", BSON( "_id" << 4 ) ) );
- ASSERT_EQUALS( two, s_->get( "b", BSON( "_id" << 4 ) ) );
- ASSERT_EQUALS( three, s_->get( "a", BSON( "_id" << 5 ) ) );
+ DatabaseIgnorer d;
+ ASSERT( !d.ignoreAt( "a", OpTime( 4, 0 ) ) );
+ d.doIgnoreUntilAfter( "a", OpTime( 5, 0 ) );
+ ASSERT( d.ignoreAt( "a", OpTime( 4, 0 ) ) );
+ ASSERT( !d.ignoreAt( "b", OpTime( 4, 0 ) ) );
+ ASSERT( d.ignoreAt( "a", OpTime( 4, 10 ) ) );
+ ASSERT( d.ignoreAt( "a", OpTime( 5, 0 ) ) );
+ ASSERT( !d.ignoreAt( "a", OpTime( 5, 1 ) ) );
+ // Ignore state is expired.
+ ASSERT( !d.ignoreAt( "a", OpTime( 4, 0 ) ) );
}
- dblock lk_;
- auto_ptr< DbIds > s_;
};
- class MemIdsTest {
+ class DatabaseIgnorerUpdate {
public:
void run() {
- int n = sizeof( BSONObj ) + BSON( "_id" << 4 ).objsize();
-
- s_.reset();
- ASSERT_EQUALS( 0, s_.roughSize() );
- ASSERT( !s_.get( "a", BSON( "_id" << 4 ) ) );
- ASSERT( !s_.get( "b", BSON( "_id" << 4 ) ) );
- s_.set( "a", BSON( "_id" << 4 ), true );
- ASSERT_EQUALS( n, s_.roughSize() );
- ASSERT( s_.get( "a", BSON( "_id" << 4 ) ) );
- ASSERT( !s_.get( "b", BSON( "_id" << 4 ) ) );
- s_.set( "a", BSON( "_id" << 4 ), false );
- ASSERT_EQUALS( 0, s_.roughSize() );
- ASSERT( !s_.get( "a", BSON( "_id" << 4 ) ) );
-
- s_.set( "a", BSON( "_id" << 4 ), true );
- s_.set( "b", BSON( "_id" << 4 ), true );
- s_.set( "b", BSON( "_id" << 100 ), true );
- s_.set( "b", BSON( "_id" << 101 ), true );
- ASSERT_EQUALS( n * 4, s_.roughSize() );
+ DatabaseIgnorer d;
+ d.doIgnoreUntilAfter( "a", OpTime( 5, 0 ) );
+ d.doIgnoreUntilAfter( "a", OpTime( 6, 0 ) );
+ ASSERT( d.ignoreAt( "a", OpTime( 5, 5 ) ) );
+ ASSERT( d.ignoreAt( "a", OpTime( 6, 0 ) ) );
+ ASSERT( !d.ignoreAt( "a", OpTime( 6, 1 ) ) );
+
+ d.doIgnoreUntilAfter( "a", OpTime( 5, 0 ) );
+ d.doIgnoreUntilAfter( "a", OpTime( 6, 0 ) );
+ d.doIgnoreUntilAfter( "a", OpTime( 6, 0 ) );
+ d.doIgnoreUntilAfter( "a", OpTime( 5, 0 ) );
+ ASSERT( d.ignoreAt( "a", OpTime( 5, 5 ) ) );
+ ASSERT( d.ignoreAt( "a", OpTime( 6, 0 ) ) );
+ ASSERT( !d.ignoreAt( "a", OpTime( 6, 1 ) ) );
}
- private:
- MemIds s_;
};
-
- class IdTrackerTest {
+
+ /**
+ * Check against oldest document in the oplog before scanning backward
+ * from the newest document.
+ */
+ class FindingStartCursorStale : public Base {
public:
void run() {
- Client::Context ctx( "unittests.repltests.IdTrackerTest" );
-
- ASSERT( s_.inMem() );
- s_.reset( 4 * sizeof( BSONObj ) - 1 );
- s_.haveId( "a", BSON( "_id" << 0 ), true );
- s_.haveId( "a", BSON( "_id" << 1 ), true );
- s_.haveId( "b", BSON( "_id" << 0 ), true );
- s_.haveModId( "b", BSON( "_id" << 0 ), true );
- ASSERT( s_.inMem() );
- check();
- s_.mayUpgradeStorage();
- ASSERT( !s_.inMem() );
- check();
-
- s_.haveId( "a", BSON( "_id" << 1 ), false );
- ASSERT( !s_.haveId( "a", BSON( "_id" << 1 ) ) );
- s_.haveId( "a", BSON( "_id" << 1 ), true );
- check();
- ASSERT( !s_.inMem() );
-
- s_.reset( 4 * sizeof( BSONObj ) - 1 );
- s_.mayUpgradeStorage();
- ASSERT( s_.inMem() );
+ for( int i = 0; i < 10; ++i ) {
+ client()->insert( ns(), BSON( "_id" << i ) );
+ }
+ dblock lk;
+ Client::Context ctx( cllNS() );
+ NamespaceDetails *nsd = nsdetails( cllNS() );
+ BSONObjBuilder b;
+ b.appendTimestamp( "$gte" );
+ BSONObj query = BSON( "ts" << b.obj() );
+ FieldRangeSetPair frsp( cllNS(), query );
+ BSONObj order = BSON( "$natural" << 1 );
+ QueryPlan qp( nsd, -1, frsp, &frsp, query, order );
+ FindingStartCursor fsc( qp );
+ ASSERT( fsc.done() );
+ ASSERT_EQUALS( 0, fsc.cursor()->current()[ "o" ].Obj()[ "_id" ].Int() );
}
- private:
- void check() {
- ASSERT( s_.haveId( "a", BSON( "_id" << 0 ) ) );
- ASSERT( s_.haveId( "a", BSON( "_id" << 1 ) ) );
- ASSERT( s_.haveId( "b", BSON( "_id" << 0 ) ) );
- ASSERT( s_.haveModId( "b", BSON( "_id" << 0 ) ) );
+ };
+
+ /** Check unsuccessful yield recovery with FindingStartCursor */
+ class FindingStartCursorYield : public Base {
+ public:
+ void run() {
+ for( int i = 0; i < 10; ++i ) {
+ client()->insert( ns(), BSON( "_id" << i ) );
+ }
+ Date_t ts = client()->query( "local.oplog.$main", Query().sort( BSON( "$natural" << 1 ) ), 1, 4 )->next()[ "ts" ].date();
+ Client::Context ctx( cllNS() );
+ NamespaceDetails *nsd = nsdetails( cllNS() );
+ BSONObjBuilder b;
+ b.appendDate( "$gte", ts );
+ BSONObj query = BSON( "ts" << b.obj() );
+ FieldRangeSetPair frsp( cllNS(), query );
+ BSONObj order = BSON( "$natural" << 1 );
+ QueryPlan qp( nsd, -1, frsp, &frsp, query, order );
+ FindingStartCursor fsc( qp );
+ ASSERT( !fsc.done() );
+ fsc.next();
+ ASSERT( !fsc.done() );
+ ASSERT( fsc.prepareToYield() );
+ ClientCursor::invalidate( "local.oplog.$main" );
+ ASSERT_EXCEPTION( fsc.recoverFromYield(), MsgAssertionException );
}
- dblock lk_;
- IdTracker s_;
};
-
+
class All : public Suite {
public:
All() : Suite( "repl" ) {
@@ -1178,9 +1154,10 @@ namespace ReplTests {
add< Idempotence::RenameOverwrite >();
add< Idempotence::NoRename >();
add< DeleteOpIsIdBased >();
- add< DbIdsTest >();
- add< MemIdsTest >();
- add< IdTrackerTest >();
+ add< DatabaseIgnorerBasic >();
+ add< DatabaseIgnorerUpdate >();
+ add< FindingStartCursorStale >();
+ add< FindingStartCursorYield >();
}
} myall;
diff --git a/dbtests/socktests.cpp b/dbtests/socktests.cpp
index 5cd42f5..176db8c 100644
--- a/dbtests/socktests.cpp
+++ b/dbtests/socktests.cpp
@@ -18,7 +18,7 @@
*/
#include "pch.h"
-#include "../util/sock.h"
+#include "../util/net/sock.h"
#include "dbtests.h"
namespace SockTests {
diff --git a/dbtests/spin_lock_test.cpp b/dbtests/spin_lock_test.cpp
index 4b24aba..dbd637e 100644
--- a/dbtests/spin_lock_test.cpp
+++ b/dbtests/spin_lock_test.cpp
@@ -16,9 +16,8 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include "../pch.h"
+#include "pch.h"
#include <boost/thread/thread.hpp>
-
#include "dbtests.h"
#include "../util/concurrency/spin_lock.h"
@@ -70,8 +69,6 @@ namespace {
public:
void run() {
-#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) || defined(_WIN32)
-
SpinLock spin;
int counter = 0;
@@ -92,14 +89,8 @@ namespace {
}
ASSERT_EQUALS( counter, threads*incs );
-#else
- warning() << "spin lock slow on this platform" << endl;
-
#if defined(__linux__)
- // we don't want to have linux binaries without a fast spinlock
- //ASSERT( false ); TODO SERVER-3075
-#endif
-
+ ASSERT( SpinLock::isfast() );
#endif
}
diff --git a/dbtests/test.sln b/dbtests/test.sln
new file mode 100755
index 0000000..3a1b741
--- /dev/null
+++ b/dbtests/test.sln
@@ -0,0 +1,26 @@
+
+Microsoft Visual Studio Solution File, Format Version 11.00
+# Visual Studio 2010
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "test", "test.vcxproj", "{215B2D68-0A70-4D10-8E75-B33010C62A91}"
+EndProject
+Global
+ GlobalSection(SolutionConfigurationPlatforms) = preSolution
+ Debug|Win32 = Debug|Win32
+ Debug|x64 = Debug|x64
+ Release|Win32 = Release|Win32
+ Release|x64 = Release|x64
+ EndGlobalSection
+ GlobalSection(ProjectConfigurationPlatforms) = postSolution
+ {215B2D68-0A70-4D10-8E75-B33010C62A91}.Debug|Win32.ActiveCfg = Debug|Win32
+ {215B2D68-0A70-4D10-8E75-B33010C62A91}.Debug|Win32.Build.0 = Debug|Win32
+ {215B2D68-0A70-4D10-8E75-B33010C62A91}.Debug|x64.ActiveCfg = Debug|x64
+ {215B2D68-0A70-4D10-8E75-B33010C62A91}.Debug|x64.Build.0 = Debug|x64
+ {215B2D68-0A70-4D10-8E75-B33010C62A91}.Release|Win32.ActiveCfg = Release|Win32
+ {215B2D68-0A70-4D10-8E75-B33010C62A91}.Release|Win32.Build.0 = Release|Win32
+ {215B2D68-0A70-4D10-8E75-B33010C62A91}.Release|x64.ActiveCfg = Release|x64
+ {215B2D68-0A70-4D10-8E75-B33010C62A91}.Release|x64.Build.0 = Release|x64
+ EndGlobalSection
+ GlobalSection(SolutionProperties) = preSolution
+ HideSolutionNode = FALSE
+ EndGlobalSection
+EndGlobal
diff --git a/dbtests/test.vcxproj b/dbtests/test.vcxproj
index b80a730..a4987d9 100644
--- a/dbtests/test.vcxproj
+++ b/dbtests/test.vcxproj
@@ -1,712 +1,776 @@
-<?xml version="1.0" encoding="utf-8"?>
-<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
- <ItemGroup Label="ProjectConfigurations">
- <ProjectConfiguration Include="Debug|Win32">
- <Configuration>Debug</Configuration>
- <Platform>Win32</Platform>
- </ProjectConfiguration>
- <ProjectConfiguration Include="Debug|x64">
- <Configuration>Debug</Configuration>
- <Platform>x64</Platform>
- </ProjectConfiguration>
- <ProjectConfiguration Include="Release|Win32">
- <Configuration>Release</Configuration>
- <Platform>Win32</Platform>
- </ProjectConfiguration>
- <ProjectConfiguration Include="Release|x64">
- <Configuration>Release</Configuration>
- <Platform>x64</Platform>
- </ProjectConfiguration>
- </ItemGroup>
- <PropertyGroup Label="Globals">
- <ProjectGuid>{215B2D68-0A70-4D10-8E75-B33010C62A91}</ProjectGuid>
- <RootNamespace>dbtests</RootNamespace>
- <Keyword>Win32Proj</Keyword>
- </PropertyGroup>
- <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
- <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
- <ConfigurationType>Application</ConfigurationType>
- <CharacterSet>Unicode</CharacterSet>
- <WholeProgramOptimization>true</WholeProgramOptimization>
- </PropertyGroup>
- <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
- <ConfigurationType>Application</ConfigurationType>
- <CharacterSet>Unicode</CharacterSet>
- <WholeProgramOptimization>true</WholeProgramOptimization>
- </PropertyGroup>
- <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
- <ConfigurationType>Application</ConfigurationType>
- <UseOfMfc>false</UseOfMfc>
- <UseOfAtl>false</UseOfAtl>
- <CharacterSet>Unicode</CharacterSet>
- </PropertyGroup>
- <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
- <ConfigurationType>Application</ConfigurationType>
- <UseOfMfc>false</UseOfMfc>
- <UseOfAtl>false</UseOfAtl>
- <CharacterSet>Unicode</CharacterSet>
- </PropertyGroup>
- <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
- <ImportGroup Label="ExtensionSettings">
- </ImportGroup>
- <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="PropertySheets">
- <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
- </ImportGroup>
- <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
- <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
- </ImportGroup>
- <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="PropertySheets">
- <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
- </ImportGroup>
- <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
- <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
- </ImportGroup>
- <PropertyGroup Label="UserMacros" />
- <PropertyGroup>
- <_ProjectFileVersion>10.0.30319.1</_ProjectFileVersion>
- <OutDir Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">$(SolutionDir)$(Configuration)\</OutDir>
- <OutDir Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">$(SolutionDir)$(Configuration)\</OutDir>
- <IntDir Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">$(Configuration)\</IntDir>
- <IntDir Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">$(Configuration)\</IntDir>
- <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">false</LinkIncremental>
- <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</LinkIncremental>
- <OutDir Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">$(SolutionDir)$(Configuration)\</OutDir>
- <OutDir Condition="'$(Configuration)|$(Platform)'=='Release|x64'">$(SolutionDir)$(Configuration)\</OutDir>
- <IntDir Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">$(Configuration)\</IntDir>
- <IntDir Condition="'$(Configuration)|$(Platform)'=='Release|x64'">$(Configuration)\</IntDir>
- <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">false</LinkIncremental>
- <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Release|x64'">false</LinkIncremental>
- <CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">AllRules.ruleset</CodeAnalysisRuleSet>
- <CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">AllRules.ruleset</CodeAnalysisRuleSet>
- <CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" />
- <CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" />
- <CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" />
- <CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" />
- <CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">AllRules.ruleset</CodeAnalysisRuleSet>
- <CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Release|x64'">AllRules.ruleset</CodeAnalysisRuleSet>
- <CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" />
- <CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Release|x64'" />
- <CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" />
- <CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Release|x64'" />
- <IncludePath Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">..;$(IncludePath)</IncludePath>
- <IncludePath Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">..;$(IncludePath)</IncludePath>
- <IncludePath Condition="'$(Configuration)|$(Platform)'=='Release|x64'">..;$(IncludePath)</IncludePath>
- <IncludePath Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">..;$(IncludePath)</IncludePath>
- </PropertyGroup>
- <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- <ClCompile>
- <Optimization>Disabled</Optimization>
- <AdditionalIncludeDirectories>..\..\js\src;..\pcre-7.4;C:\boost;\boost;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
- <PreprocessorDefinitions>_UNICODE;UNICODE;SUPPORT_UCP;SUPPORT_UTF8;MONGO_EXPOSE_MACROS;OLDJS;STATIC_JS_API;XP_WIN;WIN32;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;HAVE_CONFIG_H;PCRE_STATIC;%(PreprocessorDefinitions)</PreprocessorDefinitions>
- <MinimalRebuild>No</MinimalRebuild>
- <BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>
- <RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary>
- <PrecompiledHeader>Use</PrecompiledHeader>
- <PrecompiledHeaderFile>pch.h</PrecompiledHeaderFile>
- <WarningLevel>Level3</WarningLevel>
- <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
- <DisableSpecificWarnings>4355;4800;%(DisableSpecificWarnings)</DisableSpecificWarnings>
- <MultiProcessorCompilation>true</MultiProcessorCompilation>
- </ClCompile>
- <Link>
- <AdditionalDependencies>ws2_32.lib;Psapi.lib;%(AdditionalDependencies)</AdditionalDependencies>
- <AdditionalLibraryDirectories>c:\boost\lib\vs2010_32;\boost\lib\vs2010_32;\boost\lib</AdditionalLibraryDirectories>
- <IgnoreAllDefaultLibraries>false</IgnoreAllDefaultLibraries>
- <IgnoreSpecificDefaultLibraries>%(IgnoreSpecificDefaultLibraries)</IgnoreSpecificDefaultLibraries>
- <GenerateDebugInformation>true</GenerateDebugInformation>
- <SubSystem>Console</SubSystem>
- <TargetMachine>MachineX86</TargetMachine>
- <Profile>true</Profile>
- </Link>
- </ItemDefinitionGroup>
- <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- <ClCompile>
- <Optimization>Disabled</Optimization>
- <AdditionalIncludeDirectories>..\..\js\src;..\pcre-7.4;C:\boost;\boost;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
- <PreprocessorDefinitions>_DURABLE;_UNICODE;UNICODE;SUPPORT_UCP;SUPPORT_UTF8;MONGO_EXPOSE_MACROS;OLDJS;STATIC_JS_API;XP_WIN;WIN32;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;HAVE_CONFIG_H;PCRE_STATIC;%(PreprocessorDefinitions)</PreprocessorDefinitions>
- <BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>
- <RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary>
- <PrecompiledHeader>Use</PrecompiledHeader>
- <PrecompiledHeaderFile>pch.h</PrecompiledHeaderFile>
- <WarningLevel>Level3</WarningLevel>
- <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
- <DisableSpecificWarnings>4355;4800;4267;4244;%(DisableSpecificWarnings)</DisableSpecificWarnings>
- <MinimalRebuild>No</MinimalRebuild>
- <MultiProcessorCompilation>true</MultiProcessorCompilation>
- </ClCompile>
- <Link>
- <AdditionalDependencies>ws2_32.lib;Psapi.lib;%(AdditionalDependencies)</AdditionalDependencies>
- <AdditionalLibraryDirectories>c:\boost\lib\vs2010_64;\boost\lib\vs2010_64;\boost\lib</AdditionalLibraryDirectories>
- <IgnoreAllDefaultLibraries>false</IgnoreAllDefaultLibraries>
- <IgnoreSpecificDefaultLibraries>%(IgnoreSpecificDefaultLibraries)</IgnoreSpecificDefaultLibraries>
- <GenerateDebugInformation>true</GenerateDebugInformation>
- <SubSystem>Console</SubSystem>
- </Link>
- </ItemDefinitionGroup>
- <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- <ClCompile>
- <Optimization>MaxSpeed</Optimization>
- <IntrinsicFunctions>true</IntrinsicFunctions>
- <AdditionalIncludeDirectories>..\..\js\src;..\pcre-7.4;C:\boost;\boost;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
- <PreprocessorDefinitions>_UNICODE;UNICODE;SUPPORT_UCP;SUPPORT_UTF8;MONGO_EXPOSE_MACROS;OLDJS;STATIC_JS_API;XP_WIN;WIN32;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;HAVE_CONFIG_H;PCRE_STATIC;%(PreprocessorDefinitions)</PreprocessorDefinitions>
- <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
- <FunctionLevelLinking>true</FunctionLevelLinking>
- <PrecompiledHeader>Use</PrecompiledHeader>
- <PrecompiledHeaderFile>pch.h</PrecompiledHeaderFile>
- <WarningLevel>Level3</WarningLevel>
- <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
- <DisableSpecificWarnings>4355;4800;%(DisableSpecificWarnings)</DisableSpecificWarnings>
- <MinimalRebuild>No</MinimalRebuild>
- <MultiProcessorCompilation>true</MultiProcessorCompilation>
- </ClCompile>
- <Link>
- <AdditionalDependencies>ws2_32.lib;psapi.lib;%(AdditionalDependencies)</AdditionalDependencies>
- <AdditionalLibraryDirectories>c:\boost\lib\vs2010_32;\boost\lib\vs2010_32;\boost\lib</AdditionalLibraryDirectories>
- <GenerateDebugInformation>true</GenerateDebugInformation>
- <SubSystem>Console</SubSystem>
- <OptimizeReferences>true</OptimizeReferences>
- <EnableCOMDATFolding>true</EnableCOMDATFolding>
- <TargetMachine>MachineX86</TargetMachine>
- </Link>
- </ItemDefinitionGroup>
- <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- <ClCompile>
- <Optimization>MaxSpeed</Optimization>
- <IntrinsicFunctions>true</IntrinsicFunctions>
- <AdditionalIncludeDirectories>..\..\js\src;..\pcre-7.4;C:\boost;\boost;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
- <PreprocessorDefinitions>_UNICODE;UNICODE;SUPPORT_UCP;SUPPORT_UTF8;MONGO_EXPOSE_MACROS;OLDJS;STATIC_JS_API;XP_WIN;WIN32;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;HAVE_CONFIG_H;PCRE_STATIC;%(PreprocessorDefinitions)</PreprocessorDefinitions>
- <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
- <FunctionLevelLinking>true</FunctionLevelLinking>
- <PrecompiledHeader>Use</PrecompiledHeader>
- <PrecompiledHeaderFile>pch.h</PrecompiledHeaderFile>
- <WarningLevel>Level3</WarningLevel>
- <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
- <DisableSpecificWarnings>4355;4800;4267;4244;%(DisableSpecificWarnings)</DisableSpecificWarnings>
- <MinimalRebuild>No</MinimalRebuild>
- <MultiProcessorCompilation>true</MultiProcessorCompilation>
- </ClCompile>
- <Link>
- <AdditionalDependencies>ws2_32.lib;psapi.lib;%(AdditionalDependencies)</AdditionalDependencies>
- <AdditionalLibraryDirectories>c:\boost\lib\vs2010_64;\boost\lib\vs2010_64;\boost\lib</AdditionalLibraryDirectories>
- <GenerateDebugInformation>true</GenerateDebugInformation>
- <SubSystem>Console</SubSystem>
- <OptimizeReferences>true</OptimizeReferences>
- <EnableCOMDATFolding>true</EnableCOMDATFolding>
- </Link>
- </ItemDefinitionGroup>
- <ItemGroup>
- <ClInclude Include="..\..\boostw\boost_1_34_1\boost\config\auto_link.hpp" />
- <ClInclude Include="..\db\dur.h" />
- <ClInclude Include="..\db\durop.h" />
- <ClInclude Include="..\db\dur_journal.h" />
- <ClInclude Include="..\db\jsobjmanipulator.h" />
- <ClInclude Include="..\db\mongommf.h" />
- <ClInclude Include="..\db\mongomutex.h" />
- <ClInclude Include="..\pcre-7.4\pcrecpp.h" />
- <ClInclude Include="..\targetver.h" />
- <ClInclude Include="..\..\boostw\boost_1_34_1\boost\version.hpp" />
- <ClInclude Include="..\pcre-7.4\config.h" />
- <ClInclude Include="..\pcre-7.4\pcre.h" />
- <ClInclude Include="..\client\connpool.h" />
- <ClInclude Include="..\client\dbclient.h" />
- <ClInclude Include="..\client\model.h" />
- <ClInclude Include="..\db\btree.h" />
- <ClInclude Include="..\db\clientcursor.h" />
- <ClInclude Include="..\db\cmdline.h" />
- <ClInclude Include="..\db\commands.h" />
- <ClInclude Include="..\db\concurrency.h" />
- <ClInclude Include="..\db\curop.h" />
- <ClInclude Include="..\db\cursor.h" />
- <ClInclude Include="..\db\database.h" />
- <ClInclude Include="..\db\db.h" />
- <ClInclude Include="..\db\dbhelpers.h" />
- <ClInclude Include="..\db\dbinfo.h" />
- <ClInclude Include="..\db\dbmessage.h" />
- <ClInclude Include="..\db\diskloc.h" />
- <ClInclude Include="..\db\extsort.h" />
- <ClInclude Include="..\db\introspect.h" />
- <ClInclude Include="..\db\jsobj.h" />
- <ClInclude Include="..\db\json.h" />
- <ClInclude Include="..\db\matcher.h" />
- <ClInclude Include="..\grid\message.h" />
- <ClInclude Include="..\db\minilex.h" />
- <ClInclude Include="..\db\namespace.h" />
- <ClInclude Include="..\pch.h" />
- <ClInclude Include="..\db\pdfile.h" />
- <ClInclude Include="..\grid\protocol.h" />
- <ClInclude Include="..\db\query.h" />
- <ClInclude Include="..\db\queryoptimizer.h" />
- <ClInclude Include="..\db\repl.h" />
- <ClInclude Include="..\db\replset.h" />
- <ClInclude Include="..\db\resource.h" />
- <ClInclude Include="..\db\scanandorder.h" />
- <ClInclude Include="..\db\security.h" />
- <ClInclude Include="..\util\builder.h" />
- <ClInclude Include="..\util\concurrency\list.h" />
- <ClInclude Include="..\util\concurrency\task.h" />
- <ClInclude Include="..\util\concurrency\value.h" />
- <ClInclude Include="..\util\file.h" />
- <ClInclude Include="..\util\goodies.h" />
- <ClInclude Include="..\util\hashtab.h" />
- <ClInclude Include="..\db\lasterror.h" />
- <ClInclude Include="..\util\log.h" />
- <ClInclude Include="..\util\logfile.h" />
- <ClInclude Include="..\util\lruishmap.h" />
- <ClInclude Include="..\util\md5.h" />
- <ClInclude Include="..\util\md5.hpp" />
- <ClInclude Include="..\util\miniwebserver.h" />
- <ClInclude Include="..\util\mmap.h" />
- <ClInclude Include="..\util\sock.h" />
- <ClInclude Include="..\util\unittest.h" />
- </ItemGroup>
- <ItemGroup>
- <ClCompile Include="..\bson\oid.cpp" />
- <ClCompile Include="..\client\dbclientcursor.cpp" />
- <ClCompile Include="..\client\dbclient_rs.cpp" />
- <ClCompile Include="..\client\distlock.cpp" />
- <ClCompile Include="..\client\gridfs.cpp" />
- <ClCompile Include="..\client\model.cpp" />
- <ClCompile Include="..\client\parallel.cpp" />
- <ClCompile Include="..\db\cap.cpp" />
- <ClCompile Include="..\db\commands\isself.cpp" />
- <ClCompile Include="..\db\compact.cpp" />
- <ClCompile Include="..\db\dbcommands_generic.cpp" />
- <ClCompile Include="..\db\dur.cpp" />
- <ClCompile Include="..\db\durop.cpp" />
- <ClCompile Include="..\db\dur_commitjob.cpp" />
- <ClCompile Include="..\db\dur_journal.cpp" />
- <ClCompile Include="..\db\dur_preplogbuffer.cpp" />
- <ClCompile Include="..\db\dur_recover.cpp" />
- <ClCompile Include="..\db\dur_writetodatafiles.cpp" />
- <ClCompile Include="..\db\geo\2d.cpp" />
- <ClCompile Include="..\db\geo\haystack.cpp" />
- <ClCompile Include="..\db\mongommf.cpp" />
- <ClCompile Include="..\db\projection.cpp" />
- <ClCompile Include="..\db\repl\consensus.cpp" />
- <ClCompile Include="..\db\repl\heartbeat.cpp" />
- <ClCompile Include="..\db\repl\manager.cpp" />
- <ClCompile Include="..\db\repl\rs.cpp" />
- <ClCompile Include="..\db\repl\rs_initialsync.cpp" />
- <ClCompile Include="..\db\repl\rs_initiate.cpp" />
- <ClCompile Include="..\db\repl\rs_rollback.cpp" />
- <ClCompile Include="..\db\repl\rs_sync.cpp" />
- <ClCompile Include="..\db\restapi.cpp" />
- <ClCompile Include="..\db\security_key.cpp" />
- <ClCompile Include="..\pcre-7.4\pcrecpp.cc">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_chartables.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_compile.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_config.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_dfa_exec.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_exec.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_fullinfo.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_get.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_globals.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_info.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_maketables.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_newline.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_ord2utf8.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_refcount.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_scanner.cc">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_stringpiece.cc">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_study.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_tables.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_try_flipped.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_ucp_searchfuncs.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_valid_utf8.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_version.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_xclass.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcreposix.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\client\connpool.cpp" />
- <ClCompile Include="..\client\dbclient.cpp" />
- <ClCompile Include="..\client\syncclusterconnection.cpp" />
- <ClCompile Include="..\db\btree.cpp" />
- <ClCompile Include="..\db\btreecursor.cpp" />
- <ClCompile Include="..\pch.cpp">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">Create</PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">Create</PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">Create</PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">Create</PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\db\client.cpp" />
- <ClCompile Include="..\db\clientcursor.cpp" />
- <ClCompile Include="..\db\cloner.cpp" />
- <ClCompile Include="..\db\commands.cpp" />
- <ClCompile Include="..\db\common.cpp" />
- <ClCompile Include="..\db\cursor.cpp" />
- <ClCompile Include="..\db\database.cpp" />
- <ClCompile Include="..\db\dbcommands.cpp" />
- <ClCompile Include="..\db\dbeval.cpp" />
- <ClCompile Include="..\db\dbhelpers.cpp" />
- <ClCompile Include="..\db\dbwebserver.cpp" />
- <ClCompile Include="..\db\extsort.cpp" />
- <ClCompile Include="..\db\index.cpp" />
- <ClCompile Include="..\db\indexkey.cpp" />
- <ClCompile Include="..\db\instance.cpp" />
- <ClCompile Include="..\db\introspect.cpp" />
- <ClCompile Include="..\db\jsobj.cpp" />
- <ClCompile Include="..\db\json.cpp" />
- <ClCompile Include="..\db\lasterror.cpp" />
- <ClCompile Include="..\db\matcher.cpp" />
- <ClCompile Include="..\scripting\bench.cpp" />
- <ClCompile Include="..\s\chunk.cpp" />
- <ClCompile Include="..\s\config.cpp" />
- <ClCompile Include="..\s\d_chunk_manager.cpp" />
- <ClCompile Include="..\s\d_migrate.cpp" />
- <ClCompile Include="..\s\d_split.cpp" />
- <ClCompile Include="..\s\d_state.cpp" />
- <ClCompile Include="..\s\d_writeback.cpp" />
- <ClCompile Include="..\s\grid.cpp" />
- <ClCompile Include="..\s\shard.cpp" />
- <ClCompile Include="..\s\shardconnection.cpp" />
- <ClCompile Include="..\s\shardkey.cpp" />
- <ClCompile Include="..\util\alignedbuilder.cpp">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\util\concurrency\spin_lock.cpp" />
- <ClCompile Include="..\util\concurrency\synchronization.cpp" />
- <ClCompile Include="..\util\concurrency\task.cpp" />
- <ClCompile Include="..\util\concurrency\thread_pool.cpp" />
- <ClCompile Include="..\util\concurrency\vars.cpp" />
- <ClCompile Include="..\util\file_allocator.cpp" />
- <ClCompile Include="..\util\log.cpp" />
- <ClCompile Include="..\util\logfile.cpp" />
- <ClCompile Include="..\util\mmap_win.cpp" />
- <ClCompile Include="..\db\namespace.cpp" />
- <ClCompile Include="..\db\nonce.cpp" />
- <ClCompile Include="..\db\pdfile.cpp" />
- <ClCompile Include="..\db\query.cpp" />
- <ClCompile Include="..\db\queryoptimizer.cpp" />
- <ClCompile Include="..\util\processinfo.cpp" />
- <ClCompile Include="..\db\repl.cpp" />
- <ClCompile Include="..\db\security.cpp" />
- <ClCompile Include="..\db\security_commands.cpp" />
- <ClCompile Include="..\db\tests.cpp" />
- <ClCompile Include="..\db\update.cpp" />
- <ClCompile Include="..\db\cmdline.cpp" />
- <ClCompile Include="..\db\matcher_covered.cpp" />
- <ClCompile Include="..\db\oplog.cpp" />
- <ClCompile Include="..\db\queryutil.cpp" />
- <ClCompile Include="..\db\repl_block.cpp" />
- <ClCompile Include="..\util\assert_util.cpp" />
- <ClCompile Include="..\util\background.cpp" />
- <ClCompile Include="..\util\base64.cpp" />
- <ClCompile Include="..\util\httpclient.cpp" />
- <ClCompile Include="..\util\md5.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- <PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeaderFile>
- <PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeaderFile>
- </ClCompile>
- <ClCompile Include="..\util\md5main.cpp" />
- <ClCompile Include="..\util\message.cpp" />
- <ClCompile Include="..\util\message_server_port.cpp" />
- <ClCompile Include="..\util\miniwebserver.cpp" />
- <ClCompile Include="..\util\mmap.cpp" />
- <ClCompile Include="..\util\processinfo_win32.cpp" />
- <ClCompile Include="..\util\sock.cpp" />
- <ClCompile Include="..\util\stringutils.cpp" />
- <ClCompile Include="..\util\text.cpp" />
- <ClCompile Include="..\util\util.cpp" />
- <ClCompile Include="..\s\d_logic.cpp" />
- <ClCompile Include="..\scripting\engine.cpp" />
- <ClCompile Include="..\scripting\engine_spidermonkey.cpp" />
- <ClCompile Include="..\shell\mongo_vstudio.cpp">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\scripting\utils.cpp" />
- <ClCompile Include="..\util\version.cpp" />
- <ClCompile Include="basictests.cpp" />
- <ClCompile Include="btreetests.cpp" />
- <ClCompile Include="clienttests.cpp" />
- <ClCompile Include="cursortests.cpp" />
- <ClCompile Include="dbtests.cpp" />
- <ClCompile Include="directclienttests.cpp" />
- <ClCompile Include="framework.cpp" />
- <ClCompile Include="jsobjtests.cpp" />
- <ClCompile Include="jsontests.cpp" />
- <ClCompile Include="jstests.cpp" />
- <ClCompile Include="matchertests.cpp" />
- <ClCompile Include="mmaptests.cpp" />
- <ClCompile Include="namespacetests.cpp" />
- <ClCompile Include="pairingtests.cpp" />
- <ClCompile Include="pdfiletests.cpp" />
- <ClCompile Include="perftests.cpp" />
- <ClCompile Include="queryoptimizertests.cpp" />
- <ClCompile Include="querytests.cpp" />
- <ClCompile Include="repltests.cpp" />
- <ClCompile Include="socktests.cpp" />
- <ClCompile Include="threadedtests.cpp">
- <DisableSpecificWarnings Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">4180;%(DisableSpecificWarnings)</DisableSpecificWarnings>
- <DisableSpecificWarnings Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">4180;%(DisableSpecificWarnings)</DisableSpecificWarnings>
- </ClCompile>
- <ClCompile Include="updatetests.cpp" />
- <ClCompile Include="..\db\stats\counters.cpp" />
- <ClCompile Include="..\db\stats\snapshots.cpp" />
- <ClCompile Include="..\db\stats\top.cpp" />
- <ClCompile Include="..\db\repl\health.cpp" />
- <ClCompile Include="..\db\repl\replset_commands.cpp" />
- <ClCompile Include="..\db\repl\rs_config.cpp" />
- </ItemGroup>
- <ItemGroup>
- <None Include="..\SConstruct" />
- </ItemGroup>
- <ItemGroup>
- <Library Include="..\..\js\js32d.lib">
- <FileType>Document</FileType>
- <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
- <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild>
- <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>
- </Library>
- <Library Include="..\..\js\js32r.lib">
- <FileType>Document</FileType>
- <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
- <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
- <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild>
- </Library>
- <Library Include="..\..\js\js64d.lib">
- <FileType>Document</FileType>
- <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
- <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild>
- <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>
- </Library>
- <Library Include="..\..\js\js64r.lib">
- <FileType>Document</FileType>
- <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
- <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
- <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>
- </Library>
- </ItemGroup>
- <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
- <ImportGroup Label="ExtensionTargets">
- </ImportGroup>
+<?xml version="1.0" encoding="utf-8"?>
+<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <ItemGroup Label="ProjectConfigurations">
+ <ProjectConfiguration Include="Debug|Win32">
+ <Configuration>Debug</Configuration>
+ <Platform>Win32</Platform>
+ </ProjectConfiguration>
+ <ProjectConfiguration Include="Debug|x64">
+ <Configuration>Debug</Configuration>
+ <Platform>x64</Platform>
+ </ProjectConfiguration>
+ <ProjectConfiguration Include="Release|Win32">
+ <Configuration>Release</Configuration>
+ <Platform>Win32</Platform>
+ </ProjectConfiguration>
+ <ProjectConfiguration Include="Release|x64">
+ <Configuration>Release</Configuration>
+ <Platform>x64</Platform>
+ </ProjectConfiguration>
+ </ItemGroup>
+ <PropertyGroup Label="Globals">
+ <ProjectGuid>{215B2D68-0A70-4D10-8E75-B33010C62A91}</ProjectGuid>
+ <RootNamespace>dbtests</RootNamespace>
+ <Keyword>Win32Proj</Keyword>
+ </PropertyGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <CharacterSet>Unicode</CharacterSet>
+ <WholeProgramOptimization>true</WholeProgramOptimization>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <CharacterSet>Unicode</CharacterSet>
+ <WholeProgramOptimization>true</WholeProgramOptimization>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <UseOfMfc>false</UseOfMfc>
+ <UseOfAtl>false</UseOfAtl>
+ <CharacterSet>Unicode</CharacterSet>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <UseOfMfc>false</UseOfMfc>
+ <UseOfAtl>false</UseOfAtl>
+ <CharacterSet>Unicode</CharacterSet>
+ </PropertyGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
+ <ImportGroup Label="ExtensionSettings">
+ </ImportGroup>
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="PropertySheets">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="PropertySheets">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <PropertyGroup Label="UserMacros" />
+ <PropertyGroup>
+ <_ProjectFileVersion>10.0.30319.1</_ProjectFileVersion>
+ <OutDir Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">$(SolutionDir)$(Configuration)\</OutDir>
+ <OutDir Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">$(SolutionDir)$(Configuration)\</OutDir>
+ <IntDir Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">$(Configuration)\</IntDir>
+ <IntDir Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">$(Configuration)\</IntDir>
+ <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">false</LinkIncremental>
+ <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</LinkIncremental>
+ <OutDir Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">$(SolutionDir)$(Configuration)\</OutDir>
+ <OutDir Condition="'$(Configuration)|$(Platform)'=='Release|x64'">$(SolutionDir)$(Configuration)\</OutDir>
+ <IntDir Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">$(Configuration)\</IntDir>
+ <IntDir Condition="'$(Configuration)|$(Platform)'=='Release|x64'">$(Configuration)\</IntDir>
+ <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">false</LinkIncremental>
+ <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Release|x64'">false</LinkIncremental>
+ <CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">AllRules.ruleset</CodeAnalysisRuleSet>
+ <CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">AllRules.ruleset</CodeAnalysisRuleSet>
+ <CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" />
+ <CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" />
+ <CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" />
+ <CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" />
+ <CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">AllRules.ruleset</CodeAnalysisRuleSet>
+ <CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Release|x64'">AllRules.ruleset</CodeAnalysisRuleSet>
+ <CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" />
+ <CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Release|x64'" />
+ <CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" />
+ <CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Release|x64'" />
+ <IncludePath Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">..;$(IncludePath)</IncludePath>
+ <IncludePath Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">..;$(IncludePath)</IncludePath>
+ <IncludePath Condition="'$(Configuration)|$(Platform)'=='Release|x64'">..;$(IncludePath)</IncludePath>
+ <IncludePath Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">..;$(IncludePath)</IncludePath>
+ </PropertyGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ <ClCompile>
+ <Optimization>Disabled</Optimization>
+ <AdditionalIncludeDirectories>..\..\js\src;..\third_party\pcre-7.4;C:\boost;\boost;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+ <PreprocessorDefinitions>_UNICODE;UNICODE;MONGO_EXPOSE_MACROS;OLDJS;STATIC_JS_API;XP_WIN;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;HAVE_CONFIG_H;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <MinimalRebuild>No</MinimalRebuild>
+ <BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>
+ <RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary>
+ <PrecompiledHeader>Use</PrecompiledHeader>
+ <PrecompiledHeaderFile>pch.h</PrecompiledHeaderFile>
+ <WarningLevel>Level3</WarningLevel>
+ <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
+ <DisableSpecificWarnings>4355;4800;%(DisableSpecificWarnings)</DisableSpecificWarnings>
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>
+ </ClCompile>
+ <Link>
+ <AdditionalDependencies>ws2_32.lib;Psapi.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ <AdditionalLibraryDirectories>c:\boost\lib\vs2010_32;\boost\lib\vs2010_32;\boost\lib</AdditionalLibraryDirectories>
+ <IgnoreAllDefaultLibraries>false</IgnoreAllDefaultLibraries>
+ <IgnoreSpecificDefaultLibraries>%(IgnoreSpecificDefaultLibraries)</IgnoreSpecificDefaultLibraries>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <SubSystem>Console</SubSystem>
+ <TargetMachine>MachineX86</TargetMachine>
+ <Profile>true</Profile>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ <ClCompile>
+ <Optimization>Disabled</Optimization>
+ <AdditionalIncludeDirectories>..\..\js\src;..\third_party\pcre-7.4;C:\boost;\boost;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+ <PreprocessorDefinitions>_DURABLE;_UNICODE;UNICODE;MONGO_EXPOSE_MACROS;OLDJS;STATIC_JS_API;XP_WIN;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;HAVE_CONFIG_H;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>
+ <RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary>
+ <PrecompiledHeader>Use</PrecompiledHeader>
+ <PrecompiledHeaderFile>pch.h</PrecompiledHeaderFile>
+ <WarningLevel>Level3</WarningLevel>
+ <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
+ <DisableSpecificWarnings>4355;4800;4267;4244;%(DisableSpecificWarnings)</DisableSpecificWarnings>
+ <MinimalRebuild>No</MinimalRebuild>
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>
+ </ClCompile>
+ <Link>
+ <AdditionalDependencies>ws2_32.lib;Psapi.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ <AdditionalLibraryDirectories>c:\boost\lib\vs2010_64;\boost\lib\vs2010_64;\boost\lib</AdditionalLibraryDirectories>
+ <IgnoreAllDefaultLibraries>false</IgnoreAllDefaultLibraries>
+ <IgnoreSpecificDefaultLibraries>%(IgnoreSpecificDefaultLibraries)</IgnoreSpecificDefaultLibraries>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <SubSystem>Console</SubSystem>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ <ClCompile>
+ <Optimization>MaxSpeed</Optimization>
+ <IntrinsicFunctions>true</IntrinsicFunctions>
+ <AdditionalIncludeDirectories>..\..\js\src;..\third_party\pcre-7.4;C:\boost;\boost;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+ <PreprocessorDefinitions>_UNICODE;UNICODE;MONGO_EXPOSE_MACROS;OLDJS;STATIC_JS_API;XP_WIN;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;HAVE_CONFIG_H;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
+ <FunctionLevelLinking>true</FunctionLevelLinking>
+ <PrecompiledHeader>Use</PrecompiledHeader>
+ <PrecompiledHeaderFile>pch.h</PrecompiledHeaderFile>
+ <WarningLevel>Level3</WarningLevel>
+ <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
+ <DisableSpecificWarnings>4355;4800;%(DisableSpecificWarnings)</DisableSpecificWarnings>
+ <MinimalRebuild>No</MinimalRebuild>
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>
+ </ClCompile>
+ <Link>
+ <AdditionalDependencies>ws2_32.lib;psapi.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ <AdditionalLibraryDirectories>c:\boost\lib\vs2010_32;\boost\lib\vs2010_32;\boost\lib</AdditionalLibraryDirectories>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <SubSystem>Console</SubSystem>
+ <OptimizeReferences>true</OptimizeReferences>
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>
+ <TargetMachine>MachineX86</TargetMachine>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ <ClCompile>
+ <Optimization>MaxSpeed</Optimization>
+ <IntrinsicFunctions>true</IntrinsicFunctions>
+ <AdditionalIncludeDirectories>..\..\js\src;..\third_party\pcre-7.4;C:\boost;\boost;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+ <PreprocessorDefinitions>_UNICODE;UNICODE;MONGO_EXPOSE_MACROS;OLDJS;STATIC_JS_API;XP_WIN;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;HAVE_CONFIG_H;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
+ <FunctionLevelLinking>true</FunctionLevelLinking>
+ <PrecompiledHeader>Use</PrecompiledHeader>
+ <PrecompiledHeaderFile>pch.h</PrecompiledHeaderFile>
+ <WarningLevel>Level3</WarningLevel>
+ <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
+ <DisableSpecificWarnings>4355;4800;4267;4244;%(DisableSpecificWarnings)</DisableSpecificWarnings>
+ <MinimalRebuild>No</MinimalRebuild>
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>
+ </ClCompile>
+ <Link>
+ <AdditionalDependencies>ws2_32.lib;psapi.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ <AdditionalLibraryDirectories>c:\boost\lib\vs2010_64;\boost\lib\vs2010_64;\boost\lib</AdditionalLibraryDirectories>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <SubSystem>Console</SubSystem>
+ <OptimizeReferences>true</OptimizeReferences>
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemGroup>
+ <ClInclude Include="..\..\boostw\boost_1_34_1\boost\config\auto_link.hpp" />
+ <ClInclude Include="..\bson\bson-inl.h" />
+ <ClInclude Include="..\bson\bson.h" />
+ <ClInclude Include="..\bson\bsonelement.h" />
+ <ClInclude Include="..\bson\bsonmisc.h" />
+ <ClInclude Include="..\bson\bsonobj.h" />
+ <ClInclude Include="..\bson\bsonobjbuilder.h" />
+ <ClInclude Include="..\bson\bsonobjiterator.h" />
+ <ClInclude Include="..\bson\bsontypes.h" />
+ <ClInclude Include="..\bson\bson_db.h" />
+ <ClInclude Include="..\bson\inline_decls.h" />
+ <ClInclude Include="..\bson\oid.h" />
+ <ClInclude Include="..\bson\ordering.h" />
+ <ClInclude Include="..\bson\stringdata.h" />
+ <ClInclude Include="..\db\dur.h" />
+ <ClInclude Include="..\db\durop.h" />
+ <ClInclude Include="..\db\dur_journal.h" />
+ <ClInclude Include="..\db\jsobjmanipulator.h" />
+ <ClInclude Include="..\db\mongommf.h" />
+ <ClInclude Include="..\db\mongomutex.h" />
+ <ClInclude Include="..\db\ops\delete.h" />
+ <ClInclude Include="..\db\ops\query.h" />
+ <ClInclude Include="..\db\ops\update.h" />
+ <ClInclude Include="..\third_party\pcre-7.4\pcrecpp.h" />
+ <ClInclude Include="..\server.h" />
+ <ClInclude Include="..\targetver.h" />
+ <ClInclude Include="..\..\boostw\boost_1_34_1\boost\version.hpp" />
+ <ClInclude Include="..\third_party\pcre-7.4\config.h" />
+ <ClInclude Include="..\third_party\pcre-7.4\pcre.h" />
+ <ClInclude Include="..\client\connpool.h" />
+ <ClInclude Include="..\client\dbclient.h" />
+ <ClInclude Include="..\client\model.h" />
+ <ClInclude Include="..\db\btree.h" />
+ <ClInclude Include="..\db\clientcursor.h" />
+ <ClInclude Include="..\db\cmdline.h" />
+ <ClInclude Include="..\db\commands.h" />
+ <ClInclude Include="..\db\concurrency.h" />
+ <ClInclude Include="..\db\curop.h" />
+ <ClInclude Include="..\db\cursor.h" />
+ <ClInclude Include="..\db\database.h" />
+ <ClInclude Include="..\db\db.h" />
+ <ClInclude Include="..\db\dbhelpers.h" />
+ <ClInclude Include="..\db\dbinfo.h" />
+ <ClInclude Include="..\db\dbmessage.h" />
+ <ClInclude Include="..\db\diskloc.h" />
+ <ClInclude Include="..\db\extsort.h" />
+ <ClInclude Include="..\db\introspect.h" />
+ <ClInclude Include="..\db\jsobj.h" />
+ <ClInclude Include="..\db\json.h" />
+ <ClInclude Include="..\db\matcher.h" />
+ <ClInclude Include="..\grid\message.h" />
+ <ClInclude Include="..\db\minilex.h" />
+ <ClInclude Include="..\db\namespace.h" />
+ <ClInclude Include="..\pch.h" />
+ <ClInclude Include="..\db\pdfile.h" />
+ <ClInclude Include="..\grid\protocol.h" />
+ <ClInclude Include="..\db\query.h" />
+ <ClInclude Include="..\db\queryoptimizer.h" />
+ <ClInclude Include="..\db\repl.h" />
+ <ClInclude Include="..\db\replset.h" />
+ <ClInclude Include="..\db\resource.h" />
+ <ClInclude Include="..\db\scanandorder.h" />
+ <ClInclude Include="..\db\security.h" />
+ <ClInclude Include="..\third_party\snappy\config.h" />
+ <ClInclude Include="..\third_party\snappy\snappy-c.h" />
+ <ClInclude Include="..\third_party\snappy\snappy-internal.h" />
+ <ClInclude Include="..\third_party\snappy\snappy-sinksource.h" />
+ <ClInclude Include="..\third_party\snappy\snappy-stubs-internal.h" />
+ <ClInclude Include="..\third_party\snappy\snappy-stubs-public.h" />
+ <ClInclude Include="..\third_party\snappy\snappy.h" />
+ <ClInclude Include="..\util\builder.h" />
+ <ClInclude Include="..\util\checksum.h" />
+ <ClInclude Include="..\util\compress.h" />
+ <ClInclude Include="..\util\concurrency\list.h" />
+ <ClInclude Include="..\util\concurrency\task.h" />
+ <ClInclude Include="..\util\concurrency\value.h" />
+ <ClInclude Include="..\util\file.h" />
+ <ClInclude Include="..\util\goodies.h" />
+ <ClInclude Include="..\util\hashtab.h" />
+ <ClInclude Include="..\db\lasterror.h" />
+ <ClInclude Include="..\util\log.h" />
+ <ClInclude Include="..\util\logfile.h" />
+ <ClInclude Include="..\util\lruishmap.h" />
+ <ClInclude Include="..\util\md5.h" />
+ <ClInclude Include="..\util\md5.hpp" />
+ <ClInclude Include="..\util\miniwebserver.h" />
+ <ClInclude Include="..\util\mmap.h" />
+ <ClInclude Include="..\util\mongoutils\hash.h" />
+ <ClInclude Include="..\util\sock.h" />
+ <ClInclude Include="..\util\unittest.h" />
+ </ItemGroup>
+ <ItemGroup>
+ <ClCompile Include="..\bson\oid.cpp" />
+ <ClCompile Include="..\client\dbclientcursor.cpp" />
+ <ClCompile Include="..\client\dbclient_rs.cpp" />
+ <ClCompile Include="..\client\distlock.cpp" />
+ <ClCompile Include="..\client\gridfs.cpp" />
+ <ClCompile Include="..\client\model.cpp" />
+ <ClCompile Include="..\client\parallel.cpp" />
+ <ClCompile Include="..\db\btreebuilder.cpp" />
+ <ClCompile Include="..\db\cap.cpp" />
+ <ClCompile Include="..\db\commands\isself.cpp" />
+ <ClCompile Include="..\db\compact.cpp" />
+ <ClCompile Include="..\db\dbcommands_admin.cpp" />
+ <ClCompile Include="..\db\dbcommands_generic.cpp" />
+ <ClCompile Include="..\db\dur.cpp" />
+ <ClCompile Include="..\db\durop.cpp" />
+ <ClCompile Include="..\db\dur_commitjob.cpp" />
+ <ClCompile Include="..\db\dur_journal.cpp" />
+ <ClCompile Include="..\db\dur_preplogbuffer.cpp" />
+ <ClCompile Include="..\db\dur_recover.cpp" />
+ <ClCompile Include="..\db\dur_writetodatafiles.cpp" />
+ <ClCompile Include="..\db\geo\2d.cpp" />
+ <ClCompile Include="..\db\geo\haystack.cpp" />
+ <ClCompile Include="..\db\key.cpp" />
+ <ClCompile Include="..\db\mongommf.cpp" />
+ <ClCompile Include="..\db\ops\delete.cpp" />
+ <ClCompile Include="..\db\ops\query.cpp" />
+ <ClCompile Include="..\db\ops\update.cpp" />
+ <ClCompile Include="..\db\projection.cpp" />
+ <ClCompile Include="..\db\queryoptimizercursor.cpp" />
+ <ClCompile Include="..\db\querypattern.cpp">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\db\record.cpp" />
+ <ClCompile Include="..\db\repl\consensus.cpp" />
+ <ClCompile Include="..\db\repl\heartbeat.cpp" />
+ <ClCompile Include="..\db\repl\manager.cpp" />
+ <ClCompile Include="..\db\repl\rs.cpp" />
+ <ClCompile Include="..\db\repl\rs_initialsync.cpp" />
+ <ClCompile Include="..\db\repl\rs_initiate.cpp" />
+ <ClCompile Include="..\db\repl\rs_rollback.cpp" />
+ <ClCompile Include="..\db\repl\rs_sync.cpp" />
+ <ClCompile Include="..\db\restapi.cpp" />
+ <ClCompile Include="..\db\scanandorder.cpp" />
+ <ClCompile Include="..\db\security_common.cpp" />
+ <ClCompile Include="..\third_party\pcre-7.4\pcrecpp.cc">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_chartables.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_compile.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_config.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_dfa_exec.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_exec.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_fullinfo.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_get.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_globals.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_info.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_maketables.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_newline.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_ord2utf8.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_refcount.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_scanner.cc">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_stringpiece.cc">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_study.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_tables.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_try_flipped.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_ucp_searchfuncs.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_valid_utf8.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_version.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_xclass.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcreposix.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\client\connpool.cpp" />
+ <ClCompile Include="..\client\dbclient.cpp" />
+ <ClCompile Include="..\client\syncclusterconnection.cpp" />
+ <ClCompile Include="..\db\btree.cpp" />
+ <ClCompile Include="..\db\btreecursor.cpp" />
+ <ClCompile Include="..\pch.cpp">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">Create</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">Create</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">Create</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">Create</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\db\client.cpp" />
+ <ClCompile Include="..\db\clientcursor.cpp" />
+ <ClCompile Include="..\db\cloner.cpp" />
+ <ClCompile Include="..\db\commands.cpp" />
+ <ClCompile Include="..\db\common.cpp" />
+ <ClCompile Include="..\db\cursor.cpp" />
+ <ClCompile Include="..\db\database.cpp" />
+ <ClCompile Include="..\db\dbcommands.cpp" />
+ <ClCompile Include="..\db\dbeval.cpp" />
+ <ClCompile Include="..\db\dbhelpers.cpp" />
+ <ClCompile Include="..\db\dbwebserver.cpp" />
+ <ClCompile Include="..\db\extsort.cpp" />
+ <ClCompile Include="..\db\index.cpp" />
+ <ClCompile Include="..\db\indexkey.cpp" />
+ <ClCompile Include="..\db\instance.cpp" />
+ <ClCompile Include="..\db\introspect.cpp" />
+ <ClCompile Include="..\db\jsobj.cpp" />
+ <ClCompile Include="..\db\json.cpp" />
+ <ClCompile Include="..\db\lasterror.cpp" />
+ <ClCompile Include="..\db\matcher.cpp" />
+ <ClCompile Include="..\scripting\bench.cpp" />
+ <ClCompile Include="..\s\chunk.cpp" />
+ <ClCompile Include="..\s\config.cpp" />
+ <ClCompile Include="..\s\d_chunk_manager.cpp" />
+ <ClCompile Include="..\s\d_migrate.cpp" />
+ <ClCompile Include="..\s\d_split.cpp" />
+ <ClCompile Include="..\s\d_state.cpp" />
+ <ClCompile Include="..\s\d_writeback.cpp" />
+ <ClCompile Include="..\s\grid.cpp" />
+ <ClCompile Include="..\s\shard.cpp" />
+ <ClCompile Include="..\s\shardconnection.cpp" />
+ <ClCompile Include="..\s\shardkey.cpp" />
+ <ClCompile Include="..\third_party\snappy\snappy-sinksource.cc">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\snappy\snappy.cc">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\util\alignedbuilder.cpp">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\util\compress.cpp">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\util\concurrency\spin_lock.cpp">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\util\concurrency\synchronization.cpp" />
+ <ClCompile Include="..\util\concurrency\task.cpp" />
+ <ClCompile Include="..\util\concurrency\thread_pool.cpp" />
+ <ClCompile Include="..\util\concurrency\vars.cpp" />
+ <ClCompile Include="..\util\file_allocator.cpp" />
+ <ClCompile Include="..\util\log.cpp" />
+ <ClCompile Include="..\util\logfile.cpp" />
+ <ClCompile Include="..\util\mmap_win.cpp" />
+ <ClCompile Include="..\db\namespace.cpp" />
+ <ClCompile Include="..\db\nonce.cpp" />
+ <ClCompile Include="..\db\pdfile.cpp" />
+ <ClCompile Include="..\db\queryoptimizer.cpp" />
+ <ClCompile Include="..\util\processinfo.cpp" />
+ <ClCompile Include="..\db\repl.cpp" />
+ <ClCompile Include="..\db\security.cpp" />
+ <ClCompile Include="..\db\security_commands.cpp" />
+ <ClCompile Include="..\db\tests.cpp" />
+ <ClCompile Include="..\db\cmdline.cpp" />
+ <ClCompile Include="..\db\dbmessage.cpp" />
+ <ClCompile Include="..\db\matcher_covered.cpp" />
+ <ClCompile Include="..\db\oplog.cpp" />
+ <ClCompile Include="..\db\queryutil.cpp" />
+ <ClCompile Include="..\db\repl_block.cpp" />
+ <ClCompile Include="..\util\assert_util.cpp" />
+ <ClCompile Include="..\util\background.cpp" />
+ <ClCompile Include="..\util\base64.cpp" />
+ <ClCompile Include="..\util\md5.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeaderFile>
+ <PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeaderFile>
+ </ClCompile>
+ <ClCompile Include="..\util\md5main.cpp" />
+ <ClCompile Include="..\util\net\message.cpp" />
+ <ClCompile Include="..\util\net\listen.cpp" />
+ <ClCompile Include="..\util\net\message_server_port.cpp" />
+ <ClCompile Include="..\util\net\message_port.cpp" />
+ <ClCompile Include="..\util\net\miniwebserver.cpp" />
+ <ClCompile Include="..\util\mmap.cpp" />
+ <ClCompile Include="..\util\processinfo_win32.cpp" />
+ <ClCompile Include="..\util\ramlog.cpp" />
+ <ClCompile Include="..\util\net\sock.cpp" />
+ <ClCompile Include="..\util\stringutils.cpp" />
+ <ClCompile Include="..\util\text.cpp" />
+ <ClCompile Include="..\util\util.cpp" />
+ <ClCompile Include="..\s\d_logic.cpp" />
+ <ClCompile Include="..\scripting\engine.cpp" />
+ <ClCompile Include="..\scripting\engine_spidermonkey.cpp" />
+ <ClCompile Include="..\shell\mongo_vstudio.cpp">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\scripting\utils.cpp" />
+ <ClCompile Include="..\util\version.cpp" />
+ <ClCompile Include="basictests.cpp" />
+ <ClCompile Include="btreetests.cpp" />
+ <ClCompile Include="clienttests.cpp" />
+ <ClCompile Include="cursortests.cpp" />
+ <ClCompile Include="dbtests.cpp" />
+ <ClCompile Include="directclienttests.cpp" />
+ <ClCompile Include="framework.cpp" />
+ <ClCompile Include="jsobjtests.cpp" />
+ <ClCompile Include="jsontests.cpp" />
+ <ClCompile Include="jstests.cpp" />
+ <ClCompile Include="matchertests.cpp" />
+ <ClCompile Include="mmaptests.cpp" />
+ <ClCompile Include="namespacetests.cpp" />
+ <ClCompile Include="pdfiletests.cpp" />
+ <ClCompile Include="perftests.cpp" />
+ <ClCompile Include="queryoptimizertests.cpp" />
+ <ClCompile Include="querytests.cpp" />
+ <ClCompile Include="repltests.cpp" />
+ <ClCompile Include="socktests.cpp" />
+ <ClCompile Include="spin_lock_test.cpp" />
+ <ClCompile Include="threadedtests.cpp">
+ <DisableSpecificWarnings Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">4180;%(DisableSpecificWarnings)</DisableSpecificWarnings>
+ <DisableSpecificWarnings Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">4180;%(DisableSpecificWarnings)</DisableSpecificWarnings>
+ </ClCompile>
+ <ClCompile Include="updatetests.cpp" />
+ <ClCompile Include="..\db\stats\counters.cpp" />
+ <ClCompile Include="..\db\stats\snapshots.cpp" />
+ <ClCompile Include="..\db\stats\top.cpp" />
+ <ClCompile Include="..\db\repl\health.cpp" />
+ <ClCompile Include="..\db\repl\replset_commands.cpp" />
+ <ClCompile Include="..\db\repl\rs_config.cpp" />
+ </ItemGroup>
+ <ItemGroup>
+ <None Include="..\SConstruct" />
+ <None Include="btreetests.inl" />
+ </ItemGroup>
+ <ItemGroup>
+ <Library Include="..\..\js\js32d.lib">
+ <FileType>Document</FileType>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>
+ </Library>
+ <Library Include="..\..\js\js32r.lib">
+ <FileType>Document</FileType>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild>
+ </Library>
+ <Library Include="..\..\js\js64d.lib">
+ <FileType>Document</FileType>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>
+ </Library>
+ <Library Include="..\..\js\js64r.lib">
+ <FileType>Document</FileType>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>
+ </Library>
+ </ItemGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
+ <ImportGroup Label="ExtensionTargets">
+ </ImportGroup>
</Project> \ No newline at end of file
diff --git a/dbtests/test.vcxproj.filters b/dbtests/test.vcxproj.filters
index c52f7f6..3554ce7 100755
--- a/dbtests/test.vcxproj.filters
+++ b/dbtests/test.vcxproj.filters
@@ -4,7 +4,7 @@
<Filter Include="misc and third party">
<UniqueIdentifier>{17c97725-06a4-41a6-bc1c-f0e05eada682}</UniqueIdentifier>
</Filter>
- <Filter Include="misc and third party\pcre">
+ <Filter Include="misc and third party">
<UniqueIdentifier>{0a50fb63-4ac3-4e30-a9d4-b0841878ee73}</UniqueIdentifier>
</Filter>
<Filter Include="client">
@@ -53,26 +53,23 @@
<Filter Include="dur">
<UniqueIdentifier>{c296d097-0d46-46ee-9097-f2df659d9596}</UniqueIdentifier>
</Filter>
+ <Filter Include="bson">
+ <UniqueIdentifier>{e6652333-c77f-420c-af8e-72d55bc095fe}</UniqueIdentifier>
+ </Filter>
+ <Filter Include="misc and third party\snappy">
+ <UniqueIdentifier>{fbc4416f-ca67-4e63-a1ea-49027de7e080}</UniqueIdentifier>
+ </Filter>
</ItemGroup>
<ItemGroup>
<ClInclude Include="..\..\boostw\boost_1_34_1\boost\config\auto_link.hpp">
<Filter>misc and third party</Filter>
</ClInclude>
- <ClInclude Include="..\pcre-7.4\pcrecpp.h">
- <Filter>misc and third party</Filter>
- </ClInclude>
<ClInclude Include="..\targetver.h">
<Filter>misc and third party</Filter>
</ClInclude>
<ClInclude Include="..\..\boostw\boost_1_34_1\boost\version.hpp">
<Filter>misc and third party</Filter>
</ClInclude>
- <ClInclude Include="..\pcre-7.4\config.h">
- <Filter>misc and third party\pcre</Filter>
- </ClInclude>
- <ClInclude Include="..\pcre-7.4\pcre.h">
- <Filter>misc and third party\pcre</Filter>
- </ClInclude>
<ClInclude Include="..\client\connpool.h">
<Filter>client</Filter>
</ClInclude>
@@ -244,6 +241,87 @@
<ClInclude Include="..\db\mongomutex.h">
<Filter>db</Filter>
</ClInclude>
+ <ClInclude Include="..\util\mongoutils\hash.h">
+ <Filter>util\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\util\checksum.h">
+ <Filter>util</Filter>
+ </ClInclude>
+ <ClInclude Include="..\bson\bson.h">
+ <Filter>bson</Filter>
+ </ClInclude>
+ <ClInclude Include="..\bson\bson_db.h">
+ <Filter>bson</Filter>
+ </ClInclude>
+ <ClInclude Include="..\bson\bsonelement.h">
+ <Filter>bson</Filter>
+ </ClInclude>
+ <ClInclude Include="..\bson\bson-inl.h">
+ <Filter>bson</Filter>
+ </ClInclude>
+ <ClInclude Include="..\bson\bsonmisc.h">
+ <Filter>bson</Filter>
+ </ClInclude>
+ <ClInclude Include="..\bson\bsonobj.h">
+ <Filter>bson</Filter>
+ </ClInclude>
+ <ClInclude Include="..\bson\bsonobjbuilder.h">
+ <Filter>bson</Filter>
+ </ClInclude>
+ <ClInclude Include="..\bson\bsonobjiterator.h">
+ <Filter>bson</Filter>
+ </ClInclude>
+ <ClInclude Include="..\bson\bsontypes.h">
+ <Filter>bson</Filter>
+ </ClInclude>
+ <ClInclude Include="..\bson\inline_decls.h">
+ <Filter>bson</Filter>
+ </ClInclude>
+ <ClInclude Include="..\bson\oid.h">
+ <Filter>bson</Filter>
+ </ClInclude>
+ <ClInclude Include="..\bson\ordering.h">
+ <Filter>bson</Filter>
+ </ClInclude>
+ <ClInclude Include="..\bson\stringdata.h">
+ <Filter>bson</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\ops\delete.h">
+ <Filter>db\cpp</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\ops\update.h">
+ <Filter>db\cpp</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\ops\query.h">
+ <Filter>db\cpp</Filter>
+ </ClInclude>
+ <ClInclude Include="..\server.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\third_party\snappy\config.h">
+ <Filter>misc and third party\snappy</Filter>
+ </ClInclude>
+ <ClInclude Include="..\third_party\snappy\snappy.h">
+ <Filter>misc and third party\snappy</Filter>
+ </ClInclude>
+ <ClInclude Include="..\third_party\snappy\snappy-c.h">
+ <Filter>misc and third party\snappy</Filter>
+ </ClInclude>
+ <ClInclude Include="..\third_party\snappy\snappy-internal.h">
+ <Filter>misc and third party\snappy</Filter>
+ </ClInclude>
+ <ClInclude Include="..\third_party\snappy\snappy-sinksource.h">
+ <Filter>misc and third party\snappy</Filter>
+ </ClInclude>
+ <ClInclude Include="..\third_party\snappy\snappy-stubs-internal.h">
+ <Filter>misc and third party\snappy</Filter>
+ </ClInclude>
+ <ClInclude Include="..\third_party\snappy\snappy-stubs-public.h">
+ <Filter>misc and third party\snappy</Filter>
+ </ClInclude>
+ <ClInclude Include="..\util\compress.h">
+ <Filter>misc and third party</Filter>
+ </ClInclude>
</ItemGroup>
<ItemGroup>
<Library Include="..\..\js\js64r.lib">
@@ -260,78 +338,6 @@
</Library>
</ItemGroup>
<ItemGroup>
- <ClCompile Include="..\pcre-7.4\pcrecpp.cc">
- <Filter>misc and third party</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_chartables.c">
- <Filter>misc and third party\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_compile.c">
- <Filter>misc and third party\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_config.c">
- <Filter>misc and third party\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_dfa_exec.c">
- <Filter>misc and third party\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_exec.c">
- <Filter>misc and third party\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_fullinfo.c">
- <Filter>misc and third party\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_get.c">
- <Filter>misc and third party\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_globals.c">
- <Filter>misc and third party\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_info.c">
- <Filter>misc and third party\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_maketables.c">
- <Filter>misc and third party\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_newline.c">
- <Filter>misc and third party\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_ord2utf8.c">
- <Filter>misc and third party\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_refcount.c">
- <Filter>misc and third party\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_scanner.cc">
- <Filter>misc and third party\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_stringpiece.cc">
- <Filter>misc and third party\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_study.c">
- <Filter>misc and third party\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_tables.c">
- <Filter>misc and third party\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_try_flipped.c">
- <Filter>misc and third party\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_ucp_searchfuncs.c">
- <Filter>misc and third party\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_valid_utf8.c">
- <Filter>misc and third party\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_version.c">
- <Filter>misc and third party\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_xclass.c">
- <Filter>misc and third party\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcreposix.c">
- <Filter>misc and third party\pcre</Filter>
- </ClCompile>
<ClCompile Include="..\client\connpool.cpp">
<Filter>client</Filter>
</ClCompile>
@@ -419,9 +425,6 @@
<ClCompile Include="..\db\pdfile.cpp">
<Filter>db\cpp</Filter>
</ClCompile>
- <ClCompile Include="..\db\query.cpp">
- <Filter>db\cpp</Filter>
- </ClCompile>
<ClCompile Include="..\db\queryoptimizer.cpp">
<Filter>db\cpp</Filter>
</ClCompile>
@@ -437,9 +440,6 @@
<ClCompile Include="..\db\tests.cpp">
<Filter>db\cpp</Filter>
</ClCompile>
- <ClCompile Include="..\db\update.cpp">
- <Filter>db\cpp</Filter>
- </ClCompile>
<ClCompile Include="..\db\cmdline.cpp">
<Filter>db\h</Filter>
</ClCompile>
@@ -464,33 +464,18 @@
<ClCompile Include="..\util\base64.cpp">
<Filter>util\cpp</Filter>
</ClCompile>
- <ClCompile Include="..\util\httpclient.cpp">
- <Filter>util\cpp</Filter>
- </ClCompile>
<ClCompile Include="..\util\md5.c">
<Filter>util\cpp</Filter>
</ClCompile>
<ClCompile Include="..\util\md5main.cpp">
<Filter>util\cpp</Filter>
</ClCompile>
- <ClCompile Include="..\util\message.cpp">
- <Filter>util\cpp</Filter>
- </ClCompile>
- <ClCompile Include="..\util\message_server_port.cpp">
- <Filter>util\cpp</Filter>
- </ClCompile>
- <ClCompile Include="..\util\miniwebserver.cpp">
- <Filter>util\cpp</Filter>
- </ClCompile>
<ClCompile Include="..\util\mmap.cpp">
<Filter>util\cpp</Filter>
</ClCompile>
<ClCompile Include="..\util\processinfo_win32.cpp">
<Filter>util\cpp</Filter>
</ClCompile>
- <ClCompile Include="..\util\sock.cpp">
- <Filter>util\cpp</Filter>
- </ClCompile>
<ClCompile Include="..\util\util.cpp">
<Filter>util\cpp</Filter>
</ClCompile>
@@ -542,9 +527,6 @@
<ClCompile Include="namespacetests.cpp">
<Filter>dbtests</Filter>
</ClCompile>
- <ClCompile Include="pairingtests.cpp">
- <Filter>dbtests</Filter>
- </ClCompile>
<ClCompile Include="pdfiletests.cpp">
<Filter>dbtests</Filter>
</ClCompile>
@@ -692,9 +674,6 @@
<ClCompile Include="..\db\restapi.cpp">
<Filter>db\cpp</Filter>
</ClCompile>
- <ClCompile Include="..\util\concurrency\spin_lock.cpp">
- <Filter>db\cpp</Filter>
- </ClCompile>
<ClCompile Include="mmaptests.cpp">
<Filter>dbtests</Filter>
</ClCompile>
@@ -761,16 +740,88 @@
<ClCompile Include="directclienttests.cpp">
<Filter>dbtests</Filter>
</ClCompile>
- <ClCompile Include="..\db\security_key.cpp">
- <Filter>db\cpp</Filter>
- </ClCompile>
<ClCompile Include="..\util\file_allocator.cpp">
<Filter>util\cpp</Filter>
</ClCompile>
+ <ClCompile Include="..\db\dbcommands_admin.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\querypattern.cpp">
+ <Filter>db</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\ramlog.cpp">
+ <Filter>util</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\key.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\btreebuilder.cpp">
+ <Filter>btree</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\queryoptimizercursor.cpp">
+ <Filter>db</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\record.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\ops\delete.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\ops\update.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\security_common.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\ops\query.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\dbmessage.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\net\message.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\net\listen.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\net\message_server_port.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\net\message_port.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\net\miniwebserver.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\net\sock.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="spin_lock_test.cpp">
+ <Filter>dbtests</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\concurrency\spin_lock.cpp">
+ <Filter>util\concurrency</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\snappy\snappy.cc">
+ <Filter>misc and third party\snappy</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\compress.cpp">
+ <Filter>misc and third party</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\snappy\snappy-sinksource.cc">
+ <Filter>misc and third party\snappy</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\scanandorder.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
</ItemGroup>
<ItemGroup>
<None Include="..\SConstruct">
<Filter>misc and third party</Filter>
</None>
+ <None Include="btreetests.inl">
+ <Filter>dbtests</Filter>
+ </None>
</ItemGroup>
</Project> \ No newline at end of file
diff --git a/dbtests/threadedtests.cpp b/dbtests/threadedtests.cpp
index 805b2d5..3a5ee10 100644
--- a/dbtests/threadedtests.cpp
+++ b/dbtests/threadedtests.cpp
@@ -21,6 +21,7 @@
#include "../bson/util/atomic_int.h"
#include "../util/concurrency/mvar.h"
#include "../util/concurrency/thread_pool.h"
+#include "../util/concurrency/list.h"
#include "../util/timer.h"
#include <boost/thread.hpp>
#include <boost/bind.hpp>
@@ -33,8 +34,8 @@ namespace ThreadedTests {
class ThreadedTest {
public:
virtual void setup() {} //optional
- virtual void subthread() = 0;
- virtual void validate() = 0;
+ virtual void subthread(int remaining) = 0; // each thread does whatever test work you want done
+ virtual void validate() = 0; // after work is done
static const int nthreads = nthreads_param;
@@ -48,12 +49,11 @@ namespace ThreadedTests {
private:
void launch_subthreads(int remaining) {
- if (!remaining) return;
-
- boost::thread athread(boost::bind(&ThreadedTest::subthread, this));
+ if (!remaining)
+ return;
+ boost::thread athread(boost::bind(&ThreadedTest::subthread, this, remaining));
launch_subthreads(remaining - 1);
-
athread.join();
}
};
@@ -65,8 +65,18 @@ namespace ThreadedTests {
enum { N = 40000 };
#endif
MongoMutex *mm;
+ ProgressMeter pm;
public:
+ MongoMutexTest() : pm(N * nthreads) {}
void run() {
+ DEV {
+ // in _DEBUG builds on linux we mprotect each time a writelock
+ // is taken. That can greatly slow down this test if there are
+ // many open files
+ DBDirectClient db;
+ db.simpleCommand("admin", NULL, "closeAllDatabases");
+ }
+
Timer t;
cout << "MongoMutexTest N:" << N << endl;
ThreadedTest<135>::run();
@@ -74,9 +84,9 @@ namespace ThreadedTests {
}
private:
virtual void setup() {
- mm = new MongoMutex("MongoMutexTest");
+ mm = &dbMutex;
}
- virtual void subthread() {
+ virtual void subthread(int) {
Client::initThread("mongomutextest");
sleepmillis(0);
for( int i = 0; i < N; i++ ) {
@@ -122,6 +132,7 @@ namespace ThreadedTests {
mm->lock_shared();
mm->unlock_shared();
}
+ pm.hit();
}
cc().shutdown();
}
@@ -139,7 +150,7 @@ namespace ThreadedTests {
static const int iterations = 1000000;
AtomicUInt target;
- void subthread() {
+ void subthread(int) {
for(int i=0; i < iterations; i++) {
//target.x++; // verified to fail with this version
target++;
@@ -170,7 +181,7 @@ namespace ThreadedTests {
public:
MVarTest() : target(0) {}
- void subthread() {
+ void subthread(int) {
for(int i=0; i < iterations; i++) {
int val = target.take();
#if BOOST_VERSION >= 103500
@@ -224,16 +235,370 @@ namespace ThreadedTests {
}
};
- class All : public Suite {
+ class RWLockTest1 {
+ public:
+ void run() {
+ RWLock lk( "eliot" );
+ {
+ rwlock r( lk , true , 1000 );
+ }
+ }
+ };
+
+ class RWLockTest2 {
+ public:
+
+ static void worker1( const RWLock * lk , AtomicUInt * x ) {
+ (*x)++; // 1
+ //cout << "lock b try" << endl;
+ rwlock b( *lk , true );
+ //cout << "lock b got" << endl;
+ (*x)++; // 2
+ }
+
+ static void worker2( const RWLock * lk , AtomicUInt * x ) {
+ //cout << "lock c try" << endl;
+ rwlock c( *lk , false );
+ (*x)++;
+ //cout << "lock c got" << endl;
+ }
+
+ void run() {
+ /**
+ * note: this test will deadlock if the code breaks
+ */
+
+ RWLock lk( "eliot2" , 120 * 1000 );
+ cout << "RWLock impl: " << lk.implType() << endl;
+
+ auto_ptr<rwlock> a( new rwlock( lk , false ) );
+
+ AtomicUInt x1 = 0;
+ cout << "A : " << &x1 << endl;
+ boost::thread t1( boost::bind( worker1 , &lk , &x1 ) );
+ while ( ! x1 );
+ assert( x1 == 1 );
+ sleepmillis( 500 );
+ assert( x1 == 1 );
+
+ AtomicUInt x2 = 0;
+
+ boost::thread t2( boost::bind( worker2, &lk , &x2 ) );
+ t2.join();
+ assert( x2 == 1 );
+
+ a.reset();
+
+ for ( int i=0; i<2000; i++ ) {
+ if ( x1 == 2 )
+ break;
+ sleepmillis(1);
+ }
+
+ assert( x1 == 2 );
+ t1.join();
+
+ }
+ };
+
+
+
+ /** test of shared lock */
+ class RWLockTest3 {
+ public:
+
+ static void worker2( RWLock * lk , AtomicUInt * x ) {
+ assert( ! lk->lock_try(0) );
+ //cout << "lock c try" << endl;
+ rwlock c( *lk , false );
+ (*x)++;
+ //cout << "lock c got" << endl;
+ }
+
+ void run() {
+ /**
+ * note: this test will deadlock if the code breaks
+ */
+
+ RWLock lk( "eliot2" , 120 * 1000 );
+
+ auto_ptr<rwlock> a( new rwlock( lk , false ) );
+
+ AtomicUInt x2 = 0;
+
+ boost::thread t2( boost::bind( worker2, &lk , &x2 ) );
+ t2.join();
+ assert( x2 == 1 );
+
+ a.reset();
+
+ }
+ };
+
+ class RWLockTest4 {
+ public:
+
+#if defined(__linux__) || defined(__APPLE__)
+ static void worker1( pthread_rwlock_t * lk , AtomicUInt * x ) {
+ (*x)++; // 1
+ cout << "lock b try" << endl;
+ while ( 1 ) {
+ if ( pthread_rwlock_trywrlock( lk ) == 0 )
+ break;
+ sleepmillis(10);
+ }
+ cout << "lock b got" << endl;
+ (*x)++; // 2
+ pthread_rwlock_unlock( lk );
+ }
+
+ static void worker2( pthread_rwlock_t * lk , AtomicUInt * x ) {
+ cout << "lock c try" << endl;
+ pthread_rwlock_rdlock( lk );
+ (*x)++;
+ cout << "lock c got" << endl;
+ pthread_rwlock_unlock( lk );
+ }
+#endif
+ void run() {
+ /**
+ * note: this test will deadlock if the code breaks
+ */
+
+#if defined(__linux__) || defined(__APPLE__)
+
+ // create
+ pthread_rwlock_t lk;
+ assert( pthread_rwlock_init( &lk , 0 ) == 0 );
+
+ // read lock
+ assert( pthread_rwlock_rdlock( &lk ) == 0 );
+
+ AtomicUInt x1 = 0;
+ boost::thread t1( boost::bind( worker1 , &lk , &x1 ) );
+ while ( ! x1 );
+ assert( x1 == 1 );
+ sleepmillis( 500 );
+ assert( x1 == 1 );
+
+ AtomicUInt x2 = 0;
+
+ boost::thread t2( boost::bind( worker2, &lk , &x2 ) );
+ t2.join();
+ assert( x2 == 1 );
+
+ pthread_rwlock_unlock( &lk );
+
+ for ( int i=0; i<2000; i++ ) {
+ if ( x1 == 2 )
+ break;
+ sleepmillis(1);
+ }
+
+ assert( x1 == 2 );
+ t1.join();
+#endif
+ }
+ };
+
+ class List1Test2 : public ThreadedTest<> {
+ static const int iterations = 1000; // note: a lot of iterations will use a lot of memory as List1 leaks on purpose
+ class M : public List1<M>::Base {
+ public:
+ M(int x) : _x(x) { }
+ const int _x;
+ };
+ List1<M> l;
+ public:
+ void validate() { }
+ void subthread(int) {
+ for(int i=0; i < iterations; i++) {
+ int r = std::rand() % 256;
+ if( r == 0 ) {
+ l.orphanAll();
+ }
+ else if( r < 4 ) {
+ l.push(new M(r));
+ }
+ else {
+ M *orph = 0;
+ for( M *m = l.head(); m; m=m->next() ) {
+ ASSERT( m->_x > 0 && m->_x < 4 );
+ if( r > 192 && std::rand() % 8 == 0 )
+ orph = m;
+ }
+ if( orph ) {
+ try {
+ l.orphan(orph);
+ }
+ catch(...) { }
+ }
+ }
+ }
+ }
+ };
+
+ class List1Test {
public:
- All() : Suite( "threading" ) {
+ class M : public List1<M>::Base {
+ ~M();
+ public:
+ M( int x ) {
+ num = x;
+ }
+ int num;
+ };
+
+ void run(){
+ List1<M> l;
+
+ vector<M*> ms;
+ for ( int i=0; i<5; i++ ) {
+ M * m = new M(i);
+ ms.push_back( m );
+ l.push( m );
+ }
+
+ // must assert as the item is missing
+ ASSERT_EXCEPTION( l.orphan( new M( -3 ) ) , UserException );
}
+ };
+
+#if 0
+ class UpgradableTest : public ThreadedTest<7> {
+ RWLock m;
+ public:
+ UpgradableTest() : m("utest") {}
+ private:
+ virtual void validate() { }
+ virtual void subthread(int x) {
+ Client::initThread("utest");
+
+ /* r = read lock
+ R = get a read lock and we expect it to be fast
+ w = write lock
+ */
+ // /-- verify upgrade can be done instantly while in a read lock already
+ // | /-- verify upgrade acquisition isn't greedy
+ // | | /-- verify writes aren't greedy while in upgradable
+ // v v v
+ const char *what = " RURuRwR";
+
+ sleepmillis(100*x);
+
+ log() << x << what[x] << " request" << endl;
+ switch( what[x] ) {
+ case 'w':
+ {
+ m.lock();
+ log() << x << " W got" << endl;
+ sleepmillis(100);
+ log() << x << " W unlock" << endl;
+ m.unlock();
+ }
+ break;
+ case 'u':
+ case 'U':
+ {
+ Timer t;
+ m.lockAsUpgradable();
+ log() << x << " U got" << endl;
+ if( what[x] == 'U' ) {
+ if( t.millis() > 20 ) {
+ DEV {
+ // a _DEBUG buildbot might be slow, try to avoid false positives
+ log() << "warning lock upgrade was slow " << t.millis() << endl;
+ }
+ else {
+ ASSERT( false );
+ }
+ }
+ }
+ sleepsecs(1);
+ log() << x << " U unlock" << endl;
+ m.unlockFromUpgradable();
+ }
+ break;
+ case 'r':
+ case 'R':
+ {
+ Timer t;
+ m.lock_shared();
+ log() << x << " R got " << endl;
+ if( what[x] == 'R' ) {
+ if( t.millis() > 15 ) {
+ log() << "warning: when in upgradable write locks are still greedy on this platform" << endl;
+ }
+ }
+ sleepmillis(200);
+ log() << x << " R unlock" << endl;
+ m.unlock_shared();
+ }
+ break;
+ default:
+ ASSERT(false);
+ }
+
+ cc().shutdown();
+ }
+ };
+#endif
+
+ class WriteLocksAreGreedy : public ThreadedTest<3> {
+ public:
+ WriteLocksAreGreedy() : m("gtest") {}
+ private:
+ RWLock m;
+ virtual void validate() { }
+ virtual void subthread(int x) {
+ Client::initThread("utest");
+ if( x == 1 ) {
+ cout << mongo::curTimeMillis64() % 10000 << " 1" << endl;
+ rwlock_shared lk(m);
+ sleepmillis(300);
+ cout << mongo::curTimeMillis64() % 10000 << " 1x" << endl;
+ }
+ if( x == 2 ) {
+ sleepmillis(100);
+ cout << mongo::curTimeMillis64() % 10000 << " 2" << endl;
+ rwlock lk(m, true);
+ //m._lock();
+ cout << mongo::curTimeMillis64() % 10000 << " 2x" << endl;
+ //m.unlock();
+ }
+ if( x == 3 ) {
+ sleepmillis(200);
+ Timer t;
+ cout << mongo::curTimeMillis64() % 10000 << " 3" << endl;
+ rwlock_shared lk(m);
+ cout << mongo::curTimeMillis64() % 10000 << " 3x" << endl;
+ cout << t.millis() << endl;
+ ASSERT( t.millis() > 50 );
+ }
+ cc().shutdown();
+ }
+ };
+
+ class All : public Suite {
+ public:
+ All() : Suite( "threading" ) { }
void setupTests() {
+ add< WriteLocksAreGreedy >();
+ //add< UpgradableTest >();
+ add< List1Test >();
+ add< List1Test2 >();
+
add< IsAtomicUIntAtomic >();
add< MVarTest >();
add< ThreadPoolTest >();
add< LockTest >();
+
+ add< RWLockTest1 >();
+ //add< RWLockTest2 >(); // SERVER-2996
+ add< RWLockTest3 >();
+ add< RWLockTest4 >();
+
add< MongoMutexTest >();
}
} myall;
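
Note: the ThreadedTest changes above make the harness pass the remaining thread count into each subthread(int) while still launching threads recursively and joining them on the way back out. A minimal standalone sketch of that fan-out/join pattern (using std::thread rather than the boost::thread the test suite actually uses; the names here are illustrative only, not part of the patch):

    #include <iostream>
    #include <thread>

    // Each worker receives the counter value it was launched with,
    // mirroring ThreadedTest::subthread(int remaining).
    static void subthread(int remaining) {
        std::cout << "worker " << remaining << " running\n";
    }

    static void launch_subthreads(int remaining) {
        if (!remaining)
            return;
        std::thread t(subthread, remaining);  // hand the count to this worker
        launch_subthreads(remaining - 1);     // start the remaining workers
        t.join();                             // join in reverse launch order
    }

    int main() {
        launch_subthreads(4);                 // e.g. four workers
        return 0;
    }
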
diff --git a/dbtests/updatetests.cpp b/dbtests/updatetests.cpp
index 0f95a32..c912bf4 100644
--- a/dbtests/updatetests.cpp
+++ b/dbtests/updatetests.cpp
@@ -18,13 +18,13 @@
*/
#include "pch.h"
-#include "../db/query.h"
+#include "../db/ops/query.h"
#include "../db/db.h"
#include "../db/instance.h"
#include "../db/json.h"
#include "../db/lasterror.h"
-#include "../db/update.h"
+#include "../db/ops/update.h"
#include "dbtests.h"
@@ -750,18 +750,19 @@ namespace UpdateTests {
virtual BSONObj after() { return BSONObj(); }
void dotest() {
- client().insert( ns() , BSON( "x" << 5 ) );
+ long long start = numeric_limits<int>::max() - 5;
+ long long max = numeric_limits<int>::max() + 5ll;
+
+ client().insert( ns() , BSON( "x" << (int)start ) );
ASSERT( findOne()["x"].type() == NumberInt );
- long long start = 5;
- long long max = numeric_limits<int>::max();
- max *= 32;
while ( start < max ) {
- update( BSON( "$inc" << BSON( "x" << 500000 ) ) );
- start += 500000;
+ update( BSON( "$inc" << BSON( "x" << 1 ) ) );
+ start += 1;
ASSERT_EQUALS( start , findOne()["x"].numberLong() ); // SERVER-2005
}
+ ASSERT( findOne()["x"].type() == NumberLong );
}
};
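
Note: the reworked $inc test above starts just below the 32-bit maximum and increments across it, asserting that the stored value stays correct and ends up as NumberLong (SERVER-2005). The arithmetic being guarded can be shown in plain C++ with no driver involved (an illustrative sketch only):

    #include <cassert>
    #include <limits>

    int main() {
        // Start just below INT_MAX and step past it, as the test does.
        long long start = std::numeric_limits<int>::max() - 5;
        long long max   = std::numeric_limits<int>::max() + 5LL;  // 64-bit literal avoids overflow

        // Once the counter crosses INT_MAX it can no longer be held in a
        // 32-bit int, so the test expects the field to be promoted to a
        // 64-bit integer (NumberLong) rather than wrapping around.
        while (start < max) {
            start += 1;
            assert(start > 0);  // no wraparound when the value is kept in 64 bits
        }
        assert(start == max);
        assert(start > std::numeric_limits<int>::max());
        return 0;
    }
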
diff --git a/distsrc/README b/distsrc/README
index 745608d..9b092a3 100644
--- a/distsrc/README
+++ b/distsrc/README
@@ -1,34 +1,52 @@
-
-MongoDB
-=======
-
+MongoDB README
+
Welcome to MongoDB!
-Package Contents
-----------------
+COMPONENTS
+
+ bin/mongod - The database process.
+ bin/mongos - Sharding controller.
+ bin/mongo - The database shell (uses interactive javascript).
- bin/mongod - MongoDB server
- bin/mongo - MongoDB client
+UTILITIES
bin/mongodump - MongoDB dump tool - for backups, snapshots, etc..
bin/mongorestore - MongoDB restore a dump
- bin/mongoexport - Export a single collection to test (json,csv)
- bin/mongoimportjson - Import a json file into a collection
-
- bin/mongofiles - Utility for putting and getting files from MongoDB gridfs
-
-
-Useful Resources
-----------------
-
- MongoDB Website
-
- * http://www.mongodb.org/
-
-Documentation
-
- * http://www.mongodb.org/display/DOCS/Documentation
-
- MongoDB Maillists & IRC
-
- * http://www.mongodb.org/display/DOCS/Community
+ bin/mongoexport - Export a single collection to text (JSON, CSV)
+ bin/mongoimport - Import from JSON or CSV
+ bin/mongofiles - Utility for putting and getting files from MongoDB GridFS
+ bin/mongostat - Show performance statistics
+
+RUNNING
+
+ For command line options invoke:
+
+ $ ./mongod --help
+
+ To run a single server database:
+
+ $ mkdir /data/db
+ $ ./mongod
+ $
+ $ # The mongo javascript shell connects to localhost and the test database by default:
+ $ ./mongo
+ > help
+
+DRIVERS
+
+ Client drivers for most programming languages are available at mongodb.org. Use the
+ shell ("mongo") for administrative tasks.
+
+DOCUMENTATION
+
+ http://www.mongodb.org/
+
+MAIL LISTS AND IRC
+
+ http://www.mongodb.org/display/DOCS/Community
+
+32 BIT BUILD NOTES
+
+ MongoDB uses memory mapped files. If built as a 32 bit executable, you will
+ not be able to work with large (multi-gigabyte) databases. However, 32 bit
+ builds work fine with small development databases.
diff --git a/distsrc/client/SConstruct b/distsrc/client/SConstruct
index a97699e..54fc943 100644..100755
--- a/distsrc/client/SConstruct
+++ b/distsrc/client/SConstruct
@@ -1,3 +1,4 @@
+# SCons file for the MongoDB C++ client library and examples
import os
@@ -18,7 +19,7 @@ AddOption( "--prefix",
help="installation root" )
-env = Environment()
+env = Environment( MSVS_ARCH=None )
def addExtraLibs( s ):
for x in s.split(","):
@@ -40,7 +41,7 @@ linux = False
if "darwin" == os.sys.platform:
addExtraLibs( "/opt/local/" )
nix = True
-elif "linux2" == os.sys.platform:
+elif "linux2" == os.sys.platform or "linux3" == os.sys.platform:
nix = True
linux = True
@@ -50,7 +51,7 @@ if nix:
if linux:
env.Append( LINKFLAGS=" -Wl,--as-needed -Wl,-zdefs " )
-boostLibs = [ "thread" , "filesystem" , "system" ]
+boostLibs = [ "thread" , "filesystem" , "system", "thread" ]
conf = Configure(env)
for lib in boostLibs:
if not conf.CheckLib("boost_%s-mt" % lib):
@@ -60,7 +61,7 @@ dirs = [ "" , "bson/" , "bson/util/" ,
"client/" , "s/" , "shell/" ,
"db/" ,
"scripting/" ,
- "util/" , "util/concurrency/" , "util/mongoutils/" ]
+ "util/" , "util/concurrency/" , "util/mongoutils/" , "util/net/" ]
allClientFiles = []
for x in dirs:
@@ -93,6 +94,7 @@ clientEnv.Prepend( LIBS=["libmongoclient.a"])
clientEnv.Prepend( LIBPATH=["."] )
# examples
+
clientTests += [ clientEnv.Program( "firstExample" , [ "client/examples/first.cpp" ] ) ]
clientTests += [ clientEnv.Program( "secondExample" , [ "client/examples/second.cpp" ] ) ]
clientTests += [ clientEnv.Program( "whereExample" , [ "client/examples/whereExample.cpp" ] ) ]
@@ -100,3 +102,4 @@ clientTests += [ clientEnv.Program( "authTest" , [ "client/examples/authTest.cpp
clientTests += [ clientEnv.Program( "httpClientTest" , [ "client/examples/httpClientTest.cpp" ] ) ]
clientTests += [ clientEnv.Program( "clientTest" , [ "client/examples/clientTest.cpp" ] ) ]
clientEnv.Alias("clientTests", clientTests, [])
+
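
Note: the client SConstruct above builds libmongoclient.a and links the example programs (firstExample, clientTest, and so on) against it. A minimal program in the style of those examples, written against the legacy C++ driver API of this era (a sketch that assumes client/dbclient.h is on the include path and the binary is linked against libmongoclient; it is not part of this commit):

    #include <iostream>
    #include "client/dbclient.h"   // legacy MongoDB C++ driver header

    using namespace mongo;

    int main() {
        try {
            DBClientConnection c;
            c.connect("localhost");   // throws a DBException if the server is unreachable
            c.insert("test.people", BSON("name" << "eliot" << "num" << 1));
            std::auto_ptr<DBClientCursor> cur = c.query("test.people", BSONObj());
            while (cur->more())
                std::cout << cur->next().toString() << std::endl;
        }
        catch (const DBException &e) {
            std::cout << "caught " << e.what() << std::endl;
            return 1;
        }
        return 0;
    }
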
diff --git a/docs/errors.md b/docs/errors.md
new file mode 100644
index 0000000..3da8e2c
--- /dev/null
+++ b/docs/errors.md
@@ -0,0 +1,1564 @@
+MongoDB Error Codes
+==========
+
+
+
+
+bson/bson-inl.h
+----
+* 10065 [code](http://github.com/mongodb/mongo/blob/master/bson/bson-inl.h#L178)
+* 10313 [code](http://github.com/mongodb/mongo/blob/master/bson/bson-inl.h#L551) Insufficient bytes to calculate element size
+* 10314 [code](http://github.com/mongodb/mongo/blob/master/bson/bson-inl.h#L555) Insufficient bytes to calculate element size
+* 10315 [code](http://github.com/mongodb/mongo/blob/master/bson/bson-inl.h#L560) Insufficient bytes to calculate element size
+* 10316 [code](http://github.com/mongodb/mongo/blob/master/bson/bson-inl.h#L565) Insufficient bytes to calculate element size
+* 10317 [code](http://github.com/mongodb/mongo/blob/master/bson/bson-inl.h#L569) Insufficient bytes to calculate element size
+* 10318 [code](http://github.com/mongodb/mongo/blob/master/bson/bson-inl.h#L575) Invalid regex string
+* 10319 [code](http://github.com/mongodb/mongo/blob/master/bson/bson-inl.h#L585) Invalid regex options string
+* 10320 [code](http://github.com/mongodb/mongo/blob/master/bson/bson-inl.h#L659)
+* 10321 [code](http://github.com/mongodb/mongo/blob/master/bson/bson-inl.h#L496)
+* 10322 [code](http://github.com/mongodb/mongo/blob/master/bson/bson-inl.h#L501) Invalid CodeWScope size
+* 10323 [code](http://github.com/mongodb/mongo/blob/master/bson/bson-inl.h#L503) Invalid CodeWScope string size
+* 10324 [code](http://github.com/mongodb/mongo/blob/master/bson/bson-inl.h#L504) Invalid CodeWScope string size
+* 10325 [code](http://github.com/mongodb/mongo/blob/master/bson/bson-inl.h#L507) Invalid CodeWScope size
+* 10326 [code](http://github.com/mongodb/mongo/blob/master/bson/bson-inl.h#L509) Invalid CodeWScope object size
+* 10327 [code](http://github.com/mongodb/mongo/blob/master/bson/bson-inl.h#L458) Object does not end with EOO
+* 10328 [code](http://github.com/mongodb/mongo/blob/master/bson/bson-inl.h#L460) Invalid element size
+* 10329 [code](http://github.com/mongodb/mongo/blob/master/bson/bson-inl.h#L461) Element too large
+* 10330 [code](http://github.com/mongodb/mongo/blob/master/bson/bson-inl.h#L463) Element extends past end of object
+* 10331 [code](http://github.com/mongodb/mongo/blob/master/bson/bson-inl.h#L468) EOO Before end of object
+* 10334 [code](http://github.com/mongodb/mongo/blob/master/bson/bson-inl.h#L217)
+* 13655 [code](http://github.com/mongodb/mongo/blob/master/bson/bson-inl.h#L593)
+
+
+bson/bson_db.h
+----
+* 10062 [code](http://github.com/mongodb/mongo/blob/master/bson/bson_db.h#L60) not code
+
+
+bson/bsonelement.h
+----
+* 10063 [code](http://github.com/mongodb/mongo/blob/master/bson/bsonelement.h#L362) not a dbref
+* 10064 [code](http://github.com/mongodb/mongo/blob/master/bson/bsonelement.h#L367) not a dbref
+* 10333 [code](http://github.com/mongodb/mongo/blob/master/bson/bsonelement.h#L392) Invalid field name
+* 13111 [code](http://github.com/mongodb/mongo/blob/master/bson/bsonelement.h#L429)
+* 13118 [code](http://github.com/mongodb/mongo/blob/master/bson/bsonelement.h#L434) unexpected or missing type value in BSON object
+
+
+bson/bsonobjbuilder.h
+----
+* 10335 [code](http://github.com/mongodb/mongo/blob/master/bson/bsonobjbuilder.h#L546) builder does not own memory
+* 10336 [code](http://github.com/mongodb/mongo/blob/master/bson/bsonobjbuilder.h#L621) No subobject started
+* 13048 [code](http://github.com/mongodb/mongo/blob/master/bson/bsonobjbuilder.h#L762) can't append to array using string field name [" + name.data() + "]
+
+
+bson/ordering.h
+----
+* 13103 [code](http://github.com/mongodb/mongo/blob/master/bson/ordering.h#L64) too many compound keys
+
+
+bson/util/builder.h
+----
+* 10000 [code](http://github.com/mongodb/mongo/blob/master/bson/util/builder.h#L92) out of memory BufBuilder
+* 13548 [code](http://github.com/mongodb/mongo/blob/master/bson/util/builder.h#L202) BufBuilder grow() > 64MB
+
+
+client/clientOnly.cpp
+----
+* 10256 [code](http://github.com/mongodb/mongo/blob/master/client/clientOnly.cpp#L62) no createDirectClient in clientOnly
+
+
+client/connpool.cpp
+----
+* 13071 [code](http://github.com/mongodb/mongo/blob/master/client/connpool.cpp#L171) invalid hostname [" + host + "]
+* 13328 [code](http://github.com/mongodb/mongo/blob/master/client/connpool.cpp#L157) : connect failed " + url.toString() + " :
+
+
+client/connpool.h
+----
+* 11004 [code](http://github.com/mongodb/mongo/blob/master/client/connpool.h#L219) connection was returned to the pool already
+* 11005 [code](http://github.com/mongodb/mongo/blob/master/client/connpool.h#L225) connection was returned to the pool already
+* 13102 [code](http://github.com/mongodb/mongo/blob/master/client/connpool.h#L231) connection was returned to the pool already
+
+
+client/dbclient.cpp
+----
+* 10005 [code](http://github.com/mongodb/mongo/blob/master/client/dbclient.cpp#L483) listdatabases failed" , runCommand( "admin" , BSON( "listDatabases
+* 10006 [code](http://github.com/mongodb/mongo/blob/master/client/dbclient.cpp#L484) listDatabases.databases not array" , info["databases
+* 10007 [code](http://github.com/mongodb/mongo/blob/master/client/dbclient.cpp#L792) dropIndex failed
+* 10008 [code](http://github.com/mongodb/mongo/blob/master/client/dbclient.cpp#L799) dropIndexes failed
+* 10276 [code](http://github.com/mongodb/mongo/blob/master/client/dbclient.cpp#L545) DBClientBase::findN: transport error: " << getServerAddress() << " query:
+* 10278 [code](http://github.com/mongodb/mongo/blob/master/client/dbclient.cpp#L933) dbclient error communicating with server:
+* 10337 [code](http://github.com/mongodb/mongo/blob/master/client/dbclient.cpp#L885) object not valid
+* 11010 [code](http://github.com/mongodb/mongo/blob/master/client/dbclient.cpp#L284) count fails:
+* 13386 [code](http://github.com/mongodb/mongo/blob/master/client/dbclient.cpp#L669) socket error for mapping query
+* 13421 [code](http://github.com/mongodb/mongo/blob/master/client/dbclient.cpp#L103) trying to connect to invalid ConnectionString
+
+
+client/dbclient.h
+----
+* 10011 [code](http://github.com/mongodb/mongo/blob/master/client/dbclient.h#L501) no collection name
+* 9000 [code](http://github.com/mongodb/mongo/blob/master/client/dbclient.h#L806)
+
+
+client/dbclient_rs.cpp
+----
+* 10009 [code](http://github.com/mongodb/mongo/blob/master/client/dbclient_rs.cpp#L208) ReplicaSetMonitor no master found for set:
+* 13610 [code](http://github.com/mongodb/mongo/blob/master/client/dbclient_rs.cpp#L156) ConfigChangeHook already specified
+* 13639 [code](http://github.com/mongodb/mongo/blob/master/client/dbclient_rs.cpp#L453) can't connect to new replica set master [" << _masterHost.toString() << "] err:
+* 13642 [code](http://github.com/mongodb/mongo/blob/master/client/dbclient_rs.cpp#L79) need at least 1 node for a replica set
+
+
+client/dbclientcursor.cpp
+----
+* 13127 [code](http://github.com/mongodb/mongo/blob/master/client/dbclientcursor.cpp#L159) getMore: cursor didn't exist on server, possible restart or timeout?
+* 13422 [code](http://github.com/mongodb/mongo/blob/master/client/dbclientcursor.cpp#L207) DBClientCursor next() called but more() is false
+* 14821 [code](http://github.com/mongodb/mongo/blob/master/client/dbclientcursor.cpp#L265) No client or lazy client specified, cannot store multi-host connection.
+
+
+client/dbclientcursor.h
+----
+* 13106 [code](http://github.com/mongodb/mongo/blob/master/client/dbclientcursor.h#L78)
+* 13348 [code](http://github.com/mongodb/mongo/blob/master/client/dbclientcursor.h#L210) connection died
+* 13383 [code](http://github.com/mongodb/mongo/blob/master/client/dbclientcursor.h#L227) BatchIterator empty
+
+
+client/distlock.cpp
+----
+* 14023 [code](http://github.com/mongodb/mongo/blob/master/client/distlock.cpp#L582) remote time in cluster " << _conn.toString() << " is now skewed, cannot force lock.
+
+
+client/distlock_test.cpp
+----
+* 13678 [code](http://github.com/mongodb/mongo/blob/master/client/distlock_test.cpp#L374) Could not communicate with server " << server.toString() << " in cluster " << cluster.toString() << " to change skew by
+
+
+client/gridfs.cpp
+----
+* 10012 [code](http://github.com/mongodb/mongo/blob/master/client/gridfs.cpp#L90) file doesn't exist" , fileName == "-
+* 10013 [code](http://github.com/mongodb/mongo/blob/master/client/gridfs.cpp#L97) error opening file
+* 10014 [code](http://github.com/mongodb/mongo/blob/master/client/gridfs.cpp#L210) chunk is empty!
+* 10015 [code](http://github.com/mongodb/mongo/blob/master/client/gridfs.cpp#L242) doesn't exists
+* 13296 [code](http://github.com/mongodb/mongo/blob/master/client/gridfs.cpp#L64) invalid chunk size is specified
+* 13325 [code](http://github.com/mongodb/mongo/blob/master/client/gridfs.cpp#L236) couldn't open file:
+* 9008 [code](http://github.com/mongodb/mongo/blob/master/client/gridfs.cpp#L136) filemd5 failed
+
+
+client/model.cpp
+----
+* 10016 [code](http://github.com/mongodb/mongo/blob/master/client/model.cpp#L39) _id isn't set - needed for remove()" , _id["_id
+* 13121 [code](http://github.com/mongodb/mongo/blob/master/client/model.cpp#L81)
+* 9002 [code](http://github.com/mongodb/mongo/blob/master/client/model.cpp#L51) error on Model::remove:
+* 9003 [code](http://github.com/mongodb/mongo/blob/master/client/model.cpp#L123) error on Model::save:
+
+
+client/parallel.cpp
+----
+* 10017 [code](http://github.com/mongodb/mongo/blob/master/client/parallel.cpp#L80) cursor already done
+* 10018 [code](http://github.com/mongodb/mongo/blob/master/client/parallel.cpp#L335) no more items
+* 10019 [code](http://github.com/mongodb/mongo/blob/master/client/parallel.cpp#L704) no more elements
+* 13431 [code](http://github.com/mongodb/mongo/blob/master/client/parallel.cpp#L395) have to have sort key in projection and removing it
+* 13633 [code](http://github.com/mongodb/mongo/blob/master/client/parallel.cpp#L109) error querying server:
+* 14812 [code](http://github.com/mongodb/mongo/blob/master/client/parallel.cpp#L755) Error running command on server:
+* 14813 [code](http://github.com/mongodb/mongo/blob/master/client/parallel.cpp#L756) Command returned nothing
+
+
+client/syncclusterconnection.cpp
+----
+* 10022 [code](http://github.com/mongodb/mongo/blob/master/client/syncclusterconnection.cpp#L249) SyncClusterConnection::getMore not supported yet
+* 10023 [code](http://github.com/mongodb/mongo/blob/master/client/syncclusterconnection.cpp#L271) SyncClusterConnection bulk insert not implemented
+* 13053 [code](http://github.com/mongodb/mongo/blob/master/client/syncclusterconnection.cpp#L386) help failed: " << info , _commandOnActive( "admin" , BSON( name << "1" << "help
+* 13054 [code](http://github.com/mongodb/mongo/blob/master/client/syncclusterconnection.cpp#L215) write $cmd not supported in SyncClusterConnection::query for:
+* 13104 [code](http://github.com/mongodb/mongo/blob/master/client/syncclusterconnection.cpp#L170) SyncClusterConnection::findOne prepare failed:
+* 13105 [code](http://github.com/mongodb/mongo/blob/master/client/syncclusterconnection.cpp#L188)
+* 13119 [code](http://github.com/mongodb/mongo/blob/master/client/syncclusterconnection.cpp#L256) SyncClusterConnection::insert obj has to have an _id:
+* 13120 [code](http://github.com/mongodb/mongo/blob/master/client/syncclusterconnection.cpp#L289) SyncClusterConnection::update upsert query needs _id" , query.obj["_id
+* 13397 [code](http://github.com/mongodb/mongo/blob/master/client/syncclusterconnection.cpp#L364) SyncClusterConnection::say prepare failed:
+* 15848 [code](http://github.com/mongodb/mongo/blob/master/client/syncclusterconnection.cpp#L200) sync cluster of sync clusters?
+* 8001 [code](http://github.com/mongodb/mongo/blob/master/client/syncclusterconnection.cpp#L135) SyncClusterConnection write op failed:
+* 8002 [code](http://github.com/mongodb/mongo/blob/master/client/syncclusterconnection.cpp#L245) all servers down!
+* 8003 [code](http://github.com/mongodb/mongo/blob/master/client/syncclusterconnection.cpp#L261) SyncClusterConnection::insert prepare failed:
+* 8004 [code](http://github.com/mongodb/mongo/blob/master/client/syncclusterconnection.cpp#L50) SyncClusterConnection needs 3 servers
+* 8005 [code](http://github.com/mongodb/mongo/blob/master/client/syncclusterconnection.cpp#L295) SyncClusterConnection::udpate prepare failed:
+* 8006 [code](http://github.com/mongodb/mongo/blob/master/client/syncclusterconnection.cpp#L338) SyncClusterConnection::call can only be used directly for dbQuery
+* 8007 [code](http://github.com/mongodb/mongo/blob/master/client/syncclusterconnection.cpp#L342) SyncClusterConnection::call can't handle $cmd" , strstr( d.getns(), "$cmd
+* 8008 [code](http://github.com/mongodb/mongo/blob/master/client/syncclusterconnection.cpp#L358) all servers down!
+* 8020 [code](http://github.com/mongodb/mongo/blob/master/client/syncclusterconnection.cpp#L277) SyncClusterConnection::remove prepare failed:
+
+
+db/btree.cpp
+----
+* 10281 [code](http://github.com/mongodb/mongo/blob/master/db/btree.cpp#L132) assert is misdefined
+* 10282 [code](http://github.com/mongodb/mongo/blob/master/db/btree.cpp#L313) n==0 in btree popBack()
+* 10283 [code](http://github.com/mongodb/mongo/blob/master/db/btree.cpp#L320) rchild not null in btree popBack()
+* 10285 [code](http://github.com/mongodb/mongo/blob/master/db/btree.cpp#L1699) _insert: reuse key but lchild is not this->null
+* 10286 [code](http://github.com/mongodb/mongo/blob/master/db/btree.cpp#L1700) _insert: reuse key but rchild is not this->null
+* 10287 [code](http://github.com/mongodb/mongo/blob/master/db/btree.cpp#L73) btree: key+recloc already in index
+
+
+db/btree.h
+----
+* 13000 [code](http://github.com/mongodb/mongo/blob/master/db/btree.h#L357) invalid keyNode: " + BSON( "i" << i << "n
+
+
+db/btreebuilder.cpp
+----
+* 10288 [code](http://github.com/mongodb/mongo/blob/master/db/btreebuilder.cpp#L75) bad key order in BtreeBuilder - server internal error
+
+
+db/btreecursor.cpp
+----
+* 13384 [code](http://github.com/mongodb/mongo/blob/master/db/btreecursor.cpp#L307) BtreeCursor FieldRangeVector constructor doesn't accept special indexes
+* 14800 [code](http://github.com/mongodb/mongo/blob/master/db/btreecursor.cpp#L252) unsupported index version
+* 14801 [code](http://github.com/mongodb/mongo/blob/master/db/btreecursor.cpp#L268) unsupported index version
+* 15850 [code](http://github.com/mongodb/mongo/blob/master/db/btreecursor.cpp#L56) keyAt bucket deleted
+
+
+db/cap.cpp
+----
+* 10345 [code](http://github.com/mongodb/mongo/blob/master/db/cap.cpp#L258) passes >= maxPasses in capped collection alloc
+* 13415 [code](http://github.com/mongodb/mongo/blob/master/db/cap.cpp#L344) emptying the collection is not allowed
+* 13424 [code](http://github.com/mongodb/mongo/blob/master/db/cap.cpp#L411) collection must be capped
+* 13425 [code](http://github.com/mongodb/mongo/blob/master/db/cap.cpp#L412) background index build in progress
+* 13426 [code](http://github.com/mongodb/mongo/blob/master/db/cap.cpp#L413) indexes present
+
+
+db/client.cpp
+----
+* 10057 [code](http://github.com/mongodb/mongo/blob/master/db/client.cpp#L261)
+* 13005 [code](http://github.com/mongodb/mongo/blob/master/db/client.cpp#L228) can't create db, keeps getting closed
+* 14031 [code](http://github.com/mongodb/mongo/blob/master/db/client.cpp#L188) Can't take a write lock while out of disk space
+
+
+db/client.h
+----
+* 12600 [code](http://github.com/mongodb/mongo/blob/master/db/client.h#L228) releaseAndWriteLock: unlock_shared failed, probably recursive
+
+
+db/clientcursor.h
+----
+* 12051 [code](http://github.com/mongodb/mongo/blob/master/db/clientcursor.h#L110) clientcursor already in use? driver problem?
+* 12521 [code](http://github.com/mongodb/mongo/blob/master/db/clientcursor.h#L300) internal error: use of an unlocked ClientCursor
+
+
+db/cloner.cpp
+----
+* 10024 [code](http://github.com/mongodb/mongo/blob/master/db/cloner.cpp#L87) bad ns field for index during dbcopy
+* 10025 [code](http://github.com/mongodb/mongo/blob/master/db/cloner.cpp#L89) bad ns field for index during dbcopy [2]
+* 10026 [code](http://github.com/mongodb/mongo/blob/master/db/cloner.cpp#L649) source namespace does not exist
+* 10027 [code](http://github.com/mongodb/mongo/blob/master/db/cloner.cpp#L659) target namespace exists", cmdObj["dropTarget
+* 10289 [code](http://github.com/mongodb/mongo/blob/master/db/cloner.cpp#L292) useReplAuth is not written to replication log
+* 10290 [code](http://github.com/mongodb/mongo/blob/master/db/cloner.cpp#L363)
+* 13008 [code](http://github.com/mongodb/mongo/blob/master/db/cloner.cpp#L602) must call copydbgetnonce first
+
+
+db/cmdline.cpp
+----
+* 10033 [code](http://github.com/mongodb/mongo/blob/master/db/cmdline.cpp#L271) logpath has to be non-zero
+
+
+db/commands/distinct.cpp
+----
+* 10044 [code](http://github.com/mongodb/mongo/blob/master/db/commands/distinct.cpp#L116) distinct too big, 16mb cap
+
+
+db/commands/find_and_modify.cpp
+----
+* 12515 [code](http://github.com/mongodb/mongo/blob/master/db/commands/find_and_modify.cpp#L94) can't remove and update", cmdObj["update
+* 12516 [code](http://github.com/mongodb/mongo/blob/master/db/commands/find_and_modify.cpp#L126) must specify remove or update
+* 13329 [code](http://github.com/mongodb/mongo/blob/master/db/commands/find_and_modify.cpp#L71) upsert mode requires update field
+* 13330 [code](http://github.com/mongodb/mongo/blob/master/db/commands/find_and_modify.cpp#L72) upsert mode requires query field
+
+
+db/commands/group.cpp
+----
+* 10041 [code](http://github.com/mongodb/mongo/blob/master/db/commands/group.cpp#L42) invoke failed in $keyf:
+* 10042 [code](http://github.com/mongodb/mongo/blob/master/db/commands/group.cpp#L44) return of $key has to be an object
+* 10043 [code](http://github.com/mongodb/mongo/blob/master/db/commands/group.cpp#L111) group() can't handle more than 20000 unique keys
+* 9010 [code](http://github.com/mongodb/mongo/blob/master/db/commands/group.cpp#L117) reduce invoke failed:
+
+
+db/commands/isself.cpp
+----
+* 13469 [code](http://github.com/mongodb/mongo/blob/master/db/commands/isself.cpp#L25) getifaddrs failure:
+* 13470 [code](http://github.com/mongodb/mongo/blob/master/db/commands/isself.cpp#L42) getnameinfo() failed:
+* 13472 [code](http://github.com/mongodb/mongo/blob/master/db/commands/isself.cpp#L88) getnameinfo() failed:
+
+
+db/commands/mr.cpp
+----
+* 10074 [code](http://github.com/mongodb/mongo/blob/master/db/commands/mr.cpp#L155) need values
+* 10075 [code](http://github.com/mongodb/mongo/blob/master/db/commands/mr.cpp#L196) reduce -> multiple not supported yet
+* 10076 [code](http://github.com/mongodb/mongo/blob/master/db/commands/mr.cpp#L435) rename failed:
+* 10077 [code](http://github.com/mongodb/mongo/blob/master/db/commands/mr.cpp#L880) fast_emit takes 2 args
+* 10078 [code](http://github.com/mongodb/mongo/blob/master/db/commands/mr.cpp#L1148) something bad happened
+* 13069 [code](http://github.com/mongodb/mongo/blob/master/db/commands/mr.cpp#L881) an emit can't be more than half max bson size
+* 13070 [code](http://github.com/mongodb/mongo/blob/master/db/commands/mr.cpp#L176) value too large to reduce
+* 13522 [code](http://github.com/mongodb/mongo/blob/master/db/commands/mr.cpp#L258) unknown out specifier [" << t << "]
+* 13598 [code](http://github.com/mongodb/mongo/blob/master/db/commands/mr.cpp#L55) couldn't compile code for:
+* 13602 [code](http://github.com/mongodb/mongo/blob/master/db/commands/mr.cpp#L230) outType is no longer a valid option
+* 13604 [code](http://github.com/mongodb/mongo/blob/master/db/commands/mr.cpp#L392) too much data for in memory map/reduce
+* 13605 [code](http://github.com/mongodb/mongo/blob/master/db/commands/mr.cpp#L412) too much data for in memory map/reduce
+* 13606 [code](http://github.com/mongodb/mongo/blob/master/db/commands/mr.cpp#L266) 'out' has to be a string or an object
+* 13608 [code](http://github.com/mongodb/mongo/blob/master/db/commands/mr.cpp#L300) query has to be blank or an Object
+* 13609 [code](http://github.com/mongodb/mongo/blob/master/db/commands/mr.cpp#L307) sort has to be blank or an Object
+* 13630 [code](http://github.com/mongodb/mongo/blob/master/db/commands/mr.cpp#L346) userCreateNS failed for mr tempLong ns: " << _config.tempLong << " err:
+* 13631 [code](http://github.com/mongodb/mongo/blob/master/db/commands/mr.cpp#L331) userCreateNS failed for mr incLong ns: " << _config.incLong << " err:
+* 9014 [code](http://github.com/mongodb/mongo/blob/master/db/commands/mr.cpp#L73) map invoke failed:
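+
+Most of these fire while the map/reduce command parses its options; 13606 and 13522, for example, reject a malformed `out` specifier. A hedged sketch with the bundled C++ client (collection and field names are illustrative):
+
+    #include <iostream>
+    #include "client/dbclient.h"
+    using namespace mongo;
+
+    int main() {
+        DBClientConnection c;
+        c.connect("localhost");
+
+        const char* mapFn    = "function() { emit(this.k, 1); }";
+        const char* reduceFn = "function(k, vals) { return Array.sum(vals); }";
+        BSONObj info;
+
+        // 13606: 'out' has to be a string or an object -- a number is rejected
+        c.runCommand("test", BSON("mapreduce" << "events"
+                                   << "map" << mapFn << "reduce" << reduceFn
+                                   << "out" << 1), info);
+        std::cout << info.jsonString() << std::endl;
+
+        // an accepted form, for contrast: replace the contents of test.events_counts
+        c.runCommand("test", BSON("mapreduce" << "events"
+                                   << "map" << mapFn << "reduce" << reduceFn
+                                   << "out" << BSON("replace" << "events_counts")), info);
+        return 0;
+    }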
+
+
+db/common.cpp
+----
+* 10332 [code](http://github.com/mongodb/mongo/blob/master/db/common.cpp#L45) Expected CurrentTime type
+
+
+db/compact.cpp
+----
+* 13660 [code](http://github.com/mongodb/mongo/blob/master/db/compact.cpp#L244) namespace " << ns << " does not exist
+* 13661 [code](http://github.com/mongodb/mongo/blob/master/db/compact.cpp#L245) cannot compact capped collection
+* 14024 [code](http://github.com/mongodb/mongo/blob/master/db/compact.cpp#L86) compact error out of space during compaction
+* 14025 [code](http://github.com/mongodb/mongo/blob/master/db/compact.cpp#L184) compact error no space available to allocate
+* 14027 [code](http://github.com/mongodb/mongo/blob/master/db/compact.cpp#L236) can't compact a system namespace
+* 14028 [code](http://github.com/mongodb/mongo/blob/master/db/compact.cpp#L235) bad ns
+
+
+db/concurrency.h
+----
+* 13142 [code](http://github.com/mongodb/mongo/blob/master/db/concurrency.h#L134) timeout getting readlock
+
+
+db/curop.h
+----
+* 11600 [code](http://github.com/mongodb/mongo/blob/master/db/curop.h#L359) interrupted at shutdown
+* 11601 [code](http://github.com/mongodb/mongo/blob/master/db/curop.h#L361) interrupted
+* 12601 [code](http://github.com/mongodb/mongo/blob/master/db/curop.h#L247) CurOp not marked done yet
+
+
+db/cursor.h
+----
+* 13285 [code](http://github.com/mongodb/mongo/blob/master/db/cursor.h#L133) manual matcher config not allowed
+
+
+db/database.cpp
+----
+* 10028 [code](http://github.com/mongodb/mongo/blob/master/db/database.cpp#L47) db name is empty
+* 10029 [code](http://github.com/mongodb/mongo/blob/master/db/database.cpp#L48) bad db name [1]
+* 10030 [code](http://github.com/mongodb/mongo/blob/master/db/database.cpp#L49) bad db name [2]
+* 10031 [code](http://github.com/mongodb/mongo/blob/master/db/database.cpp#L50) bad char(s) in db name
+* 10032 [code](http://github.com/mongodb/mongo/blob/master/db/database.cpp#L51) db name too long
+* 10295 [code](http://github.com/mongodb/mongo/blob/master/db/database.cpp#L151) getFile(): bad file number value (corrupt db?): run repair
+* 12501 [code](http://github.com/mongodb/mongo/blob/master/db/database.cpp#L211) quota exceeded
+* 14810 [code](http://github.com/mongodb/mongo/blob/master/db/database.cpp#L224) couldn't allocate space (suitableFile)
+
+
+db/db.cpp
+----
+* 10296 [code](http://github.com/mongodb/mongo/blob/master/db/db.cpp#L436)
+* 10297 [code](http://github.com/mongodb/mongo/blob/master/db/db.cpp#L1232) Couldn't register Windows Ctrl-C handler
+* 12590 [code](http://github.com/mongodb/mongo/blob/master/db/db.cpp#L441)
+* 14026 [code](http://github.com/mongodb/mongo/blob/master/db/db.cpp#L288)
+
+
+db/db.h
+----
+* 10298 [code](http://github.com/mongodb/mongo/blob/master/db/db.h#L152) can't temprelease nested write lock
+* 10299 [code](http://github.com/mongodb/mongo/blob/master/db/db.h#L157) can't temprelease nested read lock
+* 13074 [code](http://github.com/mongodb/mongo/blob/master/db/db.h#L127) db name can't be empty
+* 13075 [code](http://github.com/mongodb/mongo/blob/master/db/db.h#L130) db name can't be empty
+* 13280 [code](http://github.com/mongodb/mongo/blob/master/db/db.h#L120) invalid db name:
+* 14814 [code](http://github.com/mongodb/mongo/blob/master/db/db.h#L162)
+* 14845 [code](http://github.com/mongodb/mongo/blob/master/db/db.h#L193)
+
+
+db/dbcommands.cpp
+----
+* 10039 [code](http://github.com/mongodb/mongo/blob/master/db/dbcommands.cpp#L780) can't drop collection with reserved $ character in name
+* 10040 [code](http://github.com/mongodb/mongo/blob/master/db/dbcommands.cpp#L1104) chunks out of order
+* 10301 [code](http://github.com/mongodb/mongo/blob/master/db/dbcommands.cpp#L1439) source collection " + fromNs + " does not exist
+* 13049 [code](http://github.com/mongodb/mongo/blob/master/db/dbcommands.cpp#L1570) godinsert must specify a collection
+* 13281 [code](http://github.com/mongodb/mongo/blob/master/db/dbcommands.cpp#L1123) File deleted during filemd5 command
+* 13416 [code](http://github.com/mongodb/mongo/blob/master/db/dbcommands.cpp#L1702) captrunc must specify a collection
+* 13417 [code](http://github.com/mongodb/mongo/blob/master/db/dbcommands.cpp#L1710) captrunc collection not found or empty
+* 13418 [code](http://github.com/mongodb/mongo/blob/master/db/dbcommands.cpp#L1712) captrunc invalid n
+* 13428 [code](http://github.com/mongodb/mongo/blob/master/db/dbcommands.cpp#L1729) emptycapped must specify a collection
+* 13429 [code](http://github.com/mongodb/mongo/blob/master/db/dbcommands.cpp#L1732) emptycapped no such collection
+* 14832 [code](http://github.com/mongodb/mongo/blob/master/db/dbcommands.cpp#L847) specify size:<n> when capped is true
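+
+Assertion 14832, for instance, fires when a create command asks for a capped collection without a byte size. A minimal sketch with the bundled C++ client (database and collection names are illustrative):
+
+    #include <iostream>
+    #include "client/dbclient.h"
+    using namespace mongo;
+
+    int main() {
+        DBClientConnection c;
+        c.connect("localhost");
+        BSONObj info;
+
+        // 14832: capped:true but no size:<n>
+        c.runCommand("test", BSON("create" << "log" << "capped" << true), info);
+        std::cout << info.jsonString() << std::endl;
+
+        // the accepted form: a capped collection with an explicit size in bytes
+        c.createCollection("test.log", 1024 * 1024, /*capped*/ true, /*max*/ 0, &info);
+        return 0;
+    }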
+
+
+db/dbcommands_admin.cpp
+----
+* 12032 [code](http://github.com/mongodb/mongo/blob/master/db/dbcommands_admin.cpp#L485) fsync: sync option must be true when using lock
+* 12033 [code](http://github.com/mongodb/mongo/blob/master/db/dbcommands_admin.cpp#L491) fsync: profiling must be off to enter locked mode
+* 12034 [code](http://github.com/mongodb/mongo/blob/master/db/dbcommands_admin.cpp#L484) fsync: can't lock while an unlock is pending
+
+
+db/dbcommands_generic.cpp
+----
+* 10038 [code](http://github.com/mongodb/mongo/blob/master/db/dbcommands_generic.cpp#L316) forced error
+
+
+db/dbeval.cpp
+----
+* 10046 [code](http://github.com/mongodb/mongo/blob/master/db/dbeval.cpp#L42) eval needs Code
+* 12598 [code](http://github.com/mongodb/mongo/blob/master/db/dbeval.cpp#L127) $eval reads unauthorized
+
+
+db/dbhelpers.cpp
+----
+* 10303 [code](http://github.com/mongodb/mongo/blob/master/db/dbhelpers.cpp#L326) {autoIndexId:false}
+* 13430 [code](http://github.com/mongodb/mongo/blob/master/db/dbhelpers.cpp#L161) no _id index
+* 9011 [code](http://github.com/mongodb/mongo/blob/master/db/dbhelpers.cpp#L68) Not an index cursor
+
+
+db/dbmessage.h
+----
+* 10304 [code](http://github.com/mongodb/mongo/blob/master/db/dbmessage.h#L195) Client Error: Remaining data too small for BSON object
+* 10305 [code](http://github.com/mongodb/mongo/blob/master/db/dbmessage.h#L197) Client Error: Invalid object size
+* 10306 [code](http://github.com/mongodb/mongo/blob/master/db/dbmessage.h#L198) Client Error: Next object larger than space left in message
+* 10307 [code](http://github.com/mongodb/mongo/blob/master/db/dbmessage.h#L201) Client Error: bad object in message
+* 13066 [code](http://github.com/mongodb/mongo/blob/master/db/dbmessage.h#L193) Message contains no documents
+
+
+db/dbwebserver.cpp
+----
+* 13453 [code](http://github.com/mongodb/mongo/blob/master/db/dbwebserver.cpp#L171) server not started with --jsonp
+
+
+db/dur.cpp
+----
+* 13599 [code](http://github.com/mongodb/mongo/blob/master/db/dur.cpp#L377) Written data does not match in-memory view. Missing WriteIntent?
+* 13616 [code](http://github.com/mongodb/mongo/blob/master/db/dur.cpp#L199) can't disable durability with pending writes
+
+
+db/dur_journal.cpp
+----
+* 13611 [code](http://github.com/mongodb/mongo/blob/master/db/dur_journal.cpp#L504) can't read lsn file in journal directory :
+* 13614 [code](http://github.com/mongodb/mongo/blob/master/db/dur_journal.cpp#L465) unexpected version number of lsn file in journal/ directory got:
+
+
+db/dur_recover.cpp
+----
+* 13531 [code](http://github.com/mongodb/mongo/blob/master/db/dur_recover.cpp#L76) unexpected files in journal directory " << dir.string() << " :
+* 13532 [code](http://github.com/mongodb/mongo/blob/master/db/dur_recover.cpp#L83)
+* 13533 [code](http://github.com/mongodb/mongo/blob/master/db/dur_recover.cpp#L145) problem processing journal file during recovery
+* 13535 [code](http://github.com/mongodb/mongo/blob/master/db/dur_recover.cpp#L387) recover abrupt journal file end
+* 13536 [code](http://github.com/mongodb/mongo/blob/master/db/dur_recover.cpp#L329) journal version number mismatch
+* 13537 [code](http://github.com/mongodb/mongo/blob/master/db/dur_recover.cpp#L331) journal header invalid
+* 13544 [code](http://github.com/mongodb/mongo/blob/master/db/dur_recover.cpp#L369) recover error couldn't open
+* 13545 [code](http://github.com/mongodb/mongo/blob/master/db/dur_recover.cpp#L394) --durOptions " << (int) CmdLine::DurScanOnly << " (scan only) specified
+* 13594 [code](http://github.com/mongodb/mongo/blob/master/db/dur_recover.cpp#L125) journal checksum doesn't match
+* 13622 [code](http://github.com/mongodb/mongo/blob/master/db/dur_recover.cpp#L240) Trying to write past end of file in WRITETODATAFILES
+
+
+db/durop.cpp
+----
+* 13546 [code](http://github.com/mongodb/mongo/blob/master/db/durop.cpp#L51) journal recover: unrecognized opcode in journal
+* 13547 [code](http://github.com/mongodb/mongo/blob/master/db/durop.cpp#L142) recover couldn't create file
+* 13628 [code](http://github.com/mongodb/mongo/blob/master/db/durop.cpp#L156) recover failure writing file
+
+
+db/extsort.cpp
+----
+* 10048 [code](http://github.com/mongodb/mongo/blob/master/db/extsort.cpp#L70) already sorted
+* 10049 [code](http://github.com/mongodb/mongo/blob/master/db/extsort.cpp#L95) sorted already
+* 10050 [code](http://github.com/mongodb/mongo/blob/master/db/extsort.cpp#L116) bad
+* 10308 [code](http://github.com/mongodb/mongo/blob/master/db/extsort.cpp#L222) mmap failed
+
+
+db/extsort.h
+----
+* 10052 [code](http://github.com/mongodb/mongo/blob/master/db/extsort.h#L115) not sorted
+
+
+db/geo/2d.cpp
+----
+* 13022 [code](http://github.com/mongodb/mongo/blob/master/db/geo/2d.cpp#L110) can't have 2 geo field
+* 13023 [code](http://github.com/mongodb/mongo/blob/master/db/geo/2d.cpp#L111) 2d has to be first in index
+* 13024 [code](http://github.com/mongodb/mongo/blob/master/db/geo/2d.cpp#L120) no geo field specified
+* 13026 [code](http://github.com/mongodb/mongo/blob/master/db/geo/2d.cpp#L310) geo values have to be numbers:
+* 13027 [code](http://github.com/mongodb/mongo/blob/master/db/geo/2d.cpp#L333) point not in interval of [ " << _min << ", " << _max << " )
+* 13028 [code](http://github.com/mongodb/mongo/blob/master/db/geo/2d.cpp#L124) bits in geo index must be between 1 and 32
+* 13042 [code](http://github.com/mongodb/mongo/blob/master/db/geo/2d.cpp#L2396) missing geo field (" + _geo + ") in :
+* 13046 [code](http://github.com/mongodb/mongo/blob/master/db/geo/2d.cpp#L2446) 'near' param missing/invalid
+* 13057 [code](http://github.com/mongodb/mongo/blob/master/db/geo/2d.cpp#L2368) $within has to take an object or array
+* 13058 [code](http://github.com/mongodb/mongo/blob/master/db/geo/2d.cpp#L2386) unknown $with type:
+* 13059 [code](http://github.com/mongodb/mongo/blob/master/db/geo/2d.cpp#L2372) $center has to take an object or array
+* 13060 [code](http://github.com/mongodb/mongo/blob/master/db/geo/2d.cpp#L1974) $center needs 2 fields (middle,max distance)
+* 13061 [code](http://github.com/mongodb/mongo/blob/master/db/geo/2d.cpp#L1988) need a max distance >= 0
+* 13063 [code](http://github.com/mongodb/mongo/blob/master/db/geo/2d.cpp#L2110) $box needs 2 fields (bottomLeft,topRight)
+* 13064 [code](http://github.com/mongodb/mongo/blob/master/db/geo/2d.cpp#L2119) need an area > 0
+* 13065 [code](http://github.com/mongodb/mongo/blob/master/db/geo/2d.cpp#L2377) $box has to take an object or array
+* 13067 [code](http://github.com/mongodb/mongo/blob/master/db/geo/2d.cpp#L305) geo field is empty
+* 13068 [code](http://github.com/mongodb/mongo/blob/master/db/geo/2d.cpp#L307) geo field only has 1 element
+* 13460 [code](http://github.com/mongodb/mongo/blob/master/db/geo/2d.cpp#L2011) invalid $center query type:
+* 13461 [code](http://github.com/mongodb/mongo/blob/master/db/geo/2d.cpp#L1999) Spherical MaxDistance > PI. Are you sure you are using radians?
+* 13462 [code](http://github.com/mongodb/mongo/blob/master/db/geo/2d.cpp#L2006) Spherical distance would require wrapping, which isn't implemented yet
+* 13464 [code](http://github.com/mongodb/mongo/blob/master/db/geo/2d.cpp#L2342) invalid $near search type:
+* 13654 [code](http://github.com/mongodb/mongo/blob/master/db/geo/2d.cpp#L234) location object expected, location array not in correct format
+* 13656 [code](http://github.com/mongodb/mongo/blob/master/db/geo/2d.cpp#L1979) the first field of $center object must be a location object
+* 14029 [code](http://github.com/mongodb/mongo/blob/master/db/geo/2d.cpp#L2382) $polygon has to take an object or array
+* 14030 [code](http://github.com/mongodb/mongo/blob/master/db/geo/2d.cpp#L2226) polygon must be defined by three points or more
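+
+The bulk of these are raised while parsing 2d index definitions and $near/$within queries. A sketch of one such case (13060, a $center clause missing its radius) with the bundled C++ client; collection and field names are illustrative:
+
+    #include <iostream>
+    #include <memory>
+    #include "client/dbclient.h"
+    using namespace mongo;
+
+    int main() {
+        DBClientConnection c;
+        c.connect("localhost");
+        const char* ns = "test.places";
+
+        // the 2d field has to come first in the key pattern (13023) and appear only once (13022)
+        c.ensureIndex(ns, BSON("loc" << "2d"));
+        c.insert(ns, BSON("loc" << BSON_ARRAY(52.5 << 13.4) << "name" << "a"));
+
+        // 13060: $center needs [ middle, maxDistance ] -- the radius is missing here
+        try {
+            std::auto_ptr<DBClientCursor> cur = c.query(ns,
+                QUERY("loc" << BSON("$within" << BSON("$center" << BSON_ARRAY(BSON_ARRAY(52.5 << 13.4))))));
+            while (cur->more())
+                cur->nextSafe();
+        }
+        catch (const DBException& e) {
+            std::cout << "query rejected: " << e.what() << std::endl;
+        }
+        return 0;
+    }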
+
+
+db/geo/core.h
+----
+* 13047 [code](http://github.com/mongodb/mongo/blob/master/db/geo/core.h#L93) wrong type for geo index. if you're using a pre-release version, need to rebuild index
+* 14808 [code](http://github.com/mongodb/mongo/blob/master/db/geo/core.h#L474) point " << p.toString() << " must be in earth-like bounds of long : [-180, 180), lat : [-90, 90]
+
+
+db/geo/haystack.cpp
+----
+* 13314 [code](http://github.com/mongodb/mongo/blob/master/db/geo/haystack.cpp#L89) can't have 2 geo fields
+* 13315 [code](http://github.com/mongodb/mongo/blob/master/db/geo/haystack.cpp#L90) 2d has to be first in index
+* 13316 [code](http://github.com/mongodb/mongo/blob/master/db/geo/haystack.cpp#L99) no geo field specified
+* 13317 [code](http://github.com/mongodb/mongo/blob/master/db/geo/haystack.cpp#L100) no other fields specified
+* 13318 [code](http://github.com/mongodb/mongo/blob/master/db/geo/haystack.cpp#L298) near needs to be an array
+* 13319 [code](http://github.com/mongodb/mongo/blob/master/db/geo/haystack.cpp#L299) maxDistance needs a number
+* 13320 [code](http://github.com/mongodb/mongo/blob/master/db/geo/haystack.cpp#L300) search needs to be an object
+* 13321 [code](http://github.com/mongodb/mongo/blob/master/db/geo/haystack.cpp#L80) need bucketSize
+* 13322 [code](http://github.com/mongodb/mongo/blob/master/db/geo/haystack.cpp#L106) not a number
+* 13323 [code](http://github.com/mongodb/mongo/blob/master/db/geo/haystack.cpp#L141) latlng not an array
+* 13326 [code](http://github.com/mongodb/mongo/blob/master/db/geo/haystack.cpp#L101) quadrant search can only have 1 other field for now
+
+
+db/index.cpp
+----
+* 10096 [code](http://github.com/mongodb/mongo/blob/master/db/index.cpp#L269) invalid ns to index
+* 10097 [code](http://github.com/mongodb/mongo/blob/master/db/index.cpp#L270) bad table to index name on add index attempt
+* 10098 [code](http://github.com/mongodb/mongo/blob/master/db/index.cpp#L277)
+* 11001 [code](http://github.com/mongodb/mongo/blob/master/db/index.cpp#L68)
+* 12504 [code](http://github.com/mongodb/mongo/blob/master/db/index.cpp#L284)
+* 12505 [code](http://github.com/mongodb/mongo/blob/master/db/index.cpp#L314)
+* 12523 [code](http://github.com/mongodb/mongo/blob/master/db/index.cpp#L265) no index name specified
+* 12524 [code](http://github.com/mongodb/mongo/blob/master/db/index.cpp#L274) index key pattern too large
+* 12588 [code](http://github.com/mongodb/mongo/blob/master/db/index.cpp#L320) cannot add index with a background operation in progress
+* 14803 [code](http://github.com/mongodb/mongo/blob/master/db/index.cpp#L355) this version of mongod cannot build new indexes of version number
+* 14819 [code](http://github.com/mongodb/mongo/blob/master/db/index.cpp#L192)
+
+
+db/index.h
+----
+* 14802 [code](http://github.com/mongodb/mongo/blob/master/db/index.h#L159) index v field should be Integer type
+
+
+db/indexkey.cpp
+----
+* 10088 [code](http://github.com/mongodb/mongo/blob/master/db/indexkey.cpp#L163)
+* 13007 [code](http://github.com/mongodb/mongo/blob/master/db/indexkey.cpp#L59) can only have 1 index plugin / bad index key pattern
+* 13529 [code](http://github.com/mongodb/mongo/blob/master/db/indexkey.cpp#L76) sparse only works for single field keys
+
+
+db/instance.cpp
+----
+* 10054 [code](http://github.com/mongodb/mongo/blob/master/db/instance.cpp#L464) not master
+* 10055 [code](http://github.com/mongodb/mongo/blob/master/db/instance.cpp#L451) update object too large
+* 10056 [code](http://github.com/mongodb/mongo/blob/master/db/instance.cpp#L492) not master
+* 10058 [code](http://github.com/mongodb/mongo/blob/master/db/instance.cpp#L619) not master
+* 10059 [code](http://github.com/mongodb/mongo/blob/master/db/instance.cpp#L576) object to insert too large
+* 10309 [code](http://github.com/mongodb/mongo/blob/master/db/instance.cpp#L921) Unable to create/open lock file: " << name << ' ' << errnoWithDescription() << " Is a mongod instance already running?
+* 10310 [code](http://github.com/mongodb/mongo/blob/master/db/instance.cpp#L926) Unable to acquire lock for lockfilepath:
+* 12596 [code](http://github.com/mongodb/mongo/blob/master/db/instance.cpp#L979) old lock file
+* 13004 [code](http://github.com/mongodb/mongo/blob/master/db/instance.cpp#L396) sent negative cursors to kill:
+* 13073 [code](http://github.com/mongodb/mongo/blob/master/db/instance.cpp#L531) shutting down
+* 13342 [code](http://github.com/mongodb/mongo/blob/master/db/instance.cpp#L997) Unable to truncate lock file
+* 13455 [code](http://github.com/mongodb/mongo/blob/master/db/instance.cpp#L838) dbexit timed out getting lock
+* 13511 [code](http://github.com/mongodb/mongo/blob/master/db/instance.cpp#L582) document to insert can't have $ fields
+* 13597 [code](http://github.com/mongodb/mongo/blob/master/db/instance.cpp#L989) can't start without --journal enabled when journal/ files are present
+* 13618 [code](http://github.com/mongodb/mongo/blob/master/db/instance.cpp#L1014) can't start without --journal enabled when journal/ files are present
+* 13625 [code](http://github.com/mongodb/mongo/blob/master/db/instance.cpp#L993) Unable to truncate lock file
+* 13627 [code](http://github.com/mongodb/mongo/blob/master/db/instance.cpp#L915) Unable to create/open lock file: " << name << ' ' << m << " Is a mongod instance already running?
+* 13637 [code](http://github.com/mongodb/mongo/blob/master/db/instance.cpp#L722) count failed in DBDirectClient:
+* 13658 [code](http://github.com/mongodb/mongo/blob/master/db/instance.cpp#L395) bad kill cursors size:
+* 13659 [code](http://github.com/mongodb/mongo/blob/master/db/instance.cpp#L394) sent 0 cursors to kill
+
+
+db/jsobj.cpp
+----
+* 10060 [code](http://github.com/mongodb/mongo/blob/master/db/jsobj.cpp#L519) woSortOrder needs a non-empty sortKey
+* 10061 [code](http://github.com/mongodb/mongo/blob/master/db/jsobj.cpp#L1101) type not supported for appendMinElementForType
+* 10311 [code](http://github.com/mongodb/mongo/blob/master/db/jsobj.cpp#L85)
+* 10312 [code](http://github.com/mongodb/mongo/blob/master/db/jsobj.cpp#L243)
+* 12579 [code](http://github.com/mongodb/mongo/blob/master/db/jsobj.cpp#L823) unhandled cases in BSONObj okForStorage
+* 14853 [code](http://github.com/mongodb/mongo/blob/master/db/jsobj.cpp#L1154) type not supported for appendMaxElementForType
+
+
+db/json.cpp
+----
+* 10338 [code](http://github.com/mongodb/mongo/blob/master/db/json.cpp#L230) Invalid use of reserved field name
+* 10339 [code](http://github.com/mongodb/mongo/blob/master/db/json.cpp#L374) Badly formatted bindata
+* 10340 [code](http://github.com/mongodb/mongo/blob/master/db/json.cpp#L588) Failure parsing JSON string near:
+
+
+db/lasterror.cpp
+----
+* 13649 [code](http://github.com/mongodb/mongo/blob/master/db/lasterror.cpp#L88) no operation yet
+
+
+db/matcher.cpp
+----
+* 10066 [code](http://github.com/mongodb/mongo/blob/master/db/matcher.cpp#L322) $where may only appear once in query
+* 10067 [code](http://github.com/mongodb/mongo/blob/master/db/matcher.cpp#L323) $where query, but no script engine
+* 10068 [code](http://github.com/mongodb/mongo/blob/master/db/matcher.cpp#L192) invalid operator:
+* 10069 [code](http://github.com/mongodb/mongo/blob/master/db/matcher.cpp#L278) BUG - can't operator for:
+* 10070 [code](http://github.com/mongodb/mongo/blob/master/db/matcher.cpp#L910) $where compile error
+* 10071 [code](http://github.com/mongodb/mongo/blob/master/db/matcher.cpp#L925)
+* 10072 [code](http://github.com/mongodb/mongo/blob/master/db/matcher.cpp#L929) unknown error in invocation of $where function
+* 10073 [code](http://github.com/mongodb/mongo/blob/master/db/matcher.cpp#L100) mod can't be 0
+* 10341 [code](http://github.com/mongodb/mongo/blob/master/db/matcher.cpp#L82) scope has to be created first!
+* 10342 [code](http://github.com/mongodb/mongo/blob/master/db/matcher.cpp#L1080) pcre not compiled with utf8 support
+* 12517 [code](http://github.com/mongodb/mongo/blob/master/db/matcher.cpp#L107) $elemMatch needs an Object
+* 13020 [code](http://github.com/mongodb/mongo/blob/master/db/matcher.cpp#L156) with $all, can't mix $elemMatch and others
+* 13021 [code](http://github.com/mongodb/mongo/blob/master/db/matcher.cpp#L587) $all/$elemMatch needs to be applied to array
+* 13029 [code](http://github.com/mongodb/mongo/blob/master/db/matcher.cpp#L269) can't use $not with $options, use BSON regex type instead
+* 13030 [code](http://github.com/mongodb/mongo/blob/master/db/matcher.cpp#L372) $not cannot be empty
+* 13031 [code](http://github.com/mongodb/mongo/blob/master/db/matcher.cpp#L382) invalid use of $not
+* 13032 [code](http://github.com/mongodb/mongo/blob/master/db/matcher.cpp#L258) can't use $not with $regex, use BSON regex type instead
+* 13086 [code](http://github.com/mongodb/mongo/blob/master/db/matcher.cpp#L284) $and/$or/$nor must be a nonempty array
+* 13087 [code](http://github.com/mongodb/mongo/blob/master/db/matcher.cpp#L288) $and/$or/$nor match element must be an object
+* 13089 [code](http://github.com/mongodb/mongo/blob/master/db/matcher.cpp#L324) no current client needed for $where
+* 13276 [code](http://github.com/mongodb/mongo/blob/master/db/matcher.cpp#L217) $in needs an array
+* 13277 [code](http://github.com/mongodb/mongo/blob/master/db/matcher.cpp#L228) $nin needs an array
+* 13629 [code](http://github.com/mongodb/mongo/blob/master/db/matcher.cpp#L314) can't have undefined in a query expression
+* 14844 [code](http://github.com/mongodb/mongo/blob/master/db/matcher.cpp#L408) $atomic specifier must be a top level field
+
+
+db/mongommf.cpp
+----
+* 13520 [code](http://github.com/mongodb/mongo/blob/master/db/mongommf.cpp#L260) MongoMMF only supports filenames in a certain format
+* 13636 [code](http://github.com/mongodb/mongo/blob/master/db/mongommf.cpp#L286) file " << filename() << " open/create failed in createPrivateMap (look in log for more information)
+
+
+db/mongomutex.h
+----
+* 10293 [code](http://github.com/mongodb/mongo/blob/master/db/mongomutex.h#L235) internal error: locks are not upgradeable:
+* 12599 [code](http://github.com/mongodb/mongo/blob/master/db/mongomutex.h#L101) internal error: attempt to unlock when wasn't in a write lock
+
+
+db/namespace-inl.h
+----
+* 10080 [code](http://github.com/mongodb/mongo/blob/master/db/namespace-inl.h#L35) ns name too long, max size is 128
+* 10348 [code](http://github.com/mongodb/mongo/blob/master/db/namespace-inl.h#L45) $extra: ns name too long
+* 10349 [code](http://github.com/mongodb/mongo/blob/master/db/namespace-inl.h#L103) E12000 idxNo fails
+* 13283 [code](http://github.com/mongodb/mongo/blob/master/db/namespace-inl.h#L81) Missing Extra
+* 14045 [code](http://github.com/mongodb/mongo/blob/master/db/namespace-inl.h#L82) missing Extra
+* 14823 [code](http://github.com/mongodb/mongo/blob/master/db/namespace-inl.h#L89) missing extra
+* 14824 [code](http://github.com/mongodb/mongo/blob/master/db/namespace-inl.h#L90) missing Extra
+
+
+db/namespace.cpp
+----
+* 10079 [code](http://github.com/mongodb/mongo/blob/master/db/namespace.cpp#L176) bad .ns file length, cannot open database
+* 10082 [code](http://github.com/mongodb/mongo/blob/master/db/namespace.cpp#L469) allocExtra: too many namespaces/collections
+* 10343 [code](http://github.com/mongodb/mongo/blob/master/db/namespace.cpp#L183) bad lenForNewNsFiles
+* 10346 [code](http://github.com/mongodb/mongo/blob/master/db/namespace.cpp#L532) not implemented
+* 10350 [code](http://github.com/mongodb/mongo/blob/master/db/namespace.cpp#L464) allocExtra: base ns missing?
+* 10351 [code](http://github.com/mongodb/mongo/blob/master/db/namespace.cpp#L465) allocExtra: extra already exists
+* 14037 [code](http://github.com/mongodb/mongo/blob/master/db/namespace.cpp#L642) can't create user databases on a --configsvr instance
+
+
+db/namespace.h
+----
+* 10081 [code](http://github.com/mongodb/mongo/blob/master/db/namespace.h#L574) too many namespaces/collections
+
+
+db/nonce.cpp
+----
+* 10352 [code](http://github.com/mongodb/mongo/blob/master/db/nonce.cpp#L32) Security is a singleton class
+* 10353 [code](http://github.com/mongodb/mongo/blob/master/db/nonce.cpp#L42) can't open dev/urandom
+* 10354 [code](http://github.com/mongodb/mongo/blob/master/db/nonce.cpp#L51) md5 unit test fails
+* 10355 [code](http://github.com/mongodb/mongo/blob/master/db/nonce.cpp#L60) devrandom failed
+
+
+db/oplog.cpp
+----
+* 13044 [code](http://github.com/mongodb/mongo/blob/master/db/oplog.cpp#L518) no ts field in query
+* 13257 [code](http://github.com/mongodb/mongo/blob/master/db/oplog.cpp#L341)
+* 13288 [code](http://github.com/mongodb/mongo/blob/master/db/oplog.cpp#L51) replSet error write op to db before replSet initialized
+* 13312 [code](http://github.com/mongodb/mongo/blob/master/db/oplog.cpp#L138) replSet error : logOp() but not primary?
+* 13347 [code](http://github.com/mongodb/mongo/blob/master/db/oplog.cpp#L174) local.oplog.rs missing. did you drop it? if so restart server
+* 13389 [code](http://github.com/mongodb/mongo/blob/master/db/oplog.cpp#L70) local.oplog.rs missing. did you drop it? if so restart server
+* 14038 [code](http://github.com/mongodb/mongo/blob/master/db/oplog.cpp#L461) invalid _findingStartMode
+* 14825 [code](http://github.com/mongodb/mongo/blob/master/db/oplog.cpp#L694) error in applyOperation : unknown opType
+* 14834 [code](http://github.com/mongodb/mongo/blob/master/db/oplog.cpp#L478) empty extent found during finding start scan
+
+
+db/oplog.h
+----
+* 14835 [code](http://github.com/mongodb/mongo/blob/master/db/oplog.h#L82)
+
+
+db/ops/delete.cpp
+----
+* 10100 [code](http://github.com/mongodb/mongo/blob/master/db/ops/delete.cpp#L111) cannot delete from collection with reserved $ in name
+* 10101 [code](http://github.com/mongodb/mongo/blob/master/db/ops/delete.cpp#L118) can't remove from a capped collection
+* 12050 [code](http://github.com/mongodb/mongo/blob/master/db/ops/delete.cpp#L107) cannot delete from system namespace
+* 13340 [code](http://github.com/mongodb/mongo/blob/master/db/ops/delete.cpp#L48) cursor dropped during delete
+
+
+db/ops/query.cpp
+----
+* 10110 [code](http://github.com/mongodb/mongo/blob/master/db/ops/query.cpp#L855) bad query object
+* 13051 [code](http://github.com/mongodb/mongo/blob/master/db/ops/query.cpp#L868) tailable cursor requested on non capped collection
+* 13052 [code](http://github.com/mongodb/mongo/blob/master/db/ops/query.cpp#L874) only {$natural:1} order allowed for tailable cursor
+* 13530 [code](http://github.com/mongodb/mongo/blob/master/db/ops/query.cpp#L833) bad or malformed command request?
+* 13638 [code](http://github.com/mongodb/mongo/blob/master/db/ops/query.cpp#L671) client cursor dropped during explain query yield
+* 14820 [code](http://github.com/mongodb/mongo/blob/master/db/ops/query.cpp#L903) capped collections have no _id index by default, can only query by _id if one added
+* 14833 [code](http://github.com/mongodb/mongo/blob/master/db/ops/query.cpp#L116) auth error
+
+
+db/ops/query.h
+----
+* 10102 [code](http://github.com/mongodb/mongo/blob/master/db/ops/query.h#L57) bad order array
+* 10103 [code](http://github.com/mongodb/mongo/blob/master/db/ops/query.h#L58) bad order array [2]
+* 10104 [code](http://github.com/mongodb/mongo/blob/master/db/ops/query.h#L61) too many ordering elements
+* 10105 [code](http://github.com/mongodb/mongo/blob/master/db/ops/query.h#L140) bad skip value in query
+* 12001 [code](http://github.com/mongodb/mongo/blob/master/db/ops/query.h#L219) E12001 can't sort with $snapshot
+* 12002 [code](http://github.com/mongodb/mongo/blob/master/db/ops/query.h#L220) E12002 can't use hint with $snapshot
+* 13513 [code](http://github.com/mongodb/mongo/blob/master/db/ops/query.h#L189) sort must be an object or array
+
+
+db/ops/update.cpp
+----
+* 10131 [code](http://github.com/mongodb/mongo/blob/master/db/ops/update.cpp#L121) $push can only be applied to an array
+* 10132 [code](http://github.com/mongodb/mongo/blob/master/db/ops/update.cpp#L192) $pushAll can only be applied to an array
+* 10133 [code](http://github.com/mongodb/mongo/blob/master/db/ops/update.cpp#L193) $pushAll has to be passed an array
+* 10134 [code](http://github.com/mongodb/mongo/blob/master/db/ops/update.cpp#L217) $pull/$pullAll can only be applied to an array
+* 10135 [code](http://github.com/mongodb/mongo/blob/master/db/ops/update.cpp#L250) $pop can only be applied to an array
+* 10136 [code](http://github.com/mongodb/mongo/blob/master/db/ops/update.cpp#L286) $bit needs an array
+* 10137 [code](http://github.com/mongodb/mongo/blob/master/db/ops/update.cpp#L287) $bit can only be applied to numbers
+* 10138 [code](http://github.com/mongodb/mongo/blob/master/db/ops/update.cpp#L288) $bit cannot update a value of type double
+* 10139 [code](http://github.com/mongodb/mongo/blob/master/db/ops/update.cpp#L296) $bit field must be number
+* 10140 [code](http://github.com/mongodb/mongo/blob/master/db/ops/update.cpp#L406) Cannot apply $inc modifier to non-number
+* 10141 [code](http://github.com/mongodb/mongo/blob/master/db/ops/update.cpp#L428) Cannot apply $push/$pushAll modifier to non-array
+* 10142 [code](http://github.com/mongodb/mongo/blob/master/db/ops/update.cpp#L434) Cannot apply $pull/$pullAll modifier to non-array
+* 10143 [code](http://github.com/mongodb/mongo/blob/master/db/ops/update.cpp#L455) Cannot apply $pop modifier to non-array
+* 10145 [code](http://github.com/mongodb/mongo/blob/master/db/ops/update.cpp#L667) LEFT_SUBFIELD only supports Object: " << field << " not:
+* 10147 [code](http://github.com/mongodb/mongo/blob/master/db/ops/update.cpp#L805) Invalid modifier specified:
+* 10148 [code](http://github.com/mongodb/mongo/blob/master/db/ops/update.cpp#L817) Mod on _id not allowed
+* 10149 [code](http://github.com/mongodb/mongo/blob/master/db/ops/update.cpp#L818) Invalid mod field name, may not end in a period
+* 10150 [code](http://github.com/mongodb/mongo/blob/master/db/ops/update.cpp#L819) Field name duplication not allowed with modifiers
+* 10151 [code](http://github.com/mongodb/mongo/blob/master/db/ops/update.cpp#L820) have conflicting mods in update
+* 10152 [code](http://github.com/mongodb/mongo/blob/master/db/ops/update.cpp#L821) Modifier $inc allowed for numbers only
+* 10153 [code](http://github.com/mongodb/mongo/blob/master/db/ops/update.cpp#L822) Modifier $pushAll/pullAll allowed for arrays only
+* 10154 [code](http://github.com/mongodb/mongo/blob/master/db/ops/update.cpp#L898) Modifiers and non-modifiers cannot be mixed
+* 10155 [code](http://github.com/mongodb/mongo/blob/master/db/ops/update.cpp#L1320) cannot update reserved $ collection
+* 10156 [code](http://github.com/mongodb/mongo/blob/master/db/ops/update.cpp#L1323) cannot update system collection: " << ns << " q: " << patternOrig << " u:
+* 10157 [code](http://github.com/mongodb/mongo/blob/master/db/ops/update.cpp#L1167) multi-update requires all modified objects to have an _id
+* 10158 [code](http://github.com/mongodb/mongo/blob/master/db/ops/update.cpp#L1276) multi update only works with $ operators
+* 10159 [code](http://github.com/mongodb/mongo/blob/master/db/ops/update.cpp#L1307) multi update only works with $ operators
+* 10399 [code](http://github.com/mongodb/mongo/blob/master/db/ops/update.cpp#L712) ModSet::createNewFromMods - RIGHT_SUBFIELD should be impossible
+* 10400 [code](http://github.com/mongodb/mongo/blob/master/db/ops/update.cpp#L715) unhandled case
+* 12522 [code](http://github.com/mongodb/mongo/blob/master/db/ops/update.cpp#L957) $ operator made object too large
+* 12591 [code](http://github.com/mongodb/mongo/blob/master/db/ops/update.cpp#L461) Cannot apply $addToSet modifier to non-array
+* 12592 [code](http://github.com/mongodb/mongo/blob/master/db/ops/update.cpp#L138) $addToSet can only be applied to an array
+* 13339 [code](http://github.com/mongodb/mongo/blob/master/db/ops/update.cpp#L921) cursor dropped during update
+* 13478 [code](http://github.com/mongodb/mongo/blob/master/db/ops/update.cpp#L601) can't apply mod in place - shouldn't have gotten here
+* 13479 [code](http://github.com/mongodb/mongo/blob/master/db/ops/update.cpp#L829) invalid mod field name, target may not be empty
+* 13480 [code](http://github.com/mongodb/mongo/blob/master/db/ops/update.cpp#L830) invalid mod field name, source may not begin or end in period
+* 13481 [code](http://github.com/mongodb/mongo/blob/master/db/ops/update.cpp#L831) invalid mod field name, target may not begin or end in period
+* 13482 [code](http://github.com/mongodb/mongo/blob/master/db/ops/update.cpp#L832) $rename affecting _id not allowed
+* 13483 [code](http://github.com/mongodb/mongo/blob/master/db/ops/update.cpp#L833) $rename affecting _id not allowed
+* 13484 [code](http://github.com/mongodb/mongo/blob/master/db/ops/update.cpp#L834) field name duplication not allowed with $rename target
+* 13485 [code](http://github.com/mongodb/mongo/blob/master/db/ops/update.cpp#L835) conflicting mods not allowed with $rename target
+* 13486 [code](http://github.com/mongodb/mongo/blob/master/db/ops/update.cpp#L836) $rename target may not be a parent of source
+* 13487 [code](http://github.com/mongodb/mongo/blob/master/db/ops/update.cpp#L837) $rename source may not be dynamic array
+* 13488 [code](http://github.com/mongodb/mongo/blob/master/db/ops/update.cpp#L838) $rename target may not be dynamic array
+* 13489 [code](http://github.com/mongodb/mongo/blob/master/db/ops/update.cpp#L378) $rename source field invalid
+* 13490 [code](http://github.com/mongodb/mongo/blob/master/db/ops/update.cpp#L389) $rename target field invalid
+* 13494 [code](http://github.com/mongodb/mongo/blob/master/db/ops/update.cpp#L825) $rename target must be a string
+* 13495 [code](http://github.com/mongodb/mongo/blob/master/db/ops/update.cpp#L827) $rename source must differ from target
+* 13496 [code](http://github.com/mongodb/mongo/blob/master/db/ops/update.cpp#L828) invalid mod field name, source may not be empty
+* 9016 [code](http://github.com/mongodb/mongo/blob/master/db/ops/update.cpp#L312) unknown $bit operation:
+* 9017 [code](http://github.com/mongodb/mongo/blob/master/db/ops/update.cpp#L337)
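+
+Most of these are raised while parsing update modifiers, before any document is touched. A small sketch with the bundled C++ client (collection and field names are illustrative), reporting the failures through getLastError:
+
+    #include <iostream>
+    #include "client/dbclient.h"
+    using namespace mongo;
+
+    int main() {
+        DBClientConnection c;
+        c.connect("localhost");
+        const char* ns = "test.people";
+
+        c.insert(ns, BSON("name" << "joe" << "age" << 33));
+
+        // 10154: $ modifiers and plain field assignments cannot be mixed in one update document
+        c.update(ns, QUERY("name" << "joe"),
+                 BSON("$inc" << BSON("age" << 1) << "name" << "joey"));
+        std::cout << c.getLastError() << std::endl;
+
+        // 10152: the value handed to $inc must itself be a number
+        c.update(ns, QUERY("name" << "joe"), BSON("$inc" << BSON("age" << "one")));
+        std::cout << c.getLastError() << std::endl;
+        return 0;
+    }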
+
+
+db/ops/update.h
+----
+* 10161 [code](http://github.com/mongodb/mongo/blob/master/db/ops/update.h#L379) Invalid modifier specified
+* 12527 [code](http://github.com/mongodb/mongo/blob/master/db/ops/update.h#L245) not okForStorage
+* 13492 [code](http://github.com/mongodb/mongo/blob/master/db/ops/update.h#L270) mod must be RENAME_TO type
+* 9015 [code](http://github.com/mongodb/mongo/blob/master/db/ops/update.h#L621)
+
+
+db/pdfile.cpp
+----
+* 10003 [code](http://github.com/mongodb/mongo/blob/master/db/pdfile.cpp#L1030) failing update: objects in a capped ns cannot grow
+* 10083 [code](http://github.com/mongodb/mongo/blob/master/db/pdfile.cpp#L226) create collection invalid size spec
+* 10084 [code](http://github.com/mongodb/mongo/blob/master/db/pdfile.cpp#L402) can't map file memory - mongo requires 64 bit build for larger datasets
+* 10085 [code](http://github.com/mongodb/mongo/blob/master/db/pdfile.cpp#L404) can't map file memory
+* 10086 [code](http://github.com/mongodb/mongo/blob/master/db/pdfile.cpp#L816) ns not found:
+* 10087 [code](http://github.com/mongodb/mongo/blob/master/db/pdfile.cpp#L824) turn off profiling before dropping system.profile collection
+* 10089 [code](http://github.com/mongodb/mongo/blob/master/db/pdfile.cpp#L976) can't remove from a capped collection
+* 10092 [code](http://github.com/mongodb/mongo/blob/master/db/pdfile.cpp#L1202) too may dups on index build with dropDups=true
+* 10093 [code](http://github.com/mongodb/mongo/blob/master/db/pdfile.cpp#L1706) cannot insert into reserved $ collection
+* 10094 [code](http://github.com/mongodb/mongo/blob/master/db/pdfile.cpp#L1707) invalid ns:
+* 10095 [code](http://github.com/mongodb/mongo/blob/master/db/pdfile.cpp#L1605) attempt to insert in reserved database name 'system'
+* 10099 [code](http://github.com/mongodb/mongo/blob/master/db/pdfile.cpp#L1746) _id cannot be an array
+* 10356 [code](http://github.com/mongodb/mongo/blob/master/db/pdfile.cpp#L314) invalid ns:
+* 10357 [code](http://github.com/mongodb/mongo/blob/master/db/pdfile.cpp#L443) shutdown in progress
+* 10358 [code](http://github.com/mongodb/mongo/blob/master/db/pdfile.cpp#L444) bad new extent size
+* 10359 [code](http://github.com/mongodb/mongo/blob/master/db/pdfile.cpp#L445) header==0 on new extent: 32 bit mmap space exceeded?
+* 10360 [code](http://github.com/mongodb/mongo/blob/master/db/pdfile.cpp#L588) Extent::reset bad magic value
+* 10361 [code](http://github.com/mongodb/mongo/blob/master/db/pdfile.cpp#L796) can't create .$freelist
+* 12502 [code](http://github.com/mongodb/mongo/blob/master/db/pdfile.cpp#L826) can't drop system ns
+* 12503 [code](http://github.com/mongodb/mongo/blob/master/db/pdfile.cpp#L864)
+* 12582 [code](http://github.com/mongodb/mongo/blob/master/db/pdfile.cpp#L1545) duplicate key insert for unique index of capped collection
+* 12583 [code](http://github.com/mongodb/mongo/blob/master/db/pdfile.cpp#L1823) unexpected index insertion failure on capped collection
+* 12584 [code](http://github.com/mongodb/mongo/blob/master/db/pdfile.cpp#L1343) cursor gone during bg index
+* 12585 [code](http://github.com/mongodb/mongo/blob/master/db/pdfile.cpp#L1323) cursor gone during bg index; dropDups
+* 12586 [code](http://github.com/mongodb/mongo/blob/master/db/pdfile.cpp#L90) cannot perform operation: a background operation is currently running for this database
+* 12587 [code](http://github.com/mongodb/mongo/blob/master/db/pdfile.cpp#L95) cannot perform operation: a background operation is currently running for this collection
+* 13130 [code](http://github.com/mongodb/mongo/blob/master/db/pdfile.cpp#L1357) can't start bg index b/c in recursive lock (db.eval?)
+* 13143 [code](http://github.com/mongodb/mongo/blob/master/db/pdfile.cpp#L1648) can't create index on system.indexes
+* 13440 [code](http://github.com/mongodb/mongo/blob/master/db/pdfile.cpp#L354)
+* 13441 [code](http://github.com/mongodb/mongo/blob/master/db/pdfile.cpp#L348)
+* 13596 [code](http://github.com/mongodb/mongo/blob/master/db/pdfile.cpp#L1025) cannot change _id of a document old:" << objOld << " new:
+* 14051 [code](http://github.com/mongodb/mongo/blob/master/db/pdfile.cpp#L1613) system.user entry needs 'user' field to be a string
+* 14052 [code](http://github.com/mongodb/mongo/blob/master/db/pdfile.cpp#L1614) system.user entry needs 'pwd' field to be a string
+* 14053 [code](http://github.com/mongodb/mongo/blob/master/db/pdfile.cpp#L1615) system.user entry needs 'user' field to be non-empty
+* 14054 [code](http://github.com/mongodb/mongo/blob/master/db/pdfile.cpp#L1616) system.user entry needs 'pwd' field to be non-empty
+
+
+db/pdfile.h
+----
+* 13640 [code](http://github.com/mongodb/mongo/blob/master/db/pdfile.h#L374) DataFileHeader looks corrupt at file open filelength:" << filelength << " fileno:
+
+
+db/projection.cpp
+----
+* 10053 [code](http://github.com/mongodb/mongo/blob/master/db/projection.cpp#L82) You cannot currently mix including and excluding fields. Contact us if this is an issue.
+* 10371 [code](http://github.com/mongodb/mongo/blob/master/db/projection.cpp#L25) can only add to Projection once
+* 13097 [code](http://github.com/mongodb/mongo/blob/master/db/projection.cpp#L64) Unsupported projection option:
+* 13098 [code](http://github.com/mongodb/mongo/blob/master/db/projection.cpp#L60) $slice only supports numbers and [skip, limit] arrays
+* 13099 [code](http://github.com/mongodb/mongo/blob/master/db/projection.cpp#L50) $slice array wrong size
+* 13100 [code](http://github.com/mongodb/mongo/blob/master/db/projection.cpp#L55) $slice limit must be positive
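+
+These cover field-selection (projection) parsing; 13099, for example, rejects a $slice array that is not a [skip, limit] pair. A hedged sketch with the bundled C++ client (collection and field names are illustrative):
+
+    #include <iostream>
+    #include <memory>
+    #include "client/dbclient.h"
+    using namespace mongo;
+
+    int main() {
+        DBClientConnection c;
+        c.connect("localhost");
+        const char* ns = "test.posts";
+
+        c.insert(ns, BSON("title" << "hello"
+                          << "comments" << BSON_ARRAY("a" << "b" << "c" << "d")));
+
+        // 13099: a $slice array must have exactly two elements, [skip, limit]
+        BSONObj badFields = BSON("comments" << BSON("$slice" << BSON_ARRAY(5)));
+        try {
+            std::auto_ptr<DBClientCursor> cur = c.query(ns, QUERY("title" << "hello"), 0, 0, &badFields);
+            while (cur->more())
+                cur->nextSafe();
+        }
+        catch (const DBException& e) {
+            std::cout << "projection rejected: " << e.what() << std::endl;
+        }
+
+        // an accepted form: skip one comment, then return two
+        BSONObj okFields = BSON("comments" << BSON("$slice" << BSON_ARRAY(1 << 2)));
+        std::auto_ptr<DBClientCursor> cur = c.query(ns, QUERY("title" << "hello"), 0, 0, &okFields);
+        while (cur->more())
+            std::cout << cur->nextSafe().jsonString() << std::endl;
+        return 0;
+    }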
+
+
+db/queryoptimizer.cpp
+----
+* 10111 [code](http://github.com/mongodb/mongo/blob/master/db/queryoptimizer.cpp#L44) table scans not allowed:
+* 10112 [code](http://github.com/mongodb/mongo/blob/master/db/queryoptimizer.cpp#L350) bad hint
+* 10113 [code](http://github.com/mongodb/mongo/blob/master/db/queryoptimizer.cpp#L362) bad hint
+* 10363 [code](http://github.com/mongodb/mongo/blob/master/db/queryoptimizer.cpp#L214) newCursor() with start location not implemented for indexed plans
+* 10364 [code](http://github.com/mongodb/mongo/blob/master/db/queryoptimizer.cpp#L235) newReverseCursor() not implemented for indexed plans
+* 10365 [code](http://github.com/mongodb/mongo/blob/master/db/queryoptimizer.cpp#L328)
+* 10366 [code](http://github.com/mongodb/mongo/blob/master/db/queryoptimizer.cpp#L389) natural order cannot be specified with $min/$max
+* 10367 [code](http://github.com/mongodb/mongo/blob/master/db/queryoptimizer.cpp#L400)
+* 10368 [code](http://github.com/mongodb/mongo/blob/master/db/queryoptimizer.cpp#L460) Unable to locate previously recorded index
+* 10369 [code](http://github.com/mongodb/mongo/blob/master/db/queryoptimizer.cpp#L647) no plans
+* 13038 [code](http://github.com/mongodb/mongo/blob/master/db/queryoptimizer.cpp#L436) can't find special index: " + _special + " for:
+* 13040 [code](http://github.com/mongodb/mongo/blob/master/db/queryoptimizer.cpp#L98) no type for special:
+* 13268 [code](http://github.com/mongodb/mongo/blob/master/db/queryoptimizer.cpp#L839) invalid $or spec
+* 13292 [code](http://github.com/mongodb/mongo/blob/master/db/queryoptimizer.cpp#L337) hint eoo
+
+
+db/queryoptimizer.h
+----
+* 13266 [code](http://github.com/mongodb/mongo/blob/master/db/queryoptimizer.h#L437) not implemented for $or query
+* 13271 [code](http://github.com/mongodb/mongo/blob/master/db/queryoptimizer.h#L440) can't run more ops
+* 13335 [code](http://github.com/mongodb/mongo/blob/master/db/queryoptimizer.h#L146) yield not supported
+* 13336 [code](http://github.com/mongodb/mongo/blob/master/db/queryoptimizer.h#L148) yield not supported
+
+
+db/queryoptimizercursor.cpp
+----
+* 14809 [code](http://github.com/mongodb/mongo/blob/master/db/queryoptimizercursor.cpp#L312) Invalid access for cursor that is not ok()
+* 14826 [code](http://github.com/mongodb/mongo/blob/master/db/queryoptimizercursor.cpp#L174)
+
+
+db/queryutil.cpp
+----
+* 10370 [code](http://github.com/mongodb/mongo/blob/master/db/queryutil.cpp#L323) $all requires array
+* 12580 [code](http://github.com/mongodb/mongo/blob/master/db/queryutil.cpp#L160) invalid query
+* 13033 [code](http://github.com/mongodb/mongo/blob/master/db/queryutil.cpp#L630) can't have 2 special fields
+* 13034 [code](http://github.com/mongodb/mongo/blob/master/db/queryutil.cpp#L819) invalid use of $not
+* 13041 [code](http://github.com/mongodb/mongo/blob/master/db/queryutil.cpp#L828) invalid use of $not
+* 13050 [code](http://github.com/mongodb/mongo/blob/master/db/queryutil.cpp#L740) $all requires array
+* 13262 [code](http://github.com/mongodb/mongo/blob/master/db/queryutil.cpp#L1382) $or requires nonempty array
+* 13263 [code](http://github.com/mongodb/mongo/blob/master/db/queryutil.cpp#L1386) $or array must contain objects
+* 13274 [code](http://github.com/mongodb/mongo/blob/master/db/queryutil.cpp#L1398) no or clause to pop
+* 13291 [code](http://github.com/mongodb/mongo/blob/master/db/queryutil.cpp#L1388) $or may not contain 'special' query
+* 13303 [code](http://github.com/mongodb/mongo/blob/master/db/queryutil.cpp#L1010) combinatorial limit of $in partitioning of result set exceeded
+* 13304 [code](http://github.com/mongodb/mongo/blob/master/db/queryutil.cpp#L1020) combinatorial limit of $in partitioning of result set exceeded
+* 13385 [code](http://github.com/mongodb/mongo/blob/master/db/queryutil.cpp#L879) combinatorial limit of $in partitioning of result set exceeded
+* 13454 [code](http://github.com/mongodb/mongo/blob/master/db/queryutil.cpp#L228) invalid regular expression operator
+* 14048 [code](http://github.com/mongodb/mongo/blob/master/db/queryutil.cpp#L1097) FieldRangeSetPair invalid index specified
+* 14049 [code](http://github.com/mongodb/mongo/blob/master/db/queryutil.cpp#L1101) FieldRangeSetPair invalid index specified
+* 14816 [code](http://github.com/mongodb/mongo/blob/master/db/queryutil.cpp#L777) $and expression must be a nonempty array
+* 14817 [code](http://github.com/mongodb/mongo/blob/master/db/queryutil.cpp#L781) $and elements must be objects
+
+
+db/repl.cpp
+----
+* 10002 [code](http://github.com/mongodb/mongo/blob/master/db/repl.cpp#L389) local.sources collection corrupt?
+* 10118 [code](http://github.com/mongodb/mongo/blob/master/db/repl.cpp#L257) 'host' field not set in sources collection object
+* 10119 [code](http://github.com/mongodb/mongo/blob/master/db/repl.cpp#L258) only source='main' allowed for now with replication
+* 10120 [code](http://github.com/mongodb/mongo/blob/master/db/repl.cpp#L261) bad sources 'syncedTo' field value
+* 10123 [code](http://github.com/mongodb/mongo/blob/master/db/repl.cpp#L993) replication error last applied optime at slave >= nextOpTime from master
+* 10124 [code](http://github.com/mongodb/mongo/blob/master/db/repl.cpp#L1195)
+* 10384 [code](http://github.com/mongodb/mongo/blob/master/db/repl.cpp#L400) --only requires use of --source
+* 10385 [code](http://github.com/mongodb/mongo/blob/master/db/repl.cpp#L456) Unable to get database list
+* 10386 [code](http://github.com/mongodb/mongo/blob/master/db/repl.cpp#L765) non Date ts found:
+* 10389 [code](http://github.com/mongodb/mongo/blob/master/db/repl.cpp#L794) Unable to get database list
+* 10390 [code](http://github.com/mongodb/mongo/blob/master/db/repl.cpp#L881) got $err reading remote oplog
+* 10391 [code](http://github.com/mongodb/mongo/blob/master/db/repl.cpp#L886) repl: bad object read from remote oplog
+* 10392 [code](http://github.com/mongodb/mongo/blob/master/db/repl.cpp#L1061) bad user object? [1]
+* 10393 [code](http://github.com/mongodb/mongo/blob/master/db/repl.cpp#L1062) bad user object? [2]
+* 13344 [code](http://github.com/mongodb/mongo/blob/master/db/repl.cpp#L877) trying to slave off of a non-master
+* 14032 [code](http://github.com/mongodb/mongo/blob/master/db/repl.cpp#L560) Invalid 'ts' in remote log
+* 14033 [code](http://github.com/mongodb/mongo/blob/master/db/repl.cpp#L566) Unable to get database list
+* 14034 [code](http://github.com/mongodb/mongo/blob/master/db/repl.cpp#L608) Duplicate database names present after attempting to delete duplicates
+
+
+db/repl/health.h
+----
+* 13112 [code](http://github.com/mongodb/mongo/blob/master/db/repl/health.h#L41) bad replset heartbeat option
+* 13113 [code](http://github.com/mongodb/mongo/blob/master/db/repl/health.h#L42) bad replset heartbeat option
+
+
+db/repl/rs.cpp
+----
+* 13093 [code](http://github.com/mongodb/mongo/blob/master/db/repl/rs.cpp#L261) bad --replSet config string format is: <setname>[/<seedhost1>,<seedhost2>,...]
+* 13096 [code](http://github.com/mongodb/mongo/blob/master/db/repl/rs.cpp#L280) bad --replSet command line config string - dups?
+* 13101 [code](http://github.com/mongodb/mongo/blob/master/db/repl/rs.cpp#L282) can't use localhost in replset host list
+* 13114 [code](http://github.com/mongodb/mongo/blob/master/db/repl/rs.cpp#L278) bad --replSet seed hostname
+* 13290 [code](http://github.com/mongodb/mongo/blob/master/db/repl/rs.cpp#L335) bad replSet oplog entry?
+* 13302 [code](http://github.com/mongodb/mongo/blob/master/db/repl/rs.cpp#L430) replSet error self appears twice in the repl set configuration
+
+
+db/repl/rs_config.cpp
+----
+* 13107 [code](http://github.com/mongodb/mongo/blob/master/db/repl/rs_config.cpp#L542)
+* 13108 [code](http://github.com/mongodb/mongo/blob/master/db/repl/rs_config.cpp#L552) bad replset config -- duplicate hosts in the config object?
+* 13109 [code](http://github.com/mongodb/mongo/blob/master/db/repl/rs_config.cpp#L646) multiple rows in " << rsConfigNs << " not supported host:
+* 13115 [code](http://github.com/mongodb/mongo/blob/master/db/repl/rs_config.cpp#L477) bad " + rsConfigNs + " config: version
+* 13117 [code](http://github.com/mongodb/mongo/blob/master/db/repl/rs_config.cpp#L559) bad " + rsConfigNs + " config
+* 13122 [code](http://github.com/mongodb/mongo/blob/master/db/repl/rs_config.cpp#L573) bad repl set config?
+* 13126 [code](http://github.com/mongodb/mongo/blob/master/db/repl/rs_config.cpp#L126) bad Member config
+* 13131 [code](http://github.com/mongodb/mongo/blob/master/db/repl/rs_config.cpp#L487) replSet error parsing (or missing) 'members' field in config object
+* 13132 [code](http://github.com/mongodb/mongo/blob/master/db/repl/rs_config.cpp#L289)
+* 13133 [code](http://github.com/mongodb/mongo/blob/master/db/repl/rs_config.cpp#L293) replSet bad config no members
+* 13135 [code](http://github.com/mongodb/mongo/blob/master/db/repl/rs_config.cpp#L548)
+* 13260 [code](http://github.com/mongodb/mongo/blob/master/db/repl/rs_config.cpp#L621)
+* 13308 [code](http://github.com/mongodb/mongo/blob/master/db/repl/rs_config.cpp#L292) replSet bad config version #
+* 13309 [code](http://github.com/mongodb/mongo/blob/master/db/repl/rs_config.cpp#L294) replSet bad config maximum number of members is 12
+* 13393 [code](http://github.com/mongodb/mongo/blob/master/db/repl/rs_config.cpp#L558) can't use localhost in repl set member names except when using it for all members
+* 13419 [code](http://github.com/mongodb/mongo/blob/master/db/repl/rs_config.cpp#L133) priorities must be between 0.0 and 100.0
+* 13432 [code](http://github.com/mongodb/mongo/blob/master/db/repl/rs_config.cpp#L255) _id may not change for members
+* 13433 [code](http://github.com/mongodb/mongo/blob/master/db/repl/rs_config.cpp#L273) can't find self in new replset config
+* 13434 [code](http://github.com/mongodb/mongo/blob/master/db/repl/rs_config.cpp#L40) unexpected field '" << e.fieldName() << "'in object
+* 13437 [code](http://github.com/mongodb/mongo/blob/master/db/repl/rs_config.cpp#L134) slaveDelay requires priority be zero
+* 13438 [code](http://github.com/mongodb/mongo/blob/master/db/repl/rs_config.cpp#L135) bad slaveDelay value
+* 13439 [code](http://github.com/mongodb/mongo/blob/master/db/repl/rs_config.cpp#L136) priority must be 0 when hidden=true
+* 13476 [code](http://github.com/mongodb/mongo/blob/master/db/repl/rs_config.cpp#L259) buildIndexes may not change for members
+* 13477 [code](http://github.com/mongodb/mongo/blob/master/db/repl/rs_config.cpp#L137) priority must be 0 when buildIndexes=false
+* 13510 [code](http://github.com/mongodb/mongo/blob/master/db/repl/rs_config.cpp#L265) arbiterOnly may not change for members
+* 13612 [code](http://github.com/mongodb/mongo/blob/master/db/repl/rs_config.cpp#L301) replSet bad config maximum number of voting members is 7
+* 13613 [code](http://github.com/mongodb/mongo/blob/master/db/repl/rs_config.cpp#L302) replSet bad config no voting members
+* 13645 [code](http://github.com/mongodb/mongo/blob/master/db/repl/rs_config.cpp#L249) hosts cannot switch between localhost and hostname
+* 14046 [code](http://github.com/mongodb/mongo/blob/master/db/repl/rs_config.cpp#L399) getLastErrorMode rules must be objects
+* 14827 [code](http://github.com/mongodb/mongo/blob/master/db/repl/rs_config.cpp#L267) arbiters cannot have tags
+* 14828 [code](http://github.com/mongodb/mongo/blob/master/db/repl/rs_config.cpp#L411) getLastErrorMode criteria must be greater than 0:
+* 14829 [code](http://github.com/mongodb/mongo/blob/master/db/repl/rs_config.cpp#L406) getLastErrorMode criteria must be numeric
+* 14831 [code](http://github.com/mongodb/mongo/blob/master/db/repl/rs_config.cpp#L416) mode " << clauseObj << " requires
+
+
+db/repl/rs_initialsync.cpp
+----
+* 13404 [code](http://github.com/mongodb/mongo/blob/master/db/repl/rs_initialsync.cpp#L41)
+
+
+db/repl/rs_initiate.cpp
+----
+* 13144 [code](http://github.com/mongodb/mongo/blob/master/db/repl/rs_initiate.cpp#L130)
+* 13145 [code](http://github.com/mongodb/mongo/blob/master/db/repl/rs_initiate.cpp#L93) set name does not match the set name host " + i->h.toString() + " expects
+* 13256 [code](http://github.com/mongodb/mongo/blob/master/db/repl/rs_initiate.cpp#L97) member " + i->h.toString() + " is already initiated
+* 13259 [code](http://github.com/mongodb/mongo/blob/master/db/repl/rs_initiate.cpp#L83)
+* 13278 [code](http://github.com/mongodb/mongo/blob/master/db/repl/rs_initiate.cpp#L58) bad config: isSelf is true for multiple hosts:
+* 13279 [code](http://github.com/mongodb/mongo/blob/master/db/repl/rs_initiate.cpp#L64)
+* 13311 [code](http://github.com/mongodb/mongo/blob/master/db/repl/rs_initiate.cpp#L136) member " + i->h.toString() + " has data already, cannot initiate set. All members except initiator must be empty.
+* 13341 [code](http://github.com/mongodb/mongo/blob/master/db/repl/rs_initiate.cpp#L102) member " + i->h.toString() + " has a config version >= to the new cfg version; cannot change config
+* 13420 [code](http://github.com/mongodb/mongo/blob/master/db/repl/rs_initiate.cpp#L51) initiation and reconfiguration of a replica set must be sent to a node that can become primary
+
+
+db/repl/rs_rollback.cpp
+----
+* 13410 [code](http://github.com/mongodb/mongo/blob/master/db/repl/rs_rollback.cpp#L346) replSet too much data to roll back
+* 13423 [code](http://github.com/mongodb/mongo/blob/master/db/repl/rs_rollback.cpp#L463) replSet error in rollback can't find
+
+
+db/repl/rs_sync.cpp
+----
+* 1000 [code](http://github.com/mongodb/mongo/blob/master/db/repl/rs_sync.cpp#L299) replSet source for syncing doesn't seem to be await capable -- is it an older version of mongodb?
+* 12000 [code](http://github.com/mongodb/mongo/blob/master/db/repl/rs_sync.cpp#L385) rs slaveDelay differential too big check clocks and systems
+* 13508 [code](http://github.com/mongodb/mongo/blob/master/db/repl/rs_sync.cpp#L87) no 'ts' in first op in oplog:
+
+
+db/repl_block.cpp
+----
+* 14830 [code](http://github.com/mongodb/mongo/blob/master/db/repl_block.cpp#L182) unrecognized getLastError mode:
+
+
+db/replutil.h
+----
+* 10107 [code](http://github.com/mongodb/mongo/blob/master/db/replutil.h#L76) not master
+* 13435 [code](http://github.com/mongodb/mongo/blob/master/db/replutil.h#L84) not master and slaveok=false
+* 13436 [code](http://github.com/mongodb/mongo/blob/master/db/replutil.h#L85) not master or secondary, can't read
+
+
+db/restapi.cpp
+----
+* 13085 [code](http://github.com/mongodb/mongo/blob/master/db/restapi.cpp#L151) query failed for dbwebserver
+
+
+db/scanandorder.h
+----
+* 10128 [code](http://github.com/mongodb/mongo/blob/master/db/scanandorder.h#L125) too much data for sort() with no index. add an index or specify a smaller limit
+* 10129 [code](http://github.com/mongodb/mongo/blob/master/db/scanandorder.h#L149) too much data for sort() with no index
+
+
+dbtests/framework.cpp
+----
+* 10162 [code](http://github.com/mongodb/mongo/blob/master/dbtests/framework.cpp#L403) already have suite with that name
+
+
+dbtests/jsobjtests.cpp
+----
+* 12528 [code](http://github.com/mongodb/mongo/blob/master/dbtests/jsobjtests.cpp#L1746) should be ok for storage:
+* 12529 [code](http://github.com/mongodb/mongo/blob/master/dbtests/jsobjtests.cpp#L1753) should NOT be ok for storage:
+
+
+dbtests/queryoptimizertests.cpp
+----
+* 10408 [code](http://github.com/mongodb/mongo/blob/master/dbtests/queryoptimizertests.cpp#L560) throw
+* 10409 [code](http://github.com/mongodb/mongo/blob/master/dbtests/queryoptimizertests.cpp#L599) throw
+* 10410 [code](http://github.com/mongodb/mongo/blob/master/dbtests/queryoptimizertests.cpp#L727) throw
+* 10411 [code](http://github.com/mongodb/mongo/blob/master/dbtests/queryoptimizertests.cpp#L740) throw
+
+
+s/balance.cpp
+----
+* 13258 [code](http://github.com/mongodb/mongo/blob/master/s/balance.cpp#L292) oids broken after resetting!
+
+
+s/chunk.cpp
+----
+* 10163 [code](http://github.com/mongodb/mongo/blob/master/s/chunk.cpp#L125) can only handle numbers here - which i think is correct
+* 10165 [code](http://github.com/mongodb/mongo/blob/master/s/chunk.cpp#L262) can't split as shard doesn't have a manager
+* 10167 [code](http://github.com/mongodb/mongo/blob/master/s/chunk.cpp#L300) can't move shard to its current location!
+* 10169 [code](http://github.com/mongodb/mongo/blob/master/s/chunk.cpp#L418) datasize failed!" , conn->runCommand( "admin
+* 10170 [code](http://github.com/mongodb/mongo/blob/master/s/chunk.cpp#L65) Chunk needs a ns
+* 10171 [code](http://github.com/mongodb/mongo/blob/master/s/chunk.cpp#L68) Chunk needs a server
+* 10172 [code](http://github.com/mongodb/mongo/blob/master/s/chunk.cpp#L70) Chunk needs a min
+* 10173 [code](http://github.com/mongodb/mongo/blob/master/s/chunk.cpp#L71) Chunk needs a max
+* 10174 [code](http://github.com/mongodb/mongo/blob/master/s/chunk.cpp#L790) config servers not all up
+* 10412 [code](http://github.com/mongodb/mongo/blob/master/s/chunk.cpp#L396)
+* 13003 [code](http://github.com/mongodb/mongo/blob/master/s/chunk.cpp#L265) can't split a chunk with only one distinct value
+* 13141 [code](http://github.com/mongodb/mongo/blob/master/s/chunk.cpp#L677) Chunk map pointed to incorrect chunk
+* 13282 [code](http://github.com/mongodb/mongo/blob/master/s/chunk.cpp#L539) Couldn't load a valid config for " + _ns + " after 3 attempts. Please try again.
+* 13327 [code](http://github.com/mongodb/mongo/blob/master/s/chunk.cpp#L66) Chunk ns must match server ns
+* 13331 [code](http://github.com/mongodb/mongo/blob/master/s/chunk.cpp#L788) collection's metadata is undergoing changes. Please try again.
+* 13332 [code](http://github.com/mongodb/mongo/blob/master/s/chunk.cpp#L263) need a split key to split chunk
+* 13333 [code](http://github.com/mongodb/mongo/blob/master/s/chunk.cpp#L264) can't split a chunk in that many parts
+* 13345 [code](http://github.com/mongodb/mongo/blob/master/s/chunk.cpp#L187)
+* 13346 [code](http://github.com/mongodb/mongo/blob/master/s/chunk.cpp#L838) can't pre-split already splitted collection
+* 13405 [code](http://github.com/mongodb/mongo/blob/master/s/chunk.cpp#L756) min must have shard key
+* 13406 [code](http://github.com/mongodb/mongo/blob/master/s/chunk.cpp#L757) max must have shard key
+* 13501 [code](http://github.com/mongodb/mongo/blob/master/s/chunk.cpp#L702) use geoNear command rather than $near query
+* 13502 [code](http://github.com/mongodb/mongo/blob/master/s/chunk.cpp#L709) unrecognized special query type:
+* 13503 [code](http://github.com/mongodb/mongo/blob/master/s/chunk.cpp#L158)
+* 13507 [code](http://github.com/mongodb/mongo/blob/master/s/chunk.cpp#L733) invalid chunk config minObj:
+* 13592 [code](http://github.com/mongodb/mongo/blob/master/s/chunk.cpp#L640)
+* 14022 [code](http://github.com/mongodb/mongo/blob/master/s/chunk.cpp#L785) Error locking distributed lock for chunk drop.
+* 8070 [code](http://github.com/mongodb/mongo/blob/master/s/chunk.cpp#L681) couldn't find a chunk which should be impossible:
+* 8071 [code](http://github.com/mongodb/mongo/blob/master/s/chunk.cpp#L828) cleaning up after drop failed:
+
+
+s/client.cpp
+----
+* 13134 [code](http://github.com/mongodb/mongo/blob/master/s/client.cpp#L63)
+
+
+s/commands_public.cpp
+----
+* 10418 [code](http://github.com/mongodb/mongo/blob/master/s/commands_public.cpp#L271) how could chunk manager be null!
+* 10420 [code](http://github.com/mongodb/mongo/blob/master/s/commands_public.cpp#L730) how could chunk manager be null!
+* 12594 [code](http://github.com/mongodb/mongo/blob/master/s/commands_public.cpp#L494) how could chunk manager be null!
+* 13002 [code](http://github.com/mongodb/mongo/blob/master/s/commands_public.cpp#L616) shard internal error chunk manager should never be null
+* 13091 [code](http://github.com/mongodb/mongo/blob/master/s/commands_public.cpp#L795) how could chunk manager be null!
+* 13092 [code](http://github.com/mongodb/mongo/blob/master/s/commands_public.cpp#L796) GridFS chunks collection can only be sharded on files_id", cm->getShardKey().key() == BSON("files_id
+* 13137 [code](http://github.com/mongodb/mongo/blob/master/s/commands_public.cpp#L328) Source and destination collections must be on same shard
+* 13138 [code](http://github.com/mongodb/mongo/blob/master/s/commands_public.cpp#L322) You can't rename a sharded collection
+* 13139 [code](http://github.com/mongodb/mongo/blob/master/s/commands_public.cpp#L323) You can't rename to a sharded collection
+* 13140 [code](http://github.com/mongodb/mongo/blob/master/s/commands_public.cpp#L321) Don't recognize source or target DB
+* 13343 [code](http://github.com/mongodb/mongo/blob/master/s/commands_public.cpp#L619) query for sharded findAndModify must have shardkey
+* 13398 [code](http://github.com/mongodb/mongo/blob/master/s/commands_public.cpp#L342) cant copy to sharded DB
+* 13399 [code](http://github.com/mongodb/mongo/blob/master/s/commands_public.cpp#L350) need a fromdb argument
+* 13400 [code](http://github.com/mongodb/mongo/blob/master/s/commands_public.cpp#L353) don't know where source DB is
+* 13401 [code](http://github.com/mongodb/mongo/blob/master/s/commands_public.cpp#L354) cant copy from sharded DB
+* 13402 [code](http://github.com/mongodb/mongo/blob/master/s/commands_public.cpp#L339) need a todb argument
+* 13407 [code](http://github.com/mongodb/mongo/blob/master/s/commands_public.cpp#L652) how could chunk manager be null!
+* 13408 [code](http://github.com/mongodb/mongo/blob/master/s/commands_public.cpp#L658) keyPattern must equal shard key
+* 13500 [code](http://github.com/mongodb/mongo/blob/master/s/commands_public.cpp#L826) how could chunk manager be null!
+* 13512 [code](http://github.com/mongodb/mongo/blob/master/s/commands_public.cpp#L274) drop collection attempted on non-sharded collection
+
+
+s/config.cpp
+----
+* 10176 [code](http://github.com/mongodb/mongo/blob/master/s/config.cpp#L460) shard state missing for
+* 10178 [code](http://github.com/mongodb/mongo/blob/master/s/config.cpp#L118) no primary!
+* 10181 [code](http://github.com/mongodb/mongo/blob/master/s/config.cpp#L204) not sharded:
+* 10184 [code](http://github.com/mongodb/mongo/blob/master/s/config.cpp#L463) _dropShardedCollections too many collections - bailing
+* 10187 [code](http://github.com/mongodb/mongo/blob/master/s/config.cpp#L496) need configdbs
+* 10189 [code](http://github.com/mongodb/mongo/blob/master/s/config.cpp#L653) should only have 1 thing in config.version
+* 13396 [code](http://github.com/mongodb/mongo/blob/master/s/config.cpp#L331) DBConfig save failed:
+* 13449 [code](http://github.com/mongodb/mongo/blob/master/s/config.cpp#L151) collections already sharded
+* 13473 [code](http://github.com/mongodb/mongo/blob/master/s/config.cpp#L92) failed to save collection (" + ns + "):
+* 13509 [code](http://github.com/mongodb/mongo/blob/master/s/config.cpp#L282) can't migrate from 1.5.x release to the current one; need to upgrade to 1.6.x first
+* 13648 [code](http://github.com/mongodb/mongo/blob/master/s/config.cpp#L135) can't shard collection because not all config servers are up
+* 14822 [code](http://github.com/mongodb/mongo/blob/master/s/config.cpp#L247) state changed in the middle:
+* 8042 [code](http://github.com/mongodb/mongo/blob/master/s/config.cpp#L134) db doesn't have sharding enabled
+* 8043 [code](http://github.com/mongodb/mongo/blob/master/s/config.cpp#L142) collection already sharded
+
+
+s/config.h
+----
+* 10190 [code](http://github.com/mongodb/mongo/blob/master/s/config.h#L208) ConfigServer not setup
+* 8041 [code](http://github.com/mongodb/mongo/blob/master/s/config.h#L154) no primary shard configured for db:
+
+
+s/cursors.cpp
+----
+* 10191 [code](http://github.com/mongodb/mongo/blob/master/s/cursors.cpp#L76) cursor already done
+* 13286 [code](http://github.com/mongodb/mongo/blob/master/s/cursors.cpp#L217) sent 0 cursors to kill
+* 13287 [code](http://github.com/mongodb/mongo/blob/master/s/cursors.cpp#L218) too many cursors to kill
+
+
+s/d_chunk_manager.cpp
+----
+* 13539 [code](http://github.com/mongodb/mongo/blob/master/s/d_chunk_manager.cpp#L49) does not exist
+* 13540 [code](http://github.com/mongodb/mongo/blob/master/s/d_chunk_manager.cpp#L50) collection config entry corrupted" , collectionDoc["dropped
+* 13541 [code](http://github.com/mongodb/mongo/blob/master/s/d_chunk_manager.cpp#L51) dropped. Re-shard collection first." , !collectionDoc["dropped
+* 13542 [code](http://github.com/mongodb/mongo/blob/master/s/d_chunk_manager.cpp#L77) collection doesn't have a key:
+* 13585 [code](http://github.com/mongodb/mongo/blob/master/s/d_chunk_manager.cpp#L237) version " << version.toString() << " not greater than
+* 13586 [code](http://github.com/mongodb/mongo/blob/master/s/d_chunk_manager.cpp#L206) couldn't find chunk " << min << "->
+* 13587 [code](http://github.com/mongodb/mongo/blob/master/s/d_chunk_manager.cpp#L214)
+* 13588 [code](http://github.com/mongodb/mongo/blob/master/s/d_chunk_manager.cpp#L271)
+* 13590 [code](http://github.com/mongodb/mongo/blob/master/s/d_chunk_manager.cpp#L228) setting version to " << version << " on removing last chunk
+* 13591 [code](http://github.com/mongodb/mongo/blob/master/s/d_chunk_manager.cpp#L257) version can't be set to zero
+* 14039 [code](http://github.com/mongodb/mongo/blob/master/s/d_chunk_manager.cpp#L296) version " << version.toString() << " not greater than
+* 14040 [code](http://github.com/mongodb/mongo/blob/master/s/d_chunk_manager.cpp#L303) can split " << min << " -> " << max << " on
+* 15851 [code](http://github.com/mongodb/mongo/blob/master/s/d_chunk_manager.cpp#L142)
+
+
+s/d_logic.cpp
+----
+* 10422 [code](http://github.com/mongodb/mongo/blob/master/s/d_logic.cpp#L96) write with bad shard config and no server id!
+
+
+s/d_split.cpp
+----
+* 13593 [code](http://github.com/mongodb/mongo/blob/master/s/d_split.cpp#L771)
+
+
+s/d_state.cpp
+----
+* 13298 [code](http://github.com/mongodb/mongo/blob/master/s/d_state.cpp#L77)
+* 13299 [code](http://github.com/mongodb/mongo/blob/master/s/d_state.cpp#L99)
+* 13647 [code](http://github.com/mongodb/mongo/blob/master/s/d_state.cpp#L521) context should be empty here, is:
+
+
+s/grid.cpp
+----
+* 10185 [code](http://github.com/mongodb/mongo/blob/master/s/grid.cpp#L93) can't find a shard to put new db on
+* 10186 [code](http://github.com/mongodb/mongo/blob/master/s/grid.cpp#L107) removeDB expects db name
+* 10421 [code](http://github.com/mongodb/mongo/blob/master/s/grid.cpp#L452) getoptime failed" , conn->simpleCommand( "admin" , &result , "getoptime
+
+
+s/mr_shard.cpp
+----
+* 14836 [code](http://github.com/mongodb/mongo/blob/master/s/mr_shard.cpp#L45) couldn't compile code for:
+* 14837 [code](http://github.com/mongodb/mongo/blob/master/s/mr_shard.cpp#L149) value too large to reduce
+* 14838 [code](http://github.com/mongodb/mongo/blob/master/s/mr_shard.cpp#L169) reduce -> multiple not supported yet
+* 14839 [code](http://github.com/mongodb/mongo/blob/master/s/mr_shard.cpp#L231) unknown out specifier [" << t << "]
+* 14840 [code](http://github.com/mongodb/mongo/blob/master/s/mr_shard.cpp#L239) 'out' has to be a string or an object
+* 14841 [code](http://github.com/mongodb/mongo/blob/master/s/mr_shard.cpp#L203) outType is no longer a valid option" , cmdObj["outType
+
+
+s/request.cpp
+----
+* 10193 [code](http://github.com/mongodb/mongo/blob/master/s/request.cpp#L73) no shard info for:
+* 10194 [code](http://github.com/mongodb/mongo/blob/master/s/request.cpp#L92) can't call primaryShard on a sharded collection!
+* 10195 [code](http://github.com/mongodb/mongo/blob/master/s/request.cpp#L127) too many attempts to update config, failing
+* 13644 [code](http://github.com/mongodb/mongo/blob/master/s/request.cpp#L61) can't use 'local' database through mongos" , ! str::startsWith( getns() , "local.
+* 15845 [code](http://github.com/mongodb/mongo/blob/master/s/request.cpp#L143) unauthorized
+* 8060 [code](http://github.com/mongodb/mongo/blob/master/s/request.cpp#L88) can't call primaryShard on a sharded collection
+
+
+s/server.cpp
+----
+* 10197 [code](http://github.com/mongodb/mongo/blob/master/s/server.cpp#L171) createDirectClient not implemented for sharding yet
+* 15849 [code](http://github.com/mongodb/mongo/blob/master/s/server.cpp#L81) client info not defined
+
+
+s/shard.cpp
+----
+* 13128 [code](http://github.com/mongodb/mongo/blob/master/s/shard.cpp#L119) can't find shard for:
+* 13129 [code](http://github.com/mongodb/mongo/blob/master/s/shard.cpp#L111) can't find shard for:
+* 13136 [code](http://github.com/mongodb/mongo/blob/master/s/shard.cpp#L307)
+* 13632 [code](http://github.com/mongodb/mongo/blob/master/s/shard.cpp#L40) couldn't get updated shard list from config server
+* 14807 [code](http://github.com/mongodb/mongo/blob/master/s/shard.cpp#L255) no set name for shard: " << _name << "
+* 15847 [code](http://github.com/mongodb/mongo/blob/master/s/shard.cpp#L364) can't authenticate to shard server
+
+
+s/shard_version.cpp
+----
+* 10428 [code](http://github.com/mongodb/mongo/blob/master/s/shard_version.cpp#L134) need_authoritative set but in authoritative mode already
+* 10429 [code](http://github.com/mongodb/mongo/blob/master/s/shard_version.cpp#L157)
+
+
+s/shardconnection.cpp
+----
+* 13409 [code](http://github.com/mongodb/mongo/blob/master/s/shardconnection.cpp#L262) can't parse ns from:
+
+
+s/shardkey.cpp
+----
+* 10198 [code](http://github.com/mongodb/mongo/blob/master/s/shardkey.cpp#L46) left object doesn't have full shard key
+* 10199 [code](http://github.com/mongodb/mongo/blob/master/s/shardkey.cpp#L48) right object doesn't have full shard key
+
+
+s/shardkey.h
+----
+* 13334 [code](http://github.com/mongodb/mongo/blob/master/s/shardkey.h#L106) Shard Key must be less than 512 bytes
+
+
+s/strategy.cpp
+----
+* 10200 [code](http://github.com/mongodb/mongo/blob/master/s/strategy.cpp#L56) mongos: error calling db
+
+
+s/strategy_shard.cpp
+----
+* 10201 [code](http://github.com/mongodb/mongo/blob/master/s/strategy_shard.cpp#L251) invalid update
+* 10203 [code](http://github.com/mongodb/mongo/blob/master/s/strategy_shard.cpp#L440) bad delete message
+* 12376 [code](http://github.com/mongodb/mongo/blob/master/s/strategy_shard.cpp#L306)
+* 13123 [code](http://github.com/mongodb/mongo/blob/master/s/strategy_shard.cpp#L293)
+* 13465 [code](http://github.com/mongodb/mongo/blob/master/s/strategy_shard.cpp#L265) shard key in upsert query must be an exact match
+* 13505 [code](http://github.com/mongodb/mongo/blob/master/s/strategy_shard.cpp#L442) $atomic not supported sharded" , pattern["$atomic
+* 13506 [code](http://github.com/mongodb/mongo/blob/master/s/strategy_shard.cpp#L250) $atomic not supported sharded" , query["$atomic
+* 14804 [code](http://github.com/mongodb/mongo/blob/master/s/strategy_shard.cpp#L178) collection no longer sharded
+* 14805 [code](http://github.com/mongodb/mongo/blob/master/s/strategy_shard.cpp#L466) collection no longer sharded
+* 14806 [code](http://github.com/mongodb/mongo/blob/master/s/strategy_shard.cpp#L338) collection no longer sharded
+* 14842 [code](http://github.com/mongodb/mongo/blob/master/s/strategy_shard.cpp#L212) tried to insert object without shard key
+* 14843 [code](http://github.com/mongodb/mongo/blob/master/s/strategy_shard.cpp#L239) collection no longer sharded
+* 14849 [code](http://github.com/mongodb/mongo/blob/master/s/strategy_shard.cpp#L428) collection no longer sharded
+* 14850 [code](http://github.com/mongodb/mongo/blob/master/s/strategy_shard.cpp#L367) can't do non-multi update with query that doesn't have the shard key
+* 14851 [code](http://github.com/mongodb/mongo/blob/master/s/strategy_shard.cpp#L386)
+* 14854 [code](http://github.com/mongodb/mongo/blob/master/s/strategy_shard.cpp#L352) can't upsert something without shard key
+* 14855 [code](http://github.com/mongodb/mongo/blob/master/s/strategy_shard.cpp#L358) shard key in upsert query must be an exact match
+* 14856 [code](http://github.com/mongodb/mongo/blob/master/s/strategy_shard.cpp#L394)
+* 14857 [code](http://github.com/mongodb/mongo/blob/master/s/strategy_shard.cpp#L399)
+* 8010 [code](http://github.com/mongodb/mongo/blob/master/s/strategy_shard.cpp#L41) something is wrong, shouldn't see a command here
+* 8011 [code](http://github.com/mongodb/mongo/blob/master/s/strategy_shard.cpp#L143) tried to insert object without shard key
+* 8012 [code](http://github.com/mongodb/mongo/blob/master/s/strategy_shard.cpp#L259) can't upsert something without shard key
+* 8013 [code](http://github.com/mongodb/mongo/blob/master/s/strategy_shard.cpp#L274) can't do non-multi update with query that doesn't have the shard key
+* 8014 [code](http://github.com/mongodb/mongo/blob/master/s/strategy_shard.cpp#L301)
+* 8015 [code](http://github.com/mongodb/mongo/blob/master/s/strategy_shard.cpp#L471) can only delete with a non-shard key pattern if can delete as many as we find
+* 8016 [code](http://github.com/mongodb/mongo/blob/master/s/strategy_shard.cpp#L499) can't do this write op on sharded collection
+
+
+s/strategy_single.cpp
+----
+* 10204 [code](http://github.com/mongodb/mongo/blob/master/s/strategy_single.cpp#L106) dbgrid: getmore: error calling db
+* 10205 [code](http://github.com/mongodb/mongo/blob/master/s/strategy_single.cpp#L124) can't use unique indexes with sharding ns:
+* 13390 [code](http://github.com/mongodb/mongo/blob/master/s/strategy_single.cpp#L86) unrecognized command:
+* 8050 [code](http://github.com/mongodb/mongo/blob/master/s/strategy_single.cpp#L145) can't update system.indexes
+* 8051 [code](http://github.com/mongodb/mongo/blob/master/s/strategy_single.cpp#L149) can't delete indexes on sharded collection yet
+* 8052 [code](http://github.com/mongodb/mongo/blob/master/s/strategy_single.cpp#L153) handleIndexWrite invalid write op
+
+
+s/util.h
+----
+* 13657 [code](http://github.com/mongodb/mongo/blob/master/s/util.h#L108) unknown type for ShardChunkVersion:
+
+
+s/writeback_listener.cpp
+----
+* 10427 [code](http://github.com/mongodb/mongo/blob/master/s/writeback_listener.cpp#L161) invalid writeback message
+* 13403 [code](http://github.com/mongodb/mongo/blob/master/s/writeback_listener.cpp#L110) didn't get writeback for: " << oid << " after: " << t.millis() << " ms
+* 13641 [code](http://github.com/mongodb/mongo/blob/master/s/writeback_listener.cpp#L69) can't parse host [" << conn.getServerAddress() << "]
+* 14041 [code](http://github.com/mongodb/mongo/blob/master/s/writeback_listener.cpp#L100) got writeback waitfor for older id
+
+
+scripting/bench.cpp
+----
+* 14811 [code](http://github.com/mongodb/mongo/blob/master/scripting/bench.cpp#L92) invalid bench dynamic piece:
+
+
+scripting/engine.cpp
+----
+* 10206 [code](http://github.com/mongodb/mongo/blob/master/scripting/engine.cpp#L83)
+* 10207 [code](http://github.com/mongodb/mongo/blob/master/scripting/engine.cpp#L90) compile failed
+* 10208 [code](http://github.com/mongodb/mongo/blob/master/scripting/engine.cpp#L176) need to have locallyConnected already
+* 10209 [code](http://github.com/mongodb/mongo/blob/master/scripting/engine.cpp#L197) name has to be a string:
+* 10210 [code](http://github.com/mongodb/mongo/blob/master/scripting/engine.cpp#L198) value has to be set
+* 10430 [code](http://github.com/mongodb/mongo/blob/master/scripting/engine.cpp#L168) invalid object id: not hex
+* 10448 [code](http://github.com/mongodb/mongo/blob/master/scripting/engine.cpp#L159) invalid object id: length
+
+
+scripting/engine.h
+----
+* 13474 [code](http://github.com/mongodb/mongo/blob/master/scripting/engine.h#L194) no _getInterruptSpecCallback
+* 9004 [code](http://github.com/mongodb/mongo/blob/master/scripting/engine.h#L93) invoke failed:
+* 9005 [code](http://github.com/mongodb/mongo/blob/master/scripting/engine.h#L101) invoke failed:
+
+
+scripting/engine_java.h
+----
+* 10211 [code](http://github.com/mongodb/mongo/blob/master/scripting/engine_java.h#L197) only readOnly setObject supported in java
+
+
+scripting/engine_spidermonkey.cpp
+----
+* 10212 [code](http://github.com/mongodb/mongo/blob/master/scripting/engine_spidermonkey.cpp#L83) holder magic value is wrong
+* 10213 [code](http://github.com/mongodb/mongo/blob/master/scripting/engine_spidermonkey.cpp#L223) non ascii character detected
+* 10214 [code](http://github.com/mongodb/mongo/blob/master/scripting/engine_spidermonkey.cpp#L251) not a number
+* 10215 [code](http://github.com/mongodb/mongo/blob/master/scripting/engine_spidermonkey.cpp#L327) not an object
+* 10216 [code](http://github.com/mongodb/mongo/blob/master/scripting/engine_spidermonkey.cpp#L336) not a function
+* 10217 [code](http://github.com/mongodb/mongo/blob/master/scripting/engine_spidermonkey.cpp#L393) can't append field. name:" + name + " type:
+* 10218 [code](http://github.com/mongodb/mongo/blob/master/scripting/engine_spidermonkey.cpp#L717) not done: toval
+* 10219 [code](http://github.com/mongodb/mongo/blob/master/scripting/engine_spidermonkey.cpp#L744) object passed to getPropery is null
+* 10220 [code](http://github.com/mongodb/mongo/blob/master/scripting/engine_spidermonkey.cpp#L835) don't know what to do with this op
+* 10221 [code](http://github.com/mongodb/mongo/blob/master/scripting/engine_spidermonkey.cpp#L1161) JS_NewRuntime failed
+* 10222 [code](http://github.com/mongodb/mongo/blob/master/scripting/engine_spidermonkey.cpp#L1169) assert not being executed
+* 10223 [code](http://github.com/mongodb/mongo/blob/master/scripting/engine_spidermonkey.cpp#L1244) deleted SMScope twice?
+* 10224 [code](http://github.com/mongodb/mongo/blob/master/scripting/engine_spidermonkey.cpp#L1302) already local connected
+* 10225 [code](http://github.com/mongodb/mongo/blob/master/scripting/engine_spidermonkey.cpp#L1312) already setup for external db
+* 10226 [code](http://github.com/mongodb/mongo/blob/master/scripting/engine_spidermonkey.cpp#L1314) connected to different db
+* 10227 [code](http://github.com/mongodb/mongo/blob/master/scripting/engine_spidermonkey.cpp#L1383) unknown type
+* 10228 [code](http://github.com/mongodb/mongo/blob/master/scripting/engine_spidermonkey.cpp#L1539) exec failed:
+* 10229 [code](http://github.com/mongodb/mongo/blob/master/scripting/engine_spidermonkey.cpp#L1738) need a scope
+* 10431 [code](http://github.com/mongodb/mongo/blob/master/scripting/engine_spidermonkey.cpp#L1220) JS_NewContext failed
+* 10432 [code](http://github.com/mongodb/mongo/blob/master/scripting/engine_spidermonkey.cpp#L1227) JS_NewObject failed for global
+* 10433 [code](http://github.com/mongodb/mongo/blob/master/scripting/engine_spidermonkey.cpp#L1229) js init failed
+* 13072 [code](http://github.com/mongodb/mongo/blob/master/scripting/engine_spidermonkey.cpp#L36) JS_NewObject failed:
+* 13076 [code](http://github.com/mongodb/mongo/blob/master/scripting/engine_spidermonkey.cpp#L153) recursive toObject
+* 13498 [code](http://github.com/mongodb/mongo/blob/master/scripting/engine_spidermonkey.cpp#L216)
+* 13615 [code](http://github.com/mongodb/mongo/blob/master/scripting/engine_spidermonkey.cpp#L40) JS allocation failed, either memory leak or using too much memory
+* 9006 [code](http://github.com/mongodb/mongo/blob/master/scripting/engine_spidermonkey.cpp#L46) invalid utf8
+
+
+scripting/engine_v8.cpp
+----
+* 10230 [code](http://github.com/mongodb/mongo/blob/master/scripting/engine_v8.cpp#L499) can't handle external yet
+* 10231 [code](http://github.com/mongodb/mongo/blob/master/scripting/engine_v8.cpp#L544) not an object
+* 10232 [code](http://github.com/mongodb/mongo/blob/master/scripting/engine_v8.cpp#L606) not a func
+* 10233 [code](http://github.com/mongodb/mongo/blob/master/scripting/engine_v8.cpp#L716)
+* 10234 [code](http://github.com/mongodb/mongo/blob/master/scripting/engine_v8.cpp#L743)
+* 12509 [code](http://github.com/mongodb/mongo/blob/master/scripting/engine_v8.cpp#L507) don't know what this is:
+* 12510 [code](http://github.com/mongodb/mongo/blob/master/scripting/engine_v8.cpp#L808) externalSetup already called, can't call externalSetup
+* 12511 [code](http://github.com/mongodb/mongo/blob/master/scripting/engine_v8.cpp#L812) localConnect called with a different name previously
+* 12512 [code](http://github.com/mongodb/mongo/blob/master/scripting/engine_v8.cpp#L834) localConnect already called, can't call externalSetup
+* 13475 [code](http://github.com/mongodb/mongo/blob/master/scripting/engine_v8.cpp#L726)
+
+
+scripting/sm_db.cpp
+----
+* 10235 [code](http://github.com/mongodb/mongo/blob/master/scripting/sm_db.cpp#L78) no cursor!
+* 10236 [code](http://github.com/mongodb/mongo/blob/master/scripting/sm_db.cpp#L83) no args to internal_cursor_constructor
+* 10237 [code](http://github.com/mongodb/mongo/blob/master/scripting/sm_db.cpp#L156) mongo_constructor not implemented yet
+* 10239 [code](http://github.com/mongodb/mongo/blob/master/scripting/sm_db.cpp#L214) no connection!
+* 10245 [code](http://github.com/mongodb/mongo/blob/master/scripting/sm_db.cpp#L284) no connection!
+* 10248 [code](http://github.com/mongodb/mongo/blob/master/scripting/sm_db.cpp#L312) no connection!
+* 10251 [code](http://github.com/mongodb/mongo/blob/master/scripting/sm_db.cpp#L347) no connection!
+
+
+scripting/utils.cpp
+----
+* 10261 [code](http://github.com/mongodb/mongo/blob/master/scripting/utils.cpp#L29) js md5 needs a string
+
+
+shell/shell_utils.cpp
+----
+* 10257 [code](http://github.com/mongodb/mongo/blob/master/shell/shell_utils.cpp#L124) need to specify 1 argument to listFiles
+* 10258 [code](http://github.com/mongodb/mongo/blob/master/shell/shell_utils.cpp#L105) processinfo not supported
+* 12513 [code](http://github.com/mongodb/mongo/blob/master/shell/shell_utils.cpp#L934) connect failed", scope.exec( _dbConnect , "(connect)
+* 12514 [code](http://github.com/mongodb/mongo/blob/master/shell/shell_utils.cpp#L937) login failed", scope.exec( _dbAuth , "(auth)
+* 12518 [code](http://github.com/mongodb/mongo/blob/master/shell/shell_utils.cpp#L855) srand requires a single numeric argument
+* 12519 [code](http://github.com/mongodb/mongo/blob/master/shell/shell_utils.cpp#L862) rand accepts no arguments
+* 12581 [code](http://github.com/mongodb/mongo/blob/master/shell/shell_utils.cpp#L133)
+* 12597 [code](http://github.com/mongodb/mongo/blob/master/shell/shell_utils.cpp#L201) need to specify 1 argument
+* 13006 [code](http://github.com/mongodb/mongo/blob/master/shell/shell_utils.cpp#L873) isWindows accepts no arguments
+* 13301 [code](http://github.com/mongodb/mongo/blob/master/shell/shell_utils.cpp#L221) cat() : file to big to load as a variable
+* 13411 [code](http://github.com/mongodb/mongo/blob/master/shell/shell_utils.cpp#L882) getHostName accepts no arguments
+* 13619 [code](http://github.com/mongodb/mongo/blob/master/shell/shell_utils.cpp#L272) fuzzFile takes 2 arguments
+* 13620 [code](http://github.com/mongodb/mongo/blob/master/shell/shell_utils.cpp#L275) couldn't open file to fuzz
+* 13621 [code](http://github.com/mongodb/mongo/blob/master/shell/shell_utils.cpp#L628) no known mongo program on port
+* 14042 [code](http://github.com/mongodb/mongo/blob/master/shell/shell_utils.cpp#L536)
+
+
+tools/dump.cpp
+----
+* 10262 [code](http://github.com/mongodb/mongo/blob/master/tools/dump.cpp#L106) couldn't open file
+* 14035 [code](http://github.com/mongodb/mongo/blob/master/tools/dump.cpp#L60) couldn't write to file
+
+
+tools/import.cpp
+----
+* 10263 [code](http://github.com/mongodb/mongo/blob/master/tools/import.cpp#L282) unknown error reading file
+* 13289 [code](http://github.com/mongodb/mongo/blob/master/tools/import.cpp#L60) Invalid UTF8 character detected
+* 13295 [code](http://github.com/mongodb/mongo/blob/master/tools/import.cpp#L273) JSONArray file too large
+
+
+tools/sniffer.cpp
+----
+* 10266 [code](http://github.com/mongodb/mongo/blob/master/tools/sniffer.cpp#L482) can't use --source twice
+* 10267 [code](http://github.com/mongodb/mongo/blob/master/tools/sniffer.cpp#L483) source needs more args
+
+
+tools/tool.cpp
+----
+* 10264 [code](http://github.com/mongodb/mongo/blob/master/tools/tool.cpp#L455) invalid object size:
+* 10265 [code](http://github.com/mongodb/mongo/blob/master/tools/tool.cpp#L491) counts don't match
+* 9997 [code](http://github.com/mongodb/mongo/blob/master/tools/tool.cpp#L395) auth failed:
+* 9998 [code](http://github.com/mongodb/mongo/blob/master/tools/tool.cpp#L376) you need to specify fields
+* 9999 [code](http://github.com/mongodb/mongo/blob/master/tools/tool.cpp#L355) file: " + fn ) + " doesn't exist
+
+
+util/alignedbuilder.cpp
+----
+* 13524 [code](http://github.com/mongodb/mongo/blob/master/util/alignedbuilder.cpp#L82) out of memory AlignedBuilder
+* 13584 [code](http://github.com/mongodb/mongo/blob/master/util/alignedbuilder.cpp#L27) out of memory AlignedBuilder
+
+
+util/assert_util.h
+----
+* 10437 [code](http://github.com/mongodb/mongo/blob/master/util/assert_util.h#L234) unknown boost failed
+* 123 [code](http://github.com/mongodb/mongo/blob/master/util/assert_util.h#L74) blah
+* 13294 [code](http://github.com/mongodb/mongo/blob/master/util/assert_util.h#L232)
+* 14043 [code](http://github.com/mongodb/mongo/blob/master/util/assert_util.h#L243)
+* 14044 [code](http://github.com/mongodb/mongo/blob/master/util/assert_util.h#L245) unknown boost failed
+
+
+util/background.cpp
+----
+* 13643 [code](http://github.com/mongodb/mongo/blob/master/util/background.cpp#L52) backgroundjob already started:
+
+
+util/base64.cpp
+----
+* 10270 [code](http://github.com/mongodb/mongo/blob/master/util/base64.cpp#L79) invalid base64
+
+
+util/concurrency/list.h
+----
+* 14050 [code](http://github.com/mongodb/mongo/blob/master/util/concurrency/list.h#L82) List1: item to orphan not in list
+
+
+util/file.h
+----
+* 10438 [code](http://github.com/mongodb/mongo/blob/master/util/file.h#L115) ReadFile error - truncated file?
+
+
+util/file_allocator.cpp
+----
+* 10439 [code](http://github.com/mongodb/mongo/blob/master/util/file_allocator.cpp#L255)
+* 10440 [code](http://github.com/mongodb/mongo/blob/master/util/file_allocator.cpp#L157)
+* 10441 [code](http://github.com/mongodb/mongo/blob/master/util/file_allocator.cpp#L161) Unable to allocate new file of size
+* 10442 [code](http://github.com/mongodb/mongo/blob/master/util/file_allocator.cpp#L163) Unable to allocate new file of size
+* 10443 [code](http://github.com/mongodb/mongo/blob/master/util/file_allocator.cpp#L178) FileAllocator: file write failed
+* 13653 [code](http://github.com/mongodb/mongo/blob/master/util/file_allocator.cpp#L274)
+
+
+util/log.cpp
+----
+* 10268 [code](http://github.com/mongodb/mongo/blob/master/util/log.cpp#L49) LoggingManager already started
+* 14036 [code](http://github.com/mongodb/mongo/blob/master/util/log.cpp#L70) couldn't write to log file
+
+
+util/logfile.cpp
+----
+* 13514 [code](http://github.com/mongodb/mongo/blob/master/util/logfile.cpp#L176) error appending to file on fsync
+* 13515 [code](http://github.com/mongodb/mongo/blob/master/util/logfile.cpp#L166) error appending to file
+* 13516 [code](http://github.com/mongodb/mongo/blob/master/util/logfile.cpp#L141) couldn't open file " << name << " for writing
+* 13517 [code](http://github.com/mongodb/mongo/blob/master/util/logfile.cpp#L93) error appending to file
+* 13518 [code](http://github.com/mongodb/mongo/blob/master/util/logfile.cpp#L70) couldn't open file " << name << " for writing
+* 13519 [code](http://github.com/mongodb/mongo/blob/master/util/logfile.cpp#L91) error 87 appending to file - misaligned direct write?
+
+
+util/mmap.cpp
+----
+* 13468 [code](http://github.com/mongodb/mongo/blob/master/util/mmap.cpp#L34) can't create file already exists
+* 13617 [code](http://github.com/mongodb/mongo/blob/master/util/mmap.cpp#L172) MongoFile : multiple opens of same filename
+
+
+util/mmap_posix.cpp
+----
+* 10446 [code](http://github.com/mongodb/mongo/blob/master/util/mmap_posix.cpp#L80) mmap: can't map area of size 0 file:
+* 10447 [code](http://github.com/mongodb/mongo/blob/master/util/mmap_posix.cpp#L90) map file alloc failed, wanted: " << length << " filelen:
+
+
+util/mmap_win.cpp
+----
+* 13056 [code](http://github.com/mongodb/mongo/blob/master/util/mmap_win.cpp#L190) Async flushing not supported on windows
+
+
+util/net/hostandport.h
+----
+* 13095 [code](http://github.com/mongodb/mongo/blob/master/util/net/hostandport.h#L154) HostAndPort: bad port #
+* 13110 [code](http://github.com/mongodb/mongo/blob/master/util/net/hostandport.h#L150) HostAndPort: bad config string
+
+
+util/net/httpclient.cpp
+----
+* 10271 [code](http://github.com/mongodb/mongo/blob/master/util/net/httpclient.cpp#L40) invalid url" , url.find( "http://
+
+
+util/net/message.h
+----
+* 13273 [code](http://github.com/mongodb/mongo/blob/master/util/net/message.h#L177) single data buffer expected
+
+
+util/net/message_server_asio.cpp
+----
+* 10273 [code](http://github.com/mongodb/mongo/blob/master/util/net/message_server_asio.cpp#L110) _cur not empty! pipelining requests not supported
+* 10274 [code](http://github.com/mongodb/mongo/blob/master/util/net/message_server_asio.cpp#L171) pipelining requests doesn't work yet
+
+
+util/net/message_server_port.cpp
+----
+* 10275 [code](http://github.com/mongodb/mongo/blob/master/util/net/message_server_port.cpp#L103) multiple PortMessageServer not supported
+
+
+util/net/sock.cpp
+----
+* 13079 [code](http://github.com/mongodb/mongo/blob/master/util/net/sock.cpp#L54) path to unix socket too long
+* 13080 [code](http://github.com/mongodb/mongo/blob/master/util/net/sock.cpp#L52) no unix socket support on windows
+
+
+util/net/sock.h
+----
+* 13082 [code](http://github.com/mongodb/mongo/blob/master/util/net/sock.h#L176)
+
+
+util/paths.h
+----
+* 13600 [code](http://github.com/mongodb/mongo/blob/master/util/paths.h#L57)
+* 13646 [code](http://github.com/mongodb/mongo/blob/master/util/paths.h#L86) stat() failed for file: " << path << "
+* 13650 [code](http://github.com/mongodb/mongo/blob/master/util/paths.h#L107) Couldn't open directory '" << dir.string() << "' for flushing:
+* 13651 [code](http://github.com/mongodb/mongo/blob/master/util/paths.h#L111) Couldn't fsync directory '" << dir.string() << "':
+* 13652 [code](http://github.com/mongodb/mongo/blob/master/util/paths.h#L101) Couldn't find parent dir for file:
+
+
+util/processinfo_linux2.cpp
+----
+* 13538 [code](http://github.com/mongodb/mongo/blob/master/util/processinfo_linux2.cpp#L45)
+
+
+util/text.h
+----
+* 13305 [code](http://github.com/mongodb/mongo/blob/master/util/text.h#L131) could not convert string to long long
+* 13306 [code](http://github.com/mongodb/mongo/blob/master/util/text.h#L140) could not convert string to long long
+* 13307 [code](http://github.com/mongodb/mongo/blob/master/util/text.h#L126) cannot convert empty string to long long
+* 13310 [code](http://github.com/mongodb/mongo/blob/master/util/text.h#L144) could not convert string to long long
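
Each entry above links to the assertion call site that owns the listed code. For orientation only, here is a minimal sketch of the shape of such a call; the wrapper function and the `canReadHere` condition are hypothetical placeholders, and only the `uassert( code , message , condition )` pattern is taken from the source tree.

```cpp
#include "util/assert_util.h"   // MongoDB tree header providing uassert()/massert()

// Sketch only: a coded assertion of the kind indexed above.
// "canReadHere" is a made-up stand-in, not the real check in db/replutil.h.
void exampleCheck( bool canReadHere ) {
    uassert( 13435 , "not master and slaveok=false" , canReadHere );
}
```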
+
diff --git a/doxygenConfig b/doxygenConfig
index 0356d10..94eea98 100644
--- a/doxygenConfig
+++ b/doxygenConfig
@@ -3,7 +3,7 @@
#---------------------------------------------------------------------------
DOXYFILE_ENCODING = UTF-8
PROJECT_NAME = MongoDB
-PROJECT_NUMBER = 1.8.3
+PROJECT_NUMBER = 2.0.0
OUTPUT_DIRECTORY = docs/doxygen
CREATE_SUBDIRS = NO
OUTPUT_LANGUAGE = English
diff --git a/jstests/ageoutjournalfiles.js b/jstests/ageoutjournalfiles.js
new file mode 100644
index 0000000..3c12cd8
--- /dev/null
+++ b/jstests/ageoutjournalfiles.js
@@ -0,0 +1,16 @@
+if (false && db.serverStatus().dur) {
+
+ assert(db.serverStatus().dur.ageOutJournalFiles != false);
+
+ db.adminCommand({ setParameter: 1, ageOutJournalFiles: false });
+
+ assert(db.serverStatus().dur.ageOutJournalFiles == false);
+
+ db.adminCommand({ setParameter: 1, ageOutJournalFiles: true });
+
+ assert(db.serverStatus().dur.ageOutJournalFiles != false);
+
+}
+else {
+// print("dur is off");
+}
\ No newline at end of file
diff --git a/jstests/and.js b/jstests/and.js
new file mode 100644
index 0000000..bd6dbcd
--- /dev/null
+++ b/jstests/and.js
@@ -0,0 +1,86 @@
+// Some tests for $and SERVER-1089
+
+t = db.jstests_and;
+t.drop();
+
+t.save( {a:[1,2]} );
+t.save( {a:'foo'} );
+
+function check() {
+ // $and must be an array
+ assert.throws( function() { t.find( {$and:4} ).toArray() } );
+ // $and array must not be empty
+ assert.throws( function() { t.find( {$and:[]} ).toArray() } );
+ // $and elements must be objects
+ assert.throws( function() { t.find( {$and:[4]} ).toArray() } );
+
+ // Check equality matching
+ assert.eq( 1, t.count( {$and:[{a:1}]} ) );
+ assert.eq( 1, t.count( {$and:[{a:1},{a:2}]} ) );
+ assert.eq( 0, t.count( {$and:[{a:1},{a:3}]} ) );
+ assert.eq( 0, t.count( {$and:[{a:1},{a:2},{a:3}]} ) );
+ assert.eq( 1, t.count( {$and:[{a:'foo'}]} ) );
+ assert.eq( 0, t.count( {$and:[{a:'foo'},{a:'g'}]} ) );
+
+ // Check $and with other fields
+ assert.eq( 1, t.count( {a:2,$and:[{a:1}]} ) );
+ assert.eq( 0, t.count( {a:0,$and:[{a:1}]} ) );
+ assert.eq( 0, t.count( {a:2,$and:[{a:0}]} ) );
+ assert.eq( 1, t.count( {a:1,$and:[{a:1}]} ) );
+
+ // Check recursive $and
+ assert.eq( 1, t.count( {a:2,$and:[{$and:[{a:1}]}]} ) );
+ assert.eq( 0, t.count( {a:0,$and:[{$and:[{a:1}]}]} ) );
+ assert.eq( 0, t.count( {a:2,$and:[{$and:[{a:0}]}]} ) );
+ assert.eq( 1, t.count( {a:1,$and:[{$and:[{a:1}]}]} ) );
+
+ assert.eq( 1, t.count( {$and:[{a:2},{$and:[{a:1}]}]} ) );
+ assert.eq( 0, t.count( {$and:[{a:0},{$and:[{a:1}]}]} ) );
+ assert.eq( 0, t.count( {$and:[{a:2},{$and:[{a:0}]}]} ) );
+ assert.eq( 1, t.count( {$and:[{a:1},{$and:[{a:1}]}]} ) );
+
+ // Some of these cases were more important with an alternative $and syntax
+ // that was rejected, but they're still valid checks.
+
+ // Check simple regex
+ assert.eq( 1, t.count( {$and:[{a:/foo/}]} ) );
+ // Check multiple regexes
+ assert.eq( 1, t.count( {$and:[{a:/foo/},{a:/^f/},{a:/o/}]} ) );
+ assert.eq( 0, t.count( {$and:[{a:/foo/},{a:/^g/}]} ) );
+ assert.eq( 1, t.count( {$and:[{a:/^f/},{a:'foo'}]} ) );
+ // Check regex flags
+ assert.eq( 0, t.count( {$and:[{a:/^F/},{a:'foo'}]} ) );
+ assert.eq( 1, t.count( {$and:[{a:/^F/i},{a:'foo'}]} ) );
+
+
+
+ // Check operator
+ assert.eq( 1, t.count( {$and:[{a:{$gt:0}}]} ) );
+
+ // Check where
+ assert.eq( 1, t.count( {a:'foo',$where:'this.a=="foo"'} ) );
+ assert.eq( 1, t.count( {$and:[{a:'foo'}],$where:'this.a=="foo"'} ) );
+ assert.eq( 1, t.count( {$and:[{a:'foo'}],$where:'this.a=="foo"'} ) );
+
+ // Nested where ok
+ assert.eq( 1, t.count({$and:[{$where:'this.a=="foo"'}]}) );
+ assert.eq( 1, t.count({$and:[{a:'foo'},{$where:'this.a=="foo"'}]}) );
+ assert.eq( 1, t.count({$and:[{$where:'this.a=="foo"'}],$where:'this.a=="foo"'}) );
+}
+
+check();
+t.ensureIndex( {a:1} );
+check();
+var e = t.find( {$and:[{a:1}]} ).explain();
+assert.eq( 'BtreeCursor a_1', e.cursor );
+assert.eq( [[1,1]], e.indexBounds.a );
+
+function checkBounds( query ) {
+ var e = t.find( query ).explain();
+ assert.eq( 1, e.n );
+ assert.eq( [[1,1]], e.indexBounds.a );
+}
+
+// Since this is a multikey index, we get the bounds from the first constraint scanned.
+checkBounds( {a:1,$and:[{a:2}]} );
+checkBounds( {$and:[{a:1},{a:2}]} );
diff --git a/jstests/and2.js b/jstests/and2.js
new file mode 100644
index 0000000..0bd13eb
--- /dev/null
+++ b/jstests/and2.js
@@ -0,0 +1,27 @@
+// Test dollar sign operator with $and SERVER-1089
+
+t = db.jstests_and2;
+
+t.drop();
+t.save( {a:[1,2]} );
+t.update( {a:1}, {$set:{'a.$':5}} );
+assert.eq( [5,2], t.findOne().a );
+
+t.drop();
+t.save( {a:[1,2]} );
+t.update( {$and:[{a:1}]}, {$set:{'a.$':5}} );
+assert.eq( [5,2], t.findOne().a );
+
+// Make sure dollar sign operator with $and is consistent with no $and case
+t.drop();
+t.save( {a:[1,2],b:[3,4]} );
+t.update( {a:1,b:4}, {$set:{'a.$':5}} );
+// Probably not what we want here, just trying to make sure $and is consistent
+assert.eq( {a:[1,5],b:[3,4]}, t.find( {}, {_id:0} ).toArray()[ 0 ] );
+
+// Make sure dollar sign operator with $and is consistent with no $and case
+t.drop();
+t.save( {a:[1,2],b:[3,4]} );
+t.update( {a:1,$and:[{b:4}]}, {$set:{'a.$':5}} );
+// Probably not what we want here, just trying to make sure $and is consistent
+assert.eq( {a:[1,5],b:[3,4]}, t.find( {}, {_id:0} ).toArray()[ 0 ] );
diff --git a/jstests/and3.js b/jstests/and3.js
new file mode 100644
index 0000000..98a0974
--- /dev/null
+++ b/jstests/and3.js
@@ -0,0 +1,66 @@
+// Check key match with sub matchers - part of SERVER-3192
+
+t = db.jstests_and3;
+t.drop();
+
+t.save( {a:1} );
+t.save( {a:'foo'} );
+
+t.ensureIndex( {a:1} );
+
+function checkScanMatch( query, nscannedObjects, n ) {
+ var e = t.find( query ).hint( {a:1} ).explain();
+ // NOTE The nscannedObjects values aren't necessarily optimal currently,
+ // we're just checking current behavior here.
+ assert.eq( nscannedObjects, e.nscannedObjects );
+ assert.eq( n, e.n );
+}
+
+checkScanMatch( {a:/o/}, 1, 1 );
+checkScanMatch( {a:/a/}, 0, 0 );
+checkScanMatch( {a:{$not:/o/}}, 2, 1 );
+checkScanMatch( {a:{$not:/a/}}, 2, 2 );
+
+checkScanMatch( {$and:[{a:/o/}]}, 1, 1 );
+checkScanMatch( {$and:[{a:/a/}]}, 0, 0 );
+checkScanMatch( {$and:[{a:{$not:/o/}}]}, 2, 1 );
+checkScanMatch( {$and:[{a:{$not:/a/}}]}, 2, 2 );
+checkScanMatch( {$and:[{a:/o/},{a:{$not:/o/}}]}, 1, 0 );
+checkScanMatch( {$and:[{a:/o/},{a:{$not:/a/}}]}, 1, 1 );
+checkScanMatch( {$or:[{a:/o/}]}, 1, 1 );
+checkScanMatch( {$or:[{a:/a/}]}, 0, 0 );
+checkScanMatch( {$nor:[{a:/o/}]}, 2, 1 );
+checkScanMatch( {$nor:[{a:/a/}]}, 2, 2 );
+
+checkScanMatch( {$and:[{$and:[{a:/o/}]}]}, 1, 1 );
+checkScanMatch( {$and:[{$and:[{a:/a/}]}]}, 0, 0 );
+checkScanMatch( {$and:[{$and:[{a:{$not:/o/}}]}]}, 2, 1 );
+checkScanMatch( {$and:[{$and:[{a:{$not:/a/}}]}]}, 2, 2 );
+checkScanMatch( {$and:[{$or:[{a:/o/}]}]}, 1, 1 );
+checkScanMatch( {$and:[{$or:[{a:/a/}]}]}, 0, 0 );
+checkScanMatch( {$or:[{a:{$not:/o/}}]}, 2, 1 );
+checkScanMatch( {$and:[{$or:[{a:{$not:/o/}}]}]}, 2, 1 );
+checkScanMatch( {$and:[{$or:[{a:{$not:/a/}}]}]}, 2, 2 );
+checkScanMatch( {$and:[{$nor:[{a:/o/}]}]}, 2, 1 );
+checkScanMatch( {$and:[{$nor:[{a:/a/}]}]}, 2, 2 );
+
+checkScanMatch( {$where:'this.a==1'}, 2, 1 );
+checkScanMatch( {$and:[{$where:'this.a==1'}]}, 2, 1 );
+
+checkScanMatch( {a:1,$where:'this.a==1'}, 1, 1 );
+checkScanMatch( {a:1,$and:[{$where:'this.a==1'}]}, 1, 1 );
+checkScanMatch( {$and:[{a:1},{$where:'this.a==1'}]}, 1, 1 );
+checkScanMatch( {$and:[{a:1,$where:'this.a==1'}]}, 1, 1 );
+checkScanMatch( {a:1,$and:[{a:1},{a:1,$where:'this.a==1'}]}, 1, 1 );
+
+function checkImpossibleMatch( query ) {
+ var e = t.find( query ).explain();
+ assert.eq( 0, e.n );
+ assert.eq( 'BasicCursor', e.cursor );
+}
+
+// With a single key index, all bounds are utilized.
+assert.eq( [[1,1]], t.find( {$and:[{a:1}]} ).explain().indexBounds.a );
+assert.eq( [[1,1]], t.find( {a:1,$and:[{a:1}]} ).explain().indexBounds.a );
+checkImpossibleMatch( {a:1,$and:[{a:2}]} );
+checkImpossibleMatch( {$and:[{a:1},{a:2}]} );
diff --git a/jstests/andor.js b/jstests/andor.js
new file mode 100644
index 0000000..fae6ee4
--- /dev/null
+++ b/jstests/andor.js
@@ -0,0 +1,105 @@
+// SERVER-1089 Test and/or nesting
+
+t = db.jstests_andor;
+t.drop();
+
+// helper: assert that query q matches exactly one document
+function ok( q ) {
+ assert.eq( 1, t.find( q ).itcount() );
+}
+
+t.save( {a:1} );
+
+test = function() {
+
+ ok( {a:1} );
+
+ ok( {$and:[{a:1}]} );
+ ok( {$or:[{a:1}]} );
+
+ ok( {$and:[{$and:[{a:1}]}]} );
+ ok( {$or:[{$or:[{a:1}]}]} );
+
+ ok( {$and:[{$or:[{a:1}]}]} );
+ ok( {$or:[{$and:[{a:1}]}]} );
+
+ ok( {$and:[{$and:[{$or:[{a:1}]}]}]} );
+ ok( {$and:[{$or:[{$and:[{a:1}]}]}]} );
+ ok( {$or:[{$and:[{$and:[{a:1}]}]}]} );
+
+ ok( {$or:[{$and:[{$or:[{a:1}]}]}]} );
+
+ // now test $nor
+
+ ok( {$and:[{a:1}]} );
+ ok( {$nor:[{a:2}]} );
+
+ ok( {$and:[{$and:[{a:1}]}]} );
+ ok( {$nor:[{$nor:[{a:1}]}]} );
+
+ ok( {$and:[{$nor:[{a:2}]}]} );
+ ok( {$nor:[{$and:[{a:2}]}]} );
+
+ ok( {$and:[{$and:[{$nor:[{a:2}]}]}]} );
+ ok( {$and:[{$nor:[{$and:[{a:2}]}]}]} );
+ ok( {$nor:[{$and:[{$and:[{a:2}]}]}]} );
+
+ ok( {$nor:[{$and:[{$nor:[{a:1}]}]}]} );
+
+}
+
+test();
+t.ensureIndex( {a:1} );
+test();
+
+// Test an inequality base match.
+
+test = function() {
+
+ ok( {a:{$ne:2}} );
+
+ ok( {$and:[{a:{$ne:2}}]} );
+ ok( {$or:[{a:{$ne:2}}]} );
+
+ ok( {$and:[{$and:[{a:{$ne:2}}]}]} );
+ ok( {$or:[{$or:[{a:{$ne:2}}]}]} );
+
+ ok( {$and:[{$or:[{a:{$ne:2}}]}]} );
+ ok( {$or:[{$and:[{a:{$ne:2}}]}]} );
+
+ ok( {$and:[{$and:[{$or:[{a:{$ne:2}}]}]}]} );
+ ok( {$and:[{$or:[{$and:[{a:{$ne:2}}]}]}]} );
+ ok( {$or:[{$and:[{$and:[{a:{$ne:2}}]}]}]} );
+
+ ok( {$or:[{$and:[{$or:[{a:{$ne:2}}]}]}]} );
+
+ // now test $nor
+
+ ok( {$and:[{a:{$ne:2}}]} );
+ ok( {$nor:[{a:{$ne:1}}]} );
+
+ ok( {$and:[{$and:[{a:{$ne:2}}]}]} );
+ ok( {$nor:[{$nor:[{a:{$ne:2}}]}]} );
+
+ ok( {$and:[{$nor:[{a:{$ne:1}}]}]} );
+ ok( {$nor:[{$and:[{a:{$ne:1}}]}]} );
+
+ ok( {$and:[{$and:[{$nor:[{a:{$ne:1}}]}]}]} );
+ ok( {$and:[{$nor:[{$and:[{a:{$ne:1}}]}]}]} );
+ ok( {$nor:[{$and:[{$and:[{a:{$ne:1}}]}]}]} );
+
+ ok( {$nor:[{$and:[{$nor:[{a:{$ne:2}}]}]}]} );
+
+}
+
+t.drop();
+t.save( {a:1} );
+test();
+t.ensureIndex( {a:1} );
+test();
+
+t.drop();
+t.ensureIndex( {a:1} );
+var e = t.find( {$and:[{a:1}]} ).explain();
+// nested $or clauses currently ignored for indexing
+assert.eq( e.indexBounds, t.find( {$and:[{a:1,$or:[{a:2}]}]} ).explain().indexBounds );
diff --git a/jstests/apitest_dbcollection.js b/jstests/apitest_dbcollection.js
index f6e74da..0983b06 100644
--- a/jstests/apitest_dbcollection.js
+++ b/jstests/apitest_dbcollection.js
@@ -55,7 +55,7 @@ if( v.ns != "test.test_db" ) {
assert (v.ns == "test.test_db",9);
assert (v.ok == 1,10);
-assert(v.result.toString().match(/nrecords\?:(\d+)/)[1] == 100,11);
+assert.eq(100,v.nrecords,11)
/*
* test deleteIndex, deleteIndexes
diff --git a/jstests/array_match2.js b/jstests/array_match2.js
new file mode 100644
index 0000000..d64ca1b
--- /dev/null
+++ b/jstests/array_match2.js
@@ -0,0 +1,25 @@
+// Different recursive array match cases SERVER-2898
+
+t = db.jstests_array_match2;
+t.drop();
+
+t.save( {a:[{1:4},5]} );
+// When the array index is the last field, both of these match types work.
+assert.eq( 1, t.count( {'a.1':4} ) );
+assert.eq( 1, t.count( {'a.1':5} ) );
+
+t.remove();
+// When the array index is not the last field, only one of the match types works.
+t.save( {a:[{1:{foo:4}},{foo:5}]} );
+if ( 0 ) { // SERVER-2898
+assert.eq( 1, t.count( {'a.1.foo':4} ) );
+}
+assert.eq( 1, t.count( {'a.1.foo':5} ) );
+
+// Same issue with the $exists operator
+t.remove();
+t.save( {a:[{1:{foo:4}},{}]} );
+assert.eq( 1, t.count( {'a.1':{$exists:true}} ) );
+if ( 0 ) { // SERVER-2898
+assert.eq( 1, t.count( {'a.1.foo':{$exists:true}} ) );
+}
diff --git a/jstests/array_match3.js b/jstests/array_match3.js
new file mode 100644
index 0000000..c865343
--- /dev/null
+++ b/jstests/array_match3.js
@@ -0,0 +1,13 @@
+// SERVER-2902 Test indexing of numerically referenced array elements.
+
+t = db.jstests_array_match3;
+t.drop();
+
+// Test matching numerically referenced array element.
+t.save( {a:{'0':5}} );
+t.save( {a:[5]} );
+assert.eq( 2, t.count( {'a.0':5} ) );
+
+// Test with index.
+t.ensureIndex( {'a.0':1} );
+assert.eq( 2, t.count( {'a.0':5} ) );
diff --git a/jstests/arrayfind2.js b/jstests/arrayfind2.js
index 94d77f1..1e63bf6 100644
--- a/jstests/arrayfind2.js
+++ b/jstests/arrayfind2.js
@@ -32,4 +32,5 @@ assert.eq( {"a.x":[[3,3]]}, t.find( { a : { $all : [ { $elemMatch : { x : 3 } },
t.ensureIndex( { "a.x":1,"a.y":-1 } );
-assert.eq( {"a.x":[[3,3]],"a.y":[[1.7976931348623157e+308,4]]}, t.find( { a : { $all : [ { $elemMatch : { x : 3, y : { $gt: 4 } } } ] } } ).explain().indexBounds );
+// TODO Index bounds below for elemMatch could be improved. - SERVER-3104
+assert.eq( {"a.x":[[3,3]],"a.y":[[{$maxElement:1},{$minElement:1}]]}, t.find( { a : { $all : [ { $elemMatch : { x : 3, y : { $gt: 4 } } } ] } } ).explain().indexBounds );
diff --git a/jstests/arrayfind4.js b/jstests/arrayfind4.js
new file mode 100644
index 0000000..b141425
--- /dev/null
+++ b/jstests/arrayfind4.js
@@ -0,0 +1,22 @@
+// Test query empty array SERVER-2258
+
+t = db.jstests_arrayfind4;
+t.drop();
+
+t.save( {a:[]} );
+t.ensureIndex( {a:1} );
+
+assert.eq( 1, t.find( {a:[]} ).hint( {$natural:1} ).itcount() );
+assert.eq( 1, t.find( {a:[]} ).hint( {a:1} ).itcount() );
+
+assert.eq( 1, t.find( {a:{$in:[[]]}} ).hint( {$natural:1} ).itcount() );
+assert.eq( 1, t.find( {a:{$in:[[]]}} ).hint( {a:1} ).itcount() );
+
+t.remove();
+t.save( {a:[[]]} );
+
+assert.eq( 1, t.find( {a:[]} ).hint( {$natural:1} ).itcount() );
+assert.eq( 1, t.find( {a:[]} ).hint( {a:1} ).itcount() );
+
+assert.eq( 1, t.find( {a:{$in:[[]]}} ).hint( {$natural:1} ).itcount() );
+assert.eq( 1, t.find( {a:{$in:[[]]}} ).hint( {a:1} ).itcount() );
diff --git a/jstests/arrayfind5.js b/jstests/arrayfind5.js
new file mode 100644
index 0000000..083dc06
--- /dev/null
+++ b/jstests/arrayfind5.js
@@ -0,0 +1,23 @@
+// Test indexed elemmatch of missing field.
+
+t = db.jstests_arrayfind5;
+t.drop();
+
+function check( nullElemMatch ) {
+ assert.eq( 1, t.find( {'a.b':1} ).itcount() );
+ assert.eq( 1, t.find( {a:{$elemMatch:{b:1}}} ).itcount() );
+ assert.eq( 0, t.find( {'a.b':null} ).itcount() );
+ assert.eq( nullElemMatch ? 1 : 0, t.find( {a:{$elemMatch:{b:null}}} ).itcount() ); // see SERVER-3377
+}
+
+t.save( {a:[{},{b:1}]} );
+check( true );
+t.ensureIndex( {'a.b':1} );
+check( true );
+
+t.drop();
+
+t.save( {a:[5,{b:1}]} );
+check( false );
+t.ensureIndex( {'a.b':1} );
+check( false );
diff --git a/jstests/auth/auth1.js b/jstests/auth/auth1.js
index 2f2a1b4..c837085 100644
--- a/jstests/auth/auth1.js
+++ b/jstests/auth/auth1.js
@@ -26,6 +26,11 @@ for( i = 0; i < 999; ++i ) {
assert.eq( 999, t.count() , "A1" );
assert.eq( 999, t.find().toArray().length , "A2" );
+db.setProfilingLevel( 2 );
+t.count();
+db.setProfilingLevel( 0 );
+assert.lt( 0 , db.system.profile.find( { user : "eliot" } ).count() , "AP1" )
+
assert.eq( 999, db.eval( function() { return db[ "jstests_auth_auth1" ].count(); } ) , "A3" );
db.eval( function() { db[ "jstests_auth_auth1" ].save( {i:999} ) } );
assert.eq( 1000, db.eval( function() { return db[ "jstests_auth_auth1" ].count(); } ) , "A4" );
diff --git a/jstests/auth/auth2.js b/jstests/auth/auth2.js
new file mode 100644
index 0000000..4f30894
--- /dev/null
+++ b/jstests/auth/auth2.js
@@ -0,0 +1,23 @@
+// test read/write permissions
+
+port = allocatePorts( 1 )[ 0 ];
+baseName = "jstests_auth_auth2";
+
+m = startMongod( "--auth", "--port", port, "--dbpath", "/data/db/" + baseName, "--nohttpinterface", "--bind_ip", "127.0.0.1" , "--nojournal" , "--smallfiles" );
+db = m.getDB( "admin" );
+
+t = db[ baseName ];
+t.drop();
+
+users = db.getCollection( "system.users" );
+assert.eq( 0 , users.count() );
+
+db.addUser( "eliot" , "eliot" );
+
+assert.throws( function(){ db.users.count(); } )
+
+assert.throws( function() { db.shutdownServer(); } )
+
+db.auth( "eliot" , "eliot" )
+
+db.shutdownServer();
diff --git a/jstests/auth/rename.js b/jstests/auth/rename.js
new file mode 100644
index 0000000..5411298
--- /dev/null
+++ b/jstests/auth/rename.js
@@ -0,0 +1,40 @@
+// test renameCollection with auth
+
+port = allocatePorts( 1 )[ 0 ];
+
+baseName = "jstests_rename_auth";
+m = startMongod( "--auth", "--port", port, "--dbpath", "/data/db/" + baseName, "--nohttpinterface" );
+
+db1 = m.getDB( baseName )
+db2 = m.getDB( baseName + '_other' )
+admin = m.getDB( 'admin' )
+
+// auth not yet checked since we are on localhost
+db1.addUser( "foo", "bar" );
+db2.addUser( "bar", "foo" );
+
+printjson(db1.a.count());
+db1.a.save({});
+assert.eq(db1.a.count(), 1);
+
+//this makes auth required on localhost
+admin.addUser('not', 'used');
+
+// can't run same db w/o auth
+assert.commandFailed( admin.runCommand({renameCollection:db1.a.getFullName(), to: db1.b.getFullName()}) );
+
+// can run same db with auth
+db1.auth('foo', 'bar')
+assert.commandWorked( admin.runCommand({renameCollection:db1.a.getFullName(), to: db1.b.getFullName()}) );
+
+// can't run diff db w/o auth
+assert.commandFailed( admin.runCommand({renameCollection:db1.b.getFullName(), to: db2.a.getFullName()}) );
+
+// can run diff db with auth
+db2.auth('bar', 'foo');
+assert.commandWorked( admin.runCommand({renameCollection:db1.b.getFullName(), to: db2.a.getFullName()}) );
+
+// test post conditions
+assert.eq(db1.a.count(), 0);
+assert.eq(db1.b.count(), 0);
+assert.eq(db2.a.count(), 1);
diff --git a/jstests/auth1.js b/jstests/auth1.js
index ce0159b..a2cc48a 100644
--- a/jstests/auth1.js
+++ b/jstests/auth1.js
@@ -38,3 +38,20 @@ pass = "a" + Math.random();
db2.addUser( "eliot" , pass );
assert.commandFailed( db2.runCommand( { authenticate: 1, user: "eliot", nonce: "foo", key: "bar" } ) );
+
+// check sanity check SERVER-3003
+
+before = db2.system.users.count()
+
+assert.throws( function(){
+ db2.addUser( "" , "abc" )
+} , null , "C1" )
+
+assert.throws( function(){
+ db2.addUser( "abc" , "" )
+} , null , "C2" )
+
+
+after = db2.system.users.count()
+assert( before > 0 , "C3" )
+assert.eq( before , after , "C4" )
diff --git a/jstests/auth2.js b/jstests/auth2.js
index 9b6dfad..9c2b38f 100644
--- a/jstests/auth2.js
+++ b/jstests/auth2.js
@@ -2,4 +2,8 @@
// SERVER-724
db.runCommand({logout : 1});
-db.runCommand({logout : 1});
+x = db.runCommand({logout : 1});
+assert.eq( 1 , x.ok , "A" )
+
+x = db.logout();
+assert.eq( 1 , x.ok , "B" )
diff --git a/jstests/bench_test1.js b/jstests/bench_test1.js
new file mode 100644
index 0000000..c32b37d
--- /dev/null
+++ b/jstests/bench_test1.js
@@ -0,0 +1,16 @@
+
+t = db.bench_test1;
+t.drop();
+
+t.insert( { _id : 1 , x : 1 } )
+t.insert( { _id : 2 , x : 1 } )
+
+ops = [
+ { op : "findOne" , ns : t.getFullName() , query : { _id : 1 } } ,
+ { op : "update" , ns : t.getFullName() , query : { _id : 1 } , update : { $inc : { x : 1 } } }
+]
+
+seconds = .7
+
+res = benchRun( { ops : ops , parallel : 2 , seconds : seconds , host : db.getMongo().host } )
+assert.lte( seconds * res.update , t.findOne( { _id : 1 } ).x , "A1" )
diff --git a/jstests/bench_test2.js b/jstests/bench_test2.js
new file mode 100644
index 0000000..4a69c9c
--- /dev/null
+++ b/jstests/bench_test2.js
@@ -0,0 +1,41 @@
+
+t = db.bench_test2
+t.drop();
+
+for ( i=0; i<100; i++ )
+ t.insert( { _id : i , x : 0 } );
+db.getLastError();
+
+res = benchRun( { ops : [ { ns : t.getFullName() ,
+ op : "update" ,
+ query : { _id : { "#RAND_INT" : [ 0 , 100 ] } } ,
+ update : { $inc : { x : 1 } } } ] ,
+ parallel : 2 ,
+ seconds : 1 ,
+ totals : true ,
+ host : db.getMongo().host } )
+printjson( res );
+
+sumsq = 0
+sum = 0
+
+min = 1000
+max = 0;
+t.find().forEach(
+ function(z){
+ sum += z.x;
+ sumsq += Math.pow( ( res.update / 100 ) - z.x , 2 );
+ min = Math.min( z.x , min );
+ max = Math.max( z.x , max );
+ }
+)
+
+avg = sum / 100
+std = Math.sqrt( sumsq / 100 )
+
+print( "Avg: " + avg )
+print( "Std: " + std )
+print( "Min: " + min )
+print( "Max: " + max )
+
+
diff --git a/jstests/big_object1.js b/jstests/big_object1.js
index be841e0..6bbe115 100644
--- a/jstests/big_object1.js
+++ b/jstests/big_object1.js
@@ -44,3 +44,5 @@ if ( db.adminCommand( "buildinfo" ).bits == 64 ){
else {
print( "skipping big_object1 b/c not 64-bit" )
}
+
+print("SUCCESS");
diff --git a/jstests/binData.js b/jstests/binData.js
new file mode 100644
index 0000000..3f03765
--- /dev/null
+++ b/jstests/binData.js
@@ -0,0 +1,14 @@
+
+var x = new BinData(3, "OEJTfmD8twzaj/LPKLIVkA==");
+assert.eq(x.hex(), "3842537e60fcb70cda8ff2cf28b21590", "bad hex");
+assert.eq(x.base64(), "OEJTfmD8twzaj/LPKLIVkA==", "bad base64");
+assert.eq(x.type, 3, "bad type");
+assert.eq(x.length(), 16, "bad length");
+
+x = new BinData(0, "TWFuIGlzIGRpc3Rpbmd1aXNoZWQsIG5vdCBvbmx5IGJ5IGhpcyByZWFzb24sIGJ1dCBieSB0aGlzIHNpbmd1bGFyIHBhc3Npb24gZnJvbSBvdGhlciBhbmltYWxzLCB3aGljaCBpcyBhIGx1c3Qgb2YgdGhlIG1pbmQsIHRoYXQgYnkgYSBwZXJzZXZlcmFuY2Ugb2YgZGVsaWdodCBpbiB0aGUgY29udGludWVkIGFuZCBpbmRlZmF0aWdhYmxlIGdlbmVyYXRpb24gb2Yga25vd2xlZGdlLCBleGNlZWRzIHRoZSBzaG9ydCB2ZWhlbWVuY2Ugb2YgYW55IGNhcm5hbCBwbGVhc3VyZS4=");
+assert.eq(x.hex(), "4d616e2069732064697374696e677569736865642c206e6f74206f6e6c792062792068697320726561736f6e2c2062757420627920746869732073696e67756c61722070617373696f6e2066726f6d206f7468657220616e696d616c732c2077686963682069732061206c757374206f6620746865206d696e642c20746861742062792061207065727365766572616e6365206f662064656c6967687420696e2074686520636f6e74696e75656420616e6420696e6465666174696761626c652067656e65726174696f6e206f66206b6e6f776c656467652c2065786365656473207468652073686f727420766568656d656e6365206f6620616e79206361726e616c20706c6561737572652e", "bad hex");
+assert.eq(x.base64(), "TWFuIGlzIGRpc3Rpbmd1aXNoZWQsIG5vdCBvbmx5IGJ5IGhpcyByZWFzb24sIGJ1dCBieSB0aGlzIHNpbmd1bGFyIHBhc3Npb24gZnJvbSBvdGhlciBhbmltYWxzLCB3aGljaCBpcyBhIGx1c3Qgb2YgdGhlIG1pbmQsIHRoYXQgYnkgYSBwZXJzZXZlcmFuY2Ugb2YgZGVsaWdodCBpbiB0aGUgY29udGludWVkIGFuZCBpbmRlZmF0aWdhYmxlIGdlbmVyYXRpb24gb2Yga25vd2xlZGdlLCBleGNlZWRzIHRoZSBzaG9ydCB2ZWhlbWVuY2Ugb2YgYW55IGNhcm5hbCBwbGVhc3VyZS4=", "bad base64");
+assert.eq(x.type, 0, "bad type");
+assert.eq(x.length(), 269, "bad length");
+
+
diff --git a/jstests/capped.js b/jstests/capped.js
index bae7472..6fdc4df 100644
--- a/jstests/capped.js
+++ b/jstests/capped.js
@@ -1,11 +1,11 @@
db.jstests_capped.drop();
db.createCollection("jstests_capped", {capped:true, size:30000});
-assert.eq( 0, db.system.indexes.find( {ns:"test.jstests_capped"} ).count() );
+
+assert.eq( 0, db.system.indexes.find( {ns:"test.jstests_capped"} ).count(), "expected a count of zero indexes for new capped collection" );
t = db.jstests_capped;
t.save({x:1});
t.save({x:2});
-assert( t.find().sort({$natural:1})[0].x == 1 );
-assert( t.find().sort({$natural:-1})[0].x == 2 );
-
+assert( t.find().sort({$natural:1})[0].x == 1 , "expected obj.x==1");
+assert( t.find().sort({$natural:-1})[0].x == 2, "expected obj.x == 2");
diff --git a/jstests/capped2.js b/jstests/capped2.js
index 2d2f6a8..65bb82f 100644
--- a/jstests/capped2.js
+++ b/jstests/capped2.js
@@ -8,7 +8,7 @@ function debug( x ) {
var val = new Array( 2000 );
var c = "";
-for( i = 0; i < 2000; ++i, c += "-" ) {
+for( i = 0; i < 2000; ++i, c += "---" ) { // bigger and bigger objects through the array...
val[ i ] = { a: c };
}
@@ -47,16 +47,16 @@ function checkDecreasing( i ) {
for( i = 0 ;; ++i ) {
debug( "capped 2: " + i );
- tzz.save( val[ i ] );
+ tzz.insert( val[ i ] );
if ( tzz.count() == 0 ) {
- assert( i > 100, "K" );
- break;
+ assert( i > 100, "K" );
+ break;
}
checkIncreasing( i );
}
for( i = 600 ; i >= 0 ; --i ) {
debug( "capped 2: " + i );
- tzz.save( val[ i ] );
+ tzz.insert( val[ i ] );
checkDecreasing( i );
}
diff --git a/jstests/capped5.js b/jstests/capped5.js
index 1c7ec3d..be6c27d 100644
--- a/jstests/capped5.js
+++ b/jstests/capped5.js
@@ -4,12 +4,11 @@ tn = "capped5"
t = db[tn]
t.drop();
+
db.createCollection( tn , {capped: true, size: 1024 * 1024 * 1 } );
t.insert( { _id : 5 , x : 11 , z : 52 } );
-
assert.eq( 0 , t.getIndexKeys().length , "A0" )
assert.eq( 52 , t.findOne( { x : 11 } ).z , "A1" );
-assert.eq( 52 , t.findOne( { _id : 5 } ).z , "A2" );
t.ensureIndex( { _id : 1 } )
t.ensureIndex( { x : 1 } )
@@ -41,10 +40,10 @@ t.ensureIndex( { x:1 }, {unique:true, dropDups:true } );
assert.eq( 1, db.system.indexes.count( {ns:"test."+tn} ) );
assert.eq( 2, t.find().hint( {x:1} ).toArray().length );
-// SERVER-525
+// SERVER-525 (closed) unique indexes in capped collection
t.drop();
db.createCollection( tn , {capped: true, size: 1024 * 1024 * 1 } );
-t.ensureIndex( { _id:1 } );
+t.ensureIndex( { _id:1 } ); // note we assume it will be automatically unique because it is _id
t.insert( { _id : 5 , x : 11 } );
t.insert( { _id : 5 , x : 12 } );
assert.eq( 1, t.find().toArray().length );
diff --git a/jstests/capped6.js b/jstests/capped6.js
index 6579807..098f667 100644
--- a/jstests/capped6.js
+++ b/jstests/capped6.js
@@ -52,7 +52,7 @@ var max = 0;
*/
function doTest() {
for( var i = max; i < oldMax; ++i ) {
- tzz.save( val[ i ] );
+ tzz.insert( val[ i ] );
}
max = oldMax;
count = tzz.count();
diff --git a/jstests/capped8.js b/jstests/capped8.js
index cce0eec..e5b28dc 100644
--- a/jstests/capped8.js
+++ b/jstests/capped8.js
@@ -9,25 +9,39 @@ function debug( x ) {
}
/** Generate an object with a string field of specified length */
-function obj( size ) {
- return {a:new Array( size + 1 ).toString()};;
+function obj( size, x ) {
+    return {X:x, a:new Array( size + 1 ).toString()};
}
function withinOne( a, b ) {
assert( Math.abs( a - b ) <= 1, "not within one: " + a + ", " + b )
}
+var X = 0;
+
/**
* Insert enough documents of the given size spec that the collection will
* contain only documents having this size spec.
*/
-function insertMany( size ) {
+function insertManyRollingOver( objsize ) {
// Add some variability, as the precise number can trigger different cases.
- n = 250 + Random.randInt( 10 );
+ X++;
+ n = 250 + Random.randInt(10);
+
+ assert(t.count() == 0 || t.findOne().X != X);
+
for( i = 0; i < n; ++i ) {
- t.save( obj( size ) );
+ t.save( obj( objsize, X ) );
debug( t.count() );
}
+
+ if (t.findOne().X != X) {
+ printjson(t.findOne());
+ print("\n\nERROR didn't roll over in insertManyRollingOver " + objsize);
+ print("approx amountwritten: " + (objsize * n));
+ printjson(t.stats());
+ assert(false);
+ }
}
/**
@@ -37,10 +51,10 @@ function insertMany( size ) {
function insertAndTruncate( first ) {
myInitialCount = t.count();
// Insert enough documents to make the capped allocation loop over.
- insertMany( 50 );
+ insertManyRollingOver( 150 );
myFiftyCount = t.count();
// Insert documents that are too big to fit in the smaller extents.
- insertMany( 2000 );
+ insertManyRollingOver( 5000 );
myTwokCount = t.count();
if ( first ) {
initialCount = myInitialCount;
@@ -69,18 +83,24 @@ function testTruncate() {
insertAndTruncate( false );
}
+var pass = 1;
+
+print("pass " + pass++);
t.drop();
-db._dbCommand( { create:"jstests_capped8", capped: true, $nExtents: [ 10000, 10000, 1000 ] } );
+db._dbCommand( { create:"jstests_capped8", capped: true, $nExtents: [ 10000, 10000, 4000 ] } );
testTruncate();
+print("pass " + pass++);
t.drop();
-db._dbCommand( { create:"jstests_capped8", capped: true, $nExtents: [ 10000, 1000, 1000 ] } );
+db._dbCommand( { create:"jstests_capped8", capped: true, $nExtents: [ 10000, 1000, 4000 ] } );
testTruncate();
+print("pass " + pass++);
t.drop();
-db._dbCommand( { create:"jstests_capped8", capped: true, $nExtents: [ 10000, 1000 ] } );
+db._dbCommand( { create:"jstests_capped8", capped: true, $nExtents: [ 10000, 4000 ] } );
testTruncate();
+print("pass " + pass++);
t.drop();
db._dbCommand( { create:"jstests_capped8", capped: true, $nExtents: [ 10000 ] } );
testTruncate();
diff --git a/jstests/capped9.js b/jstests/capped9.js
new file mode 100644
index 0000000..9ea506c
--- /dev/null
+++ b/jstests/capped9.js
@@ -0,0 +1,28 @@
+
+t = db.capped9;
+t.drop();
+
+db.createCollection("capped9" , {capped:true, size:1024*50 });
+
+t.insert( { _id : 1 , x : 2 , y : 3 } )
+
+assert.eq( 1 , t.find( { x : 2 } ).itcount() , "A1" )
+assert.eq( 1 , t.find( { y : 3 } ).itcount() , "A2" )
+//assert.throws( function(){ t.find( { _id : 1 } ).itcount(); } , [] , "A3" ); // SERVER-3064
+
+t.update( { _id : 1 } , { $set : { y : 4 } } )
+//assert( db.getLastError() , "B1" ); // SERVER-3064
+//assert.eq( 3 , t.findOne().y , "B2" ); // SERVER-3064
+
+t.ensureIndex( { _id : 1 } )
+
+assert.eq( 1 , t.find( { _id : 1 } ).itcount() , "D1" )
+
+t.update( { _id : 1 } , { $set : { y : 4 } } )
+assert( null == db.getLastError() , "D1: " + tojson( db.getLastError() ) )
+assert.eq( 4 , t.findOne().y , "D2" )
+
+
+
+
+
diff --git a/jstests/cappeda.js b/jstests/cappeda.js
new file mode 100644
index 0000000..4a4b14a
--- /dev/null
+++ b/jstests/cappeda.js
@@ -0,0 +1,33 @@
+
+t = db.scan_capped_id;
+t.drop()
+
+x = t.runCommand( "create" , { capped : true , size : 10000 } )
+assert( x.ok )
+
+for ( i=0; i<100; i++ )
+ t.insert( { _id : i , x : 1 } )
+
+function q() {
+ return t.findOne( { _id : 5 } )
+}
+
+function u() {
+ t.update( { _id : 5 } , { $set : { x : 2 } } );
+ var gle = db.getLastError();
+ if ( gle )
+ throw gle;
+}
+
+
+// SERVER-3064
+//assert.throws( q , [] , "A1" );
+//assert.throws( u , [] , "B1" );
+
+t.ensureIndex( { _id : 1 } )
+
+assert.eq( 1 , q().x )
+q()
+u()
+
+assert.eq( 2 , q().x )
diff --git a/jstests/compact.js b/jstests/compact.js
new file mode 100644
index 0000000..b12b03f
--- /dev/null
+++ b/jstests/compact.js
@@ -0,0 +1,37 @@
+// compact.js
+
+t = db.compacttest;
+t.drop();
+t.insert({ x: 3 });
+t.insert({ x: 3 });
+t.insert({ x: 5 });
+t.insert({ x: 4, z: 2, k: 'aaa' });
+t.insert({ x: 4, z: 2, k: 'aaa' });
+t.insert({ x: 4, z: 2, k: 'aaa' });
+t.insert({ x: 4, z: 2, k: 'aaa' });
+t.insert({ x: 4, z: 2, k: 'aaa' });
+t.insert({ x: 4, z: 2, k: 'aaa' });
+t.ensureIndex({ x: 1 });
+
+print("1");
+
+var res = db.runCommand({ compact: 'compacttest', dev: true });
+printjson(res);
+assert(res.ok);
+assert(t.count() == 9);
+var v = t.validate(true);
+assert(v.ok);
+assert(v.extentCount == 1);
+assert(v.deletedCount == 1);
+assert(t.getIndexes().length == 2);
+
+print("2");
+
+// works on an empty collection?
+t.remove({});
+assert(db.runCommand({ compact: 'compacttest', dev: true }).ok);
+assert(t.count() == 0);
+v = t.validate(true);
+assert(v.ok);
+assert(v.extentCount == 1);
+assert(t.getIndexes().length == 2);
diff --git a/jstests/compact_speed_test.js b/jstests/compact_speed_test.js
new file mode 100755
index 0000000..0c4b9d5
--- /dev/null
+++ b/jstests/compact_speed_test.js
@@ -0,0 +1,61 @@
+if (1) {
+
+ t = db.compactspeedtest;
+ t.drop();
+
+ var obj = { x: 1, y: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", z: [1, 2] };
+
+ var start = new Date();
+ function timed() {
+ db.getLastError();
+ var dt = (new Date()) - start;
+ //print("time: " + dt);
+ start = new Date();
+ return dt;
+ }
+
+ //print("adding data");
+ var N = 100000;
+ if (db.adminCommand("buildInfo").debug)
+ N = 10000;
+ for (var i = 0; i < N; i++) {
+ obj.x = i;
+ obj.z[1] = i;
+ t.insert(obj);
+ }
+ var a = timed();
+
+ //print("index");
+ t.ensureIndex({ x: 1 });
+ //print("index");
+ t.ensureIndex({ y: 1 });
+ //print("index");
+ t.ensureIndex({ z: 1 });
+
+ a += timed();
+
+ //print("count:" + t.count());
+
+ timed();
+
+ {
+ //print("compact");
+ var res = db.runCommand({ compact: 'compactspeedtest', dev: true });
+ b = timed();
+ //printjson(res);
+ assert(res.ok);
+
+ //print("validate");
+ var v = t.validate(true);
+
+ assert(v.ok);
+ assert(t.getIndexes().length == 4);
+
+ if (b < a) {
+ // consider making this fail/assert
+ print("\n\n\nwarning WARNING compact command was slower than it should be");
+ print("a:" + a + " b:" + b);
+ print("\n\n\n");
+ }
+ }
+}
diff --git a/jstests/date1.js b/jstests/date1.js
index ca2e616..e6fc147 100644
--- a/jstests/date1.js
+++ b/jstests/date1.js
@@ -4,11 +4,14 @@ t = db.date1;
function go( d , msg ){
t.drop();
- t.save( { a : 1 , d : d } );
+ t.save({ a: 1, d: d });
+// printjson(d);
+// printjson(t.findOne().d);
assert.eq( d , t.findOne().d , msg )
}
go( new Date() , "A" )
go( new Date( 1 ) , "B")
go( new Date( 0 ) , "C (old spidermonkey lib fails this test)")
+go(new Date(-10), "neg")
diff --git a/jstests/date2.js b/jstests/date2.js
new file mode 100644
index 0000000..94eb58e
--- /dev/null
+++ b/jstests/date2.js
@@ -0,0 +1,13 @@
+// Check that it's possible to compare a Date to a Timestamp - SERVER-3304
+// Check Date / Timestamp comparison equivalence - SERVER-3222
+
+t = db.jstests_date2;
+t.drop();
+
+t.ensureIndex( {a:1} );
+
+t.save( {a:new Timestamp()} );
+
+if ( 0 ) { // SERVER-3304
+assert.eq( 1, t.find( {a:{$gt:new Date(0)}} ).itcount() );
+} \ No newline at end of file
diff --git a/jstests/date3.js b/jstests/date3.js
new file mode 100644
index 0000000..e7ddf71
--- /dev/null
+++ b/jstests/date3.js
@@ -0,0 +1,31 @@
+// Check dates before Unix epoch - SERVER-405
+
+t = db.date3;
+t.drop()
+
+d1 = new Date(-1000)
+dz = new Date(0)
+d2 = new Date(1000)
+
+t.save( {x: 3, d: dz} )
+t.save( {x: 2, d: d2} )
+t.save( {x: 1, d: d1} )
+
+function test () {
+ var list = t.find( {d: {$lt: dz}} )
+ assert.eq ( 1, list.size() )
+ assert.eq ( 1, list[0].x )
+ assert.eq ( d1, list[0].d )
+ var list = t.find( {d: {$gt: dz}} )
+ assert.eq ( 1, list.size() )
+ assert.eq ( 2, list[0].x )
+ var list = t.find().sort( {d:1} )
+ assert.eq ( 3, list.size() )
+ assert.eq ( 1, list[0].x )
+ assert.eq ( 3, list[1].x )
+ assert.eq ( 2, list[2].x )
+}
+
+test()
+t.ensureIndex( {d: 1} )
+test()
diff --git a/jstests/dbcase.js b/jstests/dbcase.js
index 21854d8..25c0bca 100644
--- a/jstests/dbcase.js
+++ b/jstests/dbcase.js
@@ -1,6 +1,5 @@
+// Check db name duplication constraint SERVER-2111
-/*
-TODO SERVER-2111
a = db.getSisterDB( "dbcasetest_dbnamea" )
b = db.getSisterDB( "dbcasetest_dbnameA" )
@@ -15,11 +14,16 @@ b.foo.save( { x : 1 } )
z = db.getLastErrorObj();
assert.eq( 13297 , z.code || 0 , "B : " + tojson(z) )
-print( db.getMongo().getDBNames() )
+assert.neq( -1, db.getMongo().getDBNames().indexOf( a.getName() ) );
+assert.eq( -1, db.getMongo().getDBNames().indexOf( b.getName() ) );
+printjson( db.getMongo().getDBs().databases );
a.dropDatabase();
b.dropDatabase();
-print( db.getMongo().getDBNames() )
-*/
-
+ai = db.getMongo().getDBNames().indexOf( a.getName() );
+bi = db.getMongo().getDBNames().indexOf( b.getName() );
+// One of these dbs may exist if there is a slave active, but they must
+// not both exist.
+assert( ai == -1 || bi == -1 );
+printjson( db.getMongo().getDBs().databases );
diff --git a/jstests/dbcase2.js b/jstests/dbcase2.js
new file mode 100644
index 0000000..57e43bc
--- /dev/null
+++ b/jstests/dbcase2.js
@@ -0,0 +1,9 @@
+// SERVER-2111 Check that an in memory db name will block creation of a db with a similar but differently cased name.
+
+a = db.getSisterDB( "dbcasetest_dbnamea" )
+b = db.getSisterDB( "dbcasetest_dbnameA" )
+
+a.c.count();
+assert.throws( function() { b.c.count() } );
+
+assert.eq( -1, db.getMongo().getDBNames().indexOf( "dbcasetest_dbnameA" ) );
diff --git a/jstests/dbhash.js b/jstests/dbhash.js
index e9cbc94..7fea4b4 100644
--- a/jstests/dbhash.js
+++ b/jstests/dbhash.js
@@ -14,16 +14,22 @@ db.getCollectionNames().forEach( function( x ) {
}
} );
+function dbhash( mydb ) {
+ var ret = mydb.runCommand( "dbhash" );
+ assert.commandWorked( ret, "dbhash failure" );
+ return ret;
+}
+
function gh( coll , mydb ){
if ( ! mydb ) mydb = db;
- var x = mydb.runCommand( "dbhash" ).collections[coll.getName()];
+ var x = dbhash( mydb ).collections[coll.getName()];
if ( ! x )
return "";
return x;
}
function dbh( mydb ){
- return mydb.runCommand( "dbhash" ).md5;
+ return dbhash( mydb ).md5;
}
assert.eq( gh( a ) , gh( b ) , "A1" );
diff --git a/jstests/delx.js b/jstests/delx.js
index 3f8c88c..aa858e9 100644
--- a/jstests/delx.js
+++ b/jstests/delx.js
@@ -23,6 +23,7 @@ x.next();
y.next();
a.foo.remove( { _id : { $gt : 50 } } );
+db.getLastError();
assert.eq( 51 , a.foo.find().itcount() , "B1" )
assert.eq( 100 , b.foo.find().itcount() , "B2" )
diff --git a/jstests/disk/directoryperdb.js b/jstests/disk/directoryperdb.js
index 3b65bd0..c29dea0 100644
--- a/jstests/disk/directoryperdb.js
+++ b/jstests/disk/directoryperdb.js
@@ -22,6 +22,8 @@ checkDir = function( dir ) {
files = listFiles( dir + baseName );
for( f in files ) {
+ if ( files[f].isDirectory )
+ continue;
assert( new RegExp( baseName + "/" + baseName + "." ).test( files[ f ].name ) , "B dir:" + dir + " f: " + f );
}
}
diff --git a/jstests/disk/diskfull.js b/jstests/disk/diskfull.js
index 26b707d..eddb300 100644
--- a/jstests/disk/diskfull.js
+++ b/jstests/disk/diskfull.js
@@ -20,10 +20,16 @@ if ( doIt ) {
port = allocatePorts( 1 )[ 0 ];
m = startMongoProgram( "mongod", "--port", port, "--dbpath", dbpath, "--nohttpinterface", "--bind_ip", "127.0.0.1" );
- c = m.getDB( "diskfulltest" ).getCollection( "diskfulltest" )
+ d = m.getDB( "diskfulltest" );
+ c = d.getCollection( "diskfulltest" );
c.save( { a: 6 } );
+ assert.eq(d.getLastError(), "new file allocation failure"); // first fail
assert.soon( function() { return rawMongoProgramOutput().match( /file allocation failure/ ); }, "didn't see 'file allocation failure'" );
assert.isnull( c.findOne() , "shouldn't exist" );
+ c.save( { a: 6 } );
+ assert.eq(d.getLastError(), "Can't take a write lock while out of disk space"); // every following fail
+
+
sleep( 3000 );
m2 = new Mongo( m.host );
printjson( m2.getDBs() );
diff --git a/jstests/disk/newcollection.js b/jstests/disk/newcollection.js
index 944ad1c..57ae179 100644
--- a/jstests/disk/newcollection.js
+++ b/jstests/disk/newcollection.js
@@ -3,11 +3,21 @@
port = allocatePorts( 1 )[ 0 ]
var baseName = "jstests_disk_newcollection";
var m = startMongod( "--noprealloc", "--smallfiles", "--port", port, "--dbpath", "/data/db/" + baseName );
+//var m = db.getMongo();
db = m.getDB( "test" );
-db.createCollection( baseName, {size:15.9*1024*1024} );
-db.baseName.drop();
+var t = db[baseName];
-size = m.getDBs().totalSize;
-db.baseName.save( {} );
-assert.eq( size, m.getDBs().totalSize );
+for (var pass = 0; pass <= 1; pass++) {
+
+ db.createCollection(baseName, { size: 15.8 * 1024 * 1024 });
+ if( pass == 0 )
+ t.drop();
+
+ size = m.getDBs().totalSize;
+ t.save({});
+ assert.eq(size, m.getDBs().totalSize);
+ assert(size <= 32 * 1024 * 1024);
+
+ t.drop();
+}
diff --git a/jstests/disk/norepeat.js b/jstests/disk/norepeat.js
index d9f1cd3..985fc36 100644
--- a/jstests/disk/norepeat.js
+++ b/jstests/disk/norepeat.js
@@ -45,7 +45,7 @@ assert.throws( function() { c.next() }, [], "unexpected: object found" );
m.getDB( "local" ).getCollectionNames().forEach( function( x ) { assert( !x.match( /^temp/ ), "temp collection found" ); } );
t.drop();
-m.getDB( baseName ).createCollection( baseName, { capped:true, size:100000, autoIdIndex:false } );
+m.getDB( baseName ).createCollection( baseName, { capped:true, size:100000, autoIndexId:false } );
t = m.getDB( baseName ).getCollection( baseName );
t.insert( {_id:"a"} );
t.insert( {_id:"a"} );
diff --git a/jstests/disk/quota.js b/jstests/disk/quota.js
new file mode 100644
index 0000000..d93e5ea
--- /dev/null
+++ b/jstests/disk/quota.js
@@ -0,0 +1,47 @@
+// Check functioning of --quotaFiles parameter, including with respect to SERVER-3293 ('local' database).
+
+port = allocatePorts( 1 )[ 0 ];
+
+baseName = "jstests_disk_quota";
+dbpath = "/data/db/" + baseName;
+
+m = startMongod( "--port", port, "--dbpath", "/data/db/" + baseName, "--quotaFiles", "1", "--smallfiles" );
+db = m.getDB( baseName );
+
+big = new Array( 10000 ).toString();
+
+// Insert documents until quota is exhausted.
+while( !db.getLastError() ) {
+ db[ baseName ].save( {b:big} );
+}
+printjson( db.getLastError() );
+
+dotTwoDataFile = dbpath + "/" + baseName + ".2";
+files = listFiles( dbpath );
+for( i in files ) {
+ // Since only one data file is allowed, a .0 file is expected and a .1 file may be preallocated (SERVER-3410) but no .2 file is expected.
+ assert.neq( dotTwoDataFile, files[ i ].name );
+}
+
+dotTwoDataFile = dbpath + "/" + "local" + ".2";
+// Check that quota does not apply to local db, and a .2 file can be created.
+l = m.getDB( "local" )[ baseName ];
+for( i = 0; i < 10000; ++i ) {
+ l.save( {b:big} );
+ assert( !db.getLastError() );
+ dotTwoFound = false;
+ if ( i % 100 != 0 ) {
+ continue;
+ }
+ files = listFiles( dbpath );
+ for( f in files ) {
+ if ( files[ f ].name == dotTwoDataFile ) {
+ dotTwoFound = true;
+ }
+ }
+ if ( dotTwoFound ) {
+ break;
+ }
+}
+
+assert( dotTwoFound );
diff --git a/jstests/disk/quota2.js b/jstests/disk/quota2.js
new file mode 100644
index 0000000..c0d30df
--- /dev/null
+++ b/jstests/disk/quota2.js
@@ -0,0 +1,38 @@
+// Test for quotaFiles off by one file limit issue - SERVER-3420.
+
+if ( 0 ) { // SERVER-3420
+
+port = allocatePorts( 1 )[ 0 ];
+
+baseName = "jstests_disk_quota2";
+dbpath = "/data/db/" + baseName;
+
+m = startMongod( "--port", port, "--dbpath", "/data/db/" + baseName, "--quotaFiles", "1", "--smallfiles" );
+db = m.getDB( baseName );
+
+big = new Array( 10000 ).toString();
+
+// Insert documents until quota is exhausted.
+while( !db.getLastError() ) {
+ db[ baseName ].save( {b:big} );
+}
+
+db.resetError();
+
+// Trigger allocation of an additional file for a 'special' namespace.
+for( n = 0; !db.getLastError(); ++n ) {
+ db.createCollection( '' + n );
+}
+
+print( n );
+
+// Check that new docs are saved in the .0 file.
+for( i = 0; i < n; ++i ) {
+ c = db[ ''+i ];
+ c.save( {b:big} );
+ if( !db.getLastError() ) {
+ assert.eq( 0, c.find()._addSpecial( "$showDiskLoc", true )[ 0 ].$diskLoc.file );
+ }
+}
+
+} \ No newline at end of file
diff --git a/jstests/disk/repair3.js b/jstests/disk/repair3.js
index c986dce..9e6767c 100644
--- a/jstests/disk/repair3.js
+++ b/jstests/disk/repair3.js
@@ -1,4 +1,4 @@
-// test --repairpath on aother partition
+// test --repairpath on another partition
var baseName = "jstests_disk_repair3";
var repairbase = "/data/db/repairpartitiontest"
diff --git a/jstests/disk/repair5.js b/jstests/disk/repair5.js
new file mode 100644
index 0000000..65da330
--- /dev/null
+++ b/jstests/disk/repair5.js
@@ -0,0 +1,43 @@
+// SERVER-2351 Test killop with repair command.
+
+var baseName = "jstests_disk_repair5";
+
+port = allocatePorts( 1 )[ 0 ];
+dbpath = "/data/db/" + baseName + "/";
+repairpath = dbpath + "repairDir/"
+
+resetDbpath( dbpath );
+resetDbpath( repairpath );
+
+m = startMongoProgram( "mongod", "--port", port, "--dbpath", dbpath, "--repairpath", repairpath, "--nohttpinterface", "--bind_ip", "127.0.0.1" );
+db = m.getDB( baseName );
+
+big = new Array( 5000 ).toString();
+for( i = 0; i < 20000; ++i ) {
+ db[ baseName ].save( {i:i,b:big} );
+}
+
+function killRepair() {
+ while( 1 ) {
+ p = db.currentOp().inprog;
+ for( var i in p ) {
+ var o = p[ i ];
+ printjson( o );
+ // Find the active 'repairDatabase' op and kill it.
+ if ( o.active && o.query.repairDatabase ) {
+ db.killOp( o.opid );
+ return;
+ }
+ }
+ }
+}
+
+s = startParallelShell( killRepair.toString() + "; killRepair();" );
+
+// Repair should fail due to killOp.
+assert.commandFailed( db.runCommand( {repairDatabase:1, backupOriginalFiles:true} ) );
+
+s();
+
+assert.eq( 20000, db[ baseName ].find().itcount() );
+assert( db[ baseName ].validate().valid );
diff --git a/jstests/distinct1.js b/jstests/distinct1.js
index 5e47400..1b9354f 100644
--- a/jstests/distinct1.js
+++ b/jstests/distinct1.js
@@ -25,3 +25,4 @@ t.save( { a : { b : "c" } , c : 12 } );
res = t.distinct( "a.b" );
assert.eq( "a,b,c" , res.toString() , "B1" );
+assert.eq( "BasicCursor" , t._distinct( "a.b" ).stats.cursor , "B2" )
diff --git a/jstests/distinct_index1.js b/jstests/distinct_index1.js
index 8677457..64dc280 100644
--- a/jstests/distinct_index1.js
+++ b/jstests/distinct_index1.js
@@ -48,3 +48,13 @@ x = d( "b" , { a : { $gt : 5 } } );
assert.eq( 398 , x.stats.n , "BC1" )
assert.eq( 398 , x.stats.nscanned , "BC2" )
assert.eq( 398 , x.stats.nscannedObjects , "BC3" )
+
+// Check proper nscannedObjects count when using a query optimizer cursor.
+t.dropIndexes();
+t.ensureIndex( { a : 1, b : 1 } );
+x = d( "b" , { a : { $gt : 5 }, b : { $gt : 5 } } );
+assert.eq( "QueryOptimizerCursor", x.stats.cursor );
+assert.eq( 171 , x.stats.n )
+assert.eq( 275 , x.stats.nscanned )
+// Disable temporarily - exact value doesn't matter.
+// assert.eq( 266 , x.stats.nscannedObjects )
diff --git a/jstests/drop2.js b/jstests/drop2.js
index a1d619d..87e646e 100644
--- a/jstests/drop2.js
+++ b/jstests/drop2.js
@@ -26,7 +26,7 @@ function op( drop ) {
return null;
}
-s1 = startParallelShell( "db.jstests_drop2.count( { $where: function() { while( 1 ) { ; } } } )" );
+s1 = startParallelShell( "db.jstests_drop2.count( { $where: function() { while( 1 ) { sleep( 1 ); } } } )" );
countOp = null;
assert.soon( function() { countOp = op( false ); return countOp; } );
diff --git a/jstests/drop3.js b/jstests/drop3.js
new file mode 100644
index 0000000..b2ca94a
--- /dev/null
+++ b/jstests/drop3.js
@@ -0,0 +1,29 @@
+t = db.jstests_drop3;
+sub = t.sub;
+
+t.drop();
+sub.drop();
+
+
+for (var i = 0; i < 10; i++){
+ t.insert({});
+ sub.insert({});
+}
+
+var cursor = t.find().batchSize(2);
+var subcursor = sub.find().batchSize(2);
+
+cursor.next();
+subcursor.next();
+assert.eq( cursor.objsLeftInBatch(), 1 );
+assert.eq( subcursor.objsLeftInBatch(), 1 );
+
+t.drop(); // should invalidate cursor, but not subcursor
+db.getLastError();
+
+assert.throws( function(){ cursor.itcount() } ); // throws "cursor doesn't exist on server" error on getMore
+assert.eq( subcursor.itcount(), 9 ); //one already seen
+
+
+
+
diff --git a/jstests/dropdb.js b/jstests/dropdb.js
new file mode 100644
index 0000000..0b83884
--- /dev/null
+++ b/jstests/dropdb.js
@@ -0,0 +1,17 @@
+// Test that a db does not exist after it is dropped.
+// Disabled in the small oplog suite because the slave may create a master db
+// with the same name as the dropped db when requesting a clone.
+
+m = db.getMongo();
+baseName = "jstests_dropdb";
+ddb = db.getSisterDB( baseName );
+
+ddb.c.save( {} );
+ddb.getLastError();
+assert.neq( -1, m.getDBNames().indexOf( baseName ) );
+
+ddb.dropDatabase();
+assert.eq( -1, m.getDBNames().indexOf( baseName ) );
+
+ddb.dropDatabase();
+assert.eq( -1, m.getDBNames().indexOf( baseName ) );
diff --git a/jstests/dropdb_race.js b/jstests/dropdb_race.js
new file mode 100644
index 0000000..bff7980
--- /dev/null
+++ b/jstests/dropdb_race.js
@@ -0,0 +1,44 @@
+// test dropping a db with simultaneous commits
+
+m = db.getMongo();
+baseName = "jstests_dur_droprace";
+d = db.getSisterDB(baseName);
+t = d.foo;
+
+assert(d.adminCommand({ setParameter: 1, syncdelay: 5 }).ok);
+
+var s = 0;
+
+var start = new Date();
+
+for (var pass = 0; pass < 100; pass++) {
+ if (pass % 2 == 0) {
+ // sometimes wait for create db first, to vary the timing of things
+ t.insert({});
+ if( pass % 4 == 0 )
+ d.runCommand({getLastError:1,j:1});
+ else
+ d.getLastError();
+ }
+ t.insert({ x: 1 });
+ t.insert({ x: 3 });
+ t.ensureIndex({ x: 1 });
+ sleep(s);
+ if (pass % 37 == 0)
+ d.adminCommand("closeAllDatabases");
+ else if (pass % 13 == 0)
+ t.drop();
+ else if (pass % 17 == 0)
+ t.dropIndexes();
+ else
+ d.dropDatabase();
+ if (pass % 7 == 0)
+ d.runCommand({getLastError:1,j:1});
+ d.getLastError();
+ s = (s + 1) % 25;
+ //print(pass);
+ if ((new Date()) - start > 60000) {
+ print("stopping early");
+ break;
+ }
+}
diff --git a/jstests/dur/closeall.js b/jstests/dur/closeall.js
index f169f06..9131aed 100644
--- a/jstests/dur/closeall.js
+++ b/jstests/dur/closeall.js
@@ -8,7 +8,9 @@ function f() {
var ourdb = "closealltest";
print("closeall.js start mongod variant:" + variant);
- var options = (new Date()-0)%2==0 ? 8 : 0;
+ var R = (new Date()-0)%2;
+ var QuickCommits = (new Date()-0)%3 == 0;
+ var options = R==0 ? 8 : 0; // 8 is DurParanoid
print("closeall.js --durOptions " + options);
var N = 1000;
if (options)
@@ -23,6 +25,10 @@ function f() {
// we'll use two connections to make a little parallelism
var db1 = conn.getDB(ourdb);
var db2 = new Mongo(db1.getMongo().host).getDB(ourdb);
+ if( QuickCommits ) {
+ print("closeall.js QuickCommits variant (using a small syncdelay)");
+ assert( db2.adminCommand({setParameter:1, syncdelay:5}).ok );
+ }
print("closeall.js run test");
@@ -34,9 +40,9 @@ function f() {
db1.foo.update({ x: 99 }, { a: 1, b: 2, c: 3, d: 4 });
if (i % 100 == 0)
db1.foo.find();
- if( i == 800 )
+ if( i == 800 )
db1.foo.ensureIndex({ x: 1 });
- var res = null;
+ var res = null;
try {
if( variant == 1 )
sleep(0);
@@ -44,37 +50,37 @@ function f() {
sleep(1);
else if( variant == 3 && i % 10 == 0 )
print(i);
- res = db2.adminCommand("closeAllDatabases");
- }
- catch (e) {
- sleep(5000); // sleeping a little makes console output order prettier
- print("\n\n\nFAIL closeall.js closeAllDatabases command invocation threw an exception. i:" + i);
- try {
- print("getlasterror:");
- printjson(db2.getLastErrorObj());
- print("trying one more closealldatabases:");
- res = db2.adminCommand("closeAllDatabases");
- printjson(res);
- }
- catch (e) {
- print("got another exception : " + e);
- }
- print("\n\n\n");
- // sleep a little to capture possible mongod output?
- sleep(2000);
- throw e;
- }
- assert( res.ok, "closeAllDatabases res.ok=false");
- }
-
- print("closeall.js end test loop. slave.foo.count:");
- print(slave.foo.count());
-
- print("closeall.js shutting down servers");
- stopMongod(30002);
- stopMongod(30001);
-}
-
-f();
+ res = db2.adminCommand("closeAllDatabases");
+ }
+ catch (e) {
+ sleep(5000); // sleeping a little makes console output order prettier
+ print("\n\n\nFAIL closeall.js closeAllDatabases command invocation threw an exception. i:" + i);
+ try {
+ print("getlasterror:");
+ printjson(db2.getLastErrorObj());
+ print("trying one more closealldatabases:");
+ res = db2.adminCommand("closeAllDatabases");
+ printjson(res);
+ }
+ catch (e) {
+ print("got another exception : " + e);
+ }
+ print("\n\n\n");
+ // sleep a little to capture possible mongod output?
+ sleep(2000);
+ throw e;
+ }
+ assert( res.ok, "closeAllDatabases res.ok=false");
+ }
+
+ print("closeall.js end test loop. slave.foo.count:");
+ print(slave.foo.count());
+
+ print("closeall.js shutting down servers");
+ stopMongod(30002);
+ stopMongod(30001);
+}
+
+f();
sleep(500);
print("SUCCESS closeall.js");
diff --git a/jstests/dur/data/empty.bson b/jstests/dur/data/empty.bson
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/jstests/dur/data/empty.bson
diff --git a/jstests/dur/diskfull.js b/jstests/dur/diskfull.js
index da45c20..c123ea1 100644
--- a/jstests/dur/diskfull.js
+++ b/jstests/dur/diskfull.js
@@ -14,23 +14,23 @@ for ( i in files ) {
if ( !doIt ) {
print( "path " + startPath + " missing, skipping diskfull test" );
doIt = false;
-}
-
-function checkNoJournalFiles(path, pass) {
- var files = listFiles(path);
- if (files.some(function (f) { return f.name.indexOf("prealloc") < 0; })) {
- if (pass == null) {
- // wait a bit longer for mongod to potentially finish if it is still running.
- sleep(10000);
- return checkNoJournalFiles(path, 1);
- }
- print("\n\n\n");
- print("FAIL path:" + path);
- print("unexpected files:");
- printjson(files);
- assert(false, "FAIL a journal/lsn file is present which is unexpected");
- }
-}
+}
+
+function checkNoJournalFiles(path, pass) {
+ var files = listFiles(path);
+ if (files.some(function (f) { return f.name.indexOf("prealloc") < 0; })) {
+ if (pass == null) {
+ // wait a bit longer for mongod to potentially finish if it is still running.
+ sleep(10000);
+ return checkNoJournalFiles(path, 1);
+ }
+ print("\n\n\n");
+ print("FAIL path:" + path);
+ print("unexpected files:");
+ printjson(files);
+ assert(false, "FAIL a journal/lsn file is present which is unexpected");
+ }
+}
/** Clear dbpath without removing and recreating diskfulltest directory, as resetDbpath does */
function clear() {
@@ -56,7 +56,9 @@ function work() {
d.foo.insert( { _id:i, b:big } );
}
- d.getLastError();
+ gle = d.getLastError();
+ if ( gle )
+ throw gle;
} catch ( e ) {
print( e );
raise( e );
@@ -86,9 +88,8 @@ function runFirstMongodAndFillDisk() {
conn = startMongodNoReset("--port", 30001, "--dbpath", startPath, "--dur", "--smallfiles", "--durOptions", 8, "--noprealloc");
assert.throws( work, null, "no exception thrown when exceeding disk capacity" );
- waitMongoProgramOnPort( 30001 );
-
- // the above wait doesn't work on windows
+ stopMongod( 30001 );
+
sleep(5000);
}
@@ -104,9 +105,9 @@ function runSecondMongdAndRecover() {
// stopMongod seems to be asynchronous (hmmm) so we sleep here.
sleep(5000);
- // at this point, after clean shutdown, there should be no journal files
- log("check no journal files");
- checkNoJournalFiles(startPath + "/journal/");
+ // at this point, after clean shutdown, there should be no journal files
+ log("check no journal files");
+ checkNoJournalFiles(startPath + "/journal/");
log();
}
@@ -133,4 +134,4 @@ if ( doIt ) {
print(testname + " SUCCESS");
-} \ No newline at end of file
+}
diff --git a/jstests/dur/dropdb.js b/jstests/dur/dropdb.js
index 7f82cd7..2aabd4a 100644
--- a/jstests/dur/dropdb.js
+++ b/jstests/dur/dropdb.js
@@ -73,21 +73,28 @@ function verify() {
var d = conn.getDB("test");
var count = d.foo.count();
if (count != 1) {
- print("going to fail, count mismatch in verify()");
+ print("going to fail, test.foo.count() != 1 in verify()");
sleep(10000); // easier to read the output this way
print("\n\n\ndropdb.js FAIL test.foo.count() should be 1 but is : " + count);
- print(d.foo.count() + "\n\n\n");
+ print(d.foo.count() + "\n\n\n");
assert(false);
}
assert(d.foo.findOne()._id == 100, "100");
print("dropdb.js teste.foo.findOne:");
- printjson(conn.getDB("teste").foo.findOne());
-
- var teste = conn.getDB("teste");
- print("dropdb count " + teste.foo.count());
+ printjson(conn.getDB("teste").foo.findOne());
+
+ var teste = conn.getDB("teste");
+ var testecount = teste.foo.count();
+ if (testecount != 1) {
+ print("going to fail, teste.foo.count() != 1 in verify()");
+ sleep(10000); // easier to read the output this way
+ print("\n\n\ndropdb.js FAIL teste.foo.count() should be 1 but is : " + testecount);
+ print("\n\n\n");
+ assert(false);
+ }
+ print("teste.foo.count() = " + teste.foo.count());
assert(teste.foo.findOne()._id == 99, "teste");
-
}
if (debugging) {
diff --git a/jstests/dur/dur1.js b/jstests/dur/dur1.js
index 4c8f1bf..299ac30 100755
--- a/jstests/dur/dur1.js
+++ b/jstests/dur/dur1.js
@@ -75,7 +75,7 @@ function work() {
}
function verify() {
- log("verify");
+ log("verify test.foo.count == 2");
var d = conn.getDB("test");
var ct = d.foo.count();
if (ct != 2) {
@@ -99,37 +99,38 @@ var path1 = "/data/db/" + testname+"nodur";
var path2 = "/data/db/" + testname+"dur";
// non-durable version
-log();
+log("run mongod without journaling");
conn = startMongodEmpty("--port", 30000, "--dbpath", path1, "--nodur", "--smallfiles");
work();
stopMongod(30000);
// durable version
-log();
-conn = startMongodEmpty("--port", 30001, "--dbpath", path2, "--dur", "--smallfiles", "--durOptions", 8);
+log("run mongod with --journal");
+conn = startMongodEmpty("--port", 30001, "--dbpath", path2, "--journal", "--smallfiles", "--journalOptions", 8);
work();
// wait for group commit.
-printjson(conn.getDB('admin').runCommand({getlasterror:1, fsync:1}));
-
-// kill the process hard
+printjson(conn.getDB('admin').runCommand({getlasterror:1, fsync:1}));
+
+// kill the process hard
+log("kill 9");
stopMongod(30001, /*signal*/9);
// journal file should be present, and non-empty as we killed hard
// restart and recover
-log();
-conn = startMongodNoReset("--port", 30002, "--dbpath", path2, "--dur", "--smallfiles", "--durOptions", 8);
+log("restart mongod --journal and recover");
+conn = startMongodNoReset("--port", 30002, "--dbpath", path2, "--journal", "--smallfiles", "--journalOptions", 8);
verify();
-log("stop");
+log("stop mongod");
stopMongod(30002);
// stopMongod seems to be asynchronous (hmmm) so we sleep here.
-sleep(5000);
+// sleep(5000);
// at this point, after clean shutdown, there should be no journal files
-log("check no journal files");
+log("check no journal files (after presumably clean shutdown)");
checkNoJournalFiles(path2 + "/journal");
log("check data matches ns");
diff --git a/jstests/dur/dur1_tool.js b/jstests/dur/dur1_tool.js
new file mode 100755
index 0000000..5090b5b
--- /dev/null
+++ b/jstests/dur/dur1_tool.js
@@ -0,0 +1,152 @@
+/*
+  test durability option with tools (same as dur1.js but uses mongorestore to do the repair)
+*/
+
+var debugging = false;
+var testname = "dur1_tool";
+var step = 1;
+var conn = null;
+
+function checkNoJournalFiles(path, pass) {
+ var files = listFiles(path);
+ if (files.some(function (f) { return f.name.indexOf("prealloc") < 0; })) {
+ if (pass == null) {
+ // wait a bit longer for mongod to potentially finish if it is still running.
+ sleep(10000);
+ return checkNoJournalFiles(path, 1);
+ }
+ print("\n\n\n");
+ print("FAIL path:" + path);
+ print("unexpected files:");
+ printjson(files);
+ assert(false, "FAIL a journal/lsn file is present which is unexpected");
+ }
+}
+
+function runDiff(a, b) {
+ function reSlash(s) {
+ var x = s;
+ if (_isWindows()) {
+ while (1) {
+ var y = x.replace('/', '\\');
+ if (y == x)
+ break;
+ x = y;
+ }
+ }
+ return x;
+ }
+ a = reSlash(a);
+ b = reSlash(b);
+ print("diff " + a + " " + b);
+ return run("diff", a, b);
+}
+
+function log(str) {
+ print();
+ if(str)
+ print(testname+" step " + step++ + " " + str);
+ else
+ print(testname+" step " + step++);
+}
+
+// if you do inserts here, you will want to set _id. otherwise they won't match on different
+// runs so we can't do a binary diff of the resulting files to check they are consistent.
+function work() {
+ log("work");
+ var d = conn.getDB("test");
+ d.foo.insert({ _id: 3, x: 22 });
+ d.foo.insert({ _id: 4, x: 22 });
+ d.a.insert({ _id: 3, x: 22, y: [1, 2, 3] });
+ d.a.insert({ _id: 4, x: 22, y: [1, 2, 3] });
+ d.a.update({ _id: 4 }, { $inc: { x: 1} });
+
+ // try building an index. however, be careful as object id's in system.indexes would vary, so we do it manually:
+ d.system.indexes.insert({ _id: 99, ns: "test.a", key: { x: 1 }, name: "x_1", v: 0 });
+
+// d.a.update({ _id: 4 }, { $inc: { x: 1} });
+// d.a.reIndex();
+
+    // ensure writes are applied in case we kill -9 on return from this function
+ d.getLastError();
+
+ log("endwork");
+ return d;
+}
+
+function verify() {
+ log("verify test.foo.count == 2");
+ var d = conn.getDB("test");
+ var ct = d.foo.count();
+ if (ct != 2) {
+ print("\n\n\nFAIL dur1.js count is wrong in verify(): " + ct + "\n\n\n");
+ assert(ct == 2);
+ }
+}
+
+if( debugging ) {
+ // mongod already running in debugger
+ conn = db.getMongo();
+ work();
+ sleep(30000);
+ quit();
+}
+
+log();
+
+// directories
+var path1 = "/data/db/" + testname+"nodur";
+var path2 = "/data/db/" + testname+"dur";
+
+// non-durable version
+log("run mongod without journaling");
+conn = startMongodEmpty("--port", 30000, "--dbpath", path1, "--nodur", "--smallfiles");
+work();
+stopMongod(30000);
+
+// durable version
+log("run mongod with --journal");
+conn = startMongodEmpty("--port", 30001, "--dbpath", path2, "--journal", "--smallfiles", "--journalOptions", 8);
+work();
+
+// wait for group commit.
+printjson(conn.getDB('admin').runCommand({getlasterror:1, fsync:1}));
+
+// kill the process hard
+log("kill 9");
+stopMongod(30001, /*signal*/9);
+
+// journal file should be present, and non-empty as we killed hard
+
+// mongorestore with --dbpath and --journal options should do a recovery pass
+// empty.bson is an empty file so it won't actually insert anything
+log("use mongorestore to recover");
+runMongoProgram("mongorestore", "--dbpath", path2, "--journal", "-d", "test", "-c", "empty", "jstests/dur/data/empty.bson");
+
+// stopMongod seems to be asynchronous (hmmm) so we sleep here.
+// sleep(5000);
+
+// at this point, after clean shutdown, there should be no journal files
+log("check no journal files (after presumably clean shutdown)");
+checkNoJournalFiles(path2 + "/journal");
+
+log("check data matches ns");
+var diff = runDiff(path1 + "/test.ns", path2 + "/test.ns");
+if (diff != "") {
+ print("\n\n\nDIFFERS\n");
+ print(diff);
+}
+assert(diff == "", "error test.ns files differ");
+
+log("check data matches .0");
+var diff = runDiff(path1 + "/test.0", path2 + "/test.0");
+if (diff != "") {
+ print("\n\n\nDIFFERS\n");
+ print(diff);
+}
+assert(diff == "", "error test.0 files differ");
+
+log("check data matches done");
+
+print(testname + " SUCCESS");
+
diff --git a/jstests/dur/indexbg.js b/jstests/dur/indexbg.js
new file mode 100644
index 0000000..e78ae4a
--- /dev/null
+++ b/jstests/dur/indexbg.js
@@ -0,0 +1,7 @@
+path = '/data/db/indexbg_dur';
+
+m = startMongodEmpty( '--port', 30001, '--dbpath', path, '--journal', '--smallfiles', '--journalOptions', 24 );
+t = m.getDB( 'test' ).test;
+t.save( {x:1} );
+t.createIndex( {x:1}, {background:true} );
+t.count();
diff --git a/jstests/dur/indexbg2.js b/jstests/dur/indexbg2.js
new file mode 100644
index 0000000..6a0af24
--- /dev/null
+++ b/jstests/dur/indexbg2.js
@@ -0,0 +1,19 @@
+path = '/data/db/indexbg2_dur';
+
+m = startMongodEmpty( '--port', 30001, '--dbpath', path, '--journal', '--smallfiles' );
+
+t = m.getDB( 'test' ).test;
+t.createIndex( {a:1} );
+t.createIndex( {b:1} );
+t.createIndex( {x:1}, {background:true} );
+for( var i = 0; i < 1000; ++i ) {
+ t.insert( {_id:i,a:'abcd',b:'bcde',x:'four score and seven years ago'} );
+ t.remove( {_id:i} );
+}
+sleep( 1000 );
+for( var i = 1000; i < 2000; ++i ) {
+ t.insert( {_id:i,a:'abcd',b:'bcde',x:'four score and seven years ago'} );
+ t.remove( {_id:i} );
+}
+t.insert( {_id:2000,a:'abcd',b:'bcde',x:'four score and seven years ago'} );
+assert( !t.getDB().getLastError() );
diff --git a/jstests/dur/manyRestart.js b/jstests/dur/manyRestart.js
index 04e4318..79f72a4 100755
--- a/jstests/dur/manyRestart.js
+++ b/jstests/dur/manyRestart.js
@@ -116,6 +116,10 @@ conn = startMongodEmpty("--port", 30000, "--dbpath", path1, "--nodur", "--smallf
work();
stopMongod(30000);
+// hail mary for windows
+// Sat Jun 11 14:07:57 Error: boost::filesystem::create_directory: Access is denied: "\data\db\manyRestartsdur" (anon):1
+sleep(1000);
+
log("starting 30001");
conn = startMongodEmpty("--port", 30001, "--dbpath", path2, "--dur", "--smallfiles", "--durOptions", 8);
work();
diff --git a/jstests/eval_nolock.js b/jstests/eval_nolock.js
index 2688ec5..2ab96a3 100644
--- a/jstests/eval_nolock.js
+++ b/jstests/eval_nolock.js
@@ -10,7 +10,7 @@ res = db.runCommand( { eval :
db.eval_nolock.insert( { _id : 123 } );
return db.eval_nolock.count();
}
- , nlock : true } );
+ , nolock : true } );
assert.eq( 11 , res.retval , "A" )
diff --git a/jstests/evalb.js b/jstests/evalb.js
index 177930c..ea80331 100644
--- a/jstests/evalb.js
+++ b/jstests/evalb.js
@@ -11,7 +11,7 @@ db.setProfilingLevel( 2 );
assert.eq( 3, db.eval( function(){ return db.evalb.findOne().x; } ) , "B" );
o = db.system.profile.find().sort( { $natural : -1 } ).limit(1).next();
-assert( o.info.indexOf( "findOne().x" ) > 0 , "C : " + tojson( o ) )
+assert( tojson(o).indexOf( "findOne().x" ) > 0 , "C : " + tojson( o ) )
db.setProfilingLevel( 0 );
diff --git a/jstests/evalc.js b/jstests/evalc.js
index 8a9e889..0320ecd 100644
--- a/jstests/evalc.js
+++ b/jstests/evalc.js
@@ -1,17 +1,24 @@
t = db.jstests_evalc;
t.drop();
+t2 = db.evalc_done
+t2.drop()
+
for( i = 0; i < 10; ++i ) {
t.save( {i:i} );
}
// SERVER-1610
-s = startParallelShell( "print( 'starting forked:' + Date() ); for ( i=0; i<500000; i++ ){ db.currentOp(); } print( 'ending forked:' + Date() ); " )
+assert.eq( 0 , t2.count() , "X1" )
+
+s = startParallelShell( "print( 'starting forked:' + Date() ); for ( i=0; i<50000; i++ ){ db.currentOp(); } print( 'ending forked:' + Date() ); db.evalc_done.insert( { x : 1 } ); " )
print( "starting eval: " + Date() )
-for ( i=0; i<20000; i++ ){
+while ( true ) {
db.eval( "db.jstests_evalc.count( {i:10} );" );
+ if ( t2.count() > 0 )
+ break;
}
print( "end eval: " + Date() )
diff --git a/jstests/evald.js b/jstests/evald.js
index 78cabb6..7b18f3c 100644
--- a/jstests/evald.js
+++ b/jstests/evald.js
@@ -53,10 +53,10 @@ function doIt( ev, wait, where ) {
}
-doIt( "db.jstests_evald.count( { $where: function() { while( 1 ) { ; } } } )", true, true );
-doIt( "db.jstests_evald.count( { $where: function() { while( 1 ) { ; } } } )", false, true );
-doIt( "while( true ) {;}", false );
-doIt( "while( true ) {;}", true );
+doIt( "db.jstests_evald.count( { $where: function() { while( 1 ) { sleep(1); } } } )", true, true );
+doIt( "db.jstests_evald.count( { $where: function() { while( 1 ) { sleep(1); } } } )", false, true );
+doIt( "while( true ) { sleep(1);}", false );
+doIt( "while( true ) { sleep(1);}", true );
// the for loops are currently required, as a spawned op masks the parent op - see SERVER-1931
doIt( "while( 1 ) { for( var i = 0; i < 10000; ++i ) {;} db.jstests_evald.count( {i:10} ); }", true );
@@ -65,4 +65,4 @@ doIt( "while( 1 ) { for( var i = 0; i < 10000; ++i ) {;} db.jstests_evald.count(
doIt( "while( 1 ) { for( var i = 0; i < 10000; ++i ) {;} db.jstests_evald.count(); }", false );
doIt( "while( 1 ) { for( var i = 0; i < 10000; ++i ) {;} try { db.jstests_evald.count( {i:10} ); } catch ( e ) { } }", true );
-doIt( "while( 1 ) { try { while( 1 ) { ; } } catch ( e ) { } }", true );
+doIt( "while( 1 ) { try { while( 1 ) { sleep(1); } } catch ( e ) { } }", true );
diff --git a/jstests/exists3.js b/jstests/exists3.js
new file mode 100644
index 0000000..53a69d6
--- /dev/null
+++ b/jstests/exists3.js
@@ -0,0 +1,21 @@
+// Check exists with non empty document, based on SERVER-2470 example.
+
+t = db.jstests_exists3;
+t.drop();
+
+t.insert({a: 1, b: 2});
+
+assert.eq( 1, t.find({}).sort({c: -1}).itcount() );
+assert.eq( 1, t.count({c: {$exists: false}}) );
+assert.eq( 1, t.find({c: {$exists: false}}).itcount() );
+assert.eq( 1, t.find({c: {$exists: false}}).sort({c: -1}).itcount() );
+
+// now we have an index on the sort key
+t.ensureIndex({c: -1})
+
+assert.eq( 1, t.find({c: {$exists: false}}).sort({c: -1}).itcount() );
+assert.eq( 1, t.find({c: {$exists: false}}).itcount() );
+// still ok without the $exists
+assert.eq( 1, t.find({}).sort({c: -1}).itcount() );
+// and ok with a convoluted $not $exists
+assert.eq( 1, t.find({c: {$not: {$exists: true}}}).sort({c: -1}).itcount() );
diff --git a/jstests/exists4.js b/jstests/exists4.js
new file mode 100644
index 0000000..fb801ed
--- /dev/null
+++ b/jstests/exists4.js
@@ -0,0 +1,20 @@
+// Check various exists cases, based on SERVER-1735 example.
+
+t = db.jstests_exists4;
+t.drop();
+
+t.ensureIndex({date: -1, country_code: 1, user_id: 1}, {unique: 1, background: 1});
+t.insert({ date: new Date("08/27/2010"), tot_visit: 100});
+t.insert({ date: new Date("08/27/2010"), country_code: "IT", tot_visit: 77});
+t.insert({ date: new Date("08/27/2010"), country_code: "ES", tot_visit: 23});
+t.insert({ date: new Date("08/27/2010"), country_code: "ES", user_id: "and...@spacca.org", tot_visit: 11});
+t.insert({ date: new Date("08/27/2010"), country_code: "ES", user_id: "andrea.spa...@gmail.com", tot_visit: 5});
+t.insert({ date: new Date("08/27/2010"), country_code: "ES", user_id: "andrea.spa...@progloedizioni.com", tot_visit: 7});
+
+assert.eq( 6, t.find({date: new Date("08/27/2010")}).count() );
+assert.eq( 5, t.find({date: new Date("08/27/2010"), country_code: {$exists: true}}).count() );
+assert.eq( 1, t.find({date: new Date("08/27/2010"), country_code: {$exists: false}}).count() );
+assert.eq( 1, t.find({date: new Date("08/27/2010"), country_code: null}).count() );
+assert.eq( 3, t.find({date: new Date("08/27/2010"), country_code: {$exists: true}, user_id: {$exists: true}}).count() );
+assert.eq( 2, t.find({date: new Date("08/27/2010"), country_code: {$exists: true}, user_id: {$exists: false}}).count() );
+assert.eq( 2, t.find({date: new Date("08/27/2010"), country_code: {$exists: true}, user_id: null}).count() );
diff --git a/jstests/exists5.js b/jstests/exists5.js
new file mode 100644
index 0000000..a90a94f
--- /dev/null
+++ b/jstests/exists5.js
@@ -0,0 +1,33 @@
+// Test some $not/$exists cases.
+
+t = db.jstests_exists5;
+t.drop();
+
+t.save( {a:1} );
+assert.eq( 1, t.count( {'a.b':{$exists:false}} ) );
+assert.eq( 1, t.count( {'a.b':{$not:{$exists:true}}} ) );
+assert.eq( 1, t.count( {'c.d':{$not:{$exists:true}}} ) );
+assert.eq( 0, t.count( {'a.b':{$exists:true}} ) );
+assert.eq( 0, t.count( {'a.b':{$not:{$exists:false}}} ) );
+assert.eq( 0, t.count( {'c.d':{$not:{$exists:false}}} ) );
+
+t.drop();
+t.save( {a:{b:1}} );
+assert.eq( 1, t.count( {'a.b':{$exists:true}} ) );
+assert.eq( 1, t.count( {'a.b':{$not:{$exists:false}}} ) );
+assert.eq( 0, t.count( {'a.b':{$exists:false}} ) );
+assert.eq( 0, t.count( {'a.b':{$not:{$exists:true}}} ) );
+
+t.drop();
+t.save( {a:[1]} );
+assert.eq( 1, t.count( {'a.b':{$exists:false}} ) );
+assert.eq( 1, t.count( {'a.b':{$not:{$exists:true}}} ) );
+assert.eq( 0, t.count( {'a.b':{$exists:true}} ) );
+assert.eq( 0, t.count( {'a.b':{$not:{$exists:false}}} ) );
+
+t.drop();
+t.save( {a:[{b:1}]} );
+assert.eq( 1, t.count( {'a.b':{$exists:true}} ) );
+assert.eq( 1, t.count( {'a.b':{$not:{$exists:false}}} ) );
+assert.eq( 0, t.count( {'a.b':{$exists:false}} ) );
+assert.eq( 0, t.count( {'a.b':{$not:{$exists:true}}} ) );
diff --git a/jstests/exists6.js b/jstests/exists6.js
new file mode 100644
index 0000000..7c1cdc1
--- /dev/null
+++ b/jstests/exists6.js
@@ -0,0 +1,63 @@
+// SERVER-393 Test indexed matching with $exists.
+
+t = db.jstests_exists6;
+t.drop();
+
+t.ensureIndex( {b:1} );
+t.save( {} );
+t.save( {b:1} );
+t.save( {b:null} );
+
+checkExists = function( query ) {
+ // Constraint on 'b' is trivial, so a BasicCursor is the default cursor type.
+ assert.eq( 'BasicCursor', t.find( query ).explain().cursor );
+ // Index bounds include all elements.
+ assert.eq( [ [ { $minElement:1 }, { $maxElement:1 } ] ], t.find( query ).hint( {b:1} ).explain().indexBounds.b );
+ // All keys must be scanned.
+ assert.eq( 3, t.find( query ).hint( {b:1} ).explain().nscanned );
+ // 2 docs will match.
+ assert.eq( 2, t.find( query ).hint( {b:1} ).itcount() );
+}
+checkExists( {b:{$exists:true}} );
+checkExists( {b:{$not:{$exists:false}}} );
+
+checkMissing = function( query ) {
+ // Constraint on 'b' is nontrivial, so a BtreeCursor is the default cursor type.
+ assert.eq( 'BtreeCursor b_1', t.find( query ).explain().cursor );
+ // Scan null index keys.
+ assert.eq( [ [ null, null ] ], t.find( query ).explain().indexBounds.b );
+ // Two existing null keys will be scanned.
+ assert.eq( 2, t.find( query ).explain().nscanned );
+ // One doc is missing 'b'.
+ assert.eq( 1, t.find( query ).hint( {b:1} ).itcount() );
+}
+checkMissing( {b:{$exists:false}} );
+checkMissing( {b:{$not:{$exists:true}}} );
+
+// Now check existence of second compound field.
+t.ensureIndex( {a:1,b:1} );
+t.save( {a:1} );
+t.save( {a:1,b:1} );
+t.save( {a:1,b:null} );
+
+checkExists = function( query ) {
+ // Index bounds include all elements.
+ assert.eq( [ [ { $minElement:1 }, { $maxElement:1 } ] ], t.find( query ).explain().indexBounds.b );
+ // All keys must be scanned.
+ assert.eq( 3, t.find( query ).explain().nscanned );
+ // 2 docs will match.
+ assert.eq( 2, t.find( query ).hint( {a:1,b:1} ).itcount() );
+}
+checkExists( {a:1,b:{$exists:true}} );
+checkExists( {a:1,b:{$not:{$exists:false}}} );
+
+checkMissing = function( query ) {
+ // Scan null index keys.
+ assert.eq( [ [ null, null ] ], t.find( query ).explain().indexBounds.b );
+ // Two existing null keys will be scanned.
+ assert.eq( 2, t.find( query ).explain().nscanned );
+ // One doc is missing 'b'.
+ assert.eq( 1, t.find( query ).hint( {a:1,b:1} ).itcount() );
+}
+checkMissing( {a:1,b:{$exists:false}} );
+checkMissing( {a:1,b:{$not:{$exists:true}}} );
diff --git a/jstests/exists7.js b/jstests/exists7.js
new file mode 100644
index 0000000..14a9720
--- /dev/null
+++ b/jstests/exists7.js
@@ -0,0 +1,21 @@
+
+// Test that non-boolean value types are allowed with the $exists spec. SERVER-2322
+
+t = db.jstests_explain7;
+t.drop();
+
+function testIntegerExistsSpec() {
+ t.remove();
+ t.save( {} );
+ t.save( {a:1} );
+ t.save( {a:2} );
+ t.save( {a:3, b:3} );
+ t.save( {a:4, b:4} );
+
+ assert.eq( 2, t.count( {b:{$exists:1}} ) );
+ assert.eq( 3, t.count( {b:{$exists:0}} ) );
+}
+
+testIntegerExistsSpec();
+t.ensureIndex( {b:1} );
+testIntegerExistsSpec();
diff --git a/jstests/exists8.js b/jstests/exists8.js
new file mode 100644
index 0000000..82f0c45
--- /dev/null
+++ b/jstests/exists8.js
@@ -0,0 +1,76 @@
+// Test $exists with array element field names SERVER-2897
+
+t = db.jstests_exists8;
+t.drop();
+
+t.save( {a:[1]} );
+assert.eq( 1, t.count( {'a.0':{$exists:true}} ) );
+assert.eq( 1, t.count( {'a.1':{$exists:false}} ) );
+assert.eq( 0, t.count( {'a.0':{$exists:false}} ) );
+assert.eq( 0, t.count( {'a.1':{$exists:true}} ) );
+
+t.remove();
+t.save( {a:[1,2]} );
+assert.eq( 1, t.count( {'a.0':{$exists:true}} ) );
+assert.eq( 0, t.count( {'a.1':{$exists:false}} ) );
+assert.eq( 0, t.count( {'a.0':{$exists:false}} ) );
+assert.eq( 1, t.count( {'a.1':{$exists:true}} ) );
+
+t.remove();
+t.save( {a:[{}]} );
+assert.eq( 1, t.count( {'a.0':{$exists:true}} ) );
+assert.eq( 1, t.count( {'a.1':{$exists:false}} ) );
+assert.eq( 0, t.count( {'a.0':{$exists:false}} ) );
+assert.eq( 0, t.count( {'a.1':{$exists:true}} ) );
+
+t.remove();
+t.save( {a:[{},{}]} );
+assert.eq( 1, t.count( {'a.0':{$exists:true}} ) );
+assert.eq( 0, t.count( {'a.1':{$exists:false}} ) );
+assert.eq( 0, t.count( {'a.0':{$exists:false}} ) );
+assert.eq( 1, t.count( {'a.1':{$exists:true}} ) );
+
+t.remove();
+t.save( {a:[{'b':2},{'a':1}]} );
+assert.eq( 1, t.count( {'a.a':{$exists:true}} ) );
+assert.eq( 1, t.count( {'a.1.a':{$exists:true}} ) );
+assert.eq( 1, t.count( {'a.0.a':{$exists:false}} ) );
+
+t.remove();
+t.save( {a:[[1]]} );
+assert.eq( 1, t.count( {'a.0':{$exists:true}} ) );
+assert.eq( 1, t.count( {'a.0.0':{$exists:true}} ) );
+assert.eq( 0, t.count( {'a.0.0':{$exists:false}} ) );
+assert.eq( 0, t.count( {'a.0.0.0':{$exists:true}} ) );
+assert.eq( 1, t.count( {'a.0.0.0':{$exists:false}} ) );
+
+t.remove();
+t.save( {a:[[[1]]]} );
+assert.eq( 1, t.count( {'a.0.0.0':{$exists:true}} ) );
+
+t.remove();
+t.save( {a:[[{b:1}]]} );
+assert.eq( 0, t.count( {'a.b':{$exists:true}} ) );
+assert.eq( 1, t.count( {'a.b':{$exists:false}} ) );
+assert.eq( 1, t.count( {'a.0.b':{$exists:true}} ) );
+assert.eq( 0, t.count( {'a.0.b':{$exists:false}} ) );
+
+t.remove();
+t.save( {a:[[],[{b:1}]]} );
+assert.eq( 0, t.count( {'a.0.b':{$exists:true}} ) );
+assert.eq( 1, t.count( {'a.0.b':{$exists:false}} ) );
+
+t.remove();
+t.save( {a:[[],[{b:1}]]} );
+assert.eq( 1, t.count( {'a.1.b':{$exists:true}} ) );
+assert.eq( 0, t.count( {'a.1.b':{$exists:false}} ) );
+
+t.remove();
+t.save( {a:[[],[{b:1}]]} );
+assert.eq( 1, t.count( {'a.1.0.b':{$exists:true}} ) );
+assert.eq( 0, t.count( {'a.1.0.b':{$exists:false}} ) );
+
+t.remove();
+t.save( {a:[[],[{b:1}]]} );
+assert.eq( 0, t.count( {'a.1.1.b':{$exists:true}} ) );
+assert.eq( 1, t.count( {'a.1.1.b':{$exists:false}} ) );
diff --git a/jstests/exists9.js b/jstests/exists9.js
new file mode 100644
index 0000000..66378d1
--- /dev/null
+++ b/jstests/exists9.js
@@ -0,0 +1,41 @@
+// SERVER-393 Test exists with various empty array and empty object cases.
+
+t = db.jstests_exists9;
+t.drop();
+
+// Check existence of missing nested field.
+t.save( {a:{}} );
+assert.eq( 1, t.count( {'a.b':{$exists:false}} ) );
+assert.eq( 0, t.count( {'a.b':{$exists:true}} ) );
+
+// With index.
+t.ensureIndex( {'a.b':1} );
+assert.eq( 1, t.find( {'a.b':{$exists:false}} ).hint( {'a.b':1} ).itcount() );
+assert.eq( 0, t.find( {'a.b':{$exists:true}} ).hint( {'a.b':1} ).itcount() );
+
+t.drop();
+
+// Check that an empty array 'exists'.
+t.save( {} );
+t.save( {a:[]} );
+assert.eq( 1, t.count( {a:{$exists:true}} ) );
+assert.eq( 1, t.count( {a:{$exists:false}} ) );
+
+// With index.
+t.ensureIndex( {a:1} );
+assert.eq( 1, t.find( {a:{$exists:true}} ).hint( {a:1} ).itcount() );
+assert.eq( 1, t.find( {a:{$exists:false}} ).hint( {a:1} ).itcount() );
+assert.eq( 1, t.find( {a:{$exists:false}} ).hint( {a:1} ).explain().nscanned );
+
+t.drop();
+
+// Check that an indexed field within an empty array does not exist.
+t.save( {a:{'0':1}} );
+t.save( {a:[]} );
+assert.eq( 1, t.count( {'a.0':{$exists:true}} ) );
+assert.eq( 1, t.count( {'a.0':{$exists:false}} ) );
+
+// With index.
+t.ensureIndex( {'a.0':1} );
+assert.eq( 1, t.find( {'a.0':{$exists:true}} ).hint( {'a.0':1} ).itcount() );
+assert.eq( 1, t.find( {'a.0':{$exists:false}} ).hint( {'a.0':1} ).itcount() );
diff --git a/jstests/find8.js b/jstests/find8.js
new file mode 100644
index 0000000..2ec368b
--- /dev/null
+++ b/jstests/find8.js
@@ -0,0 +1,27 @@
+// SERVER-1932 Test unindexed matching of a range that is only valid in a multikey context.
+
+t = db.jstests_find8;
+t.drop();
+
+t.save( {a:[1,10]} );
+assert.eq( 1, t.count( { a: { $gt:2,$lt:5} } ) );
+
+// Check that we can do a query with 'invalid' range.
+assert.eq( 1, t.count( { a: { $gt:5,$lt:2} } ) );
+
+t.save( {a:[-1,12]} );
+
+// Check that we can do a query with 'invalid' range and sort.
+assert.eq( 1, t.find( { a: { $gt:5,$lt:2} } ).sort( {a:1} ).toArray()[ 0 ].a[ 0 ] );
+assert.eq( 2, t.find( { a: { $gt:5,$lt:2} } ).sort( {$natural:-1} ).itcount() );
+
+// SERVER-2864
+if( 0 ) {
+t.find( { a: { $gt:5,$lt:2} } ).itcount();
+// Check that we can record a plan for an 'invalid' range.
+assert( t.find( { a: { $gt:5,$lt:2} } ).explain( true ).oldPlan );
+}
+
+t.ensureIndex( {b:1} );
+// Check that if we do a table scan of an 'invalid' range in an $or clause we don't check subsequent clauses.
+assert.eq( "BasicCursor", t.find( { $or:[{ a: { $gt:5,$lt:2} }, {b:1}] } ).explain().cursor ); \ No newline at end of file
diff --git a/jstests/find_and_modify2.js b/jstests/find_and_modify2.js
index 108fc0f..2c8ab5b 100644
--- a/jstests/find_and_modify2.js
+++ b/jstests/find_and_modify2.js
@@ -8,3 +8,9 @@ assert.eq(out, {_id:1, i:1});
out = t.findAndModify({update: {$inc: {i:1}}, fields: {i:0}});
assert.eq(out, {_id:1, j:0});
+
+out = t.findAndModify({update: {$inc: {i:1}}, fields: {_id:0, j:1}});
+assert.eq(out, {j:0});
+
+out = t.findAndModify({update: {$inc: {i:1}}, fields: {_id:0, j:1}, 'new': true});
+assert.eq(out, {j:0});
diff --git a/jstests/fsync.js b/jstests/fsync.js
index fccd623..134d558 100644
--- a/jstests/fsync.js
+++ b/jstests/fsync.js
@@ -1,22 +1,21 @@
// test the lock/unlock snapshotting feature a bit
-x=db.runCommand({fsync:1,lock:1});
+x=db.runCommand({fsync:1,lock:1}); // not on admin db
assert(!x.ok,"D");
-d=db.getSisterDB("admin");
-
-x=d.runCommand({fsync:1,lock:1});
+x=db.fsyncLock(); // uses admin automatically
assert(x.ok,"C");
-y = d.currentOp();
+y = db.currentOp();
assert(y.fsyncLock,"B");
-z = d.$cmd.sys.unlock.findOne();
+z = db.fsyncUnlock();
+assert( db.currentOp().fsyncLock == null, "A2" );
-// it will take some time to unlock, and unlock does not block and wait for that
-// doing a write will make us wait until db is writeable.
+// make sure the db is unlocked
db.jstests_fsync.insert({x:1});
+db.getLastError();
-assert( d.currentOp().fsyncLock == null, "A" );
+assert( db.currentOp().fsyncLock == null, "A" );
diff --git a/jstests/geo10.js b/jstests/geo10.js
new file mode 100644
index 0000000..39da09f
--- /dev/null
+++ b/jstests/geo10.js
@@ -0,0 +1,21 @@
+// Test for SERVER-2746
+
+coll = db.geo10
+coll.drop();
+
+db.geo10.ensureIndex( { c : '2d', t : 1 }, { min : 0, max : Math.pow( 2, 40 ) } )
+assert( db.getLastError() == null, "B" )
+assert( db.system.indexes.count({ ns : "test.geo10" }) == 2, "A3" )
+
+printjson( db.system.indexes.find().toArray() )
+
+db.geo10.insert( { c : [ 1, 1 ], t : 1 } )
+assert.eq( db.getLastError(), null, "C" )
+
+db.geo10.insert( { c : [ 3600, 3600 ], t : 1 } )
+assert( db.getLastError() == null, "D" )
+
+db.geo10.insert( { c : [ 0.001, 0.001 ], t : 1 } )
+assert( db.getLastError() == null, "E" )
+
+printjson( db.geo10.find({ c : { $within : { $box : [[0.001, 0.001], [Math.pow(2, 40) - 0.001, Math.pow(2, 40) - 0.001]] } }, t : 1 }).toArray() )
diff --git a/jstests/geo4.js b/jstests/geo4.js
index 73b4020..78404ab 100644
--- a/jstests/geo4.js
+++ b/jstests/geo4.js
@@ -4,7 +4,7 @@ t.drop();
t.insert( { zip : "06525" , loc : [ 41.352964 , 73.01212 ] } );
t.ensureIndex( { loc : "2d" }, { bits : 33 } );
-assert.eq( db.getLastError() , "can't have more than 32 bits in geo index" , "a" );
+assert.eq( db.getLastError() , "bits in geo index must be between 1 and 32" , "a" );
t.ensureIndex( { loc : "2d" }, { bits : 32 } );
assert( !db.getLastError(), "b" );
diff --git a/jstests/geo_array0.js b/jstests/geo_array0.js
new file mode 100644
index 0000000..2d69611
--- /dev/null
+++ b/jstests/geo_array0.js
@@ -0,0 +1,25 @@
+// Make sure the very basics of geo arrays are sane by creating a few multi-location docs
+
+t = db.geoarray
+t.drop();
+
+t.insert( { zip : "10001", loc : { home : [ 10, 10 ], work : [ 50, 50 ] } } )
+t.insert( { zip : "10002", loc : { home : [ 20, 20 ], work : [ 50, 50 ] } } )
+t.insert( { zip : "10003", loc : { home : [ 30, 30 ], work : [ 50, 50 ] } } )
+assert.isnull( db.getLastError() )
+
+t.ensureIndex( { loc : "2d", zip : 1 } );
+assert.isnull( db.getLastError() )
+assert.eq( 2, t.getIndexKeys().length )
+
+t.insert( { zip : "10004", loc : { home : [ 40, 40 ], work : [ 50, 50 ] } } )
+
+assert.isnull( db.getLastError() )
+
+// test normal access
+
+printjson( t.find( { loc : { $within : { $box : [ [ 0, 0 ], [ 45, 45 ] ] } } } ).toArray() )
+
+assert.eq( 4, t.find( { loc : { $within : { $box : [ [ 0, 0 ], [ 45, 45 ] ] } } } ).count() );
+
+assert.eq( 4, t.find( { loc : { $within : { $box : [ [ 45, 45 ], [ 50, 50 ] ] } } } ).count() );
diff --git a/jstests/geo_array1.js b/jstests/geo_array1.js
new file mode 100644
index 0000000..56b7c85
--- /dev/null
+++ b/jstests/geo_array1.js
@@ -0,0 +1,30 @@
+// Make sure many locations in one doc work, in the form of an array
+
+t = db.geoarray1
+t.drop();
+
+var locObj = []
+
+// Add locations everywhere
+for ( var i = 0; i < 10; i++ ) {
+ for ( var j = 0; j < 10; j++ ) {
+ if ( j % 2 == 0 )
+ locObj.push( [ i, j ] )
+ else
+ locObj.push( { x : i, y : j } )
+ }
+}
+
+// Add docs with all these locations
+for( var i = 0; i < 300; i++ ){
+ t.insert( { loc : locObj } )
+}
+t.ensureIndex( { loc : "2d" } )
+
+// Pull them back
+for ( var i = 0; i < 10; i++ ) {
+ for ( var j = 0; j < 10; j++ ) {
+ assert.eq( 300, t.find( { loc : { $within : { $box : [ [ i - 0.5, j - 0.5 ], [ i + 0.5, j + 0.5 ] ] } } } )
+ .count() )
+ }
+}
diff --git a/jstests/geo_array2.js b/jstests/geo_array2.js
new file mode 100644
index 0000000..28cb152
--- /dev/null
+++ b/jstests/geo_array2.js
@@ -0,0 +1,163 @@
+// Check the semantics of near calls with multiple locations
+
+t = db.geoarray2
+t.drop();
+
+var numObjs = 10;
+var numLocs = 100;
+
+// Test the semantics of near / nearSphere / etc. queries with multiple keys per object
+
+for( var i = -1; i < 2; i++ ){
+ for(var j = -1; j < 2; j++ ){
+
+ locObj = []
+
+ if( i != 0 || j != 0 )
+ locObj.push( { x : i * 50 + Random.rand(),
+ y : j * 50 + Random.rand() } )
+ locObj.push( { x : Random.rand(),
+ y : Random.rand() } )
+ locObj.push( { x : Random.rand(),
+ y : Random.rand() } )
+
+ t.insert({ name : "" + i + "" + j , loc : locObj , type : "A" })
+ t.insert({ name : "" + i + "" + j , loc : locObj , type : "B" })
+ }
+}
+
+t.ensureIndex({ loc : "2d" , type : 1 })
+
+assert.isnull( db.getLastError() )
+
+print( "Starting testing phase... ")
+
+for( var t = 0; t < 2; t++ ){
+
+var type = t == 0 ? "A" : "B"
+
+for( var i = -1; i < 2; i++ ){
+ for(var j = -1; j < 2; j++ ){
+
+ var center = [ i * 50 , j * 50 ]
+ var count = i == 0 && j == 0 ? 2 * 9 : 1
+ var objCount = i == 0 && j == 0 ? 2 : 1
+
+ // Do near check
+
+ var nearResults = db.runCommand( { geoNear : "geoarray2" ,
+ near : center ,
+ num : count,
+ query : { type : type } } ).results
+ //printjson( nearResults )
+
+ var objsFound = {}
+ var lastResult = 0;
+ for( var k = 0; k < nearResults.length; k++ ){
+
+ // All distances should be small, for the # of results
+ assert.gt( 1.5 , nearResults[k].dis )
+ // Distances should be increasing
+ assert.lte( lastResult, nearResults[k].dis )
+ // Objs should be of the right type
+ assert.eq( type, nearResults[k].obj.type )
+
+ lastResult = nearResults[k].dis
+
+ var objKey = "" + nearResults[k].obj._id
+
+ if( objKey in objsFound ) objsFound[ objKey ]++
+ else objsFound[ objKey ] = 1
+
+ }
+
+ // Make sure we found the right objects each time
+ // Note: Multiple objects could be found at different distances.
+ for( var q in objsFound ){
+ assert.eq( objCount , objsFound[q] )
+ }
+
+
+ // Do nearSphere check
+
+ // Earth Radius
+ var eRad = 6371
+
+ nearResults = db.geoarray2.find( { loc : { $nearSphere : center , $maxDistance : 500 /* km */ / eRad }, type : type } ).toArray()
+
+ assert.eq( nearResults.length , count )
+
+ objsFound = {}
+ lastResult = 0;
+ for( var k = 0; k < nearResults.length; k++ ){
+ var objKey = "" + nearResults[k]._id
+ if( objKey in objsFound ) objsFound[ objKey ]++
+ else objsFound[ objKey ] = 1
+
+ }
+
+ // Make sure we found the right objects each time
+ for( var q in objsFound ){
+ assert.eq( objCount , objsFound[q] )
+ }
+
+
+
+ // $within results should not return duplicate documents
+
+ var count = i == 0 && j == 0 ? 9 : 1
+ var objCount = i == 0 && j == 0 ? 1 : 1
+
+ // Do within check
+ objsFound = {}
+
+ var box = [ [center[0] - 1, center[1] - 1] , [center[0] + 1, center[1] + 1] ]
+
+ //printjson( box )
+
+ var withinResults = db.geoarray2.find({ loc : { $within : { $box : box } } , type : type }).toArray()
+
+ assert.eq( withinResults.length , count )
+
+ for( var k = 0; k < withinResults.length; k++ ){
+ var objKey = "" + withinResults[k]._id
+ if( objKey in objsFound ) objsFound[ objKey ]++
+ else objsFound[ objKey ] = 1
+ }
+
+ //printjson( objsFound )
+
+ // Make sure we found the right objects each time
+ for( var q in objsFound ){
+ assert.eq( objCount , objsFound[q] )
+ }
+
+
+ // Do within check (circle)
+ objsFound = {}
+
+ withinResults = db.geoarray2.find({ loc : { $within : { $center : [ center, 1.5 ] } } , type : type }).toArray()
+
+ assert.eq( withinResults.length , count )
+
+ for( var k = 0; k < withinResults.length; k++ ){
+ var objKey = "" + withinResults[k]._id
+ if( objKey in objsFound ) objsFound[ objKey ]++
+ else objsFound[ objKey ] = 1
+ }
+
+ // Make sure we found the right objects each time
+ for( var q in objsFound ){
+ assert.eq( objCount , objsFound[q] )
+ }
+
+
+
+ }
+}
+
+}
+
+
+
+
diff --git a/jstests/geo_borders.js b/jstests/geo_borders.js
index 85ffe35..9e8788a 100644
--- a/jstests/geo_borders.js
+++ b/jstests/geo_borders.js
@@ -1,10 +1,7 @@
-
t = db.borders
t.drop()
-// FIXME: FAILS for all epsilon < 1
-epsilon = 1
-//epsilon = 0.99
+epsilon = 0.0001;
// For these tests, *required* that step ends exactly on max
min = -1
@@ -12,9 +9,9 @@ max = 1
step = 1
numItems = 0;
-for(var x = min; x <= max; x += step){
- for(var y = min; y <= max; y += step){
- t.insert({ loc: { x : x, y : y } })
+for ( var x = min; x <= max; x += step ) {
+ for ( var y = min; y <= max; y += step ) {
+ t.insert( { loc : { x : x, y : y } } )
numItems++;
}
}
@@ -23,167 +20,149 @@ overallMin = -1
overallMax = 1
// Create a point index slightly smaller than the points we have
-t.ensureIndex({ loc : "2d" }, { max : overallMax - epsilon / 2, min : overallMin + epsilon / 2})
-assert(db.getLastError(), "A1")
+t.ensureIndex( { loc : "2d" }, { max : overallMax - epsilon / 2, min : overallMin + epsilon / 2 } )
+assert( db.getLastError() )
-// FIXME: FAILS for all epsilon < 1
// Create a point index only slightly bigger than the points we have
-t.ensureIndex({ loc : "2d" }, { max : overallMax + epsilon, min : overallMin - epsilon })
-assert.isnull(db.getLastError(), "A2")
-
-
-
-
-
-
-
+t.ensureIndex( { loc : "2d" }, { max : overallMax + epsilon, min : overallMin - epsilon } )
+assert.isnull( db.getLastError() )
-//************
+// ************
// Box Tests
-//************
-
-
-/*
-// FIXME: Fails w/ non-nice error
-// Make sure we can get all points in full bounds
-assert(numItems == t.find({ loc : { $within : { $box : [[overallMin - epsilon,
- overallMin - epsilon],
- [overallMax + epsilon,
- overallMax + epsilon]] } } }).count(), "B1");
-*/
-
-// Make sure an error is thrown if the bounds are bigger than the box itself
-// TODO: Do we really want an error in this case? Shouldn't we just clip the box?
-try{
- t.findOne({ loc : { $within : { $box : [[overallMin - 2 * epsilon,
- overallMin - 2 * epsilon],
- [overallMax + 2 * epsilon,
- overallMax + 2 * epsilon]] } } });
- assert(false, "B2");
-}
-catch(e){}
-
-//Make sure we can get at least close to the bounds of the index
-assert(numItems == t.find({ loc : { $within : { $box : [[overallMin - epsilon / 2,
- overallMin - epsilon / 2],
- [overallMax + epsilon / 2,
- overallMax + epsilon / 2]] } } }).count(), "B3");
-
-
-//**************
-//Circle tests
-//**************
-
-center = (overallMax + overallMin) / 2
-center = [center, center]
+// ************
+
+// If the bounds are bigger than the box itself, just clip at the borders
+assert.eq( numItems, t.find(
+ { loc : { $within : { $box : [
+ [ overallMin - 2 * epsilon, overallMin - 2 * epsilon ],
+ [ overallMax + 2 * epsilon, overallMax + 2 * epsilon ] ] } } } ).count() );
+
+// Check this works also for bounds where only a single dimension is off-bounds
+assert.eq( numItems - 5, t.find(
+ { loc : { $within : { $box : [
+ [ overallMin - 2 * epsilon, overallMin - 0.5 * epsilon ],
+ [ overallMax - epsilon, overallMax - epsilon ] ] } } } ).count() );
+
+// Make sure we can get at least close to the bounds of the index
+assert.eq( numItems, t.find(
+ { loc : { $within : { $box : [
+ [ overallMin - epsilon / 2, overallMin - epsilon / 2 ],
+ [ overallMax + epsilon / 2, overallMax + epsilon / 2 ] ] } } } ).count() );
+
+// Make sure we can get at least close to the bounds of the index
+assert.eq( numItems, t.find(
+ { loc : { $within : { $box : [
+ [ overallMax + epsilon / 2, overallMax + epsilon / 2 ],
+ [ overallMin - epsilon / 2, overallMin - epsilon / 2 ] ] } } } ).count() );
+
+// Check that swapping min/max has good behavior
+assert.eq( numItems, t.find(
+ { loc : { $within : { $box : [
+ [ overallMax + epsilon / 2, overallMax + epsilon / 2 ],
+ [ overallMin - epsilon / 2, overallMin - epsilon / 2 ] ] } } } ).count() );
+
+assert.eq( numItems, t.find(
+ { loc : { $within : { $box : [
+ [ overallMax + epsilon / 2, overallMin - epsilon / 2 ],
+ [ overallMin - epsilon / 2, overallMax + epsilon / 2 ] ] } } } ).count() );
+
+// **************
+// Circle tests
+// **************
+
+center = ( overallMax + overallMin ) / 2
+center = [ center, center ]
radius = overallMax
-offCenter = [center[0] + radius, center[1] + radius]
-onBounds = [offCenter[0] + epsilon, offCenter[1] + epsilon]
-offBounds = [onBounds[0] + epsilon, onBounds[1] + epsilon]
-
+offCenter = [ center[0] + radius, center[1] + radius ]
+onBounds = [ offCenter[0] + epsilon, offCenter[1] + epsilon ]
+offBounds = [ onBounds[0] + epsilon, onBounds[1] + epsilon ]
+onBoundsNeg = [ -onBounds[0], -onBounds[1] ]
-//Make sure we can get all points when radius is exactly at full bounds
-assert(0 < t.find({ loc : { $within : { $center : [center, radius + epsilon] } } }).count(), "C1");
+// Make sure we can get all points when radius is exactly at full bounds
+assert.lt( 0, t.find( { loc : { $within : { $center : [ center, radius + epsilon ] } } } ).count() );
-//Make sure we can get points when radius is over full bounds
-assert(0 < t.find({ loc : { $within : { $center : [center, radius + 2 * epsilon] } } }).count(), "C2");
+// Make sure we can get points when radius is over full bounds
+assert.lt( 0, t.find( { loc : { $within : { $center : [ center, radius + 2 * epsilon ] } } } ).count() );
-//Make sure we can get points when radius is over full bounds, off-centered
-assert(0 < t.find({ loc : { $within : { $center : [offCenter, radius + 2 * epsilon] } } }).count(), "C3");
+// Make sure we can get points when radius is over full bounds, off-centered
+assert.lt( 0, t.find( { loc : { $within : { $center : [ offCenter, radius + 2 * epsilon ] } } } ).count() );
-//Make sure we get correct corner point when center is in bounds
+// Make sure we get correct corner point when center is in bounds
// (x bounds wrap, so could get other corner)
-cornerPt = t.findOne({ loc : { $within : { $center : [offCenter, step / 2] } } });
-assert(cornerPt.loc.y == overallMax, "C4")
+cornerPt = t.findOne( { loc : { $within : { $center : [ offCenter, step / 2 ] } } } );
+assert.eq( cornerPt.loc.y, overallMax )
-/*
-// FIXME: FAILS, returns opposite corner
// Make sure we get correct corner point when center is on bounds
-cornerPt = t.findOne({ loc : { $within : { $center : [onBounds,
- Math.sqrt(2 * epsilon * epsilon) + (step / 2) ] } } });
-assert(cornerPt.loc.y == overallMax, "C5")
-*/
+// NOTE: Only valid points on MIN bounds
+cornerPt = t
+ .findOne( { loc : { $within : { $center : [ onBoundsNeg, Math.sqrt( 2 * epsilon * epsilon ) + ( step / 2 ) ] } } } );
+assert.eq( cornerPt.loc.y, overallMin )
-// TODO: Handle gracefully?
// Make sure we can't get corner point when center is over bounds
-try{
- t.findOne({ loc : { $within : { $center : [offBounds,
- Math.sqrt(8 * epsilon * epsilon) + (step / 2) ] } } });
- assert(false, "C6")
+try {
+ t.findOne( { loc : { $within : { $center : [ offBounds, Math.sqrt( 8 * epsilon * epsilon ) + ( step / 2 ) ] } } } );
+ assert( false )
+} catch (e) {
}
-catch(e){}
-
-
-
-
-
-
-//***********
-//Near tests
-//***********
-
-//Make sure we can get all nearby points to point in range
-assert(t.find({ loc : { $near : offCenter } }).next().loc.y == overallMax,
- "D1");
-
-/*
-// FIXME: FAILS, returns opposite list
-// Make sure we can get all nearby points to point on boundary
-assert(t.find({ loc : { $near : onBounds } }).next().loc.y == overallMax,
- "D2");
-*/
-
-//TODO: Could this work?
-//Make sure we can't get all nearby points to point over boundary
-try{
- t.findOne({ loc : { $near : offBounds } })
- assert(false, "D3")
+// Make sure we can't get corner point when center is on max bounds
+try {
+ t.findOne( { loc : { $within : { $center : [ onBounds, Math.sqrt( 8 * epsilon * epsilon ) + ( step / 2 ) ] } } } );
+ assert( false )
+} catch (e) {
}
-catch(e){}
-
-/*
-// FIXME: FAILS, returns only single point
-//Make sure we can get all nearby points within one step (4 points in top corner)
-assert(4 == t.find({ loc : { $near : offCenter, $maxDistance : step * 1.9 } }).count(),
- "D4");
-*/
+// ***********
+// Near tests
+// ***********
+// Make sure we can get all nearby points to point in range
+assert.eq( overallMax, t.find( { loc : { $near : offCenter } } ).next().loc.y );
-//**************
-//Command Tests
-//**************
+// Make sure we can get all nearby points to point on boundary
+assert.eq( overallMin, t.find( { loc : { $near : onBoundsNeg } } ).next().loc.y );
+// Make sure we can't get all nearby points to point over boundary
+try {
+ t.findOne( { loc : { $near : offBounds } } )
+ assert( false )
+} catch (e) {
+}
+// Make sure we can't get all nearby points to point on max boundary
+try {
+ t.findOne( { loc : { $near : onBoundsNeg } } )
+ assert( false )
+} catch (e) {
+}
-//Make sure we can get all nearby points to point in range
-assert(db.runCommand({ geoNear : "borders", near : offCenter }).results[0].obj.loc.y == overallMax,
- "E1");
+// Make sure we can get all nearby points within one step (4 points in top
+// corner)
+assert.eq( 4, t.find( { loc : { $near : offCenter, $maxDistance : step * 1.9 } } ).count() );
+// **************
+// Command Tests
+// **************
+// Make sure we can get all nearby points to point in range
+assert.eq( overallMax, db.runCommand( { geoNear : "borders", near : offCenter } ).results[0].obj.loc.y );
-/*
-// FIXME: FAILS, returns opposite list
-//Make sure we can get all nearby points to point on boundary
-assert(db.runCommand({ geoNear : "borders", near : onBounds }).results[0].obj.loc.y == overallMax,
- "E2");
-*/
+// Make sure we can get all nearby points to point on boundary
+assert.eq( overallMin, db.runCommand( { geoNear : "borders", near : onBoundsNeg } ).results[0].obj.loc.y );
-//TODO: Could this work?
-//Make sure we can't get all nearby points to point over boundary
-try{
- db.runCommand({ geoNear : "borders", near : offBounds }).results.length
- assert(false, "E3")
+// Make sure we can't get all nearby points to point over boundary
+try {
+ db.runCommand( { geoNear : "borders", near : offBounds } ).results.length
+ assert( false )
+} catch (e) {
}
-catch(e){}
-
-
-/*
-// FIXME: Fails, returns one point
-//Make sure we can get all nearby points within one step (4 points in top corner)
-assert(4 == db.runCommand({ geoNear : "borders", near : offCenter, maxDistance : step * 1.5 }).results.length,
- "E4");
-*/
-
+// Make sure we can't get all nearby points to point on max boundary
+try {
+ db.runCommand( { geoNear : "borders", near : onBounds } ).results.length
+ assert( false )
+} catch (e) {
+}
+// Make sure we can get all nearby points within one step (4 points in top
+// corner)
+assert.eq( 4, db.runCommand( { geoNear : "borders", near : offCenter, maxDistance : step * 1.5 } ).results.length );
diff --git a/jstests/geo_center_sphere2.js b/jstests/geo_center_sphere2.js
new file mode 100644
index 0000000..c9c5fbb
--- /dev/null
+++ b/jstests/geo_center_sphere2.js
@@ -0,0 +1,158 @@
+//
+// Tests the error handling of spherical queries
+// along with multi-location documents.
+// This is necessary since the error handling must manage
+// multiple documents, and so requires simultaneous testing.
+//
+
+var numTests = 30
+
+for ( var test = 0; test < numTests; test++ ) {
+
+ //var fixedTest = 6017
+ //if( fixedTest ) test = fixedTest
+
+ Random.srand( 1337 + test );
+
+ var radius = 5000 * Random.rand() // km
+ radius = radius / 6371 // radians
+ var numDocs = Math.floor( 400 * Random.rand() )
+ // TODO: Wrapping uses the error value to figure out what would overlap...
+ var bits = Math.floor( 5 + Random.rand() * 28 )
+ var maxPointsPerDoc = 50
+
+ t = db.sphere
+
+ var randomPoint = function() {
+ return [ Random.rand() * 360 - 180, Random.rand() * 180 - 90 ];
+ }
+
+ // Get a start point that doesn't require wrapping
+ // TODO: Are we a bit too aggressive with wrapping issues?
+ var startPoint
+ var ex = null
+ do {
+
+ t.drop()
+ startPoint = randomPoint()
+ t.ensureIndex( { loc : "2d" }, { bits : bits } )
+
+ try {
+ // Check for wrapping issues
+ t.find( { loc : { $within : { $centerSphere : [ startPoint, radius ] } } } ).toArray()
+ ex = null
+ } catch (e) {
+ ex = e
+ }
+ } while (ex)
+
+ var pointsIn = 0
+ var pointsOut = 0
+ var docsIn = 0
+ var docsOut = 0
+ var totalPoints = 0
+
+ //var point = randomPoint()
+
+ for ( var i = 0; i < numDocs; i++ ) {
+
+ var numPoints = Math.floor( Random.rand() * maxPointsPerDoc + 1 )
+ var docIn = false
+ var multiPoint = []
+
+ totalPoints += numPoints
+
+ for ( var p = 0; p < numPoints; p++ ) {
+ var point = randomPoint()
+ multiPoint.push( point )
+
+ if ( Geo.sphereDistance( startPoint, point ) <= radius ) {
+ pointsIn++
+ docIn = true
+ } else {
+ pointsOut++
+ }
+ }
+
+ t.insert( { loc : multiPoint } )
+
+ if ( docIn )
+ docsIn++
+ else
+ docsOut++
+
+ }
+
+ printjson( { test: test,
+ radius : radius, bits : bits, numDocs : numDocs, pointsIn : pointsIn, docsIn : docsIn, pointsOut : pointsOut,
+ docsOut : docsOut } )
+
+ assert.isnull( db.getLastError() )
+ assert.eq( docsIn + docsOut, numDocs )
+ assert.eq( pointsIn + pointsOut, totalPoints )
+
+ // $centerSphere
+ assert.eq( docsIn, t.find( { loc : { $within : { $centerSphere : [ startPoint, radius ] } } } ).count() )
+
+ // $nearSphere
+ var results = t.find( { loc : { $nearSphere : startPoint, $maxDistance : radius } } ).limit( 2 * pointsIn )
+ .toArray()
+
+ assert.eq( pointsIn, results.length )
+
+ var distance = 0;
+ for ( var i = 0; i < results.length; i++ ) {
+
+ var minNewDistance = radius + 1
+ for( var j = 0; j < results[i].loc.length; j++ ){
+ var newDistance = Geo.sphereDistance( startPoint, results[i].loc[j] )
+ if( newDistance < minNewDistance && newDistance >= distance ) minNewDistance = newDistance
+ }
+
+ //print( "Dist from : " + results[i].loc[j] + " to " + startPoint + " is "
+ // + minNewDistance + " vs " + radius )
+
+ assert.lte( minNewDistance, radius )
+ assert.gte( minNewDistance, distance )
+ distance = minNewDistance
+
+ }
+
+ // geoNear
+ var results = db.runCommand( {
+ geoNear : "sphere", near : startPoint, maxDistance : radius, num : 2 * pointsIn, spherical : true } ).results
+
+ /*
+ printjson( results );
+
+ for ( var j = 0; j < results[0].obj.loc.length; j++ ) {
+ var newDistance = Geo.sphereDistance( startPoint, results[0].obj.loc[j] )
+ if( newDistance <= radius ) print( results[0].obj.loc[j] + " : " + newDistance )
+ }
+ */
+
+ assert.eq( pointsIn, results.length )
+
+ var distance = 0;
+ for ( var i = 0; i < results.length; i++ ) {
+ var retDistance = results[i].dis
+
+ // print( "Dist from : " + results[i].loc + " to " + startPoint + " is "
+ // + retDistance + " vs " + radius )
+
+ var distInObj = false
+ for ( var j = 0; j < results[i].obj.loc.length && distInObj == false; j++ ) {
+ var newDistance = Geo.sphereDistance( startPoint, results[i].obj.loc[j] )
+ distInObj = ( newDistance >= retDistance - 0.0001 && newDistance <= retDistance + 0.0001 )
+ }
+
+ assert( distInObj )
+ assert.lte( retDistance, radius )
+ assert.gte( retDistance, distance )
+ distance = retDistance
+ }
+
+ //break;
+}
+
+
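+// Worked example of the km -> radian conversion this test relies on (distances
+// are divided by the Earth's radius, ~6371 km, because $centerSphere and
+// $nearSphere take radians). kmToRad is just a local illustration helper.
+var kmToRad = function( km ) { return km / 6371; };
+assert( Math.abs( kmToRad( 100 ) - 0.0157 ) < 0.001 ); // 100 km is about 0.0157 radians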
diff --git a/jstests/geo_distinct.js b/jstests/geo_distinct.js
new file mode 100644
index 0000000..60e0d15
--- /dev/null
+++ b/jstests/geo_distinct.js
@@ -0,0 +1,16 @@
+// Test distinct with geo queries SERVER-2135
+
+t = db.commits
+t.drop()
+
+t.save( { _id : ObjectId( "4ce63ec2f360622431000013" ), loc : [ 55.59664, 13.00156 ], author : "FredrikL" } )
+
+printjson( db.runCommand( { distinct : 'commits', key : 'loc' } ) )
+assert.isnull( db.getLastError() )
+
+t.ensureIndex( { loc : '2d' } )
+
+printjson( t.getIndexes() )
+
+printjson( db.runCommand( { distinct : 'commits', key : 'loc' } ) )
+assert.isnull( db.getLastError() ) \ No newline at end of file
diff --git a/jstests/geo_fiddly_box.js b/jstests/geo_fiddly_box.js
new file mode 100644
index 0000000..2a9cf49
--- /dev/null
+++ b/jstests/geo_fiddly_box.js
@@ -0,0 +1,44 @@
+// Reproduces simple test for SERVER-2832
+
+// The setup to reproduce is to create a set of points where the
+// "expand" portion of the geo-lookup expands the 2d range in only one
+// direction (so points are required on either side of the expanding range)
+
+db.geo_fiddly_box.drop();
+db.geo_fiddly_box.ensureIndex({ loc : "2d" })
+
+db.geo_fiddly_box.insert({ "loc" : [3, 1] })
+db.geo_fiddly_box.insert({ "loc" : [3, 0.5] })
+db.geo_fiddly_box.insert({ "loc" : [3, 0.25] })
+db.geo_fiddly_box.insert({ "loc" : [3, -0.01] })
+db.geo_fiddly_box.insert({ "loc" : [3, -0.25] })
+db.geo_fiddly_box.insert({ "loc" : [3, -0.5] })
+db.geo_fiddly_box.insert({ "loc" : [3, -1] })
+
+// OK!
+print( db.geo_fiddly_box.count() )
+assert.eq( 7, db.geo_fiddly_box.count({ "loc" : { "$within" : { "$box" : [ [2, -2], [46, 2] ] } } }), "Not all locations found!" );
+
+
+// Test normal lookup of a small square of points as a sanity check.
+
+epsilon = 0.0001;
+min = -1
+max = 1
+step = 1
+numItems = 0;
+
+db.geo_fiddly_box2.drop()
+db.geo_fiddly_box2.ensureIndex({ loc : "2d" }, { max : max + epsilon / 2, min : min - epsilon / 2 })
+
+for(var x = min; x <= max; x += step){
+ for(var y = min; y <= max; y += step){
+ db.geo_fiddly_box2.insert({ "loc" : { x : x, y : y } })
+ numItems++;
+ }
+}
+
+assert.eq( numItems, db.geo_fiddly_box2.count({ loc : { $within : { $box : [[min - epsilon / 3,
+ min - epsilon / 3],
+ [max + epsilon / 3,
+ max + epsilon / 3]] } } }), "Not all locations found!");
diff --git a/jstests/geo_fiddly_box2.js b/jstests/geo_fiddly_box2.js
new file mode 100644
index 0000000..0588abf
--- /dev/null
+++ b/jstests/geo_fiddly_box2.js
@@ -0,0 +1,32 @@
+// Reproduces simple test for SERVER-2115
+
+// The setup to reproduce is to create a set of points and really big index bounds so that we are required to do
+// exact lookups on the points to get correct results.
+
+t = db.geo_fiddly_box2
+t.drop()
+
+t.insert( { "letter" : "S", "position" : [ -3, 0 ] } )
+t.insert( { "letter" : "C", "position" : [ -2, 0 ] } )
+t.insert( { "letter" : "R", "position" : [ -1, 0 ] } )
+t.insert( { "letter" : "A", "position" : [ 0, 0 ] } )
+t.insert( { "letter" : "B", "position" : [ 1, 0 ] } )
+t.insert( { "letter" : "B", "position" : [ 2, 0 ] } )
+t.insert( { "letter" : "L", "position" : [ 3, 0 ] } )
+t.insert( { "letter" : "E", "position" : [ 4, 0 ] } )
+
+t.ensureIndex( { position : "2d" } )
+result = t.find( { "position" : { "$within" : { "$box" : [ [ -3, -1 ], [ 0, 1 ] ] } } } )
+assert.eq( 4, result.count() )
+
+t.dropIndex( { position : "2d" } )
+t.ensureIndex( { position : "2d" }, { min : -10000000, max : 10000000 } )
+
+result = t.find( { "position" : { "$within" : { "$box" : [ [ -3, -1 ], [ 0, 1 ] ] } } } )
+assert.eq( 4, result.count() )
+
+t.dropIndex( { position : "2d" } )
+t.ensureIndex( { position : "2d" }, { min : -1000000000, max : 1000000000 } )
+
+result = t.find( { "position" : { "$within" : { "$box" : [ [ -3, -1 ], [ 0, 1 ] ] } } } )
+assert.eq( 4, result.count() )
diff --git a/jstests/geo_group.js b/jstests/geo_group.js
new file mode 100644
index 0000000..4e038f9
--- /dev/null
+++ b/jstests/geo_group.js
@@ -0,0 +1,35 @@
+t = db.geo_group;
+t.drop();
+
+n = 1;
+for ( var x=-100; x<100; x+=2 ){
+ for ( var y=-100; y<100; y+=2 ){
+ t.insert( { _id : n++ , loc : [ x , y ] } )
+ }
+}
+
+t.ensureIndex( { loc : "2d" } );
+
+// Test basic count with $near
+assert.eq(t.find().count(), 10000);
+assert.eq(t.find( { loc : { $within : {$center : [[56,8], 10]}}}).count(), 81);
+assert.eq(t.find( { loc : { $near : [56, 8, 10] } } ).count(), 81);
+
+// Test basic group that effectively does a count
+assert.eq(
+ t.group( {
+ reduce : function (obj, prev) { prev.sums = { count : prev.sums.count + 1} },
+ initial : { sums:{count:0} } }
+ ),
+ [ { "sums" : { "count" : 10000 } } ]
+);
+
+// Test basic group + $near that does a count
+assert.eq(
+ t.group( {
+ reduce : function (obj, prev) { prev.sums = { count : prev.sums.count + 1} },
+ initial : { sums:{count:0} },
+ cond : { loc : { $near : [56, 8, 10] } } }
+ ),
+ [ { "sums" : { "count" : 81 } } ]
+);
diff --git a/jstests/geo_mapreduce.js b/jstests/geo_mapreduce.js
new file mode 100644
index 0000000..a6ecf76
--- /dev/null
+++ b/jstests/geo_mapreduce.js
@@ -0,0 +1,56 @@
+// Test script from SERVER-1742
+
+// MongoDB test script for mapreduce with geo query
+
+// setup test collection
+db.apples.drop()
+db.apples.insert( { "geo" : { "lat" : 32.68331909, "long" : 69.41610718 }, "apples" : 5 } );
+db.apples.insert( { "geo" : { "lat" : 35.01860809, "long" : 70.92027283 }, "apples" : 2 } );
+db.apples.insert( { "geo" : { "lat" : 31.11639023, "long" : 64.19970703 }, "apples" : 11 } );
+db.apples.insert( { "geo" : { "lat" : 32.64500046, "long" : 69.36251068 }, "apples" : 4 } );
+db.apples.insert( { "geo" : { "lat" : 33.23638916, "long" : 69.81360626 }, "apples" : 9 } );
+db.apples.ensureIndex( { "geo" : "2d" } );
+
+center = [ 32.68, 69.41 ];
+radius = 10 / 111; // 10km; 1 arcdegree ~= 111km
+geo_query = { geo : { '$within' : { '$center' : [ center, radius ] } } };
+
+// geo query on collection works fine
+res = db.apples.find( geo_query );
+assert.eq( 2, res.count() );
+
+// map function
+m = function() {
+ emit( null, { "apples" : this.apples } );
+};
+
+// reduce function
+r = function(key, values) {
+ var total = 0;
+ for ( var i = 0; i < values.length; i++ ) {
+ total += values[i].apples;
+ }
+ return { "apples" : total };
+};
+
+// mapreduce without geo query works fine
+res = db.apples.mapReduce( m, r, { out : { inline : 1 } } );
+
+printjson( res )
+total = res.results[0];
+assert.eq( 31, total.value.apples );
+
+// mapreduce with regular query works fine too
+res = db.apples.mapReduce( m, r, { out : { inline : 1 }, query : { apples : { '$lt' : 9 } } } );
+total = res.results[0];
+assert.eq( 11, total.value.apples );
+
+// mapreduce with geo query gives error on mongodb version 1.6.2
+// uncaught exception: map reduce failed: {
+// "assertion" : "manual matcher config not allowed",
+// "assertionCode" : 13285,
+// "errmsg" : "db assertion failure",
+// "ok" : 0 }
+res = db.apples.mapReduce( m, r, { out : { inline : 1 }, query : geo_query } );
+total = res.results[0];
+assert.eq( 9, total.value.apples );
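+
+// Recap of the arithmetic behind the totals asserted above: all five docs sum
+// to 5 + 2 + 11 + 4 + 9 = 31 apples, the apples < 9 subset to 5 + 2 + 4 = 11,
+// and the two docs inside the 10 km circle (apples 5 and 4) to 9.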
diff --git a/jstests/geo_mapreduce2.js b/jstests/geo_mapreduce2.js
new file mode 100644
index 0000000..9c39345
--- /dev/null
+++ b/jstests/geo_mapreduce2.js
@@ -0,0 +1,36 @@
+// Geo mapreduce 2 from SERVER-3478
+
+var coll = db.geoMR2
+coll.drop()
+
+for( var i = 0; i < 300; i++ )
+ coll.insert({ i : i, location : [ 10, 20 ] })
+
+coll.ensureIndex({ location : "2d" })
+
+// map function
+m = function() {
+ emit( null, { count : this.i } )
+}
+
+// reduce function
+r = function( key, values ) {
+
+ var total = 0
+ for ( var i = 0; i < values.length; i++ ) {
+ total += values[i].count
+ }
+
+ return { count : total }
+};
+
+try{ coll.mapReduce( m, r,
+ { out : coll.getName() + "_mr",
+ sort : { _id : 1 },
+ query : { 'location' : { $within : { $centerSphere : [[ 10, 20 ], 0.01 ] } } } })
+
+}
+catch( e ){
+ // This should occur, since we can't do an in-memory sort for mapReduce
+ printjson( e )
+}
diff --git a/jstests/geo_multinest0.js b/jstests/geo_multinest0.js
new file mode 100644
index 0000000..68e6095
--- /dev/null
+++ b/jstests/geo_multinest0.js
@@ -0,0 +1,63 @@
+// Make sure nesting of location arrays also works.
+
+t = db.geonest
+t.drop();
+
+t.insert( { zip : "10001", data : [ { loc : [ 10, 10 ], type : "home" },
+ { loc : [ 50, 50 ], type : "work" } ] } )
+t.insert( { zip : "10002", data : [ { loc : [ 20, 20 ], type : "home" },
+ { loc : [ 50, 50 ], type : "work" } ] } )
+t.insert( { zip : "10003", data : [ { loc : [ 30, 30 ], type : "home" },
+ { loc : [ 50, 50 ], type : "work" } ] } )
+assert.isnull( db.getLastError() )
+
+t.ensureIndex( { "data.loc" : "2d", zip : 1 } );
+assert.isnull( db.getLastError() )
+assert.eq( 2, t.getIndexKeys().length )
+
+t.insert( { zip : "10004", data : [ { loc : [ 40, 40 ], type : "home" },
+ { loc : [ 50, 50 ], type : "work" } ] } )
+assert.isnull( db.getLastError() )
+
+// test normal access
+
+printjson( t.find( { "data.loc" : { $within : { $box : [ [ 0, 0 ], [ 45, 45 ] ] } } } ).toArray() )
+
+assert.eq( 4, t.find( { "data.loc" : { $within : { $box : [ [ 0, 0 ], [ 45, 45 ] ] } } } ).count() );
+
+assert.eq( 4, t.find( { "data.loc" : { $within : { $box : [ [ 45, 45 ], [ 50, 50 ] ] } } } ).count() );
+
+
+
+
+
+// Try a complex nesting
+
+t = db.geonest
+t.drop();
+
+t.insert( { zip : "10001", data : [ { loc : [ [ 10, 10 ], { lat : 50, long : 50 } ], type : "home" } ] } )
+t.insert( { zip : "10002", data : [ { loc : [ 20, 20 ], type : "home" },
+ { loc : [ 50, 50 ], type : "work" } ] } )
+t.insert( { zip : "10003", data : [ { loc : [ { x : 30, y : 30 }, [ 50, 50 ] ], type : "home" } ] } )
+assert.isnull( db.getLastError() )
+
+t.ensureIndex( { "data.loc" : "2d", zip : 1 } );
+assert.isnull( db.getLastError() )
+assert.eq( 2, t.getIndexKeys().length )
+
+t.insert( { zip : "10004", data : [ { loc : [ 40, 40 ], type : "home" },
+ { loc : [ 50, 50 ], type : "work" } ] } )
+
+
+assert.isnull( db.getLastError() )
+
+// test normal access
+printjson( t.find( { "data.loc" : { $within : { $box : [ [ 0, 0 ], [ 45, 45 ] ] } } } ).toArray() )
+
+assert.eq( 4, t.find( { "data.loc" : { $within : { $box : [ [ 0, 0 ], [ 45, 45 ] ] } } } ).count() );
+
+assert.eq( 4, t.find( { "data.loc" : { $within : { $box : [ [ 45, 45 ], [ 50, 50 ] ] } } } ).count() );
+
+
+
diff --git a/jstests/geo_multinest1.js b/jstests/geo_multinest1.js
new file mode 100644
index 0000000..7754f24
--- /dev/null
+++ b/jstests/geo_multinest1.js
@@ -0,0 +1,37 @@
+// Test distance queries with interleaved distances
+
+t = db.multinest
+t.drop();
+
+t.insert( { zip : "10001", data : [ { loc : [ 10, 10 ], type : "home" },
+ { loc : [ 29, 29 ], type : "work" } ] } )
+t.insert( { zip : "10002", data : [ { loc : [ 20, 20 ], type : "home" },
+ { loc : [ 39, 39 ], type : "work" } ] } )
+t.insert( { zip : "10003", data : [ { loc : [ 30, 30 ], type : "home" },
+ { loc : [ 49, 49 ], type : "work" } ] } )
+assert.isnull( db.getLastError() )
+
+t.ensureIndex( { "data.loc" : "2d", zip : 1 } );
+assert.isnull( db.getLastError() )
+assert.eq( 2, t.getIndexKeys().length )
+
+t.insert( { zip : "10004", data : [ { loc : [ 40, 40 ], type : "home" },
+ { loc : [ 59, 59 ], type : "work" } ] } )
+assert.isnull( db.getLastError() )
+
+// test normal access
+
+var result = t.find({ "data.loc" : { $near : [0, 0] } }).toArray();
+
+printjson( result )
+
+assert.eq( 8, result.length )
+
+var order = [ 1, 2, 1, 3, 2, 4, 3, 4 ]
+
+for( var i = 0; i < result.length; i++ ){
+ assert.eq( "1000" + order[i], result[i].zip )
+}
+
+
+
diff --git a/jstests/geo_oob_sphere.js b/jstests/geo_oob_sphere.js
new file mode 100644
index 0000000..d493f36
--- /dev/null
+++ b/jstests/geo_oob_sphere.js
@@ -0,0 +1,42 @@
+//
+// Ensures spherical queries report invalid latitude values in points and center positions
+//
+
+t = db.geooobsphere
+t.drop();
+
+t.insert({ loc : { x : 30, y : 89 } })
+t.insert({ loc : { x : 30, y : 89 } })
+t.insert({ loc : { x : 30, y : 89 } })
+t.insert({ loc : { x : 30, y : 89 } })
+t.insert({ loc : { x : 30, y : 89 } })
+t.insert({ loc : { x : 30, y : 89 } })
+t.insert({ loc : { x : 30, y : 91 } })
+
+t.ensureIndex({ loc : "2d" })
+assert.isnull( db.getLastError() )
+
+t.find({ loc : { $nearSphere : [ 30, 91 ], $maxDistance : 0.25 } }).count()
+var err = db.getLastError()
+assert( err != null )
+printjson( err )
+
+t.find({ loc : { $nearSphere : [ 30, 89 ], $maxDistance : 0.25 } }).count()
+var err = db.getLastError()
+assert( err != null )
+printjson( err )
+
+t.find({ loc : { $within : { $centerSphere : [[ -180, -91 ], 0.25] } } }).count()
+var err = db.getLastError()
+assert( err != null )
+printjson( err )
+
+db.runCommand({ geoNear : "geooobsphere", near : [179, -91], maxDistance : 0.25, spherical : true })
+var err = db.getLastError()
+assert( err != null )
+printjson( err )
+
+db.runCommand({ geoNear : "geooobsphere", near : [30, 89], maxDistance : 0.25, spherical : true })
+var err = db.getLastError()
+assert( err != null )
+printjson( err ) \ No newline at end of file
diff --git a/jstests/geo_poly_edge.js b/jstests/geo_poly_edge.js
new file mode 100644
index 0000000..31a0849
--- /dev/null
+++ b/jstests/geo_poly_edge.js
@@ -0,0 +1,22 @@
+//
+// Tests polygon edge cases
+//
+
+var coll = db.getCollection( 'jstests_geo_poly_edge' )
+coll.drop();
+
+coll.ensureIndex({ loc : "2d" })
+
+coll.insert({ loc : [10, 10] })
+coll.insert({ loc : [10, -10] })
+
+assert.eq( coll.find({ loc : { $within : { $polygon : [[ 10, 10 ], [ 10, 10 ], [ 10, -10 ]] } } }).itcount(), 2 )
+
+assert.eq( coll.find({ loc : { $within : { $polygon : [[ 10, 10 ], [ 10, 10 ], [ 10, 10 ]] } } }).itcount(), 1 )
+
+
+coll.insert({ loc : [179, 0] })
+coll.insert({ loc : [0, 179] })
+
+assert.eq( coll.find({ loc : { $within : { $polygon : [[0, 0], [1000, 0], [1000, 1000], [0, 1000]] } } }).itcount(), 3 )
+
diff --git a/jstests/geo_poly_line.js b/jstests/geo_poly_line.js
new file mode 100644
index 0000000..aca77b6
--- /dev/null
+++ b/jstests/geo_poly_line.js
@@ -0,0 +1,17 @@
+// Test that weird polygons work SERVER-3725
+
+t = db.geo_polygon5;
+t.drop();
+
+t.insert({loc:[0,0]})
+t.insert({loc:[1,0]})
+t.insert({loc:[2,0]})
+t.insert({loc:[3,0]})
+t.insert({loc:[4,0]})
+
+t.ensureIndex( { loc : "2d" } );
+
+printjson( t.find({ loc: { "$within": { "$polygon" : [[0,0], [2,0], [4,0]] } } }).toArray() )
+
+assert.eq( 5, t.find({ loc: { "$within": { "$polygon" : [[0,0], [2,0], [4,0]] } } }).itcount() )
+
diff --git a/jstests/geo_polygon1.js b/jstests/geo_polygon1.js
new file mode 100644
index 0000000..4b7427a
--- /dev/null
+++ b/jstests/geo_polygon1.js
@@ -0,0 +1,74 @@
+//
+// Tests for N-dimensional polygon querying
+//
+
+t = db.geo_polygon1;
+t.drop();
+
+num = 0;
+for ( x=1; x < 9; x++ ){
+ for ( y= 1; y < 9; y++ ){
+ o = { _id : num++ , loc : [ x , y ] };
+ t.save( o );
+ }
+}
+
+t.ensureIndex( { loc : "2d" } );
+
+triangle = [[0,0], [1,1], [0,2]];
+
+// Look at only a small slice of the data within a triangle
+assert.eq( 1 , t.find( { loc: { "$within": { "$polygon" : triangle }}} ).count() , "Triangle Test" );
+
+boxBounds = [ [0,0], [0,10], [10,10], [10,0] ];
+
+assert.eq( num , t.find( { loc : { "$within" : { "$polygon" : boxBounds } } } ).count() , "Bounding Box Test" );
+
+//Make sure we can add object-based polygons
+assert.eq( num, t.find( { loc : { $within : { $polygon : { a : [-10, -10], b : [-10, 10], c : [10, 10], d : [10, -10] } } } } ).count() )
+
+// Look in a box much bigger than the one we have data in
+boxBounds = [[-100,-100], [-100, 100], [100,100], [100,-100]];
+assert.eq( num , t.find( { loc : { "$within" : { "$polygon" : boxBounds } } } ).count() , "Big Bounding Box Test" );
+
+t.drop();
+
+pacman = [
+ [0,2], [0,4], [2,6], [4,6], // Head
+ [6,4], [4,3], [6,2], // Mouth
+ [4,0], [2,0] // Bottom
+ ];
+
+t.save({loc: [1,3] }); // Add a point that's in
+t.ensureIndex( { loc : "2d" } );
+assert.isnull( db.getLastError() )
+
+assert.eq( 1 , t.find({loc : { $within : { $polygon : pacman }}} ).count() , "Pacman single point" );
+
+t.save({ loc : [5, 3] }) // Add a point that's out right in the mouth opening
+t.save({ loc : [3, 7] }) // Add a point above the center of the head
+t.save({ loc : [3,-1] }) // Add a point below the center of the bottom
+
+assert.eq( 1 , t.find({loc : { $within : { $polygon : pacman }}} ).count() , "Pacman double point" );
+
+// Make sure we can't add bad polygons
+okay = true
+try{
+ t.find( { loc : { $within : { $polygon : [1, 2] } } } ).toArray()
+ okay = false
+}
+catch(e){}
+assert(okay)
+try{
+ t.find( { loc : { $within : { $polygon : [[1, 2]] } } } ).toArray()
+ okay = false
+}
+catch(e){}
+assert(okay)
+try{
+ t.find( { loc : { $within : { $polygon : [[1, 2], [2, 3]] } } } ).toArray()
+ okay = false
+}
+catch(e){}
+assert(okay)
+
diff --git a/jstests/geo_polygon2.js b/jstests/geo_polygon2.js
new file mode 100644
index 0000000..617801b
--- /dev/null
+++ b/jstests/geo_polygon2.js
@@ -0,0 +1,266 @@
+//
+// More tests for N-dimensional polygon querying
+//
+
+// Create a polygon of some shape (no holes)
+// using turtle graphics. Basically, it will look like a very contorted octopus (quad-pus?) shape.
+// There are no holes, but some edges will probably touch.
+
+var numTests = 10
+
+for ( var test = 0; test < numTests; test++ ) {
+
+ Random.srand( 1337 + test );
+
+ var numTurtles = 4;
+ var gridSize = [ 40, 40 ];
+ var turtleSteps = 500;
+ var bounds = [ Random.rand() * -1000000 + 0.00001, Random.rand() * 1000000 + 0.00001 ]
+ var rotation = Math.PI * Random.rand();
+ var bits = Math.floor( Random.rand() * 32 );
+
+ printjson( { test : test, rotation : rotation, bits : bits })
+
+ var rotatePoint = function( x, y ) {
+
+ if( y == undefined ){
+ y = x[1]
+ x = x[0]
+ }
+
+ xp = x * Math.cos( rotation ) - y * Math.sin( rotation )
+ yp = y * Math.cos( rotation ) + x * Math.sin( rotation )
+
+ var scaleX = (bounds[1] - bounds[0]) / 360
+ var scaleY = (bounds[1] - bounds[0]) / 360
+
+ x *= scaleX
+ y *= scaleY
+
+ return [xp, yp]
+
+ }
+
+
+ var grid = []
+ for ( var i = 0; i < gridSize[0]; i++ ) {
+ grid.push( new Array( gridSize[1] ) )
+ }
+
+ grid.toString = function() {
+
+ var gridStr = "";
+ for ( var j = grid[0].length - 1; j >= -1; j-- ) {
+ for ( var i = 0; i < grid.length; i++ ) {
+ if ( i == 0 )
+ gridStr += ( j == -1 ? " " : ( j % 10) ) + ": "
+ if ( j != -1 )
+ gridStr += "[" + ( grid[i][j] != undefined ? grid[i][j] : " " ) + "]"
+ else
+ gridStr += " " + ( i % 10 ) + " "
+ }
+ gridStr += "\n"
+ }
+
+ return gridStr;
+ }
+
+ var turtles = []
+ for ( var i = 0; i < numTurtles; i++ ) {
+
+ var up = ( i % 2 == 0 ) ? i - 1 : 0;
+ var left = ( i % 2 == 1 ) ? ( i - 1 ) - 1 : 0;
+
+ turtles[i] = [
+ [ Math.floor( gridSize[0] / 2 ), Math.floor( gridSize[1] / 2 ) ],
+ [ Math.floor( gridSize[0] / 2 ) + left, Math.floor( gridSize[1] / 2 ) + up ] ];
+
+ grid[turtles[i][1][0]][turtles[i][1][1]] = i
+
+ }
+
+ grid[Math.floor( gridSize[0] / 2 )][Math.floor( gridSize[1] / 2 )] = "S"
+
+ // print( grid.toString() )
+
+ var pickDirections = function() {
+
+ var up = Math.floor( Random.rand() * 3 )
+ if ( up == 2 )
+ up = -1
+
+ if ( up == 0 ) {
+ var left = Math.floor( Random.rand() * 3 )
+ if ( left == 2 )
+ left = -1
+ } else
+ left = 0
+
+ if ( Random.rand() < 0.5 ) {
+ var swap = left
+ left = up
+ up = swap
+ }
+
+ return [ left, up ]
+ }
+
+ for ( var s = 0; s < turtleSteps; s++ ) {
+
+ for ( var t = 0; t < numTurtles; t++ ) {
+
+ var dirs = pickDirections()
+ var up = dirs[0]
+ var left = dirs[1]
+
+ var lastTurtle = turtles[t][turtles[t].length - 1]
+ var nextTurtle = [ lastTurtle[0] + left, lastTurtle[1] + up ]
+
+ if ( nextTurtle[0] >= gridSize[0] || nextTurtle[1] >= gridSize[1] || nextTurtle[0] < 0 || nextTurtle[1] < 0 )
+ continue;
+
+ if ( grid[nextTurtle[0]][nextTurtle[1]] == undefined ) {
+ turtles[t].push( nextTurtle )
+ grid[nextTurtle[0]][nextTurtle[1]] = t;
+ }
+
+ }
+ }
+
+ // print( grid.toString() )
+
+ turtlePaths = []
+ for ( var t = 0; t < numTurtles; t++ ) {
+
+ turtlePath = []
+
+ var nextSeg = function(currTurtle, prevTurtle) {
+
+ var pathX = currTurtle[0]
+
+ if ( currTurtle[1] < prevTurtle[1] ) {
+ pathX = currTurtle[0] + 1
+ pathY = prevTurtle[1]
+ } else if ( currTurtle[1] > prevTurtle[1] ) {
+ pathX = currTurtle[0]
+ pathY = currTurtle[1]
+ } else if ( currTurtle[0] < prevTurtle[0] ) {
+ pathX = prevTurtle[0]
+ pathY = currTurtle[1]
+ } else if ( currTurtle[0] > prevTurtle[0] ) {
+ pathX = currTurtle[0]
+ pathY = currTurtle[1] + 1
+ }
+
+ // print( " Prev : " + prevTurtle + " Curr : " + currTurtle + " path
+ // : "
+ // + [pathX, pathY]);
+
+ return [ pathX, pathY ]
+ }
+
+ for ( var s = 1; s < turtles[t].length; s++ ) {
+
+ currTurtle = turtles[t][s]
+ prevTurtle = turtles[t][s - 1]
+
+ turtlePath.push( nextSeg( currTurtle, prevTurtle ) )
+
+ }
+
+ for ( var s = turtles[t].length - 2; s >= 0; s-- ) {
+
+ currTurtle = turtles[t][s]
+ prevTurtle = turtles[t][s + 1]
+
+ turtlePath.push( nextSeg( currTurtle, prevTurtle ) )
+
+ }
+
+ // printjson( turtlePath )
+
+ // End of the line is not inside our polygon.
+ var lastTurtle = turtles[t][turtles[t].length - 1]
+ grid[lastTurtle[0]][lastTurtle[1]] = undefined
+
+ fixedTurtlePath = []
+ for ( var s = 1; s < turtlePath.length; s++ ) {
+
+ if ( turtlePath[s - 1][0] == turtlePath[s][0] && turtlePath[s - 1][1] == turtlePath[s][1] )
+ continue;
+
+ var up = turtlePath[s][1] - turtlePath[s - 1][1]
+ var right = turtlePath[s][0] - turtlePath[s - 1][0]
+ var addPoint = ( up != 0 && right != 0 )
+
+ if ( addPoint && up != right ) {
+ fixedTurtlePath.push( [ turtlePath[s][0], turtlePath[s - 1][1] ] )
+ } else if ( addPoint ) {
+ fixedTurtlePath.push( [ turtlePath[s - 1][0], turtlePath[s][1] ] )
+ }
+
+ fixedTurtlePath.push( turtlePath[s] )
+
+ }
+
+ // printjson( fixedTurtlePath )
+
+ turtlePaths.push( fixedTurtlePath )
+
+ }
+
+ // Uncomment to print polygon shape
+ // print( grid.toString() )
+
+ var polygon = []
+ for ( var t = 0; t < turtlePaths.length; t++ ) {
+ for ( var s = 0; s < turtlePaths[t].length; s++ ) {
+ polygon.push( rotatePoint( turtlePaths[t][s] ) )
+ }
+ }
+
+ // Uncomment to print out polygon
+ // printjson( polygon )
+
+ t = db.polytest2
+ t.drop()
+
+ // Test single and multi-location documents
+ var pointsIn = 0
+ var pointsOut = 0
+ var allPointsIn = []
+ var allPointsOut = []
+
+ for ( var j = grid[0].length - 1; j >= 0; j-- ) {
+ for ( var i = 0; i < grid.length; i++ ) {
+
+ var point = rotatePoint( [ i + 0.5, j + 0.5 ] )
+
+ t.insert( { loc : point } )
+ if ( grid[i][j] != undefined ){
+ allPointsIn.push( point )
+ pointsIn++
+ }
+ else{
+ allPointsOut.push( point )
+ pointsOut++
+ }
+ }
+ }
+
+ t.ensureIndex( { loc : "2d" }, { bits : 1 + bits, max : bounds[1], min : bounds[0] } )
+ assert.isnull( db.getLastError() )
+
+ t.insert( { loc : allPointsIn } )
+ t.insert( { loc : allPointsOut } )
+ allPoints = allPointsIn.concat( allPointsOut )
+ t.insert( { loc : allPoints } )
+
+ print( "Points : " )
+ printjson( { pointsIn : pointsIn, pointsOut : pointsOut } )
+ //print( t.find( { loc : { "$within" : { "$polygon" : polygon } } } ).count() )
+
+ assert.eq( gridSize[0] * gridSize[1] + 3, t.find().count() )
+ assert.eq( 2 + pointsIn, t.find( { loc : { "$within" : { "$polygon" : polygon } } } ).count() );
+
+}
diff --git a/jstests/geo_polygon3.js b/jstests/geo_polygon3.js
new file mode 100644
index 0000000..9fdff1a
--- /dev/null
+++ b/jstests/geo_polygon3.js
@@ -0,0 +1,54 @@
+//
+// Tests for polygon querying with varying levels of accuracy
+//
+
+var numTests = 31;
+
+for( var n = 0; n < numTests; n++ ){
+
+ t = db.geo_polygon3;
+ t.drop();
+
+ num = 0;
+ for ( x=1; x < 9; x++ ){
+ for ( y= 1; y < 9; y++ ){
+ o = { _id : num++ , loc : [ x , y ] };
+ t.save( o );
+ }
+ }
+
+ t.ensureIndex( { loc : "2d" }, { bits : 2 + n } );
+
+ triangle = [[0,0], [1,1], [0,2]];
+
+ // Look at only a small slice of the data within a triangle
+ assert.eq( 1 , t.find( { loc: { "$within": { "$polygon" : triangle }}} ).count() , "Triangle Test" );
+
+
+ boxBounds = [ [0,0], [0,10], [10,10], [10,0] ];
+
+ assert.eq( num , t.find( { loc : { "$within" : { "$polygon" : boxBounds } } } ).count() , "Bounding Box Test" );
+
+ // Look in a box much bigger than the one we have data in
+ boxBounds = [[-100,-100], [-100, 100], [100,100], [100,-100]];
+ assert.eq( num , t.find( { loc : { "$within" : { "$polygon" : boxBounds } } } ).count() , "Big Bounding Box Test" );
+
+ t.drop();
+
+ pacman = [
+ [0,2], [0,4], [2,6], [4,6], // Head
+ [6,4], [4,3], [6,2], // Mouth
+ [4,0], [2,0] // Bottom
+ ];
+
+ t.save({loc: [1,3] }); // Add a point that's in
+ t.ensureIndex( { loc : "2d" }, { bits : 2 + t } );
+
+ assert.eq( 1 , t.find({loc : { $within : { $polygon : pacman }}} ).count() , "Pacman single point" );
+
+ t.save({ loc : [5, 3] }) // Add a point that's out right in the mouth opening
+ t.save({ loc : [3, 7] }) // Add a point above the center of the head
+ t.save({ loc : [3,-1] }) // Add a point below the center of the bottom
+
+ assert.eq( 1 , t.find({loc : { $within : { $polygon : pacman }}} ).count() , "Pacman double point" );
+}
diff --git a/jstests/geo_regex0.js b/jstests/geo_regex0.js
new file mode 100644
index 0000000..79042b9
--- /dev/null
+++ b/jstests/geo_regex0.js
@@ -0,0 +1,18 @@
+// From SERVER-2247
+// Tests to make sure regex works with geo indices
+
+t = db.regex0
+t.drop()
+
+t.ensureIndex( { point : '2d', words : 1 } )
+t.insert( { point : [ 1, 1 ], words : [ 'foo', 'bar' ] } )
+
+regex = { words : /^f/ }
+geo = { point : { $near : [ 1, 1 ] } }
+both = { point : { $near : [ 1, 1 ] }, words : /^f/ }
+
+assert.eq(1, t.find( regex ).count() )
+assert.eq(1, t.find( geo ).count() )
+assert.eq(1, t.find( both ).count() )
+
+
diff --git a/jstests/geo_small_large.js b/jstests/geo_small_large.js
new file mode 100644
index 0000000..aff4743
--- /dev/null
+++ b/jstests/geo_small_large.js
@@ -0,0 +1,151 @@
+// SERVER-2386, general geo-indexing using very large and very small bounds
+
+load( "jstests/libs/geo_near_random.js" );
+
+// Do some random tests (for near queries) with very large and small ranges
+
+var test = new GeoNearRandomTest( "geo_small_large" );
+
+bounds = { min : -Math.pow( 2, 34 ), max : Math.pow( 2, 34 ) };
+
+test.insertPts( 50, bounds );
+
+printjson( db["geo_small_large"].find().limit( 10 ).toArray() )
+
+test.testPt( [ 0, 0 ] );
+test.testPt( test.mkPt( undefined, bounds ) );
+test.testPt( test.mkPt( undefined, bounds ) );
+test.testPt( test.mkPt( undefined, bounds ) );
+test.testPt( test.mkPt( undefined, bounds ) );
+
+test = new GeoNearRandomTest( "geo_small_large" );
+
+bounds = { min : -Math.pow( 2, -34 ), max : Math.pow( 2, -34 ) };
+
+test.insertPts( 50, bounds );
+
+printjson( db["geo_small_large"].find().limit( 10 ).toArray() )
+
+test.testPt( [ 0, 0 ] );
+test.testPt( test.mkPt( undefined, bounds ) );
+test.testPt( test.mkPt( undefined, bounds ) );
+test.testPt( test.mkPt( undefined, bounds ) );
+test.testPt( test.mkPt( undefined, bounds ) );
+
+
+// Check that our box and circle queries also work
+var scales = [ Math.pow( 2, 40 ), Math.pow( 2, -40 ), Math.pow(2, 2), Math.pow(3, -15), Math.pow(3, 15) ]
+
+for ( var i = 0; i < scales.length; i++ ) {
+
+ scale = scales[i];
+
+ var eps = Math.pow( 2, -7 ) * scale;
+ var radius = 5 * scale;
+ var max = 10 * scale;
+ var min = -max;
+ var range = max - min;
+ var bits = 2 + Math.random() * 30
+
+ var t = db["geo_small_large"]
+ t.drop();
+ t.ensureIndex( { p : "2d" }, { min : min, max : max, bits : bits })
+
+ var outPoints = 0;
+ var inPoints = 0;
+
+ printjson({ eps : eps, radius : radius, max : max, min : min, range : range, bits : bits })
+
+ // Put a point slightly inside and outside our range
+ for ( var j = 0; j < 2; j++ ) {
+ var currRad = ( j % 2 == 0 ? radius + eps : radius - eps );
+ t.insert( { p : { x : currRad, y : 0 } } );
+ print( db.getLastError() )
+ }
+
+ printjson( t.find().toArray() );
+
+ assert.eq( t.count( { p : { $within : { $center : [[0, 0], radius ] } } } ), 1, "Incorrect center points found!" )
+ assert.eq( t.count( { p : { $within : { $box : [ [ -radius, -radius ], [ radius, radius ] ] } } } ), 1,
+ "Incorrect box points found!" )
+
+ shouldFind = []
+ randoms = []
+
+ for ( var j = 0; j < 2; j++ ) {
+
+ var randX = Math.random(); // randoms[j].randX
+ var randY = Math.random(); // randoms[j].randY
+
+ randoms.push({ randX : randX, randY : randY })
+
+ var x = randX * ( range - eps ) + eps + min;
+ var y = randY * ( range - eps ) + eps + min;
+
+ t.insert( { p : [ x, y ] } );
+
+ if ( x * x + y * y > radius * radius ){
+ // print( "out point ");
+ // printjson({ x : x, y : y })
+ outPoints++
+ }
+ else{
+ // print( "in point ");
+ // printjson({ x : x, y : y })
+ inPoints++
+ shouldFind.push({ x : x, y : y, radius : Math.sqrt( x * x + y * y ) })
+ }
+ }
+
+ /*
+ function printDiff( didFind, shouldFind ){
+
+ for( var i = 0; i < shouldFind.length; i++ ){
+ var beenFound = false;
+ for( var j = 0; j < didFind.length && !beenFound ; j++ ){
+ beenFound = shouldFind[i].x == didFind[j].x &&
+ shouldFind[i].y == didFind[j].y
+ }
+
+ if( !beenFound ){
+ print( "Could not find: " )
+ shouldFind[i].inRadius = ( radius - shouldFind[i].radius >= 0 )
+ printjson( shouldFind[i] )
+ }
+ }
+ }
+
+ print( "Finding random pts... ")
+ var found = t.find( { p : { $within : { $center : [[0, 0], radius ] } } } ).toArray()
+ var didFind = []
+ for( var f = 0; f < found.length; f++ ){
+ //printjson( found[f] )
+ var x = found[f].p.x != undefined ? found[f].p.x : found[f].p[0]
+ var y = found[f].p.y != undefined ? found[f].p.y : found[f].p[1]
+ didFind.push({ x : x, y : y, radius : Math.sqrt( x * x + y * y ) })
+ }
+
+ print( "Did not find but should: ")
+ printDiff( didFind, shouldFind )
+ print( "Found but should not have: ")
+ printDiff( shouldFind, didFind )
+ */
+
+ assert.eq( t.count( { p : { $within : { $center : [[0, 0], radius ] } } } ), 1 + inPoints,
+ "Incorrect random center points found!\n" + tojson( randoms ) )
+
+ print("Found " + inPoints + " points in and " + outPoints + " points out.");
+
+ var found = t.find( { p : { $near : [0, 0], $maxDistance : radius } } ).toArray()
+ var dist = 0;
+ for( var f = 0; f < found.length; f++ ){
+ var x = found[f].p.x != undefined ? found[f].p.x : found[f].p[0]
+ var y = found[f].p.y != undefined ? found[f].p.y : found[f].p[1]
+ print( "Dist: x : " + x + " y : " + y + " dist : " + Math.sqrt( x * x + y * y) + " radius : " + radius )
+ }
+
+ assert.eq( t.count( { p : { $near : [0, 0], $maxDistance : radius } } ), 1 + inPoints,
+ "Incorrect random center points found near!\n" + tojson( randoms ) )
+
+}
+
diff --git a/jstests/geo_uniqueDocs.js b/jstests/geo_uniqueDocs.js
new file mode 100644
index 0000000..b77a3b4
--- /dev/null
+++ b/jstests/geo_uniqueDocs.js
@@ -0,0 +1,38 @@
+// Test uniqueDocs option for $within and geoNear queries SERVER-3139
+
+collName = 'geo_uniqueDocs_test'
+t = db.geo_uniqueDocs_test
+t.drop()
+
+t.save( { locs : [ [0,2], [3,4]] } )
+t.save( { locs : [ [6,8], [10,10] ] } )
+
+t.ensureIndex( { locs : '2d' } )
+
+// geoNear tests
+assert.eq(4, db.runCommand({geoNear:collName, near:[0,0]}).results.length)
+assert.eq(4, db.runCommand({geoNear:collName, near:[0,0], uniqueDocs:false}).results.length)
+assert.eq(2, db.runCommand({geoNear:collName, near:[0,0], uniqueDocs:true}).results.length)
+results = db.runCommand({geoNear:collName, near:[0,0], num:2}).results
+assert.eq(2, results.length)
+assert.eq(2, results[0].dis)
+assert.eq(5, results[1].dis)
+results = db.runCommand({geoNear:collName, near:[0,0], num:2, uniqueDocs:true}).results
+assert.eq(2, results.length)
+assert.eq(2, results[0].dis)
+assert.eq(10, results[1].dis)
+
+// $within tests
+
+assert.eq(2, t.find( {locs: {$within: {$box : [[0,0],[9,9]]}}}).count())
+assert.eq(2, t.find( {locs: {$within: {$box : [[0,0],[9,9]], $uniqueDocs : true}}}).count())
+assert.eq(3, t.find( {locs: {$within: {$box : [[0,0],[9,9]], $uniqueDocs : false}}}).count())
+
+assert.eq(2, t.find( {locs: {$within: {$center : [[5,5],7], $uniqueDocs : true}}}).count())
+assert.eq(3, t.find( {locs: {$within: {$center : [[5,5],7], $uniqueDocs : false}}}).count())
+
+assert.eq(2, t.find( {locs: {$within: {$centerSphere : [[5,5],1], $uniqueDocs : true}}}).count())
+assert.eq(4, t.find( {locs: {$within: {$centerSphere : [[5,5],1], $uniqueDocs : false}}}).count())
+
+assert.eq(2, t.find( {locs: {$within: {$polygon : [[0,0],[0,9],[9,9]], $uniqueDocs : true}}}).count())
+assert.eq(3, t.find( {locs: {$within: {$polygon : [[0,0],[0,9],[9,9]], $uniqueDocs : false}}}).count())
diff --git a/jstests/getlog1.js b/jstests/getlog1.js
new file mode 100644
index 0000000..75fbeab
--- /dev/null
+++ b/jstests/getlog1.js
@@ -0,0 +1,24 @@
+// to run:
+// ./mongo jstests/<this-file>
+
+contains = function(arr,obj) {
+ var i = arr.length;
+ while (i--) {
+ if (arr[i] === obj) {
+ return true;
+ }
+ }
+ return false;
+}
+
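+// getLog:"*" lists the available log names; getLog:"<name>" returns the recent log lines for that log.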
+var resp = db.adminCommand({getLog:"*"})
+assert( resp.ok == 1, "error executing getLog command" );
+assert( resp.names, "no names field" );
+assert( resp.names.length > 0, "names array is empty" );
+assert( contains(resp.names,"global") , "missing global category" );
+assert( !contains(resp.names,"butty") , "unexpected butty category" );
+
+resp = db.adminCommand({getLog:"global"})
+assert( resp.ok == 1, "error executing getLog command" );
+assert( resp.log, "no log field" );
+assert( resp.log.length > 0 , "no log lines" );
diff --git a/jstests/group7.js b/jstests/group7.js
new file mode 100644
index 0000000..5bf9232
--- /dev/null
+++ b/jstests/group7.js
@@ -0,0 +1,43 @@
+// Test yielding group command SERVER-1395
+
+t = db.jstests_group7;
+t.drop();
+
+function checkForYield( docs, updates ) {
+ t.drop();
+ a = 0;
+ for( var i = 0; i < docs; ++i ) {
+ t.save( {a:a} );
+ }
+ db.getLastError();
+
+ // Iteratively update all a values atomically.
+ p = startParallelShell( 'for( a = 0; a < ' + updates + '; ++a ) { db.jstests_group7.update( {$atomic:true}, {$set:{a:a}}, false, true ); db.getLastError(); }' );
+
+ for( var i = 0; i < updates; ++i ) {
+ ret = t.group({key:{a:1},reduce:function(){},initial:{}});
+ // Check if group sees more than one a value, indicating that it yielded.
+ if ( ret.length > 1 ) {
+ p();
+ return true;
+ }
+ printjson( ret );
+ }
+
+ p();
+ return false;
+}
+
+var yielded = false;
+var docs = 1500;
+var updates = 50;
+for( var j = 1; j <= 6; ++j ) {
+ if ( checkForYield( docs, updates ) ) {
+ yielded = true;
+ break;
+ }
+ // Increase docs and updates to encourage yielding.
+ docs *= 2;
+ updates *= 2;
+}
+assert( yielded ); \ No newline at end of file
diff --git a/jstests/hint1.js b/jstests/hint1.js
index 63a5fa6..b5a580f 100644
--- a/jstests/hint1.js
+++ b/jstests/hint1.js
@@ -5,6 +5,12 @@ p.drop();
p.save( { ts: new Date( 1 ), cls: "entry", verticals: "alleyinsider", live: true } );
p.ensureIndex( { ts: 1 } );
-e = p.find( { live: true, ts: { $lt: new Date( 1234119308272 ) }, cls: "entry", verticals: " alleyinsider" } ).sort( { ts: -1 } ).hint( { ts: 1 } ).explain();
-assert.eq( e.indexBounds.ts[0][0].getTime(), new Date( 1234119308272 ).getTime() , "A" );
-assert.eq( 0 , e.indexBounds.ts[0][1].getTime() , "B" );
+e = p.find( { live: true, ts: { $lt: new Date( 1234119308272 ) }, cls: "entry", verticals: "alleyinsider" } ).sort( { ts: -1 } ).hint( { ts: 1 } ).explain();
+assert.eq(e.indexBounds.ts[0][0].getTime(), new Date(1234119308272).getTime(), "A");
+
+//printjson(e);
+
+assert.eq( /* the value just below the minimum date in BSON order is the boolean true */ true, e.indexBounds.ts[0][1], "B");
+
+assert.eq(1, p.find({ live: true, ts: { $lt: new Date(1234119308272) }, cls: "entry", verticals: "alleyinsider" }).sort({ ts: -1 }).hint({ ts: 1 }).count());
+
diff --git a/jstests/idhack.js b/jstests/idhack.js
new file mode 100644
index 0000000..9614ebc
--- /dev/null
+++ b/jstests/idhack.js
@@ -0,0 +1,23 @@
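+// Test the fast path for exact _id lookups (the "id hack"): exact matches on subdocument and
+// scalar _id values, plus range queries and updates addressed by _id.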
+
+t = db.idhack
+t.drop()
+
+
+t.insert( { _id : { x : 1 } , z : 1 } )
+t.insert( { _id : { x : 2 } , z : 2 } )
+t.insert( { _id : { x : 3 } , z : 3 } )
+t.insert( { _id : 1 , z : 4 } )
+t.insert( { _id : 2 , z : 5 } )
+t.insert( { _id : 3 , z : 6 } )
+
+assert.eq( 2 , t.findOne( { _id : { x : 2 } } ).z , "A1" )
+assert.eq( 2 , t.find( { _id : { $gte : 2 } } ).count() , "A2" )
+assert.eq( 2 , t.find( { _id : { $gte : 2 } } ).itcount() , "A3" )
+
+t.update( { _id : { x : 2 } } , { $set : { z : 7 } } )
+assert.eq( 7 , t.findOne( { _id : { x : 2 } } ).z , "B1" )
+
+t.update( { _id : { $gte : 2 } } , { $set : { z : 8 } } , false , true )
+assert.eq( 4 , t.findOne( { _id : 1 } ).z , "C1" )
+assert.eq( 8 , t.findOne( { _id : 2 } ).z , "C2" )
+assert.eq( 8 , t.findOne( { _id : 3 } ).z , "C3" )
diff --git a/jstests/in8.js b/jstests/in8.js
new file mode 100644
index 0000000..5e7e587
--- /dev/null
+++ b/jstests/in8.js
@@ -0,0 +1,23 @@
+// SERVER-2829 Test arrays matching themselves within a $in expression.
+
+t = db.jstests_in8;
+t.drop();
+
+t.save( {key: [1]} );
+t.save( {key: ['1']} );
+t.save( {key: [[2]]} );
+
+function doTest() {
+ assert.eq( 1, t.count( {key:[1]} ) );
+ assert.eq( 1, t.count( {key:{$in:[[1]]}} ) );
+ assert.eq( 1, t.count( {key:{$in:[[1]],$ne:[2]}} ) );
+ assert.eq( 1, t.count( {key:{$in:[['1']],$type:2}} ) );
+ assert.eq( 1, t.count( {key:['1']} ) );
+ assert.eq( 1, t.count( {key:{$in:[['1']]}} ) );
+ assert.eq( 1, t.count( {key:[2]} ) );
+ assert.eq( 1, t.count( {key:{$in:[[2]]}} ) );
+}
+
+doTest();
+t.ensureIndex( {key:1} );
+doTest();
diff --git a/jstests/in9.js b/jstests/in9.js
new file mode 100644
index 0000000..34cefb8
--- /dev/null
+++ b/jstests/in9.js
@@ -0,0 +1,35 @@
+// SERVER-2343 Test $in empty array matching.
+
+t = db.jstests_in9;
+t.drop();
+
+function someData() {
+ t.remove();
+ t.save( {key: []} );
+}
+
+function moreData() {
+ someData();
+ t.save( {key: [1]} );
+ t.save( {key: ['1']} );
+ t.save( {key: null} );
+ t.save( {} );
+}
+
+function check() {
+ assert.eq( 1, t.count( {key:[]} ) );
+ assert.eq( 1, t.count( {key:{$in:[[]]}} ) );
+}
+
+function doTest() {
+ someData();
+ check();
+ moreData();
+ check();
+}
+
+doTest();
+
+// SERVER-1943 not fixed yet
+t.ensureIndex( {key:1} );
+doTest();
diff --git a/jstests/ina.js b/jstests/ina.js
new file mode 100644
index 0000000..cf614ab
--- /dev/null
+++ b/jstests/ina.js
@@ -0,0 +1,15 @@
+// Uassert when $elemMatch is attempted within $in SERVER-3545
+
+t = db.jstests_ina;
+t.drop();
+t.save( {} );
+
+assert.throws( function() { t.find( {a:{$in:[{$elemMatch:{b:1}}]}} ).itcount(); } );
+assert.throws( function() { t.find( {a:{$not:{$in:[{$elemMatch:{b:1}}]}}} ).itcount(); } );
+
+assert.throws( function() { t.find( {a:{$nin:[{$elemMatch:{b:1}}]}} ).itcount(); } );
+assert.throws( function() { t.find( {a:{$not:{$nin:[{$elemMatch:{b:1}}]}}} ).itcount(); } );
+
+// NOTE Above we don't check cases like {b:2,$elemMatch:{b:3,4}} - generally
+// we assume that the first key is $elemMatch if any key is, and validating
+// every key is expensive in some cases. \ No newline at end of file
diff --git a/jstests/index11.js b/jstests/index11.js
index 2a552dd..0f6aa33 100644
--- a/jstests/index11.js
+++ b/jstests/index11.js
@@ -1,13 +1,29 @@
// Reindex w/ field too large to index
coll = db.jstests_index11;
-coll.drop();
+coll.drop();
+
+var str = "xxxxxxxxxxxxxxxx";
+str = str + str;
+str = str + str;
+str = str + str;
+str = str + str;
+str = str + str;
+str = str + str;
+str = str + str;
+str = str + str;
+str = str + 'q';
+
+coll.insert({ k: 'a', v: str });
+
+assert.eq(0, coll.find({ "k": "x" }).count(), "expected zero keys 1");
-coll.ensureIndex({"k": 1, "v": 1});
-coll.insert({k: "x", v: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"});
-assert.eq(0, coll.find({"k": "x"}).count()); // SERVER-1716
+coll.ensureIndex({"k": 1, "v": 1});
+coll.insert({ k: "x", v: str });
-coll.dropIndexes();
-coll.ensureIndex({"k": 1, "v": 1});
+assert.eq(0, coll.find({"k": "x"}).count(), "B"); // SERVER-1716
-assert.eq(0, coll.find({"k": "x"}).count());
+coll.dropIndexes();
+coll.ensureIndex({"k": 1, "v": 1});
+
+assert.eq(0, coll.find({ "k": "x" }).count(), "expected zero keys 2");
diff --git a/jstests/index9.js b/jstests/index9.js
index c832783..04b9009 100644
--- a/jstests/index9.js
+++ b/jstests/index9.js
@@ -1,7 +1,15 @@
t = db.jstests_index9;
t.drop();
+db.createCollection( "jstests_index9" );
+assert.eq( 1, db.system.indexes.count( {ns: "test.jstests_index9"} ), "There should be 1 index with default collection" );
+t.drop();
+db.createCollection( "jstests_index9", {autoIndexId: true} );
+assert.eq( 1, db.system.indexes.count( {ns: "test.jstests_index9"} ), "There should be 1 index if autoIndexId: true" );
+
+t.drop();
db.createCollection( "jstests_index9", {autoIndexId:false} );
+assert.eq( 0, db.system.indexes.count( {ns: "test.jstests_index9"} ), "There should be 0 indexes if autoIndexId: false" );
t.createIndex( { _id:1 } );
assert.eq( 1, db.system.indexes.count( {ns: "test.jstests_index9"} ) );
t.createIndex( { _id:1 } );
diff --git a/jstests/index_big1.js b/jstests/index_big1.js
new file mode 100644
index 0000000..61260a3
--- /dev/null
+++ b/jstests/index_big1.js
@@ -0,0 +1,39 @@
+// check where "key too big" happens
+
+t = db.index_big1;
+
+N = 3200;
+t.drop();
+
+var s = "";
+
+for ( i=0; i<N; i++ ) {
+
+ t.insert( { a : i + .5 , x : s } )
+
+ s += "x";
+}
+
+t.ensureIndex( { a : 1 , x : 1 } )
+
+assert.eq( 2 , t.getIndexes().length );
+
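+// "flip" records the first value of i at which the (a, x) index key became too large to be
+// indexed, i.e. where indexed lookups stop finding the inserted document.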
+flip = -1;
+
+for ( i=0; i<N; i++ ) {
+ var c = t.find( { a : i + .5 } ).count();
+ if ( c == 1 ) {
+ assert.eq( -1 , flip , "flipping : " + i );
+ }
+ else {
+ if ( flip == -1 ) {
+ // print( "state flipped at: " + i );
+ flip = i;
+ }
+ }
+}
+
+//print(flip);
+//print(flip/1024);
+
+assert.eq( /*v0 index : 797*/1002, flip , "flip changed" );
diff --git a/jstests/index_bigkeys.js b/jstests/index_bigkeys.js
new file mode 100755
index 0000000..dfb05ad
--- /dev/null
+++ b/jstests/index_bigkeys.js
@@ -0,0 +1,78 @@
+
+t = db.bigkeysidxtest;
+
+var keys = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
+
+var str = "aaaabbbbccccddddeeeeffffgggghhhh";
+str = str + str;
+
+for (var i = 2; i < 10; i++) {
+ keys[i] = str;
+ str = str + str;
+}
+print(str.length);
+
+var dir = 1;
+
+function go() {
+ if (dir == 1) {
+ for (var i = 1; i < 10; i++) {
+ t.insert({ _id: i, k: keys[i] });
+ }
+ }
+ else {
+ for (var i = 10; i >= 1; i--) {
+ t.insert({ _id: i, k: keys[i] });
+ }
+ }
+}
+
+var expect = null;
+
+var ok = true;
+
+function check() {
+ assert(t.validate().valid);
+
+ var c = t.find({ k: /^a/ }).count();
+
+ print("keycount:" + c);
+
+ if (expect) {
+ if (expect != c) {
+ print("count of keys doesn't match expected count of : " + expect + " got: " + c);
+ ok = false;
+ }
+ }
+ else {
+ expect = c;
+ }
+
+ //print(t.validate().result);
+}
+
+for (var pass = 1; pass <= 2; pass++) {
+ print("pass:" + pass);
+
+ t.drop();
+ t.ensureIndex({ k: 1 });
+ go();
+ check(); // check incremental addition
+
+ t.reIndex();
+ check(); // check bottom up
+
+ t.drop();
+ go();
+ t.ensureIndex({ k: 1 });
+ check(); // check bottom up again without reindex explicitly
+
+ t.drop();
+ go();
+ t.ensureIndex({ k: 1 }, { background: true });
+ check(); // check background (which should be incremental)
+
+ dir = -1;
+}
+
+assert(ok,"not ok");
diff --git a/jstests/index_check5.js b/jstests/index_check5.js
index 90ac301..eabb929 100644
--- a/jstests/index_check5.js
+++ b/jstests/index_check5.js
@@ -14,4 +14,4 @@ t.save( { "name" : "Player2" ,
assert.eq( 2 , t.find( { "scores.level": 2, "scores.score": {$gt:30} } ).itcount() , "A" );
t.ensureIndex( { "scores.level" : 1 , "scores.score" : 1 } );
-assert.eq( 1 , t.find( { "scores.level": 2, "scores.score": {$gt:30} } ).itcount() , "B" );
+assert.eq( 2 , t.find( { "scores.level": 2, "scores.score": {$gt:30} } ).itcount() , "B" );
diff --git a/jstests/index_check8.js b/jstests/index_check8.js
index bc267df..1964ecb 100644
--- a/jstests/index_check8.js
+++ b/jstests/index_check8.js
@@ -4,12 +4,18 @@ t.drop();
t.insert( { a : 1 , b : 1 , c : 1 , d : 1 , e : 1 } )
t.ensureIndex( { a : 1 , b : 1 , c : 1 } )
-t.ensureIndex( { a : 1 , b : 1 , d : 1 , e : 1 } )
+t.ensureIndex({ a: 1, b: 1, d: 1, e: 1 })
+
+// this block could be added to many tests in theory...
+if ((new Date()) % 10 == 0) {
+ var coll = t.toString().substring(db.toString().length + 1);
+ print("compacting " + coll + " before continuing testing");
+    // don't check the return code - compact returns false on mongos
+ print("ok: " + db.runCommand({ compact: coll, dev: true }));
+}
x = t.find( { a : 1 , b : 1 , d : 1 } ).sort( { e : 1 } ).explain()
assert( ! x.scanAndOrder , "A : " + tojson( x ) )
x = t.find( { a : 1 , b : 1 , c : 1 , d : 1 } ).sort( { e : 1 } ).explain()
//assert( ! x.scanAndOrder , "B : " + tojson( x ) )
-
-
diff --git a/jstests/index_fornew.js b/jstests/index_fornew.js
deleted file mode 100644
index 6c3c158..0000000
--- a/jstests/index_fornew.js
+++ /dev/null
@@ -1,13 +0,0 @@
-
-t = db.index_fornew;
-t.drop();
-
-t.insert( { x : 1 } )
-t.ensureIndex( { x : 1 } , { v : 1 } )
-assert.eq( 1 , t.getIndexes()[1].v , tojson( t.getIndexes() ) );
-
-assert.throws( function(){ t.findOne( { x : 1 } ); } )
-
-t.reIndex();
-assert.eq( 0 , t.getIndexes()[1].v , tojson( t.getIndexes() ) );
-assert( t.findOne( { x : 1 } ) );
diff --git a/jstests/index_maxkey.js b/jstests/index_maxkey.js
new file mode 100644
index 0000000..eba8126
--- /dev/null
+++ b/jstests/index_maxkey.js
@@ -0,0 +1,27 @@
+
+t = db.index_maxkey;
+
+for ( var indexVersion=0; indexVersion<=1; indexVersion++ ) {
+ t.drop();
+
+ s = "";
+
+ t.ensureIndex( { s : 1 } , { v : indexVersion } );
+ while ( true ) {
+ t.insert( { s : s } );
+ if ( t.find().count() == t.find().sort( { s : 1 } ).itcount() ) {
+ s += ".....";
+ continue;
+ }
+ var sz = Object.bsonsize( { s : s } ) - 2;
+ print( "indexVersion: " + indexVersion + " max key is : " + sz );
+ if ( indexVersion == 0 ) {
+ assert.eq( 821 , sz );
+ }
+ else if ( indexVersion == 1 ) {
+ assert.eq( 1026 , sz );
+ }
+ break;
+ }
+
+}
diff --git a/jstests/indexbindata.js b/jstests/indexbindata.js
new file mode 100755
index 0000000..e69de29
--- /dev/null
+++ b/jstests/indexbindata.js
diff --git a/jstests/indexk.js b/jstests/indexk.js
new file mode 100644
index 0000000..7cef95a
--- /dev/null
+++ b/jstests/indexk.js
@@ -0,0 +1,58 @@
+// Check correct result set when bounds each match different multikeys SERVER-958
+
+t = db.jstests_indexk;
+t.drop();
+
+t.insert({a:[1,10]});
+
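+// With an array value, each bound may be satisfied by a different element (here 10 > 2 and
+// 1 < 5), so every count below is 1 even though no single element lies inside the range.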
+assert.eq( 1, t.count({a: {$gt:2, $lt:5}}) );
+assert.eq( 1, t.count({a: {$gt:2}}) );
+assert.eq( 1, t.count({a: {$lt:5}}) );
+
+assert.eq( 1, t.count({a: {$gt:5, $lt:2}}) );
+assert.eq( 1, t.count({a: {$gt:5}}) );
+assert.eq( 1, t.count({a: {$lt:2}}) );
+
+t.ensureIndex({a:1});
+
+// Check that only one constraint limits the index range for a multikey index.
+// The constraint used is arbitrary, but testing current behavior here.
+
+assert.eq( 1, t.count({a: {$gt: 2, $lt:5}}) );
+e = t.find({a: {$gt: 2, $lt:5}}).explain();
+assert.eq( 1, e.nscanned );
+assert.eq( 1, e.n );
+assert.eq( 2, e.indexBounds.a[ 0 ][ 0 ] );
+// Check that upper bound is large ( > 5 ).
+assert.lt( 1000, e.indexBounds.a[ 0 ][ 1 ] );
+
+assert.eq( 1, t.count({a: {$lt: 5, $gt:2}}) );
+e = t.find({a: {$lt: 5, $gt:2}}).explain();
+assert.eq( 1, e.nscanned );
+assert.eq( 1, e.n );
+// Check that lower bound is low ( < 2 ).
+assert.gt( -1000, e.indexBounds.a[ 0 ][ 0 ] );
+assert.eq( 5, e.indexBounds.a[ 0 ][ 1 ] );
+
+// Now check cases where no match is possible with a single key index.
+
+assert.eq( 1, t.count({a: {$gt: 5, $lt:2}}) );
+e = t.find({a: {$gt: 5, $lt:2}}).explain();
+assert.eq( 1, e.nscanned );
+assert.eq( 1, e.n );
+assert.eq( 5, e.indexBounds.a[ 0 ][ 0 ] );
+// Check that upper bound is large ( > 5 ).
+assert.lt( 1000, e.indexBounds.a[ 0 ][ 1 ] );
+
+assert.eq( 1, t.count({a: {$lt: 2, $gt:5}}) );
+e = t.find({a: {$lt: 2, $gt:5}}).explain();
+assert.eq( 1, e.nscanned );
+assert.eq( 1, e.n );
+// Check that lower bound is low ( < 2 ).
+assert.gt( -1000, e.indexBounds.a[ 0 ][ 0 ] );
+assert.eq( 2, e.indexBounds.a[ 0 ][ 1 ] );
+
+assert.eq( 1, t.count({a: {$gt: 2}}) );
+assert.eq( 1, t.count({a: {$lt: 5}}) );
+
+// Check good performance of single key index \ No newline at end of file
diff --git a/jstests/indexl.js b/jstests/indexl.js
new file mode 100644
index 0000000..666586d
--- /dev/null
+++ b/jstests/indexl.js
@@ -0,0 +1,27 @@
+// Check nonoverlapping $in/$all with multikeys SERVER-2165
+
+t = db.jstests_indexl;
+
+function test(t) {
+ t.save( {a:[1,2]} );
+ assert.eq( 1, t.count( {a:{$all:[1],$in:[2]}} ) );
+ assert.eq( 1, t.count( {a:{$all:[2],$in:[1]}} ) );
+ assert.eq( 1, t.count( {a:{$in:[2],$all:[1]}} ) );
+ assert.eq( 1, t.count( {a:{$in:[1],$all:[2]}} ) );
+ assert.eq( 1, t.count( {a:{$all:[1],$in:[2]}} ) );
+ t.save({a:[3,4]})
+ t.save({a:[2,3]})
+ t.save({a:[1,2,3,4]})
+ assert.eq( 2, t.count( {a:{$in:[2],$all:[1]}} ) );
+ assert.eq( 1, t.count( {a:{$in:[3],$all:[1,2]}} ) );
+ assert.eq( 1, t.count( {a:{$in:[1],$all:[3]}} ) );
+ assert.eq( 2, t.count( {a:{$in:[2,3],$all:[1]}} ) );
+ assert.eq( 1, t.count( {a:{$in:[4],$all:[2,3]}} ) );
+ assert.eq( 3, t.count( {a:{$in:[1,3],$all:[2]}} ) );
+}
+
+t.drop();
+test(t);
+t.drop();
+t.ensureIndex( {a:1} );
+test(t); \ No newline at end of file
diff --git a/jstests/indexm.js b/jstests/indexm.js
new file mode 100644
index 0000000..6b31ea6
--- /dev/null
+++ b/jstests/indexm.js
@@ -0,0 +1,38 @@
+// Check proper range combinations with or clauses overlapping non or portion of query SERVER-2302
+
+t = db.jstests_indexm;
+t.drop();
+
+t.save( { a : [ { x : 1 } , { x : 2 } , { x : 3 } , { x : 4 } ] } )
+
+function test(){
+ assert.eq( 1, t.count(
+ {
+ a : { x : 1 } ,
+ "$or" : [ { a : { x : 2 } } , { a : { x : 3 } } ]
+ }
+ ) );
+}
+
+// The first find will return a result since there isn't an index.
+test();
+
+// Now create an index.
+t.ensureIndex({"a":1});
+test();
+// SERVER-3105
+//assert( !t.find(
+// {
+// a : { x : 1 } ,
+// "$or" : [ { a : { x : 2 } } , { a : { x : 3 } } ]
+// }
+// ).explain().clauses );
+
+// Now create a different index.
+t.dropIndexes();
+t.ensureIndex({"a.x":1});
+test();
+
+// Drop the indexes.
+t.dropIndexes();
+test(); \ No newline at end of file
diff --git a/jstests/indexn.js b/jstests/indexn.js
new file mode 100644
index 0000000..d5800e4
--- /dev/null
+++ b/jstests/indexn.js
@@ -0,0 +1,41 @@
+// Check fast detection of empty result set with a single key index SERVER-958.
+
+t = db.jstests_indexn;
+t.drop();
+
+function checkImpossibleMatchDummyCursor( explain ) {
+ assert.eq( 'BasicCursor', explain.cursor );
+ assert.eq( 0, explain.nscanned );
+ assert.eq( 0, explain.n );
+}
+
+t.save( {a:1,b:[1,2]} );
+
+t.ensureIndex( {a:1} );
+t.ensureIndex( {b:1} );
+
+assert.eq( 0, t.count( {a:{$gt:5,$lt:0}} ) );
+// {a:1} is a single key index, so no matches are possible for this query
+checkImpossibleMatchDummyCursor( t.find( {a:{$gt:5,$lt:0}} ).explain() );
+
+assert.eq( 0, t.count( {a:{$gt:5,$lt:0},b:2} ) );
+checkImpossibleMatchDummyCursor( t.find( {a:{$gt:5,$lt:0},b:2} ).explain() );
+
+assert.eq( 0, t.count( {a:{$gt:5,$lt:0},b:{$gt:0,$lt:5}} ) );
+checkImpossibleMatchDummyCursor( t.find( {a:{$gt:5,$lt:0},b:{$gt:0,$lt:5}} ).explain() );
+
+assert.eq( 1, t.count( {$or:[{a:{$gt:5,$lt:0}},{a:1}]} ) );
+checkImpossibleMatchDummyCursor( t.find( {$or:[{a:{$gt:5,$lt:0}},{a:1}]} ).explain().clauses[ 0 ] );
+
+// A following invalid range is eliminated.
+assert.eq( 1, t.count( {$or:[{a:1},{a:{$gt:5,$lt:0}}]} ) );
+assert.eq( null, t.find( {$or:[{a:1},{a:{$gt:5,$lt:0}}]} ).explain().clauses );
+
+t.save( {a:2} );
+
+// An intermediate invalid range is eliminated.
+assert.eq( 2, t.count( {$or:[{a:1},{a:{$gt:5,$lt:0}},{a:2}]} ) );
+explain = t.find( {$or:[{a:1},{a:{$gt:5,$lt:0}},{a:2}]} ).explain();
+assert.eq( 2, explain.clauses.length );
+assert.eq( [[1,1]], explain.clauses[ 0 ].indexBounds.a );
+assert.eq( [[2,2]], explain.clauses[ 1 ].indexBounds.a );
diff --git a/jstests/indexo.js b/jstests/indexo.js
new file mode 100644
index 0000000..e50c099
--- /dev/null
+++ b/jstests/indexo.js
@@ -0,0 +1,32 @@
+// Check that dummy basic cursors work correctly SERVER-958.
+
+t = db.jstests_indexo;
+t.drop();
+
+function checkDummyCursor( explain ) {
+ assert.eq( "BasicCursor", explain.cursor );
+ assert.eq( 0, explain.nscanned );
+ assert.eq( 0, explain.n );
+}
+
+t.save( {a:1} );
+
+t.ensureIndex( {a:1} );
+
+// Match is impossible, so no documents should be scanned.
+checkDummyCursor( t.find( {a:{$gt:5,$lt:0}} ).explain() );
+
+t.drop();
+checkDummyCursor( t.find( {a:1} ).explain() );
+
+t.save( {a:1} );
+t.ensureIndex( {a:1} );
+checkDummyCursor( t.find( {$or:[{a:{$gt:5,$lt:0}},{a:1}]} ).explain().clauses[ 0 ] );
+
+t.drop();
+t.save( {a:5,b:[1,2]} );
+t.ensureIndex( {a:1,b:1} );
+t.ensureIndex( {a:1} );
+// The first clause will use index {a:1,b:1} with the current implementation.
+// The second clause has no valid values for index {a:1} so it will use a dummy cursor.
+checkDummyCursor( t.find( {$or:[{b:{$exists:true},a:{$gt:4}},{a:{$lt:6,$gt:4}}]} ).explain().clauses[ 1 ] );
diff --git a/jstests/indexp.js b/jstests/indexp.js
new file mode 100644
index 0000000..ee511eb
--- /dev/null
+++ b/jstests/indexp.js
@@ -0,0 +1,58 @@
+// Check recording and playback of good query plans with different index types SERVER-958.
+
+t = db.jstests_indexp;
+t.drop();
+
+function expectRecordedPlan( query, idx ) {
+ assert.eq( "BtreeCursor " + idx, t.find( query ).explain( true ).oldPlan.cursor );
+}
+
+function expectNoRecordedPlan( query ) {
+ assert.isnull( t.find( query ).explain( true ).oldPlan );
+}
+
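+// explain(true) is verbose explain; if a winning plan has been recorded (cached) for the
+// query's pattern it is reported in the "oldPlan" field.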
+// Basic test
+t.drop();
+t.ensureIndex( {a:1} );
+t.save( {a:1} );
+t.find( {a:1} ).itcount();
+expectRecordedPlan( {a:1}, "a_1" );
+
+// Index type changes
+t.drop();
+t.ensureIndex( {a:1} );
+t.save( {a:1} );
+t.find( {a:1} ).itcount();
+t.save( {a:[1,2]} );
+expectRecordedPlan( {a:1}, "a_1" );
+
+// Multi key QueryPattern reuses index
+t.drop();
+t.ensureIndex( {a:1} );
+t.save( {a:[1,2]} );
+t.find( {a:{$gt:0}} ).itcount();
+expectRecordedPlan( {a:{$gt:0,$lt:5}}, "a_1" );
+
+// Single key QueryPattern can still be used to find best plan - at least for now.
+t.drop();
+t.ensureIndex( {a:1} );
+t.save( {a:1} );
+t.find( {a:{$gt:0,$lt:5}} ).itcount();
+t.save( {a:[1,2]} );
+expectRecordedPlan( {a:{$gt:0,$lt:5}}, "a_1" );
+
+// Invalid query with only valid fields used
+if ( 0 ) { // SERVER-2864
+t.drop();
+t.ensureIndex( {a:1} );
+t.save( {a:1} );
+t.find( {a:1,b:{$gt:5,$lt:0}} ).itcount();
+expectRecordedPlan( {a:{$gt:0,$lt:5}}, "a_1" );
+}
+
+// Dummy query plan not stored
+t.drop();
+t.ensureIndex( {a:1} );
+t.save( {a:1} );
+t.find( {a:{$gt:5,$lt:0}} ).itcount();
+expectNoRecordedPlan( {a:{$gt:5,$lt:0}} ); \ No newline at end of file
diff --git a/jstests/indexq.js b/jstests/indexq.js
new file mode 100644
index 0000000..f067b3c
--- /dev/null
+++ b/jstests/indexq.js
@@ -0,0 +1,14 @@
+// Test multikey range preference for a fully included range SERVER-958.
+
+t = db.jstests_indexq;
+t.drop();
+
+t.ensureIndex( {a:1} );
+// Single key index
+assert.eq( 5, t.find( {a:{$gt:4,$gte:5}} ).explain().indexBounds.a[ 0 ][ 0 ] );
+assert.eq( [[1,1],[2,2]], t.find( {a:{$in:[1,2,3]},$or:[{a:{$in:[1,2]}}]} ).explain().indexBounds.a );
+
+t.save( {a:[1,3]} );
+// Now with multi key index.
+assert.eq( 5, t.find( {a:{$gt:4,$gte:5}} ).explain().indexBounds.a[ 0 ][ 0 ] );
+assert.eq( [[1,1],[2,2]], t.find( {a:{$in:[1,2,3]},$or:[{a:{$in:[1,2]}}]} ).explain().indexBounds.a );
diff --git a/jstests/indexr.js b/jstests/indexr.js
new file mode 100644
index 0000000..60ecfb1
--- /dev/null
+++ b/jstests/indexr.js
@@ -0,0 +1,47 @@
+// Check multikey index cases with parallel nested fields SERVER-958.
+
+t = db.jstests_indexr;
+t.drop();
+
+// Check without indexes.
+t.save( { a: [ { b: 3, c: 6 }, { b: 1, c: 1 } ] } );
+assert.eq( 1, t.count( { 'a.b':{ $gt:2 }, 'a.c': { $lt:4 } } ) );
+assert.eq( 1, t.count( { a:{ b:3, c:6 }, 'a.c': { $lt:4 } } ) );
+
+// Check with single key indexes.
+t.remove();
+t.ensureIndex( {'a.b':1,'a.c':1} );
+t.ensureIndex( {a:1,'a.c':1} );
+assert.eq( 0, t.count( { 'a.b':{ $gt:2 }, 'a.c': { $lt:4 } } ) );
+assert.eq( 0, t.count( { a:{ b:3, c:6 }, 'a.c': { $lt:4 } } ) );
+assert.eq( 4, t.find( { 'a.b':{ $gt:2 }, 'a.c': { $lt:4 } } ).explain().indexBounds['a.c'][0][1] );
+assert.eq( 4, t.find( { a:{ b:3, c:6 }, 'a.c': { $lt:4 } } ).explain().indexBounds['a.c'][0][1] );
+
+t.save( { a: { b: 3, c: 3 } } );
+assert.eq( 1, t.count( { 'a.b':{ $gt:2 }, 'a.c': { $lt:4 } } ) );
+assert.eq( 1, t.count( { a:{ b:3, c:3 }, 'a.c': { $lt:4 } } ) );
+assert.eq( 4, t.find( { 'a.b':{ $gt:2 }, 'a.c': { $lt:4 } } ).explain().indexBounds['a.c'][0][1] );
+assert.eq( 4, t.find( { a:{ b:3, c:3 }, 'a.c': { $lt:4 } } ).explain().indexBounds['a.c'][0][1] );
+
+// Check with multikey indexes.
+t.remove();
+t.save( { a: [ { b: 3, c: 6 }, { b: 1, c: 1 } ] } );
+
+assert.eq( 1, t.count( { 'a.b':{ $gt:2 }, 'a.c': { $lt:4 } } ) );
+assert.eq( 1, t.count( { a:{ b:3, c:6 }, 'a.c': { $lt:4 } } ) );
+assert.eq( [[{$minElement:1},{$maxElement:1}]], t.find( { 'a.b':{ $gt:2 }, 'a.c': { $lt:4 } } ).explain().indexBounds['a.c'] );
+assert.eq( [[{$minElement:1},{$maxElement:1}]], t.find( { a:{ b:3, c:6 }, 'a.c': { $lt:4 } } ).explain().indexBounds['a.c'] );
+
+// Check reverse direction.
+assert.eq( 1, t.find( { 'a.b':{ $gt:2 }, 'a.c': { $lt:4 } } ).sort( {'a.b':-1} ).itcount() );
+assert.eq( 1, t.find( { a:{ b:3, c:6 }, 'a.c': { $lt:4 } } ).sort( {a:-1} ).itcount() );
+
+assert.eq( [[{$maxElement:1},{$minElement:1}]], t.find( { 'a.b':{ $gt:2 }, 'a.c': { $lt:4 } } ).sort( {'a.b':-1} ).explain().indexBounds['a.c'] );
+assert.eq( [[{$maxElement:1},{$minElement:1}]], t.find( { a:{ b:3, c:6 }, 'a.c': { $lt:4 } } ).sort( {a:-1} ).explain().indexBounds['a.c'] );
+
+// Check second field is constrained if first is not.
+assert.eq( 1, t.find( { 'a.c': { $lt:4 } } ).hint( {'a.b':1,'a.c':1} ).itcount() );
+assert.eq( 1, t.find( { 'a.c': { $lt:4 } } ).hint( {a:1,'a.c':1} ).itcount() );
+
+assert.eq( 4, t.find( { 'a.c': { $lt:4 } } ).hint( {'a.b':1,'a.c':1} ).explain().indexBounds['a.c'][0][1] );
+assert.eq( 4, t.find( { 'a.c': { $lt:4 } } ).hint( {a:1,'a.c':1} ).explain().indexBounds['a.c'][0][1] );
diff --git a/jstests/indexs.js b/jstests/indexs.js
new file mode 100644
index 0000000..609f912
--- /dev/null
+++ b/jstests/indexs.js
@@ -0,0 +1,21 @@
+// Test index key generation issue with parent and nested fields in same index and array containing subobject SERVER-3005.
+
+t = db.jstests_indexs;
+
+t.drop();
+t.ensureIndex( {a:1} );
+t.save( { a: [ { b: 3 } ] } );
+assert.eq( 1, t.count( { a:{ b:3 } } ) );
+
+t.drop();
+t.ensureIndex( {a:1,'a.b':1} );
+t.save( { a: { b: 3 } } );
+assert.eq( 1, t.count( { a:{ b:3 } } ) );
+ib = t.find( { a:{ b:3 } } ).explain().indexBounds;
+
+t.drop();
+t.ensureIndex( {a:1,'a.b':1} );
+t.save( { a: [ { b: 3 } ] } );
+assert.eq( ib, t.find( { a:{ b:3 } } ).explain().indexBounds );
+assert.eq( 1, t.find( { a:{ b:3 } } ).explain().nscanned );
+assert.eq( 1, t.count( { a:{ b:3 } } ) );
diff --git a/jstests/indext.js b/jstests/indext.js
new file mode 100644
index 0000000..e418dc2
--- /dev/null
+++ b/jstests/indext.js
@@ -0,0 +1,21 @@
+// Sparse indexes with arrays SERVER-3216
+
+t = db.jstests_indext;
+t.drop();
+
+t.ensureIndex( {'a.b':1}, {sparse:true} );
+t.save( {a:[]} );
+t.save( {a:1} );
+assert.eq( 0, t.find().hint( {'a.b':1} ).itcount() );
+assert.eq( 0, t.find().hint( {'a.b':1} ).explain().nscanned );
+
+t.ensureIndex( {'a.b':1,'a.c':1}, {sparse:true} );
+t.save( {a:[]} );
+t.save( {a:1} );
+assert.eq( 0, t.find().hint( {'a.b':1,'a.c':1} ).itcount() );
+assert.eq( 0, t.find().hint( {'a.b':1,'a.c':1} ).explain().nscanned );
+
+t.save( {a:[{b:1}]} );
+t.save( {a:1} );
+assert.eq( 1, t.find().hint( {'a.b':1,'a.c':1} ).itcount() );
+assert.eq( 1, t.find().hint( {'a.b':1,'a.c':1} ).explain().nscanned );
diff --git a/jstests/indexu.js b/jstests/indexu.js
new file mode 100644
index 0000000..c7fa8ed
--- /dev/null
+++ b/jstests/indexu.js
@@ -0,0 +1,137 @@
+// Test index key generation with duplicate values addressed by array index and
+// object field. SERVER-2902
+
+t = db.jstests_indexu;
+t.drop();
+
+var dupDoc = {a:[{'0':1}]}; // There are two 'a.0' fields in this doc.
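+// (The path 'a.0' resolves both to the first element of the array and to the field named '0'
+// inside that element, so key generation sees duplicate values for the same path.)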
+var dupDoc2 = {a:[{'1':1},'c']};
+var noDupDoc = {a:[{'1':1}]};
+
+// Test that we can't index dupDoc.
+t.save( dupDoc );
+assert( !db.getLastError() );
+t.ensureIndex( {'a.0':1} );
+assert( db.getLastError() );
+
+t.remove();
+t.ensureIndex( {'a.0':1} );
+assert( !db.getLastError() );
+t.save( dupDoc );
+assert( db.getLastError() );
+
+// Test that we can't index dupDoc2.
+t.drop();
+t.save( dupDoc2 );
+assert( !db.getLastError() );
+t.ensureIndex( {'a.1':1} );
+assert( db.getLastError() );
+
+t.remove();
+t.ensureIndex( {'a.1':1} );
+assert( !db.getLastError() );
+t.save( dupDoc2 );
+assert( db.getLastError() );
+
+// Test that we can index dupDoc with a different index.
+t.drop();
+t.ensureIndex( {'a.b':1} );
+t.save( dupDoc );
+assert( !db.getLastError() );
+
+// Test number field starting with hyphen.
+t.drop();
+t.ensureIndex( {'a.-1':1} );
+t.save( {a:[{'-1':1}]} );
+assert( !db.getLastError() );
+
+// Test number field starting with zero.
+t.drop();
+t.ensureIndex( {'a.00':1} );
+t.save( {a:[{'00':1}]} );
+assert( !db.getLastError() );
+
+// Test multiple array indexes
+t.drop();
+t.ensureIndex( {'a.0':1,'a.1':1} );
+t.save( {a:[{'1':1}]} );
+assert( !db.getLastError() );
+t.save( {a:[{'1':1},4]} );
+assert( db.getLastError() );
+
+// Test that we can index noDupDoc.
+t.drop();
+t.save( noDupDoc );
+t.ensureIndex( {'a.0':1} );
+assert( !db.getLastError() );
+t.ensureIndex( {'a.1':1} );
+assert( !db.getLastError() );
+
+t.drop();
+t.ensureIndex( {'a.0':1} );
+t.ensureIndex( {'a.1':1} );
+t.save( noDupDoc );
+assert( !db.getLastError() );
+
+// Test that we can query noDupDoc.
+assert.eq( 1, t.find( {'a.1':1} ).hint( {'a.1':1} ).itcount() );
+assert.eq( 1, t.find( {'a.1':1} ).hint( {$natural:1} ).itcount() );
+assert.eq( 1, t.find( {'a.0':{'1':1}} ).hint( {'a.0':1} ).itcount() );
+assert.eq( 1, t.find( {'a.0':{'1':1}} ).hint( {$natural:1} ).itcount() );
+
+// Check multiple nested array fields.
+t.drop();
+t.save( {a:[[1]]} );
+t.ensureIndex( {'a.0.0':1} );
+assert( !db.getLastError() );
+assert.eq( 1, t.find( {'a.0.0':1} ).hint( {$natural:1} ).itcount() );
+assert.eq( 1, t.find( {'a.0.0':1} ).hint( {'a.0.0':1} ).itcount() );
+
+// Check where there is a duplicate for a partially addressed field but not for a fully addressed field.
+t.drop();
+t.save( {a:[[1],{'0':1}]} );
+t.ensureIndex( {'a.0.0':1} );
+assert( db.getLastError() );
+
+// Check where there is a duplicate for a fully addressed field.
+t.drop();
+t.save( {a:[[1],{'0':[1]}]} );
+assert( !db.getLastError() );
+t.ensureIndex( {'a.0.0':1} );
+assert( db.getLastError() );
+
+// Two ways of addressing parse to an array.
+t.drop();
+t.save( {a:[{'0':1}]} );
+t.ensureIndex( {'a.0.0':1} );
+assert( db.getLastError() );
+
+// Test several key depths - with same arrays being found.
+t.drop();
+t.save( {a:[{'0':[{'0':1}]}]} );
+t.ensureIndex( {'a.0.0.0.0.0.0':1} );
+assert( db.getLastError() );
+t.ensureIndex( {'a.0.0.0.0.0':1} );
+assert( db.getLastError() );
+t.ensureIndex( {'a.0.0.0.0':1} );
+assert( db.getLastError() );
+t.ensureIndex( {'a.0.0.0':1} );
+assert( db.getLastError() );
+t.ensureIndex( {'a.0.0':1} );
+assert( db.getLastError() );
+t.ensureIndex( {'a.0':1} );
+assert( db.getLastError() );
+t.ensureIndex( {'a':1} );
+assert( !db.getLastError() );
+
+// Two prefixes extract docs, but one terminates extraction before array.
+t.drop();
+t.save( {a:[{'0':{'c':[]}}]} );
+t.ensureIndex( {'a.0.c':1} );
+assert( db.getLastError() );
+
+t.drop();
+t.save( {a:[[{'b':1}]]} );
+assert.eq( 1, t.find( {'a.0.b':1} ).itcount() );
+t.ensureIndex( {'a.0.b':1} );
+assert.eq( 1, t.find( {'a.0.b':1} ).itcount() );
diff --git a/jstests/indexv.js b/jstests/indexv.js
new file mode 100644
index 0000000..a69ff2a
--- /dev/null
+++ b/jstests/indexv.js
@@ -0,0 +1,18 @@
+// Check null key generation.
+
+t = db.jstests_indexv;
+t.drop();
+
+t.ensureIndex( {'a.b':1} );
+
+t.save( {a:[{},{b:1}]} );
+var e = t.find( {'a.b':null} ).explain();
+assert.eq( 0, e.n );
+assert.eq( 1, e.nscanned );
+
+t.drop();
+t.ensureIndex( {'a.b.c':1} );
+t.save( {a:[{b:[]},{b:{c:1}}]} );
+var e = t.find( {'a.b.c':null} ).explain();
+assert.eq( 0, e.n );
+assert.eq( 1, e.nscanned );
diff --git a/jstests/indexw.js b/jstests/indexw.js
new file mode 100644
index 0000000..3264434
--- /dev/null
+++ b/jstests/indexw.js
@@ -0,0 +1,14 @@
+// Check that v0 keys are generated for v0 indexes SERVER-3375
+
+t = db.jstests_indexw;
+t.drop();
+
+t.save( {a:[]} );
+assert.eq( 1, t.count( {a:[]} ) );
+t.ensureIndex( {a:1} );
+assert.eq( 1, t.count( {a:[]} ) );
+t.dropIndexes();
+
+// The count result is incorrect - just checking here that v0 key generation is used.
+t.ensureIndex( {a:1}, {v:0} );
+assert.eq( 0, t.count( {a:[]} ) );
diff --git a/jstests/insert1.js b/jstests/insert1.js
index 76edca1..7e6b73b 100644
--- a/jstests/insert1.js
+++ b/jstests/insert1.js
@@ -39,3 +39,6 @@ assert.eq(id1, id2, "ids match 4");
assert.eq(o, {a:4, _id:id1}, "input unchanged 4");
assert.eq(t.findOne({_id:id1}).a, 4, "find by id 4");
assert.eq(t.findOne({a:4})._id, id1 , "find by val 4");
+
+var stats = db.runCommand({ collstats: "insert1" });
+assert(stats.paddingFactor == 1.0);
diff --git a/jstests/libs/geo_near_random.js b/jstests/libs/geo_near_random.js
index 8624ef2..adf4f86 100644
--- a/jstests/libs/geo_near_random.js
+++ b/jstests/libs/geo_near_random.js
@@ -11,25 +11,46 @@ GeoNearRandomTest = function(name) {
}
-GeoNearRandomTest.prototype.mkPt = function mkPt(scale){
- scale = scale || 1; // scale is good for staying away from edges
- return [((Random.rand() * 359.8) - 179.9) * scale, ((Random.rand() * 180) - 90) * scale];
+GeoNearRandomTest.prototype.mkPt = function mkPt(scale, indexBounds){
+ if(!indexBounds){
+ scale = scale || 1; // scale is good for staying away from edges
+ return [((Random.rand() * 359.8) - 179.9) * scale, ((Random.rand() * 180) - 90) * scale];
+ }
+ else{
+ var range = indexBounds.max - indexBounds.min;
+ var eps = Math.pow(2, -40);
+ // Go very close to the borders but not quite there.
+ return [( Random.rand() * (range - eps) + eps) + indexBounds.min, ( Random.rand() * (range - eps) + eps ) + indexBounds.min];
+ }
+
}
-GeoNearRandomTest.prototype.insertPts = function(nPts) {
+GeoNearRandomTest.prototype.insertPts = function(nPts, indexBounds) {
assert.eq(this.nPts, 0, "insertPoints already called");
this.nPts = nPts;
for (var i=0; i<nPts; i++){
- this.t.insert({_id: i, loc: this.mkPt()});
+ this.t.insert({_id: i, loc: this.mkPt(undefined, indexBounds)});
}
-
- this.t.ensureIndex({loc: '2d'});
+
+ if(!indexBounds)
+ this.t.ensureIndex({loc: '2d'});
+ else
+ this.t.ensureIndex({loc: '2d'}, indexBounds)
}
GeoNearRandomTest.prototype.assertIsPrefix = function(short, long) {
for (var i=0; i < short.length; i++){
- assert.eq(short[i], long[i]);
+
+ var xS = short[i].obj ? short[i].obj.loc[0] : short[i].loc[0]
+ var yS = short[i].obj ? short[i].obj.loc[1] : short[i].loc[1]
+ var dS = short[i].obj ? short[i].dis : 1
+
+ var xL = long[i].obj ? long[i].obj.loc[0] : long[i].loc[0]
+ var yL = long[i].obj ? long[i].obj.loc[1] : long[i].loc[1]
+ var dL = long[i].obj ? long[i].dis : 1
+
+ assert.eq([xS, yS, dS], [xL, yL, dL]);
}
}
diff --git a/jstests/replsets/key1 b/jstests/libs/key1
index b5c19e4..b5c19e4 100644
--- a/jstests/replsets/key1
+++ b/jstests/libs/key1
diff --git a/jstests/replsets/key2 b/jstests/libs/key2
index cbde821..cbde821 100644
--- a/jstests/replsets/key2
+++ b/jstests/libs/key2
diff --git a/jstests/libs/testconfig b/jstests/libs/testconfig
new file mode 100644
index 0000000..0c1fc87
--- /dev/null
+++ b/jstests/libs/testconfig
@@ -0,0 +1,4 @@
+fastsync = true
+#comment line
+#commentedflagwithan = false
+version = false
diff --git a/jstests/mr_errorhandling.js b/jstests/mr_errorhandling.js
index c4e1137..f872b68 100644
--- a/jstests/mr_errorhandling.js
+++ b/jstests/mr_errorhandling.js
@@ -47,3 +47,5 @@ assert.eq( { 1 : 1 , 2 : 2 , 3 : 2 , 4 : 1 } , res.convertToSingleObject() , "A"
res.drop()
assert.throws( function(){ t.mapReduce( m_good , r , { out : "xxx" , query : "foo" } ); } )
+
+printjson( t.mapReduce( function(){ emit( 1 , db.foo.findOne() ); } , r , { out : { inline : true } } ) )
diff --git a/jstests/mr_merge2.js b/jstests/mr_merge2.js
new file mode 100644
index 0000000..520bbfd
--- /dev/null
+++ b/jstests/mr_merge2.js
@@ -0,0 +1,37 @@
+
+t = db.mr_merge2;
+t.drop();
+
+t.insert( { a : [ 1 , 2 ] } )
+t.insert( { a : [ 2 , 3 ] } )
+t.insert( { a : [ 3 , 4 ] } )
+
+outName = "mr_merge2_out";
+out = db[outName];
+out.drop();
+
+m = function(){ for (i=0; i<this.a.length; i++ ) emit( this.a[i] , 1 ); }
+r = function(k,vs){ return Array.sum( vs ); }
+
+function tos( o ){
+ var s = "";
+ for ( var i=0; i<100; i++ ){
+ if ( o[i] )
+ s += i + "_" + o[i] + "|";
+ }
+ return s;
+}
+
+
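+// With out:{merge:...}, mapReduce writes its results into the existing output collection,
+// overwriting documents whose key it recomputes and leaving any other documents in place.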
+outOptions = { out : { merge : outName } }
+
+res = t.mapReduce( m , r , outOptions )
+expected = { "1" : 1 , "2" : 2 , "3" : 2 , "4" : 1 }
+assert.eq( tos( expected ) , tos( res.convertToSingleObject() ) , "A" );
+
+t.insert( { a : [ 4 , 5 ] } )
+res = t.mapReduce( m , r , outOptions )
+expected["4"]++;
+expected["5"] = 1
+assert.eq( tos( expected ) , tos( res.convertToSingleObject() ) , "B" );
+
diff --git a/jstests/numberint.js b/jstests/numberint.js
new file mode 100644
index 0000000..258450f
--- /dev/null
+++ b/jstests/numberint.js
@@ -0,0 +1,92 @@
+assert.eq.automsg( "0", "new NumberInt()" );
+
+n = new NumberInt( 4 );
+assert.eq.automsg( "4", "n" );
+assert.eq.automsg( "4", "n.toNumber()" );
+assert.eq.automsg( "8", "n + 4" );
+assert.eq.automsg( "'NumberInt(4)'", "n.toString()" );
+assert.eq.automsg( "'NumberInt(4)'", "tojson( n )" );
+a = {}
+a.a = n;
+p = tojson( a );
+assert.eq.automsg( "'{ \"a\" : NumberInt(4) }'", "p" );
+
+assert.eq.automsg( "NumberInt(4 )", "eval( tojson( NumberInt( 4 ) ) )" );
+assert.eq.automsg( "a", "eval( tojson( a ) )" );
+
+n = new NumberInt( -4 );
+assert.eq.automsg( "-4", "n" );
+assert.eq.automsg( "-4", "n.toNumber()" );
+assert.eq.automsg( "0", "n + 4" );
+assert.eq.automsg( "'NumberInt(-4)'", "n.toString()" );
+assert.eq.automsg( "'NumberInt(-4)'", "tojson( n )" );
+a = {}
+a.a = n;
+p = tojson( a );
+assert.eq.automsg( "'{ \"a\" : NumberInt(-4) }'", "p" );
+
+n = new NumberInt( "11111" );
+assert.eq.automsg( "'NumberInt(11111)'", "n.toString()" );
+assert.eq.automsg( "'NumberInt(11111)'", "tojson( n )" );
+a = {}
+a.a = n;
+p = tojson( a );
+assert.eq.automsg( "'{ \"a\" : NumberInt(11111) }'", "p" );
+
+assert.eq.automsg( "NumberInt('11111' )", "eval( tojson( NumberInt( '11111' ) ) )" );
+assert.eq.automsg( "a", "eval( tojson( a ) )" );
+
+n = new NumberInt( "-11111" );
+assert.eq.automsg( "-11111", "n.toNumber()" );
+assert.eq.automsg( "-11107", "n + 4" );
+assert.eq.automsg( "'NumberInt(-11111)'", "n.toString()" );
+assert.eq.automsg( "'NumberInt(-11111)'", "tojson( n )" );
+a = {}
+a.a = n;
+p = tojson( a );
+assert.eq.automsg( "'{ \"a\" : NumberInt(-11111) }'", "p" );
+
+// parsing: v8 evaluates non-numbers to 0, which is not bad
+//assert.throws.automsg( function() { new NumberInt( "" ); } );
+//assert.throws.automsg( function() { new NumberInt( "y" ); } );
+
+// eq
+
+assert.eq( { x : 5 } , { x : new NumberInt( "5" ) } );
+
+assert( 5 == NumberInt( 5 ) , "eq" );
+assert( 5 < NumberInt( 6 ) , "lt" );
+assert( 5 > NumberInt( 4 ) , "lt" );
+assert( NumberInt( 1 ) , "to bool a" );
+
+// objects are always considered truthy
+//assert( ! NumberInt( 0 ) , "to bool b" );
+
+// create doc with int value in db
+t = db.getCollection( "numberint" );
+t.drop();
+
+o = { a : NumberInt(42) };
+t.save( o );
+
+assert.eq( 42 , t.findOne().a , "save doc 1" );
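+// BSON type 16 is a 32-bit integer, type 1 is a double.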
+assert.eq( 1 , t.find({a: {$type: 16}}).count() , "save doc 2" );
+assert.eq( 0 , t.find({a: {$type: 1}}).count() , "save doc 3" );
+
+// roundtripping
+mod = t.findOne({a: 42});
+mod.a += 10;
+mod.b = "foo";
+delete mod._id;
+t.save(mod);
+assert.eq( 2 , t.find({a: {$type: 16}}).count() , "roundtrip 1" );
+assert.eq( 0 , t.find({a: {$type: 1}}).count() , "roundtrip 2" );
+assert.eq( 1 , t.find({a: 52}).count() , "roundtrip 3" );
+
+// save regular number
+t.save({a: 42});
+assert.eq( 2 , t.find({a: {$type: 16}}).count() , "normal 1" );
+assert.eq( 1 , t.find({a: {$type: 1}}).count() , "normal 2" );
+assert.eq( 2 , t.find({a: 42}).count() , "normal 3" );
+
+
diff --git a/jstests/numberlong2.js b/jstests/numberlong2.js
new file mode 100644
index 0000000..2540d2d
--- /dev/null
+++ b/jstests/numberlong2.js
@@ -0,0 +1,32 @@
+// Test precision of NumberLong values with v1 index code SERVER-3717
+
+if ( 1 ) { // SERVER-3717
+
+t = db.jstests_numberlong2;
+t.drop();
+
+t.ensureIndex( {x:1} );
+
+function chk(longNum) {
+ t.remove();
+ t.save({ x: longNum });
+ assert.eq(longNum, t.find().hint({ x: 1 }).next().x);
+ assert.eq(longNum, t.find({}, { _id: 0, x: 1 }).hint({ x: 1 }).next().x);
+}
+
+chk( NumberLong("1123539983311657217") );
+chk( NumberLong("-1123539983311657217") );
+chk( NumberLong("4503599627370495") );
+chk( NumberLong("4503599627370496") );
+chk( NumberLong("4503599627370497") );
+
+t.remove();
+
+s = "11235399833116571";
+for( i = 99; i >= 0; --i ) {
+ t.save( {x:NumberLong( s + i )} );
+}
+
+assert.eq( t.find().sort( {x:1} ).hint( {$natural:1} ).toArray(), t.find().sort( {x:1} ).hint( {x:1} ).toArray() );
+
+} \ No newline at end of file
diff --git a/jstests/numberlong3.js b/jstests/numberlong3.js
new file mode 100644
index 0000000..10036c0
--- /dev/null
+++ b/jstests/numberlong3.js
@@ -0,0 +1,25 @@
+// Test sorting with long longs and doubles - SERVER-3719
+
+t = db.jstests_numberlong3;
+t.drop();
+
+s = "11235399833116571";
+for( i = 10; i >= 0; --i ) {
+ n = NumberLong( s + i );
+ t.save( {x:n} );
+ if ( 0 ) { // SERVER-3719
+ t.save( {x:n.floatApprox} );
+ }
+}
+
+ret = t.find().sort({x:1}).toArray().filter( function( x ) { return typeof( x.x.floatApprox ) != 'undefined' } );
+
+//printjson( ret );
+
+for( i = 1; i < ret.length; ++i ) {
+ first = ret[i-1].x.toString();
+ second = ret[i].x.toString();
+ if ( first.length == second.length ) {
+ assert.lte( ret[i-1].x.toString(), ret[i].x.toString() );
+ }
+}
diff --git a/jstests/or1.js b/jstests/or1.js
index 66162c4..66bbd2e 100644
--- a/jstests/or1.js
+++ b/jstests/or1.js
@@ -7,7 +7,7 @@ checkArrs = function( a, b, m ) {
bStr = [];
a.forEach( function( x ) { aStr.push( tojson( x ) ); } );
b.forEach( function( x ) { bStr.push( tojson( x ) ); } );
- for ( i in aStr ) {
+ for ( i = 0; i < aStr.length; ++i ) {
assert( -1 != bStr.indexOf( aStr[ i ] ), m );
}
}
diff --git a/jstests/or2.js b/jstests/or2.js
index d90cc85..297542e 100644
--- a/jstests/or2.js
+++ b/jstests/or2.js
@@ -7,7 +7,7 @@ checkArrs = function( a, b, m ) {
bStr = [];
a.forEach( function( x ) { aStr.push( tojson( x ) ); } );
b.forEach( function( x ) { bStr.push( tojson( x ) ); } );
- for ( i in aStr ) {
+ for ( i = 0; i < aStr.length; ++i ) {
assert( -1 != bStr.indexOf( aStr[ i ] ), m );
}
}
@@ -29,7 +29,6 @@ doTest = function( index ) {
assert.throws( function() { t.find( { x:0,$or:"a" } ).toArray(); } );
assert.throws( function() { t.find( { x:0,$or:[] } ).toArray(); } );
assert.throws( function() { t.find( { x:0,$or:[ "a" ] } ).toArray(); } );
- assert.throws( function() { t.find( { x:0,$or:[ {x:0,$or:[{x:0}]} ] } ).toArray(); } );
a1 = t.find( { x:0, $or: [ { a : 1 } ] } ).toArray();
checkArrs( [ { _id:0, x:0, a:1 } ], a1 );
diff --git a/jstests/or3.js b/jstests/or3.js
index be85a8f..97028be 100644
--- a/jstests/or3.js
+++ b/jstests/or3.js
@@ -7,7 +7,7 @@ checkArrs = function( a, b, m ) {
bStr = [];
a.forEach( function( x ) { aStr.push( tojson( x ) ); } );
b.forEach( function( x ) { bStr.push( tojson( x ) ); } );
- for ( i in aStr ) {
+ for ( i = 0; i < aStr.length; ++i ) {
assert( -1 != bStr.indexOf( aStr[ i ] ), m );
}
}
@@ -29,8 +29,6 @@ doTest = function( index ) {
assert.throws( function() { t.find( { x:0,$nor:"a" } ).toArray(); } );
assert.throws( function() { t.find( { x:0,$nor:[] } ).toArray(); } );
assert.throws( function() { t.find( { x:0,$nor:[ "a" ] } ).toArray(); } );
- assert.throws( function() { t.find( { x:0,$nor:[ {x:0,$or:[{x:0}]} ] } ).toArray(); } );
- assert.throws( function() { t.find( { x:0,$nor:[ {x:0,$nor:[{x:0}]} ] } ).toArray(); } );
an1 = t.find( { $nor: [ { a : 1 } ] } ).toArray();
checkArrs( t.find( {a:{$ne:1}} ).toArray(), an1 );
diff --git a/jstests/or4.js b/jstests/or4.js
index f793f36..3bfe191 100644
--- a/jstests/or4.js
+++ b/jstests/or4.js
@@ -17,7 +17,7 @@ checkArrs = function( a, b ) {
bStr = [];
a.forEach( function( x ) { aStr.push( tojson( x ) ); } );
b.forEach( function( x ) { bStr.push( tojson( x ) ); } );
- for ( i in aStr ) {
+ for ( i = 0; i < aStr.length; ++i ) {
assert( -1 != bStr.indexOf( aStr[ i ] ), m );
}
}
diff --git a/jstests/ord.js b/jstests/ord.js
index 4612f21..f78e504 100644
--- a/jstests/ord.js
+++ b/jstests/ord.js
@@ -28,6 +28,7 @@ for( i = 0; i < 90; ++i ) {
// the index key {a:1}
t.dropIndex( {a:1} );
+db.getLastError();
// Dropping an index kills all cursors on the indexed namespace, not just those
// cursors using the dropped index.
diff --git a/jstests/org.js b/jstests/org.js
new file mode 100644
index 0000000..0833798
--- /dev/null
+++ b/jstests/org.js
@@ -0,0 +1,19 @@
+// SERVER-2282 $or de-duping with sparse indexes
+
+t = db.jstests_org;
+t.drop();
+
+t.ensureIndex( {a:1}, {sparse:true} );
+t.ensureIndex( {b:1} );
+
+t.remove();
+t.save( {a:1,b:2} );
+assert.eq( 1, t.count( {$or:[{a:1},{b:2}]} ) );
+
+t.remove();
+t.save( {a:null,b:2} );
+assert.eq( 1, t.count( {$or:[{a:null},{b:2}]} ) );
+
+t.remove();
+t.save( {b:2} );
+assert.eq( 1, t.count( {$or:[{a:null},{b:2}]} ) );
diff --git a/jstests/orh.js b/jstests/orh.js
new file mode 100644
index 0000000..35f6a5b
--- /dev/null
+++ b/jstests/orh.js
@@ -0,0 +1,17 @@
+// SERVER-2831 Demonstration of sparse index matching semantics in a multi index $or query.
+
+t = db.jstests_orh;
+t.drop();
+
+t.ensureIndex( {a:1}, {sparse:true} );
+t.ensureIndex( {b:1,a:1} );
+
+t.remove();
+t.save( {b:2} );
+assert.eq( 0, t.count( {a:null} ) );
+assert.eq( 1, t.count( {b:2,a:null} ) );
+
+assert.eq( 1, t.count( {$or:[{b:2,a:null},{a:null}]} ) );
+
+// Is this desired?
+assert.eq( 0, t.count( {$or:[{a:null},{b:2,a:null}]} ) );
diff --git a/jstests/ori.js b/jstests/ori.js
new file mode 100644
index 0000000..9d923d6
--- /dev/null
+++ b/jstests/ori.js
@@ -0,0 +1,48 @@
+// Check elimination of proper range type when popping a $or clause SERVER-958.
+
+t = db.jstests_ori;
+t.drop();
+
+t.ensureIndex( {a:1,b:1} );
+t.ensureIndex( {a:1,c:1} );
+
+t.save( {a:1,b:[2,3],c:4} );
+t.save( {a:10,b:2,c:4} );
+
+// Check that proper results are returned.
+
+assert.eq( 2, t.count( {$or:[{a:{$gt:0,$lt:5},b:2},{a:10,c:4}]} ) );
+// Two $or clauses expected to be scanned.
+assert.eq( 2, t.find( {$or:[{a:{$gt:0,$lt:5},b:2},{a:10,c:4}]} ).explain().clauses.length );
+assert.eq( 2, t.count( {$or:[{a:10,b:2},{a:{$gt:0,$lt:5},c:4}]} ) );
+
+t.drop();
+
+// Now try a different index order.
+
+t.ensureIndex( {b:1,a:1} );
+t.ensureIndex( {a:1,c:1} );
+
+t.save( {a:1,b:[2,3],c:4} );
+t.save( {a:10,b:2,c:4} );
+
+assert.eq( 2, t.count( {$or:[{a:{$gt:0,$lt:5},b:2},{a:10,c:4}]} ) );
+assert.eq( 2, t.count( {$or:[{a:10,b:2},{a:{$gt:0,$lt:5},c:4}]} ) );
+
+t.drop();
+
+// Now eliminate a range.
+
+t.ensureIndex( {a:1} );
+t.ensureIndex( {b:1} );
+
+t.save( {a:[1,2],b:1} );
+t.save( {a:10,b:1} );
+
+assert.eq( 2, t.count( {$or:[{a:{$gt:0,$lt:5}},{a:10,b:1}]} ) );
+// Because a:1 is multikey, the value a:10 is scanned with the first clause.
+assert.isnull( t.find( {$or:[{a:{$gt:0,$lt:5}},{a:10,b:1}]} ).explain().clauses );
+
+assert.eq( 2, t.count( {$or:[{a:{$lt:5,$gt:0}},{a:10,b:1}]} ) );
+// Now a:10 is not scanned in the first clause so the second clause is not eliminated.
+assert.eq( 2, t.find( {$or:[{a:{$lt:5,$gt:0}},{a:10,b:1}]} ).explain().clauses.length );
diff --git a/jstests/orj.js b/jstests/orj.js
new file mode 100644
index 0000000..fa234f3
--- /dev/null
+++ b/jstests/orj.js
@@ -0,0 +1,121 @@
+// Test nested $or clauses SERVER-2585 SERVER-3192
+
+t = db.jstests_orj;
+t.drop();
+
+t.save( {a:1,b:2} );
+
+function check() {
+
+assert.throws( function() { t.find( { x:0,$or:"a" } ).toArray(); } );
+assert.throws( function() { t.find( { x:0,$or:[] } ).toArray(); } );
+assert.throws( function() { t.find( { x:0,$or:[ "a" ] } ).toArray(); } );
+
+assert.throws( function() { t.find( { x:0,$or:[{$or:"a"}] } ).toArray(); } );
+assert.throws( function() { t.find( { x:0,$or:[{$or:[]}] } ).toArray(); } );
+assert.throws( function() { t.find( { x:0,$or:[{$or:[ "a" ]}] } ).toArray(); } );
+
+assert.throws( function() { t.find( { x:0,$nor:"a" } ).toArray(); } );
+assert.throws( function() { t.find( { x:0,$nor:[] } ).toArray(); } );
+assert.throws( function() { t.find( { x:0,$nor:[ "a" ] } ).toArray(); } );
+
+assert.throws( function() { t.find( { x:0,$nor:[{$nor:"a"}] } ).toArray(); } );
+assert.throws( function() { t.find( { x:0,$nor:[{$nor:[]}] } ).toArray(); } );
+assert.throws( function() { t.find( { x:0,$nor:[{$nor:[ "a" ]}] } ).toArray(); } );
+
+assert.eq( 1, t.find( {a:1,b:2} ).itcount() );
+
+assert.eq( 1, t.find( {a:1,$or:[{b:2}]} ).itcount() );
+assert.eq( 0, t.find( {a:1,$or:[{b:3}]} ).itcount() );
+
+assert.eq( 1, t.find( {a:1,$or:[{$or:[{b:2}]}]} ).itcount() );
+assert.eq( 1, t.find( {a:1,$or:[{$or:[{b:2}]}]} ).itcount() );
+assert.eq( 0, t.find( {a:1,$or:[{$or:[{b:3}]}]} ).itcount() );
+
+assert.eq( 1, t.find( {$or:[{$or:[{a:2},{b:2}]}]} ).itcount() );
+assert.eq( 1, t.find( {$or:[{a:2},{$or:[{b:2}]}]} ).itcount() );
+assert.eq( 1, t.find( {$or:[{a:1},{$or:[{b:3}]}]} ).itcount() );
+
+assert.eq( 1, t.find( {$or:[{$or:[{a:1},{a:2}]},{$or:[{b:3},{b:4}]}]} ).itcount() );
+assert.eq( 1, t.find( {$or:[{$or:[{a:0},{a:2}]},{$or:[{b:2},{b:4}]}]} ).itcount() );
+assert.eq( 0, t.find( {$or:[{$or:[{a:0},{a:2}]},{$or:[{b:3},{b:4}]}]} ).itcount() );
+
+assert.eq( 1, t.find( {a:1,$and:[{$or:[{$or:[{b:2}]}]}]} ).itcount() );
+assert.eq( 0, t.find( {a:1,$and:[{$or:[{$or:[{b:3}]}]}]} ).itcount() );
+
+assert.eq( 1, t.find( {$and:[{$or:[{a:1},{a:2}]},{$or:[{b:1},{b:2}]}]} ).itcount() );
+assert.eq( 0, t.find( {$and:[{$or:[{a:3},{a:2}]},{$or:[{b:1},{b:2}]}]} ).itcount() );
+assert.eq( 0, t.find( {$and:[{$or:[{a:1},{a:2}]},{$or:[{b:3},{b:1}]}]} ).itcount() );
+
+assert.eq( 0, t.find( {$and:[{$nor:[{a:1},{a:2}]},{$nor:[{b:1},{b:2}]}]} ).itcount() );
+assert.eq( 0, t.find( {$and:[{$nor:[{a:3},{a:2}]},{$nor:[{b:1},{b:2}]}]} ).itcount() );
+assert.eq( 1, t.find( {$and:[{$nor:[{a:3},{a:2}]},{$nor:[{b:3},{b:1}]}]} ).itcount() );
+
+assert.eq( 1, t.find( {$and:[{$or:[{a:1},{a:2}]},{$nor:[{b:1},{b:3}]}]} ).itcount() );
+assert.eq( 0, t.find( {$and:[{$or:[{a:3},{a:2}]},{$nor:[{b:1},{b:3}]}]} ).itcount() );
+assert.eq( 0, t.find( {$and:[{$or:[{a:1},{a:2}]},{$nor:[{b:1},{b:2}]}]} ).itcount() );
+
+}
+
+check();
+
+t.ensureIndex( {a:1} );
+check();
+t.dropIndexes();
+
+t.ensureIndex( {b:1} );
+check();
+t.dropIndexes();
+
+t.ensureIndex( {a:1} );
+t.ensureIndex( {b:1} );
+check();
+t.dropIndexes();
+
+t.ensureIndex( {a:1,b:1} );
+check();
+t.dropIndexes();
+
+t.ensureIndex( {a:1} );
+t.ensureIndex( {b:1} );
+t.ensureIndex( {a:1,b:1} );
+check();
+
+function checkHinted( hint ) {
+ assert.eq( 1, t.find( {a:1,b:2} ).hint( hint ).itcount() );
+
+ assert.eq( 1, t.find( {a:1,$or:[{b:2}]} ).hint( hint ).itcount() );
+ assert.eq( 0, t.find( {a:1,$or:[{b:3}]} ).hint( hint ).itcount() );
+
+ assert.eq( 1, t.find( {a:1,$or:[{$or:[{b:2}]}]} ).hint( hint ).itcount() );
+ assert.eq( 1, t.find( {a:1,$or:[{$or:[{b:2}]}]} ).hint( hint ).itcount() );
+ assert.eq( 0, t.find( {a:1,$or:[{$or:[{b:3}]}]} ).hint( hint ).itcount() );
+
+ assert.eq( 1, t.find( {$or:[{$or:[{a:2},{b:2}]}]} ).hint( hint ).itcount() );
+ assert.eq( 1, t.find( {$or:[{a:2},{$or:[{b:2}]}]} ).hint( hint ).itcount() );
+ assert.eq( 1, t.find( {$or:[{a:1},{$or:[{b:3}]}]} ).hint( hint ).itcount() );
+
+ assert.eq( 1, t.find( {$or:[{$or:[{a:1},{a:2}]},{$or:[{b:3},{b:4}]}]} ).hint( hint ).itcount() );
+ assert.eq( 1, t.find( {$or:[{$or:[{a:0},{a:2}]},{$or:[{b:2},{b:4}]}]} ).hint( hint ).itcount() );
+ assert.eq( 0, t.find( {$or:[{$or:[{a:0},{a:2}]},{$or:[{b:3},{b:4}]}]} ).hint( hint ).itcount() );
+
+ assert.eq( 1, t.find( {a:1,$and:[{$or:[{$or:[{b:2}]}]}]} ).hint( hint ).itcount() );
+ assert.eq( 0, t.find( {a:1,$and:[{$or:[{$or:[{b:3}]}]}]} ).hint( hint ).itcount() );
+
+ assert.eq( 1, t.find( {$and:[{$or:[{a:1},{a:2}]},{$or:[{b:1},{b:2}]}]} ).hint( hint ).itcount() );
+ assert.eq( 0, t.find( {$and:[{$or:[{a:3},{a:2}]},{$or:[{b:1},{b:2}]}]} ).hint( hint ).itcount() );
+ assert.eq( 0, t.find( {$and:[{$or:[{a:1},{a:2}]},{$or:[{b:3},{b:1}]}]} ).hint( hint ).itcount() );
+
+ assert.eq( 0, t.find( {$and:[{$nor:[{a:1},{a:2}]},{$nor:[{b:1},{b:2}]}]} ).hint( hint ).itcount() );
+ assert.eq( 0, t.find( {$and:[{$nor:[{a:3},{a:2}]},{$nor:[{b:1},{b:2}]}]} ).hint( hint ).itcount() );
+ assert.eq( 1, t.find( {$and:[{$nor:[{a:3},{a:2}]},{$nor:[{b:3},{b:1}]}]} ).hint( hint ).itcount() );
+
+ assert.eq( 1, t.find( {$and:[{$or:[{a:1},{a:2}]},{$nor:[{b:1},{b:3}]}]} ).hint( hint ).itcount() );
+ assert.eq( 0, t.find( {$and:[{$or:[{a:3},{a:2}]},{$nor:[{b:1},{b:3}]}]} ).hint( hint ).itcount() );
+ assert.eq( 0, t.find( {$and:[{$or:[{a:1},{a:2}]},{$nor:[{b:1},{b:2}]}]} ).hint( hint ).itcount() );
+}
+
+checkHinted( {$natural:1} );
+checkHinted( {a:1} );
+checkHinted( {b:1} );
+checkHinted( {a:1,b:1} ); \ No newline at end of file
diff --git a/jstests/ork.js b/jstests/ork.js
new file mode 100644
index 0000000..d6d4016
--- /dev/null
+++ b/jstests/ork.js
@@ -0,0 +1,11 @@
+// SERVER-2585 Test $or clauses within indexed top level $or clauses.
+
+t = db.jstests_ork;
+t.drop();
+
+t.ensureIndex( {a:1} );
+t.save( {a:[1,2],b:5} );
+t.save( {a:[2,4],b:5} );
+
+assert.eq( 2, t.find( {$or:[{a:1,$and:[{$or:[{a:2},{a:3}]},{$or:[{b:5}]}]},{a:2,$or:[{a:3},{a:4}]}]} ).itcount() );
+assert.eq( 1, t.find( {$or:[{a:1,$and:[{$or:[{a:2},{a:3}]},{$or:[{b:6}]}]},{a:2,$or:[{a:3},{a:4}]}]} ).itcount() );
diff --git a/jstests/orl.js b/jstests/orl.js
new file mode 100644
index 0000000..2726975
--- /dev/null
+++ b/jstests/orl.js
@@ -0,0 +1,13 @@
+// SERVER-3445 Test using coarse multikey bounds for or range elimination.
+
+t = db.jstests_orl;
+t.drop();
+
+t.ensureIndex( {'a.b':1,'a.c':1} );
+// make the index multikey
+t.save( {a:{b:[1,2]}} );
+
+// SERVER-3445
+if ( 0 ) {
+assert( !t.find( {$or:[{'a.b':2,'a.c':3},{'a.b':2,'a.c':4}]} ).explain().clauses );
+} \ No newline at end of file
diff --git a/jstests/orm.js b/jstests/orm.js
new file mode 100644
index 0000000..dae75e4
--- /dev/null
+++ b/jstests/orm.js
@@ -0,0 +1,29 @@
+// Test dropping during a $or yield SERVER-3555
+
+t = db.jstests_orm;
+t.drop();
+
+clauses = [];
+for( i = 0; i < 10; ++i ) {
+ clauses.push( {a:{$lte:(i+1)*5000/10},i:49999} );
+ clauses.push( {b:{$lte:(i+1)*5000/10},i:49999} );
+}
+
+p = startParallelShell( 'for( i = 0; i < 15; ++i ) { sleep( 1000 ); db.jstests_orm.drop() }' );
+for( j = 0; j < 5; ++j ) {
+ for( i = 0; i < 5000; ++i ) {
+ t.save( {a:i,i:i} );
+ t.save( {b:i,i:i} );
+ }
+ t.ensureIndex( {a:1} );
+ t.ensureIndex( {b:1} );
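+ // The parallel shell may drop the collection while these $or operations yield,
+ // so any errors they raise are expected and ignored.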
+ try {
+ t.find( {$or:clauses} ).itcount();
+ t.find( {$or:clauses} ).count();
+ t.update( {$or:clauses}, {} );
+ t.remove( {$or:clauses} );
+ } catch ( e ) {
+ }
+ db.getLastError();
+}
+p();
diff --git a/jstests/orn.js b/jstests/orn.js
new file mode 100644
index 0000000..c900bb8
--- /dev/null
+++ b/jstests/orn.js
@@ -0,0 +1,22 @@
+// Test dropping during an $or distinct yield SERVER-3555
+
+t = db.jstests_orn;
+t.drop();
+
+clauses = [];
+for( i = 0; i < 10; ++i ) {
+ clauses.push( {a:{$lte:(i+1)*5000/10},i:49999} );
+ clauses.push( {b:{$lte:(i+1)*5000/10},i:49999} );
+}
+
+p = startParallelShell( 'for( i = 0; i < 15; ++i ) { sleep( 1000 ); db.jstests_orn.drop() }' );
+for( j = 0; j < 5; ++j ) {
+ for( i = 0; i < 5000; ++i ) {
+ t.save( {a:i,i:i} );
+ t.save( {b:i,i:i} );
+ }
+ t.ensureIndex( {a:1} );
+ t.ensureIndex( {b:1} );
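+ // Run distinct over the $or clauses while the parallel shell drops the collection
+ // out from under it (SERVER-3555).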
+ t.distinct('a',{$or:clauses});
+}
+p();
diff --git a/jstests/profile1.js b/jstests/profile1.js
index 0e8009a..9654357 100644
--- a/jstests/profile1.js
+++ b/jstests/profile1.js
@@ -1,49 +1,125 @@
+print("profile1.js BEGIN");
try {
-/* With pre-created system.profile (capped) */
-db.runCommand({profile: 0});
-db.getCollection("system.profile").drop();
-assert(!db.getLastError(), "Z");
-assert.eq(0, db.runCommand({profile: -1}).was, "A");
+ function getProfileAString() {
+ var s = "\n";
+ db.system.profile.find().forEach( function(z){
+ s += tojson( z ) + " ,\n" ;
+ } );
+ return s;
+ }
-db.createCollection("system.profile", {capped: true, size: 1000});
-db.runCommand({profile: 2});
-assert.eq(2, db.runCommand({profile: -1}).was, "B");
-assert.eq(1, db.system.profile.stats().capped, "C");
-var capped_size = db.system.profile.storageSize();
-assert.gt(capped_size, 999, "D");
-assert.lt(capped_size, 2000, "E");
+ /* With pre-created system.profile (capped) */
+ db.runCommand({profile: 0});
+ db.getCollection("system.profile").drop();
+ assert(!db.getLastError(), "Z");
+ assert.eq(0, db.runCommand({profile: -1}).was, "A");
+
+ db.createCollection("system.profile", {capped: true, size: 10000});
+ db.runCommand({profile: 2});
+ assert.eq(2, db.runCommand({profile: -1}).was, "B");
+ assert.eq(1, db.system.profile.stats().capped, "C");
+ var capped_size = db.system.profile.storageSize();
+ assert.gt(capped_size, 9999, "D");
+ assert.lt(capped_size, 20000, "E");
+
+ db.foo.findOne()
+
+ assert.eq( 4 , db.system.profile.find().count() , "E2" );
+
+ /* Make sure we can't drop if profiling is still on */
+ assert.throws( function(z){ db.getCollection("system.profile").drop(); } )
-db.foo.findOne()
+ /* With pre-created system.profile (un-capped) */
+ db.runCommand({profile: 0});
+ db.getCollection("system.profile").drop();
+ assert.eq(0, db.runCommand({profile: -1}).was, "F");
+
+ db.createCollection("system.profile");
+ db.runCommand({profile: 2});
+ assert.eq(2, db.runCommand({profile: -1}).was, "G");
+ assert.eq(null, db.system.profile.stats().capped, "G1");
+
+ /* With no system.profile collection */
+ db.runCommand({profile: 0});
+ db.getCollection("system.profile").drop();
+ assert.eq(0, db.runCommand({profile: -1}).was, "H");
+
+ db.runCommand({profile: 2});
+ assert.eq(2, db.runCommand({profile: -1}).was, "I");
+ assert.eq(1, db.system.profile.stats().capped, "J");
+ var auto_size = db.system.profile.storageSize();
+ assert.gt(auto_size, capped_size, "K");
+
-assert.eq( 4 , db.system.profile.find().count() , "E2" );
+ db.eval("sleep(1)") // pre-load system.js
-/* Make sure we can't drop if profiling is still on */
-assert.throws( function(z){ db.getCollection("system.profile").drop(); } )
+ db.setProfilingLevel(2);
+ before = db.system.profile.count();
+ db.eval( "sleep(25)" )
+ db.eval( "sleep(120)" )
+ after = db.system.profile.count()
+ assert.eq( before + 3 , after , "X1" )
-/* With pre-created system.profile (un-capped) */
-db.runCommand({profile: 0});
-db.getCollection("system.profile").drop();
-assert.eq(0, db.runCommand({profile: -1}).was, "F");
+ /* sleep() could be inaccurate on certain platforms; let's check */
+ print("\nsleep 2 times actual:");
+ for (var i = 0; i < 4; i++) {
+ print(db.eval("var x = new Date(); sleep(2); return new Date() - x;"));
+ }
+ print();
+ print("\nsleep 20 times actual:");
+ for (var i = 0; i < 4; i++) {
+ print(db.eval("var x = new Date(); sleep(20); return new Date() - x;"));
+ }
+ print();
+ print("\nsleep 120 times actual:");
+ for (var i = 0; i < 4; i++) {
+ print(db.eval("var x = new Date(); sleep(120); return new Date() - x;"));
+ }
+ print();
-db.createCollection("system.profile");
-db.runCommand({profile: 2});
-assert.eq(2, db.runCommand({profile: -1}).was, "G");
-assert.eq(null, db.system.profile.stats().capped, "G1");
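+ // Return 1 if the eval actually took at least 'max' ms (and so should be recorded
+ // by the profiler when slowms is set to 'max'), otherwise 0.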
+ function evalSleepMoreThan(millis,max){
+ var start = new Date();
+ db.eval("sleep("+millis+")");
+ var end = new Date();
+ var actual = end.getTime() - start.getTime();
+ if ( actual > ( millis + 5 ) ) {
+ print( "warning wanted to sleep for: " + millis + " but took: " + actual );
+ }
+ return actual >= max ? 1 : 0;
+ }
-/* With no system.profile collection */
-db.runCommand({profile: 0});
-db.getCollection("system.profile").drop();
-assert.eq(0, db.runCommand({profile: -1}).was, "H");
+ db.setProfilingLevel(1,100);
+ before = db.system.profile.count();
+ var delta = 0;
+ delta += evalSleepMoreThan( 15 , 100 );
+ delta += evalSleepMoreThan( 120 , 100 );
+ after = db.system.profile.count()
+ assert.eq( before + delta , after , "X2 : " + getProfileAString() )
-db.runCommand({profile: 2});
-assert.eq(2, db.runCommand({profile: -1}).was, "I");
-assert.eq(1, db.system.profile.stats().capped, "J");
-var auto_size = db.system.profile.storageSize();
-assert.gt(auto_size, capped_size, "K");
+ db.setProfilingLevel(1,20);
+ before = db.system.profile.count();
+ delta = 0;
+ delta += evalSleepMoreThan( 5 , 20 );
+ delta += evalSleepMoreThan( 120 , 20 );
+ after = db.system.profile.count()
+ assert.eq( before + delta , after , "X3 : " + getProfileAString() )
+
+ db.profile1.drop();
+ db.setProfilingLevel(2)
+ var q = { _id : 5 };
+ var u = { $inc : { x : 1 } };
+ db.profile1.update( q , u );
+ var r = db.system.profile.find().sort( { $natural : -1 } )[0]
+ assert.eq( q , r.query , "Y1" );
+ assert.eq( u , r.updateobj , "Y2" );
+ assert.eq( "update" , r.op , "Y3" );
+ assert.eq("test.profile1", r.ns, "Y4");
+ print("profile1.js SUCCESS OK");
+
} finally {
// disable profiling for subsequent tests
assert.commandWorked( db.runCommand( {profile:0} ) );
-} \ No newline at end of file
+}
diff --git a/jstests/profile2.js b/jstests/profile2.js
new file mode 100644
index 0000000..929b463
--- /dev/null
+++ b/jstests/profile2.js
@@ -0,0 +1,19 @@
+print("profile2.js BEGIN");
+
+try {
+
+ assert.commandWorked( db.runCommand( {profile:2} ) );
+
+ huge = 'huge';
+ while (huge.length < 2*1024*1024){
+ huge += huge;
+ }
+
+ db.profile2.count({huge:huge}) // would make a huge entry in db.system.profile
+
+ print("profile2.js SUCCESS OK");
+
+} finally {
+ // disable profiling for subsequent tests
+ assert.commandWorked( db.runCommand( {profile:0} ) );
+}
diff --git a/jstests/profile3.js b/jstests/profile3.js
new file mode 100644
index 0000000..a6574b7
--- /dev/null
+++ b/jstests/profile3.js
@@ -0,0 +1,26 @@
+
+t = db.profile3;
+t.drop();
+
+try {
+ db.setProfilingLevel(0);
+
+ db.system.profile.drop();
+ assert.eq( 0 , db.system.profile.count() )
+
+ db.setProfilingLevel(2);
+
+ t.insert( { x : 1 } );
+ t.findOne( { x : 1 } );
+ t.find( { x : 1 } ).count();
+
+ db.system.profile.find().forEach( printjson )
+
+ db.setProfilingLevel(0);
+ db.system.profile.drop();
+
+}
+finally {
+ db.setProfilingLevel(0);
+}
+
diff --git a/jstests/push.js b/jstests/push.js
index 2cdd91c..9bcaa2f 100644
--- a/jstests/push.js
+++ b/jstests/push.js
@@ -17,6 +17,38 @@ assert.eq( "2" , t.findOne().a.toString() , "D" );
t.update( { _id : 2 } , { $push : { a : 3 } } );
t.update( { _id : 2 } , { $push : { a : 4 } } );
t.update( { _id : 2 } , { $push : { a : 5 } } );
-assert.eq( "2,3,4,5" , t.findOne().a.toString() , "D" );
+assert.eq( "2,3,4,5" , t.findOne().a.toString() , "E1" );
+
+t.update( { _id : 2 } , { $pop : { a : -1 } } );
+assert.eq( "3,4,5" , t.findOne().a.toString() , "E2" );
+
+t.update( { _id : 2 } , { $pop : { a : -1 } } );
+assert.eq( "4,5" , t.findOne().a.toString() , "E3" );
+
t.update( { _id : 2 } , { $pop : { a : -1 } } );
-assert.eq( "3,4,5" , t.findOne().a.toString() , "D" );
+assert.isnull( db.getLastError() , "E4a" )
+assert.eq( "5" , t.findOne().a.toString() , "E4" );
+
+
+t.update( { _id : 2 } , { $pop : { a : -1 } } );
+assert.isnull( db.getLastError() , "E5a")
+assert.eq( "" , t.findOne().a.toString() , "E5" );
+
+t.update( { _id : 2 } , { $pop : { a : -1 } } );
+assert.isnull( db.getLastError() , "E6a" )
+assert.eq( "" , t.findOne().a.toString() , "E6" );
+
+t.update( { _id : 2 } , { $pop : { a : -1 } } );
+assert.isnull( db.getLastError() , "E7a" )
+assert.eq( "" , t.findOne().a.toString() , "E7" );
+
+t.update( { _id : 2 } , { $pop : { a : 1 } } );
+assert.isnull( db.getLastError() , "E8a" )
+assert.eq( "" , t.findOne().a.toString() , "E8" );
+
+t.update( { _id : 2 } , { $pop : { b : -1 } } );
+assert.isnull( db.getLastError() , "E4a" )
+
+t.update( { _id : 2 } , { $pop : { b : 1 } } );
+assert.isnull( db.getLastError() , "E4a" )
+
diff --git a/jstests/query1.js b/jstests/query1.js
index 9b40054..c3e276f 100644
--- a/jstests/query1.js
+++ b/jstests/query1.js
@@ -18,3 +18,6 @@ t.find().forEach(
assert.eq( num , 3 , "num" )
assert.eq( total , 8 , "total" )
+
+assert.eq( 3 , t.find()._addSpecial( "$comment" , "this is a test" ).itcount() , "B1" )
+assert.eq( 3 , t.find()._addSpecial( "$comment" , "this is a test" ).count() , "B2" )
diff --git a/jstests/regex2.js b/jstests/regex2.js
index b6a21f5..87d5cb4 100644
--- a/jstests/regex2.js
+++ b/jstests/regex2.js
@@ -60,3 +60,11 @@ assert.eq( 1 , t.find( { a : {$regex: a} } ).count() , "obj C D" );
assert.eq( 1 , t.find( { a : {$regex: b} } ).count() , "obj C E" );
assert.eq( 2 , t.find( { a : {$regex: a , $options: "i" } } ).count() , "obj C F is spidermonkey built with UTF-8 support?" );
+// Test s (DOT_ALL) option. Not supported with /regex/opts syntax
+t.drop();
+t.save({a:'1 2'})
+t.save({a:'1\n2'})
+assert.eq( 1 , t.find( { a : {$regex: '1.*2'} } ).count() );
+assert.eq( 2 , t.find( { a : {$regex: '1.*2', $options: 's'} } ).count() );
+
+
diff --git a/jstests/regex6.js b/jstests/regex6.js
index 8243313..5414324 100644
--- a/jstests/regex6.js
+++ b/jstests/regex6.js
@@ -6,6 +6,7 @@ t.save( { name : "eliot" } );
t.save( { name : "emily" } );
t.save( { name : "bob" } );
t.save( { name : "aaron" } );
+t.save( { name : "[with]some?symbols" } );
t.ensureIndex( { name : 1 } );
@@ -14,9 +15,15 @@ assert.eq( 1 , t.find( { name : /^\// } ).explain().nscanned , "index explain 1"
assert.eq( 0 , t.find( { name : /^é/ } ).explain().nscanned , "index explain 2" );
assert.eq( 0 , t.find( { name : /^\é/ } ).explain().nscanned , "index explain 3" );
assert.eq( 1 , t.find( { name : /^\./ } ).explain().nscanned , "index explain 4" );
-assert.eq( 4 , t.find( { name : /^./ } ).explain().nscanned , "index explain 5" );
+assert.eq( 5 , t.find( { name : /^./ } ).explain().nscanned , "index explain 5" );
-assert.eq( 4 , t.find( { name : /^\Qblah\E/ } ).explain().nscanned , "index explain 6" );
+// SERVER-2862
+assert.eq( 0 , t.find( { name : /^\Qblah\E/ } ).count() , "index explain 6" );
+assert.eq( 1 , t.find( { name : /^\Qblah\E/ } ).explain().nscanned , "index explain 6" );
+assert.eq( 1 , t.find( { name : /^blah/ } ).explain().nscanned , "index explain 6" );
+assert.eq( 1 , t.find( { name : /^\Q[\Ewi\Qth]some?s\Eym/ } ).count() , "index explain 6" );
+assert.eq( 2 , t.find( { name : /^\Q[\Ewi\Qth]some?s\Eym/ } ).explain().nscanned , "index explain 6" );
+assert.eq( 2 , t.find( { name : /^bob/ } ).explain().nscanned , "index explain 6" ); // proof nscanned == count+1
assert.eq( 1, t.find( { name : { $regex : "^e", $gte: "emily" } } ).explain().nscanned , "ie7" );
assert.eq( 1, t.find( { name : { $gt : "a", $regex: "^emily" } } ).explain().nscanned , "ie7" );
diff --git a/jstests/regexa.js b/jstests/regexa.js
new file mode 100644
index 0000000..b0d4719
--- /dev/null
+++ b/jstests/regexa.js
@@ -0,0 +1,19 @@
+// Test simple regex optimization with a regex '|' (bar) present - SERVER-3298
+
+t = db.jstests_regexa;
+t.drop();
+
+function check() {
+ assert.eq( 1, t.count( {a:/^(z|.)/} ) );
+ assert.eq( 1, t.count( {a:/^z|./} ) );
+ assert.eq( 0, t.count( {a:/^z(z|.)/} ) );
+ assert.eq( 1, t.count( {a:/^zz|./} ) );
+}
+
+t.save( {a:'a'} );
+
+check();
+t.ensureIndex( {a:1} );
+if ( 1 ) { // SERVER-3298
+check();
+}
diff --git a/jstests/remove10.js b/jstests/remove10.js
new file mode 100644
index 0000000..cf1dac4
--- /dev/null
+++ b/jstests/remove10.js
@@ -0,0 +1,28 @@
+// SERVER-2009 Update documents with adjacent indexed keys.
+// This test doesn't fail; it just prints a spurious warning message.
+
+if ( 0 ) { // SERVER-2009
+t = db.jstests_remove10;
+t.drop();
+t.ensureIndex( {i:1} );
+
+function arr( i ) {
+ ret = [];
+ for( j = i; j < i + 11; ++j ) {
+ ret.push( j );
+ }
+ return ret;
+}
+
+for( i = 0; i < 1100; i += 11 ) {
+ t.save( {i:arr( i )} );
+}
+
+s = startParallelShell( 't = db.jstests_remove10; for( j = 0; j < 1000; ++j ) { o = t.findOne( {i:Random.randInt(1100)} ); t.remove( {_id:o._id} ); t.insert( o ); }' );
+
+for( i = 0; i < 200; ++i ) {
+ t.find( {i:{$gte:0}} ).hint( {i:1} ).itcount();
+}
+
+s();
+} \ No newline at end of file
diff --git a/jstests/remove2.js b/jstests/remove2.js
index ff122a0..eb4ef07 100644
--- a/jstests/remove2.js
+++ b/jstests/remove2.js
@@ -21,6 +21,11 @@ function g() {
t.save( { x:[7,8,9], z:"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" } );
t.remove( {x : {$gte:3}, $atomic:x++ } );
+
+ assert( !db.getLastError() );
+ // $atomic within $and is not allowed.
+ t.remove( {x : {$gte:3}, $and:[{$atomic:true}] } );
+ assert( db.getLastError() );
assert( t.findOne({x:3}) == null );
assert( t.findOne({x:8}) == null );
diff --git a/jstests/remove9.js b/jstests/remove9.js
new file mode 100644
index 0000000..655594a
--- /dev/null
+++ b/jstests/remove9.js
@@ -0,0 +1,16 @@
+// SERVER-2009 Count odd numbered entries while updating and deleting even numbered entries.
+
+t = db.jstests_remove9;
+t.drop();
+t.ensureIndex( {i:1} );
+for( i = 0; i < 1000; ++i ) {
+ t.save( {i:i} );
+}
+
+s = startParallelShell( 't = db.jstests_remove9; for( j = 0; j < 5000; ++j ) { i = Random.randInt( 499 ) * 2; t.update( {i:i}, {$set:{i:2000}} ); t.remove( {i:2000} ); t.save( {i:i} ); }' );
+
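+// The parallel shell only touches even-numbered docs, so each indexed scan should
+// still count exactly the 500 odd-numbered docs despite the concurrent churn.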
+for( i = 0; i < 1000; ++i ) {
+ assert.eq( 500, t.find( {i:{$gte:0,$mod:[2,1]}} ).hint( {i:1} ).itcount() );
+}
+
+s();
diff --git a/jstests/rename.js b/jstests/rename.js
index 3ace968..d475cc6 100644
--- a/jstests/rename.js
+++ b/jstests/rename.js
@@ -31,17 +31,24 @@ a.drop();
b.drop();
c.drop();
-db.createCollection( "jstests_rename_a", {capped:true,size:100} );
-for( i = 0; i < 10; ++i ) {
+// TODO: too many numbers hard-coded here
+// this test depends precisely on record size and hence may not be very reliable
+// note we use floats to make sure numbers are represented as doubles for both SM and v8, since the test relies on record size
+db.createCollection( "jstests_rename_a", {capped:true,size:10000} );
+for( i = 0.1; i < 10; ++i ) {
a.save( { i: i } );
}
assert.commandWorked( admin.runCommand( {renameCollection:"test.jstests_rename_a", to:"test.jstests_rename_b"} ) );
-assert.eq( 1, b.count( {i:9} ) );
-for( i = 10; i < 20; ++i ) {
+assert.eq( 1, b.count( {i:9.1} ) );
+for( i = 10.1; i < 250; ++i ) {
b.save( { i: i } );
}
-assert.eq( 0, b.count( {i:9} ) );
-assert.eq( 1, b.count( {i:19} ) );
+
+//res = b.find().sort({i:1});
+//while (res.hasNext()) printjson(res.next());
+
+assert.eq( 0, b.count( {i:9.1} ) );
+assert.eq( 1, b.count( {i:19.1} ) );
assert( db.system.namespaces.findOne( {name:"test.jstests_rename_b" } ) );
assert( !db.system.namespaces.findOne( {name:"test.jstests_rename_a" } ) );
diff --git a/jstests/repl/basic1.js b/jstests/repl/basic1.js
index 15fc983..4a6091d 100644
--- a/jstests/repl/basic1.js
+++ b/jstests/repl/basic1.js
@@ -60,7 +60,7 @@ r = function( key , v ){
correct = { a : 2 , b : 1 };
function checkMR( t ){
- var res = t.mapReduce( m , r , "basic1_out" );
+ var res = t.mapReduce( m , r , { out : { inline : 1 } } )
assert.eq( correct , res.convertToSingleObject() , "checkMR: " + tojson( t ) );
}
@@ -148,6 +148,23 @@ x = { _id : 1 , x : 1 }
assert.eq( x , am.mu1.findOne() , "mu1" );
assert.soon( function(){ z = as.mu1.findOne(); printjson( z ); return friendlyEqual( x , z ); } , "mu2" )
+// profiling - this should be last
+
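+// profile writes are not replicated: the slave should have no system.profile entries
+// until profiling is enabled on the slave itself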
+am.setProfilingLevel( 2 )
+am.foo.insert( { x : 1 } )
+am.foo.findOne()
+block();
+assert.eq( 2 , am.system.profile.count() , "P1" )
+assert.eq( 0 , as.system.profile.count() , "P2" )
+
+assert.eq( 1 , as.foo.findOne().x , "P3" );
+assert.eq( 0 , as.system.profile.count() , "P4" )
+
+assert( as.getCollectionNames().indexOf( "system.profile" ) < 0 , "P4.5" )
+
+as.setProfilingLevel(2)
+as.foo.findOne();
+assert.eq( 1 , as.system.profile.count() , "P5" )
rt.stop();
diff --git a/jstests/repl/dbcase.js b/jstests/repl/dbcase.js
new file mode 100644
index 0000000..10a5a61
--- /dev/null
+++ b/jstests/repl/dbcase.js
@@ -0,0 +1,95 @@
+// Test db case checking with replication SERVER-2111
+
+baseName = "jstests_repl_dbcase";
+
+rt = new ReplTest( baseName );
+
+m = rt.start( true );
+s = rt.start( false );
+
+n1 = "dbname";
+n2 = "dbNAme";
+
+/**
+ * The value of n should be n1 or n2. Check that n is soon present while its
+ * opposite is not present.
+ */
+function check( n ) {
+ assert.soon( function() {
+ try {
+ // Our db name changes may trigger an exception - SERVER-3189.
+ names = s.getDBNames();
+ } catch (e) {
+ return false;
+ }
+ n1Idx = names.indexOf( n1 );
+ n2Idx = names.indexOf( n2 );
+ if ( n1Idx != -1 && n2Idx != -1 ) {
+ // n1 and n2 may both be reported as present transiently.
+ return false;
+ }
+ // Return true if we matched expected n.
+ return -1 != names.indexOf( n );
+ } );
+}
+
+/** Allow some time for additional operations to be processed by the slave. */
+function checkTwice( n ) {
+ check( n );
+ // zzz is expected to be cloned after n1 and n2 because of its position in the alphabet.
+ m.getDB( "zzz" ).c.save( {} );
+ assert.soon( function() { return s.getDB( "zzz" ).c.count(); } )
+ check( n );
+ m.getDB( "zzz" ).dropDatabase();
+}
+
+/**
+ * The slave's cloning may cause the master to create in-memory db names matching
+ * the old dbs the slave is attempting to clone. This function forces operation 'cmd'
+ * by deleting those in-memory dbs if necessary. It should only be called in cases
+ * where 'cmd' would succeed if not for the in-memory dbs the slave caused the master to create.
+ */
+function force( cmd ) {
+ print( "cmd: " + cmd );
+ eval( cmd );
+ while( m1.getLastError() ) {
+ sleep( 100 );
+ m1.dropDatabase();
+ m2.dropDatabase();
+ eval( cmd );
+ }
+}
+
+m1 = m.getDB( n1 );
+m2 = m.getDB( n2 );
+
+m1.c.save( {} );
+m2.c.save( {} ); // will fail due to conflict
+check( n1 );
+
+m1.dropDatabase();
+force( "m2.c.save( {} );" ); // will now succeed
+check( n2 );
+
+m2.dropDatabase();
+force( "m1.c.save( {} );" );
+check( n1 );
+
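+// Alternate which of the two case-conflicting names exists so the slave has to
+// resolve the conflict repeatedly while replicating.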
+for( i = 0; i < 5; ++i ) {
+ m1.dropDatabase();
+ force( "m2.c.save( {} );" );
+ m2.dropDatabase();
+ force( "m1.c.save( {} );" );
+}
+checkTwice( n1 );
+
+m1.dropDatabase();
+force( "m2.c.save( {} );" );
+
+for( i = 0; i < 5; ++i ) {
+ m2.dropDatabase();
+ force( "m1.c.save( {} );" );
+ m1.dropDatabase();
+ force( "m2.c.save( {} );" );
+}
+checkTwice( n2 );
diff --git a/jstests/repl/drop_dups.js b/jstests/repl/drop_dups.js
new file mode 100644
index 0000000..100f469
--- /dev/null
+++ b/jstests/repl/drop_dups.js
@@ -0,0 +1,68 @@
+
+var rt = new ReplTest( "drop_dups" );
+
+m = rt.start( true );
+s = rt.start( false );
+
+function block(){
+ am.runCommand( { getlasterror : 1 , w : 2 , wtimeout : 3000 } )
+}
+
+am = m.getDB( "foo" );
+as = s.getDB( "foo" );
+
+function run( createInBackground ) {
+
+ collName = "foo" + ( createInBackground ? "B" : "F" );
+
+ am[collName].drop();
+ am.blah.insert( { x : 1 } )
+ assert.soon( function(){
+ block();
+ return as.blah.findOne();
+ }
+ );
+
+
+ for ( i=0; i<10; i++ ) {
+ am[collName].insert( { _id : i , x : Math.floor( i / 2 ) } )
+ }
+
+ block();
+
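+ // Insert the same two x:20 docs directly on master and slave, in opposite orders,
+ // so the documents' on-disk order differs between the two nodes.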
+ am.runCommand( { "godinsert" : collName , obj : { _id : 100 , x : 20 } } );
+ am.runCommand( { "godinsert" : collName , obj : { _id : 101 , x : 20 } } );
+
+ as.runCommand( { "godinsert" : collName , obj : { _id : 101 , x : 20 } } );
+ as.runCommand( { "godinsert" : collName , obj : { _id : 100 , x : 20 } } );
+
+ assert.eq( as[collName].count() , am[collName].count() );
+
+ function mymap(z) {
+ return z._id + ":" + z.x + ",";
+ }
+
+
+ if ( am.serverStatus().mem.bits == 64 ) {
+ assert.neq( tojson(am[collName].find().map(mymap)) ,
+ tojson(as[collName].find().map(mymap)) , "order is not supposed to be the same on master and slave, but it is" );
+ }
+
+
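+ // Build the unique index with dropDups; even though document order differs between
+ // the nodes, master and slave must end up dropping the same documents (checked below).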
+ am[collName].ensureIndex( { x : 1 } , { unique : true , dropDups : true , background : createInBackground } );
+ am.blah.insert( { x : 1 } )
+ block();
+
+ assert.eq( 2 , am[collName].getIndexKeys().length , "A1 : " + createInBackground )
+ assert.eq( 2 , as[collName].getIndexKeys().length , "A2 : " + createInBackground )
+
+ assert.eq( am[collName].find().sort( { _id : 1 } ).map(mymap) ,
+ as[collName].find().sort( { _id : 1 } ).map(mymap) , "different things dropped on master and slave" );
+
+
+}
+
+run( false )
+run( true )
+
+rt.stop()
diff --git a/jstests/repl/mastermaster1.js b/jstests/repl/mastermaster1.js
index 4932d5a..97fdc14 100644
--- a/jstests/repl/mastermaster1.js
+++ b/jstests/repl/mastermaster1.js
@@ -4,32 +4,45 @@
ports = allocatePorts( 2 )
left = startMongodTest( ports[0] , "mastermaster1left" , false , { master : "" , slave : "" , source : "127.0.0.1:" + ports[1] } )
-right = startMongodTest( ports[1] , "mastermaster1left" , false , { master : "" , slave : "" , source : "127.0.0.1:" + ports[0] } )
-
-print( "check 1" )
x = left.getDB( "admin" ).runCommand( "ismaster" )
assert( x.ismaster , "left: " + tojson( x ) )
+right = startMongodTest( ports[1] , "mastermaster1right" , false , { master : "" , slave : "" , source : "127.0.0.1:" + ports[0] } )
+
x = right.getDB( "admin" ).runCommand( "ismaster" )
assert( x.ismaster , "right: " + tojson( x ) )
+print( "check 1" )
+
+
ldb = left.getDB( "test" )
rdb = right.getDB( "test" )
print( "check 2" )
ldb.foo.insert( { _id : 1 , x : "eliot" } )
-var result = ldb.runCommand( { getlasterror : 1 , w : 2 , wtimeout : 20000 } );
+result = ldb.runCommand( { getlasterror : 1 , w : 2 , wtimeout : 40000 } );
printjson(result);
rdb.foo.insert( { _id : 2 , x : "sara" } )
-result = rdb.runCommand( { getlasterror : 1 , w : 2 , wtimeout : 20000 } )
+result = rdb.runCommand( { getlasterror : 1 , w : 2 , wtimeout : 40000 } )
printjson(result);
print( "check 3" )
+print( "left" )
+ldb.foo.find().forEach( printjsononeline )
+print( "right" )
+rdb.foo.find().forEach( printjsononeline )
+
+print( "oplog" )
+
+rdb.getSisterDB( "local" ).getCollection( "oplog.$main" ).find().forEach( printjsononeline )
+
+/*
assert.eq( 2 , ldb.foo.count() , "B1" )
assert.eq( 2 , rdb.foo.count() , "B2" )
+*/
print( "going to stop everything" )
diff --git a/jstests/repl/mod_move.js b/jstests/repl/mod_move.js
new file mode 100644
index 0000000..d39e747
--- /dev/null
+++ b/jstests/repl/mod_move.js
@@ -0,0 +1,69 @@
+
+// test repl with document moves during slave initial sync
+// data on master/slave must end up the same
+
+var rt = new ReplTest( "mod_move" );
+
+m = rt.start( true , { oplogSize : 50 } );
+
+function block(){
+ am.runCommand( { getlasterror : 1 , w : 2 , wtimeout : 3000 } )
+}
+
+am = m.getDB( "foo" );
+
+function check( note ){
+ var start = new Date();
+ var x,y;
+ while ( (new Date()).getTime() - start.getTime() < 5 * 60 * 1000 ){
+ x = am.runCommand( "dbhash" );
+ y = as.runCommand( "dbhash" );
+ if ( x.md5 == y.md5 )
+ return;
+ sleep( 200 );
+ }
+ assert.eq( x.md5 , y.md5 , note );
+}
+
+// insert a lot of 'big' docs
+// so when we delete them the small docs move here
+
+BIG = 100000;
+N = BIG * 2;
+
+s : "asdasdasdasdasdasdasdadasdadasdadasdasdas"
+
+for ( i=0; i<BIG; i++ ) {
+ am.a.insert( { _id : i , s : 1 , x : 1 } )
+}
+for ( ; i<N; i++ ) {
+ am.a.insert( { _id : i , s : 1 } )
+}
+for ( i=0; i<BIG; i++ ) {
+ am.a.remove( { _id : i } )
+}
+am.getLastError();
+assert.eq( BIG , am.a.count() )
+
+assert.eq( 1 , am.a.stats().paddingFactor , "A2" )
+
+
+// start slave
+s = rt.start( false );
+as = s.getDB( "foo" );
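+// While the slave is syncing, $set a new field on each remaining doc so it grows and
+// moves into the space freed by the removes above.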
+for ( i=N-1; i>=BIG; i-- ) {
+ am.a.update( { _id : i } , { $set : { x : 1 } } )
+ if ( i == N - 1 ) { // first update: slave should still be cloning
+ am.getLastError()
+ assert.lt( as.a.count() , BIG , "B1" )
+ print( "NOW : " + as.a.count() )
+ }
+}
+
+check( "B" )
+
+rt.stop();
+
+
+
+
diff --git a/jstests/repl/pair1.js b/jstests/repl/pair1.js
deleted file mode 100644
index 84dd7b7..0000000
--- a/jstests/repl/pair1.js
+++ /dev/null
@@ -1,100 +0,0 @@
-// Basic pairing test
-
-var baseName = "jstests_pair1test";
-
-debug = function( p ) {
-// print( p );
-}
-
-ismaster = function( n ) {
- var im = n.getDB( "admin" ).runCommand( { "ismaster" : 1 } );
-// print( "ismaster: " + tojson( im ) );
- assert( im, "command ismaster failed" );
- return im.ismaster;
-}
-
-var writeOneIdx = 0;
-
-writeOne = function( n ) {
- n.getDB( baseName ).z.save( { _id: new ObjectId(), i: ++writeOneIdx } );
-}
-
-getCount = function( n ) {
- return n.getDB( baseName ).z.find( { i: writeOneIdx } ).toArray().length;
-}
-
-checkWrite = function( m, s ) {
- writeOne( m );
- assert.eq( 1, getCount( m ) );
- check( s );
-}
-
-check = function( s ) {
- s.setSlaveOk();
- assert.soon( function() {
- return 1 == getCount( s );
- } );
- sleep( 500 ); // wait for sync clone to finish up
-}
-
-// check that slave reads and writes are guarded
-checkSlaveGuard = function( s ) {
- var t = s.getDB( baseName + "-temp" ).temp;
- assert.throws( t.find().count, [], "not master" );
- assert.throws( t.find(), [], "not master", "find did not assert" );
-
- checkError = function() {
- assert.eq( "not master", s.getDB( "admin" ).getLastError() );
- s.getDB( "admin" ).resetError();
- }
- s.getDB( "admin" ).resetError();
- t.save( {x:1} );
- checkError();
- t.update( {}, {x:2}, true );
- checkError();
- t.remove( {x:0} );
- checkError();
-}
-
-doTest = function( signal ) {
-
- ports = allocatePorts( 3 );
-
- a = new MongodRunner( ports[ 0 ], "/data/db/" + baseName + "-arbiter" );
- l = new MongodRunner( ports[ 1 ], "/data/db/" + baseName + "-left", "127.0.0.1:" + ports[ 2 ], "127.0.0.1:" + ports[ 0 ] );
- r = new MongodRunner( ports[ 2 ], "/data/db/" + baseName + "-right", "127.0.0.1:" + ports[ 1 ], "127.0.0.1:" + ports[ 0 ] );
-
- rp = new ReplPair( l, r, a );
- rp.start();
- rp.waitForSteadyState();
-
- checkSlaveGuard( rp.slave() );
-
- checkWrite( rp.master(), rp.slave() );
-
- debug( "kill first" );
- rp.killNode( rp.master(), signal );
- rp.waitForSteadyState( [ 1, null ], rp.slave().host );
- writeOne( rp.master() );
-
- debug( "restart first" );
- rp.start( true );
- rp.waitForSteadyState();
- check( rp.slave() );
- checkWrite( rp.master(), rp.slave() );
-
- debug( "kill second" );
- rp.killNode( rp.master(), signal );
- rp.waitForSteadyState( [ 1, null ], rp.slave().host );
-
- debug( "restart second" );
- rp.start( true );
- rp.waitForSteadyState( [ 1, 0 ], rp.master().host );
- checkWrite( rp.master(), rp.slave() );
-
- ports.forEach( function( x ) { stopMongod( x ); } );
-
-}
-
-doTest( 15 ); // SIGTERM
-doTest( 9 ); // SIGKILL
diff --git a/jstests/repl/pair2.js b/jstests/repl/pair2.js
deleted file mode 100644
index 2491fb2..0000000
--- a/jstests/repl/pair2.js
+++ /dev/null
@@ -1,71 +0,0 @@
-// Pairing resync
-
-var baseName = "jstests_pair2test";
-
-ismaster = function( n ) {
- im = n.getDB( "admin" ).runCommand( { "ismaster" : 1 } );
- assert( im );
- return im.ismaster;
-}
-
-soonCount = function( m, count ) {
- assert.soon( function() {
-// print( "counting" );
-//// print( "counted: " + l.getDB( baseName ).z.find().count() );
- return m.getDB( baseName ).z.find().count() == count;
- } );
-}
-
-doTest = function( signal ) {
-
- ports = allocatePorts( 3 );
-
- a = new MongodRunner( ports[ 0 ], "/data/db/" + baseName + "-arbiter" );
- l = new MongodRunner( ports[ 1 ], "/data/db/" + baseName + "-left", "127.0.0.1:" + ports[ 2 ], "127.0.0.1:" + ports[ 0 ] );
- r = new MongodRunner( ports[ 2 ], "/data/db/" + baseName + "-right", "127.0.0.1:" + ports[ 1 ], "127.0.0.1:" + ports[ 0 ] );
-
- rp = new ReplPair( l, r, a );
- rp.start();
- rp.waitForSteadyState();
-
- rp.slave().setSlaveOk();
- mz = rp.master().getDB( baseName ).z;
-
- mz.save( { _id: new ObjectId() } );
- soonCount( rp.slave(), 1 );
- assert.eq( 0, rp.slave().getDB( "admin" ).runCommand( { "resync" : 1 } ).ok );
-
- sleep( 3000 ); // allow time to finish clone and save ReplSource
- rp.killNode( rp.slave(), signal );
- rp.waitForSteadyState( [ 1, null ], rp.master().host );
-
- big = new Array( 2000 ).toString();
- for( i = 0; i < 1000; ++i )
- mz.save( { _id: new ObjectId(), i: i, b: big } );
-
- rp.start( true );
- rp.waitForSteadyState( [ 1, 0 ], rp.master().host );
-
- sleep( 15000 );
-
- rp.slave().setSlaveOk();
- assert.soon( function() {
- ret = rp.slave().getDB( "admin" ).runCommand( { "resync" : 1 } );
-// printjson( ret );
- return 1 == ret.ok;
- } );
-
- sleep( 8000 );
- soonCount( rp.slave(), 1001 );
- sz = rp.slave().getDB( baseName ).z
- assert.eq( 1, sz.find( { i: 0 } ).count() );
- assert.eq( 1, sz.find( { i: 999 } ).count() );
-
- assert.eq( 0, rp.slave().getDB( "admin" ).runCommand( { "resync" : 1 } ).ok );
-
- ports.forEach( function( x ) { stopMongod( x ); } );
-
-}
-
-doTest( 15 ); // SIGTERM
-doTest( 9 ); // SIGKILL
diff --git a/jstests/repl/pair3.js b/jstests/repl/pair3.js
deleted file mode 100644
index d5fdf7e..0000000
--- a/jstests/repl/pair3.js
+++ /dev/null
@@ -1,245 +0,0 @@
-// test arbitration
-
-var baseName = "jstests_pair3test";
-
-ismaster = function( n ) {
- var im = n.getDB( "admin" ).runCommand( { "ismaster" : 1 } );
- print( "ismaster: " + tojson( im ) );
- assert( im, "command ismaster failed" );
- return im.ismaster;
-}
-
-// bring up node connections before arbiter connections so that arb can forward to node when expected
-connect = function() {
- if ( lp == null ) {
- print("connecting lp");
- lp = startMongoProgram( "mongobridge", "--port", lpPort, "--dest", "localhost:" + lPort );
- }
- if ( rp == null ) {
- print("connecting rp");
- rp = startMongoProgram( "mongobridge", "--port", rpPort, "--dest", "localhost:" + rPort );
- }
- if ( al == null ) {
- print("connecting al");
- al = startMongoProgram( "mongobridge", "--port", alPort, "--dest", "localhost:" + aPort );
- }
- if ( ar == null ) {
- print("connecting ar");
- ar = startMongoProgram( "mongobridge", "--port", arPort, "--dest", "localhost:" + aPort );
- }
-}
-
-disconnectNode = function( mongo ) {
- if ( lp ) {
- print("disconnecting lp: "+lpPort);
- stopMongoProgram( lpPort );
- lp = null;
- }
- if ( rp ) {
- print("disconnecting rp: "+rpPort);
- stopMongoProgram( rpPort );
- rp = null;
- }
- if ( mongo.host.match( new RegExp( "^127.0.0.1:" + lPort + "$" ) ) ) {
- print("disconnecting al: "+alPort);
- stopMongoProgram( alPort );
- al = null;
- } else if ( mongo.host.match( new RegExp( "^127.0.0.1:" + rPort + "$" ) ) ) {
- print("disconnecting ar: "+arPort);
- stopMongoProgram( arPort );
- ar = null;
- } else {
- assert( false, "don't know how to disconnect node: " + mongo );
- }
-}
-
-doTest1 = function() {
- al = ar = lp = rp = null;
- ports = allocatePorts( 7 );
- aPort = ports[ 0 ];
- alPort = ports[ 1 ];
- arPort = ports[ 2 ];
- lPort = ports[ 3 ];
- lpPort = ports[ 4 ];
- rPort = ports[ 5 ];
- rpPort = ports[ 6 ];
-
- connect();
-
- a = new MongodRunner( aPort, "/data/db/" + baseName + "-arbiter" );
- l = new MongodRunner( lPort, "/data/db/" + baseName + "-left", "127.0.0.1:" + rpPort, "127.0.0.1:" + alPort );
- r = new MongodRunner( rPort, "/data/db/" + baseName + "-right", "127.0.0.1:" + lpPort, "127.0.0.1:" + arPort );
-
- pair = new ReplPair( l, r, a );
-
- print("normal startup");
- pair.start();
- pair.waitForSteadyState();
-
- print("disconnect slave");
- disconnectNode( pair.slave() );
- pair.waitForSteadyState( [ 1, -3 ], pair.master().host );
-
- print("disconnect master");
- disconnectNode( pair.master() );
- pair.waitForSteadyState( [ -3, -3 ] );
-
- print("reconnect");
- connect();
- pair.waitForSteadyState();
-
- print("disconnect master");
- disconnectNode( pair.master() );
- pair.waitForSteadyState( [ 1, -3 ], pair.slave().host, true );
-
- print("disconnect new master");
- disconnectNode( pair.master() );
- pair.waitForSteadyState( [ -3, -3 ] );
-
- print("reconnect");
- connect();
- pair.waitForSteadyState();
-
- print("disconnect slave");
- disconnectNode( pair.slave() );
- pair.waitForSteadyState( [ 1, -3 ], pair.master().host );
-
- print("reconnect slave");
- connect();
- pair.waitForSteadyState( [ 1, 0 ], pair.master().host );
-
- print("disconnect master");
- disconnectNode( pair.master() );
- pair.waitForSteadyState( [ 1, -3 ], pair.slave().host, true );
-
- print("reconnect old master");
- connect();
- pair.waitForSteadyState( [ 1, 0 ], pair.master().host );
-
- ports.forEach( function( x ) { stopMongoProgram( x ); } );
-}
-
-// this time don't start connected
-doTest2 = function() {
- al = ar = lp = rp = null;
- ports = allocatePorts( 7 );
- aPort = ports[ 0 ];
- alPort = ports[ 1 ];
- arPort = ports[ 2 ];
- lPort = ports[ 3 ];
- lpPort = ports[ 4 ];
- rPort = ports[ 5 ];
- rpPort = ports[ 6 ];
-
- a = new MongodRunner( aPort, "/data/db/" + baseName + "-arbiter" );
- l = new MongodRunner( lPort, "/data/db/" + baseName + "-left", "127.0.0.1:" + rpPort, "127.0.0.1:" + alPort );
- r = new MongodRunner( rPort, "/data/db/" + baseName + "-right", "127.0.0.1:" + lpPort, "127.0.0.1:" + arPort );
-
- pair = new ReplPair( l, r, a );
- pair.start();
- pair.waitForSteadyState( [ -3, -3 ] );
-
- startMongoProgram( "mongobridge", "--port", arPort, "--dest", "localhost:" + aPort );
-
- // there hasn't been an initial sync, no no node will become master
-
- for( i = 0; i < 10; ++i ) {
- assert( pair.isMaster( pair.right() ) == -3 && pair.isMaster( pair.left() ) == -3 );
- sleep( 500 );
- }
-
- stopMongoProgram( arPort );
-
- startMongoProgram( "mongobridge", "--port", alPort, "--dest", "localhost:" + aPort );
-
- for( i = 0; i < 10; ++i ) {
- assert( pair.isMaster( pair.right() ) == -3 && pair.isMaster( pair.left() ) == -3 );
- sleep( 500 );
- }
-
- stopMongoProgram( alPort );
-
- // connect l and r without a
-
- startMongoProgram( "mongobridge", "--port", lpPort, "--dest", "localhost:" + lPort );
- startMongoProgram( "mongobridge", "--port", rpPort, "--dest", "localhost:" + rPort );
-
- pair.waitForSteadyState( [ 1, 0 ] );
-
- ports.forEach( function( x ) { stopMongoProgram( x ); } );
-}
-
-// recover from master - master setup
-doTest3 = function() {
- al = ar = lp = rp = null;
- ports = allocatePorts( 7 );
- aPort = ports[ 0 ];
- alPort = ports[ 1 ];
- arPort = ports[ 2 ];
- lPort = ports[ 3 ];
- lpPort = ports[ 4 ];
- rPort = ports[ 5 ];
- rpPort = ports[ 6 ];
-
- connect();
-
- a = new MongodRunner( aPort, "/data/db/" + baseName + "-arbiter" );
- l = new MongodRunner( lPort, "/data/db/" + baseName + "-left", "127.0.0.1:" + rpPort, "127.0.0.1:" + alPort );
- r = new MongodRunner( rPort, "/data/db/" + baseName + "-right", "127.0.0.1:" + lpPort, "127.0.0.1:" + arPort );
-
- pair = new ReplPair( l, r, a );
- pair.start();
- pair.waitForSteadyState();
-
- // now can only talk to arbiter
- stopMongoProgram( lpPort );
- stopMongoProgram( rpPort );
- pair.waitForSteadyState( [ 1, 1 ], null, true );
-
- // recover
- startMongoProgram( "mongobridge", "--port", lpPort, "--dest", "localhost:" + lPort );
- startMongoProgram( "mongobridge", "--port", rpPort, "--dest", "localhost:" + rPort );
- pair.waitForSteadyState( [ 1, 0 ], null, true );
-
- ports.forEach( function( x ) { stopMongoProgram( x ); } );
-}
-
-// check that initial sync is persistent
-doTest4 = function( signal ) {
- al = ar = lp = rp = null;
- ports = allocatePorts( 7 );
- aPort = ports[ 0 ];
- alPort = ports[ 1 ];
- arPort = ports[ 2 ];
- lPort = ports[ 3 ];
- lpPort = ports[ 4 ];
- rPort = ports[ 5 ];
- rpPort = ports[ 6 ];
-
- connect();
-
- a = new MongodRunner( aPort, "/data/db/" + baseName + "-arbiter" );
- l = new MongodRunner( lPort, "/data/db/" + baseName + "-left", "127.0.0.1:" + rpPort, "127.0.0.1:" + alPort );
- r = new MongodRunner( rPort, "/data/db/" + baseName + "-right", "127.0.0.1:" + lpPort, "127.0.0.1:" + arPort );
-
- pair = new ReplPair( l, r, a );
- pair.start();
- pair.waitForSteadyState();
-
- pair.killNode( pair.left(), signal );
- pair.killNode( pair.right(), signal );
- stopMongoProgram( rpPort );
- stopMongoProgram( lpPort );
-
- // now can only talk to arbiter
- pair.start( true );
- pair.waitForSteadyState( [ 1, 1 ], null, true );
-
- ports.forEach( function( x ) { stopMongoProgram( x ); } );
-}
-
-doTest1();
-doTest2();
-doTest3();
-doTest4( 15 );
-doTest4( 9 );
diff --git a/jstests/repl/pair4.js b/jstests/repl/pair4.js
deleted file mode 100644
index c04433e..0000000
--- a/jstests/repl/pair4.js
+++ /dev/null
@@ -1,160 +0,0 @@
-// data consistency after master-master
-
-var baseName = "jstests_pair4test";
-
-debug = function( o ) {
- printjson( o );
-}
-
-ismaster = function( n ) {
- var im = n.getDB( "admin" ).runCommand( { "ismaster" : 1 } );
- print( "ismaster: " + tojson( im ) );
- assert( im, "command ismaster failed" );
- return im.ismaster;
-}
-
-connect = function() {
- startMongoProgram( "mongobridge", "--port", lpPort, "--dest", "localhost:" + lPort );
- startMongoProgram( "mongobridge", "--port", rpPort, "--dest", "localhost:" + rPort );
-}
-
-disconnect = function() {
- stopMongoProgram( lpPort );
- stopMongoProgram( rpPort );
-}
-
-write = function( m, n, id ) {
- if ( id ) {
- save = { _id:id, n:n };
- } else {
- save = { n:n };
- }
- m.getDB( baseName ).getCollection( baseName ).save( save );
-}
-
-check = function( m, n, id ) {
- m.setSlaveOk();
- if ( id ) {
- find = { _id:id, n:n };
- } else {
- find = { n:n };
- }
- assert.soon( function() { return m.getDB( baseName ).getCollection( baseName ).find( find ).count() > 0; },
- "failed waiting for " + m + " value of n to be " + n );
-}
-
-checkCount = function( m, c ) {
- m.setSlaveOk();
- assert.soon( function() {
- actual = m.getDB( baseName ).getCollection( baseName ).find().count();
- print( actual );
- return c == actual; },
- "count failed for " + m );
-}
-
-coll = function( m ) {
- return m.getDB( baseName ).getCollection( baseName );
-}
-
-db2Coll = function( m ) {
- return m.getDB( baseName + "_second" ).getCollection( baseName );
-}
-
-doTest = function( recover, newMaster, newSlave ) {
- ports = allocatePorts( 5 );
- aPort = ports[ 0 ];
- lPort = ports[ 1 ];
- lpPort = ports[ 2 ];
- rPort = ports[ 3 ];
- rpPort = ports[ 4 ];
-
- // start normally
- connect();
- a = new MongodRunner( aPort, "/data/db/" + baseName + "-arbiter" );
- l = new MongodRunner( lPort, "/data/db/" + baseName + "-left", "127.0.0.1:" + rpPort, "127.0.0.1:" + aPort );
- r = new MongodRunner( rPort, "/data/db/" + baseName + "-right", "127.0.0.1:" + lpPort, "127.0.0.1:" + aPort );
- pair = new ReplPair( l, r, a );
- pair.start();
- pair.waitForSteadyState();
-
- firstMaster = pair.master();
- firstSlave = pair.slave();
-
- write( pair.master(), 0 );
- write( pair.master(), 1 );
- check( pair.slave(), 0 );
- check( pair.slave(), 1 );
-
- // now each can only talk to arbiter
- disconnect();
- pair.waitForSteadyState( [ 1, 1 ], null, true );
-
- m = newMaster();
- write( m, 10 );
- write( m, 100, "a" );
- coll( m ).update( {n:1}, {$set:{n:2}} );
- db2Coll( m ).save( {n:500} );
- db2Coll( m ).findOne();
-
- s = newSlave();
- write( s, 20 );
- write( s, 200, "a" );
- coll( s ).update( {n:1}, {n:1,m:3} );
- db2Coll( s ).save( {_id:"a",n:600} );
- db2Coll( s ).findOne();
-
- // recover
- recover();
-
- nodes = [ pair.right(), pair.left() ];
-
- nodes.forEach( function( x ) { checkCount( x, 5 ); } );
- nodes.forEach( function( x ) { [ 0, 10, 20, 100 ].forEach( function( y ) { check( x, y ); } ); } );
-
- checkM = function( c ) {
- assert.soon( function() {
- obj = coll( c ).findOne( {n:2} );
- printjson( obj );
- return obj.m == undefined;
- }, "n:2 test for " + c + " failed" );
- };
- nodes.forEach( function( x ) { checkM( x ); } );
-
- // check separate database
- nodes.forEach( function( x ) { assert.soon( function() {
- r = db2Coll( x ).findOne( {_id:"a"} );
- debug( r );
- if ( r == null ) {
- return false;
- }
- return 600 == r.n;
- } ) } );
-
- ports.forEach( function( x ) { stopMongoProgram( x ); } );
-
-}
-
-// right will be master on recovery b/c both sides will have completed initial sync
-debug( "basic test" );
-doTest( function() {
- connect();
- pair.waitForSteadyState( [ 1, 0 ], pair.right().host, true );
- }, function() { return pair.right(); }, function() { return pair.left(); } );
-
-doRestartTest = function( signal ) {
- doTest( function() {
- if ( signal == 9 ) {
- sleep( 3000 );
- }
- pair.killNode( firstMaster, signal );
- connect();
- pair.start( true );
- pair.waitForSteadyState( [ 1, 0 ], firstSlave.host, true );
- }, function() { return firstSlave; }, function() { return firstMaster; } );
-}
-
-debug( "sigterm restart test" );
-doRestartTest( 15 ) // SIGTERM
-
-debug( "sigkill restart test" );
-doRestartTest( 9 ) // SIGKILL
diff --git a/jstests/repl/pair5.js b/jstests/repl/pair5.js
deleted file mode 100644
index de7e2d5..0000000
--- a/jstests/repl/pair5.js
+++ /dev/null
@@ -1,95 +0,0 @@
-// writes to new master while making master-master logs consistent
-
-var baseName = "jstests_pair5test";
-
-debug = function( p ) {
- print( p );
-}
-
-ismaster = function( n ) {
- var im = n.getDB( "admin" ).runCommand( { "ismaster" : 1 } );
- print( "ismaster: " + tojson( im ) );
- assert( im, "command ismaster failed" );
- return im.ismaster;
-}
-
-connect = function() {
- startMongoProgram( "mongobridge", "--port", lpPort, "--dest", "localhost:" + lPort );
- startMongoProgram( "mongobridge", "--port", rpPort, "--dest", "localhost:" + rPort );
-}
-
-disconnect = function() {
- stopMongoProgram( lpPort );
- stopMongoProgram( rpPort );
-}
-
-write = function( m, n, id ) {
- if ( id ) {
- save = { _id:id, n:n };
- } else {
- save = { n:n };
- }
- m.getDB( baseName ).getCollection( baseName ).save( save );
-}
-
-checkCount = function( m, c ) {
- m.setSlaveOk();
- assert.soon( function() {
- actual = m.getDB( baseName ).getCollection( baseName ).find().count();
- print( actual );
- return c == actual; },
- "count failed for " + m );
-}
-
-doTest = function( nSlave, opIdMem ) {
- ports = allocatePorts( 5 );
- aPort = ports[ 0 ];
- lPort = ports[ 1 ];
- lpPort = ports[ 2 ];
- rPort = ports[ 3 ];
- rpPort = ports[ 4 ];
-
- // start normally
- connect();
- a = new MongodRunner( aPort, "/data/db/" + baseName + "-arbiter" );
- l = new MongodRunner( lPort, "/data/db/" + baseName + "-left", "127.0.0.1:" + rpPort, "127.0.0.1:" + aPort );
- r = new MongodRunner( rPort, "/data/db/" + baseName + "-right", "127.0.0.1:" + lpPort, "127.0.0.1:" + aPort );
- pair = new ReplPair( l, r, a );
- pair.start();
- pair.waitForSteadyState();
-
- // now each can only talk to arbiter
- disconnect();
- pair.waitForSteadyState( [ 1, 1 ], null, true );
-
- // left will become slave (b/c both completed initial sync)
- for( i = 0; i < nSlave; ++i ) {
- write( pair.left(), i, i );
- }
- pair.left().getDB( baseName ).getCollection( baseName ).findOne();
-
- for( i = 10000; i < 15000; ++i ) {
- write( pair.right(), i, i );
- }
- pair.right().getDB( baseName ).getCollection( baseName ).findOne();
-
- connect();
- pair.waitForSteadyState( [ 1, 0 ], pair.right().host, true );
-
- pair.master().getDB( baseName ).getCollection( baseName ).update( {_id:nSlave - 1}, {_id:nSlave - 1,n:-1}, true );
- assert.eq( -1, pair.master().getDB( baseName ).getCollection( baseName ).findOne( {_id:nSlave - 1} ).n );
- checkCount( pair.master(), 5000 + nSlave );
- assert.eq( -1, pair.master().getDB( baseName ).getCollection( baseName ).findOne( {_id:nSlave - 1} ).n );
- pair.slave().setSlaveOk();
- assert.soon( function() {
- n = pair.slave().getDB( baseName ).getCollection( baseName ).findOne( {_id:nSlave - 1} ).n;
- print( n );
- return -1 == n;
- } );
-
- ports.forEach( function( x ) { stopMongoProgram( x ); } );
-
-}
-
-doTest( 5000, 100000000 );
-doTest( 5000, 100 ); // force op id converstion to collection based storage
diff --git a/jstests/repl/pair6.js b/jstests/repl/pair6.js
deleted file mode 100644
index b249fc0..0000000
--- a/jstests/repl/pair6.js
+++ /dev/null
@@ -1,115 +0,0 @@
-// pairing cases where oplogs run out of space
-
-var baseName = "jstests_pair6test";
-
-debug = function( p ) {
- print( p );
-}
-
-ismaster = function( n ) {
- var im = n.getDB( "admin" ).runCommand( { "ismaster" : 1 } );
- print( "ismaster: " + tojson( im ) );
- assert( im, "command ismaster failed" );
- return im.ismaster;
-}
-
-connect = function() {
- startMongoProgram( "mongobridge", "--port", lpPort, "--dest", "localhost:" + lPort );
- startMongoProgram( "mongobridge", "--port", rpPort, "--dest", "localhost:" + rPort );
-}
-
-disconnect = function() {
- stopMongoProgram( lpPort );
- stopMongoProgram( rpPort );
-}
-
-checkCount = function( m, c ) {
- m.setSlaveOk();
- assert.soon( function() {
- actual = m.getDB( baseName ).getCollection( baseName ).find().count();
- print( actual );
- return c == actual; },
- "expected count " + c + " for " + m );
-}
-
-resetSlave = function( s ) {
- s.setSlaveOk();
- assert.soon( function() {
- ret = s.getDB( "admin" ).runCommand( { "resync" : 1 } );
- // printjson( ret );
- return 1 == ret.ok;
- } );
-}
-
-big = new Array( 2000 ).toString();
-
-doTest = function() {
- ports = allocatePorts( 5 );
- aPort = ports[ 0 ];
- lPort = ports[ 1 ];
- lpPort = ports[ 2 ];
- rPort = ports[ 3 ];
- rpPort = ports[ 4 ];
-
- // start normally
- connect();
- a = new MongodRunner( aPort, "/data/db/" + baseName + "-arbiter" );
- l = new MongodRunner( lPort, "/data/db/" + baseName + "-left", "127.0.0.1:" + rpPort, "127.0.0.1:" + aPort );
- r = new MongodRunner( rPort, "/data/db/" + baseName + "-right", "127.0.0.1:" + lpPort, "127.0.0.1:" + aPort );
- pair = new ReplPair( l, r, a );
- pair.start();
- pair.waitForSteadyState();
-
- disconnect();
- pair.waitForSteadyState( [ 1, 1 ], null, true );
-
- print( "test one" );
-
- // fill new slave oplog
- for( i = 0; i < 1000; ++i ) {
- pair.left().getDB( baseName ).getCollection( baseName ).save( {b:big} );
- }
- pair.left().getDB( baseName ).getCollection( baseName ).findOne();
-
- // write single to new master
- pair.right().getDB( baseName ).getCollection( baseName ).save( {} );
-
- connect();
- pair.waitForSteadyState( [ 1, 0 ], pair.right().host, true );
-
- resetSlave( pair.left() );
-
- checkCount( pair.left(), 1 );
- checkCount( pair.right(), 1 );
-
- pair.right().getDB( baseName ).getCollection( baseName ).remove( {} );
- checkCount( pair.left(), 0 );
-
- disconnect();
- pair.waitForSteadyState( [ 1, 1 ], null, true );
-
- print( "test two" );
-
- // fill new master oplog
- for( i = 0; i < 1000; ++i ) {
- pair.right().getDB( baseName ).getCollection( baseName ).save( {b:big} );
- }
-
- pair.left().getDB( baseName ).getCollection( baseName ).save( {_id:"abcde"} );
-
- connect();
- pair.waitForSteadyState( [ 1, 0 ], pair.right().host, true );
-
- sleep( 15000 );
-
- resetSlave( pair.left() );
-
- checkCount( pair.left(), 1000 );
- checkCount( pair.right(), 1000 );
- assert.eq( 0, pair.left().getDB( baseName ).getCollection( baseName ).find( {_id:"abcde"} ).count() );
-
- ports.forEach( function( x ) { stopMongoProgram( x ); } );
-
-}
-
-doTest(); \ No newline at end of file
diff --git a/jstests/repl/pair7.js b/jstests/repl/pair7.js
deleted file mode 100644
index 52ef91f..0000000
--- a/jstests/repl/pair7.js
+++ /dev/null
@@ -1,85 +0,0 @@
-// pairing with auth
-
-var baseName = "jstests_pair7test";
-
-setAdmin = function( n ) {
- n.getDB( "admin" ).addUser( "super", "super" );
- n.getDB( "local" ).addUser( "repl", "foo" );
- n.getDB( "local" ).system.users.findOne();
-}
-
-auth = function( n ) {
- return n.getDB( baseName ).auth( "test", "test" );
-}
-
-doTest = function( signal ) {
-
- ports = allocatePorts( 3 );
-
- m = startMongod( "--port", ports[ 1 ], "--dbpath", "/data/db/" + baseName + "-left", "--nohttpinterface", "--bind_ip", "127.0.0.1" );
- setAdmin( m );
- stopMongod( ports[ 1 ] );
-
- m = startMongod( "--port", ports[ 2 ], "--dbpath", "/data/db/" + baseName + "-right", "--nohttpinterface", "--bind_ip", "127.0.0.1" );
- setAdmin( m );
- stopMongod( ports[ 2 ] );
-
- a = new MongodRunner( ports[ 0 ], "/data/db/" + baseName + "-arbiter" );
- l = new MongodRunner( ports[ 1 ], "/data/db/" + baseName + "-left", "127.0.0.1:" + ports[ 2 ], "127.0.0.1:" + ports[ 0 ], [ "--auth" ] );
- r = new MongodRunner( ports[ 2 ], "/data/db/" + baseName + "-right", "127.0.0.1:" + ports[ 1 ], "127.0.0.1:" + ports[ 0 ], [ "--auth" ] );
-
- rp = new ReplPair( l, r, a );
- rp.start( true );
- rp.waitForSteadyState();
-
- rp.master().getDB( "admin" ).auth( "super", "super" );
- rp.master().getDB( baseName ).addUser( "test", "test" );
- auth( rp.master() ); // reauth
- assert.soon( function() { return auth( rp.slave() ); } );
- rp.slave().setSlaveOk();
-
- ma = rp.master().getDB( baseName ).a;
- ma.save( {} );
- sa = rp.slave().getDB( baseName ).a;
- assert.soon( function() { return 1 == sa.count(); } );
-
- rp.killNode( rp.slave(), signal );
- rp.waitForSteadyState( [ 1, null ] );
- ma.save( {} );
-
- rp.start( true );
- rp.waitForSteadyState();
- assert.soon( function() { return auth( rp.slave() ); } );
- rp.slave().setSlaveOk();
- sa = rp.slave().getDB( baseName ).a;
- assert.soon( function() { return 2 == sa.count(); } );
-
- ma.save( {a:1} );
- assert.soon( function() { return 1 == sa.count( {a:1} ); } );
-
- ma.update( {a:1}, {b:2} );
- assert.soon( function() { return 1 == sa.count( {b:2} ); } );
-
- ma.remove( {b:2} );
- assert.soon( function() { return 0 == sa.count( {b:2} ); } );
-
- rp.killNode( rp.master(), signal );
- rp.waitForSteadyState( [ 1, null ] );
- ma = sa;
- ma.save( {} );
-
- rp.start( true );
- rp.waitForSteadyState();
- assert.soon( function() { return auth( rp.slave() ); } );
- rp.slave().setSlaveOk();
- sa = rp.slave().getDB( baseName ).a;
- assert.soon( function() { return 3 == sa.count(); } );
-
- ma.save( {} );
- assert.soon( function() { return 4 == sa.count(); } );
-
- ports.forEach( function( x ) { stopMongod( x ); } );
-}
-
-doTest( 15 ); // SIGTERM
-doTest( 9 ); // SIGKILL
diff --git a/jstests/repl/repl2.js b/jstests/repl/repl2.js
index 42b0caf..b290c61 100644
--- a/jstests/repl/repl2.js
+++ b/jstests/repl/repl2.js
@@ -1,34 +1,43 @@
// Test resync command
soonCount = function( count ) {
- assert.soon( function() {
+ assert.soon( function() {
// print( "check count" );
// print( "count: " + s.getDB( baseName ).z.find().count() );
- return s.getDB("foo").a.find().count() == count;
- } );
+ return s.getDB("foo").a.find().count() == count;
+ } );
}
doTest = function( signal ) {
-
+ print("signal: "+signal);
+
var rt = new ReplTest( "repl2tests" );
// implicit small oplog makes slave get out of sync
- m = rt.start( true );
+ m = rt.start( true, { oplogSize : "1" } );
s = rt.start( false );
-
+
am = m.getDB("foo").a
-
+
am.save( { _id: new ObjectId() } );
soonCount( 1 );
assert.eq( 0, s.getDB( "admin" ).runCommand( { "resync" : 1 } ).ok );
rt.stop( false , signal );
-
+
big = new Array( 2000 ).toString();
for( i = 0; i < 1000; ++i )
am.save( { _id: new ObjectId(), i: i, b: big } );
s = rt.start( false , null , true );
- assert.soon( function() { return 1 == s.getDB( "admin" ).runCommand( { "resync" : 1 } ).ok; } );
+
+ print("earliest op in master: "+tojson(m.getDB("local").oplog.$main.find().sort({$natural:1}).limit(1).next()));
+ print("latest op on slave: "+tojson(s.getDB("local").sources.findOne()));
+
+ assert.soon( function() {
+ var result = s.getDB( "admin" ).runCommand( { "resync" : 1 } );
+ print("resync says: "+tojson(result));
+ return result.ok == 1;
+ } );
soonCount( 1001 );
assert.automsg( "m.getDB( 'local' ).getCollection( 'oplog.$main' ).stats().size > 0" );
@@ -36,7 +45,7 @@ doTest = function( signal ) {
as = s.getDB("foo").a
assert.eq( 1, as.find( { i: 0 } ).count() );
assert.eq( 1, as.find( { i: 999 } ).count() );
-
+
assert.eq( 0, s.getDB( "admin" ).runCommand( { "resync" : 1 } ).ok );
rt.stop();
diff --git a/jstests/repl/repl3.js b/jstests/repl/repl3.js
index d3c3848..5ace9b6 100644
--- a/jstests/repl/repl3.js
+++ b/jstests/repl/repl3.js
@@ -10,38 +10,42 @@ soonCount = function( count ) {
} );
}
-doTest = function( signal ) {
-
- rt = new ReplTest( "repl3tests" );
-
- m = rt.start( true );
- s = rt.start( false );
-
- am = m.getDB( baseName ).a
-
- am.save( { _id: new ObjectId() } );
- soonCount( 1 );
- rt.stop( false, signal );
-
- big = new Array( 2000 ).toString();
- for( i = 0; i < 1000; ++i )
- am.save( { _id: new ObjectId(), i: i, b: big } );
-
- s = rt.start( false, { autoresync: null }, true );
-
+doTest = function (signal) {
+
+ print("repl3.js doTest(" + signal + ")")
+
+ rt = new ReplTest("repl3tests");
+
+ m = rt.start(true);
+ s = rt.start(false);
+
+ am = m.getDB(baseName).a
+
+ am.save({ _id: new ObjectId() });
+ soonCount(1);
+ rt.stop(false, signal);
+
+ big = new Array(2000).toString();
+ for (i = 0; i < 1000; ++i)
+ am.save({ _id: new ObjectId(), i: i, b: big });
+
+ s = rt.start(false, { autoresync: null }, true);
+
// after SyncException, mongod waits 10 secs.
- sleep( 15000 );
-
+ sleep(15000);
+
// Need the 2 additional seconds timeout, since commands don't work on an 'allDead' node.
- soonCount( 1001 );
- as = s.getDB( baseName ).a
- assert.eq( 1, as.find( { i: 0 } ).count() );
- assert.eq( 1, as.find( { i: 999 } ).count() );
-
- assert.commandFailed( s.getDB( "admin" ).runCommand( { "resync" : 1 } ) );
+ soonCount(1001);
+ as = s.getDB(baseName).a
+ assert.eq(1, as.find({ i: 0 }).count());
+ assert.eq(1, as.find({ i: 999 }).count());
+
+ assert.commandFailed(s.getDB("admin").runCommand({ "resync": 1 }));
rt.stop();
}
doTest( 15 ); // SIGTERM
doTest( 9 ); // SIGKILL
+
+print("repl3.js OK")
diff --git a/jstests/repl/replacePeer1.js b/jstests/repl/replacePeer1.js
deleted file mode 100644
index b3743ce..0000000
--- a/jstests/repl/replacePeer1.js
+++ /dev/null
@@ -1,82 +0,0 @@
-// test replace peer on master
-
-var baseName = "jstests_replacepeer1test";
-
-ismaster = function( n ) {
- im = n.getDB( "admin" ).runCommand( { "ismaster" : 1 } );
-// print( "ismaster: " + tojson( im ) );
- assert( im );
- return im.ismaster;
-}
-
-var writeOneIdx = 0;
-
-writeOne = function( n ) {
- n.getDB( baseName ).z.save( { _id: new ObjectId(), i: ++writeOneIdx } );
-}
-
-getCount = function( n ) {
- return n.getDB( baseName ).z.find( { i: writeOneIdx } ).toArray().length;
-}
-
-checkWrite = function( m, s ) {
- writeOne( m );
- assert.eq( 1, getCount( m ) );
- s.setSlaveOk();
- assert.soon( function() {
- return 1 == getCount( s );
- } );
-}
-
-doTest = function( signal ) {
-
- ports = allocatePorts( 4 );
-
- a = new MongodRunner( ports[ 0 ], "/data/db/" + baseName + "-arbiter" );
- l = new MongodRunner( ports[ 1 ], "/data/db/" + baseName + "-left", "127.0.0.1:" + ports[ 3 ], "127.0.0.1:" + ports[ 0 ] );
- r = new MongodRunner( ports[ 3 ], "/data/db/" + baseName + "-right", "127.0.0.1:" + ports[ 1 ], "127.0.0.1:" + ports[ 0 ] );
-
- rp = new ReplPair( l, r, a );
- rp.start();
- rp.waitForSteadyState( [ 1, 0 ] );
- rightMaster = ( rp.master().host == rp.right().host );
-
- checkWrite( rp.master(), rp.slave() );
-
- rp.killNode( rp.slave(), signal );
-
- writeOne( rp.master() );
-
- assert.commandWorked( rp.master().getDB( "admin" ).runCommand( {replacepeer:1} ) );
-
- rp.killNode( rp.master(), signal );
- rp.killNode( rp.arbiter(), signal );
-
- if ( rightMaster ) {
- o = new MongodRunner( ports[ 2 ], "/data/db/" + baseName + "-left", "127.0.0.1:" + ports[ 3 ], "127.0.0.1:" + ports[ 0 ] );
- r = new MongodRunner( ports[ 3 ], "/data/db/" + baseName + "-right", "127.0.0.1:" + ports[ 2 ], "127.0.0.1:" + ports[ 0 ] );
- rp = new ReplPair( o, r, a );
- resetDbpath( "/data/db/" + baseName + "-left" );
- } else {
- l = new MongodRunner( ports[ 1 ], "/data/db/" + baseName + "-left", "127.0.0.1:" + ports[ 2 ], "127.0.0.1:" + ports[ 0 ] );
- o = new MongodRunner( ports[ 2 ], "/data/db/" + baseName + "-right", "127.0.0.1:" + ports[ 1 ], "127.0.0.1:" + ports[ 0 ] );
- rp = new ReplPair( l, o, a );
- resetDbpath( "/data/db/" + baseName + "-right" );
- }
-
- rp.start( true );
- rp.waitForSteadyState( [ 1, 0 ] );
-
- rp.slave().setSlaveOk();
- assert.eq( 2, rp.master().getDB( baseName ).z.find().toArray().length );
- assert.eq( 2, rp.slave().getDB( baseName ).z.find().toArray().length );
-
- checkWrite( rp.master(), rp.slave() );
- assert.eq( 3, rp.slave().getDB( baseName ).z.find().toArray().length );
-
- ports.forEach( function( x ) { stopMongod( x ); } );
-
-}
-
-doTest( 15 ); // SIGTERM
-doTest( 9 ); // SIGKILL
diff --git a/jstests/repl/replacePeer2.js b/jstests/repl/replacePeer2.js
deleted file mode 100644
index 33b054a..0000000
--- a/jstests/repl/replacePeer2.js
+++ /dev/null
@@ -1,86 +0,0 @@
-// test replace peer on slave
-
-var baseName = "jstests_replacepeer2test";
-
-ismaster = function( n ) {
- im = n.getDB( "admin" ).runCommand( { "ismaster" : 1 } );
-// print( "ismaster: " + tojson( im ) );
- assert( im );
- return im.ismaster;
-}
-
-var writeOneIdx = 0;
-
-writeOne = function( n ) {
- n.getDB( baseName ).z.save( { _id: new ObjectId(), i: ++writeOneIdx } );
-}
-
-getCount = function( n ) {
- return n.getDB( baseName ).z.find( { i: writeOneIdx } ).toArray().length;
-}
-
-checkWrite = function( m, s ) {
- writeOne( m );
- assert.eq( 1, getCount( m ) );
- s.setSlaveOk();
- assert.soon( function() {
- return 1 == getCount( s );
- } );
-}
-
-doTest = function( signal ) {
-
- ports = allocatePorts( 4 );
-
- a = new MongodRunner( ports[ 0 ], "/data/db/" + baseName + "-arbiter" );
- l = new MongodRunner( ports[ 1 ], "/data/db/" + baseName + "-left", "127.0.0.1:" + ports[ 3 ], "127.0.0.1:" + ports[ 0 ] );
- r = new MongodRunner( ports[ 3 ], "/data/db/" + baseName + "-right", "127.0.0.1:" + ports[ 1 ], "127.0.0.1:" + ports[ 0 ] );
-
- rp = new ReplPair( l, r, a );
- rp.start();
- rp.waitForSteadyState( [ 1, 0 ] );
- leftSlave = ( rp.slave().host == rp.left().host );
-
- checkWrite( rp.master(), rp.slave() );
-
- // allow slave to finish initial sync
- var res = rp.slave().getDB( "admin" ).runCommand( {replacepeer:1} );
- assert( res.ok , "replacepeer didn't finish: " + tojson( res ) );
-
- // Should not be saved to slave.
- writeOne( rp.master() );
- // Make sure there would be enough time to save to l if we hadn't called replacepeer.
- sleep( 10000 );
-
- ports.forEach( function( x ) { stopMongod( x, signal ); } );
-
- if ( leftSlave ) {
- l = new MongodRunner( ports[ 1 ], "/data/db/" + baseName + "-left", "127.0.0.1:" + ports[ 2 ], "127.0.0.1:" + ports[ 0 ] );
- o = new MongodRunner( ports[ 2 ], "/data/db/" + baseName + "-right", "127.0.0.1:" + ports[ 1 ], "127.0.0.1:" + ports[ 0 ] );
- rp = new ReplPair( l, o, a );
- resetDbpath( "/data/db/" + baseName + "-right" );
- } else {
- o = new MongodRunner( ports[ 2 ], "/data/db/" + baseName + "-left", "127.0.0.1:" + ports[ 3 ], "127.0.0.1:" + ports[ 0 ] );
- r = new MongodRunner( ports[ 3 ], "/data/db/" + baseName + "-right", "127.0.0.1:" + ports[ 2 ], "127.0.0.1:" + ports[ 0 ] );
- rp = new ReplPair( o, r, a );
- resetDbpath( "/data/db/" + baseName + "-left" );
- }
-
- rp.start( true );
- rp.waitForSteadyState( [ 1, 0 ] );
-
- rp.slave().setSlaveOk();
- assert.eq( 1, rp.slave().getDB( baseName ).z.find().toArray().length );
- assert.eq( 1, rp.master().getDB( baseName ).z.find().toArray().length );
-
- checkWrite( rp.master(), rp.slave() );
- assert.eq( 2, rp.slave().getDB( baseName ).z.find().toArray().length );
-
- ports.forEach( function( x ) { stopMongod( x ); } );
-
-}
-
-doTest( 15 ); // SIGTERM
-doTest( 9 ); // SIGKILL
-
-print("replace2Peer finishes");
diff --git a/jstests/repl/snapshot2.js b/jstests/repl/snapshot2.js
deleted file mode 100644
index 60b3531..0000000
--- a/jstests/repl/snapshot2.js
+++ /dev/null
@@ -1,72 +0,0 @@
-// Test SERVER-623 - starting repl peer from a new snapshot of master
-
-print("snapshot2.js 1 -----------------------------------------------------------");
-
-ports = allocatePorts( 3 );
-
-var baseName = "repl_snapshot2";
-var basePath = "/data/db/" + baseName;
-
-a = new MongodRunner( ports[ 0 ], basePath + "-arbiter" );
-l = new MongodRunner( ports[ 1 ], basePath + "-left", "127.0.0.1:" + ports[ 2 ], "127.0.0.1:" + ports[ 0 ] );
-r = new MongodRunner( ports[ 2 ], basePath + "-right", "127.0.0.1:" + ports[ 1 ], "127.0.0.1:" + ports[ 0 ] );
-
-print("snapshot2.js 2 -----------------------------------------------------------");
-
-rp = new ReplPair(l, r, a);
-rp.start();
-print("snapshot2.js 3 -----------------------------------------------------------");
-rp.waitForSteadyState();
-
-print("snapshot2.js 4 -----------------------------------------------------------");
-
-big = new Array( 2000 ).toString(); // overflow oplog, so test can't pass supriously
-rp.slave().setSlaveOk();
-print("snapshot2.js 5 -----------------------------------------------------------");
-for (i = 0; i < 500; ++i) {
- rp.master().getDB( baseName )[ baseName ].save( { _id: new ObjectId(), i: i, b: big } );
- if (i % 250 == 249) {
- function p() { return i + 1 == rp.slave().getDB(baseName)[baseName].count(); }
- try {
- assert.soon(p);
- } catch (e) {
- print("\n\n\nsnapshot2.js\ni+1:" + (i + 1));
- print("slave count:" + rp.slave().getDB(baseName)[baseName].count());
- sleep(2000);
- print(p());
- throw (e);
- }
- sleep( 10 ); // give master a chance to grab a sync point - have such small oplogs the master log might overflow otherwise
- }
-}
-print("snapshot2.js 6 -----------------------------------------------------------");
-
-rp.master().getDB( "admin" ).runCommand( {fsync:1,lock:1} );
-leftMaster = ( rp.master().host == rp.left().host );
-rp.killNode( rp.slave() );
-if ( leftMaster ) {
- copyDbpath( basePath + "-left", basePath + "-right" );
-} else {
- copyDbpath( basePath + "-right", basePath + "-left" );
-}
-rp.master().getDB( "admin" ).$cmd.sys.unlock.findOne();
-rp.killNode( rp.master() );
-
-clearRawMongoProgramOutput();
-
-rp.right_.extraArgs_ = [ "--fastsync" ];
-rp.left_.extraArgs_ = [ "--fastsync" ];
-
-rp.start( true );
-rp.waitForSteadyState();
-assert.eq( 500, rp.master().getDB( baseName )[ baseName ].count() );
-rp.slave().setSlaveOk();
-assert.eq( 500, rp.slave().getDB( baseName )[ baseName ].count() );
-rp.master().getDB( baseName )[ baseName ].save( {i:500} );
-assert.soon( function() { return 501 == rp.slave().getDB( baseName )[ baseName ].count(); } );
-
-assert( !rawMongoProgramOutput().match( /resync/ ) );
-assert(!rawMongoProgramOutput().match(/SyncException/));
-
-print("snapshot2.js SUCCESS ----------------");
-
diff --git a/jstests/repl/snapshot3.js b/jstests/repl/snapshot3.js
deleted file mode 100644
index 02955e5..0000000
--- a/jstests/repl/snapshot3.js
+++ /dev/null
@@ -1,53 +0,0 @@
-// Test SERVER-623 - starting repl peer from a new snapshot of slave
-
-ports = allocatePorts( 3 );
-
-var baseName = "repl_snapshot3";
-var basePath = "/data/db/" + baseName;
-
-a = new MongodRunner( ports[ 0 ], basePath + "-arbiter" );
-l = new MongodRunner( ports[ 1 ], basePath + "-left", "127.0.0.1:" + ports[ 2 ], "127.0.0.1:" + ports[ 0 ] );
-r = new MongodRunner( ports[ 2 ], basePath + "-right", "127.0.0.1:" + ports[ 1 ], "127.0.0.1:" + ports[ 0 ] );
-
-rp = new ReplPair( l, r, a );
-rp.start();
-rp.waitForSteadyState();
-
-big = new Array( 2000 ).toString(); // overflow oplog, so test can't pass supriously
-rp.slave().setSlaveOk();
-for( i = 0; i < 500; ++i ) {
- rp.master().getDB( baseName )[ baseName ].save( { _id: new ObjectId(), i: i, b: big } );
- if ( i % 250 == 249 ) {
- assert.soon( function() { return i+1 == rp.slave().getDB( baseName )[ baseName ].count(); } );
- sleep( 10 ); // give master a chance to grab a sync point - have such small oplogs the master log might overflow otherwise
- }
-}
-
-rp.slave().getDB( "admin" ).runCommand( {fsync:1,lock:1} );
-leftSlave = ( rp.slave().host == rp.left().host );
-rp.killNode( rp.master() );
-if ( leftSlave ) {
- copyDbpath( basePath + "-left", basePath + "-right" );
-} else {
- copyDbpath( basePath + "-right", basePath + "-left" );
-}
-rp.slave().getDB( "admin" ).$cmd.sys.unlock.findOne();
-rp.killNode( rp.slave() );
-
-clearRawMongoProgramOutput();
-
-rp.right_.extraArgs_ = [ "--fastsync" ];
-rp.left_.extraArgs_ = [ "--fastsync" ];
-
-rp.start( true );
-rp.waitForSteadyState();
-assert.eq( 500, rp.master().getDB( baseName )[ baseName ].count() );
-rp.slave().setSlaveOk();
-assert.eq( 500, rp.slave().getDB( baseName )[ baseName ].count() );
-rp.master().getDB( baseName )[ baseName ].save( {i:500} );
-assert.soon( function() { return 501 == rp.slave().getDB( baseName )[ baseName ].count(); } );
-
-assert( !rawMongoProgramOutput().match( new RegExp( "resync.*" + baseName + ".*\n" ) ) , "last1" );
-assert( !rawMongoProgramOutput().match( /SyncException/ ) , "last2" );
-
-print("snapshot3.js finishes");
diff --git a/jstests/replsets/auth1.js b/jstests/replsets/auth1.js
index 60e4b95..71ab2d9 100644
--- a/jstests/replsets/auth1.js
+++ b/jstests/replsets/auth1.js
@@ -3,17 +3,27 @@
load("jstests/replsets/rslib.js");
var name = "rs_auth1";
-var port = allocatePorts(4);
-var path = "jstests/replsets/";
+var port = allocatePorts(5);
+var path = "jstests/libs/";
+
+
+print("try starting mongod with auth");
+var m = runMongoProgram( "mongod", "--auth", "--port", port[4], "--dbpath", "/data/db/wrong-auth");
+
+assert.throws(function() {
+ m.getDB("local").auth("__system", "");
+});
+
+stopMongod(port[4]);
+
-
print("reset permissions");
run("chmod", "644", path+"key1");
run("chmod", "644", path+"key2");
print("try starting mongod");
-var m = runMongoProgram( "mongod", "--keyFile", path+"key1", "--port", port[0], "--dbpath", "/data/db/" + name);
+m = runMongoProgram( "mongod", "--keyFile", path+"key1", "--port", port[0], "--dbpath", "/data/db/" + name);
print("should fail with wrong permissions");
@@ -81,6 +91,10 @@ function doQueryOn(p) {
doQueryOn(slave);
master.adminCommand({logout:1});
+
+print("unauthorized:");
+printjson(master.adminCommand({replSetGetStatus : 1}));
+
doQueryOn(master);
@@ -125,11 +139,12 @@ master.auth("bar", "baz");
for (var i=0; i<1000; i++) {
master.foo.insert({x:i, foo : "bar"});
}
-master.runCommand({getlasterror:1, w:3, wtimeout:60000});
+var result = master.runCommand({getlasterror:1, w:2, wtimeout:60000});
+printjson(result);
print("resync");
-rs.restart(0);
+rs.restart(0, {"keyFile" : path+"key1"});
print("add some more data 2");
@@ -159,7 +174,7 @@ master.getSisterDB("admin").auth("foo", "bar");
print("shouldn't ever sync");
-for (var i = 0; i<30; i++) {
+for (var i = 0; i<10; i++) {
print("iteration: " +i);
var results = master.adminCommand({replSetGetStatus:1});
printjson(results);
@@ -177,9 +192,15 @@ conn = new MongodRunner(port[3], "/data/db/"+name+"-3", null, null, ["--replSet"
conn.start();
wait(function() {
+ try {
var results = master.adminCommand({replSetGetStatus:1});
printjson(results);
return results.members[3].state == 2;
+ }
+ catch (e) {
+ print(e);
+ }
+ return false;
});
print("make sure it has the config, too");
diff --git a/jstests/replsets/cloneDb.js b/jstests/replsets/cloneDb.js
index 6d2d0f3..2519c09 100644
--- a/jstests/replsets/cloneDb.js
+++ b/jstests/replsets/cloneDb.js
@@ -6,34 +6,36 @@ doTest = function( signal ) {
var N = 2000
- // ~1KB string
+ print("~1KB string");
var Text = ''
for (var i = 0; i < 40; i++)
Text += 'abcdefghijklmnopqrstuvwxyz'
- // Create replica set
+ print("Create replica set");
var repset = new ReplicaSet ('testSet', 3) .begin()
var master = repset.getMaster()
var db1 = master.getDB('test')
- // Insert data
+ print("Insert data");
for (var i = 0; i < N; i++) {
db1['foo'].insert({x: i, text: Text})
db1.getLastError(2) // wait to be copied to at least one secondary
}
- // Create single server
+ print("Create single server");
var solo = new Server ('singleTarget')
var soloConn = solo.begin()
+ soloConn.getDB("admin").runCommand({setParameter:1,logLevel:5});
+
var db2 = soloConn.getDB('test')
- // Clone db from replica set to single server
+ print("Clone db from replica set to single server");
db2.cloneDatabase (repset.getURL())
- // Confirm clone worked
+ print("Confirm clone worked");
assert.eq (Text, db2['foo'] .findOne({x: N-1}) ['text'], 'cloneDatabase failed (test1)')
- // Now test the reverse direction
+ print("Now test the reverse direction");
db1 = master.getDB('test2')
db2 = soloConn.getDB('test2')
for (var i = 0; i < N; i++) {
@@ -43,7 +45,7 @@ doTest = function( signal ) {
db1.cloneDatabase (solo.host())
assert.eq (Text, db2['foo'] .findOne({x: N-1}) ['text'], 'cloneDatabase failed (test2)')
- // Shut down replica set and single server
+ print("Shut down replica set and single server");
solo.end()
repset.stopSet( signal )
}
diff --git a/jstests/replsets/config1.js b/jstests/replsets/config1.js
deleted file mode 100644
index 748ce8f..0000000
--- a/jstests/replsets/config1.js
+++ /dev/null
@@ -1,21 +0,0 @@
-doTest = function( signal ) {
- var name = 'config1';
-
- var replTest = new ReplSetTest( {name: name, nodes: 3} );
- var nodes = replTest.startSet();
-
- var config = replTest.getReplSetConfig();
- config.settings = {"heartbeatSleep" : .5, heartbeatTimeout : .8};
-
- replTest.initiate(config);
-
- // Call getMaster to return a reference to the node that's been
- // elected master.
- var master = replTest.getMaster();
-
- config = master.getDB("local").system.replset.findOne();
- assert.eq(config.settings.heartbeatSleep, .5);
- assert.eq(config.settings.heartbeatTimeout, .8);
-};
-
-doTest(15);
diff --git a/jstests/replsets/downstream.js b/jstests/replsets/downstream.js
new file mode 100755
index 0000000..795e667
--- /dev/null
+++ b/jstests/replsets/downstream.js
@@ -0,0 +1,36 @@
+// BUG: [SERVER-1768] replica set getlasterror {w: 2} after 2000
+// inserts hangs while secondary servers log "replSet error RS102 too stale to catch up" every once in a while
+
+function newReplicaSet (name, numServers) {
+ var rs = new ReplSetTest({name: name, nodes: numServers})
+ rs.startSet()
+ rs.initiate()
+ rs.awaitReplication()
+ return rs
+}
+
+function go() {
+var N = 2000
+
+// ~1KB string
+var Text = ''
+for (var i = 0; i < 40; i++)
+ Text += 'abcdefghijklmnopqrstuvwxyz'
+
+// Create replica set of 3 servers
+var repset = newReplicaSet('repset', 3)
+var conn = repset.getMaster()
+var db = conn.getDB('test')
+
+// Add data to it
+for (var i = 0; i < N; i++)
+ db['foo'].insert({x: i, text: Text})
+
+// wait to be copied to at least one secondary (BUG hangs here)
+db.getLastError(2)
+
+print('getlasterror_w2.js SUCCESS')
+}
+
+// turn off until fixed
+//go();
diff --git a/jstests/replsets/fastsync.js b/jstests/replsets/fastsync.js
index d7c3905..1c9c215 100644
--- a/jstests/replsets/fastsync.js
+++ b/jstests/replsets/fastsync.js
@@ -22,7 +22,7 @@ var wait = function(f) {
}
var reconnect = function(a) {
- wait(function() {
+ wait(function() {
try {
a.getDB("foo").bar.stats();
return true;
@@ -33,7 +33,7 @@ var reconnect = function(a) {
});
};
-ports = allocatePorts( 3 );
+ports = allocatePorts( 4 );
var basename = "jstests_fastsync";
var basePath = "/data/db/" + basename;
@@ -48,7 +48,7 @@ var admin = p.getDB("admin");
var foo = p.getDB("foo");
var local = p.getDB("local");
-var config = {_id : basename, members : [{_id : 0, host : hostname+":"+ports[0]}]};
+var config = {_id : basename, members : [{_id : 0, host : hostname+":"+ports[0], priority:2}]};
printjson(config);
var result = admin.runCommand({replSetInitiate : config});
print("result:");
@@ -59,10 +59,19 @@ while (count < 10 && result.ok != 1) {
count++;
sleep(2000);
result = admin.runCommand({replSetInitiate : config});
-}
+}
assert(result.ok, tojson(result));
-assert.soon(function() { return admin.runCommand({isMaster:1}).ismaster; });
+assert.soon(function() { result = false;
+ try {
+ result = admin.runCommand({isMaster:1}).ismaster;
+ }
+ catch(e) {
+ print(e);
+ return false;
+ }
+ return result;
+ });
print("1");
for (var i=0; i<100000; i++) {
@@ -73,45 +82,113 @@ print("total in foo: "+foo.bar.count());
print("2");
admin.runCommand( {fsync:1,lock:1} );
-copyDbpath( basePath + "-p", basePath + "-s" );
+copyDbpath( basePath + "-p", basePath + "-s"+1 );
admin.$cmd.sys.unlock.findOne();
-
print("3");
-var sargs = new MongodRunner( ports[ 1 ], basePath + "-s", false, false,
+var startSlave = function(n) {
+ var sargs = new MongodRunner( ports[ n ], basePath + "-s"+n, false, false,
["--replSet", basename, "--fastsync",
"--oplogSize", 2], {no_bind : true} );
-var reuseData = true;
-sargs.start(reuseData);
+ var reuseData = true;
+ var conn = sargs.start(reuseData);
+
+ config = local.system.replset.findOne();
+ config.version++;
+ config.members.push({_id:n, host:hostname+":"+ports[n]});
+
+ result = admin.runCommand({replSetReconfig : config});
+ printjson(result);
+ assert(result.ok, "reconfig worked");
+ reconnect(p);
+
+ print("4");
+ var status = admin.runCommand({replSetGetStatus : 1});
+ var count = 0;
+ while (status.members[n].state != 2 && count < 200) {
+ print("not a secondary yet");
+ if (count % 10 == 0) {
+ printjson(status);
+ }
+ assert(!status.members[n].errmsg || !status.members[n].errmsg.match("^initial sync cloning db"));
-config = local.system.replset.findOne();
-config.version++;
-config.members.push({_id:1, host:hostname+":"+ports[1]});
+ sleep(1000);
-result = admin.runCommand({replSetReconfig : config});
-assert(result.ok, "reconfig worked");
-reconnect(p);
+ // disconnection could happen here
+ try {
+ status = admin.runCommand({replSetGetStatus : 1});
+ }
+ catch (e) {
+ print(e);
+ }
+ count++;
+ }
-print("4");
-var status = admin.runCommand({replSetGetStatus : 1});
-var count = 0;
-while (status.members[1].state != 2 && count < 200) {
- print("not a secondary yet");
- if (count % 10 == 0) {
- printjson(status);
- }
- assert(!status.members[1].errmsg || !status.members[1].errmsg.match("^initial sync cloning db"));
-
- sleep(1000);
-
- // disconnection could happen here
- try {
- status = admin.runCommand({replSetGetStatus : 1});
- }
- catch (e) {
- print(e);
- }
- count++;
+ assert.eq(status.members[n].state, 2);
+
+ assert.soon(function() {
+ return admin.runCommand({isMaster : 1}).ismaster;
+ });
+
+ admin.foo.insert({x:1});
+ assert.soon(function() {
+ var last = local.oplog.rs.find().sort({$natural:-1}).limit(1).next();
+ var cur = conn.getDB("local").oplog.rs.find().sort({$natural:-1}).limit(1).next();
+ print("last: "+tojson(last)+" cur: "+tojson(cur));
+ return cur != null && last != null && cur.ts.t == last.ts.t && cur.ts.i == last.ts.i;
+ });
+
+ return conn;
+};
+
+var s1 = startSlave(1);
+
+var me1 = s1.getDB("local").me.findOne();
+
+print("me: " +me1._id);
+assert(me1._id != null);
+
+print("5");
+s1.getDB("admin").runCommand( {fsync:1,lock:1} );
+copyDbpath( basePath + "-s1", basePath + "-s2" );
+s1.getDB("admin").$cmd.sys.unlock.findOne();
+
+var s2 = startSlave(2);
+
+var me2 = s2.getDB("local").me.findOne();
+
+print("me: " +me2._id);
+assert(me1._id != me2._id);
+
+print("restart member with a different port and make it a new set");
+try {
+ p.getDB("admin").runCommand({shutdown:1});
+}
+catch(e) {
+ print("good, shutting down: " +e);
}
+sleep(10000);
+
+pargs = new MongodRunner( ports[ 3 ], basePath + "-p", false, false,
+ ["--replSet", basename, "--oplogSize", 2],
+ {no_bind : true} );
+p = pargs.start(true);
+
+printjson(p.getDB("admin").runCommand({replSetGetStatus:1}));
+
+p.getDB("admin").runCommand({replSetReconfig : {
+ _id : basename,
+ members : [{_id:0, host : hostname+":"+ports[3]}]
+ }, force : true});
+
+print("start waiting for primary...");
+assert.soon(function() {
+ try {
+ return p.getDB("admin").runCommand({isMaster : 1}).ismaster;
+ }
+ catch(e) {
+ print(e);
+ }
+ return false;
+ }, "waiting for master", 60000);
-assert.eq(status.members[1].state, 2);
diff --git a/jstests/replsets/initial_sync1.js b/jstests/replsets/initial_sync1.js
index df978c4..4cfd606 100644
--- a/jstests/replsets/initial_sync1.js
+++ b/jstests/replsets/initial_sync1.js
@@ -95,12 +95,11 @@ replTest.stop(1);
print("8. Eventually it should become a secondary");
print("if initial sync has started, this will cause it to fail and sleep for 5 minutes");
-sleep(5*60*1000);
wait(function() {
var status = admin_s2.runCommand({replSetGetStatus:1});
occasionally(function() { printjson(status); });
return status.members[2].state == 2;
- });
+ }, 350);
print("9. Bring #2 back up");
@@ -122,5 +121,5 @@ for (var i=0; i<10000; i++) {
print("11. Everyone happy eventually");
-replTest.awaitReplication();
+replTest.awaitReplication(300000);
diff --git a/jstests/replsets/initial_sync3.js b/jstests/replsets/initial_sync3.js
index 471aa16..7f2af94 100644
--- a/jstests/replsets/initial_sync3.js
+++ b/jstests/replsets/initial_sync3.js
@@ -43,14 +43,14 @@ wait(function() {
if (!status.members) {
return false;
}
-
+
for (i=0; i<7; i++) {
if (status.members[i].state != 1 && status.members[i].state != 2) {
return false;
}
}
return true;
-
+
});
replTest.awaitReplication();
@@ -66,6 +66,7 @@ rs2.initiate();
master = rs2.getMaster();
var config = master.getDB("local").system.replset.findOne();
config.version++;
+config.members[0].priority = 2;
config.members[0].initialSync = {state : 2};
config.members[1].initialSync = {state : 1};
try {
@@ -75,12 +76,34 @@ catch(e) {
print("trying to reconfigure: "+e);
}
-master = rs2.getMaster();
-config = master.getDB("local").system.replset.findOne();
+// wait for a heartbeat, too, just in case sync happens before hb
+assert.soon(function() {
+ try {
+ for (var n in rs2.nodes) {
+ if (rs2.nodes[n].getDB("local").system.replset.findOne().version != 2) {
+ return false;
+ }
+ }
+ }
+ catch (e) {
+ return false;
+ }
+ return true;
+});
+
+rs2.awaitReplication();
+
+// test partitioning
+master = rs2.bridge();
+rs2.partition(0, 2);
+
+master.getDB("foo").bar.baz.insert({x:1});
+rs2.awaitReplication();
-assert(typeof(config.members[0].initialSync) == "object");
-assert.eq(config.members[0].initialSync.state, 2);
-assert.eq(config.members[1].initialSync.state, 1);
+master.getDB("foo").bar.baz.insert({x:2});
+var x = master.getDB("foo").runCommand({getLastError : 1, w : 3, wtimeout : 5000});
+printjson(x);
+assert.eq(null, x.err);
rs2.stopSet();
diff --git a/jstests/replsets/maintenance.js b/jstests/replsets/maintenance.js
new file mode 100644
index 0000000..5b068cd
--- /dev/null
+++ b/jstests/replsets/maintenance.js
@@ -0,0 +1,32 @@
+
+
+var replTest = new ReplSetTest( {name: 'unicomplex', nodes: 3} );
+var conns = replTest.startSet();
+replTest.initiate();
+
+// Make sure we have a master
+var master = replTest.getMaster();
+
+for (i=0;i<10000; i++) { master.getDB("bar").foo.insert({x:1,y:i,abc:123,str:"foo bar baz"}); }
+for (i=0;i<1000; i++) { master.getDB("bar").foo.update({y:i},{$push :{foo : "barrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrr"}}); }
+
+replTest.awaitReplication();
+
+assert.soon(function() { return conns[2].getDB("admin").isMaster().secondary; });
+
+join = startParallelShell( "db.getSisterDB('bar').runCommand({compact : 'foo'});", replTest.ports[2] );
+
+print("check secondary goes to recovering");
+assert.soon(function() { return !conns[2].getDB("admin").isMaster().secondary; });
+
+print("joining");
+join();
+
+print("check secondary becomes a secondary again");
+var x = 0;
+assert.soon(function() {
+ var im = conns[2].getDB("admin").isMaster();
+ if (x++ % 5 == 0) printjson(im);
+ return im.secondary;
+});
+
diff --git a/jstests/replsets/majority.js b/jstests/replsets/majority.js
new file mode 100644
index 0000000..6df1a41
--- /dev/null
+++ b/jstests/replsets/majority.js
@@ -0,0 +1,60 @@
+var num = 5;
+var host = getHostName();
+var name = "tags";
+var timeout = 10000;
+
+var replTest = new ReplSetTest( {name: name, nodes: num, startPort:31000} );
+var nodes = replTest.startSet();
+var port = replTest.ports;
+replTest.initiate({_id : name, members :
+ [
+ {_id:0, host : host+":"+port[0], priority : 2},
+ {_id:1, host : host+":"+port[1]},
+ {_id:2, host : host+":"+port[2]},
+ {_id:3, host : host+":"+port[3], arbiterOnly : true},
+ {_id:4, host : host+":"+port[4], arbiterOnly : true},
+ ],
+ });
+
+replTest.awaitReplication();
+replTest.bridge();
+
+var testInsert = function() {
+ master.getDB("foo").bar.insert({x:1});
+ var result = master.getDB("foo").runCommand({getLastError:1, w:"majority", wtimeout:timeout});
+ printjson(result);
+ return result;
+};
+
+var master = replTest.getMaster();
+
+print("get back in the groove");
+testInsert();
+replTest.awaitReplication();
+
+print("makes sure majority works");
+assert.eq(testInsert().err, null);
+
+print("setup: 0,1 | 2,3,4");
+replTest.partition(0,2);
+replTest.partition(0,3);
+replTest.partition(0,4);
+replTest.partition(1,2);
+replTest.partition(1,3);
+replTest.partition(1,4);
+
+print("make sure majority doesn't work");
+// primary should now be 2
+master = replTest.getMaster();
+assert.eq(testInsert().err, "timeout");
+
+print("bring set back together");
+replTest.unPartition(0,2);
+replTest.unPartition(0,3);
+replTest.unPartition(1,4);
+
+master = replTest.getMaster();
+
+print("make sure majority works");
+assert.eq(testInsert().err, null);
+
diff --git a/jstests/replsets/randomcommands1.js b/jstests/replsets/randomcommands1.js
deleted file mode 100644
index c451e74..0000000
--- a/jstests/replsets/randomcommands1.js
+++ /dev/null
@@ -1,29 +0,0 @@
-
-replTest = new ReplSetTest( {name: 'randomcommands1', nodes: 3} );
-
-nodes = replTest.startSet();
-replTest.initiate();
-
-master = replTest.getMaster();
-slaves = replTest.liveNodes.slaves;
-printjson(replTest.liveNodes);
-
-db = master.getDB("foo")
-t = db.foo
-
-ts = slaves.map( function(z){ z.setSlaveOk(); return z.getDB( "foo" ).foo; } )
-
-t.save({a: 1000});
-t.ensureIndex( { a : 1 } )
-
-db.getLastError( 3 , 30000 )
-
-ts.forEach( function(z){ assert.eq( 2 , z.getIndexKeys().length , "A " + z.getMongo() ); } )
-
-t.reIndex()
-
-db.getLastError( 3 , 30000 )
-ts.forEach( function(z){ assert.eq( 2 , z.getIndexKeys().length , "A " + z.getMongo() ); } )
-
-replTest.stopSet( 15 )
-
diff --git a/jstests/replsets/reconfig.js b/jstests/replsets/reconfig.js
new file mode 100644
index 0000000..b7dca03
--- /dev/null
+++ b/jstests/replsets/reconfig.js
@@ -0,0 +1,69 @@
+
+// try reconfiguring with servers down
+
+var replTest = new ReplSetTest({ name: 'testSet', nodes: 5 });
+var nodes = replTest.startSet();
+replTest.initiate();
+
+var master = replTest.getMaster();
+
+print("initial sync");
+master.getDB("foo").bar.insert({X:1});
+replTest.awaitReplication();
+
+print("stopping 3 & 4");
+replTest.stop(3);
+replTest.stop(4);
+
+print("reconfiguring");
+master = replTest.getMaster();
+var config = master.getDB("local").system.replset.findOne();
+var oldVersion = config.version++;
+config.members[0].votes = 2;
+config.members[3].votes = 2;
+try {
+ master.getDB("admin").runCommand({replSetReconfig : config});
+}
+catch(e) {
+ print(e);
+}
+
+var config = master.getDB("local").system.replset.findOne();
+assert.eq(oldVersion+1, config.version);
+
+
+print("0 & 3 up; 1, 2, 4 down");
+replTest.restart(3);
+
+// in case 0 isn't master
+replTest.awaitReplication();
+
+replTest.stop(1);
+replTest.stop(2);
+
+print("try to reconfigure with a 'majority' down");
+oldVersion = config.version;
+config.version++;
+master = replTest.getMaster();
+try {
+ master.getDB("admin").runCommand({replSetReconfig : config});
+}
+catch (e) {
+ print(e);
+}
+
+var config = master.getDB("local").system.replset.findOne();
+assert.eq(oldVersion+1, config.version);
+
+replTest.stopSet();
+
+replTest2 = new ReplSetTest({name : 'testSet2', nodes : 1});
+nodes = replTest2.startSet();
+
+result = nodes[0].getDB("admin").runCommand({replSetInitiate : {_id : "testSet2", members : [
+ {_id : 0, tags : ["member0"]}
+ ]}});
+
+assert(result.errmsg.match(/bad or missing host field/));
+
+replTest2.stopSet();
diff --git a/jstests/replsets/remove1.js b/jstests/replsets/remove1.js
index ebd17d6..f93fe9e 100644
--- a/jstests/replsets/remove1.js
+++ b/jstests/replsets/remove1.js
@@ -16,7 +16,7 @@ var host = getHostName();
print("Start set with three nodes");
-var replTest = new ReplSetTest( {name: name, nodes: 3} );
+var replTest = new ReplSetTest( {name: name, nodes: 2} );
var nodes = replTest.startSet();
replTest.initiate();
var master = replTest.getMaster();
@@ -28,85 +28,44 @@ master.getDB("foo").bar.baz.insert({x:1});
replTest.awaitReplication();
-print("Remove slave2");
+print("Remove slaves");
var config = replTest.getReplSetConfig();
config.members.pop();
config.version = 2;
-try {
- master.getDB("admin").runCommand({replSetReconfig:config});
-}
-catch(e) {
- print(e);
-}
-reconnect(master);
-
-
-print("Remove slave1");
-config.members.pop();
-config.version = 3;
-try {
- master.getDB("admin").runCommand({replSetReconfig:config});
-}
-catch(e) {
- print(e);
-}
-reconnect(master);
-
-print("sleeping 1");
-sleep(10000);
-// these are already down, but this clears their ports from memory so that they
-// can be restarted later
-stopMongod(replTest.getPort(1));
-stopMongod(replTest.getPort(2));
-
-
-print("Bring slave1 back up");
-var paths = [ replTest.getPath(1), replTest.getPath(2) ];
-var ports = allocatePorts(2, replTest.getPort(2)+1);
-var args = ["mongod", "--port", ports[0], "--dbpath", paths[0], "--noprealloc", "--smallfiles", "--rest"];
-var conn = startMongoProgram.apply( null, args );
-conn.getDB("local").system.replset.remove();
-printjson(conn.getDB("local").runCommand({getlasterror:1}));
-print(conn);
-print("sleeping 2");
-sleep(10000);
-stopMongod(ports[0]);
-
-replTest.restart(1);
-
-
-print("Bring slave2 back up");
-args[2] = ports[1];
-args[4] = paths[1];
-conn = startMongoProgram.apply( null, args );
-conn.getDB("local").system.replset.remove();
-print("path: "+paths[1]);
-print("sleeping 3");
-sleep(10000);
-stopMongod(ports[1]);
-
-replTest.restart(2);
-sleep(10000);
-
-
-print("Add them back as slaves");
+assert.soon(function() {
+ try {
+ master.getDB("admin").runCommand({replSetReconfig:config});
+ }
+ catch(e) {
+ print(e);
+ }
+
+ reconnect(master);
+ reconnect(replTest.nodes[1]);
+ var c = master.getDB("local").system.replset.findOne();
+ return c.version == 2;
+ });
+
+print("Add it back as a slave");
config.members.push({_id:1, host : host+":"+replTest.getPort(1)});
-config.members.push({_id:2, host : host+":"+replTest.getPort(2)});
-config.version = 4;
+config.version = 3;
+printjson(config);
wait(function() {
try {
- master.getDB("admin").runCommand({replSetReconfig:config});
+ master.getDB("admin").runCommand({replSetReconfig:config});
}
catch(e) {
- print(e);
+ print(e);
}
reconnect(master);
+ printjson(master.getDB("admin").runCommand({replSetGetStatus:1}));
master.setSlaveOk();
var newConfig = master.getDB("local").system.replset.findOne();
- return newConfig.version == 4;
- });
+ print( "newConfig: " + tojson(newConfig) );
+ return newConfig.version == 3;
+} , "wait1" );
print("Make sure everyone's secondary");
@@ -115,18 +74,49 @@ wait(function() {
occasionally(function() {
printjson(status);
});
-
- if (!status.members || status.members.length != 3) {
+
+ if (!status.members || status.members.length != 2) {
return false;
}
- for (var i = 0; i<3; i++) {
+ for (var i = 0; i<2; i++) {
if (status.members[i].state != 1 && status.members[i].state != 2) {
return false;
}
}
return true;
- });
+} , "wait2" );
+
+
+print("reconfig with minority");
+replTest.stop(1);
+
+assert.soon(function() {
+ try {
+ return master.getDB("admin").runCommand({isMaster : 1}).secondary;
+ }
+ catch(e) {
+ print("trying to get master: "+e);
+ }
+});
+
+config.version = 4;
+config.members.pop();
+try {
+ master.getDB("admin").runCommand({replSetReconfig : config, force : true});
+}
+catch(e) {
+ print(e);
+}
+
+reconnect(master);
+assert.soon(function() {
+ return master.getDB("admin").runCommand({isMaster : 1}).ismaster;
+});
+
+config = master.getDB("local").system.replset.findOne();
+printjson(config);
+assert(config.version > 4);
replTest.stopSet();
diff --git a/jstests/replsets/replset1.js b/jstests/replsets/replset1.js
index 5ac94e7..6387c5d 100644
--- a/jstests/replsets/replset1.js
+++ b/jstests/replsets/replset1.js
@@ -108,6 +108,28 @@ doTest = function( signal ) {
assert.eq( 1000 , count.n , "slave count wrong: " + slave );
});
+ // last error
+ master = replTest.getMaster();
+ slaves = replTest.liveNodes.slaves;
+ printjson(replTest.liveNodes);
+
+ db = master.getDB("foo")
+ t = db.foo
+
+ ts = slaves.map( function(z){ z.setSlaveOk(); return z.getDB( "foo" ).foo; } )
+
+ t.save({a: 1000});
+ t.ensureIndex( { a : 1 } )
+
+ db.getLastError( 3 , 30000 )
+
+ ts.forEach( function(z){ assert.eq( 2 , z.getIndexKeys().length , "A " + z.getMongo() ); } )
+
+ t.reIndex()
+
+ db.getLastError( 3 , 30000 )
+ ts.forEach( function(z){ assert.eq( 2 , z.getIndexKeys().length , "A " + z.getMongo() ); } )
+
// Shut down the set and finish the test.
replTest.stopSet( signal );
}
diff --git a/jstests/replsets/replset3.js b/jstests/replsets/replset3.js
index faa0627..ba08eac 100644
--- a/jstests/replsets/replset3.js
+++ b/jstests/replsets/replset3.js
@@ -29,7 +29,7 @@ doTest = function (signal) {
// Step down master. Note: this may close our connection!
try {
- master.getDB("admin").runCommand({ replSetStepDown: true });
+ master.getDB("admin").runCommand({ replSetStepDown: true, force: 1 });
} catch (err) {
print("caught: " + err + " on stepdown");
}
diff --git a/jstests/replsets/replset5.js b/jstests/replsets/replset5.js
index 6a7d8a5..67ce2d7 100644
--- a/jstests/replsets/replset5.js
+++ b/jstests/replsets/replset5.js
@@ -23,51 +23,63 @@ doTest = function (signal) {
master.getDB("barDB").bar.save({ a: 1 });
replTest.awaitReplication();
- // These writes should be replicated immediately
- var docNum = 5000;
- for(var n=0; n<docNum; n++) {
- master.getDB(testDB).foo.insert({ n: n });
- }
+ // These writes should be replicated immediately
+ var docNum = 5000;
+ for (var n = 0; n < docNum; n++) {
+ master.getDB(testDB).foo.insert({ n: n });
+ }
- // If you want to test failure, just add values for w and wtimeout
- // to the following command. This will override the default set above and
- // prevent replication from happening in time for the count tests below.
- master.getDB("admin").runCommand({getlasterror: 1});
+ // should use the configured last error defaults from above; that's what we're testing.
+ //
+ // If you want to test failure, just add values for w and wtimeout (e.g. w=1)
+ // to the following command. This will override the default set above and
+ // prevent replication from happening in time for the count tests below.
+ //
+ var result = master.getDB("admin").runCommand({ getlasterror: 1 });
+ print("replset5.js getlasterror result:");
+ printjson(result);
+
+ if (result.err == "timeout") {
+ print("\WARNING getLastError timed out and should not have.\nThis machine seems extremely slow. Stopping test without failing it\n")
+ replTest.stopSet(signal);
+ print("\WARNING getLastError timed out and should not have.\nThis machine seems extremely slow. Stopping test without failing it\n")
+ return;
+ }
var slaves = replTest.liveNodes.slaves;
slaves[0].setSlaveOk();
slaves[1].setSlaveOk();
- print("Testing slave counts");
+ print("replset5.js Testing slave counts");
+
+ var slave0count = slaves[0].getDB(testDB).foo.count();
+ assert(slave0count == docNum, "Slave 0 has " + slave0count + " of " + docNum + " documents!");
+
+ var slave1count = slaves[1].getDB(testDB).foo.count();
+ assert(slave1count == docNum, "Slave 1 has " + slave1count + " of " + docNum + " documents!");
+
+ var master1count = master.getDB(testDB).foo.count();
+ assert(master1count == docNum, "Master has " + master1count + " of " + docNum + " documents!");
+
+ print("replset5.js reconfigure with hidden=1");
+ config = master.getDB("local").system.replset.findOne();
+ config.version++;
+ config.members[2].priority = 0;
+ config.members[2].hidden = 1;
+
+ try {
+ master.adminCommand({ replSetReconfig: config });
+ }
+ catch (e) {
+ print(e);
+ }
+
+ config = master.getDB("local").system.replset.findOne();
+ printjson(config);
+ assert.eq(config.members[2].hidden, true);
- var slave0count = slaves[0].getDB(testDB).foo.count();
- assert( slave0count == docNum, "Slave 0 has " + slave0count + " of " + docNum + " documents!");
-
- var slave1count = slaves[1].getDB(testDB).foo.count();
- assert( slave1count == docNum, "Slave 1 has " + slave1count + " of " + docNum + " documents!");
-
- var master1count = master.getDB(testDB).foo.count();
- assert( master1count == docNum, "Master has " + master1count + " of " + docNum + " documents!");
-
- print("reconfigure with hidden=1");
- config = master.getDB("local").system.replset.findOne();
- config.version++;
- config.members[2].priority = 0;
- config.members[2].hidden = 1;
-
- try {
- master.adminCommand({replSetReconfig : config});
- }
- catch(e) {
- print(e);
- }
-
- config = master.getDB("local").system.replset.findOne();
- printjson(config);
- assert.eq(config.members[2].hidden, true);
-
replTest.stopSet(signal);
}
-doTest( 15 );
-print("replset5.js success");
+doTest( 15 );
+print("replset5.js success");
diff --git a/jstests/replsets/replsetadd.js b/jstests/replsets/replsetadd.js
index 673e1d7..44ef7c6 100644
--- a/jstests/replsets/replsetadd.js
+++ b/jstests/replsets/replsetadd.js
@@ -2,13 +2,17 @@
doTest = function( signal ) {
// Test add node
- var replTest = new ReplSetTest( {name: 'testSet', nodes: 0} );
+ var replTest = new ReplSetTest( {name: 'testSet', nodes: 0, host:"localhost"} );
var first = replTest.add();
// Initiate replica set
assert.soon(function() {
- var res = first.getDB("admin").runCommand({replSetInitiate: null});
+ var res = first.getDB("admin").runCommand({replSetInitiate: {
+ _id : 'testSet',
+ members : [{_id : 0, host : "localhost:"+replTest.ports[0]}]
+ }
+ });
return res['ok'] == 1;
});
@@ -18,12 +22,36 @@ doTest = function( signal ) {
return result['ok'] == 1;
});
+ replTest.getMaster();
+
// Start a second node
var second = replTest.add();
// Add the second node.
// This runs the equivalent of rs.add(newNode);
- replTest.reInitiate();
+ print("calling add again");
+ try {
+ replTest.reInitiate();
+ }
+ catch(e) {
+ print(e);
+ }
+
+ print("try to change to localhost to "+getHostName());
+ var master = replTest.getMaster();
+
+ var config = master.getDB("local").system.replset.findOne();
+ config.version++;
+ config.members.forEach(function(m) {
+ m.host = m.host.replace("localhost", getHostName());
+ print(m.host);
+ });
+ printjson(config);
+
+ print("trying reconfig that shouldn't work");
+ var result = master.getDB("admin").runCommand({replSetReconfig: config});
+ assert.eq(result.ok, 0);
+ assert.eq(result.assertionCode, 13645);
replTest.stopSet( signal );
}
diff --git a/jstests/replsets/replsetarb1.js b/jstests/replsets/replsetarb1.js
deleted file mode 100644
index a323290..0000000
--- a/jstests/replsets/replsetarb1.js
+++ /dev/null
@@ -1,33 +0,0 @@
-// FAILING TEST
-// no primary is ever elected if the first server is an arbiter
-
-doTest = function( signal ) {
-
- var replTest = new ReplSetTest( {name: 'unicomplex', nodes: 3} );
- var nodes = replTest.nodeList();
-
- print(tojson(nodes));
-
- var conns = replTest.startSet();
- var r = replTest.initiate({"_id" : "unicomplex",
- "members" : [
- {"_id" : 0, "host" : nodes[0], "arbiterOnly" : true},
- {"_id" : 1, "host" : nodes[1]},
- {"_id" : 2, "host" : nodes[2]}]});
-
- // Make sure we have a master
- // Neither this
- var master = replTest.getMaster();
-
- // Make sure we have an arbiter
- // Nor this will succeed
- assert.soon(function() {
- res = conns[0].getDB("admin").runCommand({replSetGetStatus: 1});
- printjson(res);
- return res.myState == 7;
- }, "Aribiter failed to initialize.");
-
- replTest.stopSet( signal );
-}
-
-// doTest( 15 );
diff --git a/jstests/replsets/replsetarb2.js b/jstests/replsets/replsetarb2.js
index 0e4c791..6f712cb 100644
--- a/jstests/replsets/replsetarb2.js
+++ b/jstests/replsets/replsetarb2.js
@@ -1,5 +1,4 @@
// Election when master fails and remaining nodes are an arbiter and a slave.
-// Note that in this scenario, the arbiter needs two votes.
doTest = function( signal ) {
@@ -9,11 +8,11 @@ doTest = function( signal ) {
print(tojson(nodes));
var conns = replTest.startSet();
- var r = replTest.initiate({"_id" : "unicomplex",
+ var r = replTest.initiate({"_id" : "unicomplex",
"members" : [
- {"_id" : 0, "host" : nodes[0] },
- {"_id" : 1, "host" : nodes[1], "arbiterOnly" : true, "votes": 2},
- {"_id" : 2, "host" : nodes[2] }]});
+ {"_id" : 0, "host" : nodes[0] },
+ {"_id" : 1, "host" : nodes[1], "arbiterOnly" : true, "votes": 1, "priority" : 0},
+ {"_id" : 2, "host" : nodes[2] }]});
// Make sure we have a master
var master = replTest.getMaster();
@@ -25,6 +24,10 @@ doTest = function( signal ) {
return res.myState == 7;
}, "Aribiter failed to initialize.");
+ var result = conns[1].getDB("admin").runCommand({isMaster : 1});
+ assert(result.arbiterOnly);
+ assert(!result.passive);
+
// Wait for initial replication
master.getDB("foo").foo.insert({a: "foo"});
replTest.awaitReplication();
diff --git a/jstests/replsets/replsetarb3.js b/jstests/replsets/replsetarb3.js
deleted file mode 100644
index 1193cf2..0000000
--- a/jstests/replsets/replsetarb3.js
+++ /dev/null
@@ -1,144 +0,0 @@
-// @file replsetarb3.js
-// try turning arbiters into non-arbiters and vice versa
-
-/*
- * 1: initialize set
- * 2: check m3.state == 7
- * 3: reconfig
- * 4: check m3.state == 2
- * 5: reconfig
- * 6: check m3.state == 7
- * 7: reconfig
- * 8: check m3.state == 2
- * 9: insert 10000
- * 10: reconfig
- * 11: check m3.state == 7
- */
-
-var debug = false;
-
-var statusSoon = function(s) {
- assert.soon(function() {
- var status = master.getDB("admin").runCommand({ replSetGetStatus: 1 });
- if (debug)
- printjson(status);
- return status.members[2].state == s;
- });
-};
-
-var w = 0;
-var wait = function(f) {
- w++;
- var n = 0;
- while (!f()) {
- if( n % 4 == 0 )
- print("toostale.js waiting " + w);
- if (++n == 4) {
- print("" + f);
- }
- assert(n < 200, 'tried 200 times, giving up');
- sleep(1000);
- }
-}
-
-var reconnect = function(a) {
- wait(function() {
- try {
- a.getDB("foo").bar.stats();
- return true;
- } catch(e) {
- print(e);
- return false;
- }
- });
-};
-
-var reconfig = function() {
- config.version++;
- try {
- var result = master.getDB("admin").runCommand({replSetReconfig : config});
- }
- catch(e) {
- print(e);
- }
- reconnect(master);
- reconnect(replTest.liveNodes.slaves[1]);
- sleep(20000);
-};
-
-var replTest = new ReplSetTest( {name: 'unicomplex', nodes: 3} );
-var nodes = replTest.nodeList();
-
-print(tojson(nodes));
-
-
-var conns = replTest.startSet();
-
-print("1");
-var config = {"_id" : "unicomplex", "members" : [
- {"_id" : 0, "host" : nodes[0] },
- {"_id" : 1, "host" : nodes[1] },
- {"_id" : 2, "host" : nodes[2], "arbiterOnly" : true}]};
-var r = replTest.initiate(config);
-config.version = 1;
-
-var master = replTest.getMaster();
-
-// Wait for initial replication
-master.getDB("foo").foo.insert({a: "foo"});
-replTest.awaitReplication();
-
-
-print("2");
-statusSoon(7);
-assert.eq(replTest.liveNodes.slaves[1].getDB("local").oplog.rs.count(), 0);
-
-/*
-print("3");
-delete config.members[2].arbiterOnly;
-reconfig();
-
-
-print("4");
-statusSoon(2);
-assert(replTest.liveNodes.slaves[1].getDB("local").oplog.rs.count() > 0);
-
-
-print("5");
-config.members[2].arbiterOnly = true;
-reconfig();
-
-
-print("6");
-statusSoon(7);
-assert.eq(replTest.liveNodes.slaves[1].getDB("local").oplog.rs.count(), 0);
-
-
-print("7");
-delete config.members[2].arbiterOnly;
-reconfig();
-
-
-print("8");
-statusSoon(2);
-assert(replTest.liveNodes.slaves[1].getDB("local").oplog.rs.count() > 0);
-
-
-print("9");
-for (var i = 0; i < 10000; i++) {
- master.getDB("foo").bar.insert({increment : i, c : 0, foo : "kasdlfjaklsdfalksdfakldfmalksdfmaklmfalkfmkafmdsaklfma", date : new Date(), d : Date()});
-}
-
-
-print("10");
-config.members[2].arbiterOnly = true;
-reconfig();
-
-
-print("11");
-statusSoon(7);
-assert.eq(replTest.liveNodes.slaves[1].getDB("local").oplog.rs.count(), 0);
-*/
-
-replTest.stopSet( 15 );
-
diff --git a/jstests/replsets/replsetfreeze.js b/jstests/replsets/replsetfreeze.js
index 3721ba5..7096349 100644
--- a/jstests/replsets/replsetfreeze.js
+++ b/jstests/replsets/replsetfreeze.js
@@ -53,7 +53,7 @@ var master = replTest.getMaster();
print("2: step down m1");
try {
- master.getDB("admin").runCommand({replSetStepDown : 1});
+ master.getDB("admin").runCommand({replSetStepDown : 1, force : 1});
}
catch(e) {
print(e);
@@ -80,7 +80,7 @@ master = replTest.getMaster();
print("6: step down new master");
try {
- master.getDB("admin").runCommand({replSetStepDown : 1});
+ master.getDB("admin").runCommand({replSetStepDown : 1, force : 1});
}
catch(e) {
print(e);
diff --git a/jstests/replsets/replsetrestart1.js b/jstests/replsets/replsetrestart1.js
index 65adaf4..d9f5093 100644
--- a/jstests/replsets/replsetrestart1.js
+++ b/jstests/replsets/replsetrestart1.js
@@ -22,9 +22,16 @@ doTest = function( signal ) {
s1Id = replTest.getNodeId( replTest.liveNodes.slaves[0] );
s2Id = replTest.getNodeId( replTest.liveNodes.slaves[1] );
- replTest.stop( mId );
replTest.stop( s1Id );
replTest.stop( s2Id );
+
+ assert.soon(function() {
+ var status = master.getDB("admin").runCommand({replSetGetStatus: 1});
+ return status.members[1].state == 8 && status.members[2].state == 8;
+ });
+
+
+ replTest.stop( mId );
// Now let's restart these nodes
replTest.restart( mId );
@@ -35,6 +42,11 @@ doTest = function( signal ) {
master = replTest.getMaster();
slaves = replTest.liveNodes.slaves;
+ assert.soon(function() {
+ var status = master.getDB("admin").runCommand({replSetGetStatus: 1});
+ return status.members[1].state != 8 && status.members[2].state != 8;
+ });
+
// Do a status check on each node
// Master should be set to 1 (primary)
assert.soon(function() {
diff --git a/jstests/replsets/replsetrestart2.js b/jstests/replsets/replsetrestart2.js
index 324bd37..6d96697 100644
--- a/jstests/replsets/replsetrestart2.js
+++ b/jstests/replsets/replsetrestart2.js
@@ -1,16 +1,16 @@
// config saved on shutdown
var compare_configs = function(c1, c2) {
- assert(c1.version == c2.version, 'version same');
- assert(c1._id == c2._id, '_id same');
+ assert.eq(c1.version, c2.version, 'version same');
+ assert.eq(c1._id, c2._id, '_id same');
printjson(c1);
printjson(c2);
for (var i in c1.members) {
assert(c2.members[i] !== undefined, 'field '+i+' exists in both configs');
- assert(c1.members[i]._id == c2.members[i]._id, 'id is equal in both configs');
- assert(c1.members[i].host == c2.members[i].host, 'id is equal in both configs');
+ assert.eq(c1.members[i]._id, c2.members[i]._id, 'id is equal in both configs');
+ assert.eq(c1.members[i].host, c2.members[i].host, 'host is equal in both configs');
}
}
diff --git a/jstests/replsets/rollback2.js b/jstests/replsets/rollback2.js
index 46fb548..7ab3c6b 100644
--- a/jstests/replsets/rollback2.js
+++ b/jstests/replsets/rollback2.js
@@ -202,9 +202,24 @@ doTest = function (signal) {
wait(function () { return B.isMaster().ismaster || B.isMaster().secondary; });
// everyone is up here...
- assert(A.isMaster().ismaster || A.isMaster().secondary, "A up");
- assert(B.isMaster().ismaster || B.isMaster().secondary, "B up");
replTest.awaitReplication();
+
+ // theoretically, a read could slip in between StateBox::change() printing
+ // replSet SECONDARY
+ // and the replset actually becoming secondary
+ // so we're trying to wait for that here
+ print("waiting for secondary");
+ assert.soon(function() {
+ try {
+ var aim = A.isMaster();
+ var bim = B.isMaster();
+ return (aim.ismaster || aim.secondary) &&
+ (bim.ismaster || bim.secondary);
+ }
+ catch(e) {
+ print("checking A and B: "+e);
+ }
+ });
verify(a);
diff --git a/jstests/replsets/rollback4.js b/jstests/replsets/rollback4.js
new file mode 100644
index 0000000..5d3299b
--- /dev/null
+++ b/jstests/replsets/rollback4.js
@@ -0,0 +1,117 @@
+//Test for SERVER-3650 (rollback from slave)
+if (0) { // enable for SERVER-3772
+
+var num = 7;
+var host = getHostName();
+var name = "rollback4";
+
+var replTest = new ReplSetTest( {name: name, nodes: num} );
+var config = replTest.getReplSetConfig();
+
+// set preferred masters
+config.members[0].priority = 3
+config.members[6].priority = 2
+// all other are 1
+
+var nodes = replTest.startSet();
+replTest.initiate(config);
+replTest.awaitReplication()
+replTest.bridge();
+
+replTest.waitForMaster();
+var master = replTest.getMaster();
+printjson(master.adminCommand("replSetGetStatus"));
+
+var mColl = master.getCollection('test.foo');
+
+mColl.insert({});
+printjson(master.adminCommand("replSetGetStatus"));
+printjson(master.adminCommand({getLastError:1, w:7, wtimeout:30*1000}));
+
+// partition 012 | 3456 with 0 and 6 the old and new master
+
+
+printjson({startPartition: new Date()});
+replTest.partition(0,3)
+replTest.partition(0,4)
+replTest.partition(0,5)
+replTest.partition(0,6)
+replTest.partition(1,3)
+replTest.partition(1,4)
+replTest.partition(1,5)
+replTest.partition(1,6)
+replTest.partition(2,3)
+replTest.partition(2,4)
+replTest.partition(2,5)
+replTest.partition(2,6)
+printjson({endPartition: new Date()});
+
+var gotThrough = 0
+try {
+ while (true){
+ mColl.insert({})
+ out = master.adminCommand({getLastError:1, w:3});
+ if (out.err)
+ break;
+
+ gotThrough++;
+ }
+}
+catch (e) {
+ print("caught exception");
+}
+
+printjson({gotThrough: gotThrough});
+printjson({cantWriteOldPrimary: new Date()});
+printjson(master.adminCommand("replSetGetStatus"));
+
+assert(gotThrough > 0, "gotOneThrough");
+
+sleep(5*1000); // make sure the next opTime gets a new seconds field
+
+replTest.waitForMaster();
+var master2 = replTest.getMaster();
+printjson(master2.adminCommand("replSetGetStatus"));
+
+var m2Coll = master2.getCollection('test.foo');
+
+var sentinel = {_id: 'sentinel'} // used to detect which master's data is used
+m2Coll.insert(sentinel);
+printjson(master2.adminCommand({getLastError:1, w:4, wtimeout:30*1000}));
+printjson(master2.adminCommand("replSetGetStatus"));
+
+m2Coll.insert({}); // this shouldn't be necessary but the next GLE doesn't work without it
+
+printjson({startUnPartition: new Date()});
+replTest.unPartition(0,3)
+replTest.unPartition(0,4)
+replTest.unPartition(0,5)
+replTest.unPartition(0,6)
+replTest.unPartition(1,3)
+replTest.unPartition(1,4)
+replTest.unPartition(1,5)
+replTest.unPartition(1,6)
+replTest.unPartition(2,3)
+replTest.unPartition(2,4)
+replTest.unPartition(2,5)
+replTest.unPartition(2,6)
+printjson({endUnPartition: new Date()});
+
+printjson(master2.adminCommand({getLastError:1, w:7, wtimeout:30*1000}));
+printjson(master2.adminCommand("replSetGetStatus"));
+
+assert.soon(function() {return master.adminCommand('isMaster').ismaster},
+ "Node 0 back to primary",
+ 60*1000/*needs to be longer than LeaseTime*/);
+printjson(master.adminCommand("replSetGetStatus"));
+
+// make sure old master rolled back to new master
+assert.eq(m2Coll.count(sentinel), 1, "check sentinel on node 6");
+assert.eq(mColl.count(sentinel), 1, "check sentinel on node 0");
+
+replTest.stopSet();
+
+}
+
+
+
diff --git a/jstests/replsets/rslib.js b/jstests/replsets/rslib.js
index c072829..19271c9 100644
--- a/jstests/replsets/rslib.js
+++ b/jstests/replsets/rslib.js
@@ -2,7 +2,7 @@
var count = 0;
var w = 0;
-var wait = function(f) {
+var wait = function(f,msg) {
w++;
var n = 0;
while (!f()) {
@@ -11,7 +11,7 @@ var wait = function(f) {
if (++n == 4) {
print("" + f);
}
- assert(n < 200, 'tried 200 times, giving up');
+ assert(n < 200, 'tried 200 times, giving up on ' + msg );
sleep(1000);
}
};
@@ -61,3 +61,43 @@ var getLatestOp = function(server) {
}
return null;
};
+
+
+var waitForAllMembers = function(master) {
+ var ready = false;
+ var count = 0;
+
+ outer:
+ while (count < 60) {
+ count++;
+ var state = master.getSisterDB("admin").runCommand({replSetGetStatus:1});
+ occasionally(function() { printjson(state); }, 10);
+
+ for (var m in state.members) {
+ if (state.members[m].state != 2 && state.members[m].state != 1) {
+ sleep(1000);
+ continue outer;
+ }
+ }
+ return;
+ }
+
+ assert(false, "all members not ready");
+};
+
+var reconfig = function(rs, config) {
+ var admin = rs.getMaster().getDB("admin");
+
+ try {
+ var ok = admin.runCommand({replSetReconfig : config});
+ assert.eq(ok.ok,1);
+ }
+ catch(e) {
+ print(e);
+ }
+
+ master = rs.getMaster().getDB("admin");
+ waitForAllMembers(master);
+
+ return master;
+};
diff --git a/jstests/replsets/slavedelay1.js b/jstests/replsets/slavedelay1.js
index e549822..9301c8e 100644
--- a/jstests/replsets/slavedelay1.js
+++ b/jstests/replsets/slavedelay1.js
@@ -1,22 +1,4 @@
-
-var waitForAllMembers = function(master) {
- var ready = false;
-
- outer:
- while (true) {
- var state = master.getSisterDB("admin").runCommand({replSetGetStatus:1});
- printjson(state);
-
- for (var m in state.members) {
- if (state.members[m].state != 2 && state.members[m].state != 1) {
- sleep(10000);
- continue outer;
- }
- }
- return;
- }
-};
-
+load("jstests/replsets/rslib.js");
doTest = function( signal ) {
@@ -30,7 +12,7 @@ doTest = function( signal ) {
/* set slaveDelay to 30 seconds */
var config = replTest.getReplSetConfig();
config.members[2].priority = 0;
- config.members[2].slaveDelay = 30;
+ config.members[2].slaveDelay = 10;
replTest.initiate(config);
@@ -59,11 +41,16 @@ doTest = function( signal ) {
// make sure delayed slave doesn't have it
assert.eq(slave[1].foo.findOne(), null);
- // wait 35 seconds
- sleep(35000);
-
+ for (var i=0; i<8; i++) {
+ assert.eq(slave[1].foo.findOne(), null);
+ sleep(1000);
+ }
+
// now delayed slave should have it
- assert.eq(slave[1].foo.findOne().x, 1);
+ assert.soon(function() {
+ var z = slave[1].foo.findOne();
+ return z && z.x == 1;
+ });
/************* Part 2 *******************/
@@ -79,9 +66,15 @@ doTest = function( signal ) {
assert.eq(slave[0].foo.findOne({_id : 99}).foo, "bar");
assert.eq(slave[1].foo.findOne({_id : 99}), null);
- sleep(35000);
-
- assert.eq(slave[1].foo.findOne({_id : 99}).foo, "bar");
+ for (var i=0; i<8; i++) {
+ assert.eq(slave[1].foo.findOne({_id:99}), null);
+ sleep(1000);
+ }
+
+ assert.soon(function() {
+ var z = slave[1].foo.findOne({_id : 99});
+ return z && z.foo == "bar";
+ });
/************* Part 3 *******************/
@@ -94,34 +87,53 @@ doTest = function( signal ) {
config.version++;
config.members.push({_id : 3, host : host+":31007",priority:0, slaveDelay:10});
- var admin = master.getSisterDB("admin");
- try {
- var ok = admin.runCommand({replSetReconfig : config});
- assert.eq(ok.ok,1);
- }
- catch(e) {
- print(e);
- }
-
- master = replTest.getMaster().getDB(name);
-
- waitForAllMembers(master);
-
- sleep(15000);
-
+ master = reconfig(replTest, config);
+ master = master.getSisterDB(name);
+
// it should be all caught up now
master.foo.insert({_id : 123, "x" : "foo"});
master.runCommand({getlasterror:1,w:2});
conn.setSlaveOk();
- assert.eq(conn.getDB(name).foo.findOne({_id:123}), null);
+
+ for (var i=0; i<8; i++) {
+ assert.eq(conn.getDB(name).foo.findOne({_id:123}), null);
+ sleep(1000);
+ }
+
+ assert.soon(function() {
+ var z = conn.getDB(name).foo.findOne({_id:123});
+ return z != null && z.x == "foo"
+ });
+
+ /************* Part 4 ******************/
- sleep(15000);
+ print("reconfigure slavedelay");
+
+ config.version++;
+ config.members[3].slaveDelay = 15;
- assert.eq(conn.getDB(name).foo.findOne({_id:123}).x, "foo");
+ master = reconfig(replTest, config);
+ master = master.getSisterDB(name);
+ assert.soon(function() {
+ return conn.getDB("local").system.replset.findOne().version == config.version;
+ });
+
+ master.foo.insert({_id : 124, "x" : "foo"});
+
+ for (var i=0; i<13; i++) {
+ assert.eq(conn.getDB(name).foo.findOne({_id:124}), null);
+ sleep(1000);
+ }
+
+ assert.soon(function() {
+ var z = conn.getDB(name).foo.findOne({_id:124});
+ return z != null && z.x == "foo"
+ });
+
- replTest.stopSet();
+ replTest.stopSet();
}
doTest(15);
diff --git a/jstests/replsets/stale_clustered.js b/jstests/replsets/stale_clustered.js
new file mode 100644
index 0000000..457231e
--- /dev/null
+++ b/jstests/replsets/stale_clustered.js
@@ -0,0 +1,101 @@
+// this tests that slaveOk'd queries in sharded setups get correctly routed when
+// a slave goes into RECOVERING state, and don't break
+
+function prt(s) {
+ print("\nstale_clustered.js " + s);
+ print();
+}
+
+var shardTest = new ShardingTest( name = "clusteredstale" ,
+ numShards = 2 ,
+ verboseLevel = 0 ,
+ numMongos = 2 ,
+ otherParams = { rs : true } )//,
+ //rs0 : { logpath : "$path/mongod.log" },
+ //rs1 : { logpath : "$path/mongod.log" } } );
+
+shardTest.setBalancer( false )
+
+var mongos = shardTest.s0
+var mongosSOK = shardTest.s1
+mongosSOK.setSlaveOk()
+
+var admin = mongos.getDB("admin")
+var config = mongos.getDB("config")
+
+var dbase = mongos.getDB("test")
+var coll = dbase.getCollection("foo")
+var dbaseSOk = mongosSOK.getDB( "" + dbase )
+var collSOk = mongosSOK.getCollection( "" + coll )
+
+
+var rsA = shardTest._rs[0].test
+var rsB = shardTest._rs[1].test
+
+rsA.getMaster().getDB( "test_a" ).dummy.insert( { x : 1 } )
+rsB.getMaster().getDB( "test_b" ).dummy.insert( { x : 1 } )
+
+rsA.awaitReplication()
+rsB.awaitReplication()
+
+prt("1: initial insert")
+
+coll.save({ _id : -1, a : "a", date : new Date() })
+coll.save({ _id : 1, b : "b", date : new Date() })
+
+prt("2: shard collection")
+
+shardTest.shardGo( coll, /* shardBy */ { _id : 1 }, /* splitAt */ { _id : 0 } )
+
+prt("3: test normal and slaveOk queries")
+
+// Make shardA and rsA the same
+var shardA = shardTest.getShard( coll, { _id : -1 } )
+var shardAColl = shardA.getCollection( "" + coll )
+var shardB = shardTest.getShard( coll, { _id : 1 } )
+
+if( shardA.name == rsB.getURL() ){
+ var swap = rsB
+ rsB = rsA
+ rsA = swap
+}
+
+rsA.awaitReplication()
+rsB.awaitReplication()
+
+assert.eq( coll.find().itcount(), collSOk.find().itcount() )
+assert.eq( shardAColl.find().itcount(), 1 )
+assert.eq( shardAColl.findOne()._id, -1 )
+
+prt("5: overflow oplog");
+
+var secs = rsA.getSecondaries()
+var goodSec = secs[0]
+var badSec = secs[1]
+
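+// overflow() is assumed (test-harness helper) to push enough writes through the
+// primary that badSec's sync point falls off the back of the oplog, leaving it
+// too stale to resync and forcing it into RECOVERING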
+rsA.overflow( badSec )
+
+prt("6: stop non-overflowed secondary")
+
+rsA.stop( goodSec, undefined, true )
+
+prt("7: check our regular and slaveok query")
+
+assert.eq( coll.find().itcount(), collSOk.find().itcount() )
+
+prt("8: restart both our secondaries clean")
+
+rsA.restart( rsA.getSecondaries(), { remember : true, startClean : true }, undefined, 5 * 60 * 1000 )
+
+prt("9: wait for recovery")
+
+rsA.waitForState( rsA.getSecondaries(), rsA.SECONDARY, 5 * 60 * 1000 )
+
+prt("10: check our regular and slaveok query")
+
+assert.eq( coll.find().itcount(), collSOk.find().itcount() )
+
+prt("DONE\n\n\n");
+
+//shardTest.stop()
+
diff --git a/jstests/replsets/stepdown.js b/jstests/replsets/stepdown.js
new file mode 100644
index 0000000..3a17b0e
--- /dev/null
+++ b/jstests/replsets/stepdown.js
@@ -0,0 +1,142 @@
+/* check that on a loss of primary, another node doesn't assume primary if it is stale
+ we force a stepDown to test this
+ we use lock+fsync to force secondary to be stale
+*/
+
+var replTest = new ReplSetTest({ name: 'testSet', nodes: 2 });
+var nodes = replTest.startSet();
+replTest.initiate();
+var master = replTest.getMaster();
+
+// do a write
+print("\ndo a write");
+master.getDB("foo").bar.insert({x:1});
+replTest.awaitReplication();
+
+// lock secondary
+print("\nlock secondary");
+var locked = replTest.liveNodes.slaves[0];
+printjson( locked.getDB("admin").runCommand({fsync : 1, lock : 1}) );
+
+print("\nwaiting 11ish seconds");
+
+sleep(2000);
+
+for (var i = 0; i < 11; i++) {
+ // do another write
+ master.getDB("foo").bar.insert({x:i});
+ sleep(1000);
+}
+
+print("\n do stepdown that should not work");
+
+// this should fail, so we don't need to try/catch
+var result = master.getDB("admin").runCommand({replSetStepDown: 10});
+printjson(result);
+assert.eq(result.ok, 0);
+
+print("\n do stepdown that should work");
+try {
+ master.getDB("admin").runCommand({replSetStepDown: 50, force : true});
+}
+catch (e) {
+ print(e);
+}
+
+var r2 = master.getDB("admin").runCommand({ismaster : 1});
+assert.eq(r2.ismaster, false);
+assert.eq(r2.secondary, true);
+
+print("\nunlock");
+printjson(locked.getDB("admin").$cmd.sys.unlock.findOne());
+
+print("\nreset stepped down time");
+master.getDB("admin").runCommand({replSetFreeze:0});
+master = replTest.getMaster();
+
+print("\nmake 1 config with priorities");
+var config = master.getDB("local").system.replset.findOne();
+print("\nmake 2");
+config.version++;
+config.members[0].priority = 2;
+config.members[1].priority = 1;
+// make sure 1 can stay master once 0 is down
+config.members[0].votes = 0;
+try {
+ master.getDB("admin").runCommand({replSetReconfig : config});
+}
+catch (e) {
+ print(e);
+}
+
+print("\nawait");
+replTest.awaitReplication();
+
+master = replTest.getMaster();
+var firstMaster = master;
+print("\nmaster is now "+firstMaster);
+
+try {
+ printjson(master.getDB("admin").runCommand({replSetStepDown : 100, force : true}));
+}
+catch (e) {
+ print(e);
+}
+
+print("\nget a master");
+replTest.getMaster();
+
+assert.soon(function() {
+ var secondMaster = replTest.getMaster();
+ return firstMaster+"" != secondMaster+"";
+ }, 'making sure '+firstMaster+' isn\'t still master', 60000);
+
+
+print("\ncheck shutdown command");
+
+master = replTest.liveNodes.master;
+var slave = replTest.liveNodes.slaves[0];
+var slaveId = replTest.getNodeId(slave);
+
+try {
+ slave.adminCommand({shutdown :1})
+}
+catch (e) {
+ print(e);
+}
+
+print("\nsleeping");
+
+sleep(2000);
+
+print("\nrunning shutdown without force on master: "+master);
+
+result = replTest.getMaster().getDB("admin").runCommand({shutdown : 1, timeoutSecs : 3});
+assert.eq(result.ok, 0);
+
+print("\nsend shutdown command");
+
+var currentMaster = replTest.getMaster();
+try {
+ printjson(currentMaster.getDB("admin").runCommand({shutdown : 1, force : true}));
+}
+catch (e) {
+ print(e);
+}
+
+print("checking "+currentMaster+" is actually shutting down");
+assert.soon(function() {
+ try {
+ currentMaster.findOne();
+ }
+ catch(e) {
+ return true;
+ }
+ return false;
+});
+
+print("\nOK 1");
+
+replTest.stopSet();
+
+print("OK 2");
diff --git a/jstests/replsets/stepdown2.js b/jstests/replsets/stepdown2.js
new file mode 100755
index 0000000..591fea2
--- /dev/null
+++ b/jstests/replsets/stepdown2.js
@@ -0,0 +1,139 @@
+print("\nstepdown2.js");
+
+var replTest = new ReplSetTest({ name: 'testSet', nodes: 2 });
+var nodes = replTest.startSet();
+replTest.initiate();
+var master = replTest.getMaster();
+
+// do a write
+print("\ndo a write");
+master.getDB("foo").bar.insert({x:1});
+replTest.awaitReplication();
+
+// lock secondary
+print("\nlock secondary");
+var locked = replTest.liveNodes.slaves[0];
+printjson( locked.getDB("admin").runCommand({fsync : 1, lock : 1}) );
+
+print("\nwaiting 11ish seconds");
+
+sleep(3003);
+
+for (var i = 0; i < 11; i++) {
+ // do another write
+ master.getDB("foo").bar.insert({x:i});
+ sleep(1008);
+}
+
+print("\n do stepdown that should not work");
+
+// this should fail, so we don't need to try/catch
+var result = master.getDB("admin").runCommand({replSetStepDown: 10});
+printjson(result);
+assert.eq(result.ok, 0);
+
+print("\n do stepdown that should work");
+try {
+ master.getDB("admin").runCommand({replSetStepDown: 50, force : true});
+}
+catch (e) {
+ print(e);
+}
+
+var r2 = master.getDB("admin").runCommand({ismaster : 1});
+assert.eq(r2.ismaster, false);
+assert.eq(r2.secondary, true);
+
+print("\nunlock");
+printjson(locked.getDB("admin").$cmd.sys.unlock.findOne());
+
+print("\nreset stepped down time");
+master.getDB("admin").runCommand({replSetFreeze:0});
+master = replTest.getMaster();
+
+print("\nmake 1 config with priorities");
+var config = master.getDB("local").system.replset.findOne();
+print("\nmake 2");
+config.version++;
+config.members[0].priority = 2;
+config.members[1].priority = 1;
+// make sure 1 can stay master once 0 is down
+config.members[0].votes = 0;
+try {
+ master.getDB("admin").runCommand({replSetReconfig : config});
+}
+catch (e) {
+ print(e);
+}
+
+print("\nawait");
+replTest.awaitReplication();
+
+master = replTest.getMaster();
+var firstMaster = master;
+print("\nmaster is now "+firstMaster);
+
+try {
+ printjson(master.getDB("admin").runCommand({replSetStepDown : 100, force : true}));
+}
+catch (e) {
+ print(e);
+}
+
+print("\nget a master");
+replTest.getMaster();
+
+assert.soon(function() {
+ var secondMaster = replTest.getMaster();
+ return firstMaster+"" != secondMaster+"";
+ }, 'making sure '+firstMaster+' isn\'t still master', 60000);
+
+
+print("\ncheck shutdown command");
+
+master = replTest.liveNodes.master;
+var slave = replTest.liveNodes.slaves[0];
+var slaveId = replTest.getNodeId(slave);
+
+try {
+ slave.adminCommand({shutdown :1})
+}
+catch (e) {
+ print(e);
+}
+
+print("\nsleeping");
+
+sleep(2000);
+
+print("\nrunning shutdown without force on master: "+master);
+
+result = replTest.getMaster().getDB("admin").runCommand({shutdown : 1, timeoutSecs : 3});
+assert.eq(result.ok, 0);
+
+print("\nsend shutdown command");
+
+var currentMaster = replTest.getMaster();
+try {
+ printjson(currentMaster.getDB("admin").runCommand({shutdown : 1, force : true}));
+}
+catch (e) {
+ print(e);
+}
+
+print("checking "+currentMaster+" is actually shutting down");
+assert.soon(function() {
+ try {
+ currentMaster.findOne();
+ }
+ catch(e) {
+ return true;
+ }
+ return false;
+});
+
+print("\nOK 1 stepdown2.js");
+
+replTest.stopSet();
+
+print("\nOK 2 stepdown2.js");
diff --git a/jstests/replsets/sync1.js b/jstests/replsets/sync1.js
index af16044..a090c1c 100644
--- a/jstests/replsets/sync1.js
+++ b/jstests/replsets/sync1.js
@@ -15,156 +15,172 @@ function pause(s) {
sleep(4000);
}
}
-}
-
-doTest = function (signal) {
-
- var replTest = new ReplSetTest({ name: 'testSet', nodes: 3 });
- var nodes = replTest.startSet({ oplogSize: "40" });
-
- sleep(5000);
-
- print("\nsync1.js ********************************************************************** part 0");
- replTest.initiate();
-
- // get master
- print("\nsync1.js ********************************************************************** part 1");
- var master = replTest.getMaster();
- print("\nsync1.js ********************************************************************** part 2");
- var dbs = [master.getDB("foo")];
-
- for (var i in nodes) {
- if (nodes[i] + "" == master + "") {
- continue;
- }
- dbs.push(nodes[i].getDB("foo"));
- nodes[i].setSlaveOk();
- }
-
- print("\nsync1.js ********************************************************************** part 3");
- dbs[0].bar.drop();
-
- print("\nsync1.js ********************************************************************** part 4");
- // slow things down a bit
- dbs[0].bar.ensureIndex({ x: 1 });
- dbs[0].bar.ensureIndex({ y: 1 });
- dbs[0].bar.ensureIndex({ z: 1 });
- dbs[0].bar.ensureIndex({ w: 1 });
-
- var ok = false;
- var inserts = 10000;
-
- print("\nsync1.js ********************************************************************** part 5");
-
- for (var i = 0; i < inserts; i++) {
- dbs[0].bar.insert({ x: "foo" + i, y: "bar" + i, z: i, w: "biz baz bar boo" });
- }
-
- var status;
- do {
- sleep(1000);
- status = dbs[0].getSisterDB("admin").runCommand({ replSetGetStatus: 1 });
- } while (status.members[1].state != 2 || status.members[2].state != 2);
-
- print("\nsync1.js ********************************************************************** part 6");
- dbs[0].getSisterDB("admin").runCommand({ replSetTest: 1, blind: true });
-
- print("\nsync1.js ********************************************************************** part 7");
-
- sleep(5000);
-
- var max1;
- var max2;
- var count = 0;
- while (1) {
- try {
- max1 = dbs[1].bar.find().sort({ z: -1 }).limit(1).next();
- max2 = dbs[2].bar.find().sort({ z: -1 }).limit(1).next();
- }
- catch (e) {
- print("\nsync1.js couldn't get max1/max2; retrying " + e);
- sleep(2000);
- count++;
- if (count == 50) {
- assert(false, "errored out 50 times");
- }
- continue;
- }
- break;
- }
-
- // wait for a new master to be elected
- sleep(5000);
- var newMaster;
-
- print("\nsync1.js ********************************************************************** part 9");
-
- for (var q = 0; q < 10; q++) {
- // figure out who is master now
- newMaster = replTest.getMaster();
- if (newMaster + "" != master + "")
- break;
- sleep(2000);
- if (q > 6) print("sync1.js zzz....");
- }
-
- assert(newMaster + "" != master + "", "new master is " + newMaster + ", old master was " + master);
-
- print("\nsync1.js new master is " + newMaster + ", old master was " + master);
-
- print("\nsync1.js ********************************************************************** part 9.1");
-
- count = 0;
- countExceptions = 0;
- do {
- try {
- max1 = dbs[1].bar.find().sort({ z: -1 }).limit(1).next();
- max2 = dbs[2].bar.find().sort({ z: -1 }).limit(1).next();
- }
- catch (e) {
- if (countExceptions++ > 300) {
- print("dbs[1]:");
- try {
- printjson(dbs[1].isMaster());
- printjson(dbs[1].bar.count());
+}
+
+doTest = function (signal) {
+
+ var replTest = new ReplSetTest({ name: 'testSet', nodes: 3 });
+ var nodes = replTest.startSet({ oplogSize: "40" });
+ print("\nsync1.js ********************************************************************** part 0");
+ replTest.initiate();
+
+ // get master
+ print("\nsync1.js ********************************************************************** part 1");
+ var master = replTest.getMaster();
+ print("\nsync1.js ********************************************************************** part 2");
+ var dbs = [master.getDB("foo")];
+
+ for (var i in nodes) {
+ if (nodes[i] + "" == master + "") {
+ continue;
+ }
+ dbs.push(nodes[i].getDB("foo"));
+ nodes[i].setSlaveOk();
+ }
+
+ print("\nsync1.js ********************************************************************** part 3");
+ dbs[0].bar.drop();
+
+ print("\nsync1.js ********************************************************************** part 4");
+ // slow things down a bit
+ dbs[0].bar.ensureIndex({ x: 1 });
+ dbs[0].bar.ensureIndex({ y: 1 });
+ dbs[0].bar.ensureIndex({ z: 1 });
+ dbs[0].bar.ensureIndex({ w: 1 });
+
+ var ok = false;
+ var inserts = 10000;
+
+ print("\nsync1.js ********************************************************************** part 5");
+
+ for (var i = 0; i < inserts; i++) {
+ dbs[0].bar.insert({ x: "foo" + i, y: "bar" + i, z: i, w: "biz baz bar boo" });
+ }
+
+ var status;
+ var secondaries = 0;
+ var count = 0;
+ do {
+ sleep(1000);
+ status = dbs[0].getSisterDB("admin").runCommand({ replSetGetStatus: 1 });
+
+ occasionally(function() {
+ printjson(status);
+ }, 30);
+
+ secondaries = 0;
+ secondaries += status.members[0].state == 2 ? 1 : 0;
+ secondaries += status.members[1].state == 2 ? 1 : 0;
+ secondaries += status.members[2].state == 2 ? 1 : 0;
+ count++;
+ } while (secondaries < 2 && count < 300);
+
+ assert(count < 300);
+
+ // Need to be careful here, allocating datafiles for the slaves can take a *long* time on slow systems
+ sleep(7000);
+
+ print("\nsync1.js ********************************************************************** part 6");
+ dbs[0].getSisterDB("admin").runCommand({ replSetTest: 1, blind: true });
+
+ print("\nsync1.js ********************************************************************** part 7");
+
+ sleep(5000);
+ // If we start seeing "hasNext: false" errors together with "done allocating datafile" messages, the sleep in part 5 may need to be increased again
+
+
+ var max1;
+ var max2;
+ var count = 0;
+ while (1) {
+ try {
+ max1 = dbs[1].bar.find().sort({ z: -1 }).limit(1).next();
+ max2 = dbs[2].bar.find().sort({ z: -1 }).limit(1).next();
+ }
+ catch (e) {
+ print("\nsync1.js couldn't get max1/max2; retrying " + e);
+ sleep(2000);
+ count++;
+ if (count == 50) {
+ assert(false, "errored out 50 times");
+ }
+ continue;
+ }
+ break;
+ }
+
+ // wait for a new master to be elected
+ sleep(5000);
+ var newMaster;
+
+ print("\nsync1.js ********************************************************************** part 9");
+
+ for (var q = 0; q < 10; q++) {
+ // figure out who is master now
+ newMaster = replTest.getMaster();
+ if (newMaster + "" != master + "")
+ break;
+ sleep(2000);
+ if (q > 6) print("sync1.js zzz....");
+ }
+
+ assert(newMaster + "" != master + "", "new master is " + newMaster + ", old master was " + master);
+
+ print("\nsync1.js new master is " + newMaster + ", old master was " + master);
+
+ print("\nsync1.js ********************************************************************** part 9.1");
+
+ count = 0;
+ countExceptions = 0;
+ do {
+ try {
+ max1 = dbs[1].bar.find().sort({ z: -1 }).limit(1).next();
+ max2 = dbs[2].bar.find().sort({ z: -1 }).limit(1).next();
+ }
+ catch (e) {
+ if (countExceptions++ > 300) {
+ print("dbs[1]:");
+ try {
+ printjson(dbs[1].isMaster());
+ printjson(dbs[1].bar.count());
printjson(dbs[1].adminCommand({replSetGetStatus : 1}));
- }
- catch (e) { print(e); }
- print("dbs[2]:");
- try {
- printjson(dbs[2].isMaster());
- printjson(dbs[2].bar.count());
+ }
+ catch (e) { print(e); }
+ print("dbs[2]:");
+ try {
+ printjson(dbs[2].isMaster());
+ printjson(dbs[2].bar.count());
printjson(dbs[2].adminCommand({replSetGetStatus : 1}));
- }
- catch (e) { print(e); }
- assert(false, "sync1.js too many exceptions, failing");
- }
- print("\nsync1.js: exception querying; will sleep and try again " + e);
- sleep(3000);
- continue;
- }
-
- print("\nsync1.js waiting for match " + count + " " + Date() + " z[1]:" + max1.z + " z[2]:" + max2.z);
-
- // printjson(max1);
- // printjson(max2);
-
- sleep(2000);
-
- count++;
- if (count == 100) {
- pause("fail phase 1");
- assert(false, "replsets/\nsync1.js fails timing out");
- replTest.stopSet(signal);
- return;
- }
- } while (max1.z != max2.z);
-
- // okay, now they're caught up. We have a max: max1.z
-
- print("\nsync1.js ********************************************************************** part 10");
-
- // now, let's see if rollback works
+ }
+ catch (e) { print(e); }
+ assert(false, "sync1.js too many exceptions, failing");
+ }
+ print("\nsync1.js: exception querying; will sleep and try again " + e);
+ sleep(3000);
+ continue;
+ }
+
+ print("\nsync1.js waiting for match " + count + " " + Date() + " z[1]:" + max1.z + " z[2]:" + max2.z);
+
+ // printjson(max1);
+ // printjson(max2);
+
+ sleep(2000);
+
+ count++;
+ if (count == 100) {
+ pause("fail phase 1");
+ assert(false, "replsets/\nsync1.js fails timing out");
+ replTest.stopSet(signal);
+ return;
+ }
+ } while (max1.z != max2.z);
+
+ // okay, now they're caught up. We have a max: max1.z
+
+ print("\nsync1.js ********************************************************************** part 10");
+
+ // now, let's see if rollback works
wait(function() {
try {
dbs[0].adminCommand({ replSetTest: 1, blind: false });
@@ -180,50 +196,50 @@ doTest = function (signal) {
});
- dbs[0].getMongo().setSlaveOk();
- sleep(5000);
-
- // now this should resync
- print("\nsync1.js ********************************************************************** part 11");
- var max0 = null;
- count = 0;
- do {
- try {
- max0 = dbs[0].bar.find().sort({ z: -1 }).limit(1).next();
- max1 = dbs[1].bar.find().sort({ z: -1 }).limit(1).next();
- }
- catch (e) {
- print("\nsync1.js part 11 exception on bar.find() will sleep and try again " + e);
- sleep(2000);
- continue;
- }
-
- print("part 11");
- if (max0) {
- print("max0.z:" + max0.z);
- print("max1.z:" + max1.z);
- }
-
- sleep(2000);
-
- count++;
- if (count == 100) {
- printjson(dbs[0].isMaster());
+ dbs[0].getMongo().setSlaveOk();
+ sleep(5000);
+
+ // now this should resync
+ print("\nsync1.js ********************************************************************** part 11");
+ var max0 = null;
+ count = 0;
+ do {
+ try {
+ max0 = dbs[0].bar.find().sort({ z: -1 }).limit(1).next();
+ max1 = dbs[1].bar.find().sort({ z: -1 }).limit(1).next();
+ }
+ catch (e) {
+ print("\nsync1.js part 11 exception on bar.find() will sleep and try again " + e);
+ sleep(2000);
+ continue;
+ }
+
+ print("part 11");
+ if (max0) {
+ print("max0.z:" + max0.z);
+ print("max1.z:" + max1.z);
+ }
+
+ sleep(2000);
+
+ count++;
+ if (count == 100) {
+ printjson(dbs[0].isMaster());
printjson(dbs[0].adminCommand({replSetGetStatus:1}));
- printjson(dbs[1].isMaster());
+ printjson(dbs[1].isMaster());
printjson(dbs[1].adminCommand({replSetGetStatus:1}));
- pause("FAIL part 11");
- assert(false, "replsets/\nsync1.js fails timing out");
- replTest.stopSet(signal);
- return;
- }
- //print("||||| count:" + count);
- //printjson(max0);
- } while (!max0 || max0.z != max1.z);
-
- print("\nsync1.js ********************************************************************** part 12");
- pause("\nsync1.js success");
- replTest.stopSet(signal);
+ pause("FAIL part 11");
+ assert(false, "replsets/\nsync1.js fails timing out");
+ replTest.stopSet(signal);
+ return;
+ }
+ //print("||||| count:" + count);
+ //printjson(max0);
+ } while (!max0 || max0.z != max1.z);
+
+ print("\nsync1.js ********************************************************************** part 12");
+ pause("\nsync1.js success");
+ replTest.stopSet(signal);
}
if( 1 || debugging ) {
diff --git a/jstests/replsets/sync2.js b/jstests/replsets/sync2.js
new file mode 100644
index 0000000..9f6c205
--- /dev/null
+++ b/jstests/replsets/sync2.js
@@ -0,0 +1,48 @@
+
+var replTest = new ReplSetTest({ name: 'testSet', nodes: 5 });
+var nodes = replTest.startSet({ oplogSize: "2" });
+replTest.initiate();
+
+var master = replTest.getMaster();
+var config = master.getDB("local").system.replset.findOne();
+config.version++;
+config.members[0].priority = 2;
+
+try {
+ master.getDB("admin").runCommand({replSetReconfig : config});
+}
+catch(e) {
+ print(e);
+}
+
+// initial sync
+master.getDB("foo").bar.insert({x:1});
+replTest.awaitReplication();
+
+master = replTest.bridge();
+
+replTest.partition(0,4);
+replTest.partition(1,2);
+replTest.partition(2,3);
+replTest.partition(3,1);
+
+// 4 is connected to 2
+replTest.partition(4,1);
+replTest.partition(4,3);
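+// Resulting topology (assuming partition(a,b) severs the bridged link between a and b):
+// the primary (node 0) still reaches 1, 2 and 3 directly, while node 4 is reachable
+// only through node 2, so the w:5 write below has to chain through node 2.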
+
+master.getDB("foo").bar.insert({x:1});
+replTest.awaitReplication();
+
+var result = master.getDB("admin").runCommand({getLastError:1,w:5,wtimeout:1000});
+assert.eq(null, result.err, tojson(result));
+
+// 4 is connected to 3
+replTest.partition(4,2);
+replTest.unPartition(4,3);
+
+master.getDB("foo").bar.insert({x:1});
+replTest.awaitReplication();
+
+result = master.getDB("admin").runCommand({getLastError:1,w:5,wtimeout:1000});
+assert.eq(null, result.err, tojson(result));
+
diff --git a/jstests/replsets/tags.js b/jstests/replsets/tags.js
new file mode 100644
index 0000000..4e73886
--- /dev/null
+++ b/jstests/replsets/tags.js
@@ -0,0 +1,154 @@
+
+var num = 5;
+var host = getHostName();
+var name = "tags";
+
+var replTest = new ReplSetTest( {name: name, nodes: num, startPort:31000} );
+var nodes = replTest.startSet();
+var port = replTest.ports;
+replTest.initiate({_id : name, members :
+ [
+ {_id:0, host : host+":"+port[0], tags : {"server" : "0", "dc" : "ny", "ny" : "1", "rack" : "ny.rk1"}},
+ {_id:1, host : host+":"+port[1], tags : {"server" : "1", "dc" : "ny", "ny" : "2", "rack" : "ny.rk1"}},
+ {_id:2, host : host+":"+port[2], tags : {"server" : "2", "dc" : "ny", "ny" : "3", "rack" : "ny.rk2", "2" : "this"}},
+ {_id:3, host : host+":"+port[3], tags : {"server" : "3", "dc" : "sf", "sf" : "1", "rack" : "sf.rk1"}},
+ {_id:4, host : host+":"+port[4], tags : {"server" : "4", "dc" : "sf", "sf" : "2", "rack" : "sf.rk2"}},
+ ],
+ settings : {
+ getLastErrorModes : {
+ "important" : {"dc" : 2, "server" : 3},
+ "a machine" : {"server" : 1}
+ }
+ }});
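+// Note on getLastErrorModes semantics: a mode such as "important" : {"dc" : 2, "server" : 3}
+// is satisfied once the write has been acknowledged by members spanning two distinct
+// values of the "dc" tag and three distinct values of the "server" tag.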
+
+var master = replTest.getMaster();
+
+var config = master.getDB("local").system.replset.findOne();
+
+printjson(config);
+var modes = config.settings.getLastErrorModes;
+assert.eq(typeof modes, "object");
+assert.eq(modes.important.dc, 2);
+assert.eq(modes.important.server, 3);
+assert.eq(modes["a machine"]["server"], 1);
+
+config.version++;
+config.members[1].priority = 1.5;
+config.members[2].priority = 2;
+modes.rack = {"sf" : 1};
+modes.niceRack = {"sf" : 2};
+modes["a machine"]["2"] = 1;
+modes.on2 = {"2" : 1}
+
+try {
+ master.getDB("admin").runCommand({replSetReconfig : config});
+}
+catch(e) {
+ print(e);
+}
+
+replTest.awaitReplication();
+
+print("primary should now be 2");
+master = replTest.getMaster();
+config = master.getDB("local").system.replset.findOne();
+printjson(config);
+
+modes = config.settings.getLastErrorModes;
+assert.eq(typeof modes, "object");
+assert.eq(modes.important.dc, 2);
+assert.eq(modes.important.server, 3);
+assert.eq(modes["a machine"]["server"], 1);
+assert.eq(modes.rack["sf"], 1);
+assert.eq(modes.niceRack["sf"], 2);
+
+print("bridging");
+replTest.bridge();
+
+replTest.partition(0, 3);
+replTest.partition(0, 4);
+replTest.partition(1, 3);
+replTest.partition(1, 4);
+replTest.partition(2, 3);
+replTest.partition(2, 4);
+replTest.partition(3, 4);
+print("done bridging");
+
+print("test1");
+print("2 should be primary");
+master = replTest.getMaster();
+
+printjson(master.getDB("admin").runCommand({replSetGetStatus:1}));
+
+var timeout = 20000;
+
+master.getDB("foo").bar.insert({x:1});
+var result = master.getDB("foo").runCommand({getLastError:1,w:"rack",wtimeout:timeout});
+printjson(result);
+assert.eq(result.err, "timeout");
+
+replTest.unPartition(1,4);
+
+print("test2");
+master.getDB("foo").bar.insert({x:1});
+result = master.getDB("foo").runCommand({getLastError:1,w:"rack",wtimeout:timeout});
+printjson(result);
+assert.eq(result.err, null);
+
+print("test3");
+result = master.getDB("foo").runCommand({getLastError:1,w:"niceRack",wtimeout:timeout});
+printjson(result);
+assert.eq(result.err, "timeout");
+
+replTest.unPartition(3,4);
+
+print("test4");
+result = master.getDB("foo").runCommand({getLastError:1,w:"niceRack",wtimeout:timeout});
+printjson(result);
+assert.eq(result.err, null);
+
+print("non-existent w");
+result = master.getDB("foo").runCommand({getLastError:1,w:"blahblah",wtimeout:timeout});
+printjson(result);
+assert.eq(result.assertionCode, 14830);
+assert.eq(result.ok, 0);
+
+print("test on2");
+master.getDB("foo").bar.insert({x:1});
+result = master.getDB("foo").runCommand({getLastError:1,w:"on2",wtimeout:0});
+printjson(result);
+assert.eq(result.err, null);
+
+print("test two on the primary");
+master.getDB("foo").bar.insert({x:1});
+result = master.getDB("foo").runCommand({getLastError:1,w:"a machine",wtimeout:0});
+printjson(result);
+assert.eq(result.err, null);
+
+print("test5");
+master.getDB("foo").bar.insert({x:1});
+result = master.getDB("foo").runCommand({getLastError:1,w:"important",wtimeout:timeout});
+printjson(result);
+assert.eq(result.err, null);
+
+replTest.unPartition(1,3);
+
+replTest.partition(2, 0);
+replTest.partition(2, 1);
+replTest.stop(2);
+
+print("1 must become primary here because otherwise the other members will take too long timing out their old sync threads");
+master = replTest.getMaster();
+
+print("test6");
+master.getDB("foo").bar.insert({x:1});
+result = master.getDB("foo").runCommand({getLastError:1,w:"niceRack",wtimeout:timeout});
+printjson(result);
+assert.eq(result.err, null);
+
+print("test on2");
+master.getDB("foo").bar.insert({x:1});
+result = master.getDB("foo").runCommand({getLastError:1,w:"on2",wtimeout:timeout});
+printjson(result);
+assert.eq(result.err, "timeout");
+
diff --git a/jstests/replsets/tags2.js b/jstests/replsets/tags2.js
new file mode 100644
index 0000000..16dfcdf
--- /dev/null
+++ b/jstests/replsets/tags2.js
@@ -0,0 +1,44 @@
+// Change a getLastErrorMode from 2 to 3 servers
+
+var host = getHostName();
+var replTest = new ReplSetTest( {name: "rstag", nodes: 3, startPort: 31000} );
+var nodes = replTest.startSet();
+var ports = replTest.ports;
+var conf = {_id : "rstag", version: 1, members : [
+ {_id : 0, host : host+":"+ports[0], tags : {"backup" : "A"}},
+ {_id : 1, host : host+":"+ports[1], tags : {"backup" : "B"}},
+ {_id : 2, host : host+":"+ports[2], tags : {"backup" : "C"}} ],
+ settings : {getLastErrorModes : {
+ backedUp : {backup : 2} }} };
+replTest.initiate( conf );
+replTest.awaitReplication();
+
+master = replTest.getMaster();
+var db = master.getDB("test");
+db.foo.insert( {x:1} );
+var result = db.runCommand( {getLastError:1, w:"backedUp", wtimeout:20000} );
+assert.eq (result.err, null);
+
+conf.version = 2;
+conf.settings.getLastErrorModes.backedUp.backup = 3;
+master.getDB("admin").runCommand( {replSetReconfig: conf} );
+replTest.awaitReplication();
+
+master = replTest.getMaster();
+var db = master.getDB("test");
+db.foo.insert( {x:2} );
+var result = db.runCommand( {getLastError:1, w:"backedUp", wtimeout:20000} );
+assert.eq (result.err, null);
+
+conf.version = 3;
+conf.members[0].priority = 3;
+conf.members[2].priority = 0;
+master.getDB("admin").runCommand( {replSetReconfig: conf} );
+
+master = replTest.getMaster();
+var db = master.getDB("test");
+db.foo.insert( {x:3} );
+var result = db.runCommand( {getLastError:1, w:"backedUp", wtimeout:20000} );
+assert.eq (result.err, null);
+
+replTest.stopSet();
diff --git a/jstests/replsets/toostale.js b/jstests/replsets/toostale.js
index 0b8da0d..08b1a9c 100644
--- a/jstests/replsets/toostale.js
+++ b/jstests/replsets/toostale.js
@@ -32,7 +32,7 @@ var wait = function(f) {
}
var reconnect = function(a) {
- wait(function() {
+ wait(function() {
try {
a.bar.stats();
return true;
@@ -46,9 +46,14 @@ var reconnect = function(a) {
var name = "toostale"
var replTest = new ReplSetTest( {name: name, nodes: 3});
+var host = getHostName();
var nodes = replTest.startSet();
-replTest.initiate();
+replTest.initiate({_id : name, members : [
+ {_id : 0, host : host+":"+replTest.ports[0]},
+ {_id : 1, host : host+":"+replTest.ports[1], arbiterOnly : true},
+ {_id : 2, host : host+":"+replTest.ports[2]}
+]});
var master = replTest.getMaster();
var mdb = master.getDB("foo");
@@ -60,7 +65,7 @@ mdb.foo.save({a: 1000});
print("2: initial sync");
replTest.awaitReplication();
-print("3: blind s2");
+print("3: stop s2");
replTest.stop(2);
print("waiting until the master knows the slave is blind");
assert.soon(function() { return master.getDB("admin").runCommand({replSetGetStatus:1}).members[2].health == 0 });
@@ -82,7 +87,7 @@ while (count != prevCount) {
}
-print("5: unblind s2");
+print("5: restart s2");
replTest.restart(2);
print("waiting until the master knows the slave is not blind");
assert.soon(function() { return master.getDB("admin").runCommand({replSetGetStatus:1}).members[2].health != 0 });
@@ -106,16 +111,17 @@ replTest.restart(2);
print("8: check s2.state == 3");
-status = master.getDB("admin").runCommand({replSetGetStatus:1});
-while (status.state == 0) {
- print("state is 0: ");
- printjson(status);
- sleep(1000);
- status = master.getDB("admin").runCommand({replSetGetStatus:1});
-}
+assert.soon(function() {
+ var status = master.getDB("admin").runCommand({replSetGetStatus:1});
+ printjson(status);
+ return status.members && status.members[2].state == 3;
+});
-printjson(status);
-assert.eq(status.members[2].state, 3, 'recovering');
+print("make sure s2 doesn't become primary");
+replTest.stop(0);
+sleep(20000);
+printjson(replTest.nodes[2].getDB("admin").runCommand({isMaster : 1}));
+printjson(replTest.nodes[2].getDB("admin").runCommand({replSetGetStatus : 1}));
-replTest.stopSet(15);
+//replTest.stopSet(15);
diff --git a/jstests/replsets/twosets.js b/jstests/replsets/twosets.js
deleted file mode 100644
index aae1113..0000000
--- a/jstests/replsets/twosets.js
+++ /dev/null
@@ -1,35 +0,0 @@
-// add a node from a different set to the current set
-// I don't know what should happen here.
-
-doTest = function( signal ) {
-
- var orig = new ReplSetTest( {name: 'testSet', nodes: 3} );
- orig.startSet();
- orig.initiate();
- var master = orig.getMaster();
-
- var interloper = new ReplSetTest( {name: 'testSet', nodes: 3, startPort : 31003} );
- interloper.startSet();
- interloper.initiate();
-
- var conf = master.getDB("local").system.replset.findOne();
-
- var nodes = interloper.nodeList();
- var host = nodes[0];
- var id = conf.members.length;
- conf.members.push({_id : id, host : host});
- conf.version++;
-
- try {
- var result = master.getDB("admin").runCommand({replSetReconfig : conf});
- }
- catch(e) {
- print(e);
- }
-
- // now... stuff should blow up?
-
- sleep(10);
-}
-
-doTest(15); \ No newline at end of file
diff --git a/jstests/set7.js b/jstests/set7.js
index b46fe9e..c6d311b 100644
--- a/jstests/set7.js
+++ b/jstests/set7.js
@@ -38,3 +38,19 @@ t.save( {a:[]} );
t.update( {}, {$set:{"a.f":1}} );
assert( db.getLastError() );
assert.eq( [], t.findOne().a );
+
+// SERVER-3750
+t.drop();
+t.save( {a:[]} );
+t.update( {}, {$set:{"a.1500000":1}} ); // current limit
+assert( db.getLastError() == null );
+
+t.drop();
+t.save( {a:[]} );
+t.update( {}, {$set:{"a.1500001":1}} ); // 1 over limit
+assert.eq(15891 , db.getLastErrorObj().code );
+
+t.drop();
+t.save( {a:[]} );
+t.update( {}, {$set:{"a.1000000000":1}} ); // way over limit
+assert.eq(15891 , db.getLastErrorObj().code );
diff --git a/jstests/sharding/addshard1.js b/jstests/sharding/addshard1.js
index f28feed..0ca6a83 100644
--- a/jstests/sharding/addshard1.js
+++ b/jstests/sharding/addshard1.js
@@ -44,7 +44,7 @@ assert.eq( s.normalize( s.config.databases.findOne( { _id : "testDB" } ).primary
assert.eq( numObjs , sdb1.foo.count() , "wrong count after moving database that existed before addshard" );
// make sure we can shard the original collections
-sdb1.foo.ensureIndex( { a : 1 } ) // can't shard populated collection without an index
+sdb1.foo.ensureIndex( { a : 1 }, { unique : true } ) // can't shard populated collection without an index
s.adminCommand( { enablesharding : "testDB" } );
s.adminCommand( { shardcollection : "testDB.foo" , key: { a : 1 } } );
s.adminCommand( { split : "testDB.foo", middle: { a : Math.floor(numObjs/2) } } );
diff --git a/jstests/sharding/addshard4.js b/jstests/sharding/addshard4.js
index be4a8b3..4a44b55 100644
--- a/jstests/sharding/addshard4.js
+++ b/jstests/sharding/addshard4.js
@@ -2,15 +2,18 @@
s = new ShardingTest( "addshard4", 2 , 0 , 1 , {useHostname : true});
-r = new ReplSetTest({name : "addshard4", nodes : 3, startPort : 34000});
+r = new ReplSetTest({name : "addshard4", nodes : 3, startPort : 31100});
r.startSet();
var config = r.getReplSetConfig();
config.members[2].priority = 0;
r.initiate(config);
+//Wait for replica set to be fully initialized - could take some time
+//to pre-allocate files on slow systems
+r.awaitReplication();
-var master = r.getMaster().master;
+var master = r.getMaster();
var members = config.members.map(function(elem) { return elem.host; });
var shardName = "addshard4/"+members.join(",");
@@ -20,5 +23,24 @@ print("adding shard "+shardName);
var result = s.adminCommand({"addshard" : shardName});
printjson(result);
+assert.eq(result, true);
+r = new ReplSetTest({name : "addshard42", nodes : 3, startPort : 31200});
+r.startSet();
+
+config = r.getReplSetConfig();
+config.members[2].arbiterOnly = true;
+
+r.initiate(config);
+// Wait for replica set to be fully initialized - could take some time
+// to pre-allocate files on slow systems
+r.awaitReplication();
+master = r.getMaster();
+
+print("adding shard addshard42");
+
+result = s.adminCommand({"addshard" : "addshard42/"+config.members[2].host});
+
+printjson(result);
+assert.eq(result, true);
diff --git a/jstests/sharding/array_shard_key.js b/jstests/sharding/array_shard_key.js
new file mode 100644
index 0000000..1ea61e8
--- /dev/null
+++ b/jstests/sharding/array_shard_key.js
@@ -0,0 +1,127 @@
+// Ensure you can't shard on an array key
+
+var st = new ShardingTest({ name : jsTestName(), shards : 3 })
+
+var mongos = st.s0
+
+var coll = mongos.getCollection( jsTestName() + ".foo" )
+
+st.shardColl( coll, { _id : 1, i : 1 }, { _id : ObjectId(), i : 1 } )
+
+printjson( mongos.getDB("config").chunks.find().toArray() )
+
+st.printShardingStatus()
+
+print( "1: insert some invalid data" )
+
+var value = null
+
+var checkError = function( shouldError ){
+ var error = coll.getDB().getLastError()
+
+ if( error != null ) printjson( error )
+
+ if( error == null && ! shouldError ) return
+ if( error != null && shouldError ) return
+
+ if( error == null ) print( "No error detected!" )
+ else print( "Unexpected error!" )
+
+ assert( false )
+}
+
+// Insert an object with invalid array key
+coll.insert({ i : [ 1, 2 ] })
+checkError( true )
+
+// Insert an object with valid array key
+coll.insert({ i : 1 })
+checkError( false )
+
+// Update the value with valid other field
+value = coll.findOne({ i : 1 })
+coll.update( value, { $set : { j : 2 } } )
+checkError( false )
+
+// Update the value with invalid other fields
+value = coll.findOne({ i : 1 })
+coll.update( value, Object.merge( value, { i : [ 3 ] } ) )
+checkError( true )
+
+// Multi-update the value with invalid other fields
+value = coll.findOne({ i : 1 })
+coll.update( value, Object.merge( value, { i : [ 3, 4 ] } ), false, true)
+checkError( true )
+
+// Single update the value with valid other fields
+value = coll.findOne({ i : 1 })
+coll.update( Object.merge( value, { i : [ 3, 4 ] } ), value )
+checkError( true )
+
+// Multi-update the value with other fields (won't work, but no error)
+value = coll.findOne({ i : 1 })
+coll.update( Object.merge( value, { i : [ 1, 1 ] } ), { $set : { k : 4 } }, false, true)
+checkError( false )
+
+// Query the value with other fields (won't work, but no error)
+value = coll.findOne({ i : 1 })
+coll.find( Object.merge( value, { i : [ 1, 1 ] } ) ).toArray()
+checkError( false )
+
+// Can't remove using multikey, but shouldn't error
+value = coll.findOne({ i : 1 })
+coll.remove( Object.extend( value, { i : [ 1, 2, 3, 4 ] } ) )
+checkError( false )
+
+// Can't remove using multikey, but shouldn't error
+value = coll.findOne({ i : 1 })
+coll.remove( Object.extend( value, { i : [ 1, 2, 3, 4, 5 ] } ) )
+error = coll.getDB().getLastError()
+assert.eq( error, null )
+assert.eq( coll.find().itcount(), 1 )
+
+value = coll.findOne({ i : 1 })
+coll.remove( Object.extend( value, { i : 1 } ) )
+error = coll.getDB().getLastError()
+assert.eq( error, null )
+assert.eq( coll.find().itcount(), 0 )
+
+printjson( "Sharding-then-inserting-multikey tested, now trying inserting-then-sharding-multikey" )
+
+// Insert a bunch of data then shard over key which is an array
+var coll = mongos.getCollection( "" + coll + "2" )
+for( var i = 0; i < 10; i++ ){
+ // TODO : does not check weird cases like [ i, i ]
+ coll.insert({ i : [ i, i + 1 ] })
+ checkError( false )
+}
+
+coll.ensureIndex({ _id : 1, i : 1 })
+
+try {
+ st.shardColl( coll, { _id : 1, i : 1 }, { _id : ObjectId(), i : 1 } )
+}
+catch( e ){
+ print( "Correctly threw error on sharding with multikey index." )
+}
+
+st.printShardingStatus()
+
+// Insert a bunch of data then shard over key which is not an array
+var coll = mongos.getCollection( "" + coll + "3" )
+for( var i = 0; i < 10; i++ ){
+ // TODO : does not check weird cases like [ i, i ]
+ coll.insert({ i : i })
+ checkError( false )
+}
+
+coll.ensureIndex({ _id : 1, i : 1 })
+
+st.shardColl( coll, { _id : 1, i : 1 }, { _id : ObjectId(), i : 1 } )
+
+st.printShardingStatus()
+
+
+
+// Finish
+st.stop()
diff --git a/jstests/sharding/auth.js b/jstests/sharding/auth.js
new file mode 100644
index 0000000..8d8d7d7
--- /dev/null
+++ b/jstests/sharding/auth.js
@@ -0,0 +1,177 @@
+
+adminUser = {
+ db : "admin",
+ username : "foo",
+ password : "bar"
+};
+
+testUser = {
+ db : "test",
+ username : "bar",
+ password : "baz"
+};
+
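+// manual MONGODB-CR style login: fetch a nonce, then authenticate with the digest
+// that __pwHash derives from the nonce, username and password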
+function login(userObj) {
+ var n = s.getDB(userObj.db).runCommand({getnonce: 1});
+ var a = s.getDB(userObj.db).runCommand({authenticate: 1, user: userObj.username, nonce: n.nonce, key: s.getDB("admin").__pwHash(n.nonce, userObj.username, userObj.password)});
+ printjson(a);
+}
+
+function logout(userObj) {
+ s.getDB(userObj.db).runCommand({logout:1});
+}
+
+function getShardName(rsTest) {
+ var master = rsTest.getMaster();
+ var config = master.getDB("local").system.replset.findOne();
+ var members = config.members.map(function(elem) { return elem.host; });
+ return config._id+"/"+members.join(",");
+}
+
+var s = new ShardingTest( "auth1", 0 , 0 , 1 , {rs: true, extraOptions : {"keyFile" : "jstests/libs/key1"}, noChunkSize : true});
+
+print("logging in first, if there was an unclean shutdown the user might already exist");
+login(adminUser);
+
+var user = s.getDB("admin").system.users.findOne();
+if (user) {
+ print("user already exists");
+ printjson(user);
+}
+else {
+ print("adding user");
+ s.getDB(adminUser.db).addUser(adminUser.username, adminUser.password);
+}
+
+login(adminUser);
+s.getDB( "config" ).settings.update( { _id : "chunksize" }, {$set : {value : 1 }}, true );
+printjson(s.getDB("config").runCommand({getlasterror:1}));
+printjson(s.getDB("config").settings.find().toArray());
+
+print("restart mongos");
+stopMongoProgram(31000);
+var opts = { port : 31000, v : 0, configdb : s._configDB, keyFile : "jstests/libs/key1", chunkSize : 1 };
+var conn = startMongos( opts );
+s.s = s._mongos[0] = s["s0"] = conn;
+
+login(adminUser);
+
+d1 = new ReplSetTest({name : "d1", nodes : 3, startPort : 31100});
+d1.startSet({keyFile : "jstests/libs/key2"});
+d1.initiate();
+
+print("initiated");
+var shardName = getShardName(d1);
+
+print("adding shard w/out auth "+shardName);
+logout(adminUser);
+
+var result = s.getDB("admin").runCommand({addShard : shardName});
+printjson(result);
+assert.eq(result.errmsg, "unauthorized");
+
+login(adminUser);
+
+print("adding shard w/wrong key "+shardName);
+
+var thrown = false;
+try {
+ result = s.adminCommand({addShard : shardName});
+}
+catch(e) {
+ thrown = true;
+ printjson(e);
+}
+assert(thrown);
+
+print("start rs w/correct key");
+d1.stopSet();
+d1.startSet({keyFile : "jstests/libs/key1"});
+d1.initiate();
+var master = d1.getMaster();
+
+print("adding shard w/auth "+shardName);
+
+result = s.getDB("admin").runCommand({addShard : shardName});
+assert.eq(result.ok, 1, tojson(result));
+
+s.getDB("admin").runCommand({enableSharding : "test"});
+s.getDB("admin").runCommand({shardCollection : "test.foo", key : {x : 1}});
+
+s.getDB(testUser.db).addUser(testUser.username, testUser.password);
+
+logout(adminUser);
+
+print("query try");
+var e = assert.throws(function() {
+ conn.getDB("foo").bar.findOne();
+});
+printjson(e);
+
+print("cmd try");
+e = assert.throws(function() {
+ conn.getDB("foo").runCommand({listdbs:1});
+});
+printjson(e);
+
+print("insert try 1");
+s.getDB("test").foo.insert({x:1});
+result = s.getDB("test").runCommand({getLastError : 1});
+assert.eq(result.err, "unauthorized");
+
+logout(adminUser);
+
+login(testUser);
+
+print("insert try 2");
+s.getDB("test").foo.insert({x:1});
+result = s.getDB("test").runCommand({getLastError : 1});
+assert.eq(result.err, null);
+
+logout(testUser);
+
+d2 = new ReplSetTest({name : "d2", nodes : 3, startPort : 31200});
+d2.startSet({keyFile : "jstests/libs/key1"});
+d2.initiate();
+
+shardName = getShardName(d2);
+
+print("adding shard "+shardName);
+login(adminUser);
+print("logged in");
+result = s.getDB("admin").runCommand({addShard : shardName})
+
+var num = 100000;
+for (i=0; i<num; i++) {
+ s.getDB("test").foo.insert({x:i, abc : "defg", date : new Date(), str : "all the talk on the market"});
+}
+
+var d1Chunks = s.getDB("config").chunks.count({shard : "d1"});
+var d2Chunks = s.getDB("config").chunks.count({shard : "d2"});
+var totalChunks = s.getDB("config").chunks.count({ns : "test.foo"});
+
+print("chunks: " + d1Chunks+" "+d2Chunks+" "+totalChunks);
+
+assert(d1Chunks > 0 && d2Chunks > 0 && d1Chunks+d2Chunks == totalChunks);
+
+assert.eq(s.getDB("test").foo.count(), num+1);
+
+s.s.setSlaveOk();
+
+var cursor = s.getDB("test").foo.find({x:{$lt : 500}});
+
+var count = 0;
+while (cursor.hasNext()) {
+ cursor.next();
+ count++;
+}
+
+assert.eq(count, 501);
+
+// check that dump doesn't get stuck with auth
+var x = runMongoProgram( "mongodump", "--host", "127.0.0.1:31000", "-d", testUser.db, "-u", testUser.username, "-p", testUser.password);
+
+print("result: "+x);
+
+
+s.stop();
diff --git a/jstests/sharding/count_slaveok.js b/jstests/sharding/count_slaveok.js
new file mode 100644
index 0000000..075ab41
--- /dev/null
+++ b/jstests/sharding/count_slaveok.js
@@ -0,0 +1,69 @@
+// Tests count and distinct using slaveOk
+
+var st = new ShardingTest( testName = "countSlaveOk",
+ numShards = 1,
+ verboseLevel = 0,
+ numMongos = 1,
+ { rs : true,
+ rs0 : { nodes : 2 }
+ })
+
+var rst = st._rs[0].test
+
+// Insert data into replica set
+var conn = new Mongo( st.s.host )
+conn.setLogLevel( 3 )
+
+var coll = conn.getCollection( "test.countSlaveOk" )
+coll.drop()
+
+for( var i = 0; i < 300; i++ ){
+ coll.insert( { i : i % 10 } )
+}
+
+var connA = conn
+var connB = new Mongo( st.s.host )
+var connC = new Mongo( st.s.host )
+
+// Make sure the writes get through, otherwise we can continue to error these one-at-a-time
+coll.getDB().getLastError()
+
+st.printShardingStatus()
+
+// Wait for client to update itself and replication to finish
+rst.awaitReplication()
+
+var primary = rst.getPrimary()
+var sec = rst.getSecondary()
+
+// Data now inserted... stop the master, since only two in set, other will still be secondary
+rst.stop( rst.getMaster(), undefined, true )
+printjson( rst.status() )
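+// With only two data-bearing members, the survivor cannot reach a majority, so it stays
+// SECONDARY rather than being elected, which is exactly the state the slaveOk checks below need.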
+
+// Wait for the mongos to recognize the slave
+ReplSetTest.awaitRSClientHosts( conn, sec, { ok : true, secondary : true } )
+
+// Need to check slaveOk=true first, since slaveOk=false will destroy conn in pool when
+// master is down
+conn.setSlaveOk()
+
+// Should not throw exception, since slaveOk'd
+assert.eq( 30, coll.find({ i : 0 }).count() )
+assert.eq( 10, coll.distinct("i").length )
+
+try {
+
+ conn.setSlaveOk( false )
+ coll.find({ i : 0 }).count()
+
+ print( "Should not reach here!" )
+ printjson( coll.getDB().getLastError() )
+ assert( false )
+
+}
+catch( e ){
+ print( "Non-slaveOk'd connection failed." )
+}
+
+// Finish
+st.stop()
diff --git a/jstests/sharding/drop_sharded_db.js b/jstests/sharding/drop_sharded_db.js
new file mode 100644
index 0000000..aedde8f
--- /dev/null
+++ b/jstests/sharding/drop_sharded_db.js
@@ -0,0 +1,62 @@
+// Tests the dropping of a sharded database SERVER-3471 SERVER-1726
+
+var st = new ShardingTest({ name : jsTestName() })
+
+var mongos = st.s0
+var config = mongos.getDB( "config" )
+
+var dbName = "buy"
+var dbA = mongos.getDB( dbName )
+var dbB = mongos.getDB( dbName + "_201107" )
+var dbC = mongos.getDB( dbName + "_201108" )
+
+print( "1: insert some data and colls into all dbs" )
+
+var numDocs = 3000;
+var numColls = 10;
+for( var i = 0; i < numDocs; i++ ){
+ dbA.getCollection( "data" + (i % numColls) ).insert({ _id : i })
+ dbB.getCollection( "data" + (i % numColls) ).insert({ _id : i })
+ dbC.getCollection( "data" + (i % numColls) ).insert({ _id : i })
+}
+
+print( "2: shard the colls ")
+
+for( var i = 0; i < numColls; i++ ){
+
+ var key = { _id : 1 }
+ st.shardColl( dbA.getCollection( "data" + i ), key )
+ st.shardColl( dbB.getCollection( "data" + i ), key )
+ st.shardColl( dbC.getCollection( "data" + i ), key )
+
+}
+
+print( "3: drop the non-suffixed db ")
+
+dbA.dropDatabase()
+
+
+print( "3: ensure only the non-suffixed db was dropped ")
+
+var dbs = mongos.getDBNames()
+for( var i = 0; i < dbs.length; i++ ){
+ assert.neq( dbs[i], "" + dbA )
+}
+
+assert.eq( 0, config.databases.find({ _id : "" + dbA }).toArray().length )
+assert.eq( 1, config.databases.find({ _id : "" + dbB }).toArray().length )
+assert.eq( 1, config.databases.find({ _id : "" + dbC }).toArray().length )
+
+assert.eq( numColls, config.collections.find({ _id : RegExp( "^" + dbA + "\\..*" ), dropped : true }).toArray().length )
+assert.eq( numColls, config.collections.find({ _id : RegExp( "^" + dbB + "\\..*" ), dropped : false }).toArray().length )
+assert.eq( numColls, config.collections.find({ _id : RegExp( "^" + dbC + "\\..*" ), dropped : false }).toArray().length )
+
+for( var i = 0; i < numColls; i++ ){
+
+ assert.eq( numDocs / numColls, dbB.getCollection( "data" + (i % numColls) ).find().itcount() )
+ assert.eq( numDocs / numColls, dbC.getCollection( "data" + (i % numColls) ).find().itcount() )
+
+}
+
+// Finish
+st.stop()
diff --git a/jstests/sharding/features2.js b/jstests/sharding/features2.js
index b2070ea..67a9abe 100644
--- a/jstests/sharding/features2.js
+++ b/jstests/sharding/features2.js
@@ -97,7 +97,7 @@ doMR = function( n ){
var res = db.mr.mapReduce( m , r , "smr1_out" );
printjson( res );
- assert.eq( new NumberLong(4) , res.counts.input , "MR T0 " + n );
+ assert.eq( 4 , res.counts.input , "MR T0 " + n );
var x = db[res.result];
assert.eq( 3 , x.find().count() , "MR T1 " + n );
@@ -115,7 +115,7 @@ doMR = function( n ){
var res = db.mr.mapReduce( m , r , { out : { inline : 1 } } );
printjson( res );
- assert.eq( new NumberLong(4) , res.counts.input , "MR T6 " + n );
+ assert.eq( 4 , res.counts.input , "MR T6 " + n );
var z = {};
res.find().forEach( function(a){ z[a._id] = a.value.count; } );
@@ -173,4 +173,11 @@ catch ( e ){
assert.eq( x , y , "assert format" )
+// isMaster and query-wrapped-command
+isMaster = db.runCommand({isMaster:1});
+assert( isMaster.ismaster );
+assert.eq( 'isdbgrid', isMaster.msg );
+assert.eq( isMaster, db.runCommand({query: {isMaster:1}}) );
+assert.eq( isMaster, db.runCommand({$query: {isMaster:1}}) );
+
s.stop();
diff --git a/jstests/sharding/features3.js b/jstests/sharding/features3.js
index b28d88e..5277d22 100644
--- a/jstests/sharding/features3.js
+++ b/jstests/sharding/features3.js
@@ -17,52 +17,79 @@ for ( i=0; i<N; i++ )
db.foo.insert( { _id : i } )
db.getLastError();
x = db.foo.stats();
+assert.eq( "test.foo" , x.ns , "basic1" )
+assert( x.sharded , "basic2" )
assert.eq( N , x.count , "total count" )
assert.eq( N / 2 , x.shards.shard0000.count , "count on shard0000" )
assert.eq( N / 2 , x.shards.shard0001.count , "count on shard0001" )
+assert( x.totalIndexSize > 0 )
+assert( x.numExtents > 0 )
+db.bar.insert( { x : 1 } )
+x = db.bar.stats();
+assert.eq( 1 , x.count , "XXX1" )
+assert.eq( "test.bar" , x.ns , "XXX2" )
+assert( ! x.sharded , "XXX3: " + tojson(x) )
+
+// Fork shell and start pulling back data
start = new Date()
print( "about to fork shell: " + Date() )
-join = startParallelShell( "db.foo.find( function(){ x = ''; for ( i=0; i<10000; i++ ){ x+=i; } return true; } ).itcount()" )
+
+// TODO: Still potential problem when our sampling of current ops misses when $where is active -
+// solution is to increase sleep time
+parallelCommand = "try { while(true){" +
+ " db.foo.find( function(){ x = ''; for ( i=0; i<10000; i++ ){ x+=i; } sleep( 1000 ); return true; } ).itcount() " +
+ "}} catch(e){ print('PShell execution ended:'); printjson( e ) }"
+
+join = startParallelShell( parallelCommand )
print( "after forking shell: " + Date() )
+// Get all current $where operations
function getMine( printInprog ){
+
var inprog = db.currentOp().inprog;
+
if ( printInprog )
printjson( inprog )
+
+ // Find all the where queries
var mine = []
for ( var x=0; x<inprog.length; x++ ){
if ( inprog[x].query && inprog[x].query.$where ){
mine.push( inprog[x] )
}
}
+
return mine;
}
-state = 0; // 0 = not found, 1 = killed,
-killTime = null;
+var state = 0; // 0 = not found, 1 = killed,
+var killTime = null;
+var i = 0;
-for ( i=0; i<( 100* 1000 ); i++ ){
+assert.soon( function(){
+
+ // Get all the current operations
mine = getMine( state == 0 && i > 20 );
- if ( state == 0 ){
- if ( mine.length == 0 ){
- sleep(1);
- continue;
- }
+ i++;
+
+ // Wait for the queries to start
+ if ( state == 0 && mine.length > 0 ){
+ // Queries started
state = 1;
+ // Kill all $where
mine.forEach( function(z){ printjson( db.getSisterDB( "admin" ).killOp( z.opid ) ); } )
killTime = new Date()
}
- else if ( state == 1 ){
- if ( mine.length == 0 ){
- state = 2;
- break;
- }
- sleep(1)
- continue;
+ // Wait for killed queries to end
+ else if ( state == 1 && mine.length == 0 ){
+ // Queries ended
+ state = 2;
+ return true;
}
-}
+
+}, "Couldn't kill the $where operations.", 2 * 60 * 1000 )
print( "after loop: " + Date() );
assert( killTime , "timed out waiting to kill last mine:" + tojson(mine) )
diff --git a/jstests/sharding/group_slaveok.js b/jstests/sharding/group_slaveok.js
new file mode 100644
index 0000000..3b7cec4
--- /dev/null
+++ b/jstests/sharding/group_slaveok.js
@@ -0,0 +1,68 @@
+// Tests group using slaveOk
+
+var st = new ShardingTest( testName = "groupSlaveOk",
+ numShards = 1,
+ verboseLevel = 0,
+ numMongos = 1,
+ { rs : true,
+ rs0 : { nodes : 2 }
+ })
+
+var rst = st._rs[0].test
+
+// Insert data into replica set
+var conn = new Mongo( st.s.host )
+conn.setLogLevel( 3 )
+
+var coll = conn.getCollection( "test.groupSlaveOk" )
+coll.drop()
+
+for( var i = 0; i < 300; i++ ){
+ coll.insert( { i : i % 10 } )
+}
+
+// Make sure the writes get through, otherwise we can continue to error these one-at-a-time
+coll.getDB().getLastError()
+
+st.printShardingStatus()
+
+// Wait for client to update itself and replication to finish
+rst.awaitReplication()
+
+var primary = rst.getPrimary()
+var sec = rst.getSecondary()
+
+// Data now inserted... stop the master, since only two in set, other will still be secondary
+rst.stop( rst.getMaster(), undefined, true )
+printjson( rst.status() )
+
+// Wait for the mongos to recognize the slave
+ReplSetTest.awaitRSClientHosts( conn, sec, { ok : true, secondary : true } )
+
+// Need to check slaveOk=true first, since slaveOk=false will destroy conn in pool when
+// master is down
+conn.setSlaveOk()
+
+// Should not throw exception, since slaveOk'd
+assert.eq( 10, coll.group({ key : { i : true } ,
+ reduce : function( obj, ctx ){ ctx.count += 1 } ,
+ initial : { count : 0 } }).length )
+
+try {
+
+ conn.setSlaveOk( false )
+ coll.group({ key : { i : true } ,
+ reduce : function( obj, ctx ){ ctx.count += 1 } ,
+ initial : { count : 0 } })
+
+ print( "Should not reach here!" )
+ printjson( coll.getDB().getLastError() )
+ assert( false )
+
+}
+catch( e ){
+ print( "Non-slaveOk'd connection failed." )
+}
+
+// Finish
+st.stop()
diff --git a/jstests/sharding/index1.js b/jstests/sharding/index1.js
new file mode 100644
index 0000000..6f99449
--- /dev/null
+++ b/jstests/sharding/index1.js
@@ -0,0 +1,174 @@
+// from server 2326 - make sure that sharding only works with unique indices
+
+s = new ShardingTest( "shard_index", 2, 50, 1 )
+
+// Regenerate fully because of SERVER-2782
+for ( var i = 0; i < 10; i++ ) {
+
+ var coll = s.admin._mongo.getDB( "test" ).getCollection( "foo" + i )
+ coll.drop()
+
+ for ( var j = 0; j < 300; j++ ) {
+ coll.insert( { num : j, x : 1 } )
+ }
+
+ if(i == 0) s.adminCommand( { enablesharding : "" + coll._db } );
+
+ print("\n\n\n\n\nTest # " + i)
+
+ if ( i == 0 ) {
+
+ // Unique index exists, but not the right one.
+ coll.ensureIndex( { num : 1 }, { unique : true } )
+ coll.ensureIndex( { x : 1 } )
+
+ passed = false
+ try {
+ s.adminCommand( { shardcollection : "" + coll, key : { x : 1 } } )
+ passed = true
+ } catch (e) {
+ print( e )
+ }
+ assert( !passed, "Should not shard collection when another unique index exists!")
+
+ }
+ if ( i == 1 ) {
+
+ // Unique index exists as prefix, also index exists
+ coll.ensureIndex( { x : 1 } )
+ coll.ensureIndex( { x : 1, num : 1 }, { unique : true } )
+
+ try{
+ s.adminCommand({ shardcollection : "" + coll, key : { x : 1 } })
+ }
+ catch(e){
+ print(e)
+ assert( false, "Should be able to shard non-unique index without unique option.")
+ }
+
+ }
+ if ( i == 2 ) {
+ if (false) { // SERVER-3718
+ // Non-unique index exists as prefix, also index exists. No unique index.
+ coll.ensureIndex( { x : 1 } )
+ coll.ensureIndex( { x : 1, num : 1 } )
+
+ passed = false;
+ try{
+ s.adminCommand({ shardcollection : "" + coll, key : { x : 1 } })
+ passed = true;
+
+ }
+ catch( e ){
+ print(e)
+ }
+ assert( !passed, "Should not shard collection with no unique index.")
+ }
+ }
+ if ( i == 3 ) {
+
+ // Unique index exists as prefix, also unique index exists
+ coll.ensureIndex( { num : 1 }, { unique : true })
+ coll.ensureIndex( { num : 1 , x : 1 }, { unique : true } )
+
+ try{
+ s.adminCommand({ shardcollection : "" + coll, key : { num : 1 }, unique : true })
+ }
+ catch( e ){
+ print(e)
+ assert( false, "Should be able to shard collection with unique prefix index.")
+ }
+
+ }
+ if ( i == 4 ) {
+
+ // Unique index exists as id, also unique prefix index exists
+ coll.ensureIndex( { _id : 1, num : 1 }, { unique : true } )
+
+ try{
+ s.adminCommand({ shardcollection : "" + coll, key : { _id : 1 }, unique : true })
+ }
+ catch( e ){
+ print(e)
+ assert( false, "Should be able to shard collection with unique id index.")
+ }
+
+ }
+ if ( i == 5 ) {
+
+ // Unique index exists as id, also unique prefix index exists
+ coll.ensureIndex( { _id : 1, num : 1 }, { unique : true } )
+
+ try{
+ s.adminCommand({ shardcollection : "" + coll, key : { _id : 1, num : 1 }, unique : true })
+ }
+ catch( e ){
+ print(e)
+ assert( false, "Should be able to shard collection with unique combination id index.")
+ }
+
+ }
+ if ( i == 6 ) {
+
+ coll.remove()
+
+ // Unique index does not exist, also unique prefix index exists
+ coll.ensureIndex( { num : 1, _id : 1 }, { unique : true } )
+
+ try{
+ s.adminCommand({ shardcollection : "" + coll, key : { num : 1 }, unique : true })
+ }
+ catch( e ){
+ print(e)
+ assert( false, "Should be able to shard collection with no unique index but with a unique prefix index.")
+ }
+
+ }
+ if ( i == 7 ) {
+ coll.remove()
+
+ // No index exists
+
+ try{
+ s.adminCommand({ shardcollection : "" + coll, key : { num : 1 } })
+ }
+ catch( e ){
+ print(e)
+ assert( false, "Should be able to shard collection with no index on shard key." )
+ }
+ }
+ if ( i == 8 ) {
+ if (false) { // SERVER-3718
+ coll.remove()
+
+ // No index exists
+
+ passed = false
+ try{
+ s.adminCommand({ shardcollection : "" + coll, key : { num : 1 }, unique : true })
+ passed = true
+ }
+ catch( e ){
+ print(e)
+ }
+ assert( !passed, "Should not shard collection with unique flag but with no unique index on shard key.")
+ }
+ }
+ if ( i == 9 ) {
+
+ // Unique index exists on a different field as well
+ coll.ensureIndex( { num : 1 }, { unique : true } )
+ coll.ensureIndex( { x : 1 }, { unique : true} )
+
+ passed = false
+ try {
+ s.adminCommand( { shardcollection : "" + coll, key : { x : 1 } } )
+ passed = true
+ } catch (e) {
+ print( e )
+ }
+ assert( !passed, "Should not shard collection when another unique index exists!" )
+ }
+}
+
+s.stop();
diff --git a/jstests/sharding/migrateBig.js b/jstests/sharding/migrateBig.js
index f6ba18a..917f152 100644
--- a/jstests/sharding/migrateBig.js
+++ b/jstests/sharding/migrateBig.js
@@ -40,6 +40,6 @@ for ( i=0; i<20; i+= 2 )
db.printShardingStatus()
-assert.soon( function(){ var x = s.chunkDiff( "foo" , "test" ); print( "chunk diff: " + x ); return x < 2; } , "no balance happened" , 120 * 1000 , 2000 )
+assert.soon( function(){ var x = s.chunkDiff( "foo" , "test" ); print( "chunk diff: " + x ); return x < 2; } , "no balance happened" , 8 * 60 * 1000 , 2000 )
s.stop()
diff --git a/jstests/sharding/migrateMemory.js b/jstests/sharding/migrateMemory.js
new file mode 100644
index 0000000..d321220
--- /dev/null
+++ b/jstests/sharding/migrateMemory.js
@@ -0,0 +1,54 @@
+
+s = new ShardingTest( "migrateMemory" , 2 , 1 , 1 , { chunksize : 1 });
+
+s.config.settings.update( { _id: "balancer" }, { $set : { stopped: true } } , true );
+
+s.adminCommand( { enablesharding : "test" } );
+s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
+
+db = s.getDB( "test" )
+t = db.foo
+
+str = ""
+while ( str.length < 10000 ){
+ str += "asdasdsdasdasdasdas";
+}
+
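+// Insert roughly 10MB of documents so the 1MB chunk size produces several chunks to move between shards.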
+data = 0;
+num = 0;
+while ( data < ( 1024 * 1024 * 10 ) ){
+ t.insert( { _id : num++ , s : str } )
+ data += str.length
+}
+
+db.getLastError()
+
+stats = s.chunkCounts( "foo" )
+from = ""
+to = ""
+for ( x in stats ){
+ if ( stats[x] == 0 )
+ to = x
+ else
+ from = x
+}
+
+s.config.chunks.find().sort( { min : 1 } ).forEach( printjsononeline )
+
+print( "from: " + from + " to: " + to )
+printjson( stats )
+
+ss = []
+
+for ( var f = 0; f<num; f += ( 2 * num / t.stats().nchunks ) ){
+ ss.push( s.getServer( "test" ).getDB( "admin" ).serverStatus() )
+ print( f )
+ s.adminCommand( { movechunk : "test.foo" , find : { _id : f } , to : to } )
+}
+
+for ( i=0; i<ss.length; i++ )
+ printjson( ss[i].mem );
+
+
+s.stop()
+
diff --git a/jstests/sharding/multi_mongos1.js b/jstests/sharding/multi_mongos1.js
index cf9ebde..fc7eaf1 100644
--- a/jstests/sharding/multi_mongos1.js
+++ b/jstests/sharding/multi_mongos1.js
@@ -67,4 +67,5 @@ assert.eq( N , viaS2.find().toArray().length , "other B" );
printjson( primary._db._adminCommand( "shardingState" ) );
-s1.stop(); \ No newline at end of file
+
+s1.stop();
diff --git a/jstests/sharding/multi_mongos2.js b/jstests/sharding/multi_mongos2.js
new file mode 100644
index 0000000..ec95dc0
--- /dev/null
+++ b/jstests/sharding/multi_mongos2.js
@@ -0,0 +1,61 @@
+// multi_mongos2.js
+// This tests sharding an existing collection that both shards are aware of (SERVER-2828)
+
+
+// setup sharding with two mongos, s1 and s2
+s1 = new ShardingTest( "multi_mongos2" , 2 , 1 , 2 );
+s2 = s1._mongos[1];
+
+s1.adminCommand( { enablesharding : "test" } );
+s1.adminCommand( { shardcollection : "test.foo" , key : { num : 1 } } );
+
+s1.config.databases.find().forEach( printjson )
+
+// test queries
+
+s1.getDB('test').existing.insert({_id:1})
+assert.eq(1, s1.getDB('test').existing.count({_id:1}));
+assert.eq(1, s2.getDB('test').existing.count({_id:1}));
+
+s2.adminCommand( { shardcollection : "test.existing" , key : { _id : 1 } } );
+s2.adminCommand( { split : "test.existing" , find : { _id : 5 } } )
+
+res = s2.getDB( "admin" ).runCommand( { moveChunk: "test.existing" , find : { _id : 1 } , to : s1.getOther( s1.getServer( "test" ) ).name } );
+
+assert.eq(1 , res.ok, tojson(res));
+
+printjson( s2.adminCommand( {"getShardVersion" : "test.existing" } ) )
+printjson( new Mongo(s1.getServer( "test" ).name).getDB( "admin" ).adminCommand( {"getShardVersion" : "test.existing" } ) )
+
+assert.eq(1, s1.getDB('test').existing.count({_id:1})); // SERVER-2828
+assert.eq(1, s2.getDB('test').existing.count({_id:1}));
+
+// test stats
+
+s1.getDB('test').existing2.insert({_id:1})
+assert.eq(1, s1.getDB('test').existing2.count({_id:1}));
+assert.eq(1, s2.getDB('test').existing2.count({_id:1}));
+
+s2.adminCommand( { shardcollection : "test.existing2" , key : { _id : 1 } } );
+s2.adminCommand( { split : "test.existing2" , find : { _id : 5 } } )
+
+var res = s1.getDB('test').existing2.stats()
+printjson( res )
+assert.eq(true, res.sharded); //SERVER-2828
+assert.eq(true, s2.getDB('test').existing2.stats().sharded);
+
+// test admin commands
+
+s1.getDB('test').existing3.insert({_id:1})
+assert.eq(1, s1.getDB('test').existing3.count({_id:1}));
+assert.eq(1, s2.getDB('test').existing3.count({_id:1}));
+
+s2.adminCommand( { shardcollection : "test.existing3" , key : { _id : 1 } } );
+s2.adminCommand( { split : "test.existing3" , find : { _id : 5 } } )
+
+res = s1.getDB( "admin" ).runCommand( { moveChunk: "test.existing3" , find : { _id : 1 } , to : s1.getOther( s1.getServer( "test" ) ).name } );
+assert.eq(1 , res.ok, tojson(res));
+
+
+
+s1.stop();
diff --git a/jstests/sharding/parallel.js b/jstests/sharding/parallel.js
new file mode 100644
index 0000000..d35459c
--- /dev/null
+++ b/jstests/sharding/parallel.js
@@ -0,0 +1,38 @@
+numShards = 3
+s = new ShardingTest( "parallel" , numShards , 2 , 2 , { sync : true } );
+
+s.adminCommand( { enablesharding : "test" } );
+s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
+
+db = s.getDB( "test" );
+
+N = 10000;
+
+for ( i=0; i<N; i+=(N/12) ) {
+ s.adminCommand( { split : "test.foo" , middle : { _id : i } } )
+ sh.moveChunk( "test.foo", { _id : i } , "shard000" + Math.floor( Math.random() * numShards ) )
+}
+
+
+for ( i=0; i<N; i++ )
+ db.foo.insert( { _id : i } )
+db.getLastError();
+
+
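+// Run the given command in parallel against both mongos processes via benchRun and print the results.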
+doCommand = function( dbname , cmd ) {
+ x = benchRun( { ops : [ { op : "findOne" , ns : dbname + ".$cmd" , query : cmd } ] ,
+ host : db.getMongo().host , parallel : 2 , seconds : 2 } )
+ printjson(x)
+ x = benchRun( { ops : [ { op : "findOne" , ns : dbname + ".$cmd" , query : cmd } ] ,
+ host : s._mongos[1].host , parallel : 2 , seconds : 2 } )
+ printjson(x)
+}
+
+doCommand( "test" , { dbstats : 1 } )
+doCommand( "config" , { dbstats : 1 } )
+
+x = s.getDB( "config" ).stats()
+assert( x.ok , tojson(x) )
+printjson(x)
+
+s.stop()
diff --git a/jstests/sharding/shard3.js b/jstests/sharding/shard3.js
index 7132563..e27316e 100644
--- a/jstests/sharding/shard3.js
+++ b/jstests/sharding/shard3.js
@@ -4,9 +4,18 @@ s = new ShardingTest( "shard3" , 2 , 1 , 2 );
s2 = s._mongos[1];
+db = s.getDB( "test" )
s.adminCommand( { enablesharding : "test" } );
s.adminCommand( { shardcollection : "test.foo" , key : { num : 1 } } );
+assert( sh.getBalancerState() , "A1" )
+sh.setBalancerState( false )
+assert( ! sh.getBalancerState() , "A2" )
+sh.setBalancerState( true )
+assert( sh.getBalancerState() , "A3" )
+sh.setBalancerState( false )
+assert( ! sh.getBalancerState() , "A4" )
+
s.config.databases.find().forEach( printjson )
a = s.getDB( "test" ).foo;
@@ -53,6 +62,7 @@ function doCounts( name , total , onlyItCounts ){
var total = doCounts( "before wrong save" )
secondary.save( { num : -3 } );
+printjson( secondary.getDB().getLastError() )
doCounts( "after wrong save" , total , true )
e = a.find().explain();
assert.eq( 3 , e.n , "ex1" )
@@ -127,7 +137,7 @@ print( "*** ready to call dropDatabase" )
res = s.getDB( "test" ).dropDatabase();
assert.eq( 1 , res.ok , "dropDatabase failed : " + tojson( res ) );
// Waiting for SERVER-2253
-// assert.eq( 0 , s.config.databases.count( { _id: "test" } ) , "database 'test' was dropped but still appears in configDB" );
+assert.eq( 0 , s.config.databases.count( { _id: "test" } ) , "database 'test' was dropped but still appears in configDB" );
s.printShardingStatus();
s.printCollectionInfo( "test.foo" , "after dropDatabase call 1" );
diff --git a/jstests/sharding/shard6.js b/jstests/sharding/shard6.js
index 70c5ed7..1b58cc7 100644
--- a/jstests/sharding/shard6.js
+++ b/jstests/sharding/shard6.js
@@ -103,4 +103,7 @@ assert.eq( 16 , o.x , "x1 - did save fail? " + tojson(o) );
poolStats( "at end" )
print( summary )
+
+assert.throws( function(){ s.adminCommand( { enablesharding : "admin" } ) } )
+
s.stop();
diff --git a/jstests/sharding/shard_insert_getlasterror_w2.js b/jstests/sharding/shard_insert_getlasterror_w2.js
index 5d185a5..de3d63e 100644
--- a/jstests/sharding/shard_insert_getlasterror_w2.js
+++ b/jstests/sharding/shard_insert_getlasterror_w2.js
@@ -75,7 +75,8 @@ function go() {
return false;
}
return true;
- });
+ }, "Queries took too long to complete correctly.",
+ 2 * 60 * 1000 );
// Done
routerSpec.end()
diff --git a/jstests/sharding/shard_keycount.js b/jstests/sharding/shard_keycount.js
new file mode 100644
index 0000000..e27c054
--- /dev/null
+++ b/jstests/sharding/shard_keycount.js
@@ -0,0 +1,45 @@
+// Tests splitting a chunk twice
+
+s = new ShardingTest( "shard1" , 2, 0, 1, /* chunkSize */1);
+
+dbName = "test"
+collName = "foo"
+ns = dbName + "." + collName
+
+db = s.getDB( dbName );
+
+for(var i = 0; i < 10; i++){
+ db.foo.insert({ _id : i })
+}
+
+// Enable sharding on DB
+s.adminCommand( { enablesharding : dbName } );
+
+// Enable sharding on collection
+s.adminCommand( { shardcollection : ns, key : { _id : 1 } } );
+
+// Kill balancer
+s.config.settings.update({ _id: "balancer" }, { $set : { stopped: true } }, true )
+
+// Split into two chunks
+s.adminCommand({ split : ns, find : { _id : 3 } })
+
+coll = db.getCollection( collName )
+
+// Split chunk again
+s.adminCommand({ split : ns, find : { _id : 3 } })
+
+coll.update({ _id : 3 }, { _id : 3 })
+
+// Split chunk again
+s.adminCommand({ split : ns, find : { _id : 3 } })
+
+coll.update({ _id : 3 }, { _id : 3 })
+
+// Split chunk again
+// FAILS since the key count is based on the full index, not the chunk itself
+// i.e. Split point calc'd is 5 key offset (10 documents), but only four docs
+// in chunk with bounds _id : 0 => 5
+s.adminCommand({ split : ns, find : { _id : 3 } })
+
+s.stop();
diff --git a/jstests/sharding/sharding_with_keyfile.js b/jstests/sharding/sharding_with_keyfile.js
new file mode 100644
index 0000000..94aea57
--- /dev/null
+++ b/jstests/sharding/sharding_with_keyfile.js
@@ -0,0 +1,69 @@
+// Tests sharding with a key file
+
+var st = new ShardingTest({ name : jsTestName(),
+ shards : 2,
+ mongos : 1,
+ keyFile : keyFile = "jstests/sharding/" + jsTestName() + ".key" })
+
+// Make sure all our instances got the key
+var configs = st._configDB.split(",")
+for( var i = 0; i < configs.length; i++ ) configs[i] = new Mongo( configs[i] )
+var shards = st._connections
+var mongoses = st._mongos
+
+for( var i = 0; i < configs.length; i++ )
+ assert.eq( configs[i].getDB("admin").runCommand({ getCmdLineOpts : 1 }).parsed.keyFile, keyFile )
+
+for( var i = 0; i < shards.length; i++ )
+ assert.eq( shards[i].getDB("admin").runCommand({ getCmdLineOpts : 1 }).parsed.keyFile, keyFile )
+
+for( var i = 0; i < mongoses.length; i++ )
+ assert.eq( mongoses[i].getDB("admin").runCommand({ getCmdLineOpts : 1 }).parsed.keyFile, keyFile )
+
+var mongos = st.s0
+var coll = mongos.getCollection( "test.foo" )
+
+st.shardColl( coll, { _id : 1 } )
+
+// Create an index so we can find by num later
+coll.ensureIndex({ insert : 1 })
+
+// For more logging
+// mongos.getDB("admin").runCommand({ setParameter : 1, logLevel : 3 })
+
+print( "INSERT!" )
+
+// Insert a bunch of data
+var toInsert = 2000
+for( var i = 0; i < toInsert; i++ ){
+ coll.insert({ my : "test", data : "to", insert : i })
+}
+
+assert.eq( coll.getDB().getLastError(), null )
+
+print( "UPDATE!" )
+
+// Update a bunch of data
+var toUpdate = toInsert
+for( var i = 0; i < toUpdate; i++ ){
+ var id = coll.findOne({ insert : i })._id
+ coll.update({ insert : i, _id : id }, { $inc : { counter : 1 } })
+}
+
+assert.eq( coll.getDB().getLastError(), null )
+
+print( "DELETE" )
+
+// Remove a bunch of data
+var toDelete = toInsert / 2
+for( var i = 0; i < toDelete; i++ ){
+ coll.remove({ insert : i })
+}
+
+assert.eq( coll.getDB().getLastError(), null )
+
+// Make sure the right amount of data is there
+assert.eq( coll.find().count(), toInsert / 2 )
+
+// Finish
+st.stop()
diff --git a/jstests/sharding/sharding_with_keyfile.key b/jstests/sharding/sharding_with_keyfile.key
new file mode 100755
index 0000000..fe3344b
--- /dev/null
+++ b/jstests/sharding/sharding_with_keyfile.key
@@ -0,0 +1,3 @@
+aBcDeFg
+1010101
+JJJJJJJ \ No newline at end of file
diff --git a/jstests/sharding/sync6.js b/jstests/sharding/sync6.js
new file mode 100644
index 0000000..233534b
--- /dev/null
+++ b/jstests/sharding/sync6.js
@@ -0,0 +1,81 @@
+// Test that distributed lock forcing does not result in inconsistencies, using a
+// fast timeout.
+
+// Note that this test will always have random factors, since we can't control the
+// thread scheduling.
+
+test = new SyncCCTest( "sync6", { logpath : "/dev/null" } )
+
+// Startup another process to handle our commands to the cluster, mostly so it's
+// easier to read.
+var commandConn = startMongodTest( 30000 + 4, "syncCommander", false, {} )
+// alternative options: { logpath : "/dev/null" }, { verbose : "" }, { logpath : "/data/db/syncCommander/mongod.log" }
+
+// Up the log level for this test
+commandConn.getDB( "admin" ).runCommand( { setParameter : 1, logLevel : 1 } )
+
+// Have lots of threads, so use larger i
+// Can't test too many, we get socket exceptions... possibly due to the
+// javascript console.
+for ( var i = 8; i < 9; i++ ) {
+
+ // Our force time is 4 seconds
+ // Slower machines can't keep up the LockPinger rate, which can lead to lock failures
+ // since our locks are only valid if the LockPinger pings faster than the force time.
+ // Actual lock timeout is 15 minutes, so a few seconds is extremely aggressive
+ var takeoverMS = 4000;
+
+ // Generate valid sleep and skew for this timeout
+ var threadSleepWithLock = takeoverMS / 2;
+ var configServerTimeSkew = [ 0, 0, 0 ]
+ for ( var h = 0; h < 3; h++ ) {
+ // Skew by 1/30th the takeover time either way, at max
+ configServerTimeSkew[h] = ( i + h ) % Math.floor( takeoverMS / 60 )
+ // Make skew pos or neg
+ configServerTimeSkew[h] *= ( ( i + h ) % 2 ) ? -1 : 1;
+ }
+
+ // Build command
+ command = { _testDistLockWithSkew : 1 }
+
+ // Basic test parameters
+ command["lockName"] = "TimeSkewFailNewTest_lock_" + i;
+ command["host"] = test.url
+ command["seed"] = i
+ command["numThreads"] = ( i % 50 ) + 1
+
+ // Critical values so we're sure of correct operation
+ command["takeoverMS"] = takeoverMS
+ command["wait"] = 4 * takeoverMS // so we must force the lock
+ command["skewHosts"] = configServerTimeSkew
+ command["threadWait"] = threadSleepWithLock
+
+ // Less critical test params
+
+ // 1/3 of threads will not release the lock
+ command["hangThreads"] = 3
+ // Amount of time to wait before trying lock again
+ command["threadSleep"] = 1;// ( ( i + 1 ) * 100 ) % (takeoverMS / 4)
+ // Amount of total clock skew possible between locking threads (processes)
+ // This can be large now.
+ command["skewRange"] = ( command["takeoverMS"] * 3 ) * 60 * 1000
+
+ // Double-check our sleep, host skew, and takeoverMS values again
+
+ // At maximum, our threads must sleep only half the lock timeout time.
+ assert( command["threadWait"] <= command["takeoverMS"] / 2 )
+ for ( var h = 0; h < command["skewHosts"].length; h++ ) {
+ // At maximum, our config server time skew needs to be less than 1/30th
+ // the total time skew (1/60th either way).
+ assert( Math.abs( command["skewHosts"][h] ) <= ( command["takeoverMS"] / 60 ) )
+ }
+
+ result = commandConn.getDB( "admin" ).runCommand( command )
+ printjson( result )
+ printjson( command )
+ assert( result.ok, "Skewed threads did not increment correctly." );
+
+}
+
+stopMongoProgram( 30004 )
+test.stop();
diff --git a/jstests/sharding/sync7.js b/jstests/sharding/sync7.js
new file mode 100644
index 0000000..a8ff094
--- /dev/null
+++ b/jstests/sharding/sync7.js
@@ -0,0 +1,63 @@
+// Test that the clock skew of the distributed lock disallows getting locks for moving and splitting.
+
+s = new ShardingTest( "moveDistLock", 3, 0, undefined, { sync : true } );
+
+s._connections[0].getDB( "admin" ).runCommand( { _skewClockCommand : 1, skew : 15000 } )
+s._connections[1].getDB( "admin" ).runCommand( { _skewClockCommand : 1, skew : -16000 } )
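+// Skew two of the three config servers by +15s and -16s, far beyond what the distributed lock allows.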
+
+// We need to start another mongos after skewing the clock, since the first mongos will have already
+// tested the config servers (via the balancer) before we manually skewed them
+otherMongos = startMongos( { port : 30020, v : 0, configdb : s._configDB } );
+
+// Initialize DB data
+initDB = function(name) {
+ var db = s.getDB( name );
+ var c = db.foo;
+ c.save( { a : 1 } );
+ c.save( { a : 2 } );
+ c.save( { a : 3 } );
+ assert( 3, c.count() );
+
+ return s.getServer( name );
+}
+
+from = initDB( "test1" );
+to = s.getAnother( from );
+
+s.printShardingStatus();
+
+// Make sure we can't move when our clock skew is so high
+result = otherMongos.getDB( "admin" ).runCommand( { moveprimary : "test1", to : to.name } );
+s.printShardingStatus();
+assert.eq( result.ok, 0, "Move command should not have succeeded!" )
+
+// Enable sharding on DB and collection
+result = otherMongos.getDB("admin").runCommand( { enablesharding : "test1" } );
+result = otherMongos.getDB("test1").foo.ensureIndex( { a : 1 } );
+result = otherMongos.getDB("admin").runCommand( { shardcollection : "test1.foo", key : { a : 1 } } );
+print(" Collection Sharded! ")
+
+// Make sure we can't split when our clock skew is so high
+result = otherMongos.getDB( "admin" ).runCommand( { split : "test1.foo", find : { a : 2 } } );
+assert.eq( result.ok, 0, "Split command should not have succeeded!")
+
+// Adjust clock back in bounds
+s._connections[1].getDB( "admin" ).runCommand( { _skewClockCommand : 1, skew : 0 } )
+print(" Clock adjusted back to in-bounds. ");
+
+// Make sure we can now split
+result = otherMongos.getDB( "admin" ).runCommand( { split : "test1.foo", find : { a : 2 } } );
+s.printShardingStatus();
+assert.eq( result.ok, 1, "Split command should have succeeded!")
+
+// Make sure we can now move
+result = otherMongos.getDB( "admin" ).runCommand( { moveprimary : "test1", to : to.name } );
+s.printShardingStatus();
+assert.eq( result.ok, 1, "Move command should have succeeded!" )
+
+// Make sure we can now move again (getting the lock twice)
+result = otherMongos.getDB( "admin" ).runCommand( { moveprimary : "test1", to : from.name } );
+s.printShardingStatus();
+assert.eq( result.ok, 1, "Move command should have succeeded again!" )
+
+s.stop();
diff --git a/jstests/shell1.js b/jstests/shell1.js
new file mode 100644
index 0000000..d2da3b0
--- /dev/null
+++ b/jstests/shell1.js
@@ -0,0 +1,6 @@
+
+x = 1;
+
+shellHelper( "show", "tables;" )
+shellHelper( "show", "tables" )
+shellHelper( "show", "tables ;" )
diff --git a/jstests/shellkillop.js b/jstests/shellkillop.js
index 580d4c8..d903f25 100644
--- a/jstests/shellkillop.js
+++ b/jstests/shellkillop.js
@@ -1,65 +1,61 @@
-baseName = "jstests_shellkillop";
-
-// 'retry' should be set to true in contexts where an exception should cause the test to be retried rather than to fail.
-retry = false;
-
-function testShellAutokillop() {
-
-if (_isWindows()) {
- print("shellkillop.js not testing on windows, as functionality is missing there");
- print("shellkillop.js see http://jira.mongodb.org/browse/SERVER-1451");
-}
-else {
- db[baseName].drop();
-
- print("shellkillop.js insert data");
- for (i = 0; i < 100000; ++i) {
- db[baseName].insert({ i: 1 });
- }
- assert.eq(100000, db[baseName].count());
-
- // mongo --autokillop suppressed the ctrl-c "do you want to kill current operation" message
- // it's just for testing purposes and thus not in the shell help
- var evalStr = "print('SKO subtask started'); db." + baseName + ".update( {}, {$set:{i:'abcdefghijkl'}}, false, true ); db." + baseName + ".count();";
- print("shellkillop.js evalStr:" + evalStr);
- spawn = startMongoProgramNoConnect("mongo", "--autokillop", "--port", myPort(), "--eval", evalStr);
-
- sleep(100);
- retry = true;
- assert(db[baseName].find({ i: 'abcdefghijkl' }).count() < 100000, "update ran too fast, test won't be valid");
- retry = false;
-
- stopMongoProgramByPid(spawn);
-
- sleep(100);
-
- print("count abcdefghijkl:" + db[baseName].find({ i: 'abcdefghijkl' }).count());
-
- var inprog = db.currentOp().inprog;
- for (i in inprog) {
- if (inprog[i].ns == "test." + baseName)
- throw "shellkillop.js op is still running: " + tojson( inprog[i] );
- }
-
- retry = true;
- assert(db[baseName].find({ i: 'abcdefghijkl' }).count() < 100000, "update ran too fast, test was not valid");
- retry = false;
-}
-
-}
-
-for( var nTries = 0; nTries < 10 && retry; ++nTries ) {
- try {
- testShellAutokillop();
- } catch (e) {
- if ( !retry ) {
- throw e;
- }
- printjson( e );
- print( "retrying..." );
- }
-}
-
-assert( !retry, "retried too many times" );
-
-print("shellkillop.js SUCCESS");
+baseName = "jstests_shellkillop";
+
+// 'retry' should be set to true in contexts where an exception should cause the test to be retried rather than to fail.
+retry = false;
+
+function testShellAutokillop() {
+
+if (true) { // toggle to disable test
+ db[baseName].drop();
+
+ print("shellkillop.js insert data");
+ for (i = 0; i < 100000; ++i) {
+ db[baseName].insert({ i: 1 });
+ }
+ assert.eq(100000, db[baseName].count());
+
+ // mongo --autokillop suppressed the ctrl-c "do you want to kill current operation" message
+ // it's just for testing purposes and thus not in the shell help
+ var evalStr = "print('SKO subtask started'); db." + baseName + ".update( {}, {$set:{i:'abcdefghijkl'}}, false, true ); db." + baseName + ".count();";
+ print("shellkillop.js evalStr:" + evalStr);
+ spawn = startMongoProgramNoConnect("mongo", "--autokillop", "--port", myPort(), "--eval", evalStr);
+
+ sleep(100);
+ retry = true;
+ assert(db[baseName].find({ i: 'abcdefghijkl' }).count() < 100000, "update ran too fast, test won't be valid");
+ retry = false;
+
+ stopMongoProgramByPid(spawn);
+
+ sleep(100);
+
+ print("count abcdefghijkl:" + db[baseName].find({ i: 'abcdefghijkl' }).count());
+
+ var inprog = db.currentOp().inprog;
+ for (i in inprog) {
+ if (inprog[i].ns == "test." + baseName)
+ throw "shellkillop.js op is still running: " + tojson( inprog[i] );
+ }
+
+ retry = true;
+ assert(db[baseName].find({ i: 'abcdefghijkl' }).count() < 100000, "update ran too fast, test was not valid");
+ retry = false;
+}
+
+}
+
+for( var nTries = 0; nTries < 10 && retry; ++nTries ) {
+ try {
+ testShellAutokillop();
+ } catch (e) {
+ if ( !retry ) {
+ throw e;
+ }
+ printjson( e );
+ print( "retrying..." );
+ }
+}
+
+assert( !retry, "retried too many times" );
+
+print("shellkillop.js SUCCESS");
diff --git a/jstests/shellspawn.js b/jstests/shellspawn.js
index 6b713f8..4f550b9 100644
--- a/jstests/shellspawn.js
+++ b/jstests/shellspawn.js
@@ -9,8 +9,10 @@ if ( typeof( _startMongoProgram ) == "undefined" ){
}
else {
spawn = startMongoProgramNoConnect( "mongo", "--port", myPort(), "--eval", "sleep( 2000 ); db.getCollection( '" + baseName + "' ).save( {a:1} );" );
-
- assert.soon( function() { return 1 == t.count(); } );
+
+// assert.soon( function() { return 1 == t.count(); } );
+ // SERVER-2784 debugging - error message overwritten to indicate last count value.
+ assert.soon( "count = t.count(); msg = 'did not reach expected count, last value: ' + t.count(); 1 == count;" );
stopMongoProgramByPid( spawn );
diff --git a/jstests/skip1.js b/jstests/skip1.js
new file mode 100644
index 0000000..c620fb0
--- /dev/null
+++ b/jstests/skip1.js
@@ -0,0 +1,15 @@
+// SERVER-2845 When skipping objects without loading them, they shouldn't be
+// included in the nscannedObjects count.
+
+if ( 0 ) { // SERVER-2845
+t = db.jstests_skip1;
+t.drop();
+
+t.ensureIndex( {a:1} );
+t.save( {a:5} );
+t.save( {a:5} );
+t.save( {a:5} );
+
+assert.eq( 3, t.find( {a:5} ).skip( 2 ).explain().nscanned );
+assert.eq( 1, t.find( {a:5} ).skip( 2 ).explain().nscannedObjects );
+} \ No newline at end of file
diff --git a/jstests/slowNightly/background.js b/jstests/slowNightly/background.js
new file mode 100644
index 0000000..d1d0047
--- /dev/null
+++ b/jstests/slowNightly/background.js
@@ -0,0 +1,51 @@
+// background indexing test during inserts.
+
+assert( db.getName() == "test" );
+
+t = db.bg1;
+t.drop();
+
+var a = new Mongo( db.getMongo().host ).getDB( db.getName() );
+
+for( var i = 0; i < 100000; i++ ) {
+ t.insert({y:'aaaaaaaaaaaa',i:i});
+ if( i % 10000 == 0 ) {
+ db.getLastError();
+ print(i);
+ }
+}
+
+//db.getLastError();
+
+// start bg indexing
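+// Inserting into system.indexes is the legacy way to request an index build; background:true lets the inserts below continue while it runs.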
+a.system.indexes.insert({ns:"test.bg1", key:{i:1}, name:"i_1", background:true});
+
+// add more data
+
+for( var i = 0; i < 100000; i++ ) {
+ t.insert({i:i});
+ if( i % 10000 == 0 ) {
+ printjson( db.currentOp() );
+ db.getLastError();
+ print(i);
+ }
+}
+
+printjson( db.getLastErrorObj() );
+
+printjson( db.currentOp() );
+
+for( var i = 0; i < 40; i++ ) {
+ if( db.currentOp().inprog.length == 0 )
+ break;
+ print("waiting");
+ sleep(1000);
+}
+
+printjson( a.getLastErrorObj() );
+
+var idx = t.getIndexes();
+// print("indexes:");
+// printjson(idx);
+
+assert( idx[1].key.i == 1 );
diff --git a/jstests/slowNightly/command_line_parsing.js b/jstests/slowNightly/command_line_parsing.js
index 38c7324..ba7b136 100644
--- a/jstests/slowNightly/command_line_parsing.js
+++ b/jstests/slowNightly/command_line_parsing.js
@@ -7,3 +7,15 @@ var baseName = "jstests_slowNightly_command_line_parsing";
var m = startMongod( "--port", port, "--dbpath", "/data/db/" + baseName, "--notablescan" );
m.getDB( baseName ).getCollection( baseName ).save( {a:1} );
assert.throws( function() { m.getDB( baseName ).getCollection( baseName ).find( {a:1} ).toArray() } );
+
+// test config file
+var m2 = startMongod( "--port", port+2, "--dbpath", "/data/db/" + baseName +"2", "--config", "jstests/libs/testconfig");
+var m2result = {
+ "parsed" : {
+ "config" : "jstests/libs/testconfig",
+ "dbpath" : "/data/db/jstests_slowNightly_command_line_parsing2",
+ "fastsync" : "true",
+ "port" : 31002
+ }
+};
+assert( friendlyEqual(m2result.parsed, m2.getDB("admin").runCommand( "getCmdLineOpts" ).parsed) );
diff --git a/jstests/slowNightly/dur_big_atomic_update.js b/jstests/slowNightly/dur_big_atomic_update.js
index ffb0d83..800b4b8 100644
--- a/jstests/slowNightly/dur_big_atomic_update.js
+++ b/jstests/slowNightly/dur_big_atomic_update.js
@@ -23,6 +23,23 @@ err = d.getLastErrorObj();
assert(err.err == null);
assert(err.n == 1024);
+d.dropDatabase();
+
+for (var i=0; i<1024; i++){
+ d.foo.insert({_id:i});
+}
+
+// Do it again but in a db.eval
+d.eval(
+ function(host, big_string) {
+ new Mongo(host).getDB("test").foo.update({}, {$set: {big_string: big_string}}, false, /*multi*/true)
+ }, conn.host, big_string); // Can't pass in connection or DB objects
+
+err = d.getLastErrorObj();
+
+assert(err.err == null);
+assert(err.n == 1024);
+
// free up space
d.dropDatabase();
diff --git a/jstests/slowNightly/dur_remove_old_journals.js b/jstests/slowNightly/dur_remove_old_journals.js
index 3c57c12..1e81bee 100644
--- a/jstests/slowNightly/dur_remove_old_journals.js
+++ b/jstests/slowNightly/dur_remove_old_journals.js
@@ -33,20 +33,19 @@ sleep(sleepSecs*1000);
files = listFiles(PATH + "/journal")
printjson(files);
-
-var nfiles = 0;
-files.forEach(function (file) {
- assert.eq('string', typeof (file.name)); // sanity checking
- if (/prealloc/.test(file.name)) {
- ;
- }
- else {
- nfiles++;
- assert(!(/j\._[01]/.test(file.name)), "Old journal file still exists: " + file.name);
- }
-})
-
-assert.eq(2, nfiles); // j._2 and lsn
+
+var nfiles = 0;
+files.forEach(function (file) {
+ assert.eq('string', typeof (file.name)); // sanity checking
+ if (/prealloc/.test(file.name)) {
+ ;
+ }
+ else {
+ nfiles++;
+ }
+})
+
+assert.eq(2, nfiles); // latest journal file and lsn
stopMongod(30001);
diff --git a/jstests/slowNightly/geo_axis_aligned.js b/jstests/slowNightly/geo_axis_aligned.js
new file mode 100644
index 0000000..0161ecc
--- /dev/null
+++ b/jstests/slowNightly/geo_axis_aligned.js
@@ -0,0 +1,108 @@
+// Axis aligned circles - hard-to-find precision errors possible with exact distances here
+
+t = db.axisaligned
+t.drop();
+
+scale = [ 1, 10, 1000, 10000 ]
+bits = [ 2, 3, 4, 5, 6, 7, 8, 9 ]
+radius = [ 0.0001, 0.001, 0.01, 0.1 ]
+center = [ [ 5, 52 ], [ 6, 53 ], [ 7, 54 ], [ 8, 55 ], [ 9, 56 ] ]
+
+bound = []
+for( var j = 0; j < center.length; j++ ) bound.push( [-180, 180] );
+
+// Scale all our values to test different sizes
+radii = []
+centers = []
+bounds = []
+
+for( var s = 0; s < scale.length; s++ ){
+ for ( var i = 0; i < radius.length; i++ ) {
+ radii.push( radius[i] * scale[s] )
+ }
+
+ for ( var j = 0; j < center.length; j++ ) {
+ centers.push( [ center[j][0] * scale[s], center[j][1] * scale[s] ] )
+ bounds.push( [ bound[j][0] * scale[s], bound[j][1] * scale[s] ] )
+ }
+
+}
+
+radius = radii
+center = centers
+bound = bounds
+
+
+for ( var b = 0; b < bits.length; b++ ) {
+
+
+ printjson( radius )
+ printjson( centers )
+
+ for ( var i = 0; i < radius.length; i++ ) {
+ for ( var j = 0; j < center.length; j++ ) {
+
+ printjson( { center : center[j], radius : radius[i], bits : bits[b] } );
+
+ t.drop()
+
+ // Make sure our numbers are precise enough for this test
+ if( (center[j][0] - radius[i] == center[j][0]) || (center[j][1] - radius[i] == center[j][1]) )
+ continue;
+
+ t.save( { "_id" : 1, "loc" : { "x" : center[j][0] - radius[i], "y" : center[j][1] } } );
+ t.save( { "_id" : 2, "loc" : { "x" : center[j][0], "y" : center[j][1] } } );
+ t.save( { "_id" : 3, "loc" : { "x" : center[j][0] + radius[i], "y" : center[j][1] } } );
+ t.save( { "_id" : 4, "loc" : { "x" : center[j][0], "y" : center[j][1] + radius[i] } } );
+ t.save( { "_id" : 5, "loc" : { "x" : center[j][0], "y" : center[j][1] - radius[i] } } );
+ t.save( { "_id" : 6, "loc" : { "x" : center[j][0] - radius[i], "y" : center[j][1] + radius[i] } } );
+ t.save( { "_id" : 7, "loc" : { "x" : center[j][0] + radius[i], "y" : center[j][1] + radius[i] } } );
+ t.save( { "_id" : 8, "loc" : { "x" : center[j][0] - radius[i], "y" : center[j][1] - radius[i] } } );
+ t.save( { "_id" : 9, "loc" : { "x" : center[j][0] + radius[i], "y" : center[j][1] - radius[i] } } );
+
+ t.ensureIndex( { loc : "2d" }, { max : bound[j][1], min : bound[j][0], bits : bits[b] } );
+
+ if( db.getLastError() ) continue;
+
+ print( "DOING WITHIN QUERY ")
+ r = t.find( { "loc" : { "$within" : { "$center" : [ center[j], radius[i] ] } } } );
+
+ //printjson( r.toArray() );
+
+ assert.eq( 5, r.count() );
+
+ // FIXME: surely code like this belongs in utils.js.
+ a = r.toArray();
+ x = [];
+ for ( k in a )
+ x.push( a[k]["_id"] )
+ x.sort()
+ assert.eq( [ 1, 2, 3, 4, 5 ], x );
+
+ print( " DOING NEAR QUERY ")
+ //printjson( center[j] )
+ r = t.find( { loc : { $near : center[j], $maxDistance : radius[i] } }, { _id : 1 } )
+ assert.eq( 5, r.count() );
+
+ print( " DOING DIST QUERY ")
+
+ a = db.runCommand({ geoNear : "axisaligned", near : center[j], maxDistance : radius[i] }).results
+ assert.eq( 5, a.length );
+
+ //printjson( a );
+
+ var distance = 0;
+ for( var k = 0; k < a.length; k++ ){
+ //print( a[k].dis )
+ //print( distance )
+ assert.gte( a[k].dis, distance );
+ //printjson( a[k].obj )
+ //print( distance = a[k].dis );
+ }
+
+ r = t.find( { loc : { $within : { $box : [ [ center[j][0] - radius[i], center[j][1] - radius[i] ], [ center[j][0] + radius[i], center[j][1] + radius[i] ] ] } } }, { _id : 1 } )
+ assert.eq( 9, r.count() );
+
+ }
+ }
+} \ No newline at end of file
diff --git a/jstests/slowNightly/geo_mnypts.js b/jstests/slowNightly/geo_mnypts.js
new file mode 100644
index 0000000..ac40651
--- /dev/null
+++ b/jstests/slowNightly/geo_mnypts.js
@@ -0,0 +1,51 @@
+// Test sanity of geo queries with a lot of points
+
+var coll = db.testMnyPts
+coll.drop()
+
+var totalPts = 500 * 1000
+
+// Add points in a 100x100 grid
+for( var i = 0; i < totalPts; i++ ){
+ var ii = i % 10000
+ coll.insert({ loc : [ ii % 100, Math.floor( ii / 100 ) ] })
+}
+
+coll.ensureIndex({ loc : "2d" })
+
+// Check that quarter of points in each quadrant
+for( var i = 0; i < 4; i++ ){
+ var x = i % 2
+ var y = Math.floor( i / 2 )
+
+ var box = [[0, 0], [49, 49]]
+ box[0][0] += ( x == 1 ? 50 : 0 )
+ box[1][0] += ( x == 1 ? 50 : 0 )
+ box[0][1] += ( y == 1 ? 50 : 0 )
+ box[1][1] += ( y == 1 ? 50 : 0 )
+
+ assert.eq( totalPts / 4, coll.find({ loc : { $within : { $box : box } } }).count() )
+ assert.eq( totalPts / 4, coll.find({ loc : { $within : { $box : box } } }).itcount() )
+
+}
+
+// Check that half of points in each half
+for( var i = 0; i < 2; i++ ){
+
+ var box = [[0, 0], [49, 99]]
+ box[0][0] += ( i == 1 ? 50 : 0 )
+ box[1][0] += ( i == 1 ? 50 : 0 )
+
+ assert.eq( totalPts / 2, coll.find({ loc : { $within : { $box : box } } }).count() )
+ assert.eq( totalPts / 2, coll.find({ loc : { $within : { $box : box } } }).itcount() )
+
+}
+
+// Check that all but corner set of points in radius
+var circle = [[0, 0], (100 - 1) * Math.sqrt( 2 ) - 0.25 ]
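+// This radius reaches every grid point except the far corner (99,99), so exactly one grid cell's worth of points is excluded from the counts below.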
+
+assert.eq( totalPts - totalPts / ( 100 * 100 ), coll.find({ loc : { $within : { $center : circle } } }).count() )
+assert.eq( totalPts - totalPts / ( 100 * 100 ), coll.find({ loc : { $within : { $center : circle } } }).itcount() )
+
+
+
diff --git a/jstests/slowNightly/geo_polygon.js b/jstests/slowNightly/geo_polygon.js
new file mode 100644
index 0000000..25bf026
--- /dev/null
+++ b/jstests/slowNightly/geo_polygon.js
@@ -0,0 +1,53 @@
+t = db.geo_polygon4;
+t.drop();
+
+shouldRun = true;
+
+bi = db.adminCommand( "buildinfo" ).sysInfo
+if ( bi.indexOf( "erh2" ) >= 0 ){
+ // this machine runs this test very slowly
+ // it seems to be related to osx 10.5
+ // if this machine gets upgraded, we should remove this check
+ // the os x debug builders still run this test, so i'm not worried about it
+ shouldRun = false;
+}
+
+if ( shouldRun ) {
+
+ num = 0;
+ for ( x = -180; x < 180; x += .5 ){
+ for ( y = -180; y < 180; y += .5 ){
+ o = { _id : num++ , loc : [ x , y ] };
+ t.save( o );
+ }
+ }
+
+ var numTests = 31;
+ for( var n = 0; n < numTests; n++ ){
+ t.dropIndexes()
+ t.ensureIndex( { loc : "2d" }, { bits : 2 + n } );
+
+ assert.between( 9 - 2 , t.find( { loc: { "$within": { "$polygon" : [[0,0], [1,1], [0,2]] }}} ).count() , 9, "Triangle Test", true);
+ assert.eq( num , t.find( { loc : { "$within" : { "$polygon" : [ [-180,-180], [-180,180], [180,180], [180,-180] ] } } } ).count() , "Bounding Box Test" );
+
+ assert.eq( 441 , t.find( { loc : { "$within" : { "$polygon" : [ [0,0], [0,10], [10,10], [10,0] ] } } } ).count() , "Square Test" );
+ assert.eq( 25 , t.find( { loc : { "$within" : { "$polygon" : [ [0,0], [0,2], [2,2], [2,0] ] } } } ).count() , "Square Test 2" );
+
+ if(1){ // SERVER-3726
+ // Points exactly on diagonals may be in or out, depending on how the error calculating the slope falls.
+ assert.between( 341 - 18 , t.find( { loc : { "$within" : { "$polygon" : [ [0,0], [0,10], [10,10], [10,0], [5,5] ] } } } ).count(), 341, "Square Missing Chunk Test", true );
+ assert.between( 21 - 2 , t.find( { loc : { "$within" : { "$polygon" : [ [0,0], [0,2], [2,2], [2,0], [1,1] ] } } } ).count(), 21 , "Square Missing Chunk Test 2", true );
+ }
+
+ assert.eq( 1 , t.find( { loc: { "$within": { "$polygon" : [[0,0], [0,0], [0,0]] }}} ).count() , "Point Test" );
+
+ // SERVER-3725
+ {
+ assert.eq( 5 , t.find( { loc: { "$within": { "$polygon" : [[0,0], [1,0], [2,0]] }}} ).count() , "Line Test 1" );
+ assert.eq( 3 , t.find( { loc: { "$within": { "$polygon" : [[0,0], [0,0], [1,0]] }}} ).count() , "Line Test 2" );
+ assert.eq( 5 , t.find( { loc: { "$within": { "$polygon" : [[0,2], [0,1], [0,0]] }}} ).count() , "Line Test 3" );
+ }
+
+ assert.eq( 3 , t.find( { loc: { "$within": { "$polygon" : [[0,1], [0,0], [0,0]] }}} ).count() , "Line Test 4" );
+ }
+}
diff --git a/jstests/slowNightly/index_check10.js b/jstests/slowNightly/index_check10.js
new file mode 100644
index 0000000..be94be2
--- /dev/null
+++ b/jstests/slowNightly/index_check10.js
@@ -0,0 +1,133 @@
+// Randomized index testing with initial btree constructed using btree builder.
+// Also uses large strings.
+
+Random.setRandomSeed();
+
+t = db.test_index_check10;
+
+function doIt( indexVersion ) {
+
+ t.drop();
+
+ function sort() {
+ var sort = {};
+ for( var i = 0; i < n; ++i ) {
+ sort[ fields[ i ] ] = Random.rand() > 0.5 ? 1 : -1;
+ }
+ return sort;
+ }
+
+ var fields = [ 'a', 'b', 'c', 'd', 'e' ];
+ n = Random.randInt( 5 ) + 1;
+ var idx = sort();
+
+ var chars = "abcdefghijklmnopqrstuvwxyz";
+
+ function obj() {
+ var ret = {};
+ for( var i = 0; i < n; ++i ) {
+ ret[ fields[ i ] ] = r();
+ }
+ return ret;
+ }
+
+ function r() {
+ var len = Random.randInt( 1000 / n );
+ buf = "";
+ for( var i = 0; i < len; ++i ) {
+ buf += chars.charAt( Random.randInt( chars.length ) );
+ }
+ return buf;
+ }
+
+ function check() {
+ var v = t.validate();
+ if ( !v.valid ) {
+ printjson( v );
+ assert( v.valid );
+ }
+ var spec = {};
+ for( var i = 0; i < n; ++i ) {
+ if ( Random.rand() > 0.5 ) {
+ var bounds = [ r(), r() ];
+ if ( bounds[ 0 ] > bounds[ 1 ] ) {
+ bounds.reverse();
+ }
+ var s = {};
+ if ( Random.rand() > 0.5 ) {
+ s[ "$gte" ] = bounds[ 0 ];
+ } else {
+ s[ "$gt" ] = bounds[ 0 ];
+ }
+ if ( Random.rand() > 0.5 ) {
+ s[ "$lte" ] = bounds[ 1 ];
+ } else {
+ s[ "$lt" ] = bounds[ 1 ];
+ }
+ spec[ fields[ i ] ] = s;
+ } else {
+ var vals = []
+ for( var j = 0; j < Random.randInt( 15 ); ++j ) {
+ vals.push( r() );
+ }
+ spec[ fields[ i ] ] = { $in: vals };
+ }
+ }
+ s = sort();
+ c1 = t.find( spec, { _id:null } ).sort( s ).hint( idx ).toArray();
+ try {
+ c3 = t.find( spec, { _id:null } ).sort( s ).hint( {$natural:1} ).toArray();
+ } catch( e ) {
+ // may assert if too much data for in memory sort
+ print( "retrying check..." );
+ check(); // retry with different bounds
+ return;
+ }
+
+ var j = 0;
+ for( var i = 0; i < c3.length; ++i ) {
+ if( friendlyEqual( c1[ j ], c3[ i ] ) ) {
+ ++j;
+ } else {
+ var o = c3[ i ];
+ var size = Object.bsonsize( o );
+ for( var f in o ) {
+ size -= f.length;
+ }
+
+ var max = indexVersion == 0 ? 819 : 818;
+
+ if ( size <= max /* KeyMax */ ) {
+ assert.eq( c1, c3 , "size: " + size );
+ }
+ }
+ }
+ }
+
+ for( var i = 0; i < 10000; ++i ) {
+ t.save( obj() );
+ }
+
+ t.ensureIndex( idx , { v : indexVersion } );
+ check();
+
+ for( var i = 0; i < 10000; ++i ) {
+ if ( Random.rand() > 0.9 ) {
+ t.save( obj() );
+ } else {
+ t.remove( obj() ); // improve
+ }
+ if( Random.rand() > 0.999 ) {
+ print( i );
+ check();
+ }
+ }
+
+ check();
+
+}
+
+for( var z = 0; z < 5; ++z ) {
+ var indexVersion = z % 2;
+ doIt( indexVersion );
+}
diff --git a/jstests/slowNightly/index_check9.js b/jstests/slowNightly/index_check9.js
index 6634d06..33ce0a6 100644
--- a/jstests/slowNightly/index_check9.js
+++ b/jstests/slowNightly/index_check9.js
@@ -1,3 +1,5 @@
+// Randomized index testing
+
Random.setRandomSeed();
t = db.test_index_check9;
diff --git a/jstests/slowNightly/replReads.js b/jstests/slowNightly/replReads.js
new file mode 100644
index 0000000..4fe9130
--- /dev/null
+++ b/jstests/slowNightly/replReads.js
@@ -0,0 +1,108 @@
+// Test that doing slaveOk reads from secondaries hits all the secondaries evenly
+
+function testReadLoadBalancing(numReplicas) {
+
+ s = new ShardingTest( "replReads" , 1 /* numShards */, 0 /* verboseLevel */, 1 /* numMongos */, { rs : true , numReplicas : numReplicas, chunksize : 1 } )
+
+ s.adminCommand({enablesharding : "test"})
+ s.config.settings.find().forEach(printjson)
+
+ s.adminCommand({shardcollection : "test.foo", key : {_id : 1}})
+
+ s.getDB("test").foo.insert({a : 123})
+
+ primary = s._rs[0].test.liveNodes.master
+ secondaries = s._rs[0].test.liveNodes.slaves
+
+ function rsStats() {
+ return s.getDB( "admin" ).runCommand( "connPoolStats" )["replicaSets"]["replReads-rs0"];
+ }
+
+ assert.eq( numReplicas , rsStats().hosts.length );
+
+ function isMasterOrSecondary( info ){
+ if ( ! info.ok )
+ return false;
+ if ( info.ismaster )
+ return true;
+ return info.secondary && ! info.hidden;
+ }
+
+ assert.soon(
+ function() {
+ var x = rsStats().hosts;
+ printjson(x)
+ for ( var i=0; i<x.length; i++ )
+ if ( ! isMasterOrSecondary( x[i] ) )
+ return false;
+ return true;
+ }
+ );
+
+ for (var i = 0; i < secondaries.length; i++) {
+ assert.soon( function(){ return secondaries[i].getDB("test").foo.count() > 0; } )
+ secondaries[i].getDB('test').setProfilingLevel(2)
+ }
+
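+ // Issue 10 slaveOk reads per secondary through fresh mongos connections; the profiler counts below should show them spread evenly across the secondaries.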
+ for (var i = 0; i < secondaries.length * 10; i++) {
+ conn = new Mongo(s._mongos[0].host)
+ conn.setSlaveOk()
+ conn.getDB('test').foo.findOne()
+ }
+
+ for (var i = 0; i < secondaries.length; i++) {
+ var profileCollection = secondaries[i].getDB('test').system.profile;
+ assert.eq(10, profileCollection.find().count(), "Wrong number of read queries sent to secondary " + i + " " + tojson( profileCollection.find().toArray() ))
+ }
+
+ db = primary.getDB( "test" );
+
+ printjson(rs.status());
+ c = rs.conf();
+ print( "config before: " + tojson(c) );
+ for ( i=0; i<c.members.length; i++ ) {
+ if ( c.members[i].host == db.runCommand( "ismaster" ).primary )
+ continue;
+ c.members[i].hidden = true;
+ c.members[i].priority = 0;
+ break;
+ }
+ rs.reconfig( c );
+ print( "config after: " + tojson( rs.conf() ) );
+
+ assert.soon(
+ function() {
+ var x = rsStats();
+ printjson(x);
+ var numOk = 0;
+ for ( var i=0; i<x.hosts.length; i++ )
+ if ( x.hosts[i].hidden )
+ return true;
+ return false;
+ } , "one slave not ok" , 180000 , 5000
+ );
+
+ for (var i = 0; i < secondaries.length * 10; i++) {
+ conn = new Mongo(s._mongos[0].host)
+ conn.setSlaveOk()
+ conn.getDB('test').foo.findOne()
+ }
+
+ var counts = []
+ for (var i = 0; i < secondaries.length; i++) {
+ var profileCollection = secondaries[i].getDB('test').system.profile;
+ counts.push( profileCollection.find().count() );
+ }
+
+ counts = counts.sort();
+ assert.eq( 20 , counts[1] - counts[0] , "counts wrong: " + tojson( counts ) );
+
+ s.stop()
+}
+
+//for (var i = 1; i < 10; i++) {
+// testReadLoadBalancing(i)
+//}
+
+// Is there a way that this can be run multiple times with different values?
+testReadLoadBalancing(3)
diff --git a/jstests/slowNightly/replsets_priority1.js b/jstests/slowNightly/replsets_priority1.js
new file mode 100644
index 0000000..3eef5cf
--- /dev/null
+++ b/jstests/slowNightly/replsets_priority1.js
@@ -0,0 +1,173 @@
+// come up with random priorities and make sure that the right member gets
+// elected. then kill that member and make sure then next one gets elected.
+
+load("jstests/replsets/rslib.js");
+
+var rs = new ReplSetTest( {name: 'testSet', nodes: 3} );
+var nodes = rs.startSet();
+rs.initiate();
+
+var master = rs.getMaster();
+
+var everyoneOkSoon = function() {
+ var status;
+ assert.soon(function() {
+ var ok = true;
+ status = master.adminCommand({replSetGetStatus : 1});
+
+ if (!status.members) {
+ return false;
+ }
+
+ for (var i in status.members) {
+ if (status.members[i].health == 0) {
+ continue;
+ }
+ ok &= status.members[i].state == 1 || status.members[i].state == 2;
+ }
+ return ok;
+ }, tojson(status));
+};
+
+var checkPrimaryIs = function(node) {
+ var status;
+
+ assert.soon(function() {
+ var ok = true;
+
+ try {
+ status = master.adminCommand({replSetGetStatus : 1});
+ }
+ catch(e) {
+ print(e);
+ reconnect(master);
+ status = master.adminCommand({replSetGetStatus : 1});
+ }
+
+ var str = "goal: "+node.host+"==1 states: ";
+ if (!status || !status.members) {
+ return false;
+ }
+ status.members.forEach( function(m) {
+ str += m.name + ": "+m.state +" ";
+
+ if (m.name == node.host) {
+ ok &= m.state == 1;
+ }
+ else {
+ ok &= m.state != 1 || (m.state == 1 && m.health == 0);
+ }
+ });
+ print(str);
+
+ occasionally(function() {
+ printjson(status);
+ }, 15);
+
+ return ok;
+ }, node.host+'==1', 60000, 1000);
+
+ everyoneOkSoon();
+};
+
+everyoneOkSoon();
+
+// initial sync
+master.getDB("foo").bar.insert({x:1});
+rs.awaitReplication();
+
+print("starting loop");
+
+var n = 5;
+for (i=0; i<n; i++) {
+ print("Round "+i+": FIGHT!");
+
+ var max = null;
+ var second = null;
+ reconnect(master);
+ var config = master.getDB("local").system.replset.findOne();
+
+ var version = config.version;
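+ // Bump the version so the replSetReconfig below will be accepted.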
+ config.version++;
+
+ for (var j=0; j<config.members.length; j++) {
+ var priority = Math.random()*100;
+ config.members[j].priority = priority;
+
+ if (!max || priority > max.priority) {
+ max = config.members[j];
+ }
+ }
+
+ for (var j=0; j<config.members.length; j++) {
+ if (config.members[j] == max) {
+ continue;
+ }
+ if (!second || config.members[j].priority > second.priority) {
+ second = config.members[j];
+ }
+ }
+
+ print("max is "+max.host+" with priority "+max.priority+", reconfiguring...");
+
+ var count = 0;
+ while (config.version != version && count < 100) {
+ reconnect(master);
+
+ occasionally(function() {
+ print("version is "+version+", trying to update to "+config.version);
+ });
+
+ try {
+ master.adminCommand({replSetReconfig : config});
+ master = rs.getMaster();
+ reconnect(master);
+
+ version = master.getDB("local").system.replset.findOne().version;
+ }
+ catch (e) {
+ print("Caught exception: "+e);
+ }
+
+ count++;
+ }
+
+ assert.soon(function() {
+ rs.getMaster();
+ return rs.liveNodes.slaves.length == 2;
+ }, "2 slaves");
+
+ assert.soon(function() {
+ versions = [0,0];
+ rs.liveNodes.slaves[0].setSlaveOk();
+ versions[0] = rs.liveNodes.slaves[0].getDB("local").system.replset.findOne().version;
+ rs.liveNodes.slaves[1].setSlaveOk();
+ versions[1] = rs.liveNodes.slaves[1].getDB("local").system.replset.findOne().version;
+ return versions[0] == config.version && versions[1] == config.version;
+ });
+
+ // the reconfiguration needs to be replicated! the hb sends it out
+ // separately from the repl
+ rs.awaitReplication();
+
+ print("reconfigured. Checking statuses.");
+
+ checkPrimaryIs(max);
+
+ rs.stop(max._id);
+
+ var master = rs.getMaster();
+
+ print("killed max primary. Checking statuses.");
+
+ print("second is "+second.host+" with priority "+second.priority);
+ checkPrimaryIs(second);
+
+ rs.restart(max._id);
+ master = rs.getMaster();
+
+ print("Restarted max. Checking statuses.");
+ checkPrimaryIs(max);
+}
+
+print("priority1.js SUCCESS!");
diff --git a/jstests/slowNightly/sharding_balance1.js b/jstests/slowNightly/sharding_balance1.js
index 9379c4f..c50148c 100644
--- a/jstests/slowNightly/sharding_balance1.js
+++ b/jstests/slowNightly/sharding_balance1.js
@@ -41,7 +41,8 @@ print( diff() )
assert.soon( function(){
var d = diff();
return d < 5;
-} , "balance didn't happen" , 1000 * 60 * 3 , 5000 );
+// Make sure there's enough time here, since balancing can sleep for 15s or so between balances.
+} , "balance didn't happen" , 1000 * 60 * 5 , 5000 );
var chunkCount = sum();
s.adminCommand( { removeshard: "shard0000" } );
diff --git a/jstests/slowNightly/sharding_balance4.js b/jstests/slowNightly/sharding_balance4.js
index c7f76dd..5288bda 100644
--- a/jstests/slowNightly/sharding_balance4.js
+++ b/jstests/slowNightly/sharding_balance4.js
@@ -90,8 +90,12 @@ function diff(){
if ( le.err )
print( "ELIOT ELIOT : " + tojson( le ) + "\t" + myid );
- assert( le.updatedExisting , "GLE diff 1 myid: " + myid + " " + tojson(le) )
- assert.eq( 1 , le.n , "GLE diff 2 myid: " + myid + " " + tojson(le) )
+ if ( ! le.updatedExisting || le.n != 1 ) {
+ print( "going to assert for id: " + myid + " correct count is: " + counts[myid] + " db says count is: " + db.foo.findOne( { _id : myid } ) );
+ }
+
+ assert( le.updatedExisting , "GLE diff myid: " + myid + " 1: " + tojson(le) )
+ assert.eq( 1 , le.n , "GLE diff myid: " + myid + " 2: " + tojson(le) )
if ( Math.random() > .99 ){
diff --git a/jstests/slowNightly/sharding_migrateBigObject.js b/jstests/slowNightly/sharding_migrateBigObject.js
new file mode 100644
index 0000000..5ad9ed1
--- /dev/null
+++ b/jstests/slowNightly/sharding_migrateBigObject.js
@@ -0,0 +1,61 @@
+
+var shardA = startMongodEmpty("--shardsvr", "--port", 30001, "--dbpath", "/data/migrateBigger0");
+var shardB = startMongodEmpty("--shardsvr", "--port", 30002, "--dbpath", "/data/migrateBigger1");
+var config = startMongodEmpty("--configsvr", "--port", 29999, "--dbpath", "/data/migrateBiggerC");
+
+var mongos = startMongos("--port", 30000, "--configdb", "localhost:29999")
+
+var admin = mongos.getDB("admin")
+
+admin.runCommand({ addshard : "localhost:30001" })
+admin.runCommand({ addshard : "localhost:30002" })
+
+db = mongos.getDB("test");
+var coll = db.getCollection("stuff")
+
+var data = "x"
+var nsq = 16
+var n = 255
+
+for( var i = 0; i < nsq; i++ ) data += data
+
+dataObj = {}
+for( var i = 0; i < n; i++ ) dataObj["data-" + i] = data
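+// Each saved document is close to the 16MB BSON limit (255 fields of 64KB strings).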
+
+for( var i = 0; i < 40; i++ ) {
+ if(i != 0 && i % 10 == 0) printjson( coll.stats() )
+ coll.save({ data : dataObj })
+}
+db.getLastError();
+
+assert.eq( 40 , coll.count() , "prep1" );
+
+printjson( coll.stats() )
+
+admin.runCommand({ enablesharding : "" + coll.getDB() })
+
+admin.printShardingStatus()
+
+admin.runCommand({ shardcollection : "" + coll, key : { _id : 1 } })
+
+assert.lt( 5 , mongos.getDB( "config" ).chunks.find( { ns : "test.stuff" } ).count() , "not enough chunks" );
+
+assert.soon(
+ function(){
+ res = mongos.getDB( "config" ).chunks.group( { cond : { ns : "test.stuff" } ,
+ key : { shard : 1 } ,
+ reduce : function( doc , out ){ out.nChunks++; } ,
+ initial : { nChunks : 0 } } );
+
+ printjson( res );
+ return res.length > 1 && Math.abs( res[0].nChunks - res[1].nChunks ) <= 3;
+
+ } ,
+ "never migrated" , 180000 , 1000 );
+
+stopMongod( 30000 );
+stopMongod( 29999 );
+stopMongod( 30001 );
+stopMongod( 30002 );
+
+
diff --git a/jstests/slowNightly/sharding_multiple_ns_rs.js b/jstests/slowNightly/sharding_multiple_ns_rs.js
new file mode 100644
index 0000000..3cd7b3e
--- /dev/null
+++ b/jstests/slowNightly/sharding_multiple_ns_rs.js
@@ -0,0 +1,49 @@
+
+s = new ShardingTest( "blah" , 1 /* numShards */, 1 /* verboseLevel */, 1 /* numMongos */, { rs : true , chunksize : 1 } )
+
+s.adminCommand( { enablesharding : "test" } );
+s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
+
+db = s.getDB( "test" );
+
+for ( i=0; i<100; i++ ) {
+ db.foo.insert( { _id : i , x : i } )
+ db.bar.insert( { _id : i , x : i } )
+}
+
+db.getLastError();
+
+sh.splitAt( "test.foo" , { _id : 50 } )
+
+other = new Mongo( s.s.name );
+dbother = other.getDB( "test" );
+
+assert.eq( 5 , db.foo.findOne( { _id : 5 } ).x );
+assert.eq( 5 , dbother.foo.findOne( { _id : 5 } ).x );
+
+assert.eq( 5 , db.bar.findOne( { _id : 5 } ).x );
+assert.eq( 5 , dbother.bar.findOne( { _id : 5 } ).x );
+
+
+s._rs[0].test.awaitReplication();
+
+s._rs[0].test.stopMaster( 15 , true )
+
+sleep( 20 * 1000 );
+
+assert.eq( 5 , db.foo.findOne( { _id : 5 } ).x );
+assert.eq( 5 , db.bar.findOne( { _id : 5 } ).x );
+
+s.adminCommand( { shardcollection : "test.bar" , key : { _id : 1 } } );
+sh.splitAt( "test.bar" , { _id : 50 } )
+
+yetagain = new Mongo( s.s.name )
+assert.eq( 5 , yetagain.getDB( "test" ).bar.findOne( { _id : 5 } ).x )
+assert.eq( 5 , yetagain.getDB( "test" ).foo.findOne( { _id : 5 } ).x )
+
+assert.eq( 5 , dbother.bar.findOne( { _id : 5 } ).x );
+assert.eq( 5 , dbother.foo.findOne( { _id : 5 } ).x );
+
+
+s.stop();
+
diff --git a/jstests/slowNightly/sharding_passthrough.js b/jstests/slowNightly/sharding_passthrough.js
index 81781ca..d81df68 100644
--- a/jstests/slowNightly/sharding_passthrough.js
+++ b/jstests/slowNightly/sharding_passthrough.js
@@ -1,6 +1,6 @@
-s = new ShardingTest( "sharding_passthrough" , 2 , 1 , 1 );
-s.adminCommand( { enablesharding : "test" } );
-db=s.getDB("test");
+myShardingTest = new ShardingTest( "sharding_passthrough" , 2 , 1 , 1 );
+myShardingTest.adminCommand( { enablesharding : "test" } );
+db=myShardingTest.getDB("test");
var files = listFiles("jstests");
@@ -9,7 +9,6 @@ var runnerStart = new Date()
files.forEach(
function(x) {
-// /(basic|update).*\.js$/
if ( /[\/\\]_/.test(x.name) ||
! /\.js$/.test(x.name ) ){
print(" >>>>>>>>>>>>>>> skipping " + x.name);
@@ -63,17 +62,17 @@ files.forEach(
* clean (apitest_dbcollection)
* logout and getnonce
*/
- if (/[\/\\](error3|capped.*|splitvector|apitest_db|cursor6|copydb-auth|profile1|dbhash|median|apitest_dbcollection|evalb|evald|eval_nolock|auth1|auth2|unix_socket\d*)\.js$/.test(x.name)) {
+ if (/[\/\\](error3|capped.*|splitvector|apitest_db|cursor6|copydb-auth|profile\d*|dbhash|median|apitest_dbcollection|evalb|evald|eval_nolock|auth1|auth2|dropdb_race|unix_socket\d*)\.js$/.test(x.name)) {
print(" !!!!!!!!!!!!!!! skipping test that has failed under sharding but might not anymore " + x.name)
return;
}
// These are bugs (some might be fixed now):
- if (/[\/\\](apply_ops1|count5|cursor8|or4|shellkillop|update4)\.js$/.test(x.name)) {
+ if (/[\/\\](apply_ops1|count5|cursor8|or4|shellkillop|update4|profile\d*)\.js$/.test(x.name)) {
print(" !!!!!!!!!!!!!!! skipping test that has failed under sharding but might not anymore " + x.name)
return;
}
// These aren't supposed to get run under sharding:
- if (/[\/\\](dbadmin|error1|fsync|fsync2|geo.*|indexh|remove5|update4|notablescan|check_shard_index|mr_replaceIntoDB)\.js$/.test(x.name)) {
+ if (/[\/\\](dbadmin|error1|fsync|fsync2|geo.*|indexh|remove5|update4|notablescan|compact.*|check_shard_index|bench_test.*|mr_replaceIntoDB)\.js$/.test(x.name)) {
print(" >>>>>>>>>>>>>>> skipping test that would fail under sharding " + x.name)
return;
}
@@ -89,6 +88,9 @@ files.forEach(
);
+myShardingTest.stop()
+
var runnerEnd = new Date()
print( "total runner time: " + ( ( runnerEnd.getTime() - runnerStart.getTime() ) / 1000 ) + "secs" )
+
diff --git a/jstests/slowNightly/sharding_rs1.js b/jstests/slowNightly/sharding_rs1.js
index 4ad126e..f73e690 100644
--- a/jstests/slowNightly/sharding_rs1.js
+++ b/jstests/slowNightly/sharding_rs1.js
@@ -1,6 +1,6 @@
// tests sharding with replica sets
-s = new ShardingTest( "rs1" , 3 , 1 , 2 , { rs : true , chunksize : 1 } )
+s = new ShardingTest( "rs1" , 3 /* numShards */, 1 /* verboseLevel */, 2 /* numMongos */, { rs : true , chunksize : 1 } )
s.adminCommand( { enablesharding : "test" } );
@@ -59,6 +59,12 @@ assert.soon( function(){
s.config.settings.update( { _id: "balancer" }, { $set : { stopped: true } } , true );
+sleep( 1000 );
+
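+// Wait for any in-flight balancer round to finish before checking replication below.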
+while ( sh.isBalancerRunning() ){
+ sleep( 1000 );
+}
+
for ( i=0; i<s._rs.length; i++ ){
r = s._rs[i];
r.test.awaitReplication();
diff --git a/jstests/slowNightly/sharding_rs2.js b/jstests/slowNightly/sharding_rs2.js
index cd7cf68..4de935b 100644
--- a/jstests/slowNightly/sharding_rs2.js
+++ b/jstests/slowNightly/sharding_rs2.js
@@ -155,7 +155,29 @@ assert.eq( before.query + 10 , after.query , "E3" )
assert.eq( 100 , ts.count() , "E4" )
assert.eq( 100 , ts.find().itcount() , "E5" )
printjson( ts.find().batchSize(5).explain() )
+
+before = rs.test.getMaster().adminCommand( "serverStatus" ).opcounters
+// Careful, mongos can poll the masters here too unrelated to the query,
+// resulting in this test failing sporadically if/when there's a delay here.
assert.eq( 100 , ts.find().batchSize(5).itcount() , "E6" )
+after = rs.test.getMaster().adminCommand( "serverStatus" ).opcounters
+assert.eq( before.query + before.getmore , after.query + after.getmore , "E6.1" )
+
+assert.eq( 100 , ts.find().batchSize(5).itcount() , "F1" )
+
+for ( i=0; i<10; i++ ) {
+ m = new Mongo( s.s.name );
+ m.setSlaveOk();
+ ts = m.getDB( "test" ).foo
+ assert.eq( 100 , ts.find().batchSize(5).itcount() , "F2." + i )
+}
+
+for ( i=0; i<10; i++ ) {
+ m = new Mongo( s.s.name );
+ ts = m.getDB( "test" ).foo
+ assert.eq( 100 , ts.find().batchSize(5).itcount() , "F3." + i )
+}
+
printjson( db.adminCommand( "getShardMap" ) );
diff --git a/jstests/slowNightly/sharding_rs_arb1.js b/jstests/slowNightly/sharding_rs_arb1.js
new file mode 100644
index 0000000..be4c4dc
--- /dev/null
+++ b/jstests/slowNightly/sharding_rs_arb1.js
@@ -0,0 +1,40 @@
+x = 5
+name = "sharding_rs_arb1"
+replTest = new ReplSetTest( { name : name , nodes : 3 , startPort : 31000 } );
+nodes = replTest.startSet();
+var port = replTest.ports;
+replTest.initiate({_id : name, members :
+ [
+ {_id:0, host : getHostName()+":"+port[0]},
+ {_id:1, host : getHostName()+":"+port[1]},
+ {_id:2, host : getHostName()+":"+port[2], arbiterOnly : true},
+ ],
+ });
+
+replTest.awaitReplication();
+
+master = replTest.getMaster();
+db = master.getDB( "test" );
+printjson( rs.status() );
+
+var config = startMongodEmpty("--configsvr", "--port", 29999, "--dbpath", "/data/db/" + name + "_config" );
+
+var mongos = startMongos("--port", 30000, "--configdb", getHostName() + ":29999")
+var admin = mongos.getDB("admin")
+var url = name + "/";
+for ( i=0; i<port.length; i++ ) {
+ if ( i > 0 )
+ url += ",";
+ url += getHostName() + ":" + port[i];
+}
+print( url )
+res = admin.runCommand( { addshard : url } )
+printjson( res )
+assert( res.ok , tojson(res) )
+
+
+
+stopMongod( 30000 )
+stopMongod( 29999 )
+replTest.stopSet();
+
diff --git a/jstests/slowNightly/sync6_slow.js b/jstests/slowNightly/sync6_slow.js
new file mode 100644
index 0000000..63d6123
--- /dev/null
+++ b/jstests/slowNightly/sync6_slow.js
@@ -0,0 +1,82 @@
+// More complete version of sharding/sync6.js
+// Test that distributed lock forcing does not result in inconsistencies, using a
+// fast timeout.
+
+// Note that this test will always have random factors, since we can't control the
+// thread scheduling.
+
+test = new SyncCCTest( "sync6", { logpath : "/dev/null" } )
+
+// Start up another process to handle our commands to the cluster, mostly so it's
+// easier to read.
+var commandConn = startMongodTest( 30000 + 4, "syncCommander", false, {} )
+// Alternative options: { logpath : "/dev/null" }, { logpath : "/data/db/syncCommander/mongod.log" }, { verbose : "" }
+
+// Set the log level for this test
+commandConn.getDB( "admin" ).runCommand( { setParameter : 1, logLevel : 0 } )
+
+// Use a larger i to get lots of threads.
+// We can't test too many, though; we get socket exceptions... possibly due to the
+// javascript console.
+// TODO: Figure out our max bounds here - use fewer threads for now to avoid pinger starvation issues.
+for ( var t = 0; t < 4; t++ ) {
+for ( var i = 4; i < 5; i++ ) {
+
+ // Our force time is 6 seconds, slightly different from sync6 to ensure the exact time is not important
+ var takeoverMS = 6000;
+
+ // Generate valid sleep and skew for this timeout
+ var threadSleepWithLock = takeoverMS / 2;
+ var configServerTimeSkew = [ 0, 0, 0 ]
+ for ( var h = 0; h < 3; h++ ) {
+ // Skew by 1/30th the takeover time either way, at max
+ configServerTimeSkew[h] = ( i + h ) % Math.floor( takeoverMS / 60 )
+ // Make skew pos or neg
+ configServerTimeSkew[h] *= ( ( i + h ) % 2 ) ? -1 : 1;
+ }
+
+ // Build command
+ command = { _testDistLockWithSkew : 1 }
+
+ // Basic test parameters
+ command["lockName"] = "TimeSkewFailNewTest_lock_" + i;
+ command["host"] = test.url
+ command["seed"] = i
+ command["numThreads"] = ( i % 50 ) + 1
+
+ // Critical values so we're sure of correct operation
+ command["takeoverMS"] = takeoverMS
+ command["wait"] = 4 * takeoverMS // so we must force the lock
+ command["skewHosts"] = configServerTimeSkew
+ command["threadWait"] = threadSleepWithLock
+
+ // Less critical test params
+
+ // 1/3 of threads will not release the lock
+ command["hangThreads"] = 3
+ // Amount of time to wait before trying lock again
+ command["threadSleep"] = 1;// ( ( i + 1 ) * 100 ) % (takeoverMS / 4)
+ // Amount of total clock skew possible between locking threads (processes)
+ // This can be large now.
+ command["skewRange"] = ( command["takeoverMS"] * 3 ) * 60 * 1000
+
+ // Double-check our sleep, host skew, and takeoverMS values again
+
+ // At maximum, our threads must sleep only half the lock timeout time.
+ assert( command["threadWait"] <= command["takeoverMS"] / 2 )
+ for ( var h = 0; h < command["skewHosts"].length; h++ ) {
+ // At maximum, our config server time skew needs to be less than 1/30th
+ // the total time skew (1/60th either way).
+ assert( Math.abs( command["skewHosts"][h] ) <= ( command["takeoverMS"] / 60 ) )
+ }
+
+ result = commandConn.getDB( "admin" ).runCommand( command )
+ printjson( result )
+ printjson( command )
+ assert( result.ok, "Skewed threads did not increment correctly." );
+
+}
+}
+
+stopMongoProgram( 30004 )
+test.stop();
diff --git a/jstests/slowWeekly/geo_full.js b/jstests/slowWeekly/geo_full.js
new file mode 100644
index 0000000..9eb1b7a
--- /dev/null
+++ b/jstests/slowWeekly/geo_full.js
@@ -0,0 +1,487 @@
+//
+// Integration test of the geo code
+//
+// Basically, this test adds a random number of docs with a random number of points,
+// given a 2d environment of random precision which is either randomly earth-like or of
+// random bounds, and indexes these points after a random number of points have been added,
+// with a random number of additional fields which correspond to whether the documents are
+// in randomly generated circular, spherical, box, and box-polygon shapes (and exact),
+// queried randomly from a set of query types. Each point is randomly either an object
+// or an array, and all points and document data fields are nested randomly in arrays (or not).
+//
+// We approximate the user here as a random function :-)
+//
+// These random point fields can then be tested against all types of geo queries using these random shapes.
+//
+// Tests can be easily reproduced by getting the test number from the output directly before a
+// test fails, and hard-wiring that as the test number.
+//
+
+
+var randEnvironment = function(){
+
+ // Normal earth environment
+ if( Random.rand() < 0.5 ){
+ return { max : 180,
+ min : -180,
+ bits : Math.floor( Random.rand() * 32 ) + 1,
+ earth : true,
+ bucketSize : 360 / ( 4 * 1024 * 1024 * 1024 ) }
+ }
+
+ var scales = [ 0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000, 100000 ]
+ var scale = scales[ Math.floor( Random.rand() * scales.length ) ]
+ var offset = Random.rand() * scale
+
+ var max = Random.rand() * scale + offset
+ var min = - Random.rand() * scale + offset
+ var bits = Math.floor( Random.rand() * 32 ) + 1
+ var range = max - min
+ var bucketSize = range / ( 4 * 1024 * 1024 * 1024 )
+
+ return { max : max,
+ min : min,
+ bits : bits,
+ earth : false,
+ bucketSize : bucketSize }
+
+}
+
+var randPoint = function( env, query ) {
+
+ if( query && Random.rand() > 0.5 )
+ return query.exact
+
+ if( env.earth )
+ return [ Random.rand() * 360 - 180, Random.rand() * 180 - 90 ]
+
+ var range = env.max - env.min
+ return [ Random.rand() * range + env.min, Random.rand() * range + env.min ];
+}
+
+var randLocType = function( loc, wrapIn ){
+ return randLocTypes( [ loc ], wrapIn )[0]
+}
+
+var randLocTypes = function( locs, wrapIn ) {
+
+ var rLocs = []
+
+ for( var i = 0; i < locs.length; i++ ){
+ if( Random.rand() < 0.5 )
+ rLocs.push( { x : locs[i][0], y : locs[i][1] } )
+ else
+ rLocs.push( locs[i] )
+ }
+
+ if( wrapIn ){
+ var wrappedLocs = []
+ for( var i = 0; i < rLocs.length; i++ ){
+ var wrapper = {}
+ wrapper[wrapIn] = rLocs[i]
+ wrappedLocs.push( wrapper )
+ }
+
+ return wrappedLocs
+ }
+
+ return rLocs
+
+}
+
+var randDataType = function() {
+
+ var scales = [ 1, 10, 100, 1000, 10000 ]
+ var docScale = scales[ Math.floor( Random.rand() * scales.length ) ]
+ var locScale = scales[ Math.floor( Random.rand() * scales.length ) ]
+
+ var numDocs = 40000
+ var maxLocs = 40000
+ // Make sure we don't blow past our test resources
+ while( numDocs * maxLocs > 40000 ){
+ numDocs = Math.floor( Random.rand() * docScale ) + 1
+ maxLocs = Math.floor( Random.rand() * locScale ) + 1
+ }
+
+ return { numDocs : numDocs,
+ maxLocs : maxLocs }
+
+}
+
+var randQuery = function( env ) {
+
+ var center = randPoint( env )
+
+ var sphereRadius = -1
+ var sphereCenter = null
+ if( env.earth ){
+ // Get a start point that doesn't require wrapping
+ // TODO: Are we a bit too aggressive with wrapping issues?
+ sphereRadius = Random.rand() * 45 * Math.PI / 180
+ sphereCenter = randPoint( env )
+ var i
+ for( i = 0; i < 5; i++ ){
+ var t = db.testSphere; t.drop(); t.ensureIndex({ loc : "2d" }, env )
+ try{ t.find({ loc : { $within : { $centerSphere : [ sphereCenter, sphereRadius ] } } } ).count(); var err; if( err = db.getLastError() ) throw err; }
+ catch(e) { print( e ); continue }
+ print( " Radius " + sphereRadius + " and center " + sphereCenter + " ok ! ")
+ break;
+ }
+ if( i == 5 ) sphereRadius = -1;
+
+ }
+
+ var box = [ randPoint( env ), randPoint( env ) ]
+
+ var boxPoly = [[ box[0][0], box[0][1] ],
+ [ box[0][0], box[1][1] ],
+ [ box[1][0], box[1][1] ],
+ [ box[1][0], box[0][1] ] ]
+
+ if( box[0][0] > box[1][0] ){
+ var swap = box[0][0]
+ box[0][0] = box[1][0]
+ box[1][0] = swap
+ }
+
+ if( box[0][1] > box[1][1] ){
+ var swap = box[0][1]
+ box[0][1] = box[1][1]
+ box[1][1] = swap
+ }
+
+ return { center : center,
+ radius : box[1][0] - box[0][0],
+ exact : randPoint( env ),
+ sphereCenter : sphereCenter,
+ sphereRadius : sphereRadius,
+ box : box,
+ boxPoly : boxPoly }
+
+}
+
+
+var resultTypes = {
+"exact" : function( loc ){
+ return query.exact[0] == loc[0] && query.exact[1] == loc[1]
+},
+"center" : function( loc ){
+ return Geo.distance( query.center, loc ) <= query.radius
+},
+"box" : function( loc ){
+ return loc[0] >= query.box[0][0] && loc[0] <= query.box[1][0] &&
+ loc[1] >= query.box[0][1] && loc[1] <= query.box[1][1]
+
+},
+"sphere" : function( loc ){
+ return ( query.sphereRadius >= 0 ? ( Geo.sphereDistance( query.sphereCenter, loc ) <= query.sphereRadius ) : false )
+},
+"poly" : function( loc ){
+ return loc[0] >= query.box[0][0] && loc[0] <= query.box[1][0] &&
+ loc[1] >= query.box[0][1] && loc[1] <= query.box[1][1]
+}}
+
+var queryResults = function( locs, query, results ){
+
+ if( ! results["center"] ){
+ for( var type in resultTypes ){
+ results[type] = {
+ docsIn : 0,
+ docsOut : 0,
+ locsIn : 0,
+ locsOut : 0
+ }
+ }
+ }
+
+ var indResults = {}
+ for( var type in resultTypes ){
+ indResults[type] = {
+ docIn : false,
+ locsIn : 0,
+ locsOut : 0
+ }
+ }
+
+ for( var type in resultTypes ){
+
+ var docIn = false
+ for( var i = 0; i < locs.length; i++ ){
+ if( resultTypes[type]( locs[i] ) ){
+ results[type].locsIn++
+ indResults[type].locsIn++
+ indResults[type].docIn = true
+ }
+ else{
+ results[type].locsOut++
+ indResults[type].locsOut++
+ }
+ }
+ if( indResults[type].docIn ) results[type].docsIn++
+ else results[type].docsOut++
+
+ }
+
+ return indResults
+
+}
+
+var randQueryAdditions = function( doc, indResults ){
+
+ for( var type in resultTypes ){
+ var choice = Random.rand()
+ if( Random.rand() < 0.25 )
+ doc[type] = ( indResults[type].docIn ? { docIn : "yes" } : { docIn : "no" } )
+ else if( Random.rand() < 0.5 )
+ doc[type] = ( indResults[type].docIn ? { docIn : [ "yes" ] } : { docIn : [ "no" ] } )
+ else if( Random.rand() < 0.75 )
+ doc[type] = ( indResults[type].docIn ? [ { docIn : "yes" } ] : [ { docIn : "no" } ] )
+ else
+ doc[type] = ( indResults[type].docIn ? [ { docIn : [ "yes" ] } ] : [ { docIn : [ "no" ] } ] )
+ }
+
+}
+
+var randIndexAdditions = function( indexDoc ){
+
+ for( var type in resultTypes ){
+
+ if( Random.rand() < 0.5 ) continue;
+
+ var choice = Random.rand()
+ if( Random.rand() < 0.5 )
+ indexDoc[type] = 1
+ else
+ indexDoc[type + ".docIn"] = 1
+
+ }
+
+}
+
+var randYesQuery = function(){
+
+ var choice = Math.floor( Random.rand() * 7 )
+ if( choice == 0 )
+ return { $ne : "no" }
+ else if( choice == 1 )
+ return "yes"
+ else if( choice == 2 )
+ return /^yes/
+ else if( choice == 3 )
+ return { $in : [ "good", "yes", "ok" ] }
+ else if( choice == 4 )
+ return { $exists : true }
+ else if( choice == 5 )
+ return { $nin : [ "bad", "no", "not ok" ] }
+ else if( choice == 6 )
+ return { $not : /^no/ }
+}
+
+var locArray = function( loc ){
+ if( loc.x ) return [ loc.x, loc.y ]
+ if( ! loc.length ) return [ loc[0], loc[1] ]
+ return loc
+}
+
+var locsArray = function( locs ){
+ if( locs.loc ){
+ arr = []
+ for( var i = 0; i < locs.loc.length; i++ ) arr.push( locArray( locs.loc[i] ) )
+ return arr
+ }
+ else{
+ arr = []
+ for( var i = 0; i < locs.length; i++ ) arr.push( locArray( locs[i].loc ) )
+ return arr
+ }
+}
+
+var minBoxSize = function( env, box ){
+ return env.bucketSize * Math.pow( 2, minBucketScale( env, box ) )
+}
+
+var minBucketScale = function( env, box ){
+
+ if( box.length && box[0].length )
+ box = [ box[0][0] - box[1][0], box[0][1] - box[1][1] ]
+
+ if( box.length )
+ box = Math.max( box[0], box[1] )
+
+ print( box )
+ print( env.bucketSize )
+
+ return Math.ceil( Math.log( box / env.bucketSize ) / Math.log( 2 ) )
+
+}
+
+// TODO: Add spherical $uniqueDocs tests
+var numTests = 100
+
+// Our seed will change every time this is run, but
+// each individual test will be reproducible given
+// that seed and test number
+var seed = new Date().getTime()
+
+for ( var test = 0; test < numTests; test++ ) {
+
+ Random.srand( seed + test );
+ //Random.srand( 42240 )
+ //Random.srand( 7344 )
+ var t = db.testAllGeo
+ t.drop()
+
+ print( "Generating test environment #" + test )
+ var env = randEnvironment()
+ //env.bits = 11
+ var query = randQuery( env )
+ var data = randDataType()
+ //data.numDocs = 100; data.maxLocs = 3;
+ var results = {}
+ var totalPoints = 0
+ print( "Calculating target results for " + data.numDocs + " docs with max " + data.maxLocs + " locs " )
+
+ // Index after a random number of docs added
+ var indexIt = Math.floor( Random.rand() * data.numDocs )
+
+ for ( var i = 0; i < data.numDocs; i++ ) {
+
+ if( indexIt == i ){
+ var indexDoc = { "locs.loc" : "2d" }
+ randIndexAdditions( indexDoc )
+
+ // printjson( indexDoc )
+
+ t.ensureIndex( indexDoc, env )
+ assert.isnull( db.getLastError() )
+ }
+
+ var numLocs = Math.floor( Random.rand() * data.maxLocs + 1 )
+ totalPoints += numLocs
+
+ var multiPoint = []
+ for ( var p = 0; p < numLocs; p++ ) {
+ var point = randPoint( env, query )
+ multiPoint.push( point )
+ }
+
+ var indResults = queryResults( multiPoint, query, results )
+
+ var doc
+ // Nest the keys differently
+ if( Random.rand() < 0.5 )
+ doc = { locs : { loc : randLocTypes( multiPoint ) } }
+ else
+ doc = { locs : randLocTypes( multiPoint, "loc" ) }
+
+ randQueryAdditions( doc, indResults )
+
+ //printjson( doc )
+ doc._id = i
+ t.insert( doc )
+
+ }
+
+ printjson( { seed : seed,
+ test: test,
+ env : env,
+ query : query,
+ data : data,
+ results : results } )
+
+
+ // exact
+ print( "Exact query..." )
+ assert.eq( results.exact.docsIn, t.find( { "locs.loc" : randLocType( query.exact ), "exact.docIn" : randYesQuery() } ).count() )
+
+ // $center
+ print( "Center query..." )
+ print( "Min box : " + minBoxSize( env, query.radius ) )
+ assert.eq( results.center.docsIn, t.find( { "locs.loc" : { $within : { $center : [ query.center, query.radius ], $uniqueDocs : 1 } }, "center.docIn" : randYesQuery() } ).count() )
+ assert.eq( results.center.locsIn, t.find( { "locs.loc" : { $within : { $center : [ query.center, query.radius ], $uniqueDocs : false } }, "center.docIn" : randYesQuery() } ).count() )
+ if( query.sphereRadius >= 0 ){
+ print( "Center sphere query...")
+ // $centerSphere
+ assert.eq( results.sphere.docsIn, t.find( { "locs.loc" : { $within : { $centerSphere : [ query.sphereCenter, query.sphereRadius ] } }, "sphere.docIn" : randYesQuery() } ).count() )
+ assert.eq( results.sphere.locsIn, t.find( { "locs.loc" : { $within : { $centerSphere : [ query.sphereCenter, query.sphereRadius ], $uniqueDocs : 0.0 } }, "sphere.docIn" : randYesQuery() } ).count() )
+ }
+
+ // $box
+ print( "Box query..." )
+ assert.eq( results.box.docsIn, t.find( { "locs.loc" : { $within : { $box : query.box, $uniqueDocs : true } }, "box.docIn" : randYesQuery() } ).count() )
+ assert.eq( results.box.locsIn, t.find( { "locs.loc" : { $within : { $box : query.box, $uniqueDocs : false } }, "box.docIn" : randYesQuery() } ).count() )
+
+ // $polygon
+ print( "Polygon query..." )
+ assert.eq( results.poly.docsIn, t.find( { "locs.loc" : { $within : { $polygon : query.boxPoly } }, "poly.docIn" : randYesQuery() } ).count() )
+ assert.eq( results.poly.locsIn, t.find( { "locs.loc" : { $within : { $polygon : query.boxPoly, $uniqueDocs : 0 } }, "poly.docIn" : randYesQuery() } ).count() )
+
+ // $near
+ print( "Near query..." )
+ assert.eq( results.center.locsIn > 100 ? 100 : results.center.locsIn, t.find( { "locs.loc" : { $near : query.center, $maxDistance : query.radius } } ).count( true ) )
+
+ if( query.sphereRadius >= 0 ){
+ print( "Near sphere query...")
+ // $centerSphere
+ assert.eq( results.sphere.locsIn > 100 ? 100 : results.sphere.locsIn, t.find( { "locs.loc" : { $nearSphere : query.sphereCenter, $maxDistance : query.sphereRadius } } ).count( true ) )
+ }
+
+
+ // geoNear
+ // results limited by size of objects
+ if( data.maxLocs < 100 ){
+
+ // GeoNear query
+ print( "GeoNear query..." )
+ assert.eq( results.center.locsIn > 100 ? 100 : results.center.locsIn, t.getDB().runCommand({ geoNear : "testAllGeo", near : query.center, maxDistance : query.radius }).results.length )
+ // GeoNear query
+ assert.eq( results.center.docsIn > 100 ? 100 : results.center.docsIn, t.getDB().runCommand({ geoNear : "testAllGeo", near : query.center, maxDistance : query.radius, uniqueDocs : true }).results.length )
+
+
+ var num = 2 * results.center.locsIn;
+ if( num > 200 ) num = 200;
+
+ var output = db.runCommand( {
+ geoNear : "testAllGeo",
+ near : query.center,
+ maxDistance : query.radius ,
+ includeLocs : true,
+ num : num } ).results
+
+ assert.eq( Math.min( 200, results.center.locsIn ), output.length )
+
+ var distance = 0;
+ for ( var i = 0; i < output.length; i++ ) {
+ var retDistance = output[i].dis
+ var retLoc = locArray( output[i].loc )
+
+ // print( "Dist from : " + results[i].loc + " to " + startPoint + " is "
+ // + retDistance + " vs " + radius )
+
+ var arrLocs = locsArray( output[i].obj.locs )
+
+ assert.contains( retLoc, arrLocs )
+
+ // printjson( arrLocs )
+
+ var distInObj = false
+ for ( var j = 0; j < arrLocs.length && distInObj == false; j++ ) {
+ var newDistance = Geo.distance( locArray( query.center ) , arrLocs[j] )
+ distInObj = ( newDistance >= retDistance - 0.0001 && newDistance <= retDistance + 0.0001 )
+ }
+
+ assert( distInObj )
+ assert.between( retDistance - 0.0001 , Geo.distance( locArray( query.center ), retLoc ), retDistance + 0.0001 )
+ assert.lte( retDistance, query.radius )
+ assert.gte( retDistance, distance )
+ distance = retDistance
+ }
+
+ }
+
+ //break;
+
+
+}
+
+
diff --git a/jstests/slowWeekly/geo_mnypts_plus_fields.js b/jstests/slowWeekly/geo_mnypts_plus_fields.js
new file mode 100644
index 0000000..f67e49b
--- /dev/null
+++ b/jstests/slowWeekly/geo_mnypts_plus_fields.js
@@ -0,0 +1,98 @@
+// Test sanity of geo queries with a lot of points
+
+var maxFields = 2;
+
+for( var fields = 1; fields < maxFields; fields++ ){
+
+ var coll = db.testMnyPts
+ coll.drop()
+
+ var totalPts = 500 * 1000
+
+ // Add points in a 100x100 grid
+ for( var i = 0; i < totalPts; i++ ){
+ var ii = i % 10000
+
+ var doc = { loc : [ ii % 100, Math.floor( ii / 100 ) ] }
+
+ // Add fields with different kinds of data
+ for( var j = 0; j < fields; j++ ){
+
+ var field = null
+
+ if( j % 3 == 0 ){
+ // Make half the points not searchable
+ field = "abcdefg" + ( i % 2 == 0 ? "h" : "" )
+ }
+ else if( j % 3 == 1 ){
+ field = new Date()
+ }
+ else{
+ field = true
+ }
+
+ doc[ "field" + j ] = field
+ }
+
+ coll.insert( doc )
+ }
+
+ // Create the query for the additional fields
+ queryFields = {}
+ for( var j = 0; j < fields; j++ ){
+
+ var field = null
+
+ if( j % 3 == 0 ){
+ field = "abcdefg"
+ }
+ else if( j % 3 == 1 ){
+ field = { $lte : new Date() }
+ }
+ else{
+ field = true
+ }
+
+ queryFields[ "field" + j ] = field
+ }
+
+ coll.ensureIndex({ loc : "2d" })
+
+ // Check that a quarter of the points fall in each quadrant
+ for( var i = 0; i < 4; i++ ){
+ var x = i % 2
+ var y = Math.floor( i / 2 )
+
+ var box = [[0, 0], [49, 49]]
+ box[0][0] += ( x == 1 ? 50 : 0 )
+ box[1][0] += ( x == 1 ? 50 : 0 )
+ box[0][1] += ( y == 1 ? 50 : 0 )
+ box[1][1] += ( y == 1 ? 50 : 0 )
+
+ // Now only half of each result comes back
+ assert.eq( totalPts / ( 4 * 2 ), coll.find(Object.extend( { loc : { $within : { $box : box } } }, queryFields ) ).count() )
+ assert.eq( totalPts / ( 4 * 2 ), coll.find(Object.extend( { loc : { $within : { $box : box } } }, queryFields ) ).itcount() )
+
+ }
+
+ // Check that half of the points fall in each half
+ for( var i = 0; i < 2; i++ ){
+
+ var box = [[0, 0], [49, 99]]
+ box[0][0] += ( i == 1 ? 50 : 0 )
+ box[1][0] += ( i == 1 ? 50 : 0 )
+
+ assert.eq( totalPts / ( 2 * 2 ), coll.find(Object.extend( { loc : { $within : { $box : box } } }, queryFields ) ).count() )
+ assert.eq( totalPts / ( 2 * 2 ), coll.find(Object.extend( { loc : { $within : { $box : box } } }, queryFields ) ).itcount() )
+
+ }
+
+ // Check that all but the corner set of points fall within the radius
+ var circle = [[0, 0], (100 - 1) * Math.sqrt( 2 ) - 0.25 ]
+
+ // All [99,x] pts are field0 : "abcdefg"
+ assert.eq( totalPts / 2 - totalPts / ( 100 * 100 ), coll.find(Object.extend( { loc : { $within : { $center : circle } } }, queryFields ) ).count() )
+ assert.eq( totalPts / 2 - totalPts / ( 100 * 100 ), coll.find(Object.extend( { loc : { $within : { $center : circle } } }, queryFields ) ).itcount() )
+
+}
+
diff --git a/jstests/slowWeekly/query_yield2.js b/jstests/slowWeekly/query_yield2.js
index dd7e5d9..6d06357 100644
--- a/jstests/slowWeekly/query_yield2.js
+++ b/jstests/slowWeekly/query_yield2.js
@@ -59,7 +59,7 @@ while ( ( (new Date()).getTime() - start ) < ( time * 2 ) ){
assert.eq( 1 , x.inprog.length , "nothing in prog" );
}
- assert.gt( 100 , me );
+ assert.gt( 200 , me );
if ( x.inprog.length == 0 )
break;
diff --git a/jstests/slowWeekly/repair2.js b/jstests/slowWeekly/repair2.js
new file mode 100644
index 0000000..3097d81
--- /dev/null
+++ b/jstests/slowWeekly/repair2.js
@@ -0,0 +1,29 @@
+// SERVER-2843 The repair command should not yield.
+
+baseName = "jstests_repair2";
+
+t = db.getSisterDB( baseName )[ baseName ];
+t.drop();
+
+function protect( f ) {
+ try {
+ f();
+ } catch( e ) {
+ printjson( e );
+ }
+}
+
+s = startParallelShell( "db = db.getSisterDB( '" + baseName + "'); for( i = 0; i < 10; ++i ) { db.repairDatabase(); sleep( 5000 ); }" );
+
+for( i = 0; i < 30; ++i ) {
+
+ for( j = 0; j < 5000; ++j ) {
+ protect( function() { t.insert( {_id:j} ); } );
+ }
+
+ for( j = 0; j < 5000; ++j ) {
+ protect( function() { t.remove( {_id:j} ); } );
+ }
+
+ assert.eq( 0, t.count() );
+} \ No newline at end of file
diff --git a/jstests/slowWeekly/update_yield1.js b/jstests/slowWeekly/update_yield1.js
index 7e95855..5f71830 100644
--- a/jstests/slowWeekly/update_yield1.js
+++ b/jstests/slowWeekly/update_yield1.js
@@ -54,7 +54,7 @@ while ( ( (new Date()).getTime() - start ) < ( time * 2 ) ){
assert.eq( 1 , x.inprog.length , "nothing in prog" );
}
- assert.gt( 2000 , me );
+ assert.gt( time / 3 , me );
}
join();
diff --git a/jstests/sort10.js b/jstests/sort10.js
new file mode 100644
index 0000000..74a2a2d
--- /dev/null
+++ b/jstests/sort10.js
@@ -0,0 +1,48 @@
+// signed dates check
+t = db.sort2;
+
+function checkSorting1(opts) {
+ t.drop();
+ t.insert({ x: new Date(50000) });
+ t.insert({ x: new Date(-50) });
+ var d = new Date(-50);
+ for (var pass = 0; pass < 2; pass++) {
+ assert(t.find().sort({x:1})[0].x.valueOf() == d.valueOf());
+ t.ensureIndex({ x: 1 }, opts);
+ t.insert({ x: new Date() });
+ }
+}
+
+checkSorting1({})
+checkSorting1({"background":true})
+
+
+
+function checkSorting2(dates, sortOrder) {
+ cur = t.find().sort({x:sortOrder});
+ assert.eq(dates.length, cur.count(), "Incorrect number of results returned");
+ index = 0;
+ while (cur.hasNext()) {
+ date = cur.next().x;
+ assert.eq(dates[index].valueOf(), date.valueOf());
+ index++;
+ }
+}
+
+t.drop();
+dates = [new Date(-5000000000000), new Date(5000000000000), new Date(0), new Date(5), new Date(-5)];
+for (var i = 0; i < dates.length; i++) {
+ t.insert({x:dates[i]});
+}
+dates.sort(function(a,b){return a - b});
+reverseDates = dates.slice(0).reverse()
+
+checkSorting2(dates, 1)
+checkSorting2(reverseDates, -1)
+t.ensureIndex({x:1})
+checkSorting2(dates, 1)
+checkSorting2(reverseDates, -1)
+t.dropIndexes()
+t.ensureIndex({x:-1})
+checkSorting2(dates, 1)
+checkSorting2(reverseDates, -1)
diff --git a/jstests/sort2.js b/jstests/sort2.js
index 1e21414..6dfa848 100644
--- a/jstests/sort2.js
+++ b/jstests/sort2.js
@@ -1,22 +1,32 @@
// test sorting, mainly a test ver simple with no index
t = db.sort2;
-t.drop();
+t.drop();
t.save({x:1, y:{a:5,b:4}});
t.save({x:1, y:{a:7,b:3}});
t.save({x:1, y:{a:2,b:3}});
t.save({x:1, y:{a:9,b:3}});
-
for( var pass = 0; pass < 2; pass++ ) {
-
var res = t.find().sort({'y.a':1}).toArray();
assert( res[0].y.a == 2 );
assert( res[1].y.a == 5 );
assert( res.length == 4 );
-
t.ensureIndex({"y.a":1});
-
}
-
assert(t.validate().valid);
+
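+// Check sort order of NaN, +/-Infinity and large values, with and without an index.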
+t.drop();
+t.insert({ x: 1 })
+t.insert({ x: 5000000000 })
+t.insert({ x: NaN });
+t.insert({ x: Infinity });
+t.insert({ x: -Infinity });
+var good = [NaN, -Infinity, 1, 5000000000, Infinity];
+for (var pass = 0; pass < 2; pass++) {
+ var res = t.find({}, { _id: 0 }).sort({ x: 1 }).toArray();
+ for (var i = 0; i < good.length; i++) {
+ assert(good[i].toString() == res[i].x.toString());
+ }
+ t.ensureIndex({ x : 1 });
+}
diff --git a/jstests/sort7.js b/jstests/sort7.js
new file mode 100644
index 0000000..d73f13a
--- /dev/null
+++ b/jstests/sort7.js
@@ -0,0 +1,25 @@
+// Check sorting of array sub field SERVER-480.
+
+t = db.jstests_sort7;
+t.drop();
+
+// Compare indexed and unindexed sort order for an array embedded field.
+
+t.save( { a : [ { x : 2 } ] } );
+t.save( { a : [ { x : 1 } ] } );
+t.save( { a : [ { x : 3 } ] } );
+unindexed = t.find().sort( {"a.x":1} ).toArray();
+t.ensureIndex( { "a.x" : 1 } );
+indexed = t.find().sort( {"a.x":1} ).hint( {"a.x":1} ).toArray();
+assert.eq( unindexed, indexed );
+
+// Now check when there are two objects in the array.
+
+t.remove();
+t.save( { a : [ { x : 2 }, { x : 3 } ] } );
+t.save( { a : [ { x : 1 }, { x : 4 } ] } );
+t.save( { a : [ { x : 3 }, { x : 2 } ] } );
+unindexed = t.find().sort( {"a.x":1} ).toArray();
+t.ensureIndex( { "a.x" : 1 } );
+indexed = t.find().sort( {"a.x":1} ).hint( {"a.x":1} ).toArray();
+assert.eq( unindexed, indexed );
diff --git a/jstests/sort8.js b/jstests/sort8.js
new file mode 100644
index 0000000..169195b
--- /dev/null
+++ b/jstests/sort8.js
@@ -0,0 +1,30 @@
+// Check sorting of arrays indexed by key SERVER-2884
+
+t = db.jstests_sort8;
+t.drop();
+
+t.save( {a:[1,10]} );
+t.save( {a:5} );
+unindexedForward = t.find().sort( {a:1} ).toArray();
+unindexedReverse = t.find().sort( {a:-1} ).toArray();
+t.ensureIndex( {a:1} );
+indexedForward = t.find().sort( {a:1} ).hint( {a:1} ).toArray();
+indexedReverse = t.find().sort( {a:-1} ).hint( {a:1} ).toArray();
+
+assert.eq( unindexedForward, indexedForward );
+assert.eq( unindexedReverse, indexedReverse );
+
+// Sorting is based on array members, not the array itself.
+assert.eq( [1,10], unindexedForward[ 0 ].a );
+assert.eq( [1,10], unindexedReverse[ 0 ].a );
+
+// Now try with a bounds constraint.
+t.dropIndexes();
+unindexedForward = t.find({a:{$gte:5}}).sort( {a:1} ).toArray();
+unindexedReverse = t.find({a:{$lte:5}}).sort( {a:-1} ).toArray();
+t.ensureIndex( {a:1} );
+indexedForward = t.find({a:{$gte:5}}).sort( {a:1} ).hint( {a:1} ).toArray();
+indexedReverse = t.find({a:{$lte:5}}).sort( {a:-1} ).hint( {a:1} ).toArray();
+
+assert.eq( unindexedForward, indexedForward );
+assert.eq( unindexedReverse, indexedReverse );
diff --git a/jstests/sort9.js b/jstests/sort9.js
new file mode 100644
index 0000000..62407d6
--- /dev/null
+++ b/jstests/sort9.js
@@ -0,0 +1,26 @@
+// Unindexed array sorting SERVER-2884
+
+t = db.jstests_sort9;
+t.drop();
+
+t.save( {a:[]} );
+t.save( {a:[[]]} );
+assert.eq( 2, t.find( {a:{$ne:4}} ).sort( {a:1} ).itcount() );
+assert.eq( 2, t.find( {'a.b':{$ne:4}} ).sort( {'a.b':1} ).itcount() );
+assert.eq( 2, t.find( {a:{$ne:4}} ).sort( {'a.b':1} ).itcount() );
+
+t.drop();
+t.save( {} );
+assert.eq( 1, t.find( {a:{$ne:4}} ).sort( {a:1} ).itcount() );
+assert.eq( 1, t.find( {'a.b':{$ne:4}} ).sort( {'a.b':1} ).itcount() );
+assert.eq( 1, t.find( {a:{$ne:4}} ).sort( {'a.b':1} ).itcount() );
+assert.eq( 1, t.find( {a:{$exists:0}} ).sort( {a:1} ).itcount() );
+assert.eq( 1, t.find( {a:{$exists:0}} ).sort( {'a.b':1} ).itcount() );
+
+t.drop();
+t.save( {a:{}} );
+assert.eq( 1, t.find( {a:{$ne:4}} ).sort( {a:1} ).itcount() );
+assert.eq( 1, t.find( {'a.b':{$ne:4}} ).sort( {'a.b':1} ).itcount() );
+assert.eq( 1, t.find( {a:{$ne:4}} ).sort( {'a.b':1} ).itcount() );
+assert.eq( 1, t.find( {'a.b':{$exists:0}} ).sort( {a:1} ).itcount() );
+assert.eq( 1, t.find( {'a.b':{$exists:0}} ).sort( {'a.b':1} ).itcount() );
diff --git a/jstests/sorta.js b/jstests/sorta.js
new file mode 100644
index 0000000..7c82778
--- /dev/null
+++ b/jstests/sorta.js
@@ -0,0 +1,26 @@
+// SERVER-2905 sorting with missing fields
+
+t = db.jstests_sorta;
+t.drop();
+
+// Enable _allow_dot to try and bypass v8 field name checking.
+t.insert( {_id:0,a:MinKey}, true );
+t.save( {_id:3,a:null} );
+t.save( {_id:1,a:[]} );
+t.save( {_id:7,a:[2]} );
+t.save( {_id:4} );
+t.save( {_id:5,a:null} );
+t.save( {_id:2,a:[]} );
+t.save( {_id:6,a:1} );
+t.insert( {_id:8,a:MaxKey}, true );
+
+function sorted( arr ) {
+ assert.eq( 9, arr.length );
+ for( i = 1; i < arr.length; ++i ) {
+ assert.lte( arr[ i-1 ]._id, arr[ i ]._id );
+ }
+}
+
+sorted( t.find().sort( {a:1} ).toArray() );
+t.ensureIndex( {a:1} );
+sorted( t.find().sort( {a:1} ).hint( {a:1} ).toArray() );
diff --git a/jstests/tool/csv1.js b/jstests/tool/csv1.js
index ccf1d09..5eb7ab0 100644
--- a/jstests/tool/csv1.js
+++ b/jstests/tool/csv1.js
@@ -4,25 +4,25 @@ t = new ToolTest( "csv1" )
c = t.startDB( "foo" );
-base = { a : 1 , b : "foo,bar\"baz,qux" , c: 5, 'd d': -6 , e: '-'};
+base = { a : 1 , b : "foo,bar\"baz,qux" , c: 5, 'd d': -6 , e: '-', f : "."};
assert.eq( 0 , c.count() , "setup1" );
c.insert( base );
delete base._id
assert.eq( 1 , c.count() , "setup2" );
-t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--csv" , "-f" , "a,b,c,d d,e" )
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--csv" , "-f" , "a,b,c,d d,e,f" )
c.drop()
assert.eq( 0 , c.count() , "after drop" )
-t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--type" , "csv" , "-f" , "a,b,c,d d,e" );
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--type" , "csv" , "-f" , "a,b,c,d d,e,f" );
assert.soon( "2 == c.count()" , "restore 2" );
a = c.find().sort( { a : 1 } ).toArray();
delete a[0]._id
delete a[1]._id
-assert.eq( tojson( { a : "a" , b : "b" , c : "c" , 'd d': "d d", e: 'e'} ) , tojson( a[1] ) , "csv parse 1" );
+assert.eq( tojson( { a : "a" , b : "b" , c : "c" , 'd d': "d d", e: 'e', f : "f"} ) , tojson( a[1] ) , "csv parse 1" );
assert.eq( tojson( base ) , tojson(a[0]) , "csv parse 0" )
c.drop()
diff --git a/jstests/tool/csvexport1.js b/jstests/tool/csvexport1.js
new file mode 100644
index 0000000..eb4e6e3
--- /dev/null
+++ b/jstests/tool/csvexport1.js
@@ -0,0 +1,45 @@
+// csvexport1.js
+
+t = new ToolTest( "csvexport1" )
+
+c = t.startDB( "foo" );
+
+assert.eq( 0 , c.count() , "setup1" );
+
+objId = ObjectId()
+
+c.insert({ a : new NumberInt(1) , b : objId , c: [1, 2, 3], d : {a : "hello", b : "world"} , e: '-'})
+c.insert({ a : -2.0, c : MinKey, d : "Then he said, \"Hello World!\"", e : new NumberLong(3)})
+c.insert({ a : new BinData(0, "1234"), b : ISODate("2009-08-27"), c : new Timestamp(1234, 9876), d : /foo*\"bar\"/i, e : function foo() { print("Hello World!"); }})
+
+assert.eq( 3 , c.count() , "setup2" );
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--csv", "-f", "a,b,c,d,e")
+
+
+c.drop()
+
+assert.eq( 0 , c.count() , "after drop" )
+
+t.runTool("import", "--file", t.extFile, "-d", t.baseName, "-c", "foo", "--type", "csv", "--headerline");
+
+assert.soon ( 3 + " == c.count()", "after import");
+
+// Note: Exporting and Importing to/from CSV is not designed to be round-trippable
+expected = []
+expected.push({ a : 1, b : "ObjectID(" + objId.toString() + ")", c : "[ 1, 2, 3 ]", d : "{ \"a\" : \"hello\", \"b\" : \"world\" }", e : "-"})
+expected.push({ a : -2.0, b : "", c : "$MinKey", d : "Then he said, \"Hello World!\"", e : 3})
+expected.push({ a : "D76DF8", b : "2009-08-27T00:00:00Z", c : "{ \"t\" : 1000 , \"i\" : 9876 }", d : "/foo*\\\"bar\\\"/i", e : tojson(function foo() { print("Hello World!"); })})
+
+actual = []
+actual.push(c.find({a : 1}).toArray()[0]);
+actual.push(c.find({a : -2.0}).toArray()[0]);
+actual.push(c.find({a : "D76DF8"}).toArray()[0]);
+
+for (i = 0; i < expected.length; i++) {
+ delete actual[i]._id
+ assert.eq( expected[i], actual[i], "CSV export " + i);
+}
+
+
+t.stop() \ No newline at end of file
diff --git a/jstests/tool/csvexport2.js b/jstests/tool/csvexport2.js
new file mode 100644
index 0000000..3e0dd2c
--- /dev/null
+++ b/jstests/tool/csvexport2.js
@@ -0,0 +1,31 @@
+// csvexport2.js
+
+t = new ToolTest( "csvexport2" )
+
+c = t.startDB( "foo" );
+
+// This test is designed to test exporting of a CodeWithScope object.
+// However, due to SERVER-3391, it is not possible to create a CodeWithScope object in the mongo shell,
+// so this test is disabled. Once SERVER-3391 is resolved, this test should be uncommented.
+
+//assert.eq( 0 , c.count() , "setup1" );
+
+//c.insert({ a : 1 , b : Code("print(\"Hello \" + x);", {"x" : "World!"})})
+//assert.eq( 1 , c.count() , "setup2" );
+//t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--csv", "-f", "a,b")
+
+
+//c.drop()
+
+//assert.eq( 0 , c.count() , "after drop" )
+//t.runTool("import", "--file", t.extFile, "-d", t.baseName, "-c", "foo", "--type", "csv", "--headerline");
+//assert.soon ( 1 + " == c.count()", "after import");
+
+//expected = { a : 1, b : "\"{ \"$code\" : print(\"Hello \" + x); , \"$scope\" : { \"x\" : \"World!\" } }"};
+//actual = c.findOne()
+
+//delete actual._id;
+//assert.eq( expected, actual );
+
+
+t.stop() \ No newline at end of file
diff --git a/jstests/tool/csvimport1.js b/jstests/tool/csvimport1.js
new file mode 100644
index 0000000..3bff111
--- /dev/null
+++ b/jstests/tool/csvimport1.js
@@ -0,0 +1,40 @@
+// csvimport1.js
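+// Test CSV import of quoted fields with embedded newlines, commas, quotes, and empty entries.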
+
+t = new ToolTest( "csvimport1" )
+
+c = t.startDB( "foo" );
+
+base = []
+base.push({ a : 1, b : "this is some text.\nThis text spans multiple lines, and just for fun\ncontains a comma", "c" : "This has leading and trailing whitespace!" })
+base.push({a : 2, b : "When someone says something you \"put it in quotes\"", "c" : "I like embedded quotes/slashes\\backslashes" })
+base.push({a : 3, b : " This line contains the empty string and has leading and trailing whitespace inside the quotes! ", "c" : "" })
+base.push({a : 4, b : "", "c" : "How are empty entries handled?" })
+base.push({a : 5, b : "\"\"", c : "\"This string is in quotes and contains empty quotes (\"\")\""})
+base.push({ a : "a" , b : "b" , c : "c"})
+
+assert.eq( 0 , c.count() , "setup" );
+
+t.runTool( "import" , "--file" , "jstests/tool/data/csvimport1.csv" , "-d" , t.baseName , "-c" , "foo" , "--type" , "csv" , "-f" , "a,b,c" );
+assert.soon( base.length + " == c.count()" , "after import 1 " );
+
+a = c.find().sort( { a : 1 } ).toArray();
+for (i = 0; i < base.length; i++ ) {
+ delete a[i]._id
+ assert.eq( tojson(base[i]), tojson(a[i]), "csv parse " + i)
+}
+
+c.drop()
+assert.eq( 0 , c.count() , "after drop" )
+
+t.runTool( "import" , "--file" , "jstests/tool/data/csvimport1.csv" , "-d" , t.baseName , "-c" , "foo" , "--type" , "csv" , "--headerline" )
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( base.length - 1 , c.count() , "after import 2" );
+
+x = c.find().sort( { a : 1 } ).toArray();
+for (i = 0; i < base.length - 1; i++ ) {
+ delete x[i]._id
+ assert.eq( tojson(base[i]), tojson(x[i]), "csv parse with headerline " + i)
+}
+
+
+t.stop()
diff --git a/jstests/tool/data/a.tsv b/jstests/tool/data/a.tsv
new file mode 100644
index 0000000..1e09417
--- /dev/null
+++ b/jstests/tool/data/a.tsv
@@ -0,0 +1,2 @@
+a b c d e
+ 1 foobar 5 -6
diff --git a/jstests/tool/data/csvimport1.csv b/jstests/tool/data/csvimport1.csv
new file mode 100644
index 0000000..256d40a
--- /dev/null
+++ b/jstests/tool/data/csvimport1.csv
@@ -0,0 +1,8 @@
+a,b,c
+1,"this is some text.
+This text spans multiple lines, and just for fun
+contains a comma", "This has leading and trailing whitespace!"
+2, "When someone says something you ""put it in quotes""", I like embedded quotes/slashes\backslashes
+ 3 , " This line contains the empty string and has leading and trailing whitespace inside the quotes! ", ""
+ "4" ,, How are empty entries handled?
+"5","""""", """This string is in quotes and contains empty quotes ("""")"""
diff --git a/jstests/tool/data/dumprestore6/foo.bson b/jstests/tool/data/dumprestore6/foo.bson
new file mode 100644
index 0000000..b8f8f99
--- /dev/null
+++ b/jstests/tool/data/dumprestore6/foo.bson
Binary files differ
diff --git a/jstests/tool/data/dumprestore6/system.indexes.bson b/jstests/tool/data/dumprestore6/system.indexes.bson
new file mode 100644
index 0000000..dde25da
--- /dev/null
+++ b/jstests/tool/data/dumprestore6/system.indexes.bson
Binary files differ
diff --git a/jstests/tool/dumprestore5.js b/jstests/tool/dumprestore5.js
new file mode 100644
index 0000000..ce28fea
--- /dev/null
+++ b/jstests/tool/dumprestore5.js
@@ -0,0 +1,36 @@
+// dumprestore5.js
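+// Test that mongodump/mongorestore preserve user documents (system.users) and indexes.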
+
+t = new ToolTest( "dumprestore5" );
+
+t.startDB( "foo" );
+
+db = t.db
+
+db.addUser('user','password')
+
+assert.eq(1, db.system.users.count(), "setup")
+assert.eq(1, db.system.indexes.count(), "setup2")
+
+t.runTool( "dump" , "--out" , t.ext );
+
+db.dropDatabase()
+
+assert.eq(0, db.system.users.count(), "didn't drop users")
+assert.eq(0, db.system.indexes.count(), "didn't drop indexes")
+
+t.runTool("restore", "--dir", t.ext)
+
+assert.soon("db.system.users.findOne()", "no data after restore");
+assert.eq(1, db.system.users.find({user:'user'}).count(), "didn't restore users")
+assert.eq(1, db.system.indexes.count(), "didn't restore indexes")
+
+db.removeUser('user')
+db.addUser('user2', 'password2')
+
+t.runTool("restore", "--dir", t.ext, "--drop")
+
+assert.soon("1 == db.system.users.find({user:'user'}).count()", "didn't restore users 2")
+assert.eq(0, db.system.users.find({user:'user2'}).count(), "didn't drop users")
+assert.eq(1, db.system.indexes.count(), "didn't maintain indexes")
+
+t.stop();
diff --git a/jstests/tool/dumprestore6.js b/jstests/tool/dumprestore6.js
new file mode 100644
index 0000000..d8b349e
--- /dev/null
+++ b/jstests/tool/dumprestore6.js
@@ -0,0 +1,27 @@
+// dumprestore6.js
+// Test restoring from a dump with an old index version
+
+t = new ToolTest( "dumprestore6" );
+
+c = t.startDB( "foo" );
+db = t.db
+assert.eq( 0 , c.count() , "setup1" );
+
+t.runTool("restore", "--dir", "jstests/tool/data/dumprestore6", "--db", "jstests_tool_dumprestore6")
+
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( 1 , c.count() , "after restore" );
+assert.eq( 1 , db.system.indexes.findOne({name:'a_1'}).v, "index version wasn't updated")
+assert.eq( 1, c.count({v:0}), "dropped the 'v' field from a non-index collection")
+
+db.dropDatabase()
+assert.eq( 0 , c.count() , "after drop" );
+
+t.runTool("restore", "--dir", "jstests/tool/data/dumprestore6", "--db", "jstests_tool_dumprestore6", "--keepIndexVersion")
+
+assert.soon( "c.findOne()" , "no data after sleep2" );
+assert.eq( 1 , c.count() , "after restore2" );
+assert.eq( 0 , db.system.indexes.findOne({name:'a_1'}).v, "index version wasn't maintained")
+assert.eq( 1, c.count({v:0}), "dropped the 'v' field from a non-index collection")
+
+t.stop();
diff --git a/jstests/tool/exportimport1.js b/jstests/tool/exportimport1.js
index 915adcd..451078e 100644
--- a/jstests/tool/exportimport1.js
+++ b/jstests/tool/exportimport1.js
@@ -4,7 +4,8 @@ t = new ToolTest( "exportimport1" );
c = t.startDB( "foo" );
assert.eq( 0 , c.count() , "setup1" );
-c.save( { a : 22 } );
+var arr = ["x", undefined, "y", undefined];
+c.save( { a : 22 , b : arr} );
assert.eq( 1 , c.count() , "setup2" );
t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
@@ -15,8 +16,11 @@ assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );;
t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
assert.soon( "c.findOne()" , "no data after sleep" );
assert.eq( 1 , c.count() , "after restore 2" );
-assert.eq( 22 , c.findOne().a , "after restore 2" );
-
+var doc = c.findOne();
+assert.eq( 22 , doc.a , "after restore 2" );
+for (var i=0; i<arr.length; i++) {
+ assert.eq( arr[i], doc.b[i] , "after restore array: "+i );
+}
// now with --jsonArray
@@ -30,4 +34,23 @@ assert.soon( "c.findOne()" , "no data after sleep" );
assert.eq( 1 , c.count() , "after restore 2" );
assert.eq( 22 , c.findOne().a , "after restore 2" );
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+
+arr = ["a", undefined, "c"];
+c.save({a : arr});
+assert.eq( 1 , c.count() , "setup2" );
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );;
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( 1 , c.count() , "after restore 2" );
+var doc = c.findOne();
+for (var i=0; i<arr.length; i++) {
+ assert.eq( arr[i], doc.a[i] , "after restore array: "+i );
+}
+
+
t.stop();
diff --git a/jstests/tool/tsv1.js b/jstests/tool/tsv1.js
new file mode 100644
index 0000000..1b0ddbb
--- /dev/null
+++ b/jstests/tool/tsv1.js
@@ -0,0 +1,32 @@
+// tsv1.js
+
+t = new ToolTest( "tsv1" )
+
+c = t.startDB( "foo" );
+
+base = { a : "", b : 1 , c : "foobar" , d: 5, e: -6 };
+
+t.runTool( "import" , "--file" , "jstests/tool/data/a.tsv" , "-d" , t.baseName , "-c" , "foo" , "--type" , "tsv" , "-f" , "a,b,c,d,e" );
+assert.soon( "2 == c.count()" , "restore 2" );
+
+a = c.find().sort( { a : 1 } ).toArray();
+delete a[0]._id
+delete a[1]._id
+
+assert.eq( { a : "a" , b : "b" , c : "c" , d: "d", e: "e"} , a[1] , "tsv parse 1" );
+assert.eq( base , a[0] , "tsv parse 0" )
+
+c.drop()
+assert.eq( 0 , c.count() , "after drop 2" )
+
+t.runTool( "import" , "--file" , "jstests/tool/data/a.tsv" , "-d" , t.baseName , "-c" , "foo" , "--type" , "tsv" , "--headerline" )
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( 1 , c.count() , "after restore 2" );
+
+x = c.findOne()
+delete x._id;
+assert.eq( base , x , "tsv parse 2" )
+
+
+
+t.stop()
diff --git a/jstests/type2.js b/jstests/type2.js
new file mode 100644
index 0000000..820607e
--- /dev/null
+++ b/jstests/type2.js
@@ -0,0 +1,19 @@
+// SERVER-1735 $type:10 matches null value, not missing value.
+
+t = db.jstests_type2;
+t.drop();
+
+t.save( {a:null} );
+t.save( {} );
+t.save( {a:'a'} );
+
+function test() {
+ assert.eq( 2, t.count( {a:null} ) );
+ assert.eq( 1, t.count( {a:{$type:10}} ) );
+ assert.eq( 2, t.count( {a:{$exists:true}} ) );
+ assert.eq( 1, t.count( {a:{$exists:false}} ) );
+}
+
+test();
+t.ensureIndex( {a:1} );
+test(); \ No newline at end of file
diff --git a/jstests/type3.js b/jstests/type3.js
new file mode 100644
index 0000000..b16502b
--- /dev/null
+++ b/jstests/type3.js
@@ -0,0 +1,68 @@
+// Check query type bracketing SERVER-3222
+
+t = db.jstests_type3;
+t.drop();
+
+t.ensureIndex( {a:1} );
+
+// Type Object
+t.save( {a:{'':''}} );
+assert.eq( 1, t.find( {a:{$type:3}} ).hint( {a:1} ).itcount() );
+
+// Type Array
+t.remove();
+t.save( {a:[['c']]} );
+assert.eq( 1, t.find( {a:{$type:4}} ).hint( {a:1} ).itcount() );
+
+// Type RegEx
+t.remove();
+t.save( {a:/r/} );
+assert.eq( 1, t.find( {a:{$type:11}} ).hint( {a:1} ).itcount() );
+
+// Type jstNULL
+t.remove();
+assert.eq( [[null,null]], t.find( {a:{$type:10}} ).hint( {a:1} ).explain().indexBounds.a );
+
+// Type Undefined
+t.remove();
+// 'null' is the client friendly version of undefined.
+assert.eq( [[null,null]], t.find( {a:{$type:6}} ).hint( {a:1} ).explain().indexBounds.a );
+
+t.save( {a:undefined} );
+assert.eq( 1, t.find( {a:{$type:6}} ).hint( {a:1} ).itcount() );
+
+// This one won't be returned.
+t.save( {a:null} );
+assert.eq( 1, t.find( {a:{$type:6}} ).hint( {a:1} ).itcount() );
+
+t.remove();
+// Type MinKey
+assert.eq( [[{$minElement:1},{$minElement:1}]], t.find( {a:{$type:-1}} ).hint( {a:1} ).explain().indexBounds.a );
+// Type MaxKey
+assert.eq( [[{$maxElement:1},{$maxElement:1}]], t.find( {a:{$type:127}} ).hint( {a:1} ).explain().indexBounds.a );
+
+// Type Timestamp
+t.remove();
+t.save( {a:new Timestamp()} );
+assert.eq( 1, t.find( {a:{$type:17}} ).itcount() );
+if ( 0 ) { // SERVER-3304
+assert.eq( 0, t.find( {a:{$type:9}} ).itcount() );
+}
+
+// Type Date
+t.remove();
+t.save( {a:new Date()} );
+if ( 0 ) { // SERVER-3304
+assert.eq( 0, t.find( {a:{$type:17}} ).itcount() );
+}
+assert.eq( 1, t.find( {a:{$type:9}} ).itcount() );
+
+// Type Code
+t.remove();
+t.save( {a:function(){var a = 0;}} );
+assert.eq( 1, t.find( {a:{$type:13}} ).itcount() );
+
+// Type BinData
+t.remove();
+t.save( {a:new BinData(0,'')} );
+assert.eq( 1, t.find( {a:{$type:5}} ).itcount() );
diff --git a/jstests/unique2.js b/jstests/unique2.js
index 42cf9fb..1c28288 100644
--- a/jstests/unique2.js
+++ b/jstests/unique2.js
@@ -1,3 +1,11 @@
+// Test unique and dropDups index options.
+
+function checkNprev( np ) {
+ // getPrevError() is not available when sharded.
+ if ( typeof( myShardingTest ) == 'undefined' ) {
+ assert.eq( np, db.getPrevError().nPrev );
+ }
+}
t = db.jstests_unique2;
@@ -21,7 +29,9 @@ t.ensureIndex({k:1}, {unique:true});
t.insert({k:3});
t.insert({k:[2,3]});
+assert( db.getLastError() );
t.insert({k:[4,3]});
+assert( db.getLastError() );
assert( t.count() == 1 ) ;
assert( t.find().sort({k:1}).toArray().length == 1 ) ;
@@ -33,9 +43,52 @@ t.insert({k:[2,3]});
t.insert({k:[4,3]});
assert( t.count() == 3 ) ;
+// Trigger an error, so we can test n of getPrevError() later.
+assert.throws( function() { t.find( {$where:'aaa'} ).itcount(); } );
+assert( db.getLastError() );
+checkNprev( 1 );
+
t.ensureIndex({k:1}, {unique:true, dropDups:true});
+// Check error flag was not set SERVER-2054.
+assert( !db.getLastError() );
+// Check that offset of previous error is correct.
+checkNprev( 2 );
+
+// Check the dups were dropped.
+assert( t.count() == 1 ) ;
+assert( t.find().sort({k:1}).toArray().length == 1 ) ;
+assert( t.find().sort({k:1}).count() == 1 ) ;
+
+// Check that a new conflicting insert will cause an error.
+t.insert({k:[2,3]});
+assert( db.getLastError() );
+
+t.drop();
+t.insert({k:3});
+t.insert({k:[2,3]});
+t.insert({k:[4,3]});
+assert( t.count() == 3 ) ;
+
+
+// Now try with a background index op.
+
+// Trigger an error, so we can test n of getPrevError() later.
+assert.throws( function() { t.find( {$where:'aaa'} ).itcount(); } );
+assert( db.getLastError() );
+checkNprev( 1 );
+
+t.ensureIndex({k:1}, {background:true, unique:true, dropDups:true});
+// Check error flag was not set SERVER-2054.
+assert( !db.getLastError() );
+// Check that offset of previous error is correct.
+checkNprev( 2 );
+
+// Check the dups were dropped.
assert( t.count() == 1 ) ;
assert( t.find().sort({k:1}).toArray().length == 1 ) ;
assert( t.find().sort({k:1}).count() == 1 ) ;
+// Check that a new conflicting insert will cause an error.
+t.insert({k:[2,3]});
+assert( db.getLastError() );
diff --git a/jstests/uniqueness.js b/jstests/uniqueness.js
index f1651b3..ce19ad0 100644
--- a/jstests/uniqueness.js
+++ b/jstests/uniqueness.js
@@ -26,8 +26,21 @@ db.jstests_uniqueness2.drop();
db.jstests_uniqueness2.insert({a:3});
db.jstests_uniqueness2.insert({a:3});
assert( db.jstests_uniqueness2.count() == 2 , 6) ;
+db.resetError();
db.jstests_uniqueness2.ensureIndex({a:1}, true);
assert( db.getLastError() , 7);
+assert( db.getLastError().match( /E11000/ ) );
+
+// Check for an error message when we index in the background and there are dups
+db.jstests_uniqueness2.drop();
+db.jstests_uniqueness2.insert({a:3});
+db.jstests_uniqueness2.insert({a:3});
+assert( db.jstests_uniqueness2.count() == 2 , 6) ;
+assert( !db.getLastError() );
+db.resetError();
+db.jstests_uniqueness2.ensureIndex({a:1}, {unique:true,background:true});
+assert( db.getLastError() , 7);
+assert( db.getLastError().match( /E11000/ ) );
/* Check that if we update and remove _id, it gets added back by the DB */
diff --git a/jstests/update.js b/jstests/update.js
index 70f9f15..d388918 100644
--- a/jstests/update.js
+++ b/jstests/update.js
@@ -23,3 +23,16 @@ for(var i=1; i<=5000; i++) {
}
assert(asdf.validate().valid);
+
+var stats = db.runCommand({ collstats: "asdf" });
+
+// Some sanity checks. We want to check that the padding factor is working; in addition this lets us do a
+// little basic testing of the collstats command at the same time.
+assert(stats.count == 5000);
+assert(stats.size < 140433012 * 5 && stats.size > 1000000);
+assert(stats.numExtents < 20);
+assert(stats.nindexes == 1);
+var pf = stats.paddingFactor;
+print("update.js padding factor: " + pf);
+assert(pf > 1.7 && pf < 2);
+
diff --git a/jstests/update_blank1.js b/jstests/update_blank1.js
new file mode 100644
index 0000000..8742bd2
--- /dev/null
+++ b/jstests/update_blank1.js
@@ -0,0 +1,12 @@
+
+t = db.update_blank1
+t.drop();
+
+orig = { _id : 1 , "" : 1 , "a" : 2 , "b" : 3 };
+t.insert( orig );
+assert.eq( orig , t.findOne() , "A1" );
+
+t.update( {} , { $set : { "c" : 1 } } );
+print( db.getLastError() );
+orig["c"] = 1;
+//assert.eq( orig , t.findOne() , "A2" ); // SERVER-2651
diff --git a/jstests/update_invalid1.js b/jstests/update_invalid1.js
new file mode 100644
index 0000000..7c94507
--- /dev/null
+++ b/jstests/update_invalid1.js
@@ -0,0 +1,6 @@
+
+t = db.update_invalid1
+t.drop()
+
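+// An upsert with an invalid modifier ( $inc nested inside $set ) should not insert anything.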
+t.update( { _id : 5 } , { $set : { $inc : { x : 5 } } } , true );
+assert.eq( 0 , t.count() , "A1" );
diff --git a/jstests/updatea.js b/jstests/updatea.js
index 9864aa6..5b45d60 100644
--- a/jstests/updatea.js
+++ b/jstests/updatea.js
@@ -47,4 +47,10 @@ t.update( {} , { $inc: { "a.10" : 1 } } );
orig.a[10]++;
+// SERVER-3218
+t.drop()
+t.insert({"a":{"c00":1}, 'c':2})
+t.update({"c":2}, {'$inc':{'a.c000':1}})
+
+assert.eq( { "c00" : 1 , "c000" : 1 } , t.findOne().a , "D1" )
diff --git a/jstests/updatef.js b/jstests/updatef.js
new file mode 100644
index 0000000..6942593
--- /dev/null
+++ b/jstests/updatef.js
@@ -0,0 +1,24 @@
+// Test unsafe management of nsdt on update command yield SERVER-3208
+
+prefixNS = db.jstests_updatef;
+prefixNS.save( {} );
+
+t = db.jstests_updatef_actual;
+t.drop();
+
+t.save( {a:0,b:[]} );
+for( i = 0; i < 1000; ++i ) {
+ t.save( {a:100} );
+}
+t.save( {a:0,b:[]} );
+
+db.getLastError();
+// Repeatedly rename jstests_updatef to jstests_updatef_ and back. This will
+// invalidate the jstests_updatef_actual NamespaceDetailsTransient object.
+s = startParallelShell( "for( i=0; i < 100; ++i ) { db.jstests_updatef.renameCollection( 'jstests_updatef_' ); db.jstests_updatef_.renameCollection( 'jstests_updatef' ); }" );
+
+for( i=0; i < 20; ++i ) {
+ t.update( {a:0}, {$push:{b:i}}, false, true );
+}
+
+s();
diff --git a/jstests/updateg.js b/jstests/updateg.js
new file mode 100644
index 0000000..f8d452f
--- /dev/null
+++ b/jstests/updateg.js
@@ -0,0 +1,17 @@
+// SERVER-3370 check modifiers with field name characters that compare less than the '.' character.
+
+t = db.jstests_updateg;
+
+t.drop();
+t.update({}, { '$inc' : { 'all.t' : 1, 'all-copy.t' : 1 }}, true);
+assert.eq( 1, t.count( {all:{t:1},'all-copy':{t:1}} ) );
+
+t.drop();
+t.save({ 'all' : {}, 'all-copy' : {}});
+t.update({}, { '$inc' : { 'all.t' : 1, 'all-copy.t' : 1 }});
+assert.eq( 1, t.count( {all:{t:1},'all-copy':{t:1}} ) );
+
+t.drop();
+t.save({ 'all11' : {}, 'all2' : {}});
+t.update({}, { '$inc' : { 'all11.t' : 1, 'all2.t' : 1 }});
+assert.eq( 1, t.count( {all11:{t:1},'all2':{t:1}} ) );
diff --git a/pch.cpp b/pch.cpp
index a81ff61..da20832 100644
--- a/pch.cpp
+++ b/pch.cpp
@@ -17,6 +17,16 @@
#include "pch.h"
+#ifndef JSTIME_VIRTUAL_SKEW
+#define JSTIME_VIRTUAL_SKEW
+
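+// Globals backing the virtual clock skew applied to jsTime() (process-wide and per-thread).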
+namespace mongo {
+ long long jsTime_virtual_skew = 0;
+ boost::thread_specific_ptr<long long> jsTime_virtual_thread_skew;
+}
+
+#endif
+
#if defined( __MSVC__ )
// should probably check VS version here
#elif defined( __GNUC__ )
diff --git a/pch.h b/pch.h
index c70f316..1e9684d 100644
--- a/pch.h
+++ b/pch.h
@@ -31,26 +31,33 @@
# define _CRT_SECURE_NO_WARNINGS
#endif
-// [dm] i am not sure why we need this.
-#if defined(WIN32)
-# ifndef _WIN32
-# define _WIN32
-# endif
-#endif
-
#if defined(_WIN32)
// for rand_s() usage:
# define _CRT_RAND_S
# ifndef NOMINMAX
# define NOMINMAX
# endif
+#define WIN32_LEAN_AND_MEAN
# include <winsock2.h> //this must be included before the first windows.h include
# include <ws2tcpip.h>
# include <wspiapi.h>
# include <windows.h>
#endif
+#if defined(__linux__) && defined(MONGO_EXPOSE_MACROS)
+// glibc's optimized versions are better than g++ builtins
+# define __builtin_strcmp strcmp
+# define __builtin_strlen strlen
+# define __builtin_memchr memchr
+# define __builtin_memcmp memcmp
+# define __builtin_memcpy memcpy
+# define __builtin_memset memset
+# define __builtin_memmove memmove
+#endif
+
+
#include <ctime>
+#include <cstring>
#include <sstream>
#include <string>
#include <memory>
@@ -69,9 +76,9 @@
#include "string.h"
#include "limits.h"
-#include <boost/any.hpp>
+//#include <boost/any.hpp>
#include "boost/thread/once.hpp"
-#include <boost/archive/iterators/transform_width.hpp>
+//#include <boost/archive/iterators/transform_width.hpp>
#define BOOST_FILESYSTEM_VERSION 2
#include <boost/filesystem/convenience.hpp>
#include <boost/filesystem/exception.hpp>
@@ -144,7 +151,11 @@ namespace mongo {
void asserted(const char *msg, const char *file, unsigned line);
}
-#define MONGO_assert(_Expression) (void)( (!!(_Expression)) || (mongo::asserted(#_Expression, __FILE__, __LINE__), 0) )
+
+
+// TODO: Rework the headers so we don't need this craziness
+#include "bson/inline_decls.h"
+#define MONGO_assert(_Expression) (void)( MONGO_likely(!!(_Expression)) || (mongo::asserted(#_Expression, __FILE__, __LINE__), 0) )
#include "util/debug_util.h"
#include "util/goodies.h"
@@ -162,6 +173,11 @@ namespace mongo {
using boost::uint32_t;
using boost::uint64_t;
+ /** Called by mongos, mongod, and test. Do not call from clients and such.
+ Invoked before just about everything except global var construction.
+ */
+ void doPreServerStatupInits();
+
} // namespace mongo
#endif // MONGO_PCH_H
diff --git a/rpm/mongo.spec b/rpm/mongo.spec
index 94dba51..3506882 100644
--- a/rpm/mongo.spec
+++ b/rpm/mongo.spec
@@ -1,5 +1,5 @@
Name: mongo
-Version: 1.8.3
+Version: 2.0.0
Release: mongodb_1%{?dist}
Summary: mongo client shell and tools
License: AGPL 3.0
@@ -106,6 +106,7 @@ fi
%{_bindir}/mongorestore
%{_bindir}/mongostat
%{_bindir}/bsondump
+%{_bindir}/mongotop
%{_mandir}/man1/mongo.1*
%{_mandir}/man1/mongod.1*
@@ -116,6 +117,7 @@ fi
%{_mandir}/man1/mongosniff.1*
%{_mandir}/man1/mongostat.1*
%{_mandir}/man1/mongorestore.1*
+%{_mandir}/man1/bsondump.1*
%files server
%defattr(-,root,root,-)
diff --git a/rpm/mongod.conf b/rpm/mongod.conf
index 1530199..73ff749 100644
--- a/rpm/mongod.conf
+++ b/rpm/mongod.conf
@@ -12,6 +12,9 @@ fork = true
dbpath=/var/lib/mongo
+# Disables write-ahead journaling
+# nojournal = true
+
# Enables periodic logging of CPU utilization and I/O wait
#cpu = true
@@ -37,9 +40,6 @@ dbpath=/var/lib/mongo
# 7=W+some reads
#oplog = 0
-# Diagnostic/debugging option
-#nocursors = true
-
# Ignore query hints
#nohints = true
diff --git a/s/balance.cpp b/s/balance.cpp
index 8b01ea7..d4bba1e 100644
--- a/s/balance.cpp
+++ b/s/balance.cpp
@@ -34,10 +34,9 @@ namespace mongo {
Balancer balancer;
- Balancer::Balancer() : _balancedLastTime(0), _policy( new BalancerPolicy ) {}
+ Balancer::Balancer() : _balancedLastTime(0), _policy( new BalancerPolicy() ) {}
Balancer::~Balancer() {
- delete _policy;
}
int Balancer::_moveChunks( const vector<CandidateChunkPtr>* candidateChunks ) {
@@ -74,7 +73,7 @@ namespace mongo {
}
// the move requires acquiring the collection metadata's lock, which can fail
- log() << "balacer move failed: " << res << " from: " << chunkInfo.from << " to: " << chunkInfo.to
+ log() << "balancer move failed: " << res << " from: " << chunkInfo.from << " to: " << chunkInfo.to
<< " chunk: " << chunkToMove << endl;
if ( res["chunkTooBig"].trueValue() ) {
@@ -156,7 +155,7 @@ namespace mongo {
cursor.reset();
if ( collections.empty() ) {
- log(1) << "no collections to balance" << endl;
+ LOG(1) << "no collections to balance" << endl;
return;
}
@@ -171,7 +170,7 @@ namespace mongo {
vector<Shard> allShards;
Shard::getAllShards( allShards );
if ( allShards.size() < 2) {
- log(1) << "can't balance without more active shards" << endl;
+ LOG(1) << "can't balance without more active shards" << endl;
return;
}
@@ -206,7 +205,7 @@ namespace mongo {
cursor.reset();
if (shardToChunksMap.empty()) {
- log(1) << "skipping empty collection (" << ns << ")";
+ LOG(1) << "skipping empty collection (" << ns << ")";
continue;
}
@@ -245,9 +244,8 @@ namespace mongo {
return true;
}
- catch ( std::exception& ) {
-
- log( LL_WARNING ) << "could not initialize balancer, please check that all shards and config servers are up" << endl;
+ catch ( std::exception& e ) {
+ warning() << "could not initialize balancer, please check that all shards and config servers are up: " << e.what() << endl;
return false;
}
@@ -267,7 +265,7 @@ namespace mongo {
break;
}
- // getConnectioString and the constructor of a DistributedLock do not throw, which is what we expect on while
+ // getConnectionString and the dist lock constructor do not throw, which is what we expect while
// on the balancer thread
ConnectionString config = configServer.getConnectionString();
DistributedLock balanceLock( config , "balancer" );
@@ -283,7 +281,7 @@ namespace mongo {
// now make sure we should even be running
if ( ! grid.shouldBalance() ) {
- log(1) << "skipping balancing round because balancing is disabled" << endl;
+ LOG(1) << "skipping balancing round because balancing is disabled" << endl;
conn.done();
sleepsecs( 30 );
@@ -295,41 +293,45 @@ namespace mongo {
// use fresh shard state
Shard::reloadShardInfo();
- dist_lock_try lk( &balanceLock , "doing balance round" );
- if ( ! lk.got() ) {
- log(1) << "skipping balancing round because another balancer is active" << endl;
- conn.done();
-
- sleepsecs( 30 ); // no need to wake up soon
- continue;
- }
-
- log(1) << "*** start balancing round" << endl;
-
- vector<CandidateChunkPtr> candidateChunks;
- _doBalanceRound( conn.conn() , &candidateChunks );
- if ( candidateChunks.size() == 0 ) {
- log(1) << "no need to move any chunk" << endl;
- }
- else {
- _balancedLastTime = _moveChunks( &candidateChunks );
+ {
+ dist_lock_try lk( &balanceLock , "doing balance round" );
+ if ( ! lk.got() ) {
+ LOG(1) << "skipping balancing round because another balancer is active" << endl;
+ conn.done();
+
+ sleepsecs( 30 ); // no need to wake up soon
+ continue;
+ }
+
+ LOG(1) << "*** start balancing round" << endl;
+
+ vector<CandidateChunkPtr> candidateChunks;
+ _doBalanceRound( conn.conn() , &candidateChunks );
+ if ( candidateChunks.size() == 0 ) {
+ LOG(1) << "no need to move any chunk" << endl;
+ }
+ else {
+ _balancedLastTime = _moveChunks( &candidateChunks );
+ }
+
+ LOG(1) << "*** end of balancing round" << endl;
}
-
- log(1) << "*** end of balancing round" << endl;
+
conn.done();
-
+
sleepsecs( _balancedLastTime ? 5 : 10 );
}
catch ( std::exception& e ) {
log() << "caught exception while doing balance: " << e.what() << endl;
// Just to match the opening statement if in log level 1
- log(1) << "*** End of balancing round" << endl;
+ LOG(1) << "*** End of balancing round" << endl;
sleepsecs( 30 ); // sleep a fair amount b/c of error
continue;
}
}
+
}
} // namespace mongo
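
The main change in balance.cpp above is that the dist_lock_try now lives in its own block, so the distributed lock is released by its destructor before the thread sleeps between rounds. A toy illustration of that RAII scoping, with made-up types rather than mongo's lock classes:

    #include <cstdio>

    struct FakeTryLock {                        // stand-in for dist_lock_try
        bool acquired;
        explicit FakeTryLock(bool ok) : acquired(ok) { if (acquired) std::puts("lock acquired"); }
        ~FakeTryLock() { if (acquired) std::puts("lock released"); }
        bool got() const { return acquired; }
    };

    int main() {
        {
            FakeTryLock lk(true);
            if (!lk.got()) return 0;            // another balancer is active
            std::puts("doing balance round");
        }                                       // destructor runs here, before the sleep
        std::puts("sleeping without holding the lock");
        return 0;
    }
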
diff --git a/s/balance.h b/s/balance.h
index 0ad2647..6875996 100644
--- a/s/balance.h
+++ b/s/balance.h
@@ -59,8 +59,8 @@ namespace mongo {
int _balancedLastTime;
// decide which chunks to move; owned here.
- BalancerPolicy* _policy;
-
+ scoped_ptr<BalancerPolicy> _policy;
+
/**
* Checks that the balancer can connect to all servers it needs to do its job.
*
diff --git a/s/balancer_policy.cpp b/s/balancer_policy.cpp
index 482fab0..f1b4bf1 100644
--- a/s/balancer_policy.cpp
+++ b/s/balancer_policy.cpp
@@ -53,6 +53,7 @@ namespace mongo {
const bool draining = isDraining( shardLimits );
const bool opsQueued = hasOpsQueued( shardLimits );
+
// Is this shard a better chunk receiver then the current one?
// Shards that would be bad receiver candidates:
// + maxed out shards
@@ -64,6 +65,13 @@ namespace mongo {
min = make_pair( shard , size );
}
}
+ else if ( opsQueued ) {
+ LOG(1) << "won't send a chunk to: " << shard << " because it has ops queued" << endl;
+ }
+ else if ( maxedOut ) {
+ LOG(1) << "won't send a chunk to: " << shard << " because it is maxedOut" << endl;
+ }
+
// Check whether this shard is a better chunk donor then the current one.
// Draining shards take a lower priority than overloaded shards.
@@ -79,7 +87,7 @@ namespace mongo {
// If there is no candidate chunk receiver -- they may have all been maxed out,
// draining, ... -- there's not much that the policy can do.
if ( min.second == numeric_limits<unsigned>::max() ) {
- log() << "no availalable shards to take chunks" << endl;
+ log() << "no available shards to take chunks" << endl;
return NULL;
}
@@ -88,13 +96,13 @@ namespace mongo {
return NULL;
}
- log(1) << "collection : " << ns << endl;
- log(1) << "donor : " << max.second << " chunks on " << max.first << endl;
- log(1) << "receiver : " << min.second << " chunks on " << min.first << endl;
+ LOG(1) << "collection : " << ns << endl;
+ LOG(1) << "donor : " << max.second << " chunks on " << max.first << endl;
+ LOG(1) << "receiver : " << min.second << " chunks on " << min.first << endl;
if ( ! drainingShards.empty() ) {
string drainingStr;
joinStringDelim( drainingShards, &drainingStr, ',' );
- log(1) << "draining : " << ! drainingShards.empty() << "(" << drainingShards.size() << ")" << endl;
+ LOG(1) << "draining : " << ! drainingShards.empty() << "(" << drainingShards.size() << ")" << endl;
}
// Solving imbalances takes a higher priority than draining shards. Many shards can
@@ -126,7 +134,7 @@ namespace mongo {
}
BSONObj BalancerPolicy::pickChunk( const vector<BSONObj>& from, const vector<BSONObj>& to ) {
- // It is possible for a donor ('from') shard to have less chunks than a recevier one ('to')
+ // It is possible for a donor ('from') shard to have fewer chunks than a receiver one ('to')
// if the donor is in draining mode.
if ( to.size() == 0 )
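
For readers new to balancer_policy.cpp: the policy walks the per-shard chunk counts, skips shards that cannot receive (maxed out, draining, or with ops queued), and keeps the smallest count as the receiver and the largest as the donor. A toy version of that scan, with made-up shard names and without the real limit checks:

    #include <iostream>
    #include <limits>
    #include <map>
    #include <string>
    #include <utility>

    int main() {
        std::map<std::string, unsigned> chunkCounts;
        chunkCounts["shard0"] = 12;
        chunkCounts["shard1"] = 3;
        chunkCounts["shard2"] = 7;

        std::pair<std::string, unsigned> receiver("", std::numeric_limits<unsigned>::max());
        std::pair<std::string, unsigned> donor("", 0);

        for (std::map<std::string, unsigned>::const_iterator it = chunkCounts.begin();
             it != chunkCounts.end(); ++it) {
            bool maxedOut = false;   // stand-in for the draining/opsQueued/maxSize checks
            if (!maxedOut && it->second < receiver.second) receiver = *it;
            if (it->second > donor.second) donor = *it;
        }

        if (receiver.second == std::numeric_limits<unsigned>::max()) {
            std::cout << "no available shards to take chunks\n";
            return 0;
        }
        std::cout << "donor: " << donor.first << " receiver: " << receiver.first << "\n";
        return 0;
    }
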
diff --git a/s/chunk.cpp b/s/chunk.cpp
index 2d0ad5d..09dc994 100644
--- a/s/chunk.cpp
+++ b/s/chunk.cpp
@@ -19,8 +19,10 @@
#include "pch.h"
#include "../client/connpool.h"
+#include "../db/querypattern.h"
#include "../db/queryutil.h"
#include "../util/unittest.h"
+#include "../util/timer.h"
#include "chunk.h"
#include "config.h"
@@ -48,17 +50,34 @@ namespace mongo {
int Chunk::MaxObjectPerChunk = 250000;
- Chunk::Chunk( ChunkManager * manager ) : _manager(manager), _lastmod(0) {
- _setDataWritten();
- }
+ Chunk::Chunk(const ChunkManager * manager, BSONObj from)
+ : _manager(manager), _lastmod(0), _dataWritten(mkDataWritten())
+ {
+ string ns = from.getStringField( "ns" );
+ _shard.reset( from.getStringField( "shard" ) );
+
+ _lastmod = from["lastmod"];
+ assert( _lastmod > 0 );
- Chunk::Chunk(ChunkManager * info , const BSONObj& min, const BSONObj& max, const Shard& shard)
- : _manager(info), _min(min), _max(max), _shard(shard), _lastmod(0) {
- _setDataWritten();
+ _min = from.getObjectField( "min" ).getOwned();
+ _max = from.getObjectField( "max" ).getOwned();
+
+ uassert( 10170 , "Chunk needs a ns" , ! ns.empty() );
+ uassert( 13327 , "Chunk ns must match server ns" , ns == _manager->getns() );
+
+ uassert( 10171 , "Chunk needs a server" , _shard.ok() );
+
+ uassert( 10172 , "Chunk needs a min" , ! _min.isEmpty() );
+ uassert( 10173 , "Chunk needs a max" , ! _max.isEmpty() );
}
- void Chunk::_setDataWritten() {
- _dataWritten = rand() % ( MaxChunkSize / 5 );
+
+ Chunk::Chunk(const ChunkManager * info , const BSONObj& min, const BSONObj& max, const Shard& shard)
+ : _manager(info), _min(min), _max(max), _shard(shard), _lastmod(0), _dataWritten(mkDataWritten())
+ {}
+
+ long Chunk::mkDataWritten() {
+ return rand() % ( MaxChunkSize / 5 );
}
string Chunk::getns() const {
@@ -175,7 +194,7 @@ namespace mongo {
conn.done();
}
- bool Chunk::singleSplit( bool force , BSONObj& res , ChunkPtr* low, ChunkPtr* high) {
+ BSONObj Chunk::singleSplit( bool force , BSONObj& res ) const {
vector<BSONObj> splitPoint;
// if splitting is not obligatory we may return early if there are not enough data
@@ -189,8 +208,8 @@ namespace mongo {
// no split points means there isn't enough data to split on
// 1 split point means we have between half the chunk size to full chunk size
// so we shouldn't split
- log(1) << "chunk not full enough to trigger auto-split" << endl;
- return false;
+ LOG(1) << "chunk not full enough to trigger auto-split" << endl;
+ return BSONObj();
}
splitPoint.push_back( candidates.front() );
@@ -228,24 +247,16 @@ namespace mongo {
if ( splitPoint.empty() || _min == splitPoint.front() || _max == splitPoint.front() ) {
log() << "want to split chunk, but can't find split point chunk " << toString()
<< " got: " << ( splitPoint.empty() ? "<empty>" : splitPoint.front().toString() ) << endl;
- return false;
- }
-
- if (!multiSplit( splitPoint , res , true ))
- return false;
-
- if (low && high) {
- low->reset( new Chunk(_manager, _min, splitPoint[0], _shard));
- high->reset(new Chunk(_manager, splitPoint[0], _max, _shard));
- }
- else {
- assert(!low && !high); // can't have one without the other
+ return BSONObj();
}
-
- return true;
+
+ if (multiSplit( splitPoint , res ))
+ return splitPoint.front();
+ else
+ return BSONObj();
}
- bool Chunk::multiSplit( const vector<BSONObj>& m , BSONObj& res , bool resetIfSplit) {
+ bool Chunk::multiSplit( const vector<BSONObj>& m , BSONObj& res ) const {
const size_t maxSplitPoints = 8192;
uassert( 10165 , "can't split as shard doesn't have a manager" , _manager );
@@ -270,24 +281,22 @@ namespace mongo {
warning() << "splitChunk failed - cmd: " << cmdObj << " result: " << res << endl;
conn.done();
- // reloading won't stricly solve all problems, e.g. the collection's metdata lock can be taken
- // but we issue here so that mongos may refresh wihtout needing to be written/read against
- grid.getDBConfig(_manager->getns())->getChunkManager(_manager->getns(), true);
+ // reloading won't strictly solve all problems, e.g. the collection's metadata lock can be taken
+ // but we issue it here so that mongos may refresh without needing to be written/read against
+ _manager->reload();
return false;
}
conn.done();
-
- if ( resetIfSplit ) {
- // force reload of chunks
- grid.getDBConfig(_manager->getns())->getChunkManager(_manager->getns(), true);
- }
+
+ // force reload of config
+ _manager->reload();
return true;
}
- bool Chunk::moveAndCommit( const Shard& to , long long chunkSize /* bytes */, BSONObj& res ) {
+ bool Chunk::moveAndCommit( const Shard& to , long long chunkSize /* bytes */, BSONObj& res ) const {
uassert( 10167 , "can't move shard to its current location!" , getShard() != to );
log() << "moving chunk ns: " << _manager->getns() << " moving ( " << toString() << ") " << _shard.toString() << " -> " << to.toString() << endl;
@@ -311,15 +320,17 @@ namespace mongo {
fromconn.done();
+ log( worked ) << "moveChunk result: " << res << endl;
+
// if succeeded, needs to reload to pick up the new location
// if failed, mongos may be stale
// reload is excessive here as the failure could be simply because collection metadata is taken
- grid.getDBConfig(_manager->getns())->getChunkManager(_manager->getns(), true);
+ _manager->reload();
return worked;
}
- bool Chunk::splitIfShould( long dataWritten ) {
+ bool Chunk::splitIfShould( long dataWritten ) const {
LastError::Disabled d( lastError.get() );
try {
@@ -332,28 +343,63 @@ namespace mongo {
if ( _dataWritten < splitThreshold / 5 )
return false;
- log(1) << "about to initiate autosplit: " << *this << " dataWritten: " << _dataWritten << " splitThreshold: " << splitThreshold << endl;
+ // this is a bit ugly
+ // we need it so that mongos blocks for the writes to actually be committed
+ // this does mean mongos has more back pressure than mongod alone
+ // since it's not 100% tcp queue bound
+ // this was implicit before since we did a splitVector on the same socket
+ ShardConnection::sync();
+
+ LOG(1) << "about to initiate autosplit: " << *this << " dataWritten: " << _dataWritten << " splitThreshold: " << splitThreshold << endl;
_dataWritten = 0; // reset so we check often enough
BSONObj res;
- ChunkPtr low;
- ChunkPtr high;
- bool worked = singleSplit( false /* does not force a split if not enough data */ , res , &low, &high);
- if ( !worked ) {
+ BSONObj splitPoint = singleSplit( false /* does not force a split if not enough data */ , res );
+ if ( splitPoint.isEmpty() ) {
// singleSplit would have issued a message if we got here
_dataWritten = 0; // this means there wasn't enough data to split, so don't want to try again until considerable more data
return false;
}
log() << "autosplitted " << _manager->getns() << " shard: " << toString()
- << " on: " << low->getMax() << "(splitThreshold " << splitThreshold << ")"
+ << " on: " << splitPoint << "(splitThreshold " << splitThreshold << ")"
#ifdef _DEBUG
- << " size: " << getPhysicalSize() // slow - but can be usefule when debugging
+ << " size: " << getPhysicalSize() // slow - but can be useful when debugging
#endif
<< endl;
- low->moveIfShould( high );
+ BSONElement shouldMigrate = res["shouldMigrate"]; // not in mongod < 1.9.1 but that is ok
+ if (!shouldMigrate.eoo() && grid.shouldBalance()){
+ BSONObj range = shouldMigrate.embeddedObject();
+ BSONObj min = range["min"].embeddedObject();
+ BSONObj max = range["max"].embeddedObject();
+
+ Shard newLocation = Shard::pick( getShard() );
+ if ( getShard() == newLocation ) {
+ // if this is the best shard, then we shouldn't do anything (Shard::pick already logged our shard).
+ LOG(1) << "recently split chunk: " << range << " already in the best shard: " << getShard() << endl;
+ return true; // we did split even if we didn't migrate
+ }
+
+ ChunkManagerPtr cm = _manager->reload(false/*just reloaded in multisplit*/);
+ ChunkPtr toMove = cm->findChunk(min);
+
+ if ( ! (toMove->getMin() == min && toMove->getMax() == max) ){
+ LOG(1) << "recently split chunk: " << range << " modified before we could migrate " << toMove << endl;
+ return true;
+ }
+
+ log() << "moving chunk (auto): " << toMove << " to: " << newLocation.toString() << endl;
+
+ BSONObj res;
+ massert( 10412 ,
+ str::stream() << "moveAndCommit failed: " << res ,
+ toMove->moveAndCommit( newLocation , MaxChunkSize , res ) );
+
+ // update our config
+ _manager->reload();
+ }
return true;
@@ -365,40 +411,6 @@ namespace mongo {
}
}
- bool Chunk::moveIfShould( ChunkPtr newChunk ) {
- ChunkPtr toMove;
-
- if ( newChunk->countObjects(2) <= 1 ) {
- toMove = newChunk;
- }
- else if ( this->countObjects(2) <= 1 ) {
- DEV assert( shared_from_this() );
- toMove = shared_from_this();
- }
- else {
- // moving middle shards is handled by balancer
- return false;
- }
-
- assert( toMove );
-
- Shard newLocation = Shard::pick( getShard() );
- if ( getShard() == newLocation ) {
- // if this is the best shard, then we shouldn't do anything (Shard::pick already logged our shard).
- log(1) << "recently split chunk: " << toString() << "already in the best shard" << endl;
- return 0;
- }
-
- log() << "moving chunk (auto): " << toMove->toString() << " to: " << newLocation.toString() << " #objects: " << toMove->countObjects() << endl;
-
- BSONObj res;
- massert( 10412 ,
- str::stream() << "moveAndCommit failed: " << res ,
- toMove->moveAndCommit( newLocation , MaxChunkSize , res ) );
-
- return true;
- }
-
long Chunk::getPhysicalSize() const {
ScopedDbConnection conn( getShard().getConnString() );
@@ -416,24 +428,7 @@ namespace mongo {
return (long)result["size"].number();
}
- int Chunk::countObjects(int maxCount) const {
- static const BSONObj fields = BSON("_id" << 1 );
-
- ShardConnection conn( getShard() , _manager->getns() );
-
- // not using regular count as this is more flexible and supports $min/$max
- Query q = Query().minKey(_min).maxKey(_max);
- int n;
- {
- auto_ptr<DBClientCursor> c = conn->query(_manager->getns(), q, maxCount, 0, &fields);
- assert( c.get() );
- n = c->itcount();
- }
- conn.done();
- return n;
- }
-
- void Chunk::appendShortVersion( const char * name , BSONObjBuilder& b ) {
+ void Chunk::appendShortVersion( const char * name , BSONObjBuilder& b ) const {
BSONObjBuilder bb( b.subobjStart( name ) );
bb.append( "min" , _min );
bb.append( "max" , _max );
@@ -481,33 +476,6 @@ namespace mongo {
return buf.str();
}
- void Chunk::unserialize(const BSONObj& from) {
- string ns = from.getStringField( "ns" );
- _shard.reset( from.getStringField( "shard" ) );
-
- _lastmod = from["lastmod"];
- assert( _lastmod > 0 );
-
- BSONElement e = from["minDotted"];
-
- if (e.eoo()) {
- _min = from.getObjectField( "min" ).getOwned();
- _max = from.getObjectField( "max" ).getOwned();
- }
- else { // TODO delete this case after giving people a chance to migrate
- _min = e.embeddedObject().getOwned();
- _max = from.getObjectField( "maxDotted" ).getOwned();
- }
-
- uassert( 10170 , "Chunk needs a ns" , ! ns.empty() );
- uassert( 13327 , "Chunk ns must match server ns" , ns == _manager->getns() );
-
- uassert( 10171 , "Chunk needs a server" , _shard.ok() );
-
- uassert( 10172 , "Chunk needs a min" , ! _min.isEmpty() );
- uassert( 10173 , "Chunk needs a max" , ! _max.isEmpty() );
- }
-
string Chunk::toString() const {
stringstream ss;
ss << "ns:" << _manager->getns() << " at: " << _shard.toString() << " lastmod: " << _lastmod.toString() << " min: " << _min << " max: " << _max;
@@ -523,57 +491,63 @@ namespace mongo {
AtomicUInt ChunkManager::NextSequenceNumber = 1;
ChunkManager::ChunkManager( string ns , ShardKeyPattern pattern , bool unique ) :
- _ns( ns ) , _key( pattern ) , _unique( unique ) , _lock("rw:ChunkManager"),
- _nsLock( ConnectionString( configServer.modelServer() , ConnectionString::SYNC ) , ns ) {
- _reload_inlock(); // will set _sequenceNumber
- }
-
- ChunkManager::~ChunkManager() {
- _chunkMap.clear();
- _chunkRanges.clear();
- _shards.clear();
- }
+ _ns( ns ) , _key( pattern ) , _unique( unique ) , _chunkRanges(), _mutex("ChunkManager"),
+ _nsLock( ConnectionString( configServer.modelServer() , ConnectionString::SYNC ) , ns ),
- void ChunkManager::_reload() {
- rwlock lk( _lock , true );
- _reload_inlock();
- }
+ // The shard versioning mechanism hinges on keeping track of the number of times we reloaded ChunkManagers.
+ // Increasing this number here will prompt checkShardVersion() to refresh the connection-level versions to
+ // the most up to date value.
+ _sequenceNumber(++NextSequenceNumber)
- void ChunkManager::_reload_inlock() {
+ {
int tries = 3;
while (tries--) {
- _chunkMap.clear();
- _chunkRanges.clear();
- _shards.clear();
- _load();
-
- if (_isValid()) {
- _chunkRanges.reloadAll(_chunkMap);
-
- // The shard versioning mechanism hinges on keeping track of the number of times we reloaded ChunkManager's.
- // Increasing this number here will prompt checkShardVersion() to refresh the connection-level versions to
- // the most up to date value.
- _sequenceNumber = ++NextSequenceNumber;
+ ChunkMap chunkMap;
+ set<Shard> shards;
+ ShardVersionMap shardVersions;
+ Timer t;
+ _load(chunkMap, shards, shardVersions);
+ {
+ int ms = t.millis();
+ log() << "ChunkManager: time to load chunks for " << ns << ": " << ms << "ms"
+ << " sequenceNumber: " << _sequenceNumber
+ << " version: " << _version.toString()
+ << endl;
+ }
+ if (_isValid(chunkMap)) {
+ // These variables are const for thread-safety. Since the
+ // constructor can only be called from one thread, we don't have
+ // to worry about that here.
+ const_cast<ChunkMap&>(_chunkMap).swap(chunkMap);
+ const_cast<set<Shard>&>(_shards).swap(shards);
+ const_cast<ShardVersionMap&>(_shardVersions).swap(shardVersions);
+ const_cast<ChunkRangeManager&>(_chunkRanges).reloadAll(_chunkMap);
return;
}
-
+
if (_chunkMap.size() < 10) {
_printChunks();
}
+
+ warning() << "ChunkManager loaded an invalid config, trying again" << endl;
sleepmillis(10 * (3-tries));
}
+ // this will abort construction so we should never have a reference to an invalid config
msgasserted(13282, "Couldn't load a valid config for " + _ns + " after 3 attempts. Please try again.");
+ }
+ ChunkManagerPtr ChunkManager::reload(bool force) const {
+ return grid.getDBConfig(getns())->getChunkManager(getns(), force);
}
- void ChunkManager::_load() {
+ void ChunkManager::_load(ChunkMap& chunkMap, set<Shard>& shards, ShardVersionMap& shardVersions) {
ScopedDbConnection conn( configServer.modelServer() );
// TODO really need the sort?
- auto_ptr<DBClientCursor> cursor = conn->query( Chunk::chunkMetadataNS, QUERY("ns" << _ns).sort("lastmod",1), 0, 0, 0, 0,
+ auto_ptr<DBClientCursor> cursor = conn->query( Chunk::chunkMetadataNS, QUERY("ns" << _ns).sort("lastmod",-1), 0, 0, 0, 0,
(DEBUG_BUILD ? 2 : 1000000)); // batch size. Try to induce potential race conditions in debug builds
assert( cursor.get() );
while ( cursor->more() ) {
@@ -582,28 +556,36 @@ namespace mongo {
continue;
}
- ChunkPtr c( new Chunk( this ) );
- c->unserialize( d );
+ ChunkPtr c( new Chunk( this, d ) );
- _chunkMap[c->getMax()] = c;
- _shards.insert(c->getShard());
+ chunkMap[c->getMax()] = c;
+ shards.insert(c->getShard());
+
+ // set global max
+ if ( c->getLastmod() > _version )
+ _version = c->getLastmod();
+
+ // set shard max
+ ShardChunkVersion& shardMax = shardVersions[c->getShard()];
+ if ( c->getLastmod() > shardMax )
+ shardMax = c->getLastmod();
}
conn.done();
}
- bool ChunkManager::_isValid() const {
+ bool ChunkManager::_isValid(const ChunkMap& chunkMap) {
#define ENSURE(x) do { if(!(x)) { log() << "ChunkManager::_isValid failed: " #x << endl; return false; } } while(0)
- if (_chunkMap.empty())
+ if (chunkMap.empty())
return true;
// Check endpoints
- ENSURE(allOfType(MinKey, _chunkMap.begin()->second->getMin()));
- ENSURE(allOfType(MaxKey, prior(_chunkMap.end())->second->getMax()));
+ ENSURE(allOfType(MinKey, chunkMap.begin()->second->getMin()));
+ ENSURE(allOfType(MaxKey, prior(chunkMap.end())->second->getMax()));
// Make sure there are no gaps or overlaps
- for (ChunkMap::const_iterator it=boost::next(_chunkMap.begin()), end=_chunkMap.end(); it != end; ++it) {
+ for (ChunkMap::const_iterator it=boost::next(chunkMap.begin()), end=chunkMap.end(); it != end; ++it) {
ChunkMap::const_iterator last = prior(it);
if (!(it->second->getMin() == last->second->getMax())) {
@@ -625,14 +607,15 @@ namespace mongo {
}
}
- bool ChunkManager::hasShardKey( const BSONObj& obj ) {
+ bool ChunkManager::hasShardKey( const BSONObj& obj ) const {
return _key.hasShardKey( obj );
}
- void ChunkManager::createFirstChunk( const Shard& shard ) {
+ void ChunkManager::createFirstChunk( const Shard& shard ) const {
+ // TODO distlock?
assert( _chunkMap.size() == 0 );
- ChunkPtr c( new Chunk(this, _key.globalMin(), _key.globalMax(), shard ) );
+ Chunk c (this, _key.globalMin(), _key.globalMax(), shard);
// this is the first chunk; start the versioning from scratch
ShardChunkVersion version;
@@ -640,52 +623,42 @@ namespace mongo {
// build update for the chunk collection
BSONObjBuilder chunkBuilder;
- c->serialize( chunkBuilder , version );
+ c.serialize( chunkBuilder , version );
BSONObj chunkCmd = chunkBuilder.obj();
log() << "about to create first chunk for: " << _ns << endl;
ScopedDbConnection conn( configServer.modelServer() );
BSONObj res;
- conn->update( Chunk::chunkMetadataNS, QUERY( "_id" << c->genID() ), chunkCmd, true, false );
+ conn->update( Chunk::chunkMetadataNS, QUERY( "_id" << c.genID() ), chunkCmd, true, false );
string errmsg = conn->getLastError();
if ( errmsg.size() ) {
stringstream ss;
ss << "saving first chunk failed. cmd: " << chunkCmd << " result: " << errmsg;
log( LL_ERROR ) << ss.str() << endl;
- msgasserted( 13592 , ss.str() ); // assert(13592)
+ msgasserted( 13592 , ss.str() );
}
conn.done();
- // every instance of ChunkManager has a unique sequence number; callers of ChunkManager may
- // inquiry about whether there were changes in chunk configuration (see re/load() calls) since
- // the last access to ChunkManager by checking the sequence number
- _sequenceNumber = ++NextSequenceNumber;
-
- _chunkMap[c->getMax()] = c;
- _chunkRanges.reloadAll(_chunkMap);
- _shards.insert(c->getShard());
- c->setLastmod(version);
-
// the ensure index will have the (desired) indirect effect of creating the collection on the
// assigned shard, as it sets up the index over the sharding keys.
- ensureIndex_inlock();
+ ScopedDbConnection shardConn( c.getShard().getConnString() );
+ shardConn->ensureIndex( getns() , getShardKey().key() , _unique , "" , false /* do not cache ensureIndex SERVER-1691 */ );
+ shardConn.done();
- log() << "successfully created first chunk for " << c->toString() << endl;
+ log() << "successfully created first chunk for " << c.toString() << endl;
}
- ChunkPtr ChunkManager::findChunk( const BSONObj & obj) {
+ ChunkPtr ChunkManager::findChunk( const BSONObj & obj ) const {
BSONObj key = _key.extractKey(obj);
{
- rwlock lk( _lock , false );
-
BSONObj foo;
ChunkPtr c;
{
- ChunkMap::iterator it = _chunkMap.upper_bound(key);
+ ChunkMap::const_iterator it = _chunkMap.upper_bound(key);
if (it != _chunkMap.end()) {
foo = it->first;
c = it->second;
@@ -693,25 +666,24 @@ namespace mongo {
}
if ( c ) {
- if ( c->contains( obj ) )
+ if ( c->contains( key ) ){
+ dassert(c->contains(key)); // doesn't use fast-path in extractKey
return c;
+ }
PRINT(foo);
PRINT(*c);
PRINT(key);
- grid.getDBConfig(getns())->getChunkManager(getns(), true);
+ reload();
massert(13141, "Chunk map pointed to incorrect chunk", false);
}
}
- massert(8070, str::stream() << "couldn't find a chunk aftry retry which should be impossible extracted: " << key, false);
- return ChunkPtr(); // unreachable
+ throw UserException( 8070 , str::stream() << "couldn't find a chunk which should be impossible: " << key );
}
ChunkPtr ChunkManager::findChunkOnServer( const Shard& shard ) const {
- rwlock lk( _lock , false );
-
for ( ChunkMap::const_iterator i=_chunkMap.begin(); i!=_chunkMap.end(); ++i ) {
ChunkPtr c = i->second;
if ( c->getShard() == shard )
@@ -721,14 +693,11 @@ namespace mongo {
return ChunkPtr();
}
- void ChunkManager::getShardsForQuery( set<Shard>& shards , const BSONObj& query ) {
- rwlock lk( _lock , false );
- DEV PRINT(query);
-
+ void ChunkManager::getShardsForQuery( set<Shard>& shards , const BSONObj& query ) const {
//TODO look into FieldRangeSetOr
- FieldRangeOrSet fros(_ns.c_str(), query, false);
+ OrRangeGenerator org(_ns.c_str(), query, false);
- const string special = fros.getSpecial();
+ const string special = org.getSpecial();
if (special == "2d") {
BSONForEach(field, query) {
if (getGtLtOp(field) == BSONObj::opNEAR) {
@@ -743,25 +712,22 @@ namespace mongo {
}
do {
- boost::scoped_ptr<FieldRangeSet> frs (fros.topFrs());
+ boost::scoped_ptr<FieldRangeSetPair> frsp (org.topFrsp());
{
// special case if most-significant field isn't in query
- FieldRange range = frs->range(_key.key().firstElement().fieldName());
+ FieldRange range = frsp->singleKeyRange(_key.key().firstElementFieldName());
if ( !range.nontrivial() ) {
DEV PRINT(range.nontrivial());
- getAllShards_inlock(shards);
+ getAllShards(shards);
return;
}
}
- BoundList ranges = frs->indexBounds(_key.key(), 1);
+ BoundList ranges = frsp->singleKeyIndexBounds(_key.key(), 1);
for (BoundList::const_iterator it=ranges.begin(), end=ranges.end(); it != end; ++it) {
BSONObj minObj = it->first.replaceFieldNames(_key.key());
BSONObj maxObj = it->second.replaceFieldNames(_key.key());
- DEV PRINT(minObj);
- DEV PRINT(maxObj);
-
ChunkRangeMap::const_iterator min, max;
min = _chunkRanges.upper_bound(minObj);
max = _chunkRanges.upper_bound(maxObj);
@@ -781,14 +747,14 @@ namespace mongo {
//return;
}
- if (fros.moreOrClauses())
- fros.popOrClause();
+ if (org.moreOrClauses())
+ org.popOrClauseSingleKey();
}
- while (fros.moreOrClauses());
+ while (org.moreOrClauses());
}
- void ChunkManager::getShardsForRange(set<Shard>& shards, const BSONObj& min, const BSONObj& max) {
+ void ChunkManager::getShardsForRange(set<Shard>& shards, const BSONObj& min, const BSONObj& max) const {
uassert(13405, "min must have shard key", hasShardKey(min));
uassert(13406, "max must have shard key", hasShardKey(max));
@@ -804,37 +770,30 @@ namespace mongo {
}
}
- void ChunkManager::getAllShards( set<Shard>& all ) {
- rwlock lk( _lock , false );
- getAllShards_inlock( all );
- }
-
- void ChunkManager::getAllShards_inlock( set<Shard>& all ){
+ void ChunkManager::getAllShards( set<Shard>& all ) const {
all.insert(_shards.begin(), _shards.end());
}
- void ChunkManager::ensureIndex_inlock() {
- //TODO in parallel?
- for ( set<Shard>::const_iterator i=_shards.begin(); i!=_shards.end(); ++i ) {
- ScopedDbConnection conn( i->getConnString() );
- conn->ensureIndex( getns() , getShardKey().key() , _unique , "" , false /* do not cache ensureIndex SERVER-1691 */ );
- conn.done();
- }
- }
-
- void ChunkManager::drop( ChunkManagerPtr me ) {
- rwlock lk( _lock , true );
+ void ChunkManager::drop( ChunkManagerPtr me ) const {
+ scoped_lock lk( _mutex );
configServer.logChange( "dropCollection.start" , _ns , BSONObj() );
- dist_lock_try dlk( &_nsLock , "drop" );
+ dist_lock_try dlk;
+ try{
+ dlk = dist_lock_try( &_nsLock , "drop" );
+ }
+ catch( LockException& e ){
+ uassert( 14022, str::stream() << "Error locking distributed lock for chunk drop." << causedBy( e ), false);
+ }
+
uassert( 13331 , "collection's metadata is undergoing changes. Please try again." , dlk.got() );
uassert( 10174 , "config servers not all up" , configServer.allUp() );
set<Shard> seen;
- log(1) << "ChunkManager::drop : " << _ns << endl;
+ LOG(1) << "ChunkManager::drop : " << _ns << endl;
// lock all shards so no one can do a split/migrate
for ( ChunkMap::const_iterator i=_chunkMap.begin(); i!=_chunkMap.end(); ++i ) {
@@ -842,12 +801,7 @@ namespace mongo {
seen.insert( c->getShard() );
}
- log(1) << "ChunkManager::drop : " << _ns << "\t all locked" << endl;
-
- // wipe my meta-data
- _chunkMap.clear();
- _chunkRanges.clear();
- _shards.clear();
+ LOG(1) << "ChunkManager::drop : " << _ns << "\t all locked" << endl;
// delete data from mongod
for ( set<Shard>::iterator i=seen.begin(); i!=seen.end(); i++ ) {
@@ -856,82 +810,64 @@ namespace mongo {
conn.done();
}
- log(1) << "ChunkManager::drop : " << _ns << "\t removed shard data" << endl;
+ LOG(1) << "ChunkManager::drop : " << _ns << "\t removed shard data" << endl;
// remove chunk data
ScopedDbConnection conn( configServer.modelServer() );
conn->remove( Chunk::chunkMetadataNS , BSON( "ns" << _ns ) );
conn.done();
- log(1) << "ChunkManager::drop : " << _ns << "\t removed chunk data" << endl;
+ LOG(1) << "ChunkManager::drop : " << _ns << "\t removed chunk data" << endl;
for ( set<Shard>::iterator i=seen.begin(); i!=seen.end(); i++ ) {
ScopedDbConnection conn( *i );
BSONObj res;
+
+ // this is horrible
+ // we need a special command for dropping on the d side
+ // this hack works for the moment
+
if ( ! setShardVersion( conn.conn() , _ns , 0 , true , res ) )
throw UserException( 8071 , str::stream() << "cleaning up after drop failed: " << res );
+ conn->simpleCommand( "admin", 0, "unsetSharding" );
conn.done();
}
- log(1) << "ChunkManager::drop : " << _ns << "\t DONE" << endl;
+ LOG(1) << "ChunkManager::drop : " << _ns << "\t DONE" << endl;
configServer.logChange( "dropCollection" , _ns , BSONObj() );
}
- bool ChunkManager::maybeChunkCollection() {
- ensureIndex_inlock();
-
+ void ChunkManager::maybeChunkCollection() const {
uassert( 13346 , "can't pre-split already splitted collection" , (_chunkMap.size() == 1) );
-
+
ChunkPtr soleChunk = _chunkMap.begin()->second;
vector<BSONObj> splitPoints;
soleChunk->pickSplitVector( splitPoints , Chunk::MaxChunkSize );
if ( splitPoints.empty() ) {
- log(1) << "not enough data to warrant chunking " << getns() << endl;
- return false;
+ LOG(1) << "not enough data to warrant chunking " << getns() << endl;
+ return;
}
-
+
BSONObj res;
- bool worked = soleChunk->multiSplit( splitPoints , res , false );
+ ChunkPtr p;
+ bool worked = soleChunk->multiSplit( splitPoints , res );
if (!worked) {
log( LL_WARNING ) << "could not split '" << getns() << "': " << res << endl;
- return false;
+ return;
}
- return true;
}
ShardChunkVersion ChunkManager::getVersion( const Shard& shard ) const {
- rwlock lk( _lock , false );
- // TODO: cache or something?
-
- ShardChunkVersion max = 0;
-
- for ( ChunkMap::const_iterator i=_chunkMap.begin(); i!=_chunkMap.end(); ++i ) {
- ChunkPtr c = i->second;
- DEV assert( c );
- if ( c->getShard() != shard )
- continue;
- if ( c->getLastmod() > max )
- max = c->getLastmod();
- }
- return max;
+ ShardVersionMap::const_iterator i = _shardVersions.find( shard );
+ if ( i == _shardVersions.end() )
+ return 0;
+ return i->second;
}
ShardChunkVersion ChunkManager::getVersion() const {
- rwlock lk( _lock , false );
-
- ShardChunkVersion max = 0;
-
- for ( ChunkMap::const_iterator i=_chunkMap.begin(); i!=_chunkMap.end(); ++i ) {
- ChunkPtr c = i->second;
- if ( c->getLastmod() > max )
- max = c->getLastmod();
- }
-
- return max;
+ return _version;
}
string ChunkManager::toString() const {
- rwlock lk( _lock , false );
-
stringstream ss;
ss << "ChunkManager: " << _ns << " key:" << _key.toString() << '\n';
for ( ChunkMap::const_iterator i=_chunkMap.begin(); i!=_chunkMap.end(); ++i ) {
@@ -993,69 +929,6 @@ namespace mongo {
}
}
- void ChunkRangeManager::reloadRange(const ChunkMap& chunks, const BSONObj& min, const BSONObj& max) {
- if (_ranges.empty()) {
- reloadAll(chunks);
- return;
- }
-
- ChunkRangeMap::iterator low = _ranges.upper_bound(min);
- ChunkRangeMap::iterator high = _ranges.lower_bound(max);
-
- assert(low != _ranges.end());
- assert(high != _ranges.end());
- assert(low->second);
- assert(high->second);
-
- ChunkMap::const_iterator begin = chunks.upper_bound(low->second->getMin());
- ChunkMap::const_iterator end = chunks.lower_bound(high->second->getMax());
-
- assert(begin != chunks.end());
- assert(end != chunks.end());
-
- // C++ end iterators are one-past-last
- ++high;
- ++end;
-
- // update ranges
- _ranges.erase(low, high); // invalidates low
- _insertRange(begin, end);
-
- assert(!_ranges.empty());
- DEV assertValid();
-
- // merge low-end if possible
- low = _ranges.upper_bound(min);
- assert(low != _ranges.end());
- if (low != _ranges.begin()) {
- shared_ptr<ChunkRange> a = prior(low)->second;
- shared_ptr<ChunkRange> b = low->second;
- if (a->getShard() == b->getShard()) {
- shared_ptr<ChunkRange> cr (new ChunkRange(*a, *b));
- _ranges.erase(prior(low));
- _ranges.erase(low); // invalidates low
- _ranges[cr->getMax()] = cr;
- }
- }
-
- DEV assertValid();
-
- // merge high-end if possible
- high = _ranges.lower_bound(max);
- if (high != prior(_ranges.end())) {
- shared_ptr<ChunkRange> a = high->second;
- shared_ptr<ChunkRange> b = boost::next(high)->second;
- if (a->getShard() == b->getShard()) {
- shared_ptr<ChunkRange> cr (new ChunkRange(*a, *b));
- _ranges.erase(boost::next(high));
- _ranges.erase(high); //invalidates high
- _ranges[cr->getMax()] = cr;
- }
- }
-
- DEV assertValid();
- }
-
void ChunkRangeManager::reloadAll(const ChunkMap& chunks) {
_ranges.clear();
_insertRange(chunks.begin(), chunks.end());
@@ -1095,13 +968,6 @@ namespace mongo {
class ChunkObjUnitTest : public UnitTest {
public:
- void runShard() {
- ChunkPtr c;
- assert( ! c );
- c.reset( new Chunk( 0 ) );
- assert( c );
- }
-
void runShardChunkVersion() {
vector<ShardChunkVersion> all;
all.push_back( ShardChunkVersion(1,1) );
@@ -1118,9 +984,8 @@ namespace mongo {
}
void run() {
- runShard();
runShardChunkVersion();
- log(1) << "shardObjTest passed" << endl;
+ LOG(1) << "shardObjTest passed" << endl;
}
} shardObjTest;
@@ -1145,7 +1010,7 @@ namespace mongo {
cmdBuilder.append( "shardHost" , s.getConnString() );
BSONObj cmd = cmdBuilder.obj();
- log(1) << " setShardVersion " << s.getName() << " " << conn.getServerAddress() << " " << ns << " " << cmd << " " << &conn << endl;
+ LOG(1) << " setShardVersion " << s.getName() << " " << conn.getServerAddress() << " " << ns << " " << cmd << " " << &conn << endl;
return conn.runCommand( "admin" , cmd , result );
}
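
The largest change in chunk.cpp is that ChunkManager's chunk map, shard set, and per-shard versions are now built into locals inside the constructor and then swapped into const members through const_cast, so the object is effectively immutable once published. A minimal sketch of that pattern with a made-up class; it relies, exactly as the patch does, on no other thread seeing the object during construction:

    #include <map>
    #include <string>
    #include <utility>

    class Snapshot {
    public:
        Snapshot() {
            std::map<std::string, int> tmp;
            tmp["a"] = 1;                        // load into a local first
            tmp["b"] = 2;
            // swap into the const member; only legitimate inside the constructor
            const_cast<std::map<std::string, int>&>(_data).swap(tmp);
        }
        int get(const std::string& k) const {
            std::map<std::string, int>::const_iterator it = _data.find(k);
            return it == _data.end() ? 0 : it->second;
        }
    private:
        const std::map<std::string, int> _data; // const => read-only after construction
    };

    int main() { Snapshot s; return s.get("a") == 1 ? 0 : 1; }
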
diff --git a/s/chunk.h b/s/chunk.h
index 6054afc..4c36dd1 100644
--- a/s/chunk.h
+++ b/s/chunk.h
@@ -37,13 +37,13 @@ namespace mongo {
class ChunkRangeMangager;
class ChunkObjUnitTest;
- typedef shared_ptr<Chunk> ChunkPtr;
+ typedef shared_ptr<const Chunk> ChunkPtr;
// key is max for each Chunk or ChunkRange
typedef map<BSONObj,ChunkPtr,BSONObjCmp> ChunkMap;
typedef map<BSONObj,shared_ptr<ChunkRange>,BSONObjCmp> ChunkRangeMap;
- typedef shared_ptr<ChunkManager> ChunkManagerPtr;
+ typedef shared_ptr<const ChunkManager> ChunkManagerPtr;
/**
config.chunks
@@ -52,17 +52,16 @@ namespace mongo {
x is in a shard iff
min <= x < max
*/
- class Chunk : boost::noncopyable, public boost::enable_shared_from_this<Chunk> {
+ class Chunk : boost::noncopyable {
public:
- Chunk( ChunkManager * info );
- Chunk( ChunkManager * info , const BSONObj& min, const BSONObj& max, const Shard& shard);
+ Chunk( const ChunkManager * info , BSONObj from);
+ Chunk( const ChunkManager * info , const BSONObj& min, const BSONObj& max, const Shard& shard);
//
// serialization support
//
void serialize(BSONObjBuilder& to, ShardChunkVersion myLastMod=0);
- void unserialize(const BSONObj& from);
//
// chunk boundary support
@@ -70,8 +69,6 @@ namespace mongo {
const BSONObj& getMin() const { return _min; }
const BSONObj& getMax() const { return _max; }
- void setMin(const BSONObj& o) { _min = o; }
- void setMax(const BSONObj& o) { _max = o; }
// if min/max key is pos/neg infinity
bool minIsInf() const;
@@ -86,7 +83,7 @@ namespace mongo {
// chunk version support
//
- void appendShortVersion( const char * name , BSONObjBuilder& b );
+ void appendShortVersion( const char * name , BSONObjBuilder& b ) const;
ShardChunkVersion getLastmod() const { return _lastmod; }
void setLastmod( ShardChunkVersion v ) { _lastmod = v; }
@@ -100,7 +97,7 @@ namespace mongo {
* then we check the real size, and if its too big, we split
* @return if something was split
*/
- bool splitIfShould( long dataWritten );
+ bool splitIfShould( long dataWritten ) const;
/**
* Splits this chunk at a non-specificed split key to be chosen by the mongod holding this chunk.
@@ -108,18 +105,18 @@ namespace mongo {
* @param force if set to true, will split the chunk regardless if the split is really necessary size wise
* if set to false, will only split if the chunk has reached the currently desired maximum size
* @param res the object containing details about the split execution
- * @return if found a key and split successfully
+ * @return the split point if a key was found and the split succeeded, otherwise an empty BSONObj
*/
- bool singleSplit( bool force , BSONObj& res , ChunkPtr* low=NULL, ChunkPtr* high=NULL);
+ BSONObj singleSplit( bool force , BSONObj& res ) const;
/**
* Splits this chunk at the given key (or keys)
*
* @param splitPoints the vector of keys that should be used to divide this chunk
* @param res the object containing details about the split execution
- * @return if split was successful
+ * @return if the split was successful
*/
- bool multiSplit( const vector<BSONObj>& splitPoints , BSONObj& res , bool resetIfSplit );
+ bool multiSplit( const vector<BSONObj>& splitPoints , BSONObj& res ) const;
/**
* Asks the mongod holding this chunk to find a key that approximately divides this chunk in two
@@ -141,13 +138,6 @@ namespace mongo {
//
/**
- * moves either this shard or newShard if it makes sense too
- *
- * @return whether or not a shard was moved
- */
- bool moveIfShould( ChunkPtr newShard = ChunkPtr() );
-
- /**
* Issues a migrate request for this chunk
*
* @param to shard to move this chunk to
@@ -155,7 +145,7 @@ namespace mongo {
* @param res the object containing details about the migrate execution
* @return true if move was successful
*/
- bool moveAndCommit( const Shard& to , long long chunkSize , BSONObj& res );
+ bool moveAndCommit( const Shard& to , long long chunkSize , BSONObj& res ) const;
/**
* @return size of shard in bytes
@@ -164,11 +154,6 @@ namespace mongo {
long getPhysicalSize() const;
//
- // chunk size support
-
- int countObjects(int maxcount=0) const;
-
- //
// public constants
//
@@ -192,9 +177,10 @@ namespace mongo {
private:
+
// main shard info
- ChunkManager * _manager;
+ const ChunkManager * _manager;
BSONObj _min;
BSONObj _max;
@@ -203,7 +189,7 @@ namespace mongo {
// transient stuff
- long _dataWritten;
+ mutable long _dataWritten;
// methods, etc..
@@ -215,7 +201,7 @@ namespace mongo {
BSONObj _getExtremeKey( int sort ) const;
/** initializes _dataWritten with a random value so that a mongos restart wouldn't cause delay in splitting */
- void _setDataWritten();
+ static long mkDataWritten();
ShardKeyPattern skey() const;
};
@@ -275,7 +261,6 @@ namespace mongo {
void clear() { _ranges.clear(); }
void reloadAll(const ChunkMap& chunks);
- void reloadRange(const ChunkMap& chunks, const BSONObj& min, const BSONObj& max);
// Slow operation -- wrap with DEV
void assertValid() const;
@@ -298,27 +283,27 @@ namespace mongo {
*/
class ChunkManager {
public:
+ typedef map<Shard,ShardChunkVersion> ShardVersionMap;
ChunkManager( string ns , ShardKeyPattern pattern , bool unique );
- virtual ~ChunkManager();
string getns() const { return _ns; }
- int numChunks() const { rwlock lk( _lock , false ); return _chunkMap.size(); }
- bool hasShardKey( const BSONObj& obj );
+ int numChunks() const { return _chunkMap.size(); }
+ bool hasShardKey( const BSONObj& obj ) const;
- void createFirstChunk( const Shard& shard );
- ChunkPtr findChunk( const BSONObj& obj );
+ void createFirstChunk( const Shard& shard ) const; // only call from DBConfig::shardCollection
+ ChunkPtr findChunk( const BSONObj& obj ) const;
ChunkPtr findChunkOnServer( const Shard& shard ) const;
const ShardKeyPattern& getShardKey() const { return _key; }
bool isUnique() const { return _unique; }
- bool maybeChunkCollection();
+ void maybeChunkCollection() const;
- void getShardsForQuery( set<Shard>& shards , const BSONObj& query );
- void getAllShards( set<Shard>& all );
- void getShardsForRange(set<Shard>& shards, const BSONObj& min, const BSONObj& max); // [min, max)
+ void getShardsForQuery( set<Shard>& shards , const BSONObj& query ) const;
+ void getAllShards( set<Shard>& all ) const;
+ void getShardsForRange(set<Shard>& shards, const BSONObj& min, const BSONObj& max) const; // [min, max)
string toString() const;
@@ -330,7 +315,7 @@ namespace mongo {
*/
unsigned long long getSequenceNumber() const { return _sequenceNumber; }
- void getInfo( BSONObjBuilder& b ) {
+ void getInfo( BSONObjBuilder& b ) const {
b.append( "key" , _key.key() );
b.appendBool( "unique" , _unique );
}
@@ -338,39 +323,41 @@ namespace mongo {
/**
* @param me - so i don't get deleted before i'm done
*/
- void drop( ChunkManagerPtr me );
+ void drop( ChunkManagerPtr me ) const;
void _printChunks() const;
int getCurrentDesiredChunkSize() const;
private:
- void _reload();
- void _reload_inlock();
- void _load();
+ ChunkManagerPtr reload(bool force=true) const; // doesn't modify self!
+
+ // helpers for constructor
+ void _load(ChunkMap& chunks, set<Shard>& shards, ShardVersionMap& shardVersions);
+ static bool _isValid(const ChunkMap& chunks);
- void ensureIndex_inlock();
- void getAllShards_inlock( set<Shard>& all );
+ // All members should be const for thread-safety
+ const string _ns;
+ const ShardKeyPattern _key;
+ const bool _unique;
- string _ns;
- ShardKeyPattern _key;
- bool _unique;
+ const ChunkMap _chunkMap;
+ const ChunkRangeManager _chunkRanges;
- ChunkMap _chunkMap;
- ChunkRangeManager _chunkRanges;
+ const set<Shard> _shards;
- set<Shard> _shards;
+ const ShardVersionMap _shardVersions; // max version per shard
- unsigned long long _sequenceNumber;
+ ShardChunkVersion _version; // max version of any chunk
- mutable RWLock _lock;
- DistributedLock _nsLock;
+ mutable mutex _mutex; // only used with _nsLock
+ mutable DistributedLock _nsLock;
+
+ const unsigned long long _sequenceNumber;
friend class Chunk;
friend class ChunkRangeManager; // only needed for CRM::assertValid()
static AtomicUInt NextSequenceNumber;
-
- bool _isValid() const;
};
// like BSONObjCmp. for use as an STL comparison functor
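
chunk.h now types ChunkPtr and ChunkManagerPtr as shared_ptr<const ...>, so callers hold read-only snapshots and "reload" publishes a brand-new manager instead of mutating one in place. A small self-contained sketch of that idiom (using std::shared_ptr from C++11 here purely for brevity; the tree itself uses boost):

    #include <iostream>
    #include <memory>

    struct Config {
        int version;
        explicit Config(int v) : version(v) {}
    };
    typedef std::shared_ptr<const Config> ConfigPtr;   // read-only handle

    ConfigPtr reload(int newVersion) { return ConfigPtr(new Config(newVersion)); }

    int main() {
        ConfigPtr current = reload(1);
        ConfigPtr snapshot = current;       // readers keep their snapshot alive
        current = reload(2);                // writers publish a fresh immutable copy
        std::cout << snapshot->version << " " << current->version << "\n"; // prints "1 2"
        return 0;
    }
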
diff --git a/s/client.cpp b/s/client.cpp
index c053289..0da05b6 100644
--- a/s/client.cpp
+++ b/s/client.cpp
@@ -55,7 +55,7 @@ namespace mongo {
if ( p ) {
HostAndPort r = p->remote();
- if ( _remote.port() == -1 )
+ if ( ! _remote.hasPort() )
_remote = r;
else if ( _remote != r ) {
stringstream ss;
@@ -96,7 +96,7 @@ namespace mongo {
BSONElement cid = gle["connectionId"];
if ( cid.eoo() ) {
- error() << "getLastError writeback can't work because of version mis-match" << endl;
+ error() << "getLastError writeback can't work because of version mismatch" << endl;
return;
}
@@ -114,7 +114,7 @@ namespace mongo {
return res;
if ( fromWriteBackListener ) {
- LOG(1) << "not doing recusrive writeback" << endl;
+ LOG(1) << "not doing recursive writeback" << endl;
return res;
}
@@ -150,7 +150,7 @@ namespace mongo {
}
catch( std::exception &e ){
- warning() << "Could not get last error." << e.what() << endl;
+ warning() << "could not get last error." << causedBy( e ) << endl;
// Catch everything that happens here, since we need to ensure we return our connection when we're
// finished.
@@ -223,7 +223,7 @@ namespace mongo {
// Safe to return here, since we haven't started any extra processing yet, just collecting
// responses.
- warning() << "Could not get last error." << e.what() << endl;
+ warning() << "could not get last error." << causedBy( e ) << endl;
conn.done();
return false;
diff --git a/s/client.h b/s/client.h
index a01b1de..dc529f7 100644
--- a/s/client.h
+++ b/s/client.h
@@ -18,6 +18,7 @@
#include "../pch.h"
#include "writeback_listener.h"
+#include "../db/security.h"
namespace mongo {
@@ -82,9 +83,10 @@ namespace mongo {
void noAutoSplit() { _autoSplitOk = false; }
static ClientInfo * get();
-
+ AuthenticationInfo* getAuthenticationInfo() const { return (AuthenticationInfo*)&_ai; }
+ bool isAdmin() { return _ai.isAuthorized( "admin" ); }
private:
-
+ AuthenticationInfo _ai;
struct WBInfo {
WBInfo( const WriteBackListener::ConnectionIdent& c , OID o ) : ident( c ) , id( o ) {}
WriteBackListener::ConnectionIdent ident;
diff --git a/s/commands_admin.cpp b/s/commands_admin.cpp
index 7677265..4568c4d 100644
--- a/s/commands_admin.cpp
+++ b/s/commands_admin.cpp
@@ -26,7 +26,8 @@
*/
#include "pch.h"
-#include "../util/message.h"
+#include "../util/net/message.h"
+#include "../util/net/listen.h"
#include "../util/processinfo.h"
#include "../util/stringutils.h"
#include "../util/version.h"
@@ -44,6 +45,7 @@
#include "stats.h"
#include "writeback_listener.h"
#include "client.h"
+#include "../util/ramlog.h"
namespace mongo {
@@ -62,6 +64,15 @@ namespace mongo {
// all grid commands are designed not to lock
virtual LockType locktype() const { return NONE; }
+
+ bool okForConfigChanges( string& errmsg ) {
+ string e;
+ if ( ! configServer.allUp(e) ) {
+ errmsg = str::stream() << "not all config servers are up: " << e;
+ return false;
+ }
+ return true;
+ }
};
// --------------- misc commands ----------------------
@@ -72,7 +83,7 @@ namespace mongo {
virtual void help( stringstream& help ) const {
help << " shows status/reachability of servers in the cluster";
}
- bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
result.append("configserver", configServer.getPrimary().getConnString() );
result.append("isdbgrid", 1);
return true;
@@ -85,7 +96,7 @@ namespace mongo {
virtual void help( stringstream& help ) const {
help << "flush all router config";
}
- bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
grid.flushConfig();
result.appendBool( "flushed" , true );
return true;
@@ -102,7 +113,7 @@ namespace mongo {
virtual bool slaveOk() const { return true; }
virtual LockType locktype() const { return NONE; }
- bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
result.append( "host" , prettyHostName() );
result.append("version", versionString);
result.append("process","mongos");
@@ -167,6 +178,20 @@ namespace mongo {
bb.done();
}
+ {
+ RamLog* rl = RamLog::get( "warnings" );
+ verify(15879, rl);
+
+ if (rl->lastWrite() >= time(0)-(10*60)){ // only show warnings from last 10 minutes
+ vector<const char*> lines;
+ rl->get( lines );
+
+ BSONArrayBuilder arr( result.subarrayStart( "warnings" ) );
+ for ( unsigned i=std::max(0,(int)lines.size()-10); i<lines.size(); i++ )
+ arr.append( lines[i] );
+ arr.done();
+ }
+ }
return 1;
}
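
The serverStatus hunk above surfaces the tail of the in-memory "warnings" RamLog when it has been written to within the last ten minutes. A rough stand-in for that behaviour, keeping a bounded list of timestamped lines and reporting only the most recent few (the names here are invented, not the RamLog API):

    #include <ctime>
    #include <deque>
    #include <iostream>
    #include <string>
    #include <utility>

    struct WarningLog {
        std::deque<std::pair<std::time_t, std::string> > lines;   // oldest first
        void add(const std::string& s) {
            lines.push_back(std::make_pair(std::time(0), s));
            if (lines.size() > 100) lines.pop_front();            // bounded, like a ring log
        }
        // print at most the last 10 lines written within the last 10 minutes
        void report(std::ostream& out) const {
            std::time_t cutoff = std::time(0) - 10 * 60;
            size_t start = lines.size() > 10 ? lines.size() - 10 : 0;
            for (size_t i = start; i < lines.size(); i++)
                if (lines[i].first >= cutoff)
                    out << lines[i].second << "\n";
        }
    };

    int main() {
        WarningLog log;
        log.add("example warning");
        log.report(std::cout);
        return 0;
    }
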
@@ -177,7 +202,7 @@ namespace mongo {
class FsyncCommand : public GridAdminCmd {
public:
FsyncCommand() : GridAdminCmd( "fsync" ) {}
- bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
if ( cmdObj["lock"].trueValue() ) {
errmsg = "can't do lock through mongos";
return false;
@@ -217,9 +242,8 @@ namespace mongo {
MoveDatabasePrimaryCommand() : GridAdminCmd("movePrimary") { }
virtual void help( stringstream& help ) const {
help << " example: { moveprimary : 'foo' , to : 'localhost:9999' }";
- // TODO: locking?
}
- bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
string dbname = cmdObj.firstElement().valuestrsafe();
if ( dbname.size() == 0 ) {
@@ -246,7 +270,7 @@ namespace mongo {
Shard s = Shard::make( to );
if ( config->getPrimary() == s.getConnString() ) {
- errmsg = "thats already the primary";
+ errmsg = "it is already the primary";
return false;
}
@@ -255,10 +279,27 @@ namespace mongo {
return false;
}
- log() << "movePrimary: moving " << dbname << " primary from: " << config->getPrimary().toString()
+ log() << "Moving " << dbname << " primary from: " << config->getPrimary().toString()
<< " to: " << s.toString() << endl;
- // TODO LOCKING: this is not safe with multiple mongos
+ // Locking enabled now...
+ DistributedLock lockSetup( configServer.getConnectionString(), dbname + "-movePrimary" );
+ dist_lock_try dlk;
+
+ // Distributed locking added.
+ try{
+ dlk = dist_lock_try( &lockSetup , string("Moving primary shard of ") + dbname );
+ }
+ catch( LockException& e ){
+ errmsg = str::stream() << "error locking distributed lock to move primary shard of " << dbname << causedBy( e );
+ warning() << errmsg << endl;
+ return false;
+ }
+
+ if ( ! dlk.got() ) {
+ errmsg = (string)"metadata lock is already taken for moving " + dbname;
+ return false;
+ }
ScopedDbConnection toconn( s.getConnString() );
@@ -297,19 +338,31 @@ namespace mongo {
<< "Enable sharding for a db. (Use 'shardcollection' command afterwards.)\n"
<< " { enablesharding : \"<dbname>\" }\n";
}
- bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
string dbname = cmdObj.firstElement().valuestrsafe();
if ( dbname.size() == 0 ) {
errmsg = "no db";
return false;
}
+
+ if ( dbname == "admin" ) {
+ errmsg = "can't shard the admin db";
+ return false;
+ }
+ if ( dbname == "local" ) {
+ errmsg = "can't shard the local db";
+ return false;
+ }
DBConfigPtr config = grid.getDBConfig( dbname );
if ( config->isShardingEnabled() ) {
errmsg = "already enabled";
return false;
}
-
+
+ if ( ! okForConfigChanges( errmsg ) )
+ return false;
+
log() << "enabling sharding on: " << dbname << endl;
config->enableSharding();
@@ -330,7 +383,7 @@ namespace mongo {
<< " { enablesharding : \"<dbname>\" }\n";
}
- bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
string ns = cmdObj.firstElement().valuestrsafe();
if ( ns.size() == 0 ) {
errmsg = "no ns";
@@ -366,9 +419,12 @@ namespace mongo {
return false;
}
+ if ( ! okForConfigChanges( errmsg ) )
+ return false;
+
// Sharding interacts with indexing in at least two ways:
//
- // 1. A unique index must have the sharding key as its prefix. Otherwise maintainig uniqueness would
+ // 1. A unique index must have the sharding key as its prefix. Otherwise maintaining uniqueness would
// require coordinated access to all shards. Trying to shard a collection with such an index is not
// allowed.
//
@@ -380,25 +436,38 @@ namespace mongo {
//
// We enforce both these conditions in what comes next.
+ bool careAboutUnique = cmdObj["unique"].trueValue();
+
{
ShardKeyPattern proposedKey( key );
bool hasShardIndex = false;
+ bool hasUniqueShardIndex = false;
ScopedDbConnection conn( config->getPrimary() );
BSONObjBuilder b;
b.append( "ns" , ns );
+ BSONArrayBuilder allIndexes;
+
auto_ptr<DBClientCursor> cursor = conn->query( config->getName() + ".system.indexes" , b.obj() );
while ( cursor->more() ) {
BSONObj idx = cursor->next();
+ allIndexes.append( idx );
+
+ bool idIndex = ! idx["name"].eoo() && idx["name"].String() == "_id_";
+ bool uniqueIndex = ( ! idx["unique"].eoo() && idx["unique"].trueValue() ) ||
+ idIndex;
+
// Is index key over the sharding key? Remember that.
if ( key.woCompare( idx["key"].embeddedObjectUserCheck() ) == 0 ) {
hasShardIndex = true;
+ hasUniqueShardIndex = uniqueIndex;
+ continue;
}
// Not a unique index? Move on.
- if ( idx["unique"].eoo() || ! idx["unique"].trueValue() )
+ if ( ! uniqueIndex || idIndex )
continue;
// Shard key is prefix of unique index? Move on.
@@ -409,6 +478,12 @@ namespace mongo {
conn.done();
return false;
}
+
+ if( careAboutUnique && hasShardIndex && ! hasUniqueShardIndex ){
+ errmsg = (string)"can't shard collection " + ns + ", index not unique";
+ conn.done();
+ return false;
+ }
BSONObj res = conn->findOne( config->getName() + ".system.namespaces" , BSON( "name" << ns ) );
if ( res["options"].type() == Object && res["options"].embeddedObject()["capped"].trueValue() ) {
@@ -432,6 +507,8 @@ namespace mongo {
if ( ! hasShardIndex && ( conn->count( ns ) != 0 ) ) {
errmsg = "please create an index over the sharding key before sharding.";
+ result.append( "proposedKey" , key );
+ result.appendArray( "curIndexes" , allIndexes.done() );
conn.done();
return false;
}
@@ -441,7 +518,7 @@ namespace mongo {
tlog() << "CMD: shardcollection: " << cmdObj << endl;
- config->shardCollection( ns , key , cmdObj["unique"].trueValue() );
+ config->shardCollection( ns , key , careAboutUnique );
result << "collectionsharded" << ns;
return true;
@@ -455,10 +532,10 @@ namespace mongo {
help << " example: { getShardVersion : 'alleyinsider.foo' } ";
}
- bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
string ns = cmdObj.firstElement().valuestrsafe();
if ( ns.size() == 0 ) {
- errmsg = "need to speciy fully namespace";
+ errmsg = "need to specify fully namespace";
return false;
}
@@ -468,7 +545,7 @@ namespace mongo {
return false;
}
- ChunkManagerPtr cm = config->getChunkManager( ns );
+ ChunkManagerPtr cm = config->getChunkManagerIfExists( ns );
if ( ! cm ) {
errmsg = "no chunk manager?";
return false;
@@ -489,11 +566,15 @@ namespace mongo {
<< " { split : 'alleyinsider.blog.posts' , find : { ts : 1 } }\n"
<< " example: - split the shard that contains the key with this as the middle \n"
<< " { split : 'alleyinsider.blog.posts' , middle : { ts : 1 } }\n"
- << " NOTE: this does not move move the chunks, it merely creates a logical seperation \n"
+ << " NOTE: this does not move move the chunks, it merely creates a logical separation \n"
;
}
- bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
+
+ if ( ! okForConfigChanges( errmsg ) )
+ return false;
+
ShardConnection::sync();
string ns = cmdObj.firstElement().valuestrsafe();
@@ -504,8 +585,11 @@ namespace mongo {
DBConfigPtr config = grid.getDBConfig( ns );
if ( ! config->isSharded( ns ) ) {
- errmsg = "ns not sharded. have to shard before can split";
- return false;
+ config->reload();
+ if ( ! config->isSharded( ns ) ) {
+ errmsg = "ns not sharded. have to shard before can split";
+ return false;
+ }
}
BSONObj find = cmdObj.getObjectField( "find" );
@@ -528,8 +612,8 @@ namespace mongo {
BSONObj res;
bool worked;
if ( middle.isEmpty() ) {
- worked = chunk->singleSplit( true /* force a split even if not enough data */ , res );
-
+ BSONObj ret = chunk->singleSplit( true /* force a split even if not enough data */ , res );
+ worked = !ret.isEmpty();
}
else {
// sanity check if the key provided is a valid split point
@@ -538,9 +622,14 @@ namespace mongo {
return false;
}
+ if (!fieldsMatch(middle, info->getShardKey().key())){
+ errmsg = "middle has different fields (or different order) than shard key";
+ return false;
+ }
+
vector<BSONObj> splitPoints;
splitPoints.push_back( middle );
- worked = chunk->multiSplit( splitPoints , res , true );
+ worked = chunk->multiSplit( splitPoints , res );
}
if ( !worked ) {
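
The added fieldsMatch guard only has to compare field names and their order, not values. A minimal sketch of that check, with a document reduced to an ordered list of (field, value) pairs (purely illustrative types):

// Illustrative only: a document is reduced to an ordered list of (field, value) pairs.
#include <string>
#include <utility>
#include <vector>

typedef std::vector< std::pair<std::string, int> > Doc;

// true iff both docs carry the same field names in the same order (values are ignored),
// which is what the split command demands of a user-supplied 'middle' vs. the shard key
bool fieldsMatch(const Doc& middle, const Doc& shardKey) {
    if (middle.size() != shardKey.size())
        return false;
    for (size_t i = 0; i < middle.size(); ++i)
        if (middle[i].first != shardKey[i].first)
            return false;
    return true;
}
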
@@ -559,7 +648,11 @@ namespace mongo {
virtual void help( stringstream& help ) const {
help << "{ movechunk : 'test.foo' , find : { num : 1 } , to : 'localhost:30001' }";
}
- bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
+
+ if ( ! okForConfigChanges( errmsg ) )
+ return false;
+
ShardConnection::sync();
Timer t;
@@ -571,8 +664,11 @@ namespace mongo {
DBConfigPtr config = grid.getDBConfig( ns );
if ( ! config->isSharded( ns ) ) {
- errmsg = "ns not sharded. have to shard before can move a chunk";
- return false;
+ config->reload();
+ if ( ! config->isSharded( ns ) ) {
+ errmsg = "ns not sharded. have to shard before we can move a chunk";
+ return false;
+ }
}
BSONObj find = cmdObj.getObjectField( "find" );
@@ -613,7 +709,7 @@ namespace mongo {
return false;
}
- // pre-emptively reload the config to get new version info
+ // preemptively reload the config to get new version info
config->getChunkManager( ns , true );
result.append( "millis" , t.millis() );
@@ -629,7 +725,7 @@ namespace mongo {
virtual void help( stringstream& help ) const {
help << "list all shards of the system";
}
- bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
ScopedDbConnection conn( configServer.getPrimary() );
vector<BSONObj> all;
@@ -653,7 +749,7 @@ namespace mongo {
virtual void help( stringstream& help ) const {
help << "add a new shard to the system";
}
- bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
errmsg.clear();
// get replica set component hosts
@@ -663,12 +759,15 @@ namespace mongo {
return false;
}
- // using localhost in server names implies every other process must use locahost addresses too
+ // using localhost in server names implies every other process must use localhost addresses too
vector<HostAndPort> serverAddrs = servers.getServers();
for ( size_t i = 0 ; i < serverAddrs.size() ; i++ ) {
if ( serverAddrs[i].isLocalHost() != grid.allowLocalHost() ) {
- errmsg = "can't use localhost as a shard since all shards need to communicate. "
- "either use all shards and configdbs in localhost or all in actual IPs " ;
+ errmsg = str::stream() <<
+ "can't use localhost as a shard since all shards need to communicate. " <<
+                     "either use localhost for all shards and configdbs or actual IPs for all of them." <<
+ " host: " << serverAddrs[i].toString() << " isLocalHost:" << serverAddrs[i].isLocalHost();
+
log() << "addshard request " << cmdObj << " failed: attempt to mix localhosts and IPs" << endl;
return false;
}
@@ -711,7 +810,7 @@ namespace mongo {
virtual void help( stringstream& help ) const {
help << "remove a shard to the system.";
}
- bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
string target = cmdObj.firstElement().valuestrsafe();
Shard s = Shard::make( target );
if ( ! grid.knowAboutShard( s.getConnString() ) ) {
@@ -794,11 +893,12 @@ namespace mongo {
class IsDbGridCmd : public Command {
public:
virtual LockType locktype() const { return NONE; }
+ virtual bool requiresAuth() { return false; }
virtual bool slaveOk() const {
return true;
}
IsDbGridCmd() : Command("isdbgrid") { }
- bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
result.append("isdbgrid", 1);
result.append("hostname", getHostNameCached());
return true;
@@ -816,7 +916,7 @@ namespace mongo {
help << "test if this is master half of a replica pair";
}
CmdIsMaster() : Command("isMaster" , false , "ismaster") { }
- virtual bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ virtual bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
result.appendBool("ismaster", true );
result.append("msg", "isdbgrid");
result.appendNumber("maxBsonObjectSize", BSONObjMaxUserSize);
@@ -840,7 +940,7 @@ namespace mongo {
virtual void help( stringstream &help ) const {
help << "{whatsmyuri:1}";
}
- virtual bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ virtual bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
result << "you" << ClientInfo::get()->getRemote();
return true;
}
@@ -858,7 +958,7 @@ namespace mongo {
help << "get previous error (since last reseterror command)";
}
CmdShardingGetPrevError() : Command( "getPrevError" , false , "getpreverror") { }
- virtual bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ virtual bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
errmsg += "getpreverror not supported for sharded environments";
return false;
}
@@ -876,7 +976,7 @@ namespace mongo {
}
CmdShardingGetLastError() : Command("getLastError" , false , "getlasterror") { }
- virtual bool run(const string& dbName, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ virtual bool run(const string& dbName, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
LastError *le = lastError.disableForCommand();
{
assert( le );
@@ -903,7 +1003,7 @@ namespace mongo {
return true;
}
- bool run(const string& dbName , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool /*fromRepl*/) {
+ bool run(const string& dbName , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool /*fromRepl*/) {
LastError *le = lastError.get();
if ( le )
le->reset();
@@ -934,7 +1034,7 @@ namespace mongo {
virtual LockType locktype() const { return NONE; }
virtual void help( stringstream& help ) const { help << "list databases on cluster"; }
- bool run(const string& , BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool /*fromRepl*/) {
+ bool run(const string& , BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result, bool /*fromRepl*/) {
vector<Shard> shards;
Shard::getAllShards( shards );
@@ -976,7 +1076,7 @@ namespace mongo {
if ( name == "local" ) {
// we don't return local
- // since all shards have their own independant local
+ // since all shards have their own independent local
continue;
}
@@ -1031,7 +1131,7 @@ namespace mongo {
virtual LockType locktype() const { return NONE; }
virtual void help( stringstream& help ) const { help << "Not supported sharded"; }
- bool run(const string& , BSONObj& jsobj, string& errmsg, BSONObjBuilder& /*result*/, bool /*fromRepl*/) {
+ bool run(const string& , BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& /*result*/, bool /*fromRepl*/) {
errmsg = "closeAllDatabases isn't supported through mongos";
return false;
}
@@ -1047,13 +1147,25 @@ namespace mongo {
virtual LockType locktype() const { return NONE; }
virtual void help( stringstream& help ) const { help << "Not supported through mongos"; }
- bool run(const string& , BSONObj& jsobj, string& errmsg, BSONObjBuilder& /*result*/, bool /*fromRepl*/) {
+ bool run(const string& , BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result, bool /*fromRepl*/) {
if ( jsobj["forShell"].trueValue() )
lastError.disableForCommand();
errmsg = "replSetGetStatus is not supported through mongos";
+ result.append("info", "mongos"); // see sayReplSetMemberState
return false;
}
} cmdReplSetGetStatus;
+ CmdShutdown cmdShutdown;
+
+ void CmdShutdown::help( stringstream& help ) const {
+        help << "shutdown the database. must be run against the admin db and "
+            << "either (1) run from localhost or (2) authenticated.";
+ }
+
+ bool CmdShutdown::run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ return shutdownHelper();
+ }
+
} // namespace mongo
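
A change that recurs throughout this patch is the extra int in every Command::run signature, used to forward the wire-protocol query options from the client through mongos. A self-contained sketch of that shape (the class names and the option bit are stand-ins, not mongo's API):

// Illustrative only: names and the option bit are stand-ins, not mongo's actual API.
#include <iostream>
#include <map>
#include <string>

struct Command {
    virtual ~Command() {}
    // 'options' carries the wire-protocol query flags from the original client request
    virtual bool run(const std::string& db, const std::string& cmd, int options,
                     std::string& errmsg, std::string& result) = 0;
};

struct PingCmd : Command {
    bool run(const std::string&, const std::string&, int options,
             std::string&, std::string& result) {
        result = "pong (options=" + std::to_string(options) + ")";
        return true;
    }
};

int main() {
    std::map<std::string, Command*> commands;
    PingCmd ping;
    commands["ping"] = &ping;

    std::string errmsg, result;
    int queryOptions = 1 << 2;   // pretend flag forwarded from the client message
    bool ok = commands["ping"]->run("test", "ping", queryOptions, errmsg, result);
    std::cout << (ok ? result : errmsg) << std::endl;
    return 0;
}
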
diff --git a/s/commands_public.cpp b/s/commands_public.cpp
index f29205b..ef7110c 100644
--- a/s/commands_public.cpp
+++ b/s/commands_public.cpp
@@ -18,20 +18,28 @@
*/
#include "pch.h"
-#include "../util/message.h"
+#include "../util/net/message.h"
#include "../db/dbmessage.h"
#include "../client/connpool.h"
#include "../client/parallel.h"
#include "../db/commands.h"
-#include "../db/query.h"
+#include "../db/queryutil.h"
+#include "../scripting/engine.h"
#include "config.h"
#include "chunk.h"
#include "strategy.h"
#include "grid.h"
+#include "mr_shard.h"
+#include "client.h"
namespace mongo {
+ bool setParmsMongodSpecific(const string& dbname, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl )
+ {
+ return true;
+ }
+
namespace dbgrid_pub_cmds {
class PublicGridCommand : public Command {
@@ -45,22 +53,38 @@ namespace mongo {
return false;
}
+ // Override if passthrough should also send query options
+ // Safer as off by default, can slowly enable as we add more tests
+ virtual bool passOptions() const { return false; }
+
// all grid commands are designed not to lock
virtual LockType locktype() const { return NONE; }
protected:
+
bool passthrough( DBConfigPtr conf, const BSONObj& cmdObj , BSONObjBuilder& result ) {
- return _passthrough(conf->getName(), conf, cmdObj, result);
+ return _passthrough(conf->getName(), conf, cmdObj, 0, result);
}
bool adminPassthrough( DBConfigPtr conf, const BSONObj& cmdObj , BSONObjBuilder& result ) {
- return _passthrough("admin", conf, cmdObj, result);
+ return _passthrough("admin", conf, cmdObj, 0, result);
+ }
+
+ bool passthrough( DBConfigPtr conf, const BSONObj& cmdObj , int options, BSONObjBuilder& result ) {
+ return _passthrough(conf->getName(), conf, cmdObj, options, result);
+ }
+ bool adminPassthrough( DBConfigPtr conf, const BSONObj& cmdObj , int options, BSONObjBuilder& result ) {
+ return _passthrough("admin", conf, cmdObj, options, result);
}
private:
- bool _passthrough(const string& db, DBConfigPtr conf, const BSONObj& cmdObj , BSONObjBuilder& result ) {
+ bool _passthrough(const string& db, DBConfigPtr conf, const BSONObj& cmdObj , int options , BSONObjBuilder& result ) {
ShardConnection conn( conf->getPrimary() , "" );
BSONObj res;
- bool ok = conn->runCommand( db , cmdObj , res );
+ bool ok = conn->runCommand( db , cmdObj , res , passOptions() ? options : 0 );
+ if ( ! ok && res["code"].numberInt() == StaleConfigInContextCode ) {
+ conn.done();
+ throw StaleConfigException("foo","command failed because of stale config");
+ }
result.appendElements( res );
conn.done();
return ok;
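
The reworked _passthrough above forwards the query options only when the concrete command opts in via passOptions(), and turns a stale-config error reply into an exception so callers can refresh metadata and retry. A standalone sketch of that pattern (the error code value and runOnPrimary() are stand-ins):

// Illustrative only: the error code and runOnPrimary() are stand-ins for the real wiring.
#include <stdexcept>
#include <string>

struct CommandResult { bool ok; int code; std::string body; };

struct StaleConfigException : std::runtime_error {
    explicit StaleConfigException(const std::string& what) : std::runtime_error(what) {}
};

static const int kStaleConfigInContext = 999;   // arbitrary stand-in for the real code

// stand-in for actually sending the command to the primary shard
CommandResult runOnPrimary(const std::string&, const std::string&, int /*options*/) {
    CommandResult r = { true, 0, "{ ok: 1 }" };
    return r;
}

bool passthrough(const std::string& db, const std::string& cmd,
                 int options, bool passOptions, std::string& out) {
    // forward the query options only if the concrete command opted in
    CommandResult res = runOnPrimary(db, cmd, passOptions ? options : 0);
    if (!res.ok && res.code == kStaleConfigInContext)
        throw StaleConfigException("command failed because of stale config");
    out = res.body;
    return res.ok;
}
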
@@ -87,13 +111,14 @@ namespace mongo {
virtual void aggregateResults(const vector<BSONObj>& results, BSONObjBuilder& output) {}
// don't override
- virtual bool run(const string& dbName , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& output, bool) {
+ virtual bool run(const string& dbName , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& output, bool) {
+ LOG(1) << "RunOnAllShardsCommand db: " << dbName << " cmd:" << cmdObj << endl;
set<Shard> shards;
getShards(dbName, cmdObj, shards);
list< shared_ptr<Future::CommandResult> > futures;
for ( set<Shard>::const_iterator i=shards.begin(), end=shards.end() ; i != end ; i++ ) {
- futures.push_back( Future::spawnCommand( i->getConnString() , dbName , cmdObj ) );
+ futures.push_back( Future::spawnCommand( i->getConnString() , dbName , cmdObj, 0 ) );
}
vector<BSONObj> results;
@@ -147,13 +172,13 @@ namespace mongo {
virtual string getFullNS( const string& dbName , const BSONObj& cmdObj ) = 0;
- virtual bool run(const string& dbName , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ virtual bool run(const string& dbName , BSONObj& cmdObj, int options, string& errmsg, BSONObjBuilder& result, bool) {
string fullns = getFullNS( dbName , cmdObj );
DBConfigPtr conf = grid.getDBConfig( dbName , false );
if ( ! conf || ! conf->isShardingEnabled() || ! conf->isSharded( fullns ) ) {
- return passthrough( conf , cmdObj , result );
+ return passthrough( conf , cmdObj , options, result );
}
errmsg = "can't do command: " + name + " on sharded collection";
return false;
@@ -172,9 +197,41 @@ namespace mongo {
ReIndexCmd() : AllShardsCollectionCommand("reIndex") {}
} reIndexCmd;
+ class ProfileCmd : public PublicGridCommand {
+ public:
+ ProfileCmd() : PublicGridCommand("profile") {}
+ virtual bool run(const string& dbName , BSONObj& cmdObj, int options, string& errmsg, BSONObjBuilder& result, bool) {
+ errmsg = "profile currently not supported via mongos";
+ return false;
+ }
+ } profileCmd;
+
+
class ValidateCmd : public AllShardsCollectionCommand {
public:
ValidateCmd() : AllShardsCollectionCommand("validate") {}
+ virtual void aggregateResults(const vector<BSONObj>& results, BSONObjBuilder& output) {
+ for (vector<BSONObj>::const_iterator it(results.begin()), end(results.end()); it!=end; it++){
+ const BSONObj& result = *it;
+ const BSONElement valid = result["valid"];
+ if (!valid.eoo()){
+ if (!valid.trueValue()) {
+ output.appendBool("valid", false);
+ return;
+ }
+ }
+ else {
+ // Support pre-1.9.0 output with everything in a big string
+ const char* s = result["result"].valuestrsafe();
+ if (strstr(s, "exception") || strstr(s, "corrupt")){
+ output.appendBool("valid", false);
+ return;
+ }
+ }
+ }
+
+ output.appendBool("valid", true);
+ }
} validateCmd;
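
ValidateCmd's new aggregateResults prefers the explicit valid boolean from newer shards and falls back to scanning the legacy result string from pre-1.9.0 shards. Roughly, with plain std types in place of the per-shard BSON replies:

// Illustrative only: plain structs stand in for the per-shard BSON validate replies.
#include <string>
#include <vector>

struct ShardValidateResult {
    bool hasValidField;       // 1.9.0+ servers return an explicit boolean
    bool valid;
    std::string legacyText;   // older servers return one big result string
};

bool aggregateValid(const std::vector<ShardValidateResult>& results) {
    for (size_t i = 0; i < results.size(); ++i) {
        const ShardValidateResult& r = results[i];
        if (r.hasValidField) {
            if (!r.valid)
                return false;
        }
        else if (r.legacyText.find("exception") != std::string::npos ||
                 r.legacyText.find("corrupt")   != std::string::npos) {
            return false;     // legacy string output hints at a problem
        }
    }
    return true;              // valid only if every shard looked valid
}
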
class RepairDatabaseCmd : public RunOnAllShardsCommand {
@@ -221,7 +278,7 @@ namespace mongo {
class DropCmd : public PublicGridCommand {
public:
DropCmd() : PublicGridCommand( "drop" ) {}
- bool run(const string& dbName , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ bool run(const string& dbName , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
string collection = cmdObj.firstElement().valuestrsafe();
string fullns = dbName + "." + collection;
@@ -246,7 +303,7 @@ namespace mongo {
class DropDBCmd : public PublicGridCommand {
public:
DropDBCmd() : PublicGridCommand( "dropDatabase" ) {}
- bool run(const string& dbName , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ bool run(const string& dbName , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
BSONElement e = cmdObj.firstElement();
@@ -275,7 +332,7 @@ namespace mongo {
class RenameCollectionCmd : public PublicGridCommand {
public:
RenameCollectionCmd() : PublicGridCommand( "renameCollection" ) {}
- bool run(const string& dbName, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ bool run(const string& dbName, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
string fullnsFrom = cmdObj.firstElement().valuestrsafe();
string dbNameFrom = nsToDatabase( fullnsFrom.c_str() );
DBConfigPtr confFrom = grid.getDBConfig( dbNameFrom , false );
@@ -300,7 +357,7 @@ namespace mongo {
class CopyDBCmd : public PublicGridCommand {
public:
CopyDBCmd() : PublicGridCommand( "copydb" ) {}
- bool run(const string& dbName, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ bool run(const string& dbName, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
string todb = cmdObj.getStringField("todb");
uassert(13402, "need a todb argument", !todb.empty());
@@ -336,7 +393,8 @@ namespace mongo {
class CountCmd : public PublicGridCommand {
public:
CountCmd() : PublicGridCommand("count") { }
- bool run(const string& dbName, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool l) {
+ virtual bool passOptions() const { return true; }
+ bool run(const string& dbName, BSONObj& cmdObj, int options, string& errmsg, BSONObjBuilder& result, bool) {
string collection = cmdObj.firstElement().valuestrsafe();
string fullns = dbName + "." + collection;
@@ -345,12 +403,11 @@ namespace mongo {
filter = cmdObj["query"].Obj();
DBConfigPtr conf = grid.getDBConfig( dbName , false );
-
if ( ! conf || ! conf->isShardingEnabled() || ! conf->isSharded( fullns ) ) {
ShardConnection conn( conf->getPrimary() , fullns );
BSONObj temp;
- bool ok = conn->runCommand( dbName , cmdObj , temp );
+ bool ok = conn->runCommand( dbName , cmdObj , temp, options );
conn.done();
if ( ok ) {
@@ -365,7 +422,7 @@ namespace mongo {
}
// this collection got sharded
- ChunkManagerPtr cm = conf->getChunkManager( fullns , true );
+ ChunkManagerPtr cm = conf->getChunkManagerIfExists( fullns , true );
if ( ! cm ) {
errmsg = "should be sharded now";
result.append( "root" , temp );
@@ -376,11 +433,11 @@ namespace mongo {
long long total = 0;
map<string,long long> shardCounts;
- ChunkManagerPtr cm = conf->getChunkManager( fullns );
+ ChunkManagerPtr cm = conf->getChunkManagerIfExists( fullns );
while ( true ) {
if ( ! cm ) {
// probably unsharded now
- return run( dbName , cmdObj , errmsg , result , l );
+ return run( dbName , cmdObj , options , errmsg , result, false );
}
set<Shard> shards;
@@ -394,14 +451,14 @@ namespace mongo {
if ( conn.setVersion() ) {
total = 0;
shardCounts.clear();
- cm = conf->getChunkManager( fullns );
+ cm = conf->getChunkManagerIfExists( fullns );
conn.done();
hadToBreak = true;
break;
}
BSONObj temp;
- bool ok = conn->runCommand( dbName , BSON( "count" << collection << "query" << filter ) , temp );
+ bool ok = conn->runCommand( dbName , BSON( "count" << collection << "query" << filter ) , temp, options );
conn.done();
if ( ok ) {
@@ -415,7 +472,7 @@ namespace mongo {
// my version is old
total = 0;
shardCounts.clear();
- cm = conf->getChunkManager( fullns , true );
+ cm = conf->getChunkManagerIfExists( fullns , true );
hadToBreak = true;
break;
}
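
The count path above keeps retrying whenever a shard reports that the router's chunk information is stale: partial totals are discarded, the chunk manager is refreshed, and the loop starts over (falling back to an unsharded count if the manager disappears). A toy, self-contained version of that retry shape, with an epoch counter standing in for the chunk version:

// Illustrative only: an epoch counter stands in for the chunk version / setVersion check.
#include <cstdio>
#include <vector>

struct ToyCluster {
    std::vector<long long> shardCounts;   // one entry per shard
    int routingEpoch;                     // bumped whenever chunks move
};

long long shardedCount(ToyCluster& c) {
    int myEpoch = c.routingEpoch;         // our cached "chunk manager"
    while (true) {
        long long total = 0;
        bool hadToBreak = false;
        for (size_t i = 0; i < c.shardCounts.size(); ++i) {
            if (myEpoch != c.routingEpoch) {   // a shard told us our version is old
                myEpoch = c.routingEpoch;      // refresh and throw away partial totals
                hadToBreak = true;
                break;
            }
            total += c.shardCounts[i];
        }
        if (!hadToBreak)
            return total;
    }
}

int main() {
    ToyCluster c;
    c.shardCounts.push_back(10);
    c.shardCounts.push_back(32);
    c.routingEpoch = 1;
    std::printf("count = %lld\n", shardedCount(c));
    return 0;
}
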
@@ -442,14 +499,13 @@ namespace mongo {
class CollectionStats : public PublicGridCommand {
public:
CollectionStats() : PublicGridCommand("collStats", "collstats") { }
- bool run(const string& dbName , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ bool run(const string& dbName , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
string collection = cmdObj.firstElement().valuestrsafe();
string fullns = dbName + "." + collection;
DBConfigPtr conf = grid.getDBConfig( dbName , false );
if ( ! conf || ! conf->isShardingEnabled() || ! conf->isSharded( fullns ) ) {
- result.append( "ns" , fullns );
result.appendBool("sharded", false);
result.append( "primary" , conf->getPrimary().getName() );
return passthrough( conf , cmdObj , result);
@@ -463,9 +519,13 @@ namespace mongo {
cm->getAllShards(servers);
BSONObjBuilder shardStats;
+ map<string,long long> counts;
+ map<string,long long> indexSizes;
+ /*
long long count=0;
long long size=0;
long long storageSize=0;
+ */
int nindexes=0;
bool warnedAboutIndexes = false;
for ( set<Shard>::iterator i=servers.begin(); i!=servers.end(); i++ ) {
@@ -476,39 +536,82 @@ namespace mongo {
return false;
}
conn.done();
-
- count += res["count"].numberLong();
- size += res["size"].numberLong();
- storageSize += res["storageSize"].numberLong();
-
- int myIndexes = res["nindexes"].numberInt();
-
- if ( nindexes == 0 ) {
- nindexes = myIndexes;
- }
- else if ( nindexes == myIndexes ) {
- // no-op
- }
- else {
- // hopefully this means we're building an index
-
- if ( myIndexes > nindexes )
- nindexes = myIndexes;
-
- if ( ! warnedAboutIndexes ) {
- result.append( "warning" , "indexes don't all match - ok if ensureIndex is running" );
- warnedAboutIndexes = true;
+
+ BSONObjIterator j( res );
+ while ( j.more() ) {
+ BSONElement e = j.next();
+
+ if ( str::equals( e.fieldName() , "ns" ) ||
+ str::equals( e.fieldName() , "ok" ) ||
+ str::equals( e.fieldName() , "avgObjSize" ) ||
+ str::equals( e.fieldName() , "lastExtentSize" ) ||
+ str::equals( e.fieldName() , "paddingFactor" ) ) {
+ continue;
+ }
+ else if ( str::equals( e.fieldName() , "count" ) ||
+ str::equals( e.fieldName() , "size" ) ||
+ str::equals( e.fieldName() , "storageSize" ) ||
+ str::equals( e.fieldName() , "numExtents" ) ||
+ str::equals( e.fieldName() , "totalIndexSize" ) ) {
+ counts[e.fieldName()] += e.numberLong();
+ }
+ else if ( str::equals( e.fieldName() , "indexSizes" ) ) {
+ BSONObjIterator k( e.Obj() );
+ while ( k.more() ) {
+ BSONElement temp = k.next();
+ indexSizes[temp.fieldName()] += temp.numberLong();
+ }
+ }
+ else if ( str::equals( e.fieldName() , "flags" ) ) {
+ if ( ! result.hasField( e.fieldName() ) )
+ result.append( e );
}
+ else if ( str::equals( e.fieldName() , "nindexes" ) ) {
+ int myIndexes = e.numberInt();
+
+ if ( nindexes == 0 ) {
+ nindexes = myIndexes;
+ }
+ else if ( nindexes == myIndexes ) {
+ // no-op
+ }
+ else {
+ // hopefully this means we're building an index
+
+ if ( myIndexes > nindexes )
+ nindexes = myIndexes;
+
+ if ( ! warnedAboutIndexes ) {
+ result.append( "warning" , "indexes don't all match - ok if ensureIndex is running" );
+ warnedAboutIndexes = true;
+ }
+ }
+ }
+ else {
+ warning() << "mongos collstats doesn't know about: " << e.fieldName() << endl;
+ }
+
}
-
shardStats.append(i->getName(), res);
}
result.append("ns", fullns);
- result.appendNumber("count", count);
- result.appendNumber("size", size);
- result.append ("avgObjSize", double(size) / double(count));
- result.appendNumber("storageSize", storageSize);
+
+ for ( map<string,long long>::iterator i=counts.begin(); i!=counts.end(); ++i )
+ result.appendNumber( i->first , i->second );
+
+ {
+ BSONObjBuilder ib( result.subobjStart( "indexSizes" ) );
+ for ( map<string,long long>::iterator i=indexSizes.begin(); i!=indexSizes.end(); ++i )
+ ib.appendNumber( i->first , i->second );
+ ib.done();
+ }
+
+ if ( counts["count"] > 0 )
+ result.append("avgObjSize", (double)counts["size"] / (double)counts["count"] );
+ else
+ result.append( "avgObjSize", 0.0 );
+
result.append("nindexes", nindexes);
result.append("nchunks", cm->numChunks());
@@ -521,7 +624,7 @@ namespace mongo {
class FindAndModifyCmd : public PublicGridCommand {
public:
FindAndModifyCmd() : PublicGridCommand("findAndModify", "findandmodify") { }
- bool run(const string& dbName, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ bool run(const string& dbName, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
string collection = cmdObj.firstElement().valuestrsafe();
string fullns = dbName + "." + collection;
@@ -532,7 +635,7 @@ namespace mongo {
}
ChunkManagerPtr cm = conf->getChunkManager( fullns );
- massert( 13002 , "how could chunk manager be null!" , cm );
+            massert( 13002 , "shard internal error: chunk manager should never be null" , cm );
BSONObj filter = cmdObj.getObjectField("query");
uassert(13343, "query for sharded findAndModify must have shardkey", cm->hasShardKey(filter));
@@ -558,7 +661,7 @@ namespace mongo {
class DataSizeCmd : public PublicGridCommand {
public:
DataSizeCmd() : PublicGridCommand("dataSize", "datasize") { }
- bool run(const string& dbName, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ bool run(const string& dbName, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
string fullns = cmdObj.firstElement().String();
DBConfigPtr conf = grid.getDBConfig( dbName , false );
@@ -622,7 +725,7 @@ namespace mongo {
class GroupCmd : public NotAllowedOnShardedCollectionCmd {
public:
GroupCmd() : NotAllowedOnShardedCollectionCmd("group") {}
-
+ virtual bool passOptions() const { return true; }
virtual string getFullNS( const string& dbName , const BSONObj& cmdObj ) {
return dbName + "." + cmdObj.firstElement().embeddedObjectUserCheck()["ns"].valuestrsafe();
}
@@ -635,14 +738,15 @@ namespace mongo {
virtual void help( stringstream &help ) const {
help << "{ distinct : 'collection name' , key : 'a.b' , query : {} }";
}
- bool run(const string& dbName , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ virtual bool passOptions() const { return true; }
+ bool run(const string& dbName , BSONObj& cmdObj, int options, string& errmsg, BSONObjBuilder& result, bool) {
string collection = cmdObj.firstElement().valuestrsafe();
string fullns = dbName + "." + collection;
DBConfigPtr conf = grid.getDBConfig( dbName , false );
if ( ! conf || ! conf->isShardingEnabled() || ! conf->isSharded( fullns ) ) {
- return passthrough( conf , cmdObj , result );
+ return passthrough( conf , cmdObj , options, result );
}
ChunkManagerPtr cm = conf->getChunkManager( fullns );
@@ -658,7 +762,7 @@ namespace mongo {
for ( set<Shard>::iterator i=shards.begin(), end=shards.end() ; i != end; ++i ) {
ShardConnection conn( *i , fullns );
BSONObj res;
- bool ok = conn->runCommand( conf->getName() , cmdObj , res );
+ bool ok = conn->runCommand( conf->getName() , cmdObj , res, options );
conn.done();
if ( ! ok ) {
@@ -693,7 +797,7 @@ namespace mongo {
virtual void help( stringstream &help ) const {
help << " example: { filemd5 : ObjectId(aaaaaaa) , root : \"fs\" }";
}
- bool run(const string& dbName , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ bool run(const string& dbName , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
string fullns = dbName;
fullns += ".";
{
@@ -730,15 +834,15 @@ namespace mongo {
public:
Geo2dFindNearCmd() : PublicGridCommand( "geoNear" ) {}
void help(stringstream& h) const { h << "http://www.mongodb.org/display/DOCS/Geospatial+Indexing#GeospatialIndexing-geoNearCommand"; }
-
- bool run(const string& dbName , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ virtual bool passOptions() const { return true; }
+ bool run(const string& dbName , BSONObj& cmdObj, int options, string& errmsg, BSONObjBuilder& result, bool) {
string collection = cmdObj.firstElement().valuestrsafe();
string fullns = dbName + "." + collection;
DBConfigPtr conf = grid.getDBConfig( dbName , false );
if ( ! conf || ! conf->isShardingEnabled() || ! conf->isSharded( fullns ) ) {
- return passthrough( conf , cmdObj , result );
+ return passthrough( conf , cmdObj , options, result );
}
ChunkManagerPtr cm = conf->getChunkManager( fullns );
@@ -755,7 +859,7 @@ namespace mongo {
list< shared_ptr<Future::CommandResult> > futures;
BSONArrayBuilder shardArray;
for ( set<Shard>::const_iterator i=shards.begin(), end=shards.end() ; i != end ; i++ ) {
- futures.push_back( Future::spawnCommand( i->getConnString() , dbName , cmdObj ) );
+ futures.push_back( Future::spawnCommand( i->getConnString() , dbName , cmdObj, options ) );
shardArray.append(i->getName());
}
@@ -820,12 +924,13 @@ namespace mongo {
class MRCmd : public PublicGridCommand {
public:
+ AtomicUInt JOB_NUMBER;
+
MRCmd() : PublicGridCommand( "mapreduce" ) {}
string getTmpName( const string& coll ) {
- static int inc = 1;
stringstream ss;
- ss << "tmp.mrs." << coll << "_" << time(0) << "_" << inc++;
+ ss << "tmp.mrs." << coll << "_" << time(0) << "_" << JOB_NUMBER++;
return ss.str();
}
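
Replacing the static int with an atomic counter matters because several mapreduce jobs can start in the same second on different threads, and the temp collection names must still come out unique. A minimal equivalent using std::atomic:

// Illustrative only: std::atomic replaces the unsynchronised static counter.
#include <atomic>
#include <ctime>
#include <sstream>
#include <string>

static std::atomic<unsigned> jobNumber(0);

std::string getTmpName(const std::string& coll) {
    std::ostringstream ss;
    // the timestamp alone is not enough: two jobs can start in the same second,
    // so a per-process atomic counter keeps the names unique
    ss << "tmp.mrs." << coll << "_" << std::time(0) << "_" << jobNumber++;
    return ss.str();
}
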
@@ -851,8 +956,8 @@ namespace mongo {
if (fn == "out" && e.type() == Object) {
// check if there is a custom output
BSONObj out = e.embeddedObject();
- if (out.hasField("db"))
- customOut = out;
+// if (out.hasField("db"))
+ customOut = out;
}
}
else {
@@ -864,7 +969,7 @@ namespace mongo {
return b.obj();
}
- bool run(const string& dbName , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ bool run(const string& dbName , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
Timer t;
string collection = cmdObj.firstElement().valuestrsafe();
@@ -876,7 +981,7 @@ namespace mongo {
BSONObj customOut;
BSONObj shardedCommand = fixForShards( cmdObj , shardedOutputCollection, customOut , badShardedField );
- bool customOutDB = ! customOut.isEmpty() && customOut.hasField( "db" );
+ bool customOutDB = customOut.hasField( "db" );
DBConfigPtr conf = grid.getDBConfig( dbName , false );
@@ -911,26 +1016,32 @@ namespace mongo {
finalCmd.append( "shardedOutputCollection" , shardedOutputCollection );
+ set<ServerAndQuery> servers;
+ BSONObj shardCounts;
+ BSONObj aggCounts;
+ map<string,long long> countsMap;
{
// we need to use our connections to the shard
// so filtering is done correctly for un-owned docs
// so we allocate them in our thread
// and hand off
-
+ // Note: why not use pooled connections? This has been reported to create too many connections
vector< shared_ptr<ShardConnection> > shardConns;
-
list< shared_ptr<Future::CommandResult> > futures;
for ( set<Shard>::iterator i=shards.begin(), end=shards.end() ; i != end ; i++ ) {
shared_ptr<ShardConnection> temp( new ShardConnection( i->getConnString() , fullns ) );
assert( temp->get() );
- futures.push_back( Future::spawnCommand( i->getConnString() , dbName , shardedCommand , temp->get() ) );
+ futures.push_back( Future::spawnCommand( i->getConnString() , dbName , shardedCommand , 0 , temp->get() ) );
shardConns.push_back( temp );
}
bool failed = false;
-
- BSONObjBuilder shardresults;
+
+ // now wait for the result of all shards
+ BSONObjBuilder shardResultsB;
+ BSONObjBuilder shardCountsB;
+ BSONObjBuilder aggCountsB;
for ( list< shared_ptr<Future::CommandResult> >::iterator i=futures.begin(); i!=futures.end(); i++ ) {
shared_ptr<Future::CommandResult> res = *i;
if ( ! res->join() ) {
@@ -941,7 +1052,19 @@ namespace mongo {
failed = true;
continue;
}
- shardresults.append( res->getServer() , res->result() );
+ BSONObj result = res->result();
+ shardResultsB.append( res->getServer() , result );
+ BSONObj counts = result["counts"].embeddedObjectUserCheck();
+ shardCountsB.append( res->getServer() , counts );
+ servers.insert(res->getServer());
+
+ // add up the counts for each shard
+ // some of them will be fixed later like output and reduce
+ BSONObjIterator j( counts );
+ while ( j.more() ) {
+ BSONElement temp = j.next();
+ countsMap[temp.fieldName()] += temp.numberLong();
+ }
}
for ( unsigned i=0; i<shardConns.size(); i++ )
@@ -950,28 +1073,205 @@ namespace mongo {
if ( failed )
return 0;
- finalCmd.append( "shards" , shardresults.obj() );
+ finalCmd.append( "shards" , shardResultsB.obj() );
+ shardCounts = shardCountsB.obj();
+ finalCmd.append( "shardCounts" , shardCounts );
timingBuilder.append( "shards" , t.millis() );
+
+ for ( map<string,long long>::iterator i=countsMap.begin(); i!=countsMap.end(); i++ ) {
+ aggCountsB.append( i->first , i->second );
+ }
+ aggCounts = aggCountsB.obj();
+ finalCmd.append( "counts" , aggCounts );
}
Timer t2;
- // by default the target database is same as input
- Shard outServer = conf->getPrimary();
- string outns = fullns;
- if ( customOutDB ) {
- // have to figure out shard for the output DB
- BSONElement elmt = customOut.getField("db");
- string outdb = elmt.valuestrsafe();
- outns = outdb + "." + collection;
- DBConfigPtr conf2 = grid.getDBConfig( outdb , true );
- outServer = conf2->getPrimary();
- }
- log() << "customOut: " << customOut << " outServer: " << outServer << endl;
-
- ShardConnection conn( outServer , outns );
BSONObj finalResult;
- bool ok = conn->runCommand( dbName , finalCmd.obj() , finalResult );
- conn.done();
+ bool ok = false;
+ string outdb = dbName;
+ if (customOutDB) {
+ BSONElement elmt = customOut.getField("db");
+ outdb = elmt.valuestrsafe();
+ }
+
+ if (!customOut.getBoolField("sharded")) {
+ // non-sharded, use the MRFinish command on target server
+ // This will save some data transfer
+
+ // by default the target database is same as input
+ Shard outServer = conf->getPrimary();
+ string outns = fullns;
+ if ( customOutDB ) {
+ // have to figure out shard for the output DB
+ DBConfigPtr conf2 = grid.getDBConfig( outdb , true );
+ outServer = conf2->getPrimary();
+ outns = outdb + "." + collection;
+ }
+ log() << "customOut: " << customOut << " outServer: " << outServer << endl;
+
+ ShardConnection conn( outServer , outns );
+ ok = conn->runCommand( dbName , finalCmd.obj() , finalResult );
+ conn.done();
+ } else {
+ // grab records from each shard and insert back in correct shard in "temp" collection
+ // we do the final reduce in mongos since records are ordered and already reduced on each shard
+// string shardedIncLong = str::stream() << outdb << ".tmp.mr." << collection << "_" << "shardedTemp" << "_" << time(0) << "_" << JOB_NUMBER++;
+
+ mr_shard::Config config( dbName , cmdObj );
+ mr_shard::State state(config);
+ LOG(1) << "mr sharded output ns: " << config.ns << endl;
+
+ if (config.outType == mr_shard::Config::INMEMORY) {
+ errmsg = "This Map Reduce mode is not supported with sharded output";
+ return false;
+ }
+
+ if (!config.outDB.empty()) {
+ BSONObjBuilder loc;
+ if ( !config.outDB.empty())
+ loc.append( "db" , config.outDB );
+ loc.append( "collection" , config.finalShort );
+ result.append("result", loc.obj());
+ }
+ else {
+ if ( !config.finalShort.empty() )
+ result.append( "result" , config.finalShort );
+ }
+
+ string outns = config.finalLong;
+ string tempns;
+
+ // result will be inserted into a temp collection to post process
+ const string postProcessCollection = getTmpName( collection );
+ finalCmd.append("postProcessCollection", postProcessCollection);
+ tempns = dbName + "." + postProcessCollection;
+
+// if (config.outType == mr_shard::Config::REPLACE) {
+// // drop previous collection
+// BSONObj dropColCmd = BSON("drop" << config.finalShort);
+// BSONObjBuilder dropColResult(32);
+// string outdbCmd = outdb + ".$cmd";
+// bool res = Command::runAgainstRegistered(outdbCmd.c_str(), dropColCmd, dropColResult);
+// if (!res) {
+// errmsg = str::stream() << "Could not drop sharded output collection " << outns << ": " << dropColResult.obj().toString();
+// return false;
+// }
+// }
+
+ BSONObj sortKey = BSON( "_id" << 1 );
+ if (!conf->isSharded(outns)) {
+ // create the sharded collection
+
+ BSONObj shardColCmd = BSON("shardCollection" << outns << "key" << sortKey);
+ BSONObjBuilder shardColResult(32);
+ bool res = Command::runAgainstRegistered("admin.$cmd", shardColCmd, shardColResult);
+ if (!res) {
+ errmsg = str::stream() << "Could not create sharded output collection " << outns << ": " << shardColResult.obj().toString();
+ return false;
+ }
+ }
+
+ ParallelSortClusteredCursor cursor( servers , dbName + "." + shardedOutputCollection ,
+ Query().sort( sortKey ) );
+ cursor.init();
+ state.init();
+
+ mr_shard::BSONList values;
+ Strategy* s = SHARDED;
+ long long finalCount = 0;
+ int currentSize = 0;
+ while ( cursor.more() || !values.empty() ) {
+ BSONObj t;
+ if ( cursor.more() ) {
+ t = cursor.next().getOwned();
+
+ if ( values.size() == 0 || t.woSortOrder( *(values.begin()) , sortKey ) == 0 ) {
+ values.push_back( t );
+ currentSize += t.objsize();
+
+ // check size and potentially reduce
+ if (currentSize > config.maxInMemSize && values.size() > config.reduceTriggerRatio) {
+ BSONObj reduced = config.reducer->finalReduce(values, 0);
+ values.clear();
+ values.push_back( reduced );
+ currentSize = reduced.objsize();
+ }
+ continue;
+ }
+ }
+
+ BSONObj final = config.reducer->finalReduce(values, config.finalizer.get());
+ if (config.outType == mr_shard::Config::MERGE) {
+ BSONObj id = final["_id"].wrap();
+ s->updateSharded(conf, outns.c_str(), id, final, UpdateOption_Upsert, true);
+ } else {
+ // insert into temp collection, but using final collection's shard chunks
+ s->insertSharded(conf, tempns.c_str(), final, 0, true, outns.c_str());
+ }
+ ++finalCount;
+ values.clear();
+ if (!t.isEmpty()) {
+ values.push_back( t );
+ currentSize = t.objsize();
+ }
+ }
+
+ if (config.outType == mr_shard::Config::REDUCE || config.outType == mr_shard::Config::REPLACE) {
+ // results were written to temp collection, need post processing
+ vector< shared_ptr<ShardConnection> > shardConns;
+ list< shared_ptr<Future::CommandResult> > futures;
+ BSONObj finalCmdObj = finalCmd.obj();
+ for ( set<Shard>::iterator i=shards.begin(), end=shards.end() ; i != end ; i++ ) {
+ shared_ptr<ShardConnection> temp( new ShardConnection( i->getConnString() , outns ) );
+ futures.push_back( Future::spawnCommand( i->getConnString() , dbName , finalCmdObj , 0 , temp->get() ) );
+ shardConns.push_back( temp );
+ }
+
+ // now wait for the result of all shards
+ bool failed = false;
+ for ( list< shared_ptr<Future::CommandResult> >::iterator i=futures.begin(); i!=futures.end(); i++ ) {
+ shared_ptr<Future::CommandResult> res = *i;
+ if ( ! res->join() ) {
+ error() << "final reduce on sharded output m/r failed on shard: " << res->getServer() << " error: " << res->result() << endl;
+ result.append( "cause" , res->result() );
+ errmsg = "mongod mr failed: ";
+ errmsg += res->result().toString();
+ failed = true;
+ continue;
+ }
+ BSONObj result = res->result();
+ }
+
+ for ( unsigned i=0; i<shardConns.size(); i++ )
+ shardConns[i]->done();
+
+ if (failed)
+ return 0;
+ }
+
+ for ( set<ServerAndQuery>::iterator i=servers.begin(); i!=servers.end(); i++ ) {
+ ScopedDbConnection conn( i->_server );
+ conn->dropCollection( dbName + "." + shardedOutputCollection );
+ conn.done();
+ }
+
+ result.append("shardCounts", shardCounts);
+
+ // fix the global counts
+ BSONObjBuilder countsB(32);
+ BSONObjIterator j(aggCounts);
+ while (j.more()) {
+ BSONElement elmt = j.next();
+ if (!strcmp(elmt.fieldName(), "reduce"))
+ countsB.append("reduce", elmt.numberLong() + state.numReduces());
+ else if (!strcmp(elmt.fieldName(), "output"))
+ countsB.append("output", finalCount);
+ else
+ countsB.append(elmt);
+ }
+ result.append( "counts" , countsB.obj() );
+ ok = true;
+ }
if ( ! ok ) {
errmsg = "final reduce failed: ";
@@ -991,14 +1291,81 @@ namespace mongo {
class ApplyOpsCmd : public PublicGridCommand {
public:
ApplyOpsCmd() : PublicGridCommand( "applyOps" ) {}
-
- virtual bool run(const string& dbName , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ virtual bool run(const string& dbName , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
errmsg = "applyOps not allowed through mongos";
return false;
}
-
} applyOpsCmd;
+ class CompactCmd : public PublicGridCommand {
+ public:
+ CompactCmd() : PublicGridCommand( "compact" ) {}
+ virtual bool run(const string& dbName , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
+ errmsg = "compact not allowed through mongos";
+ return false;
+ }
+ } compactCmd;
+
}
+ bool Command::runAgainstRegistered(const char *ns, BSONObj& jsobj, BSONObjBuilder& anObjBuilder, int queryOptions) {
+ const char *p = strchr(ns, '.');
+ if ( !p ) return false;
+ if ( strcmp(p, ".$cmd") != 0 ) return false;
+
+ bool ok = false;
+
+ BSONElement e = jsobj.firstElement();
+ map<string,Command*>::iterator i;
+
+ if ( e.eoo() )
+ ;
+ // check for properly registered command objects.
+ else if ( (i = _commands->find(e.fieldName())) != _commands->end() ) {
+ string errmsg;
+ Command *c = i->second;
+ ClientInfo *client = ClientInfo::get();
+ AuthenticationInfo *ai = client->getAuthenticationInfo();
+
+ char cl[256];
+ nsToDatabase(ns, cl);
+ if( c->requiresAuth() && !ai->isAuthorized(cl)) {
+ ok = false;
+ errmsg = "unauthorized";
+ }
+ else if( c->adminOnly() && c->localHostOnlyIfNoAuth( jsobj ) && noauth && !ai->isLocalHost ) {
+ ok = false;
+ errmsg = "unauthorized: this command must run from localhost when running db without auth";
+ log() << "command denied: " << jsobj.toString() << endl;
+ }
+ else if ( c->adminOnly() && !startsWith(ns, "admin.") ) {
+ ok = false;
+ errmsg = "access denied - use admin db";
+ }
+ else if ( jsobj.getBoolField( "help" ) ) {
+ stringstream help;
+ help << "help for: " << e.fieldName() << " ";
+ c->help( help );
+ anObjBuilder.append( "help" , help.str() );
+ }
+ else {
+ ok = c->run( nsToDatabase( ns ) , jsobj, queryOptions, errmsg, anObjBuilder, false );
+ }
+
+ BSONObj tmp = anObjBuilder.asTempObj();
+ bool have_ok = tmp.hasField("ok");
+ bool have_errmsg = tmp.hasField("errmsg");
+
+ if (!have_ok)
+ anObjBuilder.append( "ok" , ok ? 1.0 : 0.0 );
+
+ if ( !ok && !have_errmsg) {
+ anObjBuilder.append("errmsg", errmsg);
+ uassert_nothrow(errmsg.c_str());
+ }
+ return true;
+ }
+
+ return false;
+ }
}
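
The new Command::runAgainstRegistered gives mongos a single dispatch point: look the command up by name, apply the auth and admin-db gates, run it, and make sure the reply always carries ok (and errmsg on failure). The same flow as a toy dispatcher (flags and reply formatting are illustrative only):

// Illustrative only: flags and reply formatting are toy stand-ins for the real dispatcher.
#include <map>
#include <string>

struct Cmd {
    bool requiresAuth;
    bool adminOnly;
    bool (*run)(std::string& errmsg, std::string& result);
};

// Look the command up by name, apply the auth and admin-db gates, run it, and always
// report ok/errmsg in the reply. Returns false only if the command is unknown.
bool runRegistered(const std::map<std::string, Cmd>& registry,
                   const std::string& db, const std::string& name,
                   bool authed, std::string& reply) {
    std::map<std::string, Cmd>::const_iterator it = registry.find(name);
    if (it == registry.end())
        return false;
    const Cmd& c = it->second;
    std::string errmsg, result;
    bool ok = false;
    if (c.requiresAuth && !authed)
        errmsg = "unauthorized";
    else if (c.adminOnly && db != "admin")
        errmsg = "access denied - use admin db";
    else
        ok = c.run(errmsg, result);
    if (ok)
        reply = result + " ok:1";
    else
        reply = result + " ok:0 errmsg:\"" + errmsg + "\"";
    return true;
}
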
diff --git a/s/config.cpp b/s/config.cpp
index 0766717..23475eb 100644
--- a/s/config.cpp
+++ b/s/config.cpp
@@ -17,11 +17,9 @@
*/
#include "pch.h"
-#include "../util/message.h"
+#include "../util/net/message.h"
#include "../util/stringutils.h"
#include "../util/unittest.h"
-#include "../util/timer.h"
-
#include "../client/connpool.h"
#include "../client/model.h"
#include "../db/pdfile.h"
@@ -56,19 +54,18 @@ namespace mongo {
_dirty = false;
_dropped = in["dropped"].trueValue();
if ( in["key"].isABSONObj() ) {
- Timer t;
- shard( in["_id"].String() , in["key"].Obj() , in["unique"].trueValue() );
- log() << "creating ChunkManager ns: " << in["_id"]
- << " took: " << t.millis() << "ms"
- << " sequenceNumber: " << _cm->getSequenceNumber()
- << endl;
- _dirty = false;
+ _key = in["key"].Obj().getOwned();
+ _unqiue = in["unique"].trueValue();
+ shard( in["_id"].String() , _key , _unqiue );
}
+ _dirty = false;
}
-
+
void DBConfig::CollectionInfo::shard( const string& ns , const ShardKeyPattern& key , bool unique ) {
_cm.reset( new ChunkManager( ns , key , unique ) );
+ _key = key.key().getOwned();
+ _unqiue = unique;
_dirty = true;
_dropped = false;
}
@@ -77,6 +74,7 @@ namespace mongo {
_cm.reset();
_dropped = true;
_dirty = true;
+ _key = BSONObj();
}
void DBConfig::CollectionInfo::save( const string& ns , DBClientBase* conn ) {
@@ -96,32 +94,6 @@ namespace mongo {
_dirty = false;
}
- bool DBConfig::CollectionInfo::needsReloading( DBClientBase * conn , const BSONObj& collectionInfo ) {
- if ( ! _cm ) {
- return true;
- }
-
- if ( _dirty || _dropped ) {
- return true;
- }
-
- if ( collectionInfo["dropped"].trueValue() ) {
- return true;
- }
-
- BSONObj newest = conn->findOne( ShardNS::chunk ,
- Query( BSON( "ns" << collectionInfo["_id"].String() ) ).sort( "lastmod" , -1 ) );
-
- if ( newest.isEmpty() ) {
- // either a drop or something else weird
- return true;
- }
-
- ShardChunkVersion fromdb = newest["lastmod"];
- ShardChunkVersion inmemory = _cm->getVersion();
- return fromdb != inmemory;
- }
-
bool DBConfig::isSharded( const string& ns ) {
if ( ! _shardingEnabled )
return false;
@@ -160,34 +132,36 @@ namespace mongo {
ChunkManagerPtr DBConfig::shardCollection( const string& ns , ShardKeyPattern fieldsAndOrder , bool unique ) {
uassert( 8042 , "db doesn't have sharding enabled" , _shardingEnabled );
+ uassert( 13648 , str::stream() << "can't shard collection because not all config servers are up" , configServer.allUp() );
- scoped_lock lk( _lock );
+
+ {
+ scoped_lock lk( _lock );
- CollectionInfo& ci = _collections[ns];
- uassert( 8043 , "collection already sharded" , ! ci.isSharded() );
+ CollectionInfo& ci = _collections[ns];
+ uassert( 8043 , "collection already sharded" , ! ci.isSharded() );
- log() << "enable sharding on: " << ns << " with shard key: " << fieldsAndOrder << endl;
+ log() << "enable sharding on: " << ns << " with shard key: " << fieldsAndOrder << endl;
- // From this point on, 'ns' is going to be treated as a sharded collection. We assume this is the first
- // time it is seen by the sharded system and thus create the first chunk for the collection. All the remaining
- // chunks will be created as a by-product of splitting.
- ci.shard( ns , fieldsAndOrder , unique );
- ChunkManagerPtr cm = ci.getCM();
- uassert( 13449 , "collections already sharded" , (cm->numChunks() == 0) );
- cm->createFirstChunk( getPrimary() );
- _save();
+ // From this point on, 'ns' is going to be treated as a sharded collection. We assume this is the first
+ // time it is seen by the sharded system and thus create the first chunk for the collection. All the remaining
+ // chunks will be created as a by-product of splitting.
+ ci.shard( ns , fieldsAndOrder , unique );
+ ChunkManagerPtr cm = ci.getCM();
+ uassert( 13449 , "collections already sharded" , (cm->numChunks() == 0) );
+ cm->createFirstChunk( getPrimary() );
+ _save();
+ }
try {
- if ( cm->maybeChunkCollection() ) {
- _load();
- }
+ getChunkManager(ns, true)->maybeChunkCollection();
}
catch ( UserException& e ) {
// failure to chunk is not critical enough to abort the command (and undo the _save()'d configDB state)
log() << "couldn't chunk recently created collection: " << ns << " " << e << endl;
}
- return cm;
+ return getChunkManager(ns);
}
bool DBConfig::removeSharding( const string& ns ) {
@@ -207,18 +181,89 @@ namespace mongo {
return false;
ci.unshard();
- _save();
+ _save( false, true );
return true;
}
+ ChunkManagerPtr DBConfig::getChunkManagerIfExists( const string& ns, bool shouldReload ){
+ try{
+ return getChunkManager( ns, shouldReload );
+ }
+ catch( AssertionException& e ){
+ warning() << "chunk manager not found for " << ns << causedBy( e ) << endl;
+ return ChunkManagerPtr();
+ }
+ }
+
ChunkManagerPtr DBConfig::getChunkManager( const string& ns , bool shouldReload ) {
- scoped_lock lk( _lock );
+ BSONObj key;
+ bool unique;
+ ShardChunkVersion oldVersion;
- if ( shouldReload )
- _reload();
+ {
+ scoped_lock lk( _lock );
+
+ CollectionInfo& ci = _collections[ns];
+
+ bool earlyReload = ! ci.isSharded() && shouldReload;
+ if ( earlyReload ) {
+                // this is to catch the case where this is a new sharded collection
+ _reload();
+ ci = _collections[ns];
+ }
+ massert( 10181 , (string)"not sharded:" + ns , ci.isSharded() );
+ assert( ! ci.key().isEmpty() );
+
+ if ( ! shouldReload || earlyReload )
+ return ci.getCM();
+ key = ci.key().copy();
+ unique = ci.unique();
+ if ( ci.getCM() )
+ oldVersion = ci.getCM()->getVersion();
+ }
+
+ assert( ! key.isEmpty() );
+
+ if ( oldVersion > 0 ) {
+ ScopedDbConnection conn( configServer.modelServer() , 30.0 );
+ BSONObj newest = conn->findOne( ShardNS::chunk ,
+ Query( BSON( "ns" << ns ) ).sort( "lastmod" , -1 ) );
+ conn.done();
+
+ if ( ! newest.isEmpty() ) {
+ ShardChunkVersion v = newest["lastmod"];
+ if ( v == oldVersion ) {
+ scoped_lock lk( _lock );
+ CollectionInfo& ci = _collections[ns];
+ massert( 15885 , str::stream() << "not sharded after reloading from chunks : " << ns , ci.isSharded() );
+ return ci.getCM();
+ }
+ }
+
+ }
+
+ // we are not locked now, and want to load a new ChunkManager
+
+ auto_ptr<ChunkManager> temp( new ChunkManager( ns , key , unique ) );
+ if ( temp->numChunks() == 0 ) {
+ // maybe we're not sharded any more
+ reload(); // this is a full reload
+ return getChunkManager( ns , false );
+ }
+
+ scoped_lock lk( _lock );
+
CollectionInfo& ci = _collections[ns];
- massert( 10181 , (string)"not sharded:" + ns , ci.isSharded() || ci.wasDropped() );
+ massert( 14822 , (string)"state changed in the middle: " + ns , ci.isSharded() );
+
+ if ( temp->getVersion() > ci.getCM()->getVersion() ) {
+ // we only want to reset if we're newer
+ // otherwise we go into a bad cycle
+ ci.resetCM( temp.release() );
+ }
+
+ massert( 15883 , str::stream() << "not sharded after chunk manager reset : " << ns , ci.isSharded() );
return ci.getCM();
}
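
getChunkManager now does the expensive reload outside the mutex: snapshot the key and version under the lock, cheaply check whether the config servers have anything newer, rebuild the manager unlocked, then re-lock and install it only if it is actually newer. That double-checked pattern, reduced to a toy routing cache (the config-server calls are trivial stubs):

// Illustrative only: a toy routing cache; the config-server calls are trivial stubs.
#include <memory>
#include <mutex>

struct RoutingEntry { long long version; };

struct RoutingCache {
    std::mutex mtx;
    std::shared_ptr<RoutingEntry> entry;

    long long latestVersionFromConfig() { return 2; }      // stand-in for a cheap config query
    std::shared_ptr<RoutingEntry> loadFromConfig() {       // stand-in for the slow full reload
        std::shared_ptr<RoutingEntry> e(new RoutingEntry());
        e->version = 2;
        return e;
    }

    std::shared_ptr<RoutingEntry> get(bool shouldReload) {
        long long oldVersion = 0;
        {
            std::lock_guard<std::mutex> lk(mtx);           // 1. snapshot under the lock
            if (!shouldReload && entry)
                return entry;
            if (entry)
                oldVersion = entry->version;
        }
        if (oldVersion > 0 && latestVersionFromConfig() == oldVersion) {
            std::lock_guard<std::mutex> lk(mtx);           // 2. nothing changed: reuse cached
            return entry;
        }
        std::shared_ptr<RoutingEntry> fresh = loadFromConfig();  // 3. slow work, no lock held
        std::lock_guard<std::mutex> lk(mtx);
        if (!entry || fresh->version > entry->version)     // 4. install only if strictly newer,
            entry = fresh;                                 //    otherwise keep the current winner
        return entry;
    }
};

Doing the slow load with the mutex released is what keeps one stale namespace from stalling every other request routed through the same mongos.
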
@@ -235,7 +280,7 @@ namespace mongo {
}
void DBConfig::unserialize(const BSONObj& from) {
- log(1) << "DBConfig unserialize: " << _name << " " << from << endl;
+ LOG(1) << "DBConfig unserialize: " << _name << " " << from << endl;
assert( _name == from["_id"].String() );
_shardingEnabled = from.getBoolField("partitioned");
@@ -255,7 +300,7 @@ namespace mongo {
}
bool DBConfig::_load() {
- ScopedDbConnection conn( configServer.modelServer() );
+ ScopedDbConnection conn( configServer.modelServer(), 30.0 );
BSONObj o = conn->findOne( ShardNS::database , BSON( "_id" << _name ) );
@@ -273,14 +318,8 @@ namespace mongo {
assert( cursor.get() );
while ( cursor->more() ) {
BSONObj o = cursor->next();
- string ns = o["_id"].String();
-
- Collections::iterator i = _collections.find( ns );
- if ( i != _collections.end() && ! i->second.needsReloading( conn.get() , o ) ) {
- continue;
- }
-
- _collections[ns] = CollectionInfo( o );
+ if( o["dropped"].trueValue() ) _collections.erase( o["_id"].String() );
+ else _collections[o["_id"].String()] = CollectionInfo( o );
}
conn.done();
@@ -288,24 +327,32 @@ namespace mongo {
return true;
}
- void DBConfig::_save() {
- ScopedDbConnection conn( configServer.modelServer() );
+ void DBConfig::_save( bool db, bool coll ) {
+ ScopedDbConnection conn( configServer.modelServer(), 30.0 );
+
+ if( db ){
+
+ BSONObj n;
+ {
+ BSONObjBuilder b;
+ serialize(b);
+ n = b.obj();
+ }
+
+ conn->update( ShardNS::database , BSON( "_id" << _name ) , n , true );
+ string err = conn->getLastError();
+ uassert( 13396 , (string)"DBConfig save failed: " + err , err.size() == 0 );
- BSONObj n;
- {
- BSONObjBuilder b;
- serialize(b);
- n = b.obj();
}
- conn->update( ShardNS::database , BSON( "_id" << _name ) , n , true );
- string err = conn->getLastError();
- uassert( 13396 , (string)"DBConfig save failed: " + err , err.size() == 0 );
+ if( coll ){
+
+ for ( Collections::iterator i=_collections.begin(); i!=_collections.end(); ++i ) {
+ if ( ! i->second.isDirty() )
+ continue;
+ i->second.save( i->first , conn.get() );
+ }
- for ( Collections::iterator i=_collections.begin(); i!=_collections.end(); ++i ) {
- if ( ! i->second.isDirty() )
- continue;
- i->second.save( i->first , conn.get() );
}
conn.done();
@@ -335,14 +382,14 @@ namespace mongo {
// 1
if ( ! configServer.allUp( errmsg ) ) {
- log(1) << "\t DBConfig::dropDatabase not all up" << endl;
+ LOG(1) << "\t DBConfig::dropDatabase not all up" << endl;
return 0;
}
// 2
grid.removeDB( _name );
{
- ScopedDbConnection conn( configServer.modelServer() );
+ ScopedDbConnection conn( configServer.modelServer(), 30.0 );
conn->remove( ShardNS::database , BSON( "_id" << _name ) );
errmsg = conn->getLastError();
if ( ! errmsg.empty() ) {
@@ -358,7 +405,7 @@ namespace mongo {
log() << "error removing from config server even after checking!" << endl;
return 0;
}
- log(1) << "\t removed entry from config server for: " << _name << endl;
+ LOG(1) << "\t removed entry from config server for: " << _name << endl;
set<Shard> allServers;
@@ -374,7 +421,7 @@ namespace mongo {
// 4
{
- ScopedDbConnection conn( _primary );
+ ScopedDbConnection conn( _primary, 30.0 );
BSONObj res;
if ( ! conn->dropDatabase( _name , &res ) ) {
errmsg = res.toString();
@@ -385,7 +432,7 @@ namespace mongo {
// 5
for ( set<Shard>::iterator i=allServers.begin(); i!=allServers.end(); i++ ) {
- ScopedDbConnection conn( *i );
+ ScopedDbConnection conn( *i, 30.0 );
BSONObj res;
if ( ! conn->dropDatabase( _name , &res ) ) {
errmsg = res.toString();
@@ -394,7 +441,7 @@ namespace mongo {
conn.done();
}
- log(1) << "\t dropped primary db for: " << _name << endl;
+ LOG(1) << "\t dropped primary db for: " << _name << endl;
configServer.logChange( "dropDatabase" , _name , BSONObj() );
return true;
@@ -406,6 +453,7 @@ namespace mongo {
while ( true ) {
Collections::iterator i = _collections.begin();
for ( ; i != _collections.end(); ++i ) {
+ // log() << "coll : " << i->first << " and " << i->second.isSharded() << endl;
if ( i->second.isSharded() )
break;
}
@@ -419,7 +467,7 @@ namespace mongo {
}
seen.insert( i->first );
- log(1) << "\t dropping sharded collection: " << i->first << endl;
+ LOG(1) << "\t dropping sharded collection: " << i->first << endl;
i->second.getCM()->getAllShards( allServers );
i->second.getCM()->drop( i->second.getCM() );
@@ -427,13 +475,14 @@ namespace mongo {
num++;
uassert( 10184 , "_dropShardedCollections too many collections - bailing" , num < 100000 );
- log(2) << "\t\t dropped " << num << " so far" << endl;
+ LOG(2) << "\t\t dropped " << num << " so far" << endl;
}
return true;
}
void DBConfig::getAllShards(set<Shard>& shards) const {
+ scoped_lock lk( _lock );
shards.insert(getPrimary());
for (Collections::const_iterator it(_collections.begin()), end(_collections.end()); it != end; ++it) {
if (it->second.isSharded()) {
@@ -493,15 +542,12 @@ namespace mongo {
string fullString;
joinStringDelim( configHosts, &fullString, ',' );
_primary.setAddress( ConnectionString( fullString , ConnectionString::SYNC ) );
- log(1) << " config string : " << fullString << endl;
+ LOG(1) << " config string : " << fullString << endl;
return true;
}
bool ConfigServer::checkConfigServersConsistent( string& errmsg , int tries ) const {
- if ( _config.size() == 1 )
- return true;
-
if ( tries <= 0 )
return false;
@@ -511,7 +557,16 @@ namespace mongo {
for ( unsigned i=0; i<_config.size(); i++ ) {
BSONObj x;
try {
- ScopedDbConnection conn( _config[i] );
+ ScopedDbConnection conn( _config[i], 30.0 );
+
+ // check auth
+ conn->update("config.foo.bar", BSONObj(), BSON("x" << 1));
+ conn->simpleCommand( "admin", &x, "getlasterror");
+ if (x["err"].type() == String && x["err"].String() == "unauthorized") {
+ errmsg = "not authorized, did you start with --keyFile?";
+ return false;
+ }
+
if ( ! conn->simpleCommand( "config" , &x , "dbhash" ) )
x = BSONObj();
else {
@@ -528,6 +583,9 @@ namespace mongo {
res.push_back(x);
}
+ if ( _config.size() == 1 )
+ return true;
+
if ( up == 0 ) {
errmsg = "no config servers reachable";
return false;
@@ -574,7 +632,7 @@ namespace mongo {
if ( checkConsistency ) {
string errmsg;
if ( ! checkConfigServersConsistent( errmsg ) ) {
- log( LL_ERROR ) << "config servers not in sync! " << errmsg << endl;
+ log( LL_ERROR ) << "config servers not in sync! " << errmsg << warnings;
return false;
}
}
@@ -589,7 +647,7 @@ namespace mongo {
bool ConfigServer::allUp( string& errmsg ) {
try {
- ScopedDbConnection conn( _primary );
+ ScopedDbConnection conn( _primary, 30.0 );
conn->getLastError();
conn.done();
return true;
@@ -603,7 +661,7 @@ namespace mongo {
}
int ConfigServer::dbConfigVersion() {
- ScopedDbConnection conn( _primary );
+ ScopedDbConnection conn( _primary, 30.0 );
int version = dbConfigVersion( conn.conn() );
conn.done();
return version;
@@ -629,7 +687,7 @@ namespace mongo {
void ConfigServer::reloadSettings() {
set<string> got;
- ScopedDbConnection conn( _primary );
+ ScopedDbConnection conn( _primary, 30.0 );
auto_ptr<DBClientCursor> c = conn->query( ShardNS::settings , BSONObj() );
assert( c.get() );
while ( c->more() ) {
@@ -637,7 +695,7 @@ namespace mongo {
string name = o["_id"].valuestrsafe();
got.insert( name );
if ( name == "chunksize" ) {
- log(1) << "MaxChunkSize: " << o["value"] << endl;
+ LOG(1) << "MaxChunkSize: " << o["value"] << endl;
Chunk::MaxChunkSize = o["value"].numberInt() * 1024 * 1024;
}
else if ( name == "balancer" ) {
@@ -703,7 +761,7 @@ namespace mongo {
assert( _primary.ok() );
- ScopedDbConnection conn( _primary );
+ ScopedDbConnection conn( _primary, 30.0 );
static bool createdCapped = false;
if ( ! createdCapped ) {
@@ -711,7 +769,7 @@ namespace mongo {
conn->createCollection( "config.changelog" , 1024 * 1024 * 10 , true );
}
catch ( UserException& e ) {
- log(1) << "couldn't create changelog (like race condition): " << e << endl;
+ LOG(1) << "couldn't create changelog (like race condition): " << e << endl;
// don't care
}
createdCapped = true;
@@ -731,7 +789,7 @@ namespace mongo {
void ConfigServer::replicaSetChange( const ReplicaSetMonitor * monitor ) {
try {
- ScopedDbConnection conn( configServer.getConnectionString() );
+ ScopedDbConnection conn( configServer.getConnectionString(), 30.0 );
conn->update( ShardNS::shard , BSON( "_id" << monitor->getName() ) , BSON( "$set" << BSON( "host" << monitor->getServerAddress() ) ) );
conn.done();
}
diff --git a/s/config.h b/s/config.h
index 13afe23..90c06cb 100644
--- a/s/config.h
+++ b/s/config.h
@@ -81,17 +81,27 @@ namespace mongo {
return _cm;
}
+ void resetCM( ChunkManager * cm ) {
+ assert(cm);
+ assert(_cm); // this has to be already sharded
+ _cm.reset( cm );
+ }
+
void shard( const string& ns , const ShardKeyPattern& key , bool unique );
void unshard();
bool isDirty() const { return _dirty; }
bool wasDropped() const { return _dropped; }
-
+
void save( const string& ns , DBClientBase* conn );
- bool needsReloading( DBClientBase * conn , const BSONObj& collectionInfo );
+ bool unique() const { return _unqiue; }
+ BSONObj key() const { return _key; }
+
private:
+ BSONObj _key;
+ bool _unqiue;
ChunkManagerPtr _cm;
bool _dirty;
bool _dropped;
@@ -133,6 +143,7 @@ namespace mongo {
bool isSharded( const string& ns );
ChunkManagerPtr getChunkManager( const string& ns , bool reload = false );
+ ChunkManagerPtr getChunkManagerIfExists( const string& ns , bool reload = false );
/**
* @return the correct for shard for the ns
@@ -172,7 +183,7 @@ namespace mongo {
bool _load();
bool _reload();
- void _save();
+ void _save( bool db = true, bool coll = true );
string _name; // e.g. "alleyinsider"
Shard _primary; // e.g. localhost , mongo.foo.com:9999
@@ -183,7 +194,7 @@ namespace mongo {
Collections _collections;
- mongo::mutex _lock; // TODO: change to r/w lock ??
+ mutable mongo::mutex _lock; // TODO: change to r/w lock ??
};
class ConfigServer : public DBConfig {
diff --git a/s/config_migrate.cpp b/s/config_migrate.cpp
index 57890a0..fff023c 100644
--- a/s/config_migrate.cpp
+++ b/s/config_migrate.cpp
@@ -17,7 +17,7 @@
*/
#include "pch.h"
-#include "../util/message.h"
+#include "../util/net/message.h"
#include "../util/unittest.h"
#include "../client/connpool.h"
#include "../client/model.h"
diff --git a/s/cursors.cpp b/s/cursors.cpp
index cf2735b..12b3d5e 100644
--- a/s/cursors.cpp
+++ b/s/cursors.cpp
@@ -22,6 +22,7 @@
#include "../db/queryutil.h"
#include "../db/commands.h"
#include "../util/concurrency/task.h"
+#include "../util/net/listen.h"
namespace mongo {
@@ -111,7 +112,7 @@ namespace mongo {
}
bool hasMore = sendMore && _cursor->more();
- log(6) << "\t hasMore:" << hasMore << " wouldSendMoreIfHad: " << sendMore << " id:" << getId() << " totalSent: " << _totalSent << endl;
+ LOG(6) << "\t hasMore:" << hasMore << " wouldSendMoreIfHad: " << sendMore << " id:" << getId() << " totalSent: " << _totalSent << endl;
replyToQuery( 0 , r.p() , r.m() , b.buf() , b.len() , num , _totalSent , hasMore ? getId() : 0 );
_totalSent += num;
@@ -130,13 +131,15 @@ namespace mongo {
CursorCache::~CursorCache() {
// TODO: delete old cursors?
- int logLevel = 1;
+ bool print = logLevel > 0;
if ( _cursors.size() || _refs.size() )
- logLevel = 0;
- log( logLevel ) << " CursorCache at shutdown - "
- << " sharded: " << _cursors.size()
- << " passthrough: " << _refs.size()
- << endl;
+ print = true;
+
+ if ( print )
+ cout << " CursorCache at shutdown - "
+ << " sharded: " << _cursors.size()
+ << " passthrough: " << _refs.size()
+ << endl;
}
ShardedClientCursorPtr CursorCache::get( long long id ) const {
@@ -184,7 +187,7 @@ namespace mongo {
long long CursorCache::genId() {
while ( true ) {
- long long x = security.getNonce();
+ long long x = Security::getNonce();
if ( x == 0 )
continue;
if ( x < 0 )
@@ -272,6 +275,9 @@ namespace mongo {
}
log() << "killing old cursor " << i->second->getId() << " idle for: " << idleFor << "ms" << endl; // TODO: make log(1)
_cursors.erase( i );
+ i = _cursors.begin(); // possible 2nd entry will get skipped, will get on next pass
+ if ( i == _cursors.end() )
+ break;
}
}
@@ -299,7 +305,7 @@ namespace mongo {
help << " example: { cursorInfo : 1 }";
}
virtual LockType locktype() const { return NONE; }
- bool run(const string&, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
+ bool run(const string&, BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
cursorCache.appendInfo( result );
if ( jsobj["setTimeout"].isNumber() )
CursorCache::TIMEOUT = jsobj["setTimeout"].numberLong();
diff --git a/s/d_chunk_manager.cpp b/s/d_chunk_manager.cpp
index d4fea30..82a06f6 100644
--- a/s/d_chunk_manager.cpp
+++ b/s/d_chunk_manager.cpp
@@ -21,6 +21,7 @@
#include "../client/connpool.h"
#include "../client/dbclientmockcursor.h"
#include "../db/instance.h"
+#include "../db/clientcursor.h"
#include "d_chunk_manager.h"
@@ -29,7 +30,7 @@ namespace mongo {
ShardChunkManager::ShardChunkManager( const string& configServer , const string& ns , const string& shardName ) {
// have to get a connection to the config db
- // special case if i'm the configdb since i'm locked and if i connect to myself
+ // special case if I'm the configdb since I'm locked and if I connect to myself
// its a deadlock
scoped_ptr<ScopedDbConnection> scoped;
scoped_ptr<DBDirectClient> direct;
@@ -112,7 +113,7 @@ namespace mongo {
BSONObj currMax = it->second;
++it;
- // coallesce the chunk's bounds in ranges if they are adjacent chunks
+ // coalesce the chunk's bounds in ranges if they are adjacent chunks
if ( min.isEmpty() ) {
min = currMin;
max = currMax;
@@ -136,13 +137,23 @@ namespace mongo {
static bool contains( const BSONObj& min , const BSONObj& max , const BSONObj& point ) {
return point.woCompare( min ) >= 0 && point.woCompare( max ) < 0;
}
+
+ bool ShardChunkManager::belongsToMe( ClientCursor* cc ) const {
+ verify( 15851 , cc );
+ if ( _rangesMap.size() == 0 )
+ return false;
+
+ return _belongsToMe( cc->extractFields( _key , true ) );
+ }
bool ShardChunkManager::belongsToMe( const BSONObj& obj ) const {
if ( _rangesMap.size() == 0 )
return false;
- BSONObj x = obj.extractFields(_key);
+ return _belongsToMe( obj.extractFields( _key , true ) );
+ }
+ bool ShardChunkManager::_belongsToMe( const BSONObj& x ) const {
RangeMap::const_iterator it = _rangesMap.upper_bound( x );
if ( it != _rangesMap.begin() )
it--;
@@ -206,7 +217,7 @@ namespace mongo {
ShardChunkManager* ShardChunkManager::cloneMinus( const BSONObj& min, const BSONObj& max, const ShardChunkVersion& version ) {
- // check that we have the exact chunk that'll be subtracted
+ // check that we have the exact chunk that will be subtracted
_assertChunkExists( min , max );
auto_ptr<ShardChunkManager> p( new ShardChunkManager );
@@ -282,14 +293,14 @@ namespace mongo {
//
// TODO drop the uniqueness constraint and tigthen the check below so that only the minor portion of version changes
if ( version <= _version ) {
- uasserted( 13592 , str::stream() << "version " << version.toString() << " not greater than " << _version.toString() );
+ uasserted( 14039 , str::stream() << "version " << version.toString() << " not greater than " << _version.toString() );
}
- // check that we have the exact chunk that'll be split and that the split point is valid
+ // check that we have the exact chunk that will be split and that the split point is valid
_assertChunkExists( min , max );
for ( vector<BSONObj>::const_iterator it = splitKeys.begin() ; it != splitKeys.end() ; ++it ) {
if ( ! contains( min , max , *it ) ) {
- uasserted( 13593 , str::stream() << "can split " << min << " -> " << max << " on " << *it );
+ uasserted( 14040 , str::stream() << "can split " << min << " -> " << max << " on " << *it );
}
}
diff --git a/s/d_chunk_manager.h b/s/d_chunk_manager.h
index 9fb95e7..fd5974e 100644
--- a/s/d_chunk_manager.h
+++ b/s/d_chunk_manager.h
@@ -25,6 +25,8 @@
namespace mongo {
+ class ClientCursor;
+
/**
* Controls the boundaries of all the chunks for a given collection that live in this shard.
*
@@ -102,6 +104,14 @@ namespace mongo {
bool belongsToMe( const BSONObj& obj ) const;
/**
+ * Checks whether a document belongs to this shard.
+ *
+ * @param cc cursor on a document containing the sharding keys (and, optionally, other attributes)
+ * @return true if this shard holds the object
+ */
+ bool belongsToMe( ClientCursor* cc ) const;
+
+ /**
* Given a chunk's min key (or empty doc), gets the boundary of the chunk following that one (the first).
*
* @param lookupKey is the min key for a previously obtained chunk or the empty document
@@ -119,6 +129,13 @@ namespace mongo {
string toString() const;
private:
+
+ /**
+ * @same as belongsToMe but the key has to be the shard key
+ */
+ bool _belongsToMe( const BSONObj& key ) const;
+
+
// highest ShardChunkVersion for which this ShardChunkManager's information is accurate
ShardChunkVersion _version;
diff --git a/s/d_logic.cpp b/s/d_logic.cpp
index 1ab7c64..9d4fd74 100644
--- a/s/d_logic.cpp
+++ b/s/d_logic.cpp
@@ -29,7 +29,7 @@
#include "../db/commands.h"
#include "../db/jsobj.h"
#include "../db/dbmessage.h"
-#include "../db/query.h"
+#include "../db/ops/query.h"
#include "../client/connpool.h"
@@ -56,11 +56,11 @@ namespace mongo {
DbMessage d(m);
const char *ns = d.getns();
string errmsg;
- if ( shardVersionOk( ns , opIsWrite( op ) , errmsg ) ) {
+ if ( shardVersionOk( ns , errmsg ) ) {
return false;
}
- log(1) << "connection meta data too old - will retry ns:(" << ns << ") op:(" << opToString(op) << ") " << errmsg << endl;
+ LOG(1) << "connection meta data too old - will retry ns:(" << ns << ") op:(" << opToString(op) << ") " << errmsg << endl;
if ( doesOpGetAResponse( op ) ) {
assert( dbresponse );
@@ -87,6 +87,8 @@ namespace mongo {
dbresponse->responseTo = m.header()->id;
return true;
}
+
+ uassert( 9517 , "writeback" , ( d.reservedField() & DbMessage::Reserved_FromWriteback ) == 0 );
OID writebackID;
writebackID.init();
@@ -95,8 +97,8 @@ namespace mongo {
const OID& clientID = ShardedConnectionInfo::get(false)->getID();
massert( 10422 , "write with bad shard config and no server id!" , clientID.isSet() );
- log(1) << "got write with an old config - writing back ns: " << ns << endl;
- if ( logLevel ) log(1) << debugString( m ) << endl;
+ LOG(1) << "got write with an old config - writing back ns: " << ns << endl;
+ if ( logLevel ) LOG(1) << m.toString() << endl;
BSONObjBuilder b;
b.appendBool( "writeBack" , true );
@@ -107,7 +109,7 @@ namespace mongo {
b.appendTimestamp( "version" , shardingState.getVersion( ns ) );
b.appendTimestamp( "yourVersion" , ShardedConnectionInfo::get( true )->getVersion( ns ) );
b.appendBinData( "msg" , m.header()->len , bdtCustom , (char*)(m.singleData()) );
- log(2) << "writing back msg with len: " << m.header()->len << " op: " << m.operation() << endl;
+ LOG(2) << "writing back msg with len: " << m.header()->len << " op: " << m.operation() << endl;
writeBackManager.queueWriteBack( clientID.str() , b.obj() );
return true;
diff --git a/s/d_logic.h b/s/d_logic.h
index 718836c..d96f937 100644
--- a/s/d_logic.h
+++ b/s/d_logic.h
@@ -173,6 +173,7 @@ namespace mongo {
static ShardedConnectionInfo* get( bool create );
static void reset();
+ static void addHook();
bool inForceVersionOkMode() const {
return _forceVersionOk;
@@ -219,7 +220,7 @@ namespace mongo {
/**
* @return true if the current threads shard version is ok, or not in sharded version
*/
- bool shardVersionOk( const string& ns , bool write , string& errmsg );
+ bool shardVersionOk( const string& ns , string& errmsg );
/**
* @return true if we took care of the message and nothing else should be done
diff --git a/s/d_migrate.cpp b/s/d_migrate.cpp
index 6f2607d..e24a02d 100644
--- a/s/d_migrate.cpp
+++ b/s/d_migrate.cpp
@@ -29,13 +29,12 @@
#include "../db/commands.h"
#include "../db/jsobj.h"
-#include "../db/dbmessage.h"
-#include "../db/query.h"
#include "../db/cmdline.h"
#include "../db/queryoptimizer.h"
#include "../db/btree.h"
#include "../db/repl_block.h"
#include "../db/dur.h"
+#include "../db/clientcursor.h"
#include "../client/connpool.h"
#include "../client/distlock.h"
@@ -43,6 +42,7 @@
#include "../util/queue.h"
#include "../util/unittest.h"
#include "../util/processinfo.h"
+#include "../util/ramlog.h"
#include "shard.h"
#include "d_logic.h"
@@ -53,6 +53,8 @@ using namespace std;
namespace mongo {
+ Tee* migrateLog = new RamLog( "migrate" );
+
class MoveTimingHelper {
public:
MoveTimingHelper( const string& where , const string& ns , BSONObj min , BSONObj max , int total )
@@ -72,7 +74,7 @@ namespace mongo {
configServer.logChange( (string)"moveChunk." + _where , _ns, _b.obj() );
}
catch ( const std::exception& e ) {
- log( LL_WARNING ) << "couldn't record timing for moveChunk '" << _where << "': " << e.what() << endl;
+ warning() << "couldn't record timing for moveChunk '" << _where << "': " << e.what() << migrateLog;
}
}
@@ -88,7 +90,7 @@ namespace mongo {
if ( op )
op->setMessage( s.c_str() );
else
- log( LL_WARNING ) << "op is null in MoveTimingHelper::done" << endl;
+ warning() << "op is null in MoveTimingHelper::done" << migrateLog;
_b.appendNumber( s , _t.millis() );
_t.reset();
@@ -98,7 +100,7 @@ namespace mongo {
ProcessInfo pi;
ss << " v:" << pi.getVirtualMemorySize()
<< " r:" << pi.getResidentSize();
- log() << ss.str() << endl;
+ log() << ss.str() << migrateLog;
#endif
}
@@ -130,7 +132,7 @@ namespace mongo {
};
struct OldDataCleanup {
- static AtomicUInt _numThreads; // how many threads are doing async cleanusp
+ static AtomicUInt _numThreads; // how many threads are doing async cleanup
string ns;
BSONObj min;
@@ -151,12 +153,31 @@ namespace mongo {
_numThreads--;
}
+ string toString() const {
+ return str::stream() << ns << " from " << min << " -> " << max;
+ }
+
void doRemove() {
ShardForceVersionOkModeBlock sf;
- writelock lk(ns);
- RemoveSaver rs("moveChunk",ns,"post-cleanup");
- long long num = Helpers::removeRange( ns , min , max , true , false , cmdLine.moveParanoia ? &rs : 0 );
- log() << "moveChunk deleted: " << num << endl;
+ {
+ writelock lk(ns);
+ RemoveSaver rs("moveChunk",ns,"post-cleanup");
+ long long numDeleted = Helpers::removeRange( ns , min , max , true , false , cmdLine.moveParanoia ? &rs : 0 );
+ log() << "moveChunk deleted: " << numDeleted << migrateLog;
+ }
+
+ ReplTime lastOpApplied = cc().getLastOp();
+
+ Timer t;
+ for ( int i=0; i<3600; i++ ) {
+ if ( opReplicatedEnough( lastOpApplied , ( getSlaveCount() / 2 ) + 1 ) ) {
+ LOG(t.seconds() < 30 ? 1 : 0) << "moveChunk repl sync took " << t.seconds() << " seconds" << migrateLog;
+ return;
+ }
+ sleepsecs(1);
+ }
+
+ warning() << "moveChunk repl sync timed out after " << t.seconds() << " seconds" << migrateLog;
}
};
@@ -172,7 +193,7 @@ namespace mongo {
}
virtual void help( stringstream& help ) const {
- help << "internal - should not be called directly" << endl;
+ help << "internal - should not be called directly" << migrateLog;
}
virtual bool slaveOk() const { return false; }
virtual bool adminOnly() const { return true; }
@@ -190,14 +211,14 @@ namespace mongo {
class MigrateFromStatus {
public:
- MigrateFromStatus() : _m("MigrateFromStatus") , _workLock( "MigrateFromStatus::WorkLock" ) {
+ MigrateFromStatus() : _m("MigrateFromStatus") , _workLock("MigrateFromStatus::workLock") {
_active = false;
_inCriticalSection = false;
_memoryUsed = 0;
}
void start( string ns , const BSONObj& min , const BSONObj& max ) {
- scoped_lock lk( _workLock );
+ scoped_lock ll(_workLock);
scoped_lock l(_m); // reads and writes _active
assert( ! _active );
@@ -255,7 +276,7 @@ namespace mongo {
ide = obj["_id"];
if ( ide.eoo() ) {
- log( LL_WARNING ) << "logOpForSharding got mod with no _id, ignoring obj: " << obj << endl;
+ warning() << "logOpForSharding got mod with no _id, ignoring obj: " << obj << migrateLog;
return;
}
@@ -284,7 +305,7 @@ namespace mongo {
case 'u':
if ( ! Helpers::findById( cc() , _ns.c_str() , ide.wrap() , it ) ) {
- log( LL_WARNING ) << "logOpForSharding couldn't find: " << ide << " even though should have" << endl;
+ warning() << "logOpForSharding couldn't find: " << ide << " even though should have" << migrateLog;
return;
}
break;
@@ -378,13 +399,13 @@ namespace mongo {
return false;
}
- scoped_ptr<ClientCursor> cc( new ClientCursor( QueryOption_NoCursorTimeout ,
- shared_ptr<Cursor>( new BtreeCursor( d , d->idxNo(*idx) , *idx , min , max , false , 1 ) ) ,
- _ns ) );
+ auto_ptr<ClientCursor> cc( new ClientCursor( QueryOption_NoCursorTimeout ,
+ shared_ptr<Cursor>( BtreeCursor::make( d , d->idxNo(*idx) , *idx , min , max , false , 1 ) ) ,
+ _ns ) );
// use the average object size to estimate how many objects a full chunk would carry
// do that while traversing the chunk's range using the sharding index, below
- // there's a fair amout of slack before we determine a chunk is too large because object sizes will vary
+ // there's a fair amount of slack before we determine a chunk is too large because object sizes will vary
unsigned long long maxRecsWhenFull;
long long avgRecSize;
const long long totalRecs = d->stats.nrecords;
@@ -412,7 +433,8 @@ namespace mongo {
// we can afford to yield here because any change to the base data that we might miss is already being
// queued and will be migrated in the 'transferMods' stage
- if ( ! cc->yieldSometimes() ) {
+ if ( ! cc->yieldSometimes( ClientCursor::DontNeed ) ) {
+ cc.release();
break;
}
@@ -422,19 +444,19 @@ namespace mongo {
}
if ( isLargeChunk ) {
- warning() << "can't move chunk of size (aprox) " << recCount * avgRecSize
+ warning() << "can't move chunk of size (approximately) " << recCount * avgRecSize
<< " because maximum size allowed to move is " << maxChunkSize
<< " ns: " << _ns << " " << _min << " -> " << _max
- << endl;
+ << migrateLog;
result.appendBool( "chunkTooBig" , true );
- result.appendNumber( "chunkSize" , (long long)(recCount * avgRecSize) );
+ result.appendNumber( "estimatedChunkSize" , (long long)(recCount * avgRecSize) );
errmsg = "chunk too big to move";
return false;
}
{
scoped_spinlock lk( _trackerLocks );
- log() << "moveChunk number of documents: " << _cloneLocs.size() << endl;
+ log() << "moveChunk number of documents: " << _cloneLocs.size() << migrateLog;
}
return true;
}
@@ -515,18 +537,19 @@ namespace mongo {
void setInCriticalSection( bool b ) { scoped_lock l(_m); _inCriticalSection = b; }
bool isActive() const { return _getActive(); }
-
-
+
void doRemove( OldDataCleanup& cleanup ) {
+ int it = 0;
while ( true ) {
+ if ( it > 20 && it % 10 == 0 ) log() << "doRemove iteration " << it << " for: " << cleanup << endl;
{
- scoped_lock lk( _workLock );
+ scoped_lock ll(_workLock);
if ( ! _active ) {
cleanup.doRemove();
return;
}
}
- sleepmillis( 100 );
+ sleepmillis( 1000 );
}
}
@@ -572,7 +595,10 @@ namespace mongo {
void _cleanupOldData( OldDataCleanup cleanup ) {
Client::initThread( cleanUpThreadName );
- log() << " (start) waiting to cleanup " << cleanup.ns << " from " << cleanup.min << " -> " << cleanup.max << " # cursors:" << cleanup.initial.size() << endl;
+ if (!noauth) {
+ cc().getAuthenticationInfo()->authorize("local", internalSecurity.user);
+ }
+ log() << " (start) waiting to cleanup " << cleanup << " # cursors:" << cleanup.initial.size() << migrateLog;
int loops = 0;
Timer t;
@@ -595,14 +621,14 @@ namespace mongo {
cleanup.initial = left;
if ( ( loops++ % 200 ) == 0 ) {
- log() << " (looping " << loops << ") waiting to cleanup " << cleanup.ns << " from " << cleanup.min << " -> " << cleanup.max << " # cursors:" << cleanup.initial.size() << endl;
+ log() << " (looping " << loops << ") waiting to cleanup " << cleanup.ns << " from " << cleanup.min << " -> " << cleanup.max << " # cursors:" << cleanup.initial.size() << migrateLog;
stringstream ss;
for ( set<CursorId>::iterator i=cleanup.initial.begin(); i!=cleanup.initial.end(); ++i ) {
CursorId id = *i;
ss << id << " ";
}
- log() << " cursors: " << ss.str() << endl;
+ log() << " cursors: " << ss.str() << migrateLog;
}
}
@@ -616,10 +642,10 @@ namespace mongo {
_cleanupOldData( cleanup );
}
catch ( std::exception& e ) {
- log() << " error cleaning old data:" << e.what() << endl;
+ log() << " error cleaning old data:" << e.what() << migrateLog;
}
catch ( ... ) {
- log() << " unknown error cleaning old data" << endl;
+ log() << " unknown error cleaning old data" << migrateLog;
}
}
@@ -635,7 +661,7 @@ namespace mongo {
public:
TransferModsCommand() : ChunkCommandHelper( "_transferMods" ) {}
- bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
return migrateFromStatus.transferMods( errmsg, result );
}
} transferModsCommand;
@@ -645,7 +671,7 @@ namespace mongo {
public:
InitialCloneCommand() : ChunkCommandHelper( "_migrateClone" ) {}
- bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
return migrateFromStatus.clone( errmsg, result );
}
} initialCloneCommand;
@@ -661,7 +687,7 @@ namespace mongo {
public:
MoveChunkCommand() : Command( "moveChunk" ) {}
virtual void help( stringstream& help ) const {
- help << "should not be calling this directly" << endl;
+ help << "should not be calling this directly" << migrateLog;
}
virtual bool slaveOk() const { return false; }
@@ -669,7 +695,7 @@ namespace mongo {
virtual LockType locktype() const { return NONE; }
- bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
// 1. parse options
// 2. make sure my view is complete and lock
// 3. start migrate
@@ -745,15 +771,24 @@ namespace mongo {
Shard fromShard( from );
Shard toShard( to );
- log() << "received moveChunk request: " << cmdObj << endl;
+ log() << "received moveChunk request: " << cmdObj << migrateLog;
timing.done(1);
// 2.
DistributedLock lockSetup( ConnectionString( shardingState.getConfigServer() , ConnectionString::SYNC ) , ns );
- dist_lock_try dlk( &lockSetup , (string)"migrate-" + min.toString() );
+ dist_lock_try dlk;
+
+ try{
+ dlk = dist_lock_try( &lockSetup , (string)"migrate-" + min.toString() );
+ }
+ catch( LockException& e ){
+ errmsg = str::stream() << "error locking distributed lock for migration " << "migrate-" << min.toString() << causedBy( e );
+ return false;
+ }
+
if ( ! dlk.got() ) {
- errmsg = "the collection's metadata lock is taken";
+ errmsg = str::stream() << "the collection metadata could not be locked with lock " << "migrate-" << min.toString();
result.append( "who" , dlk.other() );
return false;
}
@@ -785,8 +820,8 @@ namespace mongo {
result.append( "requestedMin" , min );
result.append( "requestedMax" , max );
- log( LL_WARNING ) << "aborted moveChunk because" << errmsg << ": " << min << "->" << max
- << " is now " << currMin << "->" << currMax << endl;
+ warning() << "aborted moveChunk because" << errmsg << ": " << min << "->" << max
+ << " is now " << currMin << "->" << currMax << migrateLog;
return false;
}
@@ -795,8 +830,8 @@ namespace mongo {
result.append( "from" , fromShard.getName() );
result.append( "official" , myOldShard );
- log( LL_WARNING ) << "aborted moveChunk because " << errmsg << ": chunk is at " << myOldShard
- << " and not at " << fromShard.getName() << endl;
+ warning() << "aborted moveChunk because " << errmsg << ": chunk is at " << myOldShard
+ << " and not at " << fromShard.getName() << migrateLog;
return false;
}
@@ -805,8 +840,8 @@ namespace mongo {
result.appendTimestamp( "officialVersion" , maxVersion );
result.appendTimestamp( "myVersion" , shardingState.getVersion( ns ) );
- log( LL_WARNING ) << "aborted moveChunk because " << errmsg << ": official " << maxVersion
- << " mine: " << shardingState.getVersion(ns) << endl;
+ warning() << "aborted moveChunk because " << errmsg << ": official " << maxVersion
+ << " mine: " << shardingState.getVersion(ns) << migrateLog;
return false;
}
@@ -815,7 +850,7 @@ namespace mongo {
ShardChunkVersion shardVersion;
shardingState.trySetVersion( ns , shardVersion /* will return updated */ );
- log() << "moveChunk request accepted at version " << shardVersion << endl;
+ log() << "moveChunk request accepted at version " << shardVersion << migrateLog;
}
timing.done(2);
@@ -860,10 +895,10 @@ namespace mongo {
res = res.getOwned();
conn.done();
- log(0) << "moveChunk data transfer progress: " << res << " my mem used: " << migrateFromStatus.mbUsed() << endl;
+ log(0) << "moveChunk data transfer progress: " << res << " my mem used: " << migrateFromStatus.mbUsed() << migrateLog;
if ( ! ok || res["state"].String() == "fail" ) {
- log( LL_WARNING ) << "moveChunk error transfering data caused migration abort: " << res << endl;
+ warning() << "moveChunk error transferring data caused migration abort: " << res << migrateLog;
errmsg = "data transfer error";
result.append( "cause" , res );
return false;
@@ -880,7 +915,7 @@ namespace mongo {
conn->runCommand( "admin" , BSON( "_recvChunkAbort" << 1 ) , res );
res = res.getOwned();
conn.done();
- error() << "aborting migrate because too much memory used res: " << res << endl;
+ error() << "aborting migrate because too much memory used res: " << res << migrateLog;
errmsg = "aborting migrate because too much memory used";
result.appendBool( "split" , true );
return false;
@@ -908,7 +943,7 @@ namespace mongo {
shardingState.donateChunk( ns , min , max , myVersion );
}
- log() << "moveChunk setting version to: " << myVersion << endl;
+ log() << "moveChunk setting version to: " << myVersion << migrateLog;
// 5.b
// we're under the collection lock here, too, so we can undo the chunk donation because no other state change
@@ -929,15 +964,15 @@ namespace mongo {
shardingState.undoDonateChunk( ns , min , max , currVersion );
}
- log() << "movChunk migrate commit not accepted by TO-shard: " << res
- << " resetting shard version to: " << currVersion << endl;
+ log() << "moveChunk migrate commit not accepted by TO-shard: " << res
+ << " resetting shard version to: " << currVersion << migrateLog;
errmsg = "_recvChunkCommit failed!";
result.append( "cause" , res );
return false;
}
- log() << "moveChunk migrate commit accepted by TO-shard: " << res << endl;
+ log() << "moveChunk migrate commit accepted by TO-shard: " << res << migrateLog;
}
// 5.c
@@ -1018,12 +1053,12 @@ namespace mongo {
updates.append( op.obj() );
log() << "moveChunk updating self version to: " << nextVersion << " through "
- << bumpMin << " -> " << bumpMax << " for collection '" << ns << "'" << endl;
+ << bumpMin << " -> " << bumpMax << " for collection '" << ns << "'" << migrateLog;
}
else {
- log() << "moveChunk moved last chunk out for collection '" << ns << "'" << endl;
+ log() << "moveChunk moved last chunk out for collection '" << ns << "'" << migrateLog;
}
updates.done();
@@ -1044,7 +1079,7 @@ namespace mongo {
preCond.done();
BSONObj cmd = cmdBuilder.obj();
- log(7) << "moveChunk update: " << cmd << endl;
+ LOG(7) << "moveChunk update: " << cmd << migrateLog;
bool ok = false;
BSONObj cmdResult;
@@ -1054,6 +1089,7 @@ namespace mongo {
conn.done();
}
catch ( DBException& e ) {
+ warning() << e << migrateLog;
ok = false;
BSONObjBuilder b;
e.getInfo().append( b );
@@ -1069,7 +1105,7 @@ namespace mongo {
// if the commit did not make it, currently the only way to fix this state is to bounce the mongod so
// that the old state (before migrating) be brought in
- warning() << "moveChunk commit outcome ongoing: " << cmd << " for command :" << cmdResult << endl;
+ warning() << "moveChunk commit outcome ongoing: " << cmd << " for command :" << cmdResult << migrateLog;
sleepsecs( 10 );
try {
@@ -1081,13 +1117,13 @@ namespace mongo {
ShardChunkVersion checkVersion = doc["lastmod"];
if ( checkVersion == nextVersion ) {
- log() << "moveChunk commit confirmed" << endl;
+ log() << "moveChunk commit confirmed" << migrateLog;
}
else {
error() << "moveChunk commit failed: version is at"
- << checkVersion << " instead of " << nextVersion << endl;
- error() << "TERMINATING" << endl;
+ << checkVersion << " instead of " << nextVersion << migrateLog;
+ error() << "TERMINATING" << migrateLog;
dbexit( EXIT_SHARDING_ERROR );
}
@@ -1095,8 +1131,8 @@ namespace mongo {
}
catch ( ... ) {
- error() << "moveChunk failed to get confirmation of commit" << endl;
- error() << "TERMINATING" << endl;
+ error() << "moveChunk failed to get confirmation of commit" << migrateLog;
+ error() << "TERMINATING" << migrateLog;
dbexit( EXIT_SHARDING_ERROR );
}
}
@@ -1118,11 +1154,11 @@ namespace mongo {
c.max = max.getOwned();
ClientCursor::find( ns , c.initial );
if ( c.initial.size() ) {
- log() << "forking for cleaning up chunk data" << endl;
+ log() << "forking for cleaning up chunk data" << migrateLog;
boost::thread t( boost::bind( &cleanupOldData , c ) );
}
else {
- log() << "doing delete inline" << endl;
+ log() << "doing delete inline" << migrateLog;
// 7.
c.doRemove();
}
@@ -1156,7 +1192,7 @@ namespace mongo {
class MigrateStatus {
public:
-
+
MigrateStatus() : m_active("MigrateStatus") { active = false; }
void prepare() {
@@ -1181,12 +1217,12 @@ namespace mongo {
catch ( std::exception& e ) {
state = FAIL;
errmsg = e.what();
- log( LL_ERROR ) << "migrate failed: " << e.what() << endl;
+ error() << "migrate failed: " << e.what() << migrateLog;
}
catch ( ... ) {
state = FAIL;
errmsg = "UNKNOWN ERROR";
- log( LL_ERROR ) << "migrate failed with unknown exception" << endl;
+ error() << "migrate failed with unknown exception" << migrateLog;
}
setActive( false );
}
@@ -1230,7 +1266,7 @@ namespace mongo {
RemoveSaver rs( "moveChunk" , ns , "preCleanup" );
long long num = Helpers::removeRange( ns , min , max , true , false , cmdLine.moveParanoia ? &rs : 0 );
if ( num )
- log( LL_WARNING ) << "moveChunkCmd deleted data already in chunk # objects: " << num << endl;
+ warning() << "moveChunkCmd deleted data already in chunk # objects: " << num << migrateLog;
timing.done(2);
}
@@ -1246,7 +1282,7 @@ namespace mongo {
state = FAIL;
errmsg = "_migrateClone failed: ";
errmsg += res.toString();
- error() << errmsg << endl;
+ error() << errmsg << migrateLog;
conn.done();
return;
}
@@ -1274,7 +1310,7 @@ namespace mongo {
}
// if running on a replicated system, we'll need to flush the docs we cloned to the secondaries
- ReplTime lastOpApplied;
+ ReplTime lastOpApplied = cc().getLastOp();
{
// 4. do bulk of mods
@@ -1285,7 +1321,7 @@ namespace mongo {
state = FAIL;
errmsg = "_transferMods failed: ";
errmsg += res.toString();
- log( LL_ERROR ) << "_transferMods failed: " << res << endl;
+ error() << "_transferMods failed: " << res << migrateLog;
conn.done();
return;
}
@@ -1306,7 +1342,7 @@ namespace mongo {
break;
if ( i > 100 ) {
- warning() << "secondaries having hard time keeping up with migrate" << endl;
+ warning() << "secondaries having hard time keeping up with migrate" << migrateLog;
}
sleepmillis( 20 );
@@ -1314,7 +1350,7 @@ namespace mongo {
if ( i == maxIterations ) {
errmsg = "secondary can't keep up with migrate";
- error() << errmsg << endl;
+ error() << errmsg << migrateLog;
conn.done();
state = FAIL;
return;
@@ -1324,15 +1360,25 @@ namespace mongo {
timing.done(4);
}
+ {
+ // pause to wait for replication
+ // this will prevent us from going into critical section until we're ready
+ Timer t;
+ while ( t.minutes() < 600 ) {
+ if ( flushPendingWrites( lastOpApplied ) )
+ break;
+ sleepsecs(1);
+ }
+ }
+
{
// 5. wait for commit
- Timer timeWaitingForCommit;
state = STEADY;
while ( state == STEADY || state == COMMIT_START ) {
BSONObj res;
if ( ! conn->runCommand( "admin" , BSON( "_transferMods" << 1 ) , res ) ) {
- log() << "_transferMods failed in STEADY state: " << res << endl;
+ log() << "_transferMods failed in STEADY state: " << res << migrateLog;
errmsg = res.toString();
state = FAIL;
conn.done();
@@ -1342,20 +1388,21 @@ namespace mongo {
if ( res["size"].number() > 0 && apply( res , &lastOpApplied ) )
continue;
- if ( state == COMMIT_START && flushPendingWrites( lastOpApplied ) )
- break;
-
+ if ( state == ABORT ) {
+ timing.note( "aborted" );
+ return;
+ }
+
+ if ( state == COMMIT_START ) {
+ if ( flushPendingWrites( lastOpApplied ) )
+ break;
+ }
+
sleepmillis( 10 );
}
- if ( state == ABORT ) {
- timing.note( "aborted" );
- return;
- }
-
- if ( timeWaitingForCommit.seconds() > 86400 ) {
- state = FAIL;
- errmsg = "timed out waiting for commit";
+ if ( state == FAIL ) {
+ errmsg = "imted out waiting for commit";
return;
}
@@ -1411,7 +1458,7 @@ namespace mongo {
BSONObj fullObj;
if ( Helpers::findById( cc() , ns.c_str() , id, fullObj ) ) {
if ( ! isInRange( fullObj , min , max ) ) {
- log() << "not applying out of range deletion: " << fullObj << endl;
+ log() << "not applying out of range deletion: " << fullObj << migrateLog;
continue;
}
@@ -1451,18 +1498,22 @@ namespace mongo {
bool flushPendingWrites( const ReplTime& lastOpApplied ) {
if ( ! opReplicatedEnough( lastOpApplied ) ) {
- warning() << "migrate commit attempt timed out contacting " << slaveCount
- << " slaves for '" << ns << "' " << min << " -> " << max << endl;
+ OpTime op( lastOpApplied );
+ OCCASIONALLY warning() << "migrate commit waiting for " << slaveCount
+ << " slaves for '" << ns << "' " << min << " -> " << max
+ << " waiting for: " << op
+ << migrateLog;
return false;
}
- log() << "migrate commit succeeded flushing to secondaries for '" << ns << "' " << min << " -> " << max << endl;
+
+ log() << "migrate commit succeeded flushing to secondaries for '" << ns << "' " << min << " -> " << max << migrateLog;
{
readlock lk(ns); // commitNow() currently requires it
// if durability is on, force a write to journal
if ( getDur().commitNow() ) {
- log() << "migrate commit flushed to journal for '" << ns << "' " << min << " -> " << max << endl;
+ log() << "migrate commit flushed to journal for '" << ns << "' " << min << " -> " << max << migrateLog;
}
}
@@ -1488,13 +1539,16 @@ namespace mongo {
if ( state != STEADY )
return false;
state = COMMIT_START;
-
- for ( int i=0; i<86400; i++ ) {
+
+ Timer t;
+ // we wait for the commit to succeed before giving up
+ while ( t.minutes() <= 5 ) {
sleepmillis(1);
if ( state == DONE )
return true;
}
- log() << "startCommit never finished!" << endl;
+ state = FAIL;
+ log() << "startCommit never finished!" << migrateLog;
return false;
}
@@ -1529,6 +1583,10 @@ namespace mongo {
void migrateThread() {
Client::initThread( "migrateThread" );
+ if (!noauth) {
+ ShardedConnectionInfo::addHook();
+ cc().getAuthenticationInfo()->authorize("local", internalSecurity.user);
+ }
migrateStatus.go();
cc().shutdown();
}
@@ -1539,7 +1597,7 @@ namespace mongo {
virtual LockType locktype() const { return WRITE; } // this is so don't have to do locking internally
- bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
if ( migrateStatus.getActive() ) {
errmsg = "migrate already in progress";
@@ -1576,7 +1634,7 @@ namespace mongo {
public:
RecvChunkStatusCommand() : ChunkCommandHelper( "_recvChunkStatus" ) {}
- bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
migrateStatus.status( result );
return 1;
}
@@ -1587,7 +1645,7 @@ namespace mongo {
public:
RecvChunkCommitCommand() : ChunkCommandHelper( "_recvChunkCommit" ) {}
- bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
bool ok = migrateStatus.startCommit();
migrateStatus.status( result );
return ok;
@@ -1599,7 +1657,7 @@ namespace mongo {
public:
RecvChunkAbortCommand() : ChunkCommandHelper( "_recvChunkAbort" ) {}
- bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
migrateStatus.abort();
migrateStatus.status( result );
return true;
@@ -1621,7 +1679,7 @@ namespace mongo {
assert( ! isInRange( BSON( "x" << 5 ) , min , max ) );
assert( ! isInRange( BSON( "x" << 6 ) , min , max ) );
- log(1) << "isInRangeTest passed" << endl;
+ LOG(1) << "isInRangeTest passed" << migrateLog;
}
} isInRangeTest;
}
diff --git a/s/d_split.cpp b/s/d_split.cpp
index 3ed6e9b..cef6188 100644
--- a/s/d_split.cpp
+++ b/s/d_split.cpp
@@ -22,10 +22,10 @@
#include "../db/btree.h"
#include "../db/commands.h"
-#include "../db/dbmessage.h"
#include "../db/jsobj.h"
-#include "../db/query.h"
+#include "../db/instance.h"
#include "../db/queryoptimizer.h"
+#include "../db/clientcursor.h"
#include "../client/connpool.h"
#include "../client/distlock.h"
@@ -57,7 +57,7 @@ namespace mongo {
"example: { medianKey:\"blog.posts\", keyPattern:{x:1}, min:{x:10}, max:{x:55} }\n"
"NOTE: This command may take a while to run";
}
- bool run(const string& dbname, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
+ bool run(const string& dbname, BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
const char *ns = jsobj.getStringField( "medianKey" );
BSONObj min = jsobj.getObjectField( "min" );
BSONObj max = jsobj.getObjectField( "max" );
@@ -74,22 +74,25 @@ namespace mongo {
NamespaceDetails *d = nsdetails(ns);
int idxNo = d->idxNo(*id);
- // only yielding on firt half for now
+ // only yielding on first half for now
// after this it should be in ram, so 2nd should be fast
{
- shared_ptr<Cursor> c( new BtreeCursor( d, idxNo, *id, min, max, false, 1 ) );
- scoped_ptr<ClientCursor> cc( new ClientCursor( QueryOption_NoCursorTimeout , c , ns ) );
+ shared_ptr<Cursor> c( BtreeCursor::make( d, idxNo, *id, min, max, false, 1 ) );
+ auto_ptr<ClientCursor> cc( new ClientCursor( QueryOption_NoCursorTimeout , c , ns ) );
while ( c->ok() ) {
num++;
c->advance();
- if ( ! cc->yieldSometimes() )
+ if ( ! cc->yieldSometimes( ClientCursor::DontNeed ) ) {
+ cc.release();
break;
+ }
}
}
num /= 2;
- BtreeCursor c( d, idxNo, *id, min, max, false, 1 );
+ auto_ptr<BtreeCursor> _c( BtreeCursor::make( d, idxNo, *id, min, max, false, 1 ) );
+ BtreeCursor& c = *_c;
for( ; num; c.advance(), --num );
ostringstream os;
@@ -133,12 +136,12 @@ namespace mongo {
help << "Internal command.\n";
}
- bool run(const string& dbname, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
+ bool run(const string& dbname, BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
const char* ns = jsobj.getStringField( "checkShardingIndex" );
BSONObj keyPattern = jsobj.getObjectField( "keyPattern" );
- if ( keyPattern.nFields() == 1 && str::equals( "_id" , keyPattern.firstElement().fieldName() ) ) {
+ if ( keyPattern.nFields() == 1 && str::equals( "_id" , keyPattern.firstElementFieldName() ) ) {
result.appendBool( "idskip" , true );
return true;
}
@@ -174,9 +177,14 @@ namespace mongo {
return false;
}
- BtreeCursor * bc = new BtreeCursor( d , d->idxNo(*idx) , *idx , min , max , false , 1 );
+ if( d->isMultikey( d->idxNo( *idx ) ) ) {
+ errmsg = "index is multikey, cannot use for sharding";
+ return false;
+ }
+
+ BtreeCursor * bc = BtreeCursor::make( d , d->idxNo(*idx) , *idx , min , max , false , 1 );
shared_ptr<Cursor> c( bc );
- scoped_ptr<ClientCursor> cc( new ClientCursor( QueryOption_NoCursorTimeout , c , ns ) );
+ auto_ptr<ClientCursor> cc( new ClientCursor( QueryOption_NoCursorTimeout , c , ns ) );
if ( ! cc->ok() ) {
// range is empty
return true;
@@ -217,8 +225,10 @@ namespace mongo {
}
cc->advance();
- if ( ! cc->yieldSometimes() )
+ if ( ! cc->yieldSometimes( ClientCursor::DontNeed ) ) {
+ cc.release();
break;
+ }
}
return true;
@@ -243,7 +253,7 @@ namespace mongo {
"NOTE: This command may take a while to run";
}
- bool run(const string& dbname, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
+ bool run(const string& dbname, BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
//
// 1.a We'll parse the parameters in two steps. First, make sure the we can use the split index to get
@@ -368,9 +378,9 @@ namespace mongo {
long long currCount = 0;
long long numChunks = 0;
- BtreeCursor * bc = new BtreeCursor( d , d->idxNo(*idx) , *idx , min , max , false , 1 );
+ BtreeCursor * bc = BtreeCursor::make( d , d->idxNo(*idx) , *idx , min , max , false , 1 );
shared_ptr<Cursor> c( bc );
- scoped_ptr<ClientCursor> cc( new ClientCursor( QueryOption_NoCursorTimeout , c , ns ) );
+ auto_ptr<ClientCursor> cc( new ClientCursor( QueryOption_NoCursorTimeout , c , ns ) );
if ( ! cc->ok() ) {
errmsg = "can't open a cursor for splitting (desired range is possibly empty)";
return false;
@@ -414,13 +424,13 @@ namespace mongo {
break;
}
- if ( ! cc->yieldSometimes() ) {
+ if ( ! cc->yieldSometimes( ClientCursor::DontNeed ) ) {
// we were near and and got pushed to the end
// i think returning the splits we've already found is fine
- // don't use the btree cursor pointer to acces keys beyond this point but ok
+ // don't use the btree cursor pointer to access keys beyond this point but ok
// to use it to format the keys we've got already
-
+ cc.release();
break;
}
}
@@ -433,7 +443,7 @@ namespace mongo {
currCount = 0;
log() << "splitVector doing another cycle because of force, keyCount now: " << keyCount << endl;
- bc = new BtreeCursor( d , d->idxNo(*idx) , *idx , min , max , false , 1 );
+ bc = BtreeCursor::make( d , d->idxNo(*idx) , *idx , min , max , false , 1 );
c.reset( bc );
cc.reset( new ClientCursor( QueryOption_NoCursorTimeout , c , ns ) );
}
@@ -519,7 +529,7 @@ namespace mongo {
virtual bool adminOnly() const { return true; }
virtual LockType locktype() const { return NONE; }
- bool run(const string& dbname, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
+ bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
//
// 1. check whether parameters passed to splitChunk are sound
@@ -531,31 +541,31 @@ namespace mongo {
return false;
}
- BSONObj keyPattern = cmdObj["keyPattern"].Obj();
+ const BSONObj keyPattern = cmdObj["keyPattern"].Obj();
if ( keyPattern.isEmpty() ) {
errmsg = "need to specify the key pattern the collection is sharded over";
return false;
}
- BSONObj min = cmdObj["min"].Obj();
+ const BSONObj min = cmdObj["min"].Obj();
if ( min.isEmpty() ) {
- errmsg = "neet to specify the min key for the chunk";
+ errmsg = "need to specify the min key for the chunk";
return false;
}
- BSONObj max = cmdObj["max"].Obj();
+ const BSONObj max = cmdObj["max"].Obj();
if ( max.isEmpty() ) {
- errmsg = "neet to specify the max key for the chunk";
+ errmsg = "need to specify the max key for the chunk";
return false;
}
- string from = cmdObj["from"].str();
+ const string from = cmdObj["from"].str();
if ( from.empty() ) {
errmsg = "need specify server to split chunk at";
return false;
}
- BSONObj splitKeysElem = cmdObj["splitKeys"].Obj();
+ const BSONObj splitKeysElem = cmdObj["splitKeys"].Obj();
if ( splitKeysElem.isEmpty() ) {
errmsg = "need to provide the split points to chunk over";
return false;
@@ -566,7 +576,7 @@ namespace mongo {
splitKeys.push_back( it.next().Obj().getOwned() );
}
- BSONElement shardId = cmdObj["shardId"];
+ const BSONElement shardId = cmdObj["shardId"];
if ( shardId.eoo() ) {
errmsg = "need to provide shardId";
return false;
@@ -594,7 +604,16 @@ namespace mongo {
//
DistributedLock lockSetup( ConnectionString( shardingState.getConfigServer() , ConnectionString::SYNC) , ns );
- dist_lock_try dlk( &lockSetup, string("split-") + min.toString() );
+ dist_lock_try dlk;
+
+ try{
+ dlk = dist_lock_try( &lockSetup, string("split-") + min.toString() );
+ }
+ catch( LockException& e ){
+ errmsg = str::stream() << "Error locking distributed lock for split." << causedBy( e );
+ return false;
+ }
+
if ( ! dlk.got() ) {
errmsg = "the collection's metadata lock is taken";
result.append( "who" , dlk.other() );
@@ -672,7 +691,7 @@ namespace mongo {
BSONObjBuilder logDetail;
origChunk.appendShortVersion( "before" , logDetail );
- log(1) << "before split on " << origChunk << endl;
+ LOG(1) << "before split on " << origChunk << endl;
vector<ChunkInfo> newChunks;
ShardChunkVersion myVersion = maxVersion;
@@ -695,7 +714,7 @@ namespace mongo {
op.appendBool( "b" , true );
op.append( "ns" , ShardNS::chunk );
- // add the modified (new) chunk infomation as the update object
+ // add the modified (new) chunk information as the update object
BSONObjBuilder n( op.subobjStart( "o" ) );
n.append( "_id" , Chunk::genID( ns , startKey ) );
n.appendTimestamp( "lastmod" , myVersion );
@@ -781,13 +800,28 @@ namespace mongo {
for ( int i=0; i < newChunksSize; i++ ) {
BSONObjBuilder chunkDetail;
chunkDetail.appendElements( beforeDetailObj );
- chunkDetail.append( "number", i );
+ chunkDetail.append( "number", i+1 );
chunkDetail.append( "of" , newChunksSize );
newChunks[i].appendShortVersion( "chunk" , chunkDetail );
configServer.logChange( "multi-split" , ns , chunkDetail.obj() );
}
}
+ if (newChunks.size() == 2){
+ // If one of the chunks has only one object in it we should move it
+ static const BSONObj fields = BSON("_id" << 1 );
+ DBDirectClient conn;
+ for (int i=1; i >= 0 ; i--){ // high chunk more likely to have only one obj
+ ChunkInfo chunk = newChunks[i];
+ Query q = Query().minKey(chunk.min).maxKey(chunk.max);
+ scoped_ptr<DBClientCursor> c (conn.query(ns, q, /*limit*/-2, 0, &fields));
+ if (c && c->itcount() == 1) {
+ result.append("shouldMigrate", BSON("min" << chunk.min << "max" << chunk.max));
+ break;
+ }
+ }
+ }
+
return true;
}
} cmdSplitChunk;
diff --git a/s/d_state.cpp b/s/d_state.cpp
index e10400f..f43865b 100644
--- a/s/d_state.cpp
+++ b/s/d_state.cpp
@@ -28,8 +28,7 @@
#include "../db/commands.h"
#include "../db/jsobj.h"
-#include "../db/dbmessage.h"
-#include "../db/query.h"
+#include "../db/db.h"
#include "../client/connpool.h"
@@ -289,7 +288,7 @@ namespace mongo {
ShardedConnectionInfo* ShardedConnectionInfo::get( bool create ) {
ShardedConnectionInfo* info = _tl.get();
if ( ! info && create ) {
- log(1) << "entering shard mode for connection" << endl;
+ LOG(1) << "entering shard mode for connection" << endl;
info = new ShardedConnectionInfo();
_tl.reset( info );
}
@@ -314,6 +313,15 @@ namespace mongo {
_versions[ns] = version;
}
+ void ShardedConnectionInfo::addHook() {
+ static bool done = false;
+ if (!done) {
+ LOG(1) << "adding sharding hook" << endl;
+ pool.addHook(new ShardingConnectionHook(false));
+ done = true;
+ }
+ }
+
void ShardedConnectionInfo::setID( const OID& id ) {
_id = id;
}
@@ -372,7 +380,7 @@ namespace mongo {
virtual bool slaveOk() const { return true; }
- bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
ShardedConnectionInfo::reset();
return true;
}
@@ -412,6 +420,7 @@ namespace mongo {
}
if ( locked ) {
+ ShardedConnectionInfo::addHook();
shardingState.enable( configdb );
configServer.init( configdb );
return true;
@@ -443,7 +452,7 @@ namespace mongo {
return true;
}
- bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
// Steps
// 1. check basic config
@@ -476,7 +485,7 @@ namespace mongo {
string ns = cmdObj["setShardVersion"].valuestrsafe();
if ( ns.size() == 0 ) {
- errmsg = "need to speciy namespace";
+ errmsg = "need to specify namespace";
return false;
}
@@ -493,7 +502,7 @@ namespace mongo {
if ( globalVersion > 0 && version > 0 ) {
// this means there is no reset going on an either side
- // so its safe to make some assuptions
+ // so its safe to make some assumptions
if ( version == globalVersion ) {
// mongos and mongod agree!
@@ -507,6 +516,10 @@ namespace mongo {
}
// step 4
+
+ // this is because of a weird segfault I saw and I can't see why this should ever be set
+ massert( 13647 , str::stream() << "context should be empty here, is: " << cc().getContext()->ns() , cc().getContext() == 0 );
+
dblock setShardVersionLock; // TODO: can we get rid of this??
if ( oldVersion > 0 && globalVersion == 0 ) {
@@ -538,7 +551,7 @@ namespace mongo {
}
if ( version < oldVersion ) {
- errmsg = "you already have a newer version of collection '" + ns + "'";
+ errmsg = "this connection already had a newer version of collection '" + ns + "'";
result.append( "ns" , ns );
result.appendTimestamp( "newVersion" , version );
result.appendTimestamp( "globalVersion" , globalVersion );
@@ -551,10 +564,11 @@ namespace mongo {
sleepmillis(2);
OCCASIONALLY log() << "waiting till out of critical section" << endl;
}
- errmsg = "going to older version for global for collection '" + ns + "'";
+ errmsg = "shard global version for collection is higher than trying to set to '" + ns + "'";
result.append( "ns" , ns );
result.appendTimestamp( "version" , version );
result.appendTimestamp( "globalVersion" , globalVersion );
+ result.appendBool( "reloadConfig" , true );
return false;
}
@@ -572,7 +586,7 @@ namespace mongo {
ShardChunkVersion currVersion = version;
if ( ! shardingState.trySetVersion( ns , currVersion ) ) {
- errmsg = str::stream() << "client version differs from config's for colleciton '" << ns << "'";
+ errmsg = str::stream() << "client version differs from config's for collection '" << ns << "'";
result.append( "ns" , ns );
result.appendTimestamp( "version" , version );
result.appendTimestamp( "globalVersion" , currVersion );
@@ -599,10 +613,10 @@ namespace mongo {
virtual LockType locktype() const { return NONE; }
- bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
string ns = cmdObj["getShardVersion"].valuestrsafe();
if ( ns.size() == 0 ) {
- errmsg = "need to speciy fully namespace";
+ errmsg = "need to specify full namespace";
return false;
}
@@ -611,6 +625,7 @@ namespace mongo {
result.appendTimestamp( "global" , shardingState.getVersion(ns) );
ShardedConnectionInfo* info = ShardedConnectionInfo::get( false );
+ result.appendBool( "inShardedMode" , info != 0 );
if ( info )
result.appendTimestamp( "mine" , info->getVersion(ns) );
else
@@ -627,7 +642,7 @@ namespace mongo {
virtual LockType locktype() const { return WRITE; } // TODO: figure out how to make this not need to lock
- bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
shardingState.appendInfo( result );
return true;
}
@@ -638,7 +653,7 @@ namespace mongo {
* @ return true if not in sharded mode
or if version for this client is ok
*/
- bool shardVersionOk( const string& ns , bool isWriteOp , string& errmsg ) {
+ bool shardVersionOk( const string& ns , string& errmsg ) {
if ( ! shardingState.enabled() )
return true;
@@ -668,7 +683,7 @@ namespace mongo {
if ( version == 0 && clientVersion > 0 ) {
stringstream ss;
- ss << "collection was dropped or this shard no longer valied version: " << version << " clientVersion: " << clientVersion;
+ ss << "collection was dropped or this shard no longer valid version: " << version << " clientVersion: " << clientVersion;
errmsg = ss.str();
return false;
}
@@ -697,4 +712,7 @@ namespace mongo {
return false;
}
+ void ShardingConnectionHook::onHandedOut( DBClientBase * conn ) {
+ // no-op for mongod
+ }
}
diff --git a/s/d_writeback.cpp b/s/d_writeback.cpp
index 401e0aa..01c0c14 100644
--- a/s/d_writeback.cpp
+++ b/s/d_writeback.cpp
@@ -20,6 +20,7 @@
#include "../db/commands.h"
#include "../util/queue.h"
+#include "../util/net/listen.h"
#include "d_writeback.h"
@@ -39,28 +40,82 @@ namespace mongo {
}
void WriteBackManager::queueWriteBack( const string& remote , const BSONObj& o ) {
- getWritebackQueue( remote )->push( o );
+ getWritebackQueue( remote )->queue.push( o );
}
- BlockingQueue<BSONObj>* WriteBackManager::getWritebackQueue( const string& remote ) {
+ shared_ptr<WriteBackManager::QueueInfo> WriteBackManager::getWritebackQueue( const string& remote ) {
scoped_lock lk ( _writebackQueueLock );
- BlockingQueue<BSONObj>*& q = _writebackQueues[remote];
+ shared_ptr<QueueInfo>& q = _writebackQueues[remote];
if ( ! q )
- q = new BlockingQueue<BSONObj>();
+ q.reset( new QueueInfo() );
+ q->lastCall = Listener::getElapsedTimeMillis();
return q;
}
bool WriteBackManager::queuesEmpty() const {
scoped_lock lk( _writebackQueueLock );
for ( WriteBackQueuesMap::const_iterator it = _writebackQueues.begin(); it != _writebackQueues.end(); ++it ) {
- const BlockingQueue<BSONObj>* queue = it->second;
- if (! queue->empty() ) {
+ const shared_ptr<QueueInfo> queue = it->second;
+ if (! queue->queue.empty() ) {
return false;
}
}
return true;
}
+ void WriteBackManager::appendStats( BSONObjBuilder& b ) const {
+ BSONObjBuilder sub;
+ long long totalQueued = 0;
+ long long now = Listener::getElapsedTimeMillis();
+ {
+ scoped_lock lk( _writebackQueueLock );
+ for ( WriteBackQueuesMap::const_iterator it = _writebackQueues.begin(); it != _writebackQueues.end(); ++it ) {
+ const shared_ptr<QueueInfo> queue = it->second;
+
+ BSONObjBuilder t( sub.subobjStart( it->first ) );
+ t.appendNumber( "n" , queue->queue.size() );
+ t.appendNumber( "minutesSinceLastCall" , ( now - queue->lastCall ) / ( 1000 * 60 ) );
+ t.done();
+
+ totalQueued += queue->queue.size();
+ }
+ }
+
+ b.appendBool( "hasOpsQueued" , totalQueued > 0 );
+ b.appendNumber( "totalOpsQueued" , totalQueued );
+ b.append( "queues" , sub.obj() );
+ }
+
+ bool WriteBackManager::cleanupOldQueues() {
+ long long now = Listener::getElapsedTimeMillis();
+
+ scoped_lock lk( _writebackQueueLock );
+ for ( WriteBackQueuesMap::iterator it = _writebackQueues.begin(); it != _writebackQueues.end(); ++it ) {
+ const shared_ptr<QueueInfo> queue = it->second;
+ long long sinceMinutes = ( now - queue->lastCall ) / ( 1000 * 60 );
+
+ if ( sinceMinutes < 60 ) // minutes of inactivity.
+ continue;
+
+ log() << "deleting queue from: " << it->first
+ << " of size: " << queue->queue.size()
+ << " after " << sinceMinutes << " inactivity"
+ << " (normal if any mongos has restarted)"
+ << endl;
+
+ _writebackQueues.erase( it );
+ return true;
+ }
+ return false;
+ }
+
+ void WriteBackManager::Cleaner::taskDoWork() {
+ for ( int i=0; i<1000; i++ ) {
+ if ( ! writeBackManager.cleanupOldQueues() )
+ break;
+ }
+ }
+
// ---------- admin commands ----------
// Note, this command will block until there is something to WriteBack
@@ -74,7 +129,7 @@ namespace mongo {
void help(stringstream& h) const { h<<"internal"; }
- bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
BSONElement e = cmdObj.firstElement();
if ( e.type() != jstOID ) {
@@ -88,8 +143,8 @@ namespace mongo {
// the command issuer is blocked awaiting a response
// we want to do return at least at every 5 minutes so sockets don't timeout
BSONObj z;
- if ( writeBackManager.getWritebackQueue(id.str())->blockingPop( z, 5 * 60 /* 5 minutes */ ) ) {
- log(1) << "WriteBackCommand got : " << z << endl;
+ if ( writeBackManager.getWritebackQueue(id.str())->queue.blockingPop( z, 5 * 60 /* 5 minutes */ ) ) {
+ LOG(1) << "WriteBackCommand got : " << z << endl;
result.append( "data" , z );
}
else {
@@ -110,14 +165,15 @@ namespace mongo {
void help(stringstream& help) const {
help << "Returns whether there are operations in the writeback queue at the time the command was called. "
- << "This is an internal comand";
+ << "This is an internal command";
}
- bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
- result.appendBool( "hasOpsQueued" , ! writeBackManager.queuesEmpty() );
+ bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
+ writeBackManager.appendStats( result );
return true;
}
} writeBacksQueuedCommand;
+
} // namespace mongo
diff --git a/s/d_writeback.h b/s/d_writeback.h
index 32f5b1c..d3f36a1 100644
--- a/s/d_writeback.h
+++ b/s/d_writeback.h
@@ -21,6 +21,7 @@
#include "../pch.h"
#include "../util/queue.h"
+#include "../util/background.h"
namespace mongo {
@@ -33,6 +34,21 @@ namespace mongo {
*/
class WriteBackManager {
public:
+
+ class QueueInfo : boost::noncopyable {
+ public:
+ QueueInfo(){}
+
+ BlockingQueue<BSONObj> queue;
+ long long lastCall; // this is elapsed millis since startup
+ };
+
+ // a map from mongos's serverIDs to queues of "rejected" operations
+ // an operation is rejected if it targets data that does not live on this shard anymore
+ typedef map<string,shared_ptr<QueueInfo> > WriteBackQueuesMap;
+
+
+ public:
WriteBackManager();
~WriteBackManager();
@@ -51,22 +67,37 @@ namespace mongo {
*
* Gets access to server 'remote's queue, which is synchronized.
*/
- BlockingQueue<BSONObj>* getWritebackQueue( const string& remote );
+ shared_ptr<QueueInfo> getWritebackQueue( const string& remote );
/*
* @return true if there is no operation queued for write back
*/
bool queuesEmpty() const;
+ /**
+ * appends a number of statistics
+ */
+ void appendStats( BSONObjBuilder& b ) const;
+
+ /**
+ * removes queues that have been idle
+ * @return if something was removed
+ */
+ bool cleanupOldQueues();
+
private:
- // a map from mongos's serverIDs to queues of "rejected" operations
- // an operation is rejected if it targets data that does not live on this shard anymore
- typedef map< string , BlockingQueue<BSONObj>* > WriteBackQueuesMap;
-
+
// '_writebackQueueLock' protects only the map itself, since each queue is synchronized.
mutable mongo::mutex _writebackQueueLock;
WriteBackQueuesMap _writebackQueues;
+
+ class Cleaner : public PeriodicTask {
+ public:
+ virtual string taskName() const { return "WriteBackManager::cleaner"; }
+ virtual void taskDoWork();
+ };
+ Cleaner _cleaner;
};
// TODO collect global state in a central place and init during startup
diff --git a/s/dbgrid.vcxproj b/s/dbgrid.vcxproj
index 61a8458..33d6221 100644
--- a/s/dbgrid.vcxproj
+++ b/s/dbgrid.vcxproj
@@ -1,587 +1,616 @@
-<?xml version="1.0" encoding="utf-8"?>
-<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
- <ItemGroup Label="ProjectConfigurations">
- <ProjectConfiguration Include="Debug|Win32">
- <Configuration>Debug</Configuration>
- <Platform>Win32</Platform>
- </ProjectConfiguration>
- <ProjectConfiguration Include="Debug|x64">
- <Configuration>Debug</Configuration>
- <Platform>x64</Platform>
- </ProjectConfiguration>
- <ProjectConfiguration Include="Release|Win32">
- <Configuration>Release</Configuration>
- <Platform>Win32</Platform>
- </ProjectConfiguration>
- <ProjectConfiguration Include="Release|x64">
- <Configuration>Release</Configuration>
- <Platform>x64</Platform>
- </ProjectConfiguration>
- </ItemGroup>
- <PropertyGroup Label="Globals">
- <ProjectName>mongos</ProjectName>
- <ProjectGuid>{E03717ED-69B4-4D21-BC55-DF6690B585C6}</ProjectGuid>
- <RootNamespace>dbgrid</RootNamespace>
- <Keyword>Win32Proj</Keyword>
- </PropertyGroup>
- <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
- <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
- <ConfigurationType>Application</ConfigurationType>
- <CharacterSet>Unicode</CharacterSet>
- <WholeProgramOptimization>true</WholeProgramOptimization>
- </PropertyGroup>
- <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
- <ConfigurationType>Application</ConfigurationType>
- <CharacterSet>Unicode</CharacterSet>
- <WholeProgramOptimization>true</WholeProgramOptimization>
- </PropertyGroup>
- <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
- <ConfigurationType>Application</ConfigurationType>
- <CharacterSet>Unicode</CharacterSet>
- </PropertyGroup>
- <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
- <ConfigurationType>Application</ConfigurationType>
- <CharacterSet>Unicode</CharacterSet>
- </PropertyGroup>
- <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
- <ImportGroup Label="ExtensionSettings">
- </ImportGroup>
- <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="PropertySheets">
- <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
- </ImportGroup>
- <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
- <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
- </ImportGroup>
- <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="PropertySheets">
- <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
- </ImportGroup>
- <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
- <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
- </ImportGroup>
- <PropertyGroup Label="UserMacros" />
- <PropertyGroup>
- <_ProjectFileVersion>10.0.30319.1</_ProjectFileVersion>
- <OutDir Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">$(SolutionDir)$(Configuration)\</OutDir>
- <OutDir Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">$(SolutionDir)$(Configuration)\</OutDir>
- <IntDir Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">$(Configuration)\</IntDir>
- <IntDir Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">$(Configuration)\</IntDir>
- <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</LinkIncremental>
- <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</LinkIncremental>
- <OutDir Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">$(SolutionDir)$(Configuration)\</OutDir>
- <OutDir Condition="'$(Configuration)|$(Platform)'=='Release|x64'">$(SolutionDir)$(Configuration)\</OutDir>
- <IntDir Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">$(Configuration)\</IntDir>
- <IntDir Condition="'$(Configuration)|$(Platform)'=='Release|x64'">$(Configuration)\</IntDir>
- <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">false</LinkIncremental>
- <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Release|x64'">false</LinkIncremental>
- <CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">AllRules.ruleset</CodeAnalysisRuleSet>
- <CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">AllRules.ruleset</CodeAnalysisRuleSet>
- <CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" />
- <CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" />
- <CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" />
- <CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" />
- <CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">AllRules.ruleset</CodeAnalysisRuleSet>
- <CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Release|x64'">AllRules.ruleset</CodeAnalysisRuleSet>
- <CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" />
- <CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Release|x64'" />
- <CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" />
- <CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Release|x64'" />
- <IncludePath Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">..;$(IncludePath)</IncludePath>
- <IncludePath Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">..;$(IncludePath)</IncludePath>
- <IncludePath Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">..;$(IncludePath)</IncludePath>
- <IncludePath Condition="'$(Configuration)|$(Platform)'=='Release|x64'">..;$(IncludePath)</IncludePath>
- </PropertyGroup>
- <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- <ClCompile>
- <Optimization>Disabled</Optimization>
- <AdditionalIncludeDirectories>..\..\js\src;..\pcre-7.4;C:\boost;\boost;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
- <PreprocessorDefinitions>_UNICODE;UNICODE;SUPPORT_UCP;SUPPORT_UTF8;MONGO_EXPOSE_MACROS;XP_WIN;OLDJS;STATIC_JS_API;WIN32;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;HAVE_CONFIG_H;PCRE_STATIC;%(PreprocessorDefinitions)</PreprocessorDefinitions>
- <MinimalRebuild>No</MinimalRebuild>
- <BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>
- <RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary>
- <PrecompiledHeader>Use</PrecompiledHeader>
- <PrecompiledHeaderFile>pch.h</PrecompiledHeaderFile>
- <WarningLevel>Level3</WarningLevel>
- <DebugInformationFormat>EditAndContinue</DebugInformationFormat>
- <DisableSpecificWarnings>4355;4800;%(DisableSpecificWarnings)</DisableSpecificWarnings>
- <MultiProcessorCompilation>true</MultiProcessorCompilation>
- </ClCompile>
- <Link>
- <AdditionalDependencies>ws2_32.lib;Psapi.lib;%(AdditionalDependencies)</AdditionalDependencies>
- <AdditionalLibraryDirectories>c:\boost\lib\vs2010_32;\boost\lib\vs2010_32;\boost\lib</AdditionalLibraryDirectories>
- <GenerateDebugInformation>true</GenerateDebugInformation>
- <SubSystem>Console</SubSystem>
- <TargetMachine>MachineX86</TargetMachine>
- </Link>
- </ItemDefinitionGroup>
- <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- <ClCompile>
- <Optimization>Disabled</Optimization>
- <AdditionalIncludeDirectories>..\..\js\src;..\pcre-7.4;C:\boost;\boost;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
- <PreprocessorDefinitions>_UNICODE;UNICODE;SUPPORT_UCP;SUPPORT_UTF8;MONGO_EXPOSE_MACROS;XP_WIN;OLDJS;STATIC_JS_API;WIN32;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;HAVE_CONFIG_H;PCRE_STATIC;%(PreprocessorDefinitions)</PreprocessorDefinitions>
- <BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>
- <RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary>
- <PrecompiledHeader>Use</PrecompiledHeader>
- <PrecompiledHeaderFile>pch.h</PrecompiledHeaderFile>
- <WarningLevel>Level3</WarningLevel>
- <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
- <DisableSpecificWarnings>4355;4800;4267;4244;%(DisableSpecificWarnings)</DisableSpecificWarnings>
- <MinimalRebuild>No</MinimalRebuild>
- <MultiProcessorCompilation>true</MultiProcessorCompilation>
- </ClCompile>
- <Link>
- <AdditionalDependencies>ws2_32.lib;Psapi.lib;%(AdditionalDependencies)</AdditionalDependencies>
- <AdditionalLibraryDirectories>c:\boost\lib\vs2010_64;\boost\lib\vs2010_64;\boost\lib</AdditionalLibraryDirectories>
- <GenerateDebugInformation>true</GenerateDebugInformation>
- <SubSystem>Console</SubSystem>
- </Link>
- </ItemDefinitionGroup>
- <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- <ClCompile>
- <Optimization>MaxSpeed</Optimization>
- <IntrinsicFunctions>true</IntrinsicFunctions>
- <AdditionalIncludeDirectories>..\..\js\src;..\pcre-7.4;C:\boost;\boost;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
- <PreprocessorDefinitions>_UNICODE;UNICODE;SUPPORT_UCP;SUPPORT_UTF8;MONGO_EXPOSE_MACROS;OLDJS;STATIC_JS_API;XP_WIN;WIN32;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;HAVE_CONFIG_H;PCRE_STATIC;%(PreprocessorDefinitions)</PreprocessorDefinitions>
- <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
- <FunctionLevelLinking>true</FunctionLevelLinking>
- <PrecompiledHeader>Use</PrecompiledHeader>
- <WarningLevel>Level3</WarningLevel>
- <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
- <DisableSpecificWarnings>4355;4800;%(DisableSpecificWarnings)</DisableSpecificWarnings>
- <MinimalRebuild>No</MinimalRebuild>
- <MultiProcessorCompilation>true</MultiProcessorCompilation>
- <PrecompiledHeaderFile>pch.h</PrecompiledHeaderFile>
- </ClCompile>
- <Link>
- <AdditionalDependencies>ws2_32.lib;Psapi.lib;%(AdditionalDependencies)</AdditionalDependencies>
- <AdditionalLibraryDirectories>c:\boost\lib\vs2010_32;\boost\lib\vs2010_32;\boost\lib</AdditionalLibraryDirectories>
- <GenerateDebugInformation>true</GenerateDebugInformation>
- <SubSystem>Console</SubSystem>
- <OptimizeReferences>true</OptimizeReferences>
- <EnableCOMDATFolding>true</EnableCOMDATFolding>
- <TargetMachine>MachineX86</TargetMachine>
- </Link>
- </ItemDefinitionGroup>
- <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- <ClCompile>
- <Optimization>MaxSpeed</Optimization>
- <IntrinsicFunctions>true</IntrinsicFunctions>
- <AdditionalIncludeDirectories>..\..\js\src;..\pcre-7.4;C:\boost;\boost;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
- <PreprocessorDefinitions>_UNICODE;UNICODE;SUPPORT_UCP;SUPPORT_UTF8;MONGO_EXPOSE_MACROS;OLDJS;STATIC_JS_API;XP_WIN;WIN32;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;HAVE_CONFIG_H;PCRE_STATIC;%(PreprocessorDefinitions)</PreprocessorDefinitions>
- <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
- <FunctionLevelLinking>true</FunctionLevelLinking>
- <PrecompiledHeader>Use</PrecompiledHeader>
- <WarningLevel>Level3</WarningLevel>
- <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
- <DisableSpecificWarnings>4355;4800;4267;4244;%(DisableSpecificWarnings)</DisableSpecificWarnings>
- <MinimalRebuild>No</MinimalRebuild>
- <MultiProcessorCompilation>true</MultiProcessorCompilation>
- <PrecompiledHeaderFile>pch.h</PrecompiledHeaderFile>
- </ClCompile>
- <Link>
- <AdditionalDependencies>ws2_32.lib;Psapi.lib;%(AdditionalDependencies)</AdditionalDependencies>
- <AdditionalLibraryDirectories>c:\boost\lib\vs2010_64;\boost\lib\vs2010_64;\boost\lib</AdditionalLibraryDirectories>
- <GenerateDebugInformation>true</GenerateDebugInformation>
- <SubSystem>Console</SubSystem>
- <OptimizeReferences>true</OptimizeReferences>
- <EnableCOMDATFolding>true</EnableCOMDATFolding>
- </Link>
- </ItemDefinitionGroup>
- <ItemGroup>
- <ClCompile Include="..\bson\oid.cpp" />
- <ClCompile Include="..\client\dbclientcursor.cpp" />
- <ClCompile Include="..\client\dbclient_rs.cpp" />
- <ClCompile Include="..\client\distlock.cpp" />
- <ClCompile Include="..\db\dbwebserver.cpp" />
- <ClCompile Include="..\db\security_key.cpp" />
- <ClCompile Include="..\scripting\bench.cpp" />
- <ClCompile Include="..\util\alignedbuilder.cpp">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\util\concurrency\spin_lock.cpp" />
- <ClCompile Include="..\util\concurrency\task.cpp" />
- <ClCompile Include="..\util\concurrency\thread_pool.cpp" />
- <ClCompile Include="..\util\concurrency\vars.cpp" />
- <ClCompile Include="..\util\log.cpp" />
- <ClCompile Include="..\util\miniwebserver.cpp" />
- <ClCompile Include="..\util\processinfo.cpp" />
- <ClCompile Include="..\util\signal_handlers.cpp" />
- <ClCompile Include="..\util\stringutils.cpp" />
- <ClCompile Include="..\util\text.cpp" />
- <ClCompile Include="..\util\version.cpp" />
- <ClCompile Include="balance.cpp" />
- <ClCompile Include="balancer_policy.cpp" />
- <ClCompile Include="chunk.cpp" />
- <ClCompile Include="client.cpp" />
- <ClCompile Include="commands_admin.cpp" />
- <ClCompile Include="commands_public.cpp" />
- <ClCompile Include="config.cpp" />
- <ClCompile Include="config_migrate.cpp" />
- <ClCompile Include="cursors.cpp" />
- <ClCompile Include="..\pch.cpp">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">Create</PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">Create</PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">Create</PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">Create</PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\db\queryutil.cpp" />
- <ClCompile Include="grid.cpp" />
- <ClCompile Include="request.cpp" />
- <ClCompile Include="shardconnection.cpp" />
- <ClCompile Include="shard_version.cpp" />
- <ClCompile Include="s_only.cpp" />
- <ClCompile Include="server.cpp" />
- <ClCompile Include="shard.cpp" />
- <ClCompile Include="shardkey.cpp" />
- <ClCompile Include="stats.cpp" />
- <ClCompile Include="strategy.cpp" />
- <ClCompile Include="strategy_shard.cpp" />
- <ClCompile Include="strategy_single.cpp" />
- <ClCompile Include="..\scripting\utils.cpp" />
- <ClCompile Include="..\client\connpool.cpp" />
- <ClCompile Include="..\pcre-7.4\pcrecpp.cc">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_chartables.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_compile.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_config.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_dfa_exec.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_exec.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_fullinfo.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_get.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_globals.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_info.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_maketables.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_newline.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_ord2utf8.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_refcount.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_scanner.cc">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_stringpiece.cc">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_study.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_tables.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_try_flipped.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_ucp_searchfuncs.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_valid_utf8.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_version.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_xclass.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcreposix.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\client\dbclient.cpp" />
- <ClCompile Include="..\client\model.cpp" />
- <ClCompile Include="..\util\assert_util.cpp" />
- <ClCompile Include="..\util\background.cpp" />
- <ClCompile Include="..\util\base64.cpp" />
- <ClCompile Include="..\db\cmdline.cpp" />
- <ClCompile Include="..\db\commands.cpp" />
- <ClCompile Include="..\db\stats\counters.cpp" />
- <ClCompile Include="..\util\debug_util.cpp" />
- <ClCompile Include="..\scripting\engine.cpp" />
- <ClCompile Include="..\scripting\engine_spidermonkey.cpp" />
- <ClCompile Include="..\db\indexkey.cpp" />
- <ClCompile Include="..\db\jsobj.cpp" />
- <ClCompile Include="..\db\json.cpp" />
- <ClCompile Include="..\db\lasterror.cpp" />
- <ClCompile Include="..\db\matcher.cpp" />
- <ClCompile Include="..\util\md5.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\util\md5main.cpp">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">Use</PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">Use</PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\util\message.cpp" />
- <ClCompile Include="..\util\message_server_port.cpp" />
- <ClCompile Include="..\util\mmap.cpp" />
- <ClCompile Include="..\util\mmap_win.cpp" />
- <ClCompile Include="..\shell\mongo_vstudio.cpp">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\db\nonce.cpp" />
- <ClCompile Include="..\client\parallel.cpp" />
- <ClCompile Include="..\util\processinfo_win32.cpp" />
- <ClCompile Include="..\util\sock.cpp" />
- <ClCompile Include="..\client\syncclusterconnection.cpp" />
- <ClCompile Include="..\util\util.cpp" />
- <ClCompile Include="writeback_listener.cpp" />
- </ItemGroup>
- <ItemGroup>
- <ClInclude Include="..\util\processinfo.h" />
- <ClInclude Include="..\util\signal_handlers.h" />
- <ClInclude Include="..\util\version.h" />
- <ClInclude Include="balancer_policy.h" />
- <ClInclude Include="grid.h" />
- <ClInclude Include="gridconfig.h" />
- <ClInclude Include="griddatabase.h" />
- <ClInclude Include="shard.h" />
- <ClInclude Include="strategy.h" />
- <ClInclude Include="..\util\background.h" />
- <ClInclude Include="..\db\commands.h" />
- <ClInclude Include="..\db\dbmessage.h" />
- <ClInclude Include="..\util\goodies.h" />
- <ClInclude Include="..\db\jsobj.h" />
- <ClInclude Include="..\db\json.h" />
- <ClInclude Include="..\pch.h" />
- <ClInclude Include="..\..\boostw\boost_1_34_1\boost\config\auto_link.hpp" />
- <ClInclude Include="..\..\boostw\boost_1_34_1\boost\version.hpp" />
- <ClInclude Include="..\pcre-7.4\config.h" />
- <ClInclude Include="..\pcre-7.4\pcre.h" />
- <ClInclude Include="..\client\connpool.h" />
- <ClInclude Include="..\client\dbclient.h" />
- <ClInclude Include="..\client\model.h" />
- </ItemGroup>
- <ItemGroup>
- <Library Include="..\..\js\js32d.lib">
- <FileType>Document</FileType>
- <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
- <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild>
- <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>
- </Library>
- <Library Include="..\..\js\js32r.lib">
- <FileType>Document</FileType>
- <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
- <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
- <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild>
- </Library>
- <Library Include="..\..\js\js64d.lib">
- <FileType>Document</FileType>
- <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
- <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild>
- <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>
- </Library>
- <Library Include="..\..\js\js64r.lib">
- <FileType>Document</FileType>
- <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
- <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
- <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>
- </Library>
- </ItemGroup>
- <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
- <ImportGroup Label="ExtensionTargets">
- </ImportGroup>
+<?xml version="1.0" encoding="utf-8"?>
+<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <ItemGroup Label="ProjectConfigurations">
+ <ProjectConfiguration Include="Debug|Win32">
+ <Configuration>Debug</Configuration>
+ <Platform>Win32</Platform>
+ </ProjectConfiguration>
+ <ProjectConfiguration Include="Debug|x64">
+ <Configuration>Debug</Configuration>
+ <Platform>x64</Platform>
+ </ProjectConfiguration>
+ <ProjectConfiguration Include="Release|Win32">
+ <Configuration>Release</Configuration>
+ <Platform>Win32</Platform>
+ </ProjectConfiguration>
+ <ProjectConfiguration Include="Release|x64">
+ <Configuration>Release</Configuration>
+ <Platform>x64</Platform>
+ </ProjectConfiguration>
+ </ItemGroup>
+ <PropertyGroup Label="Globals">
+ <ProjectName>mongos</ProjectName>
+ <ProjectGuid>{E03717ED-69B4-4D21-BC55-DF6690B585C6}</ProjectGuid>
+ <RootNamespace>dbgrid</RootNamespace>
+ <Keyword>Win32Proj</Keyword>
+ </PropertyGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <CharacterSet>Unicode</CharacterSet>
+ <WholeProgramOptimization>true</WholeProgramOptimization>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <CharacterSet>Unicode</CharacterSet>
+ <WholeProgramOptimization>true</WholeProgramOptimization>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <CharacterSet>Unicode</CharacterSet>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <CharacterSet>Unicode</CharacterSet>
+ </PropertyGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
+ <ImportGroup Label="ExtensionSettings">
+ </ImportGroup>
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="PropertySheets">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="PropertySheets">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <PropertyGroup Label="UserMacros" />
+ <PropertyGroup>
+ <_ProjectFileVersion>10.0.30319.1</_ProjectFileVersion>
+ <OutDir Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">$(SolutionDir)$(Configuration)\</OutDir>
+ <OutDir Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">$(SolutionDir)$(Configuration)\</OutDir>
+ <IntDir Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">$(Configuration)\</IntDir>
+ <IntDir Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">$(Configuration)\</IntDir>
+ <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</LinkIncremental>
+ <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</LinkIncremental>
+ <OutDir Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">$(SolutionDir)$(Configuration)\</OutDir>
+ <OutDir Condition="'$(Configuration)|$(Platform)'=='Release|x64'">$(SolutionDir)$(Configuration)\</OutDir>
+ <IntDir Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">$(Configuration)\</IntDir>
+ <IntDir Condition="'$(Configuration)|$(Platform)'=='Release|x64'">$(Configuration)\</IntDir>
+ <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">false</LinkIncremental>
+ <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Release|x64'">false</LinkIncremental>
+ <CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">AllRules.ruleset</CodeAnalysisRuleSet>
+ <CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">AllRules.ruleset</CodeAnalysisRuleSet>
+ <CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" />
+ <CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" />
+ <CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" />
+ <CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" />
+ <CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">AllRules.ruleset</CodeAnalysisRuleSet>
+ <CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Release|x64'">AllRules.ruleset</CodeAnalysisRuleSet>
+ <CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" />
+ <CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Release|x64'" />
+ <CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" />
+ <CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Release|x64'" />
+ <IncludePath Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">..;$(IncludePath)</IncludePath>
+ <IncludePath Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">..;$(IncludePath)</IncludePath>
+ <IncludePath Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">..;$(IncludePath)</IncludePath>
+ <IncludePath Condition="'$(Configuration)|$(Platform)'=='Release|x64'">..;$(IncludePath)</IncludePath>
+ </PropertyGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ <ClCompile>
+ <Optimization>Disabled</Optimization>
+ <AdditionalIncludeDirectories>..\..\js\src;..\third_party\pcre-7.4;C:\boost;\boost;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+ <PreprocessorDefinitions>_UNICODE;UNICODE;MONGO_EXPOSE_MACROS;XP_WIN;OLDJS;STATIC_JS_API;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;HAVE_CONFIG_H;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <MinimalRebuild>No</MinimalRebuild>
+ <BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>
+ <RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary>
+ <PrecompiledHeader>Use</PrecompiledHeader>
+ <PrecompiledHeaderFile>pch.h</PrecompiledHeaderFile>
+ <WarningLevel>Level3</WarningLevel>
+ <DebugInformationFormat>EditAndContinue</DebugInformationFormat>
+ <DisableSpecificWarnings>4355;4800;%(DisableSpecificWarnings)</DisableSpecificWarnings>
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>
+ </ClCompile>
+ <Link>
+ <AdditionalDependencies>ws2_32.lib;Psapi.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ <AdditionalLibraryDirectories>c:\boost\lib\vs2010_32;\boost\lib\vs2010_32;\boost\lib</AdditionalLibraryDirectories>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <SubSystem>Console</SubSystem>
+ <TargetMachine>MachineX86</TargetMachine>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ <ClCompile>
+ <Optimization>Disabled</Optimization>
+ <AdditionalIncludeDirectories>..\..\js\src;..\third_party\pcre-7.4;C:\boost;\boost;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+ <PreprocessorDefinitions>_UNICODE;UNICODE;MONGO_EXPOSE_MACROS;XP_WIN;OLDJS;STATIC_JS_API;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;HAVE_CONFIG_H;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>
+ <RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary>
+ <PrecompiledHeader>Use</PrecompiledHeader>
+ <PrecompiledHeaderFile>pch.h</PrecompiledHeaderFile>
+ <WarningLevel>Level3</WarningLevel>
+ <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
+ <DisableSpecificWarnings>4355;4800;4267;4244;%(DisableSpecificWarnings)</DisableSpecificWarnings>
+ <MinimalRebuild>No</MinimalRebuild>
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>
+ </ClCompile>
+ <Link>
+ <AdditionalDependencies>ws2_32.lib;Psapi.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ <AdditionalLibraryDirectories>c:\boost\lib\vs2010_64;\boost\lib\vs2010_64;\boost\lib</AdditionalLibraryDirectories>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <SubSystem>Console</SubSystem>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ <ClCompile>
+ <Optimization>MaxSpeed</Optimization>
+ <IntrinsicFunctions>true</IntrinsicFunctions>
+ <AdditionalIncludeDirectories>..\..\js\src;..\third_party\pcre-7.4;C:\boost;\boost;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+ <PreprocessorDefinitions>_UNICODE;UNICODE;MONGO_EXPOSE_MACROS;OLDJS;STATIC_JS_API;XP_WIN;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;HAVE_CONFIG_H;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
+ <FunctionLevelLinking>true</FunctionLevelLinking>
+ <PrecompiledHeader>Use</PrecompiledHeader>
+ <WarningLevel>Level3</WarningLevel>
+ <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
+ <DisableSpecificWarnings>4355;4800;%(DisableSpecificWarnings)</DisableSpecificWarnings>
+ <MinimalRebuild>No</MinimalRebuild>
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>
+ <PrecompiledHeaderFile>pch.h</PrecompiledHeaderFile>
+ </ClCompile>
+ <Link>
+ <AdditionalDependencies>ws2_32.lib;Psapi.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ <AdditionalLibraryDirectories>c:\boost\lib\vs2010_32;\boost\lib\vs2010_32;\boost\lib</AdditionalLibraryDirectories>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <SubSystem>Console</SubSystem>
+ <OptimizeReferences>true</OptimizeReferences>
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>
+ <TargetMachine>MachineX86</TargetMachine>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ <ClCompile>
+ <Optimization>MaxSpeed</Optimization>
+ <IntrinsicFunctions>true</IntrinsicFunctions>
+ <AdditionalIncludeDirectories>..\..\js\src;..\third_party\pcre-7.4;C:\boost;\boost;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+ <PreprocessorDefinitions>_UNICODE;UNICODE;MONGO_EXPOSE_MACROS;OLDJS;STATIC_JS_API;XP_WIN;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;HAVE_CONFIG_H;PCRE_STATIC;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
+ <FunctionLevelLinking>true</FunctionLevelLinking>
+ <PrecompiledHeader>Use</PrecompiledHeader>
+ <WarningLevel>Level3</WarningLevel>
+ <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
+ <DisableSpecificWarnings>4355;4800;4267;4244;%(DisableSpecificWarnings)</DisableSpecificWarnings>
+ <MinimalRebuild>No</MinimalRebuild>
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>
+ <PrecompiledHeaderFile>pch.h</PrecompiledHeaderFile>
+ </ClCompile>
+ <Link>
+ <AdditionalDependencies>ws2_32.lib;Psapi.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ <AdditionalLibraryDirectories>c:\boost\lib\vs2010_64;\boost\lib\vs2010_64;\boost\lib</AdditionalLibraryDirectories>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <SubSystem>Console</SubSystem>
+ <OptimizeReferences>true</OptimizeReferences>
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemGroup>
+ <ClCompile Include="..\bson\oid.cpp" />
+ <ClCompile Include="..\client\dbclientcursor.cpp" />
+ <ClCompile Include="..\client\dbclient_rs.cpp" />
+ <ClCompile Include="..\client\distlock.cpp" />
+ <ClCompile Include="..\db\common.cpp" />
+ <ClCompile Include="..\db\dbmessage.cpp" />
+ <ClCompile Include="..\db\dbcommands_generic.cpp" />
+ <ClCompile Include="..\db\dbwebserver.cpp" />
+ <ClCompile Include="..\db\querypattern.cpp">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\db\security_common.cpp" />
+ <ClCompile Include="..\scripting\bench.cpp" />
+ <ClCompile Include="..\util\alignedbuilder.cpp">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\util\concurrency\spin_lock.cpp" />
+ <ClCompile Include="..\util\concurrency\task.cpp" />
+ <ClCompile Include="..\util\concurrency\thread_pool.cpp" />
+ <ClCompile Include="..\util\concurrency\vars.cpp" />
+ <ClCompile Include="..\util\log.cpp" />
+ <ClCompile Include="..\util\net\miniwebserver.cpp" />
+ <ClCompile Include="..\util\net\listen.cpp" />
+ <ClCompile Include="..\util\processinfo.cpp" />
+ <ClCompile Include="..\util\ramlog.cpp" />
+ <ClCompile Include="..\util\signal_handlers.cpp" />
+ <ClCompile Include="..\util\stringutils.cpp" />
+ <ClCompile Include="..\util\text.cpp" />
+ <ClCompile Include="..\util\version.cpp" />
+ <ClCompile Include="balance.cpp" />
+ <ClCompile Include="balancer_policy.cpp" />
+ <ClCompile Include="chunk.cpp" />
+ <ClCompile Include="client.cpp" />
+ <ClCompile Include="commands_admin.cpp" />
+ <ClCompile Include="commands_public.cpp" />
+ <ClCompile Include="config.cpp" />
+ <ClCompile Include="config_migrate.cpp" />
+ <ClCompile Include="cursors.cpp" />
+ <ClCompile Include="..\pch.cpp">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">Create</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">Create</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">Create</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">Create</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\db\queryutil.cpp" />
+ <ClCompile Include="grid.cpp" />
+ <ClCompile Include="mr_shard.cpp" />
+ <ClCompile Include="request.cpp" />
+ <ClCompile Include="security.cpp" />
+ <ClCompile Include="shardconnection.cpp" />
+ <ClCompile Include="shard_version.cpp" />
+ <ClCompile Include="s_only.cpp" />
+ <ClCompile Include="server.cpp" />
+ <ClCompile Include="shard.cpp" />
+ <ClCompile Include="shardkey.cpp" />
+ <ClCompile Include="stats.cpp" />
+ <ClCompile Include="strategy.cpp" />
+ <ClCompile Include="strategy_shard.cpp" />
+ <ClCompile Include="strategy_single.cpp" />
+ <ClCompile Include="..\scripting\utils.cpp" />
+ <ClCompile Include="..\client\connpool.cpp" />
+ <ClCompile Include="..\third_party\pcre-7.4\pcrecpp.cc">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_chartables.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_compile.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_config.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_dfa_exec.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_exec.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_fullinfo.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_get.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_globals.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_info.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_maketables.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_newline.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_ord2utf8.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_refcount.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_scanner.cc">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_stringpiece.cc">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_study.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_tables.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_try_flipped.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_ucp_searchfuncs.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_valid_utf8.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_version.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_xclass.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcreposix.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\client\dbclient.cpp" />
+ <ClCompile Include="..\client\model.cpp" />
+ <ClCompile Include="..\util\assert_util.cpp" />
+ <ClCompile Include="..\util\background.cpp" />
+ <ClCompile Include="..\util\base64.cpp" />
+ <ClCompile Include="..\db\cmdline.cpp" />
+ <ClCompile Include="..\db\commands.cpp" />
+ <ClCompile Include="..\db\stats\counters.cpp" />
+ <ClCompile Include="..\util\debug_util.cpp" />
+ <ClCompile Include="..\scripting\engine.cpp" />
+ <ClCompile Include="..\scripting\engine_spidermonkey.cpp" />
+ <ClCompile Include="..\db\indexkey.cpp" />
+ <ClCompile Include="..\db\jsobj.cpp" />
+ <ClCompile Include="..\db\json.cpp" />
+ <ClCompile Include="..\db\lasterror.cpp" />
+ <ClCompile Include="..\db\matcher.cpp" />
+ <ClCompile Include="..\util\md5.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\util\md5main.cpp">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">Use</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">Use</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\util\net\message.cpp" />
+ <ClCompile Include="..\util\net\message_port.cpp" />
+ <ClCompile Include="..\util\net\message_server_port.cpp" />
+ <ClCompile Include="..\util\mmap.cpp" />
+ <ClCompile Include="..\util\mmap_win.cpp" />
+ <ClCompile Include="..\shell\mongo_vstudio.cpp">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\db\nonce.cpp" />
+ <ClCompile Include="..\client\parallel.cpp" />
+ <ClCompile Include="..\util\processinfo_win32.cpp" />
+ <ClCompile Include="..\util\net\sock.cpp" />
+ <ClCompile Include="..\client\syncclusterconnection.cpp" />
+ <ClCompile Include="..\util\util.cpp" />
+ <ClCompile Include="writeback_listener.cpp" />
+ </ItemGroup>
+ <ItemGroup>
+ <ClInclude Include="..\util\processinfo.h" />
+ <ClInclude Include="..\util\signal_handlers.h" />
+ <ClInclude Include="..\util\version.h" />
+ <ClInclude Include="balance.h" />
+ <ClInclude Include="balancer_policy.h" />
+ <ClInclude Include="chunk.h" />
+ <ClInclude Include="client.h" />
+ <ClInclude Include="config.h" />
+ <ClInclude Include="cursors.h" />
+ <ClInclude Include="d_chunk_manager.h" />
+ <ClInclude Include="d_logic.h" />
+ <ClInclude Include="d_writeback.h" />
+ <ClInclude Include="grid.h" />
+ <ClInclude Include="gridconfig.h" />
+ <ClInclude Include="griddatabase.h" />
+ <ClInclude Include="request.h" />
+ <ClInclude Include="server.h" />
+ <ClInclude Include="shard.h" />
+ <ClInclude Include="shardkey.h" />
+ <ClInclude Include="shard_version.h" />
+ <ClInclude Include="stats.h" />
+ <ClInclude Include="strategy.h" />
+ <ClInclude Include="..\util\background.h" />
+ <ClInclude Include="..\db\commands.h" />
+ <ClInclude Include="..\db\dbmessage.h" />
+ <ClInclude Include="..\util\goodies.h" />
+ <ClInclude Include="..\db\jsobj.h" />
+ <ClInclude Include="..\db\json.h" />
+ <ClInclude Include="..\pch.h" />
+ <ClInclude Include="..\..\boostw\boost_1_34_1\boost\config\auto_link.hpp" />
+ <ClInclude Include="..\..\boostw\boost_1_34_1\boost\version.hpp" />
+ <ClInclude Include="..\third_party\pcre-7.4\config.h" />
+ <ClInclude Include="..\third_party\pcre-7.4\pcre.h" />
+ <ClInclude Include="..\client\connpool.h" />
+ <ClInclude Include="..\client\dbclient.h" />
+ <ClInclude Include="..\client\model.h" />
+ <ClInclude Include="util.h" />
+ <ClInclude Include="writeback_listener.h" />
+ </ItemGroup>
+ <ItemGroup>
+ <Library Include="..\..\js\js32d.lib">
+ <FileType>Document</FileType>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>
+ </Library>
+ <Library Include="..\..\js\js32r.lib">
+ <FileType>Document</FileType>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild>
+ </Library>
+ <Library Include="..\..\js\js64d.lib">
+ <FileType>Document</FileType>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>
+ </Library>
+ <Library Include="..\..\js\js64r.lib">
+ <FileType>Document</FileType>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>
+ </Library>
+ </ItemGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
+ <ImportGroup Label="ExtensionTargets">
+ </ImportGroup>
 </Project>
\ No newline at end of file
diff --git a/s/dbgrid.vcxproj.filters b/s/dbgrid.vcxproj.filters
index b87a1f2..e417e95 100755
--- a/s/dbgrid.vcxproj.filters
+++ b/s/dbgrid.vcxproj.filters
@@ -83,78 +83,6 @@
<ClCompile Include="..\client\connpool.cpp">
<Filter>Header Files\Header Shared</Filter>
</ClCompile>
- <ClCompile Include="..\pcre-7.4\pcrecpp.cc">
- <Filter>libs_etc</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_chartables.c">
- <Filter>libs_etc\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_compile.c">
- <Filter>libs_etc\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_config.c">
- <Filter>libs_etc\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_dfa_exec.c">
- <Filter>libs_etc\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_exec.c">
- <Filter>libs_etc\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_fullinfo.c">
- <Filter>libs_etc\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_get.c">
- <Filter>libs_etc\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_globals.c">
- <Filter>libs_etc\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_info.c">
- <Filter>libs_etc\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_maketables.c">
- <Filter>libs_etc\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_newline.c">
- <Filter>libs_etc\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_ord2utf8.c">
- <Filter>libs_etc\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_refcount.c">
- <Filter>libs_etc\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_scanner.cc">
- <Filter>libs_etc\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_stringpiece.cc">
- <Filter>libs_etc\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_study.c">
- <Filter>libs_etc\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_tables.c">
- <Filter>libs_etc\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_try_flipped.c">
- <Filter>libs_etc\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_ucp_searchfuncs.c">
- <Filter>libs_etc\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_valid_utf8.c">
- <Filter>libs_etc\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_version.c">
- <Filter>libs_etc\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_xclass.c">
- <Filter>libs_etc\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcreposix.c">
- <Filter>libs_etc\pcre</Filter>
- </ClCompile>
<ClCompile Include="..\client\dbclient.cpp">
<Filter>client</Filter>
</ClCompile>
@@ -317,9 +245,27 @@
<ClCompile Include="client.cpp">
<Filter>Source Files</Filter>
</ClCompile>
- <ClCompile Include="..\db\security_key.cpp">
+ <ClCompile Include="..\db\dbcommands_generic.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\querypattern.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\ramlog.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="mr_shard.cpp">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\common.cpp">
<Filter>Shared Source Files</Filter>
</ClCompile>
+ <ClCompile Include="..\db\security_common.cpp">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="security.cpp">
+ <Filter>Source Files</Filter>
+ </ClCompile>
</ItemGroup>
<ItemGroup>
<ClInclude Include="gridconfig.h">
@@ -391,6 +337,51 @@
<ClInclude Include="..\util\signal_handlers.h">
<Filter>Shared Source Files</Filter>
</ClInclude>
+ <ClInclude Include="writeback_listener.h">
+ <Filter>Source Files</Filter>
+ </ClInclude>
+ <ClInclude Include="balance.h">
+ <Filter>Source Files</Filter>
+ </ClInclude>
+ <ClInclude Include="chunk.h">
+ <Filter>Source Files</Filter>
+ </ClInclude>
+ <ClInclude Include="client.h">
+ <Filter>Source Files</Filter>
+ </ClInclude>
+ <ClInclude Include="config.h">
+ <Filter>Source Files</Filter>
+ </ClInclude>
+ <ClInclude Include="cursors.h">
+ <Filter>Source Files</Filter>
+ </ClInclude>
+ <ClInclude Include="d_chunk_manager.h">
+ <Filter>Source Files</Filter>
+ </ClInclude>
+ <ClInclude Include="d_logic.h">
+ <Filter>Source Files</Filter>
+ </ClInclude>
+ <ClInclude Include="d_writeback.h">
+ <Filter>Source Files</Filter>
+ </ClInclude>
+ <ClInclude Include="request.h">
+ <Filter>Source Files</Filter>
+ </ClInclude>
+ <ClInclude Include="server.h">
+ <Filter>Source Files</Filter>
+ </ClInclude>
+ <ClInclude Include="shard_version.h">
+ <Filter>Source Files</Filter>
+ </ClInclude>
+ <ClInclude Include="shardkey.h">
+ <Filter>Source Files</Filter>
+ </ClInclude>
+ <ClInclude Include="stats.h">
+ <Filter>Source Files</Filter>
+ </ClInclude>
+ <ClInclude Include="util.h">
+ <Filter>Source Files</Filter>
+ </ClInclude>
</ItemGroup>
<ItemGroup>
<Library Include="..\..\js\js32d.lib" />
diff --git a/s/grid.cpp b/s/grid.cpp
index 0646507..3756e13 100644
--- a/s/grid.cpp
+++ b/s/grid.cpp
@@ -119,12 +119,14 @@ namespace mongo {
}
bool Grid::addShard( string* name , const ConnectionString& servers , long long maxSize , string& errMsg ) {
- // name can be NULL, so privide a dummy one here to avoid testing it elsewhere
+ // name can be NULL, so provide a dummy one here to avoid testing it elsewhere
string nameInternal;
if ( ! name ) {
name = &nameInternal;
}
+ ReplicaSetMonitorPtr rsMonitor;
+
// Check whether the host (or set) exists and run several sanity checks on this request.
// There are two set of sanity checks: making sure adding this particular shard is consistent
// with the replica set state (if it exists) and making sure this shards databases can be
@@ -140,7 +142,7 @@ namespace mongo {
errMsg = "can't use sync cluster as a shard. for replica set, have to use <setname>/<server1>,<server2>,...";
return false;
}
-
+
BSONObj resIsMongos;
bool ok = newShardConn->runCommand( "admin" , BSON( "isdbgrid" << 1 ) , resIsMongos );
@@ -171,6 +173,13 @@ namespace mongo {
newShardConn.done();
return false;
}
+ if ( !commandSetName.empty() && setName.empty() ) {
+ ostringstream ss;
+ ss << "host did not return a set name, is the replica set still initializing? " << resIsMaster;
+ errMsg = ss.str();
+ newShardConn.done();
+ return false;
+ }
// if the shard is part of replica set, make sure it is the right one
if ( ! commandSetName.empty() && ( commandSetName != setName ) ) {
@@ -197,6 +206,12 @@ namespace mongo {
hostSet.insert( piter.next().String() ); // host:port
}
}
+ if ( resIsMaster["arbiters"].isABSONObj() ) {
+ BSONObjIterator piter( resIsMaster["arbiters"].Obj() );
+ while ( piter.more() ) {
+ hostSet.insert( piter.next().String() ); // host:port
+ }
+ }
vector<HostAndPort> hosts = servers.getServers();
for ( size_t i = 0 ; i < hosts.size() ; i++ ) {
@@ -213,7 +228,8 @@ namespace mongo {
}
if ( ! foundAll ) {
ostringstream ss;
- ss << "host " << offendingHost << " does not belong to replica set as a non-passive member" << setName;;
+ ss << "in seed list " << servers.toString() << ", host " << offendingHost
+ << " does not belong to replica set " << setName;
errMsg = ss.str();
newShardConn.done();
return false;
@@ -250,6 +266,9 @@ namespace mongo {
}
}
+ if ( newShardConn->type() == ConnectionString::SET )
+ rsMonitor = ReplicaSetMonitor::get( setName );
+
newShardConn.done();
}
catch ( DBException& e ) {
@@ -281,7 +300,7 @@ namespace mongo {
// build the ConfigDB shard document
BSONObjBuilder b;
b.append( "_id" , *name );
- b.append( "host" , servers.toString() );
+ b.append( "host" , rsMonitor ? rsMonitor->getServerAddress() : servers.toString() );
if ( maxSize > 0 ) {
b.append( ShardFields::maxSize.name() , maxSize );
}
@@ -375,10 +394,7 @@ namespace mongo {
        // check the 'stopped' marker
// if present, it is a simple bool
BSONElement stoppedElem = balancerDoc["stopped"];
- if ( ! stoppedElem.eoo() && stoppedElem.isBoolean() ) {
- return stoppedElem.boolean();
- }
- return false;
+ return stoppedElem.trueValue();
}
bool Grid::_inBalancingWindow( const BSONObj& balancerDoc , const boost::posix_time::ptime& now ) {
@@ -392,24 +408,32 @@ namespace mongo {
// check if both 'start' and 'stop' are present
if ( ! windowElem.isABSONObj() ) {
- log(1) << "'activeWindow' format is { start: \"hh:mm\" , stop: ... }" << balancerDoc << endl;
+ warning() << "'activeWindow' format is { start: \"hh:mm\" , stop: ... }" << balancerDoc << endl;
return true;
}
BSONObj intervalDoc = windowElem.Obj();
const string start = intervalDoc["start"].str();
const string stop = intervalDoc["stop"].str();
if ( start.empty() || stop.empty() ) {
- log(1) << "must specify both start and end of balancing window: " << intervalDoc << endl;
+ warning() << "must specify both start and end of balancing window: " << intervalDoc << endl;
return true;
}
// check that both 'start' and 'stop' are valid time-of-day
boost::posix_time::ptime startTime, stopTime;
if ( ! toPointInTime( start , &startTime ) || ! toPointInTime( stop , &stopTime ) ) {
- log(1) << "cannot parse active window (use hh:mm 24hs format): " << intervalDoc << endl;
+ warning() << "cannot parse active window (use hh:mm 24hs format): " << intervalDoc << endl;
return true;
}
+ if ( logLevel ) {
+ stringstream ss;
+ ss << " now: " << now
+ << " startTime: " << startTime
+ << " stopTime: " << stopTime;
+ log() << "_inBalancingWindow: " << ss.str() << endl;
+ }
+
// allow balancing if during the activeWindow
// note that a window may be open during the night
if ( stopTime > startTime ) {
@@ -453,6 +477,10 @@ namespace mongo {
class BalancingWindowUnitTest : public UnitTest {
public:
void run() {
+
+ if ( ! cmdLine.isMongos() )
+ return;
+
// T0 < T1 < now < T2 < T3 and Error
const string T0 = "9:00";
const string T1 = "11:00";
@@ -485,7 +513,7 @@ namespace mongo {
assert( Grid::_inBalancingWindow( w8 , now ) );
assert( Grid::_inBalancingWindow( w9 , now ) );
- log(1) << "BalancingWidowObjTest passed" << endl;
+ LOG(1) << "BalancingWindowObjTest passed" << endl;
}
} BalancingWindowObjTest;
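The reworked _inBalancingWindow above parses "start"/"stop" as hh:mm times and, as the comment notes, allows a window that stays open across midnight. A minimal standalone sketch of that comparison, assuming times reduced to minutes-since-midnight integers rather than boost::posix_time::ptime (all names here are illustrative, not mongos code):

#include <iostream>

// Sketch only: inWindow() mirrors the wrap-around logic of Grid::_inBalancingWindow,
// using minutes-since-midnight instead of boost ptime values.
static bool inWindow(int startMin, int stopMin, int nowMin) {
    if (stopMin > startMin) {
        // plain daytime window, e.g. 09:00 - 17:00
        return nowMin >= startMin && nowMin <= stopMin;
    }
    // window wraps past midnight, e.g. 23:00 - 06:00
    return nowMin >= startMin || nowMin <= stopMin;
}

int main() {
    std::cout << inWindow(9 * 60, 17 * 60, 13 * 60) << "\n";   // 1: inside the day window
    std::cout << inWindow(23 * 60, 6 * 60, 2 * 60) << "\n";    // 1: inside the wrapped window
    std::cout << inWindow(23 * 60, 6 * 60, 12 * 60) << "\n";   // 0: outside the wrapped window
    return 0;
}

The wrap-around branch is what lets an operator schedule balancing for, say, 23:00-06:00.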
diff --git a/s/mr_shard.cpp b/s/mr_shard.cpp
new file mode 100644
index 0000000..93f49d1
--- /dev/null
+++ b/s/mr_shard.cpp
@@ -0,0 +1,312 @@
+// mr_shard.cpp
+
+/**
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "../util/net/message.h"
+#include "../db/dbmessage.h"
+#include "../scripting/engine.h"
+
+#include "mr_shard.h"
+
+namespace mongo {
+
+ namespace mr_shard {
+
+ AtomicUInt Config::JOB_NUMBER;
+
+ JSFunction::JSFunction( string type , const BSONElement& e ) {
+ _type = type;
+ _code = e._asCode();
+
+ if ( e.type() == CodeWScope )
+ _wantedScope = e.codeWScopeObject();
+ }
+
+ void JSFunction::init( State * state ) {
+ _scope = state->scope();
+ assert( _scope );
+ _scope->init( &_wantedScope );
+
+ _func = _scope->createFunction( _code.c_str() );
+ uassert( 14836 , str::stream() << "couldn't compile code for: " << _type , _func );
+
+ // install in JS scope so that it can be called in JS mode
+ _scope->setFunction(_type.c_str(), _code.c_str());
+ }
+
+ /**
+ * Applies the finalize function to a tuple obj (key, val)
+ * Returns tuple obj {_id: key, value: newval}
+ */
+ BSONObj JSFinalizer::finalize( const BSONObj& o ) {
+ Scope * s = _func.scope();
+
+ Scope::NoDBAccess no = s->disableDBAccess( "can't access db inside finalize" );
+ s->invokeSafe( _func.func() , &o, 0 );
+
+ // don't want to use o.objsize() to size b
+ // since there are many cases where the point of finalize
+ // is converting many fields to 1
+ BSONObjBuilder b;
+ b.append( o.firstElement() );
+ s->append( b , "value" , "return" );
+ return b.obj();
+ }
+
+ void JSReducer::init( State * state ) {
+ _func.init( state );
+ }
+
+ /**
+ * Reduces a list of tuple objects (key, value) to a single tuple {"0": key, "1": value}
+ */
+ BSONObj JSReducer::reduce( const BSONList& tuples ) {
+ if (tuples.size() <= 1)
+ return tuples[0];
+ BSONObj key;
+ int endSizeEstimate = 16;
+ _reduce( tuples , key , endSizeEstimate );
+
+ BSONObjBuilder b(endSizeEstimate);
+ b.appendAs( key.firstElement() , "0" );
+ _func.scope()->append( b , "1" , "return" );
+ return b.obj();
+ }
+
+ /**
+ * Reduces a list of tuple objects (key, value) to a single tuple {_id: key, value: val}
+ * Also applies a finalizer method if present.
+ */
+ BSONObj JSReducer::finalReduce( const BSONList& tuples , Finalizer * finalizer ) {
+
+ BSONObj res;
+ BSONObj key;
+
+ if (tuples.size() == 1) {
+ // 1 obj, just use it
+ key = tuples[0];
+ BSONObjBuilder b(key.objsize());
+ BSONObjIterator it(key);
+ b.appendAs( it.next() , "_id" );
+ b.appendAs( it.next() , "value" );
+ res = b.obj();
+ }
+ else {
+ // need to reduce
+ int endSizeEstimate = 16;
+ _reduce( tuples , key , endSizeEstimate );
+ BSONObjBuilder b(endSizeEstimate);
+ b.appendAs( key.firstElement() , "_id" );
+ _func.scope()->append( b , "value" , "return" );
+ res = b.obj();
+ }
+
+ if ( finalizer ) {
+ res = finalizer->finalize( res );
+ }
+
+ return res;
+ }
+
+ /**
+ * Actually applies a reduce to a list of tuples (key, value).
+ * After the call, tuples will hold a single tuple {"0": key, "1": value}
+ */
+ void JSReducer::_reduce( const BSONList& tuples , BSONObj& key , int& endSizeEstimate ) {
+ int sizeEstimate = ( tuples.size() * tuples.begin()->getField( "value" ).size() ) + 128;
+
+ // need to build the reduce args: ( key, [values] )
+ BSONObjBuilder reduceArgs( sizeEstimate );
+ boost::scoped_ptr<BSONArrayBuilder> valueBuilder;
+ int sizeSoFar = 0;
+ unsigned n = 0;
+ for ( ; n<tuples.size(); n++ ) {
+ BSONObjIterator j(tuples[n]);
+ BSONElement keyE = j.next();
+ if ( n == 0 ) {
+ reduceArgs.append( keyE );
+ key = keyE.wrap();
+ sizeSoFar = 5 + keyE.size();
+ valueBuilder.reset(new BSONArrayBuilder( reduceArgs.subarrayStart( "tuples" ) ));
+ }
+
+ BSONElement ee = j.next();
+
+ uassert( 14837 , "value too large to reduce" , ee.size() < ( BSONObjMaxUserSize / 2 ) );
+
+ if ( sizeSoFar + ee.size() > BSONObjMaxUserSize ) {
+ assert( n > 1 ); // if not, inf. loop
+ break;
+ }
+
+ valueBuilder->append( ee );
+ sizeSoFar += ee.size();
+ }
+ assert(valueBuilder);
+ valueBuilder->done();
+ BSONObj args = reduceArgs.obj();
+
+ Scope * s = _func.scope();
+
+ s->invokeSafe( _func.func() , &args, 0 );
+ ++numReduces;
+
+ if ( s->type( "return" ) == Array ) {
+ uasserted( 14838 , "reduce -> multiple not supported yet");
+ return;
+ }
+
+ endSizeEstimate = key.objsize() + ( args.objsize() / tuples.size() );
+
+ if ( n == tuples.size() )
+ return;
+
+ // the input list was too large, add the rest of the elements to new tuples and reduce again
+ // note: it would be better to use a loop instead of recursion to avoid stack overflow
+ BSONList x;
+ for ( ; n < tuples.size(); n++ ) {
+ x.push_back( tuples[n] );
+ }
+ BSONObjBuilder temp( endSizeEstimate );
+ temp.append( key.firstElement() );
+ s->append( temp , "1" , "return" );
+ x.push_back( temp.obj() );
+ _reduce( x , key , endSizeEstimate );
+ }
+
+ Config::Config( const string& _dbname , const BSONObj& cmdObj ) {
+
+ dbname = _dbname;
+ ns = dbname + "." + cmdObj.firstElement().valuestr();
+
+ verbose = cmdObj["verbose"].trueValue();
+ jsMode = cmdObj["jsMode"].trueValue();
+
+ jsMaxKeys = 500000;
+ reduceTriggerRatio = 2.0;
+ maxInMemSize = 5 * 1024 * 1024;
+
+ uassert( 14841 , "outType is no longer a valid option" , cmdObj["outType"].eoo() );
+
+ if ( cmdObj["out"].type() == String ) {
+ finalShort = cmdObj["out"].String();
+ outType = REPLACE;
+ }
+ else if ( cmdObj["out"].type() == Object ) {
+ BSONObj o = cmdObj["out"].embeddedObject();
+
+ BSONElement e = o.firstElement();
+ string t = e.fieldName();
+
+ if ( t == "normal" || t == "replace" ) {
+ outType = REPLACE;
+ finalShort = e.String();
+ }
+ else if ( t == "merge" ) {
+ outType = MERGE;
+ finalShort = e.String();
+ }
+ else if ( t == "reduce" ) {
+ outType = REDUCE;
+ finalShort = e.String();
+ }
+ else if ( t == "inline" ) {
+ outType = INMEMORY;
+ }
+ else {
+ uasserted( 14839 , str::stream() << "unknown out specifier [" << t << "]" );
+ }
+
+ if (o.hasElement("db")) {
+ outDB = o["db"].String();
+ }
+ }
+ else {
+ uasserted( 14840 , "'out' has to be a string or an object" );
+ }
+
+ if ( outType != INMEMORY ) { // setup names
+ tempLong = str::stream() << (outDB.empty() ? dbname : outDB) << ".tmp.mr." << cmdObj.firstElement().String() << "_" << finalShort << "_" << JOB_NUMBER++;
+
+ incLong = tempLong + "_inc";
+
+ finalLong = str::stream() << (outDB.empty() ? dbname : outDB) << "." << finalShort;
+ }
+
+ {
+ // scope and code
+
+ if ( cmdObj["scope"].type() == Object )
+ scopeSetup = cmdObj["scope"].embeddedObjectUserCheck();
+
+ reducer.reset( new JSReducer( cmdObj["reduce"] ) );
+ if ( cmdObj["finalize"].type() && cmdObj["finalize"].trueValue() )
+ finalizer.reset( new JSFinalizer( cmdObj["finalize"] ) );
+
+ }
+
+ {
+ // query options
+ if ( cmdObj["limit"].isNumber() )
+ limit = cmdObj["limit"].numberLong();
+ else
+ limit = 0;
+ }
+ }
+
+ State::State( const Config& c ) : _config( c ) {
+ _onDisk = _config.outType != Config::INMEMORY;
+ }
+
+ State::~State() {
+ if ( _onDisk ) {
+ try {
+// _db.dropCollection( _config.tempLong );
+// _db.dropCollection( _config.incLong );
+ }
+ catch ( std::exception& e ) {
+ error() << "couldn't cleanup after map reduce: " << e.what() << endl;
+ }
+ }
+
+ if (_scope) {
+ // cleanup js objects
+ ScriptingFunction cleanup = _scope->createFunction("delete _emitCt; delete _keyCt; delete _mrMap;");
+ _scope->invoke(cleanup, 0, 0, 0, true);
+ }
+ }
+
+ /**
+ * Initialize the mapreduce operation, creating the inc collection
+ */
+ void State::init() {
+ // setup js
+ _scope.reset(globalScriptEngine->getPooledScope( _config.dbname ).release() );
+// _scope->localConnect( _config.dbname.c_str() );
+ _scope->externalSetup();
+
+ if ( ! _config.scopeSetup.isEmpty() )
+ _scope->init( &_config.scopeSetup );
+
+ _config.reducer->init( this );
+ if ( _config.finalizer )
+ _config.finalizer->init( this );
+ _scope->setBoolean("_doFinal", _config.finalizer);
+ }
+ }
+}
+
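JSReducer::_reduce above caps each reduce invocation near BSONObjMaxUserSize and, when the tuple list does not fit, reduces what does fit, appends the partial result to the remainder, and reduces again. A hedged sketch of that chunk-and-reduce-again shape, using std::string values and a byte cap in place of BSON tuples and the JS engine (reduceAll and its parameters are placeholders, not the mr_shard API):

#include <cassert>
#include <functional>
#include <string>
#include <vector>

// Sketch only: reduce values in batches that fit under maxBatchBytes, feeding each
// partial result back into the remaining values until a single value is left.
static std::string reduceAll(std::vector<std::string> values,
                             size_t maxBatchBytes,
                             const std::function<std::string(const std::vector<std::string>&)>& reduce) {
    while (values.size() > 1) {
        std::vector<std::string> batch;
        size_t bytes = 0;
        size_t n = 0;
        for (; n < values.size(); ++n) {
            if (!batch.empty() && bytes + values[n].size() > maxBatchBytes)
                break;                                   // batch full, reduce what fits
            bytes += values[n].size();
            batch.push_back(values[n]);
        }
        assert(n == values.size() || batch.size() > 1);  // mirrors the source's "if not, inf. loop"
        std::vector<std::string> rest(values.begin() + n, values.end());
        rest.push_back(reduce(batch));                   // partial result rejoins the leftovers
        values.swap(rest);
    }
    return values.empty() ? std::string() : values[0];
}

This version uses the loop the source comment asks for instead of recursion, so a long tail of oversized batches cannot overflow the stack.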
diff --git a/s/mr_shard.h b/s/mr_shard.h
new file mode 100644
index 0000000..9603ba9
--- /dev/null
+++ b/s/mr_shard.h
@@ -0,0 +1,232 @@
+// mr_shard.h
+
+/**
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "pch.h"
+
+namespace mongo {
+
+ namespace mr_shard {
+
+ typedef vector<BSONObj> BSONList;
+
+ class State;
+
+ // ------------ function interfaces -----------
+
+ class Finalizer : boost::noncopyable {
+ public:
+ virtual ~Finalizer() {}
+ virtual void init( State * state ) = 0;
+
+ /**
+ * this takes a tuple and returns a tuple
+ */
+ virtual BSONObj finalize( const BSONObj& tuple ) = 0;
+ };
+
+ class Reducer : boost::noncopyable {
+ public:
+ Reducer() : numReduces(0) {}
+ virtual ~Reducer() {}
+ virtual void init( State * state ) = 0;
+
+ virtual BSONObj reduce( const BSONList& tuples ) = 0;
+ /** this means it's a final reduce, even if there is no finalizer */
+ virtual BSONObj finalReduce( const BSONList& tuples , Finalizer * finalizer ) = 0;
+
+ long long numReduces;
+ };
+
+ // ------------ js function implementations -----------
+
+ /**
+ * used as a holder for Scope and ScriptingFunction
+ * visitor-like pattern, as the Scope is obtained on first access
+ */
+ class JSFunction : boost::noncopyable {
+ public:
+ /**
+ * @param type (map|reduce|finalize)
+ */
+ JSFunction( string type , const BSONElement& e );
+ virtual ~JSFunction() {}
+
+ virtual void init( State * state );
+
+ Scope * scope() const { return _scope; }
+ ScriptingFunction func() const { return _func; }
+
+ private:
+ string _type;
+ string _code; // actual javascript code
+ BSONObj _wantedScope; // this is for CodeWScope
+
+ Scope * _scope; // this is not owned by us, and might be shared
+ ScriptingFunction _func;
+ };
+
+ class JSReducer : public Reducer {
+ public:
+ JSReducer( const BSONElement& code ) : _func( "_reduce" , code ) {}
+ virtual void init( State * state );
+
+ virtual BSONObj reduce( const BSONList& tuples );
+ virtual BSONObj finalReduce( const BSONList& tuples , Finalizer * finalizer );
+
+ private:
+
+ /**
+ * result in "return"
+ * @param key OUT
+ * @param endSizeEstimate OUT
+ */
+ void _reduce( const BSONList& values , BSONObj& key , int& endSizeEstimate );
+
+ JSFunction _func;
+ };
+
+ class JSFinalizer : public Finalizer {
+ public:
+ JSFinalizer( const BSONElement& code ) : _func( "_finalize" , code ) {}
+ virtual BSONObj finalize( const BSONObj& o );
+ virtual void init( State * state ) { _func.init( state ); }
+ private:
+ JSFunction _func;
+
+ };
+
+ // -----------------
+
+ /**
+ * holds map/reduce config information
+ */
+ class Config {
+ public:
+ Config( const string& _dbname , const BSONObj& cmdObj );
+
+ string dbname;
+ string ns;
+
+ // options
+ bool verbose;
+ bool jsMode;
+
+ // query options
+
+ BSONObj filter;
+ BSONObj sort;
+ long long limit;
+
+ // functions
+ scoped_ptr<Reducer> reducer;
+ scoped_ptr<Finalizer> finalizer;
+
+ BSONObj mapParams;
+ BSONObj scopeSetup;
+
+ // output tables
+ string incLong;
+ string tempLong;
+
+ string finalShort;
+ string finalLong;
+
+ string outDB;
+
+ // max number of keys allowed in JS map before switching mode
+ long jsMaxKeys;
+ // ratio of duplicates vs unique keys before reduce is triggered in js mode
+ float reduceTriggerRatio;
+ // maximum size of map before it gets dumped to disk
+ long maxInMemSize;
+
+ enum { REPLACE , // atomically replace the collection
+ MERGE , // merge keys, override dups
+ REDUCE , // merge keys, reduce dups
+ INMEMORY // only store in memory, limited in size
+ } outType;
+
+ static AtomicUInt JOB_NUMBER;
+ }; // end Config
+
+ /**
+ * stores information about intermediate map reduce state
+ * controls flow of data from map->reduce->finalize->output
+ */
+ class State {
+ public:
+ State( const Config& c );
+ ~State();
+
+ void init();
+
+ // ---- prep -----
+ bool sourceExists();
+
+ long long incomingDocuments();
+
+ // ---- map stage ----
+
+ /**
+ * stages an object in in-memory storage
+ */
+ void emit( const BSONObj& a );
+
+ /**
+ * if size is big, run a reduce
+ * if it's still big, dump to temp collection
+ */
+ void checkSize();
+
+ /**
+ * run reduce on _temp
+ */
+ void reduceInMemory();
+
+ // ------ reduce stage -----------
+
+ void prepTempCollection();
+
+ void finalReduce( BSONList& values );
+
+ void finalReduce( CurOp * op , ProgressMeterHolder& pm );
+
+ // ------ simple accessors -----
+
+ /** State maintains ownership, do not use past State lifetime */
+ Scope* scope() { return _scope.get(); }
+
+ const Config& config() { return _config; }
+
+ const bool isOnDisk() { return _onDisk; }
+
+ long long numReduces() const { return _config.reducer->numReduces; }
+
+ const Config& _config;
+
+ protected:
+
+ scoped_ptr<Scope> _scope;
+ bool _onDisk; // if the end result of this map reduce is disk or not
+ };
+
+ } // end mr_shard namespace
+}
+
+
diff --git a/s/request.cpp b/s/request.cpp
index 32c17cc..36488cb 100644
--- a/s/request.cpp
+++ b/s/request.cpp
@@ -43,7 +43,12 @@ namespace mongo {
_clientInfo = ClientInfo::get();
_clientInfo->newRequest( p );
+ }
+ void Request::checkAuth() const {
+ char cl[256];
+ nsToDatabase(getns(), cl);
+ uassert(15845, "unauthorized", _clientInfo->getAuthenticationInfo()->isAuthorized(cl));
}
void Request::init() {
@@ -60,13 +65,21 @@ namespace mongo {
uassert( 13644 , "can't use 'local' database through mongos" , ! str::startsWith( getns() , "local." ) );
- _config = grid.getDBConfig( getns() );
- if ( reload )
- uassert( 10192 , "db config reload failed!" , _config->reload() );
+ const string nsStr (getns()); // use in functions taking string rather than char*
+
+ _config = grid.getDBConfig( nsStr );
+ if ( reload ) {
+ if ( _config->isSharded( nsStr ) )
+ _config->getChunkManager( nsStr , true );
+ else
+ _config->reload();
+ }
- if ( _config->isSharded( getns() ) ) {
- _chunkManager = _config->getChunkManager( getns() , reload );
- uassert( 10193 , (string)"no shard info for: " + getns() , _chunkManager );
+ if ( _config->isSharded( nsStr ) ) {
+ _chunkManager = _config->getChunkManager( nsStr , reload );
+ // TODO: All of these uasserts are no longer necessary; getChunkManager() throws when it
+ // cannot return the right value.
+ uassert( 10193 , (string)"no shard info for: " + nsStr , _chunkManager );
}
else {
_chunkManager.reset();
@@ -100,7 +113,7 @@ namespace mongo {
}
- log(3) << "Request::process ns: " << getns() << " msg id:" << (int)(_m.header()->id) << " attempt: " << attempt << endl;
+ LOG(3) << "Request::process ns: " << getns() << " msg id:" << (int)(_m.header()->id) << " attempt: " << attempt << endl;
Strategy * s = SINGLE;
_counter = &opsNonSharded;
@@ -134,6 +147,7 @@ namespace mongo {
s->getMore( *this );
}
else {
+ checkAuth();
s->writeOp( op, *this );
}
diff --git a/s/request.h b/s/request.h
index 7c51e5c..86a484e 100644
--- a/s/request.h
+++ b/s/request.h
@@ -19,7 +19,7 @@
#pragma once
#include "../pch.h"
-#include "../util/message.h"
+#include "../util/net/message.h"
#include "../db/dbmessage.h"
#include "config.h"
#include "util.h"
@@ -70,6 +70,8 @@ namespace mongo {
return _clientInfo;
}
+ void checkAuth() const;
+
// ---- remote location info -----
diff --git a/s/s_only.cpp b/s/s_only.cpp
index 83bceac..6449b34 100644
--- a/s/s_only.cpp
+++ b/s/s_only.cpp
@@ -31,7 +31,7 @@ namespace mongo {
boost::thread_specific_ptr<Client> currentClient;
- Client::Client(const char *desc , MessagingPort *p) :
+ Client::Client(const char *desc , AbstractMessagingPort *p) :
_context(0),
_shutdown(false),
_desc(desc),
@@ -42,7 +42,7 @@ namespace mongo {
Client::~Client() {}
bool Client::shutdown() { return true; }
- Client& Client::initThread(const char *desc, MessagingPort *mp) {
+ Client& Client::initThread(const char *desc, AbstractMessagingPort *mp) {
setThreadName(desc);
assert( currentClient.get() == 0 );
Client *c = new Client(desc, mp);
@@ -85,8 +85,13 @@ namespace mongo {
log( 2 ) << "command: " << cmdObj << endl;
}
+ if (!client.getAuthenticationInfo()->isAuthorized(dbname)) {
+ result.append("errmsg" , "unauthorized");
+ return false;
+ }
+
string errmsg;
- int ok = c->run( dbname , cmdObj , errmsg , result , fromRepl );
+ int ok = c->run( dbname , cmdObj , queryOptions, errmsg , result , fromRepl );
if ( ! ok )
result.append( "errmsg" , errmsg );
return ok;
diff --git a/s/security.cpp b/s/security.cpp
new file mode 100644
index 0000000..0b8954e
--- /dev/null
+++ b/s/security.cpp
@@ -0,0 +1,112 @@
+// security.cpp
+/*
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+// security.cpp
+
+#include "pch.h"
+#include "../db/security_common.h"
+#include "../db/security.h"
+#include "config.h"
+#include "client.h"
+#include "grid.h"
+
+// this is the _mongos only_ implementation of security.h
+
+namespace mongo {
+
+ bool AuthenticationInfo::_warned;
+
+ bool CmdAuthenticate::getUserObj(const string& dbname, const string& user, BSONObj& userObj, string& pwd) {
+ if (user == internalSecurity.user) {
+ uassert(15890, "key file must be used to log in with internal user", cmdLine.keyFile);
+ pwd = internalSecurity.pwd;
+ }
+ else {
+ string systemUsers = dbname + ".system.users";
+ DBConfigPtr config = grid.getDBConfig( systemUsers );
+ Shard s = config->getShard( systemUsers );
+
+ static BSONObj userPattern = BSON("user" << 1);
+
+ ShardConnection conn( s, systemUsers );
+ OCCASIONALLY conn->ensureIndex(systemUsers, userPattern, false, "user_1");
+ {
+ BSONObjBuilder b;
+ b << "user" << user;
+ BSONObj query = b.done();
+ userObj = conn->findOne(systemUsers, query);
+ if( userObj.isEmpty() ) {
+ log() << "auth: couldn't find user " << user << ", " << systemUsers << endl;
+ conn.done(); // return to pool
+ return false;
+ }
+ }
+
+ pwd = userObj.getStringField("pwd");
+
+ conn.done(); // return to pool
+ }
+ return true;
+ }
+
+ void CmdAuthenticate::authenticate(const string& dbname, const string& user, const bool readOnly) {
+ AuthenticationInfo *ai = ClientInfo::get()->getAuthenticationInfo();
+
+ if ( readOnly ) {
+ ai->authorizeReadOnly( dbname , user );
+ }
+ else {
+ ai->authorize( dbname , user );
+ }
+ }
+
+ bool AuthenticationInfo::_isAuthorizedSpecialChecks( const string& dbname ) const {
+ if ( !isLocalHost ) {
+ return false;
+ }
+
+ string adminNs = "admin.system.users";
+
+ DBConfigPtr config = grid.getDBConfig( adminNs );
+ Shard s = config->getShard( adminNs );
+
+ ShardConnection conn( s, adminNs );
+ BSONObj result = conn->findOne("admin.system.users", Query());
+ if( result.isEmpty() ) {
+ if( ! _warned ) {
+ // you could get a few of these in a race, but that's ok
+ _warned = true;
+ log() << "note: no users configured in admin.system.users, allowing localhost access" << endl;
+ }
+
+ // Must return conn to pool
+ // TODO: Check for errors during findOne(), or just let the conn die?
+ conn.done();
+ return true;
+ }
+
+ // Must return conn to pool
+ conn.done();
+ return false;
+ }
+
+ bool CmdLogout::run(const string& dbname , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ AuthenticationInfo *ai = ClientInfo::get()->getAuthenticationInfo();
+ ai->logout(dbname);
+ return true;
+ }
+}
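AuthenticationInfo::_isAuthorizedSpecialChecks above grants the localhost exception only while admin.system.users is empty, and logs the "no users configured" note once even if several connections race. A small sketch of that shape, with adminUserCount standing in for the findOne against the config shard (an assumption for illustration, not the mongos API):

#include <iostream>

// Sketch only: localhost connections bypass auth while no admin users exist;
// the warning is emitted once, and a few racing warnings are acceptable, as the source notes.
static bool warnedOnce = false;

static bool allowLocalhostException(bool isLocalHost, long adminUserCount) {
    if (!isLocalHost)
        return false;                       // only localhost gets the exception
    if (adminUserCount == 0) {
        if (!warnedOnce) {
            warnedOnce = true;
            std::clog << "note: no users configured in admin.system.users, "
                         "allowing localhost access" << std::endl;
        }
        return true;
    }
    return false;
}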
diff --git a/s/server.cpp b/s/server.cpp
index 51f30f1..a6ffab9 100644
--- a/s/server.cpp
+++ b/s/server.cpp
@@ -17,15 +17,18 @@
*/
#include "pch.h"
-#include "../util/message.h"
+#include "../util/net/message.h"
#include "../util/unittest.h"
#include "../client/connpool.h"
-#include "../util/message_server.h"
+#include "../util/net/message_server.h"
#include "../util/stringutils.h"
#include "../util/version.h"
+#include "../util/ramlog.h"
#include "../util/signal_handlers.h"
#include "../util/admin_access.h"
+#include "../util/concurrency/task.h"
#include "../db/dbwebserver.h"
+#include "../scripting/engine.h"
#include "server.h"
#include "request.h"
@@ -43,6 +46,7 @@ namespace mongo {
Database *database = 0;
string mongosCommand;
bool dbexitCalled = false;
+ static bool scriptingEnabled = true;
bool inShutdown() {
return dbexitCalled;
@@ -65,20 +69,18 @@ namespace mongo {
out() << endl;
}
- class ShardingConnectionHook : public DBConnectionHook {
- public:
-
- virtual void onHandedOut( DBClientBase * conn ) {
- ClientInfo::get()->addShard( conn->getServerAddress() );
- }
- } shardingConnectionHook;
+ void ShardingConnectionHook::onHandedOut( DBClientBase * conn ) {
+ ClientInfo::get()->addShard( conn->getServerAddress() );
+ }
class ShardedMessageHandler : public MessageHandler {
public:
virtual ~ShardedMessageHandler() {}
virtual void connected( AbstractMessagingPort* p ) {
- assert( ClientInfo::get() );
+ ClientInfo *c = ClientInfo::get();
+ massert(15849, "client info not defined", c);
+ c->getAuthenticationInfo()->isLocalHost = p->remote().isLocalHost();
}
virtual void process( Message& m , AbstractMessagingPort* p , LastError * le) {
@@ -93,7 +95,7 @@ namespace mongo {
r.process();
}
catch ( AssertionException & e ) {
- log( e.isUserAssertion() ? 1 : 0 ) << "AssertionException in process: " << e.what() << endl;
+ log( e.isUserAssertion() ? 1 : 0 ) << "AssertionException while processing op type : " << m.operation() << " to : " << r.getns() << causedBy(e) << endl;
le->raiseError( e.getCode() , e.what() );
@@ -147,6 +149,7 @@ namespace mongo {
setupSIGTRAPforGDB();
setupCoreSignals();
setupSignals( false );
+ Logstream::get().addGlobalTee( new RamLog("global") );
}
void start( const MessageServer::Options& opts ) {
@@ -154,10 +157,8 @@ namespace mongo {
installChunkShardVersioning();
balancer.go();
cursorCache.startTimeoutThread();
+ PeriodicTask::theRunner->go();
- log() << "waiting for connections on port " << cmdLine.port << endl;
- //DbGridListener l(port);
- //l.listen();
ShardedMessageHandler handler;
MessageServer * server = createServer( opts , &handler );
server->setAsTimeTracker();
@@ -201,6 +202,7 @@ int _main(int argc, char* argv[]) {
( "chunkSize" , po::value<int>(), "maximum amount of data per chunk" )
( "ipv6", "enable IPv6 support (disabled by default)" )
( "jsonp","allow JSONP access via http (has security implications)" )
+ ("noscripting", "disable scripting engine")
;
options.add(sharding_options);
@@ -242,6 +244,10 @@ int _main(int argc, char* argv[]) {
return 0;
}
+ if (params.count("noscripting")) {
+ scriptingEnabled = false;
+ }
+
if ( ! params.count( "configdb" ) ) {
out() << "error: no args for --configdb" << endl;
return 4;
@@ -254,7 +260,7 @@ int _main(int argc, char* argv[]) {
return 5;
}
- // we either have a seeting were all process are in localhost or none is
+ // we either have a setting where all processes are in localhost or none are
for ( vector<string>::const_iterator it = configdbs.begin() ; it != configdbs.end() ; ++it ) {
try {
@@ -278,8 +284,12 @@ int _main(int argc, char* argv[]) {
// set some global state
- pool.addHook( &shardingConnectionHook );
+ pool.addHook( new ShardingConnectionHook( false ) );
pool.setName( "mongos connectionpool" );
+
+ shardConnectionPool.addHook( new ShardingConnectionHook( true ) );
+ shardConnectionPool.setName( "mongos shardconnection connectionpool" );
+
DBClientConnection::setLazyKillCursor( false );
@@ -309,6 +319,16 @@ int _main(int argc, char* argv[]) {
return 8;
}
+ {
+ class CheckConfigServers : public task::Task {
+ virtual string name() const { return "CheckConfigServers"; }
+ virtual void doWork() { configServer.ok(true); }
+ };
+ static CheckConfigServers checkConfigServers;
+
+ task::repeat(&checkConfigServers, 60*1000);
+ }
+
int configError = configServer.checkConfigVersion( params.count( "upgrade" ) );
if ( configError ) {
if ( configError > 0 ) {
@@ -325,6 +345,12 @@ int _main(int argc, char* argv[]) {
boost::thread web( boost::bind(&webServerThread, new NoAdminAccess() /* takes ownership */) );
+ if ( scriptingEnabled ) {
+ ScriptEngine::setup();
+// globalScriptEngine->setCheckInterruptCallback( jsInterruptCallback );
+// globalScriptEngine->setGetInterruptSpecCallback( jsGetInterruptSpecCallback );
+ }
+
MessageServer::Options opts;
opts.port = cmdLine.port;
opts.ipList = cmdLine.bind_ip;
@@ -335,6 +361,7 @@ int _main(int argc, char* argv[]) {
}
int main(int argc, char* argv[]) {
try {
+ doPreServerStatupInits();
return _main(argc, argv);
}
catch(DBException& e) {
@@ -352,6 +379,12 @@ int main(int argc, char* argv[]) {
}
#undef exit
+
+void mongo::exitCleanly( ExitCode code ) {
+ // TODO: do we need to add anything?
+ mongo::dbexit( code );
+}
+
void mongo::dbexit( ExitCode rc, const char *why, bool tryToGetLock ) {
dbexitCalled = true;
log() << "dbexit: " << why
diff --git a/s/server.h b/s/server.h
index 1a5c9ea..18e91e2 100644
--- a/s/server.h
+++ b/s/server.h
@@ -17,7 +17,7 @@
*/
#include <string>
-#include "../util/message.h"
+#include "../util/net/message.h"
#include "../db/jsobj.h"
namespace mongo {
diff --git a/s/shard.cpp b/s/shard.cpp
index c1e3b56..75326e0 100644
--- a/s/shard.cpp
+++ b/s/shard.cpp
@@ -20,6 +20,7 @@
#include "shard.h"
#include "config.h"
#include "request.h"
+#include "client.h"
#include "../db/commands.h"
#include <set>
@@ -111,6 +112,14 @@ namespace mongo {
return i->second;
}
+ // Useful for ensuring our shard data will not be modified while we use it
+ Shard findCopy( const string& ident ){
+ ShardPtr found = find( ident );
+ scoped_lock lk( _mutex );
+ massert( 13128 , (string)"can't find shard for: " + ident , found.get() );
+ return *found.get();
+ }
+
void set( const string& name , const Shard& s , bool setName = true , bool setAddr = true ) {
scoped_lock lk( _mutex );
ShardPtr ss( new Shard( s ) );
@@ -226,7 +235,7 @@ namespace mongo {
virtual bool slaveOk() const { return true; }
virtual bool adminOnly() const { return true; }
- virtual bool run(const string&, mongo::BSONObj&, std::string& errmsg , mongo::BSONObjBuilder& result, bool) {
+ virtual bool run(const string&, mongo::BSONObj&, int, std::string& errmsg , mongo::BSONObjBuilder& result, bool) {
return staticShardInfo.getShardMap( result , errmsg );
}
} cmdGetShardMap;
@@ -243,10 +252,7 @@ namespace mongo {
void Shard::_rsInit() {
if ( _cs.type() == ConnectionString::SET ) {
string x = _cs.getSetName();
- if ( x.size() == 0 ) {
- warning() << "no set name for shard: " << _name << " " << _cs.toString() << endl;
- }
- assert( x.size() );
+ massert( 14807 , str::stream() << "no set name for shard: " << _name << " " << _cs.toString() , x.size() );
_rs = ReplicaSetMonitor::get( x , _cs.getServers() );
}
}
@@ -260,14 +266,9 @@ namespace mongo {
}
void Shard::reset( const string& ident ) {
- ShardPtr s = staticShardInfo.find( ident );
- massert( 13128 , (string)"can't find shard for: " + ident , s->ok() );
- _name = s->_name;
- _addr = s->_addr;
- _cs = s->_cs;
+ *this = staticShardInfo.findCopy( ident );
+ _rs.reset();
_rsInit();
- _maxSize = s->_maxSize;
- _isDraining = s->_isDraining;
}
bool Shard::containsNode( const string& node ) const {
@@ -289,10 +290,10 @@ namespace mongo {
}
void Shard::printShardInfo( ostream& out ) {
- vector<ShardPtr> all;
+ vector<Shard> all;
staticShardInfo.getAllShards( all );
for ( unsigned i=0; i<all.size(); i++ )
- out << all[i]->toString() << "\n";
+ out << all[i].toString() << "\n";
out.flush();
}
@@ -324,7 +325,7 @@ namespace mongo {
}
Shard Shard::pick( const Shard& current ) {
- vector<ShardPtr> all;
+ vector<Shard> all;
staticShardInfo.getAllShards( all );
if ( all.size() == 0 ) {
staticShardInfo.reload();
@@ -334,18 +335,18 @@ namespace mongo {
}
// if current shard was provided, pick a different shard only if it is a better choice
- ShardStatus best = all[0]->getStatus();
+ ShardStatus best = all[0].getStatus();
if ( current != EMPTY ) {
best = current.getStatus();
}
for ( size_t i=0; i<all.size(); i++ ) {
- ShardStatus t = all[i]->getStatus();
+ ShardStatus t = all[i].getStatus();
if ( t < best )
best = t;
}
- log(1) << "best shard for new allocation is " << best << endl;
+ LOG(1) << "best shard for new allocation is " << best << endl;
return best.shard();
}
@@ -356,4 +357,20 @@ namespace mongo {
_writeLock = 0; // TODO
}
+ void ShardingConnectionHook::onCreate( DBClientBase * conn ) {
+ if( !noauth ) {
+ string err;
+ LOG(2) << "calling onCreate auth for " << conn->toString() << endl;
+ uassert( 15847, "can't authenticate to shard server",
+ conn->auth("local", internalSecurity.user, internalSecurity.pwd, err, false));
+ }
+
+ if ( _shardedConnections ) {
+ conn->simpleCommand( "admin" , 0 , "setShardVersion" );
+ }
+ }
+
+ void ShardingConnectionHook::onDestory( DBClientBase * conn ) {
+ resetShardVersionCB( conn );
+ }
}
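The new StaticShardInfo::findCopy returns a Shard by value while the registry mutex is held, so Shard::reset gets a stable snapshot instead of a shared pointer into a map that other threads may update. A minimal sketch of the copy-under-lock idea, with ShardInfo and Registry as illustrative stand-ins:

#include <map>
#include <mutex>
#include <stdexcept>
#include <string>

// Sketch only: look up an entry and copy it out while still under the lock.
struct ShardInfo {
    std::string name;
    std::string addr;
};

class Registry {
public:
    void set(const std::string& key, const ShardInfo& s) {
        std::lock_guard<std::mutex> lk(_mutex);
        _shards[key] = s;
    }
    ShardInfo findCopy(const std::string& key) const {
        std::lock_guard<std::mutex> lk(_mutex);
        std::map<std::string, ShardInfo>::const_iterator it = _shards.find(key);
        if (it == _shards.end())
            throw std::runtime_error("can't find shard for: " + key);
        return it->second;                  // copied out while the lock is held
    }
private:
    mutable std::mutex _mutex;
    std::map<std::string, ShardInfo> _shards;
};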
diff --git a/s/shard.h b/s/shard.h
index 70e478c..1c4dd75 100644
--- a/s/shard.h
+++ b/s/shard.h
@@ -255,6 +255,8 @@ namespace mongo {
_setVersion = false;
_finishedInit = true;
}
+
+ bool ok() const { return _conn > 0; }
/**
this just passes through except it checks for stale configs
@@ -275,4 +277,21 @@ namespace mongo {
DBClientBase* _conn;
bool _setVersion;
};
+
+
+ extern DBConnectionPool shardConnectionPool;
+
+ class ShardingConnectionHook : public DBConnectionHook {
+ public:
+
+ ShardingConnectionHook( bool shardedConnections )
+ : _shardedConnections( shardedConnections ) {
+ }
+
+ virtual void onCreate( DBClientBase * conn );
+ virtual void onHandedOut( DBClientBase * conn );
+ virtual void onDestory( DBClientBase * conn );
+
+ bool _shardedConnections;
+ };
}
diff --git a/s/shard_version.cpp b/s/shard_version.cpp
index a189a08..8782c8e 100644
--- a/s/shard_version.cpp
+++ b/s/shard_version.cpp
@@ -82,29 +82,54 @@ namespace mongo {
/**
* @return true if had to do something
*/
- bool checkShardVersion( DBClientBase& conn , const string& ns , bool authoritative , int tryNumber ) {
+ bool checkShardVersion( DBClientBase& conn_in , const string& ns , bool authoritative , int tryNumber ) {
// TODO: cache, optimize, etc...
- WriteBackListener::init( conn );
+ WriteBackListener::init( conn_in );
DBConfigPtr conf = grid.getDBConfig( ns );
if ( ! conf )
return false;
+ DBClientBase* conn = 0;
+
+ switch ( conn_in.type() ) {
+ case ConnectionString::INVALID:
+ assert(0);
+ break;
+ case ConnectionString::MASTER:
+ // great
+ conn = &conn_in;
+ break;
+ case ConnectionString::PAIR:
+ assert( ! "pair not supported for sharding" );
+ break;
+ case ConnectionString::SYNC:
+ // TODO: we should check later that we aren't actually sharded on this
+ conn = &conn_in;
+ break;
+ case ConnectionString::SET:
+ DBClientReplicaSet* set = (DBClientReplicaSet*)&conn_in;
+ conn = &(set->masterConn());
+ break;
+ }
+
+ assert(conn);
+
unsigned long long officialSequenceNumber = 0;
ChunkManagerPtr manager;
const bool isSharded = conf->isSharded( ns );
if ( isSharded ) {
- manager = conf->getChunkManager( ns , authoritative );
+ manager = conf->getChunkManagerIfExists( ns , authoritative );
// It's possible the chunk manager was reset since we checked whether sharded was true,
// so must check this here.
if( manager ) officialSequenceNumber = manager->getSequenceNumber();
}
// has the ChunkManager been reloaded since the last time we updated the connection-level version?
- // (ie, last time we issued the setShardVersions below)
- unsigned long long sequenceNumber = connectionShardStatus.getSequence(&conn,ns);
+ // (ie., last time we issued the setShardVersions below)
+ unsigned long long sequenceNumber = connectionShardStatus.getSequence(conn,ns);
if ( sequenceNumber == officialSequenceNumber ) {
return false;
}
@@ -112,40 +137,53 @@ namespace mongo {
ShardChunkVersion version = 0;
if ( isSharded && manager ) {
- version = manager->getVersion( Shard::make( conn.getServerAddress() ) );
+ version = manager->getVersion( Shard::make( conn->getServerAddress() ) );
}
- log(2) << " have to set shard version for conn: " << &conn << " ns:" << ns
+ LOG(2) << " have to set shard version for conn: " << conn << " ns:" << ns
<< " my last seq: " << sequenceNumber << " current: " << officialSequenceNumber
<< " version: " << version << " manager: " << manager.get()
<< endl;
BSONObj result;
- if ( setShardVersion( conn , ns , version , authoritative , result ) ) {
+ if ( setShardVersion( *conn , ns , version , authoritative , result ) ) {
// success!
- log(1) << " setShardVersion success!" << endl;
- connectionShardStatus.setSequence( &conn , ns , officialSequenceNumber );
+ LOG(1) << " setShardVersion success: " << result << endl;
+ connectionShardStatus.setSequence( conn , ns , officialSequenceNumber );
return true;
}
- log(1) << " setShardVersion failed!\n" << result << endl;
+ LOG(1) << " setShardVersion failed!\n" << result << endl;
- if ( result.getBoolField( "need_authoritative" ) )
+ if ( result["need_authoritative"].trueValue() )
massert( 10428 , "need_authoritative set but in authoritative mode already" , ! authoritative );
if ( ! authoritative ) {
- checkShardVersion( conn , ns , 1 , tryNumber + 1 );
+ checkShardVersion( *conn , ns , 1 , tryNumber + 1 );
return true;
}
+
+ if ( result["reloadConfig"].trueValue() ) {
+ if( result["version"].timestampTime() == 0 ){
+ // reload db
+ conf->reload();
+ }
+ else {
+ // reload config
+ conf->getChunkManager( ns , true );
+ }
+ }
- if ( tryNumber < 4 ) {
- log(1) << "going to retry checkShardVersion" << endl;
- sleepmillis( 10 );
- checkShardVersion( conn , ns , 1 , tryNumber + 1 );
+ const int maxNumTries = 7;
+ if ( tryNumber < maxNumTries ) {
+ LOG( tryNumber < ( maxNumTries / 2 ) ? 1 : 0 )
+ << "going to retry checkShardVersion host: " << conn->getServerAddress() << " " << result << endl;
+ sleepmillis( 10 * tryNumber );
+ checkShardVersion( *conn , ns , true , tryNumber + 1 );
return true;
}
- string errmsg = str::stream() << "setShardVersion failed host[" << conn.getServerAddress() << "] " << result;
+ string errmsg = str::stream() << "setShardVersion failed host: " << conn->getServerAddress() << " " << result;
log() << " " << errmsg << endl;
massert( 10429 , errmsg , 0 );
return true;
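checkShardVersion above now retries up to maxNumTries = 7 times, sleeps 10 ms multiplied by the attempt number, and logs at a louder level once half the attempts are used. A standalone sketch of that retry shape, where attempt() is a placeholder for the setShardVersion round trip:

#include <chrono>
#include <functional>
#include <iostream>
#include <thread>

// Sketch only: bounded retries with a linearly growing sleep and escalating verbosity.
static bool retryWithBackoff(const std::function<bool()>& attempt) {
    const int maxNumTries = 7;
    for (int tryNumber = 1; tryNumber <= maxNumTries; ++tryNumber) {
        if (attempt())
            return true;
        const bool quiet = tryNumber < maxNumTries / 2;
        (quiet ? std::clog : std::cerr)
            << "going to retry, attempt " << tryNumber << " of " << maxNumTries << "\n";
        std::this_thread::sleep_for(std::chrono::milliseconds(10 * tryNumber));
    }
    return false;   // the caller would massert/throw here, as shard_version.cpp does
}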
diff --git a/s/shard_version.h b/s/shard_version.h
index 023b7fc..98cacf6 100644
--- a/s/shard_version.h
+++ b/s/shard_version.h
@@ -28,4 +28,5 @@ namespace mongo {
*/
void installChunkShardVersioning();
+
} // namespace mongo
diff --git a/s/shardconnection.cpp b/s/shardconnection.cpp
index ec14139..04b49f2 100644
--- a/s/shardconnection.cpp
+++ b/s/shardconnection.cpp
@@ -41,12 +41,14 @@ namespace mongo {
boost::function4<bool, DBClientBase&, const string&, bool, int> checkShardVersionCB = defaultCheckShardVersion;
boost::function1<void, DBClientBase*> resetShardVersionCB = defaultResetShardVersion;
+ DBConnectionPool shardConnectionPool;
+
// Only print the non-top-level-shard-conn warning once if not verbose
volatile bool printedShardConnWarning = false;
/**
* holds all the actual db connections for a client to various servers
- * 1 pre thread, so don't have to worry about thread safety
+ * 1 per thread, so doesn't have to be thread safe
*/
class ClientConnections : boost::noncopyable {
public:
@@ -68,8 +70,10 @@ namespace mongo {
if ( ss->avail ) {
/* if we're shutting down, don't want to initiate release mechanism as it is slow,
and isn't needed since all connections will be closed anyway */
- if ( inShutdown() )
+ if ( inShutdown() ) {
+ resetShardVersionCB( ss->avail );
delete ss->avail;
+ }
else
release( addr , ss->avail );
ss->avail = 0;
@@ -115,12 +119,12 @@ namespace mongo {
if ( s->avail ) {
DBClientBase* c = s->avail;
s->avail = 0;
- pool.onHandedOut( c );
+ shardConnectionPool.onHandedOut( c );
return c;
}
s->created++;
- return pool.get( addr );
+ return shardConnectionPool.get( addr );
}
void done( const string& addr , DBClientBase* conn ) {
@@ -137,15 +141,10 @@ namespace mongo {
for ( HostMap::iterator i=_hosts.begin(); i!=_hosts.end(); ++i ) {
string addr = i->first;
Status* ss = i->second;
-
- if ( ss->avail ) {
+ if ( ss->avail )
ss->avail->getLastError();
- release( addr , ss->avail );
- ss->avail = 0;
- }
- delete ss;
+
}
- _hosts.clear();
}
void checkVersions( const string& ns ) {
@@ -157,14 +156,14 @@ namespace mongo {
for ( unsigned i=0; i<all.size(); i++ ) {
string sconnString = all[i].getConnString();
- Status* &s = _hosts[ sconnString ];
+ Status* &s = _hosts[sconnString];
if ( ! s ){
s = new Status();
}
if( ! s->avail )
- s->avail = pool.get( sconnString );
+ s->avail = shardConnectionPool.get( sconnString );
checkShardVersionCB( *s->avail, ns, false, 1 );
@@ -172,27 +171,7 @@ namespace mongo {
}
void release( const string& addr , DBClientBase * conn ) {
- resetShardVersionCB( conn );
- BSONObj res;
-
- try {
- if ( conn->simpleCommand( "admin" , &res , "unsetSharding" ) ) {
- pool.release( addr , conn );
- }
- else {
- error() << "unset sharding failed : " << res << endl;
- delete conn;
- }
- }
- catch ( SocketException& e ) {
- // server down or something
- LOG(1) << "socket exception trying to unset sharding: " << e.toString() << endl;
- delete conn;
- }
- catch ( std::exception& e ) {
- error() << "couldn't unset sharding : " << e.what() << endl;
- delete conn;
- }
+ shardConnectionPool.release( addr , conn );
}
void _check( const string& ns ) {
diff --git a/s/shardkey.cpp b/s/shardkey.cpp
index 84cdb4b..d6c8eda 100644
--- a/s/shardkey.cpp
+++ b/s/shardkey.cpp
@@ -55,7 +55,8 @@ namespace mongo {
*/
for(set<string>::const_iterator it = patternfields.begin(); it != patternfields.end(); ++it) {
- if(obj.getFieldDotted(it->c_str()).eoo())
+ BSONElement e = obj.getFieldDotted(it->c_str());
+ if(e.eoo() || e.type() == Array)
return false;
}
return true;
@@ -83,7 +84,7 @@ namespace mongo {
vector<const char*> keysToMove;
keysToMove.push_back("_id");
BSONForEach(e, pattern) {
- if (strchr(e.fieldName(), '.') == NULL)
+ if (strchr(e.fieldName(), '.') == NULL && strcmp(e.fieldName(), "_id") != 0)
keysToMove.push_back(e.fieldName());
}
@@ -93,6 +94,7 @@ namespace mongo {
}
else {
BufBuilder buf (obj.objsize());
+ buf.appendNum((unsigned)0); // refcount
buf.appendNum(obj.objsize());
vector<pair<const char*, size_t> > copies;
@@ -135,7 +137,7 @@ namespace mongo {
buf.appendChar('\0');
- BSONObj out (buf.buf(), true);
+ BSONObj out ((BSONObj::Holder*)buf.buf());
buf.decouple();
return out;
}
@@ -184,8 +186,8 @@ namespace mongo {
ShardKeyPattern k( fromjson("{a:1,'sub.b':-1,'sub.c':1}") );
BSONObj x = fromjson("{a:1,'sub.b':2,'sub.c':3}");
- assert( k.extractKey( fromjson("{a:1,sub:{b:2,c:3}}") ).woEqual(x) );
- assert( k.extractKey( fromjson("{sub:{b:2,c:3},a:1}") ).woEqual(x) );
+ assert( k.extractKey( fromjson("{a:1,sub:{b:2,c:3}}") ).binaryEqual(x) );
+ assert( k.extractKey( fromjson("{sub:{b:2,c:3},a:1}") ).binaryEqual(x) );
}
void moveToFrontTest() {
ShardKeyPattern sk (BSON("a" << 1 << "b" << 1));
@@ -193,13 +195,13 @@ namespace mongo {
BSONObj ret;
ret = sk.moveToFront(BSON("z" << 1 << "_id" << 1 << "y" << 1 << "a" << 1 << "x" << 1 << "b" << 1 << "w" << 1));
- assert(ret.woEqual(BSON("_id" << 1 << "a" << 1 << "b" << 1 << "z" << 1 << "y" << 1 << "x" << 1 << "w" << 1)));
+ assert(ret.binaryEqual(BSON("_id" << 1 << "a" << 1 << "b" << 1 << "z" << 1 << "y" << 1 << "x" << 1 << "w" << 1)));
ret = sk.moveToFront(BSON("_id" << 1 << "a" << 1 << "b" << 1 << "z" << 1 << "y" << 1 << "x" << 1 << "w" << 1));
- assert(ret.woEqual(BSON("_id" << 1 << "a" << 1 << "b" << 1 << "z" << 1 << "y" << 1 << "x" << 1 << "w" << 1)));
+ assert(ret.binaryEqual(BSON("_id" << 1 << "a" << 1 << "b" << 1 << "z" << 1 << "y" << 1 << "x" << 1 << "w" << 1)));
ret = sk.moveToFront(BSON("z" << 1 << "y" << 1 << "a" << 1 << "b" << 1 << "Z" << 1 << "Y" << 1));
- assert(ret.woEqual(BSON("a" << 1 << "b" << 1 << "z" << 1 << "y" << 1 << "Z" << 1 << "Y" << 1)));
+ assert(ret.binaryEqual(BSON("a" << 1 << "b" << 1 << "z" << 1 << "y" << 1 << "Z" << 1 << "Y" << 1)));
}
@@ -262,7 +264,7 @@ namespace mongo {
moveToFrontBenchmark(100);
}
- log(1) << "shardKeyTest passed" << endl;
+ LOG(1) << "shardKeyTest passed" << endl;
}
} shardKeyTest;
diff --git a/s/shardkey.h b/s/shardkey.h
index 96301ff..976cff0 100644
--- a/s/shardkey.h
+++ b/s/shardkey.h
@@ -102,7 +102,21 @@ namespace mongo {
};
inline BSONObj ShardKeyPattern::extractKey(const BSONObj& from) const {
- BSONObj k = from.extractFields(pattern);
+ BSONObj k = from;
+ bool needExtraction = false;
+
+ BSONObjIterator a(from);
+ BSONObjIterator b(pattern);
+ while (a.more() && b.more()){
+ if (strcmp(a.next().fieldName(), b.next().fieldName()) != 0){
+ needExtraction = true;
+ break;
+ }
+ }
+
+ if (needExtraction || a.more() != b.more())
+ k = from.extractFields(pattern);
+
uassert(13334, "Shard Key must be less than 512 bytes", k.objsize() < 512);
return k;
}
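
The new extractKey above adds a fast path: it walks the document and the key pattern in parallel and only falls back to extractFields when a field name differs or the field counts differ, so documents that already are exactly the shard key (chunk boundaries, for example) avoid a copy. A small self-contained sketch of that check, using plain string vectors as a stand-in for BSON field iteration:

    // If the candidate document's field names match the key pattern one-for-one,
    // it can be used as the key directly and extraction is skipped.
    #include <cassert>
    #include <string>
    #include <vector>

    typedef std::vector<std::string> FieldNames;

    bool alreadyMatchesPattern(const FieldNames& doc, const FieldNames& pattern) {
        if (doc.size() != pattern.size())          // extra or missing fields: must extract
            return false;
        for (size_t i = 0; i < doc.size(); ++i)
            if (doc[i] != pattern[i])              // same names, same order required
                return false;
        return true;                               // document already is the key
    }

    int main() {
        FieldNames pattern;
        pattern.push_back("a");
        pattern.push_back("sub.b");

        FieldNames exactKey(pattern);              // e.g. a chunk boundary document
        FieldNames fullDoc;                        // e.g. a user document with other fields
        fullDoc.push_back("a");
        fullDoc.push_back("x");

        assert(alreadyMatchesPattern(exactKey, pattern));   // fast path: no extraction
        assert(!alreadyMatchesPattern(fullDoc, pattern));   // slow path: extractFields
        return 0;
    }
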
diff --git a/s/strategy.cpp b/s/strategy.cpp
index 7c1fb0b..4230b7f 100644
--- a/s/strategy.cpp
+++ b/s/strategy.cpp
@@ -38,7 +38,7 @@ namespace mongo {
conn.donotCheckVersion();
else if ( conn.setVersion() ) {
conn.done();
- throw StaleConfigException( r.getns() , "doWRite" , true );
+ throw StaleConfigException( r.getns() , "doWrite" , true );
}
conn->say( r.m() );
conn.done();
@@ -46,6 +46,8 @@ namespace mongo {
void Strategy::doQuery( Request& r , const Shard& shard ) {
+ r.checkAuth();
+
ShardConnection dbcon( shard , r.getns() );
DBClientBase &c = dbcon.conn();
@@ -67,13 +69,31 @@ namespace mongo {
dbcon.done();
}
- void Strategy::insert( const Shard& shard , const char * ns , const BSONObj& obj ) {
+ void Strategy::insert( const Shard& shard , const char * ns , const BSONObj& obj , int flags, bool safe ) {
ShardConnection dbcon( shard , ns );
if ( dbcon.setVersion() ) {
dbcon.done();
throw StaleConfigException( ns , "for insert" );
}
- dbcon->insert( ns , obj );
+ dbcon->insert( ns , obj , flags);
+ if (safe)
+ dbcon->getLastError();
dbcon.done();
}
+
+ void Strategy::update( const Shard& shard , const char * ns , const BSONObj& query , const BSONObj& toupdate , int flags, bool safe ) {
+ bool upsert = flags & UpdateOption_Upsert;
+ bool multi = flags & UpdateOption_Multi;
+
+ ShardConnection dbcon( shard , ns );
+ if ( dbcon.setVersion() ) {
+ dbcon.done();
+ throw StaleConfigException( ns , "for insert" );
+ }
+ dbcon->update( ns , query , toupdate, upsert, multi);
+ if (safe)
+ dbcon->getLastError();
+        holder->zero();
+ }
+
}
diff --git a/s/strategy.h b/s/strategy.h
index 10a5a3f..326a515 100644
--- a/s/strategy.h
+++ b/s/strategy.h
@@ -32,11 +32,15 @@ namespace mongo {
virtual void getMore( Request& r ) = 0;
virtual void writeOp( int op , Request& r ) = 0;
+ virtual void insertSharded( DBConfigPtr conf, const char* ns, BSONObj& o, int flags, bool safe=false, const char* nsChunkLookup=0 ) = 0;
+ virtual void updateSharded( DBConfigPtr conf, const char* ns, BSONObj& query, BSONObj& toupdate, int flags, bool safe=false ) = 0;
+
protected:
void doWrite( int op , Request& r , const Shard& shard , bool checkVersion = true );
void doQuery( Request& r , const Shard& shard );
- void insert( const Shard& shard , const char * ns , const BSONObj& obj );
+ void insert( const Shard& shard , const char * ns , const BSONObj& obj , int flags=0 , bool safe=false );
+ void update( const Shard& shard , const char * ns , const BSONObj& query , const BSONObj& toupdate , int flags=0, bool safe=false );
};
diff --git a/s/strategy_shard.cpp b/s/strategy_shard.cpp
index 337fa58..c6b30e7 100644
--- a/s/strategy_shard.cpp
+++ b/s/strategy_shard.cpp
@@ -35,7 +35,9 @@ namespace mongo {
virtual void queryOp( Request& r ) {
QueryMessage q( r.d() );
- log(3) << "shard query: " << q.ns << " " << q.query << endl;
+ r.checkAuth();
+
+ LOG(3) << "shard query: " << q.ns << " " << q.query << endl;
if ( q.ntoreturn == 1 && strstr(q.ns, ".$cmd") )
throw UserException( 8010 , "something is wrong, shouldn't see a command here" );
@@ -66,20 +68,14 @@ namespace mongo {
ClusteredCursor * cursor = 0;
BSONObj sort = query.getSort();
-
- if ( sort.isEmpty() ) {
- cursor = new SerialServerClusteredCursor( servers , q );
- }
- else {
- cursor = new ParallelSortClusteredCursor( servers , q , sort );
- }
+ cursor = new ParallelSortClusteredCursor( servers , q , sort );
assert( cursor );
try {
cursor->init();
- log(5) << " cursor type: " << cursor->type() << endl;
+ LOG(5) << " cursor type: " << cursor->type() << endl;
shardedCursorTypes.hit( cursor->type() );
if ( query.isExplain() ) {
@@ -98,7 +94,7 @@ namespace mongo {
if ( ! cc->sendNextBatch( r ) ) {
return;
}
- log(6) << "storing cursor : " << cc->getId() << endl;
+ LOG(6) << "storing cursor : " << cc->getId() << endl;
cursorCache.store( cc );
}
@@ -106,11 +102,11 @@ namespace mongo {
int ntoreturn = r.d().pullInt();
long long id = r.d().pullInt64();
- log(6) << "want cursor : " << id << endl;
+ LOG(6) << "want cursor : " << id << endl;
ShardedClientCursorPtr cursor = cursorCache.get( id );
if ( ! cursor ) {
- log(6) << "\t invalid cursor :(" << endl;
+ LOG(6) << "\t invalid cursor :(" << endl;
replyToQuery( ResultFlag_CursorNotFound , r.p() , r.m() , 0 , 0 , 0 );
return;
}
@@ -126,56 +122,126 @@ namespace mongo {
}
void _insert( Request& r , DbMessage& d, ChunkManagerPtr manager ) {
+ const int flags = d.reservedField();
+ bool keepGoing = flags & InsertOption_ContinueOnError; // modified before assertion if should abort
while ( d.moreJSObjs() ) {
- BSONObj o = d.nextJsObj();
- if ( ! manager->hasShardKey( o ) ) {
+ try {
+ BSONObj o = d.nextJsObj();
+ if ( ! manager->hasShardKey( o ) ) {
+
+ bool bad = true;
- bool bad = true;
+ if ( manager->getShardKey().partOfShardKey( "_id" ) ) {
+ BSONObjBuilder b;
+ b.appendOID( "_id" , 0 , true );
+ b.appendElements( o );
+ o = b.obj();
+ bad = ! manager->hasShardKey( o );
+ }
+
+ if ( bad ) {
+ log() << "tried to insert object with no valid shard key: " << r.getns() << " " << o << endl;
+ uasserted( 8011 , "tried to insert object with no valid shard key" );
+ }
- if ( manager->getShardKey().partOfShardKey( "_id" ) ) {
- BSONObjBuilder b;
- b.appendOID( "_id" , 0 , true );
- b.appendElements( o );
- o = b.obj();
- bad = ! manager->hasShardKey( o );
}
- if ( bad ) {
- log() << "tried to insert object without shard key: " << r.getns() << " " << o << endl;
- throw UserException( 8011 , "tried to insert object without shard key" );
+ // Many operations benefit from having the shard key early in the object
+ o = manager->getShardKey().moveToFront(o);
+
+ const int maxTries = 30;
+
+ bool gotThrough = false;
+ for ( int i=0; i<maxTries; i++ ) {
+ try {
+ ChunkPtr c = manager->findChunk( o );
+ LOG(4) << " server:" << c->getShard().toString() << " " << o << endl;
+ insert( c->getShard() , r.getns() , o , flags);
+
+ r.gotInsert();
+ if ( r.getClientInfo()->autoSplitOk() )
+ c->splitIfShould( o.objsize() );
+ gotThrough = true;
+ break;
+ }
+ catch ( StaleConfigException& e ) {
+ int logLevel = i < ( maxTries / 2 );
+ LOG( logLevel ) << "retrying insert because of StaleConfigException: " << e << " object: " << o << endl;
+ r.reset();
+
+ manager = r.getChunkManager();
+ if( ! manager ) {
+ keepGoing = false;
+ uasserted(14804, "collection no longer sharded");
+ }
+
+ unsigned long long old = manager->getSequenceNumber();
+
+ LOG( logLevel ) << " sequence number - old: " << old << " new: " << manager->getSequenceNumber() << endl;
+ }
+ sleepmillis( i * 20 );
}
+ assert( inShutdown() || gotThrough ); // not caught below
+ } catch (const UserException&){
+ if (!keepGoing || !d.moreJSObjs()){
+ throw;
+ }
+ // otherwise ignore and keep going
}
+ }
+ }
- // Many operations benefit from having the shard key early in the object
- o = manager->getShardKey().moveToFront(o);
+ void insertSharded( DBConfigPtr conf, const char* ns, BSONObj& o, int flags, bool safe, const char* nsChunkLookup ) {
+ if (!nsChunkLookup)
+ nsChunkLookup = ns;
+ ChunkManagerPtr manager = conf->getChunkManager(nsChunkLookup);
+ if ( ! manager->hasShardKey( o ) ) {
- const int maxTries = 30;
+ bool bad = true;
- bool gotThrough = false;
- for ( int i=0; i<maxTries; i++ ) {
- try {
- ChunkPtr c = manager->findChunk( o );
- log(4) << " server:" << c->getShard().toString() << " " << o << endl;
- insert( c->getShard() , r.getns() , o );
+ if ( manager->getShardKey().partOfShardKey( "_id" ) ) {
+ BSONObjBuilder b;
+ b.appendOID( "_id" , 0 , true );
+ b.appendElements( o );
+ o = b.obj();
+ bad = ! manager->hasShardKey( o );
+ }
- r.gotInsert();
- if ( r.getClientInfo()->autoSplitOk() )
- c->splitIfShould( o.objsize() );
- gotThrough = true;
- break;
- }
- catch ( StaleConfigException& ) {
- log( i < ( maxTries / 2 ) ) << "retrying insert because of StaleConfigException: " << o << endl;
- r.reset();
- manager = r.getChunkManager();
- }
- sleepmillis( i * 200 );
+ if ( bad ) {
+ log() << "tried to insert object with no valid shard key: " << nsChunkLookup << " " << o << endl;
+ uasserted( 14842 , "tried to insert object with no valid shard key" );
+ }
+
+ }
+
+ // Many operations benefit from having the shard key early in the object
+ o = manager->getShardKey().moveToFront(o);
+
+ const int maxTries = 30;
+
+ for ( int i=0; i<maxTries; i++ ) {
+ try {
+ ChunkPtr c = manager->findChunk( o );
+ LOG(4) << " server:" << c->getShard().toString() << " " << o << endl;
+ insert( c->getShard() , ns , o , flags, safe);
+ break;
}
+ catch ( StaleConfigException& e ) {
+ int logLevel = i < ( maxTries / 2 );
+ LOG( logLevel ) << "retrying insert because of StaleConfigException: " << e << " object: " << o << endl;
- assert( gotThrough );
+                unsigned long long old = manager->getSequenceNumber();
+                manager = conf->getChunkManagerIfExists(ns);
+
+                if (!manager) {
+                    uasserted(14843, "collection no longer sharded");
+                }
+
+                LOG( logLevel ) << " sequence number - old: " << old << " new: " << manager->getSequenceNumber() << endl;
+            }
+ sleepmillis( i * 20 );
}
}
@@ -186,16 +252,15 @@ namespace mongo {
uassert( 13506 , "$atomic not supported sharded" , query["$atomic"].eoo() );
uassert( 10201 , "invalid update" , d.moreJSObjs() );
BSONObj toupdate = d.nextJsObj();
-
BSONObj chunkFinder = query;
bool upsert = flags & UpdateOption_Upsert;
bool multi = flags & UpdateOption_Multi;
if (upsert) {
- uassert(8012, "can't upsert something without shard key",
+ uassert(8012, "can't upsert something without valid shard key",
(manager->hasShardKey(toupdate) ||
- (toupdate.firstElement().fieldName()[0] == '$' && manager->hasShardKey(query))));
+ (toupdate.firstElementFieldName()[0] == '$' && manager->hasShardKey(query))));
BSONObj key = manager->getShardKey().extractKey(query);
BSONForEach(e, key) {
@@ -207,8 +272,9 @@ namespace mongo {
if ( ! manager->hasShardKey( query ) ) {
if ( multi ) {
}
- else if ( strcmp( query.firstElement().fieldName() , "_id" ) || query.nFields() != 1 ) {
- throw UserException( 8013 , "can't do non-multi update with query that doesn't have the shard key" );
+ else if ( strcmp( query.firstElementFieldName() , "_id" ) || query.nFields() != 1 ) {
+ log() << "Query " << query << endl;
+ throw UserException( 8013 , "can't do non-multi update with query that doesn't have a valid shard key" );
}
else {
save = true;
@@ -218,7 +284,7 @@ namespace mongo {
if ( ! save ) {
- if ( toupdate.firstElement().fieldName()[0] == '$' ) {
+ if ( toupdate.firstElementFieldName()[0] == '$' ) {
BSONObjIterator ops(toupdate);
while(ops.more()) {
BSONElement op(ops.next());
@@ -241,7 +307,7 @@ namespace mongo {
}
else {
uasserted(12376,
- str::stream() << "shard key must be in update object for collection: " << manager->getns() );
+ str::stream() << "valid shard key must be in update object for collection: " << manager->getns() );
}
}
@@ -268,10 +334,101 @@ namespace mongo {
if ( left <= 0 )
throw e;
left--;
- log() << "update failed b/c of StaleConfigException, retrying "
+ log() << "update will be retried b/c sharding config info is stale, "
<< " left:" << left << " ns: " << r.getns() << " query: " << query << endl;
r.reset( false );
manager = r.getChunkManager();
+ uassert(14806, "collection no longer sharded", manager);
+ }
+ }
+ }
+ }
+
+ void updateSharded( DBConfigPtr conf, const char* ns, BSONObj& query, BSONObj& toupdate, int flags, bool safe ) {
+ ChunkManagerPtr manager = conf->getChunkManager(ns);
+ BSONObj chunkFinder = query;
+
+ bool upsert = flags & UpdateOption_Upsert;
+ bool multi = flags & UpdateOption_Multi;
+
+ if (upsert) {
+ uassert(14854, "can't upsert something without valid shard key",
+ (manager->hasShardKey(toupdate) ||
+ (toupdate.firstElementFieldName()[0] == '$' && manager->hasShardKey(query))));
+
+ BSONObj key = manager->getShardKey().extractKey(query);
+ BSONForEach(e, key) {
+ uassert(14855, "shard key in upsert query must be an exact match", getGtLtOp(e) == BSONObj::Equality);
+ }
+ }
+
+ bool save = false;
+ if ( ! manager->hasShardKey( query ) ) {
+ if ( multi ) {
+ }
+ else if ( strcmp( query.firstElementFieldName() , "_id" ) || query.nFields() != 1 ) {
+ throw UserException( 14850 , "can't do non-multi update with query that doesn't have a valid shard key" );
+ }
+ else {
+ save = true;
+ chunkFinder = toupdate;
+ }
+ }
+
+
+ if ( ! save ) {
+ if ( toupdate.firstElementFieldName()[0] == '$' ) {
+ BSONObjIterator ops(toupdate);
+ while(ops.more()) {
+ BSONElement op(ops.next());
+ if (op.type() != Object)
+ continue;
+ BSONObjIterator fields(op.embeddedObject());
+ while(fields.more()) {
+ const string field = fields.next().fieldName();
+ uassert(14851,
+ str::stream() << "Can't modify shard key's value field" << field
+ << " for collection: " << manager->getns(),
+ ! manager->getShardKey().partOfShardKey(field));
+ }
+ }
+ }
+ else if ( manager->hasShardKey( toupdate ) ) {
+ uassert( 14856,
+ str::stream() << "cannot modify shard key for collection: " << manager->getns(),
+ manager->getShardKey().compare( query , toupdate ) == 0 );
+ }
+ else {
+ uasserted(14857,
+ str::stream() << "valid shard key must be in update object for collection: " << manager->getns() );
+ }
+ }
+
+ if ( multi ) {
+ set<Shard> shards;
+ manager->getShardsForQuery( shards , chunkFinder );
+// int * x = (int*)(r.d().afterNS());
+// x[0] |= UpdateOption_Broadcast;
+ for ( set<Shard>::iterator i=shards.begin(); i!=shards.end(); i++) {
+ update(*i, ns, query, toupdate, flags, safe);
+ }
+ }
+ else {
+ int left = 5;
+ while ( true ) {
+ try {
+ ChunkPtr c = manager->findChunk( chunkFinder );
+ update(c->getShard(), ns, query, toupdate, flags);
+ break;
+ }
+ catch ( StaleConfigException& e ) {
+ if ( left <= 0 )
+ throw e;
+ left--;
+ log() << "update will be retried b/c sharding config info is stale, "
+ << " left:" << left << " ns: " << ns << " query: " << query << endl;
+ manager = conf->getChunkManager(ns);
+ uassert(14849, "collection no longer sharded", manager);
}
}
}
@@ -293,7 +450,7 @@ namespace mongo {
while ( true ) {
try {
manager->getShardsForQuery( shards , pattern );
- log(2) << "delete : " << pattern << " \t " << shards.size() << " justOne: " << justOne << endl;
+ LOG(2) << "delete : " << pattern << " \t " << shards.size() << " justOne: " << justOne << endl;
if ( shards.size() == 1 ) {
doWrite( dbDelete , r , *shards.begin() );
return;
@@ -309,6 +466,7 @@ namespace mongo {
r.reset( false );
shards.clear();
manager = r.getChunkManager();
+ uassert(14805, "collection no longer sharded", manager);
}
}
@@ -324,7 +482,7 @@ namespace mongo {
virtual void writeOp( int op , Request& r ) {
const char *ns = r.getns();
- log(3) << "write: " << ns << endl;
+ LOG(3) << "write: " << ns << endl;
DbMessage& d = r.d();
ChunkManagerPtr info = r.getChunkManager();
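
The rewritten _insert/insertSharded above retry a write up to maxTries (30) times when a StaleConfigException signals that the target chunk moved, reloading the chunk manager and sleeping sleepmillis(i * 20) between attempts. A compact sketch of that bounded retry-with-linear-backoff shape, with StaleConfig and refreshRouting() as stand-ins for the real exception and chunk-manager reload:

    #include <chrono>
    #include <iostream>
    #include <stdexcept>
    #include <thread>

    struct StaleConfig : std::runtime_error {
        StaleConfig() : std::runtime_error("stale sharding config") {}
    };

    void refreshRouting() { /* reload routing metadata in the real code */ }

    template <class Op>
    void writeWithRetry(Op op, int maxTries = 30) {
        for (int i = 0; i < maxTries; ++i) {
            try {
                op();                              // route and send the write
                return;                            // success: stop retrying
            } catch (const StaleConfig&) {
                refreshRouting();                  // chunk moved: reload metadata
                std::this_thread::sleep_for(std::chrono::milliseconds(i * 20));
            }
        }
        throw std::runtime_error("write failed after maxTries stale-config retries");
    }

    int main() {
        int attempts = 0;
        writeWithRetry([&] {
            if (++attempts < 3) throw StaleConfig();   // first two attempts hit a moved chunk
        });
        std::cout << "succeeded on attempt " << attempts << std::endl;
        return 0;
    }
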
diff --git a/s/strategy_single.cpp b/s/strategy_single.cpp
index 3fd357a..012be5f 100644
--- a/s/strategy_single.cpp
+++ b/s/strategy_single.cpp
@@ -36,7 +36,7 @@ namespace mongo {
virtual void queryOp( Request& r ) {
QueryMessage q( r.d() );
- log(3) << "single query: " << q.ns << " " << q.query << " ntoreturn: " << q.ntoreturn << endl;
+ LOG(3) << "single query: " << q.ns << " " << q.query << " ntoreturn: " << q.ntoreturn << " options : " << q.queryOptions << endl;
if ( r.isCommand() ) {
@@ -47,7 +47,15 @@ namespace mongo {
while ( true ) {
BSONObjBuilder builder;
try {
- bool ok = Command::runAgainstRegistered(q.ns, q.query, builder);
+ BSONObj cmdObj = q.query;
+ {
+ BSONElement e = cmdObj.firstElement();
+ if ( e.type() == Object && (e.fieldName()[0] == '$'
+ ? str::equals("query", e.fieldName()+1)
+ : str::equals("query", e.fieldName())))
+ cmdObj = e.embeddedObject();
+ }
+ bool ok = Command::runAgainstRegistered(q.ns, cmdObj, builder, q.queryOptions);
if ( ok ) {
BSONObj x = builder.done();
replyToQuery(0, r.p(), r.m(), x);
@@ -73,7 +81,7 @@ namespace mongo {
}
}
- string commandName = q.query.firstElement().fieldName();
+ string commandName = q.query.firstElementFieldName();
uassert(13390, "unrecognized command: " + commandName, _commandsSafeToPass.count(commandName) != 0);
}
@@ -87,7 +95,10 @@ namespace mongo {
LOG(3) << "single getmore: " << ns << endl;
long long id = r.d().getInt64( 4 );
-
+
+        // we use ScopedDbConnection because we don't care about config versions
+ // not deleting data is handled elsewhere
+ // and we don't want to call setShardVersion
ScopedDbConnection conn( cursorCache.getRef( id ) );
Message response;
@@ -150,12 +161,12 @@ namespace mongo {
if ( r.isShardingEnabled() &&
strstr( ns , ".system.indexes" ) == strchr( ns , '.' ) &&
strchr( ns , '.' ) ) {
- log(1) << " .system.indexes write for: " << ns << endl;
+ LOG(1) << " .system.indexes write for: " << ns << endl;
handleIndexWrite( op , r );
return;
}
- log(3) << "single write: " << ns << endl;
+ LOG(3) << "single write: " << ns << endl;
doWrite( op , r , r.primaryShard() );
            r.gotInsert(); // Won't handle multi-insert correctly. Not worth parsing the request.
}
@@ -251,6 +262,14 @@ namespace mongo {
return true;
}
+ void insertSharded( DBConfigPtr conf, const char* ns, BSONObj& o, int flags, bool safe, const char* nsChunkLookup ) {
+ // only useful for shards
+ }
+
+ void updateSharded( DBConfigPtr conf, const char* ns, BSONObj& query, BSONObj& toupdate, int flags, bool safe ) {
+ // only useful for shards
+ }
+
set<string> _commandsSafeToPass;
};
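
queryOp above now unwraps a command sent as { query : {...} } or { $query : {...} } before handing it to Command::runAgainstRegistered. A tiny sketch of that unwrapping rule, with Doc standing in for BSONObj:

    #include <cassert>
    #include <cstring>
    #include <string>

    struct Doc {
        std::string firstElementName;   // name of the first element
        const Doc*  embedded;           // non-null when that element is a sub-document
    };

    // If the outer document is just a {query:...} or {$query:...} envelope,
    // step into it; otherwise the document itself is the command.
    const Doc* commandBody(const Doc* cmd) {
        const char* n = cmd->firstElementName.c_str();
        bool wrapped = cmd->embedded &&
            (n[0] == '$' ? std::strcmp("query", n + 1) == 0
                         : std::strcmp("query", n) == 0);
        return wrapped ? cmd->embedded : cmd;
    }

    int main() {
        Doc inner = { "isMaster", 0 };
        Doc outer = { "$query", &inner };
        assert(commandBody(&outer) == &inner);   // unwrap the $query envelope
        assert(commandBody(&inner) == &inner);   // plain command passes through
        return 0;
    }
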
diff --git a/s/util.h b/s/util.h
index 2bc89ae..2bf1c94 100644
--- a/s/util.h
+++ b/s/util.h
@@ -129,7 +129,7 @@ namespace mongo {
virtual ~StaleConfigException() throw() {}
- virtual void appendPrefix( stringstream& ss ) const { ss << "StaleConfigException: "; }
+ virtual void appendPrefix( stringstream& ss ) const { ss << "stale sharding config exception: "; }
bool justConnection() const { return _justConnection; }
diff --git a/s/writeback_listener.cpp b/s/writeback_listener.cpp
index df7cc35..5f320d3 100644
--- a/s/writeback_listener.cpp
+++ b/s/writeback_listener.cpp
@@ -40,7 +40,8 @@ namespace mongo {
mongo::mutex WriteBackListener::_seenWritebacksLock("WriteBackListener::seen");
WriteBackListener::WriteBackListener( const string& addr ) : _addr( addr ) {
- log() << "creating WriteBackListener for: " << addr << endl;
+ _name = str::stream() << "WriteBackListener-" << addr;
+ log() << "creating WriteBackListener for: " << addr << " serverID: " << serverID << endl;
}
/* static */
@@ -88,16 +89,17 @@ namespace mongo {
/* static */
BSONObj WriteBackListener::waitFor( const ConnectionIdent& ident, const OID& oid ) {
Timer t;
- for ( int i=0; i<5000; i++ ) {
+ for ( int i=0; i<10000; i++ ) {
{
scoped_lock lk( _seenWritebacksLock );
WBStatus s = _seenWritebacks[ident];
if ( oid < s.id ) {
// this means we're waiting for a GLE that already passed.
- // it should be impossible becauseonce we call GLE, no other
+ // it should be impossible because once we call GLE, no other
// writebacks should happen with that connection id
- msgasserted( 13633 , str::stream() << "got writeback waitfor for older id " <<
- " oid: " << oid << " s.id: " << s.id << " connection: " << ident.toString() );
+
+ msgasserted( 14041 , str::stream() << "got writeback waitfor for older id " <<
+ " oid: " << oid << " s.id: " << s.id << " ident: " << ident.toString() );
}
else if ( oid == s.id ) {
return s.gle;
@@ -115,7 +117,7 @@ namespace mongo {
while ( ! inShutdown() ) {
if ( ! Shard::isAShardNode( _addr ) ) {
- log(1) << _addr << " is not a shard node" << endl;
+ LOG(1) << _addr << " is not a shard node" << endl;
sleepsecs( 60 );
continue;
}
@@ -129,6 +131,7 @@ namespace mongo {
BSONObjBuilder cmd;
cmd.appendOID( "writebacklisten" , &serverID ); // Command will block for data
if ( ! conn->runCommand( "admin" , cmd.obj() , result ) ) {
+ result = result.getOwned();
log() << "writebacklisten command failed! " << result << endl;
conn.done();
continue;
@@ -136,7 +139,7 @@ namespace mongo {
}
- log(1) << "writebacklisten result: " << result << endl;
+ LOG(1) << "writebacklisten result: " << result << endl;
BSONObj data = result.getObjectField( "data" );
if ( data.getBoolField( "writeBack" ) ) {
@@ -163,13 +166,12 @@ namespace mongo {
ShardChunkVersion needVersion( data["version"] );
LOG(1) << "connectionId: " << cid << " writebackId: " << wid << " needVersion : " << needVersion.toString()
- << " mine : " << db->getChunkManager( ns )->getVersion().toString() << endl;// TODO change to log(3)
-
- if ( logLevel ) log(1) << debugString( m ) << endl;
+ << " mine : " << db->getChunkManager( ns )->getVersion().toString()
+ << endl;
- ShardChunkVersion start = db->getChunkManager( ns )->getVersion();
+ LOG(1) << m.toString() << endl;
- if ( needVersion.isSet() && needVersion <= start ) {
+ if ( needVersion.isSet() && needVersion <= db->getChunkManager( ns )->getVersion() ) {
// this means when the write went originally, the version was old
// if we're here, it means we've already updated the config, so don't need to do again
//db->getChunkManager( ns , true ); // SERVER-1349
@@ -178,48 +180,60 @@ namespace mongo {
// we received a writeback object that was sent to a previous version of a shard
// the actual shard may not have the object the writeback operation is for
// we need to reload the chunk manager and get the new shard versions
- bool good = false;
- for ( int i=0; i<100; i++ ) {
- if ( db->getChunkManager( ns , true )->getVersion() >= needVersion ) {
- good = true;
- break;
- }
- log() << "writeback getChunkManager didn't update?" << endl;
- sleepmillis(10);
- }
- assert( good );
+ db->getChunkManager( ns , true );
}
// do request and then call getLastError
// we have to call getLastError so we can return the right fields to the user if they decide to call getLastError
BSONObj gle;
- try {
-
- Request r( m , 0 );
- r.init();
-
- ClientInfo * ci = r.getClientInfo();
- ci->noAutoSplit();
-
- r.process();
-
- ci->newRequest(); // this so we flip prev and cur shards
+ int attempts = 0;
+ while ( true ) {
+ attempts++;
+
+ try {
+
+ Request r( m , 0 );
+ r.init();
+
+ r.d().reservedField() |= DbMessage::Reserved_FromWriteback;
+
+ ClientInfo * ci = r.getClientInfo();
+ if (!noauth) {
+ ci->getAuthenticationInfo()->authorize("admin", internalSecurity.user);
+ }
+ ci->noAutoSplit();
+
+ r.process();
+
+ ci->newRequest(); // this so we flip prev and cur shards
+
+ BSONObjBuilder b;
+ if ( ! ci->getLastError( BSON( "getLastError" << 1 ) , b , true ) ) {
+ b.appendBool( "commandFailed" , true );
+ }
+ gle = b.obj();
+
+ if ( gle["code"].numberInt() == 9517 ) {
+ log() << "writeback failed because of stale config, retrying attempts: " << attempts << endl;
+ if( ! db->getChunkManagerIfExists( ns , true ) ){
+ uassert( 15884, str::stream() << "Could not reload chunk manager after " << attempts << " attempts.", attempts <= 4 );
+ sleepsecs( attempts - 1 );
+ }
+ continue;
+ }
- BSONObjBuilder b;
- if ( ! ci->getLastError( BSON( "getLastError" << 1 ) , b , true ) ) {
- b.appendBool( "commandFailed" , true );
+ ci->clearSinceLastGetError();
}
- gle = b.obj();
-
- ci->clearSinceLastGetError();
- }
- catch ( DBException& e ) {
- error() << "error processing writeback: " << e << endl;
- BSONObjBuilder b;
- b.append( "err" , e.toString() );
- e.getInfo().append( b );
- gle = b.obj();
+ catch ( DBException& e ) {
+ error() << "error processing writeback: " << e << endl;
+ BSONObjBuilder b;
+ b.append( "err" , e.toString() );
+ e.getInfo().append( b );
+ gle = b.obj();
+ }
+
+ break;
}
{
diff --git a/s/writeback_listener.h b/s/writeback_listener.h
index 0125073..1ef33da 100644
--- a/s/writeback_listener.h
+++ b/s/writeback_listener.h
@@ -31,7 +31,8 @@ namespace mongo {
* (Wrong here in the sense that the target chunk moved before this mongos had a chance to
* learn so.) It is responsible for reapplying these writes to the correct shard.
*
- * Currently, there is one listener per shard.
+ * Runs (instantiated) on mongos.
+ * Currently, there is one writebacklistener per shard.
*/
class WriteBackListener : public BackgroundJob {
public:
@@ -63,11 +64,12 @@ namespace mongo {
protected:
WriteBackListener( const string& addr );
- string name() const { return "WriteBackListener"; }
+ string name() const { return _name; }
void run();
private:
string _addr;
+ string _name;
static mongo::mutex _cacheLock; // protects _cache
static map<string,WriteBackListener*> _cache; // server to listener
diff --git a/scripting/bench.cpp b/scripting/bench.cpp
index 2723985..9ada7d6 100644
--- a/scripting/bench.cpp
+++ b/scripting/bench.cpp
@@ -30,13 +30,6 @@
namespace mongo {
- /**
- * benchQuery( "foo" , { _id : 1 } )
- */
- BSONObj benchQuery( const BSONObj& args ) {
- return BSONObj();
- }
-
struct BenchRunConfig {
BenchRunConfig() {
host = "localhost";
@@ -54,7 +47,7 @@ namespace mongo {
string db;
unsigned parallel;
- int seconds;
+ double seconds;
BSONObj ops;
@@ -64,6 +57,73 @@ namespace mongo {
bool error;
};
+ static bool _hasSpecial( const BSONObj& obj ) {
+ BSONObjIterator i( obj );
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ if ( e.fieldName()[0] == '#' )
+ return true;
+
+ if ( ! e.isABSONObj() )
+ continue;
+
+ if ( _hasSpecial( e.Obj() ) )
+ return true;
+ }
+ return false;
+ }
+
+ static void _fixField( BSONObjBuilder& b , const BSONElement& e ) {
+ assert( e.type() == Object );
+
+ BSONObj sub = e.Obj();
+ assert( sub.nFields() == 1 );
+
+ BSONElement f = sub.firstElement();
+ if ( str::equals( "#RAND_INT" , f.fieldName() ) ) {
+ BSONObjIterator i( f.Obj() );
+ int min = i.next().numberInt();
+ int max = i.next().numberInt();
+
+ int x = min + ( rand() % ( max - min ) );
+ b.append( e.fieldName() , x );
+ }
+ else {
+ uasserted( 14811 , str::stream() << "invalid bench dynamic piece: " << f.fieldName() );
+ }
+
+ }
+
+ static void fixQuery( BSONObjBuilder& b , const BSONObj& obj ) {
+ BSONObjIterator i( obj );
+ while ( i.more() ) {
+ BSONElement e = i.next();
+
+ if ( e.type() != Object ) {
+ b.append( e );
+ continue;
+ }
+
+ BSONObj sub = e.Obj();
+ if ( sub.firstElement().fieldName()[0] != '#' ) {
+ b.append( e );
+ continue;
+ }
+
+ _fixField( b , e );
+ }
+ }
+
+
+ static BSONObj fixQuery( const BSONObj& obj ) {
+ if ( ! _hasSpecial( obj ) )
+ return obj;
+
+ BSONObjBuilder b( obj.objsize() + 128 );
+ fixQuery( b , obj );
+ return b.obj();
+ }
+
static void benchThread( BenchRunConfig * config ) {
ScopedDbConnection conn( config->host );
config->threadsReady++;
@@ -76,14 +136,20 @@ namespace mongo {
string op = e["op"].String();
if ( op == "findOne" ) {
- conn->findOne( ns , e["query"].Obj() );
+ conn->findOne( ns , fixQuery( e["query"].Obj() ) );
+ }
+ else if ( op == "remove" ) {
+ conn->remove( ns , fixQuery( e["query"].Obj() ) );
+ }
+ else if ( op == "update" ) {
+ conn->update( ns , fixQuery( e["query"].Obj() ) , e["update"].Obj() , e["upsert"].trueValue() );
}
else {
log() << "don't understand op: " << op << endl;
config->error = true;
return;
}
-
+
}
}
@@ -93,7 +159,7 @@ namespace mongo {
/**
* benchRun( { ops : [] , host : XXX , db : XXXX , parallel : 5 , seconds : 5 }
*/
- BSONObj benchRun( const BSONObj& argsFake ) {
+ BSONObj benchRun( const BSONObj& argsFake, void* data ) {
assert( argsFake.firstElement().isABSONObj() );
BSONObj args = argsFake.firstElement().Obj();
@@ -109,7 +175,7 @@ namespace mongo {
if ( args["parallel"].isNumber() )
config.parallel = args["parallel"].numberInt();
if ( args["seconds"].isNumber() )
- config.seconds = args["seconds"].numberInt();
+ config.seconds = args["seconds"].number();
config.ops = args["ops"].Obj();
@@ -130,7 +196,7 @@ namespace mongo {
BSONObj before;
conn->simpleCommand( "admin" , &before , "serverStatus" );
- sleepsecs( config.seconds );
+ sleepmillis( (int)(1000.0 * config.seconds) );
BSONObj after;
conn->simpleCommand( "admin" , &after , "serverStatus" );
@@ -147,11 +213,14 @@ namespace mongo {
// compute actual ops/sec
- before = before["opcounters"].Obj();
- after = after["opcounters"].Obj();
+ before = before["opcounters"].Obj().copy();
+ after = after["opcounters"].Obj().copy();
+
+ bool totals = args["totals"].trueValue();
BSONObjBuilder buf;
- buf.append( "note" , "values per second" );
+ if ( ! totals )
+ buf.append( "note" , "values per second" );
{
BSONObjIterator i( after );
@@ -159,7 +228,9 @@ namespace mongo {
BSONElement e = i.next();
double x = e.number();
x = x - before[e.fieldName()].number();
- buf.append( e.fieldName() , x / config.seconds );
+ if ( ! totals )
+ x = x / config.seconds;
+ buf.append( e.fieldName() , x );
}
}
BSONObj zoo = buf.obj();
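
The bench.cpp additions above let an ops entry embed placeholders such as { _id : { "#RAND_INT" : [ 0 , 1000 ] } }; fixQuery rewrites each placeholder to min + rand() % (max - min), i.e. a fresh value in [min, max) on every execution. A standalone sketch of that substitution over a flat field -> range map (the map layout is illustrative only, not the recursive BSON walk the diff performs):

    #include <cstdlib>
    #include <ctime>
    #include <iostream>
    #include <map>
    #include <string>
    #include <utility>

    typedef std::map<std::string, std::pair<int, int> > QueryTemplate;  // field -> [min, max)

    std::map<std::string, int> fixQuery(const QueryTemplate& tmpl) {
        std::map<std::string, int> query;
        for (QueryTemplate::const_iterator it = tmpl.begin(); it != tmpl.end(); ++it) {
            int min = it->second.first, max = it->second.second;
            query[it->first] = min + (std::rand() % (max - min));  // same draw as _fixField
        }
        return query;
    }

    int main() {
        std::srand((unsigned)std::time(0));
        QueryTemplate tmpl;
        tmpl["_id"] = std::make_pair(0, 1000);     // like { _id : { "#RAND_INT" : [0,1000] } }
        std::map<std::string, int> q = fixQuery(tmpl);
        std::cout << "{ _id: " << q["_id"] << " }" << std::endl;  // a concrete query per op
        return 0;
    }
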
diff --git a/scripting/engine.cpp b/scripting/engine.cpp
index f9be639..1982940 100644
--- a/scripting/engine.cpp
+++ b/scripting/engine.cpp
@@ -85,10 +85,10 @@ namespace mongo {
}
- int Scope::invoke( const char* code , const BSONObj& args, int timeoutMs ) {
+ int Scope::invoke( const char* code , const BSONObj* args, const BSONObj* recv, int timeoutMs ) {
ScriptingFunction func = createFunction( code );
uassert( 10207 , "compile failed" , func );
- return invoke( func , args, timeoutMs );
+ return invoke( func , args, recv, timeoutMs );
}
bool Scope::execFile( const string& filename , bool printResult , bool reportError , bool assertOnError, int timeoutMs ) {
@@ -241,6 +241,27 @@ namespace mongo {
return f;
}
+ namespace JSFiles {
+ extern const JSFile collection;
+ extern const JSFile db;
+ extern const JSFile mongo;
+ extern const JSFile mr;
+ extern const JSFile query;
+ extern const JSFile utils;
+ extern const JSFile utils_sh;
+ }
+
+ void Scope::execCoreFiles() {
+ // keeping same order as in SConstruct
+ execSetup(JSFiles::utils);
+ execSetup(JSFiles::utils_sh);
+ execSetup(JSFiles::db);
+ execSetup(JSFiles::mongo);
+ execSetup(JSFiles::mr);
+ execSetup(JSFiles::query);
+ execSetup(JSFiles::collection);
+ }
+
typedef map< string , list<Scope*> > PoolToScopes;
class ScopeCache {
@@ -373,8 +394,12 @@ namespace mongo {
void setBoolean( const char *field , bool val ) {
_real->setBoolean( field , val );
}
- void setThis( const BSONObj * obj ) {
- _real->setThis( obj );
+// void setThis( const BSONObj * obj ) {
+// _real->setThis( obj );
+// }
+
+ void setFunction( const char *field , const char * code ) {
+ _real->setFunction(field, code);
}
ScriptingFunction createFunction( const char * code ) {
@@ -392,8 +417,8 @@ namespace mongo {
/**
* @return 0 on success
*/
- int invoke( ScriptingFunction func , const BSONObj& args, int timeoutMs , bool ignoreReturn ) {
- return _real->invoke( func , args , timeoutMs , ignoreReturn );
+ int invoke( ScriptingFunction func , const BSONObj* args, const BSONObj* recv, int timeoutMs , bool ignoreReturn, bool readOnlyArgs, bool readOnlyRecv ) {
+ return _real->invoke( func , args , recv, timeoutMs , ignoreReturn );
}
string getError() {
@@ -407,14 +432,18 @@ namespace mongo {
return _real->execFile( filename , printResult , reportError , assertOnError , timeoutMs );
}
- void injectNative( const char *field, NativeFunction func ) {
- _real->injectNative( field , func );
+ void injectNative( const char *field, NativeFunction func, void* data ) {
+ _real->injectNative( field , func, data );
}
void gc() {
_real->gc();
}
+ void append( BSONObjBuilder & builder , const char * fieldName , const char * scopeName ) {
+ _real->append(builder, fieldName, scopeName);
+ }
+
private:
string _pool;
Scope * _real;
diff --git a/scripting/engine.h b/scripting/engine.h
index 62afd77..1f9f1f5 100644
--- a/scripting/engine.h
+++ b/scripting/engine.h
@@ -27,18 +27,8 @@ namespace mongo {
const StringData& source;
};
- namespace JSFiles {
- extern const JSFile collection;
- extern const JSFile db;
- extern const JSFile mongo;
- extern const JSFile mr;
- extern const JSFile query;
- extern const JSFile servers;
- extern const JSFile utils;
- }
-
typedef unsigned long long ScriptingFunction;
- typedef BSONObj (*NativeFunction) ( const BSONObj &args );
+ typedef BSONObj (*NativeFunction) ( const BSONObj &args, void* data );
class Scope : boost::noncopyable {
public:
@@ -48,7 +38,7 @@ namespace mongo {
virtual void reset() = 0;
virtual void init( const BSONObj * data ) = 0;
void init( const char * data ) {
- BSONObj o( data , 0 );
+ BSONObj o( data );
init( &o );
}
@@ -79,14 +69,15 @@ namespace mongo {
virtual int type( const char *field ) = 0;
- void append( BSONObjBuilder & builder , const char * fieldName , const char * scopeName );
+ virtual void append( BSONObjBuilder & builder , const char * fieldName , const char * scopeName );
virtual void setElement( const char *field , const BSONElement& e ) = 0;
virtual void setNumber( const char *field , double val ) = 0;
virtual void setString( const char *field , const char * val ) = 0;
virtual void setObject( const char *field , const BSONObj& obj , bool readOnly=true ) = 0;
virtual void setBoolean( const char *field , bool val ) = 0;
- virtual void setThis( const BSONObj * obj ) = 0;
+ virtual void setFunction( const char *field , const char * code ) = 0;
+// virtual void setThis( const BSONObj * obj ) = 0;
virtual ScriptingFunction createFunction( const char * code );
@@ -94,18 +85,18 @@ namespace mongo {
/**
* @return 0 on success
*/
- virtual int invoke( ScriptingFunction func , const BSONObj& args, int timeoutMs = 0 , bool ignoreReturn = false ) = 0;
- void invokeSafe( ScriptingFunction func , const BSONObj& args, int timeoutMs = 0 ) {
- int res = invoke( func , args , timeoutMs );
+ virtual int invoke( ScriptingFunction func , const BSONObj* args, const BSONObj* recv, int timeoutMs = 0 , bool ignoreReturn = false, bool readOnlyArgs = false, bool readOnlyRecv = false ) = 0;
+ void invokeSafe( ScriptingFunction func , const BSONObj* args, const BSONObj* recv, int timeoutMs = 0, bool readOnlyArgs = false, bool readOnlyRecv = false ) {
+ int res = invoke( func , args , recv, timeoutMs, readOnlyArgs, readOnlyRecv );
if ( res == 0 )
return;
throw UserException( 9004 , (string)"invoke failed: " + getError() );
}
virtual string getError() = 0;
- int invoke( const char* code , const BSONObj& args, int timeoutMs = 0 );
- void invokeSafe( const char* code , const BSONObj& args, int timeoutMs = 0 ) {
- if ( invoke( code , args , timeoutMs ) == 0 )
+ int invoke( const char* code , const BSONObj* args, const BSONObj* recv, int timeoutMs = 0 );
+ void invokeSafe( const char* code , const BSONObj* args, const BSONObj* recv, int timeoutMs = 0 ) {
+ if ( invoke( code , args , recv, timeoutMs ) == 0 )
return;
throw UserException( 9005 , (string)"invoke failed: " + getError() );
}
@@ -119,19 +110,11 @@ namespace mongo {
execSetup(file.source, file.name);
}
- void execCoreFiles() {
- // keeping same order as in SConstruct
- execSetup(JSFiles::utils);
- execSetup(JSFiles::db);
- execSetup(JSFiles::mongo);
- execSetup(JSFiles::mr);
- execSetup(JSFiles::query);
- execSetup(JSFiles::collection);
- }
+ void execCoreFiles();
virtual bool execFile( const string& filename , bool printResult , bool reportError , bool assertOnError, int timeoutMs = 0 );
- virtual void injectNative( const char *field, NativeFunction func ) = 0;
+ virtual void injectNative( const char *field, NativeFunction func, void* data = 0 ) = 0;
virtual void gc() = 0;
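
engine.h above changes NativeFunction to take a void* data pointer and injectNative to store it alongside the function, the usual C-style callback-plus-context pattern. A self-contained sketch of that pattern with stand-in types (Registry and the int-based signature are illustrative only):

    #include <iostream>
    #include <map>
    #include <string>
    #include <utility>

    typedef int (*NativeFunction)(int arg, void* data);     // stand-in signature

    struct Registry {
        void injectNative(const std::string& name, NativeFunction fn, void* data = 0) {
            _fns[name] = std::make_pair(fn, data);          // remember fn and its context
        }
        int call(const std::string& name, int arg) {        // only call registered names
            std::pair<NativeFunction, void*> e = _fns[name];
            return e.first(arg, e.second);                  // hand the context back
        }
        std::map<std::string, std::pair<NativeFunction, void*> > _fns;
    };

    static int addBase(int arg, void* data) {
        return arg + *static_cast<int*>(data);              // context carries per-binding state
    }

    int main() {
        int base = 40;
        Registry r;
        r.injectNative("addBase", &addBase, &base);
        std::cout << r.call("addBase", 2) << std::endl;     // prints 42
        return 0;
    }
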
diff --git a/scripting/engine_java.cpp b/scripting/engine_java.cpp
index fc8945f..5738816 100644
--- a/scripting/engine_java.cpp
+++ b/scripting/engine_java.cpp
@@ -43,7 +43,7 @@ namespace mongo {
-#include "../util/message.h"
+#include "../util/net/message.h"
#include "../db/db.h"
using namespace std;
@@ -405,15 +405,17 @@ namespace mongo {
if ( guess == 0 )
return BSONObj();
- char * buf = (char *) malloc(guess);
- jobject bb = _getEnv()->NewDirectByteBuffer( (void*)buf , guess );
+ BSONObj::Holder* holder = (BSONObj::Holder*) malloc(guess + sizeof(unsigned));
+ holder->zero()
+
+ jobject bb = _getEnv()->NewDirectByteBuffer( (void*)holder->data , guess );
jassert( bb );
int len = _getEnv()->CallStaticIntMethod( _dbhook , _scopeGetObject , id , _getEnv()->NewStringUTF( field ) , bb );
_getEnv()->DeleteLocalRef( bb );
jassert( len > 0 && len < guess );
- BSONObj obj(buf, true);
+ BSONObj obj(holder);
assert( obj.objsize() <= guess );
return obj;
}
diff --git a/scripting/engine_spidermonkey.cpp b/scripting/engine_spidermonkey.cpp
index aed7b13..64fe21c 100644
--- a/scripting/engine_spidermonkey.cpp
+++ b/scripting/engine_spidermonkey.cpp
@@ -242,6 +242,10 @@ namespace mongo {
return val;
}
+ int toNumberInt( JSObject *o ) {
+ return (boost::uint32_t)(boost::int32_t) getNumber( o, "floatApprox" );
+ }
+
double toNumber( jsval v ) {
double d;
uassert( 10214 , "not a number" , JS_ValueToNumber( _context , v , &d ) );
@@ -492,7 +496,6 @@ namespace mongo {
return func;
}
-
jsval toval( double d ) {
jsval val;
assert( JS_NewNumberValue( _context, d , &val ) );
@@ -531,7 +534,7 @@ namespace mongo {
JSObject * toJSObject( const BSONObj * obj , bool readOnly=false ) {
static string ref = "$ref";
- if ( ref == obj->firstElement().fieldName() ) {
+ if ( ref == obj->firstElementFieldName() ) {
JSObject * o = JS_NewObject( _context , &dbref_class , NULL, NULL);
CHECKNEWOBJECT(o,_context,"toJSObject1");
assert( JS_SetPrivate( _context , o , (void*)(new BSONHolder( obj->getOwned() ) ) ) );
@@ -551,8 +554,9 @@ namespace mongo {
void makeLongObj( long long n, JSObject * o ) {
boost::uint64_t val = (boost::uint64_t)n;
CHECKNEWOBJECT(o,_context,"NumberLong1");
- setProperty( o , "floatApprox" , toval( (double)(boost::int64_t)( val ) ) );
- if ( (boost::int64_t)val != (boost::int64_t)(double)(boost::int64_t)( val ) ) {
+ double floatApprox = (double)(boost::int64_t)val;
+ setProperty( o , "floatApprox" , toval( floatApprox ) );
+ if ( (boost::int64_t)val != (boost::int64_t)floatApprox ) {
// using 2 doubles here instead of a single double because certain double
// bit patterns represent undefined values and sm might trash them
setProperty( o , "top" , toval( (double)(boost::uint32_t)( val >> 32 ) ) );
@@ -566,6 +570,19 @@ namespace mongo {
return OBJECT_TO_JSVAL( o );
}
+ void makeIntObj( int n, JSObject * o ) {
+ boost::uint32_t val = (boost::uint32_t)n;
+ CHECKNEWOBJECT(o,_context,"NumberInt1");
+ double floatApprox = (double)(boost::int32_t)val;
+ setProperty( o , "floatApprox" , toval( floatApprox ) );
+ }
+
+ jsval toval( int n ) {
+ JSObject * o = JS_NewObject( _context , &numberint_class , 0 , 0 );
+ makeIntObj( n, o );
+ return OBJECT_TO_JSVAL( o );
+ }
+
jsval toval( const BSONElement& e ) {
switch( e.type() ) {
@@ -576,6 +593,8 @@ namespace mongo {
case NumberDouble:
case NumberInt:
return toval( e.number() );
+// case NumberInt:
+// return toval( e.numberInt() );
case Symbol: // TODO: should we make a special class for this
case String:
return toval( e.valuestr() );
@@ -651,7 +670,7 @@ namespace mongo {
return OBJECT_TO_JSVAL( JS_GetFunctionObject( func ) );
}
case Date:
- return OBJECT_TO_JSVAL( js_NewDateObjectMsec( _context , (jsdouble) e.date().millis ) );
+ return OBJECT_TO_JSVAL( js_NewDateObjectMsec( _context , (jsdouble) ((long long)e.date().millis) ) );
case MinKey:
return OBJECT_TO_JSVAL( JS_NewObject( _context , &minkey_class , 0 , 0 ) );
@@ -903,6 +922,68 @@ namespace mongo {
// --- global helpers ---
+ JSBool hexToBinData(JSContext * cx, jsval *rval, int subtype, string s) {
+ JSObject * o = JS_NewObject( cx , &bindata_class , 0 , 0 );
+ CHECKNEWOBJECT(o,_context,"Bindata_BinData1");
+ int len = s.size() / 2;
+ char * data = new char[len];
+ char *p = data;
+ const char *src = s.c_str();
+ for( size_t i = 0; i+1 < s.size(); i += 2 ) {
+ *p++ = fromHex(src + i);
+ }
+ assert( JS_SetPrivate( cx , o , new BinDataHolder( data , len ) ) );
+ Convertor c(cx);
+ c.setProperty( o, "len", c.toval((double)len) );
+ c.setProperty( o, "type", c.toval((double)subtype) );
+ *rval = OBJECT_TO_JSVAL( o );
+ delete data;
+ return JS_TRUE;
+ }
+
+ JSBool _HexData( JSContext * cx , JSObject * obj , uintN argc, jsval *argv, jsval *rval ) {
+ Convertor c( cx );
+ if ( argc != 2 ) {
+ JS_ReportError( cx , "HexData needs 2 arguments -- HexData(subtype,hexstring)" );
+ return JS_FALSE;
+ }
+ int type = (int)c.toNumber( argv[ 0 ] );
+ if ( type == 2 ) {
+ JS_ReportError( cx , "BinData subtype 2 is deprecated" );
+ return JS_FALSE;
+ }
+ string s = c.toString(argv[1]);
+ return hexToBinData(cx, rval, type, s);
+ }
+
+ JSBool _UUID( JSContext * cx , JSObject * obj , uintN argc, jsval *argv, jsval *rval ) {
+ Convertor c( cx );
+ if ( argc != 1 ) {
+ JS_ReportError( cx , "UUID needs argument -- UUID(hexstring)" );
+ return JS_FALSE;
+ }
+ string s = c.toString(argv[0]);
+ if( s.size() != 32 ) {
+ JS_ReportError( cx , "bad UUID hex string len" );
+ return JS_FALSE;
+ }
+ return hexToBinData(cx, rval, 3, s);
+ }
+
+ JSBool _MD5( JSContext * cx , JSObject * obj , uintN argc, jsval *argv, jsval *rval ) {
+ Convertor c( cx );
+ if ( argc != 1 ) {
+ JS_ReportError( cx , "MD5 needs argument -- MD5(hexstring)" );
+ return JS_FALSE;
+ }
+ string s = c.toString(argv[0]);
+ if( s.size() != 32 ) {
+ JS_ReportError( cx , "bad MD5 hex string len" );
+ return JS_FALSE;
+ }
+ return hexToBinData(cx, rval, 5, s);
+ }
+
JSBool native_print( JSContext * cx , JSObject * obj , uintN argc, jsval *argv, jsval *rval ) {
stringstream ss;
Convertor c( cx );
@@ -920,6 +1001,7 @@ namespace mongo {
Convertor c(cx);
NativeFunction func = (NativeFunction)((long long)c.getNumber( obj , "x" ) );
+ void* data = (void*)((long long)c.getNumber( obj , "y" ) );
assert( func );
BSONObj a;
@@ -934,7 +1016,7 @@ namespace mongo {
BSONObj out;
try {
- out = func( a );
+ out = func( a, data );
}
catch ( std::exception& e ) {
JS_ReportError( cx , e.what() );
@@ -963,6 +1045,9 @@ namespace mongo {
{ "nativeHelper" , &native_helper , 1 , 0 , 0 } ,
{ "load" , &native_load , 1 , 0 , 0 } ,
{ "gc" , &native_gc , 1 , 0 , 0 } ,
+ { "UUID", &_UUID, 0, 0, 0 } ,
+ { "MD5", &_MD5, 0, 0, 0 } ,
+ { "HexData", &_HexData, 0, 0, 0 } ,
{ 0 , 0 , 0 , 0 , 0 }
};
@@ -1050,6 +1135,7 @@ namespace mongo {
if ( val != JSVAL_NULL && val != JSVAL_VOID && JSVAL_IS_OBJECT( val ) ) {
// TODO: this is a hack to get around sub objects being modified
+ // basically right now whenever a sub object is read we mark whole obj as possibly modified
JSObject * oo = JSVAL_TO_OBJECT( val );
if ( JS_InstanceOf( cx , oo , &bson_class , 0 ) ||
JS_IsArrayObject( cx , oo ) ) {
@@ -1346,6 +1432,12 @@ namespace mongo {
}
}
+ void setFunction( const char *field , const char * code ) {
+ smlock;
+ jsval v = OBJECT_TO_JSVAL(JS_GetFunctionObject(_convertor->compileFunction(code)));
+ JS_SetProperty( _context , _global , field , &v );
+ }
+
void rename( const char * from , const char * to ) {
smlock;
jsval v;
@@ -1462,33 +1554,35 @@ namespace mongo {
return worked;
}
- int invoke( JSFunction * func , const BSONObj& args, int timeoutMs , bool ignoreReturn ) {
+ int invoke( JSFunction * func , const BSONObj* args, const BSONObj* recv, int timeoutMs , bool ignoreReturn, bool readOnlyArgs, bool readOnlyRecv ) {
smlock;
precall();
assert( JS_EnterLocalRootScope( _context ) );
- int nargs = args.nFields();
+ int nargs = args ? args->nFields() : 0;
scoped_array<jsval> smargsPtr( new jsval[nargs] );
if ( nargs ) {
- BSONObjIterator it( args );
+ BSONObjIterator it( *args );
for ( int i=0; i<nargs; i++ ) {
smargsPtr[i] = _convertor->toval( it.next() );
}
}
- if ( args.isEmpty() ) {
+ if ( !args ) {
_convertor->setProperty( _global , "args" , JSVAL_NULL );
}
else {
- setObject( "args" , args , true ); // this is for backwards compatability
+                setObject( "args" , *args , true ); // this is for backwards compatibility
}
JS_LeaveLocalRootScope( _context );
installInterrupt( timeoutMs );
jsval rval;
+ setThis(recv);
JSBool ret = JS_CallFunction( _context , _this ? _this : _global , func , nargs , smargsPtr.get() , &rval );
+ setThis(0);
uninstallInterrupt( timeoutMs );
if ( !ret ) {
@@ -1502,8 +1596,8 @@ namespace mongo {
return 0;
}
- int invoke( ScriptingFunction funcAddr , const BSONObj& args, int timeoutMs = 0 , bool ignoreReturn = 0 ) {
- return invoke( (JSFunction*)funcAddr , args , timeoutMs , ignoreReturn );
+ int invoke( ScriptingFunction funcAddr , const BSONObj* args, const BSONObj* recv, int timeoutMs = 0 , bool ignoreReturn = 0, bool readOnlyArgs = false, bool readOnlyRecv = false ) {
+ return invoke( (JSFunction*)funcAddr , args , recv, timeoutMs , ignoreReturn, readOnlyArgs, readOnlyRecv);
}
void gotError( string s ) {
@@ -1514,13 +1608,18 @@ namespace mongo {
return _error;
}
- void injectNative( const char *field, NativeFunction func ) {
+ void injectNative( const char *field, NativeFunction func, void* data ) {
smlock;
string name = field;
_convertor->setProperty( _global , (name + "_").c_str() , _convertor->toval( (double)(long long)func ) );
stringstream code;
- code << field << "_" << " = { x : " << field << "_ }; ";
+ if (data) {
+ _convertor->setProperty( _global , (name + "_data_").c_str() , _convertor->toval( (double)(long long)data ) );
+ code << field << "_" << " = { x : " << field << "_ , y: " << field << "_data_ }; ";
+ } else {
+ code << field << "_" << " = { x : " << field << "_ }; ";
+ }
code << field << " = function(){ return nativeHelper.apply( " << field << "_ , arguments ); }";
exec( code.str() );
}
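
hexToBinData above decodes a hex string two characters at a time into a byte buffer before wrapping it as BinData, and the new UUID/MD5/HexData shell helpers all funnel into it with fixed subtypes. A small standalone sketch of just the hex decoding step (fromHex/hexDigit here are local helpers, not the mongo utilities of the same name):

    #include <cassert>
    #include <cctype>
    #include <string>
    #include <vector>

    static int hexDigit(char c) {
        if (std::isdigit(static_cast<unsigned char>(c))) return c - '0';
        return std::tolower(static_cast<unsigned char>(c)) - 'a' + 10;
    }

    static unsigned char fromHex(const char* two) {         // "ff" -> 0xff
        return static_cast<unsigned char>(hexDigit(two[0]) * 16 + hexDigit(two[1]));
    }

    std::vector<unsigned char> hexToBytes(const std::string& s) {
        std::vector<unsigned char> out;
        for (size_t i = 0; i + 1 < s.size(); i += 2)        // same loop shape as the diff
            out.push_back(fromHex(s.c_str() + i));
        return out;
    }

    int main() {
        std::vector<unsigned char> b = hexToBytes("00ff10");
        assert(b.size() == 3 && b[0] == 0x00 && b[1] == 0xff && b[2] == 0x10);
        return 0;
    }
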
diff --git a/scripting/engine_spidermonkey.h b/scripting/engine_spidermonkey.h
index 3ee7495..9fd430d 100644
--- a/scripting/engine_spidermonkey.h
+++ b/scripting/engine_spidermonkey.h
@@ -21,18 +21,6 @@
// START inc hacking
-#if defined( MOZJS )
-
-#define MOZILLA_1_8_BRANCH
-
-#include "mozjs/jsapi.h"
-#include "mozjs/jsdate.h"
-#include "mozjs/jsregexp.h"
-
-#warning if you are using an ubuntu version of spider monkey, we recommend installing spider monkey from source
-
-#elif defined( OLDJS )
-
#ifdef WIN32
#include "jstypes.h"
#undef JS_PUBLIC_API
@@ -46,30 +34,11 @@
#include "jsdate.h"
#include "jsregexp.h"
-#else
-
-#include "js/jsapi.h"
-#include "js/jsobj.h"
-#include "js/jsdate.h"
-#include "js/jsregexp.h"
-
-#endif
-
// END inc hacking
// -- SM 1.6 hacks ---
#ifndef JSCLASS_GLOBAL_FLAGS
-
-#warning old version of spider monkey ( probably 1.6 ) you should upgrade to at least 1.7
-
-#define JSCLASS_GLOBAL_FLAGS 0
-
-JSBool JS_CStringsAreUTF8() {
- return false;
-}
-
-#define SM16
-
+#error old version of spider monkey ( probably 1.6 ) you should upgrade to at least 1.7
#endif
// -- END SM 1.6 hacks ---
@@ -95,6 +64,7 @@ namespace mongo {
extern JSClass bindata_class;
extern JSClass timestamp_class;
extern JSClass numberlong_class;
+ extern JSClass numberint_class;
extern JSClass minkey_class;
extern JSClass maxkey_class;
diff --git a/scripting/engine_v8.cpp b/scripting/engine_v8.cpp
index cd186b4..fd69d66 100644
--- a/scripting/engine_v8.cpp
+++ b/scripting/engine_v8.cpp
@@ -15,6 +15,14 @@
* limitations under the License.
*/
+#if defined(_WIN32)
+/** this is a hack - v8stdint.h defined uint16_t etc. on _WIN32 only, and that collides with
+ our usage of boost */
+#include "boost/cstdint.hpp"
+using namespace boost;
+#define V8STDINT_H_
+#endif
+
#include "engine_v8.h"
#include "v8_wrapper.h"
@@ -28,9 +36,239 @@ namespace mongo {
// guarded by v8 mutex
map< unsigned, int > __interruptSpecToThreadId;
+ /**
+ * Unwraps a BSONObj from the JS wrapper
+ */
+ static BSONObj* unwrapBSONObj(const Handle<v8::Object>& obj) {
+ Handle<External> field = Handle<External>::Cast(obj->GetInternalField(0));
+ if (field.IsEmpty() || !field->IsExternal())
+ return 0;
+ void* ptr = field->Value();
+ return (BSONObj*)ptr;
+ }
+
+ static void weakRefBSONCallback(v8::Persistent<v8::Value> p, void* scope) {
+ // should we lock here? no idea, and no doc from v8 of course
+ HandleScope handle_scope;
+ if (!p.IsNearDeath())
+ return;
+ Handle<External> field = Handle<External>::Cast(p->ToObject()->GetInternalField(0));
+ BSONObj* data = (BSONObj*) field->Value();
+ delete data;
+ p.Dispose();
+ }
+
+ Persistent<v8::Object> V8Scope::wrapBSONObject(Local<v8::Object> obj, BSONObj* data) {
+ obj->SetInternalField(0, v8::External::New(data));
+ Persistent<v8::Object> p = Persistent<v8::Object>::New(obj);
+ p.MakeWeak(this, weakRefBSONCallback);
+ return p;
+ }
+
+ static void weakRefArrayCallback(v8::Persistent<v8::Value> p, void* scope) {
+ // should we lock here? no idea, and no doc from v8 of course
+ HandleScope handle_scope;
+ if (!p.IsNearDeath())
+ return;
+ Handle<External> field = Handle<External>::Cast(p->ToObject()->GetInternalField(0));
+ char* data = (char*) field->Value();
+ delete [] data;
+ p.Dispose();
+ }
+
+ Persistent<v8::Object> V8Scope::wrapArrayObject(Local<v8::Object> obj, char* data) {
+ obj->SetInternalField(0, v8::External::New(data));
+ Persistent<v8::Object> p = Persistent<v8::Object>::New(obj);
+ p.MakeWeak(this, weakRefArrayCallback);
+ return p;
+ }
+
+ static Handle<v8::Value> namedGet(Local<v8::String> name, const v8::AccessorInfo &info) {
+ // all properties should be set, otherwise means builtin or deleted
+ if (!(info.This()->HasRealNamedProperty(name)))
+ return v8::Handle<v8::Value>();
+
+ Handle<v8::Value> val = info.This()->GetRealNamedProperty(name);
+ if (!val->IsUndefined()) {
+ // value already cached
+ return val;
+ }
+
+ string key = toSTLString(name);
+ BSONObj *obj = unwrapBSONObj(info.Holder());
+ BSONElement elmt = obj->getField(key.c_str());
+ if (elmt.eoo())
+ return Handle<Value>();
+ Local< External > scp = External::Cast( *info.Data() );
+ V8Scope* scope = (V8Scope*)(scp->Value());
+ val = scope->mongoToV8Element(elmt, false);
+ info.This()->ForceSet(name, val);
+
+ if (elmt.type() == mongo::Object || elmt.type() == mongo::Array) {
+ // if accessing a subobject, it may get modified and base obj would not know
+ // have to set base as modified, which means some optim is lost
+ info.This()->SetHiddenValue(scope->V8STR_MODIFIED, v8::Boolean::New(true));
+ }
+ return val;
+ }
+
+ static Handle<v8::Value> namedGetRO(Local<v8::String> name, const v8::AccessorInfo &info) {
+ string key = toSTLString(name);
+ BSONObj *obj = unwrapBSONObj(info.Holder());
+ BSONElement elmt = obj->getField(key.c_str());
+ if (elmt.eoo())
+ return Handle<Value>();
+ Local< External > scp = External::Cast( *info.Data() );
+ V8Scope* scope = (V8Scope*)(scp->Value());
+ Handle<v8::Value> val = scope->mongoToV8Element(elmt, true);
+ return val;
+ }
+
+ static Handle<v8::Value> namedSet(Local<v8::String> name, Local<v8::Value> value_obj, const v8::AccessorInfo& info) {
+ Local< External > scp = External::Cast( *info.Data() );
+ V8Scope* scope = (V8Scope*)(scp->Value());
+ info.This()->SetHiddenValue(scope->V8STR_MODIFIED, v8::Boolean::New(true));
+ return Handle<Value>();
+ }
+
+ static Handle<v8::Array> namedEnumerator(const AccessorInfo &info) {
+ BSONObj *obj = unwrapBSONObj(info.Holder());
+ Handle<v8::Array> arr = Handle<v8::Array>(v8::Array::New(obj->nFields()));
+ int i = 0;
+ Local< External > scp = External::Cast( *info.Data() );
+ V8Scope* scope = (V8Scope*)(scp->Value());
+ // note here that if keys are parseable number, v8 will access them using index
+ for ( BSONObjIterator it(*obj); it.more(); ++i) {
+ const BSONElement& f = it.next();
+// arr->Set(i, v8::String::NewExternal(new ExternalString(f.fieldName())));
+ Handle<v8::String> name = scope->getV8Str(f.fieldName());
+ arr->Set(i, name);
+ }
+ return arr;
+ }
+
+ Handle<Boolean> namedDelete( Local<v8::String> property, const AccessorInfo& info ) {
+ Local< External > scp = External::Cast( *info.Data() );
+ V8Scope* scope = (V8Scope*)(scp->Value());
+ info.This()->SetHiddenValue(scope->V8STR_MODIFIED, v8::Boolean::New(true));
+ return Handle<Boolean>();
+ }
+
+// v8::Handle<v8::Integer> namedQuery(Local<v8::String> property, const AccessorInfo& info) {
+// string key = ToString(property);
+// return v8::Integer::New(None);
+// }
+
+ static Handle<v8::Value> indexedGet(uint32_t index, const v8::AccessorInfo &info) {
+ // all properties should be set, otherwise means builtin or deleted
+ if (!(info.This()->HasRealIndexedProperty(index)))
+ return v8::Handle<v8::Value>();
+
+ StringBuilder ss;
+ ss << index;
+ string key = ss.str();
+ Local< External > scp = External::Cast( *info.Data() );
+ V8Scope* scope = (V8Scope*)(scp->Value());
+ // cannot get v8 to properly cache the indexed val in the js object
+// Handle<v8::String> name = scope->getV8Str(key);
+// // v8 API really confusing here, must check existence on index, but then fetch with name
+// if (info.This()->HasRealIndexedProperty(index)) {
+// Handle<v8::Value> val = info.This()->GetRealNamedProperty(name);
+// if (!val.IsEmpty() && !val->IsNull())
+// return val;
+// }
+ BSONObj *obj = unwrapBSONObj(info.Holder());
+ BSONElement elmt = obj->getField(key);
+ if (elmt.eoo())
+ return Handle<Value>();
+ Handle<Value> val = scope->mongoToV8Element(elmt, false);
+// info.This()->ForceSet(name, val);
+
+ if (elmt.type() == mongo::Object || elmt.type() == mongo::Array) {
+ // if accessing a subobject, it may get modified and base obj would not know
+ // have to set base as modified, which means some optim is lost
+ info.This()->SetHiddenValue(scope->V8STR_MODIFIED, v8::Boolean::New(true));
+ }
+ return val;
+ }
+
+ Handle<Boolean> indexedDelete( uint32_t index, const AccessorInfo& info ) {
+ Local< External > scp = External::Cast( *info.Data() );
+ V8Scope* scope = (V8Scope*)(scp->Value());
+ info.This()->SetHiddenValue(scope->V8STR_MODIFIED, v8::Boolean::New(true));
+ return Handle<Boolean>();
+ }
+
+ static Handle<v8::Value> indexedGetRO(uint32_t index, const v8::AccessorInfo &info) {
+ StringBuilder ss;
+ ss << index;
+ string key = ss.str();
+ Local< External > scp = External::Cast( *info.Data() );
+ V8Scope* scope = (V8Scope*)(scp->Value());
+ // cannot get v8 to properly cache the indexed val in the js object
+// Handle<v8::String> name = scope->getV8Str(key);
+// // v8 API really confusing here, must check existence on index, but then fetch with name
+// if (info.This()->HasRealIndexedProperty(index)) {
+// Handle<v8::Value> val = info.This()->GetRealNamedProperty(name);
+// if (!val.IsEmpty() && !val->IsNull())
+// return val;
+// }
+ BSONObj *obj = unwrapBSONObj(info.Holder());
+ BSONElement elmt = obj->getField(key);
+ if (elmt.eoo())
+ return Handle<Value>();
+ Handle<Value> val = scope->mongoToV8Element(elmt, true);
+// info.This()->ForceSet(name, val);
+ return val;
+ }
+
+ static Handle<v8::Value> indexedSet(uint32_t index, Local<v8::Value> value_obj, const v8::AccessorInfo& info) {
+ Local< External > scp = External::Cast( *info.Data() );
+ V8Scope* scope = (V8Scope*)(scp->Value());
+ info.This()->SetHiddenValue(scope->V8STR_MODIFIED, v8::Boolean::New(true));
+ return Handle<Value>();
+ }
+
+// static Handle<v8::Array> indexedEnumerator(const AccessorInfo &info) {
+// BSONObj *obj = unwrapBSONObj(info.Holder());
+// Handle<v8::Array> arr = Handle<v8::Array>(v8::Array::New(obj->nFields()));
+// Local< External > scp = External::Cast( *info.Data() );
+// V8Scope* scope = (V8Scope*)(scp->Value());
+// int i = 0;
+// for ( BSONObjIterator it(*obj); it.more(); ++i) {
+// const BSONElement& f = it.next();
+//// arr->Set(i, v8::String::NewExternal(new ExternalString(f.fieldName())));
+// arr->Set(i, scope->getV8Str(f.fieldName()));
+// }
+// return arr;
+// }
+
+ Handle<Value> NamedReadOnlySet( Local<v8::String> property, Local<Value> value, const AccessorInfo& info ) {
+ string key = toSTLString(property);
+ cout << "cannot write property " << key << " to read-only object" << endl;
+ return value;
+ }
+
+ Handle<Boolean> NamedReadOnlyDelete( Local<v8::String> property, const AccessorInfo& info ) {
+ string key = toSTLString(property);
+ cout << "cannot delete property " << key << " from read-only object" << endl;
+ return Boolean::New( false );
+ }
+
+ Handle<Value> IndexedReadOnlySet( uint32_t index, Local<Value> value, const AccessorInfo& info ) {
+ cout << "cannot write property " << index << " to read-only array" << endl;
+ return value;
+ }
+
+ Handle<Boolean> IndexedReadOnlyDelete( uint32_t index, const AccessorInfo& info ) {
+ cout << "cannot delete property " << index << " from read-only array" << endl;
+ return Boolean::New( false );
+ }
+
// --- engine ---
- V8ScriptEngine::V8ScriptEngine() {}
+ V8ScriptEngine::V8ScriptEngine() {
+ }
V8ScriptEngine::~V8ScriptEngine() {
}
@@ -69,50 +307,105 @@ namespace mongo {
_context = Context::New();
Context::Scope context_scope( _context );
_global = Persistent< v8::Object >::New( _context->Global() );
-
- _this = Persistent< v8::Object >::New( v8::Object::New() );
-
- _global->Set(v8::String::New("print"), newV8Function< Print >()->GetFunction() );
- _global->Set(v8::String::New("version"), newV8Function< Version >()->GetFunction() );
-
- _global->Set(v8::String::New("load"),
- v8::FunctionTemplate::New( v8Callback< loadCallback >, v8::External::New(this))->GetFunction() );
-
- _wrapper = Persistent< v8::Function >::New( getObjectWrapperTemplate()->GetFunction() );
-
- _global->Set(v8::String::New("gc"), newV8Function< GCV8 >()->GetFunction() );
-
-
- installDBTypes( _global );
+ _emptyObj = Persistent< v8::Object >::New( v8::Object::New() );
+
+ // initialize lazy object template
+ lzObjectTemplate = Persistent<ObjectTemplate>::New(ObjectTemplate::New());
+ lzObjectTemplate->SetInternalFieldCount( 1 );
+ lzObjectTemplate->SetNamedPropertyHandler(namedGet, namedSet, 0, namedDelete, 0, v8::External::New(this));
+ lzObjectTemplate->SetIndexedPropertyHandler(indexedGet, indexedSet, 0, indexedDelete, 0, v8::External::New(this));
+
+ roObjectTemplate = Persistent<ObjectTemplate>::New(ObjectTemplate::New());
+ roObjectTemplate->SetInternalFieldCount( 1 );
+ roObjectTemplate->SetNamedPropertyHandler(namedGetRO, NamedReadOnlySet, 0, NamedReadOnlyDelete, namedEnumerator, v8::External::New(this));
+ roObjectTemplate->SetIndexedPropertyHandler(indexedGetRO, IndexedReadOnlySet, 0, IndexedReadOnlyDelete, 0, v8::External::New(this));
+
+ // initialize lazy array template
+ // unfortunately it is not possible to create a true v8 array from a template,
+ // so we use an object template and copy the array methods over;
+ // this creates issues when calling certain methods that check the array type
+ lzArrayTemplate = Persistent<ObjectTemplate>::New(ObjectTemplate::New());
+ lzArrayTemplate->SetInternalFieldCount( 1 );
+ lzArrayTemplate->SetIndexedPropertyHandler(indexedGet, 0, 0, 0, 0, v8::External::New(this));
+
+ internalFieldObjects = Persistent<ObjectTemplate>::New(ObjectTemplate::New());
+ internalFieldObjects->SetInternalFieldCount( 1 );
+
+ V8STR_CONN = getV8Str( "_conn" );
+ V8STR_ID = getV8Str( "_id" );
+ V8STR_LENGTH = getV8Str( "length" );
+ V8STR_LEN = getV8Str( "len" );
+ V8STR_TYPE = getV8Str( "type" );
+ V8STR_ISOBJECTID = getV8Str( "isObjectId" );
+ V8STR_RETURN = getV8Str( "return" );
+ V8STR_ARGS = getV8Str( "args" );
+ V8STR_T = getV8Str( "t" );
+ V8STR_I = getV8Str( "i" );
+ V8STR_EMPTY = getV8Str( "" );
+ V8STR_MINKEY = getV8Str( "$MinKey" );
+ V8STR_MAXKEY = getV8Str( "$MaxKey" );
+ V8STR_NUMBERLONG = getV8Str( "__NumberLong" );
+ V8STR_NUMBERINT = getV8Str( "__NumberInt" );
+ V8STR_DBPTR = getV8Str( "__DBPointer" );
+ V8STR_BINDATA = getV8Str( "__BinData" );
+ V8STR_NATIVE_FUNC = getV8Str( "_native_function" );
+ V8STR_NATIVE_DATA = getV8Str( "_native_data" );
+ V8STR_V8_FUNC = getV8Str( "_v8_function" );
+ V8STR_RO = getV8Str( "_ro" );
+ V8STR_MODIFIED = getV8Str( "_mod" );
+
+ injectV8Function("print", Print);
+ injectV8Function("version", Version);
+ injectV8Function("load", load);
+
+ _wrapper = Persistent< v8::Function >::New( getObjectWrapperTemplate(this)->GetFunction() );
+
+ injectV8Function("gc", GCV8);
+
+ installDBTypes( this, _global );
}
V8Scope::~V8Scope() {
V8Lock l;
Context::Scope context_scope( _context );
_wrapper.Dispose();
- _this.Dispose();
+ _emptyObj.Dispose();
for( unsigned i = 0; i < _funcs.size(); ++i )
_funcs[ i ].Dispose();
_funcs.clear();
_global.Dispose();
_context.Dispose();
+ std::map <string, v8::Persistent <v8::String> >::iterator it = _strCache.begin();
+ std::map <string, v8::Persistent <v8::String> >::iterator end = _strCache.end();
+ while (it != end) {
+ it->second.Dispose();
+ ++it;
+ }
+ lzObjectTemplate.Dispose();
+ lzArrayTemplate.Dispose();
+ roObjectTemplate.Dispose();
+ internalFieldObjects.Dispose();
}
- Handle< Value > V8Scope::nativeCallback( const Arguments &args ) {
+ /**
+ * JS callback that will call a C++ function with BSON arguments.
+ */
+ Handle< Value > V8Scope::nativeCallback( V8Scope* scope, const Arguments &args ) {
V8Lock l;
HandleScope handle_scope;
- Local< External > f = External::Cast( *args.Callee()->Get( v8::String::New( "_native_function" ) ) );
+ Local< External > f = External::Cast( *args.Callee()->Get( scope->V8STR_NATIVE_FUNC ) );
NativeFunction function = (NativeFunction)(f->Value());
+ Local< External > data = External::Cast( *args.Callee()->Get( scope->V8STR_NATIVE_DATA ) );
BSONObjBuilder b;
for( int i = 0; i < args.Length(); ++i ) {
stringstream ss;
ss << i;
- v8ToMongoElement( b, v8::String::New( "foo" ), ss.str(), args[ i ] );
+ scope->v8ToMongoElement( b, scope->V8STR_EMPTY, ss.str(), args[ i ] );
}
BSONObj nativeArgs = b.obj();
BSONObj ret;
try {
- ret = function( nativeArgs );
+ ret = function( nativeArgs, data->Value() );
}
catch( const std::exception &e ) {
return v8::ThrowException(v8::String::New(e.what()));
@@ -120,26 +413,63 @@ namespace mongo {
catch( ... ) {
return v8::ThrowException(v8::String::New("unknown exception"));
}
- return handle_scope.Close( mongoToV8Element( ret.firstElement() ) );
+ return handle_scope.Close( scope->mongoToV8Element( ret.firstElement() ) );
}
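For illustration, a minimal native function matching the (BSONObj, void*) signature that nativeCallback now dispatches, registered the same way utils.cpp below registers hex_md5/sleep; the names and body are assumptions, not part of this change:

    BSONObj nativeEcho( const BSONObj &args , void* data ) {
        // args carries the JS arguments keyed "0", "1", ...; the JS return value is taken
        // from the first element of the returned object
        BSONObjBuilder b;
        b.appendAs( args.firstElement() , "" );
        return b.obj();
    }
    void installNativeEcho( Scope& scope ) {              // hypothetical installer
        scope.injectNative( "nativeEcho" , nativeEcho );  // data defaults to 0 and is handed back on every call
    }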
- Handle< Value > V8Scope::loadCallback( const Arguments &args ) {
- V8Lock l;
- HandleScope handle_scope;
- Handle<External> field = Handle<External>::Cast(args.Data());
- void* ptr = field->Value();
- V8Scope* self = static_cast<V8Scope*>(ptr);
-
- Context::Scope context_scope(self->_context);
+ Handle< Value > V8Scope::load( V8Scope* scope, const Arguments &args ) {
+ Context::Scope context_scope(scope->_context);
for (int i = 0; i < args.Length(); ++i) {
std::string filename(toSTLString(args[i]));
- if (!self->execFile(filename, false , true , false)) {
+ if (!scope->execFile(filename, false , true , false)) {
return v8::ThrowException(v8::String::New((std::string("error loading file: ") + filename).c_str()));
}
}
return v8::True();
}
+ /**
+ * JS callback that will call a C++ function with the v8 scope and v8 arguments.
+ * Handles interrupts, exceptions, etc.
+ *
+ * The implementation below assumes that SERVER-1816 has been fixed - in
+ * particular, interrupted() must return true if an interrupt was ever
+ * sent; currently that is not the case if a new killop overwrites the data
+ * for an old one
+ */
+ v8::Handle< v8::Value > V8Scope::v8Callback( const v8::Arguments &args ) {
+ disableV8Interrupt(); // we don't want to have to audit all v8 calls for termination exceptions, so we don't allow these exceptions during the callback
+ if ( globalScriptEngine->interrupted() ) {
+ v8::V8::TerminateExecution(); // experimentally it seems that TerminateExecution() will override the return value
+ return v8::Undefined();
+ }
+ Local< External > f = External::Cast( *args.Callee()->Get( v8::String::New( "_v8_function" ) ) );
+ v8Function function = (v8Function)(f->Value());
+ Local< External > scp = External::Cast( *args.Data() );
+ V8Scope* scope = (V8Scope*)(scp->Value());
+
+ v8::Handle< v8::Value > ret;
+ string exception;
+ try {
+ ret = function( scope, args );
+ }
+ catch( const std::exception &e ) {
+ exception = e.what();
+ }
+ catch( ... ) {
+ exception = "unknown exception";
+ }
+ enableV8Interrupt();
+ if ( globalScriptEngine->interrupted() ) {
+ v8::V8::TerminateExecution();
+ return v8::Undefined();
+ }
+ if ( !exception.empty() ) {
+ // technically, ThrowException is supposed to be the last v8 call before returning
+ ret = v8::ThrowException( v8::String::New( exception.c_str() ) );
+ }
+ return ret;
+ }
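For comparison, a minimal v8Function as dispatched by v8Callback above; it receives the owning V8Scope plus the raw v8 arguments, while locking, interrupt and exception handling come from the wrapper. The name is an assumption, mirroring how Print/Version/GCV8 are wired:

    static Handle< Value > EchoFirstArg( V8Scope* scope , const v8::Arguments& args ) {
        if ( args.Length() < 1 )
            return v8::Undefined();
        return args[0];           // no try/catch here; v8Callback converts exceptions for us
    }
    // registered through createV8Function, which stores the raw pointer under V8STR_V8_FUNC:
    // scope->injectV8Function( "echoFirstArg" , EchoFirstArg );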
+
// ---- global stuff ----
void V8Scope::init( const BSONObj * data ) {
@@ -156,29 +486,29 @@ namespace mongo {
void V8Scope::setNumber( const char * field , double val ) {
V8_SIMPLE_HEADER
- _global->Set( v8::String::New( field ) , v8::Number::New( val ) );
+ _global->Set( getV8Str( field ) , v8::Number::New( val ) );
}
void V8Scope::setString( const char * field , const char * val ) {
V8_SIMPLE_HEADER
- _global->Set( v8::String::New( field ) , v8::String::New( val ) );
+ _global->Set( getV8Str( field ) , v8::String::New( val ) );
}
void V8Scope::setBoolean( const char * field , bool val ) {
V8_SIMPLE_HEADER
- _global->Set( v8::String::New( field ) , v8::Boolean::New( val ) );
+ _global->Set( getV8Str( field ) , v8::Boolean::New( val ) );
}
void V8Scope::setElement( const char *field , const BSONElement& e ) {
V8_SIMPLE_HEADER
- _global->Set( v8::String::New( field ) , mongoToV8Element( e ) );
+ _global->Set( getV8Str( field ) , mongoToV8Element( e ) );
}
void V8Scope::setObject( const char *field , const BSONObj& obj , bool readOnly) {
V8_SIMPLE_HEADER
// Set() accepts a ReadOnly parameter, but this just prevents the field itself
// from being overwritten and doesn't protect the object stored in 'field'.
- _global->Set( v8::String::New( field ) , mongoToV8( obj, false, readOnly) );
+ _global->Set( getV8Str( field ) , mongoToLZV8( obj, false, readOnly) );
}
int V8Scope::type( const char *field ) {
@@ -196,8 +526,9 @@ namespace mongo {
return Array;
if ( v->IsBoolean() )
return Bool;
- if ( v->IsInt32() )
- return NumberInt;
+ // must be an explicit NumberInt to be treated as an integer
+// if ( v->IsInt32() )
+// return NumberInt;
if ( v->IsNumber() )
return NumberDouble;
if ( v->IsExternal() ) {
@@ -213,7 +544,7 @@ namespace mongo {
}
v8::Handle<v8::Value> V8Scope::get( const char * field ) {
- return _global->Get( v8::String::New( field ) );
+ return _global->Get( getV8Str( field ) );
}
double V8Scope::getNumber( const char *field ) {
@@ -314,45 +645,50 @@ namespace mongo {
return num;
}
- void V8Scope::setThis( const BSONObj * obj ) {
+ void V8Scope::setFunction( const char *field , const char * code ) {
V8_SIMPLE_HEADER
- if ( ! obj ) {
- _this = Persistent< v8::Object >::New( v8::Object::New() );
- return;
- }
-
- //_this = mongoToV8( *obj );
- v8::Handle<v8::Value> argv[1];
- argv[0] = v8::External::New( createWrapperHolder( obj , true , false ) );
- _this = Persistent< v8::Object >::New( _wrapper->NewInstance( 1, argv ) );
+ _global->Set( getV8Str( field ) , __createFunction(code) );
}
+// void V8Scope::setThis( const BSONObj * obj ) {
+// V8_SIMPLE_HEADER
+// if ( ! obj ) {
+// _this = Persistent< v8::Object >::New( v8::Object::New() );
+// return;
+// }
+//
+// //_this = mongoToV8( *obj );
+// v8::Handle<v8::Value> argv[1];
+// argv[0] = v8::External::New( createWrapperHolder( this, obj , true , false ) );
+// _this = Persistent< v8::Object >::New( _wrapper->NewInstance( 1, argv ) );
+// }
+
void V8Scope::rename( const char * from , const char * to ) {
V8_SIMPLE_HEADER;
- v8::Local<v8::String> f = v8::String::New( from );
- v8::Local<v8::String> t = v8::String::New( to );
+ Handle<v8::String> f = getV8Str( from );
+ Handle<v8::String> t = getV8Str( to );
_global->Set( t , _global->Get( f ) );
_global->Set( f , v8::Undefined() );
}
- int V8Scope::invoke( ScriptingFunction func , const BSONObj& argsObject, int timeoutMs , bool ignoreReturn ) {
+ int V8Scope::invoke( ScriptingFunction func , const BSONObj* argsObject, const BSONObj* recv, int timeoutMs , bool ignoreReturn, bool readOnlyArgs, bool readOnlyRecv ) {
V8_SIMPLE_HEADER
Handle<Value> funcValue = _funcs[func-1];
TryCatch try_catch;
- int nargs = argsObject.nFields();
+ int nargs = argsObject ? argsObject->nFields() : 0;
scoped_array< Handle<Value> > args;
if ( nargs ) {
args.reset( new Handle<Value>[nargs] );
- BSONObjIterator it( argsObject );
+ BSONObjIterator it( *argsObject );
for ( int i=0; i<nargs; i++ ) {
BSONElement next = it.next();
- args[i] = mongoToV8Element( next );
+ args[i] = mongoToV8Element( next, readOnlyArgs );
}
- setObject( "args", argsObject, true ); // for backwards compatibility
+ setObject( "args", *argsObject, readOnlyArgs); // for backwards compatibility
}
else {
- _global->Set( v8::String::New( "args" ), v8::Undefined() );
+ _global->Set( V8STR_ARGS, v8::Undefined() );
}
if ( globalScriptEngine->interrupted() ) {
stringstream ss;
@@ -361,8 +697,14 @@ namespace mongo {
log() << _error << endl;
return 1;
}
+ Handle<v8::Object> v8recv;
+ if (recv != 0)
+ v8recv = mongoToLZV8(*recv, false, readOnlyRecv);
+ else
+ v8recv = _emptyObj;
+
enableV8Interrupt(); // because of v8 locker we can check interrupted, then enable
- Local<Value> result = ((v8::Function*)(*funcValue))->Call( _this , nargs , args.get() );
+ Local<Value> result = ((v8::Function*)(*funcValue))->Call( v8recv , nargs , nargs ? args.get() : 0 );
disableV8Interrupt();
if ( result.IsEmpty() ) {
@@ -379,7 +721,7 @@ namespace mongo {
}
if ( ! ignoreReturn ) {
- _global->Set( v8::String::New( "return" ) , result );
+ _global->Set( V8STR_RETURN , result );
}
return 0;
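A call-site sketch for the new invoke signature (function text and field names are illustrative; assumes the base Scope::createFunction helper): args is passed positionally, recv becomes 'this' via mongoToLZV8, and the result is published under "return":

    ScriptingFunction f = scope->createFunction( "function( x ){ return this.a + x.b; }" );
    BSONObj args = BSON( "b" << 1 );          // field order gives positional argument 0
    BSONObj recv = BSON( "a" << 2 );          // wrapped lazily and used as 'this'
    scope->invoke( f , &args , &recv , 0 /*timeoutMs*/ , false /*ignoreReturn*/ ,
                   true /*readOnlyArgs*/ , false /*readOnlyRecv*/ );
    double sum = scope->getNumber( "return" );   // 3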
@@ -438,7 +780,7 @@ namespace mongo {
return false;
}
- _global->Set( v8::String::New( "__lastres__" ) , result );
+ _global->Set( getV8Str( "__lastres__" ) , result );
if ( printResult && ! result->IsUndefined() ) {
cout << toSTLString( result ) << endl;
@@ -447,12 +789,43 @@ namespace mongo {
return true;
}
- void V8Scope::injectNative( const char *field, NativeFunction func ) {
+ void V8Scope::injectNative( const char *field, NativeFunction func, void* data ) {
+ injectNative(field, func, _global, data);
+ }
+
+ void V8Scope::injectNative( const char *field, NativeFunction func, Handle<v8::Object>& obj, void* data ) {
+ V8_SIMPLE_HEADER
+
+ Handle< FunctionTemplate > ft = createV8Function(nativeCallback);
+ ft->Set( this->V8STR_NATIVE_FUNC, External::New( (void*)func ) );
+ ft->Set( this->V8STR_NATIVE_DATA, External::New( data ) );
+ obj->Set( getV8Str( field ), ft->GetFunction() );
+ }
+
+ void V8Scope::injectV8Function( const char *field, v8Function func ) {
+ injectV8Function(field, func, _global);
+ }
+
+ void V8Scope::injectV8Function( const char *field, v8Function func, Handle<v8::Object>& obj ) {
+ V8_SIMPLE_HEADER
+
+ Handle< FunctionTemplate > ft = createV8Function(func);
+ Handle<v8::Function> f = ft->GetFunction();
+ obj->Set( getV8Str( field ), f );
+ }
+
+ void V8Scope::injectV8Function( const char *field, v8Function func, Handle<v8::Template>& t ) {
V8_SIMPLE_HEADER
- Handle< FunctionTemplate > f( newV8Function< nativeCallback >() );
- f->Set( v8::String::New( "_native_function" ), External::New( (void*)func ) );
- _global->Set( v8::String::New( field ), f->GetFunction() );
+ Handle< FunctionTemplate > ft = createV8Function(func);
+ Handle<v8::Function> f = ft->GetFunction();
+ t->Set( getV8Str( field ), f );
+ }
+
+ Handle<FunctionTemplate> V8Scope::createV8Function( v8Function func ) {
+ Handle< FunctionTemplate > ft = v8::FunctionTemplate::New(v8Callback, External::New( this ));
+ ft->Set( this->V8STR_V8_FUNC, External::New( (void*)func ) );
+ return ft;
}
void V8Scope::gc() {
@@ -479,7 +852,7 @@ namespace mongo {
v8::Locker::StartPreemption( 50 );
//_global->Set( v8::String::New( "Mongo" ) , _engine->_externalTemplate->GetFunction() );
- _global->Set( v8::String::New( "Mongo" ) , getMongoFunctionTemplate( true )->GetFunction() );
+ _global->Set( getV8Str( "Mongo" ) , getMongoFunctionTemplate( this, true )->GetFunction() );
execCoreFiles();
exec( "_mongo = new Mongo();" , "local connect 2" , false , true , true , 0 );
exec( (string)"db = _mongo.getDB(\"" + dbName + "\");" , "local connect 3" , false , true , true , 0 );
@@ -496,8 +869,8 @@ namespace mongo {
if ( _connectState == LOCAL )
throw UserException( 12512, "localConnect already called, can't call externalSetup" );
- installFork( _global, _context );
- _global->Set( v8::String::New( "Mongo" ) , getMongoFunctionTemplate( false )->GetFunction() );
+ installFork( this, _global, _context );
+ _global->Set( getV8Str( "Mongo" ) , getMongoFunctionTemplate( this, false )->GetFunction() );
execCoreFiles();
_connectState = EXTERNAL;
}
@@ -512,4 +885,663 @@ namespace mongo {
_error = "";
}
+ Local< v8::Value > newFunction( const char *code ) {
+ stringstream codeSS;
+ codeSS << "____MontoToV8_newFunction_temp = " << code;
+ string codeStr = codeSS.str();
+ Local< Script > compiled = Script::New( v8::String::New( codeStr.c_str() ) );
+ Local< Value > ret = compiled->Run();
+ return ret;
+ }
+
+ Local< v8::Value > V8Scope::newId( const OID &id ) {
+ v8::Function * idCons = this->getObjectIdCons();
+ v8::Handle<v8::Value> argv[1];
+ argv[0] = v8::String::New( id.str().c_str() );
+ return idCons->NewInstance( 1 , argv );
+ }
+
+ Local<v8::Object> V8Scope::mongoToV8( const BSONObj& m , bool array, bool readOnly ) {
+
+ Local<v8::Object> o;
+
+ // handle DBRef. needs to come first, doesn't it? (metagoto)
+ static string ref = "$ref";
+ if ( ref == m.firstElement().fieldName() ) {
+ const BSONElement& id = m["$id"];
+ if (!id.eoo()) { // there's no check on $id existence in the sm implementation. risky?
+ v8::Function* dbRef = getNamedCons( "DBRef" );
+ o = dbRef->NewInstance();
+ }
+ }
+
+ Local< v8::ObjectTemplate > readOnlyObjects;
+
+ if ( !o.IsEmpty() ) {
+ readOnly = false;
+ }
+ else if ( array ) {
+ // NOTE Looks like it's impossible to add interceptors to v8 arrays.
+ readOnly = false;
+ o = v8::Array::New();
+ }
+ else if ( !readOnly ) {
+ o = v8::Object::New();
+ }
+ else {
+ // NOTE Our readOnly implementation relies on undocumented ObjectTemplate
+ // functionality that may be fragile, but it still seems like the best option
+ // for now -- fwiw, the v8 docs are pretty sparse. I've determined experimentally
+ // that when property handlers are set for an object template, they will attach
+ // to objects previously created by that template. To get this to work, though,
+ // it is necessary to initialize the template's property handlers before
+ // creating objects from the template (as I have in the following few lines
+ // of code).
+ // NOTE In my first attempt, I configured the permanent property handlers before
+ // constructing the object and replaced the Set() calls below with ForceSet().
+ // However, it turns out that ForceSet() only bypasses handlers for named
+ // properties and not for indexed properties.
+ readOnlyObjects = v8::ObjectTemplate::New();
+ // NOTE This internal field will store type info for special db types. For
+ // regular objects the field is unnecessary - for simplicity I'm creating just
+ // one readOnlyObjects template for objects where the field is & isn't necessary,
+ // assuming that the overhead of an internal field is slight.
+ readOnlyObjects->SetInternalFieldCount( 1 );
+ readOnlyObjects->SetNamedPropertyHandler( 0 );
+ readOnlyObjects->SetIndexedPropertyHandler( 0 );
+ o = readOnlyObjects->NewInstance();
+ }
+
+ mongo::BSONObj sub;
+
+ for ( BSONObjIterator i(m); i.more(); ) {
+ const BSONElement& f = i.next();
+
+ Local<Value> v;
+ Handle<v8::String> name = getV8Str(f.fieldName());
+
+ switch ( f.type() ) {
+
+ case mongo::Code:
+ o->Set( name, newFunction( f.valuestr() ) );
+ break;
+
+ case CodeWScope:
+ if ( f.codeWScopeObject().isEmpty() )
+ log() << "warning: CodeWScope doesn't transfer to db.eval" << endl;
+ o->Set( name, newFunction( f.codeWScopeCode() ) );
+ break;
+
+ case mongo::String:
+ o->Set( name , v8::String::New( f.valuestr() ) );
+ break;
+
+ case mongo::jstOID: {
+ v8::Function * idCons = getObjectIdCons();
+ v8::Handle<v8::Value> argv[1];
+ argv[0] = v8::String::New( f.__oid().str().c_str() );
+ o->Set( name ,
+ idCons->NewInstance( 1 , argv ) );
+ break;
+ }
+
+ case mongo::NumberDouble:
+ case mongo::NumberInt:
+ o->Set( name , v8::Number::New( f.number() ) );
+ break;
+
+// case mongo::NumberInt: {
+// Local<v8::Object> sub = readOnly ? readOnlyObjects->NewInstance() : internalFieldObjects->NewInstance();
+// int val = f.numberInt();
+// v8::Function* numberInt = getNamedCons( "NumberInt" );
+// v8::Handle<v8::Value> argv[1];
+// argv[0] = v8::Int32::New( val );
+// o->Set( name, numberInt->NewInstance( 1, argv ) );
+// break;
+// }
+
+ case mongo::Array:
+ sub = f.embeddedObject();
+ o->Set( name , mongoToV8( sub , true, readOnly ) );
+ break;
+ case mongo::Object:
+ sub = f.embeddedObject();
+ o->Set( name , mongoToLZV8( sub , false, readOnly ) );
+ break;
+
+ case mongo::Date:
+ o->Set( name , v8::Date::New( (double) ((long long)f.date().millis) ));
+ break;
+
+ case mongo::Bool:
+ o->Set( name , v8::Boolean::New( f.boolean() ) );
+ break;
+
+ case mongo::jstNULL:
+ case mongo::Undefined: // duplicate sm behavior
+ o->Set( name , v8::Null() );
+ break;
+
+ case mongo::RegEx: {
+ v8::Function * regex = getNamedCons( "RegExp" );
+
+ v8::Handle<v8::Value> argv[2];
+ argv[0] = v8::String::New( f.regex() );
+ argv[1] = v8::String::New( f.regexFlags() );
+
+ o->Set( name , regex->NewInstance( 2 , argv ) );
+ break;
+ }
+
+ case mongo::BinData: {
+ Local<v8::Object> b = readOnly ? readOnlyObjects->NewInstance() : internalFieldObjects->NewInstance();
+
+ int len;
+ const char *data = f.binData( len );
+
+ v8::Function* binData = getNamedCons( "BinData" );
+ v8::Handle<v8::Value> argv[3];
+ argv[0] = v8::Number::New( len );
+ argv[1] = v8::Number::New( f.binDataType() );
+ argv[2] = v8::String::New( data, len );
+ o->Set( name, binData->NewInstance(3, argv) );
+ break;
+ }
+
+ case mongo::Timestamp: {
+ Local<v8::Object> sub = readOnly ? readOnlyObjects->NewInstance() : internalFieldObjects->NewInstance();
+
+ sub->Set( V8STR_T , v8::Number::New( f.timestampTime() ) );
+ sub->Set( V8STR_I , v8::Number::New( f.timestampInc() ) );
+ sub->SetInternalField( 0, v8::Uint32::New( f.type() ) );
+
+ o->Set( name , sub );
+ break;
+ }
+
+ case mongo::NumberLong: {
+ Local<v8::Object> sub = readOnly ? readOnlyObjects->NewInstance() : internalFieldObjects->NewInstance();
+ unsigned long long val = f.numberLong();
+ v8::Function* numberLong = getNamedCons( "NumberLong" );
+ double floatApprox = (double)(long long)val;
+ if ( (long long)val == (long long)floatApprox ) {
+ v8::Handle<v8::Value> argv[1];
+ argv[0] = v8::Number::New( floatApprox );
+ o->Set( name, numberLong->NewInstance( 1, argv ) );
+ }
+ else {
+ v8::Handle<v8::Value> argv[3];
+ argv[0] = v8::Number::New( floatApprox );
+ argv[1] = v8::Integer::New( val >> 32 );
+ argv[2] = v8::Integer::New( (unsigned long)(val & 0x00000000ffffffff) );
+ o->Set( name, numberLong->NewInstance(3, argv) );
+ }
+ break;
+ }
+
+ case mongo::MinKey: {
+ Local<v8::Object> sub = readOnly ? readOnlyObjects->NewInstance() : internalFieldObjects->NewInstance();
+ sub->Set( V8STR_MINKEY, v8::Boolean::New( true ) );
+ sub->SetInternalField( 0, v8::Uint32::New( f.type() ) );
+ o->Set( name , sub );
+ break;
+ }
+
+ case mongo::MaxKey: {
+ Local<v8::Object> sub = readOnly ? readOnlyObjects->NewInstance() : internalFieldObjects->NewInstance();
+ sub->Set( V8STR_MAXKEY, v8::Boolean::New( true ) );
+ sub->SetInternalField( 0, v8::Uint32::New( f.type() ) );
+ o->Set( name , sub );
+ break;
+ }
+
+ case mongo::DBRef: {
+ v8::Function* dbPointer = getNamedCons( "DBPointer" );
+ v8::Handle<v8::Value> argv[2];
+ argv[0] = getV8Str( f.dbrefNS() );
+ argv[1] = newId( f.dbrefOID() );
+ o->Set( name, dbPointer->NewInstance(2, argv) );
+ break;
+ }
+
+ default:
+ cout << "can't handle type: ";
+ cout << f.type() << " ";
+ cout << f.toString();
+ cout << endl;
+ break;
+ }
+
+ }
+
+ if ( readOnly ) {
+ readOnlyObjects->SetNamedPropertyHandler( 0, NamedReadOnlySet, 0, NamedReadOnlyDelete );
+ readOnlyObjects->SetIndexedPropertyHandler( 0, IndexedReadOnlySet, 0, IndexedReadOnlyDelete );
+ }
+
+ return o;
+ }
+
+ /**
+ * converts a BSONObj to a Lazy V8 object
+ */
+ Handle<v8::Object> V8Scope::mongoToLZV8( const BSONObj& m , bool array, bool readOnly ) {
+ Local<v8::Object> o;
+
+ if (readOnly) {
+ o = roObjectTemplate->NewInstance();
+ o->SetHiddenValue(V8STR_RO, v8::Boolean::New(true));
+ } else {
+ if (array) {
+ o = lzArrayTemplate->NewInstance();
+ o->SetPrototype(v8::Array::New(1)->GetPrototype());
+ o->Set(V8STR_LENGTH, v8::Integer::New(m.nFields()), DontEnum);
+ // o->Set(ARRAY_STRING, v8::Boolean::New(true), DontEnum);
+ } else {
+ o = lzObjectTemplate->NewInstance();
+
+ static string ref = "$ref";
+ if ( ref == m.firstElement().fieldName() ) {
+ const BSONElement& id = m["$id"];
+ if (!id.eoo()) {
+ v8::Function* dbRef = getNamedCons( "DBRef" );
+ o->SetPrototype(dbRef->NewInstance()->GetPrototype());
+ }
+ }
+ }
+
+ // need to set all keys with dummy values, so that the key order is correct during enumeration;
+ // otherwise v8 would list any newly set property in JS before the ones of the underlying BSON obj.
+ for (BSONObjIterator it(m); it.more();) {
+ const BSONElement& f = it.next();
+ o->ForceSet(getV8Str(f.fieldName()), v8::Undefined());
+ }
+ }
+
+ BSONObj* own = new BSONObj(m.getOwned());
+// BSONObj* own = new BSONObj(m);
+ Persistent<v8::Object> p = wrapBSONObject(o, own);
+ return p;
+ }
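A round-trip sketch of the lazy wrapping (field names assumed): reads are resolved on demand by the named/indexed handlers, and a write flips the hidden _mod flag so v8ToMongo rebuilds the object instead of returning the cached BSON:

    BSONObj doc = BSON( "a" << 1 << "sub" << BSON( "b" << 2 ) );
    scope->setObject( "doc" , doc , false );                     // stored via mongoToLZV8, no eager conversion
    scope->exec( "doc.a = doc.a + 1" , "lzv8 demo" , false , true , false , 0 );
    BSONObj out = scope->getObject( "doc" );                     // rebuilt because V8STR_MODIFIED was set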
+
+ Handle<v8::Value> V8Scope::mongoToV8Element( const BSONElement &f, bool readOnly ) {
+// Local< v8::ObjectTemplate > internalFieldObjects = v8::ObjectTemplate::New();
+// internalFieldObjects->SetInternalFieldCount( 1 );
+
+ switch ( f.type() ) {
+
+ case mongo::Code:
+ return newFunction( f.valuestr() );
+
+ case CodeWScope:
+ if ( f.codeWScopeObject().isEmpty() )
+ log() << "warning: CodeWScope doesn't transfer to db.eval" << endl;
+ return newFunction( f.codeWScopeCode() );
+
+ case mongo::String:
+// return v8::String::NewExternal( new ExternalString( f.valuestr() ));
+ return v8::String::New( f.valuestr() );
+// return getV8Str( f.valuestr() );
+
+ case mongo::jstOID:
+ return newId( f.__oid() );
+
+ case mongo::NumberDouble:
+ case mongo::NumberInt:
+ return v8::Number::New( f.number() );
+
+ case mongo::Array:
+ // for arrays it's better to use a non-lazy object because:
+ // - the lazy array is not a true v8 array and requires some v8 src change for all methods to work
+ // - it made several tests about 1.5x slower
+ // - most times when an array is accessed, all its values will be used
+ return mongoToV8( f.embeddedObject() , true, readOnly );
+ case mongo::Object:
+ return mongoToLZV8( f.embeddedObject() , false, readOnly);
+
+ case mongo::Date:
+ return v8::Date::New( (double) ((long long)f.date().millis) );
+
+ case mongo::Bool:
+ return v8::Boolean::New( f.boolean() );
+
+ case mongo::EOO:
+ case mongo::jstNULL:
+ case mongo::Undefined: // duplicate sm behavior
+ return v8::Null();
+
+ case mongo::RegEx: {
+ v8::Function * regex = getNamedCons( "RegExp" );
+
+ v8::Handle<v8::Value> argv[2];
+ argv[0] = v8::String::New( f.regex() );
+ argv[1] = v8::String::New( f.regexFlags() );
+
+ return regex->NewInstance( 2 , argv );
+ break;
+ }
+
+ case mongo::BinData: {
+ int len;
+ const char *data = f.binData( len );
+
+ v8::Function* binData = getNamedCons( "BinData" );
+ v8::Handle<v8::Value> argv[3];
+ argv[0] = v8::Number::New( len );
+ argv[1] = v8::Number::New( f.binDataType() );
+ argv[2] = v8::String::New( data, len );
+ return binData->NewInstance( 3, argv );
+ };
+
+ case mongo::Timestamp: {
+ Local<v8::Object> sub = internalFieldObjects->NewInstance();
+
+ sub->Set( V8STR_T , v8::Number::New( f.timestampTime() ) );
+ sub->Set( V8STR_I , v8::Number::New( f.timestampInc() ) );
+ sub->SetInternalField( 0, v8::Uint32::New( f.type() ) );
+
+ return sub;
+ }
+
+ case mongo::NumberLong: {
+ Local<v8::Object> sub = internalFieldObjects->NewInstance();
+ unsigned long long val = f.numberLong();
+ v8::Function* numberLong = getNamedCons( "NumberLong" );
+ if ( (long long)val == (long long)(double)(long long)(val) ) {
+ v8::Handle<v8::Value> argv[1];
+ argv[0] = v8::Number::New( (double)(long long)( val ) );
+ return numberLong->NewInstance( 1, argv );
+ }
+ else {
+ v8::Handle<v8::Value> argv[3];
+ argv[0] = v8::Number::New( (double)(long long)( val ) );
+ argv[1] = v8::Integer::New( val >> 32 );
+ argv[2] = v8::Integer::New( (unsigned long)(val & 0x00000000ffffffff) );
+ return numberLong->NewInstance( 3, argv );
+ }
+ }
+
+// case mongo::NumberInt: {
+// Local<v8::Object> sub = internalFieldObjects->NewInstance();
+// int val = f.numberInt();
+// v8::Function* numberInt = getNamedCons( "NumberInt" );
+// v8::Handle<v8::Value> argv[1];
+// argv[0] = v8::Int32::New(val);
+// return numberInt->NewInstance( 1, argv );
+// }
+
+ case mongo::MinKey: {
+ Local<v8::Object> sub = internalFieldObjects->NewInstance();
+ sub->Set( V8STR_MINKEY, v8::Boolean::New( true ) );
+ sub->SetInternalField( 0, v8::Uint32::New( f.type() ) );
+ return sub;
+ }
+
+ case mongo::MaxKey: {
+ Local<v8::Object> sub = internalFieldObjects->NewInstance();
+ sub->Set( V8STR_MAXKEY, v8::Boolean::New( true ) );
+ sub->SetInternalField( 0, v8::Uint32::New( f.type() ) );
+ return sub;
+ }
+
+ case mongo::DBRef: {
+ v8::Function* dbPointer = getNamedCons( "DBPointer" );
+ v8::Handle<v8::Value> argv[2];
+ argv[0] = getV8Str( f.dbrefNS() );
+ argv[1] = newId( f.dbrefOID() );
+ return dbPointer->NewInstance(2, argv);
+ }
+
+ default:
+ cout << "can't handle type: ";
+ cout << f.type() << " ";
+ cout << f.toString();
+ cout << endl;
+ break;
+ }
+
+ return v8::Undefined();
+ }
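A worked example of the NumberLong split used in both conversion paths (value chosen for illustration): when the 64-bit value does not round-trip through a double, it is handed to the NumberLong constructor in three parts, and v8ToMongoElement later reassembles it from top and bottom:

    unsigned long long val = 0x1234567887654321ULL;     // needs more than 53 bits, so a double alone is lossy
    double floatApprox = (double)(long long)val;
    unsigned top    = (unsigned)( val >> 32 );           // 0x12345678
    unsigned bottom = (unsigned)( val & 0xffffffff );    // 0x87654321
    long long back  = (long long)( ( (unsigned long long)top << 32 ) + bottom );  // == (long long)val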
+
+ void V8Scope::append( BSONObjBuilder & builder , const char * fieldName , const char * scopeName ) {
+ V8_SIMPLE_HEADER
+ Handle<v8::String> v8name = getV8Str(scopeName);
+ Handle<Value> value = _global->Get( v8name );
+ v8ToMongoElement(builder, v8name, fieldName, value);
+ }
+
+ void V8Scope::v8ToMongoElement( BSONObjBuilder & b , v8::Handle<v8::String> name , const string sname , v8::Handle<v8::Value> value , int depth, BSONObj* originalParent ) {
+
+ if ( value->IsString() ) {
+// Handle<v8::String> str = Handle<v8::String>::Cast(value);
+// ExternalString* es = (ExternalString*) (str->GetExternalAsciiStringResource());
+// b.append( sname , es->data() );
+ b.append( sname , toSTLString( value ).c_str() );
+ return;
+ }
+
+ if ( value->IsFunction() ) {
+ b.appendCode( sname , toSTLString( value ) );
+ return;
+ }
+
+ if ( value->IsNumber() ) {
+ double val = value->ToNumber()->Value();
+ // if previous type was integer, keep it
+ int intval = (int)val;
+ if (val == intval && originalParent) {
+ BSONElement elmt = originalParent->getField(sname);
+ if (elmt.type() == mongo::NumberInt) {
+ b.append( sname , intval );
+ return;
+ }
+ }
+
+ b.append( sname , val );
+ return;
+ }
+
+ if ( value->IsArray() ) {
+ BSONObj sub = v8ToMongo( value->ToObject() , depth );
+ b.appendArray( sname , sub );
+ return;
+ }
+
+ if ( value->IsDate() ) {
+ long long dateval = (long long)(v8::Date::Cast( *value )->NumberValue());
+ b.appendDate( sname , Date_t( (unsigned long long) dateval ) );
+ return;
+ }
+
+ if ( value->IsExternal() )
+ return;
+
+ if ( value->IsObject() ) {
+ // The user could potentially modify the fields of these special objects,
+ // wreaking havoc when we attempt to reinterpret them. Not doing any validation
+ // for now...
+ Local< v8::Object > obj = value->ToObject();
+ if ( obj->InternalFieldCount() && obj->GetInternalField( 0 )->IsNumber() ) {
+ switch( obj->GetInternalField( 0 )->ToInt32()->Value() ) { // NOTE Uint32's Value() gave me a linking error, so going with this instead
+ case Timestamp:
+ b.appendTimestamp( sname,
+ Date_t( (unsigned long long)(obj->Get( V8STR_T )->ToNumber()->Value() )),
+ obj->Get( V8STR_I )->ToInt32()->Value() );
+ return;
+ case MinKey:
+ b.appendMinKey( sname );
+ return;
+ case MaxKey:
+ b.appendMaxKey( sname );
+ return;
+ default:
+ assert( "invalid internal field" == 0 );
+ }
+ }
+ string s = toSTLString( value );
+ if ( s.size() && s[0] == '/' ) {
+ s = s.substr( 1 );
+ string r = s.substr( 0 , s.rfind( "/" ) );
+ string o = s.substr( s.rfind( "/" ) + 1 );
+ b.appendRegex( sname , r , o );
+ }
+ else if ( value->ToObject()->GetPrototype()->IsObject() &&
+ value->ToObject()->GetPrototype()->ToObject()->HasRealNamedProperty( V8STR_ISOBJECTID ) ) {
+ OID oid;
+ oid.init( toSTLString( value ) );
+ b.appendOID( sname , &oid );
+ }
+ else if ( !value->ToObject()->GetHiddenValue( V8STR_NUMBERLONG ).IsEmpty() ) {
+ // TODO might be nice to potentially speed this up with an indexed internal
+ // field, but I don't yet know how to use an ObjectTemplate with a
+ // constructor.
+ v8::Handle< v8::Object > it = value->ToObject();
+ long long val;
+ if ( !it->Has( getV8Str( "top" ) ) ) {
+ val = (long long)( it->Get( getV8Str( "floatApprox" ) )->NumberValue() );
+ }
+ else {
+ val = (long long)
+ ( (unsigned long long)( it->Get( getV8Str( "top" ) )->ToInt32()->Value() ) << 32 ) +
+ (unsigned)( it->Get( getV8Str( "bottom" ) )->ToInt32()->Value() );
+ }
+
+ b.append( sname, val );
+ }
+ else if ( !value->ToObject()->GetHiddenValue( V8STR_NUMBERINT ).IsEmpty() ) {
+ v8::Handle< v8::Object > it = value->ToObject();
+ b.append(sname, it->GetHiddenValue(V8STR_NUMBERINT)->Int32Value());
+ }
+ else if ( !value->ToObject()->GetHiddenValue( V8STR_DBPTR ).IsEmpty() ) {
+ OID oid;
+ oid.init( toSTLString( value->ToObject()->Get( getV8Str( "id" ) ) ) );
+ string ns = toSTLString( value->ToObject()->Get( getV8Str( "ns" ) ) );
+ b.appendDBRef( sname, ns, oid );
+ }
+ else if ( !value->ToObject()->GetHiddenValue( V8STR_BINDATA ).IsEmpty() ) {
+ int len = obj->Get( getV8Str( "len" ) )->ToInt32()->Value();
+ Local<External> c = External::Cast( *(obj->GetInternalField( 0 )) );
+ const char* dataArray = (char*)(c->Value());
+ b.appendBinData( sname,
+ len,
+ mongo::BinDataType( obj->Get( getV8Str( "type" ) )->ToInt32()->Value() ),
+ dataArray );
+ }
+ else {
+ BSONObj sub = v8ToMongo( value->ToObject() , depth );
+ b.append( sname , sub );
+ }
+ return;
+ }
+
+ if ( value->IsBoolean() ) {
+ b.appendBool( sname , value->ToBoolean()->Value() );
+ return;
+ }
+
+ else if ( value->IsUndefined() ) {
+ b.appendUndefined( sname );
+ return;
+ }
+
+ else if ( value->IsNull() ) {
+ b.appendNull( sname );
+ return;
+ }
+
+ cout << "don't know how to convert to mongo field [" << name << "]\t" << value << endl;
+ }
+
+ BSONObj V8Scope::v8ToMongo( v8::Handle<v8::Object> o , int depth ) {
+ BSONObj* originalBSON = 0;
+ if (o->HasNamedLookupInterceptor()) {
+ originalBSON = unwrapBSONObj(o);
+ }
+
+ if ( !o->GetHiddenValue( V8STR_RO ).IsEmpty() ||
+ (o->HasNamedLookupInterceptor() && o->GetHiddenValue( V8STR_MODIFIED ).IsEmpty()) ) {
+ // object was readonly, use bson as is
+ if (originalBSON)
+ return *originalBSON;
+ }
+
+ BSONObjBuilder b;
+
+ if ( depth == 0 ) {
+ if ( o->HasRealNamedProperty( V8STR_ID ) ) {
+ v8ToMongoElement( b , V8STR_ID , "_id" , o->Get( V8STR_ID ), 0, originalBSON );
+ }
+ }
+
+ Local<v8::Array> names = o->GetPropertyNames();
+ for ( unsigned int i=0; i<names->Length(); i++ ) {
+ v8::Local<v8::String> name = names->Get( i )->ToString();
+
+// if ( o->GetPrototype()->IsObject() &&
+// o->GetPrototype()->ToObject()->HasRealNamedProperty( name ) )
+// continue;
+
+ v8::Local<v8::Value> value = o->Get( name );
+
+ const string sname = toSTLString( name );
+ if ( depth == 0 && sname == "_id" )
+ continue;
+
+ v8ToMongoElement( b , name , sname , value , depth + 1, originalBSON );
+ }
+ return b.obj();
+ }
+
+ // --- random utils ----
+
+ v8::Function * V8Scope::getNamedCons( const char * name ) {
+ return v8::Function::Cast( *(v8::Context::GetCurrent()->Global()->Get( getV8Str( name ) ) ) );
+ }
+
+ v8::Function * V8Scope::getObjectIdCons() {
+ return getNamedCons( "ObjectId" );
+ }
+
+ Handle<v8::Value> V8Scope::Print(V8Scope* scope, const Arguments& args) {
+ bool first = true;
+ for (int i = 0; i < args.Length(); i++) {
+ HandleScope handle_scope;
+ if (first) {
+ first = false;
+ }
+ else {
+ printf(" ");
+ }
+ v8::String::Utf8Value str(args[i]);
+ printf("%s", *str);
+ }
+ printf("\n");
+ return v8::Undefined();
+ }
+
+ Handle<v8::Value> V8Scope::Version(V8Scope* scope, const Arguments& args) {
+ HandleScope handle_scope;
+ return handle_scope.Close( v8::String::New(v8::V8::GetVersion()) );
+ }
+
+ Handle<v8::Value> V8Scope::GCV8(V8Scope* scope, const Arguments& args) {
+ V8Lock l;
+ while( !V8::IdleNotification() );
+ return v8::Undefined();
+ }
+
+ /**
+ * Gets a V8 string from the scope's cache, creating one if needed
+ */
+ v8::Handle<v8::String> V8Scope::getV8Str(string str) {
+ Persistent<v8::String> ptr = _strCache[str];
+ if (ptr.IsEmpty()) {
+ ptr = Persistent<v8::String>::New(v8::String::New(str.c_str()));
+ _strCache[str] = ptr;
+// cout << "Adding str " + str << endl;
+ }
+// cout << "Returning str " + str << endl;
+ return ptr;
+ }
+
} // namespace mongo
diff --git a/scripting/engine_v8.h b/scripting/engine_v8.h
index c770955..3f116c9 100644
--- a/scripting/engine_v8.h
+++ b/scripting/engine_v8.h
@@ -19,7 +19,6 @@
#include <vector>
#include "engine.h"
-#include "v8_db.h"
#include <v8.h>
using namespace v8;
@@ -27,6 +26,36 @@ using namespace v8;
namespace mongo {
class V8ScriptEngine;
+ class V8Scope;
+
+ typedef Handle< Value > (*v8Function) ( V8Scope* scope, const v8::Arguments& args );
+
+ // Preemption is going to be allowed for the v8 mutex, and some of our v8
+ // usage is not preemption safe. So we are using an additional mutex that
+ // will not be preempted. The V8Lock should be used in place of v8::Locker
+ // except in certain special cases involving interrupts.
+ namespace v8Locks {
+ // the implementations are quite simple - objects must be destroyed in
+ // reverse of the order created, and should not be shared between threads
+ struct RecursiveLock {
+ RecursiveLock();
+ ~RecursiveLock();
+ bool _unlock;
+ };
+ struct RecursiveUnlock {
+ RecursiveUnlock();
+ ~RecursiveUnlock();
+ bool _lock;
+ };
+ } // namespace v8Locks
+ class V8Lock {
+ v8Locks::RecursiveLock _noPreemptionLock;
+ v8::Locker _preemptionLock;
+ };
+ struct V8Unlock {
+ v8::Unlocker _preemptionUnlock;
+ v8Locks::RecursiveUnlock _noPreemptionUnlock;
+ };
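A minimal usage sketch (the enclosing function is hypothetical): declaration order makes V8Lock take the non-preemptable recursive lock before the v8 locker, and V8Unlock release the v8 locker first, so the pair follows the reverse-order rule stated above:

    void doV8Work() {
        V8Lock l;                 // RecursiveLock, then v8::Locker
        // ... v8 calls ...
        {
            V8Unlock u;           // v8::Unlocker, then RecursiveUnlock - e.g. around blocking work
            // ... code that must not hold the v8 lock ...
        }                         // both locks re-acquired when u is destroyed
    }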
class V8Scope : public Scope {
public:
@@ -47,6 +76,7 @@ namespace mongo {
virtual string getString( const char *field );
virtual bool getBoolean( const char *field );
virtual BSONObj getObject( const char *field );
+ Handle<v8::Object> getGlobalObject() { return _global; };
virtual int type( const char *field );
@@ -55,28 +85,82 @@ namespace mongo {
virtual void setBoolean( const char *field , bool val );
virtual void setElement( const char *field , const BSONElement& e );
virtual void setObject( const char *field , const BSONObj& obj , bool readOnly);
- virtual void setThis( const BSONObj * obj );
+ virtual void setFunction( const char *field , const char * code );
+// virtual void setThis( const BSONObj * obj );
virtual void rename( const char * from , const char * to );
virtual ScriptingFunction _createFunction( const char * code );
Local< v8::Function > __createFunction( const char * code );
- virtual int invoke( ScriptingFunction func , const BSONObj& args, int timeoutMs = 0 , bool ignoreReturn = false );
+ virtual int invoke( ScriptingFunction func , const BSONObj* args, const BSONObj* recv, int timeoutMs = 0 , bool ignoreReturn = false, bool readOnlyArgs = false, bool readOnlyRecv = false );
virtual bool exec( const StringData& code , const string& name , bool printResult , bool reportError , bool assertOnError, int timeoutMs );
virtual string getError() { return _error; }
- virtual void injectNative( const char *field, NativeFunction func );
+ virtual void injectNative( const char *field, NativeFunction func, void* data = 0 );
+ void injectNative( const char *field, NativeFunction func, Handle<v8::Object>& obj, void* data = 0 );
+ void injectV8Function( const char *field, v8Function func );
+ void injectV8Function( const char *field, v8Function func, Handle<v8::Object>& obj );
+ void injectV8Function( const char *field, v8Function func, Handle<v8::Template>& t );
+ Handle<v8::FunctionTemplate> createV8Function( v8Function func );
void gc();
Handle< Context > context() const { return _context; }
+ v8::Local<v8::Object> mongoToV8( const mongo::BSONObj & m , bool array = 0 , bool readOnly = false );
+ v8::Handle<v8::Object> mongoToLZV8( const mongo::BSONObj & m , bool array = 0 , bool readOnly = false );
+ mongo::BSONObj v8ToMongo( v8::Handle<v8::Object> o , int depth = 0 );
+
+ void v8ToMongoElement( BSONObjBuilder & b , v8::Handle<v8::String> name ,
+ const string sname , v8::Handle<v8::Value> value , int depth = 0, BSONObj* originalParent=0 );
+ v8::Handle<v8::Value> mongoToV8Element( const BSONElement &f, bool readOnly = false );
+ virtual void append( BSONObjBuilder & builder , const char * fieldName , const char * scopeName );
+
+ v8::Function * getNamedCons( const char * name );
+ v8::Function * getObjectIdCons();
+ Local< v8::Value > newId( const OID &id );
+
+ Persistent<v8::Object> wrapBSONObject(Local<v8::Object> obj, BSONObj* data);
+ Persistent<v8::Object> wrapArrayObject(Local<v8::Object> obj, char* data);
+
+ v8::Handle<v8::String> getV8Str(string str);
+// inline v8::Handle<v8::String> getV8Str(string str) { return v8::String::New(str.c_str()); }
+ inline v8::Handle<v8::String> getLocalV8Str(string str) { return v8::String::New(str.c_str()); }
+
+ Handle<v8::String> V8STR_CONN;
+ Handle<v8::String> V8STR_ID;
+ Handle<v8::String> V8STR_LENGTH;
+ Handle<v8::String> V8STR_LEN;
+ Handle<v8::String> V8STR_TYPE;
+ Handle<v8::String> V8STR_ISOBJECTID;
+ Handle<v8::String> V8STR_NATIVE_FUNC;
+ Handle<v8::String> V8STR_NATIVE_DATA;
+ Handle<v8::String> V8STR_V8_FUNC;
+ Handle<v8::String> V8STR_RETURN;
+ Handle<v8::String> V8STR_ARGS;
+ Handle<v8::String> V8STR_T;
+ Handle<v8::String> V8STR_I;
+ Handle<v8::String> V8STR_EMPTY;
+ Handle<v8::String> V8STR_MINKEY;
+ Handle<v8::String> V8STR_MAXKEY;
+ Handle<v8::String> V8STR_NUMBERLONG;
+ Handle<v8::String> V8STR_NUMBERINT;
+ Handle<v8::String> V8STR_DBPTR;
+ Handle<v8::String> V8STR_BINDATA;
+ Handle<v8::String> V8STR_WRAPPER;
+ Handle<v8::String> V8STR_RO;
+ Handle<v8::String> V8STR_MODIFIED;
+
private:
void _startCall();
- static Handle< Value > nativeCallback( const Arguments &args );
+ static Handle< Value > nativeCallback( V8Scope* scope, const Arguments &args );
+ static v8::Handle< v8::Value > v8Callback( const v8::Arguments &args );
+ static Handle< Value > load( V8Scope* scope, const Arguments &args );
+ static Handle< Value > Print(V8Scope* scope, const v8::Arguments& args);
+ static Handle< Value > Version(V8Scope* scope, const v8::Arguments& args);
+ static Handle< Value > GCV8(V8Scope* scope, const v8::Arguments& args);
- static Handle< Value > loadCallback( const Arguments &args );
V8ScriptEngine * _engine;
@@ -85,12 +169,19 @@ namespace mongo {
string _error;
vector< Persistent<Value> > _funcs;
- v8::Persistent<v8::Object> _this;
+ v8::Persistent<v8::Object> _emptyObj;
v8::Persistent<v8::Function> _wrapper;
enum ConnectState { NOT , LOCAL , EXTERNAL };
ConnectState _connectState;
+
+ std::map <string, v8::Persistent <v8::String> > _strCache;
+
+ Persistent<v8::ObjectTemplate> lzObjectTemplate;
+ Persistent<v8::ObjectTemplate> roObjectTemplate;
+ Persistent<v8::ObjectTemplate> lzArrayTemplate;
+ Persistent<v8::ObjectTemplate> internalFieldObjects;
};
class V8ScriptEngine : public ScriptEngine {
@@ -117,7 +208,24 @@ namespace mongo {
friend class V8Scope;
};
+ class ExternalString : public v8::String::ExternalAsciiStringResource {
+ public:
+ ExternalString(std::string str) : _data(str) {
+ }
+
+ ~ExternalString() {
+ }
+
+ const char* data () const { return _data.c_str(); }
+ size_t length () const { return _data.length(); }
+ private:
+// string _str;
+// const char* _data;
+ std::string _data;
+// size_t _len;
+ };
extern ScriptEngine * globalScriptEngine;
extern map< unsigned, int > __interruptSpecToThreadId;
+
}
diff --git a/scripting/sm_db.cpp b/scripting/sm_db.cpp
index 4c9d541..2a9169b 100644
--- a/scripting/sm_db.cpp
+++ b/scripting/sm_db.cpp
@@ -192,7 +192,15 @@ namespace mongo {
return JS_FALSE;
}
- ScriptEngine::runConnectCallback( *conn );
+ try{
+ ScriptEngine::runConnectCallback( *conn );
+ }
+ catch( std::exception& e ){
+ // Can happen if connection goes down while we're starting up here
+ // Catch so that we don't get a hard-to-trace segfault from SM
+ JS_ReportError( cx, ((string)( str::stream() << "Error during mongo startup." << causedBy( e ) )).c_str() );
+ return JS_FALSE;
+ }
assert( JS_SetPrivate( cx , obj , (void*)( new shared_ptr< DBClientWithCommands >( conn ) ) ) );
jsval host_val = c.toval( host.c_str() );
@@ -607,6 +615,7 @@ namespace mongo {
// UUID **************************
+#if 0
JSBool uuid_constructor( JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval ) {
Convertor c( cx );
@@ -631,6 +640,8 @@ namespace mongo {
buf[i] = fromHex(encoded.c_str() + i * 2);
}
+zzz
+
assert( JS_SetPrivate( cx, obj, new BinDataHolder( buf, 16 ) ) );
c.setProperty( obj, "len", c.toval( (double)16 ) );
c.setProperty( obj, "type", c.toval( (double)3 ) );
@@ -676,6 +687,8 @@ namespace mongo {
{ 0 }
};
+#endif
+
// BinData **************************
JSBool bindata_constructor( JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval ) {
@@ -744,27 +757,17 @@ namespace mongo {
assert( holder );
const char *data = ( ( BinDataHolder* )( holder ) )->c_;
stringstream ss;
- ss << hex;
+ ss.setf (ios_base::hex , ios_base::basefield);
+ ss.fill ('0');
+ ss.setf (ios_base::right , ios_base::adjustfield);
for( int i = 0; i < len; i++ ) {
unsigned v = (unsigned char) data[i];
- ss << v;
+ ss << setw(2) << v;
}
string ret = ss.str();
return *rval = c.toval( ret.c_str() );
}
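A small illustration of why the stream is switched to zero-filled, fixed-width hex (byte values made up): without setw(2)/fill('0') adjacent bytes can run together ambiguously:

    unsigned char data[] = { 0x0A, 0xFF };
    stringstream ss;
    ss.setf( ios_base::hex , ios_base::basefield );
    ss.fill( '0' );
    for ( int i = 0; i < 2; i++ )
        ss << setw( 2 ) << (unsigned)data[i];
    // ss.str() == "0aff"; the old plain `ss << hex << v` would have produced "aff"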
- JSBool bindataLength(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) {
- Convertor c(cx);
- int len = (int)c.getNumber( obj, "len" );
- return *rval = c.toval((double) len);
- }
-
- JSBool bindataSubtype(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) {
- Convertor c(cx);
- int t = (int)c.getNumber( obj, "type" );
- return *rval = c.toval((double) t);
- }
-
void bindata_finalize( JSContext * cx , JSObject * obj ) {
Convertor c(cx);
void *holder = JS_GetPrivate( cx, obj );
@@ -785,8 +788,6 @@ namespace mongo {
{ "toString" , bindata_tostring , 0 , JSPROP_READONLY | JSPROP_PERMANENT, 0 } ,
{ "hex", bindataAsHex, 0, JSPROP_READONLY | JSPROP_PERMANENT, 0 } ,
{ "base64", bindataBase64, 0, JSPROP_READONLY | JSPROP_PERMANENT, 0 } ,
- { "length", bindataLength, 0, JSPROP_READONLY | JSPROP_PERMANENT, 0 } ,
- { "subtype", bindataSubtype, 0, JSPROP_READONLY | JSPROP_PERMANENT, 0 } ,
{ 0 }
};
@@ -937,6 +938,79 @@ namespace mongo {
{ 0 }
};
+ JSClass numberint_class = {
+ "NumberInt" , JSCLASS_HAS_PRIVATE ,
+ JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
+ JS_EnumerateStub, JS_ResolveStub , JS_ConvertStub, JS_FinalizeStub,
+ JSCLASS_NO_OPTIONAL_MEMBERS
+ };
+
+ JSBool numberint_constructor( JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval ) {
+ smuassert( cx , "NumberInt needs 0 or 1 args" , argc == 0 || argc == 1 );
+
+ if ( ! JS_InstanceOf( cx , obj , &numberint_class , 0 ) ) {
+ obj = JS_NewObject( cx , &numberint_class , 0 , 0 );
+ CHECKNEWOBJECT( obj, cx, "numberint_constructor" );
+ *rval = OBJECT_TO_JSVAL( obj );
+ }
+
+ Convertor c( cx );
+ if ( argc == 0 ) {
+ c.setProperty( obj, "floatApprox", c.toval( 0.0 ) );
+ }
+ else if ( JSVAL_IS_NUMBER( argv[ 0 ] ) ) {
+ c.setProperty( obj, "floatApprox", argv[ 0 ] );
+ }
+ else {
+ string num = c.toString( argv[ 0 ] );
+ //PRINT(num);
+ const char *numStr = num.c_str();
+ int n;
+ try {
+ n = (int) parseLL( numStr );
+ //PRINT(n);
+ }
+ catch ( const AssertionException & ) {
+ smuassert( cx , "could not convert string to integer" , false );
+ }
+ c.makeIntObj( n, obj );
+ }
+
+ return JS_TRUE;
+ }
+
+ JSBool numberint_valueof(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) {
+ Convertor c(cx);
+ return *rval = c.toval( double( c.toNumberInt( obj ) ) );
+ }
+
+ JSBool numberint_tonumber(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) {
+ return numberint_valueof( cx, obj, argc, argv, rval );
+ }
+
+ JSBool numberint_tostring(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) {
+ Convertor c(cx);
+ int val = c.toNumberInt( obj );
+ string ret = str::stream() << "NumberInt(" << val << ")";
+ return *rval = c.toval( ret.c_str() );
+ }
+
+ JSBool numberint_tojson(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) {
+ Convertor c(cx);
+ int val = c.toNumberInt( obj );
+ string ret = str::stream() << val;
+ return *rval = c.toval( ret.c_str() );
+ }
+
+
+ JSFunctionSpec numberint_functions[] = {
+ { "valueOf" , numberint_valueof , 0 , JSPROP_READONLY | JSPROP_PERMANENT, 0 } ,
+ { "toNumber" , numberint_tonumber , 0 , JSPROP_READONLY | JSPROP_PERMANENT, 0 } ,
+ { "toString" , numberint_tostring , 0 , JSPROP_READONLY | JSPROP_PERMANENT, 0 } ,
+ { "tojson" , numberint_tojson , 0 , JSPROP_READONLY | JSPROP_PERMANENT, 0 } ,
+ { 0 }
+ };
+
JSClass minkey_class = {
"MinKey" , JSCLASS_HAS_PRIVATE ,
JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
@@ -1039,10 +1113,11 @@ namespace mongo {
assert( JS_InitClass( cx , global , 0 , &dbquery_class , dbquery_constructor , 0 , 0 , 0 , 0 , 0 ) );
assert( JS_InitClass( cx , global , 0 , &dbpointer_class , dbpointer_constructor , 0 , 0 , dbpointer_functions , 0 , 0 ) );
assert( JS_InitClass( cx , global , 0 , &bindata_class , bindata_constructor , 0 , 0 , bindata_functions , 0 , 0 ) );
- assert( JS_InitClass( cx , global , 0 , &uuid_class , uuid_constructor , 0 , 0 , uuid_functions , 0 , 0 ) );
+// assert( JS_InitClass( cx , global , 0 , &uuid_class , uuid_constructor , 0 , 0 , uuid_functions , 0 , 0 ) );
assert( JS_InitClass( cx , global , 0 , &timestamp_class , timestamp_constructor , 0 , 0 , 0 , 0 , 0 ) );
assert( JS_InitClass( cx , global , 0 , &numberlong_class , numberlong_constructor , 0 , 0 , numberlong_functions , 0 , 0 ) );
+ assert( JS_InitClass( cx , global , 0 , &numberint_class , numberint_constructor , 0 , 0 , numberint_functions , 0 , 0 ) );
assert( JS_InitClass( cx , global , 0 , &minkey_class , 0 , 0 , 0 , 0 , 0 , 0 ) );
assert( JS_InitClass( cx , global , 0 , &maxkey_class , 0 , 0 , 0 , 0 , 0 , 0 ) );
@@ -1087,6 +1162,11 @@ namespace mongo {
return true;
}
+ if ( JS_InstanceOf( c->_context , o , &numberint_class , 0 ) ) {
+ b.append( name , c->toNumberInt( o ) );
+ return true;
+ }
+
if ( JS_InstanceOf( c->_context , o , &dbpointer_class , 0 ) ) {
b.appendDBRef( name , c->getString( o , "ns" ) , c->toOID( c->getProperty( o , "id" ) ) );
return true;
@@ -1120,8 +1200,8 @@ namespace mongo {
#else
if ( JS_InstanceOf( c->_context , o, &js_DateClass , 0 ) ) {
jsdouble d = js_DateGetMsecSinceEpoch( c->_context , o );
- //TODO: make signed
- b.appendDate( name , Date_t((unsigned long long)d) );
+ long long d2 = (long long)d;
+ b.appendDate( name , Date_t((unsigned long long)d2) );
return true;
}
#endif
diff --git a/scripting/utils.cpp b/scripting/utils.cpp
index 97eea10..612b173 100644
--- a/scripting/utils.cpp
+++ b/scripting/utils.cpp
@@ -25,7 +25,7 @@ namespace mongo {
void installBenchmarkSystem( Scope& scope );
- BSONObj jsmd5( const BSONObj &a ) {
+ BSONObj jsmd5( const BSONObj &a, void* data ) {
uassert( 10261 , "js md5 needs a string" , a.firstElement().type() == String );
const char * s = a.firstElement().valuestrsafe();
@@ -38,7 +38,7 @@ namespace mongo {
return BSON( "" << digestToString( d ) );
}
- BSONObj JSVersion( const BSONObj& args ) {
+ BSONObj JSVersion( const BSONObj& args, void* data ) {
cout << "version: " << versionString << endl;
if ( strstr( versionString , "+" ) )
printGitVersion();
@@ -46,6 +46,20 @@ namespace mongo {
}
+ BSONObj JSSleep(const mongo::BSONObj &args, void* data) {
+ assert( args.nFields() == 1 );
+ assert( args.firstElement().isNumber() );
+ int ms = int( args.firstElement().number() );
+ {
+ auto_ptr< ScriptEngine::Unlocker > u = globalScriptEngine->newThreadUnlocker();
+ sleepmillis( ms );
+ }
+
+ BSONObjBuilder b;
+ b.appendUndefined( "" );
+ return b.obj();
+ }
+
// ---------------------------------
// ---- installer --------
// ---------------------------------
@@ -53,6 +67,7 @@ namespace mongo {
void installGlobalUtils( Scope& scope ) {
scope.injectNative( "hex_md5" , jsmd5 );
scope.injectNative( "version" , JSVersion );
+ scope.injectNative( "sleep" , JSSleep );
installBenchmarkSystem( scope );
}
diff --git a/scripting/v8_db.cpp b/scripting/v8_db.cpp
index 4d12454..bda549c 100644
--- a/scripting/v8_db.cpp
+++ b/scripting/v8_db.cpp
@@ -15,10 +15,18 @@
* limitations under the License.
*/
+#if defined(_WIN32)
+/** this is a hack - v8stdint.h defined uint16_t etc. on _WIN32 only, and that collides with
+ our usage of boost */
+#include "boost/cstdint.hpp"
+using namespace boost;
+#define V8STDINT_H_
+#endif
+
#include "v8_wrapper.h"
#include "v8_utils.h"
-#include "v8_db.h"
#include "engine_v8.h"
+#include "v8_db.h"
#include "util/base64.h"
#include "util/text.h"
#include "../client/syncclusterconnection.h"
@@ -32,127 +40,161 @@ namespace mongo {
#define DDD(x)
- v8::Handle<v8::FunctionTemplate> getMongoFunctionTemplate( bool local ) {
- v8::Local<v8::FunctionTemplate> mongo;
+ v8::Handle<v8::FunctionTemplate> getMongoFunctionTemplate( V8Scope* scope, bool local ) {
+ v8::Handle<v8::FunctionTemplate> mongo;
if ( local ) {
- mongo = newV8Function< mongoConsLocal >();
+ mongo = scope->createV8Function(mongoConsLocal);
}
else {
- mongo = newV8Function< mongoConsExternal >();
+ mongo = scope->createV8Function(mongoConsExternal);
}
mongo->InstanceTemplate()->SetInternalFieldCount( 1 );
+ v8::Handle<v8::Template> proto = mongo->PrototypeTemplate();
+ scope->injectV8Function("find", mongoFind, proto);
+ scope->injectV8Function("insert", mongoInsert, proto);
+ scope->injectV8Function("remove", mongoRemove, proto);
+ scope->injectV8Function("update", mongoUpdate, proto);
- v8::Local<v8::Template> proto = mongo->PrototypeTemplate();
-
- proto->Set( v8::String::New( "find" ) , newV8Function< mongoFind >() );
- proto->Set( v8::String::New( "insert" ) , newV8Function< mongoInsert >() );
- proto->Set( v8::String::New( "remove" ) , newV8Function< mongoRemove >() );
- proto->Set( v8::String::New( "update" ) , newV8Function< mongoUpdate >() );
-
- Local<FunctionTemplate> ic = newV8Function< internalCursorCons >();
+ v8::Handle<FunctionTemplate> ic = scope->createV8Function(internalCursorCons);
ic->InstanceTemplate()->SetInternalFieldCount( 1 );
- ic->PrototypeTemplate()->Set( v8::String::New("next") , newV8Function< internalCursorNext >() );
- ic->PrototypeTemplate()->Set( v8::String::New("hasNext") , newV8Function< internalCursorHasNext >() );
- ic->PrototypeTemplate()->Set( v8::String::New("objsLeftInBatch") , newV8Function< internalCursorObjsLeftInBatch >() );
- proto->Set( v8::String::New( "internalCursor" ) , ic );
-
-
+ v8::Handle<v8::Template> icproto = ic->PrototypeTemplate();
+ scope->injectV8Function("next", internalCursorNext, icproto);
+ scope->injectV8Function("hasNext", internalCursorHasNext, icproto);
+ scope->injectV8Function("objsLeftInBatch", internalCursorObjsLeftInBatch, icproto);
+ proto->Set( scope->getV8Str( "internalCursor" ) , ic );
return mongo;
}
- v8::Handle<v8::FunctionTemplate> getNumberLongFunctionTemplate() {
- v8::Local<v8::FunctionTemplate> numberLong = newV8Function< numberLongInit >();
+ v8::Handle<v8::FunctionTemplate> getNumberLongFunctionTemplate(V8Scope* scope) {
+ v8::Handle<v8::FunctionTemplate> numberLong = scope->createV8Function(numberLongInit);
v8::Local<v8::Template> proto = numberLong->PrototypeTemplate();
-
- proto->Set( v8::String::New( "valueOf" ) , newV8Function< numberLongValueOf >() );
- proto->Set( v8::String::New( "toNumber" ) , newV8Function< numberLongToNumber >() );
- proto->Set( v8::String::New( "toString" ) , newV8Function< numberLongToString >() );
+ scope->injectV8Function("valueOf", numberLongValueOf, proto);
+ scope->injectV8Function("toNumber", numberLongToNumber, proto);
+ scope->injectV8Function("toString", numberLongToString, proto);
return numberLong;
}
- v8::Handle<v8::FunctionTemplate> getBinDataFunctionTemplate() {
- v8::Local<v8::FunctionTemplate> binData = newV8Function< binDataInit >();
- v8::Local<v8::Template> proto = binData->PrototypeTemplate();
+ v8::Handle<v8::FunctionTemplate> getNumberIntFunctionTemplate(V8Scope* scope) {
+ v8::Handle<v8::FunctionTemplate> numberInt = scope->createV8Function(numberIntInit);
+ v8::Local<v8::Template> proto = numberInt->PrototypeTemplate();
+ scope->injectV8Function("valueOf", numberIntValueOf, proto);
+ scope->injectV8Function("toNumber", numberIntToNumber, proto);
+ scope->injectV8Function("toString", numberIntToString, proto);
- proto->Set( v8::String::New( "toString" ) , newV8Function< binDataToString >() );
+ return numberInt;
+ }
+ v8::Handle<v8::FunctionTemplate> getBinDataFunctionTemplate(V8Scope* scope) {
+ v8::Handle<v8::FunctionTemplate> binData = scope->createV8Function(binDataInit);
+ binData->InstanceTemplate()->SetInternalFieldCount(1);
+ v8::Local<v8::Template> proto = binData->PrototypeTemplate();
+ scope->injectV8Function("toString", binDataToString, proto);
+ scope->injectV8Function("base64", binDataToBase64, proto);
+ scope->injectV8Function("hex", binDataToHex, proto);
return binData;
}
- v8::Handle<v8::FunctionTemplate> getTimestampFunctionTemplate() {
- v8::Local<v8::FunctionTemplate> ts = newV8Function< dbTimestampInit >();
- v8::Local<v8::Template> proto = ts->PrototypeTemplate();
-
- ts->InstanceTemplate()->SetInternalFieldCount( 1 );
-
- return ts;
+ v8::Handle<v8::FunctionTemplate> getUUIDFunctionTemplate(V8Scope* scope) {
+ v8::Handle<v8::FunctionTemplate> templ = scope->createV8Function(uuidInit);
+ templ->InstanceTemplate()->SetInternalFieldCount(1);
+ v8::Local<v8::Template> proto = templ->PrototypeTemplate();
+ scope->injectV8Function("toString", binDataToString, proto);
+ scope->injectV8Function("base64", binDataToBase64, proto);
+ scope->injectV8Function("hex", binDataToHex, proto);
+ return templ;
}
+ v8::Handle<v8::FunctionTemplate> getMD5FunctionTemplate(V8Scope* scope) {
+ v8::Handle<v8::FunctionTemplate> templ = scope->createV8Function(md5Init);
+ templ->InstanceTemplate()->SetInternalFieldCount(1);
+ v8::Local<v8::Template> proto = templ->PrototypeTemplate();
+ scope->injectV8Function("toString", binDataToString, proto);
+ scope->injectV8Function("base64", binDataToBase64, proto);
+ scope->injectV8Function("hex", binDataToHex, proto);
+ return templ;
+ }
- void installDBTypes( Handle<ObjectTemplate>& global ) {
- v8::Local<v8::FunctionTemplate> db = newV8Function< dbInit >();
- db->InstanceTemplate()->SetNamedPropertyHandler( collectionFallback );
- global->Set(v8::String::New("DB") , db );
-
- v8::Local<v8::FunctionTemplate> dbCollection = newV8Function< collectionInit >();
- dbCollection->InstanceTemplate()->SetNamedPropertyHandler( collectionFallback );
- global->Set(v8::String::New("DBCollection") , dbCollection );
-
-
- v8::Local<v8::FunctionTemplate> dbQuery = newV8Function< dbQueryInit >();
- dbQuery->InstanceTemplate()->SetIndexedPropertyHandler( dbQueryIndexAccess );
- global->Set(v8::String::New("DBQuery") , dbQuery );
-
- global->Set( v8::String::New("ObjectId") , newV8Function< objectIdInit >() );
-
- global->Set( v8::String::New("DBRef") , newV8Function< dbRefInit >() );
-
- global->Set( v8::String::New("DBPointer") , newV8Function< dbPointerInit >() );
-
- global->Set( v8::String::New("BinData") , getBinDataFunctionTemplate() );
+ v8::Handle<v8::FunctionTemplate> getHexDataFunctionTemplate(V8Scope* scope) {
+ v8::Handle<v8::FunctionTemplate> templ = scope->createV8Function(hexDataInit);
+ templ->InstanceTemplate()->SetInternalFieldCount(1);
+ v8::Local<v8::Template> proto = templ->PrototypeTemplate();
+ scope->injectV8Function("toString", binDataToString, proto);
+ scope->injectV8Function("base64", binDataToBase64, proto);
+ scope->injectV8Function("hex", binDataToHex, proto);
+ return templ;
+ }
- global->Set( v8::String::New("NumberLong") , getNumberLongFunctionTemplate() );
+ v8::Handle<v8::FunctionTemplate> getTimestampFunctionTemplate(V8Scope* scope) {
+ v8::Handle<v8::FunctionTemplate> ts = scope->createV8Function(dbTimestampInit);
+ v8::Local<v8::Template> proto = ts->PrototypeTemplate();
+ ts->InstanceTemplate()->SetInternalFieldCount( 1 );
- global->Set( v8::String::New("Timestamp") , getTimestampFunctionTemplate() );
+ return ts;
}
- void installDBTypes( Handle<v8::Object>& global ) {
- v8::Local<v8::FunctionTemplate> db = newV8Function< dbInit >();
+// void installDBTypes( V8Scope* scope, Handle<ObjectTemplate>& global ) {
+// v8::Handle<v8::FunctionTemplate> db = scope->createV8Function(dbInit);
+// db->InstanceTemplate()->SetNamedPropertyHandler( collectionFallback );
+// global->Set(v8::String::New("DB") , db );
+//
+// v8::Handle<v8::FunctionTemplate> dbCollection = scope->createV8Function(collectionInit);
+// dbCollection->InstanceTemplate()->SetNamedPropertyHandler( collectionFallback );
+// global->Set(v8::String::New("DBCollection") , dbCollection );
+//
+//
+// v8::Handle<v8::FunctionTemplate> dbQuery = scope->createV8Function(dbQueryInit);
+// dbQuery->InstanceTemplate()->SetIndexedPropertyHandler( dbQueryIndexAccess );
+// global->Set(v8::String::New("DBQuery") , dbQuery );
+//
+// global->Set( v8::String::New("ObjectId") , newV8Function< objectIdInit >(scope) );
+//
+// global->Set( v8::String::New("DBRef") , newV8Function< dbRefInit >(scope) );
+//
+// global->Set( v8::String::New("DBPointer") , newV8Function< dbPointerInit >(scope) );
+//
+// global->Set( v8::String::New("BinData") , getBinDataFunctionTemplate(scope) );
+//
+// global->Set( v8::String::New("NumberLong") , getNumberLongFunctionTemplate(scope) );
+//
+// global->Set( v8::String::New("Timestamp") , getTimestampFunctionTemplate(scope) );
+// }
+
+ void installDBTypes( V8Scope* scope, v8::Handle<v8::Object>& global ) {
+ v8::Handle<v8::FunctionTemplate> db = scope->createV8Function(dbInit);
db->InstanceTemplate()->SetNamedPropertyHandler( collectionFallback );
- global->Set(v8::String::New("DB") , db->GetFunction() );
-
- v8::Local<v8::FunctionTemplate> dbCollection = newV8Function< collectionInit >();
+ global->Set(scope->getV8Str("DB") , db->GetFunction() );
+ v8::Handle<v8::FunctionTemplate> dbCollection = scope->createV8Function(collectionInit);
dbCollection->InstanceTemplate()->SetNamedPropertyHandler( collectionFallback );
- global->Set(v8::String::New("DBCollection") , dbCollection->GetFunction() );
+ global->Set(scope->getV8Str("DBCollection") , dbCollection->GetFunction() );
- v8::Local<v8::FunctionTemplate> dbQuery = newV8Function< dbQueryInit >();
+ v8::Handle<v8::FunctionTemplate> dbQuery = scope->createV8Function(dbQueryInit);
dbQuery->InstanceTemplate()->SetIndexedPropertyHandler( dbQueryIndexAccess );
- global->Set(v8::String::New("DBQuery") , dbQuery->GetFunction() );
-
- global->Set( v8::String::New("ObjectId") , newV8Function< objectIdInit >()->GetFunction() );
-
- global->Set( v8::String::New("DBRef") , newV8Function< dbRefInit >()->GetFunction() );
+ global->Set(scope->getV8Str("DBQuery") , dbQuery->GetFunction() );
- global->Set( v8::String::New("DBPointer") , newV8Function< dbPointerInit >()->GetFunction() );
+ scope->injectV8Function("ObjectId", objectIdInit, global);
+ scope->injectV8Function("DBRef", dbRefInit, global);
+ scope->injectV8Function("DBPointer", dbPointerInit, global);
- global->Set( v8::String::New("BinData") , getBinDataFunctionTemplate()->GetFunction() );
-
- global->Set( v8::String::New("NumberLong") , getNumberLongFunctionTemplate()->GetFunction() );
-
- global->Set( v8::String::New("Timestamp") , getTimestampFunctionTemplate()->GetFunction() );
+ global->Set( scope->getV8Str("BinData") , getBinDataFunctionTemplate(scope)->GetFunction() );
+ global->Set( scope->getV8Str("UUID") , getUUIDFunctionTemplate(scope)->GetFunction() );
+ global->Set( scope->getV8Str("MD5") , getMD5FunctionTemplate(scope)->GetFunction() );
+ global->Set( scope->getV8Str("HexData") , getHexDataFunctionTemplate(scope)->GetFunction() );
+ global->Set( scope->getV8Str("NumberLong") , getNumberLongFunctionTemplate(scope)->GetFunction() );
+ global->Set( scope->getV8Str("NumberInt") , getNumberIntFunctionTemplate(scope)->GetFunction() );
+ global->Set( scope->getV8Str("Timestamp") , getTimestampFunctionTemplate(scope)->GetFunction() );
BSONObjBuilder b;
b.appendMaxKey( "" );
b.appendMinKey( "" );
BSONObj o = b.obj();
BSONObjIterator i( o );
- global->Set( v8::String::New("MaxKey"), mongoToV8Element( i.next() ) );
- global->Set( v8::String::New("MinKey"), mongoToV8Element( i.next() ) );
+ global->Set( scope->getV8Str("MaxKey"), scope->mongoToV8Element( i.next() ) );
+ global->Set( scope->getV8Str("MinKey"), scope->mongoToV8Element( i.next() ) );
- global->Get( v8::String::New( "Object" ) )->ToObject()->Set( v8::String::New("bsonsize") , newV8Function< bsonsize >()->GetFunction() );
+ global->Get( scope->getV8Str( "Object" ) )->ToObject()->Set( scope->getV8Str("bsonsize") , scope->createV8Function(bsonsize)->GetFunction() );
}
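The MaxKey/MinKey trick above leans on BSONObjIterator returning elements in append order, so the first next() yields the MaxKey element and the second the MinKey element. A standalone sketch of that ordering (assuming only this tree's bson/bson.h):

    // standalone sketch: BSONObjIterator walks elements in append order
    #include <iostream>
    #include "bson/bson.h"

    int main() {
        mongo::BSONObjBuilder b;
        b.appendMaxKey( "" );
        b.appendMinKey( "" );
        mongo::BSONObj o = b.obj();
        mongo::BSONObjIterator i( o );
        std::cout << i.next().type() << std::endl;   // MaxKey element first (type 127)
        std::cout << i.next().type() << std::endl;   // then MinKey (type -1)
        return 0;
    }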
void destroyConnection( Persistent<Value> self, void* parameter) {
@@ -161,7 +203,7 @@ namespace mongo {
self.Clear();
}
- Handle<Value> mongoConsExternal(const Arguments& args) {
+ Handle<Value> mongoConsExternal(V8Scope* scope, const Arguments& args) {
char host[255];
@@ -196,13 +238,13 @@ namespace mongo {
}
args.This()->SetInternalField( 0 , External::New( conn ) );
- args.This()->Set( v8::String::New( "slaveOk" ) , Boolean::New( false ) );
- args.This()->Set( v8::String::New( "host" ) , v8::String::New( host ) );
+ args.This()->Set( scope->getV8Str( "slaveOk" ) , Boolean::New( false ) );
+ args.This()->Set( scope->getV8Str( "host" ) , scope->getV8Str( host ) );
return v8::Undefined();
}
- Handle<Value> mongoConsLocal(const Arguments& args) {
+ Handle<Value> mongoConsLocal(V8Scope* scope, const Arguments& args) {
if ( args.Length() > 0 )
return v8::ThrowException( v8::String::New( "local Mongo constructor takes no args" ) );
@@ -218,8 +260,8 @@ namespace mongo {
// NOTE I don't believe the conn object will ever be freed.
args.This()->SetInternalField( 0 , External::New( conn ) );
- args.This()->Set( v8::String::New( "slaveOk" ) , Boolean::New( false ) );
- args.This()->Set( v8::String::New( "host" ) , v8::String::New( "EMBEDDED" ) );
+ args.This()->Set( scope->getV8Str( "slaveOk" ) , Boolean::New( false ) );
+ args.This()->Set( scope->getV8Str( "host" ) , scope->getV8Str( "EMBEDDED" ) );
return v8::Undefined();
}
@@ -255,7 +297,7 @@ namespace mongo {
3 - limit
4 - skip
*/
- Handle<Value> mongoFind(const Arguments& args) {
+ Handle<Value> mongoFind(V8Scope* scope, const Arguments& args) {
HandleScope handle_scope;
jsassert( args.Length() == 7 , "find needs 7 args" );
@@ -263,16 +305,16 @@ namespace mongo {
DBClientBase * conn = getConnection( args );
GETNS;
- BSONObj q = v8ToMongo( args[1]->ToObject() );
+ BSONObj q = scope->v8ToMongo( args[1]->ToObject() );
DDD( "query:" << q );
BSONObj fields;
bool haveFields = args[2]->IsObject() && args[2]->ToObject()->GetPropertyNames()->Length() > 0;
if ( haveFields )
- fields = v8ToMongo( args[2]->ToObject() );
+ fields = scope->v8ToMongo( args[2]->ToObject() );
Local<v8::Object> mongo = args.This();
- Local<v8::Value> slaveOkVal = mongo->Get( v8::String::New( "slaveOk" ) );
+ Local<v8::Value> slaveOkVal = mongo->Get( scope->getV8Str( "slaveOk" ) );
jsassert( slaveOkVal->IsBoolean(), "slaveOk member invalid" );
bool slaveOk = slaveOkVal->BooleanValue();
@@ -285,8 +327,10 @@ namespace mongo {
{
V8Unlock u;
cursor = conn->query( ns, q , nToReturn , nToSkip , haveFields ? &fields : 0, options | ( slaveOk ? QueryOption_SlaveOk : 0 ) , batchSize );
+ if ( ! cursor.get() )
+ return v8::ThrowException( v8::String::New( "error doing query: failed" ) );
}
- v8::Function * cons = (v8::Function*)( *( mongo->Get( v8::String::New( "internalCursor" ) ) ) );
+ v8::Function * cons = (v8::Function*)( *( mongo->Get( scope->getV8Str( "internalCursor" ) ) ) );
assert( cons );
Persistent<v8::Object> c = Persistent<v8::Object>::New( cons->NewInstance() );
@@ -300,11 +344,11 @@ namespace mongo {
}
}
- v8::Handle<v8::Value> mongoInsert(const v8::Arguments& args) {
+ v8::Handle<v8::Value> mongoInsert(V8Scope* scope, const v8::Arguments& args) {
jsassert( args.Length() == 2 , "insert needs 2 args" );
jsassert( args[1]->IsObject() , "have to insert an object" );
- if ( args.This()->Get( v8::String::New( "readOnly" ) )->BooleanValue() )
+ if ( args.This()->Get( scope->getV8Str( "readOnly" ) )->BooleanValue() )
return v8::ThrowException( v8::String::New( "js db in read only mode" ) );
DBClientBase * conn = getConnection( args );
@@ -312,12 +356,12 @@ namespace mongo {
v8::Handle<v8::Object> in = args[1]->ToObject();
- if ( ! in->Has( v8::String::New( "_id" ) ) ) {
+ if ( ! in->Has( scope->getV8Str( "_id" ) ) ) {
v8::Handle<v8::Value> argv[1];
- in->Set( v8::String::New( "_id" ) , getObjectIdCons()->NewInstance( 0 , argv ) );
+ in->Set( scope->getV8Str( "_id" ) , scope->getObjectIdCons()->NewInstance( 0 , argv ) );
}
- BSONObj o = v8ToMongo( in );
+ BSONObj o = scope->v8ToMongo( in );
DDD( "want to save : " << o.jsonString() );
try {
@@ -331,18 +375,18 @@ namespace mongo {
return v8::Undefined();
}
- v8::Handle<v8::Value> mongoRemove(const v8::Arguments& args) {
+ v8::Handle<v8::Value> mongoRemove(V8Scope* scope, const v8::Arguments& args) {
jsassert( args.Length() == 2 || args.Length() == 3 , "remove needs 2 args" );
jsassert( args[1]->IsObject() , "have to remove an object template" );
- if ( args.This()->Get( v8::String::New( "readOnly" ) )->BooleanValue() )
+ if ( args.This()->Get( scope->getV8Str( "readOnly" ) )->BooleanValue() )
return v8::ThrowException( v8::String::New( "js db in read only mode" ) );
DBClientBase * conn = getConnection( args );
GETNS;
v8::Handle<v8::Object> in = args[1]->ToObject();
- BSONObj o = v8ToMongo( in );
+ BSONObj o = scope->v8ToMongo( in );
bool justOne = false;
if ( args.Length() > 2 ) {
@@ -361,12 +405,12 @@ namespace mongo {
return v8::Undefined();
}
- v8::Handle<v8::Value> mongoUpdate(const v8::Arguments& args) {
+ v8::Handle<v8::Value> mongoUpdate(V8Scope* scope, const v8::Arguments& args) {
jsassert( args.Length() >= 3 , "update needs at least 3 args" );
jsassert( args[1]->IsObject() , "1st param to update has to be an object" );
jsassert( args[2]->IsObject() , "2nd param to update has to be an object" );
- if ( args.This()->Get( v8::String::New( "readOnly" ) )->BooleanValue() )
+ if ( args.This()->Get( scope->getV8Str( "readOnly" ) )->BooleanValue() )
return v8::ThrowException( v8::String::New( "js db in read only mode" ) );
DBClientBase * conn = getConnection( args );
@@ -379,8 +423,8 @@ namespace mongo {
bool multi = args.Length() > 4 && args[4]->IsBoolean() && args[4]->ToBoolean()->Value();
try {
- BSONObj q1 = v8ToMongo( q );
- BSONObj o1 = v8ToMongo( o );
+ BSONObj q1 = scope->v8ToMongo( q );
+ BSONObj o1 = scope->v8ToMongo( o );
V8Unlock u;
conn->update( ns , q1 , o1 , upsert, multi );
}
@@ -403,11 +447,11 @@ namespace mongo {
return cursor;
}
- v8::Handle<v8::Value> internalCursorCons(const v8::Arguments& args) {
+ v8::Handle<v8::Value> internalCursorCons(V8Scope* scope, const v8::Arguments& args) {
return v8::Undefined();
}
- v8::Handle<v8::Value> internalCursorNext(const v8::Arguments& args) {
+ v8::Handle<v8::Value> internalCursorNext(V8Scope* scope, const v8::Arguments& args) {
mongo::DBClientCursor * cursor = getCursor( args );
if ( ! cursor )
return v8::Undefined();
@@ -416,10 +460,13 @@ namespace mongo {
V8Unlock u;
o = cursor->next();
}
- return mongoToV8( o );
+ bool ro = false;
+ if (args.This()->Has(scope->V8STR_RO))
+ ro = args.This()->Get(scope->V8STR_RO)->BooleanValue();
+ return scope->mongoToLZV8( o, false, ro );
}
- v8::Handle<v8::Value> internalCursorHasNext(const v8::Arguments& args) {
+ v8::Handle<v8::Value> internalCursorHasNext(V8Scope* scope, const v8::Arguments& args) {
mongo::DBClientCursor * cursor = getCursor( args );
if ( ! cursor )
return Boolean::New( false );
@@ -431,7 +478,7 @@ namespace mongo {
return Boolean::New( ret );
}
- v8::Handle<v8::Value> internalCursorObjsLeftInBatch(const v8::Arguments& args) {
+ v8::Handle<v8::Value> internalCursorObjsLeftInBatch(V8Scope* scope, const v8::Arguments& args) {
mongo::DBClientCursor * cursor = getCursor( args );
if ( ! cursor )
return v8::Number::New( (double) 0 );
@@ -443,14 +490,19 @@ namespace mongo {
return v8::Number::New( (double) ret );
}
+// v8::Handle<v8::Value> internalCursorReadOnly(V8Scope* scope, const v8::Arguments& args) {
+// Local<v8::Object> cursor = args.This();
+// cursor->Set(scope->V8STR_RO, v8::Undefined());
+// return cursor;
+// }
// --- DB ----
- v8::Handle<v8::Value> dbInit(const v8::Arguments& args) {
+ v8::Handle<v8::Value> dbInit(V8Scope* scope, const v8::Arguments& args) {
assert( args.Length() == 2 );
- args.This()->Set( v8::String::New( "_mongo" ) , args[0] );
- args.This()->Set( v8::String::New( "_name" ) , args[1] );
+ args.This()->Set( scope->getV8Str( "_mongo" ) , args[0] );
+ args.This()->Set( scope->getV8Str( "_name" ) , args[1] );
for ( int i=0; i<args.Length(); i++ )
assert( ! args[i]->IsUndefined() );
@@ -458,13 +510,13 @@ namespace mongo {
return v8::Undefined();
}
- v8::Handle<v8::Value> collectionInit( const v8::Arguments& args ) {
+ v8::Handle<v8::Value> collectionInit( V8Scope* scope, const v8::Arguments& args ) {
assert( args.Length() == 4 );
- args.This()->Set( v8::String::New( "_mongo" ) , args[0] );
- args.This()->Set( v8::String::New( "_db" ) , args[1] );
- args.This()->Set( v8::String::New( "_shortName" ) , args[2] );
- args.This()->Set( v8::String::New( "_fullName" ) , args[3] );
+ args.This()->Set( scope->getV8Str( "_mongo" ) , args[0] );
+ args.This()->Set( scope->getV8Str( "_db" ) , args[1] );
+ args.This()->Set( scope->getV8Str( "_shortName" ) , args[2] );
+ args.This()->Set( scope->getV8Str( "_fullName" ) , args[3] );
if ( haveLocalShardingInfo( toSTLString( args[3] ) ) )
return v8::ThrowException( v8::String::New( "can't use sharded collection from db.eval" ) );
@@ -475,52 +527,52 @@ namespace mongo {
return v8::Undefined();
}
- v8::Handle<v8::Value> dbQueryInit( const v8::Arguments& args ) {
+ v8::Handle<v8::Value> dbQueryInit( V8Scope* scope, const v8::Arguments& args ) {
v8::Handle<v8::Object> t = args.This();
assert( args.Length() >= 4 );
- t->Set( v8::String::New( "_mongo" ) , args[0] );
- t->Set( v8::String::New( "_db" ) , args[1] );
- t->Set( v8::String::New( "_collection" ) , args[2] );
- t->Set( v8::String::New( "_ns" ) , args[3] );
+ t->Set( scope->getV8Str( "_mongo" ) , args[0] );
+ t->Set( scope->getV8Str( "_db" ) , args[1] );
+ t->Set( scope->getV8Str( "_collection" ) , args[2] );
+ t->Set( scope->getV8Str( "_ns" ) , args[3] );
if ( args.Length() > 4 && args[4]->IsObject() )
- t->Set( v8::String::New( "_query" ) , args[4] );
+ t->Set( scope->getV8Str( "_query" ) , args[4] );
else
- t->Set( v8::String::New( "_query" ) , v8::Object::New() );
+ t->Set( scope->getV8Str( "_query" ) , v8::Object::New() );
if ( args.Length() > 5 && args[5]->IsObject() )
- t->Set( v8::String::New( "_fields" ) , args[5] );
+ t->Set( scope->getV8Str( "_fields" ) , args[5] );
else
- t->Set( v8::String::New( "_fields" ) , v8::Null() );
+ t->Set( scope->getV8Str( "_fields" ) , v8::Null() );
if ( args.Length() > 6 && args[6]->IsNumber() )
- t->Set( v8::String::New( "_limit" ) , args[6] );
+ t->Set( scope->getV8Str( "_limit" ) , args[6] );
else
- t->Set( v8::String::New( "_limit" ) , Number::New( 0 ) );
+ t->Set( scope->getV8Str( "_limit" ) , Number::New( 0 ) );
if ( args.Length() > 7 && args[7]->IsNumber() )
- t->Set( v8::String::New( "_skip" ) , args[7] );
+ t->Set( scope->getV8Str( "_skip" ) , args[7] );
else
- t->Set( v8::String::New( "_skip" ) , Number::New( 0 ) );
+ t->Set( scope->getV8Str( "_skip" ) , Number::New( 0 ) );
if ( args.Length() > 8 && args[8]->IsNumber() )
- t->Set( v8::String::New( "_batchSize" ) , args[8] );
+ t->Set( scope->getV8Str( "_batchSize" ) , args[8] );
else
- t->Set( v8::String::New( "_batchSize" ) , Number::New( 0 ) );
+ t->Set( scope->getV8Str( "_batchSize" ) , Number::New( 0 ) );
if ( args.Length() > 9 && args[9]->IsNumber() )
- t->Set( v8::String::New( "_options" ) , args[9] );
+ t->Set( scope->getV8Str( "_options" ) , args[9] );
else
- t->Set( v8::String::New( "_options" ) , Number::New( 0 ) );
+ t->Set( scope->getV8Str( "_options" ) , Number::New( 0 ) );
- t->Set( v8::String::New( "_cursor" ) , v8::Null() );
- t->Set( v8::String::New( "_numReturned" ) , v8::Number::New(0) );
- t->Set( v8::String::New( "_special" ) , Boolean::New(false) );
+ t->Set( scope->getV8Str( "_cursor" ) , v8::Null() );
+ t->Set( scope->getV8Str( "_numReturned" ) , v8::Number::New(0) );
+ t->Set( scope->getV8Str( "_special" ) , Boolean::New(false) );
return v8::Undefined();
}
@@ -560,11 +612,11 @@ namespace mongo {
return f->Call( info.This() , 1 , argv );
}
- v8::Handle<v8::Value> objectIdInit( const v8::Arguments& args ) {
+ v8::Handle<v8::Value> objectIdInit( V8Scope* scope, const v8::Arguments& args ) {
v8::Handle<v8::Object> it = args.This();
if ( it->IsUndefined() || it == v8::Context::GetCurrent()->Global() ) {
- v8::Function * f = getObjectIdCons();
+ v8::Function * f = scope->getObjectIdCons();
it = f->NewInstance();
}
@@ -585,12 +637,12 @@ namespace mongo {
oid.init( s );
}
- it->Set( v8::String::New( "str" ) , v8::String::New( oid.str().c_str() ) );
+ it->Set( scope->getV8Str( "str" ) , v8::String::New( oid.str().c_str() ) );
return it;
}
- v8::Handle<v8::Value> dbRefInit( const v8::Arguments& args ) {
+ v8::Handle<v8::Value> dbRefInit( V8Scope* scope, const v8::Arguments& args ) {
if (args.Length() != 2 && args.Length() != 0) {
return v8::ThrowException( v8::String::New( "DBRef needs 2 arguments" ) );
@@ -599,19 +651,19 @@ namespace mongo {
v8::Handle<v8::Object> it = args.This();
if ( it->IsUndefined() || it == v8::Context::GetCurrent()->Global() ) {
- v8::Function* f = getNamedCons( "DBRef" );
+ v8::Function* f = scope->getNamedCons( "DBRef" );
it = f->NewInstance();
}
if ( args.Length() == 2 ) {
- it->Set( v8::String::New( "$ref" ) , args[0] );
- it->Set( v8::String::New( "$id" ) , args[1] );
+ it->Set( scope->getV8Str( "$ref" ) , args[0] );
+ it->Set( scope->getV8Str( "$id" ) , args[1] );
}
return it;
}
- v8::Handle<v8::Value> dbPointerInit( const v8::Arguments& args ) {
+ v8::Handle<v8::Value> dbPointerInit( V8Scope* scope, const v8::Arguments& args ) {
if (args.Length() != 2) {
return v8::ThrowException( v8::String::New( "DBPointer needs 2 arguments" ) );
@@ -620,28 +672,28 @@ namespace mongo {
v8::Handle<v8::Object> it = args.This();
if ( it->IsUndefined() || it == v8::Context::GetCurrent()->Global() ) {
- v8::Function* f = getNamedCons( "DBPointer" );
+ v8::Function* f = scope->getNamedCons( "DBPointer" );
it = f->NewInstance();
}
- it->Set( v8::String::New( "ns" ) , args[0] );
- it->Set( v8::String::New( "id" ) , args[1] );
- it->SetHiddenValue( v8::String::New( "__DBPointer" ), v8::Number::New( 1 ) );
+ it->Set( scope->getV8Str( "ns" ) , args[0] );
+ it->Set( scope->getV8Str( "id" ) , args[1] );
+ it->SetHiddenValue( scope->getV8Str( "__DBPointer" ), v8::Number::New( 1 ) );
return it;
}
- v8::Handle<v8::Value> dbTimestampInit( const v8::Arguments& args ) {
+ v8::Handle<v8::Value> dbTimestampInit( V8Scope* scope, const v8::Arguments& args ) {
v8::Handle<v8::Object> it = args.This();
if ( args.Length() == 0 ) {
- it->Set( v8::String::New( "t" ) , v8::Number::New( 0 ) );
- it->Set( v8::String::New( "i" ) , v8::Number::New( 0 ) );
+ it->Set( scope->getV8Str( "t" ) , v8::Number::New( 0 ) );
+ it->Set( scope->getV8Str( "i" ) , v8::Number::New( 0 ) );
}
else if ( args.Length() == 2 ) {
- it->Set( v8::String::New( "t" ) , args[0] );
- it->Set( v8::String::New( "i" ) , args[1] );
+ it->Set( scope->getV8Str( "t" ) , args[0] );
+ it->Set( scope->getV8Str( "i" ) , args[1] );
}
else {
return v8::ThrowException( v8::String::New( "Timestamp needs 0 or 2 arguments" ) );
@@ -653,66 +705,157 @@ namespace mongo {
}
- v8::Handle<v8::Value> binDataInit( const v8::Arguments& args ) {
- v8::Handle<v8::Object> it = args.This();
+ v8::Handle<v8::Value> binDataInit( V8Scope* scope, const v8::Arguments& args ) {
+ v8::Local<v8::Object> it = args.This();
- // 3 args: len, type, data
+ Handle<Value> type;
+ Handle<Value> len;
+ int rlen;
+ char* data;
if (args.Length() == 3) {
+ // 3 args: len, type, data
if ( it->IsUndefined() || it == v8::Context::GetCurrent()->Global() ) {
- v8::Function* f = getNamedCons( "BinData" );
+ v8::Function* f = scope->getNamedCons( "BinData" );
it = f->NewInstance();
}
- it->Set( v8::String::New( "len" ) , args[0] );
- it->Set( v8::String::New( "type" ) , args[1] );
- it->Set( v8::String::New( "data" ), args[2] );
- it->SetHiddenValue( v8::String::New( "__BinData" ), v8::Number::New( 1 ) );
-
- // 2 args: type, base64 string
+ len = args[0];
+ rlen = len->IntegerValue();
+ type = args[1];
+ v8::String::Utf8Value utf( args[ 2 ] );
+ char* tmp = *utf;
+ data = new char[rlen];
+ memcpy(data, tmp, rlen);
}
else if ( args.Length() == 2 ) {
+ // 2 args: type, base64 string
if ( it->IsUndefined() || it == v8::Context::GetCurrent()->Global() ) {
- v8::Function* f = getNamedCons( "BinData" );
+ v8::Function* f = scope->getNamedCons( "BinData" );
it = f->NewInstance();
}
- v8::String::Utf8Value data( args[ 1 ] );
- string decoded = base64::decode( *data );
- it->Set( v8::String::New( "len" ) , v8::Number::New( decoded.length() ) );
- it->Set( v8::String::New( "type" ) , args[ 0 ] );
- it->Set( v8::String::New( "data" ), v8::String::New( decoded.data(), decoded.length() ) );
- it->SetHiddenValue( v8::String::New( "__BinData" ), v8::Number::New( 1 ) );
-
+ type = args[0];
+ v8::String::Utf8Value utf( args[ 1 ] );
+ string decoded = base64::decode( *utf );
+ const char* tmp = decoded.data();
+ rlen = decoded.length();
+ data = new char[rlen];
+ memcpy(data, tmp, rlen);
+ len = v8::Number::New(rlen);
+// it->Set( scope->getV8Str( "data" ), v8::String::New( decoded.data(), decoded.length() ) );
}
else {
- return v8::ThrowException( v8::String::New( "BinData needs 3 arguments" ) );
+ return v8::ThrowException( v8::String::New( "BinData needs 2 or 3 arguments" ) );
}
- return it;
+ it->Set( scope->getV8Str( "len" ) , len );
+ it->Set( scope->getV8Str( "type" ) , type );
+ it->SetHiddenValue( scope->V8STR_BINDATA, v8::Number::New( 1 ) );
+ Persistent<v8::Object> res = scope->wrapArrayObject(it, data);
+ return res;
}
- v8::Handle<v8::Value> binDataToString( const v8::Arguments& args ) {
-
- if (args.Length() != 0) {
- return v8::ThrowException( v8::String::New( "toString needs 0 arguments" ) );
- }
-
+ v8::Handle<v8::Value> binDataToString( V8Scope* scope, const v8::Arguments& args ) {
v8::Handle<v8::Object> it = args.This();
- int len = it->Get( v8::String::New( "len" ) )->ToInt32()->Value();
- int type = it->Get( v8::String::New( "type" ) )->ToInt32()->Value();
- v8::String::Utf8Value data( it->Get( v8::String::New( "data" ) ) );
+ int len = it->Get( scope->V8STR_LEN )->Int32Value();
+ int type = it->Get( scope->V8STR_TYPE )->Int32Value();
+ Local<External> c = External::Cast( *(it->GetInternalField( 0 )) );
+ char* data = (char*)(c->Value());
stringstream ss;
ss << "BinData(" << type << ",\"";
- base64::encode( ss, *data, len );
+ base64::encode( ss, data, len );
ss << "\")";
string ret = ss.str();
return v8::String::New( ret.c_str() );
}
- v8::Handle<v8::Value> numberLongInit( const v8::Arguments& args ) {
+ v8::Handle<v8::Value> binDataToBase64( V8Scope* scope, const v8::Arguments& args ) {
+ v8::Handle<v8::Object> it = args.This();
+ int len = Handle<v8::Number>::Cast(it->Get(scope->V8STR_LEN))->Int32Value();
+ Local<External> c = External::Cast( *(it->GetInternalField( 0 )) );
+ char* data = (char*)(c->Value());
+ stringstream ss;
+ base64::encode( ss, (const char *)data, len );
+ return v8::String::New(ss.str().c_str());
+ }
+
+ v8::Handle<v8::Value> binDataToHex( V8Scope* scope, const v8::Arguments& args ) {
+ v8::Handle<v8::Object> it = args.This();
+ int len = Handle<v8::Number>::Cast(it->Get(scope->V8STR_LEN))->Int32Value();
+ Local<External> c = External::Cast( *(it->GetInternalField( 0 )) );
+ char* data = (char*)(c->Value());
+ stringstream ss;
+ ss.setf (ios_base::hex , ios_base::basefield);
+ ss.fill ('0');
+ ss.setf (ios_base::right , ios_base::adjustfield);
+ for( int i = 0; i < len; i++ ) {
+ unsigned v = (unsigned char) data[i];
+ ss << setw(2) << v;
+ }
+ return v8::String::New(ss.str().c_str());
+ }
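The hex conversion above uses only standard iostream formatting; a standalone illustration of the same setf/fill/setw combination:

    // standalone illustration of the hex formatting used by binDataToHex
    #include <iostream>
    #include <sstream>
    #include <iomanip>
    using namespace std;

    int main() {
        const unsigned char data[] = { 0x00, 0x0a, 0xff, 0x42 };
        stringstream ss;
        ss.setf( ios_base::hex , ios_base::basefield );      // integers print in base 16
        ss.fill( '0' );                                       // pad short values with '0'
        ss.setf( ios_base::right , ios_base::adjustfield );   // padding goes on the left
        for ( size_t i = 0; i < sizeof(data); i++ )
            ss << setw(2) << (unsigned)data[i];               // exactly two digits per byte
        cout << ss.str() << endl;                             // prints "000aff42"
        return 0;
    }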
+
+ static v8::Handle<v8::Value> hexToBinData( V8Scope* scope, v8::Local<v8::Object> it, int type, string hexstr ) {
+ int len = hexstr.length() / 2;
+ char* data = new char[len];
+ const char* src = hexstr.c_str();
+ for( int i = 0; i < len; i++ ) {
+ data[i] = fromHex(src + i * 2);
+ }
+
+ it->Set( scope->V8STR_LEN , v8::Number::New(len) );
+ it->Set( scope->V8STR_TYPE , v8::Number::New(type) );
+ it->SetHiddenValue( scope->V8STR_BINDATA, v8::Number::New( 1 ) );
+ Persistent<v8::Object> res = scope->wrapArrayObject(it, data);
+ return res;
+ }
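hexToBinData expects fromHex() to turn one two-character hex pair into a byte, walking len pairs in total. A standalone sketch of that decoding (fromHexPair is illustrative, not the codebase's fromHex):

    // standalone sketch of decoding a hex string two characters at a time
    #include <iostream>
    #include <string>
    #include <vector>
    using namespace std;

    static int hexVal( char c ) {
        if ( c >= '0' && c <= '9' ) return c - '0';
        if ( c >= 'a' && c <= 'f' ) return c - 'a' + 10;
        if ( c >= 'A' && c <= 'F' ) return c - 'A' + 10;
        return 0;   // no validation, matching the permissive style above
    }

    static char fromHexPair( const char* p ) {
        return (char)( ( hexVal( p[0] ) << 4 ) | hexVal( p[1] ) );
    }

    int main() {
        string hexstr = "deadbeef";
        int len = hexstr.length() / 2;
        const char* src = hexstr.c_str();
        vector<char> data( len );
        for ( int i = 0; i < len; i++ )                    // loop bound is len, not a constant
            data[i] = fromHexPair( src + i * 2 );
        for ( int i = 0; i < len; i++ )
            cout << (int)(unsigned char)data[i] << " ";    // 222 173 190 239
        cout << endl;
        return 0;
    }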
+
+ v8::Handle<v8::Value> uuidInit( V8Scope* scope, const v8::Arguments& args ) {
+ if (args.Length() != 1) {
+ return v8::ThrowException( v8::String::New( "UUID needs 1 argument" ) );
+ }
+ v8::String::Utf8Value utf( args[ 0 ] );
+ if( utf.length() != 32 ) {
+ return v8::ThrowException( v8::String::New( "UUID string must have 32 characters" ) );
+ }
+
+ return hexToBinData(scope, args.This(), bdtUUID, *utf);
+ }
+
+// v8::Handle<v8::Value> uuidToString( V8Scope* scope, const v8::Arguments& args ) {
+// v8::Handle<v8::Object> it = args.This();
+// Local<External> c = External::Cast( *(it->GetInternalField( 0 )) );
+// char* data = (char*)(c->Value());
+//
+// stringstream ss;
+// ss << "UUID(\"" << toHex(data, 16) << "\")";
+// return v8::String::New( ss.str().c_str() );
+// }
+
+ v8::Handle<v8::Value> md5Init( V8Scope* scope, const v8::Arguments& args ) {
+ if (args.Length() != 1) {
+ return v8::ThrowException( v8::String::New( "MD5 needs 1 argument" ) );
+ }
+ v8::String::Utf8Value utf( args[ 0 ] );
+ if( utf.length() != 32 ) {
+ return v8::ThrowException( v8::String::New( "MD5 string must have 32 characters" ) );
+ }
+
+ return hexToBinData(scope, args.This(), MD5Type, *utf);
+ }
+
+ v8::Handle<v8::Value> hexDataInit( V8Scope* scope, const v8::Arguments& args ) {
+ if (args.Length() != 2) {
+ return v8::ThrowException( v8::String::New( "HexData needs 2 arguments" ) );
+ }
+ v8::String::Utf8Value utf( args[ 1 ] );
+ return hexToBinData(scope, args.This(), args[0]->IntegerValue(), *utf);
+ }
+
+ v8::Handle<v8::Value> numberLongInit( V8Scope* scope, const v8::Arguments& args ) {
if (args.Length() != 0 && args.Length() != 1 && args.Length() != 3) {
return v8::ThrowException( v8::String::New( "NumberLong needs 0, 1 or 3 arguments" ) );
@@ -721,16 +864,16 @@ namespace mongo {
v8::Handle<v8::Object> it = args.This();
if ( it->IsUndefined() || it == v8::Context::GetCurrent()->Global() ) {
- v8::Function* f = getNamedCons( "NumberLong" );
+ v8::Function* f = scope->getNamedCons( "NumberLong" );
it = f->NewInstance();
}
if ( args.Length() == 0 ) {
- it->Set( v8::String::New( "floatApprox" ), v8::Number::New( 0 ) );
+ it->Set( scope->getV8Str( "floatApprox" ), v8::Number::New( 0 ) );
}
else if ( args.Length() == 1 ) {
if ( args[ 0 ]->IsNumber() ) {
- it->Set( v8::String::New( "floatApprox" ), args[ 0 ] );
+ it->Set( scope->getV8Str( "floatApprox" ), args[ 0 ] );
}
else {
v8::String::Utf8Value data( args[ 0 ] );
@@ -745,21 +888,21 @@ namespace mongo {
}
unsigned long long val = n;
if ( (long long)val == (long long)(double)(long long)(val) ) {
- it->Set( v8::String::New( "floatApprox" ), v8::Number::New( (double)(long long)( val ) ) );
+ it->Set( scope->getV8Str( "floatApprox" ), v8::Number::New( (double)(long long)( val ) ) );
}
else {
- it->Set( v8::String::New( "floatApprox" ), v8::Number::New( (double)(long long)( val ) ) );
- it->Set( v8::String::New( "top" ), v8::Integer::New( val >> 32 ) );
- it->Set( v8::String::New( "bottom" ), v8::Integer::New( (unsigned long)(val & 0x00000000ffffffff) ) );
+ it->Set( scope->getV8Str( "floatApprox" ), v8::Number::New( (double)(long long)( val ) ) );
+ it->Set( scope->getV8Str( "top" ), v8::Integer::New( val >> 32 ) );
+ it->Set( scope->getV8Str( "bottom" ), v8::Integer::New( (unsigned long)(val & 0x00000000ffffffff) ) );
}
}
}
else {
- it->Set( v8::String::New( "floatApprox" ) , args[0] );
- it->Set( v8::String::New( "top" ) , args[1] );
- it->Set( v8::String::New( "bottom" ) , args[2] );
+ it->Set( scope->getV8Str( "floatApprox" ) , args[0] );
+ it->Set( scope->getV8Str( "top" ) , args[1] );
+ it->Set( scope->getV8Str( "bottom" ) , args[2] );
}
- it->SetHiddenValue( v8::String::New( "__NumberLong" ), v8::Number::New( 1 ) );
+ it->SetHiddenValue( scope->V8STR_NUMBERLONG, v8::Number::New( 1 ) );
return it;
}
@@ -773,29 +916,17 @@ namespace mongo {
(unsigned)( it->Get( v8::String::New( "bottom" ) )->ToInt32()->Value() );
}
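NumberLong keeps a plain double in floatApprox when the value survives a round trip through double, and otherwise also stores top/bottom 32-bit halves that numberLongVal reassembles. A standalone sketch of that test and split:

    // standalone sketch of the NumberLong exactness test and top/bottom split
    #include <iostream>
    using namespace std;

    int main() {
        unsigned long long val = 9007199254740993ULL;    // 2^53 + 1, not exactly representable as a double

        // same exactness test as numberLongInit
        bool exact = ( (long long)val == (long long)(double)(long long)(val) );
        cout << "exact as double: " << ( exact ? "yes" : "no" ) << endl;    // prints "no"

        // split into the halves numberLongInit stores as "top" and "bottom"
        unsigned top = (unsigned)( val >> 32 );
        unsigned bottom = (unsigned)( val & 0x00000000ffffffffULL );

        // reassemble from the halves, mirroring numberLongVal
        unsigned long long back = ( (unsigned long long)top << 32 ) + bottom;
        cout << ( back == val ? "round trip ok" : "round trip broken" ) << endl;
        return 0;
    }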
- v8::Handle<v8::Value> numberLongValueOf( const v8::Arguments& args ) {
-
- if (args.Length() != 0) {
- return v8::ThrowException( v8::String::New( "toNumber needs 0 arguments" ) );
- }
-
+ v8::Handle<v8::Value> numberLongValueOf( V8Scope* scope, const v8::Arguments& args ) {
v8::Handle<v8::Object> it = args.This();
-
long long val = numberLongVal( it );
-
return v8::Number::New( double( val ) );
}
- v8::Handle<v8::Value> numberLongToNumber( const v8::Arguments& args ) {
- return numberLongValueOf( args );
+ v8::Handle<v8::Value> numberLongToNumber( V8Scope* scope, const v8::Arguments& args ) {
+ return numberLongValueOf( scope, args );
}
- v8::Handle<v8::Value> numberLongToString( const v8::Arguments& args ) {
-
- if (args.Length() != 0) {
- return v8::ThrowException( v8::String::New( "toString needs 0 arguments" ) );
- }
-
+ v8::Handle<v8::Value> numberLongToString( V8Scope* scope, const v8::Arguments& args ) {
v8::Handle<v8::Object> it = args.This();
stringstream ss;
@@ -811,18 +942,62 @@ namespace mongo {
return v8::String::New( ret.c_str() );
}
- v8::Handle<v8::Value> bsonsize( const v8::Arguments& args ) {
+ v8::Handle<v8::Value> numberIntInit( V8Scope* scope, const v8::Arguments& args ) {
+
+ if (args.Length() != 0 && args.Length() != 1) {
+ return v8::ThrowException( v8::String::New( "NumberInt needs 0 or 1 arguments" ) );
+ }
+
+ v8::Handle<v8::Object> it = args.This();
+
+ if ( it->IsUndefined() || it == v8::Context::GetCurrent()->Global() ) {
+ v8::Function* f = scope->getNamedCons( "NumberInt" );
+ it = f->NewInstance();
+ }
+
+ if ( args.Length() == 0 ) {
+ it->SetHiddenValue( scope->V8STR_NUMBERINT, v8::Number::New( 0 ) );
+ }
+ else if ( args.Length() == 1 ) {
+ it->SetHiddenValue( scope->V8STR_NUMBERINT, args[0]->ToInt32() );
+ }
+
+ return it;
+ }
+
+ v8::Handle<v8::Value> numberIntValueOf( V8Scope* scope, const v8::Arguments& args ) {
+ v8::Handle<v8::Object> it = args.This();
+ int val = it->GetHiddenValue( scope->V8STR_NUMBERINT )->Int32Value();
+ return v8::Number::New( double( val ) );
+ }
+
+ v8::Handle<v8::Value> numberIntToNumber( V8Scope* scope, const v8::Arguments& args ) {
+ return numberIntValueOf( scope, args );
+ }
+
+ v8::Handle<v8::Value> numberIntToString( V8Scope* scope, const v8::Arguments& args ) {
+ v8::Handle<v8::Object> it = args.This();
+
+ stringstream ss;
+ int val = it->GetHiddenValue( scope->V8STR_NUMBERINT )->Int32Value();
+ ss << "NumberInt(" << val << ")";
+
+ string ret = ss.str();
+ return v8::String::New( ret.c_str() );
+ }
+
+ v8::Handle<v8::Value> bsonsize( V8Scope* scope, const v8::Arguments& args ) {
if ( args.Length() != 1 )
- return v8::ThrowException( v8::String::New( "bonsisze needs 1 argument" ) );
+ return v8::ThrowException( v8::String::New( "bsonsize needs 1 argument" ) );
if ( args[0]->IsNull() )
return v8::Number::New(0);
if ( ! args[ 0 ]->IsObject() )
- return v8::ThrowException( v8::String::New( "argument to bonsisze has to be an object" ) );
+ return v8::ThrowException( v8::String::New( "argument to bsonsize has to be an object" ) );
- return v8::Number::New( v8ToMongo( args[ 0 ]->ToObject() ).objsize() );
+ return v8::Number::New( scope->v8ToMongo( args[ 0 ]->ToObject() ).objsize() );
}
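bsonsize just reports BSONObj::objsize() on the converted document, i.e. the total byte length of the encoded BSON. The same number can be read straight from the C++ BSON API (a minimal sketch, assuming only this tree's bson/bson.h):

    // minimal sketch: the value bsonsize() returns is BSONObj::objsize()
    #include <iostream>
    #include "bson/bson.h"

    int main() {
        mongo::BSONObjBuilder b;
        b.append( "x" , 1 );
        b.append( "name" , "abc" );
        mongo::BSONObj o = b.obj();
        std::cout << o.objsize() << std::endl;   // byte length of the whole BSON document
        return 0;
    }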
// to be called with v8 mutex
diff --git a/scripting/v8_db.h b/scripting/v8_db.h
index 7dbca92..08d15d0 100644
--- a/scripting/v8_db.h
+++ b/scripting/v8_db.h
@@ -22,129 +22,75 @@
#include <cstdio>
#include <cstdlib>
-#include "engine.h"
+#include "engine_v8.h"
#include "../client/dbclient.h"
namespace mongo {
// These functions may depend on the caller creating a handle scope and context scope.
- v8::Handle<v8::FunctionTemplate> getMongoFunctionTemplate( bool local );
- void installDBTypes( v8::Handle<v8::ObjectTemplate>& global );
- void installDBTypes( v8::Handle<v8::Object>& global );
+ v8::Handle<v8::FunctionTemplate> getMongoFunctionTemplate( V8Scope * scope, bool local );
+// void installDBTypes( V8Scope * scope, v8::Handle<v8::ObjectTemplate>& global );
+ void installDBTypes( V8Scope * scope, v8::Handle<v8::Object>& global );
// the actual globals
mongo::DBClientBase * getConnection( const v8::Arguments& args );
// Mongo members
- v8::Handle<v8::Value> mongoConsLocal(const v8::Arguments& args);
- v8::Handle<v8::Value> mongoConsExternal(const v8::Arguments& args);
+ v8::Handle<v8::Value> mongoConsLocal(V8Scope* scope, const v8::Arguments& args);
+ v8::Handle<v8::Value> mongoConsExternal(V8Scope* scope, const v8::Arguments& args);
- v8::Handle<v8::Value> mongoFind(const v8::Arguments& args);
- v8::Handle<v8::Value> mongoInsert(const v8::Arguments& args);
- v8::Handle<v8::Value> mongoRemove(const v8::Arguments& args);
- v8::Handle<v8::Value> mongoUpdate(const v8::Arguments& args);
+ v8::Handle<v8::Value> mongoFind(V8Scope* scope, const v8::Arguments& args);
+ v8::Handle<v8::Value> mongoInsert(V8Scope* scope, const v8::Arguments& args);
+ v8::Handle<v8::Value> mongoRemove(V8Scope* scope, const v8::Arguments& args);
+ v8::Handle<v8::Value> mongoUpdate(V8Scope* scope, const v8::Arguments& args);
- v8::Handle<v8::Value> internalCursorCons(const v8::Arguments& args);
- v8::Handle<v8::Value> internalCursorNext(const v8::Arguments& args);
- v8::Handle<v8::Value> internalCursorHasNext(const v8::Arguments& args);
- v8::Handle<v8::Value> internalCursorObjsLeftInBatch(const v8::Arguments& args);
+ v8::Handle<v8::Value> internalCursorCons(V8Scope* scope, const v8::Arguments& args);
+ v8::Handle<v8::Value> internalCursorNext(V8Scope* scope, const v8::Arguments& args);
+ v8::Handle<v8::Value> internalCursorHasNext(V8Scope* scope, const v8::Arguments& args);
+ v8::Handle<v8::Value> internalCursorObjsLeftInBatch(V8Scope* scope, const v8::Arguments& args);
// DB members
- v8::Handle<v8::Value> dbInit(const v8::Arguments& args);
- v8::Handle<v8::Value> collectionInit( const v8::Arguments& args );
- v8::Handle<v8::Value> objectIdInit( const v8::Arguments& args );
+ v8::Handle<v8::Value> dbInit(V8Scope* scope, const v8::Arguments& args);
+ v8::Handle<v8::Value> collectionInit(V8Scope* scope, const v8::Arguments& args );
+ v8::Handle<v8::Value> objectIdInit( V8Scope* scope, const v8::Arguments& args );
- v8::Handle<v8::Value> dbRefInit( const v8::Arguments& args );
- v8::Handle<v8::Value> dbPointerInit( const v8::Arguments& args );
- v8::Handle<v8::Value> dbTimestampInit( const v8::Arguments& args );
+ v8::Handle<v8::Value> dbRefInit( V8Scope* scope, const v8::Arguments& args );
+ v8::Handle<v8::Value> dbPointerInit( V8Scope* scope, const v8::Arguments& args );
+ v8::Handle<v8::Value> dbTimestampInit( V8Scope* scope, const v8::Arguments& args );
- v8::Handle<v8::Value> binDataInit( const v8::Arguments& args );
- v8::Handle<v8::Value> binDataToString( const v8::Arguments& args );
+ v8::Handle<v8::Value> binDataInit( V8Scope* scope, const v8::Arguments& args );
+ v8::Handle<v8::Value> binDataToString( V8Scope* scope, const v8::Arguments& args );
+ v8::Handle<v8::Value> binDataToBase64( V8Scope* scope, const v8::Arguments& args );
+ v8::Handle<v8::Value> binDataToHex( V8Scope* scope, const v8::Arguments& args );
- v8::Handle<v8::Value> numberLongInit( const v8::Arguments& args );
- v8::Handle<v8::Value> numberLongToNumber(const v8::Arguments& args);
- v8::Handle<v8::Value> numberLongValueOf(const v8::Arguments& args);
- v8::Handle<v8::Value> numberLongToString(const v8::Arguments& args);
+ v8::Handle<v8::Value> uuidInit( V8Scope* scope, const v8::Arguments& args );
+ v8::Handle<v8::Value> md5Init( V8Scope* scope, const v8::Arguments& args );
+ v8::Handle<v8::Value> hexDataInit( V8Scope* scope, const v8::Arguments& args );
- v8::Handle<v8::Value> dbQueryInit( const v8::Arguments& args );
+ v8::Handle<v8::Value> numberLongInit( V8Scope* scope, const v8::Arguments& args );
+ v8::Handle<v8::Value> numberLongToNumber(V8Scope* scope, const v8::Arguments& args);
+ v8::Handle<v8::Value> numberLongValueOf(V8Scope* scope, const v8::Arguments& args);
+ v8::Handle<v8::Value> numberLongToString(V8Scope* scope, const v8::Arguments& args);
+
+ v8::Handle<v8::Value> numberIntInit( V8Scope* scope, const v8::Arguments& args );
+ v8::Handle<v8::Value> numberIntToNumber(V8Scope* scope, const v8::Arguments& args);
+ v8::Handle<v8::Value> numberIntValueOf(V8Scope* scope, const v8::Arguments& args);
+ v8::Handle<v8::Value> numberIntToString(V8Scope* scope, const v8::Arguments& args);
+
+ v8::Handle<v8::Value> dbQueryInit( V8Scope* scope, const v8::Arguments& args );
v8::Handle<v8::Value> dbQueryIndexAccess( uint32_t index , const v8::AccessorInfo& info );
v8::Handle<v8::Value> collectionFallback( v8::Local<v8::String> name, const v8::AccessorInfo &info);
- v8::Handle<v8::Value> bsonsize( const v8::Arguments& args );
+ v8::Handle<v8::Value> bsonsize( V8Scope* scope, const v8::Arguments& args );
// call with v8 mutex:
void enableV8Interrupt();
void disableV8Interrupt();
- // The implementation below assumes that SERVER-1816 has been fixed - in
- // particular, interrupted() must return true if an interrupt was ever
- // sent; currently that is not the case if a new killop overwrites the data
- // for an old one
- template < v8::Handle< v8::Value > ( *f ) ( const v8::Arguments& ) >
- v8::Handle< v8::Value > v8Callback( const v8::Arguments &args ) {
- disableV8Interrupt(); // we don't want to have to audit all v8 calls for termination exceptions, so we don't allow these exceptions during the callback
- if ( globalScriptEngine->interrupted() ) {
- v8::V8::TerminateExecution(); // experimentally it seems that TerminateExecution() will override the return value
- return v8::Undefined();
- }
- v8::Handle< v8::Value > ret;
- string exception;
- try {
- ret = f( args );
- }
- catch( const std::exception &e ) {
- exception = e.what();
- }
- catch( ... ) {
- exception = "unknown exception";
- }
- enableV8Interrupt();
- if ( globalScriptEngine->interrupted() ) {
- v8::V8::TerminateExecution();
- return v8::Undefined();
- }
- if ( !exception.empty() ) {
- // technically, ThrowException is supposed to be the last v8 call before returning
- ret = v8::ThrowException( v8::String::New( exception.c_str() ) );
- }
- return ret;
- }
-
- template < v8::Handle< v8::Value > ( *f ) ( const v8::Arguments& ) >
- v8::Local< v8::FunctionTemplate > newV8Function() {
- return v8::FunctionTemplate::New( v8Callback< f > );
- }
-
- // Preemption is going to be allowed for the v8 mutex, and some of our v8
- // usage is not preemption safe. So we are using an additional mutex that
- // will not be preempted. The V8Lock should be used in place of v8::Locker
- // except in certain special cases involving interrupts.
- namespace v8Locks {
- // the implementations are quite simple - objects must be destroyed in
- // reverse of the order created, and should not be shared between threads
- struct RecursiveLock {
- RecursiveLock();
- ~RecursiveLock();
- bool _unlock;
- };
- struct RecursiveUnlock {
- RecursiveUnlock();
- ~RecursiveUnlock();
- bool _lock;
- };
- } // namespace v8Locks
- class V8Lock {
- v8Locks::RecursiveLock _noPreemptionLock;
- v8::Locker _preemptionLock;
- };
- struct V8Unlock {
- v8::Unlocker _preemptionUnlock;
- v8Locks::RecursiveUnlock _noPreemptionUnlock;
- };
}
diff --git a/scripting/v8_utils.cpp b/scripting/v8_utils.cpp
index 171ced5..0f575cf 100644
--- a/scripting/v8_utils.cpp
+++ b/scripting/v8_utils.cpp
@@ -15,14 +15,20 @@
* limitations under the License.
*/
+#if defined(_WIN32)
+/** this is a hack - v8stdint.h defines uint16_t etc. on _WIN32 only, and that collides with
+ our usage of boost */
+#include "boost/cstdint.hpp"
+using namespace boost;
+#define V8STDINT_H_
+#endif
+
#include "v8_utils.h"
#include "v8_db.h"
#include <iostream>
#include <map>
#include <sstream>
#include <vector>
-#include <sys/socket.h>
-#include <netinet/in.h>
#include <boost/smart_ptr.hpp>
#include <boost/thread/thread.hpp>
#include <boost/thread/xtime.hpp>
@@ -33,23 +39,6 @@ using namespace v8;
namespace mongo {
- Handle<v8::Value> Print(const Arguments& args) {
- bool first = true;
- for (int i = 0; i < args.Length(); i++) {
- HandleScope handle_scope;
- if (first) {
- first = false;
- }
- else {
- printf(" ");
- }
- v8::String::Utf8Value str(args[i]);
- printf("%s", *str);
- }
- printf("\n");
- return v8::Undefined();
- }
-
std::string toSTLString( const Handle<v8::Value> & o ) {
v8::String::Utf8Value str(o);
const char * foo = *str;
@@ -136,12 +125,6 @@ namespace mongo {
return s;
}
-
- Handle<v8::Value> Version(const Arguments& args) {
- HandleScope handle_scope;
- return handle_scope.Close( v8::String::New(v8::V8::GetVersion()) );
- }
-
void ReportException(v8::TryCatch* try_catch) {
cout << try_catch << endl;
}
@@ -233,7 +216,7 @@ namespace mongo {
Persistent< Value > returnData_;
};
- Handle< Value > ThreadInit( const Arguments &args ) {
+ Handle< Value > ThreadInit( V8Scope* scope, const Arguments &args ) {
Handle<v8::Object> it = args.This();
// NOTE I believe the passed JSThreadConfig will never be freed. If this
// policy is changed, JSThread may no longer be able to store JSThreadConfig
@@ -242,7 +225,7 @@ namespace mongo {
return v8::Undefined();
}
- Handle< Value > ScopedThreadInit( const Arguments &args ) {
+ Handle< Value > ScopedThreadInit( V8Scope* scope, const Arguments &args ) {
Handle<v8::Object> it = args.This();
// NOTE I believe the passed JSThreadConfig will never be freed. If this
// policy is changed, JSThread may no longer be able to store JSThreadConfig
@@ -251,65 +234,58 @@ namespace mongo {
return v8::Undefined();
}
- JSThreadConfig *thisConfig( const Arguments &args ) {
+ JSThreadConfig *thisConfig( V8Scope* scope, const Arguments &args ) {
Local< External > c = External::Cast( *(args.This()->GetHiddenValue( v8::String::New( "_JSThreadConfig" ) ) ) );
JSThreadConfig *config = (JSThreadConfig *)( c->Value() );
return config;
}
- Handle< Value > ThreadStart( const Arguments &args ) {
- thisConfig( args )->start();
+ Handle< Value > ThreadStart( V8Scope* scope, const Arguments &args ) {
+ thisConfig( scope, args )->start();
return v8::Undefined();
}
- Handle< Value > ThreadJoin( const Arguments &args ) {
- thisConfig( args )->join();
+ Handle< Value > ThreadJoin( V8Scope* scope, const Arguments &args ) {
+ thisConfig( scope, args )->join();
return v8::Undefined();
}
- Handle< Value > ThreadReturnData( const Arguments &args ) {
+ Handle< Value > ThreadReturnData( V8Scope* scope, const Arguments &args ) {
HandleScope handle_scope;
- return handle_scope.Close( thisConfig( args )->returnData() );
+ return handle_scope.Close( thisConfig( scope, args )->returnData() );
}
- Handle< Value > ThreadInject( const Arguments &args ) {
+ Handle< Value > ThreadInject( V8Scope* scope, const Arguments &args ) {
jsassert( args.Length() == 1 , "threadInject takes exactly 1 argument" );
jsassert( args[0]->IsObject() , "threadInject needs to be passed a prototype" );
Local<v8::Object> o = args[0]->ToObject();
- o->Set( v8::String::New( "init" ) , newV8Function< ThreadInit >()->GetFunction() );
- o->Set( v8::String::New( "start" ) , newV8Function< ThreadStart >()->GetFunction() );
- o->Set( v8::String::New( "join" ) , newV8Function< ThreadJoin >()->GetFunction() );
- o->Set( v8::String::New( "returnData" ) , newV8Function< ThreadReturnData >()->GetFunction() );
+ scope->injectV8Function("init", ThreadInit, o);
+ scope->injectV8Function("start", ThreadStart, o);
+ scope->injectV8Function("join", ThreadJoin, o);
+ scope->injectV8Function("returnData", ThreadReturnData, o);
return v8::Undefined();
}
- Handle< Value > ScopedThreadInject( const Arguments &args ) {
+ Handle< Value > ScopedThreadInject( V8Scope* scope, const Arguments &args ) {
jsassert( args.Length() == 1 , "threadInject takes exactly 1 argument" );
jsassert( args[0]->IsObject() , "threadInject needs to be passed a prototype" );
Local<v8::Object> o = args[0]->ToObject();
- o->Set( v8::String::New( "init" ) , newV8Function< ScopedThreadInit >()->GetFunction() );
+ scope->injectV8Function("init", ScopedThreadInit, o);
// inheritance takes care of other member functions
return v8::Undefined();
}
- void installFork( v8::Handle< v8::Object > &global, v8::Handle< v8::Context > &context ) {
+ void installFork( V8Scope* scope, v8::Handle< v8::Object > &global, v8::Handle< v8::Context > &context ) {
if ( baseContext_.IsEmpty() ) // if this is the shell, first call will be with shell context, otherwise don't expect to use fork() anyway
baseContext_ = context;
- global->Set( v8::String::New( "_threadInject" ), newV8Function< ThreadInject >()->GetFunction() );
- global->Set( v8::String::New( "_scopedThreadInject" ), newV8Function< ScopedThreadInject >()->GetFunction() );
+ scope->injectV8Function("_threadInject", ThreadInject, global);
+ scope->injectV8Function("_scopedThreadInject", ScopedThreadInject, global);
}
- Handle<v8::Value> GCV8(const Arguments& args) {
- V8Lock l;
- while( !V8::IdleNotification() );
- return v8::Undefined();
- }
-
-
}
diff --git a/scripting/v8_utils.h b/scripting/v8_utils.h
index 40662d2..ca5d317 100644
--- a/scripting/v8_utils.h
+++ b/scripting/v8_utils.h
@@ -27,10 +27,6 @@
namespace mongo {
- v8::Handle<v8::Value> Print(const v8::Arguments& args);
- v8::Handle<v8::Value> Version(const v8::Arguments& args);
- v8::Handle<v8::Value> GCV8(const v8::Arguments& args);
-
void ReportException(v8::TryCatch* handler);
#define jsassert(x,msg) assert(x)
@@ -42,6 +38,6 @@ namespace mongo {
std::string toSTLString( const v8::TryCatch * try_catch );
class V8Scope;
- void installFork( v8::Handle< v8::Object > &global, v8::Handle< v8::Context > &context );
+ void installFork( V8Scope* scope, v8::Handle< v8::Object > &global, v8::Handle< v8::Context > &context );
}
diff --git a/scripting/v8_wrapper.cpp b/scripting/v8_wrapper.cpp
index ff67e8c..7c28a39 100644
--- a/scripting/v8_wrapper.cpp
+++ b/scripting/v8_wrapper.cpp
@@ -15,9 +15,18 @@
* limitations under the License.
*/
+#if defined(_WIN32)
+/** this is a hack - v8stdint.h defines uint16_t etc. on _WIN32 only, and that collides with
+ our usage of boost */
+#include "boost/cstdint.hpp"
+using namespace boost;
+#define V8STDINT_H_
+#endif
+
#include "v8_wrapper.h"
#include "v8_utils.h"
#include "v8_db.h"
+#include "engine_v8.h"
#include <iostream>
@@ -26,540 +35,14 @@ using namespace v8;
namespace mongo {
-#define CONN_STRING (v8::String::New( "_conn" ))
-
#define DDD(x)
- Handle<Value> NamedReadOnlySet( Local<v8::String> property, Local<Value> value, const AccessorInfo& info ) {
- cout << "cannot write to read-only object" << endl;
- return value;
- }
-
- Handle<Boolean> NamedReadOnlyDelete( Local<v8::String> property, const AccessorInfo& info ) {
- cout << "cannot delete from read-only object" << endl;
- return Boolean::New( false );
- }
-
- Handle<Value> IndexedReadOnlySet( uint32_t index, Local<Value> value, const AccessorInfo& info ) {
- cout << "cannot write to read-only array" << endl;
- return value;
- }
-
- Handle<Boolean> IndexedReadOnlyDelete( uint32_t index, const AccessorInfo& info ) {
- cout << "cannot delete from read-only array" << endl;
- return Boolean::New( false );
- }
-
- Local< v8::Value > newFunction( const char *code ) {
- stringstream codeSS;
- codeSS << "____MontoToV8_newFunction_temp = " << code;
- string codeStr = codeSS.str();
- Local< Script > compiled = Script::New( v8::String::New( codeStr.c_str() ) );
- Local< Value > ret = compiled->Run();
- return ret;
- }
-
- Local< v8::Value > newId( const OID &id ) {
- v8::Function * idCons = getObjectIdCons();
- v8::Handle<v8::Value> argv[1];
- argv[0] = v8::String::New( id.str().c_str() );
- return idCons->NewInstance( 1 , argv );
- }
-
- Local<v8::Object> mongoToV8( const BSONObj& m , bool array, bool readOnly ) {
-
- Local<v8::Object> o;
-
- // handle DBRef. needs to come first. isn't it? (metagoto)
- static string ref = "$ref";
- if ( ref == m.firstElement().fieldName() ) {
- const BSONElement& id = m["$id"];
- if (!id.eoo()) { // there's no check on $id exitence in sm implementation. risky ?
- v8::Function* dbRef = getNamedCons( "DBRef" );
- o = dbRef->NewInstance();
- }
- }
-
- Local< v8::ObjectTemplate > readOnlyObjects;
- // Hoping template construction is fast...
- Local< v8::ObjectTemplate > internalFieldObjects = v8::ObjectTemplate::New();
- internalFieldObjects->SetInternalFieldCount( 1 );
-
- if ( !o.IsEmpty() ) {
- readOnly = false;
- }
- else if ( array ) {
- // NOTE Looks like it's impossible to add interceptors to v8 arrays.
- readOnly = false;
- o = v8::Array::New();
- }
- else if ( !readOnly ) {
- o = v8::Object::New();
- }
- else {
- // NOTE Our readOnly implemention relies on undocumented ObjectTemplate
- // functionality that may be fragile, but it still seems like the best option
- // for now -- fwiw, the v8 docs are pretty sparse. I've determined experimentally
- // that when property handlers are set for an object template, they will attach
- // to objects previously created by that template. To get this to work, though,
- // it is necessary to initialize the template's property handlers before
- // creating objects from the template (as I have in the following few lines
- // of code).
- // NOTE In my first attempt, I configured the permanent property handlers before
- // constructiong the object and replaced the Set() calls below with ForceSet().
- // However, it turns out that ForceSet() only bypasses handlers for named
- // properties and not for indexed properties.
- readOnlyObjects = v8::ObjectTemplate::New();
- // NOTE This internal field will store type info for special db types. For
- // regular objects the field is unnecessary - for simplicity I'm creating just
- // one readOnlyObjects template for objects where the field is & isn't necessary,
- // assuming that the overhead of an internal field is slight.
- readOnlyObjects->SetInternalFieldCount( 1 );
- readOnlyObjects->SetNamedPropertyHandler( 0 );
- readOnlyObjects->SetIndexedPropertyHandler( 0 );
- o = readOnlyObjects->NewInstance();
- }
-
- mongo::BSONObj sub;
-
- for ( BSONObjIterator i(m); i.more(); ) {
- const BSONElement& f = i.next();
-
- Local<Value> v;
-
- switch ( f.type() ) {
-
- case mongo::Code:
- o->Set( v8::String::New( f.fieldName() ), newFunction( f.valuestr() ) );
- break;
-
- case CodeWScope:
- if ( f.codeWScopeObject().isEmpty() )
- log() << "warning: CodeWScope doesn't transfer to db.eval" << endl;
- o->Set( v8::String::New( f.fieldName() ), newFunction( f.codeWScopeCode() ) );
- break;
-
- case mongo::String:
- o->Set( v8::String::New( f.fieldName() ) , v8::String::New( f.valuestr() ) );
- break;
-
- case mongo::jstOID: {
- v8::Function * idCons = getObjectIdCons();
- v8::Handle<v8::Value> argv[1];
- argv[0] = v8::String::New( f.__oid().str().c_str() );
- o->Set( v8::String::New( f.fieldName() ) ,
- idCons->NewInstance( 1 , argv ) );
- break;
- }
-
- case mongo::NumberDouble:
- case mongo::NumberInt:
- o->Set( v8::String::New( f.fieldName() ) , v8::Number::New( f.number() ) );
- break;
-
- case mongo::Array:
- case mongo::Object:
- sub = f.embeddedObject();
- o->Set( v8::String::New( f.fieldName() ) , mongoToV8( sub , f.type() == mongo::Array, readOnly ) );
- break;
-
- case mongo::Date:
- o->Set( v8::String::New( f.fieldName() ) , v8::Date::New( f.date() ) );
- break;
-
- case mongo::Bool:
- o->Set( v8::String::New( f.fieldName() ) , v8::Boolean::New( f.boolean() ) );
- break;
-
- case mongo::jstNULL:
- case mongo::Undefined: // duplicate sm behavior
- o->Set( v8::String::New( f.fieldName() ) , v8::Null() );
- break;
-
- case mongo::RegEx: {
- v8::Function * regex = getNamedCons( "RegExp" );
-
- v8::Handle<v8::Value> argv[2];
- argv[0] = v8::String::New( f.regex() );
- argv[1] = v8::String::New( f.regexFlags() );
-
- o->Set( v8::String::New( f.fieldName() ) , regex->NewInstance( 2 , argv ) );
- break;
- }
-
- case mongo::BinData: {
- Local<v8::Object> b = readOnly ? readOnlyObjects->NewInstance() : internalFieldObjects->NewInstance();
-
- int len;
- const char *data = f.binData( len );
-
- v8::Function* binData = getNamedCons( "BinData" );
- v8::Handle<v8::Value> argv[3];
- argv[0] = v8::Number::New( len );
- argv[1] = v8::Number::New( f.binDataType() );
- argv[2] = v8::String::New( data, len );
- o->Set( v8::String::New( f.fieldName() ), binData->NewInstance(3, argv) );
- break;
- }
-
- case mongo::Timestamp: {
- Local<v8::Object> sub = readOnly ? readOnlyObjects->NewInstance() : internalFieldObjects->NewInstance();
-
- sub->Set( v8::String::New( "t" ) , v8::Number::New( f.timestampTime() ) );
- sub->Set( v8::String::New( "i" ) , v8::Number::New( f.timestampInc() ) );
- sub->SetInternalField( 0, v8::Uint32::New( f.type() ) );
-
- o->Set( v8::String::New( f.fieldName() ) , sub );
- break;
- }
-
- case mongo::NumberLong: {
- Local<v8::Object> sub = readOnly ? readOnlyObjects->NewInstance() : internalFieldObjects->NewInstance();
- unsigned long long val = f.numberLong();
- v8::Function* numberLong = getNamedCons( "NumberLong" );
- if ( (long long)val == (long long)(double)(long long)(val) ) {
- v8::Handle<v8::Value> argv[1];
- argv[0] = v8::Number::New( (double)(long long)( val ) );
- o->Set( v8::String::New( f.fieldName() ), numberLong->NewInstance( 1, argv ) );
- }
- else {
- v8::Handle<v8::Value> argv[3];
- argv[0] = v8::Number::New( (double)(long long)(val) );
- argv[1] = v8::Integer::New( val >> 32 );
- argv[2] = v8::Integer::New( (unsigned long)(val & 0x00000000ffffffff) );
- o->Set( v8::String::New( f.fieldName() ), numberLong->NewInstance(3, argv) );
- }
- break;
- }
-
- case mongo::MinKey: {
- Local<v8::Object> sub = readOnly ? readOnlyObjects->NewInstance() : internalFieldObjects->NewInstance();
- sub->Set( v8::String::New( "$MinKey" ), v8::Boolean::New( true ) );
- sub->SetInternalField( 0, v8::Uint32::New( f.type() ) );
- o->Set( v8::String::New( f.fieldName() ) , sub );
- break;
- }
-
- case mongo::MaxKey: {
- Local<v8::Object> sub = readOnly ? readOnlyObjects->NewInstance() : internalFieldObjects->NewInstance();
- sub->Set( v8::String::New( "$MaxKey" ), v8::Boolean::New( true ) );
- sub->SetInternalField( 0, v8::Uint32::New( f.type() ) );
- o->Set( v8::String::New( f.fieldName() ) , sub );
- break;
- }
-
- case mongo::DBRef: {
- v8::Function* dbPointer = getNamedCons( "DBPointer" );
- v8::Handle<v8::Value> argv[2];
- argv[0] = v8::String::New( f.dbrefNS() );
- argv[1] = newId( f.dbrefOID() );
- o->Set( v8::String::New( f.fieldName() ), dbPointer->NewInstance(2, argv) );
- break;
- }
-
- default:
- cout << "can't handle type: ";
- cout << f.type() << " ";
- cout << f.toString();
- cout << endl;
- break;
- }
-
- }
-
- if ( readOnly ) {
- readOnlyObjects->SetNamedPropertyHandler( 0, NamedReadOnlySet, 0, NamedReadOnlyDelete );
- readOnlyObjects->SetIndexedPropertyHandler( 0, IndexedReadOnlySet, 0, IndexedReadOnlyDelete );
- }
-
- return o;
- }
-
- Handle<v8::Value> mongoToV8Element( const BSONElement &f ) {
- Local< v8::ObjectTemplate > internalFieldObjects = v8::ObjectTemplate::New();
- internalFieldObjects->SetInternalFieldCount( 1 );
-
- switch ( f.type() ) {
-
- case mongo::Code:
- return newFunction( f.valuestr() );
-
- case CodeWScope:
- if ( f.codeWScopeObject().isEmpty() )
- log() << "warning: CodeWScope doesn't transfer to db.eval" << endl;
- return newFunction( f.codeWScopeCode() );
-
- case mongo::String:
- return v8::String::New( f.valuestr() );
-
- case mongo::jstOID:
- return newId( f.__oid() );
-
- case mongo::NumberDouble:
- case mongo::NumberInt:
- return v8::Number::New( f.number() );
-
- case mongo::Array:
- case mongo::Object:
- return mongoToV8( f.embeddedObject() , f.type() == mongo::Array );
-
- case mongo::Date:
- return v8::Date::New( f.date() );
-
- case mongo::Bool:
- return v8::Boolean::New( f.boolean() );
-
- case mongo::EOO:
- case mongo::jstNULL:
- case mongo::Undefined: // duplicate sm behavior
- return v8::Null();
-
- case mongo::RegEx: {
- v8::Function * regex = getNamedCons( "RegExp" );
-
- v8::Handle<v8::Value> argv[2];
- argv[0] = v8::String::New( f.regex() );
- argv[1] = v8::String::New( f.regexFlags() );
-
- return regex->NewInstance( 2 , argv );
- break;
- }
-
- case mongo::BinData: {
- int len;
- const char *data = f.binData( len );
-
- v8::Function* binData = getNamedCons( "BinData" );
- v8::Handle<v8::Value> argv[3];
- argv[0] = v8::Number::New( len );
- argv[1] = v8::Number::New( f.binDataType() );
- argv[2] = v8::String::New( data, len );
- return binData->NewInstance( 3, argv );
- };
-
- case mongo::Timestamp: {
- Local<v8::Object> sub = internalFieldObjects->NewInstance();
-
- sub->Set( v8::String::New( "t" ) , v8::Number::New( f.timestampTime() ) );
- sub->Set( v8::String::New( "i" ) , v8::Number::New( f.timestampInc() ) );
- sub->SetInternalField( 0, v8::Uint32::New( f.type() ) );
-
- return sub;
- }
-
- case mongo::NumberLong: {
- Local<v8::Object> sub = internalFieldObjects->NewInstance();
- unsigned long long val = f.numberLong();
- v8::Function* numberLong = getNamedCons( "NumberLong" );
- if ( (long long)val == (long long)(double)(long long)(val) ) {
- v8::Handle<v8::Value> argv[1];
- argv[0] = v8::Number::New( (double)(long long)( val ) );
- return numberLong->NewInstance( 1, argv );
- }
- else {
- v8::Handle<v8::Value> argv[3];
- argv[0] = v8::Number::New( (double)(long long)( val ) );
- argv[1] = v8::Integer::New( val >> 32 );
- argv[2] = v8::Integer::New( (unsigned long)(val & 0x00000000ffffffff) );
- return numberLong->NewInstance( 3, argv );
- }
- }
-
- case mongo::MinKey: {
- Local<v8::Object> sub = internalFieldObjects->NewInstance();
- sub->Set( v8::String::New( "$MinKey" ), v8::Boolean::New( true ) );
- sub->SetInternalField( 0, v8::Uint32::New( f.type() ) );
- return sub;
- }
-
- case mongo::MaxKey: {
- Local<v8::Object> sub = internalFieldObjects->NewInstance();
- sub->Set( v8::String::New( "$MaxKey" ), v8::Boolean::New( true ) );
- sub->SetInternalField( 0, v8::Uint32::New( f.type() ) );
- return sub;
- }
-
- case mongo::DBRef: {
- v8::Function* dbPointer = getNamedCons( "DBPointer" );
- v8::Handle<v8::Value> argv[2];
- argv[0] = v8::String::New( f.dbrefNS() );
- argv[1] = newId( f.dbrefOID() );
- return dbPointer->NewInstance(2, argv);
- }
-
- default:
- cout << "can't handle type: ";
- cout << f.type() << " ";
- cout << f.toString();
- cout << endl;
- break;
- }
-
- return v8::Undefined();
- }
-
- void v8ToMongoElement( BSONObjBuilder & b , v8::Handle<v8::String> name , const string sname , v8::Handle<v8::Value> value , int depth ) {
-
- if ( value->IsString() ) {
- b.append( sname , toSTLString( value ).c_str() );
- return;
- }
-
- if ( value->IsFunction() ) {
- b.appendCode( sname , toSTLString( value ) );
- return;
- }
-
- if ( value->IsNumber() ) {
- if ( value->IsInt32() )
- b.append( sname, int( value->ToInt32()->Value() ) );
- else
- b.append( sname , value->ToNumber()->Value() );
- return;
- }
-
- if ( value->IsArray() ) {
- BSONObj sub = v8ToMongo( value->ToObject() , depth );
- b.appendArray( sname , sub );
- return;
- }
-
- if ( value->IsDate() ) {
- b.appendDate( sname , Date_t( (unsigned long long)(v8::Date::Cast( *value )->NumberValue())) );
- return;
- }
-
- if ( value->IsExternal() )
- return;
-
- if ( value->IsObject() ) {
- // The user could potentially modify the fields of these special objects,
- // wreaking havoc when we attempt to reinterpret them. Not doing any validation
- // for now...
- Local< v8::Object > obj = value->ToObject();
- if ( obj->InternalFieldCount() && obj->GetInternalField( 0 )->IsNumber() ) {
- switch( obj->GetInternalField( 0 )->ToInt32()->Value() ) { // NOTE Uint32's Value() gave me a linking error, so going with this instead
- case Timestamp:
- b.appendTimestamp( sname,
- Date_t( (unsigned long long)(obj->Get( v8::String::New( "t" ) )->ToNumber()->Value() )),
- obj->Get( v8::String::New( "i" ) )->ToInt32()->Value() );
- return;
- case MinKey:
- b.appendMinKey( sname );
- return;
- case MaxKey:
- b.appendMaxKey( sname );
- return;
- default:
- assert( "invalid internal field" == 0 );
- }
- }
- string s = toSTLString( value );
- if ( s.size() && s[0] == '/' ) {
- s = s.substr( 1 );
- string r = s.substr( 0 , s.rfind( "/" ) );
- string o = s.substr( s.rfind( "/" ) + 1 );
- b.appendRegex( sname , r , o );
- }
- else if ( value->ToObject()->GetPrototype()->IsObject() &&
- value->ToObject()->GetPrototype()->ToObject()->HasRealNamedProperty( v8::String::New( "isObjectId" ) ) ) {
- OID oid;
- oid.init( toSTLString( value ) );
- b.appendOID( sname , &oid );
- }
- else if ( !value->ToObject()->GetHiddenValue( v8::String::New( "__NumberLong" ) ).IsEmpty() ) {
- // TODO might be nice to potentially speed this up with an indexed internal
- // field, but I don't yet know how to use an ObjectTemplate with a
- // constructor.
- v8::Handle< v8::Object > it = value->ToObject();
- long long val;
- if ( !it->Has( v8::String::New( "top" ) ) ) {
- val = (long long)( it->Get( v8::String::New( "floatApprox" ) )->NumberValue() );
- }
- else {
- val = (long long)
- ( (unsigned long long)( it->Get( v8::String::New( "top" ) )->ToInt32()->Value() ) << 32 ) +
- (unsigned)( it->Get( v8::String::New( "bottom" ) )->ToInt32()->Value() );
- }
-
- b.append( sname, val );
- }
- else if ( !value->ToObject()->GetHiddenValue( v8::String::New( "__DBPointer" ) ).IsEmpty() ) {
- OID oid;
- oid.init( toSTLString( value->ToObject()->Get( v8::String::New( "id" ) ) ) );
- string ns = toSTLString( value->ToObject()->Get( v8::String::New( "ns" ) ) );
- b.appendDBRef( sname, ns, oid );
- }
- else if ( !value->ToObject()->GetHiddenValue( v8::String::New( "__BinData" ) ).IsEmpty() ) {
- int len = obj->Get( v8::String::New( "len" ) )->ToInt32()->Value();
- v8::String::Utf8Value data( obj->Get( v8::String::New( "data" ) ) );
- const char *dataArray = *data;
- assert( data.length() == len );
- b.appendBinData( sname,
- len,
- mongo::BinDataType( obj->Get( v8::String::New( "type" ) )->ToInt32()->Value() ),
- dataArray );
- }
- else {
- BSONObj sub = v8ToMongo( value->ToObject() , depth );
- b.append( sname , sub );
- }
- return;
- }
-
- if ( value->IsBoolean() ) {
- b.appendBool( sname , value->ToBoolean()->Value() );
- return;
- }
-
- else if ( value->IsUndefined() ) {
- b.appendUndefined( sname );
- return;
- }
-
- else if ( value->IsNull() ) {
- b.appendNull( sname );
- return;
- }
-
- cout << "don't know how to convert to mongo field [" << name << "]\t" << value << endl;
- }
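
For reference (illustrative, not from the patch): when a NumberLong cannot be represented exactly as a double, the conversion code above exposes it to JavaScript with floatApprox, top and bottom fields, and v8ToMongoElement rebuilds the 64-bit value as (top << 32) + bottom. A shell-side sketch of that reconstruction, assuming nl carries those fields:

    function numberLongValue(nl) {
        if (nl.top === undefined)
            return nl.floatApprox;                  // value round-trips through a double
        return nl.top * 4294967296 + nl.bottom;     // (top << 32) + bottom, mirroring v8ToMongoElement
    }

Plain JavaScript arithmetic is itself lossy above 2^53, which is why the shell keeps all three fields and the authoritative reconstruction happens in the C++ code above.
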
-
- BSONObj v8ToMongo( v8::Handle<v8::Object> o , int depth ) {
- BSONObjBuilder b;
-
- if ( depth == 0 ) {
- v8::Handle<v8::String> idName = v8::String::New( "_id" );
- if ( o->HasRealNamedProperty( idName ) ) {
- v8ToMongoElement( b , idName , "_id" , o->Get( idName ) );
- }
- }
-
- Local<v8::Array> names = o->GetPropertyNames();
- for ( unsigned int i=0; i<names->Length(); i++ ) {
- v8::Local<v8::String> name = names->Get(v8::Integer::New(i) )->ToString();
-
- if ( o->GetPrototype()->IsObject() &&
- o->GetPrototype()->ToObject()->HasRealNamedProperty( name ) )
- continue;
-
- v8::Local<v8::Value> value = o->Get( name );
-
- const string sname = toSTLString( name );
- if ( depth == 0 && sname == "_id" )
- continue;
-
- v8ToMongoElement( b , name , sname , value , depth + 1 );
- }
- return b.obj();
- }
-
// --- object wrapper ---
class WrapperHolder {
public:
- WrapperHolder( const BSONObj * o , bool readOnly , bool iDelete )
- : _o(o), _readOnly( readOnly ), _iDelete( iDelete ) {
+ WrapperHolder( V8Scope* scope, const BSONObj * o , bool readOnly , bool iDelete )
+ : _scope(scope), _o(o), _readOnly( readOnly ), _iDelete( iDelete ) {
}
~WrapperHolder() {
@@ -572,22 +55,21 @@ namespace mongo {
v8::Handle<v8::Value> get( v8::Local<v8::String> name ) {
const string& s = toSTLString( name );
const BSONElement& e = _o->getField( s );
- return mongoToV8Element(e);
+ return _scope->mongoToV8Element(e);
}
+ V8Scope* _scope;
const BSONObj * _o;
bool _readOnly;
bool _iDelete;
};
- WrapperHolder * createWrapperHolder( const BSONObj * o , bool readOnly , bool iDelete ) {
- return new WrapperHolder( o , readOnly , iDelete );
+ WrapperHolder * createWrapperHolder( V8Scope* scope, const BSONObj * o , bool readOnly , bool iDelete ) {
+ return new WrapperHolder( scope, o , readOnly , iDelete );
}
-#define WRAPPER_STRING (v8::String::New( "_wrapper" ) )
-
WrapperHolder * getWrapper( v8::Handle<v8::Object> o ) {
- Handle<v8::Value> t = o->GetRealNamedProperty( WRAPPER_STRING );
+ Handle<v8::Value> t = o->GetRealNamedProperty( v8::String::New( "_wrapper" ) );
assert( t->IsExternal() );
Local<External> c = External::Cast( *t );
WrapperHolder * w = (WrapperHolder*)(c->Value());
@@ -596,11 +78,11 @@ namespace mongo {
}
- Handle<Value> wrapperCons(const Arguments& args) {
+ Handle<Value> wrapperCons(V8Scope* scope, const Arguments& args) {
if ( ! ( args.Length() == 1 && args[0]->IsExternal() ) )
return v8::ThrowException( v8::String::New( "wrapperCons needs 1 External arg" ) );
- args.This()->Set( WRAPPER_STRING , args[0] );
+ args.This()->Set( v8::String::New( "_wrapper" ) , args[0] );
return v8::Undefined();
}
@@ -609,20 +91,9 @@ namespace mongo {
return getWrapper( info.This() )->get( name );
}
- v8::Handle<v8::FunctionTemplate> getObjectWrapperTemplate() {
- v8::Local<v8::FunctionTemplate> t = newV8Function< wrapperCons >();
+ v8::Handle<v8::FunctionTemplate> getObjectWrapperTemplate(V8Scope* scope) {
+ v8::Handle<v8::FunctionTemplate> t = scope->createV8Function(wrapperCons);
t->InstanceTemplate()->SetNamedPropertyHandler( wrapperGetHandler );
return t;
}
-
- // --- random utils ----
-
- v8::Function * getNamedCons( const char * name ) {
- return v8::Function::Cast( *(v8::Context::GetCurrent()->Global()->Get( v8::String::New( name ) ) ) );
- }
-
- v8::Function * getObjectIdCons() {
- return getNamedCons( "ObjectId" );
- }
-
}
diff --git a/scripting/v8_wrapper.h b/scripting/v8_wrapper.h
index e0b79e3..22f14e6 100644
--- a/scripting/v8_wrapper.h
+++ b/scripting/v8_wrapper.h
@@ -22,22 +22,13 @@
#include <cstdio>
#include <cstdlib>
#include "../db/jsobj.h"
+#include "engine_v8.h"
namespace mongo {
- v8::Local<v8::Object> mongoToV8( const mongo::BSONObj & m , bool array = 0 , bool readOnly = false );
- mongo::BSONObj v8ToMongo( v8::Handle<v8::Object> o , int depth = 0 );
-
- void v8ToMongoElement( BSONObjBuilder & b , v8::Handle<v8::String> name ,
- const string sname , v8::Handle<v8::Value> value , int depth = 0 );
- v8::Handle<v8::Value> mongoToV8Element( const BSONElement &f );
-
- v8::Function * getNamedCons( const char * name );
- v8::Function * getObjectIdCons();
-
- v8::Handle<v8::FunctionTemplate> getObjectWrapperTemplate();
+ v8::Handle<v8::FunctionTemplate> getObjectWrapperTemplate(V8Scope* scope);
class WrapperHolder;
- WrapperHolder * createWrapperHolder( const BSONObj * o , bool readOnly , bool iDelete );
+ WrapperHolder * createWrapperHolder( V8Scope* scope, const BSONObj * o , bool readOnly , bool iDelete );
}
diff --git a/server.h b/server.h
new file mode 100644
index 0000000..fc30ceb
--- /dev/null
+++ b/server.h
@@ -0,0 +1,21 @@
+/** @file server.h
+
+ This file contains includes commonly needed in the server files (mongod, mongos, test). It is NOT included in the C++ client.
+
+ Over time we should move more here, and more out of pch.h. And get rid of pch.h at some point.
+*/
+
+// todo is there a boost thing for this already?
+
+#pragma once
+
+#include "bson/inline_decls.h"
+
+/* Note: do not clutter code with these -- ONLY use in hot spots / significant loops. */
+
+// branch prediction. indicate we expect to be true
+#define likely MONGO_likely
+
+// branch prediction. indicate we expect to be false
+#define unlikely MONGO_unlikely
+
diff --git a/shell/collection.js b/shell/collection.js
index 8d4d4c7..1e6fe03 100644
--- a/shell/collection.js
+++ b/shell/collection.js
@@ -60,7 +60,7 @@ DBCollection.prototype.help = function () {
print("\tdb." + shortName + ".totalIndexSize() - size in bytes of all the indexes");
print("\tdb." + shortName + ".totalSize() - storage allocated for all data and indexes");
print("\tdb." + shortName + ".update(query, object[, upsert_bool, multi_bool])");
- print("\tdb." + shortName + ".validate() - SLOW");
+    print("\tdb." + shortName + ".validate( <full> ) - SLOW");
print("\tdb." + shortName + ".getShardVersion() - only for use with sharding");
return __magicNoPrint;
}
@@ -120,7 +120,7 @@ DBCollection.prototype._validateObject = function( o ){
throw "can't save a DBQuery object";
}
-DBCollection._allowedFields = { $id : 1 , $ref : 1 };
+DBCollection._allowedFields = { $id : 1 , $ref : 1 , $db : 1 , $MinKey : 1, $MaxKey : 1 };
DBCollection.prototype._validateForStorage = function( o ){
this._validateObject( o );
@@ -374,21 +374,32 @@ DBCollection.prototype.renameCollection = function( newName , dropTarget ){
dropTarget : dropTarget } )
}
-DBCollection.prototype.validate = function() {
- var res = this._db.runCommand( { validate: this.getName() } );
+DBCollection.prototype.validate = function(full) {
+ var cmd = { validate: this.getName() };
- res.valid = false;
+ if (typeof(full) == 'object') // support arbitrary options here
+ Object.extend(cmd, full);
+ else
+ cmd.full = full;
+
+ var res = this._db.runCommand( cmd );
+
+ if (typeof(res.valid) == 'undefined') {
+ // old-style format just put everything in a string. Now using proper fields
- var raw = res.result || res.raw;
+ res.valid = false;
- if ( raw ){
- var str = "-" + tojson( raw );
- res.valid = ! ( str.match( /exception/ ) || str.match( /corrupt/ ) );
+ var raw = res.result || res.raw;
- var p = /lastExtentSize:(\d+)/;
- var r = p.exec( str );
- if ( r ){
- res.lastExtentSize = Number( r[1] );
+ if ( raw ){
+ var str = "-" + tojson( raw );
+ res.valid = ! ( str.match( /exception/ ) || str.match( /corrupt/ ) );
+
+ var p = /lastExtentSize:(\d+)/;
+ var r = p.exec( str );
+ if ( r ){
+ res.lastExtentSize = Number( r[1] );
+ }
}
}
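
An illustrative usage sketch of the new validate() signature (collection name is an example):

    db.foo.validate();                 // basic validation, as before
    db.foo.validate(true);             // sent as { validate : "foo", full : true }
    db.foo.validate({ full : true });  // an object argument is merged into the command as-is
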
@@ -530,13 +541,21 @@ DBCollection.prototype.isCapped = function(){
return ( e && e.options && e.options.capped ) ? true : false;
}
+DBCollection.prototype._distinct = function( keyString , query ){
+ return this._dbCommand( { distinct : this._shortName , key : keyString , query : query || {} } );
+ if ( ! res.ok )
+ throw "distinct failed: " + tojson( res );
+ return res.values;
+}
+
DBCollection.prototype.distinct = function( keyString , query ){
- var res = this._dbCommand( { distinct : this._shortName , key : keyString , query : query || {} } );
+ var res = this._distinct( keyString , query );
if ( ! res.ok )
throw "distinct failed: " + tojson( res );
return res.values;
}
+
DBCollection.prototype.group = function( params ){
params.ns = this._shortName;
return this._db.group( params );
@@ -578,7 +597,8 @@ MapReduceResult.prototype.drop = function(){
*/
MapReduceResult.prototype.convertToSingleObject = function(){
var z = {};
- this._coll.find().forEach( function(a){ z[a._id] = a.value; } );
+ var it = this.results != null ? this.results : this._coll.find();
+ it.forEach( function(a){ z[a._id] = a.value; } );
return z;
}
@@ -593,7 +613,7 @@ DBCollection.prototype.convertToSingleObject = function(valueField){
*/
DBCollection.prototype.mapReduce = function( map , reduce , optionsOrOutString ){
var c = { mapreduce : this._shortName , map : map , reduce : reduce };
- assert( optionsOrOutString , "need to an optionsOrOutString" )
+ assert( optionsOrOutString , "need to supply an optionsOrOutString" )
if ( typeof( optionsOrOutString ) == "string" )
c["out"] = optionsOrOutString;
diff --git a/shell/db.js b/shell/db.js
index 679f51e..2892359 100644
--- a/shell/db.js
+++ b/shell/db.js
@@ -22,8 +22,8 @@ DB.prototype.getName = function(){
return this._name;
}
-DB.prototype.stats = function(){
- return this.runCommand( { dbstats : 1 } );
+DB.prototype.stats = function(scale){
+ return this.runCommand( { dbstats : 1 , scale : scale } );
}
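
Illustrative use of the new scale argument (the server divides reported sizes by it):

    db.stats(1024 * 1024);   // report dbstats sizes in megabytes
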
DB.prototype.getCollection = function( name ){
@@ -60,15 +60,26 @@ DB.prototype.adminCommand = function( obj ){
DB.prototype._adminCommand = DB.prototype.adminCommand; // alias old name
DB.prototype.addUser = function( username , pass, readOnly ){
+ if ( pass == null || pass.length == 0 )
+ throw "password can't be empty";
+
readOnly = readOnly || false;
var c = this.getCollection( "system.users" );
var u = c.findOne( { user : username } ) || { user : username };
u.readOnly = readOnly;
u.pwd = hex_md5( username + ":mongo:" + pass );
- print( tojson( u ) );
c.save( u );
+ var le = this.getLastErrorObj();
+ printjson( le )
+ if ( le.err )
+ throw "couldn't add user: " + le.err
+ print( tojson( u ) );
+}
+
+DB.prototype.logout = function(){
+ return this.runCommand({logout : 1});
}
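
Illustrative usage of the updated user helpers (user name and password are examples):

    db.addUser("reporting", "s3cret", true);   // third argument marks the user read-only;
                                               // now throws on an empty password or a write error
    db.logout();                               // runs { logout : 1 } against the current database
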
DB.prototype.removeUser = function( username ){
@@ -124,6 +135,8 @@ DB.prototype.auth = function( username , pass ){
DB.prototype.createCollection = function(name, opt) {
var options = opt || {};
var cmd = { create: name, capped: options.capped, size: options.size, max: options.max };
+ if (options.autoIndexId != undefined)
+ cmd.autoIndexId = options.autoIndexId;
var res = this._dbCommand(cmd);
return res;
}
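
Illustrative use of the new autoIndexId option (collection name and size are examples):

    db.createCollection("log", { capped : true, size : 1024 * 1024, autoIndexId : false });
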
@@ -133,7 +146,7 @@ DB.prototype.createCollection = function(name, opt) {
* Returns the current profiling level of this database
* @return SOMETHING_FIXME or null on error
*/
-DB.prototype.getProfilingLevel = function() {
+DB.prototype.getProfilingLevel = function() {
var res = this._dbCommand( { profile: -1 } );
return res ? res.was : null;
}
@@ -143,7 +156,7 @@ DB.prototype.getProfilingLevel = function() {
* example { was : 0, slowms : 100 }
* @return SOMETHING_FIXME or null on error
*/
-DB.prototype.getProfilingStatus = function() {
+DB.prototype.getProfilingStatus = function() {
var res = this._dbCommand( { profile: -1 } );
if ( ! res.ok )
throw "profile command failed: " + tojson( res );
@@ -154,24 +167,37 @@ DB.prototype.getProfilingStatus = function() {
/**
Erase the entire database. (!)
-
+
* @return Object returned has member ok set to true if operation succeeds, false otherwise.
*/
-DB.prototype.dropDatabase = function() {
+DB.prototype.dropDatabase = function() {
if ( arguments.length )
throw "dropDatabase doesn't take arguments";
return this._dbCommand( { dropDatabase: 1 } );
}
-
-DB.prototype.shutdownServer = function() {
+/**
+ * Shuts down the database. Must be run while using the admin database.
+ * @param opts Options for shutdown. Possible options are:
+ * - force: (boolean) if the server should shut down, even if there is no
+ * up-to-date slave
+ * - timeoutSecs: (number) the server will continue checking over timeoutSecs
+ * if any other servers have caught up enough for it to shut down.
+ */
+DB.prototype.shutdownServer = function(opts) {
if( "admin" != this._name ){
return "shutdown command only works with the admin database; try 'use admin'";
}
+ cmd = {"shutdown" : 1};
+ opts = opts || {};
+ for (var o in opts) {
+ cmd[o] = opts[o];
+ }
+
try {
- var res = this._dbCommand("shutdown");
- if( res )
+ var res = this.runCommand(cmd);
+ if( res )
throw "shutdownServer failed: " + res.errmsg;
throw "shutdownServer failed";
}
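
Illustrative usage; extra option fields are copied onto the shutdown command unchanged:

    db.getSiblingDB("admin").shutdownServer();                      // plain shutdown
    db.getSiblingDB("admin").shutdownServer({ force : true });      // shut down even without an up-to-date slave
    db.getSiblingDB("admin").shutdownServer({ timeoutSecs : 30 });  // keep waiting up to 30s for slaves to catch up
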
@@ -298,6 +324,7 @@ DB.prototype.help = function() {
print("\tdb.isMaster() check replica primary status");
print("\tdb.killOp(opid) kills the current operation in the db");
print("\tdb.listCommands() lists all the db commands");
+ print("\tdb.logout()");
print("\tdb.printCollectionStats()");
print("\tdb.printReplicationInfo()");
print("\tdb.printSlaveReplicationInfo()");
@@ -312,6 +339,8 @@ DB.prototype.help = function() {
print("\tdb.stats()");
print("\tdb.version() current version of the server");
print("\tdb.getMongo().setSlaveOk() allow queries on a replication slave server");
+ print("\tdb.fsyncLock() flush data to disk and lock server for backups");
+    print("\tdb.fsyncUnlock() unlocks server following a db.fsyncLock()");
return __magicNoPrint;
}
@@ -663,7 +692,12 @@ DB.prototype.getReplicationInfo = function() {
DB.prototype.printReplicationInfo = function() {
var result = this.getReplicationInfo();
- if( result.errmsg ) {
+ if( result.errmsg ) {
+ if (!this.isMaster().ismaster) {
+ print("this is a slave, printing slave replication info.");
+ this.printSlaveReplicationInfo();
+ return;
+ }
print(tojson(result));
return;
}
@@ -680,7 +714,7 @@ DB.prototype.printSlaveReplicationInfo = function() {
print("\t syncedTo: " + st.toString() );
var ago = (now-st)/1000;
var hrs = Math.round(ago/36)/100;
- print("\t\t = " + Math.round(ago) + "secs ago (" + hrs + "hrs)");
+ print("\t\t = " + Math.round(ago) + " secs ago (" + hrs + "hrs)");
};
function g(x) {
@@ -711,13 +745,14 @@ DB.prototype.printSlaveReplicationInfo = function() {
};
var L = this.getSiblingDB("local");
- if( L.sources.count() != 0 ) {
- L.sources.find().forEach(g);
- }
- else if (L.system.replset.count() != 0) {
+
+ if (L.system.replset.count() != 0) {
var status = this.adminCommand({'replSetGetStatus' : 1});
status.members.forEach(r);
}
+ else if( L.sources.count() != 0 ) {
+ L.sources.find().forEach(g);
+ }
else {
print("local.sources is empty; is this db a --slave?");
return;
@@ -773,6 +808,14 @@ DB.prototype.printShardingStatus = function( verbose ){
printShardingStatus( this.getSiblingDB( "config" ) , verbose );
}
+DB.prototype.fsyncLock = function() {
+ return db.adminCommand({fsync:1, lock:true});
+}
+
+DB.prototype.fsyncUnlock = function() {
+ return db.getSiblingDB("admin").$cmd.sys.unlock.findOne()
+}
+
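
A sketch of the backup flow these helpers are meant for (illustrative only):

    db.fsyncLock();      // { fsync : 1, lock : true } -- flush to disk and block writes
    // ... copy the data files ...
    db.fsyncUnlock();    // reads admin.$cmd.sys.unlock to release the lock
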
DB.autocomplete = function(obj){
var colls = obj.getCollectionNames();
var ret=[];
diff --git a/shell/dbshell.cpp b/shell/dbshell.cpp
index 2e93682..f3122c7 100644
--- a/shell/dbshell.cpp
+++ b/shell/dbshell.cpp
@@ -17,19 +17,11 @@
#include "pch.h"
#include <stdio.h>
+#include <string.h>
-#if defined(_WIN32)
-# if defined(USE_READLINE)
-# define USE_READLINE_STATIC
-# endif
-#endif
-#ifdef USE_READLINE
-#include <readline/readline.h>
-#include <readline/history.h>
-#include <setjmp.h>
-jmp_buf jbuf;
-#endif
+#define USE_LINENOISE
+#include "../third_party/linenoise/linenoise.h"
#include "../scripting/engine.h"
#include "../client/dbclient.h"
@@ -52,10 +44,17 @@ static volatile bool atPrompt = false; // can eval before getting to prompt
bool autoKillOp = false;
-#if defined(USE_READLINE) && !defined(__freebsd__) && !defined(__openbsd__) && !defined(_WIN32)
-#define CTRLC_HANDLE
+#if defined(USE_LINENOISE) && !defined(__freebsd__) && !defined(__openbsd__) && !defined(_WIN32)
+// this is for ctrl-c handling
+#include <setjmp.h>
+jmp_buf jbuf;
+#endif
+
+#if defined(USE_LINENOISE)
+#define USE_TABCOMPLETION
#endif
+
namespace mongo {
Scope * shellMainScope;
@@ -67,7 +66,8 @@ void generateCompletions( const string& prefix , vector<string>& all ) {
if ( prefix.find( '"' ) != string::npos )
return;
- shellMainScope->invokeSafe("function(x) {shellAutocomplete(x)}", BSON("0" << prefix), 1000);
+ BSONObj args = BSON("0" << prefix);
+ shellMainScope->invokeSafe("function(x) {shellAutocomplete(x)}", &args, 0, 1000);
BSONObjBuilder b;
shellMainScope->append( b , "" , "__autocomplete__" );
BSONObj res = b.obj();
@@ -81,45 +81,19 @@ void generateCompletions( const string& prefix , vector<string>& all ) {
}
-#ifdef USE_READLINE
-static char** completionHook(const char* text , int start ,int end ) {
- static map<string,string> m;
-
+#ifdef USE_TABCOMPLETION
+void completionHook(const char* text , linenoiseCompletions* lc ) {
vector<string> all;
+ generateCompletions( text , all );
- generateCompletions( string(text,end) , all );
-
- if ( all.size() == 0 ) {
- return 0;
- }
-
- string longest = all[0];
- for ( vector<string>::iterator i=all.begin(); i!=all.end(); ++i ) {
- string s = *i;
- for ( unsigned j=0; j<s.size(); j++ ) {
- if ( longest[j] == s[j] )
- continue;
- longest = longest.substr(0,j);
- break;
- }
- }
-
- char ** matches = (char**)malloc( sizeof(char*) * (all.size()+2) );
- unsigned x=0;
- matches[x++] = strdup( longest.c_str() );
- for ( unsigned i=0; i<all.size(); i++ ) {
- matches[x++] = strdup( all[i].c_str() );
- }
- matches[x++] = 0;
-
- rl_completion_append_character = '\0'; // don't add a space after completions
+ for ( unsigned i=0; i<all.size(); i++ )
+ linenoiseAddCompletion( lc , (char*)all[i].c_str() );
- return matches;
}
#endif
void shellHistoryInit() {
-#ifdef USE_READLINE
+#ifdef USE_LINENOISE
stringstream ss;
char * h = getenv( "HOME" );
if ( h )
@@ -127,22 +101,22 @@ void shellHistoryInit() {
ss << ".dbshell";
historyFile = ss.str();
- using_history();
- read_history( historyFile.c_str() );
-
- rl_attempted_completion_function = completionHook;
+ linenoiseHistoryLoad( (char*)historyFile.c_str() );
+#ifdef USE_TABCOMPLETION
+ linenoiseSetCompletionCallback( completionHook );
+#endif
#else
//cout << "type \"exit\" to exit" << endl;
#endif
}
void shellHistoryDone() {
-#ifdef USE_READLINE
- write_history( historyFile.c_str() );
+#ifdef USE_LINENOISE
+ linenoiseHistorySave( (char*)historyFile.c_str() );
#endif
}
void shellHistoryAdd( const char * line ) {
-#ifdef USE_READLINE
+#ifdef USE_LINENOISE
if ( line[0] == '\0' )
return;
@@ -153,7 +127,7 @@ void shellHistoryAdd( const char * line ) {
lastLine = line;
if ((strstr(line, ".auth")) == NULL)
- add_history( line );
+ linenoiseHistoryAdd( line );
#endif
}
@@ -163,7 +137,6 @@ void intr( int sig ) {
#endif
}
-#if !defined(_WIN32)
void killOps() {
if ( mongo::shellUtils::_nokillop || mongo::shellUtils::_allMyUris.size() == 0 )
return;
@@ -208,32 +181,26 @@ void quitNicely( int sig ) {
gotInterrupted = 1;
return;
}
+
+#if !defined(_WIN32)
if ( sig == SIGPIPE )
mongo::rawOut( "mongo got signal SIGPIPE\n" );
+#endif
+
killOps();
shellHistoryDone();
exit(0);
}
-#else
-void quitNicely( int sig ) {
- mongo::dbexitCalled = true;
- //killOps();
- shellHistoryDone();
- exit(0);
-}
-#endif
char * shellReadline( const char * prompt , int handlesigint = 0 ) {
atPrompt = true;
-#ifdef USE_READLINE
-
- rl_bind_key('\t',rl_complete);
+#ifdef USE_LINENOISE
#ifdef CTRLC_HANDLE
if ( ! handlesigint ) {
- char* ret = readline( prompt );
+ char* ret = linenoise( prompt );
atPrompt = false;
return ret;
}
@@ -246,7 +213,7 @@ char * shellReadline( const char * prompt , int handlesigint = 0 ) {
signal( SIGINT , intr );
#endif
- char * ret = readline( prompt );
+ char * ret = linenoise( prompt );
signal( SIGINT , quitNicely );
atPrompt = false;
return ret;
@@ -262,8 +229,18 @@ char * shellReadline( const char * prompt , int handlesigint = 0 ) {
#endif
}
-#if !defined(_WIN32)
-#include <string.h>
+#ifdef _WIN32
+char * strsignal(int sig){
+ switch (sig){
+ case SIGINT: return "SIGINT";
+ case SIGTERM: return "SIGTERM";
+ case SIGABRT: return "SIGABRT";
+ case SIGSEGV: return "SIGSEGV";
+ case SIGFPE: return "SIGFPE";
+ default: return "unknown";
+ }
+}
+#endif
void quitAbruptly( int sig ) {
ostringstream ossSig;
@@ -289,16 +266,17 @@ void myterminate() {
void setupSignals() {
signal( SIGINT , quitNicely );
signal( SIGTERM , quitNicely );
- signal( SIGPIPE , quitNicely ); // Maybe just log and continue?
signal( SIGABRT , quitAbruptly );
signal( SIGSEGV , quitAbruptly );
- signal( SIGBUS , quitAbruptly );
signal( SIGFPE , quitAbruptly );
+
+#if !defined(_WIN32) // surprisingly these are the only ones that don't work on windows
+ signal( SIGPIPE , quitNicely ); // Maybe just log and continue?
+ signal( SIGBUS , quitAbruptly );
+#endif
+
set_terminate( myterminate );
}
-#else
-inline void setupSignals() {}
-#endif
string fixHost( string url , string host , string port ) {
//cout << "fixHost url: " << url << " host: " << host << " port: " << port << endl;
@@ -337,7 +315,7 @@ string fixHost( string url , string host , string port ) {
return newurl;
}
-static string OpSymbols = "~!%^&*-+=|:,<>/?";
+static string OpSymbols = "~!%^&*-+=|:,<>/?.";
bool isOpSymbol( char c ) {
for ( size_t i = 0; i < OpSymbols.size(); i++ )
@@ -410,6 +388,9 @@ public:
assert( ! isBalanced( "x = 5 +") );
assert( isBalanced( " x ++") );
assert( isBalanced( "-- x") );
+ assert( !isBalanced( "a.") );
+ assert( !isBalanced( "a. ") );
+ assert( isBalanced( "a.b") );
}
} balnaced_test;
@@ -422,6 +403,8 @@ string finishCode( string code ) {
return "";
if ( ! line )
return "";
+ if ( code.find("\n\n") != string::npos ) // cancel multiline if two blank lines are entered
+ return ";";
while (startsWith(line, "... "))
line += 4;
@@ -461,18 +444,6 @@ namespace mongo {
extern DBClientWithCommands *latestConn;
}
-string stateToString(MemberState s) {
- if( s.s == MemberState::RS_STARTUP ) return "STARTUP";
- if( s.s == MemberState::RS_PRIMARY ) return "PRIMARY";
- if( s.s == MemberState::RS_SECONDARY ) return "SECONDARY";
- if( s.s == MemberState::RS_RECOVERING ) return "RECOVERING";
- if( s.s == MemberState::RS_FATAL ) return "FATAL";
- if( s.s == MemberState::RS_STARTUP2 ) return "STARTUP2";
- if( s.s == MemberState::RS_ARBITER ) return "ARBITER";
- if( s.s == MemberState::RS_DOWN ) return "DOWN";
- if( s.s == MemberState::RS_ROLLBACK ) return "ROLLBACK";
- return "";
-}
string sayReplSetMemberState() {
try {
if( latestConn ) {
@@ -482,8 +453,10 @@ string sayReplSetMemberState() {
ss << info["set"].String() << ':';
int s = info["myState"].Int();
MemberState ms(s);
- ss << stateToString(ms);
- return ss.str();
+ return ms.toString();
+ }
+ else if( str::equals(info.getStringField("info"), "mongos") ) {
+ return "mongos";
}
}
}
@@ -509,6 +482,7 @@ int _main(int argc, char* argv[]) {
bool runShell = false;
bool nodb = false;
+ bool norc = false;
string script;
@@ -520,6 +494,7 @@ int _main(int argc, char* argv[]) {
shell_options.add_options()
("shell", "run the shell after executing files")
("nodb", "don't connect to mongod on startup - no 'db address' arg expected")
+ ("norc", "will not run the \".mongorc.js\" file on start up")
("quiet", "be less chatty" )
("port", po::value<string>(&port), "port to connect to")
("host", po::value<string>(&dbhost), "server to connect to")
@@ -531,6 +506,9 @@ int _main(int argc, char* argv[]) {
("version", "show version information")
("verbose", "increase verbosity")
("ipv6", "enable IPv6 support (disabled by default)")
+#ifdef MONGO_SSL
+            ("ssl", "use ssl for all connections")
+#endif
;
hidden_options.add_options()
@@ -582,6 +560,9 @@ int _main(int argc, char* argv[]) {
if (params.count("nodb")) {
nodb = true;
}
+ if (params.count("norc")) {
+ norc = true;
+ }
if (params.count("help")) {
show_help_text(argv[0], shell_options);
return mongo::EXIT_CLEAN;
@@ -596,6 +577,11 @@ int _main(int argc, char* argv[]) {
if (params.count("quiet")) {
mongo::cmdLine.quiet = true;
}
+#ifdef MONGO_SSL
+ if (params.count("ssl")) {
+ mongo::cmdLine.sslOnNormalPorts = true;
+ }
+#endif
if (params.count("nokillop")) {
mongo::shellUtils::_nokillop = true;
}
@@ -603,6 +589,8 @@ int _main(int argc, char* argv[]) {
autoKillOp = true;
}
+
+
/* This is a bit confusing, here are the rules:
*
* if nodb is set then all positional parameters are files
@@ -696,8 +684,28 @@ int _main(int argc, char* argv[]) {
mongo::shellUtils::MongoProgramScope s;
+ if (!norc) {
+ string rcLocation;
+#ifndef _WIN32
+ if ( getenv("HOME") != NULL )
+ rcLocation = str::stream() << getenv("HOME") << "/.mongorc.js" ;
+#else
+ if ( getenv("HOMEDRIVE") != NULL && getenv("HOMEPATH") != NULL )
+ rcLocation = str::stream() << getenv("HOMEDRIVE") << getenv("HOMEPATH") << "\\.mongorc.js";
+#endif
+ if ( !rcLocation.empty() && fileExists(rcLocation) ) {
+ if ( ! scope->execFile( rcLocation , false , true , false , 0 ) ) {
+ cout << "The \".mongorc.js\" file located in your home folder could not be executed" << endl;
+ return -5;
+ }
+ }
+ }
+
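
A hypothetical ~/.mongorc.js, which the shell now executes at startup unless --norc is given:

    // example contents of ~/.mongorc.js
    print("loaded .mongorc.js");
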
shellHistoryInit();
+ string prompt;
+ int promptType;
+
//v8::Handle<v8::Object> shellHelper = baseContext_->Global()->Get( v8::String::New( "shellHelper" ) )->ToObject();
while ( 1 ) {
@@ -706,7 +714,15 @@ int _main(int argc, char* argv[]) {
// shellMainScope->localConnect;
//DBClientWithCommands *c = getConnection( JSContext *cx, JSObject *obj );
- string prompt(sayReplSetMemberState()+"> ");
+ promptType = scope->type("prompt");
+ if (promptType == String){
+ prompt = scope->getString("prompt");
+ } else if (promptType == Code) {
+ scope->exec("__prompt__ = prompt();", "", false, false, false, 0);
+ prompt = scope->getString("__prompt__");
+ } else {
+ prompt = sayReplSetMemberState()+"> ";
+ }
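
With this change the prompt can be overridden from JavaScript (for instance in .mongorc.js); either form below works, the second being re-evaluated before every line:

    prompt = "mongo> ";
    prompt = function() { return db.getName() + "> "; };
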
char * line = shellReadline( prompt.c_str() );
diff --git a/shell/mongo.js b/shell/mongo.js
index e129784..2535769 100644
--- a/shell/mongo.js
+++ b/shell/mongo.js
@@ -24,8 +24,9 @@ if ( typeof mongoInject == "function" ){
mongoInject( Mongo.prototype );
}
-Mongo.prototype.setSlaveOk = function() {
- this.slaveOk = true;
+Mongo.prototype.setSlaveOk = function( value ) {
+ if( value == undefined ) value = true
+ this.slaveOk = value
}
Mongo.prototype.getDB = function( name ){
@@ -43,6 +44,10 @@ Mongo.prototype.adminCommand = function( cmd ){
return this.getDB( "admin" ).runCommand( cmd );
}
+Mongo.prototype.setLogLevel = function( logLevel ){
+ return this.adminCommand({ setParameter : 1, logLevel : logLevel })
+}
+
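
Illustrative usage of the updated connection helpers:

    db.getMongo().setSlaveOk();        // same as setSlaveOk(true)
    db.getMongo().setSlaveOk(false);   // slaveOk can now be switched back off
    db.getMongo().setLogLevel(2);      // sends { setParameter : 1, logLevel : 2 }
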
Mongo.prototype.getDBNames = function(){
return this.getDBs().databases.map(
function(z){
diff --git a/shell/mongo_vstudio.cpp b/shell/mongo_vstudio.cpp
index c3c3751..5496ddb 100644
--- a/shell/mongo_vstudio.cpp
+++ b/shell/mongo_vstudio.cpp
@@ -15,7 +15,21 @@ const StringData _jscode_raw_utils =
"if ( a == b )\n"
"return true;\n"
"\n"
-"if ( tojson( a ) == tojson( b ) )\n"
+"a = tojson(a,false,true);\n"
+"b = tojson(b,false,true);\n"
+"\n"
+"if ( a == b )\n"
+"return true;\n"
+"\n"
+"var clean = function( s ){\n"
+"s = s.replace( /NumberInt\\((\\-?\\d+)\\)/g , \"$1\" );\n"
+"return s;\n"
+"}\n"
+"\n"
+"a = clean(a);\n"
+"b = clean(b);\n"
+"\n"
+"if ( a == b )\n"
"return true;\n"
"\n"
"return false;\n"
@@ -40,10 +54,8 @@ const StringData _jscode_raw_utils =
"\n"
"assert = function( b , msg ){\n"
"if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
-"\n"
"if ( b )\n"
"return;\n"
-"\n"
"doassert( msg == undefined ? \"assert failed\" : \"assert failed : \" + msg );\n"
"}\n"
"\n"
@@ -77,6 +89,26 @@ const StringData _jscode_raw_utils =
"doassert( \"[\" + a + \"] != [\" + b + \"] are equal : \" + msg );\n"
"}\n"
"\n"
+"assert.contains = function( o, arr, msg ){\n"
+"var wasIn = false\n"
+"\n"
+"if( ! arr.length ){\n"
+"for( i in arr ){\n"
+"wasIn = arr[i] == o || ( ( arr[i] != null && o != null ) && friendlyEqual( arr[i] , o ) )\n"
+"return;\n"
+"if( wasIn ) break\n"
+"}\n"
+"}\n"
+"else {\n"
+"for( var i = 0; i < arr.length; i++ ){\n"
+"wasIn = arr[i] == o || ( ( arr[i] != null && o != null ) && friendlyEqual( arr[i] , o ) )\n"
+"if( wasIn ) break\n"
+"}\n"
+"}\n"
+"\n"
+"if( ! wasIn ) doassert( tojson( o ) + \" was not in \" + tojson( arr ) + \" : \" + msg )\n"
+"}\n"
+"\n"
"assert.repeat = function( f, msg, timeout, interval ) {\n"
"if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
"\n"
@@ -204,6 +236,18 @@ const StringData _jscode_raw_utils =
"doassert( a + \" is not greater than or eq \" + b + \" : \" + msg );\n"
"}\n"
"\n"
+"assert.between = function( a, b, c, msg, inclusive ){\n"
+"if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
+"\n"
+"if( ( inclusive == undefined || inclusive == true ) &&\n"
+"a <= b && b <= c ) return;\n"
+"else if( a < b && b < c ) return;\n"
+"\n"
+"doassert( b + \" is not between \" + a + \" and \" + c + \" : \" + msg );\n"
+"}\n"
+"\n"
+"assert.betweenIn = function( a, b, c, msg ){ assert.between( a, b, c, msg, true ) }\n"
+"assert.betweenEx = function( a, b, c, msg ){ assert.between( a, b, c, msg, false ) }\n"
"\n"
"assert.close = function( a , b , msg , places ){\n"
"if (places === undefined) {\n"
@@ -231,6 +275,11 @@ const StringData _jscode_raw_utils =
"return dst;\n"
"}\n"
"\n"
+"Object.merge = function( dst, src, deep ){\n"
+"var clone = Object.extend( {}, dst, deep )\n"
+"return Object.extend( clone, src, deep )\n"
+"}\n"
+"\n"
"argumentsToArray = function( a ){\n"
"var arr = [];\n"
"for ( var i=0; i<a.length; i++ )\n"
@@ -375,20 +424,25 @@ const StringData _jscode_raw_utils =
"}\n"
"\n"
"\n"
-"Array.tojson = function( a , indent ){\n"
+"Array.tojson = function( a , indent , nolint ){\n"
+"var lineEnding = nolint ? \" \" : \"\\n\";\n"
+"\n"
"if (!indent)\n"
"indent = \"\";\n"
"\n"
+"if ( nolint )\n"
+"indent = \"\";\n"
+"\n"
"if (a.length == 0) {\n"
"return \"[ ]\";\n"
"}\n"
"\n"
-"var s = \"[\\n\";\n"
+"var s = \"[\" + lineEnding;\n"
"indent += \"\\t\";\n"
"for ( var i=0; i<a.length; i++){\n"
-"s += indent + tojson( a[i], indent );\n"
+"s += indent + tojson( a[i], indent , nolint );\n"
"if ( i < a.length - 1 ){\n"
-"s += \",\\n\";\n"
+"s += \",\" + lineEnding;\n"
"}\n"
"}\n"
"if ( a.length == 0 ) {\n"
@@ -396,7 +450,7 @@ const StringData _jscode_raw_utils =
"}\n"
"\n"
"indent = indent.substring(1);\n"
-"s += \"\\n\"+indent+\"]\";\n"
+"s += lineEnding+indent+\"]\";\n"
"return s;\n"
"}\n"
"\n"
@@ -464,6 +518,14 @@ const StringData _jscode_raw_utils =
"return this.toString();\n"
"}\n"
"\n"
+"if ( ! NumberInt.prototype ) {\n"
+"NumberInt.prototype = {}\n"
+"}\n"
+"\n"
+"NumberInt.prototype.tojson = function() {\n"
+"return this.toString();\n"
+"}\n"
+"\n"
"if ( ! ObjectId.prototype )\n"
"ObjectId.prototype = {}\n"
"\n"
@@ -538,16 +600,24 @@ const StringData _jscode_raw_utils =
"//return \"BinData type: \" + this.type + \" len: \" + this.len;\n"
"return this.toString();\n"
"}\n"
+"\n"
+"BinData.prototype.subtype = function () {\n"
+"return this.type;\n"
+"}\n"
+"\n"
+"BinData.prototype.length = function () {\n"
+"return this.len;\n"
+"}\n"
"}\n"
"else {\n"
"print( \"warning: no BinData class\" );\n"
"}\n"
"\n"
-"if ( typeof( UUID ) != \"undefined\" ){\n"
+"/*if ( typeof( UUID ) != \"undefined\" ){\n"
"UUID.prototype.tojson = function () {\n"
"return this.toString();\n"
"}\n"
-"}\n"
+"}*/\n"
"\n"
"if ( typeof _threadInject != \"undefined\" ){\n"
"print( \"fork() available!\" );\n"
@@ -672,7 +742,9 @@ const StringData _jscode_raw_utils =
"\"jstests/killop.js\",\n"
"\"jstests/run_program1.js\",\n"
"\"jstests/notablescan.js\",\n"
-"\"jstests/drop2.js\"] );\n"
+"\"jstests/drop2.js\",\n"
+"\"jstests/dropdb_race.js\",\n"
+"\"jstests/bench_test1.js\"] );\n"
"\n"
"// some tests can't be run in parallel with each other\n"
"var serialTestsArr = [ \"jstests/fsync.js\",\n"
@@ -908,6 +980,35 @@ const StringData _jscode_raw_utils =
"print( tojsononeline( x ) );\n"
"}\n"
"\n"
+"if ( typeof TestData == \"undefined\" ){\n"
+"TestData = undefined\n"
+"}\n"
+"\n"
+"jsTestName = function(){\n"
+"if( TestData ) return TestData.testName\n"
+"return \"__unknown_name__\"\n"
+"}\n"
+"\n"
+"jsTestFile = function(){\n"
+"if( TestData ) return TestData.testFile\n"
+"return \"__unknown_file__\"\n"
+"}\n"
+"\n"
+"jsTestPath = function(){\n"
+"if( TestData ) return TestData.testPath\n"
+"return \"__unknown_path__\"\n"
+"}\n"
+"\n"
+"jsTestOptions = function(){\n"
+"if( TestData ) return { noJournal : TestData.noJournal,\n"
+"noJournalPrealloc : TestData.noJournalPrealloc }\n"
+"return {}\n"
+"}\n"
+"\n"
+"testLog = function(x){\n"
+"print( jsTestFile() + \" - \" + x )\n"
+"}\n"
+"\n"
"shellPrintHelper = function (x) {\n"
"\n"
"if (typeof (x) == \"undefined\") {\n"
@@ -961,7 +1062,6 @@ const StringData _jscode_raw_utils =
"\n"
"builtinMethods[Mongo] = \"find update insert remove\".split(' ');\n"
"builtinMethods[BinData] = \"hex base64 length subtype\".split(' ');\n"
-"builtinMethods[NumberLong] = \"toNumber\".split(' ');\n"
"\n"
"var extraGlobals = \"Infinity NaN undefined null true false decodeURI decodeURIComponent encodeURI encodeURIComponent escape eval isFinite isNaN parseFloat parseInt unescape Array Boolean Date Math Number RegExp String print load gc MinKey MaxKey Mongo NumberLong ObjectId DBPointer UUID BinData Map\".split(' ');\n"
"\n"
@@ -1022,7 +1122,7 @@ const StringData _jscode_raw_utils =
"var p = possibilities[i];\n"
"if (typeof(curObj[p]) == \"undefined\" && curObj != global) continue; // extraGlobals aren't in the global object\n"
"if (p.length == 0 || p.length < lastPrefix.length) continue;\n"
-"if (isPrivate(p)) continue;\n"
+"if (lastPrefix[0] != '_' && isPrivate(p)) continue;\n"
"if (p.match(/^[0-9]+$/)) continue; // don't array number indexes\n"
"if (p.substr(0, lastPrefix.length) != lastPrefix) continue;\n"
"\n"
@@ -1051,7 +1151,7 @@ const StringData _jscode_raw_utils =
"\n"
"shellHelper = function( command , rest , shouldPrint ){\n"
"command = command.trim();\n"
-"var args = rest.trim().replace(/;$/,\"\").split( \"\\s+\" );\n"
+"var args = rest.trim().replace(/\\s*;$/,\"\").split( \"\\s+\" );\n"
"\n"
"if ( ! shellHelper[command] )\n"
"throw \"no command [\" + command + \"]\";\n"
@@ -1084,6 +1184,10 @@ const StringData _jscode_raw_utils =
"shellHelper.show = function (what) {\n"
"assert(typeof what == \"string\");\n"
"\n"
+"var args = what.split( /\\s+/ );\n"
+"what = args[0]\n"
+"args = args.splice(1)\n"
+"\n"
"if (what == \"profile\") {\n"
"if (db.system.profile.count() == 0) {\n"
"print(\"db.system.profile is empty\");\n"
@@ -1092,7 +1196,32 @@ const StringData _jscode_raw_utils =
"}\n"
"else {\n"
"print();\n"
-"db.system.profile.find({ millis: { $gt: 0} }).sort({ $natural: -1 }).limit(5).forEach(function (x) { print(\"\" + x.millis + \"ms \" + String(x.ts).substring(0, 24)); print(x.info); print(\"\\n\"); })\n"
+"db.system.profile.find({ millis: { $gt: 0} }).sort({ $natural: -1 }).limit(5).forEach(\n"
+"function (x) {\n"
+"print(\"\" + x.op + \"\\t\" + x.ns + \" \" + x.millis + \"ms \" + String(x.ts).substring(0, 24));\n"
+"var l = \"\";\n"
+"for ( var z in x ){\n"
+"if ( z == \"op\" || z == \"ns\" || z == \"millis\" || z == \"ts\" )\n"
+"continue;\n"
+"\n"
+"var val = x[z];\n"
+"var mytype = typeof(val);\n"
+"\n"
+"if ( mytype == \"string\" ||\n"
+"mytype == \"number\" )\n"
+"l += z + \":\" + val + \" \";\n"
+"else if ( mytype == \"object\" )\n"
+"l += z + \":\" + tojson(val ) + \" \";\n"
+"else if ( mytype == \"boolean\" )\n"
+"l += z + \" \";\n"
+"else\n"
+"l += z + \":\" + val + \" \";\n"
+"\n"
+"}\n"
+"print( l );\n"
+"print(\"\\n\");\n"
+"}\n"
+")\n"
"}\n"
"return \"\";\n"
"}\n"
@@ -1123,6 +1252,27 @@ const StringData _jscode_raw_utils =
"return \"\";\n"
"}\n"
"\n"
+"if (what == \"log\" ) {\n"
+"var n = \"global\";\n"
+"if ( args.length > 0 )\n"
+"n = args[0]\n"
+"\n"
+"var res = db.adminCommand( { getLog : n } )\n"
+"for ( var i=0; i<res.log.length; i++){\n"
+"print( res.log[i] )\n"
+"}\n"
+"return \"\"\n"
+"}\n"
+"\n"
+"if (what == \"logs\" ) {\n"
+"var res = db.adminCommand( { getLog : \"*\" } )\n"
+"for ( var i=0; i<res.names.length; i++){\n"
+"print( res.names[i] )\n"
+"}\n"
+"return \"\"\n"
+"}\n"
+"\n"
+"\n"
"throw \"don't know how to show [\" + what + \"]\";\n"
"\n"
"}\n"
@@ -1319,18 +1469,40 @@ const StringData _jscode_raw_utils =
"rs.status = function () { return db._adminCommand(\"replSetGetStatus\"); }\n"
"rs.isMaster = function () { return db.isMaster(); }\n"
"rs.initiate = function (c) { return db._adminCommand({ replSetInitiate: c }); }\n"
-"rs.reconfig = function (cfg) {\n"
-"cfg.version = rs.conf().version + 1;\n"
+"rs._runCmd = function (c) {\n"
+"// after the command, catch the disconnect and reconnect if necessary\n"
"var res = null;\n"
"try {\n"
-"res = db.adminCommand({ replSetReconfig: cfg });\n"
+"res = db.adminCommand(c);\n"
"}\n"
"catch (e) {\n"
-"print(\"shell got exception during reconfig: \" + e);\n"
+"if ((\"\" + e).indexOf(\"error doing query\") >= 0) {\n"
+"// closed connection. reconnect.\n"
+"db.getLastErrorObj();\n"
+"var o = db.getLastErrorObj();\n"
+"if (o.ok) {\n"
+"print(\"reconnected to server after rs command (which is normal)\");\n"
+"}\n"
+"else {\n"
+"printjson(o);\n"
+"}\n"
+"}\n"
+"else {\n"
+"print(\"shell got exception during repl set operation: \" + e);\n"
"print(\"in some circumstances, the primary steps down and closes connections on a reconfig\");\n"
"}\n"
+"return \"\";\n"
+"}\n"
"return res;\n"
"}\n"
+"rs.reconfig = function (cfg, options) {\n"
+"cfg.version = rs.conf().version + 1;\n"
+"cmd = { replSetReconfig: cfg };\n"
+"for (var i in options) {\n"
+"cmd[i] = options[i];\n"
+"}\n"
+"return this._runCmd(cmd);\n"
+"}\n"
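
Illustrative usage of the reworked reconfig helper; option fields are merged into the replSetReconfig command, so the force field below assumes the server accepts it:

    var cfg = rs.conf();
    cfg.members[1].priority = 0;
    rs.reconfig(cfg);                   // version is bumped automatically
    rs.reconfig(cfg, { force : true }); // extra fields ride along on the command
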
"rs.add = function (hostport, arb) {\n"
"var cfg = hostport;\n"
"\n"
@@ -1350,20 +1522,13 @@ const StringData _jscode_raw_utils =
"cfg.arbiterOnly = true;\n"
"}\n"
"c.members.push(cfg);\n"
-"var res = null;\n"
-"try {\n"
-"res = db.adminCommand({ replSetReconfig: c });\n"
-"}\n"
-"catch (e) {\n"
-"print(\"shell got exception during reconfig: \" + e);\n"
-"print(\"in some circumstances, the primary steps down and closes connections on a reconfig\");\n"
+"return this._runCmd({ replSetReconfig: c });\n"
"}\n"
-"return res;\n"
-"}\n"
-"rs.stepDown = function (secs) { return db._adminCommand({ replSetStepDown:secs||60}); }\n"
+"rs.stepDown = function (secs) { return db._adminCommand({ replSetStepDown:(secs === undefined) ? 60:secs}); }\n"
"rs.freeze = function (secs) { return db._adminCommand({replSetFreeze:secs}); }\n"
"rs.addArb = function (hn) { return this.add(hn, true); }\n"
"rs.conf = function () { return db.getSisterDB(\"local\").system.replset.findOne(); }\n"
+"rs.config = function () { return rs.conf(); }\n"
"\n"
"rs.remove = function (hn) {\n"
"var local = db.getSisterDB(\"local\");\n"
@@ -1382,6 +1547,41 @@ const StringData _jscode_raw_utils =
"return \"error: couldn't find \"+hn+\" in \"+tojson(c.members);\n"
"};\n"
"\n"
+"rs.debug = {};\n"
+"\n"
+"rs.debug.nullLastOpWritten = function(primary, secondary) {\n"
+"var p = connect(primary+\"/local\");\n"
+"var s = connect(secondary+\"/local\");\n"
+"s.getMongo().setSlaveOk();\n"
+"\n"
+"var secondToLast = s.oplog.rs.find().sort({$natural : -1}).limit(1).next();\n"
+"var last = p.runCommand({findAndModify : \"oplog.rs\",\n"
+"query : {ts : {$gt : secondToLast.ts}},\n"
+"sort : {$natural : 1},\n"
+"update : {$set : {op : \"n\"}}});\n"
+"\n"
+"if (!last.value.o || !last.value.o._id) {\n"
+"print(\"couldn't find an _id?\");\n"
+"}\n"
+"else {\n"
+"last.value.o = {_id : last.value.o._id};\n"
+"}\n"
+"\n"
+"print(\"nulling out this op:\");\n"
+"printjson(last);\n"
+"};\n"
+"\n"
+"rs.debug.getLastOpWritten = function(server) {\n"
+"var s = db.getSisterDB(\"local\");\n"
+"if (server) {\n"
+"s = connect(server+\"/local\");\n"
+"}\n"
+"s.getMongo().setSlaveOk();\n"
+"\n"
+"return s.oplog.rs.find().sort({$natural : -1}).limit(1).next();\n"
+"};\n"
+"\n"
+"\n"
"help = shellHelper.help = function (x) {\n"
"if (x == \"mr\") {\n"
"print(\"\\nSee also http://www.mongodb.org/display/DOCS/MapReduce\");\n"
@@ -1413,6 +1613,17 @@ const StringData _jscode_raw_utils =
"print(\"\\nNote: the REPL prompt only auto-reports getLastError() for the shell command line connection.\\n\");\n"
"return;\n"
"}\n"
+"else if (x == \"keys\") {\n"
+"print(\"Tab completion and command history is available at the command prompt.\\n\");\n"
+"print(\"Some emacs keystrokes are available too:\");\n"
+"print(\" Ctrl-A start of line\");\n"
+"print(\" Ctrl-E end of line\");\n"
+"print(\" Ctrl-K del to end of line\");\n"
+"print(\"\\nMulti-line commands\");\n"
+"print(\"You can enter a multi line javascript expression. If parens, braces, etc. are not closed, you will see a new line \");\n"
+"print(\"beginning with '...' characters. Type the rest of your expression. Press Ctrl-C to abort the data entry if you\");\n"
+"print(\"get stuck.\\n\");\n"
+"}\n"
"else if (x == \"misc\") {\n"
"print(\"\\tb = new BinData(subtype,base64str) create a BSON BinData value\");\n"
"print(\"\\tb.subtype() the BinData subtype (0..255)\");\n"
@@ -1421,6 +1632,10 @@ const StringData _jscode_raw_utils =
"print(\"\\tb.base64() the data as a base 64 encoded string\");\n"
"print(\"\\tb.toString()\");\n"
"print();\n"
+"print(\"\\tb = HexData(subtype,hexstr) create a BSON BinData value from a hex string\");\n"
+"print(\"\\tb = UUID(hexstr) create a BSON BinData value of UUID subtype\");\n"
+"print(\"\\tb = MD5(hexstr) create a BSON BinData value of MD5 subtype\");\n"
+"print();\n"
"print(\"\\to = new ObjectId() create a new ObjectId\");\n"
"print(\"\\to.getTimestamp() return timestamp derived from first 32 bits of the OID\");\n"
"print(\"\\to.isObjectId()\");\n"
@@ -1459,15 +1674,18 @@ const StringData _jscode_raw_utils =
"print(\"\\t\" + \"db.help() help on db methods\");\n"
"print(\"\\t\" + \"db.mycoll.help() help on collection methods\");\n"
"print(\"\\t\" + \"rs.help() help on replica set methods\");\n"
-"print(\"\\t\" + \"help connect connecting to a db help\");\n"
"print(\"\\t\" + \"help admin administrative help\");\n"
+"print(\"\\t\" + \"help connect connecting to a db help\");\n"
+"print(\"\\t\" + \"help keys key shortcuts\");\n"
"print(\"\\t\" + \"help misc misc things to know\");\n"
-"print(\"\\t\" + \"help mr mapreduce help\");\n"
+"print(\"\\t\" + \"help mr mapreduce\");\n"
"print();\n"
"print(\"\\t\" + \"show dbs show database names\");\n"
"print(\"\\t\" + \"show collections show collections in current database\");\n"
"print(\"\\t\" + \"show users show users in current database\");\n"
"print(\"\\t\" + \"show profile show most recent system.profile entries with time >= 1ms\");\n"
+"print(\"\\t\" + \"show logs show the accessible logger names\");\n"
+"print(\"\\t\" + \"show log [name] prints out the last segment of log in memory, 'global' is default\");\n"
"print(\"\\t\" + \"use <db_name> set current database\");\n"
"print(\"\\t\" + \"db.foo.find() list objects in collection foo\");\n"
"print(\"\\t\" + \"db.foo.find( { a : 1 } ) list objects in foo where a == 1\");\n"
@@ -1481,6 +1699,108 @@ const StringData _jscode_raw_utils =
;
extern const JSFile utils;
const JSFile utils = { "shell/utils.js" , _jscode_raw_utils };
+const StringData _jscode_raw_utils_sh =
+"sh = function() { return \"try sh.help();\" }\n"
+"\n"
+"\n"
+"sh._checkMongos = function() {\n"
+"var x = db.runCommand( \"ismaster\" );\n"
+"if ( x.msg != \"isdbgrid\" )\n"
+"throw \"not connected to a mongos\"\n"
+"}\n"
+"\n"
+"sh._checkFullName = function( fullName ) {\n"
+"assert( fullName , \"neeed a full name\" )\n"
+"assert( fullName , \"need a full name\" )\n"
+"}\n"
+"\n"
+"sh._adminCommand = function( cmd , skipCheck ) {\n"
+"if ( ! skipCheck ) sh._checkMongos();\n"
+"var res = db.getSisterDB( \"admin\" ).runCommand( cmd );\n"
+"\n"
+"if ( res == null || ! res.ok ) {\n"
+"print( \"command failed: \" + tojson( res ) )\n"
+"}\n"
+"\n"
+"return res;\n"
+"}\n"
+"\n"
+"sh.help = function() {\n"
+"print( \"\\tsh.addShard( host ) server:port OR setname/server:port\" )\n"
+"print( \"\\tsh.enableSharding(dbname) enables sharding on the database dbname\" )\n"
+"print( \"\\tsh.shardCollection(fullName,key,unique) shards the collection\" );\n"
+"\n"
+"print( \"\\tsh.splitFind(fullName,find) splits the chunk that find is in at the median\" );\n"
+"print( \"\\tsh.splitAt(fullName,middle) splits the chunk that middle is in at middle\" );\n"
+"print( \"\\tsh.moveChunk(fullName,find,to) move the chunk where 'find' is to 'to' (name of shard)\");\n"
+"\n"
+"print( \"\\tsh.setBalancerState( <bool on or not> ) turns the balancer on or off true=on, false=off\" );\n"
+"print( \"\\tsh.getBalancerState() return true if on, off if not\" );\n"
+"print( \"\\tsh.isBalancerRunning() return true if the balancer is running on any mongos\" );\n"
+"\n"
+"print( \"\\tsh.status() prints a general overview of the cluster\" )\n"
+"}\n"
+"\n"
+"sh.status = function( verbose , configDB ) {\n"
+"// TODO: move the actual command here\n"
+"printShardingStatus( configDB , verbose );\n"
+"}\n"
+"\n"
+"sh.addShard = function( url ){\n"
+"sh._adminCommand( { addShard : url } , true )\n"
+"}\n"
+"\n"
+"sh.enableSharding = function( dbname ) {\n"
+"assert( dbname , \"need a valid dbname\" )\n"
+"sh._adminCommand( { enableSharding : dbname } )\n"
+"}\n"
+"\n"
+"sh.shardCollection = function( fullName , key , unique ) {\n"
+"sh._checkFullName( fullName )\n"
+"assert( key , \"need a key\" )\n"
+"assert( typeof( key ) == \"object\" , \"key needs to be an object\" )\n"
+"\n"
+"var cmd = { shardCollection : fullName , key : key }\n"
+"if ( unique )\n"
+"cmd.unique = true;\n"
+"\n"
+"sh._adminCommand( cmd )\n"
+"}\n"
+"\n"
+"\n"
+"sh.splitFind = function( fullName , find ) {\n"
+"sh._checkFullName( fullName )\n"
+"sh._adminCommand( { split : fullName , find : find } )\n"
+"}\n"
+"\n"
+"sh.splitAt = function( fullName , middle ) {\n"
+"sh._checkFullName( fullName )\n"
+"sh._adminCommand( { split : fullName , middle : middle } )\n"
+"}\n"
+"\n"
+"sh.moveChunk = function( fullName , find , to ) {\n"
+"sh._checkFullName( fullName );\n"
+"sh._adminCommand( { moveChunk : fullName , find : find , to : to } )\n"
+"}\n"
+"\n"
+"sh.setBalancerState = function( onOrNot ) {\n"
+"db.getSisterDB( \"config\" ).settings.update({ _id: \"balancer\" }, { $set : { stopped: onOrNot ? false : true } }, true );\n"
+"}\n"
+"\n"
+"sh.getBalancerState = function() {\n"
+"var x = db.getSisterDB( \"config\" ).settings.findOne({ _id: \"balancer\" } )\n"
+"if ( x == null )\n"
+"return true;\n"
+"return ! x.stopped;\n"
+"}\n"
+"\n"
+"sh.isBalancerRunning = function() {\n"
+"var x = db.getSisterDB( \"config\" ).locks.findOne( { _id : \"balancer\" } );\n"
+"if ( x == null )\n"
+"return false;\n"
+"return x.state > 0;\n"
+"}\n"
+;
+extern const JSFile utils_sh;
+const JSFile utils_sh = { "shell/utils_sh.js" , _jscode_raw_utils_sh };
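A minimal sketch of the new sh.* helpers from a shell connected to a mongos (database, collection, and key names are illustrative):

    sh.enableSharding( "mydb" )                           // wraps the enableSharding admin command
    sh.shardCollection( "mydb.users" , { user_id : 1 } )  // shard mydb.users on user_id
    sh.splitAt( "mydb.users" , { user_id : 1000 } )       // split the covering chunk at user_id 1000
    sh.setBalancerState( false )                          // flips config.settings { _id : "balancer" }
    print( sh.getBalancerState() )                        // prints false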
const StringData _jscode_raw_db =
"// db.js\n"
"\n"
@@ -1506,8 +1826,8 @@ const StringData _jscode_raw_db =
"return this._name;\n"
"}\n"
"\n"
-"DB.prototype.stats = function(){\n"
-"return this.runCommand( { dbstats : 1 } );\n"
+"DB.prototype.stats = function(scale){\n"
+"return this.runCommand( { dbstats : 1 , scale : scale } );\n"
"}\n"
"\n"
"DB.prototype.getCollection = function( name ){\n"
@@ -1544,15 +1864,26 @@ const StringData _jscode_raw_db =
"DB.prototype._adminCommand = DB.prototype.adminCommand; // alias old name\n"
"\n"
"DB.prototype.addUser = function( username , pass, readOnly ){\n"
+"if ( pass == null || pass.length == 0 )\n"
+"throw \"password can't be empty\";\n"
+"\n"
"readOnly = readOnly || false;\n"
"var c = this.getCollection( \"system.users\" );\n"
"\n"
"var u = c.findOne( { user : username } ) || { user : username };\n"
"u.readOnly = readOnly;\n"
"u.pwd = hex_md5( username + \":mongo:\" + pass );\n"
-"print( tojson( u ) );\n"
"\n"
"c.save( u );\n"
+"var le = this.getLastErrorObj();\n"
+"printjson( le )\n"
+"if ( le.err )\n"
+"throw \"couldn't add user: \" + le.err\n"
+"print( tojson( u ) );\n"
+"}\n"
+"\n"
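For example, passing true as the third argument creates a read-only account, and an empty password now throws (a usage sketch; the names are illustrative):

    db.addUser( "reader" , "secret123" , true )   // read-only user; throws if getLastError reports a failure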
+"DB.prototype.logout = function(){\n"
+"return this.runCommand({logout : 1});\n"
"}\n"
"\n"
"DB.prototype.removeUser = function( username ){\n"
@@ -1608,6 +1939,8 @@ const StringData _jscode_raw_db =
"DB.prototype.createCollection = function(name, opt) {\n"
"var options = opt || {};\n"
"var cmd = { create: name, capped: options.capped, size: options.size, max: options.max };\n"
+"if (options.autoIndexId != undefined)\n"
+"cmd.autoIndexId = options.autoIndexId;\n"
"var res = this._dbCommand(cmd);\n"
"return res;\n"
"}\n"
@@ -1647,14 +1980,27 @@ const StringData _jscode_raw_db =
"return this._dbCommand( { dropDatabase: 1 } );\n"
"}\n"
"\n"
-"\n"
-"DB.prototype.shutdownServer = function() {\n"
+"/**\n"
+"* Shuts down the database. Must be run while using the admin database.\n"
+"* @param opts Options for shutdown. Possible options are:\n"
+"* - force: (boolean) if the server should shut down, even if there is no\n"
+"* up-to-date slave\n"
+"* - timeoutSecs: (number) the server will keep checking for up to timeoutSecs\n"
+"* to see whether other members have caught up enough for it to shut down.\n"
+"*/\n"
+"DB.prototype.shutdownServer = function(opts) {\n"
"if( \"admin\" != this._name ){\n"
"return \"shutdown command only works with the admin database; try 'use admin'\";\n"
"}\n"
"\n"
+"var cmd = {\"shutdown\" : 1};\n"
+"opts = opts || {};\n"
+"for (var o in opts) {\n"
+"cmd[o] = opts[o];\n"
+"}\n"
+"\n"
"try {\n"
-"var res = this._dbCommand(\"shutdown\");\n"
+"var res = this.runCommand(cmd);\n"
"if( res )\n"
"throw \"shutdownServer failed: \" + res.errmsg;\n"
"throw \"shutdownServer failed\";\n"
@@ -1782,6 +2128,7 @@ const StringData _jscode_raw_db =
"print(\"\\tdb.isMaster() check replica primary status\");\n"
"print(\"\\tdb.killOp(opid) kills the current operation in the db\");\n"
"print(\"\\tdb.listCommands() lists all the db commands\");\n"
+"print(\"\\tdb.logout()\");\n"
"print(\"\\tdb.printCollectionStats()\");\n"
"print(\"\\tdb.printReplicationInfo()\");\n"
"print(\"\\tdb.printSlaveReplicationInfo()\");\n"
@@ -1796,6 +2143,8 @@ const StringData _jscode_raw_db =
"print(\"\\tdb.stats()\");\n"
"print(\"\\tdb.version() current version of the server\");\n"
"print(\"\\tdb.getMongo().setSlaveOk() allow queries on a replication slave server\");\n"
+"print(\"\\tdb.fsyncLock() flush data to disk and lock server for backups\");\n"
+"print(\"\\tdb.fsyncUnlock()                     unlocks server following a db.fsyncLock()\");\n"
"\n"
"return __magicNoPrint;\n"
"}\n"
@@ -2148,6 +2497,11 @@ const StringData _jscode_raw_db =
"DB.prototype.printReplicationInfo = function() {\n"
"var result = this.getReplicationInfo();\n"
"if( result.errmsg ) {\n"
+"if (!this.isMaster().ismaster) {\n"
+"print(\"this is a slave, printing slave replication info.\");\n"
+"this.printSlaveReplicationInfo();\n"
+"return;\n"
+"}\n"
"print(tojson(result));\n"
"return;\n"
"}\n"
@@ -2164,7 +2518,7 @@ const StringData _jscode_raw_db =
"print(\"\\t syncedTo: \" + st.toString() );\n"
"var ago = (now-st)/1000;\n"
"var hrs = Math.round(ago/36)/100;\n"
-"print(\"\\t\\t = \" + Math.round(ago) + \"secs ago (\" + hrs + \"hrs)\");\n"
+"print(\"\\t\\t = \" + Math.round(ago) + \" secs ago (\" + hrs + \"hrs)\");\n"
"};\n"
"\n"
"function g(x) {\n"
@@ -2195,13 +2549,14 @@ const StringData _jscode_raw_db =
"};\n"
"\n"
"var L = this.getSiblingDB(\"local\");\n"
-"if( L.sources.count() != 0 ) {\n"
-"L.sources.find().forEach(g);\n"
-"}\n"
-"else if (L.system.replset.count() != 0) {\n"
+"\n"
+"if (L.system.replset.count() != 0) {\n"
"var status = this.adminCommand({'replSetGetStatus' : 1});\n"
"status.members.forEach(r);\n"
"}\n"
+"else if( L.sources.count() != 0 ) {\n"
+"L.sources.find().forEach(g);\n"
+"}\n"
"else {\n"
"print(\"local.sources is empty; is this db a --slave?\");\n"
"return;\n"
@@ -2257,6 +2612,14 @@ const StringData _jscode_raw_db =
"printShardingStatus( this.getSiblingDB( \"config\" ) , verbose );\n"
"}\n"
"\n"
+"DB.prototype.fsyncLock = function() {\n"
+"return this.adminCommand({fsync:1, lock:true});\n"
+"}\n"
+"\n"
+"DB.prototype.fsyncUnlock = function() {\n"
+"return this.getSiblingDB(\"admin\").$cmd.sys.unlock.findOne()\n"
+"}\n"
+"\n"
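A minimal backup sketch using the two helpers above (the copy step is external to the shell):

    db.fsyncLock()      // fsync + lock: flush to disk and block writes
    // ... snapshot or copy the dbpath with an external tool ...
    db.fsyncUnlock()    // releases the lock via admin.$cmd.sys.unlock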
"DB.autocomplete = function(obj){\n"
"var colls = obj.getCollectionNames();\n"
"var ret=[];\n"
@@ -2296,8 +2659,9 @@ const StringData _jscode_raw_mongo =
"mongoInject( Mongo.prototype );\n"
"}\n"
"\n"
-"Mongo.prototype.setSlaveOk = function() {\n"
-"this.slaveOk = true;\n"
+"Mongo.prototype.setSlaveOk = function( value ) {\n"
+"if( value == undefined ) value = true\n"
+"this.slaveOk = value\n"
"}\n"
"\n"
"Mongo.prototype.getDB = function( name ){\n"
@@ -2315,6 +2679,10 @@ const StringData _jscode_raw_mongo =
"return this.getDB( \"admin\" ).runCommand( cmd );\n"
"}\n"
"\n"
+"Mongo.prototype.setLogLevel = function( logLevel ){\n"
+"return this.adminCommand({ setParameter : 1, logLevel : logLevel })\n"
+"}\n"
+"\n"
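A quick sketch of the two connection-level helpers above:

    db.getMongo().setSlaveOk()          // same as before: allow reads from secondaries
    db.getMongo().setSlaveOk( false )   // explicitly disallow them again
    db.getMongo().setLogLevel( 2 )      // setParameter { logLevel : 2 } on the connected server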
"Mongo.prototype.getDBNames = function(){\n"
"return this.getDBs().databases.map(\n"
"function(z){\n"
@@ -2500,7 +2868,6 @@ const StringData _jscode_raw_query =
"print(\"\\t.showDiskLoc() - adds a $diskLoc field to each returned object\")\n"
"print(\"\\nCursor methods\");\n"
"print(\"\\t.forEach( func )\")\n"
-"print(\"\\t.print() - output to console in full pretty format\")\n"
"print(\"\\t.map( func )\")\n"
"print(\"\\t.hasNext()\")\n"
"print(\"\\t.next()\")\n"
@@ -2847,7 +3214,7 @@ const StringData _jscode_raw_collection =
"print(\"\\tdb.\" + shortName + \".totalIndexSize() - size in bytes of all the indexes\");\n"
"print(\"\\tdb.\" + shortName + \".totalSize() - storage allocated for all data and indexes\");\n"
"print(\"\\tdb.\" + shortName + \".update(query, object[, upsert_bool, multi_bool])\");\n"
-"print(\"\\tdb.\" + shortName + \".validate() - SLOW\");\n"
+"print(\"\\tdb.\" + shortName + \".validate( <full> ) - SLOW\");\n"
"print(\"\\tdb.\" + shortName + \".getShardVersion() - only for use with sharding\");\n"
"return __magicNoPrint;\n"
"}\n"
@@ -2907,7 +3274,7 @@ const StringData _jscode_raw_collection =
"throw \"can't save a DBQuery object\";\n"
"}\n"
"\n"
-"DBCollection._allowedFields = { $id : 1 , $ref : 1 };\n"
+"DBCollection._allowedFields = { $id : 1 , $ref : 1 , $db : 1 , $MinKey : 1, $MaxKey : 1 };\n"
"\n"
"DBCollection.prototype._validateForStorage = function( o ){\n"
"this._validateObject( o );\n"
@@ -3161,8 +3528,18 @@ const StringData _jscode_raw_collection =
"dropTarget : dropTarget } )\n"
"}\n"
"\n"
-"DBCollection.prototype.validate = function() {\n"
-"var res = this._db.runCommand( { validate: this.getName() } );\n"
+"DBCollection.prototype.validate = function(full) {\n"
+"var cmd = { validate: this.getName() };\n"
+"\n"
+"if (typeof(full) == 'object') // support arbitrary options here\n"
+"Object.extend(cmd, full);\n"
+"else\n"
+"cmd.full = full;\n"
+"\n"
+"var res = this._db.runCommand( cmd );\n"
+"\n"
+"if (typeof(res.valid) == 'undefined') {\n"
+"// old-style format just put everything in a string. Now using proper fields\n"
"\n"
"res.valid = false;\n"
"\n"
@@ -3178,6 +3555,7 @@ const StringData _jscode_raw_collection =
"res.lastExtentSize = Number( r[1] );\n"
"}\n"
"}\n"
+"}\n"
"\n"
"return res;\n"
"}\n"
@@ -3317,13 +3695,21 @@ const StringData _jscode_raw_collection =
"return ( e && e.options && e.options.capped ) ? true : false;\n"
"}\n"
"\n"
+"DBCollection.prototype._distinct = function( keyString , query ){\n"
+"return this._dbCommand( { distinct : this._shortName , key : keyString , query : query || {} } );\n"
+"}\n"
+"\n"
"DBCollection.prototype.distinct = function( keyString , query ){\n"
-"var res = this._dbCommand( { distinct : this._shortName , key : keyString , query : query || {} } );\n"
+"var res = this._distinct( keyString , query );\n"
"if ( ! res.ok )\n"
"throw \"distinct failed: \" + tojson( res );\n"
"return res.values;\n"
"}\n"
"\n"
+"\n"
"DBCollection.prototype.group = function( params ){\n"
"params.ns = this._shortName;\n"
"return this._db.group( params );\n"
@@ -3365,7 +3751,8 @@ const StringData _jscode_raw_collection =
"*/\n"
"MapReduceResult.prototype.convertToSingleObject = function(){\n"
"var z = {};\n"
-"this._coll.find().forEach( function(a){ z[a._id] = a.value; } );\n"
+"var it = this.results != null ? this.results : this._coll.find();\n"
+"it.forEach( function(a){ z[a._id] = a.value; } );\n"
"return z;\n"
"}\n"
"\n"
@@ -3380,7 +3767,7 @@ const StringData _jscode_raw_collection =
"*/\n"
"DBCollection.prototype.mapReduce = function( map , reduce , optionsOrOutString ){\n"
"var c = { mapreduce : this._shortName , map : map , reduce : reduce };\n"
-"assert( optionsOrOutString , \"need to an optionsOrOutString\" )\n"
+"assert( optionsOrOutString , \"need to supply an optionsOrOutString\" )\n"
"\n"
"if ( typeof( optionsOrOutString ) == \"string\" )\n"
"c[\"out\"] = optionsOrOutString;\n"
diff --git a/shell/msvc/mongo.vcxproj b/shell/msvc/mongo.vcxproj
index af5927c..0718d3a 100644
--- a/shell/msvc/mongo.vcxproj
+++ b/shell/msvc/mongo.vcxproj
@@ -53,7 +53,7 @@
<PrecompiledHeader>Use</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
- <PreprocessorDefinitions>XP_WIN;PCRE_STATIC;HAVE_CONFIG_H;OLDJS;MONGO_EXPOSE_MACROS;WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <PreprocessorDefinitions>XP_WIN;PCRE_STATIC;HAVE_CONFIG_H;OLDJS;MONGO_EXPOSE_MACROS;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>\boost\</AdditionalIncludeDirectories>
<PrecompiledHeaderFile>pch.h</PrecompiledHeaderFile>
<DisableSpecificWarnings>4355;4800;4267;4244;%(DisableSpecificWarnings)</DisableSpecificWarnings>
@@ -71,7 +71,7 @@
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
- <PreprocessorDefinitions>USE_READLINE;XP_WIN;_WIN32;PCRE_STATIC;HAVE_CONFIG_H;OLDJS;MONGO_EXPOSE_MACROS;WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <PreprocessorDefinitions>USE_READLINE;XP_WIN;PCRE_STATIC;HAVE_CONFIG_H;OLDJS;MONGO_EXPOSE_MACROS;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>\boost\</AdditionalIncludeDirectories>
<PrecompiledHeaderFile>pch.h</PrecompiledHeaderFile>
<MultiProcessorCompilation>true</MultiProcessorCompilation>
@@ -117,15 +117,21 @@
<ClCompile Include="..\..\scripting\engine_spidermonkey.cpp" />
<ClCompile Include="..\..\scripting\utils.cpp" />
<ClCompile Include="..\..\s\shardconnection.cpp" />
+ <ClCompile Include="..\..\third_party\linenoise\linenoise.cpp">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader>
+ </ClCompile>
<ClCompile Include="..\..\util\background.cpp" />
+ <ClCompile Include="..\..\util\concurrency\spin_lock.cpp" />
<ClCompile Include="..\..\util\log.cpp" />
<ClCompile Include="..\..\util\mmap.cpp" />
+ <ClCompile Include="..\..\util\net\listen.cpp" />
+ <ClCompile Include="..\..\util\net\message.cpp" />
+ <ClCompile Include="..\..\util\net\message_port.cpp" />
+ <ClCompile Include="..\..\util\net\sock.cpp" />
<ClCompile Include="..\..\util\password.cpp" />
<ClCompile Include="..\..\util\text.cpp" />
<ClCompile Include="..\..\util\mmap_win.cpp" />
<ClCompile Include="..\..\util\processinfo_win32.cpp" />
- <ClCompile Include="..\..\util\sock.cpp" />
- <ClCompile Include="..\..\util\message.cpp" />
<ClCompile Include="..\..\util\assert_util.cpp" />
<ClCompile Include="..\..\util\md5main.cpp" />
<ClCompile Include="..\..\util\md5.c">
diff --git a/shell/msvc/mongo.vcxproj.filters b/shell/msvc/mongo.vcxproj.filters
index 5d0a9a6..57b4ab7 100644
--- a/shell/msvc/mongo.vcxproj.filters
+++ b/shell/msvc/mongo.vcxproj.filters
@@ -39,6 +39,9 @@
<Filter Include="shell\generated_from_js">
<UniqueIdentifier>{96e4c411-7ab4-4bcd-b7c6-a33059f5d492}</UniqueIdentifier>
</Filter>
+ <Filter Include="thirdparty">
+ <UniqueIdentifier>{5eca87ab-5987-4fb0-97be-e80cc721e328}</UniqueIdentifier>
+ </Filter>
</ItemGroup>
<ItemGroup>
<ClCompile Include="..\dbshell.cpp">
@@ -66,9 +69,6 @@
<ClCompile Include="..\..\db\json.cpp">
<Filter>shared source files</Filter>
</ClCompile>
- <ClCompile Include="..\..\util\sock.cpp">
- <Filter>shell</Filter>
- </ClCompile>
<ClCompile Include="..\..\util\debug_util.cpp">
<Filter>shell</Filter>
</ClCompile>
@@ -162,9 +162,6 @@
<ClCompile Include="..\..\util\md5main.cpp">
<Filter>util</Filter>
</ClCompile>
- <ClCompile Include="..\..\util\message.cpp">
- <Filter>util</Filter>
- </ClCompile>
<ClCompile Include="..\..\util\util.cpp">
<Filter>util</Filter>
</ClCompile>
@@ -225,6 +222,24 @@
<ClCompile Include="..\..\client\dbclient_rs.cpp">
<Filter>client</Filter>
</ClCompile>
+ <ClCompile Include="..\..\third_party\linenoise\linenoise.cpp">
+ <Filter>thirdparty</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\util\concurrency\spin_lock.cpp">
+ <Filter>util\concurrency</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\util\net\listen.cpp">
+ <Filter>util</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\util\net\message.cpp">
+ <Filter>util</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\util\net\sock.cpp">
+ <Filter>util</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\util\net\message_port.cpp">
+ <Filter>util</Filter>
+ </ClCompile>
</ItemGroup>
<ItemGroup>
<None Include="..\..\SConstruct" />
diff --git a/shell/query.js b/shell/query.js
index 4044894..78734ca 100644
--- a/shell/query.js
+++ b/shell/query.js
@@ -35,7 +35,6 @@ DBQuery.prototype.help = function () {
print("\t.showDiskLoc() - adds a $diskLoc field to each returned object")
print("\nCursor methods");
print("\t.forEach( func )")
- print("\t.print() - output to console in full pretty format")
print("\t.map( func )")
print("\t.hasNext()")
print("\t.next()")
diff --git a/shell/servers.js b/shell/servers.js
index f713ecc..ad3b5eb 100644..100755
--- a/shell/servers.js
+++ b/shell/servers.js
@@ -1,5 +1,3 @@
-
-
_parsePath = function() {
var dbpath = "";
for( var i = 0; i < arguments.length; ++i )
@@ -23,6 +21,25 @@ _parsePort = function() {
return port;
}
+connectionURLTheSame = function( a , b ){
+ if ( a == b )
+ return true;
+
+ if ( ! a || ! b )
+ return false;
+
+ a = a.split( "/" )[0]
+ b = b.split( "/" )[0]
+
+ return a == b;
+}
+
+assert( connectionURLTheSame( "foo" , "foo" ) )
+assert( ! connectionURLTheSame( "foo" , "bar" ) )
+
+assert( connectionURLTheSame( "foo/a,b" , "foo/b,a" ) )
+assert( ! connectionURLTheSame( "foo/a,b" , "bar/a,b" ) )
+
createMongoArgs = function( binaryName , args ){
var fullArgs = [ binaryName ];
@@ -78,9 +95,12 @@ startMongodTest = function (port, dirname, restart, extraOptions ) {
dbpath: "/data/db/" + dirname,
noprealloc: "",
smallfiles: "",
- oplogSize: "2",
+ oplogSize: "40",
nohttpinterface: ""
};
+
+ if( jsTestOptions().noJournal ) options["nojournal"] = ""
+ if( jsTestOptions().noJournalPrealloc ) options["nopreallocj"] = ""
if ( extraOptions )
Object.extend( options , extraOptions );
@@ -104,7 +124,7 @@ startMongodEmpty = function () {
return startMongoProgram.apply(null, args);
}
startMongod = function () {
- print("WARNING DELETES DATA DIRECTORY THIS IS FOR TESTING RENAME YOUR INVOCATION");
+ print("startMongod WARNING DELETES DATA DIRECTORY THIS IS FOR TESTING ONLY");
return startMongodEmpty.apply(null, arguments);
}
startMongodNoReset = function(){
@@ -135,7 +155,7 @@ startMongoProgram = function(){
} catch( e ) {
}
return false;
- }, "unable to connect to mongo program on port " + port, 300 * 1000 );
+ }, "unable to connect to mongo program on port " + port, 600 * 1000 );
return m;
}
@@ -160,6 +180,19 @@ myPort = function() {
* * useHostname to use the hostname (instead of localhost)
*/
ShardingTest = function( testName , numShards , verboseLevel , numMongos , otherParams ){
+
+ // Check if testName is an object, if so, pull params from there
+ var keyFile = undefined
+ if( testName && ! testName.charAt ){
+ var params = testName
+ testName = params.name || "test"
+ numShards = params.shards || 2
+ verboseLevel = params.verbose || 0
+ numMongos = params.mongos || 1
+ otherParams = params.other || {}
+ keyFile = params.keyFile || otherParams.keyFile
+ }
+
this._testName = testName;
if ( ! otherParams )
@@ -172,16 +205,26 @@ ShardingTest = function( testName , numShards , verboseLevel , numMongos , other
var localhost = otherParams.useHostname ? getHostName() : "localhost";
this._alldbpaths = []
-
-
+
if ( otherParams.rs ){
localhost = getHostName();
// start replica sets
this._rs = []
for ( var i=0; i<numShards; i++){
var setName = testName + "-rs" + i;
- var rs = new ReplSetTest( { name : setName , nodes : 3 , startPort : 31100 + ( i * 100 ) } );
- this._rs[i] = { setName : setName , test : rs , nodes : rs.startSet( { oplogSize:40 } ) , url : rs.getURL() };
+
+ var rsDefaults = { oplogSize : 40, nodes : 3 }
+ var rsParams = otherParams["rs" + i]
+
+ for( var param in rsParams ){
+ rsDefaults[param] = rsParams[param]
+ }
+
+ var numReplicas = rsDefaults.nodes || otherParams.numReplicas || 3
+ delete rsDefaults.nodes
+
+ var rs = new ReplSetTest( { name : setName , nodes : numReplicas , startPort : 31100 + ( i * 100 ), keyFile : keyFile } );
+ this._rs[i] = { setName : setName , test : rs , nodes : rs.startSet( rsDefaults ) , url : rs.getURL() };
rs.initiate();
}
@@ -190,27 +233,35 @@ ShardingTest = function( testName , numShards , verboseLevel , numMongos , other
var rs = this._rs[i].test;
rs.getMaster().getDB( "admin" ).foo.save( { x : 1 } )
rs.awaitReplication();
- this._connections.push( new Mongo( rs.getURL() ) );
+ var xxx = new Mongo( rs.getURL() );
+ xxx.name = rs.getURL();
+ this._connections.push( xxx );
}
this._configServers = []
- for ( var i=0; i<3; i++ ){
- var conn = startMongodTest( 30000 + i , testName + "-config" + i );
+ for ( var i=0; i<3; i++ ){
+        var options = otherParams.extraOptions || {}
+ if( keyFile ) options["keyFile"] = keyFile
+ var conn = startMongodTest( 30000 + i , testName + "-config" + i, false, options );
this._alldbpaths.push( testName + "-config" + i )
this._configServers.push( conn );
}
-
+
this._configDB = localhost + ":30000," + localhost + ":30001," + localhost + ":30002";
this._configConnection = new Mongo( this._configDB );
- this._configConnection.getDB( "config" ).settings.insert( { _id : "chunksize" , value : otherParams.chunksize || 50 } );
+ if (!otherParams.noChunkSize) {
+ this._configConnection.getDB( "config" ).settings.insert( { _id : "chunksize" , value : otherParams.chunksize || 50 } );
+ }
}
else {
for ( var i=0; i<numShards; i++){
- var conn = startMongodTest( 30000 + i , testName + i, 0, {useHostname : otherParams.useHostname} );
+ var options = { useHostname : otherParams.useHostname }
+ if( keyFile ) options["keyFile"] = keyFile
+ var conn = startMongodTest( 30000 + i , testName + i, 0, options );
this._alldbpaths.push( testName +i )
this._connections.push( conn );
}
-
+
if ( otherParams.sync ){
this._configDB = localhost+":30000,"+localhost+":30001,"+localhost+":30002";
this._configConnection = new Mongo( this._configDB );
@@ -226,12 +277,19 @@ ShardingTest = function( testName , numShards , verboseLevel , numMongos , other
var startMongosPort = 31000;
for ( var i=0; i<(numMongos||1); i++ ){
var myPort = startMongosPort - i;
- print("config: "+this._configDB);
- var conn = startMongos( { port : startMongosPort - i , v : verboseLevel || 0 , configdb : this._configDB } );
+ print("ShardingTest config: "+this._configDB);
+ var opts = { port : startMongosPort - i , v : verboseLevel || 0 , configdb : this._configDB };
+ if( keyFile ) opts["keyFile"] = keyFile
+ for (var j in otherParams.extraOptions) {
+ opts[j] = otherParams.extraOptions[j];
+ }
+ var conn = startMongos( opts );
conn.name = localhost + ":" + myPort;
this._mongos.push( conn );
- if ( i == 0 )
+ if ( i == 0 ) {
this.s = conn;
+ }
+ this["s" + i] = conn
}
var admin = this.admin = this.s.getDB( "admin" );
@@ -246,7 +304,7 @@ ShardingTest = function( testName , numShards , verboseLevel , numMongos , other
if ( ! n )
n = z;
}
- print( "going to add shard: " + n )
+ print( "ShardingTest going to add shard: " + n )
x = admin.runCommand( { addshard : n } );
printjson( x )
}
@@ -266,7 +324,7 @@ ShardingTest.prototype.getDB = function( name ){
}
ShardingTest.prototype.getServerName = function( dbname ){
- var x = this.config.databases.findOne( { _id : dbname } );
+ var x = this.config.databases.findOne( { _id : "" + dbname } );
if ( x )
return x.primary;
this.config.databases.find().forEach( printjson );
@@ -300,12 +358,17 @@ ShardingTest.prototype.getServer = function( dbname ){
if ( x )
name = x.host;
+ var rsName = null;
+ if ( name.indexOf( "/" ) > 0 )
+ rsName = name.substring( 0 , name.indexOf( "/" ) );
+
for ( var i=0; i<this._connections.length; i++ ){
var c = this._connections[i];
- if ( name == c.name )
+ if ( connectionURLTheSame( name , c.name ) ||
+ connectionURLTheSame( rsName , c.name ) )
return c;
}
-
+
throw "can't find server for: " + dbname + " name:" + name;
}
@@ -318,15 +381,30 @@ ShardingTest.prototype.normalize = function( x ){
}
ShardingTest.prototype.getOther = function( one ){
- if ( this._connections.length != 2 )
+ if ( this._connections.length < 2 )
throw "getOther only works with 2 servers";
if ( one._mongo )
one = one._mongo
+
+ for( var i = 0; i < this._connections.length; i++ ){
+ if( this._connections[i] != one ) return this._connections[i]
+ }
+
+ return null
+}
- if ( this._connections[0] == one )
- return this._connections[1];
- return this._connections[0];
+ShardingTest.prototype.getAnother = function( one ){
+ if(this._connections.length < 2)
+ throw "getAnother() only works with multiple servers";
+
+ if ( one._mongo )
+ one = one._mongo
+
+ for(var i = 0; i < this._connections.length; i++){
+ if(this._connections[i] == one)
+ return this._connections[(i + 1) % this._connections.length];
+ }
}
ShardingTest.prototype.getFirstOther = function( one ){
@@ -355,7 +433,7 @@ ShardingTest.prototype.stop = function(){
}
}
- print('*** ' + this._testName + " completed successfully ***");
+ print('*** ShardingTest ' + this._testName + " completed successfully ***");
}
ShardingTest.prototype.adminCommand = function(cmd){
@@ -388,7 +466,7 @@ ShardingTest.prototype.printChangeLog = function(){
msg += tojsononeline( z.details );
}
- print( msg )
+ print( "ShardingTest " + msg )
}
);
@@ -410,7 +488,7 @@ ShardingTest.prototype.getChunksString = function( ns ){
}
ShardingTest.prototype.printChunks = function( ns ){
- print( this.getChunksString( ns ) );
+ print( "ShardingTest " + this.getChunksString( ns ) );
}
ShardingTest.prototype.printShardingStatus = function(){
@@ -433,7 +511,7 @@ ShardingTest.prototype.printCollectionInfo = function( ns , msg ){
out += this.getChunksString( ns );
- print( out );
+ print( "ShardingTest " + out );
}
printShardingStatus = function( configDB , verbose ){
@@ -442,7 +520,7 @@ printShardingStatus = function( configDB , verbose ){
var version = configDB.getCollection( "version" ).findOne();
if ( version == null ){
- print( "not a shard db!" );
+ print( "printShardingStatus: not a shard db!" );
return;
}
@@ -454,16 +532,16 @@ printShardingStatus = function( configDB , verbose ){
output( " sharding version: " + tojson( configDB.getCollection( "version" ).findOne() ) );
output( " shards:" );
- configDB.shards.find().forEach(
+ configDB.shards.find().sort( { _id : 1 } ).forEach(
function(z){
- output( " " + tojson(z) );
+ output( "\t" + tojsononeline( z ) );
}
);
output( " databases:" );
configDB.databases.find().sort( { name : 1 } ).forEach(
function(db){
- output( "\t" + tojson(db,"",true) );
+ output( "\t" + tojsononeline(db,"",true) );
if (db.partitioned){
configDB.collections.find( { _id : new RegExp( "^" + db._id + "\." ) } ).sort( { _id : 1 } ).forEach(
@@ -487,7 +565,7 @@ printShardingStatus = function( configDB , verbose ){
);
}
else {
- output( "\t\t\ttoo many chunksn to print, use verbose if you want to force print" );
+ output( "\t\t\ttoo many chunks to print, use verbose if you want to force print" );
}
}
}
@@ -504,7 +582,7 @@ printShardingSizes = function(){
var version = configDB.getCollection( "version" ).findOne();
if ( version == null ){
- print( "not a shard db!" );
+ print( "printShardingSizes : not a shard db!" );
return;
}
@@ -614,23 +692,124 @@ ShardingTest.prototype.chunkDiff = function( collName , dbName ){
if ( c[s] > max )
max = c[s];
}
- print( "input: " + tojson( c ) + " min: " + min + " max: " + max );
+ print( "ShardingTest input: " + tojson( c ) + " min: " + min + " max: " + max );
return max - min;
}
+ShardingTest.prototype.getShard = function( coll, query ){
+ var shards = this.getShards( coll, query )
+ assert.eq( shards.length, 1 )
+ return shards[0]
+}
+
+// Returns the shards on which documents matching a particular query reside
+ShardingTest.prototype.getShards = function( coll, query ){
+ if( ! coll.getDB )
+ coll = this.s.getCollection( coll )
+
+ var explain = coll.find( query ).explain()
+
+ var shards = []
+
+ if( explain.shards ){
+
+ for( var shardName in explain.shards ){
+ for( var i = 0; i < explain.shards[shardName].length; i++ ){
+ if( explain.shards[shardName][i].n && explain.shards[shardName][i].n > 0 )
+ shards.push( shardName )
+ }
+ }
+
+ }
+
+ for( var i = 0; i < shards.length; i++ ){
+ for( var j = 0; j < this._connections.length; j++ ){
+ if ( connectionURLTheSame( this._connections[j].name , shards[i] ) ){
+ shards[i] = this._connections[j]
+ break;
+ }
+ }
+ }
+
+ return shards
+}
+
+ShardingTest.prototype.isSharded = function( collName ){
+
+ var collName = "" + collName
+ var dbName = undefined
+
+ if( typeof collName.getCollectionNames == 'function' ){
+ dbName = "" + collName
+ collName = undefined
+ }
+
+ if( dbName ){
+        var x = this.config.databases.findOne( { _id : dbName } )
+ if( x ) return x.partitioned
+ else return false
+ }
+
+ if( collName ){
+ var x = this.config.collections.findOne( { _id : collName } )
+ if( x ) return true
+ else return false
+ }
+
+}
+
ShardingTest.prototype.shardGo = function( collName , key , split , move , dbName ){
+
split = split || key;
move = move || split;
- dbName = dbName || "test";
+
+ if( collName.getDB )
+ dbName = "" + collName.getDB()
+ else dbName = dbName || "test";
var c = dbName + "." + collName;
+ if( collName.getDB )
+ c = "" + collName
- s.adminCommand( { shardcollection : c , key : key } );
- s.adminCommand( { split : c , middle : split } );
- s.adminCommand( { movechunk : c , find : move , to : this.getOther( s.getServer( dbName ) ).name } );
+ var isEmpty = this.s.getCollection( c ).count() == 0
+
+ if( ! this.isSharded( dbName ) )
+ this.s.adminCommand( { enableSharding : dbName } )
+
+ var result = this.s.adminCommand( { shardcollection : c , key : key } )
+ if( ! result.ok ){
+ printjson( result )
+ assert( false )
+ }
+
+ result = this.s.adminCommand( { split : c , middle : split } );
+ if( ! result.ok ){
+ printjson( result )
+ assert( false )
+ }
+
+ var result = null
+ for( var i = 0; i < 5; i++ ){
+ result = this.s.adminCommand( { movechunk : c , find : move , to : this.getOther( this.getServer( dbName ) ).name } );
+ if( result.ok ) break;
+ sleep( 5 * 1000 );
+ }
+ printjson( result )
+ assert( result.ok )
};
+ShardingTest.prototype.shardColl = ShardingTest.prototype.shardGo
+
+ShardingTest.prototype.setBalancer = function( balancer ){
+ if( balancer || balancer == undefined ){
+ this.config.settings.update( { _id: "balancer" }, { $set : { stopped: false } } , true )
+ }
+ else if( balancer == false ){
+ this.config.settings.update( { _id: "balancer" }, { $set : { stopped: true } } , true )
+ }
+}
+
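A sketch of how a test might combine the helpers above (test and collection names are illustrative):

    var st = new ShardingTest({ name : "helpers" , shards : 2 })
    st.setBalancer( false )                            // keep the balancer from moving chunks mid-test
    var coll = st.s.getCollection( "test.foo" )
    st.shardColl( coll , { _id : 1 } , { _id : 0 } )   // enableSharding + shardcollection + split + movechunk
    coll.insert( { _id : -5 } )
    var shard = st.getShard( coll , { _id : -5 } )     // the single shard holding the matching document
    print( "chunk for _id:-5 lives on " + shard.name )
    st.stop()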
/**
* Run a mongod process.
*
@@ -666,15 +845,6 @@ MongodRunner.prototype.start = function( reuseData ) {
args.push( this.port_ );
args.push( "--dbpath" );
args.push( this.dbpath_ );
- if ( this.peer_ && this.arbiter_ ) {
- args.push( "--pairwith" );
- args.push( this.peer_ );
- args.push( "--arbiter" );
- args.push( this.arbiter_ );
- args.push( "--oplogSize" );
- // small oplog by default so startup fast
- args.push( "1" );
- }
args.push( "--nohttpinterface" );
args.push( "--noprealloc" );
args.push( "--smallfiles" );
@@ -697,154 +867,6 @@ MongodRunner.prototype.port = function() { return this.port_; }
MongodRunner.prototype.toString = function() { return [ this.port_, this.dbpath_, this.peer_, this.arbiter_ ].toString(); }
-
-ReplPair = function( left, right, arbiter ) {
- this.left_ = left;
- this.leftC_ = null;
- this.right_ = right;
- this.rightC_ = null;
- this.arbiter_ = arbiter;
- this.arbiterC_ = null;
- this.master_ = null;
- this.slave_ = null;
-}
-
-ReplPair.prototype.start = function( reuseData ) {
- if ( this.arbiterC_ == null ) {
- this.arbiterC_ = this.arbiter_.start();
- }
- if ( this.leftC_ == null ) {
- this.leftC_ = this.left_.start( reuseData );
- }
- if ( this.rightC_ == null ) {
- this.rightC_ = this.right_.start( reuseData );
- }
-}
-
-ReplPair.prototype.isMaster = function( mongo, debug ) {
- var im = mongo.getDB( "admin" ).runCommand( { ismaster : 1 } );
- assert( im && im.ok, "command ismaster failed" );
- if ( debug ) {
- printjson( im );
- }
- return im.ismaster;
-}
-
-ReplPair.prototype.isInitialSyncComplete = function( mongo, debug ) {
- var isc = mongo.getDB( "admin" ).runCommand( { isinitialsynccomplete : 1 } );
- assert( isc && isc.ok, "command isinitialsynccomplete failed" );
- if ( debug ) {
- printjson( isc );
- }
- return isc.initialsynccomplete;
-}
-
-ReplPair.prototype.checkSteadyState = function( state, expectedMasterHost, twoMasterOk, leftValues, rightValues, debug ) {
- leftValues = leftValues || {};
- rightValues = rightValues || {};
-
- var lm = null;
- var lisc = null;
- if ( this.leftC_ != null ) {
- lm = this.isMaster( this.leftC_, debug );
- leftValues[ lm ] = true;
- lisc = this.isInitialSyncComplete( this.leftC_, debug );
- }
- var rm = null;
- var risc = null;
- if ( this.rightC_ != null ) {
- rm = this.isMaster( this.rightC_, debug );
- rightValues[ rm ] = true;
- risc = this.isInitialSyncComplete( this.rightC_, debug );
- }
-
- var stateSet = {}
- state.forEach( function( i ) { stateSet[ i ] = true; } );
- if ( !( 1 in stateSet ) || ( ( risc || risc == null ) && ( lisc || lisc == null ) ) ) {
- if ( rm == 1 && lm != 1 ) {
- assert( twoMasterOk || !( 1 in leftValues ) );
- this.master_ = this.rightC_;
- this.slave_ = this.leftC_;
- } else if ( lm == 1 && rm != 1 ) {
- assert( twoMasterOk || !( 1 in rightValues ) );
- this.master_ = this.leftC_;
- this.slave_ = this.rightC_;
- }
- if ( !twoMasterOk ) {
- assert( lm != 1 || rm != 1, "two masters" );
- }
- // check for expected state
- if ( state.sort().toString() == [ lm, rm ].sort().toString() ) {
- if ( expectedMasterHost != null ) {
- if( expectedMasterHost == this.master_.host ) {
- return true;
- }
- } else {
- return true;
- }
- }
- }
-
- this.master_ = null;
- this.slave_ = null;
- return false;
-}
-
-ReplPair.prototype.waitForSteadyState = function( state, expectedMasterHost, twoMasterOk, debug ) {
- state = state || [ 1, 0 ];
- twoMasterOk = twoMasterOk || false;
- var rp = this;
- var leftValues = {};
- var rightValues = {};
- assert.soon( function() { return rp.checkSteadyState( state, expectedMasterHost, twoMasterOk, leftValues, rightValues, debug ); },
- "rp (" + rp + ") failed to reach expected steady state (" + state + ")" , 60000 );
-}
-
-ReplPair.prototype.master = function() { return this.master_; }
-ReplPair.prototype.slave = function() { return this.slave_; }
-ReplPair.prototype.right = function() { return this.rightC_; }
-ReplPair.prototype.left = function() { return this.leftC_; }
-ReplPair.prototype.arbiter = function() { return this.arbiterC_; }
-
-ReplPair.prototype.killNode = function( mongo, signal ) {
- signal = signal || 15;
- if ( this.leftC_ != null && this.leftC_.host == mongo.host ) {
- stopMongod( this.left_.port_ );
- this.leftC_ = null;
- }
- if ( this.rightC_ != null && this.rightC_.host == mongo.host ) {
- stopMongod( this.right_.port_ );
- this.rightC_ = null;
- }
- if ( this.arbiterC_ != null && this.arbiterC_.host == mongo.host ) {
- stopMongod( this.arbiter_.port_ );
- this.arbiterC_ = null;
- }
-}
-
-ReplPair.prototype._annotatedNode = function( mongo ) {
- var ret = "";
- if ( mongo != null ) {
- ret += " (connected)";
- if ( this.master_ != null && mongo.host == this.master_.host ) {
- ret += "(master)";
- }
- if ( this.slave_ != null && mongo.host == this.slave_.host ) {
- ret += "(slave)";
- }
- }
- return ret;
-}
-
-ReplPair.prototype.toString = function() {
- var ret = "";
- ret += "left: " + this.left_;
- ret += " " + this._annotatedNode( this.leftC_ );
- ret += " right: " + this.right_;
- ret += " " + this._annotatedNode( this.rightC_ );
- return ret;
-}
-
ToolTest = function( name ){
this.name = name;
this.port = allocatePorts(1)[0];
@@ -922,7 +944,7 @@ ReplTest.prototype.getOptions = function( master , extra , putBinaryFirst, norep
extra = {};
if ( ! extra.oplogSize )
- extra.oplogSize = "1";
+ extra.oplogSize = "40";
var a = []
if ( putBinaryFirst )
@@ -935,6 +957,8 @@ ReplTest.prototype.getOptions = function( master , extra , putBinaryFirst, norep
a.push( "--dbpath" );
a.push( this.getPath( master ) );
+ if( jsTestOptions().noJournal ) a.push( "--nojournal" )
+ if( jsTestOptions().noJournalPrealloc ) a.push( "--nopreallocj" )
if ( !norepl ) {
if ( master ){
@@ -1050,8 +1074,8 @@ var testingReplication = false;
function skipIfTestingReplication(){
if (testingReplication) {
- print( "skipping" );
- quit(0);
+ print("skipIfTestingReplication skipping");
+ quit(0);
}
}
@@ -1059,10 +1083,11 @@ ReplSetTest = function( opts ){
this.name = opts.name || "testReplSet";
this.host = opts.host || getHostName();
this.numNodes = opts.nodes || 0;
- this.oplogSize = opts.oplogSize || 2;
+ this.oplogSize = opts.oplogSize || 40;
this.useSeedList = opts.useSeedList || false;
this.bridged = opts.bridged || false;
this.ports = [];
+ this.keyFile = opts.keyFile
this.startPort = opts.startPort || 31000;
@@ -1084,6 +1109,10 @@ ReplSetTest = function( opts ){
this.nodes = [];
this.nodeIds = {};
this.initLiveNodes();
+
+ Object.extend( this, ReplSetTest.Health )
+ Object.extend( this, ReplSetTest.State )
+
}
ReplSetTest.prototype.initBridges = function() {
@@ -1109,16 +1138,43 @@ ReplSetTest.prototype.initLiveNodes = function(){
}
ReplSetTest.prototype.getNodeId = function(node) {
- return this.nodeIds[node];
+
+ var result = this.nodeIds[node]
+ if( result ) return result
+
+ if( node.toFixed ) return node
+ return node.nodeId
+
}
ReplSetTest.prototype.getPort = function( n ){
+ if( n.getDB ){
+ // is a connection, look up
+ for( var i = 0; i < this.nodes.length; i++ ){
+ if( this.nodes[i] == n ){
+ n = i
+ break
+ }
+ }
+ }
+
+ if ( typeof(n) == "object" && n.floatApprox )
+ n = n.floatApprox
+
+ // this is a hack for NumberInt
+ if ( n == 0 )
+ n = 0;
+
+ print( "ReplSetTest n: " + n + " ports: " + tojson( this.ports ) + "\t" + this.ports[n] + " " + typeof(n) );
return this.ports[ n ];
}
ReplSetTest.prototype.getPath = function( n ){
- var p = "/data/db/" + this.name + "-";
- p += n.toString();
+
+ if( n.host )
+ n = this.getNodeId( n )
+
+ var p = "/data/db/" + this.name + "-"+n;
if ( ! this._alldbpaths )
this._alldbpaths = [ p ];
else
@@ -1186,17 +1242,22 @@ ReplSetTest.prototype.getOptions = function( n , extra , putBinaryFirst ){
if ( putBinaryFirst )
- a.push( "mongod" )
-
- a.push( "--replSet" );
+ a.push( "mongod" );
- if( this.useSeedList ) {
- a.push( this.getURL() );
+ if ( extra.noReplSet ) {
+ delete extra.noReplSet;
}
else {
- a.push( this.name );
+ a.push( "--replSet" );
+
+ if( this.useSeedList ) {
+ a.push( this.getURL() );
+ }
+ else {
+ a.push( this.name );
+ }
}
-
+
a.push( "--noprealloc", "--smallfiles" );
a.push( "--rest" );
@@ -1205,13 +1266,27 @@ ReplSetTest.prototype.getOptions = function( n , extra , putBinaryFirst ){
a.push( this.getPort( n ) );
a.push( "--dbpath" );
- a.push( this.getPath( n ) );
-
+ a.push( this.getPath( ( n.host ? this.getNodeId( n ) : n ) ) );
+
+ if( this.keyFile ){
+ a.push( "--keyFile" )
+        a.push( this.keyFile )
+ }
+
+ if( jsTestOptions().noJournal ) a.push( "--nojournal" )
+ if( jsTestOptions().noJournalPrealloc ) a.push( "--nopreallocj" )
+
for ( var k in extra ){
- var v = extra[k];
+ var v = extra[k];
a.push( "--" + k );
- if ( v != null )
+ if ( v != null ){
+ if( v.replace ){
+ v = v.replace(/\$node/g, "" + ( n.host ? this.getNodeId( n ) : n ) )
+ v = v.replace(/\$set/g, this.name )
+ v = v.replace(/\$path/g, this.getPath( n ) )
+ }
a.push( v );
+ }
}
return a;
@@ -1219,7 +1294,7 @@ ReplSetTest.prototype.getOptions = function( n , extra , putBinaryFirst ){
ReplSetTest.prototype.startSet = function(options) {
var nodes = [];
- print( "Starting Set" );
+ print( "ReplSetTest Starting Set" );
for(n=0; n<this.ports.length; n++) {
node = this.start(n, options);
@@ -1231,33 +1306,81 @@ ReplSetTest.prototype.startSet = function(options) {
}
ReplSetTest.prototype.callIsMaster = function() {
+
var master = null;
this.initLiveNodes();
+
for(var i=0; i<this.nodes.length; i++) {
try {
var n = this.nodes[i].getDB('admin').runCommand({ismaster:1});
-
+
if(n['ismaster'] == true) {
master = this.nodes[i];
this.liveNodes.master = master;
this.nodeIds[master] = i;
+ master.nodeId = i
}
else {
this.nodes[i].setSlaveOk();
this.liveNodes.slaves.push(this.nodes[i]);
this.nodeIds[this.nodes[i]] = i;
+ this.nodes[i].nodeId = i
}
}
catch(err) {
- print("Could not call ismaster on node " + i);
+ print("ReplSetTest Could not call ismaster on node " + i);
}
}
return master || false;
}
+ReplSetTest.awaitRSClientHosts = function( conn, host, hostOk, rs ) {
+
+ if( host.length ){
+        for( var i = 0; i < host.length; i++ ) this.awaitRSClientHosts( conn, host[i], hostOk, rs )
+ return
+ }
+
+ if( hostOk == undefined ) hostOk = { ok : true }
+ if( host.host ) host = host.host
+ if( rs && rs.getMaster ) rs = rs.name
+
+ print( "Awaiting " + host + " to be " + tojson( hostOk ) + " for " + conn + " (rs: " + rs + ")" )
+
+ var tests = 0
+ assert.soon( function() {
+ var rsClientHosts = conn.getDB( "admin" ).runCommand( "connPoolStats" )[ "replicaSets" ]
+ if( tests++ % 10 == 0 )
+ printjson( rsClientHosts )
+
+ for ( rsName in rsClientHosts ){
+ if( rs && rs != rsName ) continue
+ for ( var i = 0; i < rsClientHosts[rsName].hosts.length; i++ ){
+ var clientHost = rsClientHosts[rsName].hosts[ i ];
+ if( clientHost.addr != host ) continue
+
+ // Check that *all* host properties are set correctly
+ var propOk = true
+ for( var prop in hostOk ){
+ if( clientHost[prop] != hostOk[prop] ){
+ propOk = false
+ break
+ }
+ }
+
+ if( propOk ) return true;
+
+ }
+ }
+ return false;
+ }, "timed out waiting for replica set client to recognize hosts",
+ 3 * 20 * 1000 /* ReplicaSetMonitorWatcher updates every 20s */ )
+
+}
+
ReplSetTest.prototype.awaitSecondaryNodes = function( timeout ) {
var master = this.getMaster();
var slaves = this.liveNodes.slaves;
@@ -1283,6 +1406,29 @@ ReplSetTest.prototype.getMaster = function( timeout ) {
return master;
}
+ReplSetTest.prototype.getPrimary = ReplSetTest.prototype.getMaster
+
+ReplSetTest.prototype.getSecondaries = function( timeout ){
+ var master = this.getMaster( timeout )
+ var secs = []
+ for( var i = 0; i < this.nodes.length; i++ ){
+ if( this.nodes[i] != master ){
+ secs.push( this.nodes[i] )
+ }
+ }
+ return secs
+}
+
+ReplSetTest.prototype.getSecondary = function( timeout ){
+ return this.getSecondaries( timeout )[0];
+}
+
+ReplSetTest.prototype.status = function( timeout ){
+ var master = this.callIsMaster()
+ if( ! master ) master = this.liveNodes.slaves[0]
+ return master.getDB("admin").runCommand({replSetGetStatus: 1})
+}
+
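A sketch of the new accessors in a test (the set name is illustrative):

    var rt = new ReplSetTest({ name : "accessors" , nodes : 3 })
    rt.startSet()
    rt.initiate()
    var primary = rt.getPrimary()          // alias for getMaster()
    var secondaries = rt.getSecondaries()  // every node that is not the primary
    printjson( rt.status() )               // replSetGetStatus as reported by the primary
    rt.stopSet()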
// Add a node to the test set
ReplSetTest.prototype.add = function( config ) {
if(this.ports.length == 0) {
@@ -1291,13 +1437,13 @@ ReplSetTest.prototype.add = function( config ) {
else {
var nextPort = this.ports[this.ports.length-1] + 1;
}
- print("Next port: " + nextPort);
+ print("ReplSetTest Next port: " + nextPort);
this.ports.push(nextPort);
printjson(this.ports);
var nextId = this.nodes.length;
printjson(this.nodes);
- print(nextId);
+ print("ReplSetTest nextId:" + nextId);
var newNode = this.start(nextId);
this.nodes.push(newNode);
@@ -1323,7 +1469,7 @@ ReplSetTest.prototype.attempt = function( opts, func ) {
tries += 1;
sleep(sleepTime);
if( tries * sleepTime > timeout) {
- throw('[' + opts['desc'] + ']' + " timed out");
+ throw('[' + opts['desc'] + ']' + " timed out after " + timeout + "ms ( " + tries + " tries )");
}
}
@@ -1354,50 +1500,76 @@ ReplSetTest.prototype.reInitiate = function() {
this.initiate( config , 'replSetReconfig' );
}
-ReplSetTest.prototype.awaitReplication = function() {
- this.getMaster();
-
- latest = this.liveNodes.master.getDB("local")['oplog.rs'].find({}).sort({'$natural': -1}).limit(1).next()['ts']
- print(latest);
-
- this.attempt({context: this, timeout: 30000, desc: "awaiting replication"},
- function() {
- var synced = true;
- for(var i=0; i<this.liveNodes.slaves.length; i++) {
- var slave = this.liveNodes.slaves[i];
-
- // Continue if we're connected to an arbiter
- if(res = slave.getDB("admin").runCommand({replSetGetStatus: 1})) {
- if(res.myState == 7) {
- continue;
- }
- }
-
- slave.getDB("admin").getMongo().setSlaveOk();
- var log = slave.getDB("local")['oplog.rs'];
- if(log.find({}).sort({'$natural': -1}).limit(1).hasNext()) {
- var entry = log.find({}).sort({'$natural': -1}).limit(1).next();
- printjson( entry );
- var ts = entry['ts'];
- print("TS for " + slave + " is " + ts.t+":"+ts.i + " and latest is " + latest.t+":"+latest.i);
-
- if (latest.t < ts.t || (latest.t == ts.t && latest.i < ts.i)) {
- latest = this.liveNodes.master.getDB("local")['oplog.rs'].find({}).sort({'$natural': -1}).limit(1).next()['ts'];
- }
-
- print("Oplog size for " + slave + " is " + log.count());
- synced = (synced && friendlyEqual(latest,ts))
- }
- else {
- synced = false;
- }
- }
-
- if(synced) {
- print("Synced = " + synced);
- }
- return synced;
- });
+ReplSetTest.prototype.getLastOpTimeWritten = function() {
+ this.getMaster();
+ this.attempt({context : this, desc : "awaiting oplog query"},
+ function() {
+ try {
+ this.latest = this.liveNodes.master.getDB("local")['oplog.rs'].find({}).sort({'$natural': -1}).limit(1).next()['ts'];
+ }
+ catch(e) {
+ print("ReplSetTest caught exception " + e);
+ return false;
+ }
+ return true;
+ });
+};
+
+ReplSetTest.prototype.awaitReplication = function(timeout) {
+ timeout = timeout || 30000;
+
+ this.getLastOpTimeWritten();
+
+ print("ReplSetTest " + this.latest);
+
+ this.attempt({context: this, timeout: timeout, desc: "awaiting replication"},
+ function() {
+ try {
+ var synced = true;
+ for(var i=0; i<this.liveNodes.slaves.length; i++) {
+ var slave = this.liveNodes.slaves[i];
+
+ // Continue if we're connected to an arbiter
+ if(res = slave.getDB("admin").runCommand({replSetGetStatus: 1})) {
+ if(res.myState == 7) {
+ continue;
+ }
+ }
+
+ slave.getDB("admin").getMongo().setSlaveOk();
+ var log = slave.getDB("local")['oplog.rs'];
+ if(log.find({}).sort({'$natural': -1}).limit(1).hasNext()) {
+ var entry = log.find({}).sort({'$natural': -1}).limit(1).next();
+ printjson( entry );
+ var ts = entry['ts'];
+ print("ReplSetTest await TS for " + slave + " is " + ts.t+":"+ts.i + " and latest is " + this.latest.t+":"+this.latest.i);
+
+ if (this.latest.t < ts.t || (this.latest.t == ts.t && this.latest.i < ts.i)) {
+ this.latest = this.liveNodes.master.getDB("local")['oplog.rs'].find({}).sort({'$natural': -1}).limit(1).next()['ts'];
+ }
+
+ print("ReplSetTest await oplog size for " + slave + " is " + log.count());
+ synced = (synced && friendlyEqual(this.latest,ts))
+ }
+ else {
+ synced = false;
+ }
+ }
+
+ if(synced) {
+ print("ReplSetTest await synced=" + synced);
+ }
+ return synced;
+ }
+ catch (e) {
+ print("ReplSetTest.awaitReplication: caught exception "+e);
+
+ // we might have a new master now
+ this.getLastOpTimeWritten();
+
+ return false;
+ }
+ });
}
ReplSetTest.prototype.getHashes = function( db ){
@@ -1409,55 +1581,462 @@ ReplSetTest.prototype.getHashes = function( db ){
}
/**
- * Starts up a server.
+ * Starts up a server. Options are saved by default for subsequent starts.
+ *
+ *
+ * Options { remember : true } re-applies the saved options from a prior start.
+ * Options { noRemember : true } ignores the current properties.
+ * Options { appendOptions : true } appends the current options to those remembered.
+ * Options { startClean : true } clears the data directory before starting.
*
- * @param {int} n server number (0, 1, 2, ...)
+ * @param {int|conn|[int|conn]} n array or single server number (0, 1, 2, ...) or conn
* @param {object} [options]
* @param {boolean} [restart] If false, the data directory will be cleared
* before the server starts. Defaults to false.
+ *
*/
-ReplSetTest.prototype.start = function( n , options , restart ){
+ReplSetTest.prototype.start = function( n , options , restart , wait ){
+
+ if( n.length ){
+
+ var nodes = n
+ var started = []
+
+ for( var i = 0; i < nodes.length; i++ ){
+ if( this.start( nodes[i], Object.extend({}, options), restart, wait ) ){
+ started.push( nodes[i] )
+ }
+ }
+
+ return started
+
+ }
+
+ print( "ReplSetTest n is : " + n )
+
var lockFile = this.getPath( n ) + "/mongod.lock";
removeFile( lockFile );
- var o = this.getOptions( n , options , restart );
+
+ options = options || {}
+ var noRemember = options.noRemember
+ delete options.noRemember
+ var appendOptions = options.appendOptions
+ delete options.appendOptions
+ var startClean = options.startClean
+ delete options.startClean
+
+ if( restart && options.remember ){
+ delete options.remember
+
+ var oldOptions = {}
+ if( this.savedStartOptions && this.savedStartOptions[n] ){
+ oldOptions = this.savedStartOptions[n]
+ }
+
+ var newOptions = options
+ var options = {}
+ Object.extend( options, oldOptions )
+ Object.extend( options, newOptions )
+
+ }
+
+ var shouldRemember = ( ! restart && ! noRemember ) || ( restart && appendOptions )
+
+ if ( shouldRemember ){
+ this.savedStartOptions = this.savedStartOptions || {}
+ this.savedStartOptions[n] = options
+ }
+
+ if( tojson(options) != tojson({}) )
+ printjson(options)
+
+ var o = this.getOptions( n , options , restart && ! startClean );
- print("Starting....");
- print( o );
+ print("ReplSetTest " + (restart ? "(Re)" : "") + "Starting....");
+ print("ReplSetTest " + o );
+
+ var rval = null
if ( restart ) {
- this.nodes[n] = startMongoProgram.apply( null , o );
- printjson(this.nodes);
- return this.nodes[n];
+ n = this.getNodeId( n )
+ this.nodes[n] = ( startClean ? startMongod.apply( null , o ) : startMongoProgram.apply( null , o ) );
+ this.nodes[n].host = this.nodes[n].host.replace( "127.0.0.1", this.host )
+ if( shouldRemember ) this.savedStartOptions[this.nodes[n]] = options
+ printjson( this.nodes )
+ rval = this.nodes[n];
}
else {
- return startMongod.apply( null , o );
+ var conn = startMongod.apply( null , o );
+ if( shouldRemember ) this.savedStartOptions[conn] = options
+ conn.host = conn.host.replace( "127.0.0.1", this.host )
+ rval = conn;
+ }
+
+ wait = wait || false
+ if( ! wait.toFixed ){
+ if( wait ) wait = 0
+ else wait = -1
}
+
+ if( rval == null || wait < 0 ) return rval
+
+ // Wait for startup
+ this.waitForHealth( rval, this.UP, wait )
+
+ return rval
+
}
+
/**
- * Restarts a db without clearing the data directory. If the server is not
- * stopped first, this function will not work.
+ * Restarts a db without clearing the data directory by default. If the server is not
+ * stopped first, this function will not work.
*
- * @param {int} n server number (0, 1, 2, ...)
+ * Option { startClean : true } forces clearing the data directory.
+ *
+ * @param {int|conn|[int|conn]} n array or single server number (0, 1, 2, ...) or conn
*/
-ReplSetTest.prototype.restart = function( n , options ){
- return this.start( n , options , true );
+ReplSetTest.prototype.restart = function( n , options, signal, wait ){
+ // Can specify wait as third parameter, if using default signal
+ if( signal == true || signal == false ){
+ wait = signal
+ signal = undefined
+ }
+
+ this.stop( n, signal, wait && wait.toFixed ? wait : true )
+ return this.start( n , options , true, wait );
}
-ReplSetTest.prototype.stop = function( n , signal ){
+ReplSetTest.prototype.stopMaster = function( signal , wait ) {
+ var master = this.getMaster();
+ var master_id = this.getNodeId( master );
+ return this.stop( master_id , signal , wait );
+}
+
+// Stops a particular node or nodes, specified by conn or id
+ReplSetTest.prototype.stop = function( n , signal, wait /* wait for stop */ ){
+
+ // Flatten array of nodes to stop
+ if( n.length ){
+ nodes = n
+
+ var stopped = []
+ for( var i = 0; i < nodes.length; i++ ){
+ if( this.stop( nodes[i], signal, wait ) )
+ stopped.push( nodes[i] )
+ }
+
+ return stopped
+ }
+
+
+ // Can specify wait as second parameter, if using default signal
+ if( signal == true || signal == false ){
+ wait = signal
+ signal = undefined
+ }
+
+ wait = wait || false
+ if( ! wait.toFixed ){
+ if( wait ) wait = 0
+ else wait = -1
+ }
+
var port = this.getPort( n );
- print('*** Shutting down mongod in port ' + port + ' ***');
- return stopMongod( port , signal || 15 );
+ print('ReplSetTest stop *** Shutting down mongod in port ' + port + ' ***');
+ var ret = stopMongod( port , signal || 15 );
+
+ if( ! ret || wait < 0 ) return ret
+
+ // Wait for shutdown
+ this.waitForHealth( n, this.DOWN, wait )
+
+ return true
}
+
ReplSetTest.prototype.stopSet = function( signal , forRestart ) {
for(i=0; i < this.ports.length; i++) {
this.stop( i, signal );
}
if ( ! forRestart && this._alldbpaths ){
+ print("ReplSetTest stopSet deleting all dbpaths");
for( i=0; i<this._alldbpaths.length; i++ ){
resetDbpath( this._alldbpaths[i] );
}
}
- print('*** Shut down repl set - test worked ****' )
+ print('ReplSetTest stopSet *** Shut down repl set - test worked ****' )
+};
+
+
+/**
+ * Waits until there is a master node
+ */
+ReplSetTest.prototype.waitForMaster = function( timeout ){
+
+ var master = undefined
+
+ this.attempt({context: this, timeout: timeout, desc: "waiting for master"}, function() {
+ return ( master = this.getMaster() )
+ });
+
+ return master
+}
+
+
+/**
+ * Wait for a health indicator to go to a particular state or states.
+ *
+ * @param node is a single node or list of nodes, by id or conn
+ * @param state is a single state or list of states
+ *
+ */
+ReplSetTest.prototype.waitForHealth = function( node, state, timeout ){
+ this.waitForIndicator( node, state, "health", timeout )
+}
+
+/**
+ * Wait for a state indicator to go to a particular state or states.
+ *
+ * @param node is a single node or list of nodes, by id or conn
+ * @param state is a single state or list of states
+ *
+ */
+ReplSetTest.prototype.waitForState = function( node, state, timeout ){
+ this.waitForIndicator( node, state, "state", timeout )
+}
+
+/**
+ * Wait for a rs indicator to go to a particular state or states.
+ *
+ * @param node is a single node or list of nodes, by id or conn
+ * @param states is a single state or list of states
+ * @param ind is the indicator specified
+ *
+ */
+ReplSetTest.prototype.waitForIndicator = function( node, states, ind, timeout ){
+
+ if( node.length ){
+
+ var nodes = node
+ for( var i = 0; i < nodes.length; i++ ){
+ if( states.length )
+ this.waitForIndicator( nodes[i], states[i], ind, timeout )
+ else
+ this.waitForIndicator( nodes[i], states, ind, timeout )
+ }
+
+ return;
+ }
+
+ timeout = timeout || 30000;
+
+ if( ! node.getDB ){
+ node = this.nodes[node]
+ }
+
+ if( ! states.length ) states = [ states ]
+
+ print( "ReplSetTest waitForIndicator " + ind )
+ printjson( states )
+ print( "ReplSetTest waitForIndicator from node " + node )
+
+ var lastTime = null
+ var currTime = new Date().getTime()
+ var status = undefined
+
+ this.attempt({context: this, timeout: timeout, desc: "waiting for state indicator " + ind + " for " + timeout + "ms" }, function() {
+
+ status = this.status()
+
+ if( lastTime == null || ( currTime = new Date().getTime() ) - (1000 * 5) > lastTime ){
+ if( lastTime == null ) print( "ReplSetTest waitForIndicator Initial status ( timeout : " + timeout + " ) :" )
+ printjson( status )
+ lastTime = new Date().getTime()
+ }
+
+ for( var i = 0; i < status.members.length; i++ ){
+ if( status.members[i].name == node.host ){
+ for( var j = 0; j < states.length; j++ ){
+ if( status.members[i][ind] == states[j] ) return true;
+ }
+ }
+ }
+
+ return false
+
+ });
+
+ print( "ReplSetTest waitForIndicator final status:" )
+ printjson( status )
+
+}
+
+ReplSetTest.Health = {}
+ReplSetTest.Health.UP = 1
+ReplSetTest.Health.DOWN = 0
+
+ReplSetTest.State = {}
+ReplSetTest.State.PRIMARY = 1
+ReplSetTest.State.SECONDARY = 2
+ReplSetTest.State.RECOVERING = 3
+
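The constants are mixed into each instance by the constructor (via Object.extend above), so a test can wait on them directly; a sketch, assuming rt is an initiated ReplSetTest:

    var sec = rt.getSecondary()
    rt.stop( sec , undefined , true )                       // shut it down and wait for health DOWN
    rt.start( sec , { remember : true } , true , true )     // restart with the saved options, wait for UP
    rt.waitForState( sec , rt.SECONDARY , 60 * 1000 )       // block until it reports SECONDARY again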
+/**
+ * Overflows a replica set secondary or secondaries, specified by id or conn.
+ */
+ReplSetTest.prototype.overflow = function( secondaries ){
+
+ // Create a new collection to overflow, allow secondaries to replicate
+ var master = this.getMaster()
+ var overflowColl = master.getCollection( "_overflow.coll" )
+ overflowColl.insert({ replicated : "value" })
+ this.awaitReplication()
+
+ this.stop( secondaries, undefined, 5 * 60 * 1000 )
+
+ var count = master.getDB("local").oplog.rs.count();
+ var prevCount = -1;
+
+ // Keep inserting till we hit our capped coll limits
+ while (count != prevCount) {
+
+ print("ReplSetTest overflow inserting 10000");
+
+ for (var i = 0; i < 10000; i++) {
+ overflowColl.insert({ overflow : "value" });
+ }
+ prevCount = count;
+ this.awaitReplication();
+
+ count = master.getDB("local").oplog.rs.count();
+
+ print( "ReplSetTest overflow count : " + count + " prev : " + prevCount );
+
+ }
+
+ // Restart all our secondaries and wait for recovery state
+ this.start( secondaries, { remember : true }, true, true )
+ this.waitForState( secondaries, this.RECOVERING, 5 * 60 * 1000 )
+
}
+
+
+
+
+/**
+ * Bridging allows you to test network partitioning. For example, you can set
+ * up a replica set, run bridge(), then kill the connection between any two
+ * nodes x and y with partition(x, y).
+ *
+ * Once you have called bridging, you cannot reconfigure the replica set.
+ */
+ReplSetTest.prototype.bridge = function() {
+ if (this.bridges) {
+ print("ReplSetTest bridge bridges have already been created!");
+ return;
+ }
+
+ var n = this.nodes.length;
+
+ // create bridges
+ this.bridges = [];
+ for (var i=0; i<n; i++) {
+ var nodeBridges = [];
+ for (var j=0; j<n; j++) {
+ if (i == j) {
+ continue;
+ }
+ nodeBridges[j] = new ReplSetBridge(this, i, j);
+ }
+ this.bridges.push(nodeBridges);
+ }
+ print("ReplSetTest bridge bridges: " + this.bridges);
+
+ // restart everyone independently
+ this.stopSet(null, true);
+ for (var i=0; i<n; i++) {
+ this.restart(i, {noReplSet : true});
+ }
+
+ // create new configs
+ for (var i=0; i<n; i++) {
+ config = this.nodes[i].getDB("local").system.replset.findOne();
+
+ if (!config) {
+ print("ReplSetTest bridge couldn't find config for "+this.nodes[i]);
+ printjson(this.nodes[i].getDB("local").system.namespaces.find().toArray());
+ assert(false);
+ }
+
+ var updateMod = {"$set" : {}};
+ for (var j = 0; j<config.members.length; j++) {
+ if (config.members[j].host == this.host+":"+this.ports[i]) {
+ continue;
+ }
+
+ updateMod['$set']["members."+j+".host"] = this.bridges[i][j].host;
+ }
+ print("ReplSetTest bridge for node " + i + ":");
+ printjson(updateMod);
+ this.nodes[i].getDB("local").system.replset.update({},updateMod);
+ }
+
+ this.stopSet(null, true);
+
+ // start set
+ for (var i=0; i<n; i++) {
+ this.restart(i);
+ }
+
+ return this.getMaster();
+};
+
+/**
+ * This kills the bridge between two nodes. As parameters, specify the from and
+ * to node numbers.
+ *
+ * For example, with a three-member replica set, we'd have nodes 0, 1, and 2,
+ * with the following bridges: 0->1, 0->2, 1->0, 1->2, 2->0, 2->1. We can kill
+ * the connection between nodes 0 and 2 by calling replTest.partition(0,2) or
+ * replTest.partition(2,0) (either way is identical). Then the replica set would
+ * have the following bridges: 0->1, 1->0, 1->2, 2->1.
+ */
+ReplSetTest.prototype.partition = function(from, to) {
+ this.bridges[from][to].stop();
+ this.bridges[to][from].stop();
+};
+
+/**
+ * This reverses a partition created by partition() above.
+ */
+ReplSetTest.prototype.unPartition = function(from, to) {
+ this.bridges[from][to].start();
+ this.bridges[to][from].start();
+};
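+
+// Example usage (illustrative sketch, not part of the original helpers; assumes a
+// started and initiated three-node ReplSetTest named replTest):
+//   replTest.bridge()            // must be called before any partitioning
+//   replTest.partition( 0, 2 )   // sever the link between nodes 0 and 2
+//   replTest.unPartition( 0, 2 ) // restore it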
+
+ReplSetBridge = function(rst, from, to) {
+ var n = rst.nodes.length;
+
+ var startPort = rst.startPort+n;
+ this.port = (startPort+(from*n+to));
+ this.host = rst.host+":"+this.port;
+
+ this.dest = rst.host+":"+rst.ports[to];
+ this.start();
+};
+
+ReplSetBridge.prototype.start = function() {
+ var args = ["mongobridge", "--port", this.port, "--dest", this.dest];
+ print("ReplSetBridge starting: "+tojson(args));
+ this.bridge = startMongoProgram.apply( null , args );
+ print("ReplSetBridge started " + this.bridge);
+};
+
+ReplSetBridge.prototype.stop = function() {
+ print("ReplSetBridge stopping: " + this.port);
+ stopMongod(this.port);
+};
+
+ReplSetBridge.prototype.toString = function() {
+ return this.host+" -> "+this.dest;
+};
diff --git a/shell/shell_utils.cpp b/shell/shell_utils.cpp
index 09a3e46..e09309c 100644
--- a/shell/shell_utils.cpp
+++ b/shell/shell_utils.cpp
@@ -60,6 +60,10 @@ namespace mongo {
inline int pipe(int fds[2]) { return _pipe(fds, 4096, _O_TEXT | _O_NOINHERIT); }
#endif
+ namespace JSFiles {
+ extern const JSFile servers;
+ }
+
// these functions have not been audited for thread safety - currently they are called with an exclusive js mutex
namespace shellUtils {
@@ -86,19 +90,8 @@ namespace mongo {
// real methods
- mongo::BSONObj JSSleep(const mongo::BSONObj &args) {
- assert( args.nFields() == 1 );
- assert( args.firstElement().isNumber() );
- int ms = int( args.firstElement().number() );
- {
- auto_ptr< ScriptEngine::Unlocker > u = globalScriptEngine->newThreadUnlocker();
- sleepmillis( ms );
- }
- return undefined_;
- }
-
void goingAwaySoon();
- BSONObj Quit(const BSONObj& args) {
+ BSONObj Quit(const BSONObj& args, void* data) {
 // If no arguments are given, the first element will be EOO, which
// converts to the integer value 0.
goingAwaySoon();
@@ -107,7 +100,7 @@ namespace mongo {
return undefined_;
}
- BSONObj JSGetMemInfo( const BSONObj& args ) {
+ BSONObj JSGetMemInfo( const BSONObj& args, void* data ) {
ProcessInfo pi;
uassert( 10258 , "processinfo not supported" , pi.supported() );
@@ -124,13 +117,13 @@ namespace mongo {
#ifndef MONGO_SAFE_SHELL
- BSONObj listFiles(const BSONObj& _args) {
+ BSONObj listFiles(const BSONObj& _args, void* data) {
static BSONObj cd = BSON( "0" << "." );
BSONObj args = _args.isEmpty() ? cd : _args;
uassert( 10257 , "need to specify 1 argument to listFiles" , args.nFields() == 1 );
- BSONObjBuilder lst;
+ BSONArrayBuilder lst;
string rootname = args.firstElement().valuestrsafe();
path root( rootname );
@@ -142,7 +135,6 @@ namespace mongo {
directory_iterator end;
directory_iterator i( root);
- int num =0;
while ( i != end ) {
path p = *i;
BSONObjBuilder b;
@@ -158,21 +150,17 @@ namespace mongo {
}
}
- stringstream ss;
- ss << num;
- string name = ss.str();
- lst.append( name, b.done() );
- num++;
+ lst.append( b.obj() );
i++;
}
-
+
BSONObjBuilder ret;
ret.appendArray( "", lst.done() );
return ret.obj();
}
- BSONObj ls(const BSONObj& args) {
- BSONObj o = listFiles(args);
+ BSONObj ls(const BSONObj& args, void* data) {
+ BSONObj o = listFiles(args, data);
if( !o.isEmpty() ) {
for( BSONObj::iterator i = o.firstElement().Obj().begin(); i.more(); ) {
BSONObj f = i.next().Obj();
@@ -185,7 +173,7 @@ namespace mongo {
return BSONObj();
}
- BSONObj cd(const BSONObj& args) {
+ BSONObj cd(const BSONObj& args, void* data) {
#if defined(_WIN32)
std::wstring dir = toWideString( args.firstElement().String().c_str() );
if( SetCurrentDirectory(dir.c_str()) )
@@ -200,12 +188,12 @@ namespace mongo {
return BSON( "" << "change directory failed" );
}
- BSONObj pwd(const BSONObj&) {
+ BSONObj pwd(const BSONObj&, void* data) {
boost::filesystem::path p = boost::filesystem::current_path();
return BSON( "" << p.string() );
}
- BSONObj hostname(const BSONObj&) {
+ BSONObj hostname(const BSONObj&, void* data) {
return BSON( "" << getHostName() );
}
@@ -216,7 +204,7 @@ namespace mongo {
const int CANT_OPEN_FILE = 13300;
- BSONObj cat(const BSONObj& args) {
+ BSONObj cat(const BSONObj& args, void* data) {
BSONElement e = oneArg(args);
stringstream ss;
ifstream f(e.valuestrsafe());
@@ -235,7 +223,7 @@ namespace mongo {
return BSON( "" << ss.str() );
}
- BSONObj md5sumFile(const BSONObj& args) {
+ BSONObj md5sumFile(const BSONObj& args, void* data) {
BSONElement e = oneArg(args);
stringstream ss;
FILE* f = fopen(e.valuestrsafe(), "rb");
@@ -256,12 +244,12 @@ namespace mongo {
return BSON( "" << digestToString( d ) );
}
- BSONObj mkdir(const BSONObj& args) {
+ BSONObj mkdir(const BSONObj& args, void* data) {
boost::filesystem::create_directories(args.firstElement().String());
return BSON( "" << true );
}
- BSONObj removeFile(const BSONObj& args) {
+ BSONObj removeFile(const BSONObj& args, void* data) {
BSONElement e = oneArg(args);
bool found = false;
@@ -280,7 +268,7 @@ namespace mongo {
* @param args - [ name, byte index ]
* In this initial implementation, all bits in the specified byte are flipped.
*/
- BSONObj fuzzFile(const BSONObj& args) {
+ BSONObj fuzzFile(const BSONObj& args, void* data) {
uassert( 13619, "fuzzFile takes 2 arguments", args.nFields() == 2 );
shared_ptr< File > f( new File() );
f->open( args.getStringField( "0" ) );
@@ -314,7 +302,7 @@ namespace mongo {
if( mongo::dbexitCalled ) throw "program is terminating";
stringstream buf;
if ( port > 0 )
- buf << "m" << port << "| " << line;
+ buf << " m" << port << "| " << line;
else
buf << "sh" << pid << "| " << line;
cout << buf.str() << endl;
@@ -322,7 +310,7 @@ namespace mongo {
}
// only returns last 100000 characters
- BSONObj RawMongoProgramOutput( const BSONObj &args ) {
+ BSONObj RawMongoProgramOutput( const BSONObj &args, void* data ) {
mongo::mutex::scoped_lock lk( mongoProgramOutputMutex );
string out = mongoProgramOutput_.str();
size_t len = out.length();
@@ -331,7 +319,7 @@ namespace mongo {
return BSON( "" << out );
}
- BSONObj ClearRawMongoProgramOutput( const BSONObj &args ) {
+ BSONObj ClearRawMongoProgramOutput( const BSONObj &args, void* data ) {
mongo::mutex::scoped_lock lk( mongoProgramOutputMutex );
mongoProgramOutput_.str( "" );
return undefined_;
@@ -466,12 +454,16 @@ namespace mongo {
try {
// This assumes there aren't any 0's in the mongo program output.
// Hope that's ok.
- const unsigned bufSize = 64000;
+ const unsigned bufSize = 128 * 1024;
char buf[ bufSize ];
char temp[ bufSize ];
char *start = buf;
while( 1 ) {
int lenToRead = ( bufSize - 1 ) - ( start - buf );
+ if ( lenToRead <= 0 ) {
+ cout << "error: lenToRead: " << lenToRead << endl;
+ cout << "first 300: " << string(buf,0,300) << endl;
+ }
assert( lenToRead > 0 );
int ret = read( pipe_, (void *)start, lenToRead );
if( mongo::dbexitCalled )
@@ -541,7 +533,7 @@ namespace mongo {
{
stringstream ss;
ss << "couldn't start process " << argv_[0];
- uassert(13294, ss.str(), success);
+ uassert(14042, ss.str(), success);
}
CloseHandle(pi.hThread);
@@ -624,14 +616,14 @@ namespace mongo {
#endif
}
- BSONObj WaitProgram( const BSONObj& a ) {
+ BSONObj WaitProgram( const BSONObj& a, void* data ) {
int pid = oneArg( a ).numberInt();
BSONObj x = BSON( "" << wait_for_pid( pid ) );
shells.erase( pid );
return x;
}
- BSONObj WaitMongoProgramOnPort( const BSONObj &a ) {
+ BSONObj WaitMongoProgramOnPort( const BSONObj &a, void* data ) {
int port = oneArg( a ).numberInt();
uassert( 13621, "no known mongo program on port", dbs.count( port ) != 0 );
log() << "waiting port: " << port << ", pid: " << dbs[ port ].first << endl;
@@ -642,7 +634,7 @@ namespace mongo {
return BSON( "" << ret );
}
- BSONObj StartMongoProgram( const BSONObj &a ) {
+ BSONObj StartMongoProgram( const BSONObj &a, void* data ) {
_nokillop = true;
ProgramRunner r( a );
r.start();
@@ -650,7 +642,7 @@ namespace mongo {
return BSON( string( "" ) << int( r.pid() ) );
}
- BSONObj RunMongoProgram( const BSONObj &a ) {
+ BSONObj RunMongoProgram( const BSONObj &a, void* data ) {
ProgramRunner r( a );
r.start();
boost::thread t( r );
@@ -665,7 +657,7 @@ namespace mongo {
return BSON( string( "" ) << exit_code );
}
- BSONObj RunProgram(const BSONObj &a) {
+ BSONObj RunProgram(const BSONObj &a, void* data) {
ProgramRunner r( a, false );
r.start();
boost::thread t( r );
@@ -675,7 +667,7 @@ namespace mongo {
return BSON( string( "" ) << exit_code );
}
- BSONObj ResetDbpath( const BSONObj &a ) {
+ BSONObj ResetDbpath( const BSONObj &a, void* data ) {
assert( a.nFields() == 1 );
string path = a.firstElement().valuestrsafe();
assert( !path.empty() );
@@ -705,7 +697,7 @@ namespace mongo {
}
// NOTE target dbpath will be cleared first
- BSONObj CopyDbpath( const BSONObj &a ) {
+ BSONObj CopyDbpath( const BSONObj &a, void* data ) {
assert( a.nFields() == 2 );
BSONObjIterator i( a );
string from = i.next().str();
@@ -816,22 +808,22 @@ namespace mongo {
}
/** stopMongoProgram(port[, signal]) */
- BSONObj StopMongoProgram( const BSONObj &a ) {
+ BSONObj StopMongoProgram( const BSONObj &a, void* data ) {
assert( a.nFields() == 1 || a.nFields() == 2 );
- assert( a.firstElement().isNumber() );
+ uassert( 15853 , "stopMongo needs a number" , a.firstElement().isNumber() );
int port = int( a.firstElement().number() );
int code = killDb( port, 0, getSignal( a ) );
cout << "shell: stopped mongo program on port " << port << endl;
- return BSON( "" << code );
+ return BSON( "" << (double)code );
}
- BSONObj StopMongoProgramByPid( const BSONObj &a ) {
+ BSONObj StopMongoProgramByPid( const BSONObj &a, void* data ) {
assert( a.nFields() == 1 || a.nFields() == 2 );
- assert( a.firstElement().isNumber() );
+ uassert( 15852 , "stopMongoByPid needs a number" , a.firstElement().isNumber() );
int pid = int( a.firstElement().number() );
int code = killDb( 0, pid, getSignal( a ) );
cout << "shell: stopped mongo program on pid " << pid << endl;
- return BSON( "" << code );
+ return BSON( "" << (double)code );
}
void KillMongoProgramInstances() {
@@ -853,20 +845,20 @@ namespace mongo {
MongoProgramScope::~MongoProgramScope() {
DESTRUCTOR_GUARD(
KillMongoProgramInstances();
- ClearRawMongoProgramOutput( BSONObj() );
+ ClearRawMongoProgramOutput( BSONObj(), 0 );
)
}
unsigned _randomSeed;
- BSONObj JSSrand( const BSONObj &a ) {
+ BSONObj JSSrand( const BSONObj &a, void* data ) {
uassert( 12518, "srand requires a single numeric argument",
a.nFields() == 1 && a.firstElement().isNumber() );
_randomSeed = (unsigned)a.firstElement().numberLong(); // grab least significant digits
return undefined_;
}
- BSONObj JSRand( const BSONObj &a ) {
+ BSONObj JSRand( const BSONObj &a, void* data ) {
uassert( 12519, "rand accepts no arguments", a.nFields() == 0 );
unsigned r;
#if !defined(_WIN32)
@@ -877,7 +869,7 @@ namespace mongo {
return BSON( "" << double( r ) / ( double( RAND_MAX ) + 1 ) );
}
- BSONObj isWindows(const BSONObj& a) {
+ BSONObj isWindows(const BSONObj& a, void* data) {
uassert( 13006, "isWindows accepts no arguments", a.nFields() == 0 );
#ifdef _WIN32
return BSON( "" << true );
@@ -886,7 +878,7 @@ namespace mongo {
#endif
}
- BSONObj getHostName(const BSONObj& a) {
+ BSONObj getHostName(const BSONObj& a, void* data) {
uassert( 13411, "getHostName accepts no arguments", a.nFields() == 0 );
char buf[260]; // HOST_NAME_MAX is usually 255
assert(gethostname(buf, 260) == 0);
@@ -897,7 +889,6 @@ namespace mongo {
void installShellUtils( Scope& scope ) {
theScope = &scope;
- scope.injectNative( "sleep" , JSSleep );
scope.injectNative( "quit", Quit );
scope.injectNative( "getMemInfo" , JSGetMemInfo );
scope.injectNative( "_srand" , JSSrand );
diff --git a/shell/utils.js b/shell/utils.js
index 6b52ab9..8380607 100644
--- a/shell/utils.js
+++ b/shell/utils.js
@@ -9,10 +9,24 @@ chatty = function(s){
friendlyEqual = function( a , b ){
if ( a == b )
return true;
+
+ a = tojson(a,false,true);
+ b = tojson(b,false,true);
- if ( tojson( a ) == tojson( b ) )
+ if ( a == b )
return true;
+ var clean = function( s ){
+ s = s.replace( /NumberInt\((\-?\d+)\)/g , "$1" );
+ return s;
+ }
+
+ a = clean(a);
+ b = clean(b);
+
+ if ( a == b )
+ return true;
+
return false;
}
@@ -35,10 +49,8 @@ doassert = function (msg) {
assert = function( b , msg ){
if ( assert._debug && msg ) print( "in assert for: " + msg );
-
if ( b )
- return;
-
+ return;
doassert( msg == undefined ? "assert failed" : "assert failed : " + msg );
}
@@ -72,6 +84,26 @@ assert.neq = function( a , b , msg ){
doassert( "[" + a + "] != [" + b + "] are equal : " + msg );
}
+assert.contains = function( o, arr, msg ){
+ var wasIn = false
+
+ if( ! arr.length ){
+ for( i in arr ){
+ wasIn = arr[i] == o || ( ( arr[i] != null && o != null ) && friendlyEqual( arr[i] , o ) )
+ if( wasIn ) break
+ }
+ }
+ else {
+ for( var i = 0; i < arr.length; i++ ){
+ wasIn = arr[i] == o || ( ( arr[i] != null && o != null ) && friendlyEqual( arr[i] , o ) )
+ if( wasIn ) break
+ }
+ }
+
+ if( ! wasIn ) doassert( tojson( o ) + " was not in " + tojson( arr ) + " : " + msg )
+}
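+
+// Illustrative example (not part of the original utilities):
+//   assert.contains( 2, [ 1, 2, 3 ], "2 should be in the array" )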
+
assert.repeat = function( f, msg, timeout, interval ) {
if ( assert._debug && msg ) print( "in assert for: " + msg );
@@ -199,6 +231,18 @@ assert.gte = function( a , b , msg ){
doassert( a + " is not greater than or eq " + b + " : " + msg );
}
+assert.between = function( a, b, c, msg, inclusive ){
+ if ( assert._debug && msg ) print( "in assert for: " + msg );
+
+ if( ( inclusive == undefined || inclusive == true ) &&
+ a <= b && b <= c ) return;
+ else if( a < b && b < c ) return;
+
+ doassert( b + " is not between " + a + " and " + c + " : " + msg );
+}
+
+assert.betweenIn = function( a, b, c, msg ){ assert.between( a, b, c, msg, true ) }
+assert.betweenEx = function( a, b, c, msg ){ assert.between( a, b, c, msg, false ) }
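+
+// Illustrative examples (not part of the original utilities): betweenIn includes
+// the endpoints, betweenEx excludes them.
+//   assert.betweenIn( 1, 1, 3, "1 lies in [1,3]" )
+//   assert.betweenEx( 1, 2, 3, "2 lies in (1,3)" )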
assert.close = function( a , b , msg , places ){
if (places === undefined) {
@@ -226,6 +270,11 @@ Object.extend = function( dst , src , deep ){
return dst;
}
+Object.merge = function( dst, src, deep ){
+ var clone = Object.extend( {}, dst, deep )
+ return Object.extend( clone, src, deep )
+}
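+
+// Illustrative example (not part of the original utilities): on key collisions the
+// fields from src win, because the clone of dst is extended with src afterwards.
+//   Object.merge( { a : 1, b : 1 }, { b : 2 } )   // { a : 1, b : 2 }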
+
argumentsToArray = function( a ){
var arr = [];
for ( var i=0; i<a.length; i++ )
@@ -370,20 +419,25 @@ Array.shuffle = function( arr ){
}
-Array.tojson = function( a , indent ){
+Array.tojson = function( a , indent , nolint ){
+ var lineEnding = nolint ? " " : "\n";
+
if (!indent)
indent = "";
+
+ if ( nolint )
+ indent = "";
if (a.length == 0) {
return "[ ]";
}
- var s = "[\n";
+ var s = "[" + lineEnding;
indent += "\t";
for ( var i=0; i<a.length; i++){
- s += indent + tojson( a[i], indent );
+ s += indent + tojson( a[i], indent , nolint );
if ( i < a.length - 1 ){
- s += ",\n";
+ s += "," + lineEnding;
}
}
if ( a.length == 0 ) {
@@ -391,7 +445,7 @@ Array.tojson = function( a , indent ){
}
indent = indent.substring(1);
- s += "\n"+indent+"]";
+ s += lineEnding+indent+"]";
return s;
}
@@ -459,6 +513,14 @@ NumberLong.prototype.tojson = function() {
return this.toString();
}
+if ( ! NumberInt.prototype ) {
+ NumberInt.prototype = {}
+}
+
+NumberInt.prototype.tojson = function() {
+ return this.toString();
+}
+
if ( ! ObjectId.prototype )
ObjectId.prototype = {}
@@ -533,16 +595,24 @@ if ( typeof( BinData ) != "undefined" ){
//return "BinData type: " + this.type + " len: " + this.len;
return this.toString();
}
+
+ BinData.prototype.subtype = function () {
+ return this.type;
+ }
+
+ BinData.prototype.length = function () {
+ return this.len;
+ }
}
else {
print( "warning: no BinData class" );
}
-if ( typeof( UUID ) != "undefined" ){
+/*if ( typeof( UUID ) != "undefined" ){
UUID.prototype.tojson = function () {
return this.toString();
}
-}
+}*/
if ( typeof _threadInject != "undefined" ){
print( "fork() available!" );
@@ -667,7 +737,9 @@ if ( typeof _threadInject != "undefined" ){
"jstests/killop.js",
"jstests/run_program1.js",
"jstests/notablescan.js",
- "jstests/drop2.js"] );
+ "jstests/drop2.js",
+ "jstests/dropdb_race.js",
+ "jstests/bench_test1.js"] );
// some tests can't be run in parallel with each other
var serialTestsArr = [ "jstests/fsync.js",
@@ -903,6 +975,35 @@ printjsononeline = function(x){
print( tojsononeline( x ) );
}
+if ( typeof TestData == "undefined" ){
+ TestData = undefined
+}
+
+jsTestName = function(){
+ if( TestData ) return TestData.testName
+ return "__unknown_name__"
+}
+
+jsTestFile = function(){
+ if( TestData ) return TestData.testFile
+ return "__unknown_file__"
+}
+
+jsTestPath = function(){
+ if( TestData ) return TestData.testPath
+ return "__unknown_path__"
+}
+
+jsTestOptions = function(){
+ if( TestData ) return { noJournal : TestData.noJournal,
+ noJournalPrealloc : TestData.noJournalPrealloc }
+ return {}
+}
+
+testLog = function(x){
+ print( jsTestFile() + " - " + x )
+}
+
shellPrintHelper = function (x) {
if (typeof (x) == "undefined") {
@@ -956,7 +1057,6 @@ shellAutocomplete = function (/*prefix*/){ // outer scope function called on ini
builtinMethods[Mongo] = "find update insert remove".split(' ');
builtinMethods[BinData] = "hex base64 length subtype".split(' ');
- builtinMethods[NumberLong] = "toNumber".split(' ');
var extraGlobals = "Infinity NaN undefined null true false decodeURI decodeURIComponent encodeURI encodeURIComponent escape eval isFinite isNaN parseFloat parseInt unescape Array Boolean Date Math Number RegExp String print load gc MinKey MaxKey Mongo NumberLong ObjectId DBPointer UUID BinData Map".split(' ');
@@ -1017,7 +1117,7 @@ shellAutocomplete = function (/*prefix*/){ // outer scope function called on ini
var p = possibilities[i];
if (typeof(curObj[p]) == "undefined" && curObj != global) continue; // extraGlobals aren't in the global object
if (p.length == 0 || p.length < lastPrefix.length) continue;
- if (isPrivate(p)) continue;
+ if (lastPrefix[0] != '_' && isPrivate(p)) continue;
if (p.match(/^[0-9]+$/)) continue; // don't array number indexes
if (p.substr(0, lastPrefix.length) != lastPrefix) continue;
@@ -1046,7 +1146,7 @@ shellAutocomplete.showPrivate = false; // toggle to show (useful when working on
shellHelper = function( command , rest , shouldPrint ){
command = command.trim();
- var args = rest.trim().replace(/;$/,"").split( "\s+" );
+ var args = rest.trim().replace(/\s*;$/,"").split( "\s+" );
if ( ! shellHelper[command] )
throw "no command [" + command + "]";
@@ -1079,6 +1179,10 @@ shellHelper.it = function(){
shellHelper.show = function (what) {
assert(typeof what == "string");
+ var args = what.split( /\s+/ );
+ what = args[0]
+ args = args.splice(1)
+
if (what == "profile") {
if (db.system.profile.count() == 0) {
print("db.system.profile is empty");
@@ -1087,7 +1191,32 @@ shellHelper.show = function (what) {
}
else {
print();
- db.system.profile.find({ millis: { $gt: 0} }).sort({ $natural: -1 }).limit(5).forEach(function (x) { print("" + x.millis + "ms " + String(x.ts).substring(0, 24)); print(x.info); print("\n"); })
+ db.system.profile.find({ millis: { $gt: 0} }).sort({ $natural: -1 }).limit(5).forEach(
+ function (x) {
+ print("" + x.op + "\t" + x.ns + " " + x.millis + "ms " + String(x.ts).substring(0, 24));
+ var l = "";
+ for ( var z in x ){
+ if ( z == "op" || z == "ns" || z == "millis" || z == "ts" )
+ continue;
+
+ var val = x[z];
+ var mytype = typeof(val);
+
+ if ( mytype == "string" ||
+ mytype == "number" )
+ l += z + ":" + val + " ";
+ else if ( mytype == "object" )
+ l += z + ":" + tojson(val ) + " ";
+ else if ( mytype == "boolean" )
+ l += z + " ";
+ else
+ l += z + ":" + val + " ";
+
+ }
+ print( l );
+ print("\n");
+ }
+ )
}
return "";
}
@@ -1117,6 +1246,27 @@ shellHelper.show = function (what) {
//db.getMongo().getDBNames().sort().forEach(function (x) { print(x) });
return "";
}
+
+ if (what == "log" ) {
+ var n = "global";
+ if ( args.length > 0 )
+ n = args[0]
+
+ var res = db.adminCommand( { getLog : n } )
+ for ( var i=0; i<res.log.length; i++){
+ print( res.log[i] )
+ }
+ return ""
+ }
+
+ if (what == "logs" ) {
+ var res = db.adminCommand( { getLog : "*" } )
+ for ( var i=0; i<res.names.length; i++){
+ print( res.names[i] )
+ }
+ return ""
+ }
+
throw "don't know how to show [" + what + "]";
@@ -1314,18 +1464,40 @@ rs.slaveOk = function () { return db.getMongo().setSlaveOk(); }
rs.status = function () { return db._adminCommand("replSetGetStatus"); }
rs.isMaster = function () { return db.isMaster(); }
rs.initiate = function (c) { return db._adminCommand({ replSetInitiate: c }); }
-rs.reconfig = function (cfg) {
- cfg.version = rs.conf().version + 1;
+rs._runCmd = function (c) {
+ // after the command, catch the disconnect and reconnect if necessary
var res = null;
try {
- res = db.adminCommand({ replSetReconfig: cfg });
+ res = db.adminCommand(c);
}
catch (e) {
- print("shell got exception during reconfig: " + e);
- print("in some circumstances, the primary steps down and closes connections on a reconfig");
+ if (("" + e).indexOf("error doing query") >= 0) {
+ // closed connection. reconnect.
+ db.getLastErrorObj();
+ var o = db.getLastErrorObj();
+ if (o.ok) {
+ print("reconnected to server after rs command (which is normal)");
+ }
+ else {
+ printjson(o);
+ }
+ }
+ else {
+ print("shell got exception during repl set operation: " + e);
+ print("in some circumstances, the primary steps down and closes connections on a reconfig");
+ }
+ return "";
}
return res;
}
+rs.reconfig = function (cfg, options) {
+ cfg.version = rs.conf().version + 1;
+ cmd = { replSetReconfig: cfg };
+ for (var i in options) {
+ cmd[i] = options[i];
+ }
+ return this._runCmd(cmd);
+}
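+
+// Illustrative example (not part of the original helpers): fields in the options
+// object are copied onto the replSetReconfig command document; whether the server
+// honors a given option (e.g. force) depends on the server version.
+//   var cfg = rs.conf();
+//   cfg.members[1].priority = 0;
+//   rs.reconfig( cfg, { force : true } );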
rs.add = function (hostport, arb) {
var cfg = hostport;
@@ -1345,20 +1517,13 @@ rs.add = function (hostport, arb) {
cfg.arbiterOnly = true;
}
c.members.push(cfg);
- var res = null;
- try {
- res = db.adminCommand({ replSetReconfig: c });
- }
- catch (e) {
- print("shell got exception during reconfig: " + e);
- print("in some circumstances, the primary steps down and closes connections on a reconfig");
- }
- return res;
+ return this._runCmd({ replSetReconfig: c });
}
-rs.stepDown = function (secs) { return db._adminCommand({ replSetStepDown:secs||60}); }
+rs.stepDown = function (secs) { return db._adminCommand({ replSetStepDown:(secs === undefined) ? 60:secs}); }
rs.freeze = function (secs) { return db._adminCommand({replSetFreeze:secs}); }
rs.addArb = function (hn) { return this.add(hn, true); }
rs.conf = function () { return db.getSisterDB("local").system.replset.findOne(); }
+rs.config = function () { return rs.conf(); }
rs.remove = function (hn) {
var local = db.getSisterDB("local");
@@ -1377,6 +1542,41 @@ rs.remove = function (hn) {
return "error: couldn't find "+hn+" in "+tojson(c.members);
};
+rs.debug = {};
+
+rs.debug.nullLastOpWritten = function(primary, secondary) {
+ var p = connect(primary+"/local");
+ var s = connect(secondary+"/local");
+ s.getMongo().setSlaveOk();
+
+ var secondToLast = s.oplog.rs.find().sort({$natural : -1}).limit(1).next();
+ var last = p.runCommand({findAndModify : "oplog.rs",
+ query : {ts : {$gt : secondToLast.ts}},
+ sort : {$natural : 1},
+ update : {$set : {op : "n"}}});
+
+ if (!last.value.o || !last.value.o._id) {
+ print("couldn't find an _id?");
+ }
+ else {
+ last.value.o = {_id : last.value.o._id};
+ }
+
+ print("nulling out this op:");
+ printjson(last);
+};
+
+rs.debug.getLastOpWritten = function(server) {
+ var s = db.getSisterDB("local");
+ if (server) {
+ s = connect(server+"/local");
+ }
+ s.getMongo().setSlaveOk();
+
+ return s.oplog.rs.find().sort({$natural : -1}).limit(1).next();
+};
+
+
help = shellHelper.help = function (x) {
if (x == "mr") {
print("\nSee also http://www.mongodb.org/display/DOCS/MapReduce");
@@ -1408,6 +1608,17 @@ help = shellHelper.help = function (x) {
print("\nNote: the REPL prompt only auto-reports getLastError() for the shell command line connection.\n");
return;
}
+ else if (x == "keys") {
+ print("Tab completion and command history is available at the command prompt.\n");
+ print("Some emacs keystrokes are available too:");
+ print(" Ctrl-A start of line");
+ print(" Ctrl-E end of line");
+ print(" Ctrl-K del to end of line");
+ print("\nMulti-line commands");
+ print("You can enter a multi line javascript expression. If parens, braces, etc. are not closed, you will see a new line ");
+ print("beginning with '...' characters. Type the rest of your expression. Press Ctrl-C to abort the data entry if you");
+ print("get stuck.\n");
+ }
else if (x == "misc") {
print("\tb = new BinData(subtype,base64str) create a BSON BinData value");
print("\tb.subtype() the BinData subtype (0..255)");
@@ -1416,6 +1627,10 @@ help = shellHelper.help = function (x) {
print("\tb.base64() the data as a base 64 encoded string");
print("\tb.toString()");
print();
+ print("\tb = HexData(subtype,hexstr) create a BSON BinData value from a hex string");
+ print("\tb = UUID(hexstr) create a BSON BinData value of UUID subtype");
+ print("\tb = MD5(hexstr) create a BSON BinData value of MD5 subtype");
+ print();
print("\to = new ObjectId() create a new ObjectId");
print("\to.getTimestamp() return timestamp derived from first 32 bits of the OID");
print("\to.isObjectId()");
@@ -1454,15 +1669,18 @@ help = shellHelper.help = function (x) {
print("\t" + "db.help() help on db methods");
print("\t" + "db.mycoll.help() help on collection methods");
print("\t" + "rs.help() help on replica set methods");
- print("\t" + "help connect connecting to a db help");
print("\t" + "help admin administrative help");
+ print("\t" + "help connect connecting to a db help");
+ print("\t" + "help keys key shortcuts");
print("\t" + "help misc misc things to know");
- print("\t" + "help mr mapreduce help");
+ print("\t" + "help mr mapreduce");
print();
print("\t" + "show dbs show database names");
print("\t" + "show collections show collections in current database");
print("\t" + "show users show users in current database");
print("\t" + "show profile show most recent system.profile entries with time >= 1ms");
+ print("\t" + "show logs show the accessible logger names");
+ print("\t" + "show log [name] prints out the last segment of log in memory, 'global' is default");
print("\t" + "use <db_name> set current database");
print("\t" + "db.foo.find() list objects in collection foo");
print("\t" + "db.foo.find( { a : 1 } ) list objects in foo where a == 1");
diff --git a/shell/utils_sh.js b/shell/utils_sh.js
new file mode 100644
index 0000000..5bd449b
--- /dev/null
+++ b/shell/utils_sh.js
@@ -0,0 +1,98 @@
+sh = function() { return "try sh.help();" }
+
+
+sh._checkMongos = function() {
+ var x = db.runCommand( "ismaster" );
+ if ( x.msg != "isdbgrid" )
+ throw "not connected to a mongos"
+}
+
+sh._checkFullName = function( fullName ) {
+ assert( fullName , "neeed a full name" )
+ assert( fullName.indexOf( "." ) > 0 , "name needs to be fully qualified <db>.<collection>'" )
+}
+
+sh._adminCommand = function( cmd , skipCheck ) {
+ if ( ! skipCheck ) sh._checkMongos();
+ var res = db.getSisterDB( "admin" ).runCommand( cmd );
+
+ if ( res == null || ! res.ok ) {
+ print( "command failed: " + tojson( res ) )
+ }
+
+ return res;
+}
+
+sh.help = function() {
+ print( "\tsh.addShard( host ) server:port OR setname/server:port" )
+ print( "\tsh.enableSharding(dbname) enables sharding on the database dbname" )
+ print( "\tsh.shardCollection(fullName,key,unique) shards the collection" );
+
+ print( "\tsh.splitFind(fullName,find) splits the chunk that find is in at the median" );
+ print( "\tsh.splitAt(fullName,middle) splits the chunk that middle is in at middle" );
+ print( "\tsh.moveChunk(fullName,find,to) move the chunk where 'find' is to 'to' (name of shard)");
+
+ print( "\tsh.setBalancerState( <bool on or not> ) turns the balancer on or off true=on, false=off" );
+ print( "\tsh.getBalancerState() return true if on, off if not" );
+ print( "\tsh.isBalancerRunning() return true if the balancer is running on any mongos" );
+
+ print( "\tsh.status() prints a general overview of the cluster" )
+}
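+
+// Illustrative walkthrough (hypothetical database and collection names, not part of
+// the original helpers; assumes the shell is connected to a mongos):
+//   sh.enableSharding( "mydb" )
+//   sh.shardCollection( "mydb.users", { _id : 1 } )
+//   sh.status()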
+
+sh.status = function( verbose , configDB ) {
+ // TODO: move the actual command here
+ printShardingStatus( configDB , verbose );
+}
+
+sh.addShard = function( url ){
+ sh._adminCommand( { addShard : url } , true )
+}
+
+sh.enableSharding = function( dbname ) {
+ assert( dbname , "need a valid dbname" )
+ sh._adminCommand( { enableSharding : dbname } )
+}
+
+sh.shardCollection = function( fullName , key , unique ) {
+ sh._checkFullName( fullName )
+ assert( key , "need a key" )
+ assert( typeof( key ) == "object" , "key needs to be an object" )
+
+ var cmd = { shardCollection : fullName , key : key }
+ if ( unique )
+ cmd.unique = true;
+
+ sh._adminCommand( cmd )
+}
+
+
+sh.splitFind = function( fullName , find ) {
+ sh._checkFullName( fullName )
+ sh._adminCommand( { split : fullName , find : find } )
+}
+
+sh.splitAt = function( fullName , middle ) {
+ sh._checkFullName( fullName )
+ sh._adminCommand( { split : fullName , middle : middle } )
+}
+
+sh.moveChunk = function( fullName , find , to ) {
+ sh._checkFullName( fullName );
+ sh._adminCommand( { moveChunk : fullName , find : find , to : to } )
+}
+
+sh.setBalancerState = function( onOrNot ) {
+ db.getSisterDB( "config" ).settings.update({ _id: "balancer" }, { $set : { stopped: onOrNot ? false : true } }, true );
+}
+
+sh.getBalancerState = function() {
+ var x = db.getSisterDB( "config" ).settings.findOne({ _id: "balancer" } )
+ if ( x == null )
+ return true;
+ return ! x.stopped;
+}
+
+sh.isBalancerRunning = function() {
+ var x = db.getSisterDB( "config" ).locks.findOne( { _id : "balancer" } );
+ return x.state > 0;
+}
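+
+// Illustrative example (not part of the original helpers; assumes a mongos
+// connection): pause the balancer during maintenance, then re-enable it.
+//   sh.setBalancerState( false )
+//   assert.eq( false, sh.getBalancerState() )
+//   sh.setBalancerState( true )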
diff --git a/speed.js b/speed.js
new file mode 100755
index 0000000..c5aa3a3
--- /dev/null
+++ b/speed.js
@@ -0,0 +1,13 @@
+t = db.fooo;
+t.drop();
+x = { str:'aaaabbbbcc' }
+s = new Date();
+for( var i = 0; i < 100000; i++ ) {
+ x.i = i;
+ t.insert(x);
+}
+print( (new Date())-s );
+t.ensureIndex({x:1});
+t.ensureIndex({str:1});
+print( (new Date())-s );
+
diff --git a/third_party/README b/third_party/README
new file mode 100644
index 0000000..57f702d
--- /dev/null
+++ b/third_party/README
@@ -0,0 +1,6 @@
+
+linenoise
+ when making changes here, also publish to
+ http://github.com/erh/linenoise
+ 6cdc775807e57b2c3fd64bd207814f8ee1fe35f3
+
diff --git a/third_party/js-1.7/Makefile.in b/third_party/js-1.7/Makefile.in
new file mode 100644
index 0000000..08bb674
--- /dev/null
+++ b/third_party/js-1.7/Makefile.in
@@ -0,0 +1,388 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either of the GNU General Public License Version 2 or later (the "GPL"),
+# or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+DEPTH = ../..
+topsrcdir = @top_srcdir@
+srcdir = @srcdir@
+VPATH = @srcdir@
+
+include $(DEPTH)/config/autoconf.mk
+
+MODULE = js
+LIBRARY_NAME = mozjs
+LIB_IS_C_ONLY = 1
+GRE_MODULE = 1
+
+ifeq (,$(filter-out WINNT WINCE,$(OS_ARCH)))
+LIBRARY_NAME = js$(MOZ_BITS)$(VERSION_NUMBER)
+RESFILE = js$(MOZ_BITS)40.res
+endif
+
+PACKAGE_FILE = js.pkg
+
+# JavaScript must be built shared, even for static builds, as it is used by
+# other modules which are always built shared. Failure to do so results in
+# the js code getting copied into xpinstall and jsd as well as mozilla-bin,
+# and then the static data cells used for locking no longer work.
+
+ifndef JS_STATIC_BUILD
+FORCE_SHARED_LIB = 1
+endif
+
+CSRCS = \
+ jsapi.c \
+ jsarena.c \
+ jsarray.c \
+ jsatom.c \
+ jsbool.c \
+ jscntxt.c \
+ jsdate.c \
+ jsdbgapi.c \
+ jsdhash.c \
+ jsdtoa.c \
+ jsemit.c \
+ jsexn.c \
+ jsfun.c \
+ jsgc.c \
+ jshash.c \
+ jsinterp.c \
+ jsiter.c \
+ jslock.c \
+ jslog2.c \
+ jslong.c \
+ jsmath.c \
+ jsnum.c \
+ jsobj.c \
+ jsopcode.c \
+ jsparse.c \
+ jsprf.c \
+ jsregexp.c \
+ jsscan.c \
+ jsscope.c \
+ jsscript.c \
+ jsstr.c \
+ jsutil.c \
+ jsxdrapi.c \
+ jsxml.c \
+ prmjtime.c \
+ $(NULL)
+
+EXPORTS = \
+ jsautocfg.h \
+ jsautokw.h \
+ js.msg \
+ jsapi.h \
+ jsarray.h \
+ jsarena.h \
+ jsatom.h \
+ jsbit.h \
+ jsbool.h \
+ jsclist.h \
+ jscntxt.h \
+ jscompat.h \
+ jsconfig.h \
+ jsdate.h \
+ jsdbgapi.h \
+ jsdhash.h \
+ jsemit.h \
+ jsfun.h \
+ jsgc.h \
+ jshash.h \
+ jsinterp.h \
+ jsiter.h \
+ jslock.h \
+ jslong.h \
+ jsmath.h \
+ jsnum.h \
+ jsobj.h \
+ jsopcode.tbl \
+ jsopcode.h \
+ jsosdep.h \
+ jsotypes.h \
+ jsparse.h \
+ jsprf.h \
+ jsproto.tbl \
+ jsprvtd.h \
+ jspubtd.h \
+ jsregexp.h \
+ jsscan.h \
+ jsscope.h \
+ jsscript.h \
+ jsstddef.h \
+ jsstr.h \
+ jstypes.h \
+ jsutil.h \
+ jsxdrapi.h \
+ jsxml.h \
+ $(NULL)
+
+ifeq (,$(filter-out WINNT WINCE,$(OS_ARCH)))
+EXPORTS += jscpucfg.h
+endif
+
+JS_SAFE_ARENA = 1
+
+DASH_R = -r
+
+include $(topsrcdir)/config/config.mk
+
+EXTRA_DSO_LDOPTS += $(NSPR_LIBS)
+
+# When using gcc the assembly is inlined in the C-file (see jslock.c)
+ifeq ($(OS_ARCH),SunOS)
+ifneq ($(OS_TEST),i86pc)
+ifndef GNU_CC
+ASFILES = lock_$(OS_ARCH).s
+endif
+endif
+endif
+
+ifndef BUILD_OPT
+MOCHAFILE = 1
+endif
+
+ifndef NSBUILDROOT
+JSJAVA_STUBHEADERS = \
+ -I$(topsrcdir)/sun-java/include/_gen \
+ -I$(topsrcdir)/sun-java/netscape/javascript/_jri \
+ -I$(topsrcdir)/sun-java/netscape/security/_jri
+else
+JSJAVA_STUBHEADERS = -I$(JRI_GEN_DIR) -I$(JDK_GEN_DIR)
+endif
+
+JSJAVA_CFLAGS = \
+ -I$(topsrcdir)/sun-java/md-include \
+ -I$(topsrcdir)/sun-java/include \
+ $(JSJAVA_STUBHEADERS)
+
+# Define keyword generator before rules.mk, see bug 323979 comment 50
+
+HOST_SIMPLE_PROGRAMS += host_jskwgen$(HOST_BIN_SUFFIX)
+GARBAGE += jsautokw.h host_jskwgen$(HOST_BIN_SUFFIX)
+
+include $(topsrcdir)/config/rules.mk
+
+DEFINES += -DEXPORT_JS_API
+
+INCLUDES += -I$(srcdir)
+
+# MSVC '-Gy' cc flag and '/OPT:REF' linker flag cause JS_GetArgument and
+# JS_GetLocalVariable to be folded to the same address by the linker,
+# leading to a crash on startup. See bug 151066. So, in optimized builds,
+# add the /OPT:NOICF flag, which turns off 'identical COMDAT folding'.
+#
+# N.B.: 'identical COMDAT folding' that folds functions whose addresses
+# are taken violates the ISO C and C++ standards.
+ifndef MOZ_DEBUG
+ifeq (_WINNT,$(GNU_CC)_$(OS_ARCH))
+LDFLAGS += -OPT:NOICF
+endif
+endif
+
+GARBAGE += jscpucfg.o jsautocfg.h jsautocfg.tmp jscpucfg
+
+ifneq (,$(CROSS_COMPILE)$(filter-out WINNT,$(OS_ARCH)))
+TARGETS += jscpucfg$(HOST_BIN_SUFFIX)
+endif
+
+ifdef JS_SAFE_ARENA
+DEFINES += -DJS_USE_SAFE_ARENA
+endif
+
+ifdef JS_THREADSAFE
+DEFINES += -DJS_THREADSAFE
+endif
+
+ifdef JS_NO_THIN_LOCKS
+DEFINES += -DJS_USE_ONLY_NSPR_LOCKS
+endif
+
+ifdef JS_VERSION
+DEFINES += -DJS_VERSION=$(JS_VERSION)
+endif
+
+ifneq ($(findstring -L,$(NSPR_LIBS)),)
+NSPR_STATIC_PATH = $(subst -L,,$(findstring -L,$(NSPR_LIBS)))
+else
+NSPR_STATIC_PATH = $(DIST)/lib
+endif
+
+LDFLAGS += $(pathsubst -l%,$(NSPR_STATIC_PATH)/%.a,$(NSPR_LIBS))
+
+# BeOS and HP-UX do not require the extra linking of "-lm"
+ifeq (,$(filter BeOS HP-UX WINNT WINCE OpenVMS,$(OS_ARCH)))
+LDFLAGS += -lm
+endif
+
+# Prevent floating point errors caused by VC++ optimizations
+ifeq ($(OS_ARCH)_$(GNU_CC),WINNT_)
+ifeq (,$(filter-out 1200 1300 1310,$(_MSC_VER)))
+CFLAGS += -Op
+else
+CFLAGS += -fp:precise
+endif
+endif # WINNT
+
+ifeq ($(OS_ARCH),FreeBSD)
+LDFLAGS += -pthread
+endif
+ifeq ($(OS_ARCH),IRIX)
+ifdef USE_N32
+DASH_R += -n32
+endif
+endif
+ifeq ($(OS_ARCH),Linux)
+LDFLAGS += -ldl
+endif
+ifeq ($(OS_ARCH),OSF1)
+LDFLAGS += -lc_r
+endif
+ifeq ($(OS_ARCH),SunOS)
+ifeq ($(TARGET_CPU),sparc)
+
+ifdef JS_ULTRASPARC_OPTS
+DEFINES += -DULTRA_SPARC
+ifdef GNU_CC
+CFLAGS += -Wa,-xarch=v8plus,-DULTRA_SPARC,-P,-L,-D_ASM,-D__STDC__=0
+CXXFLAGS += -Wa,-xarch=v8plus,-DULTRA_SPARC,-P,-L,-D_ASM,-D__STDC__=0,-K,PIC
+else
+ASFLAGS += -xarch=v8plus -DULTRA_SPARC -P -L -D_ASM -D__STDC__=0 -K PIC
+endif # GNU_CC
+endif # JS_ULTRASPARC_OPTS
+
+endif
+ifeq ($(OS_RELEASE),4.1)
+LDFLAGS += -ldl -lnsl
+else
+LDFLAGS += -lposix4 -ldl -lnsl -lsocket
+endif
+endif
+
+ifeq ($(OS_ARCH),IRIX)
+ifndef GNU_CC
+_COMPILE_CFLAGS = $(patsubst -O%,-O1,$(COMPILE_CFLAGS))
+jsapi.o jsarena.o jsarray.o jsatom.o jsemit.o jsfun.o jsinterp.o jsregexp.o jsparse.o jsopcode.o jsscript.o: %.o: %.c Makefile.in
+ $(REPORT_BUILD)
+ @$(MAKE_DEPS_AUTO)
+ $(CC) -o $@ -c $(_COMPILE_CFLAGS) $<
+endif
+endif
+
+# An AIX optimization bug causes PR_dtoa() & JS_dtoa to produce a wrong result.
+# This suppresses optimization for this single compilation unit.
+ifeq ($(OS_ARCH),AIX)
+jsatom.o: jsatom.c Makefile.in
+ $(REPORT_BUILD)
+ @$(MAKE_DEPS_AUTO)
+ $(CC) -o $@ -c $(filter-out $(MOZ_OPTIMIZE_FLAGS), $(COMPILE_CFLAGS)) $<
+jsdtoa.o: jsdtoa.c Makefile.in
+ $(REPORT_BUILD)
+ @$(MAKE_DEPS_AUTO)
+ $(CC) -o $@ -c $(filter-out $(MOZ_OPTIMIZE_FLAGS), $(COMPILE_CFLAGS)) $<
+endif
+
+jsopcode.h jsopcode.c: jsopcode.tbl
+
+ifeq (,$(CROSS_COMPILE)$(filter-out WINNT,$(OS_ARCH)))
+jsautocfg.h:
+ touch $@
+else
+ifeq ($(OS_ARCH),WINCE)
+jsautocfg.h:
+ touch $@
+else
+jsautocfg.h: jscpucfg$(HOST_BIN_SUFFIX)
+ @rm -f $@ jsautocfg.tmp
+ ./jscpucfg > jsautocfg.tmp
+ mv jsautocfg.tmp $@
+endif
+endif
+
+# jscpucfg is a strange target
+# Needs to be built with the host compiler but needs to include
+# the mdcpucfg for the target so it needs the appropriate target defines
+ifdef HOST_NSPR_MDCPUCFG
+HOST_CC := $(HOST_CC) -DMDCPUCFG=$(TARGET_NSPR_MDCPUCFG)
+HOST_CFLAGS := $(patsubst -DXP_%,,$(HOST_CFLAGS))
+endif
+
+ifdef CROSS_COMPILE
+# jscpucfg needs to know when it's supposed to produce a config for the target
+JSCPUCFG_DEFINES = $(ACDEFINES)
+
+# This is incredibly hacky. Darwin NSPR uses the same MDCPUCFG for multiple
+# processors, and determines which processor to configure for based on
+# #ifdef i386. This macro is among the NSPR defines, but is also automatically
+# defined by the compiler when building for i386. It therefore needs to be
+# defined here if targeting i386, and explicitly undefined otherwise.
+ifeq ($(OS_ARCH),Darwin)
+ifeq ($(TARGET_CPU),powerpc)
+JSCPUCFG_DEFINES += -Ui386
+else
+JSCPUCFG_DEFINES += -Di386=1
+endif
+endif
+endif
+
+ifeq ($(OS_ARCH),QNX)
+ifneq ($(OS_TARGET),NTO)
+# QNX's compiler apparently can't build a binary directly from a source file.
+jscpucfg.o: jscpucfg.c Makefile.in
+ $(HOST_CC) $(HOST_CFLAGS) -c $(JSCPUCFG_DEFINES) $(DEFINES) $(NSPR_CFLAGS) -o $@ $<
+
+jscpucfg: jscpucfg.o
+ $(HOST_CC) $(HOST_CFLAGS) $(JSCPUCFG_DEFINES) $(DEFINES) -o $@ $<
+endif
+else
+ifeq ($(OS_ARCH),WINCE)
+jscpucfg$(HOST_BIN_SUFFIX):
+ echo no need to build jscpucfg $<
+else
+jscpucfg$(HOST_BIN_SUFFIX): jscpucfg.c Makefile.in
+ $(HOST_CC) $(HOST_CFLAGS) $(JSCPUCFG_DEFINES) $(DEFINES) $(NSPR_CFLAGS) $(OUTOPTION)$@ $<
+endif
+endif
+
+# Extra dependencies and rules for keyword switch code
+jsscan.$(OBJ_SUFFIX): jsautokw.h jskeyword.tbl
+
+host_jskwgen.$(OBJ_SUFFIX): jsconfig.h jskeyword.tbl
+
+jsautokw.h: host_jskwgen$(HOST_BIN_SUFFIX)
+ ./host_jskwgen$(HOST_BIN_SUFFIX) $@
diff --git a/third_party/js-1.7/Makefile.ref b/third_party/js-1.7/Makefile.ref
new file mode 100644
index 0000000..587ab86
--- /dev/null
+++ b/third_party/js-1.7/Makefile.ref
@@ -0,0 +1,375 @@
+# -*- Mode: makefile -*-
+# vim: ft=make
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Michael Ang <mang@subcarrier.org>
+# Kevin Buhr <buhr@stat.wisc.edu>
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either of the GNU General Public License Version 2 or later (the "GPL"),
+# or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# JSRef GNUmake makefile.
+#
+# Note: dependency rules are missing for some files (some
+# .h, all .msg, etc.) Re-make clean if in doubt.
+#
+
+
+DEPTH = .
+
+include config.mk
+
+#NS_USE_NATIVE = 1
+
+ifdef NARCISSUS
+DEFINES += -DNARCISSUS
+endif
+
+# Look in OBJDIR to find jsautocfg.h and jsautokw.h
+INCLUDES += -I$(OBJDIR)
+
+ifdef JS_THREADSAFE
+DEFINES += -DJS_THREADSAFE
+INCLUDES += -I$(DIST)/include/nspr
+ifdef USE_MSVC
+OTHER_LIBS += $(DIST)/lib/libnspr$(NSPR_LIBSUFFIX).lib
+else
+OTHER_LIBS += -L$(DIST)/lib -lnspr$(NSPR_LIBSUFFIX)
+endif
+endif
+
+ifdef JS_NO_THIN_LOCKS
+DEFINES += -DJS_USE_ONLY_NSPR_LOCKS
+endif
+
+ifdef JS_HAS_FILE_OBJECT
+DEFINES += -DJS_HAS_FILE_OBJECT
+endif
+
+#
+# XCFLAGS may be set in the environment or on the gmake command line
+#
+CFLAGS += $(OPTIMIZER) $(OS_CFLAGS) $(DEFINES) $(INCLUDES) $(XCFLAGS)
+
+LDFLAGS = $(XLDFLAGS)
+
+ifndef NO_LIBM
+LDFLAGS += -lm
+endif
+
+# Prevent floating point errors caused by VC++ optimizations
+ifeq ($(OS_ARCH),WINNT)
+_MSC_VER = $(shell $(CC) 2>&1 | sed -n 's/.*Compiler Version \([0-9]*\)\.\([0-9]*\).*/\1\2/p')
+ifeq (,$(filter-out 1200 1300 1310,$(_MSC_VER)))
+CFLAGS += -Op
+else
+CFLAGS += -fp:precise
+endif
+endif # WINNT
+
+#
+# Ask perl what flags it was built with, so we can build js with similar flags
+# and link properly. Viva gmake.
+#
+ifdef JS_PERLCONNECT
+DEFINES += -DPERLCONNECT -D_GNU_SOURCE
+
+PERLCFLAGS := $(shell perl -MExtUtils::Embed -e ccopts)
+PERLLDFLAGS := $(shell perl -MExtUtils::Embed -e ldopts)
+
+# perl erroneously reports compiler flag -rdynamic (interpreted by ld
+# as -r) when it really meant -export-dynamic.
+PERLLDFLAGS := $(subst -rdynamic,-export-dynamic,$(PERLLDFLAGS))
+
+CFLAGS += $(PERLCFLAGS)
+#LDFLAGS += $(PERLLDFLAGS) #PH removed this assignment
+INCLUDES += -I. #needed for perlconnect/jsperl.c
+endif
+
+#
+# Server-related changes :
+#
+ifdef NES40
+DEFINES += -DNES40
+endif
+
+#
+# Line editing support.
+# Define JS_READLINE or JS_EDITLINE to enable line editing in the
+# js command-line interpreter.
+#
+ifdef JS_READLINE
+# For those platforms with the readline library installed.
+DEFINES += -DEDITLINE
+PROG_LIBS += -lreadline -ltermcap
+else
+ifdef JS_EDITLINE
+# Use the editline library, built locally.
+PREDIRS += editline
+DEFINES += -DEDITLINE
+PROG_LIBS += editline/$(OBJDIR)/libedit.a
+endif
+endif
+
+# For purify
+PURE_CFLAGS = -DXP_UNIX $(OPTIMIZER) $(PURE_OS_CFLAGS) $(DEFINES) \
+ $(INCLUDES) $(XCFLAGS)
+
+#
+# JS file lists
+#
+JS_HFILES = \
+ jsarray.h \
+ jsatom.h \
+ jsbool.h \
+ jsconfig.h \
+ jscntxt.h \
+ jsdate.h \
+ jsemit.h \
+ jsexn.h \
+ jsfun.h \
+ jsgc.h \
+ jsinterp.h \
+ jsiter.h \
+ jslibmath.h \
+ jslock.h \
+ jsmath.h \
+ jsnum.h \
+ jsobj.h \
+ jsopcode.h \
+ jsparse.h \
+ jsarena.h \
+ jsclist.h \
+ jsdhash.h \
+ jsdtoa.h \
+ jshash.h \
+ jslong.h \
+ jsosdep.h \
+ jstypes.h \
+ jsprvtd.h \
+ jspubtd.h \
+ jsregexp.h \
+ jsscan.h \
+ jsscope.h \
+ jsscript.h \
+ jsstr.h \
+ jsxdrapi.h \
+ jsxml.h \
+ $(NULL)
+
+API_HFILES = \
+ jsapi.h \
+ jsdbgapi.h \
+ $(NULL)
+
+OTHER_HFILES = \
+ jsbit.h \
+ jscompat.h \
+ jscpucfg.h \
+ jsotypes.h \
+ jsstddef.h \
+ prmjtime.h \
+ resource.h \
+ jsopcode.tbl \
+ jsproto.tbl \
+ js.msg \
+ jsshell.msg \
+ jskeyword.tbl \
+ $(NULL)
+
+ifndef PREBUILT_CPUCFG
+OTHER_HFILES += $(OBJDIR)/jsautocfg.h
+endif
+OTHER_HFILES += $(OBJDIR)/jsautokw.h
+
+HFILES = $(JS_HFILES) $(API_HFILES) $(OTHER_HFILES)
+
+JS_CFILES = \
+ jsapi.c \
+ jsarena.c \
+ jsarray.c \
+ jsatom.c \
+ jsbool.c \
+ jscntxt.c \
+ jsdate.c \
+ jsdbgapi.c \
+ jsdhash.c \
+ jsdtoa.c \
+ jsemit.c \
+ jsexn.c \
+ jsfun.c \
+ jsgc.c \
+ jshash.c \
+ jsinterp.c \
+ jsiter.c \
+ jslock.c \
+ jslog2.c \
+ jslong.c \
+ jsmath.c \
+ jsnum.c \
+ jsobj.c \
+ jsopcode.c \
+ jsparse.c \
+ jsprf.c \
+ jsregexp.c \
+ jsscan.c \
+ jsscope.c \
+ jsscript.c \
+ jsstr.c \
+ jsutil.c \
+ jsxdrapi.c \
+ jsxml.c \
+ prmjtime.c \
+ $(NULL)
+
+ifdef JS_LIVECONNECT
+DIRS += liveconnect
+endif
+
+ifdef JS_PERLCONNECT
+JS_CFILES += perlconnect/jsperl.c
+endif
+
+ifdef JS_HAS_FILE_OBJECT
+JS_CFILES += jsfile.c
+JS_HFILES += jsfile.h
+endif
+
+LIB_CFILES = $(JS_CFILES)
+LIB_ASFILES := $(wildcard *_$(OS_ARCH).s)
+PROG_CFILES = js.c
+
+ifdef USE_MSVC
+LIBRARY = $(OBJDIR)/js32.lib
+SHARED_LIBRARY = $(OBJDIR)/js32.dll
+PROGRAM = $(OBJDIR)/js.exe
+else
+LIBRARY = $(OBJDIR)/libjs.a
+SHARED_LIBRARY = $(OBJDIR)/libjs.$(SO_SUFFIX)
+PROGRAM = $(OBJDIR)/js
+ifdef JS_PERLCONNECT
+PROG_LIBS += $(PERLLDFLAGS)
+endif
+endif
+
+include rules.mk
+
+MOZ_DEPTH = ../..
+include jsconfig.mk
+
+nsinstall-target:
+ cd ../../config; $(MAKE) OBJDIR=$(OBJDIR) OBJDIR_NAME=$(OBJDIR)
+
+#
+# Rules for keyword switch generation
+#
+
+GARBAGE += $(OBJDIR)/jsautokw.h $(OBJDIR)/jskwgen$(HOST_BIN_SUFFIX)
+GARBAGE += $(OBJDIR)/jskwgen.$(OBJ_SUFFIX)
+
+$(OBJDIR)/jsscan.$(OBJ_SUFFIX): $(OBJDIR)/jsautokw.h jskeyword.tbl
+
+$(OBJDIR)/jskwgen.$(OBJ_SUFFIX): jskwgen.c jskeyword.tbl
+
+$(OBJDIR)/jsautokw.h: $(OBJDIR)/jskwgen$(HOST_BIN_SUFFIX) jskeyword.tbl
+ $(OBJDIR)/jskwgen$(HOST_BIN_SUFFIX) $@
+
+ifdef USE_MSVC
+
+$(OBJDIR)/jskwgen.obj: jskwgen.c jskeyword.tbl
+ @$(MAKE_OBJDIR)
+ $(CC) -Fo$(OBJDIR)/ -c $(CFLAGS) $<
+
+$(OBJDIR)/jskwgen$(HOST_BIN_SUFFIX): $(OBJDIR)/jskwgen.$(OBJ_SUFFIX)
+ link.exe -out:"$@" $(EXE_LINK_FLAGS) $^
+
+else
+
+$(OBJDIR)/jskwgen.o: jskwgen.c jskeyword.tbl
+ @$(MAKE_OBJDIR)
+ $(CC) -o $@ -c $(CFLAGS) $<
+
+$(OBJDIR)/jskwgen$(HOST_BIN_SUFFIX): $(OBJDIR)/jskwgen.$(OBJ_SUFFIX)
+ $(CC) -o $@ $(CFLAGS) $(LDFLAGS) $^
+
+endif
+
+#
+# JS shell executable
+#
+
+ifdef USE_MSVC
+$(PROGRAM): $(PROG_OBJS) $(LIBRARY)
+ link.exe -out:"$@" $(EXE_LINK_FLAGS) $^
+else
+$(PROGRAM): $(PROG_OBJS) $(LIBRARY)
+ $(CC) -o $@ $(CFLAGS) $(PROG_OBJS) $(LIBRARY) $(LDFLAGS) $(OTHER_LIBS) \
+ $(PROG_LIBS)
+endif
+
+$(PROGRAM).pure: $(PROG_OBJS) $(LIBRARY)
+ purify $(PUREFLAGS) \
+ $(CC) -o $@ $(PURE_OS_CFLAGS) $(PROG_OBJS) $(LIBRARY) $(LDFLAGS) \
+ $(OTHER_LIBS) $(PROG_LIBS)
+
+ifndef PREBUILT_CPUCFG
+$(HFILES) $(CFILES): $(OBJDIR)/jsautocfg.h
+
+$(OBJDIR)/jsautocfg.h: $(OBJDIR)/jscpucfg
+ rm -f $@
+ $(OBJDIR)/jscpucfg > $@
+
+$(OBJDIR)/jscpucfg: $(OBJDIR)/jscpucfg.o
+ $(CC) -o $@ $(OBJDIR)/jscpucfg.o
+
+# Add to TARGETS for clobber rule
+TARGETS += $(OBJDIR)/jsautocfg.h $(OBJDIR)/jscpucfg \
+ $(OBJDIR)/jscpucfg.o
+endif
+
+#
+# Hardwire dependencies on jsopcode.tbl
+#
+jsopcode.h jsopcode.c: jsopcode.tbl
+
+-include $(DEPENDENCIES)
+
+TARNAME = jsref.tar
+TARFILES = files `cat files`
+
+SUFFIXES: .i
+%.i: %.c
+ $(CC) -C -E $(CFLAGS) $< > $*.i
diff --git a/third_party/js-1.7/README.html b/third_party/js-1.7/README.html
new file mode 100644
index 0000000..b2942e3
--- /dev/null
+++ b/third_party/js-1.7/README.html
@@ -0,0 +1,826 @@
+<!-- ***** BEGIN LICENSE BLOCK *****
+ - Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ -
+ - The contents of this file are subject to the Mozilla Public License Version
+ - 1.1 (the "License"); you may not use this file except in compliance with
+ - the License. You may obtain a copy of the License at
+ - http://www.mozilla.org/MPL/
+ -
+ - Software distributed under the License is distributed on an "AS IS" basis,
+ - WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ - for the specific language governing rights and limitations under the
+ - License.
+ -
+ - The Original Code is Mozilla Communicator client code, released
+ - March 31, 1998.
+ -
+ - The Initial Developer of the Original Code is
+ - Netscape Communications Corporation.
+ - Portions created by the Initial Developer are Copyright (C) 1998-1999
+ - the Initial Developer. All Rights Reserved.
+ -
+ - Contributor(s):
+ -
+ - Alternatively, the contents of this file may be used under the terms of
+ - either of the GNU General Public License Version 2 or later (the "GPL"),
+ - or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ - in which case the provisions of the GPL or the LGPL are applicable instead
+ - of those above. If you wish to allow use of your version of this file only
+ - under the terms of either the GPL or the LGPL, and not to allow others to
+ - use your version of this file under the terms of the MPL, indicate your
+ - decision by deleting the provisions above and replace them with the notice
+ - and other provisions required by the GPL or the LGPL. If you do not delete
+ - the provisions above, a recipient may use your version of this file under
+ - the terms of any one of the MPL, the GPL or the LGPL.
+ -
+ - ***** END LICENSE BLOCK ***** -->
+<!doctype html public "-//w3c//dtd html 4.0 transitional//en">
+<html>
+<head>
+ <meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
+ <meta name="GENERATOR" content="Mozilla/4.5 [en] (WinNT; I) [Netscape]">
+ <title>JavaScript Reference Implementation (JSRef) README</title>
+</head>
+<body>
+
+<h2>
+Table of Contents</h2>
+
+<ul>
+<li>
+<a href="#Introduction">Introduction</a></li>
+
+<li>
+<a href="#Build">Build conventions (standalone JS engine and shell)</a></li>
+
+<li>
+<a href="#Debugging">Debugging notes</a></li>
+
+<li>
+<a href="#Conventions">Naming and coding conventions</a></li>
+
+<li>
+<a href="#JSAPI">Using the JS API</a></li>
+
+<li>
+<a href="#Design">Design walk-through</a></li>
+
+<li>
+<a href="#Resources">Additional Resources (links, API docs, and newsgroups)</a></li>
+
+</ul>
+
+<h2>
+<a NAME="Introduction"></a>Introduction</h2>
+This is the README file for the&nbsp;<span CLASS=LXRSHORTDESC>JavaScript
+Reference (JSRef, now better known as SpiderMonkey) implementation.</span>
+It consists of build conventions
+and instructions, source code conventions, a design walk-through, and a
+brief file-by-file description of the source.
+<p><span CLASS=LXRLONGDESC>JSRef builds a library or DLL containing the
+JavaScript runtime (compiler, interpreter, decompiler, garbage collector,
+atom manager, standard classes). It then compiles a small "shell" program
+and links that with the library to make an interpreter that can be used
+interactively and with test .js files to run scripts.&nbsp; The code has
+no dependencies on the rest of the Mozilla codebase.</span>
+<p><i>Quick start tip</i>: skip to "Using the JS API" below, build the
+js shell, and play with the object named "it" (start by setting 'it.noisy
+= true').
+<h2>
+<a NAME="Build"></a>Build conventions (standalone JS engine and shell)
+(OUT OF DATE!)</h2>
+These build directions refer only to building the standalone JavaScript
+engine and shell.&nbsp; To build within the browser, refer to the <a
+href="http://www.mozilla.org/build/">build
+directions</a> on the mozilla.org website.
+<p>By default, all platforms build a version of the JS engine that is <i>not</i>
+threadsafe.&nbsp; If you require thread-safety, you must also populate
+the <tt>mozilla/dist</tt> directory with <a href="http://www.mozilla.org/projects/nspr/reference/html/"
+>NSPR</a>
+headers and libraries.&nbsp; (NSPR implements a portable threading library,
+among other things.&nbsp; The source is downloadable via <a href="http://www.mozilla.org/cvs.html">CVS</a>
+from <tt><a href="http://lxr.mozilla.org/mozilla/source/nsprpub">mozilla/nsprpub</a></tt>.)&nbsp;
+Next, you must define <tt>JS_THREADSAFE</tt> when building the JS engine,
+either on the command-line (gmake/nmake) or in a universal header file.
+<h3>
+Windows</h3>
+
+<ul>
+<li>
+Use MSVC 4.2 or 5.0.</li>
+
+<li>
+For building from the IDE use <tt>js/src/js.mdp</tt>.&nbsp; (<tt>js.mdp</tt>
+is an MSVC4.2 project file, but if you load it into MSVC5, it will be converted
+to the newer project file format.)&nbsp; <font color="#CC0000">NOTE: makefile.win
+is an nmake file used only for building the JS-engine in the Mozilla browser.&nbsp;
+Don't attempt to use it to build the standalone JS-engine.</font></li>
+
+<li>
+If you prefer to build from the command-line, use '<tt>nmake -f js.mak</tt>'</li>
+
+<li>
+Executable shell <tt>js.exe</tt> and runtime library <tt>js32.dll</tt>
+are created in either <tt>js/src/Debug</tt> or <tt>js/src/Release</tt>.</li>
+</ul>
+
+<h3>
+Macintosh</h3>
+
+<ul>
+<li>
+Use CodeWarrior 3.x</li>
+
+<li>
+Load the project file <tt>js:src:macbuild:JSRef.mcp </tt>and select "Make"
+from the menu.</li>
+</ul>
+
+<h3>
+Unix</h3>
+
+<ul>
+<li>
+Use '<tt>gmake -f Makefile.ref</tt>' to build. To compile optimized code,
+pass <tt>BUILD_OPT=1</tt> on the gmake command line or preset it in the
+environment or <tt>Makefile.ref</tt>.&nbsp; <font color="#CC0000">NOTE:
+Do not attempt to use Makefile to build the standalone JavaScript engine.&nbsp;
+This file is used only for building the JS-engine in the Mozilla browser.</font></li>
+
+<li>
+<font color="#000000">Each platform on which JS is built must have a <tt>*.mk</tt>
+configuration file in the <tt>js/src/config</tt> directory.&nbsp; The configuration
+file specifies the compiler/linker to be used and allows for customization
+of command-line options.&nbsp; To date, the build system has been tested
+on Solaris, AIX, HP/UX, OSF, IRIX, x86 Linux and Windows NT.</font></li>
+
+<li>
+<font color="#000000">Most platforms will work with either the vendor compiler
+</font>or
+<a href="ftp://prep.ai.mit.edu/pub/gnu">gcc</a>.&nbsp;
+(Except that HP builds only work using the native compiler.&nbsp; gcc won't
+link correctly with shared libraries on that platform.&nbsp; If someone
+knows a way to fix this, <a href="mailto:wynholds@netscape.com">let us
+know</a>.)</li>
+
+<li>
+<font color="#000000">If you define <tt>JS_LIVECONNECT</tt>, gmake will
+descend into the liveconnect directory and build
+<a href="http://lxr.mozilla.org/mozilla/source/js/src/liveconnect/README.html">LiveConnect</a>
+after building the JS engine.</font></li>
+
+<li>
+To build a binary drop (a zip'ed up file of headers, libraries, binaries),
+check out <tt>mozilla/config</tt> and <tt>mozilla/nsprpub/config</tt>.&nbsp;
+Use '<tt>gmake -f Makefile.ref nsinstall-target all export ship</tt>'</li>
+</ul>
+
+<h2>
+<a NAME="Debugging"></a>Debugging notes</h2>
+
+<ul>
+<li>
+To turn on GC instrumentation, define <tt>JS_GCMETER</tt>.</li>
+
+<ul>
+<li>
+To turn on GC mark-phase debugging, useful to find leaked objects by their
+address, and to dump the GC heap, define <tt>GC_MARK_DEBUG</tt>.
+See the code in jsgc.c around the declaration and use of
+<tt>js_LiveThingToFind</tt>.</li>
+
+<li>
+To turn on the arena package's instrumentation, define <tt>JS_ARENAMETER</tt>.</li>
+
+<li>
+To turn on the hash table package's metering, define <tt>JS_HASHMETER</tt>.</li>
+</ul>
+</ul>
+
+<h2>
+<a NAME="Conventions"></a>Naming and coding conventions</h2>
+
+<ul>
+<li>
+Public function names begin with <tt>JS_</tt> followed by capitalized "intercaps",
+e.g. <tt>JS_NewObject</tt>.</li>
+
+<li>
+Extern but library-private function names use a <tt>js_</tt> prefix and
+mixed case, e.g. <tt>js_SearchScope</tt>.</li>
+
+<li>
+Most static function names have unprefixed, mixed-case names: <tt>GetChar</tt>.</li>
+
+<li>
+But static native methods of JS objects have lowercase, underscore-separated
+or intercaps names, e.g., <tt>str_indexOf</tt>.</li>
+
+<li>
+And library-private and static data use underscores, not intercaps (but
+library-private data do use a <tt>js_</tt> prefix).</li>
+
+<li>
+Scalar type names are lowercase and js-prefixed: <tt>jsdouble</tt>.</li>
+
+<li>
+Aggregate type names are JS-prefixed and mixed-case: <tt>JSObject.</tt></li>
+
+<li>
+Macros are generally <tt>ALL_CAPS </tt>and underscored, to call out potential
+side effects, multiple uses of a formal argument, etc.</li>
+
+<li>
+Four spaces of indentation per statement nesting level.</li>
+
+<li>
+Tabs are taken to be eight spaces, and an Emacs magic comment at the top
+of each file tries to help. If you're using MSVC or similar, you'll want
+to set tab width to 8, and help convert these files to be space-filled.
+<font color="#CC0000">Do not add hard tabs to source files; do remove them
+whenever possible.</font></li>
+
+<li>
+DLL entry points have their return type expanded within a <tt>JS_PUBLIC_API()</tt>
+macro call, to get the right Windows secret type qualifiers in the right
+places for all build variants.</li>
+
+<li>
+Callback functions that might be called from a DLL are similarly macroized
+with <tt>JS_STATIC_DLL_CALLBACK</tt> (if the function otherwise would be
+static to hide its name) or <tt>JS_DLL_CALLBACK</tt> (this macro takes
+no type argument; it should be used after the return type and before the
+function name).  A short declaration sketch follows this list.</li>
+</ul>
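+
+<p>For illustration only, here is a sketch of how these macros appear in
+declarations.  The <tt>JS_NewObject</tt> line follows its <tt>jsapi.h</tt>
+declaration; the two callbacks below it are hypothetical examples, not engine
+entry points.
+<pre><tt>    /* Public entry point: the return type is wrapped in JS_PUBLIC_API(). */
+    extern JS_PUBLIC_API(JSObject *)
+    JS_NewObject(JSContext *cx, JSClass *clasp, JSObject *proto, JSObject *parent);
+
+    /* Library-private static callback: JS_STATIC_DLL_CALLBACK() wraps the
+       return type, much as JS_PUBLIC_API() does for exported functions. */
+    JS_STATIC_DLL_CALLBACK(JSBool)
+    my_private_callback(JSContext *cx, JSScript *script);
+
+    /* Non-static callback: JS_DLL_CALLBACK takes no type argument and sits
+       between the return type and the function name. */
+    JSBool JS_DLL_CALLBACK
+    my_exported_callback(JSContext *cx, JSScript *script);</tt></pre>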
+
+<h2>
+<a NAME="JSAPI"></a>Using the JS API</h2>
+
+<h4>
+Starting up</h4>
+
+<pre><tt>&nbsp;&nbsp;&nbsp; /*
+&nbsp;&nbsp;&nbsp;&nbsp; * Tune this to avoid wasting space for shallow stacks, while saving on
+&nbsp;&nbsp;&nbsp;&nbsp; * malloc overhead/fragmentation for deep or highly-variable stacks.
+&nbsp;&nbsp;&nbsp;&nbsp; */
+&nbsp;&nbsp;&nbsp; #define STACK_CHUNK_SIZE&nbsp;&nbsp;&nbsp; 8192
+
+&nbsp;&nbsp;&nbsp; JSRuntime *rt;
+&nbsp;&nbsp;&nbsp; JSContext *cx;
+
+&nbsp;&nbsp;&nbsp; /* You need a runtime and one or more contexts to do anything with JS. */
+&nbsp;&nbsp;&nbsp; rt = JS_NewRuntime(0x400000L);
+&nbsp;&nbsp;&nbsp; if (!rt)
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; fail("can't create JavaScript runtime");
+&nbsp;&nbsp;&nbsp; cx = JS_NewContext(rt, STACK_CHUNK_SIZE);
+&nbsp;&nbsp;&nbsp; if (!cx)
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; fail("can't create JavaScript context");
+
+&nbsp;&nbsp;&nbsp; /*
+&nbsp;&nbsp;&nbsp;&nbsp; * The context definitely wants a global object, in order to have standard
+&nbsp;&nbsp;&nbsp;&nbsp; * classes and functions like Date and parseInt.&nbsp; See below for details on
+&nbsp;&nbsp;&nbsp;&nbsp; * JS_NewObject.
+&nbsp;&nbsp;&nbsp;&nbsp; */
+&nbsp;&nbsp;&nbsp; JSObject *globalObj;
+
+&nbsp;&nbsp;&nbsp; globalObj = JS_NewObject(cx, &amp;my_global_class, 0, 0);
+&nbsp;&nbsp;&nbsp; JS_InitStandardClasses(cx, globalObj);</tt></pre>
+
+<h4>
+Defining objects and properties</h4>
+
+<pre><tt>&nbsp;&nbsp;&nbsp; /* Statically initialize a class to make "one-off" objects. */
+&nbsp;&nbsp;&nbsp; JSClass my_class = {
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; "MyClass",
+
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; /* All of these can be replaced with the corresponding JS_*Stub
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; function pointers. */
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; my_addProperty, my_delProperty, my_getProperty, my_setProperty,
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; my_enumerate,&nbsp;&nbsp; my_resolve,&nbsp;&nbsp;&nbsp;&nbsp; my_convert,&nbsp;&nbsp;&nbsp;&nbsp; my_finalize
+&nbsp;&nbsp;&nbsp; };
+
+&nbsp;&nbsp;&nbsp; JSObject *obj;
+
+&nbsp;&nbsp;&nbsp; /*
+&nbsp;&nbsp;&nbsp;&nbsp; * Define an object named in the global scope that can be enumerated by
+&nbsp;&nbsp;&nbsp;&nbsp; * for/in loops.&nbsp; The parent object is passed as the second argument, as
+&nbsp;&nbsp;&nbsp;&nbsp; * with all other API calls that take an object/name pair.&nbsp; The prototype
+&nbsp;&nbsp;&nbsp;&nbsp; * passed in is null, so the default object prototype will be used.
+&nbsp;&nbsp;&nbsp;&nbsp; */
+&nbsp;&nbsp;&nbsp; obj = JS_DefineObject(cx, globalObj, "myObject", &amp;my_class, NULL,
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; JSPROP_ENUMERATE);
+
+&nbsp;&nbsp;&nbsp; /*
+&nbsp;&nbsp;&nbsp;&nbsp; * Define a bunch of properties with a JSPropertySpec array statically
+&nbsp;&nbsp;&nbsp;&nbsp; * initialized and terminated with a null-name entry.&nbsp; Besides its name,
+&nbsp;&nbsp;&nbsp;&nbsp; * each property has a "tiny" identifier (MY_COLOR, e.g.) that can be used
+&nbsp;&nbsp;&nbsp;&nbsp; * in switch statements (in a common my_getProperty function, for example).
+&nbsp;&nbsp;&nbsp;&nbsp; */
+&nbsp;&nbsp;&nbsp; enum my_tinyid {
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; MY_COLOR, MY_HEIGHT, MY_WIDTH, MY_FUNNY, MY_ARRAY, MY_RDONLY
+&nbsp;&nbsp;&nbsp; };
+
+&nbsp;&nbsp;&nbsp; static JSPropertySpec my_props[] = {
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; {"color",&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; MY_COLOR,&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; JSPROP_ENUMERATE},
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; {"height",&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; MY_HEIGHT,&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; JSPROP_ENUMERATE},
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; {"width",&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; MY_WIDTH,&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; JSPROP_ENUMERATE},
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; {"funny",&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; MY_FUNNY,&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; JSPROP_ENUMERATE},
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; {"array",&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; MY_ARRAY,&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; JSPROP_ENUMERATE},
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; {"rdonly",&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; MY_RDONLY,&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; JSPROP_READONLY},
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; {0}
+&nbsp;&nbsp;&nbsp; };
+
+&nbsp;&nbsp;&nbsp; JS_DefineProperties(cx, obj, my_props);
+
+&nbsp;&nbsp;&nbsp; /*
+&nbsp;&nbsp;&nbsp;&nbsp; * Given the above definitions and call to JS_DefineProperties, obj will
+&nbsp;&nbsp;&nbsp;&nbsp; * need this sort of "getter" method in its class (my_class, above).&nbsp; See
+&nbsp;&nbsp;&nbsp;&nbsp; * the example for the "It" class in js.c.
+&nbsp;&nbsp;&nbsp;&nbsp; */
+&nbsp;&nbsp;&nbsp; static JSBool
+&nbsp;&nbsp;&nbsp; my_getProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+&nbsp;&nbsp;&nbsp; {
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; if (JSVAL_IS_INT(id)) {
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; switch (JSVAL_TO_INT(id)) {
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; case MY_COLOR:&nbsp; *vp = . . .; break;
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; case MY_HEIGHT: *vp = . . .; break;
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; case MY_WIDTH:&nbsp; *vp = . . .; break;
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; case MY_FUNNY:&nbsp; *vp = . . .; break;
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; case MY_ARRAY:&nbsp; *vp = . . .; break;
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; case MY_RDONLY: *vp = . . .; break;
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; }
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; }
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; return JS_TRUE;
+&nbsp;&nbsp;&nbsp; }</tt></pre>
+
+<h4>
+Defining functions</h4>
+
+<pre><tt>&nbsp;&nbsp;&nbsp; /* Define a bunch of native functions first: */
+&nbsp;&nbsp;&nbsp; static JSBool
+&nbsp;&nbsp;&nbsp; my_abs(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+&nbsp;&nbsp;&nbsp; {
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; jsdouble x, z;
+
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; if (!JS_ValueToNumber(cx, argv[0], &amp;x))
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; return JS_FALSE;
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; z = (x &lt; 0) ? -x : x;
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; return JS_NewDoubleValue(cx, z, rval);
+&nbsp;&nbsp;&nbsp; }
+
+&nbsp;&nbsp;&nbsp; . . .
+
+&nbsp;&nbsp;&nbsp; /*
+&nbsp;&nbsp;&nbsp;&nbsp; * Use a JSFunctionSpec array terminated with a null name to define a
+&nbsp;&nbsp;&nbsp;&nbsp; * bunch of native functions.
+&nbsp;&nbsp;&nbsp;&nbsp; */
+&nbsp;&nbsp;&nbsp; static JSFunctionSpec my_functions[] = {
+&nbsp;&nbsp;&nbsp; /*&nbsp;&nbsp;&nbsp; name&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; native&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; nargs&nbsp;&nbsp;&nbsp; */
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; {"abs",&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; my_abs,&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; 1},
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; {"acos",&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; my_acos,&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; 1},
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; {"asin",&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; my_asin,&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; 1},
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; . . .
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; {0}
+&nbsp;&nbsp;&nbsp; };
+
+&nbsp;&nbsp;&nbsp; /*
+&nbsp;&nbsp;&nbsp;&nbsp; * Pass a particular object to define methods for it alone.&nbsp; If you pass
+&nbsp;&nbsp;&nbsp;&nbsp; * a prototype object, the methods will apply to all instances past and
+&nbsp;&nbsp;&nbsp;&nbsp; * future of the prototype's class (see below for classes).
+&nbsp;&nbsp;&nbsp;&nbsp; */
+&nbsp;&nbsp;&nbsp; JS_DefineFunctions(cx, globalObj, my_functions);</tt></pre>
+
+<h4>
+Defining classes</h4>
+
+<pre><tt>&nbsp;&nbsp;&nbsp; /*
+&nbsp;&nbsp;&nbsp;&nbsp; * This pulls together the above API elements by defining a constructor
+&nbsp;&nbsp;&nbsp;&nbsp; * function, a prototype object, and properties of the prototype and of
+&nbsp;&nbsp;&nbsp;&nbsp; * the constructor, all with one API call.
+&nbsp;&nbsp;&nbsp;&nbsp; *
+&nbsp;&nbsp;&nbsp;&nbsp; * Initialize a class by defining its constructor function, prototype, and
+&nbsp;&nbsp;&nbsp;&nbsp; * per-instance and per-class properties.&nbsp; The latter are called "static"
+&nbsp;&nbsp;&nbsp;&nbsp; * below by analogy to Java.&nbsp; They are defined in the constructor object's
+&nbsp;&nbsp;&nbsp;&nbsp; * scope, so that 'MyClass.myStaticProp' works along with 'new MyClass()'.
+&nbsp;&nbsp;&nbsp;&nbsp; *
+&nbsp;&nbsp;&nbsp;&nbsp; * JS_InitClass takes a lot of arguments, but you can pass null for any of
+&nbsp;&nbsp;&nbsp;&nbsp; * the last four if there are no such properties or methods.
+&nbsp;&nbsp;&nbsp;&nbsp; *
+&nbsp;&nbsp;&nbsp;&nbsp; * Note that you do not need to call JS_InitClass to make a new instance of
+&nbsp;&nbsp;&nbsp;&nbsp; * that class -- otherwise there would be a chicken-and-egg problem making
+&nbsp;&nbsp;&nbsp;&nbsp; * the global object -- but you should call JS_InitClass if you require a
+&nbsp;&nbsp;&nbsp;&nbsp; * constructor function for script authors to call via new, and/or a class
+&nbsp;&nbsp;&nbsp;&nbsp; * prototype object ('MyClass.prototype') for authors to extend with new
+&nbsp;&nbsp;&nbsp;&nbsp; * properties at run-time. In general, if you want to support multiple
+&nbsp;&nbsp;&nbsp;&nbsp; * instances that share behavior, use JS_InitClass.
+&nbsp;&nbsp;&nbsp;&nbsp; */
+&nbsp;&nbsp;&nbsp; protoObj = JS_InitClass(cx, globalObj, NULL, &amp;my_class,
+
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; /* native constructor function and min arg count */
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; MyClass, 0,
+
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; /* prototype object properties and methods -- these
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; will be "inherited" by all instances through
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; delegation up the instance's prototype link. */
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; my_props, my_methods,
+
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; /* class constructor properties and methods */
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; my_static_props, my_static_methods);</tt></pre>
+
+<h4>
+Running scripts</h4>
+
+<pre><tt>&nbsp;&nbsp;&nbsp; /* These should indicate source location for diagnostics. */
+&nbsp;&nbsp;&nbsp; char *filename;
+&nbsp;&nbsp;&nbsp; uintN lineno;
+
+&nbsp;&nbsp;&nbsp; /*
+&nbsp;&nbsp;&nbsp;&nbsp; * The return value comes back here -- if it could be a GC thing, you must
+&nbsp;&nbsp;&nbsp;&nbsp; * add it to the GC's "root set" with JS_AddRoot(cx, &amp;thing) where thing
+&nbsp;&nbsp;&nbsp;&nbsp; * is a JSString *, JSObject *, or jsdouble *, and remove the root before
+&nbsp;&nbsp;&nbsp;&nbsp; * rval goes out of scope, or when rval is no longer needed.
+&nbsp;&nbsp;&nbsp;&nbsp; */
+&nbsp;&nbsp;&nbsp; jsval rval;
+&nbsp;&nbsp;&nbsp; JSBool ok;
+
+&nbsp;&nbsp;&nbsp; /*
+&nbsp;&nbsp;&nbsp;&nbsp; * Some example source in a C string.&nbsp; Larger, non-null-terminated buffers
+&nbsp;&nbsp;&nbsp;&nbsp; * can be used, if you pass the buffer length to JS_EvaluateScript.
+&nbsp;&nbsp;&nbsp;&nbsp; */
+&nbsp;&nbsp;&nbsp; char *source = "x * f(y)";
+
+&nbsp;&nbsp;&nbsp; ok = JS_EvaluateScript(cx, globalObj, source, strlen(source),
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; filename, lineno, &amp;rval);
+
+&nbsp;&nbsp;&nbsp; if (ok) {
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; /* Should get a number back from the example source. */
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; jsdouble d;
+
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; ok = JS_ValueToNumber(cx, rval, &amp;d);
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; . . .
+&nbsp;&nbsp;&nbsp; }</tt></pre>
+
+<h4>
+Calling functions</h4>
+
+<pre><tt>&nbsp;&nbsp;&nbsp; /* Call a global function named "foo" that takes no arguments. */
+&nbsp;&nbsp;&nbsp; ok = JS_CallFunctionName(cx, globalObj, "foo", 0, 0, &amp;rval);
+
+&nbsp;&nbsp;&nbsp; jsval argv[2];
+
+&nbsp;&nbsp;&nbsp; /* Call a function in obj's scope named "method", passing two arguments. */
+&nbsp;&nbsp;&nbsp; argv[0] = . . .;
+&nbsp;&nbsp;&nbsp; argv[1] = . . .;
+&nbsp;&nbsp;&nbsp; ok = JS_CallFunctionName(cx, obj, "method", 2, argv, &amp;rval);</tt></pre>
+
+<h4>
+Shutting down</h4>
+
+<pre><tt>&nbsp;&nbsp;&nbsp; /* For each context you've created: */
+&nbsp;&nbsp;&nbsp; JS_DestroyContext(cx);
+
+&nbsp;&nbsp;&nbsp; /* For each runtime: */
+&nbsp;&nbsp;&nbsp; JS_DestroyRuntime(rt);
+
+&nbsp;&nbsp;&nbsp; /* And finally: */
+&nbsp;&nbsp;&nbsp; JS_ShutDown();</tt></pre>
+
+<h4>
+Debugging API</h4>
+See the <tt>trap, untrap, watch, unwatch, line2pc</tt>, and <tt>pc2line</tt>
+commands in <tt>js.c</tt>, and the (scant) comments in <i>jsdbgapi.h</i>.
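+<p>As a rough C-level illustration of the line/PC mapping (a sketch, not code
+from <tt>js.c</tt>), the <tt>JS_LineNumberToPC</tt> and <tt>JS_PCToLineNumber</tt>
+entry points declared in <tt>jsdbgapi.h</tt> can be used like this, assuming a
+script obtained from <tt>JS_CompileScript</tt> or a similar call:
+<pre><tt>    #include &lt;stdio.h>
+    #include "jsapi.h"
+    #include "jsdbgapi.h"
+
+    static void
+    show_line_pc_mapping(JSContext *cx, JSScript *script, uintN line)
+    {
+        jsbytecode *pc;
+        uintN back;
+
+        /* Map the requested line to a PC, then map that PC back to a line. */
+        pc = JS_LineNumberToPC(cx, script, line);
+        back = JS_PCToLineNumber(cx, script, pc);
+        printf("line %u => pc %p => line %u\n",
+               (unsigned) line, (void *) pc, (unsigned) back);
+    }</tt></pre>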
+<h2>
+<a NAME="Design"></a>Design walk-through</h2>
+This section must be brief for now -- it could easily turn into a book.
+<h4>
+JS "JavaScript Proper"</h4>
+JS modules declare and implement the JavaScript compiler, interpreter,
+decompiler, GC and atom manager, and standard classes.
+<p>JavaScript uses untyped bytecode and runtime type tagging of data values.
+The <tt>jsval</tt> type is a signed machine word that contains either a
+signed integer value (if the low bit is set), or a type-tagged pointer
+or boolean value (if the low bit is clear). Tagged pointers all refer to
+8-byte-aligned things in the GC heap.
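+<p>For example (an illustrative sketch only, using the public <tt>jsapi.h</tt>
+macros rather than inspecting the tag bits directly), a native method can
+dispatch on a value's runtime type like this:
+<pre><tt>    #include "jsapi.h"
+
+    static const char *
+    describe_jsval(jsval v)
+    {
+        if (JSVAL_IS_INT(v))
+            return "small integer (low bit set)";
+        if (JSVAL_IS_DOUBLE(v))
+            return "GC-heap double";
+        if (JSVAL_IS_STRING(v))
+            return "GC-heap string";
+        if (JSVAL_IS_BOOLEAN(v))
+            return "boolean";
+        if (JSVAL_IS_OBJECT(v))      /* JSVAL_NULL also carries the object tag */
+            return "object or null";
+        return "other";
+    }</tt></pre>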
+<p>Objects consist of a possibly shared structural description, called
+the map or scope; and unshared property values in a vector, called the
+slots. Object properties are associated with nonnegative integers stored
+in <tt>jsval</tt>'s, or with atoms (unique string descriptors) if named
+by an identifier or a non-integral index expression.
+<p>Scripts contain bytecode, source annotations, and a pool of string,
+number, and identifier literals. Functions are objects that extend scripts
+or native functions with formal parameters, a literal syntax, and a distinct
+primitive type ("function").
+<p>The compiler consists of a recursive-descent parser and a random-logic
+rather than table-driven lexical scanner. Semantic and lexical feedback
+are used to disambiguate hard cases such as missing semicolons, assignable
+expressions ("lvalues" in C parlance), etc. The parser generates bytecode
+as it parses, using fixup lists for downward branches and code buffering
+and rewriting for exceptional cases such as for loops. It attempts no error
+recovery. The interpreter executes the bytecode of top-level scripts, and
+calls itself indirectly to interpret function bodies (which are also scripts).
+All state associated with an interpreter instance is passed through formal
+parameters to the interpreter entry point; most implicit state is collected
+in a type named JSContext. Therefore, all API and almost all other functions
+in JSRef take a JSContext pointer as their first argument.
+<p>The decompiler translates postfix bytecode into infix source by consulting
+a separate byte-sized code, called source notes, to disambiguate bytecodes
+that result from more than one grammatical production.
+<p>The GC is a mark-and-sweep, non-conservative (exact) collector. It
+can allocate only fixed-sized things -- the current size is two machine
+words. It is used to hold JS object and string descriptors (but not property
+lists or string bytes), and double-precision floating point numbers. It
+runs automatically only when maxbytes (as passed to <tt>JS_NewRuntime()</tt>)
+bytes of GC things have been allocated and another thing-allocation request
+is made. JS API users should call <tt>JS_GC()</tt> or <tt>JS_MaybeGC()</tt>
+between script executions or from the branch callback, as often as necessary.
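+<p>A minimal sketch of the branch-callback approach (assuming the
+<tt>JS_SetBranchCallback</tt> entry point in <tt>jsapi.h</tt>; the callback
+name is arbitrary):
+<pre><tt>    /* The engine invokes this periodically while a script runs. */
+    static JSBool
+    my_branch_callback(JSContext *cx, JSScript *script)
+    {
+        JS_MaybeGC(cx);     /* collects only if the GC heap is nearly full */
+        return JS_TRUE;     /* returning JS_FALSE aborts the running script */
+    }
+
+    /* Install it once per context, e.g. right after JS_NewContext: */
+    JS_SetBranchCallback(cx, my_branch_callback);</tt></pre>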
+<p>An important point about the GC's "exactness": you must add roots for
+new objects created by your native methods if you store references to them
+into a non-JS structure in the malloc heap or in static data. Also, if
+you make a new object in a native method, but do not store it through the
+<tt>rval</tt>
+result parameter (see <tt>my_abs</tt> in the "Using the JS API" section above)
+so that it is in a known root, the object is guaranteed to survive only
+until another new object is created. Either lock the first new object when
+making two in a row, or store it in a root you've added, or store it via
+rval.
+See the <a href="http://www.mozilla.org/js/spidermonkey/gctips.html">GC tips</a>
+document for more.
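+<p>A sketch of the rooting pattern (the <tt>my_wrapper</tt> structure and the
+function names are hypothetical; <tt>JS_AddRoot</tt>/<tt>JS_RemoveRoot</tt> and
+<tt>my_class</tt> are as used above):
+<pre><tt>    /* A reference held in the malloc heap, invisible to the GC by itself. */
+    struct my_wrapper {
+        JSObject *obj;
+    };
+
+    static JSBool
+    my_wrapper_init(JSContext *cx, struct my_wrapper *w)
+    {
+        w->obj = JS_NewObject(cx, &amp;my_class, NULL, NULL);
+        if (!w->obj)
+            return JS_FALSE;
+        /* Root the slot so the object survives until we unroot it. */
+        return JS_AddRoot(cx, &amp;w->obj);
+    }
+
+    static void
+    my_wrapper_finish(JSContext *cx, struct my_wrapper *w)
+    {
+        JS_RemoveRoot(cx, &amp;w->obj);
+        w->obj = NULL;
+    }</tt></pre>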
+<p>The atom manager consists of a hash table associating strings uniquely
+with scanner/parser information such as keyword type, index in script or
+function literal pool, etc. Atoms play three roles in JSRef: as literals
+referred to by unaligned 16-bit immediate bytecode operands, as unique
+string descriptors for efficient property name hashing, and as members
+of the root GC set for exact GC.
+<p>Native objects and methods for arrays, booleans, dates, functions, numbers,
+and strings are implemented using the JS API and certain internal interfaces
+used as "fast paths".
+<p>In general, errors are signaled by false or unoverloaded-null return
+values, and are reported using <tt>JS_ReportError()</tt> or one of its
+variants by the lowest level in order to provide the most detail. Client
+code can substitute its own error reporting function and suppress errors,
+or reflect them into Java or some other runtime system as exceptions, GUI
+dialogs, etc.
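+<p>A sketch of substituting a client error reporter (the reporter body is an
+arbitrary example; the <tt>filename</tt> and <tt>lineno</tt> fields come from
+<tt>JSErrorReport</tt> in <tt>jsapi.h</tt>):
+<pre><tt>    #include &lt;stdio.h>
+    #include "jsapi.h"
+
+    static void
+    my_error_reporter(JSContext *cx, const char *message, JSErrorReport *report)
+    {
+        fprintf(stderr, "js error: %s:%u: %s\n",
+                (report &amp;&amp; report->filename) ? report->filename : "&lt;no file>",
+                report ? (unsigned) report->lineno : 0,
+                message);
+    }
+
+    /* Install it on each context, e.g. right after JS_NewContext: */
+    JS_SetErrorReporter(cx, my_error_reporter);</tt></pre>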
+<h2>
+File walk-through (OUT OF DATE!)</h2>
+
+<h4>
+jsapi.c, jsapi.h</h4>
+The public API to be used by almost all client code.&nbsp; If your client
+code can't make do with <tt>jsapi.h</tt>, and must reach into a friend
+or private js* file, please let us know so we can extend <tt>jsapi.h</tt>
+to include what you need in a fashion that we can support over the long
+run.
+<h4>
+jspubtd.h, jsprvtd.h</h4>
+These files exist to group struct and scalar typedefs so they can be used
+everywhere without dragging in struct definitions from N different files.
+The <tt>jspubtd.h</tt> file contains public typedefs, and is included by
+<tt>jsapi.h</tt>.
+The <tt>jsprvtd.h</tt> file contains private typedefs and is included by
+various .h files that need type names, but not type sizes or declarations.
+<h4>
+jsdbgapi.c, jsdbgapi.h</h4>
+The Debugging API, still very much under development. Provided so far:
+<ul>
+<li>
+Traps, with which breakpoints, single-stepping, step over, step out, and
+so on can be implemented. The debugger will have to consult jsopcode.def
+on its own to figure out where to plant trap instructions to implement
+functions like step out, but a future jsdbgapi.h will provide convenience
+interfaces to do these things. At most one trap per bytecode can be set.
+When a script (<tt>JSScript</tt>) is destroyed, all traps set in its bytecode
+are cleared.</li>
+
+<li>
+Watchpoints, for intercepting set operations on properties and running
+a debugger-supplied function that receives the old value and a pointer
+to the new one, which it can use to modify the new value being set.</li>
+
+<li>
+Line number to PC and back mapping functions. The line-to-PC direction
+"rounds" toward the next bytecode generated from a line greater than or
+equal to the input line, and may return the PC of a for-loop update part,
+if given the line number of the loop body's closing brace. Any line after
+the last one in a script or function maps to a PC one byte beyond the last
+bytecode in the script. An example, from perfect.js:</li>
+
+<pre><tt>14&nbsp;&nbsp; function perfect(n)
+15&nbsp;&nbsp; {
+16&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; print("The perfect numbers up to " +&nbsp; n + " are:");
+17
+18&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; // We build sumOfDivisors[i] to hold a string expression for
+19&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; // the sum of the divisors of i, excluding i itself.
+20&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; var sumOfDivisors = new ExprArray(n+1,1);
+21&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; for (var divisor = 2; divisor &lt;= n; divisor++) {
+22&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; for (var j = divisor + divisor; j &lt;= n; j += divisor) {
+23&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; sumOfDivisors[j] += " + " + divisor;
+24&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; }
+25&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; // At this point everything up to 'divisor' has its sumOfDivisors
+26&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; // expression calculated, so we can determine whether it's perfect
+27&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; // already by evaluating.
+28&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; if (eval(sumOfDivisors[divisor]) == divisor) {
+29&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; print("" + divisor + " = " + sumOfDivisors[divisor]);
+30&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; }
+31&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; }
+32&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; delete sumOfDivisors;
+33&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; print("That's all.");
+34&nbsp;&nbsp; }</tt></pre>
+The line number to PC and back mappings can be tested using the js program
+with the following script:
+<pre><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; load("perfect.js")
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; print(perfect)
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; dis(perfect)
+
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; print()
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; for (var ln = 0; ln &lt;= 40; ln++) {
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; var pc = line2pc(perfect,ln)
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; var ln2 = pc2line(perfect,pc)
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; print("\tline " + ln + " => pc " + pc + " => line " + ln2)
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; }</tt></pre>
+The result of the for loop over lines 0 to 40 inclusive is:
+<pre><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 0 => pc 0 => line 16
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 1 => pc 0 => line 16
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 2 => pc 0 => line 16
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 3 => pc 0 => line 16
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 4 => pc 0 => line 16
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 5 => pc 0 => line 16
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 6 => pc 0 => line 16
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 7 => pc 0 => line 16
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 8 => pc 0 => line 16
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 9 => pc 0 => line 16
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 10 => pc 0 => line 16
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 11 => pc 0 => line 16
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 12 => pc 0 => line 16
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 13 => pc 0 => line 16
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 14 => pc 0 => line 16
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 15 => pc 0 => line 16
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 16 => pc 0 => line 16
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 17 => pc 19 => line 20
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 18 => pc 19 => line 20
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 19 => pc 19 => line 20
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 20 => pc 19 => line 20
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 21 => pc 36 => line 21
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 22 => pc 53 => line 22
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 23 => pc 74 => line 23
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 24 => pc 92 => line 22
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 25 => pc 106 => line 28
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 26 => pc 106 => line 28
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 27 => pc 106 => line 28
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 28 => pc 106 => line 28
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 29 => pc 127 => line 29
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 30 => pc 154 => line 21
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 31 => pc 154 => line 21
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 32 => pc 161 => line 32
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 33 => pc 172 => line 33
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 34 => pc 172 => line 33
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 35 => pc 172 => line 33
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 36 => pc 172 => line 33
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 37 => pc 172 => line 33
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 38 => pc 172 => line 33
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 39 => pc 172 => line 33
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 40 => pc 172 => line 33</tt></pre>
+</ul>
+
+<h4>
+jsconfig.h</h4>
+Various configuration macros defined as 0 or 1 depending on how <tt>JS_VERSION</tt>
+is defined (as 10 for JavaScript 1.0, 11 for JavaScript 1.1, etc.). Not
+all macros are tested around related code yet. In particular, JS 1.0 support
+is missing from JSRef. JS 1.2 support will appear in a future JSRef release.
+<br>&nbsp;
+<h4>
+js.c</h4>
+The "JS shell", a simple interpreter program that uses the JS API and more
+than a few internal interfaces (some of these internal interfaces could
+be replaced by <tt>jsapi.h</tt> calls). The js program built from this
+source provides a test vehicle for evaluating scripts and calling functions,
+trying out new debugger primitives, etc.
+<h4>
+jsarray.*, jsbool.*, jsdate.*, jsfun.*, jsmath.*, jsnum.*, jsstr.*</h4>
+These file pairs implement the standard classes and (where they exist)
+their underlying primitive types. They have similar structure, generally
+starting with class definitions and continuing with internal constructors,
+finalizers, and helper functions.
+<h4>
+jsobj.*, jsscope.*</h4>
+These two pairs declare and implement the JS object system. All of the
+following happen here:
+<ul>
+<li>
+creating objects by class and prototype, and finalizing objects;</li>
+
+<li>
+defining, looking up, getting, setting, and deleting properties;</li>
+
+<li>
+creating and destroying properties and binding names to them.</li>
+</ul>
+The details of a native object's map (scope) are mostly hidden in
+<tt>jsscope.[ch]</tt>.
+<h4>
+jsatom.c, jsatom.h</h4>
+The atom manager. Contains well-known string constants, their atoms, the
+global atom hash table and related state, the js_Atomize() function that
+turns a counted string of bytes into an atom, and literal pool (<tt>JSAtomMap</tt>)
+methods.
+<h4>
+jsgc.c, jsgc.h</h4>
+[TBD]
+<h4>
+jsinterp.*, jscntxt.*</h4>
+The bytecode interpreter, and related functions such as Call and AllocStack,
+live in <i>jsinterp.c</i>. The JSContext constructor and destructor are
+factored out into <i>jscntxt.c</i> for minimal linking when the compiler
+part of JS is split from the interpreter part into a separate program.
+<h4>
+jsemit.*, jsopcode.tbl, jsopcode.*, jsparse.*, jsscan.*, jsscript.*</h4>
+Compiler and decompiler modules. The <i>jsopcode.tbl</i> file is a C preprocessor
+source that defines almost everything there is to know about JS bytecodes.
+See its major comment for how to use it. For now, a debugger will use it
+and its dependents such as <i>jsopcode.h</i> directly, but over time we
+intend to extend <i>jsdbgapi.h</i> to hide uninteresting details and provide
+conveniences. The code generator is split across paragraphs of code in
+<i>jsparse.c</i>,
+and the utility methods called on <tt>JSCodeGenerator</tt> appear in <i>jsemit.c</i>.
+Source notes generated by <i>jsparse.c</i> and
+<i>jsemit.c</i> are used
+in <i>jsscript.c</i> to map line number to program counter and back.
+<h4>
+jstypes.h, jslog2.c</h4>
+Fundamental representation types and utility macros. This file alone among
+all .h files in JSRef must be included first by .c files. It is not nested
+in .h files, as other prerequisite .h files generally are, since it is
+also a direct dependency of most .c files and would be over-included if
+nested in addition to being directly included. The one "not-quite-a-macro
+macro" is the <tt>JS_CeilingLog2()</tt> function in <i>jslog2.c</i>.
+<h4>
+jsarena.c, jsarena.h</h4>
+Last-In-First-Out allocation macros that amortize malloc costs and allow
+for en-masse freeing. See the paper mentioned in jsarena.h's major comment.
+<h4>
+jsutil.c, jsutil.h</h4>
+The <tt>JS_ASSERT</tt> macro is used throughout JSRef source as a proof
+device to make invariants and preconditions clear to the reader, and to
+hold the line during maintenance and evolution against regressions or violations
+of assumptions that it would be too expensive to test unconditionally at
+run-time. Certain assertions are followed by run-time tests that cope with
+assertion failure, but only where I'm too smart or paranoid to believe
+the assertion will never fail...
+<h4>
+jsclist.h</h4>
+Doubly-linked circular list struct and macros.
+<h4>
+jscpucfg.c</h4>
+This standalone program generates <i>jscpucfg.h</i>, a header file containing
+bytes per word and other constants that depend on CPU architecture and
+C compiler type model. It tries to discover most of these constants by
+running its own experiments on the build host, so if you are cross-compiling,
+beware.
+<h4>
+prdtoa.c, prdtoa.h</h4>
+David Gay's portable double-precision floating point to string conversion
+code, with Permission To Use notice included.
+<h4>
+prhash.c, prhash.h</h4>
+Portable, extensible hash tables. These use multiplicative hash for strength
+reduction over division hash, yet with very good key distribution over
+power of two table sizes. Collisions resolve via chaining, so each entry
+burns a malloc and can fragment the heap.
+<h4>
+prlong.c, prlong.h</h4>
+64-bit integer emulation, and compatible macros that use C's long long
+type where it exists (my last company mapped long long to a 128-bit type,
+but no real architecture does 128-bit ints yet).
+<h4>
+jsosdep.h</h4>
+Annoying OS dependencies rationalized into a few "feature-test" macros
+such as <tt>JS_HAVE_LONG_LONG</tt>.
+<h4>
+jsprf.*</h4>
+Portable, buffer-overrun-resistant sprintf and friends. For no good reason
+save lack of time, the %e, %f, and %g formats cause your system's native
+sprintf, rather than <tt>JS_dtoa()</tt>, to be used. This bug doesn't affect
+JSRef, because it uses its own <tt>JS_dtoa()</tt> call in <i>jsnum.c</i>
+to convert from double to string, but it's a bug that we'll fix later,
+and one you should be aware of if you intend to use a <tt>JS_*printf()</tt>&nbsp;
+function with your own floating type arguments - various vendor sprintf's
+mishandle NaN, +/-Inf, and some even print normal floating values inaccurately.
+<h4>
+prmjtime.c, prmjtime.h</h4>
+Time functions. These interfaces are named in a way that makes local vs.
+universal time confusion likely. Caveat emptor, and we're working on it.
+To make matters worse, Java (and therefore JavaScript) uses "local" time
+numbers (offsets from the epoch) in its Date class.
+
+
+<h2>
+<a NAME="Resources"></a>Additional Resources (links, API docs, and newsgroups)</h2>
+<ul>
+<li><a href ="http://www.mozilla.org/js/">http://www.mozilla.org/js/</a>
+<li><a href ="http://www.mozilla.org/js/spidermonkey/">http://www.mozilla.org/js/spidermonkey/</a>
+<li><a href ="news://news.mozilla.org/netscape.public.mozilla.jseng">news://news.mozilla.org/netscape.public.mozilla.jseng</a>
+</ul>
+
+
+
+</body>
+</html>
diff --git a/third_party/js-1.7/SpiderMonkey.rsp b/third_party/js-1.7/SpiderMonkey.rsp
new file mode 100644
index 0000000..8025c6c
--- /dev/null
+++ b/third_party/js-1.7/SpiderMonkey.rsp
@@ -0,0 +1,12 @@
+mozilla/js/src/*
+mozilla/js/src/config/*
+mozilla/js/src/fdlibm/*
+mozilla/js/src/liveconnect/*
+mozilla/js/src/liveconnect/_jni/*
+mozilla/js/src/liveconnect/classes/*
+mozilla/js/src/liveconnect/classes/netscape/*
+mozilla/js/src/liveconnect/classes/netscape/javascript/*
+mozilla/js/src/liveconnect/config/*
+mozilla/js/src/liveconnect/macbuild/*
+mozilla/js/src/liveconnect/macbuild/JavaSession/*
+mozilla/js/src/macbuild/*
diff --git a/third_party/js-1.7/Y.js b/third_party/js-1.7/Y.js
new file mode 100644
index 0000000..e92a65a
--- /dev/null
+++ b/third_party/js-1.7/Y.js
@@ -0,0 +1,19 @@
+// The Y combinator, applied to the factorial function
+
+function factorial(proc) {
+ return function (n) {
+ return (n <= 1) ? 1 : n * proc(n-1);
+ }
+}
+
+function Y(outer) {
+ function inner(proc) {
+ function apply(arg) {
+ return proc(proc)(arg);
+ }
+ return outer(apply);
+ }
+ return inner(inner);
+}
+
+print("5! is " + Y(factorial)(5));
diff --git a/third_party/js-1.7/config.mk b/third_party/js-1.7/config.mk
new file mode 100644
index 0000000..f622d30
--- /dev/null
+++ b/third_party/js-1.7/config.mk
@@ -0,0 +1,186 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998-1999
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either of the GNU General Public License Version 2 or later (the "GPL"),
+# or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+ifdef JS_DIST
+DIST = $(JS_DIST)
+else
+DIST = $(DEPTH)/../../dist
+endif
+
+# Set os+release dependent make variables
+OS_ARCH := $(subst /,_,$(shell uname -s | sed /\ /s//_/))
+
+# Attempt to differentiate between SunOS 5.4 and x86 5.4
+OS_CPUARCH := $(shell uname -m)
+ifeq ($(OS_CPUARCH),i86pc)
+OS_RELEASE := $(shell uname -r)_$(OS_CPUARCH)
+else
+ifeq ($(OS_ARCH),AIX)
+OS_RELEASE := $(shell uname -v).$(shell uname -r)
+else
+OS_RELEASE := $(shell uname -r)
+endif
+endif
+ifeq ($(OS_ARCH),IRIX64)
+OS_ARCH := IRIX
+endif
+
+# Handle output from win32 unames other than Netscape's version
+ifeq (,$(filter-out Windows_95 Windows_98 CYGWIN_95-4.0 CYGWIN_98-4.10, $(OS_ARCH)))
+ OS_ARCH := WIN95
+endif
+ifeq ($(OS_ARCH),WIN95)
+ OS_ARCH := WINNT
+ OS_RELEASE := 4.0
+endif
+ifeq ($(OS_ARCH), Windows_NT)
+ OS_ARCH := WINNT
+ OS_MINOR_RELEASE := $(shell uname -v)
+ ifeq ($(OS_MINOR_RELEASE),00)
+ OS_MINOR_RELEASE = 0
+ endif
+ OS_RELEASE := $(OS_RELEASE).$(OS_MINOR_RELEASE)
+endif
+ifeq (CYGWIN_NT,$(findstring CYGWIN_NT,$(OS_ARCH)))
+ OS_RELEASE := $(patsubst CYGWIN_NT-%,%,$(OS_ARCH))
+ OS_ARCH := WINNT
+endif
+ifeq ($(OS_ARCH), CYGWIN32_NT)
+ OS_ARCH := WINNT
+endif
+ifeq (MINGW32_NT,$(findstring MINGW32_NT,$(OS_ARCH)))
+ OS_RELEASE := $(patsubst MINGW32_NT-%,%,$(OS_ARCH))
+ OS_ARCH := WINNT
+endif
+
+# Virtually all Linux versions are identical.
+# Any distinctions are handled in linux.h
+ifeq ($(OS_ARCH),Linux)
+OS_CONFIG := Linux_All
+else
+ifeq ($(OS_ARCH),dgux)
+OS_CONFIG := dgux
+else
+ifeq ($(OS_ARCH),Darwin)
+OS_CONFIG := Darwin
+else
+OS_CONFIG := $(OS_ARCH)$(OS_OBJTYPE)$(OS_RELEASE)
+endif
+endif
+endif
+
+ASFLAGS =
+DEFINES =
+
+ifeq ($(OS_ARCH), WINNT)
+INSTALL = nsinstall
+CP = cp
+else
+INSTALL = $(DIST)/bin/nsinstall
+CP = cp
+endif
+
+ifdef BUILD_OPT
+OPTIMIZER = -O
+DEFINES += -UDEBUG -DNDEBUG -UDEBUG_$(USER)
+OBJDIR_TAG = _OPT
+else
+ifdef USE_MSVC
+OPTIMIZER = -Zi
+else
+OPTIMIZER = -g
+endif
+DEFINES += -DDEBUG -DDEBUG_$(USER)
+OBJDIR_TAG = _DBG
+endif
+
+SO_SUFFIX = so
+
+NS_USE_NATIVE = 1
+
+# Java stuff
+CLASSDIR = $(DEPTH)/liveconnect/classes
+JAVA_CLASSES = $(patsubst %.java,%.class,$(JAVA_SRCS))
+TARGETS += $(addprefix $(CLASSDIR)/$(OBJDIR)/$(JARPATH)/, $(JAVA_CLASSES))
+JAVAC = $(JDK)/bin/javac
+JAVAC_FLAGS = -classpath "$(CLASSPATH)" -d $(CLASSDIR)/$(OBJDIR)
+ifeq ($(OS_ARCH), WINNT)
+ SEP = ;
+else
+ SEP = :
+endif
+CLASSPATH = $(JDK)/lib/classes.zip$(SEP)$(CLASSDIR)/$(OBJDIR)
+
+include $(DEPTH)/config/$(OS_CONFIG).mk
+
+ifndef OBJ_SUFFIX
+ifdef USE_MSVC
+OBJ_SUFFIX = obj
+else
+OBJ_SUFFIX = o
+endif
+endif
+
+ifndef HOST_BIN_SUFFIX
+ifeq ($(OS_ARCH),WINNT)
+HOST_BIN_SUFFIX = .exe
+else
+HOST_BIN_SUFFIX =
+endif
+endif
+
+# Name of the binary code directories
+ifdef BUILD_IDG
+OBJDIR = $(OS_CONFIG)$(OBJDIR_TAG).OBJD
+else
+OBJDIR = $(OS_CONFIG)$(OBJDIR_TAG).OBJ
+endif
+VPATH = $(OBJDIR)
+
+# Automatic make dependencies file
+DEPENDENCIES = $(OBJDIR)/.md
+
+LCJAR = js15lc30.jar
+
+# Library name
+LIBDIR := lib
+ifeq ($(CPU_ARCH), x86_64)
+LIBDIR := lib64
+endif
+
diff --git a/third_party/js-1.7/config/AIX4.1.mk b/third_party/js-1.7/config/AIX4.1.mk
new file mode 100644
index 0000000..09c7cb9
--- /dev/null
+++ b/third_party/js-1.7/config/AIX4.1.mk
@@ -0,0 +1,65 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config stuff for AIX
+#
+
+CC = xlC_r
+CCC = xlC_r
+
+RANLIB = ranlib
+
+#.c.o:
+# $(CC) -c -MD $*.d $(CFLAGS) $<
+ARCH := aix
+CPU_ARCH = rs6000
+GFX_ARCH = x
+INLINES = js_compare_and_swap:js_fast_lock1:js_fast_unlock1:js_lock_get_slot:js_lock_set_slot:js_lock_scope1
+
+OS_CFLAGS = -qarch=com -qinline+$(INLINES) -DXP_UNIX -DAIX -DAIXV3 -DSYSV -DHAVE_LOCALTIME_R
+OS_LIBS = -lbsd -lsvld -lm
+#-lpthreads -lc_r
+
+MKSHLIB = $(LD) -bM:SRE -bh:4 -bnoentry -berok
+XLDFLAGS += -lc
+
+ifdef JS_THREADSAFE
+XLDFLAGS += -lsvld
+endif
diff --git a/third_party/js-1.7/config/AIX4.2.mk b/third_party/js-1.7/config/AIX4.2.mk
new file mode 100644
index 0000000..1e3f1f1
--- /dev/null
+++ b/third_party/js-1.7/config/AIX4.2.mk
@@ -0,0 +1,64 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config stuff for AIX
+#
+
+CC = xlC_r
+CCC = xlC_r
+CFLAGS += -qarch=com -qnoansialias -qinline+$(INLINES) -DXP_UNIX -DAIX -DAIXV3 -DSYSV -DHAVE_LOCALTIME_R
+
+RANLIB = ranlib
+
+#.c.o:
+# $(CC) -c -MD $*.d $(CFLAGS) $<
+ARCH := aix
+CPU_ARCH = rs6000
+GFX_ARCH = x
+INLINES = js_compare_and_swap:js_fast_lock1:js_fast_unlock1:js_lock_get_slot:js_lock_set_slot:js_lock_scope1
+
+#-lpthreads -lc_r
+
+MKSHLIB = /usr/lpp/xlC/bin/makeC++SharedLib_r -p 0 -G -berok
+
+ifdef JS_THREADSAFE
+XLDFLAGS += -ldl
+endif
+
diff --git a/third_party/js-1.7/config/AIX4.3.mk b/third_party/js-1.7/config/AIX4.3.mk
new file mode 100644
index 0000000..df05d8c
--- /dev/null
+++ b/third_party/js-1.7/config/AIX4.3.mk
@@ -0,0 +1,65 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config stuff for AIX
+#
+
+CC = xlC_r
+CCC = xlC_r
+CFLAGS += -qarch=com -qnoansialias -qinline+$(INLINES) -DXP_UNIX -DAIX -DAIXV3 -DSYSV -DAIX4_3 -DHAVE_LOCALTIME_R
+
+RANLIB = ranlib
+
+#.c.o:
+# $(CC) -c -MD $*.d $(CFLAGS) $<
+ARCH := aix
+CPU_ARCH = rs6000
+GFX_ARCH = x
+INLINES = js_compare_and_swap:js_fast_lock1:js_fast_unlock1:js_lock_get_slot:js_lock_set_slot:js_lock_scope1
+
+#-lpthreads -lc_r
+
+MKSHLIB_BIN = /usr/ibmcxx/bin/makeC++SharedLib_r
+MKSHLIB = $(MKSHLIB_BIN) -p 0 -G -berok -bM:UR
+
+ifdef JS_THREADSAFE
+XLDFLAGS += -ldl
+endif
+
diff --git a/third_party/js-1.7/config/CVS/Entries b/third_party/js-1.7/config/CVS/Entries
new file mode 100644
index 0000000..01df8fb
--- /dev/null
+++ b/third_party/js-1.7/config/CVS/Entries
@@ -0,0 +1,36 @@
+/AIX4.1.mk/1.7/Sat Feb 12 20:10:33 2005//TJS_170
+/AIX4.2.mk/1.9/Sat Feb 12 20:10:33 2005//TJS_170
+/AIX4.3.mk/1.9/Sat Feb 12 20:10:33 2005//TJS_170
+/Darwin.mk/1.6/Mon Feb 5 16:24:49 2007//TJS_170
+/Darwin1.3.mk/1.3/Sat Feb 12 20:10:33 2005//TJS_170
+/Darwin1.4.mk/1.3/Sat Feb 12 20:10:33 2005//TJS_170
+/Darwin5.2.mk/1.3/Sat Feb 12 20:10:33 2005//TJS_170
+/Darwin5.3.mk/1.3/Sat Feb 12 20:10:33 2005//TJS_170
+/HP-UXB.10.10.mk/1.9/Sat Feb 12 20:10:33 2005//TJS_170
+/HP-UXB.10.20.mk/1.8/Sat Feb 12 20:10:33 2005//TJS_170
+/HP-UXB.11.00.mk/1.9/Sat Feb 12 20:10:33 2005//TJS_170
+/IRIX.mk/1.9/Sat Feb 12 20:10:33 2005//TJS_170
+/IRIX5.3.mk/1.7/Sat Feb 12 20:10:33 2005//TJS_170
+/IRIX6.1.mk/1.7/Sat Feb 12 20:10:33 2005//TJS_170
+/IRIX6.2.mk/1.6/Sat Feb 12 20:10:33 2005//TJS_170
+/IRIX6.3.mk/1.6/Sat Feb 12 20:10:33 2005//TJS_170
+/IRIX6.5.mk/1.6/Sat Feb 12 20:10:33 2005//TJS_170
+/Linux_All.mk/1.14/Tue May 10 19:53:44 2005//TJS_170
+/Mac_OS10.0.mk/1.4/Sat Feb 12 20:10:33 2005//TJS_170
+/OSF1V4.0.mk/1.9/Sat Feb 12 20:10:33 2005//TJS_170
+/OSF1V5.0.mk/1.5/Sat Feb 12 20:10:33 2005//TJS_170
+/SunOS4.1.4.mk/1.6/Sat Feb 12 20:10:33 2005//TJS_170
+/SunOS5.3.mk/1.7/Sat Feb 12 20:10:33 2005//TJS_170
+/SunOS5.4.mk/1.7/Sat Feb 12 20:10:33 2005//TJS_170
+/SunOS5.5.1.mk/1.8/Sat Feb 12 20:10:33 2005//TJS_170
+/SunOS5.5.mk/1.10/Sat Feb 12 20:10:33 2005//TJS_170
+/SunOS5.6.mk/1.13/Sat Feb 12 20:10:33 2005//TJS_170
+/SunOS5.7.mk/1.6/Sat Feb 12 20:10:33 2005//TJS_170
+/SunOS5.8.mk/1.4/Sat Feb 12 20:10:33 2005//TJS_170
+/SunOS5.9.mk/1.2/Sat Feb 12 20:10:33 2005//TJS_170
+/WINNT4.0.mk/1.15/Wed Jul 18 19:55:15 2007//TJS_170
+/WINNT5.0.mk/1.10/Fri Aug 10 23:23:38 2007//TJS_170
+/WINNT5.1.mk/1.6/Fri Aug 10 23:23:38 2007//TJS_170
+/WINNT5.2.mk/1.5/Fri Aug 10 23:23:38 2007//TJS_170
+/dgux.mk/1.7/Sat Feb 12 20:10:33 2005//TJS_170
+D
diff --git a/third_party/js-1.7/config/CVS/Repository b/third_party/js-1.7/config/CVS/Repository
new file mode 100644
index 0000000..d0ce95c
--- /dev/null
+++ b/third_party/js-1.7/config/CVS/Repository
@@ -0,0 +1 @@
+mozilla/js/src/config
diff --git a/third_party/js-1.7/config/CVS/Root b/third_party/js-1.7/config/CVS/Root
new file mode 100644
index 0000000..cdb6f4a
--- /dev/null
+++ b/third_party/js-1.7/config/CVS/Root
@@ -0,0 +1 @@
+:pserver:anonymous@cvs-mirror.mozilla.org:/cvsroot
diff --git a/third_party/js-1.7/config/CVS/Tag b/third_party/js-1.7/config/CVS/Tag
new file mode 100644
index 0000000..2a8b158
--- /dev/null
+++ b/third_party/js-1.7/config/CVS/Tag
@@ -0,0 +1 @@
+NJS_170
diff --git a/third_party/js-1.7/config/Darwin.mk b/third_party/js-1.7/config/Darwin.mk
new file mode 100644
index 0000000..23b503e
--- /dev/null
+++ b/third_party/js-1.7/config/Darwin.mk
@@ -0,0 +1,83 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Steve Zellers (zellers@apple.com)
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config for Mac OS X as of PR3
+# Just ripped from Linux config
+#
+
+CC = cc
+CCC = g++
+CFLAGS += -Wall -Wno-format
+OS_CFLAGS = -DXP_UNIX -DSVR4 -DSYSV -D_BSD_SOURCE -DPOSIX_SOURCE -DDARWIN
+
+RANLIB = ranlib
+MKSHLIB = $(CC) -dynamiclib $(XMKSHLIBOPTS) -framework System
+
+SO_SUFFIX = dylib
+
+#.c.o:
+# $(CC) -c -MD $*.d $(CFLAGS) $<
+
+CPU_ARCH = $(shell uname -m)
+ifeq (86,$(findstring 86,$(CPU_ARCH)))
+CPU_ARCH = x86
+OS_CFLAGS+= -DX86_LINUX
+endif
+GFX_ARCH = x
+
+OS_LIBS = -lc -framework System
+
+ASFLAGS += -x assembler-with-cpp
+
+ifeq ($(CPU_ARCH),alpha)
+
+# Ask the C compiler on alpha linux to let us work with denormalized
+# double values, which are required by the ECMA spec.
+
+OS_CFLAGS += -mieee
+endif
+
+# Use the editline library to provide line-editing support.
+JS_EDITLINE = 1
+
+# Don't allow Makefile.ref to use libmath
+NO_LIBM = 1
+
diff --git a/third_party/js-1.7/config/Darwin1.3.mk b/third_party/js-1.7/config/Darwin1.3.mk
new file mode 100755
index 0000000..05d3767
--- /dev/null
+++ b/third_party/js-1.7/config/Darwin1.3.mk
@@ -0,0 +1,81 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Steve Zellers (zellers@apple.com)
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config for Mac OS X as of PR3
+# Just ripped from Linux config
+#
+
+CC = cc
+CCC = g++
+CFLAGS += -Wall -Wno-format
+OS_CFLAGS = -DXP_UNIX -DSVR4 -DSYSV -D_BSD_SOURCE -DPOSIX_SOURCE -DRHAPSODY
+
+RANLIB = ranlib
+MKSHLIB = libtool $(XMKSHLIBOPTS) -framework System
+
+#.c.o:
+# $(CC) -c -MD $*.d $(CFLAGS) $<
+
+CPU_ARCH = $(shell uname -m)
+ifeq (86,$(findstring 86,$(CPU_ARCH)))
+CPU_ARCH = x86
+OS_CFLAGS+= -DX86_LINUX
+endif
+GFX_ARCH = x
+
+OS_LIBS = -lc -framework System
+
+ASFLAGS += -x assembler-with-cpp
+
+ifeq ($(CPU_ARCH),alpha)
+
+# Ask the C compiler on alpha linux to let us work with denormalized
+# double values, which are required by the ECMA spec.
+
+OS_CFLAGS += -mieee
+endif
+
+# Use the editline library to provide line-editing support.
+JS_EDITLINE = 1
+
+# Don't allow Makefile.ref to use libmath
+NO_LIBM = 1
+
diff --git a/third_party/js-1.7/config/Darwin1.4.mk b/third_party/js-1.7/config/Darwin1.4.mk
new file mode 100755
index 0000000..f7b6af8
--- /dev/null
+++ b/third_party/js-1.7/config/Darwin1.4.mk
@@ -0,0 +1,41 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mike McCabe <mike+mozilla@meer.net>
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+include $(DEPTH)/config/Darwin1.3.mk
diff --git a/third_party/js-1.7/config/Darwin5.2.mk b/third_party/js-1.7/config/Darwin5.2.mk
new file mode 100755
index 0000000..9b9b6ff
--- /dev/null
+++ b/third_party/js-1.7/config/Darwin5.2.mk
@@ -0,0 +1,81 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Steve Zellers (zellers@apple.com)
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config for Mac OS X as of PR3
+# Just ripped from Linux config
+#
+
+CC = cc
+CCC = g++
+CFLAGS += -Wall -Wno-format
+OS_CFLAGS = -DXP_UNIX -DSVR4 -DSYSV -D_BSD_SOURCE -DPOSIX_SOURCE -DDARWIN
+
+RANLIB = ranlib
+MKSHLIB = libtool $(XMKSHLIBOPTS) -framework System
+
+#.c.o:
+# $(CC) -c -MD $*.d $(CFLAGS) $<
+
+CPU_ARCH = $(shell uname -m)
+ifeq (86,$(findstring 86,$(CPU_ARCH)))
+CPU_ARCH = x86
+OS_CFLAGS+= -DX86_LINUX
+endif
+GFX_ARCH = x
+
+OS_LIBS = -lc -framework System
+
+ASFLAGS += -x assembler-with-cpp
+
+ifeq ($(CPU_ARCH),alpha)
+
+# Ask the C compiler on alpha linux to let us work with denormalized
+# double values, which are required by the ECMA spec.
+
+OS_CFLAGS += -mieee
+endif
+
+# Use the editline library to provide line-editing support.
+JS_EDITLINE = 1
+
+# Don't allow Makefile.ref to use libmath
+NO_LIBM = 1
+
diff --git a/third_party/js-1.7/config/Darwin5.3.mk b/third_party/js-1.7/config/Darwin5.3.mk
new file mode 100644
index 0000000..9b9b6ff
--- /dev/null
+++ b/third_party/js-1.7/config/Darwin5.3.mk
@@ -0,0 +1,81 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Steve Zellers (zellers@apple.com)
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config for Mac OS X as of PR3
+# Just ripped from Linux config
+#
+
+CC = cc
+CCC = g++
+CFLAGS += -Wall -Wno-format
+OS_CFLAGS = -DXP_UNIX -DSVR4 -DSYSV -D_BSD_SOURCE -DPOSIX_SOURCE -DDARWIN
+
+RANLIB = ranlib
+MKSHLIB = libtool $(XMKSHLIBOPTS) -framework System
+
+#.c.o:
+# $(CC) -c -MD $*.d $(CFLAGS) $<
+
+CPU_ARCH = $(shell uname -m)
+ifeq (86,$(findstring 86,$(CPU_ARCH)))
+CPU_ARCH = x86
+OS_CFLAGS+= -DX86_LINUX
+endif
+GFX_ARCH = x
+
+OS_LIBS = -lc -framework System
+
+ASFLAGS += -x assembler-with-cpp
+
+ifeq ($(CPU_ARCH),alpha)
+
+# Ask the C compiler on alpha linux to let us work with denormalized
+# double values, which are required by the ECMA spec.
+
+OS_CFLAGS += -mieee
+endif
+
+# Use the editline library to provide line-editing support.
+JS_EDITLINE = 1
+
+# Don't allow Makefile.ref to use libmath
+NO_LIBM = 1
+
diff --git a/third_party/js-1.7/config/HP-UXB.10.10.mk b/third_party/js-1.7/config/HP-UXB.10.10.mk
new file mode 100644
index 0000000..8cd9d20
--- /dev/null
+++ b/third_party/js-1.7/config/HP-UXB.10.10.mk
@@ -0,0 +1,77 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config stuff for HPUX
+#
+
+# CC = gcc
+# CCC = g++
+# CFLAGS += -Wall -Wno-format -fPIC
+
+CC = cc -Ae +Z
+CCC = CC -Ae +a1 +eh +Z
+
+RANLIB = echo
+MKSHLIB = $(LD) -b
+
+SO_SUFFIX = sl
+
+#.c.o:
+# $(CC) -c -MD $*.d $(CFLAGS) $<
+
+CPU_ARCH = hppa
+GFX_ARCH = x
+
+OS_CFLAGS = -DXP_UNIX -DHPUX -DSYSV -DHAVE_LOCALTIME_R
+OS_LIBS = -ldld
+
+ifeq ($(OS_RELEASE),B.10)
+PLATFORM_FLAGS += -DHPUX10 -Dhpux10
+PORT_FLAGS += -DRW_NO_OVERLOAD_SCHAR -DHAVE_MODEL_H
+ifeq ($(OS_VERSION),.10)
+PLATFORM_FLAGS += -DHPUX10_10
+endif
+ifeq ($(OS_VERSION),.20)
+PLATFORM_FLAGS += -DHPUX10_20
+endif
+ifeq ($(OS_VERSION),.30)
+PLATFORM_FLAGS += -DHPUX10_30
+endif
+endif
diff --git a/third_party/js-1.7/config/HP-UXB.10.20.mk b/third_party/js-1.7/config/HP-UXB.10.20.mk
new file mode 100644
index 0000000..8cd9d20
--- /dev/null
+++ b/third_party/js-1.7/config/HP-UXB.10.20.mk
@@ -0,0 +1,77 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config stuff for HPUX
+#
+
+# CC = gcc
+# CCC = g++
+# CFLAGS += -Wall -Wno-format -fPIC
+
+CC = cc -Ae +Z
+CCC = CC -Ae +a1 +eh +Z
+
+RANLIB = echo
+MKSHLIB = $(LD) -b
+
+SO_SUFFIX = sl
+
+#.c.o:
+# $(CC) -c -MD $*.d $(CFLAGS) $<
+
+CPU_ARCH = hppa
+GFX_ARCH = x
+
+OS_CFLAGS = -DXP_UNIX -DHPUX -DSYSV -DHAVE_LOCALTIME_R
+OS_LIBS = -ldld
+
+ifeq ($(OS_RELEASE),B.10)
+PLATFORM_FLAGS += -DHPUX10 -Dhpux10
+PORT_FLAGS += -DRW_NO_OVERLOAD_SCHAR -DHAVE_MODEL_H
+ifeq ($(OS_VERSION),.10)
+PLATFORM_FLAGS += -DHPUX10_10
+endif
+ifeq ($(OS_VERSION),.20)
+PLATFORM_FLAGS += -DHPUX10_20
+endif
+ifeq ($(OS_VERSION),.30)
+PLATFORM_FLAGS += -DHPUX10_30
+endif
+endif
diff --git a/third_party/js-1.7/config/HP-UXB.11.00.mk b/third_party/js-1.7/config/HP-UXB.11.00.mk
new file mode 100644
index 0000000..239188d
--- /dev/null
+++ b/third_party/js-1.7/config/HP-UXB.11.00.mk
@@ -0,0 +1,80 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config stuff for HPUX
+#
+
+ifdef NS_USE_NATIVE
+ CC = cc +Z +DAportable +DS2.0 +u4
+# LD = aCC +Z -b -Wl,+s -Wl,-B,symbolic
+else
+ CC = gcc -Wall -Wno-format -fPIC
+ CCC = g++ -Wall -Wno-format -fPIC
+endif
+
+RANLIB = echo
+MKSHLIB = $(LD) -b
+
+SO_SUFFIX = sl
+
+#.c.o:
+# $(CC) -c -MD $*.d $(CFLAGS) $<
+
+CPU_ARCH = hppa
+GFX_ARCH = x
+
+OS_CFLAGS = -DXP_UNIX -DHPUX -DSYSV -D_HPUX -DNATIVE -D_POSIX_C_SOURCE=199506L -DHAVE_LOCALTIME_R
+OS_LIBS = -ldld
+
+XLDFLAGS = -lpthread
+
+ifeq ($(OS_RELEASE),B.10)
+PLATFORM_FLAGS += -DHPUX10 -Dhpux10
+PORT_FLAGS += -DRW_NO_OVERLOAD_SCHAR -DHAVE_MODEL_H
+ifeq ($(OS_VERSION),.10)
+PLATFORM_FLAGS += -DHPUX10_10
+endif
+ifeq ($(OS_VERSION),.20)
+PLATFORM_FLAGS += -DHPUX10_20
+endif
+ifeq ($(OS_VERSION),.30)
+PLATFORM_FLAGS += -DHPUX10_30
+endif
+endif
diff --git a/third_party/js-1.7/config/IRIX.mk b/third_party/js-1.7/config/IRIX.mk
new file mode 100644
index 0000000..88b162f
--- /dev/null
+++ b/third_party/js-1.7/config/IRIX.mk
@@ -0,0 +1,87 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config stuff for IRIX
+#
+
+CPU_ARCH = mips
+GFX_ARCH = x
+
+RANLIB = /bin/true
+
+#NS_USE_GCC = 1
+
+ifndef NS_USE_NATIVE
+CC = gcc
+CCC = g++
+AS = $(CC) -x assembler-with-cpp
+ODD_CFLAGS = -Wall -Wno-format
+ifdef BUILD_OPT
+OPTIMIZER = -O6
+endif
+else
+ifeq ($(OS_RELEASE),6.2)
+CC = cc -n32 -DIRIX6_2
+endif
+ifeq ($(OS_RELEASE),6.3)
+CC = cc -n32 -DIRIX6_3
+endif
+ifeq ($(OS_RELEASE),6.5)
+CC = cc -n32 -DIRIX6_5
+endif
+CCC = CC
+# LD = CC
+ODD_CFLAGS = -fullwarn -xansi
+ifdef BUILD_OPT
+OPTIMIZER += -Olimit 4000
+endif
+endif
+
+# For purify
+HAVE_PURIFY = 1
+PURE_OS_CFLAGS = $(ODD_CFLAGS) -DXP_UNIX -DSVR4 -DSW_THREADS -DIRIX -DHAVE_LOCALTIME_R
+
+OS_CFLAGS = $(PURE_OS_CFLAGS) -MDupdate $(DEPENDENCIES)
+
+BSDECHO = echo
+MKSHLIB = $(LD) -n32 -shared
+
+# Use the editline library to provide line-editing support.
+JS_EDITLINE = 1
diff --git a/third_party/js-1.7/config/IRIX5.3.mk b/third_party/js-1.7/config/IRIX5.3.mk
new file mode 100644
index 0000000..f38cc94
--- /dev/null
+++ b/third_party/js-1.7/config/IRIX5.3.mk
@@ -0,0 +1,44 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config stuff for IRIX5.3
+#
+
+include $(DEPTH)/config/IRIX.mk
diff --git a/third_party/js-1.7/config/IRIX6.1.mk b/third_party/js-1.7/config/IRIX6.1.mk
new file mode 100644
index 0000000..354f1d1
--- /dev/null
+++ b/third_party/js-1.7/config/IRIX6.1.mk
@@ -0,0 +1,44 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config stuff for IRIX6.1
+#
+
+include $(DEPTH)/config/IRIX.mk
diff --git a/third_party/js-1.7/config/IRIX6.2.mk b/third_party/js-1.7/config/IRIX6.2.mk
new file mode 100644
index 0000000..354f1d1
--- /dev/null
+++ b/third_party/js-1.7/config/IRIX6.2.mk
@@ -0,0 +1,44 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config stuff for IRIX6.2
+#
+
+include $(DEPTH)/config/IRIX.mk
diff --git a/third_party/js-1.7/config/IRIX6.3.mk b/third_party/js-1.7/config/IRIX6.3.mk
new file mode 100644
index 0000000..354f1d1
--- /dev/null
+++ b/third_party/js-1.7/config/IRIX6.3.mk
@@ -0,0 +1,44 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config stuff for IRIX6.3
+#
+
+include $(DEPTH)/config/IRIX.mk
diff --git a/third_party/js-1.7/config/IRIX6.5.mk b/third_party/js-1.7/config/IRIX6.5.mk
new file mode 100644
index 0000000..354f1d1
--- /dev/null
+++ b/third_party/js-1.7/config/IRIX6.5.mk
@@ -0,0 +1,44 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config stuff for IRIX6.5
+#
+
+include $(DEPTH)/config/IRIX.mk
diff --git a/third_party/js-1.7/config/Linux_All.mk b/third_party/js-1.7/config/Linux_All.mk
new file mode 100644
index 0000000..0c43df4
--- /dev/null
+++ b/third_party/js-1.7/config/Linux_All.mk
@@ -0,0 +1,103 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config for all versions of Linux
+#
+
+CC = gcc
+CCC = g++
+CFLAGS += -Wall -Wno-format
+OS_CFLAGS = -DXP_UNIX -DSVR4 -DSYSV -D_BSD_SOURCE -DPOSIX_SOURCE -DHAVE_LOCALTIME_R
+
+RANLIB = echo
+MKSHLIB = $(LD) -shared $(XMKSHLIBOPTS)
+
+#.c.o:
+# $(CC) -c -MD $*.d $(CFLAGS) $<
+
+CPU_ARCH = $(shell uname -m)
+# don't filter in x86-64 architecture
+ifneq (x86_64,$(CPU_ARCH))
+ifeq (86,$(findstring 86,$(CPU_ARCH)))
+CPU_ARCH = x86
+OS_CFLAGS+= -DX86_LINUX
+
+ifeq (gcc, $(CC))
+# if using gcc on x86, check version for opt bug
+# (http://bugzilla.mozilla.org/show_bug.cgi?id=24892)
+GCC_VERSION := $(shell gcc -v 2>&1 | grep version | awk '{ print $$3 }')
+GCC_LIST:=$(sort 2.91.66 $(GCC_VERSION) )
+
+ifeq (2.91.66, $(firstword $(GCC_LIST)))
+CFLAGS+= -DGCC_OPT_BUG
+endif
+endif
+endif
+endif
+
+GFX_ARCH = x
+
+OS_LIBS = -lm -lc
+
+ASFLAGS += -x assembler-with-cpp
+
+
+ifeq ($(CPU_ARCH),alpha)
+
+# Ask the C compiler on alpha linux to let us work with denormalized
+# double values, which are required by the ECMA spec.
+
+OS_CFLAGS += -mieee
+endif
+
+# Use the editline library to provide line-editing support.
+JS_EDITLINE = 1
+
+ifeq ($(CPU_ARCH),x86_64)
+# Use VA_COPY() standard macro on x86-64
+# FIXME: better use it everywhere
+OS_CFLAGS += -DHAVE_VA_COPY -DVA_COPY=va_copy
+endif
+
+ifeq ($(CPU_ARCH),x86_64)
+# We need PIC code for shared libraries
+# FIXME: better patch rules.mk & fdlibm/Makefile*
+OS_CFLAGS += -DPIC -fPIC
+endif
diff --git a/third_party/js-1.7/config/Mac_OS10.0.mk b/third_party/js-1.7/config/Mac_OS10.0.mk
new file mode 100755
index 0000000..74ba151
--- /dev/null
+++ b/third_party/js-1.7/config/Mac_OS10.0.mk
@@ -0,0 +1,82 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Steve Zellers (zellers@apple.com)
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config for Mac OS X as of PR3
+# Just ripped from Linux config
+#
+
+CC = cc
+CCC = g++
+CFLAGS += -Wall -Wno-format
+OS_CFLAGS = -DXP_UNIX -DSVR4 -DSYSV -D_BSD_SOURCE -DPOSIX_SOURCE \
+            -DRHAPSODY
+
+RANLIB = ranlib
+MKSHLIB = libtool -dynamic $(XMKSHLIBOPTS) -framework System
+
+#.c.o:
+# $(CC) -c -MD $*.d $(CFLAGS) $<
+
+CPU_ARCH = $(shell uname -m)
+ifeq (86,$(findstring 86,$(CPU_ARCH)))
+CPU_ARCH = x86
+OS_CFLAGS+= -DX86_LINUX
+endif
+GFX_ARCH = x
+
+OS_LIBS = -lc -framework System
+
+ASFLAGS += -x assembler-with-cpp
+
+ifeq ($(CPU_ARCH),alpha)
+
+# Ask the C compiler on alpha linux to let us work with denormalized
+# double values, which are required by the ECMA spec.
+
+OS_CFLAGS += -mieee
+endif
+
+# Use the editline library to provide line-editing support.
+JS_EDITLINE = 1
+
+# Don't allow Makefile.ref to use libmath
+NO_LIBM = 1
+
diff --git a/third_party/js-1.7/config/OSF1V4.0.mk b/third_party/js-1.7/config/OSF1V4.0.mk
new file mode 100644
index 0000000..337ca74
--- /dev/null
+++ b/third_party/js-1.7/config/OSF1V4.0.mk
@@ -0,0 +1,72 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config stuff for OSF1 (Tru64 Unix) V4.0, derived from the DG/UX config
+#
+
+#
+# Initial DG/UX port by Marc Fraioli (fraioli@dg-rtp.dg.com)
+#
+
+ifndef NS_USE_NATIVE
+CC = gcc
+CCC = g++
+CFLAGS += -mieee -Wall -Wno-format
+else
+CC = cc
+CCC = cxx
+CFLAGS += -ieee -std
+# LD = cxx
+endif
+
+RANLIB = echo
+MKSHLIB = $(LD) -shared -taso -all -expect_unresolved "*"
+
+#
+# _DGUX_SOURCE is needed to turn on a lot of stuff in the headers if
+# you're not using DG's compiler. It shouldn't hurt if you are.
+#
+# _POSIX4A_DRAFT10_SOURCE is needed to pick up localtime_r, used in
+# prtime.c
+#
+OS_CFLAGS = -DXP_UNIX -DSVR4 -DSYSV -DDGUX -D_DGUX_SOURCE -D_POSIX4A_DRAFT10_SOURCE -DOSF1 -DHAVE_LOCALTIME_R
+OS_LIBS = -lsocket -lnsl
+
+NOSUCHFILE = /no-such-file
diff --git a/third_party/js-1.7/config/OSF1V5.0.mk b/third_party/js-1.7/config/OSF1V5.0.mk
new file mode 100644
index 0000000..b65738c
--- /dev/null
+++ b/third_party/js-1.7/config/OSF1V5.0.mk
@@ -0,0 +1,69 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config stuff for Tru64 Unix 5.0
+#
+
+#
+# Initial DG/UX port by Marc Fraioli (fraioli@dg-rtp.dg.com)
+#
+
+ifndef NS_USE_NATIVE
+CC = gcc
+CCC = g++
+CFLAGS += -mieee -Wall -Wno-format
+else
+CC = cc
+CCC = cxx
+CFLAGS += -ieee -std -pthread
+# LD = cxx
+endif
+
+RANLIB = echo
+MKSHLIB = $(LD) -shared -all -expect_unresolved "*"
+
+#
+# _POSIX4A_DRAFT10_SOURCE is needed to pick up localtime_r, used in
+# prtime.c
+#
+OS_CFLAGS = -DXP_UNIX -DSVR4 -DSYSV -D_POSIX4A_DRAFT10_SOURCE -DOSF1 -DHAVE_LOCALTIME_R
+OS_LIBS = -lsocket -lnsl
+
+NOSUCHFILE = /no-such-file
diff --git a/third_party/js-1.7/config/SunOS4.1.4.mk b/third_party/js-1.7/config/SunOS4.1.4.mk
new file mode 100644
index 0000000..62f4815
--- /dev/null
+++ b/third_party/js-1.7/config/SunOS4.1.4.mk
@@ -0,0 +1,101 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config stuff for SunOS4.1
+#
+
+CC = gcc
+CCC = g++
+RANLIB = ranlib
+
+#.c.o:
+# $(CC) -c -MD $*.d $(CFLAGS) $<
+
+CPU_ARCH = sparc
+GFX_ARCH = x
+
+# A pile of -D's to build xfe on sunos
+MOZ_CFLAGS = -DSTRINGS_ALIGNED -DNO_REGEX -DNO_ISDIR -DUSE_RE_COMP \
+ -DNO_REGCOMP -DUSE_GETWD -DNO_MEMMOVE -DNO_ALLOCA \
+ -DBOGUS_MB_MAX -DNO_CONST
+
+# Purify doesn't like -MDupdate
+NOMD_OS_CFLAGS = -DXP_UNIX -Wall -Wno-format -DSW_THREADS -DSUNOS4 -DNEED_SYSCALL \
+ $(MOZ_CFLAGS)
+
+OS_CFLAGS = $(NOMD_OS_CFLAGS) -MDupdate $(DEPENDENCIES)
+OS_LIBS = -ldl -lm
+
+MKSHLIB = $(LD) -L$(MOTIF)/lib
+
+HAVE_PURIFY = 1
+MOTIF = /home/motif/usr
+MOTIFLIB = -L$(MOTIF)/lib -lXm
+INCLUDES += -I/usr/X11R5/include -I$(MOTIF)/include
+
+NOSUCHFILE = /solaris-rm-f-sucks
+
+LOCALE_MAP = $(DEPTH)/cmd/xfe/intl/sunos.lm
+
+EN_LOCALE = en_US
+DE_LOCALE = de
+FR_LOCALE = fr
+JP_LOCALE = ja
+SJIS_LOCALE = ja_JP.SJIS
+KR_LOCALE = ko
+CN_LOCALE = zh
+TW_LOCALE = zh_TW
+I2_LOCALE = i2
+IT_LOCALE = it
+SV_LOCALE = sv
+ES_LOCALE = es
+NL_LOCALE = nl
+PT_LOCALE = pt
+
+LOC_LIB_DIR = /usr/openwin/lib/locale
+
+BSDECHO = echo
+
+#
+# These defines are for building unix plugins
+#
+BUILD_UNIX_PLUGINS = 1
+DSO_LDOPTS =
+DSO_LDFLAGS =
diff --git a/third_party/js-1.7/config/SunOS5.3.mk b/third_party/js-1.7/config/SunOS5.3.mk
new file mode 100644
index 0000000..bd615de
--- /dev/null
+++ b/third_party/js-1.7/config/SunOS5.3.mk
@@ -0,0 +1,91 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config stuff for SunOS5.3
+#
+
+CC = gcc
+CCC = g++
+CFLAGS += -Wall -Wno-format
+
+#CC = /opt/SUNWspro/SC3.0.1/bin/cc
+RANLIB = echo
+
+#.c.o:
+# $(CC) -c -MD $*.d $(CFLAGS) $<
+
+CPU_ARCH = sparc
+GFX_ARCH = x
+
+OS_CFLAGS = -DXP_UNIX -DSVR4 -DSYSV -DSOLARIS -DHAVE_LOCALTIME_R
+OS_LIBS = -lsocket -lnsl -ldl
+
+ASFLAGS += -P -L -K PIC -D_ASM -D__STDC__=0
+
+HAVE_PURIFY = 1
+
+NOSUCHFILE = /solaris-rm-f-sucks
+
+ifndef JS_NO_ULTRA
+ULTRA_OPTIONS := -xarch=v8plus
+ULTRA_OPTIONSD := -DULTRA_SPARC
+else
+ULTRA_OPTIONS := -xarch=v8
+ULTRA_OPTIONSD :=
+endif
+
+ifeq ($(OS_CPUARCH),sun4u)
+DEFINES += $(ULTRA_OPTIONSD)
+ifeq ($(findstring gcc,$(CC)),gcc)
+DEFINES += -Wa,$(ULTRA_OPTIONS),$(ULTRA_OPTIONSD)
+else
+ASFLAGS += $(ULTRA_OPTIONS) $(ULTRA_OPTIONSD)
+endif
+endif
+
+ifeq ($(OS_CPUARCH),sun4m)
+ifeq ($(findstring gcc,$(CC)),gcc)
+DEFINES += -Wa,-xarch=v8
+else
+ASFLAGS += -xarch=v8
+endif
+endif
+
+MKSHLIB = $(LD) -G
diff --git a/third_party/js-1.7/config/SunOS5.4.mk b/third_party/js-1.7/config/SunOS5.4.mk
new file mode 100644
index 0000000..de01924
--- /dev/null
+++ b/third_party/js-1.7/config/SunOS5.4.mk
@@ -0,0 +1,92 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config stuff for SunOS5.4
+#
+
+ifdef NS_USE_NATIVE
+CC = cc
+CCC = CC
+else
+CC = gcc
+CCC = g++
+CFLAGS += -Wall -Wno-format
+endif
+
+RANLIB = echo
+
+CPU_ARCH = sparc
+GFX_ARCH = x
+
+OS_CFLAGS = -DXP_UNIX -DSVR4 -DSYSV -D__svr4 -DSOLARIS -DHAVE_LOCALTIME_R
+OS_LIBS = -lsocket -lnsl -ldl
+
+ASFLAGS += -P -L -K PIC -D_ASM -D__STDC__=0
+
+HAVE_PURIFY = 1
+
+NOSUCHFILE = /solaris-rm-f-sucks
+
+ifndef JS_NO_ULTRA
+ULTRA_OPTIONS := -xarch=v8plus
+ULTRA_OPTIONSD := -DULTRA_SPARC
+else
+ULTRA_OPTIONS := -xarch=v8
+ULTRA_OPTIONSD :=
+endif
+
+ifeq ($(OS_CPUARCH),sun4u)
+DEFINES += $(ULTRA_OPTIONSD)
+ifeq ($(findstring gcc,$(CC)),gcc)
+DEFINES += -Wa,$(ULTRA_OPTIONS),$(ULTRA_OPTIONSD)
+else
+ASFLAGS += $(ULTRA_OPTIONS) $(ULTRA_OPTIONSD)
+endif
+endif
+
+ifeq ($(OS_CPUARCH),sun4m)
+ifeq ($(findstring gcc,$(CC)),gcc)
+DEFINES += -Wa,-xarch=v8
+else
+ASFLAGS += -xarch=v8
+endif
+endif
+
+MKSHLIB = $(LD) -G
diff --git a/third_party/js-1.7/config/SunOS5.5.1.mk b/third_party/js-1.7/config/SunOS5.5.1.mk
new file mode 100644
index 0000000..648f72f
--- /dev/null
+++ b/third_party/js-1.7/config/SunOS5.5.1.mk
@@ -0,0 +1,44 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config stuff for SunOS5.5.1
+#
+
+include $(DEPTH)/config/SunOS5.5.mk
diff --git a/third_party/js-1.7/config/SunOS5.5.mk b/third_party/js-1.7/config/SunOS5.5.mk
new file mode 100644
index 0000000..e26b3a3
--- /dev/null
+++ b/third_party/js-1.7/config/SunOS5.5.mk
@@ -0,0 +1,87 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config stuff for SunOS5.5
+#
+
+AS = /usr/ccs/bin/as
+ifndef NS_USE_NATIVE
+CC = gcc
+CCC = g++
+CFLAGS += -Wall -Wno-format
+else
+CC = cc
+CCC = CC
+endif
+
+RANLIB = echo
+
+#.c.o:
+# $(CC) -c -MD $*.d $(CFLAGS) $<
+
+CPU_ARCH = sparc
+GFX_ARCH = x
+
+OS_CFLAGS = -DXP_UNIX -DSVR4 -DSYSV -DSOLARIS -DHAVE_LOCALTIME_R
+OS_LIBS = -lsocket -lnsl -ldl
+
+ASFLAGS += -P -L -K PIC -D_ASM -D__STDC__=0
+
+HAVE_PURIFY = 1
+
+NOSUCHFILE = /solaris-rm-f-sucks
+
+ifeq ($(OS_CPUARCH),sun4u) # ultra sparc?
+ifeq ($(CC),gcc) # using gcc?
+ifndef JS_NO_ULTRA # do we want ultra?
+ifdef JS_THREADSAFE # only in thread-safe mode
+DEFINES += -DULTRA_SPARC
+DEFINES += -Wa,-xarch=v8plus,-DULTRA_SPARC
+else
+ASFLAGS += -xarch=v8plus -DULTRA_SPARC
+endif
+endif
+endif
+endif
+
+MKSHLIB = $(LD) -G
+
+# Use the editline library to provide line-editing support.
+JS_EDITLINE = 1
diff --git a/third_party/js-1.7/config/SunOS5.6.mk b/third_party/js-1.7/config/SunOS5.6.mk
new file mode 100644
index 0000000..efe1152
--- /dev/null
+++ b/third_party/js-1.7/config/SunOS5.6.mk
@@ -0,0 +1,89 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config stuff for SunOS5.5
+#
+
+AS = /usr/ccs/bin/as
+ifndef NS_USE_NATIVE
+ CC = gcc
+ CCC = g++
+ CFLAGS += -Wall -Wno-format
+else
+ CC = cc
+ CCC = CC
+ CFLAGS += -mt -KPIC
+# LD = CC
+endif
+
+RANLIB = echo
+
+#.c.o:
+# $(CC) -c -MD $*.d $(CFLAGS) $<
+
+CPU_ARCH = sparc
+GFX_ARCH = x
+
+OS_CFLAGS = -DXP_UNIX -DSVR4 -DSYSV -DSOLARIS -DHAVE_LOCALTIME_R
+OS_LIBS = -lsocket -lnsl -ldl
+
+ASFLAGS += -P -L -K PIC -D_ASM -D__STDC__=0
+
+HAVE_PURIFY = 1
+
+NOSUCHFILE = /solaris-rm-f-sucks
+
+ifeq ($(OS_CPUARCH),sun4u) # ultra sparc?
+ifeq ($(CC),gcc) # using gcc?
+ifndef JS_NO_ULTRA # do we want ultra?
+ifdef JS_THREADSAFE # only in thread-safe mode
+DEFINES += -DULTRA_SPARC
+DEFINES += -Wa,-xarch=v8plus,-DULTRA_SPARC
+else
+ASFLAGS += -xarch=v8plus -DULTRA_SPARC
+endif
+endif
+endif
+endif
+
+MKSHLIB = $(LD) -G
+
+# Use the editline library to provide line-editing support.
+JS_EDITLINE = 1
diff --git a/third_party/js-1.7/config/SunOS5.7.mk b/third_party/js-1.7/config/SunOS5.7.mk
new file mode 100644
index 0000000..2cb02f2
--- /dev/null
+++ b/third_party/js-1.7/config/SunOS5.7.mk
@@ -0,0 +1,44 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1999
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config stuff for SunOS5.7
+#
+
+include $(DEPTH)/config/SunOS5.5.mk
diff --git a/third_party/js-1.7/config/SunOS5.8.mk b/third_party/js-1.7/config/SunOS5.8.mk
new file mode 100644
index 0000000..dd8a32d
--- /dev/null
+++ b/third_party/js-1.7/config/SunOS5.8.mk
@@ -0,0 +1,44 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1999
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config stuff for SunOS5.8
+#
+
+include $(DEPTH)/config/SunOS5.5.mk
diff --git a/third_party/js-1.7/config/SunOS5.9.mk b/third_party/js-1.7/config/SunOS5.9.mk
new file mode 100644
index 0000000..b01ec9c
--- /dev/null
+++ b/third_party/js-1.7/config/SunOS5.9.mk
@@ -0,0 +1,44 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1999
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config stuff for SunOS5.9
+#
+
+include $(DEPTH)/config/SunOS5.5.mk
diff --git a/third_party/js-1.7/config/WINNT4.0.mk b/third_party/js-1.7/config/WINNT4.0.mk
new file mode 100644
index 0000000..15a5a6f
--- /dev/null
+++ b/third_party/js-1.7/config/WINNT4.0.mk
@@ -0,0 +1,117 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config for Windows NT using MS Visual C++ (version?)
+#
+
+CC = cl
+
+RANLIB = echo
+
+PDBFILE = $(basename $(@F)).pdb
+
+#.c.o:
+# $(CC) -c -MD $*.d $(CFLAGS) $<
+
+CPU_ARCH = x86 # XXX fixme
+GFX_ARCH = win32
+
+# MSVC compiler options for both debug/optimize
+# -nologo - suppress copyright message
+# -W3 - Warning level 3
+# -Gm - enable minimal rebuild
+# -Z7 - put debug info into the executable, not in .pdb file
+# -Zi - put debug info into .pdb file
+# -YX - automatic precompiled headers
+# -GX - enable C++ exception support
+WIN_CFLAGS = -nologo -W3
+
+# MSVC compiler options for debug builds linked to MSVCRTD.DLL
+# -MDd - link with MSVCRTD.LIB (Dynamically-linked, multi-threaded, debug C-runtime)
+# -Od - minimal optimization
+WIN_IDG_CFLAGS = -MDd -Od -Z7
+
+# MSVC compiler options for debug builds linked to MSVCRT.DLL
+# -MD - link with MSVCRT.LIB (Dynamically-linked, multi-threaded, debug C-runtime)
+# -Od - minimal optimization
+WIN_DEBUG_CFLAGS = -MD -Od -Zi -Fd$(OBJDIR)/$(PDBFILE)
+
+# MSVC compiler options for release (optimized) builds
+# -MD - link with MSVCRT.LIB (Dynamically-linked, multi-threaded, C-runtime)
+# -O2 - Optimize for speed
+# -G5 - Optimize for Pentium
+WIN_OPT_CFLAGS = -MD -O2
+
+ifdef BUILD_OPT
+OPTIMIZER = $(WIN_OPT_CFLAGS)
+else
+ifdef BUILD_IDG
+OPTIMIZER = $(WIN_IDG_CFLAGS)
+else
+OPTIMIZER = $(WIN_DEBUG_CFLAGS)
+endif
+endif
+
+OS_CFLAGS = -D_X86_=1 -DXP_WIN -DXP_WIN32 -DWIN32 -D_WINDOWS -D_WIN32 $(WIN_CFLAGS)
+JSDLL_CFLAGS = -DEXPORT_JS_API
+OS_LIBS = -lm -lc
+
+PREBUILT_CPUCFG = 1
+USE_MSVC = 1
+
+LIB_LINK_FLAGS=kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib\
+ advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib oldnames.lib \
+ winmm.lib \
+ -nologo\
+ -subsystem:windows -dll -debug -pdb:$(OBJDIR)/$(PDBFILE)\
+ -machine:I386\
+ -opt:ref -opt:noicf
+
+EXE_LINK_FLAGS=kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib\
+ advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib oldnames.lib -nologo\
+ -subsystem:console -debug -pdb:$(OBJDIR)/$(PDBFILE)\
+ -machine:I386\
+ -opt:ref -opt:noicf
+
+# CAFEDIR = t:/cafe
+# JCLASSPATH = $(CAFEDIR)/Java/Lib/classes.zip
+# JAVAC = $(CAFEDIR)/Bin/sj.exe
+# JAVAH = $(CAFEDIR)/Java/Bin/javah.exe
+# JCFLAGS = -I$(CAFEDIR)/Java/Include -I$(CAFEDIR)/Java/Include/win32
diff --git a/third_party/js-1.7/config/WINNT5.0.mk b/third_party/js-1.7/config/WINNT5.0.mk
new file mode 100644
index 0000000..2b796a4
--- /dev/null
+++ b/third_party/js-1.7/config/WINNT5.0.mk
@@ -0,0 +1,117 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config for Windows NT using MS Visual C++ (version?)
+#
+
+CC = cl
+
+RANLIB = echo
+
+PDBFILE = $(basename $(@F)).pdb
+
+#.c.o:
+# $(CC) -c -MD $*.d $(CFLAGS) $<
+
+CPU_ARCH = x86 # XXX fixme
+GFX_ARCH = win32
+
+# MSVC compiler options for both debug/optimize
+# -nologo - suppress copyright message
+# -W3 - Warning level 3
+# -Gm - enable minimal rebuild
+# -Z7 - put debug info into the executable, not in .pdb file
+# -Zi - put debug info into .pdb file
+# -YX - automatic precompiled headers
+# -GX - enable C++ exception support
+WIN_CFLAGS = -nologo -W3
+
+# MSVC compiler options for debug builds linked to MSVCRTD.DLL
+# -MDd - link with MSVCRTD.LIB (Dynamically-linked, multi-threaded, debug C-runtime)
+# -Od - minimal optimization
+WIN_IDG_CFLAGS = -MDd -Od -Z7
+
+# MSVC compiler options for debug builds linked to MSVCRT.DLL
+# -MD - link with MSVCRT.LIB (Dynamically-linked, multi-threaded, debug C-runtime)
+# -Od - minimal optimization
+WIN_DEBUG_CFLAGS = -MD -Od -Zi -Fd$(OBJDIR)/$(PDBFILE)
+
+# MSVC compiler options for release (optimized) builds
+# -MD - link with MSVCRT.LIB (Dynamically-linked, multi-threaded, C-runtime)
+# -O2 - Optimize for speed
+# -G5 - Optimize for Pentium
+WIN_OPT_CFLAGS = -MD -O2
+
+ifdef BUILD_OPT
+OPTIMIZER = $(WIN_OPT_CFLAGS)
+else
+ifdef BUILD_IDG
+OPTIMIZER = $(WIN_IDG_CFLAGS)
+else
+OPTIMIZER = $(WIN_DEBUG_CFLAGS)
+endif
+endif
+
+OS_CFLAGS = -D_X86_=1 -DXP_WIN -DXP_WIN32 -DWIN32 -D_WINDOWS -D_WIN32 -DWINVER=0x500 -D_WIN32_WINNT=0x500 $(WIN_CFLAGS)
+JSDLL_CFLAGS = -DEXPORT_JS_API
+OS_LIBS = -lm -lc
+
+PREBUILT_CPUCFG = 1
+USE_MSVC = 1
+
+LIB_LINK_FLAGS=kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib\
+ advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib oldnames.lib \
+ winmm.lib \
+ -nologo\
+ -subsystem:windows -dll -debug -pdb:$(OBJDIR)/$(PDBFILE)\
+ -machine:I386\
+ -opt:ref -opt:noicf
+
+EXE_LINK_FLAGS=kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib\
+ advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib oldnames.lib -nologo\
+ -subsystem:console -debug -pdb:$(OBJDIR)/$(PDBFILE)\
+ -machine:I386\
+ -opt:ref -opt:noicf
+
+# CAFEDIR = t:/cafe
+# JCLASSPATH = $(CAFEDIR)/Java/Lib/classes.zip
+# JAVAC = $(CAFEDIR)/Bin/sj.exe
+# JAVAH = $(CAFEDIR)/Java/Bin/javah.exe
+# JCFLAGS = -I$(CAFEDIR)/Java/Include -I$(CAFEDIR)/Java/Include/win32
diff --git a/third_party/js-1.7/config/WINNT5.1.mk b/third_party/js-1.7/config/WINNT5.1.mk
new file mode 100644
index 0000000..2b796a4
--- /dev/null
+++ b/third_party/js-1.7/config/WINNT5.1.mk
@@ -0,0 +1,117 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config for Windows NT using MS Visual C++ (version?)
+#
+
+CC = cl
+
+RANLIB = echo
+
+PDBFILE = $(basename $(@F)).pdb
+
+#.c.o:
+# $(CC) -c -MD $*.d $(CFLAGS) $<
+
+CPU_ARCH = x86 # XXX fixme
+GFX_ARCH = win32
+
+# MSVC compiler options for both debug/optimize
+# -nologo - suppress copyright message
+# -W3 - Warning level 3
+# -Gm - enable minimal rebuild
+# -Z7 - put debug info into the executable, not in .pdb file
+# -Zi - put debug info into .pdb file
+# -YX - automatic precompiled headers
+# -GX - enable C++ exception support
+WIN_CFLAGS = -nologo -W3
+
+# MSVC compiler options for debug builds linked to MSVCRTD.DLL
+# -MDd - link with MSVCRTD.LIB (Dynamically-linked, multi-threaded, debug C-runtime)
+# -Od - minimal optimization
+WIN_IDG_CFLAGS = -MDd -Od -Z7
+
+# MSVC compiler options for debug builds linked to MSVCRT.DLL
+# -MD - link with MSVCRT.LIB (Dynamically-linked, multi-threaded, debug C-runtime)
+# -Od - minimal optimization
+WIN_DEBUG_CFLAGS = -MD -Od -Zi -Fd$(OBJDIR)/$(PDBFILE)
+
+# MSVC compiler options for release (optimized) builds
+# -MD - link with MSVCRT.LIB (Dynamically-linked, multi-threaded, C-runtime)
+# -O2 - Optimize for speed
+# -G5 - Optimize for Pentium
+WIN_OPT_CFLAGS = -MD -O2
+
+ifdef BUILD_OPT
+OPTIMIZER = $(WIN_OPT_CFLAGS)
+else
+ifdef BUILD_IDG
+OPTIMIZER = $(WIN_IDG_CFLAGS)
+else
+OPTIMIZER = $(WIN_DEBUG_CFLAGS)
+endif
+endif
+
+OS_CFLAGS = -D_X86_=1 -DXP_WIN -DXP_WIN32 -DWIN32 -D_WINDOWS -D_WIN32 -DWINVER=0x500 -D_WIN32_WINNT=0x500 $(WIN_CFLAGS)
+JSDLL_CFLAGS = -DEXPORT_JS_API
+OS_LIBS = -lm -lc
+
+PREBUILT_CPUCFG = 1
+USE_MSVC = 1
+
+LIB_LINK_FLAGS=kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib\
+ advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib oldnames.lib \
+ winmm.lib \
+ -nologo\
+ -subsystem:windows -dll -debug -pdb:$(OBJDIR)/$(PDBFILE)\
+ -machine:I386\
+ -opt:ref -opt:noicf
+
+EXE_LINK_FLAGS=kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib\
+ advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib oldnames.lib -nologo\
+ -subsystem:console -debug -pdb:$(OBJDIR)/$(PDBFILE)\
+ -machine:I386\
+ -opt:ref -opt:noicf
+
+# CAFEDIR = t:/cafe
+# JCLASSPATH = $(CAFEDIR)/Java/Lib/classes.zip
+# JAVAC = $(CAFEDIR)/Bin/sj.exe
+# JAVAH = $(CAFEDIR)/Java/Bin/javah.exe
+# JCFLAGS = -I$(CAFEDIR)/Java/Include -I$(CAFEDIR)/Java/Include/win32
diff --git a/third_party/js-1.7/config/WINNT5.2.mk b/third_party/js-1.7/config/WINNT5.2.mk
new file mode 100644
index 0000000..2b796a4
--- /dev/null
+++ b/third_party/js-1.7/config/WINNT5.2.mk
@@ -0,0 +1,117 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config for Windows NT using MS Visual C++ (version?)
+#
+
+CC = cl
+
+RANLIB = echo
+
+PDBFILE = $(basename $(@F)).pdb
+
+#.c.o:
+# $(CC) -c -MD $*.d $(CFLAGS) $<
+
+CPU_ARCH = x86 # XXX fixme
+GFX_ARCH = win32
+
+# MSVC compiler options for both debug/optimize
+# -nologo - suppress copyright message
+# -W3 - Warning level 3
+# -Gm - enable minimal rebuild
+# -Z7 - put debug info into the executable, not in .pdb file
+# -Zi - put debug info into .pdb file
+# -YX - automatic precompiled headers
+# -GX - enable C++ exception support
+WIN_CFLAGS = -nologo -W3
+
+# MSVC compiler options for debug builds linked to MSVCRTD.DLL
+# -MDd - link with MSVCRTD.LIB (Dynamically-linked, multi-threaded, debug C-runtime)
+# -Od - minimal optimization
+WIN_IDG_CFLAGS = -MDd -Od -Z7
+
+# MSVC compiler options for debug builds linked to MSVCRT.DLL
+# -MD - link with MSVCRT.LIB (Dynamically-linked, multi-threaded, debug C-runtime)
+# -Od - minimal optimization
+WIN_DEBUG_CFLAGS = -MD -Od -Zi -Fd$(OBJDIR)/$(PDBFILE)
+
+# MSVC compiler options for release (optimized) builds
+# -MD - link with MSVCRT.LIB (Dynamically-linked, multi-threaded, C-runtime)
+# -O2 - Optimize for speed
+# -G5 - Optimize for Pentium
+WIN_OPT_CFLAGS = -MD -O2
+
+ifdef BUILD_OPT
+OPTIMIZER = $(WIN_OPT_CFLAGS)
+else
+ifdef BUILD_IDG
+OPTIMIZER = $(WIN_IDG_CFLAGS)
+else
+OPTIMIZER = $(WIN_DEBUG_CFLAGS)
+endif
+endif
+
+OS_CFLAGS = -D_X86_=1 -DXP_WIN -DXP_WIN32 -DWIN32 -D_WINDOWS -D_WIN32 -DWINVER=0x500 -D_WIN32_WINNT=0x500 $(WIN_CFLAGS)
+JSDLL_CFLAGS = -DEXPORT_JS_API
+OS_LIBS = -lm -lc
+
+PREBUILT_CPUCFG = 1
+USE_MSVC = 1
+
+LIB_LINK_FLAGS=kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib\
+ advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib oldnames.lib \
+ winmm.lib \
+ -nologo\
+ -subsystem:windows -dll -debug -pdb:$(OBJDIR)/$(PDBFILE)\
+ -machine:I386\
+ -opt:ref -opt:noicf
+
+EXE_LINK_FLAGS=kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib\
+ advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib oldnames.lib -nologo\
+ -subsystem:console -debug -pdb:$(OBJDIR)/$(PDBFILE)\
+ -machine:I386\
+ -opt:ref -opt:noicf
+
+# CAFEDIR = t:/cafe
+# JCLASSPATH = $(CAFEDIR)/Java/Lib/classes.zip
+# JAVAC = $(CAFEDIR)/Bin/sj.exe
+# JAVAH = $(CAFEDIR)/Java/Bin/javah.exe
+# JCFLAGS = -I$(CAFEDIR)/Java/Include -I$(CAFEDIR)/Java/Include/win32
diff --git a/third_party/js-1.7/config/dgux.mk b/third_party/js-1.7/config/dgux.mk
new file mode 100644
index 0000000..3b5967e
--- /dev/null
+++ b/third_party/js-1.7/config/dgux.mk
@@ -0,0 +1,64 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config stuff for Data General DG/UX
+#
+
+#
+# Initial DG/UX port by Marc Fraioli (fraioli@dg-rtp.dg.com)
+#
+
+AS = as
+CC = gcc
+CCC = g++
+
+RANLIB = echo
+
+#
+# _DGUX_SOURCE is needed to turn on a lot of stuff in the headers if
+# you're not using DG's compiler. It shouldn't hurt if you are.
+#
+# _POSIX4A_DRAFT10_SOURCE is needed to pick up localtime_r, used in
+# prtime.c
+#
+OS_CFLAGS = -DXP_UNIX -DSVR4 -DSYSV -DDGUX -D_DGUX_SOURCE -D_POSIX4A_DRAFT10_SOURCE -DHAVE_LOCALTIME_R
+OS_LIBS = -lsocket -lnsl
+
+NOSUCHFILE = /no-such-file
diff --git a/third_party/js-1.7/fdlibm/.cvsignore b/third_party/js-1.7/fdlibm/.cvsignore
new file mode 100644
index 0000000..bb5cc66
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/.cvsignore
@@ -0,0 +1,7 @@
+*.pdb
+*.ncb
+*.opt
+*.plg
+Debug
+Release
+Makefile
diff --git a/third_party/js-1.7/fdlibm/CVS/Entries b/third_party/js-1.7/fdlibm/CVS/Entries
new file mode 100644
index 0000000..4c58638
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/CVS/Entries
@@ -0,0 +1,87 @@
+/.cvsignore/1.3/Sat Dec 5 09:02:32 1998//TJS_170
+/Makefile.in/1.13/Sat Nov 15 00:11:04 2003//TJS_170
+/Makefile.ref/1.7/Sat Nov 15 00:11:04 2003//TJS_170
+/e_acos.c/1.9/Sat Nov 15 00:11:04 2003//TJS_170
+/e_acosh.c/1.8/Sat Nov 15 00:11:04 2003//TJS_170
+/e_asin.c/1.10/Sat Nov 15 00:11:04 2003//TJS_170
+/e_atan2.c/1.9/Sat Nov 15 00:11:04 2003//TJS_170
+/e_atanh.c/1.9/Sat Nov 15 00:11:04 2003//TJS_170
+/e_cosh.c/1.9/Sat Nov 15 00:11:04 2003//TJS_170
+/e_exp.c/1.10/Sat Nov 15 00:11:04 2003//TJS_170
+/e_fmod.c/1.8/Sat Nov 15 00:11:04 2003//TJS_170
+/e_gamma.c/1.7/Sat Nov 15 00:11:04 2003//TJS_170
+/e_gamma_r.c/1.7/Sat Nov 15 00:11:04 2003//TJS_170
+/e_hypot.c/1.8/Sat Nov 15 00:11:04 2003//TJS_170
+/e_j0.c/1.9/Sat Nov 15 00:11:05 2003//TJS_170
+/e_j1.c/1.9/Sat Nov 15 00:11:05 2003//TJS_170
+/e_jn.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/e_lgamma.c/1.7/Sat Nov 15 00:11:05 2003//TJS_170
+/e_lgamma_r.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/e_log.c/1.10/Sat Nov 15 00:11:05 2003//TJS_170
+/e_log10.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/e_pow.c/1.12/Sat Nov 15 00:11:05 2003//TJS_170
+/e_rem_pio2.c/1.9/Thu Jul 7 18:26:28 2005//TJS_170
+/e_remainder.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/e_scalb.c/1.7/Sat Nov 15 00:11:05 2003//TJS_170
+/e_sinh.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/e_sqrt.c/1.10/Sat Nov 15 00:11:05 2003//TJS_170
+/fdlibm.h/1.16.2.1/Mon Mar 27 05:55:15 2006//TJS_170
+/fdlibm.mak/1.3/Sun Apr 4 19:46:38 2004//TJS_170
+/fdlibm.mdp/1.3/Wed May 26 01:34:31 1999/-kb/TJS_170
+/k_cos.c/1.9/Thu Jul 7 18:26:28 2005//TJS_170
+/k_rem_pio2.c/1.7/Sat Nov 15 00:11:05 2003//TJS_170
+/k_sin.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/k_standard.c/1.12/Sat Nov 15 00:11:05 2003//TJS_170
+/k_tan.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/s_asinh.c/1.9/Sat Nov 15 00:11:05 2003//TJS_170
+/s_atan.c/1.9/Sat Nov 15 00:11:05 2003//TJS_170
+/s_cbrt.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/s_ceil.c/1.9/Sat Nov 15 00:11:05 2003//TJS_170
+/s_copysign.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/s_cos.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/s_erf.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/s_expm1.c/1.9/Sat Nov 15 00:11:05 2003//TJS_170
+/s_fabs.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/s_finite.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/s_floor.c/1.9/Sat Nov 15 00:11:05 2003//TJS_170
+/s_frexp.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/s_ilogb.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/s_isnan.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/s_ldexp.c/1.7/Sat Nov 15 00:11:05 2003//TJS_170
+/s_lib_version.c/1.7/Sat Nov 15 00:11:05 2003//TJS_170
+/s_log1p.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/s_logb.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/s_matherr.c/1.7/Sat Nov 15 00:11:05 2003//TJS_170
+/s_modf.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/s_nextafter.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/s_rint.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/s_scalbn.c/1.10/Sat Nov 15 00:11:05 2003//TJS_170
+/s_signgam.c/1.7/Sat Nov 15 00:11:05 2003//TJS_170
+/s_significand.c/1.7/Sat Nov 15 00:11:05 2003//TJS_170
+/s_sin.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/s_tan.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/s_tanh.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/w_acos.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/w_acosh.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/w_asin.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/w_atan2.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/w_atanh.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/w_cosh.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/w_exp.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/w_fmod.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/w_gamma.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/w_gamma_r.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/w_hypot.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/w_j0.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/w_j1.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/w_jn.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/w_lgamma.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/w_lgamma_r.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/w_log.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/w_log10.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/w_pow.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/w_remainder.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/w_scalb.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/w_sinh.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/w_sqrt.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+D
diff --git a/third_party/js-1.7/fdlibm/CVS/Repository b/third_party/js-1.7/fdlibm/CVS/Repository
new file mode 100644
index 0000000..88fbd65
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/CVS/Repository
@@ -0,0 +1 @@
+mozilla/js/src/fdlibm
diff --git a/third_party/js-1.7/fdlibm/CVS/Root b/third_party/js-1.7/fdlibm/CVS/Root
new file mode 100644
index 0000000..cdb6f4a
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/CVS/Root
@@ -0,0 +1 @@
+:pserver:anonymous@cvs-mirror.mozilla.org:/cvsroot
diff --git a/third_party/js-1.7/fdlibm/CVS/Tag b/third_party/js-1.7/fdlibm/CVS/Tag
new file mode 100644
index 0000000..2a8b158
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/CVS/Tag
@@ -0,0 +1 @@
+NJS_170
diff --git a/third_party/js-1.7/fdlibm/Makefile.in b/third_party/js-1.7/fdlibm/Makefile.in
new file mode 100644
index 0000000..fdec7b7
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/Makefile.in
@@ -0,0 +1,127 @@
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either of the GNU General Public License Version 2 or later (the "GPL"),
+# or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+DEPTH = ../../..
+topsrcdir = @top_srcdir@
+srcdir = @srcdir@
+VPATH = @srcdir@
+
+include $(DEPTH)/config/autoconf.mk
+
+MODULE = js
+LIBRARY_NAME = fdm
+
+CSRCS = \
+ e_acos.c \
+ e_asin.c \
+ e_atan2.c \
+ e_exp.c \
+ e_fmod.c \
+ e_log.c \
+ e_pow.c \
+ e_rem_pio2.c \
+ s_scalbn.c \
+ e_sqrt.c \
+ k_cos.c \
+ k_sin.c \
+ k_rem_pio2.c \
+ k_tan.c \
+ s_atan.c \
+ s_ceil.c \
+ s_copysign.c \
+ s_cos.c \
+ s_fabs.c \
+ s_finite.c \
+ s_floor.c \
+ s_isnan.c \
+ s_lib_version.c \
+ s_sin.c \
+ s_tan.c \
+ w_acos.c \
+ w_asin.c \
+ w_atan2.c \
+ w_exp.c \
+ w_fmod.c \
+ w_log.c \
+ w_pow.c \
+ w_sqrt.c \
+ $(NULL)
+
+EXPORTS = fdlibm.h
+
+# we need to force a static lib for the linking that js/src/Makefile.in wants
+# to do, and we don't really need a shared library ever, so:
+FORCE_STATIC_LIB = 1
+FORCE_USE_PIC = 1
+
+include $(topsrcdir)/config/rules.mk
+
+#
+# Default IEEE libm
+#
+CFLAGS += -D_IEEE_LIBM
+
+ifeq ($(OS_ARCH),Linux)
+LDFLAGS += -ldl
+endif
+
+ifeq ($(OS_ARCH),OSF1)
+LDFLAGS += -lc_r
+endif
+
+ifeq ($(OS_ARCH),SunOS)
+LDFLAGS += -lposix4 -ldl -lnsl -lsocket
+ifeq ($(CPU_ARCH),sparc)
+
+ifndef JS_NO_ULTRA
+ULTRA_OPTIONS := -xarch=v8plus,-DULTRA_SPARC
+ULTRA_OPTIONSCC := -DULTRA_SPARC
+else
+ULTRA_OPTIONS := -xarch=v8
+ULTRA_OPTIONSCC :=
+endif
+
+ifeq ($(shell uname -m),sun4u)
+ASFLAGS += -Wa,$(ULTRA_OPTIONS),-P,-L,-D_ASM,-D__STDC__=0 $(ULTRA_OPTIONSCC)
+else
+ASFLAGS += -Wa,-xarch=v8,-P,-L,-D_ASM,-D__STDC__=0
+endif
+
+endif
+endif
+
diff --git a/third_party/js-1.7/fdlibm/Makefile.ref b/third_party/js-1.7/fdlibm/Makefile.ref
new file mode 100644
index 0000000..de37802
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/Makefile.ref
@@ -0,0 +1,192 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Sun Microsystems, Inc.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either of the GNU General Public License Version 2 or later (the "GPL"),
+# or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# @(#)Makefile 1.4 95/01/18
+#
+# ====================================================
+# Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+#
+# Developed at SunSoft, a Sun Microsystems, Inc. business.
+# Permission to use, copy, modify, and distribute this
+# software is freely granted, provided that this notice
+# is preserved.
+# ====================================================
+#
+#
+
+#
+# There are two options in making libm at fdlibm compile time:
+# _IEEE_LIBM --- IEEE libm; smaller, and somewhat faster
+# _MULTI_LIBM --- Support multi-standard at runtime by
+# imposing wrapper functions defined in
+# fdlibm.h:
+# _IEEE_MODE -- IEEE
+# _XOPEN_MODE -- X/OPEN
+# _POSIX_MODE -- POSIX/ANSI
+# _SVID3_MODE -- SVID
+#
+# Here is how to set up CFLAGS to create the desired libm at
+# compile time:
+#
+# CFLAGS = -D_IEEE_LIBM ... IEEE libm (recommended)
+# CFLAGS = -D_SVID3_MODE ... Multi-standard supported
+# libm with SVID as the
+# default standard
+# CFLAGS = -D_XOPEN_MODE ... Multi-standard supported
+# libm with XOPEN as the
+# default standard
+# CFLAGS = -D_POSIX_MODE ... Multi-standard supported
+# libm with POSIX as the
+# default standard
+# CFLAGS = ... Multi-standard supported
+# libm with IEEE as the
+# default standard
+#
+# NOTE: if scalb's second arguement is an int, then one must
+# define _SCALB_INT in CFLAGS. The default prototype of scalb
+# is double scalb(double, double)
+#
+
+DEPTH = ..
+
+include $(DEPTH)/config.mk
+
+#
+# Default IEEE libm
+#
+CFLAGS += -DXP_UNIX $(OPTIMIZER) $(OS_CFLAGS) $(DEFINES) $(INCLUDES) \
+ -DJSFILE $(XCFLAGS) -D_IEEE_LIBM
+
+# Need for jstypes.h and friends
+INCLUDES += -I..
+INCLUDES += -I../$(OBJDIR)
+
+#CC = cc
+
+INCFILES = fdlibm.h
+.INIT: $(INCFILES)
+.KEEP_STATE:
+FDLIBM_CFILES = \
+ k_standard.c k_rem_pio2.c \
+ k_cos.c k_sin.c k_tan.c \
+ e_acos.c e_acosh.c e_asin.c e_atan2.c \
+ e_atanh.c e_cosh.c e_exp.c e_fmod.c \
+ e_gamma.c e_gamma_r.c e_hypot.c e_j0.c \
+ e_j1.c e_jn.c e_lgamma.c e_lgamma_r.c \
+ e_log.c e_log10.c e_pow.c e_rem_pio2.c e_remainder.c \
+ e_scalb.c e_sinh.c e_sqrt.c \
+ w_acos.c w_acosh.c w_asin.c w_atan2.c \
+ w_atanh.c w_cosh.c w_exp.c w_fmod.c \
+ w_gamma.c w_gamma_r.c w_hypot.c w_j0.c \
+ w_j1.c w_jn.c w_lgamma.c w_lgamma_r.c \
+ w_log.c w_log10.c w_pow.c w_remainder.c \
+ w_scalb.c w_sinh.c w_sqrt.c \
+ s_asinh.c s_atan.c s_cbrt.c s_ceil.c s_copysign.c \
+ s_cos.c s_erf.c s_expm1.c s_fabs.c s_finite.c s_floor.c \
+ s_frexp.c s_ilogb.c s_isnan.c s_ldexp.c s_lib_version.c \
+ s_log1p.c s_logb.c s_matherr.c s_modf.c s_nextafter.c \
+ s_rint.c s_scalbn.c s_signgam.c s_significand.c s_sin.c \
+ s_tan.c s_tanh.c
+
+ifdef USE_MSVC
+FDLIBM_OBJS = $(addprefix $(OBJDIR)/, $(FDLIBM_CFILES:.c=.obj))
+else
+FDLIBM_OBJS = $(addprefix $(OBJDIR)/, $(FDLIBM_CFILES:.c=.o))
+endif
+
+ifdef USE_MSVC
+LIBRARY = $(OBJDIR)/fdlibm.lib
+else
+LIBRARY = $(OBJDIR)/libfdm.a
+endif
+
+define MAKE_OBJDIR
+if test ! -d $(@D); then rm -rf $(@D); mkdir -p $(@D); fi
+endef
+
+all: $(LIBRARY)
+
+export:
+
+$(OBJDIR)/%: %.c
+ @$(MAKE_OBJDIR)
+ $(CC) -o $@ $(CFLAGS) $*.c $(LDFLAGS)
+
+$(OBJDIR)/%.o: %.c
+ @$(MAKE_OBJDIR)
+ $(CC) -o $@ -c $(CFLAGS) $*.c
+
+$(OBJDIR)/%.o: %.s
+ @$(MAKE_OBJDIR)
+ $(AS) -o $@ $(ASFLAGS) $*.s
+
+# windows only
+$(OBJDIR)/%.obj: %.c
+ @$(MAKE_OBJDIR)
+ $(CC) -Fo$(OBJDIR)/ -c $(CFLAGS) $*.c
+
+ifeq ($(OS_ARCH),OS2)
+$(LIBRARY): $(FDLIBM_OBJS)
+ $(AR) $@ $? $(AR_OS2_SUFFIX)
+ $(RANLIB) $@
+else
+ifdef USE_MSVC
+$(LIBRARY): $(FDLIBM_OBJS)
+ lib.exe /out:"$@" $?
+else
+$(LIBRARY): $(FDLIBM_OBJS)
+ $(AR) rv $@ $?
+ $(RANLIB) $@
+endif
+endif
+
+libfdm.a : $(FDLIBM_OBJS)
+ $(AR) cru $(OBJDIR)/libfdm.a $(FDLIBM_OBJS)
+ $(RANLIB) $(OBJDIR)/libfdm.a
+
+clean:
+ rm -rf $(FDLIBM_OBJS)
+
+clobber:
+ rm -rf $(FDLIBM_OBJS) $(LIBRARY) $(DEPENDENCIES)
+
+SUFFIXES: .i
+%.i: %.c
+ $(CC) -C -E $(CFLAGS) $< > $*.i
diff --git a/third_party/js-1.7/fdlibm/e_acos.c b/third_party/js-1.7/fdlibm/e_acos.c
new file mode 100644
index 0000000..a07c1ee
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/e_acos.c
@@ -0,0 +1,147 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)e_acos.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/* __ieee754_acos(x)
+ * Method :
+ * acos(x) = pi/2 - asin(x)
+ * acos(-x) = pi/2 + asin(x)
+ * For |x|<=0.5
+ * acos(x) = pi/2 - (x + x*x^2*R(x^2)) (see asin.c)
+ * For x>0.5
+ * acos(x) = pi/2 - (pi/2 - 2asin(sqrt((1-x)/2)))
+ * = 2asin(sqrt((1-x)/2))
+ * = 2s + 2s*z*R(z) ...z=(1-x)/2, s=sqrt(z)
+ * = 2f + (2c + 2s*z*R(z))
+ * where f=hi part of s, and c = (z-f*f)/(s+f) is the correction term
+ * for f so that f+c ~ sqrt(z).
+ * For x<-0.5
+ * acos(x) = pi - 2asin(sqrt((1-|x|)/2))
+ * = pi - 0.5*(s+s*z*R(z)), where z=(1-|x|)/2,s=sqrt(z)
+ *
+ * Special cases:
+ * if x is NaN, return x itself;
+ * if |x|>1, return NaN with invalid signal.
+ *
+ * Function needed: sqrt
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+static const double
+#else
+static double
+#endif
+one= 1.00000000000000000000e+00, /* 0x3FF00000, 0x00000000 */
+pi = 3.14159265358979311600e+00, /* 0x400921FB, 0x54442D18 */
+pio2_hi = 1.57079632679489655800e+00, /* 0x3FF921FB, 0x54442D18 */
+pio2_lo = 6.12323399573676603587e-17, /* 0x3C91A626, 0x33145C07 */
+pS0 = 1.66666666666666657415e-01, /* 0x3FC55555, 0x55555555 */
+pS1 = -3.25565818622400915405e-01, /* 0xBFD4D612, 0x03EB6F7D */
+pS2 = 2.01212532134862925881e-01, /* 0x3FC9C155, 0x0E884455 */
+pS3 = -4.00555345006794114027e-02, /* 0xBFA48228, 0xB5688F3B */
+pS4 = 7.91534994289814532176e-04, /* 0x3F49EFE0, 0x7501B288 */
+pS5 = 3.47933107596021167570e-05, /* 0x3F023DE1, 0x0DFDF709 */
+qS1 = -2.40339491173441421878e+00, /* 0xC0033A27, 0x1C8A2D4B */
+qS2 = 2.02094576023350569471e+00, /* 0x40002AE5, 0x9C598AC8 */
+qS3 = -6.88283971605453293030e-01, /* 0xBFE6066C, 0x1B8D0159 */
+qS4 = 7.70381505559019352791e-02; /* 0x3FB3B8C5, 0xB12E9282 */
+
+#ifdef __STDC__
+ double __ieee754_acos(double x)
+#else
+ double __ieee754_acos(x)
+ double x;
+#endif
+{
+ fd_twoints u;
+ double df;
+ double z,p,q,r,w,s,c;
+ int hx,ix;
+ u.d = x;
+ hx = __HI(u);
+ ix = hx&0x7fffffff;
+ if(ix>=0x3ff00000) { /* |x| >= 1 */
+ if(((ix-0x3ff00000)|__LO(u))==0) { /* |x|==1 */
+ if(hx>0) return 0.0; /* acos(1) = 0 */
+ else return pi+2.0*pio2_lo; /* acos(-1)= pi */
+ }
+ return (x-x)/(x-x); /* acos(|x|>1) is NaN */
+ }
+ if(ix<0x3fe00000) { /* |x| < 0.5 */
+ if(ix<=0x3c600000) return pio2_hi+pio2_lo;/*if|x|<2**-57*/
+ z = x*x;
+ p = z*(pS0+z*(pS1+z*(pS2+z*(pS3+z*(pS4+z*pS5)))));
+ q = one+z*(qS1+z*(qS2+z*(qS3+z*qS4)));
+ r = p/q;
+ return pio2_hi - (x - (pio2_lo-x*r));
+ } else if (hx<0) { /* x < -0.5 */
+ z = (one+x)*0.5;
+ p = z*(pS0+z*(pS1+z*(pS2+z*(pS3+z*(pS4+z*pS5)))));
+ q = one+z*(qS1+z*(qS2+z*(qS3+z*qS4)));
+ s = fd_sqrt(z);
+ r = p/q;
+ w = r*s-pio2_lo;
+ return pi - 2.0*(s+w);
+ } else { /* x > 0.5 */
+ z = (one-x)*0.5;
+ s = fd_sqrt(z);
+ u.d = s;
+ __LO(u) = 0;
+ df = u.d;
+ c = (z-df*df)/(s+df);
+ p = z*(pS0+z*(pS1+z*(pS2+z*(pS3+z*(pS4+z*pS5)))));
+ q = one+z*(qS1+z*(qS2+z*(qS3+z*qS4)));
+ r = p/q;
+ w = r*s+c;
+ return 2.0*(df+w);
+ }
+}
diff --git a/third_party/js-1.7/fdlibm/e_acosh.c b/third_party/js-1.7/fdlibm/e_acosh.c
new file mode 100644
index 0000000..725ccee
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/e_acosh.c
@@ -0,0 +1,105 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)e_acosh.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ *
+ */
+
+/* __ieee754_acosh(x)
+ * Method :
+ * Based on
+ * acosh(x) = log [ x + sqrt(x*x-1) ]
+ * we have
+ * acosh(x) := log(x)+ln2, if x is large; else
+ * acosh(x) := log(2x-1/(sqrt(x*x-1)+x)) if x>2; else
+ * acosh(x) := log1p(t+sqrt(2.0*t+t*t)); where t=x-1.
+ *
+ * Special cases:
+ * acosh(x) is NaN with signal if x<1.
+ * acosh(NaN) is NaN without signal.
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+static const double
+#else
+static double
+#endif
+one = 1.0,
+ln2 = 6.93147180559945286227e-01; /* 0x3FE62E42, 0xFEFA39EF */
+
+#ifdef __STDC__
+ double __ieee754_acosh(double x)
+#else
+ double __ieee754_acosh(x)
+ double x;
+#endif
+{
+ fd_twoints u;
+ double t;
+ int hx;
+ u.d = x;
+ hx = __HI(u);
+ if(hx<0x3ff00000) { /* x < 1 */
+ return (x-x)/(x-x);
+ } else if(hx >=0x41b00000) { /* x > 2**28 */
+ if(hx >=0x7ff00000) { /* x is inf of NaN */
+ return x+x;
+ } else
+ return __ieee754_log(x)+ln2; /* acosh(huge)=log(2x) */
+ } else if(((hx-0x3ff00000)|__LO(u))==0) {
+ return 0.0; /* acosh(1) = 0 */
+ } else if (hx > 0x40000000) { /* 2**28 > x > 2 */
+ t=x*x;
+ return __ieee754_log(2.0*x-one/(x+fd_sqrt(t-one)));
+ } else { /* 1<x<2 */
+ t = x-one;
+ return fd_log1p(t+fd_sqrt(2.0*t+t*t));
+ }
+}
diff --git a/third_party/js-1.7/fdlibm/e_asin.c b/third_party/js-1.7/fdlibm/e_asin.c
new file mode 100644
index 0000000..624c4d2
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/e_asin.c
@@ -0,0 +1,156 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)e_asin.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/* __ieee754_asin(x)
+ * Method :
+ * Since asin(x) = x + x^3/6 + x^5*3/40 + x^7*15/336 + ...
+ * we approximate asin(x) on [0,0.5] by
+ * asin(x) = x + x*x^2*R(x^2)
+ * where
+ * R(x^2) is a rational approximation of (asin(x)-x)/x^3
+ * and its remez error is bounded by
+ * |(asin(x)-x)/x^3 - R(x^2)| < 2^(-58.75)
+ *
+ * For x in [0.5,1]
+ * asin(x) = pi/2-2*asin(sqrt((1-x)/2))
+ * Let y = (1-x), z = y/2, s := sqrt(z), and pio2_hi+pio2_lo=pi/2;
+ * then for x>0.98
+ * asin(x) = pi/2 - 2*(s+s*z*R(z))
+ * = pio2_hi - (2*(s+s*z*R(z)) - pio2_lo)
+ * For x<=0.98, let pio4_hi = pio2_hi/2, then
+ * f = hi part of s;
+ * c = sqrt(z) - f = (z-f*f)/(s+f) ...f+c=sqrt(z)
+ * and
+ * asin(x) = pi/2 - 2*(s+s*z*R(z))
+ * = pio4_hi+(pio4-2s)-(2s*z*R(z)-pio2_lo)
+ * = pio4_hi+(pio4-2f)-(2s*z*R(z)-(pio2_lo+2c))
+ *
+ * Special cases:
+ * if x is NaN, return x itself;
+ * if |x|>1, return NaN with invalid signal.
+ *
+ */
+
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+static const double
+#else
+static double
+#endif
+one = 1.00000000000000000000e+00, /* 0x3FF00000, 0x00000000 */
+really_big = 1.000e+300,
+pio2_hi = 1.57079632679489655800e+00, /* 0x3FF921FB, 0x54442D18 */
+pio2_lo = 6.12323399573676603587e-17, /* 0x3C91A626, 0x33145C07 */
+pio4_hi = 7.85398163397448278999e-01, /* 0x3FE921FB, 0x54442D18 */
+ /* coefficient for R(x^2) */
+pS0 = 1.66666666666666657415e-01, /* 0x3FC55555, 0x55555555 */
+pS1 = -3.25565818622400915405e-01, /* 0xBFD4D612, 0x03EB6F7D */
+pS2 = 2.01212532134862925881e-01, /* 0x3FC9C155, 0x0E884455 */
+pS3 = -4.00555345006794114027e-02, /* 0xBFA48228, 0xB5688F3B */
+pS4 = 7.91534994289814532176e-04, /* 0x3F49EFE0, 0x7501B288 */
+pS5 = 3.47933107596021167570e-05, /* 0x3F023DE1, 0x0DFDF709 */
+qS1 = -2.40339491173441421878e+00, /* 0xC0033A27, 0x1C8A2D4B */
+qS2 = 2.02094576023350569471e+00, /* 0x40002AE5, 0x9C598AC8 */
+qS3 = -6.88283971605453293030e-01, /* 0xBFE6066C, 0x1B8D0159 */
+qS4 = 7.70381505559019352791e-02; /* 0x3FB3B8C5, 0xB12E9282 */
+
+#ifdef __STDC__
+ double __ieee754_asin(double x)
+#else
+ double __ieee754_asin(x)
+ double x;
+#endif
+{
+ fd_twoints u;
+ double w,t,p,q,c,r,s;
+ int hx,ix;
+ u.d = x;
+ hx = __HI(u);
+ x = u.d;
+ ix = hx&0x7fffffff;
+ if(ix>= 0x3ff00000) { /* |x|>= 1 */
+ if(((ix-0x3ff00000)|__LO(u))==0)
+ /* asin(1)=+-pi/2 with inexact */
+ return x*pio2_hi+x*pio2_lo;
+ return (x-x)/(x-x); /* asin(|x|>1) is NaN */
+ } else if (ix<0x3fe00000) { /* |x|<0.5 */
+ if(ix<0x3e400000) { /* if |x| < 2**-27 */
+ if(really_big+x>one) return x;/* return x with inexact if x!=0*/
+ } else
+ t = x*x;
+ p = t*(pS0+t*(pS1+t*(pS2+t*(pS3+t*(pS4+t*pS5)))));
+ q = one+t*(qS1+t*(qS2+t*(qS3+t*qS4)));
+ w = p/q;
+ return x+x*w;
+ }
+ /* 1> |x|>= 0.5 */
+ w = one-fd_fabs(x);
+ t = w*0.5;
+ p = t*(pS0+t*(pS1+t*(pS2+t*(pS3+t*(pS4+t*pS5)))));
+ q = one+t*(qS1+t*(qS2+t*(qS3+t*qS4)));
+ s = fd_sqrt(t);
+ if(ix>=0x3FEF3333) { /* if |x| > 0.975 */
+ w = p/q;
+ t = pio2_hi-(2.0*(s+s*w)-pio2_lo);
+ } else {
+ u.d = s;
+ __LO(u) = 0;
+ w = u.d;
+ c = (t-w*w)/(s+w);
+ r = p/q;
+ p = 2.0*s*r-(pio2_lo-2.0*c);
+ q = pio4_hi-2.0*w;
+ t = pio4_hi-(p-q);
+ }
+ if(hx>0) return t; else return -t;
+}
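
The [0.5,1] reduction documented in the header comment above, asin(x) = pi/2 - 2*asin(sqrt((1-x)/2)), can be sanity-checked outside fdlibm. A minimal standalone sketch follows; it deliberately uses the ordinary <math.h> asin/sqrt instead of the fd_* wrappers and hard-codes pi/2, so it illustrates the identity rather than the vendored implementation.

#include <math.h>
#include <stdio.h>

int main(void)
{
    const double pio2 = 1.57079632679489661923;    /* pi/2 */
    const double xs[] = { 0.5, 0.75, 0.98, 0.999999 };
    for (int i = 0; i < 4; i++) {
        double x = xs[i];
        double direct  = asin(x);                                   /* library value */
        double reduced = pio2 - 2.0 * asin(sqrt((1.0 - x) / 2.0));  /* reduction above */
        printf("x=%.6f  direct=%.17g  reduced=%.17g  diff=%g\n",
               x, direct, reduced, direct - reduced);
    }
    return 0;
}
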
diff --git a/third_party/js-1.7/fdlibm/e_atan2.c b/third_party/js-1.7/fdlibm/e_atan2.c
new file mode 100644
index 0000000..9c9a2c0
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/e_atan2.c
@@ -0,0 +1,165 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)e_atan2.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ *
+ */
+
+/* __ieee754_atan2(y,x)
+ * Method :
+ * 1. Reduce y to positive by atan2(y,x)=-atan2(-y,x).
+ * 2. Reduce x to positive by (if x and y are unexceptional):
+ * ARG (x+iy) = arctan(y/x) ... if x > 0,
+ * ARG (x+iy) = pi - arctan[y/(-x)] ... if x < 0,
+ *
+ * Special cases:
+ *
+ * ATAN2((anything), NaN ) is NaN;
+ * ATAN2(NAN , (anything) ) is NaN;
+ * ATAN2(+-0, +(anything but NaN)) is +-0 ;
+ * ATAN2(+-0, -(anything but NaN)) is +-pi ;
+ * ATAN2(+-(anything but 0 and NaN), 0) is +-pi/2;
+ * ATAN2(+-(anything but INF and NaN), +INF) is +-0 ;
+ * ATAN2(+-(anything but INF and NaN), -INF) is +-pi;
+ * ATAN2(+-INF,+INF ) is +-pi/4 ;
+ * ATAN2(+-INF,-INF ) is +-3pi/4;
+ * ATAN2(+-INF, (anything but 0, NaN, and INF)) is +-pi/2;
+ *
+ * Constants:
+ * The hexadecimal values are the intended ones for the following
+ * constants. The decimal values may be used, provided that the
+ * compiler will convert from decimal to binary accurately enough
+ * to produce the hexadecimal values shown.
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+static const double
+#else
+static double
+#endif
+tiny = 1.0e-300,
+zero = 0.0,
+pi_o_4 = 7.8539816339744827900E-01, /* 0x3FE921FB, 0x54442D18 */
+pi_o_2 = 1.5707963267948965580E+00, /* 0x3FF921FB, 0x54442D18 */
+pi = 3.1415926535897931160E+00, /* 0x400921FB, 0x54442D18 */
+pi_lo = 1.2246467991473531772E-16; /* 0x3CA1A626, 0x33145C07 */
+
+#ifdef __STDC__
+ double __ieee754_atan2(double y, double x)
+#else
+ double __ieee754_atan2(y,x)
+ double y,x;
+#endif
+{
+ fd_twoints ux, uy, uz;
+ double z;
+ int k,m,hx,hy,ix,iy;
+ unsigned lx,ly;
+
+ ux.d = x; uy.d = y;
+ hx = __HI(ux); ix = hx&0x7fffffff;
+ lx = __LO(ux);
+ hy = __HI(uy); iy = hy&0x7fffffff;
+ ly = __LO(uy);
+ if(((ix|((lx|-(int)lx)>>31))>0x7ff00000)||
+ ((iy|((ly|-(int)ly)>>31))>0x7ff00000)) /* x or y is NaN */
+ return x+y;
+ if(((hx-0x3ff00000)|lx)==0) return fd_atan(y); /* x=1.0 */
+ m = ((hy>>31)&1)|((hx>>30)&2); /* 2*sign(x)+sign(y) */
+
+ /* when y = 0 */
+ if((iy|ly)==0) {
+ switch(m) {
+ case 0:
+ case 1: return y; /* atan(+-0,+anything)=+-0 */
+ case 2: return pi+tiny;/* atan(+0,-anything) = pi */
+ case 3: return -pi-tiny;/* atan(-0,-anything) =-pi */
+ }
+ }
+ /* when x = 0 */
+ if((ix|lx)==0) return (hy<0)? -pi_o_2-tiny: pi_o_2+tiny;
+
+ /* when x is INF */
+ if(ix==0x7ff00000) {
+ if(iy==0x7ff00000) {
+ switch(m) {
+ case 0: return pi_o_4+tiny;/* atan(+INF,+INF) */
+ case 1: return -pi_o_4-tiny;/* atan(-INF,+INF) */
+ case 2: return 3.0*pi_o_4+tiny;/*atan(+INF,-INF)*/
+ case 3: return -3.0*pi_o_4-tiny;/*atan(-INF,-INF)*/
+ }
+ } else {
+ switch(m) {
+ case 0: return zero ; /* atan(+...,+INF) */
+ case 1: return -zero ; /* atan(-...,+INF) */
+ case 2: return pi+tiny ; /* atan(+...,-INF) */
+ case 3: return -pi-tiny ; /* atan(-...,-INF) */
+ }
+ }
+ }
+ /* when y is INF */
+ if(iy==0x7ff00000) return (hy<0)? -pi_o_2-tiny: pi_o_2+tiny;
+
+ /* compute y/x */
+ k = (iy-ix)>>20;
+ if(k > 60) z=pi_o_2+0.5*pi_lo; /* |y/x| > 2**60 */
+ else if(hx<0&&k<-60) z=0.0; /* |y/x| < 2**-60 */
+ else z=fd_atan(fd_fabs(y/x)); /* safe to do y/x */
+ switch (m) {
+ case 0: return z ; /* atan(+,+) */
+ case 1: uz.d = z;
+ __HI(uz) ^= 0x80000000;
+ z = uz.d;
+ return z ; /* atan(-,+) */
+ case 2: return pi-(z-pi_lo);/* atan(+,-) */
+ default: /* case 3 */
+ return (z-pi_lo)-pi;/* atan(-,-) */
+ }
+}
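
The quadrant dispatch above packs the two sign bits into m = 2*sign(x) + sign(y) and then switches on m. The short sketch below reproduces that encoding with a portable sign-bit read (memcpy into a uint64_t, an illustration rather than the __HI/__LO union used in the vendored code) and compares the resulting quadrants against the standard atan2.

#include <math.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>

static int sign_bit(double v)               /* 1 if the IEEE sign bit is set */
{
    uint64_t bits;
    memcpy(&bits, &v, sizeof bits);
    return (int)(bits >> 63);
}

int main(void)
{
    double pts[][2] = { {1,1}, {-1,1}, {1,-1}, {-1,-1} };   /* (y, x) pairs */
    for (int i = 0; i < 4; i++) {
        double y = pts[i][0], x = pts[i][1];
        int m = 2 * sign_bit(x) + sign_bit(y);              /* same encoding as above */
        printf("y=%+g x=%+g  m=%d  atan2=%+.6f\n", y, x, m, atan2(y, x));
    }
    return 0;
}
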
diff --git a/third_party/js-1.7/fdlibm/e_atanh.c b/third_party/js-1.7/fdlibm/e_atanh.c
new file mode 100644
index 0000000..dc4a90c
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/e_atanh.c
@@ -0,0 +1,110 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)e_atanh.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ *
+ */
+
+/* __ieee754_atanh(x)
+ * Method :
+ * 1. Reduce x to positive by atanh(-x) = -atanh(x)
+ * 2. For x>=0.5
+ * 1 2x x
+ * atanh(x) = --- * log(1 + -------) = 0.5 * log1p(2 * --------)
+ * 2 1 - x 1 - x
+ *
+ * For x<0.5
+ * atanh(x) = 0.5*log1p(2x+2x*x/(1-x))
+ *
+ * Special cases:
+ * atanh(x) is NaN if |x| > 1 with signal;
+ * atanh(NaN) is that NaN with no signal;
+ * atanh(+-1) is +-INF with signal.
+ *
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+static const double one = 1.0, really_big = 1e300;
+#else
+static double one = 1.0, really_big = 1e300;
+#endif
+
+static double zero = 0.0;
+
+#ifdef __STDC__
+ double __ieee754_atanh(double x)
+#else
+ double __ieee754_atanh(x)
+ double x;
+#endif
+{
+ double t;
+ int hx,ix;
+ unsigned lx;
+ fd_twoints u;
+ u.d = x;
+ hx = __HI(u); /* high word */
+ lx = __LO(u); /* low word */
+ ix = hx&0x7fffffff;
+ if ((ix|((lx|(-(int)lx))>>31))>0x3ff00000) /* |x|>1 */
+ return (x-x)/(x-x);
+ if(ix==0x3ff00000)
+ return x/zero;
+ if(ix<0x3e300000&&(really_big+x)>zero) return x; /* x<2**-28 */
+ u.d = x;
+ __HI(u) = ix; /* x <- |x| */
+ x = u.d;
+ if(ix<0x3fe00000) { /* x < 0.5 */
+ t = x+x;
+ t = 0.5*fd_log1p(t+t*x/(one-x));
+ } else
+ t = 0.5*fd_log1p((x+x)/(one-x));
+ if(hx>=0) return t; else return -t;
+}
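
Both branches above reduce to log1p identities: atanh(x) = 0.5*log1p(2x + 2x*x/(1-x)) for |x| < 0.5 and 0.5*log1p(2x/(1-x)) otherwise. A small sketch using the C99 log1p (not the fd_log1p wrapper) checks them against the library atanh:

#include <math.h>
#include <stdio.h>

static double atanh_sketch(double x)
{
    double ax = fabs(x), t;
    if (ax < 0.5)
        t = 0.5 * log1p(2.0 * ax + 2.0 * ax * ax / (1.0 - ax));
    else
        t = 0.5 * log1p(2.0 * ax / (1.0 - ax));
    return x < 0.0 ? -t : t;        /* atanh is odd */
}

int main(void)
{
    double xs[] = { -0.9, -0.3, 0.25, 0.5, 0.99 };
    for (int i = 0; i < 5; i++)
        printf("x=%+.2f  sketch=%.17g  libm=%.17g\n",
               xs[i], atanh_sketch(xs[i]), atanh(xs[i]));
    return 0;
}
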
diff --git a/third_party/js-1.7/fdlibm/e_cosh.c b/third_party/js-1.7/fdlibm/e_cosh.c
new file mode 100644
index 0000000..4f8d4f7
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/e_cosh.c
@@ -0,0 +1,133 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)e_cosh.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/* __ieee754_cosh(x)
+ * Method :
+ * mathematically cosh(x) is defined to be (exp(x)+exp(-x))/2
+ * 1. Replace x by |x| (cosh(x) = cosh(-x)).
+ * 2.
+ * [ exp(x) - 1 ]^2
+ * 0 <= x <= ln2/2 : cosh(x) := 1 + -------------------
+ * 2*exp(x)
+ *
+ * exp(x) + 1/exp(x)
+ * ln2/2 <= x <= 22 : cosh(x) := -------------------
+ * 2
+ * 22 <= x <= lnovft : cosh(x) := exp(x)/2
+ * lnovft <= x <= ln2ovft: cosh(x) := exp(x/2)/2 * exp(x/2)
+ * ln2ovft < x : cosh(x) := huge*huge (overflow)
+ *
+ * Special cases:
+ * cosh(x) is |x| if x is +INF, -INF, or NaN.
+ * only cosh(0)=1 is exact for finite x.
+ */
+
+#include "fdlibm.h"
+
+#ifdef _WIN32
+#define huge myhuge
+#endif
+
+#ifdef __STDC__
+static const double one = 1.0, half=0.5, really_big = 1.0e300;
+#else
+static double one = 1.0, half=0.5, really_big = 1.0e300;
+#endif
+
+#ifdef __STDC__
+ double __ieee754_cosh(double x)
+#else
+ double __ieee754_cosh(x)
+ double x;
+#endif
+{
+ fd_twoints u;
+ double t,w;
+ int ix;
+ unsigned lx;
+
+ /* High word of |x|. */
+ u.d = x;
+ ix = __HI(u);
+ ix &= 0x7fffffff;
+
+ /* x is INF or NaN */
+ if(ix>=0x7ff00000) return x*x;
+
+ /* |x| in [0,0.5*ln2], return 1+expm1(|x|)^2/(2*exp(|x|)) */
+ if(ix<0x3fd62e43) {
+ t = fd_expm1(fd_fabs(x));
+ w = one+t;
+ if (ix<0x3c800000) return w; /* cosh(tiny) = 1 */
+ return one+(t*t)/(w+w);
+ }
+
+ /* |x| in [0.5*ln2,22], return (exp(|x|)+1/exp(|x|))/2; */
+ if (ix < 0x40360000) {
+ t = __ieee754_exp(fd_fabs(x));
+ return half*t+half/t;
+ }
+
+ /* |x| in [22, log(maxdouble)] return half*exp(|x|) */
+ if (ix < 0x40862E42) return half*__ieee754_exp(fd_fabs(x));
+
+ /* |x| in [log(maxdouble), overflow threshold] */
+ lx = *( (((*(unsigned*)&one)>>29)) + (unsigned*)&x);
+ if (ix<0x408633CE ||
+ (ix==0x408633ce)&&(lx<=(unsigned)0x8fb9f87d)) {
+ w = __ieee754_exp(half*fd_fabs(x));
+ t = half*w;
+ return t*w;
+ }
+
+ /* |x| > overflow threshold, cosh(x) overflow */
+ return really_big*really_big;
+}
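
The small-argument branch above relies on the identity cosh(x) = 1 + expm1(|x|)^2 / (2*exp(|x|)), which avoids the cancellation that (exp(x)+exp(-x))/2 suffers near zero. A short sketch with the standard expm1/cosh standing in for the fd_* wrappers:

#include <math.h>
#include <stdio.h>

int main(void)
{
    double xs[] = { 1e-8, 1e-4, 0.1, 0.3 };
    for (int i = 0; i < 4; i++) {
        double x = xs[i];
        double t = expm1(fabs(x));          /* exp(|x|) - 1, accurate near 0 */
        double w = 1.0 + t;                 /* exp(|x|) */
        double branch = 1.0 + (t * t) / (w + w);
        printf("x=%g  branch=%.17g  cosh=%.17g\n", x, branch, cosh(x));
    }
    return 0;
}
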
diff --git a/third_party/js-1.7/fdlibm/e_exp.c b/third_party/js-1.7/fdlibm/e_exp.c
new file mode 100644
index 0000000..ad9cec1
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/e_exp.c
@@ -0,0 +1,202 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)e_exp.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/* __ieee754_exp(x)
+ * Returns the exponential of x.
+ *
+ * Method
+ * 1. Argument reduction:
+ * Reduce x to an r so that |r| <= 0.5*ln2 ~ 0.34658.
+ * Given x, find r and integer k such that
+ *
+ * x = k*ln2 + r, |r| <= 0.5*ln2.
+ *
+ * Here r will be represented as r = hi-lo for better
+ * accuracy.
+ *
+ * 2. Approximation of exp(r) by a special rational function on
+ * the interval [0,0.34658]:
+ * Write
+ * R(r**2) = r*(exp(r)+1)/(exp(r)-1) = 2 + r*r/6 - r**4/360 + ...
+ * We use a special Remez algorithm on [0,0.34658] to generate
+ * a polynomial of degree 5 to approximate R. The maximum error
+ * of this polynomial approximation is bounded by 2**-59. In
+ * other words,
+ * R(z) ~ 2.0 + P1*z + P2*z**2 + P3*z**3 + P4*z**4 + P5*z**5
+ * (where z=r*r, and the values of P1 to P5 are listed below)
+ * and
+ * | 5 | -59
+ * | 2.0+P1*z+...+P5*z - R(z) | <= 2
+ * | |
+ * The computation of exp(r) thus becomes
+ * 2*r
+ * exp(r) = 1 + -------
+ * R - r
+ * r*R1(r)
+ * = 1 + r + ----------- (for better accuracy)
+ * 2 - R1(r)
+ * where
+ * 2 4 10
+ * R1(r) = r - (P1*r + P2*r + ... + P5*r ).
+ *
+ * 3. Scale back to obtain exp(x):
+ * From step 1, we have
+ * exp(x) = 2^k * exp(r)
+ *
+ * Special cases:
+ * exp(INF) is INF, exp(NaN) is NaN;
+ * exp(-INF) is 0, and
+ * for finite argument, only exp(0)=1 is exact.
+ *
+ * Accuracy:
+ * according to an error analysis, the error is always less than
+ * 1 ulp (unit in the last place).
+ *
+ * Misc. info.
+ * For IEEE double
+ * if x > 7.09782712893383973096e+02 then exp(x) overflow
+ * if x < -7.45133219101941108420e+02 then exp(x) underflow
+ *
+ * Constants:
+ * The hexadecimal values are the intended ones for the following
+ * constants. The decimal values may be used, provided that the
+ * compiler will convert from decimal to binary accurately enough
+ * to produce the hexadecimal values shown.
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+static const double
+#else
+static double
+#endif
+one = 1.0,
+halF[2] = {0.5,-0.5,},
+really_big = 1.0e+300,
+twom1000= 9.33263618503218878990e-302, /* 2**-1000=0x01700000,0*/
+o_threshold= 7.09782712893383973096e+02, /* 0x40862E42, 0xFEFA39EF */
+u_threshold= -7.45133219101941108420e+02, /* 0xc0874910, 0xD52D3051 */
+ln2HI[2] ={ 6.93147180369123816490e-01, /* 0x3fe62e42, 0xfee00000 */
+ -6.93147180369123816490e-01,},/* 0xbfe62e42, 0xfee00000 */
+ln2LO[2] ={ 1.90821492927058770002e-10, /* 0x3dea39ef, 0x35793c76 */
+ -1.90821492927058770002e-10,},/* 0xbdea39ef, 0x35793c76 */
+invln2 = 1.44269504088896338700e+00, /* 0x3ff71547, 0x652b82fe */
+P1 = 1.66666666666666019037e-01, /* 0x3FC55555, 0x5555553E */
+P2 = -2.77777777770155933842e-03, /* 0xBF66C16C, 0x16BEBD93 */
+P3 = 6.61375632143793436117e-05, /* 0x3F11566A, 0xAF25DE2C */
+P4 = -1.65339022054652515390e-06, /* 0xBEBBBD41, 0xC5D26BF1 */
+P5 = 4.13813679705723846039e-08; /* 0x3E663769, 0x72BEA4D0 */
+
+
+#ifdef __STDC__
+ double __ieee754_exp(double x) /* default IEEE double exp */
+#else
+ double __ieee754_exp(x) /* default IEEE double exp */
+ double x;
+#endif
+{
+ fd_twoints u;
+ double y,hi,lo,c,t;
+ int k, xsb;
+ unsigned hx;
+
+ u.d = x;
+ hx = __HI(u); /* high word of x */
+ xsb = (hx>>31)&1; /* sign bit of x */
+ hx &= 0x7fffffff; /* high word of |x| */
+
+ /* filter out non-finite argument */
+ if(hx >= 0x40862E42) { /* if |x|>=709.78... */
+ if(hx>=0x7ff00000) {
+ u.d = x;
+ if(((hx&0xfffff)|__LO(u))!=0)
+ return x+x; /* NaN */
+ else return (xsb==0)? x:0.0; /* exp(+-inf)={inf,0} */
+ }
+ if(x > o_threshold) return really_big*really_big; /* overflow */
+ if(x < u_threshold) return twom1000*twom1000; /* underflow */
+ }
+
+ /* argument reduction */
+ if(hx > 0x3fd62e42) { /* if |x| > 0.5 ln2 */
+ if(hx < 0x3FF0A2B2) { /* and |x| < 1.5 ln2 */
+ hi = x-ln2HI[xsb]; lo=ln2LO[xsb]; k = 1-xsb-xsb;
+ } else {
+ k = (int)(invln2*x+halF[xsb]);
+ t = k;
+ hi = x - t*ln2HI[0]; /* t*ln2HI is exact here */
+ lo = t*ln2LO[0];
+ }
+ x = hi - lo;
+ }
+ else if(hx < 0x3e300000) { /* when |x|<2**-28 */
+ if(really_big+x>one) return one+x;/* trigger inexact */
+ }
+ else k = 0;
+
+ /* x is now in primary range */
+ t = x*x;
+ c = x - t*(P1+t*(P2+t*(P3+t*(P4+t*P5))));
+ if(k==0) return one-((x*c)/(c-2.0)-x);
+ else y = one-((lo-(x*c)/(2.0-c))-hi);
+ if(k >= -1021) {
+ u.d = y;
+ __HI(u) += (k<<20); /* add k to y's exponent */
+ y = u.d;
+ return y;
+ } else {
+ u.d = y;
+ __HI(u) += ((k+1000)<<20);/* add k to y's exponent */
+ y = u.d;
+ return y*twom1000;
+ }
+}
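
Step 1 above (write x = k*ln2 + r with |r| <= 0.5*ln2, approximate exp(r), scale by 2^k) can be sketched with the standard library standing in for the Remez core; ldexp performs the final 2^k scaling that the vendored code does by patching the exponent word. The constants below are plain ln2 values, not the split ln2HI/ln2LO pair.

#include <math.h>
#include <stdio.h>

int main(void)
{
    const double ln2    = 0.69314718055994530942;
    const double invln2 = 1.44269504088896338700;
    double xs[] = { -20.0, -1.0, 0.3, 7.5, 100.0 };
    for (int i = 0; i < 5; i++) {
        double x = xs[i];
        int    k = (int)floor(invln2 * x + 0.5);   /* nearest integer multiple of ln2 */
        double r = x - k * ln2;                    /* |r| <= 0.5*ln2 */
        double y = ldexp(exp(r), k);               /* exp(r) * 2^k */
        printf("x=%8.3f  k=%4d  r=%+.6f  reduced=%.17g  exp=%.17g\n",
               x, k, r, y, exp(x));
    }
    return 0;
}
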
diff --git a/third_party/js-1.7/fdlibm/e_fmod.c b/third_party/js-1.7/fdlibm/e_fmod.c
new file mode 100644
index 0000000..7b5ce78
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/e_fmod.c
@@ -0,0 +1,184 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)e_fmod.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * __ieee754_fmod(x,y)
+ * Return x mod y in exact arithmetic
+ * Method: shift and subtract
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+static const double one = 1.0, Zero[] = {0.0, -0.0,};
+#else
+static double one = 1.0, Zero[] = {0.0, -0.0,};
+#endif
+
+#ifdef __STDC__
+ double __ieee754_fmod(double x, double y)
+#else
+ double __ieee754_fmod(x,y)
+ double x,y ;
+#endif
+{
+ fd_twoints ux, uy;
+ int n,hx,hy,hz,ix,iy,sx,i;
+ unsigned lx,ly,lz;
+
+ ux.d = x; uy.d = y;
+ hx = __HI(ux); /* high word of x */
+ lx = __LO(ux); /* low word of x */
+ hy = __HI(uy); /* high word of y */
+ ly = __LO(uy); /* low word of y */
+ sx = hx&0x80000000; /* sign of x */
+ hx ^=sx; /* |x| */
+ hy &= 0x7fffffff; /* |y| */
+
+ /* purge off exception values */
+ if((hy|ly)==0||(hx>=0x7ff00000)|| /* y=0,or x not finite */
+ ((hy|((ly|-(int)ly)>>31))>0x7ff00000)) /* or y is NaN */
+ return (x*y)/(x*y);
+ if(hx<=hy) {
+ if((hx<hy)||(lx<ly)) return x; /* |x|<|y| return x */
+ if(lx==ly)
+ return Zero[(unsigned)sx>>31]; /* |x|=|y| return x*0*/
+ }
+
+ /* determine ix = ilogb(x) */
+ if(hx<0x00100000) { /* subnormal x */
+ if(hx==0) {
+ for (ix = -1043, i=lx; i>0; i<<=1) ix -=1;
+ } else {
+ for (ix = -1022,i=(hx<<11); i>0; i<<=1) ix -=1;
+ }
+ } else ix = (hx>>20)-1023;
+
+ /* determine iy = ilogb(y) */
+ if(hy<0x00100000) { /* subnormal y */
+ if(hy==0) {
+ for (iy = -1043, i=ly; i>0; i<<=1) iy -=1;
+ } else {
+ for (iy = -1022,i=(hy<<11); i>0; i<<=1) iy -=1;
+ }
+ } else iy = (hy>>20)-1023;
+
+ /* set up {hx,lx}, {hy,ly} and align y to x */
+ if(ix >= -1022)
+ hx = 0x00100000|(0x000fffff&hx);
+ else { /* subnormal x, shift x to normal */
+ n = -1022-ix;
+ if(n<=31) {
+ hx = (hx<<n)|(lx>>(32-n));
+ lx <<= n;
+ } else {
+ hx = lx<<(n-32);
+ lx = 0;
+ }
+ }
+ if(iy >= -1022)
+ hy = 0x00100000|(0x000fffff&hy);
+ else { /* subnormal y, shift y to normal */
+ n = -1022-iy;
+ if(n<=31) {
+ hy = (hy<<n)|(ly>>(32-n));
+ ly <<= n;
+ } else {
+ hy = ly<<(n-32);
+ ly = 0;
+ }
+ }
+
+ /* fix point fmod */
+ n = ix - iy;
+ while(n--) {
+ hz=hx-hy;lz=lx-ly; if(lx<ly) hz -= 1;
+ if(hz<0){hx = hx+hx+(lx>>31); lx = lx+lx;}
+ else {
+ if((hz|lz)==0) /* return sign(x)*0 */
+ return Zero[(unsigned)sx>>31];
+ hx = hz+hz+(lz>>31); lx = lz+lz;
+ }
+ }
+ hz=hx-hy;lz=lx-ly; if(lx<ly) hz -= 1;
+ if(hz>=0) {hx=hz;lx=lz;}
+
+ /* convert back to floating value and restore the sign */
+ if((hx|lx)==0) /* return sign(x)*0 */
+ return Zero[(unsigned)sx>>31];
+ while(hx<0x00100000) { /* normalize x */
+ hx = hx+hx+(lx>>31); lx = lx+lx;
+ iy -= 1;
+ }
+ if(iy>= -1022) { /* normalize output */
+ hx = ((hx-0x00100000)|((iy+1023)<<20));
+ ux.d = x;
+ __HI(ux) = hx|sx;
+ __LO(ux) = lx;
+ x = ux.d;
+ } else { /* subnormal output */
+ n = -1022 - iy;
+ if(n<=20) {
+ lx = (lx>>n)|((unsigned)hx<<(32-n));
+ hx >>= n;
+ } else if (n<=31) {
+ lx = (hx<<(32-n))|(lx>>n); hx = sx;
+ } else {
+ lx = hx>>(n-32); hx = sx;
+ }
+ ux.d = x;
+ __HI(ux) = hx|sx;
+ __LO(ux) = lx;
+ x = ux.d;
+ x *= one; /* create necessary signal */
+ }
+ return x; /* exact output */
+}
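
The fixed-point loop above is binary long division on the aligned mantissas of x and y. The integer-only sketch below shows the same shift-and-subtract idea on plain unsigned values (a hypothetical helper for illustration; the vendored code additionally handles signs, subnormals and the split high/low words).

#include <stdio.h>
#include <stdint.h>

/* x mod y by shift and subtract; assumes y > 0. */
static uint64_t mod_shift_subtract(uint64_t x, uint64_t y)
{
    int shift = 0;
    while ((y << shift) <= x / 2)           /* align y with the top of x */
        shift++;
    for (; shift >= 0; shift--)             /* subtract whenever it fits */
        if ((y << shift) <= x)
            x -= y << shift;
    return x;
}

int main(void)
{
    printf("%llu\n", (unsigned long long)mod_shift_subtract(1000003, 97));
    printf("%llu\n", (unsigned long long)(1000003 % 97));   /* same result: 30 */
    return 0;
}
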
diff --git a/third_party/js-1.7/fdlibm/e_gamma.c b/third_party/js-1.7/fdlibm/e_gamma.c
new file mode 100644
index 0000000..a34faa3
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/e_gamma.c
@@ -0,0 +1,71 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)e_gamma.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ *
+ */
+
+/* __ieee754_gamma(x)
+ * Return the logarithm of the Gamma function of x.
+ *
+ * Method: call __ieee754_gamma_r
+ */
+
+#include "fdlibm.h"
+
+extern int signgam;
+
+#ifdef __STDC__
+ double __ieee754_gamma(double x)
+#else
+ double __ieee754_gamma(x)
+ double x;
+#endif
+{
+ return __ieee754_gamma_r(x,&signgam);
+}
diff --git a/third_party/js-1.7/fdlibm/e_gamma_r.c b/third_party/js-1.7/fdlibm/e_gamma_r.c
new file mode 100644
index 0000000..f10e32e
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/e_gamma_r.c
@@ -0,0 +1,70 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)e_gamma_r.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ *
+ */
+
+/* __ieee754_gamma_r(x, signgamp)
+ * Reentrant version of the logarithm of the Gamma function
+ * with a user-provided pointer for the sign of Gamma(x).
+ *
+ * Method: See __ieee754_lgamma_r
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+ double __ieee754_gamma_r(double x, int *signgamp)
+#else
+ double __ieee754_gamma_r(x,signgamp)
+ double x; int *signgamp;
+#endif
+{
+ return __ieee754_lgamma_r(x,signgamp);
+}
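
Both wrappers above just expose the lgamma machinery: the return value is log|Gamma(x)| and the sign of Gamma(x) comes back through the int pointer (or the global signgam). The sketch below shows the same contract with the standard lgamma(), which sets the POSIX global signgam; signgam is a POSIX extension, so the feature-test macro is an assumption of this illustration.

#define _XOPEN_SOURCE 700   /* for signgam */
#include <math.h>
#include <stdio.h>

int main(void)
{
    double xs[] = { 0.5, 4.0, -0.5, -1.5 };
    for (int i = 0; i < 4; i++) {
        double v = lgamma(xs[i]);       /* log|Gamma(x)|, sets signgam */
        printf("x=%+4.1f  log|Gamma(x)|=%+.10f  sign=%+d\n", xs[i], v, signgam);
    }
    return 0;
}
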
diff --git a/third_party/js-1.7/fdlibm/e_hypot.c b/third_party/js-1.7/fdlibm/e_hypot.c
new file mode 100644
index 0000000..3900230
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/e_hypot.c
@@ -0,0 +1,173 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)e_hypot.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/* __ieee754_hypot(x,y)
+ *
+ * Method :
+ * If (assume round-to-nearest) z=x*x+y*y
+ * has error less than sqrt(2)/2 ulp, then
+ * sqrt(z) has error less than 1 ulp (exercise).
+ *
+ * So, compute sqrt(x*x+y*y) with some care as
+ * follows to get the error below 1 ulp:
+ *
+ * Assume x>y>0;
+ * (if possible, set rounding to round-to-nearest)
+ * 1. if x > 2y use
+ * x1*x1+(y*y+(x2*(x+x1))) for x*x+y*y
+ * where x1 = x with lower 32 bits cleared, x2 = x-x1; else
+ * 2. if x <= 2y use
+ * t1*y1+((x-y)*(x-y)+(t1*y2+t2*y))
+ * where t1 = 2x with lower 32 bits cleared, t2 = 2x-t1,
+ * y1= y with lower 32 bits chopped, y2 = y-y1.
+ *
+ * NOTE: scaling may be necessary if some argument is too
+ * large or too tiny
+ *
+ * Special cases:
+ * hypot(x,y) is INF if x or y is +INF or -INF; else
+ * hypot(x,y) is NAN if x or y is NAN.
+ *
+ * Accuracy:
+ * hypot(x,y) returns sqrt(x^2+y^2) with error less
+ * than 1 ulp (unit in the last place)
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+ double __ieee754_hypot(double x, double y)
+#else
+ double __ieee754_hypot(x,y)
+ double x, y;
+#endif
+{
+ fd_twoints ux, uy;
+ double a=x,b=y,t1,t2,y1,y2,w;
+ int j,k,ha,hb;
+
+ ux.d = x; uy.d = y;
+ ha = __HI(ux)&0x7fffffff; /* high word of x */
+ hb = __HI(uy)&0x7fffffff; /* high word of y */
+ if(hb > ha) {a=y;b=x;j=ha; ha=hb;hb=j;} else {a=x;b=y;}
+ ux.d = a; uy.d = b;
+ __HI(ux) = ha; /* a <- |a| */
+ __HI(uy) = hb; /* b <- |b| */
+ a = ux.d; b = uy.d;
+ if((ha-hb)>0x3c00000) {return a+b;} /* x/y > 2**60 */
+ k=0;
+ if(ha > 0x5f300000) { /* a>2**500 */
+ if(ha >= 0x7ff00000) { /* Inf or NaN */
+ w = a+b; /* for sNaN */
+ ux.d = a; uy.d = b;
+ if(((ha&0xfffff)|__LO(ux))==0) w = a;
+ if(((hb^0x7ff00000)|__LO(uy))==0) w = b;
+ return w;
+ }
+ /* scale a and b by 2**-600 */
+ ha -= 0x25800000; hb -= 0x25800000; k += 600;
+ ux.d = a; uy.d = b;
+ __HI(ux) = ha;
+ __HI(uy) = hb;
+ a = ux.d; b = uy.d;
+ }
+ if(hb < 0x20b00000) { /* b < 2**-500 */
+ if(hb <= 0x000fffff) { /* subnormal b or 0 */
+ uy.d = b;
+ if((hb|(__LO(uy)))==0) return a;
+ t1=0;
+ ux.d = t1;
+ __HI(ux) = 0x7fd00000; /* t1=2^1022 */
+ t1 = ux.d;
+ b *= t1;
+ a *= t1;
+ k -= 1022;
+ } else { /* scale a and b by 2^600 */
+ ha += 0x25800000; /* a *= 2^600 */
+ hb += 0x25800000; /* b *= 2^600 */
+ k -= 600;
+ ux.d = a; uy.d = b;
+ __HI(ux) = ha;
+ __HI(uy) = hb;
+ a = ux.d; b = uy.d;
+ }
+ }
+ /* medium size a and b */
+ w = a-b;
+ if (w>b) {
+ t1 = 0;
+ ux.d = t1;
+ __HI(ux) = ha;
+ t1 = ux.d;
+ t2 = a-t1;
+ w = fd_sqrt(t1*t1-(b*(-b)-t2*(a+t1)));
+ } else {
+ a = a+a;
+ y1 = 0;
+ ux.d = y1;
+ __HI(ux) = hb;
+ y1 = ux.d;
+ y2 = b - y1;
+ t1 = 0;
+ ux.d = t1;
+ __HI(ux) = ha+0x00100000;
+ t1 = ux.d;
+ t2 = a - t1;
+ w = fd_sqrt(t1*y1-(w*(-w)-(t1*y2+t2*b)));
+ }
+ if(k!=0) {
+ t1 = 1.0;
+ ux.d = t1;
+ __HI(ux) += (k<<20);
+ t1 = ux.d;
+ return t1*w;
+ } else return w;
+}
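
The 2^+-600 rescaling above exists because the obvious sqrt(x*x + y*y) overflows (or underflows) even when the result is representable. The sketch below shows the failure and a simplified rescaling by the larger magnitude; it is an illustration of the idea, not the error-compensated split used in the vendored routine.

#include <math.h>
#include <stdio.h>

static double hypot_sketch(double x, double y)
{
    double ax = fabs(x), ay = fabs(y);
    if (ax < ay) { double t = ax; ax = ay; ay = t; }    /* ensure ax >= ay */
    if (ax == 0.0) return 0.0;
    double r = ay / ax;
    return ax * sqrt(1.0 + r * r);                      /* no intermediate overflow */
}

int main(void)
{
    double x = 3e200, y = 4e200;                        /* x*x and y*y overflow */
    printf("naive  = %.17g\n", sqrt(x * x + y * y));    /* inf */
    printf("sketch = %.17g\n", hypot_sketch(x, y));     /* 5e200 */
    printf("libm   = %.17g\n", hypot(x, y));
    return 0;
}
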
diff --git a/third_party/js-1.7/fdlibm/e_j0.c b/third_party/js-1.7/fdlibm/e_j0.c
new file mode 100644
index 0000000..078e096
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/e_j0.c
@@ -0,0 +1,524 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)e_j0.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/* __ieee754_j0(x), __ieee754_y0(x)
+ * Bessel function of the first and second kinds of order zero.
+ * Method -- j0(x):
+ * 1. For tiny x, we use j0(x) = 1 - x^2/4 + x^4/64 - ...
+ * 2. Reduce x to |x| since j0(x)=j0(-x), and
+ * for x in (0,2)
+ * j0(x) = 1-z/4+ z^2*R0/S0, where z = x*x;
+ * (precision: |j0-1+z/4-z^2R0/S0 |<2**-63.67 )
+ * for x in (2,inf)
+ * j0(x) = sqrt(2/(pi*x))*(p0(x)*cos(x0)-q0(x)*sin(x0))
+ * where x0 = x-pi/4. It is better to compute sin(x0),cos(x0)
+ * as follows:
+ * cos(x0) = cos(x)cos(pi/4)+sin(x)sin(pi/4)
+ * = 1/sqrt(2) * (cos(x) + sin(x))
+ * sin(x0) = sin(x)cos(pi/4)-cos(x)sin(pi/4)
+ * = 1/sqrt(2) * (sin(x) - cos(x))
+ * (To avoid cancellation, use
+ * sin(x) +- cos(x) = -cos(2x)/(sin(x) -+ cos(x))
+ * to compute the worse one.)
+ *
+ * 3 Special cases
+ * j0(nan)= nan
+ * j0(0) = 1
+ * j0(inf) = 0
+ *
+ * Method -- y0(x):
+ * 1. For x<2.
+ * Since
+ * y0(x) = 2/pi*(j0(x)*(ln(x/2)+Euler) + x^2/4 - ...)
+ * therefore y0(x)-2/pi*j0(x)*ln(x) is an even function.
+ * We use the following function to approximate y0,
+ * y0(x) = U(z)/V(z) + (2/pi)*(j0(x)*ln(x)), z= x^2
+ * where
+ * U(z) = u00 + u01*z + ... + u06*z^6
+ * V(z) = 1 + v01*z + ... + v04*z^4
+ * with absolute approximation error bounded by 2**-72.
+ * Note: For tiny x, U/V = u0 and j0(x)~1, hence
+ * y0(tiny) = u0 + (2/pi)*ln(tiny), (choose tiny<2**-27)
+ * 2. For x>=2.
+ * y0(x) = sqrt(2/(pi*x))*(p0(x)*cos(x0)+q0(x)*sin(x0))
+ * where x0 = x-pi/4. It is better to compute sin(x0),cos(x0)
+ * by the method mentioned above.
+ * 3. Special cases: y0(0)=-inf, y0(x<0)=NaN, y0(inf)=0.
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+static double pzero(double), qzero(double);
+#else
+static double pzero(), qzero();
+#endif
+
+#ifdef __STDC__
+static const double
+#else
+static double
+#endif
+really_big = 1e300,
+one = 1.0,
+invsqrtpi= 5.64189583547756279280e-01, /* 0x3FE20DD7, 0x50429B6D */
+tpi = 6.36619772367581382433e-01, /* 0x3FE45F30, 0x6DC9C883 */
+ /* R0/S0 on [0, 2.00] */
+R02 = 1.56249999999999947958e-02, /* 0x3F8FFFFF, 0xFFFFFFFD */
+R03 = -1.89979294238854721751e-04, /* 0xBF28E6A5, 0xB61AC6E9 */
+R04 = 1.82954049532700665670e-06, /* 0x3EBEB1D1, 0x0C503919 */
+R05 = -4.61832688532103189199e-09, /* 0xBE33D5E7, 0x73D63FCE */
+S01 = 1.56191029464890010492e-02, /* 0x3F8FFCE8, 0x82C8C2A4 */
+S02 = 1.16926784663337450260e-04, /* 0x3F1EA6D2, 0xDD57DBF4 */
+S03 = 5.13546550207318111446e-07, /* 0x3EA13B54, 0xCE84D5A9 */
+S04 = 1.16614003333790000205e-09; /* 0x3E1408BC, 0xF4745D8F */
+
+static double zero = 0.0;
+
+#ifdef __STDC__
+ double __ieee754_j0(double x)
+#else
+ double __ieee754_j0(x)
+ double x;
+#endif
+{
+ fd_twoints un;
+ double z, s,c,ss,cc,r,u,v;
+ int hx,ix;
+
+ un.d = x;
+ hx = __HI(un);
+ ix = hx&0x7fffffff;
+ if(ix>=0x7ff00000) return one/(x*x);
+ x = fd_fabs(x);
+ if(ix >= 0x40000000) { /* |x| >= 2.0 */
+ s = fd_sin(x);
+ c = fd_cos(x);
+ ss = s-c;
+ cc = s+c;
+ if(ix<0x7fe00000) { /* make sure x+x not overflow */
+ z = -fd_cos(x+x);
+ if ((s*c)<zero) cc = z/ss;
+ else ss = z/cc;
+ }
+ /*
+ * j0(x) = 1/sqrt(pi) * (P(0,x)*cc - Q(0,x)*ss) / sqrt(x)
+ * y0(x) = 1/sqrt(pi) * (P(0,x)*ss + Q(0,x)*cc) / sqrt(x)
+ */
+ if(ix>0x48000000) z = (invsqrtpi*cc)/fd_sqrt(x);
+ else {
+ u = pzero(x); v = qzero(x);
+ z = invsqrtpi*(u*cc-v*ss)/fd_sqrt(x);
+ }
+ return z;
+ }
+ if(ix<0x3f200000) { /* |x| < 2**-13 */
+ if(really_big+x>one) { /* raise inexact if x != 0 */
+ if(ix<0x3e400000) return one; /* |x|<2**-27 */
+ else return one - 0.25*x*x;
+ }
+ }
+ z = x*x;
+ r = z*(R02+z*(R03+z*(R04+z*R05)));
+ s = one+z*(S01+z*(S02+z*(S03+z*S04)));
+ if(ix < 0x3FF00000) { /* |x| < 1.00 */
+ return one + z*(-0.25+(r/s));
+ } else {
+ u = 0.5*x;
+ return((one+u)*(one-u)+z*(r/s));
+ }
+}
+
+#ifdef __STDC__
+static const double
+#else
+static double
+#endif
+u00 = -7.38042951086872317523e-02, /* 0xBFB2E4D6, 0x99CBD01F */
+u01 = 1.76666452509181115538e-01, /* 0x3FC69D01, 0x9DE9E3FC */
+u02 = -1.38185671945596898896e-02, /* 0xBF8C4CE8, 0xB16CFA97 */
+u03 = 3.47453432093683650238e-04, /* 0x3F36C54D, 0x20B29B6B */
+u04 = -3.81407053724364161125e-06, /* 0xBECFFEA7, 0x73D25CAD */
+u05 = 1.95590137035022920206e-08, /* 0x3E550057, 0x3B4EABD4 */
+u06 = -3.98205194132103398453e-11, /* 0xBDC5E43D, 0x693FB3C8 */
+v01 = 1.27304834834123699328e-02, /* 0x3F8A1270, 0x91C9C71A */
+v02 = 7.60068627350353253702e-05, /* 0x3F13ECBB, 0xF578C6C1 */
+v03 = 2.59150851840457805467e-07, /* 0x3E91642D, 0x7FF202FD */
+v04 = 4.41110311332675467403e-10; /* 0x3DFE5018, 0x3BD6D9EF */
+
+#ifdef __STDC__
+ double __ieee754_y0(double x)
+#else
+ double __ieee754_y0(x)
+ double x;
+#endif
+{
+ fd_twoints un;
+ double z, s,c,ss,cc,u,v;
+ int hx,ix,lx;
+
+ un.d = x;
+ hx = __HI(un);
+ ix = 0x7fffffff&hx;
+ lx = __LO(un);
+ /* y0(NaN) is NaN, y0(-inf) is NaN, y0(inf) is 0 */
+ if(ix>=0x7ff00000) return one/(x+x*x);
+ if((ix|lx)==0) return -one/zero;
+ if(hx<0) return zero/zero;
+ if(ix >= 0x40000000) { /* |x| >= 2.0 */
+ /* y0(x) = sqrt(2/(pi*x))*(p0(x)*sin(x0)+q0(x)*cos(x0))
+ * where x0 = x-pi/4
+ * Better formula:
+ * cos(x0) = cos(x)cos(pi/4)+sin(x)sin(pi/4)
+ * = 1/sqrt(2) * (sin(x) + cos(x))
+ * sin(x0) = sin(x)cos(3pi/4)-cos(x)sin(3pi/4)
+ * = 1/sqrt(2) * (sin(x) - cos(x))
+ * To avoid cancellation, use
+ * sin(x) +- cos(x) = -cos(2x)/(sin(x) -+ cos(x))
+ * to compute the worse one.
+ */
+ s = fd_sin(x);
+ c = fd_cos(x);
+ ss = s-c;
+ cc = s+c;
+ /*
+ * j0(x) = 1/sqrt(pi) * (P(0,x)*cc - Q(0,x)*ss) / sqrt(x)
+ * y0(x) = 1/sqrt(pi) * (P(0,x)*ss + Q(0,x)*cc) / sqrt(x)
+ */
+ if(ix<0x7fe00000) { /* make sure x+x not overflow */
+ z = -fd_cos(x+x);
+ if ((s*c)<zero) cc = z/ss;
+ else ss = z/cc;
+ }
+ if(ix>0x48000000) z = (invsqrtpi*ss)/fd_sqrt(x);
+ else {
+ u = pzero(x); v = qzero(x);
+ z = invsqrtpi*(u*ss+v*cc)/fd_sqrt(x);
+ }
+ return z;
+ }
+ if(ix<=0x3e400000) { /* x < 2**-27 */
+ return(u00 + tpi*__ieee754_log(x));
+ }
+ z = x*x;
+ u = u00+z*(u01+z*(u02+z*(u03+z*(u04+z*(u05+z*u06)))));
+ v = one+z*(v01+z*(v02+z*(v03+z*v04)));
+ return(u/v + tpi*(__ieee754_j0(x)*__ieee754_log(x)));
+}
+
+/* The asymptotic expansion of pzero is
+ * 1 - 9/128 s^2 + 11025/98304 s^4 - ..., where s = 1/x.
+ * For x >= 2, We approximate pzero by
+ * pzero(x) = 1 + (R/S)
+ * where R = pR0 + pR1*s^2 + pR2*s^4 + ... + pR5*s^10
+ * S = 1 + pS0*s^2 + ... + pS4*s^10
+ * and
+ * | pzero(x)-1-R/S | <= 2 ** ( -60.26)
+ */
+#ifdef __STDC__
+static const double pR8[6] = { /* for x in [inf, 8]=1/[0,0.125] */
+#else
+static double pR8[6] = { /* for x in [inf, 8]=1/[0,0.125] */
+#endif
+ 0.00000000000000000000e+00, /* 0x00000000, 0x00000000 */
+ -7.03124999999900357484e-02, /* 0xBFB1FFFF, 0xFFFFFD32 */
+ -8.08167041275349795626e+00, /* 0xC02029D0, 0xB44FA779 */
+ -2.57063105679704847262e+02, /* 0xC0701102, 0x7B19E863 */
+ -2.48521641009428822144e+03, /* 0xC0A36A6E, 0xCD4DCAFC */
+ -5.25304380490729545272e+03, /* 0xC0B4850B, 0x36CC643D */
+};
+#ifdef __STDC__
+static const double pS8[5] = {
+#else
+static double pS8[5] = {
+#endif
+ 1.16534364619668181717e+02, /* 0x405D2233, 0x07A96751 */
+ 3.83374475364121826715e+03, /* 0x40ADF37D, 0x50596938 */
+ 4.05978572648472545552e+04, /* 0x40E3D2BB, 0x6EB6B05F */
+ 1.16752972564375915681e+05, /* 0x40FC810F, 0x8F9FA9BD */
+ 4.76277284146730962675e+04, /* 0x40E74177, 0x4F2C49DC */
+};
+
+#ifdef __STDC__
+static const double pR5[6] = { /* for x in [8,4.5454]=1/[0.125,0.22001] */
+#else
+static double pR5[6] = { /* for x in [8,4.5454]=1/[0.125,0.22001] */
+#endif
+ -1.14125464691894502584e-11, /* 0xBDA918B1, 0x47E495CC */
+ -7.03124940873599280078e-02, /* 0xBFB1FFFF, 0xE69AFBC6 */
+ -4.15961064470587782438e+00, /* 0xC010A370, 0xF90C6BBF */
+ -6.76747652265167261021e+01, /* 0xC050EB2F, 0x5A7D1783 */
+ -3.31231299649172967747e+02, /* 0xC074B3B3, 0x6742CC63 */
+ -3.46433388365604912451e+02, /* 0xC075A6EF, 0x28A38BD7 */
+};
+#ifdef __STDC__
+static const double pS5[5] = {
+#else
+static double pS5[5] = {
+#endif
+ 6.07539382692300335975e+01, /* 0x404E6081, 0x0C98C5DE */
+ 1.05125230595704579173e+03, /* 0x40906D02, 0x5C7E2864 */
+ 5.97897094333855784498e+03, /* 0x40B75AF8, 0x8FBE1D60 */
+ 9.62544514357774460223e+03, /* 0x40C2CCB8, 0xFA76FA38 */
+ 2.40605815922939109441e+03, /* 0x40A2CC1D, 0xC70BE864 */
+};
+
+#ifdef __STDC__
+static const double pR3[6] = {/* for x in [4.547,2.8571]=1/[0.2199,0.35001] */
+#else
+static double pR3[6] = {/* for x in [4.547,2.8571]=1/[0.2199,0.35001] */
+#endif
+ -2.54704601771951915620e-09, /* 0xBE25E103, 0x6FE1AA86 */
+ -7.03119616381481654654e-02, /* 0xBFB1FFF6, 0xF7C0E24B */
+ -2.40903221549529611423e+00, /* 0xC00345B2, 0xAEA48074 */
+ -2.19659774734883086467e+01, /* 0xC035F74A, 0x4CB94E14 */
+ -5.80791704701737572236e+01, /* 0xC04D0A22, 0x420A1A45 */
+ -3.14479470594888503854e+01, /* 0xC03F72AC, 0xA892D80F */
+};
+#ifdef __STDC__
+static const double pS3[5] = {
+#else
+static double pS3[5] = {
+#endif
+ 3.58560338055209726349e+01, /* 0x4041ED92, 0x84077DD3 */
+ 3.61513983050303863820e+02, /* 0x40769839, 0x464A7C0E */
+ 1.19360783792111533330e+03, /* 0x4092A66E, 0x6D1061D6 */
+ 1.12799679856907414432e+03, /* 0x40919FFC, 0xB8C39B7E */
+ 1.73580930813335754692e+02, /* 0x4065B296, 0xFC379081 */
+};
+
+#ifdef __STDC__
+static const double pR2[6] = {/* for x in [2.8570,2]=1/[0.3499,0.5] */
+#else
+static double pR2[6] = {/* for x in [2.8570,2]=1/[0.3499,0.5] */
+#endif
+ -8.87534333032526411254e-08, /* 0xBE77D316, 0xE927026D */
+ -7.03030995483624743247e-02, /* 0xBFB1FF62, 0x495E1E42 */
+ -1.45073846780952986357e+00, /* 0xBFF73639, 0x8A24A843 */
+ -7.63569613823527770791e+00, /* 0xC01E8AF3, 0xEDAFA7F3 */
+ -1.11931668860356747786e+01, /* 0xC02662E6, 0xC5246303 */
+ -3.23364579351335335033e+00, /* 0xC009DE81, 0xAF8FE70F */
+};
+#ifdef __STDC__
+static const double pS2[5] = {
+#else
+static double pS2[5] = {
+#endif
+ 2.22202997532088808441e+01, /* 0x40363865, 0x908B5959 */
+ 1.36206794218215208048e+02, /* 0x4061069E, 0x0EE8878F */
+ 2.70470278658083486789e+02, /* 0x4070E786, 0x42EA079B */
+ 1.53875394208320329881e+02, /* 0x40633C03, 0x3AB6FAFF */
+ 1.46576176948256193810e+01, /* 0x402D50B3, 0x44391809 */
+};
+
+#ifdef __STDC__
+ static double pzero(double x)
+#else
+ static double pzero(x)
+ double x;
+#endif
+{
+#ifdef __STDC__
+ const double *p,*q;
+#else
+ double *p,*q;
+#endif
+ fd_twoints u;
+ double z,r,s;
+ int ix;
+ u.d = x;
+ ix = 0x7fffffff&__HI(u);
+ if(ix>=0x40200000) {p = pR8; q= pS8;}
+ else if(ix>=0x40122E8B){p = pR5; q= pS5;}
+ else if(ix>=0x4006DB6D){p = pR3; q= pS3;}
+ else if(ix>=0x40000000){p = pR2; q= pS2;}
+ z = one/(x*x);
+ r = p[0]+z*(p[1]+z*(p[2]+z*(p[3]+z*(p[4]+z*p[5]))));
+ s = one+z*(q[0]+z*(q[1]+z*(q[2]+z*(q[3]+z*q[4]))));
+ return one+ r/s;
+}
+
+
+/* For x >= 8, the asymptotic expansion of qzero is
+ * -1/8 s + 75/1024 s^3 - ..., where s = 1/x.
+ * We approximate qzero by
+ * qzero(x) = s*(-1.25 + (R/S))
+ * where R = qR0 + qR1*s^2 + qR2*s^4 + ... + qR5*s^10
+ * S = 1 + qS0*s^2 + ... + qS5*s^12
+ * and
+ * | qzero(x)/s +1.25-R/S | <= 2 ** ( -61.22)
+ */
+#ifdef __STDC__
+static const double qR8[6] = { /* for x in [inf, 8]=1/[0,0.125] */
+#else
+static double qR8[6] = { /* for x in [inf, 8]=1/[0,0.125] */
+#endif
+ 0.00000000000000000000e+00, /* 0x00000000, 0x00000000 */
+ 7.32421874999935051953e-02, /* 0x3FB2BFFF, 0xFFFFFE2C */
+ 1.17682064682252693899e+01, /* 0x40278952, 0x5BB334D6 */
+ 5.57673380256401856059e+02, /* 0x40816D63, 0x15301825 */
+ 8.85919720756468632317e+03, /* 0x40C14D99, 0x3E18F46D */
+ 3.70146267776887834771e+04, /* 0x40E212D4, 0x0E901566 */
+};
+#ifdef __STDC__
+static const double qS8[6] = {
+#else
+static double qS8[6] = {
+#endif
+ 1.63776026895689824414e+02, /* 0x406478D5, 0x365B39BC */
+ 8.09834494656449805916e+03, /* 0x40BFA258, 0x4E6B0563 */
+ 1.42538291419120476348e+05, /* 0x41016652, 0x54D38C3F */
+ 8.03309257119514397345e+05, /* 0x412883DA, 0x83A52B43 */
+ 8.40501579819060512818e+05, /* 0x4129A66B, 0x28DE0B3D */
+ -3.43899293537866615225e+05, /* 0xC114FD6D, 0x2C9530C5 */
+};
+
+#ifdef __STDC__
+static const double qR5[6] = { /* for x in [8,4.5454]=1/[0.125,0.22001] */
+#else
+static double qR5[6] = { /* for x in [8,4.5454]=1/[0.125,0.22001] */
+#endif
+ 1.84085963594515531381e-11, /* 0x3DB43D8F, 0x29CC8CD9 */
+ 7.32421766612684765896e-02, /* 0x3FB2BFFF, 0xD172B04C */
+ 5.83563508962056953777e+00, /* 0x401757B0, 0xB9953DD3 */
+ 1.35111577286449829671e+02, /* 0x4060E392, 0x0A8788E9 */
+ 1.02724376596164097464e+03, /* 0x40900CF9, 0x9DC8C481 */
+ 1.98997785864605384631e+03, /* 0x409F17E9, 0x53C6E3A6 */
+};
+#ifdef __STDC__
+static const double qS5[6] = {
+#else
+static double qS5[6] = {
+#endif
+ 8.27766102236537761883e+01, /* 0x4054B1B3, 0xFB5E1543 */
+ 2.07781416421392987104e+03, /* 0x40A03BA0, 0xDA21C0CE */
+ 1.88472887785718085070e+04, /* 0x40D267D2, 0x7B591E6D */
+ 5.67511122894947329769e+04, /* 0x40EBB5E3, 0x97E02372 */
+ 3.59767538425114471465e+04, /* 0x40E19118, 0x1F7A54A0 */
+ -5.35434275601944773371e+03, /* 0xC0B4EA57, 0xBEDBC609 */
+};
+
+#ifdef __STDC__
+static const double qR3[6] = {/* for x in [4.547,2.8571]=1/[0.2199,0.35001] */
+#else
+static double qR3[6] = {/* for x in [4.547,2.8571]=1/[0.2199,0.35001] */
+#endif
+ 4.37741014089738620906e-09, /* 0x3E32CD03, 0x6ADECB82 */
+ 7.32411180042911447163e-02, /* 0x3FB2BFEE, 0x0E8D0842 */
+ 3.34423137516170720929e+00, /* 0x400AC0FC, 0x61149CF5 */
+ 4.26218440745412650017e+01, /* 0x40454F98, 0x962DAEDD */
+ 1.70808091340565596283e+02, /* 0x406559DB, 0xE25EFD1F */
+ 1.66733948696651168575e+02, /* 0x4064D77C, 0x81FA21E0 */
+};
+#ifdef __STDC__
+static const double qS3[6] = {
+#else
+static double qS3[6] = {
+#endif
+ 4.87588729724587182091e+01, /* 0x40486122, 0xBFE343A6 */
+ 7.09689221056606015736e+02, /* 0x40862D83, 0x86544EB3 */
+ 3.70414822620111362994e+03, /* 0x40ACF04B, 0xE44DFC63 */
+ 6.46042516752568917582e+03, /* 0x40B93C6C, 0xD7C76A28 */
+ 2.51633368920368957333e+03, /* 0x40A3A8AA, 0xD94FB1C0 */
+ -1.49247451836156386662e+02, /* 0xC062A7EB, 0x201CF40F */
+};
+
+#ifdef __STDC__
+static const double qR2[6] = {/* for x in [2.8570,2]=1/[0.3499,0.5] */
+#else
+static double qR2[6] = {/* for x in [2.8570,2]=1/[0.3499,0.5] */
+#endif
+ 1.50444444886983272379e-07, /* 0x3E84313B, 0x54F76BDB */
+ 7.32234265963079278272e-02, /* 0x3FB2BEC5, 0x3E883E34 */
+ 1.99819174093815998816e+00, /* 0x3FFFF897, 0xE727779C */
+ 1.44956029347885735348e+01, /* 0x402CFDBF, 0xAAF96FE5 */
+ 3.16662317504781540833e+01, /* 0x403FAA8E, 0x29FBDC4A */
+ 1.62527075710929267416e+01, /* 0x403040B1, 0x71814BB4 */
+};
+#ifdef __STDC__
+static const double qS2[6] = {
+#else
+static double qS2[6] = {
+#endif
+ 3.03655848355219184498e+01, /* 0x403E5D96, 0xF7C07AED */
+ 2.69348118608049844624e+02, /* 0x4070D591, 0xE4D14B40 */
+ 8.44783757595320139444e+02, /* 0x408A6645, 0x22B3BF22 */
+ 8.82935845112488550512e+02, /* 0x408B977C, 0x9C5CC214 */
+ 2.12666388511798828631e+02, /* 0x406A9553, 0x0E001365 */
+ -5.31095493882666946917e+00, /* 0xC0153E6A, 0xF8B32931 */
+};
+
+#ifdef __STDC__
+ static double qzero(double x)
+#else
+ static double qzero(x)
+ double x;
+#endif
+{
+#ifdef __STDC__
+ const double *p,*q;
+#else
+ double *p,*q;
+#endif
+ fd_twoints u;
+ double s,r,z;
+ int ix;
+ u.d = x;
+ ix = 0x7fffffff&__HI(u);
+ if(ix>=0x40200000) {p = qR8; q= qS8;}
+ else if(ix>=0x40122E8B){p = qR5; q= qS5;}
+ else if(ix>=0x4006DB6D){p = qR3; q= qS3;}
+ else if(ix>=0x40000000){p = qR2; q= qS2;}
+ z = one/(x*x);
+ r = p[0]+z*(p[1]+z*(p[2]+z*(p[3]+z*(p[4]+z*p[5]))));
+ s = one+z*(q[0]+z*(q[1]+z*(q[2]+z*(q[3]+z*(q[4]+z*q[5])))));
+ return (-.125 + r/s)/x;
+}
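
For large |x| the formulas above reduce, to leading order, to j0(x) ~ sqrt(2/(pi*x)) * cos(x - pi/4); pzero/qzero supply the rational corrections. The sketch below compares that leading term with the library j0(); j0() is POSIX rather than ISO C, so the feature-test macro is an assumption of this illustration.

#define _XOPEN_SOURCE 700   /* for j0() */
#include <math.h>
#include <stdio.h>

int main(void)
{
    const double pi = 3.14159265358979323846;
    double xs[] = { 10.0, 50.0, 200.0 };
    for (int i = 0; i < 3; i++) {
        double x = xs[i];
        double approx = sqrt(2.0 / (pi * x)) * cos(x - pi / 4.0);   /* leading term */
        printf("x=%6.1f  asymptotic=%+.10f  j0=%+.10f\n", x, approx, j0(x));
    }
    return 0;
}
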
diff --git a/third_party/js-1.7/fdlibm/e_j1.c b/third_party/js-1.7/fdlibm/e_j1.c
new file mode 100644
index 0000000..8982ac8
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/e_j1.c
@@ -0,0 +1,523 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)e_j1.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/* __ieee754_j1(x), __ieee754_y1(x)
+ * Bessel function of the first and second kinds of order one.
+ * Method -- j1(x):
+ * 1. For tiny x, we use j1(x) = x/2 - x^3/16 + x^5/384 - ...
+ * 2. Reduce x to |x| since j1(x)=-j1(-x), and
+ * for x in (0,2)
+ * j1(x) = x/2 + x*z*R0/S0, where z = x*x;
+ * (precision: |j1/x - 1/2 - R0/S0 |<2**-61.51 )
+ * for x in (2,inf)
+ * j1(x) = sqrt(2/(pi*x))*(p1(x)*cos(x1)-q1(x)*sin(x1))
+ * y1(x) = sqrt(2/(pi*x))*(p1(x)*sin(x1)+q1(x)*cos(x1))
+ * where x1 = x-3*pi/4. It is better to compute sin(x1),cos(x1)
+ * as follows:
+ * cos(x1) = cos(x)cos(3pi/4)+sin(x)sin(3pi/4)
+ * = 1/sqrt(2) * (sin(x) - cos(x))
+ * sin(x1) = sin(x)cos(3pi/4)-cos(x)sin(3pi/4)
+ * = -1/sqrt(2) * (sin(x) + cos(x))
+ * (To avoid cancellation, use
+ * sin(x) +- cos(x) = -cos(2x)/(sin(x) -+ cos(x))
+ * to compute the worse one.)
+ *
+ * 3 Special cases
+ * j1(nan)= nan
+ * j1(0) = 0
+ * j1(inf) = 0
+ *
+ * Method -- y1(x):
+ * 1. screen out x<=0 cases: y1(0)=-inf, y1(x<0)=NaN
+ * 2. For x<2.
+ * Since
+ * y1(x) = 2/pi*(j1(x)*(ln(x/2)+Euler)-1/x-x/2+5/64*x^3-...)
+ * therefore y1(x)-2/pi*j1(x)*ln(x)-1/x is an odd function.
+ * We use the following function to approximate y1,
+ * y1(x) = x*U(z)/V(z) + (2/pi)*(j1(x)*ln(x)-1/x), z= x^2
+ * where for x in [0,2] (abs err less than 2**-65.89)
+ * U(z) = U0[0] + U0[1]*z + ... + U0[4]*z^4
+ * V(z) = 1 + v0[0]*z + ... + v0[4]*z^5
+ * Note: For tiny x, 1/x dominates y1 and hence
+ * y1(tiny) = -2/pi/tiny, (choose tiny<2**-54)
+ * 3. For x>=2.
+ * y1(x) = sqrt(2/(pi*x))*(p1(x)*sin(x1)+q1(x)*cos(x1))
+ * where x1 = x-3*pi/4. It is better to compute sin(x1),cos(x1)
+ * by method mentioned above.
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+static double pone(double), qone(double);
+#else
+static double pone(), qone();
+#endif
+
+#ifdef __STDC__
+static const double
+#else
+static double
+#endif
+really_big = 1e300,
+one = 1.0,
+invsqrtpi= 5.64189583547756279280e-01, /* 0x3FE20DD7, 0x50429B6D */
+tpi = 6.36619772367581382433e-01, /* 0x3FE45F30, 0x6DC9C883 */
+ /* R0/S0 on [0,2] */
+r00 = -6.25000000000000000000e-02, /* 0xBFB00000, 0x00000000 */
+r01 = 1.40705666955189706048e-03, /* 0x3F570D9F, 0x98472C61 */
+r02 = -1.59955631084035597520e-05, /* 0xBEF0C5C6, 0xBA169668 */
+r03 = 4.96727999609584448412e-08, /* 0x3E6AAAFA, 0x46CA0BD9 */
+s01 = 1.91537599538363460805e-02, /* 0x3F939D0B, 0x12637E53 */
+s02 = 1.85946785588630915560e-04, /* 0x3F285F56, 0xB9CDF664 */
+s03 = 1.17718464042623683263e-06, /* 0x3EB3BFF8, 0x333F8498 */
+s04 = 5.04636257076217042715e-09, /* 0x3E35AC88, 0xC97DFF2C */
+s05 = 1.23542274426137913908e-11; /* 0x3DAB2ACF, 0xCFB97ED8 */
+
+static double zero = 0.0;
+
+#ifdef __STDC__
+ double __ieee754_j1(double x)
+#else
+ double __ieee754_j1(x)
+ double x;
+#endif
+{
+ fd_twoints un;
+ double z, s,c,ss,cc,r,u,v,y;
+ int hx,ix;
+
+ un.d = x;
+ hx = __HI(un);
+ ix = hx&0x7fffffff;
+ if(ix>=0x7ff00000) return one/x;
+ y = fd_fabs(x);
+ if(ix >= 0x40000000) { /* |x| >= 2.0 */
+ s = fd_sin(y);
+ c = fd_cos(y);
+ ss = -s-c;
+ cc = s-c;
+ if(ix<0x7fe00000) { /* make sure y+y not overflow */
+ z = fd_cos(y+y);
+ if ((s*c)>zero) cc = z/ss;
+ else ss = z/cc;
+ }
+ /*
+ * j1(x) = 1/sqrt(pi) * (P(1,x)*cc - Q(1,x)*ss) / sqrt(x)
+ * y1(x) = 1/sqrt(pi) * (P(1,x)*ss + Q(1,x)*cc) / sqrt(x)
+ */
+ if(ix>0x48000000) z = (invsqrtpi*cc)/fd_sqrt(y);
+ else {
+ u = pone(y); v = qone(y);
+ z = invsqrtpi*(u*cc-v*ss)/fd_sqrt(y);
+ }
+ if(hx<0) return -z;
+ else return z;
+ }
+ if(ix<0x3e400000) { /* |x|<2**-27 */
+ if(really_big+x>one) return 0.5*x;/* inexact if x!=0 necessary */
+ }
+ z = x*x;
+ r = z*(r00+z*(r01+z*(r02+z*r03)));
+ s = one+z*(s01+z*(s02+z*(s03+z*(s04+z*s05))));
+ r *= x;
+ return(x*0.5+r/s);
+}
+
+#ifdef __STDC__
+static const double U0[5] = {
+#else
+static double U0[5] = {
+#endif
+ -1.96057090646238940668e-01, /* 0xBFC91866, 0x143CBC8A */
+ 5.04438716639811282616e-02, /* 0x3FA9D3C7, 0x76292CD1 */
+ -1.91256895875763547298e-03, /* 0xBF5F55E5, 0x4844F50F */
+ 2.35252600561610495928e-05, /* 0x3EF8AB03, 0x8FA6B88E */
+ -9.19099158039878874504e-08, /* 0xBE78AC00, 0x569105B8 */
+};
+#ifdef __STDC__
+static const double V0[5] = {
+#else
+static double V0[5] = {
+#endif
+ 1.99167318236649903973e-02, /* 0x3F94650D, 0x3F4DA9F0 */
+ 2.02552581025135171496e-04, /* 0x3F2A8C89, 0x6C257764 */
+ 1.35608801097516229404e-06, /* 0x3EB6C05A, 0x894E8CA6 */
+ 6.22741452364621501295e-09, /* 0x3E3ABF1D, 0x5BA69A86 */
+ 1.66559246207992079114e-11, /* 0x3DB25039, 0xDACA772A */
+};
+
+#ifdef __STDC__
+ double __ieee754_y1(double x)
+#else
+ double __ieee754_y1(x)
+ double x;
+#endif
+{
+ fd_twoints un;
+ double z, s,c,ss,cc,u,v;
+ int hx,ix,lx;
+
+ un.d = x;
+ hx = __HI(un);
+ ix = 0x7fffffff&hx;
+ lx = __LO(un);
+ /* if Y1(NaN) is NaN, Y1(-inf) is NaN, Y1(inf) is 0 */
+ if(ix>=0x7ff00000) return one/(x+x*x);
+ if((ix|lx)==0) return -one/zero;
+ if(hx<0) return zero/zero;
+ if(ix >= 0x40000000) { /* |x| >= 2.0 */
+ s = fd_sin(x);
+ c = fd_cos(x);
+ ss = -s-c;
+ cc = s-c;
+ if(ix<0x7fe00000) { /* make sure x+x not overflow */
+ z = fd_cos(x+x);
+ if ((s*c)>zero) cc = z/ss;
+ else ss = z/cc;
+ }
+ /* y1(x) = sqrt(2/(pi*x))*(p1(x)*sin(x0)+q1(x)*cos(x0))
+ * where x0 = x-3pi/4
+ * Better formula:
+ * cos(x0) = cos(x)cos(3pi/4)+sin(x)sin(3pi/4)
+ * = 1/sqrt(2) * (sin(x) - cos(x))
+ * sin(x0) = sin(x)cos(3pi/4)-cos(x)sin(3pi/4)
+ * = -1/sqrt(2) * (cos(x) + sin(x))
+ * To avoid cancellation, use
+ * sin(x) +- cos(x) = -cos(2x)/(sin(x) -+ cos(x))
+ * to compute the worse one.
+ */
+ if(ix>0x48000000) z = (invsqrtpi*ss)/fd_sqrt(x);
+ else {
+ u = pone(x); v = qone(x);
+ z = invsqrtpi*(u*ss+v*cc)/fd_sqrt(x);
+ }
+ return z;
+ }
+ if(ix<=0x3c900000) { /* x < 2**-54 */
+ return(-tpi/x);
+ }
+ z = x*x;
+ u = U0[0]+z*(U0[1]+z*(U0[2]+z*(U0[3]+z*U0[4])));
+ v = one+z*(V0[0]+z*(V0[1]+z*(V0[2]+z*(V0[3]+z*V0[4]))));
+ return(x*(u/v) + tpi*(__ieee754_j1(x)*__ieee754_log(x)-one/x));
+}
+
+/* For x >= 8, the asymptotic expansion of pone is
+ * 1 + 15/128 s^2 - 4725/2^15 s^4 - ..., where s = 1/x.
+ * We approximate pone by
+ * pone(x) = 1 + (R/S)
+ * where R = pr0 + pr1*s^2 + pr2*s^4 + ... + pr5*s^10
+ * S = 1 + ps0*s^2 + ... + ps4*s^10
+ * and
+ * | pone(x)-1-R/S | <= 2 ** ( -60.06)
+ */
+
+#ifdef __STDC__
+static const double pr8[6] = { /* for x in [inf, 8]=1/[0,0.125] */
+#else
+static double pr8[6] = { /* for x in [inf, 8]=1/[0,0.125] */
+#endif
+ 0.00000000000000000000e+00, /* 0x00000000, 0x00000000 */
+ 1.17187499999988647970e-01, /* 0x3FBDFFFF, 0xFFFFFCCE */
+ 1.32394806593073575129e+01, /* 0x402A7A9D, 0x357F7FCE */
+ 4.12051854307378562225e+02, /* 0x4079C0D4, 0x652EA590 */
+ 3.87474538913960532227e+03, /* 0x40AE457D, 0xA3A532CC */
+ 7.91447954031891731574e+03, /* 0x40BEEA7A, 0xC32782DD */
+};
+#ifdef __STDC__
+static const double ps8[5] = {
+#else
+static double ps8[5] = {
+#endif
+ 1.14207370375678408436e+02, /* 0x405C8D45, 0x8E656CAC */
+ 3.65093083420853463394e+03, /* 0x40AC85DC, 0x964D274F */
+ 3.69562060269033463555e+04, /* 0x40E20B86, 0x97C5BB7F */
+ 9.76027935934950801311e+04, /* 0x40F7D42C, 0xB28F17BB */
+ 3.08042720627888811578e+04, /* 0x40DE1511, 0x697A0B2D */
+};
+
+#ifdef __STDC__
+static const double pr5[6] = { /* for x in [8,4.5454]=1/[0.125,0.22001] */
+#else
+static double pr5[6] = { /* for x in [8,4.5454]=1/[0.125,0.22001] */
+#endif
+ 1.31990519556243522749e-11, /* 0x3DAD0667, 0xDAE1CA7D */
+ 1.17187493190614097638e-01, /* 0x3FBDFFFF, 0xE2C10043 */
+ 6.80275127868432871736e+00, /* 0x401B3604, 0x6E6315E3 */
+ 1.08308182990189109773e+02, /* 0x405B13B9, 0x452602ED */
+ 5.17636139533199752805e+02, /* 0x40802D16, 0xD052D649 */
+ 5.28715201363337541807e+02, /* 0x408085B8, 0xBB7E0CB7 */
+};
+#ifdef __STDC__
+static const double ps5[5] = {
+#else
+static double ps5[5] = {
+#endif
+ 5.92805987221131331921e+01, /* 0x404DA3EA, 0xA8AF633D */
+ 9.91401418733614377743e+02, /* 0x408EFB36, 0x1B066701 */
+ 5.35326695291487976647e+03, /* 0x40B4E944, 0x5706B6FB */
+ 7.84469031749551231769e+03, /* 0x40BEA4B0, 0xB8A5BB15 */
+ 1.50404688810361062679e+03, /* 0x40978030, 0x036F5E51 */
+};
+
+#ifdef __STDC__
+static const double pr3[6] = {
+#else
+static double pr3[6] = {/* for x in [4.547,2.8571]=1/[0.2199,0.35001] */
+#endif
+ 3.02503916137373618024e-09, /* 0x3E29FC21, 0xA7AD9EDD */
+ 1.17186865567253592491e-01, /* 0x3FBDFFF5, 0x5B21D17B */
+ 3.93297750033315640650e+00, /* 0x400F76BC, 0xE85EAD8A */
+ 3.51194035591636932736e+01, /* 0x40418F48, 0x9DA6D129 */
+ 9.10550110750781271918e+01, /* 0x4056C385, 0x4D2C1837 */
+ 4.85590685197364919645e+01, /* 0x4048478F, 0x8EA83EE5 */
+};
+#ifdef __STDC__
+static const double ps3[5] = {
+#else
+static double ps3[5] = {
+#endif
+ 3.47913095001251519989e+01, /* 0x40416549, 0xA134069C */
+ 3.36762458747825746741e+02, /* 0x40750C33, 0x07F1A75F */
+ 1.04687139975775130551e+03, /* 0x40905B7C, 0x5037D523 */
+ 8.90811346398256432622e+02, /* 0x408BD67D, 0xA32E31E9 */
+ 1.03787932439639277504e+02, /* 0x4059F26D, 0x7C2EED53 */
+};
+
+#ifdef __STDC__
+static const double pr2[6] = {/* for x in [2.8570,2]=1/[0.3499,0.5] */
+#else
+static double pr2[6] = {/* for x in [2.8570,2]=1/[0.3499,0.5] */
+#endif
+ 1.07710830106873743082e-07, /* 0x3E7CE9D4, 0xF65544F4 */
+ 1.17176219462683348094e-01, /* 0x3FBDFF42, 0xBE760D83 */
+ 2.36851496667608785174e+00, /* 0x4002F2B7, 0xF98FAEC0 */
+ 1.22426109148261232917e+01, /* 0x40287C37, 0x7F71A964 */
+ 1.76939711271687727390e+01, /* 0x4031B1A8, 0x177F8EE2 */
+ 5.07352312588818499250e+00, /* 0x40144B49, 0xA574C1FE */
+};
+#ifdef __STDC__
+static const double ps2[5] = {
+#else
+static double ps2[5] = {
+#endif
+ 2.14364859363821409488e+01, /* 0x40356FBD, 0x8AD5ECDC */
+ 1.25290227168402751090e+02, /* 0x405F5293, 0x14F92CD5 */
+ 2.32276469057162813669e+02, /* 0x406D08D8, 0xD5A2DBD9 */
+ 1.17679373287147100768e+02, /* 0x405D6B7A, 0xDA1884A9 */
+ 8.36463893371618283368e+00, /* 0x4020BAB1, 0xF44E5192 */
+};
+
+#ifdef __STDC__
+ static double pone(double x)
+#else
+ static double pone(x)
+ double x;
+#endif
+{
+#ifdef __STDC__
+ const double *p,*q;
+#else
+ double *p,*q;
+#endif
+ fd_twoints un;
+ double z,r,s;
+ int ix;
+ un.d = x;
+ ix = 0x7fffffff&__HI(un);
+ if(ix>=0x40200000) {p = pr8; q= ps8;}
+ else if(ix>=0x40122E8B){p = pr5; q= ps5;}
+ else if(ix>=0x4006DB6D){p = pr3; q= ps3;}
+ else if(ix>=0x40000000){p = pr2; q= ps2;}
+ z = one/(x*x);
+ r = p[0]+z*(p[1]+z*(p[2]+z*(p[3]+z*(p[4]+z*p[5]))));
+ s = one+z*(q[0]+z*(q[1]+z*(q[2]+z*(q[3]+z*q[4]))));
+ return one+ r/s;
+}
+
+
+/* For x >= 8, the asymptotic expansion of qone is
+ * 3/8 s - 105/1024 s^3 - ..., where s = 1/x.
+ * We approximate qone by
+ * qone(x) = s*(0.375 + (R/S))
+ * where R = qr1*s^2 + qr2*s^4 + ... + qr5*s^10
+ * S = 1 + qs1*s^2 + ... + qs6*s^12
+ * and
+ * | qone(x)/s -0.375-R/S | <= 2 ** ( -61.13)
+ */
+
+#ifdef __STDC__
+static const double qr8[6] = { /* for x in [inf, 8]=1/[0,0.125] */
+#else
+static double qr8[6] = { /* for x in [inf, 8]=1/[0,0.125] */
+#endif
+ 0.00000000000000000000e+00, /* 0x00000000, 0x00000000 */
+ -1.02539062499992714161e-01, /* 0xBFBA3FFF, 0xFFFFFDF3 */
+ -1.62717534544589987888e+01, /* 0xC0304591, 0xA26779F7 */
+ -7.59601722513950107896e+02, /* 0xC087BCD0, 0x53E4B576 */
+ -1.18498066702429587167e+04, /* 0xC0C724E7, 0x40F87415 */
+ -4.84385124285750353010e+04, /* 0xC0E7A6D0, 0x65D09C6A */
+};
+#ifdef __STDC__
+static const double qs8[6] = {
+#else
+static double qs8[6] = {
+#endif
+ 1.61395369700722909556e+02, /* 0x40642CA6, 0xDE5BCDE5 */
+ 7.82538599923348465381e+03, /* 0x40BE9162, 0xD0D88419 */
+ 1.33875336287249578163e+05, /* 0x4100579A, 0xB0B75E98 */
+ 7.19657723683240939863e+05, /* 0x4125F653, 0x72869C19 */
+ 6.66601232617776375264e+05, /* 0x412457D2, 0x7719AD5C */
+ -2.94490264303834643215e+05, /* 0xC111F969, 0x0EA5AA18 */
+};
+
+#ifdef __STDC__
+static const double qr5[6] = { /* for x in [8,4.5454]=1/[0.125,0.22001] */
+#else
+static double qr5[6] = { /* for x in [8,4.5454]=1/[0.125,0.22001] */
+#endif
+ -2.08979931141764104297e-11, /* 0xBDB6FA43, 0x1AA1A098 */
+ -1.02539050241375426231e-01, /* 0xBFBA3FFF, 0xCB597FEF */
+ -8.05644828123936029840e+00, /* 0xC0201CE6, 0xCA03AD4B */
+ -1.83669607474888380239e+02, /* 0xC066F56D, 0x6CA7B9B0 */
+ -1.37319376065508163265e+03, /* 0xC09574C6, 0x6931734F */
+ -2.61244440453215656817e+03, /* 0xC0A468E3, 0x88FDA79D */
+};
+#ifdef __STDC__
+static const double qs5[6] = {
+#else
+static double qs5[6] = {
+#endif
+ 8.12765501384335777857e+01, /* 0x405451B2, 0xFF5A11B2 */
+ 1.99179873460485964642e+03, /* 0x409F1F31, 0xE77BF839 */
+ 1.74684851924908907677e+04, /* 0x40D10F1F, 0x0D64CE29 */
+ 4.98514270910352279316e+04, /* 0x40E8576D, 0xAABAD197 */
+ 2.79480751638918118260e+04, /* 0x40DB4B04, 0xCF7C364B */
+ -4.71918354795128470869e+03, /* 0xC0B26F2E, 0xFCFFA004 */
+};
+
+#ifdef __STDC__
+static const double qr3[6] = {
+#else
+static double qr3[6] = {/* for x in [4.547,2.8571]=1/[0.2199,0.35001] */
+#endif
+ -5.07831226461766561369e-09, /* 0xBE35CFA9, 0xD38FC84F */
+ -1.02537829820837089745e-01, /* 0xBFBA3FEB, 0x51AEED54 */
+ -4.61011581139473403113e+00, /* 0xC01270C2, 0x3302D9FF */
+ -5.78472216562783643212e+01, /* 0xC04CEC71, 0xC25D16DA */
+ -2.28244540737631695038e+02, /* 0xC06C87D3, 0x4718D55F */
+ -2.19210128478909325622e+02, /* 0xC06B66B9, 0x5F5C1BF6 */
+};
+#ifdef __STDC__
+static const double qs3[6] = {
+#else
+static double qs3[6] = {
+#endif
+ 4.76651550323729509273e+01, /* 0x4047D523, 0xCCD367E4 */
+ 6.73865112676699709482e+02, /* 0x40850EEB, 0xC031EE3E */
+ 3.38015286679526343505e+03, /* 0x40AA684E, 0x448E7C9A */
+ 5.54772909720722782367e+03, /* 0x40B5ABBA, 0xA61D54A6 */
+ 1.90311919338810798763e+03, /* 0x409DBC7A, 0x0DD4DF4B */
+ -1.35201191444307340817e+02, /* 0xC060E670, 0x290A311F */
+};
+
+#ifdef __STDC__
+static const double qr2[6] = {/* for x in [2.8570,2]=1/[0.3499,0.5] */
+#else
+static double qr2[6] = {/* for x in [2.8570,2]=1/[0.3499,0.5] */
+#endif
+ -1.78381727510958865572e-07, /* 0xBE87F126, 0x44C626D2 */
+ -1.02517042607985553460e-01, /* 0xBFBA3E8E, 0x9148B010 */
+ -2.75220568278187460720e+00, /* 0xC0060484, 0x69BB4EDA */
+ -1.96636162643703720221e+01, /* 0xC033A9E2, 0xC168907F */
+ -4.23253133372830490089e+01, /* 0xC04529A3, 0xDE104AAA */
+ -2.13719211703704061733e+01, /* 0xC0355F36, 0x39CF6E52 */
+};
+#ifdef __STDC__
+static const double qs2[6] = {
+#else
+static double qs2[6] = {
+#endif
+ 2.95333629060523854548e+01, /* 0x403D888A, 0x78AE64FF */
+ 2.52981549982190529136e+02, /* 0x406F9F68, 0xDB821CBA */
+ 7.57502834868645436472e+02, /* 0x4087AC05, 0xCE49A0F7 */
+ 7.39393205320467245656e+02, /* 0x40871B25, 0x48D4C029 */
+ 1.55949003336666123687e+02, /* 0x40637E5E, 0x3C3ED8D4 */
+ -4.95949898822628210127e+00, /* 0xC013D686, 0xE71BE86B */
+};
+
+#ifdef __STDC__
+ static double qone(double x)
+#else
+ static double qone(x)
+ double x;
+#endif
+{
+#ifdef __STDC__
+ const double *p,*q;
+#else
+ double *p,*q;
+#endif
+ fd_twoints un;
+ double s,r,z;
+ int ix;
+ un.d = x;
+ ix = 0x7fffffff&__HI(un);
+ if(ix>=0x40200000) {p = qr8; q= qs8;}
+ else if(ix>=0x40122E8B){p = qr5; q= qs5;}
+ else if(ix>=0x4006DB6D){p = qr3; q= qs3;}
+ else if(ix>=0x40000000){p = qr2; q= qs2;}
+ z = one/(x*x);
+ r = p[0]+z*(p[1]+z*(p[2]+z*(p[3]+z*(p[4]+z*p[5]))));
+ s = one+z*(q[0]+z*(q[1]+z*(q[2]+z*(q[3]+z*(q[4]+z*q[5])))));
+ return (.375 + r/s)/x;
+}
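
As a rough sanity check of the large-|x| method described at the top of this file, the leading term of the expansion is j1(x) ~ sqrt(2/(pi*x)) * cos(x - 3*pi/4), with p1(x) tending to 1 and q1(x) to 0. A small sketch comparing that against the j1() of a POSIX libm (an assumption about the host platform, not part of this file; link with -lm):

#include <stdio.h>
#include <math.h>

int main(void)
{
    const double pi = 3.14159265358979323846;
    /* Leading-order asymptotic form from the method comment in e_j1.c. */
    for (double x = 10.0; x <= 1000.0; x *= 10.0) {
        double approx = sqrt(2.0 / (pi * x)) * cos(x - 3.0 * pi / 4.0);
        printf("x=%6g   j1(x)=% .10f   asymptotic=% .10f\n", x, j1(x), approx);
    }
    return 0;
}
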
diff --git a/third_party/js-1.7/fdlibm/e_jn.c b/third_party/js-1.7/fdlibm/e_jn.c
new file mode 100644
index 0000000..2b61b44
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/e_jn.c
@@ -0,0 +1,315 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)e_jn.c 1.4 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * __ieee754_jn(n, x), __ieee754_yn(n, x)
+ * floating point Bessel's function of the 1st and 2nd kind
+ * of order n
+ *
+ * Special cases:
+ * y0(0)=y1(0)=yn(n,0) = -inf with division by zero signal;
+ * y0(-ve)=y1(-ve)=yn(n,-ve) are NaN with invalid signal.
+ * Note 2. About jn(n,x), yn(n,x)
+ * For n=0, j0(x) is called,
+ * for n=1, j1(x) is called,
+ * for n<x, forward recursion is used starting
+ * from values of j0(x) and j1(x).
+ * for n>x, a continued fraction approximation to
+ * j(n,x)/j(n-1,x) is evaluated and then backward
+ * recursion is used starting from a supposed value
+ * for j(n,x). The resulting value of j(0,x) is
+ * compared with the actual value to correct the
+ * supposed value of j(n,x).
+ *
+ * yn(n,x) is similar in all respects, except
+ * that forward recursion is used for all
+ * values of n>1.
+ *
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+static const double
+#else
+static double
+#endif
+invsqrtpi= 5.64189583547756279280e-01, /* 0x3FE20DD7, 0x50429B6D */
+two = 2.00000000000000000000e+00, /* 0x40000000, 0x00000000 */
+one = 1.00000000000000000000e+00; /* 0x3FF00000, 0x00000000 */
+
+static double zero = 0.00000000000000000000e+00;
+
+#ifdef __STDC__
+ double __ieee754_jn(int n, double x)
+#else
+ double __ieee754_jn(n,x)
+ int n; double x;
+#endif
+{
+ fd_twoints u;
+ int i,hx,ix,lx, sgn;
+ double a, b, temp, di;
+ double z, w;
+
+ /* J(-n,x) = (-1)^n * J(n, x), J(n, -x) = (-1)^n * J(n, x)
+ * Thus, J(-n,x) = J(n,-x)
+ */
+ u.d = x;
+ hx = __HI(u);
+ ix = 0x7fffffff&hx;
+ lx = __LO(u);
+ /* if J(n,NaN) is NaN */
+ if((ix|((unsigned)(lx|-lx))>>31)>0x7ff00000) return x+x;
+ if(n<0){
+ n = -n;
+ x = -x;
+ hx ^= 0x80000000;
+ }
+ if(n==0) return(__ieee754_j0(x));
+ if(n==1) return(__ieee754_j1(x));
+ sgn = (n&1)&(hx>>31); /* even n -- 0, odd n -- sign(x) */
+ x = fd_fabs(x);
+ if((ix|lx)==0||ix>=0x7ff00000) /* if x is 0 or inf */
+ b = zero;
+ else if((double)n<=x) {
+ /* Safe to use J(n+1,x)=2n/x *J(n,x)-J(n-1,x) */
+ if(ix>=0x52D00000) { /* x > 2**302 */
+ /* (x >> n**2)
+ * Jn(x) = cos(x-(2n+1)*pi/4)*sqrt(2/x*pi)
+ * Yn(x) = sin(x-(2n+1)*pi/4)*sqrt(2/x*pi)
+ * Let s=sin(x), c=cos(x),
+ * xn=x-(2n+1)*pi/4, sqt2 = sqrt(2),then
+ *
+ * n sin(xn)*sqt2 cos(xn)*sqt2
+ * ----------------------------------
+ * 0 s-c c+s
+ * 1 -s-c -c+s
+ * 2 -s+c -c-s
+ * 3 s+c c-s
+ */
+ switch(n&3) {
+ case 0: temp = fd_cos(x)+fd_sin(x); break;
+ case 1: temp = -fd_cos(x)+fd_sin(x); break;
+ case 2: temp = -fd_cos(x)-fd_sin(x); break;
+ case 3: temp = fd_cos(x)-fd_sin(x); break;
+ }
+ b = invsqrtpi*temp/fd_sqrt(x);
+ } else {
+ a = __ieee754_j0(x);
+ b = __ieee754_j1(x);
+ for(i=1;i<n;i++){
+ temp = b;
+ b = b*((double)(i+i)/x) - a; /* avoid underflow */
+ a = temp;
+ }
+ }
+ } else {
+ if(ix<0x3e100000) { /* x < 2**-29 */
+ /* x is tiny, return the first Taylor expansion of J(n,x)
+ * J(n,x) = 1/n!*(x/2)^n - ...
+ */
+ if(n>33) /* underflow */
+ b = zero;
+ else {
+ temp = x*0.5; b = temp;
+ for (a=one,i=2;i<=n;i++) {
+ a *= (double)i; /* a = n! */
+ b *= temp; /* b = (x/2)^n */
+ }
+ b = b/a;
+ }
+ } else {
+ /* use backward recurrence */
+ /* x x^2 x^2
+ * J(n,x)/J(n-1,x) = ---- ------ ------ .....
+ * 2n - 2(n+1) - 2(n+2)
+ *
+ * 1 1 1
+ * (for large x) = ---- ------ ------ .....
+ * 2n 2(n+1) 2(n+2)
+ * -- - ------ - ------ -
+ * x x x
+ *
+ * Let w = 2n/x and h=2/x, then the above quotient
+ * is equal to the continued fraction:
+ * 1
+ * = -----------------------
+ * 1
+ * w - -----------------
+ * 1
+ * w+h - ---------
+ * w+2h - ...
+ *
+ * To determine how many terms needed, let
+ * Q(0) = w, Q(1) = w(w+h) - 1,
+ * Q(k) = (w+k*h)*Q(k-1) - Q(k-2),
+ * When Q(k) > 1e4 good for single
+ * When Q(k) > 1e9 good for double
+ * When Q(k) > 1e17 good for quadruple
+ */
+ /* determine k */
+ double t,v;
+ double q0,q1,h,tmp; int k,m;
+ w = (n+n)/(double)x; h = 2.0/(double)x;
+ q0 = w; z = w+h; q1 = w*z - 1.0; k=1;
+ while(q1<1.0e9) {
+ k += 1; z += h;
+ tmp = z*q1 - q0;
+ q0 = q1;
+ q1 = tmp;
+ }
+ m = n+n;
+ for(t=zero, i = 2*(n+k); i>=m; i -= 2) t = one/(i/x-t);
+ a = t;
+ b = one;
+ /* estimate log((2/x)^n*n!) = n*log(2/x)+n*ln(n)
+ * Hence, if n*(log(2n/x)) > ...
+ * single 8.8722839355e+01
+ * double 7.09782712893383973096e+02
+ * long double 1.1356523406294143949491931077970765006170e+04
+ * then recurrent value may overflow and the result is
+ * likely underflow to zero
+ */
+ tmp = n;
+ v = two/x;
+ tmp = tmp*__ieee754_log(fd_fabs(v*tmp));
+ if(tmp<7.09782712893383973096e+02) {
+ for(i=n-1,di=(double)(i+i);i>0;i--){
+ temp = b;
+ b *= di;
+ b = b/x - a;
+ a = temp;
+ di -= two;
+ }
+ } else {
+ for(i=n-1,di=(double)(i+i);i>0;i--){
+ temp = b;
+ b *= di;
+ b = b/x - a;
+ a = temp;
+ di -= two;
+ /* scale b to avoid spurious overflow */
+ if(b>1e100) {
+ a /= b;
+ t /= b;
+ b = one;
+ }
+ }
+ }
+ b = (t*__ieee754_j0(x)/b);
+ }
+ }
+ if(sgn==1) return -b; else return b;
+}
+
+#ifdef __STDC__
+ double __ieee754_yn(int n, double x)
+#else
+ double __ieee754_yn(n,x)
+ int n; double x;
+#endif
+{
+ fd_twoints u;
+ int i,hx,ix,lx;
+ int sign;
+ double a, b, temp;
+
+ u.d = x;
+ hx = __HI(u);
+ ix = 0x7fffffff&hx;
+ lx = __LO(u);
+ /* if Y(n,NaN) is NaN */
+ if((ix|((unsigned)(lx|-lx))>>31)>0x7ff00000) return x+x;
+ if((ix|lx)==0) return -one/zero;
+ if(hx<0) return zero/zero;
+ sign = 1;
+ if(n<0){
+ n = -n;
+ sign = 1 - ((n&1)<<1);
+ }
+ if(n==0) return(__ieee754_y0(x));
+ if(n==1) return(sign*__ieee754_y1(x));
+ if(ix==0x7ff00000) return zero;
+ if(ix>=0x52D00000) { /* x > 2**302 */
+ /* (x >> n**2)
+ * Jn(x) = cos(x-(2n+1)*pi/4)*sqrt(2/x*pi)
+ * Yn(x) = sin(x-(2n+1)*pi/4)*sqrt(2/x*pi)
+ * Let s=sin(x), c=cos(x),
+ * xn=x-(2n+1)*pi/4, sqt2 = sqrt(2),then
+ *
+ * n sin(xn)*sqt2 cos(xn)*sqt2
+ * ----------------------------------
+ * 0 s-c c+s
+ * 1 -s-c -c+s
+ * 2 -s+c -c-s
+ * 3 s+c c-s
+ */
+ switch(n&3) {
+ case 0: temp = fd_sin(x)-fd_cos(x); break;
+ case 1: temp = -fd_sin(x)-fd_cos(x); break;
+ case 2: temp = -fd_sin(x)+fd_cos(x); break;
+ case 3: temp = fd_sin(x)+fd_cos(x); break;
+ }
+ b = invsqrtpi*temp/fd_sqrt(x);
+ } else {
+ a = __ieee754_y0(x);
+ b = __ieee754_y1(x);
+ /* quit if b is -inf */
+ u.d = b;
+ for(i=1;i<n&&(__HI(u) != 0xfff00000);i++){
+ temp = b;
+ b = ((double)(i+i)/x)*b - a;
+ a = temp;
+ }
+ }
+ if(sign>0) return b; else return -b;
+}
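
The forward recursion that the comment in __ieee754_jn() relies on for n < x can be exercised directly: J(i+1,x) = (2i/x)*J(i,x) - J(i-1,x), seeded with J0 and J1. A sketch using the j0()/j1()/jn() functions of a POSIX libm for comparison (an assumption about the host platform; link with -lm):

#include <stdio.h>
#include <math.h>

/* Forward recurrence J(i+1,x) = (2i/x)*J(i,x) - J(i-1,x), stable when n <= x,
   as described in the comment inside __ieee754_jn(). */
static double jn_forward(int n, double x)
{
    double a = j0(x), b = j1(x), t;
    for (int i = 1; i < n; i++) {
        t = b;
        b = b * (2.0 * i / x) - a;
        a = t;
    }
    return n == 0 ? a : b;
}

int main(void)
{
    double x = 25.0;
    for (int n = 2; n <= 8; n++)
        printf("n=%d  forward=% .12f  jn()=% .12f\n", n, jn_forward(n, x), jn(n, x));
    return 0;
}
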
diff --git a/third_party/js-1.7/fdlibm/e_lgamma.c b/third_party/js-1.7/fdlibm/e_lgamma.c
new file mode 100644
index 0000000..beb3bd9
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/e_lgamma.c
@@ -0,0 +1,71 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)e_lgamma.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ *
+ */
+
+/* __ieee754_lgamma(x)
+ * Return the logarithm of the Gamma function of x.
+ *
+ * Method: call __ieee754_lgamma_r
+ */
+
+#include "fdlibm.h"
+
+extern int signgam;
+
+#ifdef __STDC__
+ double __ieee754_lgamma(double x)
+#else
+ double __ieee754_lgamma(x)
+ double x;
+#endif
+{
+ return __ieee754_lgamma_r(x,&signgam);
+}
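
__ieee754_lgamma() here is just the non-reentrant wrapper that stores the sign of Gamma(x) in the global signgam. The relation it preserves is Gamma(x) = signgam * exp(lgamma(x)); a small check against tgamma(), assuming a POSIX/XSI libm that declares signgam (hence the _XOPEN_SOURCE define) and linking with -lm:

#define _XOPEN_SOURCE 700   /* for signgam */
#include <stdio.h>
#include <math.h>

int main(void)
{
    /* lgamma() sets signgam, so signgam * exp(lgamma(x)) reconstructs Gamma(x). */
    const double xs[] = { 4.0, 0.5, -0.5, -2.5 };
    for (int i = 0; i < 4; i++) {
        double lg = lgamma(xs[i]);
        printf("x=% .1f  signgam=%+d  signgam*exp(lgamma)=% .9f  tgamma=% .9f\n",
               xs[i], signgam, signgam * exp(lg), tgamma(xs[i]));
    }
    return 0;
}
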
diff --git a/third_party/js-1.7/fdlibm/e_lgamma_r.c b/third_party/js-1.7/fdlibm/e_lgamma_r.c
new file mode 100644
index 0000000..df92e7a
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/e_lgamma_r.c
@@ -0,0 +1,347 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)e_lgamma_r.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ *
+ */
+
+/* __ieee754_lgamma_r(x, signgamp)
+ * Reentrant version of the logarithm of the Gamma function
+ * with a user-provided pointer for the sign of Gamma(x).
+ *
+ * Method:
+ * 1. Argument Reduction for 0 < x <= 8
+ * Since gamma(1+s)=s*gamma(s), for x in [0,8], we may
+ * reduce x to a number in [1.5,2.5] by
+ * lgamma(1+s) = log(s) + lgamma(s)
+ * for example,
+ * lgamma(7.3) = log(6.3) + lgamma(6.3)
+ * = log(6.3*5.3) + lgamma(5.3)
+ * = log(6.3*5.3*4.3*3.3*2.3) + lgamma(2.3)
+ * 2. Polynomial approximation of lgamma around its
+ * minimum ymin=1.461632144968362245 to maintain monotonicity.
+ * On [ymin-0.23, ymin+0.27] (i.e., [1.23164,1.73163]), use
+ * Let z = x-ymin;
+ * lgamma(x) = -1.214862905358496078218 + z^2*poly(z)
+ * where
+ * poly(z) is a 14 degree polynomial.
+ * 2. Rational approximation in the primary interval [2,3]
+ * We use the following approximation:
+ * s = x-2.0;
+ * lgamma(x) = 0.5*s + s*P(s)/Q(s)
+ * with accuracy
+ * |P/Q - (lgamma(x)-0.5s)| < 2**-61.71
+ * Our algorithms are based on the following observation
+ *
+ * zeta(2)-1 2 zeta(3)-1 3
+ * lgamma(2+s) = s*(1-Euler) + --------- * s - --------- * s + ...
+ * 2 3
+ *
+ * where Euler = 0.5772... is the Euler constant, which is very
+ * close to 0.5.
+ *
+ * 3. For x>=8, we have
+ * lgamma(x)~(x-0.5)log(x)-x+0.5*log(2pi)+1/(12x)-1/(360x**3)+....
+ * (better formula:
+ * lgamma(x)~(x-0.5)*(log(x)-1)-.5*(log(2pi)-1) + ...)
+ * Let z = 1/x, then we approximate
+ * f(z) = lgamma(x) - (x-0.5)(log(x)-1)
+ * by
+ * 3 5 11
+ * w = w0 + w1*z + w2*z + w3*z + ... + w6*z
+ * where
+ * |w - f(z)| < 2**-58.74
+ *
+ * 4. For negative x, since (G is gamma function)
+ * -x*G(-x)*G(x) = pi/sin(pi*x),
+ * we have
+ * G(x) = pi/(sin(pi*x)*(-x)*G(-x))
+ * since G(-x) is positive, sign(G(x)) = sign(sin(pi*x)) for x<0
+ * Hence, for x<0, signgam = sign(sin(pi*x)) and
+ * lgamma(x) = log(|Gamma(x)|)
+ * = log(pi/(|x*sin(pi*x)|)) - lgamma(-x);
+ * Note: one should avoid computing pi*(-x) directly in the
+ * computation of sin(pi*(-x)).
+ *
+ * 5. Special Cases
+ * lgamma(2+s) ~ s*(1-Euler) for tiny s
+ * lgamma(1)=lgamma(2)=0
+ * lgamma(x) ~ -log(x) for tiny x
+ * lgamma(0) = lgamma(inf) = inf
+ * lgamma(-integer) = +-inf
+ *
+ */
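
Step 4 of the method above (reflection into positive arguments) can be checked numerically with any C99 lgamma(): for non-integer x < 0, lgamma(x) = log(pi/|x*sin(pi*x)|) - lgamma(-x). The sketch below evaluates sin(pi*x) naively, which is exactly what the note warns production code not to do, but it is adequate for a spot check (link with -lm):

#include <stdio.h>
#include <math.h>

int main(void)
{
    /* Reflection formula for non-integer x < 0, as in step 4 above. */
    const double pi = 3.14159265358979323846;
    const double xs[] = { -0.5, -2.5, -7.3 };
    for (int i = 0; i < 3; i++) {
        double x = xs[i];
        double reflected = log(pi / fabs(x * sin(pi * x))) - lgamma(-x);
        printf("x=% .1f  lgamma(x)=% .12f  via reflection=% .12f\n",
               x, lgamma(x), reflected);
    }
    return 0;
}
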
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+static const double
+#else
+static double
+#endif
+two52= 4.50359962737049600000e+15, /* 0x43300000, 0x00000000 */
+half= 5.00000000000000000000e-01, /* 0x3FE00000, 0x00000000 */
+one = 1.00000000000000000000e+00, /* 0x3FF00000, 0x00000000 */
+pi = 3.14159265358979311600e+00, /* 0x400921FB, 0x54442D18 */
+a0 = 7.72156649015328655494e-02, /* 0x3FB3C467, 0xE37DB0C8 */
+a1 = 3.22467033424113591611e-01, /* 0x3FD4A34C, 0xC4A60FAD */
+a2 = 6.73523010531292681824e-02, /* 0x3FB13E00, 0x1A5562A7 */
+a3 = 2.05808084325167332806e-02, /* 0x3F951322, 0xAC92547B */
+a4 = 7.38555086081402883957e-03, /* 0x3F7E404F, 0xB68FEFE8 */
+a5 = 2.89051383673415629091e-03, /* 0x3F67ADD8, 0xCCB7926B */
+a6 = 1.19270763183362067845e-03, /* 0x3F538A94, 0x116F3F5D */
+a7 = 5.10069792153511336608e-04, /* 0x3F40B6C6, 0x89B99C00 */
+a8 = 2.20862790713908385557e-04, /* 0x3F2CF2EC, 0xED10E54D */
+a9 = 1.08011567247583939954e-04, /* 0x3F1C5088, 0x987DFB07 */
+a10 = 2.52144565451257326939e-05, /* 0x3EFA7074, 0x428CFA52 */
+a11 = 4.48640949618915160150e-05, /* 0x3F07858E, 0x90A45837 */
+tc = 1.46163214496836224576e+00, /* 0x3FF762D8, 0x6356BE3F */
+tf = -1.21486290535849611461e-01, /* 0xBFBF19B9, 0xBCC38A42 */
+/* tt = -(tail of tf) */
+tt = -3.63867699703950536541e-18, /* 0xBC50C7CA, 0xA48A971F */
+t0 = 4.83836122723810047042e-01, /* 0x3FDEF72B, 0xC8EE38A2 */
+t1 = -1.47587722994593911752e-01, /* 0xBFC2E427, 0x8DC6C509 */
+t2 = 6.46249402391333854778e-02, /* 0x3FB08B42, 0x94D5419B */
+t3 = -3.27885410759859649565e-02, /* 0xBFA0C9A8, 0xDF35B713 */
+t4 = 1.79706750811820387126e-02, /* 0x3F9266E7, 0x970AF9EC */
+t5 = -1.03142241298341437450e-02, /* 0xBF851F9F, 0xBA91EC6A */
+t6 = 6.10053870246291332635e-03, /* 0x3F78FCE0, 0xE370E344 */
+t7 = -3.68452016781138256760e-03, /* 0xBF6E2EFF, 0xB3E914D7 */
+t8 = 2.25964780900612472250e-03, /* 0x3F6282D3, 0x2E15C915 */
+t9 = -1.40346469989232843813e-03, /* 0xBF56FE8E, 0xBF2D1AF1 */
+t10 = 8.81081882437654011382e-04, /* 0x3F4CDF0C, 0xEF61A8E9 */
+t11 = -5.38595305356740546715e-04, /* 0xBF41A610, 0x9C73E0EC */
+t12 = 3.15632070903625950361e-04, /* 0x3F34AF6D, 0x6C0EBBF7 */
+t13 = -3.12754168375120860518e-04, /* 0xBF347F24, 0xECC38C38 */
+t14 = 3.35529192635519073543e-04, /* 0x3F35FD3E, 0xE8C2D3F4 */
+u0 = -7.72156649015328655494e-02, /* 0xBFB3C467, 0xE37DB0C8 */
+u1 = 6.32827064025093366517e-01, /* 0x3FE4401E, 0x8B005DFF */
+u2 = 1.45492250137234768737e+00, /* 0x3FF7475C, 0xD119BD6F */
+u3 = 9.77717527963372745603e-01, /* 0x3FEF4976, 0x44EA8450 */
+u4 = 2.28963728064692451092e-01, /* 0x3FCD4EAE, 0xF6010924 */
+u5 = 1.33810918536787660377e-02, /* 0x3F8B678B, 0xBF2BAB09 */
+v1 = 2.45597793713041134822e+00, /* 0x4003A5D7, 0xC2BD619C */
+v2 = 2.12848976379893395361e+00, /* 0x40010725, 0xA42B18F5 */
+v3 = 7.69285150456672783825e-01, /* 0x3FE89DFB, 0xE45050AF */
+v4 = 1.04222645593369134254e-01, /* 0x3FBAAE55, 0xD6537C88 */
+v5 = 3.21709242282423911810e-03, /* 0x3F6A5ABB, 0x57D0CF61 */
+s0 = -7.72156649015328655494e-02, /* 0xBFB3C467, 0xE37DB0C8 */
+s1 = 2.14982415960608852501e-01, /* 0x3FCB848B, 0x36E20878 */
+s2 = 3.25778796408930981787e-01, /* 0x3FD4D98F, 0x4F139F59 */
+s3 = 1.46350472652464452805e-01, /* 0x3FC2BB9C, 0xBEE5F2F7 */
+s4 = 2.66422703033638609560e-02, /* 0x3F9B481C, 0x7E939961 */
+s5 = 1.84028451407337715652e-03, /* 0x3F5E26B6, 0x7368F239 */
+s6 = 3.19475326584100867617e-05, /* 0x3F00BFEC, 0xDD17E945 */
+r1 = 1.39200533467621045958e+00, /* 0x3FF645A7, 0x62C4AB74 */
+r2 = 7.21935547567138069525e-01, /* 0x3FE71A18, 0x93D3DCDC */
+r3 = 1.71933865632803078993e-01, /* 0x3FC601ED, 0xCCFBDF27 */
+r4 = 1.86459191715652901344e-02, /* 0x3F9317EA, 0x742ED475 */
+r5 = 7.77942496381893596434e-04, /* 0x3F497DDA, 0xCA41A95B */
+r6 = 7.32668430744625636189e-06, /* 0x3EDEBAF7, 0xA5B38140 */
+w0 = 4.18938533204672725052e-01, /* 0x3FDACFE3, 0x90C97D69 */
+w1 = 8.33333333333329678849e-02, /* 0x3FB55555, 0x5555553B */
+w2 = -2.77777777728775536470e-03, /* 0xBF66C16C, 0x16B02E5C */
+w3 = 7.93650558643019558500e-04, /* 0x3F4A019F, 0x98CF38B6 */
+w4 = -5.95187557450339963135e-04, /* 0xBF4380CB, 0x8C0FE741 */
+w5 = 8.36339918996282139126e-04, /* 0x3F4B67BA, 0x4CDAD5D1 */
+w6 = -1.63092934096575273989e-03; /* 0xBF5AB89D, 0x0B9E43E4 */
+
+static double zero= 0.00000000000000000000e+00;
+
+#ifdef __STDC__
+ static double sin_pi(double x)
+#else
+ static double sin_pi(x)
+ double x;
+#endif
+{
+ fd_twoints u;
+ double y,z;
+ int n,ix;
+
+ u.d = x;
+ ix = 0x7fffffff&__HI(u);
+
+ if(ix<0x3fd00000) return __kernel_sin(pi*x,zero,0);
+ y = -x; /* x is assume negative */
+
+ /*
+ * argument reduction, make sure inexact flag not raised if input
+ * is an integer
+ */
+ z = fd_floor(y);
+ if(z!=y) { /* inexact anyway */
+ y *= 0.5;
+ y = 2.0*(y - fd_floor(y)); /* y = |x| mod 2.0 */
+ n = (int) (y*4.0);
+ } else {
+ if(ix>=0x43400000) {
+ y = zero; n = 0; /* y must be even */
+ } else {
+ if(ix<0x43300000) z = y+two52; /* exact */
+ u.d = z;
+ n = __LO(u)&1; /* lower word of z */
+ y = n;
+ n<<= 2;
+ }
+ }
+ switch (n) {
+ case 0: y = __kernel_sin(pi*y,zero,0); break;
+ case 1:
+ case 2: y = __kernel_cos(pi*(0.5-y),zero); break;
+ case 3:
+ case 4: y = __kernel_sin(pi*(one-y),zero,0); break;
+ case 5:
+ case 6: y = -__kernel_cos(pi*(y-1.5),zero); break;
+ default: y = __kernel_sin(pi*(y-2.0),zero,0); break;
+ }
+ return -y;
+}
+
+
+#ifdef __STDC__
+ double __ieee754_lgamma_r(double x, int *signgamp)
+#else
+ double __ieee754_lgamma_r(x,signgamp)
+ double x; int *signgamp;
+#endif
+{
+ fd_twoints u;
+ double t,y,z,nadj,p,p1,p2,p3,q,r,w;
+ int i,hx,lx,ix;
+
+ u.d = x;
+ hx = __HI(u);
+ lx = __LO(u);
+
+ /* purge off +-inf, NaN, +-0, and negative arguments */
+ *signgamp = 1;
+ ix = hx&0x7fffffff;
+ if(ix>=0x7ff00000) return x*x;
+ if((ix|lx)==0) return one/zero;
+ if(ix<0x3b900000) { /* |x|<2**-70, return -log(|x|) */
+ if(hx<0) {
+ *signgamp = -1;
+ return -__ieee754_log(-x);
+ } else return -__ieee754_log(x);
+ }
+ if(hx<0) {
+ if(ix>=0x43300000) /* |x|>=2**52, must be -integer */
+ return one/zero;
+ t = sin_pi(x);
+ if(t==zero) return one/zero; /* -integer */
+ nadj = __ieee754_log(pi/fd_fabs(t*x));
+ if(t<zero) *signgamp = -1;
+ x = -x;
+ }
+
+ /* purge off 1 and 2 */
+ if((((ix-0x3ff00000)|lx)==0)||(((ix-0x40000000)|lx)==0)) r = 0;
+ /* for x < 2.0 */
+ else if(ix<0x40000000) {
+ if(ix<=0x3feccccc) { /* lgamma(x) = lgamma(x+1)-log(x) */
+ r = -__ieee754_log(x);
+ if(ix>=0x3FE76944) {y = one-x; i= 0;}
+ else if(ix>=0x3FCDA661) {y= x-(tc-one); i=1;}
+ else {y = x; i=2;}
+ } else {
+ r = zero;
+ if(ix>=0x3FFBB4C3) {y=2.0-x;i=0;} /* [1.7316,2] */
+ else if(ix>=0x3FF3B4C4) {y=x-tc;i=1;} /* [1.23,1.73] */
+ else {y=x-one;i=2;}
+ }
+ switch(i) {
+ case 0:
+ z = y*y;
+ p1 = a0+z*(a2+z*(a4+z*(a6+z*(a8+z*a10))));
+ p2 = z*(a1+z*(a3+z*(a5+z*(a7+z*(a9+z*a11)))));
+ p = y*p1+p2;
+ r += (p-0.5*y); break;
+ case 1:
+ z = y*y;
+ w = z*y;
+ p1 = t0+w*(t3+w*(t6+w*(t9 +w*t12))); /* parallel comp */
+ p2 = t1+w*(t4+w*(t7+w*(t10+w*t13)));
+ p3 = t2+w*(t5+w*(t8+w*(t11+w*t14)));
+ p = z*p1-(tt-w*(p2+y*p3));
+ r += (tf + p); break;
+ case 2:
+ p1 = y*(u0+y*(u1+y*(u2+y*(u3+y*(u4+y*u5)))));
+ p2 = one+y*(v1+y*(v2+y*(v3+y*(v4+y*v5))));
+ r += (-0.5*y + p1/p2);
+ }
+ }
+ else if(ix<0x40200000) { /* x < 8.0 */
+ i = (int)x;
+ t = zero;
+ y = x-(double)i;
+ p = y*(s0+y*(s1+y*(s2+y*(s3+y*(s4+y*(s5+y*s6))))));
+ q = one+y*(r1+y*(r2+y*(r3+y*(r4+y*(r5+y*r6)))));
+ r = half*y+p/q;
+ z = one; /* lgamma(1+s) = log(s) + lgamma(s) */
+ switch(i) {
+ case 7: z *= (y+6.0); /* FALLTHRU */
+ case 6: z *= (y+5.0); /* FALLTHRU */
+ case 5: z *= (y+4.0); /* FALLTHRU */
+ case 4: z *= (y+3.0); /* FALLTHRU */
+ case 3: z *= (y+2.0); /* FALLTHRU */
+ r += __ieee754_log(z); break;
+ }
+ /* 8.0 <= x < 2**58 */
+ } else if (ix < 0x43900000) {
+ t = __ieee754_log(x);
+ z = one/x;
+ y = z*z;
+ w = w0+z*(w1+y*(w2+y*(w3+y*(w4+y*(w5+y*w6)))));
+ r = (x-half)*(t-one)+w;
+ } else
+ /* 2**58 <= x <= inf */
+ r = x*(__ieee754_log(x)-one);
+ if(hx<0) r = nadj - r;
+ return r;
+}
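
The argument reduction worked out in the method comment, lgamma(7.3) = log(6.3*5.3*4.3*3.3*2.3) + lgamma(2.3), is easy to confirm with the C99 lgamma() (link with -lm):

#include <stdio.h>
#include <math.h>

int main(void)
{
    /* The reduction example from the comment:
       lgamma(7.3) = log(6.3*5.3*4.3*3.3*2.3) + lgamma(2.3). */
    double direct  = lgamma(7.3);
    double reduced = log(6.3 * 5.3 * 4.3 * 3.3 * 2.3) + lgamma(2.3);
    printf("lgamma(7.3) = %.15f\nreduced     = %.15f\n", direct, reduced);
    return 0;
}
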
diff --git a/third_party/js-1.7/fdlibm/e_log.c b/third_party/js-1.7/fdlibm/e_log.c
new file mode 100644
index 0000000..8645d6e
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/e_log.c
@@ -0,0 +1,184 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)e_log.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/* __ieee754_log(x)
+ * Return the logarithm of x
+ *
+ * Method :
+ * 1. Argument Reduction: find k and f such that
+ * x = 2^k * (1+f),
+ * where sqrt(2)/2 < 1+f < sqrt(2) .
+ *
+ * 2. Approximation of log(1+f).
+ * Let s = f/(2+f) ; based on log(1+f) = log(1+s) - log(1-s)
+ * = 2s + 2/3 s**3 + 2/5 s**5 + .....,
+ * = 2s + s*R
+ * We use a special Remez algorithm on [0,0.1716] to generate
+ * a polynomial of degree 14 to approximate R. The maximum error
+ * of this polynomial approximation is bounded by 2**-58.45. In
+ * other words,
+ * 2 4 6 8 10 12 14
+ * R(z) ~ Lg1*s +Lg2*s +Lg3*s +Lg4*s +Lg5*s +Lg6*s +Lg7*s
+ * (the values of Lg1 to Lg7 are listed in the program)
+ * and
+ * | 2 14 | -58.45
+ * | Lg1*s +...+Lg7*s - R(z) | <= 2
+ * | |
+ * Note that 2s = f - s*f = f - hfsq + s*hfsq, where hfsq = f*f/2.
+ * In order to guarantee error in log below 1ulp, we compute log
+ * by
+ * log(1+f) = f - s*(f - R) (if f is not too large)
+ * log(1+f) = f - (hfsq - s*(hfsq+R)). (better accuracy)
+ *
+ * 3. Finally, log(x) = k*ln2 + log(1+f).
+ * = k*ln2_hi+(f-(hfsq-(s*(hfsq+R)+k*ln2_lo)))
+ * Here ln2 is split into two floating point number:
+ * ln2_hi + ln2_lo,
+ * where n*ln2_hi is always exact for |n| < 2000.
+ *
+ * Special cases:
+ * log(x) is NaN with signal if x < 0 (including -INF) ;
+ * log(+INF) is +INF; log(0) is -INF with signal;
+ * log(NaN) is that NaN with no signal.
+ *
+ * Accuracy:
+ * according to an error analysis, the error is always less than
+ * 1 ulp (unit in the last place).
+ *
+ * Constants:
+ * The hexadecimal values are the intended ones for the following
+ * constants. The decimal values may be used, provided that the
+ * compiler will convert from decimal to binary accurately enough
+ * to produce the hexadecimal values shown.
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+static const double
+#else
+static double
+#endif
+ln2_hi = 6.93147180369123816490e-01, /* 3fe62e42 fee00000 */
+ln2_lo = 1.90821492927058770002e-10, /* 3dea39ef 35793c76 */
+two54 = 1.80143985094819840000e+16, /* 43500000 00000000 */
+Lg1 = 6.666666666666735130e-01, /* 3FE55555 55555593 */
+Lg2 = 3.999999999940941908e-01, /* 3FD99999 9997FA04 */
+Lg3 = 2.857142874366239149e-01, /* 3FD24924 94229359 */
+Lg4 = 2.222219843214978396e-01, /* 3FCC71C5 1D8E78AF */
+Lg5 = 1.818357216161805012e-01, /* 3FC74664 96CB03DE */
+Lg6 = 1.531383769920937332e-01, /* 3FC39A09 D078C69F */
+Lg7 = 1.479819860511658591e-01; /* 3FC2F112 DF3E5244 */
+
+static double zero = 0.0;
+
+#ifdef __STDC__
+ double __ieee754_log(double x)
+#else
+ double __ieee754_log(x)
+ double x;
+#endif
+{
+ fd_twoints u;
+ double hfsq,f,s,z,R,w,t1,t2,dk;
+ int k,hx,i,j;
+ unsigned lx;
+
+ u.d = x;
+ hx = __HI(u); /* high word of x */
+ lx = __LO(u); /* low word of x */
+
+ k=0;
+ if (hx < 0x00100000) { /* x < 2**-1022 */
+ if (((hx&0x7fffffff)|lx)==0)
+ return -two54/zero; /* log(+-0)=-inf */
+ if (hx<0) return (x-x)/zero; /* log(-#) = NaN */
+ k -= 54; x *= two54; /* subnormal number, scale up x */
+ u.d = x;
+ hx = __HI(u); /* high word of x */
+ }
+ if (hx >= 0x7ff00000) return x+x;
+ k += (hx>>20)-1023;
+ hx &= 0x000fffff;
+ i = (hx+0x95f64)&0x100000;
+ u.d = x;
+ __HI(u) = hx|(i^0x3ff00000); /* normalize x or x/2 */
+ x = u.d;
+ k += (i>>20);
+ f = x-1.0;
+ if((0x000fffff&(2+hx))<3) { /* |f| < 2**-20 */
+ if(f==zero) {
+ if(k==0) return zero; else {dk=(double)k;
+ return dk*ln2_hi+dk*ln2_lo;}
+ }
+ R = f*f*(0.5-0.33333333333333333*f);
+ if(k==0) return f-R; else {dk=(double)k;
+ return dk*ln2_hi-((R-dk*ln2_lo)-f);}
+ }
+ s = f/(2.0+f);
+ dk = (double)k;
+ z = s*s;
+ i = hx-0x6147a;
+ w = z*z;
+ j = 0x6b851-hx;
+ t1= w*(Lg2+w*(Lg4+w*Lg6));
+ t2= z*(Lg1+w*(Lg3+w*(Lg5+w*Lg7)));
+ i |= j;
+ R = t2+t1;
+ if(i>0) {
+ hfsq=0.5*f*f;
+ if(k==0) return f-(hfsq-s*(hfsq+R)); else
+ return dk*ln2_hi-((hfsq-(s*(hfsq+R)+dk*ln2_lo))-f);
+ } else {
+ if(k==0) return f-s*(f-R); else
+ return dk*ln2_hi-((s*(f-R)-dk*ln2_lo)-f);
+ }
+}
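
Steps 1 and 3 of the method above can be imitated crudely with frexp() and log1p(): write x = 2^k*(1+f) with sqrt(2)/2 < 1+f < sqrt(2) and sum k*ln2 + log(1+f). The sketch ignores the careful hi/lo splitting of ln2 and the polynomial for R, so it illustrates only the reduction, not the accuracy claims (link with -lm):

#include <stdio.h>
#include <math.h>

/* Write x = 2^k*(1+f) with sqrt(2)/2 < 1+f < sqrt(2),
   then log(x) = k*ln2 + log1p(f). */
static double log_via_reduction(double x)
{
    int k;
    double m = frexp(x, &k);          /* x = m * 2^k, 0.5 <= m < 1 */
    if (m < 0.70710678118654752440) { /* keep 1+f in [sqrt(2)/2, sqrt(2)) */
        m *= 2.0;
        k -= 1;
    }
    return k * 0.69314718055994530942 + log1p(m - 1.0);
}

int main(void)
{
    const double xs[] = { 0.1, 0.9, 3.0, 1e10, 1e-300 };
    for (int i = 0; i < 5; i++)
        printf("x=%-8g  log()=% .15g  reduced=% .15g\n",
               xs[i], log(xs[i]), log_via_reduction(xs[i]));
    return 0;
}
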
diff --git a/third_party/js-1.7/fdlibm/e_log10.c b/third_party/js-1.7/fdlibm/e_log10.c
new file mode 100644
index 0000000..5f88f4b
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/e_log10.c
@@ -0,0 +1,134 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)e_log10.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/* __ieee754_log10(x)
+ * Return the base 10 logarithm of x
+ *
+ * Method :
+ * Let log10_2hi = leading 40 bits of log10(2) and
+ * log10_2lo = log10(2) - log10_2hi,
+ * ivln10 = 1/log(10) rounded.
+ * Then
+ * n = ilogb(x),
+ * if(n<0) n = n+1;
+ * x = scalbn(x,-n);
+ * log10(x) := n*log10_2hi + (n*log10_2lo + ivln10*log(x))
+ *
+ * Note 1:
+ * To guarantee log10(10**n)=n, where 10**n is normal, the rounding
+ * mode must be set to Round-to-Nearest.
+ * Note 2:
+ * [1/log(10)] rounded to 53 bits has error .198 ulps;
+ * log10 is monotonic at all binary break points.
+ *
+ * Special cases:
+ * log10(x) is NaN with signal if x < 0;
+ * log10(+INF) is +INF with no signal; log10(0) is -INF with signal;
+ * log10(NaN) is that NaN with no signal;
+ * log10(10**N) = N for N=0,1,...,22.
+ *
+ * Constants:
+ * The hexadecimal values are the intended ones for the following constants.
+ * The decimal values may be used, provided that the compiler will convert
+ * from decimal to binary accurately enough to produce the hexadecimal values
+ * shown.
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+static const double
+#else
+static double
+#endif
+two54 = 1.80143985094819840000e+16, /* 0x43500000, 0x00000000 */
+ivln10 = 4.34294481903251816668e-01, /* 0x3FDBCB7B, 0x1526E50E */
+log10_2hi = 3.01029995663611771306e-01, /* 0x3FD34413, 0x509F6000 */
+log10_2lo = 3.69423907715893078616e-13; /* 0x3D59FEF3, 0x11F12B36 */
+
+static double zero = 0.0;
+
+#ifdef __STDC__
+ double __ieee754_log10(double x)
+#else
+ double __ieee754_log10(x)
+ double x;
+#endif
+{
+ fd_twoints u;
+ double y,z;
+ int i,k,hx;
+ unsigned lx;
+
+ u.d = x;
+ hx = __HI(u); /* high word of x */
+ lx = __LO(u); /* low word of x */
+
+ k=0;
+ if (hx < 0x00100000) { /* x < 2**-1022 */
+ if (((hx&0x7fffffff)|lx)==0)
+ return -two54/zero; /* log(+-0)=-inf */
+ if (hx<0) return (x-x)/zero; /* log(-#) = NaN */
+ k -= 54; x *= two54; /* subnormal number, scale up x */
+ u.d = x;
+ hx = __HI(u); /* high word of x */
+ }
+ if (hx >= 0x7ff00000) return x+x;
+ k += (hx>>20)-1023;
+ i = ((unsigned)k&0x80000000)>>31;
+ hx = (hx&0x000fffff)|((0x3ff-i)<<20);
+ y = (double)(k+i);
+ u.d = x;
+ __HI(u) = hx;
+ x = u.d;
+ z = y*log10_2lo + ivln10*__ieee754_log(x);
+ return z+y*log10_2hi;
+}
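
The two-piece constant above is the usual hi/lo split: log10_2hi keeps the leading bits of log10(2) (its low mantissa bits are zero, so n*log10_2hi stays exact for moderate n) and log10_2lo carries the remainder. A quick check of the split using the values from this file (link with -lm):

#include <stdio.h>
#include <math.h>

int main(void)
{
    /* Constants copied from e_log10.c above. */
    const double log10_2hi = 3.01029995663611771306e-01;
    const double log10_2lo = 3.69423907715893078616e-13;
    double ref = log10(2.0);
    printf("log10(2)      = %.17e\n", ref);
    printf("hi + lo       = %.17e\n", log10_2hi + log10_2lo);
    printf("log10(2) - hi = %.17e   (compare with lo = %.17e)\n",
           ref - log10_2hi, log10_2lo);
    return 0;
}
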
diff --git a/third_party/js-1.7/fdlibm/e_pow.c b/third_party/js-1.7/fdlibm/e_pow.c
new file mode 100644
index 0000000..18c8d06
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/e_pow.c
@@ -0,0 +1,386 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)e_pow.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/* __ieee754_pow(x,y) return x**y
+ *
+ * n
+ * Method: Let x = 2 * (1+f)
+ * 1. Compute and return log2(x) in two pieces:
+ * log2(x) = w1 + w2,
+ * where w1 has 53-24 = 29 bit trailing zeros.
+ *	2. Perform y*log2(x) = n+y' by simulating multi-precision
+ * arithmetic, where |y'|<=0.5.
+ * 3. Return x**y = 2**n*exp(y'*log2)
+ *
+ * Special cases:
+ * 1. (anything) ** 0 is 1
+ * 2. (anything) ** 1 is itself
+ * 3. (anything) ** NAN is NAN
+ * 4. NAN ** (anything except 0) is NAN
+ * 5. +-(|x| > 1) ** +INF is +INF
+ * 6. +-(|x| > 1) ** -INF is +0
+ * 7. +-(|x| < 1) ** +INF is +0
+ * 8. +-(|x| < 1) ** -INF is +INF
+ * 9. +-1 ** +-INF is NAN
+ * 10. +0 ** (+anything except 0, NAN) is +0
+ * 11. -0 ** (+anything except 0, NAN, odd integer) is +0
+ * 12. +0 ** (-anything except 0, NAN) is +INF
+ * 13. -0 ** (-anything except 0, NAN, odd integer) is +INF
+ * 14. -0 ** (odd integer) = -( +0 ** (odd integer) )
+ * 15. +INF ** (+anything except 0,NAN) is +INF
+ * 16. +INF ** (-anything except 0,NAN) is +0
+ * 17. -INF ** (anything) = -0 ** (-anything)
+ * 18. (-anything) ** (integer) is (-1)**(integer)*(+anything**integer)
+ * 19. (-anything except 0 and inf) ** (non-integer) is NAN
+ *
+ * Accuracy:
+ * pow(x,y) returns x**y nearly rounded. In particular
+ * pow(integer,integer)
+ * always returns the correct integer provided it is
+ * representable.
+ *
+ * Constants :
+ * The hexadecimal values are the intended ones for the following
+ * constants. The decimal values may be used, provided that the
+ * compiler will convert from decimal to binary accurately enough
+ * to produce the hexadecimal values shown.
+ */
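
Several of the special cases above (10-14 and 18) hinge on whether y is an integer and, if so, whether it is odd. The classifier below is a simplified stand-in for the yisint flag computed inside __ieee754_pow() further down (0 = not an integer, 1 = odd, 2 = even); it is a sketch, not the bit-level test the real code uses.

#include <stdio.h>
#include <math.h>

/* 0 = y is not an integer, 1 = odd integer, 2 = even integer. */
static int classify_y(double y)
{
    if (!isfinite(y) || y != floor(y)) return 0;
    return fmod(fabs(y), 2.0) == 1.0 ? 1 : 2;
}

int main(void)
{
    const double ys[] = { 3.0, -4.0, 2.5, 1e31, NAN };
    for (int i = 0; i < 5; i++)
        printf("y=%-9g  yisint=%d\n", ys[i], classify_y(ys[i]));
    return 0;
}
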
+
+#include "fdlibm.h"
+
+#if defined(_MSC_VER)
+/* Microsoft Compiler */
+#pragma warning( disable : 4723 ) /* disables potential divide by 0 warning */
+#endif
+
+#ifdef __STDC__
+static const double
+#else
+static double
+#endif
+bp[] = {1.0, 1.5,},
+dp_h[] = { 0.0, 5.84962487220764160156e-01,}, /* 0x3FE2B803, 0x40000000 */
+dp_l[] = { 0.0, 1.35003920212974897128e-08,}, /* 0x3E4CFDEB, 0x43CFD006 */
+zero = 0.0,
+one = 1.0,
+two = 2.0,
+two53 = 9007199254740992.0, /* 0x43400000, 0x00000000 */
+really_big = 1.0e300,
+tiny = 1.0e-300,
+ /* poly coefs for (3/2)*(log(x)-2s-2/3*s**3) */
+L1 = 5.99999999999994648725e-01, /* 0x3FE33333, 0x33333303 */
+L2 = 4.28571428578550184252e-01, /* 0x3FDB6DB6, 0xDB6FABFF */
+L3 = 3.33333329818377432918e-01, /* 0x3FD55555, 0x518F264D */
+L4 = 2.72728123808534006489e-01, /* 0x3FD17460, 0xA91D4101 */
+L5 = 2.30660745775561754067e-01, /* 0x3FCD864A, 0x93C9DB65 */
+L6 = 2.06975017800338417784e-01, /* 0x3FCA7E28, 0x4A454EEF */
+P1 = 1.66666666666666019037e-01, /* 0x3FC55555, 0x5555553E */
+P2 = -2.77777777770155933842e-03, /* 0xBF66C16C, 0x16BEBD93 */
+P3 = 6.61375632143793436117e-05, /* 0x3F11566A, 0xAF25DE2C */
+P4 = -1.65339022054652515390e-06, /* 0xBEBBBD41, 0xC5D26BF1 */
+P5 = 4.13813679705723846039e-08, /* 0x3E663769, 0x72BEA4D0 */
+lg2 = 6.93147180559945286227e-01, /* 0x3FE62E42, 0xFEFA39EF */
+lg2_h = 6.93147182464599609375e-01, /* 0x3FE62E43, 0x00000000 */
+lg2_l = -1.90465429995776804525e-09, /* 0xBE205C61, 0x0CA86C39 */
+ovt = 8.0085662595372944372e-0017, /* -(1024-log2(ovfl+.5ulp)) */
+cp = 9.61796693925975554329e-01, /* 0x3FEEC709, 0xDC3A03FD =2/(3ln2) */
+cp_h = 9.61796700954437255859e-01, /* 0x3FEEC709, 0xE0000000 =(float)cp */
+cp_l = -7.02846165095275826516e-09, /* 0xBE3E2FE0, 0x145B01F5 =tail of cp_h*/
+ivln2 = 1.44269504088896338700e+00, /* 0x3FF71547, 0x652B82FE =1/ln2 */
+ivln2_h = 1.44269502162933349609e+00, /* 0x3FF71547, 0x60000000 =24b 1/ln2*/
+ivln2_l = 1.92596299112661746887e-08; /* 0x3E54AE0B, 0xF85DDF44 =1/ln2 tail*/
+
+#ifdef __STDC__
+ double __ieee754_pow(double x, double y)
+#else
+ double __ieee754_pow(x,y)
+ double x, y;
+#endif
+{
+ fd_twoints ux, uy, uz;
+ double y1,t1,p_h,t,z,ax;
+ double z_h,z_l,p_l;
+ double t2,r,s,u,v,w;
+ int i,j,k,yisint,n;
+ int hx,hy,ix,iy;
+ unsigned lx,ly;
+
+ ux.d = x; uy.d = y;
+ hx = __HI(ux); lx = __LO(ux);
+ hy = __HI(uy); ly = __LO(uy);
+ ix = hx&0x7fffffff; iy = hy&0x7fffffff;
+
+ /* y==zero: x**0 = 1 */
+ if((iy|ly)==0) return one;
+
+ /* +-NaN return x+y */
+ if(ix > 0x7ff00000 || ((ix==0x7ff00000)&&(lx!=0)) ||
+ iy > 0x7ff00000 || ((iy==0x7ff00000)&&(ly!=0)))
+ return x+y;
+
+ /* determine if y is an odd int when x < 0
+ * yisint = 0 ... y is not an integer
+ * yisint = 1 ... y is an odd int
+ * yisint = 2 ... y is an even int
+ */
+ yisint = 0;
+ if(hx<0) {
+ if(iy>=0x43400000) yisint = 2; /* even integer y */
+ else if(iy>=0x3ff00000) {
+ k = (iy>>20)-0x3ff; /* exponent */
+ if(k>20) {
+ j = ly>>(52-k);
+ if((j<<(52-k))==(int)ly) yisint = 2-(j&1);
+ } else if(ly==0) {
+ j = iy>>(20-k);
+ if((j<<(20-k))==iy) yisint = 2-(j&1);
+ }
+ }
+ }
+
+ /* special value of y */
+ if(ly==0) {
+ if (iy==0x7ff00000) { /* y is +-inf */
+ if(((ix-0x3ff00000)|lx)==0)
+#ifdef _WIN32
+/* VC++ optimizer reduces y - y to 0 */
+ return y / y;
+#else
+ return y - y; /* inf**+-1 is NaN */
+#endif
+ else if (ix >= 0x3ff00000)/* (|x|>1)**+-inf = inf,0 */
+ return (hy>=0)? y: zero;
+ else /* (|x|<1)**-,+inf = inf,0 */
+ return (hy<0)?-y: zero;
+ }
+ if(iy==0x3ff00000) { /* y is +-1 */
+ if(hy<0) return one/x; else return x;
+ }
+ if(hy==0x40000000) return x*x; /* y is 2 */
+ if(hy==0x3fe00000) { /* y is 0.5 */
+ if(hx>=0) /* x >= +0 */
+ return fd_sqrt(x);
+ }
+ }
+
+ ax = fd_fabs(x);
+ /* special value of x */
+ if(lx==0) {
+ if(ix==0x7ff00000||ix==0||ix==0x3ff00000){
+ z = ax; /*x is +-0,+-inf,+-1*/
+ if(hy<0) z = one/z; /* z = (1/|x|) */
+ if(hx<0) {
+ if(((ix-0x3ff00000)|yisint)==0) {
+ z = (z-z)/(z-z); /* (-1)**non-int is NaN */
+ } else if(yisint==1) {
+#ifdef HPUX
+ uz.d = z;
+ __HI(uz) ^= 1<<31; /* some HPUXes cannot negate 0.. */
+ z = uz.d;
+#else
+ z = -z; /* (x<0)**odd = -(|x|**odd) */
+#endif
+ }
+ }
+ return z;
+ }
+ }
+
+ /* (x<0)**(non-int) is NaN */
+ if((((hx>>31)+1)|yisint)==0) return (x-x)/(x-x);
+
+ /* |y| is really_big */
+ if(iy>0x41e00000) { /* if |y| > 2**31 */
+ if(iy>0x43f00000){ /* if |y| > 2**64, must o/uflow */
+ if(ix<=0x3fefffff) return (hy<0)? really_big*really_big:tiny*tiny;
+ if(ix>=0x3ff00000) return (hy>0)? really_big*really_big:tiny*tiny;
+ }
+ /* over/underflow if x is not close to one */
+ if(ix<0x3fefffff) return (hy<0)? really_big*really_big:tiny*tiny;
+ if(ix>0x3ff00000) return (hy>0)? really_big*really_big:tiny*tiny;
+ /* now |1-x| is tiny <= 2**-20, so it suffices to compute
+ log(x) by x-x^2/2+x^3/3-x^4/4 */
+ t = x-1; /* t has 20 trailing zeros */
+ w = (t*t)*(0.5-t*(0.3333333333333333333333-t*0.25));
+ u = ivln2_h*t; /* ivln2_h has 21 sig. bits */
+ v = t*ivln2_l-w*ivln2;
+ t1 = u+v;
+ uz.d = t1;
+ __LO(uz) = 0;
+ t1 = uz.d;
+ t2 = v-(t1-u);
+ } else {
+ double s_h,t_h;
+ double s2,s_l,t_l;
+ n = 0;
+ /* take care of subnormal numbers */
+ if(ix<0x00100000)
+ {ax *= two53; n -= 53; uz.d = ax; ix = __HI(uz); }
+ n += ((ix)>>20)-0x3ff;
+ j = ix&0x000fffff;
+ /* determine interval */
+ ix = j|0x3ff00000; /* normalize ix */
+ if(j<=0x3988E) k=0; /* |x|<sqrt(3/2) */
+ else if(j<0xBB67A) k=1; /* |x|<sqrt(3) */
+ else {k=0;n+=1;ix -= 0x00100000;}
+ uz.d = ax;
+ __HI(uz) = ix;
+ ax = uz.d;
+
+ /* compute s = s_h+s_l = (x-1)/(x+1) or (x-1.5)/(x+1.5) */
+ u = ax-bp[k]; /* bp[0]=1.0, bp[1]=1.5 */
+ v = one/(ax+bp[k]);
+ s = u*v;
+ s_h = s;
+ uz.d = s_h;
+ __LO(uz) = 0;
+ s_h = uz.d;
+ /* t_h=ax+bp[k] High */
+ t_h = zero;
+ uz.d = t_h;
+ __HI(uz)=((ix>>1)|0x20000000)+0x00080000+(k<<18);
+ t_h = uz.d;
+ t_l = ax - (t_h-bp[k]);
+ s_l = v*((u-s_h*t_h)-s_h*t_l);
+ /* compute log(ax) */
+ s2 = s*s;
+ r = s2*s2*(L1+s2*(L2+s2*(L3+s2*(L4+s2*(L5+s2*L6)))));
+ r += s_l*(s_h+s);
+ s2 = s_h*s_h;
+ t_h = 3.0+s2+r;
+ uz.d = t_h;
+ __LO(uz) = 0;
+ t_h = uz.d;
+ t_l = r-((t_h-3.0)-s2);
+ /* u+v = s*(1+...) */
+ u = s_h*t_h;
+ v = s_l*t_h+t_l*s;
+ /* 2/(3log2)*(s+...) */
+ p_h = u+v;
+ uz.d = p_h;
+ __LO(uz) = 0;
+ p_h = uz.d;
+ p_l = v-(p_h-u);
+ z_h = cp_h*p_h; /* cp_h+cp_l = 2/(3*log2) */
+ z_l = cp_l*p_h+p_l*cp+dp_l[k];
+ /* log2(ax) = (s+..)*2/(3*log2) = n + dp_h + z_h + z_l */
+ t = (double)n;
+ t1 = (((z_h+z_l)+dp_h[k])+t);
+ uz.d = t1;
+ __LO(uz) = 0;
+ t1 = uz.d;
+ t2 = z_l-(((t1-t)-dp_h[k])-z_h);
+ }
+
+ s = one; /* s (sign of result -ve**odd) = -1 else = 1 */
+ if((((hx>>31)+1)|(yisint-1))==0) s = -one;/* (-ve)**(odd int) */
+
+ /* split up y into y1+y2 and compute (y1+y2)*(t1+t2) */
+ y1 = y;
+ uy.d = y1;
+ __LO(uy) = 0;
+ y1 = uy.d;
+ p_l = (y-y1)*t1+y*t2;
+ p_h = y1*t1;
+ z = p_l+p_h;
+ uz.d = z;
+ j = __HI(uz);
+ i = __LO(uz);
+
+ if (j>=0x40900000) { /* z >= 1024 */
+ if(((j-0x40900000)|i)!=0) /* if z > 1024 */
+ return s*really_big*really_big; /* overflow */
+ else {
+ if(p_l+ovt>z-p_h) return s*really_big*really_big; /* overflow */
+ }
+ } else if((j&0x7fffffff)>=0x4090cc00 ) { /* z <= -1075 */
+ if(((j-0xc090cc00)|i)!=0) /* z < -1075 */
+ return s*tiny*tiny; /* underflow */
+ else {
+ if(p_l<=z-p_h) return s*tiny*tiny; /* underflow */
+ }
+ }
+ /*
+ * compute 2**(p_h+p_l)
+ */
+ i = j&0x7fffffff;
+ k = (i>>20)-0x3ff;
+ n = 0;
+ if(i>0x3fe00000) { /* if |z| > 0.5, set n = [z+0.5] */
+ n = j+(0x00100000>>(k+1));
+ k = ((n&0x7fffffff)>>20)-0x3ff; /* new k for n */
+ t = zero;
+ uz.d = t;
+ __HI(uz) = (n&~(0x000fffff>>k));
+ t = uz.d;
+ n = ((n&0x000fffff)|0x00100000)>>(20-k);
+ if(j<0) n = -n;
+ p_h -= t;
+ }
+ t = p_l+p_h;
+ uz.d = t;
+ __LO(uz) = 0;
+ t = uz.d;
+ u = t*lg2_h;
+ v = (p_l-(t-p_h))*lg2+t*lg2_l;
+ z = u+v;
+ w = v-(z-u);
+ t = z*z;
+ t1 = z - t*(P1+t*(P2+t*(P3+t*(P4+t*P5))));
+ r = (z*t1)/(t1-two)-(w+z*w);
+ z = one-(r-z);
+ uz.d = z;
+ j = __HI(uz);
+ j += (n<<20);
+ if((j>>20)<=0) z = fd_scalbn(z,n); /* subnormal output */
+ else { uz.d = z; __HI(uz) += (n<<20); z = uz.d; }
+ return s*z;
+}
diff --git a/third_party/js-1.7/fdlibm/e_rem_pio2.c b/third_party/js-1.7/fdlibm/e_rem_pio2.c
new file mode 100644
index 0000000..c9d2618
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/e_rem_pio2.c
@@ -0,0 +1,222 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)e_rem_pio2.c 1.4 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ *
+ */
+
+/* __ieee754_rem_pio2(x,y)
+ *
+ * return the remainder of x rem pi/2 in y[0]+y[1]
+ * use __kernel_rem_pio2()
+ */
+
+#include "fdlibm.h"
+
+/*
+ * Table of constants for 2/pi, 396 Hex digits (476 decimal) of 2/pi
+ */
+#ifdef __STDC__
+static const int two_over_pi[] = {
+#else
+static int two_over_pi[] = {
+#endif
+0xA2F983, 0x6E4E44, 0x1529FC, 0x2757D1, 0xF534DD, 0xC0DB62,
+0x95993C, 0x439041, 0xFE5163, 0xABDEBB, 0xC561B7, 0x246E3A,
+0x424DD2, 0xE00649, 0x2EEA09, 0xD1921C, 0xFE1DEB, 0x1CB129,
+0xA73EE8, 0x8235F5, 0x2EBB44, 0x84E99C, 0x7026B4, 0x5F7E41,
+0x3991D6, 0x398353, 0x39F49C, 0x845F8B, 0xBDF928, 0x3B1FF8,
+0x97FFDE, 0x05980F, 0xEF2F11, 0x8B5A0A, 0x6D1F6D, 0x367ECF,
+0x27CB09, 0xB74F46, 0x3F669E, 0x5FEA2D, 0x7527BA, 0xC7EBE5,
+0xF17B3D, 0x0739F7, 0x8A5292, 0xEA6BFB, 0x5FB11F, 0x8D5D08,
+0x560330, 0x46FC7B, 0x6BABF0, 0xCFBC20, 0x9AF436, 0x1DA9E3,
+0x91615E, 0xE61B08, 0x659985, 0x5F14A0, 0x68408D, 0xFFD880,
+0x4D7327, 0x310606, 0x1556CA, 0x73A8C9, 0x60E27B, 0xC08C6B,
+};
+
+#ifdef __STDC__
+static const int npio2_hw[] = {
+#else
+static int npio2_hw[] = {
+#endif
+0x3FF921FB, 0x400921FB, 0x4012D97C, 0x401921FB, 0x401F6A7A, 0x4022D97C,
+0x4025FDBB, 0x402921FB, 0x402C463A, 0x402F6A7A, 0x4031475C, 0x4032D97C,
+0x40346B9C, 0x4035FDBB, 0x40378FDB, 0x403921FB, 0x403AB41B, 0x403C463A,
+0x403DD85A, 0x403F6A7A, 0x40407E4C, 0x4041475C, 0x4042106C, 0x4042D97C,
+0x4043A28C, 0x40446B9C, 0x404534AC, 0x4045FDBB, 0x4046C6CB, 0x40478FDB,
+0x404858EB, 0x404921FB,
+};
+
+/*
+ * invpio2: 53 bits of 2/pi
+ * pio2_1:  first 33 bits of pi/2
+ * pio2_1t: pi/2 - pio2_1
+ * pio2_2:  second 33 bits of pi/2
+ * pio2_2t: pi/2 - (pio2_1+pio2_2)
+ * pio2_3:  third 33 bits of pi/2
+ * pio2_3t: pi/2 - (pio2_1+pio2_2+pio2_3)
+ */
+
+#ifdef __STDC__
+static const double
+#else
+static double
+#endif
+zero = 0.00000000000000000000e+00, /* 0x00000000, 0x00000000 */
+half = 5.00000000000000000000e-01, /* 0x3FE00000, 0x00000000 */
+two24 = 1.67772160000000000000e+07, /* 0x41700000, 0x00000000 */
+invpio2 = 6.36619772367581382433e-01, /* 0x3FE45F30, 0x6DC9C883 */
+pio2_1 = 1.57079632673412561417e+00, /* 0x3FF921FB, 0x54400000 */
+pio2_1t = 6.07710050650619224932e-11, /* 0x3DD0B461, 0x1A626331 */
+pio2_2 = 6.07710050630396597660e-11, /* 0x3DD0B461, 0x1A600000 */
+pio2_2t = 2.02226624879595063154e-21, /* 0x3BA3198A, 0x2E037073 */
+pio2_3 = 2.02226624871116645580e-21, /* 0x3BA3198A, 0x2E000000 */
+pio2_3t = 8.47842766036889956997e-32; /* 0x397B839A, 0x252049C1 */
+
+#ifdef __STDC__
+ int __ieee754_rem_pio2(double x, double *y)
+#else
+ int __ieee754_rem_pio2(x,y)
+ double x,y[];
+#endif
+{
+ fd_twoints u, ux, uz;
+ double z = 0;
+ double w,t,r,fn;
+ double tx[3];
+ int e0,i,j,nx,n,ix,hx;
+
+ u.d = x;
+ hx = __HI(u); /* high word of x */
+ ix = hx&0x7fffffff;
+ if(ix<=0x3fe921fb) /* |x| ~<= pi/4 , no need for reduction */
+ {y[0] = x; y[1] = 0; return 0;}
+ if(ix<0x4002d97c) { /* |x| < 3pi/4, special case with n=+-1 */
+ if(hx>0) {
+ z = x - pio2_1;
+ if(ix!=0x3ff921fb) { /* 33+53 bit pi is good enough */
+ y[0] = z - pio2_1t;
+ y[1] = (z-y[0])-pio2_1t;
+ } else { /* near pi/2, use 33+33+53 bit pi */
+ z -= pio2_2;
+ y[0] = z - pio2_2t;
+ y[1] = (z-y[0])-pio2_2t;
+ }
+ return 1;
+ } else { /* negative x */
+ z = x + pio2_1;
+ if(ix!=0x3ff921fb) { /* 33+53 bit pi is good enough */
+ y[0] = z + pio2_1t;
+ y[1] = (z-y[0])+pio2_1t;
+ } else { /* near pi/2, use 33+33+53 bit pi */
+ z += pio2_2;
+ y[0] = z + pio2_2t;
+ y[1] = (z-y[0])+pio2_2t;
+ }
+ return -1;
+ }
+ }
+ if(ix<=0x413921fb) { /* |x| ~<= 2^19*(pi/2), medium size */
+ t = fd_fabs(x);
+ n = (int) (t*invpio2+half);
+ fn = (double)n;
+ r = t-fn*pio2_1;
+ w = fn*pio2_1t; /* 1st round good to 85 bit */
+ if(n<32&&ix!=npio2_hw[n-1]) {
+ y[0] = r-w; /* quick check no cancellation */
+ } else {
+ j = ix>>20;
+ y[0] = r-w;
+ u.d = y[0];
+ i = j-(((__HI(u))>>20)&0x7ff);
+ if(i>16) { /* 2nd iteration needed, good to 118 */
+ t = r;
+ w = fn*pio2_2;
+ r = t-w;
+ w = fn*pio2_2t-((t-r)-w);
+ y[0] = r-w;
+ u.d = y[0];
+ i = j-(((__HI(u))>>20)&0x7ff);
+ if(i>49) { /* 3rd iteration needed, 151 bits accuracy */
+ t = r; /* will cover all possible cases */
+ w = fn*pio2_3;
+ r = t-w;
+ w = fn*pio2_3t-((t-r)-w);
+ y[0] = r-w;
+ }
+ }
+ }
+ y[1] = (r-y[0])-w;
+ if(hx<0) {y[0] = -y[0]; y[1] = -y[1]; return -n;}
+ else return n;
+ }
+ /*
+ * all other (large) arguments
+ */
+ if(ix>=0x7ff00000) { /* x is inf or NaN */
+ y[0]=y[1]=x-x; return 0;
+ }
+ /* set z = scalbn(|x|,ilogb(x)-23) */
+ ux.d = x; uz.d = z;
+ __LO(uz) = __LO(ux);
+ z = uz.d;
+ e0 = (ix>>20)-1046; /* e0 = ilogb(z)-23; */
+ uz.d = z;
+ __HI(uz) = ix - (e0<<20);
+ z = uz.d;
+ for(i=0;i<2;i++) {
+ tx[i] = (double)((int)(z));
+ z = (z-tx[i])*two24;
+ }
+ tx[2] = z;
+ nx = 3;
+ while(tx[nx-1]==zero) nx--; /* skip zero term */
+ n = __kernel_rem_pio2(tx,y,e0,nx,2,two_over_pi);
+ if(hx<0) {y[0] = -y[0]; y[1] = -y[1]; return -n;}
+ return n;
+}
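A standalone sketch (editorial commentary, not part of this patch) of why pi/2 is stored in pieces above: in the medium-size branch, n*pio2_1 is exact because pio2_1 carries only 33 significant bits, so the leading subtraction loses nothing and the tail n*pio2_1t mops up the rest. A one-piece reduction with a single rounded pi/2 typically differs in the last several printed digits. The three constants are copied from this file; everything else below is illustrative.

#include <math.h>
#include <stdio.h>

int main(void)
{
    double pio2_1  = 1.57079632673412561417e+00; /* first 33 bits of pi/2 */
    double pio2_1t = 6.07710050650619224932e-11; /* pi/2 - pio2_1         */
    double invpio2 = 6.36619772367581382433e-01; /* 53 bits of 2/pi       */
    double x = 1000.0;
    double n = floor(x * invpio2 + 0.5);          /* nearest multiple of pi/2 */

    double onepiece = x - n * (pio2_1 + pio2_1t);     /* single rounded pi/2  */
    double twopiece = (x - n * pio2_1) - n * pio2_1t; /* pieces, as done here */

    printf("n        = %g\n", n);
    printf("onepiece = %.17g\n", onepiece);
    printf("twopiece = %.17g\n", twopiece);
    return 0;
}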
diff --git a/third_party/js-1.7/fdlibm/e_remainder.c b/third_party/js-1.7/fdlibm/e_remainder.c
new file mode 100644
index 0000000..de40f0c
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/e_remainder.c
@@ -0,0 +1,120 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)e_remainder.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/* __ieee754_remainder(x,p)
+ * Return :
+ * returns x REM p = x - [x/p]*p as if in infinitely
+ * precise arithmetic, where [x/p] is the (infinite-bit)
+ * integer nearest x/p (in the halfway case, choose the even one).
+ * Method :
+ * Based on fmod(), return x-[x/p]chopped*p exactly.
+ */
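A standalone sketch (editorial commentary, not part of this patch) of the definition above: x REM p = x - n*p with n the integer nearest x/p, ties to even. It is compared against the C99 remainder() function, which computes the same operation; nearbyint() picks the nearest integer under the default round-to-nearest mode.

#include <math.h>
#include <stdio.h>

int main(void)
{
    double x = 7.5, p = 2.0;
    double n = nearbyint(x / p);    /* 7.5/2 = 3.75 -> n = 4 */

    printf("x - n*p     = %g\n", x - n * p);        /* -0.5 */
    printf("remainder() = %g\n", remainder(x, p));  /* -0.5 */
    return 0;
}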
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+static const double zero = 0.0;
+#else
+static double zero = 0.0;
+#endif
+
+
+#ifdef __STDC__
+ double __ieee754_remainder(double x, double p)
+#else
+ double __ieee754_remainder(x,p)
+ double x,p;
+#endif
+{
+ fd_twoints u;
+ int hx,hp;
+ unsigned sx,lx,lp;
+ double p_half;
+
+ u.d = x;
+ hx = __HI(u); /* high word of x */
+ lx = __LO(u); /* low word of x */
+ u.d = p;
+ hp = __HI(u); /* high word of p */
+ lp = __LO(u); /* low word of p */
+ sx = hx&0x80000000;
+ hp &= 0x7fffffff;
+ hx &= 0x7fffffff;
+
+ /* purge off exception values */
+ if((hp|lp)==0) return (x*p)/(x*p); /* p = 0 */
+ if((hx>=0x7ff00000)|| /* x not finite */
+ ((hp>=0x7ff00000)&& /* p is NaN */
+ (((hp-0x7ff00000)|lp)!=0)))
+ return (x*p)/(x*p);
+
+
+ if (hp<=0x7fdfffff) x = __ieee754_fmod(x,p+p); /* now x < 2p */
+ if (((hx-hp)|(lx-lp))==0) return zero*x;
+ x = fd_fabs(x);
+ p = fd_fabs(p);
+ if (hp<0x00200000) {
+ if(x+x>p) {
+ x-=p;
+ if(x+x>=p) x -= p;
+ }
+ } else {
+ p_half = 0.5*p;
+ if(x>p_half) {
+ x-=p;
+ if(x>=p_half) x -= p;
+ }
+ }
+ u.d = x;
+ __HI(u) ^= sx;
+ x = u.d;
+ return x;
+}
diff --git a/third_party/js-1.7/fdlibm/e_scalb.c b/third_party/js-1.7/fdlibm/e_scalb.c
new file mode 100644
index 0000000..621704e
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/e_scalb.c
@@ -0,0 +1,89 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)e_scalb.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * __ieee754_scalb(x, fn) is provided for
+ * passing various standard test suites. One
+ * should use scalbn() instead.
+ */
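A standalone sketch (editorial commentary, not part of this patch) of the recommendation above: scalbn(x, n) scales x by 2**n directly through the exponent field, and is the preferred interface rather than the double-argument scalb().

#include <math.h>
#include <stdio.h>

int main(void)
{
    printf("scalbn(3.0,  4) = %g\n", scalbn(3.0, 4));   /* 48   */
    printf("scalbn(3.0, -2) = %g\n", scalbn(3.0, -2));  /* 0.75 */
    return 0;
}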
+
+#include "fdlibm.h"
+
+#ifdef _SCALB_INT
+#ifdef __STDC__
+ double __ieee754_scalb(double x, int fn)
+#else
+ double __ieee754_scalb(x,fn)
+ double x; int fn;
+#endif
+#else
+#ifdef __STDC__
+ double __ieee754_scalb(double x, double fn)
+#else
+ double __ieee754_scalb(x,fn)
+ double x, fn;
+#endif
+#endif
+{
+#ifdef _SCALB_INT
+ return fd_scalbn(x,fn);
+#else
+ if (fd_isnan(x)||fd_isnan(fn)) return x*fn;
+ if (!fd_finite(fn)) {
+ if(fn>0.0) return x*fn;
+ else return x/(-fn);
+ }
+ if (fd_rint(fn)!=fn) return (fn-fn)/(fn-fn);
+ if ( fn > 65000.0) return fd_scalbn(x, 65000);
+ if (-fn > 65000.0) return fd_scalbn(x,-65000);
+ return fd_scalbn(x,(int)fn);
+#endif
+}
diff --git a/third_party/js-1.7/fdlibm/e_sinh.c b/third_party/js-1.7/fdlibm/e_sinh.c
new file mode 100644
index 0000000..98ab9b5
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/e_sinh.c
@@ -0,0 +1,122 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)e_sinh.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/* __ieee754_sinh(x)
+ * Method :
+ * mathematically sinh(x) is defined to be (exp(x)-exp(-x))/2
+ * 1. Replace x by |x| (sinh(-x) = -sinh(x)).
+ * 2.
+ * 0 <= x <= 22 : sinh(x) := (E + E/(E+1))/2, E = expm1(x)
+ *
+ * 22 <= x <= lnovft : sinh(x) := exp(x)/2
+ * lnovft <= x <= ln2ovft: sinh(x) := exp(x/2)/2 * exp(x/2)
+ * ln2ovft < x : sinh(x) := x*shuge (overflow)
+ *
+ * Special cases:
+ * sinh(x) is x if x is +INF, -INF, or NaN.
+ * only sinh(0)=0 is exact for finite x.
+ */
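A standalone sketch (editorial commentary, not part of this patch) of the formula above for small |x|: with E = expm1(|x|), sign(x)*(E + E/(E+1))/2 avoids the cancellation that the textbook (exp(x)-exp(-x))/2 suffers near zero. Requires a C99 <math.h> for expm1().

#include <math.h>
#include <stdio.h>

int main(void)
{
    double x = 1e-9;
    double E = expm1(fabs(x));
    double via_expm1 = 0.5 * (E + E / (E + 1.0));
    if (x < 0) via_expm1 = -via_expm1;              /* restore the sign   */
    double naive = (exp(x) - exp(-x)) / 2.0;        /* loses bits near 0  */

    printf("expm1 form : %.17g\n", via_expm1);
    printf("naive form : %.17g\n", naive);
    printf("libm sinh  : %.17g\n", sinh(x));
    return 0;
}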
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+static const double one = 1.0, shuge = 1.0e307;
+#else
+static double one = 1.0, shuge = 1.0e307;
+#endif
+
+#ifdef __STDC__
+ double __ieee754_sinh(double x)
+#else
+ double __ieee754_sinh(x)
+ double x;
+#endif
+{
+ fd_twoints u;
+ double t,w,h;
+ int ix,jx;
+ unsigned lx;
+
+ /* High word of |x|. */
+ u.d = x;
+ jx = __HI(u);
+ ix = jx&0x7fffffff;
+
+ /* x is INF or NaN */
+ if(ix>=0x7ff00000) return x+x;
+
+ h = 0.5;
+ if (jx<0) h = -h;
+ /* |x| in [0,22], return sign(x)*0.5*(E+E/(E+1))) */
+ if (ix < 0x40360000) { /* |x|<22 */
+ if (ix<0x3e300000) /* |x|<2**-28 */
+ if(shuge+x>one) return x;/* sinh(tiny) = tiny with inexact */
+ t = fd_expm1(fd_fabs(x));
+ if(ix<0x3ff00000) return h*(2.0*t-t*t/(t+one));
+ return h*(t+t/(t+one));
+ }
+
+ /* |x| in [22, log(maxdouble)] return 0.5*exp(|x|) */
+ if (ix < 0x40862E42) return h*__ieee754_exp(fd_fabs(x));
+
+ /* |x| in [log(maxdouble), overflowthresold] */
+ lx = *( (((*(unsigned*)&one)>>29)) + (unsigned*)&x);
+ if (ix<0x408633CE || (ix==0x408633ce)&&(lx<=(unsigned)0x8fb9f87d)) {
+ w = __ieee754_exp(0.5*fd_fabs(x));
+ t = h*w;
+ return t*w;
+ }
+
+ /* |x| > overflowthresold, sinh(x) overflow */
+ return x*shuge;
+}
diff --git a/third_party/js-1.7/fdlibm/e_sqrt.c b/third_party/js-1.7/fdlibm/e_sqrt.c
new file mode 100644
index 0000000..9180283
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/e_sqrt.c
@@ -0,0 +1,497 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+/* @(#)e_sqrt.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/* __ieee754_sqrt(x)
+ * Return correctly rounded sqrt.
+ * ------------------------------------------
+ * | Use the hardware sqrt if you have one |
+ * ------------------------------------------
+ * Method:
+ * Bit by bit method using integer arithmetic. (Slow, but portable)
+ * 1. Normalization
+ * Scale x to y in [1,4) with even powers of 2:
+ * find an integer k such that 1 <= (y=x*2^(2k)) < 4, then
+ * sqrt(y) = 2^k * sqrt(x)
+ * 2. Bit by bit computation
+ * Let q_i = sqrt(y) truncated to i bits after the binary point
+ * (q_0 = 1), and define
+ *
+ * s_i = 2*q_i , and y_i = 2^(i+1) * ( y - q_i^2 ). (1)
+ *
+ * To compute q_(i+1) from q_i, one checks whether
+ *
+ * (q_i + 2^-(i+1))^2 <= y. (2)
+ *
+ * If (2) is false, then q_(i+1) = q_i; otherwise q_(i+1) = q_i + 2^-(i+1).
+ *
+ * With some algebraic manipulation, it is not difficult to see
+ * that (2) is equivalent to
+ *
+ * s_i + 2^-(i+1) <= y_i (3)
+ *
+ * The advantage of (3) is that s_i and y_i can be computed by
+ * the following recurrence formula:
+ * if (3) is false
+ *
+ * s_(i+1) = s_i , y_(i+1) = y_i ; (4)
+ *
+ * otherwise,
+ *
+ * s_(i+1) = s_i + 2^-i , y_(i+1) = y_i - s_i - 2^-(i+1) (5)
+ *
+ * One may easily use induction to prove (4) and (5).
+ * Note. Since the left hand side of (3) contains only i+2 bits,
+ * it is not necessary to do a full (53-bit) comparison
+ * in (3).
+ * 3. Final rounding
+ * After generating the 53-bit result, we compute one more bit.
+ * Together with the remainder, we can decide whether the
+ * result is exact, bigger than 1/2 ulp, or less than 1/2 ulp
+ * (it will never be equal to 1/2 ulp).
+ * The rounding mode can be detected by checking whether
+ * huge + tiny is equal to huge, and whether huge - tiny is
+ * equal to huge for some floating point number "huge" and "tiny".
+ *
+ * Special cases:
+ * sqrt(+-0) = +-0 ... exact
+ * sqrt(inf) = inf
+ * sqrt(-ve) = NaN ... with invalid signal
+ * sqrt(NaN) = NaN ... with invalid signal for signaling NaN
+ *
+ * Other methods : see the appended file at the end of the program below.
+ *---------------
+ */
+
+#include "fdlibm.h"
+
+#if defined(_MSC_VER)
+/* Microsoft Compiler */
+#pragma warning( disable : 4723 ) /* disables potential divide by 0 warning */
+#endif
+
+#ifdef __STDC__
+static const double one = 1.0, tiny=1.0e-300;
+#else
+static double one = 1.0, tiny=1.0e-300;
+#endif
+
+#ifdef __STDC__
+ double __ieee754_sqrt(double x)
+#else
+ double __ieee754_sqrt(x)
+ double x;
+#endif
+{
+ fd_twoints u;
+ double z;
+ int sign = (int)0x80000000;
+ unsigned r,t1,s1,ix1,q1;
+ int ix0,s0,q,m,t,i;
+
+ u.d = x;
+ ix0 = __HI(u); /* high word of x */
+ ix1 = __LO(u); /* low word of x */
+
+ /* take care of Inf and NaN */
+ if((ix0&0x7ff00000)==0x7ff00000) {
+ return x*x+x; /* sqrt(NaN)=NaN, sqrt(+inf)=+inf
+ sqrt(-inf)=sNaN */
+ }
+ /* take care of zero */
+ if(ix0<=0) {
+ if(((ix0&(~sign))|ix1)==0) return x;/* sqrt(+-0) = +-0 */
+ else if(ix0<0)
+ return (x-x)/(x-x); /* sqrt(-ve) = sNaN */
+ }
+ /* normalize x */
+ m = (ix0>>20);
+ if(m==0) { /* subnormal x */
+ while(ix0==0) {
+ m -= 21;
+ ix0 |= (ix1>>11); ix1 <<= 21;
+ }
+ for(i=0;(ix0&0x00100000)==0;i++) ix0<<=1;
+ m -= i-1;
+ ix0 |= (ix1>>(32-i));
+ ix1 <<= i;
+ }
+ m -= 1023; /* unbias exponent */
+ ix0 = (ix0&0x000fffff)|0x00100000;
+ if(m&1){ /* odd m, double x to make it even */
+ ix0 += ix0 + ((ix1&sign)>>31);
+ ix1 += ix1;
+ }
+ m >>= 1; /* m = [m/2] */
+
+ /* generate sqrt(x) bit by bit */
+ ix0 += ix0 + ((ix1&sign)>>31);
+ ix1 += ix1;
+ q = q1 = s0 = s1 = 0; /* [q,q1] = sqrt(x) */
+ r = 0x00200000; /* r = moving bit from right to left */
+
+ while(r!=0) {
+ t = s0+r;
+ if(t<=ix0) {
+ s0 = t+r;
+ ix0 -= t;
+ q += r;
+ }
+ ix0 += ix0 + ((ix1&sign)>>31);
+ ix1 += ix1;
+ r>>=1;
+ }
+
+ r = sign;
+ while(r!=0) {
+ t1 = s1+r;
+ t = s0;
+ if((t<ix0)||((t==ix0)&&(t1<=ix1))) {
+ s1 = t1+r;
+ if(((int)(t1&sign)==sign)&&(s1&sign)==0) s0 += 1;
+ ix0 -= t;
+ if (ix1 < t1) ix0 -= 1;
+ ix1 -= t1;
+ q1 += r;
+ }
+ ix0 += ix0 + ((ix1&sign)>>31);
+ ix1 += ix1;
+ r>>=1;
+ }
+
+ /* use floating add to find out rounding direction */
+ if((ix0|ix1)!=0) {
+ z = one-tiny; /* trigger inexact flag */
+ if (z>=one) {
+ z = one+tiny;
+ if (q1==(unsigned)0xffffffff) { q1=0; q += 1;}
+ else if (z>one) {
+ if (q1==(unsigned)0xfffffffe) q+=1;
+ q1+=2;
+ } else
+ q1 += (q1&1);
+ }
+ }
+ ix0 = (q>>1)+0x3fe00000;
+ ix1 = q1>>1;
+ if ((q&1)==1) ix1 |= sign;
+ ix0 += (m <<20);
+ u.d = z;
+ __HI(u) = ix0;
+ __LO(u) = ix1;
+ z = u.d;
+ return z;
+}
+
+/*
+Other methods (use floating-point arithmetic)
+-------------
+(This is a copy of a drafted paper by Prof W. Kahan
+and K.C. Ng, written in May, 1986)
+
+ Two algorithms are given here to implement sqrt(x)
+ (IEEE double precision arithmetic) in software.
+ Both supply sqrt(x) correctly rounded. The first algorithm (in
+ Section A) uses newton iterations and involves four divisions.
+ The second one uses reciproot iterations to avoid division, but
+ requires more multiplications. Both algorithms need the ability
+ to chop results of arithmetic operations instead of round them,
+ and the INEXACT flag to indicate when an arithmetic operation
+ is executed exactly with no roundoff error, all part of the
+ standard (IEEE 754-1985). The ability to perform shift, add,
+ subtract and logical AND operations upon 32-bit words is needed
+ too, though not part of the standard.
+
+A. sqrt(x) by Newton Iteration
+
+ (1) Initial approximation
+
+ Let x0 and x1 be the leading and the trailing 32-bit words of
+ a floating point number x (in IEEE double format) respectively
+
+	(x, viewed msb to lsb, consists of a sign bit s (1 bit), a biased
+	exponent e (11 bits) and a fraction f (52 bits); x0 holds s, e and
+	the leading fraction bits f1, while x1 holds the trailing fraction
+	bits f2.)
+
+ By performing shifts and subtracts on x0 and x1 (both regarded
+ as integers), we obtain an 8-bit approximation of sqrt(x) as
+ follows.
+
+ k := (x0>>1) + 0x1ff80000;
+ y0 := k - T1[31&(k>>15)]. ... y ~ sqrt(x) to 8 bits
+ Here k is a 32-bit integer and T1[] is an integer array containing
+ correction terms. Now magically the floating value of y (y's
+ leading 32-bit word is y0, the value of its trailing word is 0)
+	approximates sqrt(x) to almost 8 bits.
+
+ Value of T1:
+ static int T1[32]= {
+ 0, 1024, 3062, 5746, 9193, 13348, 18162, 23592,
+ 29598, 36145, 43202, 50740, 58733, 67158, 75992, 85215,
+ 83599, 71378, 60428, 50647, 41945, 34246, 27478, 21581,
+ 16499, 12183, 8588, 5674, 3403, 1742, 661, 130,};
+
+ (2) Iterative refinement
+
+	Applying Heron's rule three times to y, we have y approximating
+	sqrt(x) to within 1 ulp (Unit in the Last Place):
+
+ y := (y+x/y)/2 ... almost 17 sig. bits
+ y := (y+x/y)/2 ... almost 35 sig. bits
+ y := y-(y-x/y)/2 ... within 1 ulp
+
+
+ Remark 1.
+ Another way to improve y to within 1 ulp is:
+
+ y := (y+x/y) ... almost 17 sig. bits to 2*sqrt(x)
+ y := y - 0x00100006 ... almost 18 sig. bits to sqrt(x)
+
+	y := y + 2*(x-y^2)*y/(3y^2 + x)		...within 1 ulp
+
+
+ This formula has one division fewer than the one above; however,
+ it requires more multiplications and additions. Also x must be
+ scaled in advance to avoid spurious overflow in evaluating the
+	expression 3y*y+x. Hence it is not recommended unless division
+ is slow. If division is very slow, then one should use the
+ reciproot algorithm given in section B.
+
+ (3) Final adjustment
+
+ By twiddling y's last bit it is possible to force y to be
+ correctly rounded according to the prevailing rounding mode
+ as follows. Let r and i be copies of the rounding mode and
+ inexact flag before entering the square root program. Also we
+ use the expression y+-ulp for the next representable floating
+ numbers (up and down) of y. Note that y+-ulp = either fixed
+ point y+-1, or multiply y by nextafter(1,+-inf) in chopped
+ mode.
+
+ I := FALSE; ... reset INEXACT flag I
+ R := RZ; ... set rounding mode to round-toward-zero
+ z := x/y; ... chopped quotient, possibly inexact
+ If(not I) then { ... if the quotient is exact
+ if(z=y) {
+ I := i; ... restore inexact flag
+ R := r; ... restore rounded mode
+ return sqrt(x):=y.
+ } else {
+ z := z - ulp; ... special rounding
+ }
+ }
+ i := TRUE; ... sqrt(x) is inexact
+ If (r=RN) then z=z+ulp ... rounded-to-nearest
+ If (r=RP) then { ... round-toward-+inf
+ y = y+ulp; z=z+ulp;
+ }
+ y := y+z; ... chopped sum
+ y0:=y0-0x00100000; ... y := y/2 is correctly rounded.
+ I := i; ... restore inexact flag
+ R := r; ... restore rounded mode
+ return sqrt(x):=y.
+
+ (4) Special cases
+
+ Square root of +inf, +-0, or NaN is itself;
+ Square root of a negative number is NaN with invalid signal.
+
+
+B. sqrt(x) by Reciproot Iteration
+
+ (1) Initial approximation
+
+ Let x0 and x1 be the leading and the trailing 32-bit words of
+ a floating point number x (in IEEE double format) respectively
+	(see section A). By performing shifts and subtracts on x0 and y0,
+ we obtain a 7.8-bit approximation of 1/sqrt(x) as follows.
+
+ k := 0x5fe80000 - (x0>>1);
+ y0:= k - T2[63&(k>>14)]. ... y ~ 1/sqrt(x) to 7.8 bits
+
+ Here k is a 32-bit integer and T2[] is an integer array
+ containing correction terms. Now magically the floating
+ value of y (y's leading 32-bit word is y0, the value of
+ its trailing word y1 is set to zero) approximates 1/sqrt(x)
+	to almost 7.8 bits.
+
+ Value of T2:
+ static int T2[64]= {
+ 0x1500, 0x2ef8, 0x4d67, 0x6b02, 0x87be, 0xa395, 0xbe7a, 0xd866,
+ 0xf14a, 0x1091b,0x11fcd,0x13552,0x14999,0x15c98,0x16e34,0x17e5f,
+ 0x18d03,0x19a01,0x1a545,0x1ae8a,0x1b5c4,0x1bb01,0x1bfde,0x1c28d,
+ 0x1c2de,0x1c0db,0x1ba73,0x1b11c,0x1a4b5,0x1953d,0x18266,0x16be0,
+ 0x1683e,0x179d8,0x18a4d,0x19992,0x1a789,0x1b445,0x1bf61,0x1c989,
+ 0x1d16d,0x1d77b,0x1dddf,0x1e2ad,0x1e5bf,0x1e6e8,0x1e654,0x1e3cd,
+ 0x1df2a,0x1d635,0x1cb16,0x1be2c,0x1ae4e,0x19bde,0x1868e,0x16e2e,
+ 0x1527f,0x1334a,0x11051,0xe951, 0xbe01, 0x8e0d, 0x5924, 0x1edd,};
+
+ (2) Iterative refinement
+
+ Apply Reciproot iteration three times to y and multiply the
+ result by x to get an approximation z that matches sqrt(x)
+ to about 1 ulp. To be exact, we will have
+ -1ulp < sqrt(x)-z<1.0625ulp.
+
+ ... set rounding mode to Round-to-nearest
+ y := y*(1.5-0.5*x*y*y) ... almost 15 sig. bits to 1/sqrt(x)
+ y := y*((1.5-2^-30)+0.5*x*y*y)... about 29 sig. bits to 1/sqrt(x)
+ ... special arrangement for better accuracy
+ z := x*y ... 29 bits to sqrt(x), with z*y<1
+ z := z + 0.5*z*(1-z*y) ... about 1 ulp to sqrt(x)
+
+ Remark 2. The constant 1.5-2^-30 is chosen to bias the error so that
+ (a) the term z*y in the final iteration is always less than 1;
+ (b) the error in the final result is biased upward so that
+ -1 ulp < sqrt(x) - z < 1.0625 ulp
+ instead of |sqrt(x)-z|<1.03125ulp.
+
+ (3) Final adjustment
+
+ By twiddling y's last bit it is possible to force y to be
+ correctly rounded according to the prevailing rounding mode
+ as follows. Let r and i be copies of the rounding mode and
+ inexact flag before entering the square root program. Also we
+ use the expression y+-ulp for the next representable floating
+ numbers (up and down) of y. Note that y+-ulp = either fixed
+ point y+-1, or multiply y by nextafter(1,+-inf) in chopped
+ mode.
+
+ R := RZ; ... set rounding mode to round-toward-zero
+ switch(r) {
+ case RN: ... round-to-nearest
+ if(x<= z*(z-ulp)...chopped) z = z - ulp; else
+ if(x<= z*(z+ulp)...chopped) z = z; else z = z+ulp;
+ break;
+ case RZ:case RM: ... round-to-zero or round-to--inf
+	     R:=RP;	... reset rounding mode to round-toward-+inf
+ if(x<z*z ... rounded up) z = z - ulp; else
+ if(x>=(z+ulp)*(z+ulp) ...rounded up) z = z+ulp;
+ break;
+ case RP: ... round-to-+inf
+ if(x>(z+ulp)*(z+ulp)...chopped) z = z+2*ulp; else
+ if(x>z*z ...chopped) z = z+ulp;
+ break;
+ }
+
+ Remark 3. The above comparisons can be done in fixed point. For
+ example, to compare x and w=z*z chopped, it suffices to compare
+ x1 and w1 (the trailing parts of x and w), regarding them as
+ two's complement integers.
+
+ ...Is z an exact square root?
+ To determine whether z is an exact square root of x, let z1 be the
+ trailing part of z, and also let x0 and x1 be the leading and
+ trailing parts of x.
+
+ If ((z1&0x03ffffff)!=0) ... not exact if trailing 26 bits of z!=0
+ I := 1; ... Raise Inexact flag: z is not exact
+ else {
+ j := 1 - [(x0>>20)&1] ... j = logb(x) mod 2
+ k := z1 >> 26; ... get z's 25-th and 26-th
+ fraction bits
+ I := i or (k&j) or ((k&(j+j+1))!=(x1&3));
+ }
+ R:= r ... restore rounded mode
+ return sqrt(x):=z.
+
+	If multiplication is cheaper than the foregoing red tape, the
+ Inexact flag can be evaluated by
+
+ I := i;
+ I := (z*z!=x) or I.
+
+ Note that z*z can overwrite I; this value must be sensed if it is
+ True.
+
+ Remark 4. If z*z = x exactly, then bit 25 to bit 0 of z1 must be
+ zero.
+
+	(z1 holds the trailing fraction bits f2, bit 31 down to bit 0.)
+
+	Furthermore, bits 27 and 26 of z1, bits 1 and 0 of x1, and the
+	parity (odd or even) of logb(x) have the following relations:
+
+	-------------------------------------------------
+	bit 27,26 of z1     bit 1,0 of x1     logb(x)
+	-------------------------------------------------
+	00                  00                odd and even
+	01                  01                even
+	10                  10                odd
+	10                  00                even
+	11                  01                even
+	-------------------------------------------------
+
+ (4) Special cases (see (4) of Section A).
+
+ */
+
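A standalone sketch (editorial commentary, not part of this patch) of the Newton iteration of Section A above: the initial guess halves the exponent via k = (x0>>1) + 0x1ff80000 but omits the T1[] correction table, so a couple of extra Heron steps are taken instead, and the rounding-direction adjustment is omitted entirely. It assumes IEEE-754 doubles, a 64-bit unsigned long long and a 32-bit unsigned; newton_sqrt is an illustrative name.

#include <math.h>
#include <stdio.h>
#include <string.h>

static double newton_sqrt(double x)            /* x > 0, normal */
{
    unsigned long long bits;
    memcpy(&bits, &x, sizeof bits);            /* reinterpret the double   */
    unsigned x0 = (unsigned)(bits >> 32);      /* leading 32-bit word      */
    unsigned y0 = (x0 >> 1) + 0x1ff80000u;     /* crude exponent-halving   */
    unsigned long long ybits = (unsigned long long)y0 << 32;
    double y;
    memcpy(&y, &ybits, sizeof y);              /* initial guess, few bits  */

    for (int i = 0; i < 5; i++)                /* Heron: y = (y + x/y)/2   */
        y = 0.5 * (y + x / y);
    return y;
}

int main(void)
{
    double xs[] = { 2.0, 10.0, 0.3, 12345.678 };
    for (int i = 0; i < 4; i++)
        printf("x=%-10g newton=%.17g  libm=%.17g\n",
               xs[i], newton_sqrt(xs[i]), sqrt(xs[i]));
    return 0;
}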
diff --git a/third_party/js-1.7/fdlibm/fdlibm.h b/third_party/js-1.7/fdlibm/fdlibm.h
new file mode 100644
index 0000000..e623be5
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/fdlibm.h
@@ -0,0 +1,273 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)fdlibm.h 1.5 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/* Modified defines start here.. */
+#undef __LITTLE_ENDIAN
+
+#ifdef _WIN32
+#define huge myhuge
+#define __LITTLE_ENDIAN
+#endif
+
+#ifdef XP_OS2
+#define __LITTLE_ENDIAN
+#endif
+
+#if defined(linux) && (defined(__i386__) || defined(__x86_64__) || defined(__ia64) || (defined(__mips) && defined(__MIPSEL__)))
+#define __LITTLE_ENDIAN
+#endif
+
+/* End here. The rest is the standard file. */
+
+#ifdef SOLARIS /* special setup for Sun test regime */
+#if defined(i386) || defined(i486) || \
+ defined(intel) || defined(x86) || defined(i86pc)
+#define __LITTLE_ENDIAN
+#endif
+#endif
+
+typedef union {
+#ifdef __LITTLE_ENDIAN
+ struct { int lo, hi; } ints;
+#else
+ struct { int hi, lo; } ints;
+#endif
+ double d;
+} fd_twoints;
+
+#define __HI(x) x.ints.hi
+#define __LO(x) x.ints.lo
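A standalone sketch (editorial commentary, not part of this header) of how the union above is used: assigning to d and reading ints.hi / ints.lo exposes the two 32-bit halves of the IEEE-754 double, which is exactly what the __HI and __LO macros do. The demo_twoints type hard-codes the little-endian field order; the real header selects the order with the __LITTLE_ENDIAN test above.

#include <stdio.h>

typedef union {
    struct { int lo, hi; } ints;   /* little-endian layout assumed here;    */
    double d;                      /* on big-endian machines the two fields */
} demo_twoints;                    /* would be swapped, as handled above    */

int main(void)
{
    demo_twoints u;
    u.d = 1.0;                     /* 1.0 is 0x3FF00000 00000000 in IEEE-754 */
    printf("high word = 0x%08X\n", (unsigned)u.ints.hi);  /* 0x3FF00000 */
    printf("low  word = 0x%08X\n", (unsigned)u.ints.lo);  /* 0x00000000 */
    return 0;
}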
+
+#undef __P
+#ifdef __STDC__
+#define __P(p) p
+#else
+#define __P(p) ()
+#endif
+
+/*
+ * ANSI/POSIX
+ */
+
+extern int signgam;
+
+#define MAXFLOAT ((float)3.40282346638528860e+38)
+
+enum fdversion {fdlibm_ieee = -1, fdlibm_svid, fdlibm_xopen, fdlibm_posix};
+
+#define _LIB_VERSION_TYPE enum fdversion
+#define _LIB_VERSION _fdlib_version
+
+/* if global variable _LIB_VERSION is not desirable, one may
+ * change the following to be a constant by:
+ * #define _LIB_VERSION_TYPE const enum version
+ * In that case, after one initializes the value _LIB_VERSION (see
+ * s_lib_version.c) during compile time, it cannot be modified
+ * in the middle of a program
+ */
+extern _LIB_VERSION_TYPE _LIB_VERSION;
+
+#define _IEEE_ fdlibm_ieee
+#define _SVID_ fdlibm_svid
+#define _XOPEN_ fdlibm_xopen
+#define _POSIX_ fdlibm_posix
+
+struct exception {
+ int type;
+ char *name;
+ double arg1;
+ double arg2;
+ double retval;
+};
+
+#define HUGE MAXFLOAT
+
+/*
+ * set X_TLOSS = pi*2**52, which is possibly defined in <values.h>
+ * (one may replace the following line by "#include <values.h>")
+ */
+
+#define X_TLOSS 1.41484755040568800000e+16
+
+#define DOMAIN 1
+#define SING 2
+#define OVERFLOW 3
+#define UNDERFLOW 4
+#define TLOSS 5
+#define PLOSS 6
+
+/*
+ * ANSI/POSIX
+ */
+
+extern double fd_acos __P((double));
+extern double fd_asin __P((double));
+extern double fd_atan __P((double));
+extern double fd_atan2 __P((double, double));
+extern double fd_cos __P((double));
+extern double fd_sin __P((double));
+extern double fd_tan __P((double));
+
+extern double fd_cosh __P((double));
+extern double fd_sinh __P((double));
+extern double fd_tanh __P((double));
+
+extern double fd_exp __P((double));
+extern double fd_frexp __P((double, int *));
+extern double fd_ldexp __P((double, int));
+extern double fd_log __P((double));
+extern double fd_log10 __P((double));
+extern double fd_modf __P((double, double *));
+
+extern double fd_pow __P((double, double));
+extern double fd_sqrt __P((double));
+
+extern double fd_ceil __P((double));
+extern double fd_fabs __P((double));
+extern double fd_floor __P((double));
+extern double fd_fmod __P((double, double));
+
+extern double fd_erf __P((double));
+extern double fd_erfc __P((double));
+extern double fd_gamma __P((double));
+extern double fd_hypot __P((double, double));
+extern int fd_isnan __P((double));
+extern int fd_finite __P((double));
+extern double fd_j0 __P((double));
+extern double fd_j1 __P((double));
+extern double fd_jn __P((int, double));
+extern double fd_lgamma __P((double));
+extern double fd_y0 __P((double));
+extern double fd_y1 __P((double));
+extern double fd_yn __P((int, double));
+
+extern double fd_acosh __P((double));
+extern double fd_asinh __P((double));
+extern double fd_atanh __P((double));
+extern double fd_cbrt __P((double));
+extern double fd_logb __P((double));
+extern double fd_nextafter __P((double, double));
+extern double fd_remainder __P((double, double));
+#ifdef _SCALB_INT
+extern double fd_scalb __P((double, int));
+#else
+extern double fd_scalb __P((double, double));
+#endif
+
+extern int fd_matherr __P((struct exception *));
+
+/*
+ * IEEE Test Vector
+ */
+extern double significand __P((double));
+
+/*
+ * Functions callable from C, intended to support IEEE arithmetic.
+ */
+extern double fd_copysign __P((double, double));
+extern int fd_ilogb __P((double));
+extern double fd_rint __P((double));
+extern double fd_scalbn __P((double, int));
+
+/*
+ * BSD math library entry points
+ */
+extern double fd_expm1 __P((double));
+extern double fd_log1p __P((double));
+
+/*
+ * Reentrant version of gamma & lgamma; passes signgam back by reference
+ * as the second argument; user must allocate space for signgam.
+ */
+#ifdef _REENTRANT
+extern double gamma_r __P((double, int *));
+extern double lgamma_r __P((double, int *));
+#endif /* _REENTRANT */
+
+/* ieee style elementary functions */
+extern double __ieee754_sqrt __P((double));
+extern double __ieee754_acos __P((double));
+extern double __ieee754_acosh __P((double));
+extern double __ieee754_log __P((double));
+extern double __ieee754_atanh __P((double));
+extern double __ieee754_asin __P((double));
+extern double __ieee754_atan2 __P((double,double));
+extern double __ieee754_exp __P((double));
+extern double __ieee754_cosh __P((double));
+extern double __ieee754_fmod __P((double,double));
+extern double __ieee754_pow __P((double,double));
+extern double __ieee754_lgamma_r __P((double,int *));
+extern double __ieee754_gamma_r __P((double,int *));
+extern double __ieee754_lgamma __P((double));
+extern double __ieee754_gamma __P((double));
+extern double __ieee754_log10 __P((double));
+extern double __ieee754_sinh __P((double));
+extern double __ieee754_hypot __P((double,double));
+extern double __ieee754_j0 __P((double));
+extern double __ieee754_j1 __P((double));
+extern double __ieee754_y0 __P((double));
+extern double __ieee754_y1 __P((double));
+extern double __ieee754_jn __P((int,double));
+extern double __ieee754_yn __P((int,double));
+extern double __ieee754_remainder __P((double,double));
+extern int __ieee754_rem_pio2 __P((double,double*));
+#ifdef _SCALB_INT
+extern double __ieee754_scalb __P((double,int));
+#else
+extern double __ieee754_scalb __P((double,double));
+#endif
+
+/* fdlibm kernel function */
+extern double __kernel_standard __P((double,double,int,int*));
+extern double __kernel_sin __P((double,double,int));
+extern double __kernel_cos __P((double,double));
+extern double __kernel_tan __P((double,double,int));
+extern int __kernel_rem_pio2 __P((double*,double*,int,int,int,const int*));
diff --git a/third_party/js-1.7/fdlibm/fdlibm.mak b/third_party/js-1.7/fdlibm/fdlibm.mak
new file mode 100644
index 0000000..436c1c4
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/fdlibm.mak
@@ -0,0 +1,1453 @@
+# Microsoft Developer Studio Generated NMAKE File, Format Version 4.20
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Static Library" 0x0104
+
+!IF "$(CFG)" == ""
+CFG=fdlibm - Win32 Debug
+!MESSAGE No configuration specified. Defaulting to fdlibm - Win32 Debug.
+!ENDIF
+
+!IF "$(CFG)" != "fdlibm - Win32 Release" && "$(CFG)" != "fdlibm - Win32 Debug"
+!MESSAGE Invalid configuration "$(CFG)" specified.
+!MESSAGE You can specify a configuration when running NMAKE on this makefile
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "fdlibm.mak" CFG="fdlibm - Win32 Debug"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "fdlibm - Win32 Release" (based on "Win32 (x86) Static Library")
+!MESSAGE "fdlibm - Win32 Debug" (based on "Win32 (x86) Static Library")
+!MESSAGE
+!ERROR An invalid configuration is specified.
+!ENDIF
+
+!IF "$(OS)" == "Windows_NT"
+NULL=
+!ELSE
+NULL=nul
+!ENDIF
+################################################################################
+# Begin Project
+CPP=cl.exe
+
+!IF "$(CFG)" == "fdlibm - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "fdlibm__"
+# PROP BASE Intermediate_Dir "fdlibm__"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "fdlibm__"
+# PROP Intermediate_Dir "fdlibm__"
+# PROP Target_Dir ""
+OUTDIR=.\fdlibm__
+INTDIR=.\fdlibm__
+
+ALL : "$(OUTDIR)\fdlibm.lib"
+
+CLEAN :
+ -@erase "$(INTDIR)\e_acos.obj"
+ -@erase "$(INTDIR)\e_acosh.obj"
+ -@erase "$(INTDIR)\e_asin.obj"
+ -@erase "$(INTDIR)\e_atan2.obj"
+ -@erase "$(INTDIR)\e_atanh.obj"
+ -@erase "$(INTDIR)\e_cosh.obj"
+ -@erase "$(INTDIR)\e_exp.obj"
+ -@erase "$(INTDIR)\e_fmod.obj"
+ -@erase "$(INTDIR)\e_gamma.obj"
+ -@erase "$(INTDIR)\e_gamma_r.obj"
+ -@erase "$(INTDIR)\e_hypot.obj"
+ -@erase "$(INTDIR)\e_j0.obj"
+ -@erase "$(INTDIR)\e_j1.obj"
+ -@erase "$(INTDIR)\e_jn.obj"
+ -@erase "$(INTDIR)\e_lgamma.obj"
+ -@erase "$(INTDIR)\e_lgamma_r.obj"
+ -@erase "$(INTDIR)\e_log.obj"
+ -@erase "$(INTDIR)\e_log10.obj"
+ -@erase "$(INTDIR)\e_pow.obj"
+ -@erase "$(INTDIR)\e_rem_pio2.obj"
+ -@erase "$(INTDIR)\e_remainder.obj"
+ -@erase "$(INTDIR)\e_scalb.obj"
+ -@erase "$(INTDIR)\e_sinh.obj"
+ -@erase "$(INTDIR)\e_sqrt.obj"
+ -@erase "$(INTDIR)\k_cos.obj"
+ -@erase "$(INTDIR)\k_rem_pio2.obj"
+ -@erase "$(INTDIR)\k_sin.obj"
+ -@erase "$(INTDIR)\k_standard.obj"
+ -@erase "$(INTDIR)\k_tan.obj"
+ -@erase "$(INTDIR)\s_asinh.obj"
+ -@erase "$(INTDIR)\s_atan.obj"
+ -@erase "$(INTDIR)\s_cbrt.obj"
+ -@erase "$(INTDIR)\s_ceil.obj"
+ -@erase "$(INTDIR)\s_copysign.obj"
+ -@erase "$(INTDIR)\s_cos.obj"
+ -@erase "$(INTDIR)\s_erf.obj"
+ -@erase "$(INTDIR)\s_expm1.obj"
+ -@erase "$(INTDIR)\s_fabs.obj"
+ -@erase "$(INTDIR)\s_finite.obj"
+ -@erase "$(INTDIR)\s_floor.obj"
+ -@erase "$(INTDIR)\s_frexp.obj"
+ -@erase "$(INTDIR)\s_ilogb.obj"
+ -@erase "$(INTDIR)\s_isnan.obj"
+ -@erase "$(INTDIR)\s_ldexp.obj"
+ -@erase "$(INTDIR)\s_lib_version.obj"
+ -@erase "$(INTDIR)\s_log1p.obj"
+ -@erase "$(INTDIR)\s_logb.obj"
+ -@erase "$(INTDIR)\s_matherr.obj"
+ -@erase "$(INTDIR)\s_modf.obj"
+ -@erase "$(INTDIR)\s_nextafter.obj"
+ -@erase "$(INTDIR)\s_rint.obj"
+ -@erase "$(INTDIR)\s_scalbn.obj"
+ -@erase "$(INTDIR)\s_signgam.obj"
+ -@erase "$(INTDIR)\s_significand.obj"
+ -@erase "$(INTDIR)\s_sin.obj"
+ -@erase "$(INTDIR)\s_tan.obj"
+ -@erase "$(INTDIR)\s_tanh.obj"
+ -@erase "$(INTDIR)\w_acos.obj"
+ -@erase "$(INTDIR)\w_acosh.obj"
+ -@erase "$(INTDIR)\w_asin.obj"
+ -@erase "$(INTDIR)\w_atan2.obj"
+ -@erase "$(INTDIR)\w_atanh.obj"
+ -@erase "$(INTDIR)\w_cosh.obj"
+ -@erase "$(INTDIR)\w_exp.obj"
+ -@erase "$(INTDIR)\w_fmod.obj"
+ -@erase "$(INTDIR)\w_gamma.obj"
+ -@erase "$(INTDIR)\w_gamma_r.obj"
+ -@erase "$(INTDIR)\w_hypot.obj"
+ -@erase "$(INTDIR)\w_j0.obj"
+ -@erase "$(INTDIR)\w_j1.obj"
+ -@erase "$(INTDIR)\w_jn.obj"
+ -@erase "$(INTDIR)\w_lgamma.obj"
+ -@erase "$(INTDIR)\w_lgamma_r.obj"
+ -@erase "$(INTDIR)\w_log.obj"
+ -@erase "$(INTDIR)\w_log10.obj"
+ -@erase "$(INTDIR)\w_pow.obj"
+ -@erase "$(INTDIR)\w_remainder.obj"
+ -@erase "$(INTDIR)\w_scalb.obj"
+ -@erase "$(INTDIR)\w_sinh.obj"
+ -@erase "$(INTDIR)\w_sqrt.obj"
+ -@erase "$(OUTDIR)\fdlibm.lib"
+
+"$(OUTDIR)" :
+ if not exist "$(OUTDIR)/$(NULL)" mkdir "$(OUTDIR)"
+
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /YX /c
+# ADD CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /YX /c
+CPP_PROJ=/nologo /ML /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS"\
+ /Fp"$(INTDIR)/fdlibm.pch" /YX /Fo"$(INTDIR)/" /c
+CPP_OBJS=.\fdlibm__/
+CPP_SBRS=.\.
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+BSC32_FLAGS=/nologo /o"$(OUTDIR)/fdlibm.bsc"
+BSC32_SBRS= \
+
+LIB32=link.exe -lib
+# ADD BASE LIB32 /nologo
+# ADD LIB32 /nologo
+LIB32_FLAGS=/nologo /out:"$(OUTDIR)/fdlibm.lib"
+LIB32_OBJS= \
+ "$(INTDIR)\e_acos.obj" \
+ "$(INTDIR)\e_acosh.obj" \
+ "$(INTDIR)\e_asin.obj" \
+ "$(INTDIR)\e_atan2.obj" \
+ "$(INTDIR)\e_atanh.obj" \
+ "$(INTDIR)\e_cosh.obj" \
+ "$(INTDIR)\e_exp.obj" \
+ "$(INTDIR)\e_fmod.obj" \
+ "$(INTDIR)\e_gamma.obj" \
+ "$(INTDIR)\e_gamma_r.obj" \
+ "$(INTDIR)\e_hypot.obj" \
+ "$(INTDIR)\e_j0.obj" \
+ "$(INTDIR)\e_j1.obj" \
+ "$(INTDIR)\e_jn.obj" \
+ "$(INTDIR)\e_lgamma.obj" \
+ "$(INTDIR)\e_lgamma_r.obj" \
+ "$(INTDIR)\e_log.obj" \
+ "$(INTDIR)\e_log10.obj" \
+ "$(INTDIR)\e_pow.obj" \
+ "$(INTDIR)\e_rem_pio2.obj" \
+ "$(INTDIR)\e_remainder.obj" \
+ "$(INTDIR)\e_scalb.obj" \
+ "$(INTDIR)\e_sinh.obj" \
+ "$(INTDIR)\e_sqrt.obj" \
+ "$(INTDIR)\k_cos.obj" \
+ "$(INTDIR)\k_rem_pio2.obj" \
+ "$(INTDIR)\k_sin.obj" \
+ "$(INTDIR)\k_standard.obj" \
+ "$(INTDIR)\k_tan.obj" \
+ "$(INTDIR)\s_asinh.obj" \
+ "$(INTDIR)\s_atan.obj" \
+ "$(INTDIR)\s_cbrt.obj" \
+ "$(INTDIR)\s_ceil.obj" \
+ "$(INTDIR)\s_copysign.obj" \
+ "$(INTDIR)\s_cos.obj" \
+ "$(INTDIR)\s_erf.obj" \
+ "$(INTDIR)\s_expm1.obj" \
+ "$(INTDIR)\s_fabs.obj" \
+ "$(INTDIR)\s_finite.obj" \
+ "$(INTDIR)\s_floor.obj" \
+ "$(INTDIR)\s_frexp.obj" \
+ "$(INTDIR)\s_ilogb.obj" \
+ "$(INTDIR)\s_isnan.obj" \
+ "$(INTDIR)\s_ldexp.obj" \
+ "$(INTDIR)\s_lib_version.obj" \
+ "$(INTDIR)\s_log1p.obj" \
+ "$(INTDIR)\s_logb.obj" \
+ "$(INTDIR)\s_matherr.obj" \
+ "$(INTDIR)\s_modf.obj" \
+ "$(INTDIR)\s_nextafter.obj" \
+ "$(INTDIR)\s_rint.obj" \
+ "$(INTDIR)\s_scalbn.obj" \
+ "$(INTDIR)\s_signgam.obj" \
+ "$(INTDIR)\s_significand.obj" \
+ "$(INTDIR)\s_sin.obj" \
+ "$(INTDIR)\s_tan.obj" \
+ "$(INTDIR)\s_tanh.obj" \
+ "$(INTDIR)\w_acos.obj" \
+ "$(INTDIR)\w_acosh.obj" \
+ "$(INTDIR)\w_asin.obj" \
+ "$(INTDIR)\w_atan2.obj" \
+ "$(INTDIR)\w_atanh.obj" \
+ "$(INTDIR)\w_cosh.obj" \
+ "$(INTDIR)\w_exp.obj" \
+ "$(INTDIR)\w_fmod.obj" \
+ "$(INTDIR)\w_gamma.obj" \
+ "$(INTDIR)\w_gamma_r.obj" \
+ "$(INTDIR)\w_hypot.obj" \
+ "$(INTDIR)\w_j0.obj" \
+ "$(INTDIR)\w_j1.obj" \
+ "$(INTDIR)\w_jn.obj" \
+ "$(INTDIR)\w_lgamma.obj" \
+ "$(INTDIR)\w_lgamma_r.obj" \
+ "$(INTDIR)\w_log.obj" \
+ "$(INTDIR)\w_log10.obj" \
+ "$(INTDIR)\w_pow.obj" \
+ "$(INTDIR)\w_remainder.obj" \
+ "$(INTDIR)\w_scalb.obj" \
+ "$(INTDIR)\w_sinh.obj" \
+ "$(INTDIR)\w_sqrt.obj"
+
+"$(OUTDIR)\fdlibm.lib" : "$(OUTDIR)" $(DEF_FILE) $(LIB32_OBJS)
+ $(LIB32) @<<
+ $(LIB32_FLAGS) $(DEF_FLAGS) $(LIB32_OBJS)
+<<
+
+!ELSEIF "$(CFG)" == "fdlibm - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "fdlibm_0"
+# PROP BASE Intermediate_Dir "fdlibm_0"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "fdlibm_0"
+# PROP Intermediate_Dir "fdlibm_0"
+# PROP Target_Dir ""
+OUTDIR=.\fdlibm_0
+INTDIR=.\fdlibm_0
+
+ALL : "$(OUTDIR)\fdlibm.lib"
+
+CLEAN :
+ -@erase "$(INTDIR)\e_acos.obj"
+ -@erase "$(INTDIR)\e_acosh.obj"
+ -@erase "$(INTDIR)\e_asin.obj"
+ -@erase "$(INTDIR)\e_atan2.obj"
+ -@erase "$(INTDIR)\e_atanh.obj"
+ -@erase "$(INTDIR)\e_cosh.obj"
+ -@erase "$(INTDIR)\e_exp.obj"
+ -@erase "$(INTDIR)\e_fmod.obj"
+ -@erase "$(INTDIR)\e_gamma.obj"
+ -@erase "$(INTDIR)\e_gamma_r.obj"
+ -@erase "$(INTDIR)\e_hypot.obj"
+ -@erase "$(INTDIR)\e_j0.obj"
+ -@erase "$(INTDIR)\e_j1.obj"
+ -@erase "$(INTDIR)\e_jn.obj"
+ -@erase "$(INTDIR)\e_lgamma.obj"
+ -@erase "$(INTDIR)\e_lgamma_r.obj"
+ -@erase "$(INTDIR)\e_log.obj"
+ -@erase "$(INTDIR)\e_log10.obj"
+ -@erase "$(INTDIR)\e_pow.obj"
+ -@erase "$(INTDIR)\e_rem_pio2.obj"
+ -@erase "$(INTDIR)\e_remainder.obj"
+ -@erase "$(INTDIR)\e_scalb.obj"
+ -@erase "$(INTDIR)\e_sinh.obj"
+ -@erase "$(INTDIR)\e_sqrt.obj"
+ -@erase "$(INTDIR)\k_cos.obj"
+ -@erase "$(INTDIR)\k_rem_pio2.obj"
+ -@erase "$(INTDIR)\k_sin.obj"
+ -@erase "$(INTDIR)\k_standard.obj"
+ -@erase "$(INTDIR)\k_tan.obj"
+ -@erase "$(INTDIR)\s_asinh.obj"
+ -@erase "$(INTDIR)\s_atan.obj"
+ -@erase "$(INTDIR)\s_cbrt.obj"
+ -@erase "$(INTDIR)\s_ceil.obj"
+ -@erase "$(INTDIR)\s_copysign.obj"
+ -@erase "$(INTDIR)\s_cos.obj"
+ -@erase "$(INTDIR)\s_erf.obj"
+ -@erase "$(INTDIR)\s_expm1.obj"
+ -@erase "$(INTDIR)\s_fabs.obj"
+ -@erase "$(INTDIR)\s_finite.obj"
+ -@erase "$(INTDIR)\s_floor.obj"
+ -@erase "$(INTDIR)\s_frexp.obj"
+ -@erase "$(INTDIR)\s_ilogb.obj"
+ -@erase "$(INTDIR)\s_isnan.obj"
+ -@erase "$(INTDIR)\s_ldexp.obj"
+ -@erase "$(INTDIR)\s_lib_version.obj"
+ -@erase "$(INTDIR)\s_log1p.obj"
+ -@erase "$(INTDIR)\s_logb.obj"
+ -@erase "$(INTDIR)\s_matherr.obj"
+ -@erase "$(INTDIR)\s_modf.obj"
+ -@erase "$(INTDIR)\s_nextafter.obj"
+ -@erase "$(INTDIR)\s_rint.obj"
+ -@erase "$(INTDIR)\s_scalbn.obj"
+ -@erase "$(INTDIR)\s_signgam.obj"
+ -@erase "$(INTDIR)\s_significand.obj"
+ -@erase "$(INTDIR)\s_sin.obj"
+ -@erase "$(INTDIR)\s_tan.obj"
+ -@erase "$(INTDIR)\s_tanh.obj"
+ -@erase "$(INTDIR)\w_acos.obj"
+ -@erase "$(INTDIR)\w_acosh.obj"
+ -@erase "$(INTDIR)\w_asin.obj"
+ -@erase "$(INTDIR)\w_atan2.obj"
+ -@erase "$(INTDIR)\w_atanh.obj"
+ -@erase "$(INTDIR)\w_cosh.obj"
+ -@erase "$(INTDIR)\w_exp.obj"
+ -@erase "$(INTDIR)\w_fmod.obj"
+ -@erase "$(INTDIR)\w_gamma.obj"
+ -@erase "$(INTDIR)\w_gamma_r.obj"
+ -@erase "$(INTDIR)\w_hypot.obj"
+ -@erase "$(INTDIR)\w_j0.obj"
+ -@erase "$(INTDIR)\w_j1.obj"
+ -@erase "$(INTDIR)\w_jn.obj"
+ -@erase "$(INTDIR)\w_lgamma.obj"
+ -@erase "$(INTDIR)\w_lgamma_r.obj"
+ -@erase "$(INTDIR)\w_log.obj"
+ -@erase "$(INTDIR)\w_log10.obj"
+ -@erase "$(INTDIR)\w_pow.obj"
+ -@erase "$(INTDIR)\w_remainder.obj"
+ -@erase "$(INTDIR)\w_scalb.obj"
+ -@erase "$(INTDIR)\w_sinh.obj"
+ -@erase "$(INTDIR)\w_sqrt.obj"
+ -@erase "$(OUTDIR)\fdlibm.lib"
+
+"$(OUTDIR)" :
+ if not exist "$(OUTDIR)/$(NULL)" mkdir "$(OUTDIR)"
+
+# ADD BASE CPP /nologo /W3 /GX /Z7 /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /YX /c
+# ADD CPP /nologo /W3 /GX /Z7 /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /YX /c
+CPP_PROJ=/nologo /MLd /W3 /GX /Z7 /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS"\
+ /Fp"$(INTDIR)/fdlibm.pch" /YX /Fo"$(INTDIR)/" /c
+CPP_OBJS=.\fdlibm_0/
+CPP_SBRS=.\.
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+BSC32_FLAGS=/nologo /o"$(OUTDIR)/fdlibm.bsc"
+BSC32_SBRS= \
+
+LIB32=link.exe -lib
+# ADD BASE LIB32 /nologo
+# ADD LIB32 /nologo
+LIB32_FLAGS=/nologo /out:"$(OUTDIR)/fdlibm.lib"
+LIB32_OBJS= \
+ "$(INTDIR)\e_acos.obj" \
+ "$(INTDIR)\e_acosh.obj" \
+ "$(INTDIR)\e_asin.obj" \
+ "$(INTDIR)\e_atan2.obj" \
+ "$(INTDIR)\e_atanh.obj" \
+ "$(INTDIR)\e_cosh.obj" \
+ "$(INTDIR)\e_exp.obj" \
+ "$(INTDIR)\e_fmod.obj" \
+ "$(INTDIR)\e_gamma.obj" \
+ "$(INTDIR)\e_gamma_r.obj" \
+ "$(INTDIR)\e_hypot.obj" \
+ "$(INTDIR)\e_j0.obj" \
+ "$(INTDIR)\e_j1.obj" \
+ "$(INTDIR)\e_jn.obj" \
+ "$(INTDIR)\e_lgamma.obj" \
+ "$(INTDIR)\e_lgamma_r.obj" \
+ "$(INTDIR)\e_log.obj" \
+ "$(INTDIR)\e_log10.obj" \
+ "$(INTDIR)\e_pow.obj" \
+ "$(INTDIR)\e_rem_pio2.obj" \
+ "$(INTDIR)\e_remainder.obj" \
+ "$(INTDIR)\e_scalb.obj" \
+ "$(INTDIR)\e_sinh.obj" \
+ "$(INTDIR)\e_sqrt.obj" \
+ "$(INTDIR)\k_cos.obj" \
+ "$(INTDIR)\k_rem_pio2.obj" \
+ "$(INTDIR)\k_sin.obj" \
+ "$(INTDIR)\k_standard.obj" \
+ "$(INTDIR)\k_tan.obj" \
+ "$(INTDIR)\s_asinh.obj" \
+ "$(INTDIR)\s_atan.obj" \
+ "$(INTDIR)\s_cbrt.obj" \
+ "$(INTDIR)\s_ceil.obj" \
+ "$(INTDIR)\s_copysign.obj" \
+ "$(INTDIR)\s_cos.obj" \
+ "$(INTDIR)\s_erf.obj" \
+ "$(INTDIR)\s_expm1.obj" \
+ "$(INTDIR)\s_fabs.obj" \
+ "$(INTDIR)\s_finite.obj" \
+ "$(INTDIR)\s_floor.obj" \
+ "$(INTDIR)\s_frexp.obj" \
+ "$(INTDIR)\s_ilogb.obj" \
+ "$(INTDIR)\s_isnan.obj" \
+ "$(INTDIR)\s_ldexp.obj" \
+ "$(INTDIR)\s_lib_version.obj" \
+ "$(INTDIR)\s_log1p.obj" \
+ "$(INTDIR)\s_logb.obj" \
+ "$(INTDIR)\s_matherr.obj" \
+ "$(INTDIR)\s_modf.obj" \
+ "$(INTDIR)\s_nextafter.obj" \
+ "$(INTDIR)\s_rint.obj" \
+ "$(INTDIR)\s_scalbn.obj" \
+ "$(INTDIR)\s_signgam.obj" \
+ "$(INTDIR)\s_significand.obj" \
+ "$(INTDIR)\s_sin.obj" \
+ "$(INTDIR)\s_tan.obj" \
+ "$(INTDIR)\s_tanh.obj" \
+ "$(INTDIR)\w_acos.obj" \
+ "$(INTDIR)\w_acosh.obj" \
+ "$(INTDIR)\w_asin.obj" \
+ "$(INTDIR)\w_atan2.obj" \
+ "$(INTDIR)\w_atanh.obj" \
+ "$(INTDIR)\w_cosh.obj" \
+ "$(INTDIR)\w_exp.obj" \
+ "$(INTDIR)\w_fmod.obj" \
+ "$(INTDIR)\w_gamma.obj" \
+ "$(INTDIR)\w_gamma_r.obj" \
+ "$(INTDIR)\w_hypot.obj" \
+ "$(INTDIR)\w_j0.obj" \
+ "$(INTDIR)\w_j1.obj" \
+ "$(INTDIR)\w_jn.obj" \
+ "$(INTDIR)\w_lgamma.obj" \
+ "$(INTDIR)\w_lgamma_r.obj" \
+ "$(INTDIR)\w_log.obj" \
+ "$(INTDIR)\w_log10.obj" \
+ "$(INTDIR)\w_pow.obj" \
+ "$(INTDIR)\w_remainder.obj" \
+ "$(INTDIR)\w_scalb.obj" \
+ "$(INTDIR)\w_sinh.obj" \
+ "$(INTDIR)\w_sqrt.obj"
+
+"$(OUTDIR)\fdlibm.lib" : "$(OUTDIR)" $(DEF_FILE) $(LIB32_OBJS)
+ $(LIB32) @<<
+ $(LIB32_FLAGS) $(DEF_FLAGS) $(LIB32_OBJS)
+<<
+
+!ENDIF
+
+.c{$(CPP_OBJS)}.obj:
+ $(CPP) $(CPP_PROJ) $<
+
+.cpp{$(CPP_OBJS)}.obj:
+ $(CPP) $(CPP_PROJ) $<
+
+.cxx{$(CPP_OBJS)}.obj:
+ $(CPP) $(CPP_PROJ) $<
+
+.c{$(CPP_SBRS)}.sbr:
+ $(CPP) $(CPP_PROJ) $<
+
+.cpp{$(CPP_SBRS)}.sbr:
+ $(CPP) $(CPP_PROJ) $<
+
+.cxx{$(CPP_SBRS)}.sbr:
+ $(CPP) $(CPP_PROJ) $<
+
+################################################################################
+# Begin Target
+
+# Name "fdlibm - Win32 Release"
+# Name "fdlibm - Win32 Debug"
+
+!IF "$(CFG)" == "fdlibm - Win32 Release"
+
+!ELSEIF "$(CFG)" == "fdlibm - Win32 Debug"
+
+!ENDIF
+
+################################################################################
+# Begin Source File
+
+SOURCE=.\w_sqrt.c
+DEP_CPP_W_SQR=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\w_sqrt.obj" : $(SOURCE) $(DEP_CPP_W_SQR) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\e_acosh.c
+DEP_CPP_E_ACO=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\e_acosh.obj" : $(SOURCE) $(DEP_CPP_E_ACO) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\e_asin.c
+DEP_CPP_E_ASI=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\e_asin.obj" : $(SOURCE) $(DEP_CPP_E_ASI) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\e_atan2.c
+DEP_CPP_E_ATA=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\e_atan2.obj" : $(SOURCE) $(DEP_CPP_E_ATA) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\e_atanh.c
+DEP_CPP_E_ATAN=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\e_atanh.obj" : $(SOURCE) $(DEP_CPP_E_ATAN) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\e_cosh.c
+DEP_CPP_E_COS=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\e_cosh.obj" : $(SOURCE) $(DEP_CPP_E_COS) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\e_exp.c
+DEP_CPP_E_EXP=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\e_exp.obj" : $(SOURCE) $(DEP_CPP_E_EXP) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\e_fmod.c
+DEP_CPP_E_FMO=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\e_fmod.obj" : $(SOURCE) $(DEP_CPP_E_FMO) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\e_gamma.c
+DEP_CPP_E_GAM=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\e_gamma.obj" : $(SOURCE) $(DEP_CPP_E_GAM) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\e_gamma_r.c
+DEP_CPP_E_GAMM=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\e_gamma_r.obj" : $(SOURCE) $(DEP_CPP_E_GAMM) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\e_hypot.c
+DEP_CPP_E_HYP=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\e_hypot.obj" : $(SOURCE) $(DEP_CPP_E_HYP) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\e_j0.c
+DEP_CPP_E_J0_=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\e_j0.obj" : $(SOURCE) $(DEP_CPP_E_J0_) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\e_j1.c
+DEP_CPP_E_J1_=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\e_j1.obj" : $(SOURCE) $(DEP_CPP_E_J1_) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\e_jn.c
+DEP_CPP_E_JN_=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\e_jn.obj" : $(SOURCE) $(DEP_CPP_E_JN_) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\e_lgamma.c
+DEP_CPP_E_LGA=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\e_lgamma.obj" : $(SOURCE) $(DEP_CPP_E_LGA) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\e_lgamma_r.c
+DEP_CPP_E_LGAM=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\e_lgamma_r.obj" : $(SOURCE) $(DEP_CPP_E_LGAM) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\e_log.c
+DEP_CPP_E_LOG=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\e_log.obj" : $(SOURCE) $(DEP_CPP_E_LOG) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\e_log10.c
+DEP_CPP_E_LOG1=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\e_log10.obj" : $(SOURCE) $(DEP_CPP_E_LOG1) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\e_pow.c
+DEP_CPP_E_POW=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\e_pow.obj" : $(SOURCE) $(DEP_CPP_E_POW) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\e_rem_pio2.c
+DEP_CPP_E_REM=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\e_rem_pio2.obj" : $(SOURCE) $(DEP_CPP_E_REM) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\e_remainder.c
+DEP_CPP_E_REMA=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\e_remainder.obj" : $(SOURCE) $(DEP_CPP_E_REMA) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\e_scalb.c
+DEP_CPP_E_SCA=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\e_scalb.obj" : $(SOURCE) $(DEP_CPP_E_SCA) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\e_sinh.c
+DEP_CPP_E_SIN=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\e_sinh.obj" : $(SOURCE) $(DEP_CPP_E_SIN) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\e_sqrt.c
+DEP_CPP_E_SQR=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\e_sqrt.obj" : $(SOURCE) $(DEP_CPP_E_SQR) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\fdlibm.h
+
+!IF "$(CFG)" == "fdlibm - Win32 Release"
+
+!ELSEIF "$(CFG)" == "fdlibm - Win32 Debug"
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\k_cos.c
+DEP_CPP_K_COS=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\k_cos.obj" : $(SOURCE) $(DEP_CPP_K_COS) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\k_rem_pio2.c
+DEP_CPP_K_REM=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\k_rem_pio2.obj" : $(SOURCE) $(DEP_CPP_K_REM) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\k_sin.c
+DEP_CPP_K_SIN=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\k_sin.obj" : $(SOURCE) $(DEP_CPP_K_SIN) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\k_standard.c
+DEP_CPP_K_STA=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\k_standard.obj" : $(SOURCE) $(DEP_CPP_K_STA) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\k_tan.c
+DEP_CPP_K_TAN=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\k_tan.obj" : $(SOURCE) $(DEP_CPP_K_TAN) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\s_asinh.c
+DEP_CPP_S_ASI=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\s_asinh.obj" : $(SOURCE) $(DEP_CPP_S_ASI) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\s_atan.c
+DEP_CPP_S_ATA=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\s_atan.obj" : $(SOURCE) $(DEP_CPP_S_ATA) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\s_cbrt.c
+DEP_CPP_S_CBR=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\s_cbrt.obj" : $(SOURCE) $(DEP_CPP_S_CBR) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\s_ceil.c
+DEP_CPP_S_CEI=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\s_ceil.obj" : $(SOURCE) $(DEP_CPP_S_CEI) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\s_copysign.c
+DEP_CPP_S_COP=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\s_copysign.obj" : $(SOURCE) $(DEP_CPP_S_COP) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\s_cos.c
+DEP_CPP_S_COS=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\s_cos.obj" : $(SOURCE) $(DEP_CPP_S_COS) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\s_erf.c
+DEP_CPP_S_ERF=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\s_erf.obj" : $(SOURCE) $(DEP_CPP_S_ERF) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\s_expm1.c
+DEP_CPP_S_EXP=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\s_expm1.obj" : $(SOURCE) $(DEP_CPP_S_EXP) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\s_fabs.c
+DEP_CPP_S_FAB=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\s_fabs.obj" : $(SOURCE) $(DEP_CPP_S_FAB) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\s_finite.c
+DEP_CPP_S_FIN=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\s_finite.obj" : $(SOURCE) $(DEP_CPP_S_FIN) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\s_floor.c
+DEP_CPP_S_FLO=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\s_floor.obj" : $(SOURCE) $(DEP_CPP_S_FLO) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\s_frexp.c
+DEP_CPP_S_FRE=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\s_frexp.obj" : $(SOURCE) $(DEP_CPP_S_FRE) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\s_ilogb.c
+DEP_CPP_S_ILO=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\s_ilogb.obj" : $(SOURCE) $(DEP_CPP_S_ILO) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\s_isnan.c
+DEP_CPP_S_ISN=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\s_isnan.obj" : $(SOURCE) $(DEP_CPP_S_ISN) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\s_ldexp.c
+DEP_CPP_S_LDE=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\s_ldexp.obj" : $(SOURCE) $(DEP_CPP_S_LDE) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\s_lib_version.c
+DEP_CPP_S_LIB=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\s_lib_version.obj" : $(SOURCE) $(DEP_CPP_S_LIB) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\s_log1p.c
+DEP_CPP_S_LOG=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\s_log1p.obj" : $(SOURCE) $(DEP_CPP_S_LOG) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\s_logb.c
+DEP_CPP_S_LOGB=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\s_logb.obj" : $(SOURCE) $(DEP_CPP_S_LOGB) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\s_matherr.c
+DEP_CPP_S_MAT=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\s_matherr.obj" : $(SOURCE) $(DEP_CPP_S_MAT) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\s_modf.c
+DEP_CPP_S_MOD=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\s_modf.obj" : $(SOURCE) $(DEP_CPP_S_MOD) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\s_nextafter.c
+DEP_CPP_S_NEX=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\s_nextafter.obj" : $(SOURCE) $(DEP_CPP_S_NEX) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\s_rint.c
+DEP_CPP_S_RIN=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\s_rint.obj" : $(SOURCE) $(DEP_CPP_S_RIN) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\s_scalbn.c
+DEP_CPP_S_SCA=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\s_scalbn.obj" : $(SOURCE) $(DEP_CPP_S_SCA) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\s_signgam.c
+DEP_CPP_S_SIG=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\s_signgam.obj" : $(SOURCE) $(DEP_CPP_S_SIG) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\s_significand.c
+DEP_CPP_S_SIGN=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\s_significand.obj" : $(SOURCE) $(DEP_CPP_S_SIGN) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\s_sin.c
+DEP_CPP_S_SIN=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\s_sin.obj" : $(SOURCE) $(DEP_CPP_S_SIN) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\s_tan.c
+DEP_CPP_S_TAN=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\s_tan.obj" : $(SOURCE) $(DEP_CPP_S_TAN) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\s_tanh.c
+DEP_CPP_S_TANH=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\s_tanh.obj" : $(SOURCE) $(DEP_CPP_S_TANH) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\w_acos.c
+DEP_CPP_W_ACO=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\w_acos.obj" : $(SOURCE) $(DEP_CPP_W_ACO) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\w_acosh.c
+DEP_CPP_W_ACOS=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\w_acosh.obj" : $(SOURCE) $(DEP_CPP_W_ACOS) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\w_asin.c
+DEP_CPP_W_ASI=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\w_asin.obj" : $(SOURCE) $(DEP_CPP_W_ASI) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\w_atan2.c
+DEP_CPP_W_ATA=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\w_atan2.obj" : $(SOURCE) $(DEP_CPP_W_ATA) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\w_atanh.c
+DEP_CPP_W_ATAN=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\w_atanh.obj" : $(SOURCE) $(DEP_CPP_W_ATAN) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\w_cosh.c
+DEP_CPP_W_COS=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\w_cosh.obj" : $(SOURCE) $(DEP_CPP_W_COS) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\w_exp.c
+DEP_CPP_W_EXP=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\w_exp.obj" : $(SOURCE) $(DEP_CPP_W_EXP) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\w_fmod.c
+DEP_CPP_W_FMO=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\w_fmod.obj" : $(SOURCE) $(DEP_CPP_W_FMO) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\w_gamma.c
+DEP_CPP_W_GAM=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\w_gamma.obj" : $(SOURCE) $(DEP_CPP_W_GAM) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\w_gamma_r.c
+DEP_CPP_W_GAMM=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\w_gamma_r.obj" : $(SOURCE) $(DEP_CPP_W_GAMM) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\w_hypot.c
+DEP_CPP_W_HYP=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\w_hypot.obj" : $(SOURCE) $(DEP_CPP_W_HYP) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\w_j0.c
+DEP_CPP_W_J0_=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\w_j0.obj" : $(SOURCE) $(DEP_CPP_W_J0_) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\w_j1.c
+DEP_CPP_W_J1_=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\w_j1.obj" : $(SOURCE) $(DEP_CPP_W_J1_) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\w_jn.c
+DEP_CPP_W_JN_=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\w_jn.obj" : $(SOURCE) $(DEP_CPP_W_JN_) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\w_lgamma.c
+DEP_CPP_W_LGA=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\w_lgamma.obj" : $(SOURCE) $(DEP_CPP_W_LGA) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\w_lgamma_r.c
+DEP_CPP_W_LGAM=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\w_lgamma_r.obj" : $(SOURCE) $(DEP_CPP_W_LGAM) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\w_log.c
+DEP_CPP_W_LOG=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\w_log.obj" : $(SOURCE) $(DEP_CPP_W_LOG) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\w_log10.c
+DEP_CPP_W_LOG1=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\w_log10.obj" : $(SOURCE) $(DEP_CPP_W_LOG1) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\w_pow.c
+DEP_CPP_W_POW=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\w_pow.obj" : $(SOURCE) $(DEP_CPP_W_POW) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\w_remainder.c
+DEP_CPP_W_REM=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\w_remainder.obj" : $(SOURCE) $(DEP_CPP_W_REM) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\w_scalb.c
+DEP_CPP_W_SCA=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\w_scalb.obj" : $(SOURCE) $(DEP_CPP_W_SCA) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\w_sinh.c
+DEP_CPP_W_SIN=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\w_sinh.obj" : $(SOURCE) $(DEP_CPP_W_SIN) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\e_acos.c
+DEP_CPP_E_ACOS=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\e_acos.obj" : $(SOURCE) $(DEP_CPP_E_ACOS) "$(INTDIR)"
+
+
+# End Source File
+# End Target
+# End Project
+################################################################################
diff --git a/third_party/js-1.7/fdlibm/fdlibm.mdp b/third_party/js-1.7/fdlibm/fdlibm.mdp
new file mode 100644
index 0000000..5904c49
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/fdlibm.mdp
Binary files differ
diff --git a/third_party/js-1.7/fdlibm/k_cos.c b/third_party/js-1.7/fdlibm/k_cos.c
new file mode 100644
index 0000000..1d18c80
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/k_cos.c
@@ -0,0 +1,135 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)k_cos.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * __kernel_cos( x, y )
+ * kernel cos function on [-pi/4, pi/4], pi/4 ~ 0.785398164
+ * Input x is assumed to be bounded by ~pi/4 in magnitude.
+ * Input y is the tail of x.
+ *
+ * Algorithm
+ * 1. Since cos(-x) = cos(x), we need only to consider positive x.
+ * 2. if x < 2^-27 (hx<0x3e400000 0), return 1 with inexact if x!=0.
+ * 3. cos(x) is approximated by a polynomial of degree 14 on
+ * [0,pi/4]
+ * 4 14
+ * cos(x) ~ 1 - x*x/2 + C1*x + ... + C6*x
+ * where the remez error is
+ *
+ * | 2 4 6 8 10 12 14 | -58
+ * |cos(x)-(1-.5*x +C1*x +C2*x +C3*x +C4*x +C5*x +C6*x )| <= 2
+ * | |
+ *
+ * 4 6 8 10 12 14
+ * 4. let r = C1*x +C2*x +C3*x +C4*x +C5*x +C6*x , then
+ * cos(x) = 1 - x*x/2 + r
+ * since cos(x+y) ~ cos(x) - sin(x)*y
+ * ~ cos(x) - x*y,
+ * a correction term is necessary in cos(x) and hence
+ * cos(x+y) = 1 - (x*x/2 - (r - x*y))
+ * For better accuracy when x > 0.3, let qx = |x|/4 with
+ * the last 32 bits mask off, and if x > 0.78125, let qx = 0.28125.
+ * Then
+ * cos(x+y) = (1-qx) - ((x*x/2-qx) - (r-x*y)).
+ *	   Note that 1-qx and (x*x/2-qx) are EXACT here, and the
+ *	   magnitude of the latter is at least a quarter of x*x/2,
+ *	   thus reducing the rounding error in the subtraction.
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+static const double
+#else
+static double
+#endif
+one = 1.00000000000000000000e+00, /* 0x3FF00000, 0x00000000 */
+C1 = 4.16666666666666019037e-02, /* 0x3FA55555, 0x5555554C */
+C2 = -1.38888888888741095749e-03, /* 0xBF56C16C, 0x16C15177 */
+C3 = 2.48015872894767294178e-05, /* 0x3EFA01A0, 0x19CB1590 */
+C4 = -2.75573143513906633035e-07, /* 0xBE927E4F, 0x809C52AD */
+C5 = 2.08757232129817482790e-09, /* 0x3E21EE9E, 0xBDB4B1C4 */
+C6 = -1.13596475577881948265e-11; /* 0xBDA8FAE9, 0xBE8838D4 */
+
+#ifdef __STDC__
+ double __kernel_cos(double x, double y)
+#else
+ double __kernel_cos(x, y)
+ double x,y;
+#endif
+{
+ fd_twoints u;
+ double qx = 0;
+ double a,hz,z,r;
+ int ix;
+ u.d = x;
+ ix = __HI(u)&0x7fffffff; /* ix = |x|'s high word*/
+	if(ix<0x3e400000) {			/* if x < 2**-27 */
+ if(((int)x)==0) return one; /* generate inexact */
+ }
+ z = x*x;
+ r = z*(C1+z*(C2+z*(C3+z*(C4+z*(C5+z*C6)))));
+ if(ix < 0x3FD33333) /* if |x| < 0.3 */
+ return one - (0.5*z - (z*r - x*y));
+ else {
+ if(ix > 0x3fe90000) { /* x > 0.78125 */
+ qx = 0.28125;
+ } else {
+ u.d = qx;
+ __HI(u) = ix-0x00200000; /* x/4 */
+ __LO(u) = 0;
+ qx = u.d;
+ }
+ hz = 0.5*z-qx;
+ a = one-qx;
+ return a - (hz - (z*r-x*y));
+ }
+}
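
The k_cos.c header above describes approximating cos(x) on [-pi/4, pi/4] by a degree-14 even polynomial with coefficients C1..C6, plus a small correction for the tail y. The sketch below is illustrative only and separate from the patch content: it evaluates the same polynomial with y taken as zero and the qx refinement for |x| > 0.3 omitted, then compares against the host libm. The helper name poly_cos and the main() scaffolding are just for the sketch; the coefficient values are the ones in this file.

/* sketch: evaluate the k_cos.c polynomial (y = 0, no qx refinement)
   and compare with the host cos(); compile with e.g. cc -lm */
#include <math.h>
#include <stdio.h>

static double poly_cos(double x)
{
    static const double C1 =  4.16666666666666019037e-02,
                        C2 = -1.38888888888741095749e-03,
                        C3 =  2.48015872894767294178e-05,
                        C4 = -2.75573143513906633035e-07,
                        C5 =  2.08757232129817482790e-09,
                        C6 = -1.13596475577881948265e-11;
    double z = x * x;
    double r = z * (C1 + z * (C2 + z * (C3 + z * (C4 + z * (C5 + z * C6)))));
    return 1.0 - (0.5 * z - z * r);
}

int main(void)
{
    for (double x = -0.78; x <= 0.78; x += 0.26)
        printf("x=% .2f  poly=% .17g  libm=% .17g\n", x, poly_cos(x), cos(x));
    return 0;
}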
diff --git a/third_party/js-1.7/fdlibm/k_rem_pio2.c b/third_party/js-1.7/fdlibm/k_rem_pio2.c
new file mode 100644
index 0000000..d261e19
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/k_rem_pio2.c
@@ -0,0 +1,354 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)k_rem_pio2.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * __kernel_rem_pio2(x,y,e0,nx,prec,ipio2)
+ * double x[],y[]; int e0,nx,prec; int ipio2[];
+ *
+ * __kernel_rem_pio2 returns the last three digits of N with
+ * y = x - N*pi/2
+ * so that |y| < pi/2.
+ *
+ * The method is to compute the integer (mod 8) and fraction parts of
+ * (2/pi)*x without doing the full multiplication. In general we
+ * skip the parts of the product that are known to be a huge integer (
+ * more accurately, = 0 mod 8 ). Thus the number of operations is
+ * independent of the exponent of the input.
+ *
+ * (2/pi) is represented by an array of 24-bit integers in ipio2[].
+ *
+ * Input parameters:
+ * x[] The input value (must be positive) is broken into nx
+ * pieces of 24-bit integers in double precision format.
+ * x[i] will be the i-th 24 bit of x. The scaled exponent
+ * of x[0] is given in input parameter e0 (i.e., x[0]*2^e0
+ *		matches x's up to 24 bits).
+ *
+ * Example of breaking a double positive z into x[0]+x[1]+x[2]:
+ * e0 = ilogb(z)-23
+ * z = scalbn(z,-e0)
+ * for i = 0,1,2
+ * x[i] = floor(z)
+ * z = (z-x[i])*2**24
+ *
+ *
+ *	y[]	output result in an array of double precision numbers.
+ * The dimension of y[] is:
+ * 24-bit precision 1
+ * 53-bit precision 2
+ * 64-bit precision 2
+ * 113-bit precision 3
+ * The actual value is the sum of them. Thus for 113-bit
+ *			precision, one may have to do something like:
+ *
+ * long double t,w,r_head, r_tail;
+ * t = (long double)y[2] + (long double)y[1];
+ * w = (long double)y[0];
+ * r_head = t+w;
+ * r_tail = w - (r_head - t);
+ *
+ * e0 The exponent of x[0]
+ *
+ * nx dimension of x[]
+ *
+ * prec an integer indicating the precision:
+ * 0 24 bits (single)
+ * 1 53 bits (double)
+ * 2 64 bits (extended)
+ * 3 113 bits (quad)
+ *
+ * ipio2[]
+ * integer array, contains the (24*i)-th to (24*i+23)-th
+ * bit of 2/pi after binary point. The corresponding
+ * floating value is
+ *
+ * ipio2[i] * 2^(-24(i+1)).
+ *
+ * External function:
+ * double scalbn(), floor();
+ *
+ *
+ * Here is the description of some local variables:
+ *
+ * jk jk+1 is the initial number of terms of ipio2[] needed
+ * in the computation. The recommended value is 2,3,4,
+ * 6 for single, double, extended,and quad.
+ *
+ * jz local integer variable indicating the number of
+ * terms of ipio2[] used.
+ *
+ * jx nx - 1
+ *
+ * jv index for pointing to the suitable ipio2[] for the
+ * computation. In general, we want
+ * ( 2^e0*x[0] * ipio2[jv-1]*2^(-24jv) )/8
+ *		to be an integer. Thus
+ * e0-3-24*jv >= 0 or (e0-3)/24 >= jv
+ * Hence jv = max(0,(e0-3)/24).
+ *
+ * jp jp+1 is the number of terms in PIo2[] needed, jp = jk.
+ *
+ * q[] double array with integral value, representing the
+ * 24-bits chunk of the product of x and 2/pi.
+ *
+ * q0 the corresponding exponent of q[0]. Note that the
+ * exponent for q[i] would be q0-24*i.
+ *
+ * PIo2[] double precision array, obtained by cutting pi/2
+ * into 24 bits chunks.
+ *
+ * f[] ipio2[] in floating point
+ *
+ * iq[] integer array by breaking up q[] in 24-bits chunk.
+ *
+ * fq[] final product of x*(2/pi) in fq[0],..,fq[jk]
+ *
+ * ih integer. If >0 it indicates q[] is >= 0.5, hence
+ * it also indicates the *sign* of the result.
+ *
+ */
+
+
+/*
+ * Constants:
+ * The hexadecimal values are the intended ones for the following
+ * constants. The decimal values may be used, provided that the
+ * compiler will convert from decimal to binary accurately enough
+ * to produce the hexadecimal values shown.
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+static const int init_jk[] = {2,3,4,6}; /* initial value for jk */
+#else
+static int init_jk[] = {2,3,4,6};
+#endif
+
+#ifdef __STDC__
+static const double PIo2[] = {
+#else
+static double PIo2[] = {
+#endif
+ 1.57079625129699707031e+00, /* 0x3FF921FB, 0x40000000 */
+ 7.54978941586159635335e-08, /* 0x3E74442D, 0x00000000 */
+ 5.39030252995776476554e-15, /* 0x3CF84698, 0x80000000 */
+ 3.28200341580791294123e-22, /* 0x3B78CC51, 0x60000000 */
+ 1.27065575308067607349e-29, /* 0x39F01B83, 0x80000000 */
+ 1.22933308981111328932e-36, /* 0x387A2520, 0x40000000 */
+ 2.73370053816464559624e-44, /* 0x36E38222, 0x80000000 */
+ 2.16741683877804819444e-51, /* 0x3569F31D, 0x00000000 */
+};
+
+#ifdef __STDC__
+static const double
+#else
+static double
+#endif
+zero = 0.0,
+one = 1.0,
+two24 = 1.67772160000000000000e+07, /* 0x41700000, 0x00000000 */
+twon24 = 5.96046447753906250000e-08; /* 0x3E700000, 0x00000000 */
+
+#ifdef __STDC__
+ int __kernel_rem_pio2(double *x, double *y, int e0, int nx, int prec, const int *ipio2)
+#else
+ int __kernel_rem_pio2(x,y,e0,nx,prec,ipio2)
+ double x[], y[]; int e0,nx,prec; int ipio2[];
+#endif
+{
+ int jz,jx,jv,jp,jk,carry,n,iq[20],i,j,k,m,q0,ih;
+ double z,fw,f[20],fq[20],q[20];
+
+ /* initialize jk*/
+ jk = init_jk[prec];
+ jp = jk;
+
+ /* determine jx,jv,q0, note that 3>q0 */
+ jx = nx-1;
+ jv = (e0-3)/24; if(jv<0) jv=0;
+ q0 = e0-24*(jv+1);
+
+ /* set up f[0] to f[jx+jk] where f[jx+jk] = ipio2[jv+jk] */
+ j = jv-jx; m = jx+jk;
+ for(i=0;i<=m;i++,j++) f[i] = (j<0)? zero : (double) ipio2[j];
+
+ /* compute q[0],q[1],...q[jk] */
+ for (i=0;i<=jk;i++) {
+ for(j=0,fw=0.0;j<=jx;j++) fw += x[j]*f[jx+i-j]; q[i] = fw;
+ }
+
+ jz = jk;
+recompute:
+ /* distill q[] into iq[] reversingly */
+ for(i=0,j=jz,z=q[jz];j>0;i++,j--) {
+ fw = (double)((int)(twon24* z));
+ iq[i] = (int)(z-two24*fw);
+ z = q[j-1]+fw;
+ }
+
+ /* compute n */
+ z = fd_scalbn(z,q0); /* actual value of z */
+ z -= 8.0*fd_floor(z*0.125); /* trim off integer >= 8 */
+ n = (int) z;
+ z -= (double)n;
+ ih = 0;
+ if(q0>0) { /* need iq[jz-1] to determine n */
+ i = (iq[jz-1]>>(24-q0)); n += i;
+ iq[jz-1] -= i<<(24-q0);
+ ih = iq[jz-1]>>(23-q0);
+ }
+ else if(q0==0) ih = iq[jz-1]>>23;
+ else if(z>=0.5) ih=2;
+
+ if(ih>0) { /* q > 0.5 */
+ n += 1; carry = 0;
+ for(i=0;i<jz ;i++) { /* compute 1-q */
+ j = iq[i];
+ if(carry==0) {
+ if(j!=0) {
+ carry = 1; iq[i] = 0x1000000- j;
+ }
+ } else iq[i] = 0xffffff - j;
+ }
+ if(q0>0) { /* rare case: chance is 1 in 12 */
+ switch(q0) {
+ case 1:
+ iq[jz-1] &= 0x7fffff; break;
+ case 2:
+ iq[jz-1] &= 0x3fffff; break;
+ }
+ }
+ if(ih==2) {
+ z = one - z;
+ if(carry!=0) z -= fd_scalbn(one,q0);
+ }
+ }
+
+ /* check if recomputation is needed */
+ if(z==zero) {
+ j = 0;
+ for (i=jz-1;i>=jk;i--) j |= iq[i];
+ if(j==0) { /* need recomputation */
+ for(k=1;iq[jk-k]==0;k++); /* k = no. of terms needed */
+
+ for(i=jz+1;i<=jz+k;i++) { /* add q[jz+1] to q[jz+k] */
+ f[jx+i] = (double) ipio2[jv+i];
+ for(j=0,fw=0.0;j<=jx;j++) fw += x[j]*f[jx+i-j];
+ q[i] = fw;
+ }
+ jz += k;
+ goto recompute;
+ }
+ }
+
+ /* chop off zero terms */
+ if(z==0.0) {
+ jz -= 1; q0 -= 24;
+ while(iq[jz]==0) { jz--; q0-=24;}
+ } else { /* break z into 24-bit if necessary */
+ z = fd_scalbn(z,-q0);
+ if(z>=two24) {
+ fw = (double)((int)(twon24*z));
+ iq[jz] = (int)(z-two24*fw);
+ jz += 1; q0 += 24;
+ iq[jz] = (int) fw;
+ } else iq[jz] = (int) z ;
+ }
+
+ /* convert integer "bit" chunk to floating-point value */
+ fw = fd_scalbn(one,q0);
+ for(i=jz;i>=0;i--) {
+ q[i] = fw*(double)iq[i]; fw*=twon24;
+ }
+
+ /* compute PIo2[0,...,jp]*q[jz,...,0] */
+ for(i=jz;i>=0;i--) {
+ for(fw=0.0,k=0;k<=jp&&k<=jz-i;k++) fw += PIo2[k]*q[i+k];
+ fq[jz-i] = fw;
+ }
+
+ /* compress fq[] into y[] */
+ switch(prec) {
+ case 0:
+ fw = 0.0;
+ for (i=jz;i>=0;i--) fw += fq[i];
+ y[0] = (ih==0)? fw: -fw;
+ break;
+ case 1:
+ case 2:
+ fw = 0.0;
+ for (i=jz;i>=0;i--) fw += fq[i];
+ y[0] = (ih==0)? fw: -fw;
+ fw = fq[0]-fw;
+ for (i=1;i<=jz;i++) fw += fq[i];
+ y[1] = (ih==0)? fw: -fw;
+ break;
+ case 3: /* painful */
+ for (i=jz;i>0;i--) {
+ fw = fq[i-1]+fq[i];
+ fq[i] += fq[i-1]-fw;
+ fq[i-1] = fw;
+ }
+ for (i=jz;i>1;i--) {
+ fw = fq[i-1]+fq[i];
+ fq[i] += fq[i-1]-fw;
+ fq[i-1] = fw;
+ }
+ for (fw=0.0,i=jz;i>=2;i--) fw += fq[i];
+ if(ih==0) {
+ y[0] = fq[0]; y[1] = fq[1]; y[2] = fw;
+ } else {
+ y[0] = -fq[0]; y[1] = -fq[1]; y[2] = -fw;
+ }
+ }
+ return n&7;
+}
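
The header comment above specifies how callers prepare input for __kernel_rem_pio2: a positive double z is split into 24-bit pieces x[i] with scaled exponent e0 = ilogb(z) - 23, so that z is recovered as the sum of scalbn(x[i], e0 - 24*i). Below is a minimal sketch of just that preparation step, illustrative only and separate from the patch; it uses the standard ilogb/scalbn/floor from math.h rather than the fd_-prefixed wrappers used elsewhere in fdlibm, and the chosen input value is arbitrary.

/* sketch: break a positive double into three 24-bit pieces as described
   in the k_rem_pio2.c header, then reassemble to verify the split */
#include <math.h>
#include <stdio.h>

int main(void)
{
    double z  = 1.0e22;                /* arbitrary large positive input */
    int    e0 = ilogb(z) - 23;         /* scaled exponent of x[0] */
    double t  = scalbn(z, -e0);
    double x[3];
    for (int i = 0; i < 3; i++) {
        x[i] = floor(t);               /* next 24-bit piece */
        t = (t - x[i]) * 16777216.0;   /* shift remainder up by 2^24 */
    }
    double back = 0.0;                 /* reassemble: sum of x[i]*2^(e0-24*i) */
    for (int i = 0; i < 3; i++)
        back += scalbn(x[i], e0 - 24 * i);
    printf("x = { %.0f, %.0f, %.0f }, e0 = %d\nz    = %.17g\nback = %.17g\n",
           x[0], x[1], x[2], e0, z, back);
    return 0;
}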
diff --git a/third_party/js-1.7/fdlibm/k_sin.c b/third_party/js-1.7/fdlibm/k_sin.c
new file mode 100644
index 0000000..d2bdabd
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/k_sin.c
@@ -0,0 +1,114 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)k_sin.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/* __kernel_sin( x, y, iy)
+ * kernel sin function on [-pi/4, pi/4], pi/4 ~ 0.7854
+ * Input x is assumed to be bounded by ~pi/4 in magnitude.
+ * Input y is the tail of x.
+ * Input iy indicates whether y is 0. (if iy=0, y is assumed to be 0).
+ *
+ * Algorithm
+ * 1. Since sin(-x) = -sin(x), we need only to consider positive x.
+ * 2. if x < 2^-27 (hx<0x3e400000 0), return x with inexact if x!=0.
+ * 3. sin(x) is approximated by a polynomial of degree 13 on
+ * [0,pi/4]
+ * 3 13
+ * sin(x) ~ x + S1*x + ... + S6*x
+ * where
+ *
+ * |sin(x) 2 4 6 8 10 12 | -58
+ * |----- - (1+S1*x +S2*x +S3*x +S4*x +S5*x +S6*x )| <= 2
+ * | x |
+ *
+ * 4. sin(x+y) = sin(x) + sin'(x')*y
+ * ~ sin(x) + (1-x*x/2)*y
+ * For better accuracy, let
+ * 3 2 2 2 2
+ * r = x *(S2+x *(S3+x *(S4+x *(S5+x *S6))))
+ * then 3 2
+ * sin(x) = x + (S1*x + (x *(r-y/2)+y))
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+static const double
+#else
+static double
+#endif
+half = 5.00000000000000000000e-01, /* 0x3FE00000, 0x00000000 */
+S1 = -1.66666666666666324348e-01, /* 0xBFC55555, 0x55555549 */
+S2 = 8.33333333332248946124e-03, /* 0x3F811111, 0x1110F8A6 */
+S3 = -1.98412698298579493134e-04, /* 0xBF2A01A0, 0x19C161D5 */
+S4 = 2.75573137070700676789e-06, /* 0x3EC71DE3, 0x57B1FE7D */
+S5 = -2.50507602534068634195e-08, /* 0xBE5AE5E6, 0x8A2B9CEB */
+S6 = 1.58969099521155010221e-10; /* 0x3DE5D93A, 0x5ACFD57C */
+
+#ifdef __STDC__
+ double __kernel_sin(double x, double y, int iy)
+#else
+ double __kernel_sin(x, y, iy)
+ double x,y; int iy; /* iy=0 if y is zero */
+#endif
+{
+ fd_twoints u;
+ double z,r,v;
+ int ix;
+ u.d = x;
+ ix = __HI(u)&0x7fffffff; /* high word of x */
+ if(ix<0x3e400000) /* |x| < 2**-27 */
+ {if((int)x==0) return x;} /* generate inexact */
+ z = x*x;
+ v = z*x;
+ r = S2+z*(S3+z*(S4+z*(S5+z*S6)));
+ if(iy==0) return x+v*(S1+z*r);
+ else return x-((z*(half*y-v*r)-y)-v*S1);
+}
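
As with the cosine kernel, the k_sin.c header describes a degree-13 odd polynomial on [-pi/4, pi/4] with coefficients S1..S6. The sketch below is illustrative only and separate from the patch content: it evaluates the iy == 0 branch of that approximation (tail y taken as zero) and compares it with the host libm's sin(). The helper name poly_sin is just for the sketch; the coefficient values are the ones in this file.

/* sketch: evaluate the k_sin.c polynomial (iy = 0 branch, tail y = 0)
   and compare with the host sin(); compile with e.g. cc -lm */
#include <math.h>
#include <stdio.h>

static double poly_sin(double x)
{
    static const double S1 = -1.66666666666666324348e-01,
                        S2 =  8.33333333332248946124e-03,
                        S3 = -1.98412698298579493134e-04,
                        S4 =  2.75573137070700676789e-06,
                        S5 = -2.50507602534068634195e-08,
                        S6 =  1.58969099521155010221e-10;
    double z = x * x;
    double v = z * x;
    double r = S2 + z * (S3 + z * (S4 + z * (S5 + z * S6)));
    return x + v * (S1 + z * r);
}

int main(void)
{
    for (double x = -0.78; x <= 0.78; x += 0.26)
        printf("x=% .2f  poly=% .17g  libm=% .17g\n", x, poly_sin(x), sin(x));
    return 0;
}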
diff --git a/third_party/js-1.7/fdlibm/k_standard.c b/third_party/js-1.7/fdlibm/k_standard.c
new file mode 100644
index 0000000..720109c
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/k_standard.c
@@ -0,0 +1,785 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)k_standard.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ *
+ */
+
+#include "fdlibm.h"
+
+/* XXX ugly hack to get msvc to link without error. */
+#if _LIB_VERSION == _IEEE_ && !(defined(DARWIN) || defined(XP_MACOSX))
+ int errno;
+# define EDOM 0
+# define ERANGE 0
+#else
+# include <errno.h>
+#endif
+
+
+#ifndef _USE_WRITE
+#include <stdio.h> /* fputs(), stderr */
+#define WRITE2(u,v) fputs(u, stderr)
+#else /* !defined(_USE_WRITE) */
+#include <unistd.h> /* write */
+#define WRITE2(u,v) write(2, u, v)
+#undef fflush
+#endif /* !defined(_USE_WRITE) */
+
+static double zero = 0.0; /* used as const */
+
+/*
+ * Standard conformance (non-IEEE) on exception cases.
+ * Mapping:
+ * 1 -- acos(|x|>1)
+ * 2 -- asin(|x|>1)
+ * 3 -- atan2(+-0,+-0)
+ * 4 -- hypot overflow
+ * 5 -- cosh overflow
+ * 6 -- exp overflow
+ * 7 -- exp underflow
+ * 8 -- y0(0)
+ * 9 -- y0(-ve)
+ * 10-- y1(0)
+ * 11-- y1(-ve)
+ * 12-- yn(0)
+ * 13-- yn(-ve)
+ * 14-- lgamma(finite) overflow
+ * 15-- lgamma(-integer)
+ * 16-- log(0)
+ * 17-- log(x<0)
+ * 18-- log10(0)
+ * 19-- log10(x<0)
+ * 20-- pow(0.0,0.0)
+ * 21-- pow(x,y) overflow
+ * 22-- pow(x,y) underflow
+ * 23-- pow(0,negative)
+ * 24-- pow(neg,non-integral)
+ * 25-- sinh(finite) overflow
+ * 26-- sqrt(negative)
+ * 27-- fmod(x,0)
+ * 28-- remainder(x,0)
+ * 29-- acosh(x<1)
+ * 30-- atanh(|x|>1)
+ * 31-- atanh(|x|=1)
+ * 32-- scalb overflow
+ * 33-- scalb underflow
+ * 34-- j0(|x|>X_TLOSS)
+ * 35-- y0(x>X_TLOSS)
+ * 36-- j1(|x|>X_TLOSS)
+ * 37-- y1(x>X_TLOSS)
+ * 38-- jn(|x|>X_TLOSS, n)
+ * 39-- yn(x>X_TLOSS, n)
+ * 40-- gamma(finite) overflow
+ * 41-- gamma(-integer)
+ * 42-- pow(NaN,0.0)
+ */
+
+
+#ifdef __STDC__
+ double __kernel_standard(double x, double y, int type, int *err)
+#else
+ double __kernel_standard(x,y,type, err)
+ double x,y; int type;int *err;
+#endif
+{
+ struct exception exc;
+#ifndef HUGE_VAL /* this is the only routine that uses HUGE_VAL */
+#define HUGE_VAL inf
+ double inf = 0.0;
+ fd_twoints u;
+
+ u.d = inf;
+ __HI(u) = 0x7ff00000; /* set inf to infinite */
+ inf = u.d;
+#endif
+
+ *err = 0;
+
+#ifdef _USE_WRITE
+ (void) fflush(stdout);
+#endif
+ exc.arg1 = x;
+ exc.arg2 = y;
+ switch(type) {
+ case 1:
+ /* acos(|x|>1) */
+ exc.type = DOMAIN;
+ exc.name = "acos";
+ exc.retval = zero;
+ if (_LIB_VERSION == _POSIX_)
+ *err = EDOM;
+ else if (!fd_matherr(&exc)) {
+ if(_LIB_VERSION == _SVID_) {
+ (void) WRITE2("acos: DOMAIN error\n", 19);
+ }
+ *err = EDOM;
+ }
+ break;
+ case 2:
+ /* asin(|x|>1) */
+ exc.type = DOMAIN;
+ exc.name = "asin";
+ exc.retval = zero;
+ if(_LIB_VERSION == _POSIX_)
+ *err = EDOM;
+ else if (!fd_matherr(&exc)) {
+ if(_LIB_VERSION == _SVID_) {
+ (void) WRITE2("asin: DOMAIN error\n", 19);
+ }
+ *err = EDOM;
+ }
+ break;
+ case 3:
+ /* atan2(+-0,+-0) */
+ exc.arg1 = y;
+ exc.arg2 = x;
+ exc.type = DOMAIN;
+ exc.name = "atan2";
+ exc.retval = zero;
+ if(_LIB_VERSION == _POSIX_)
+ *err = EDOM;
+ else if (!fd_matherr(&exc)) {
+ if(_LIB_VERSION == _SVID_) {
+ (void) WRITE2("atan2: DOMAIN error\n", 20);
+ }
+ *err = EDOM;
+ }
+ break;
+ case 4:
+ /* hypot(finite,finite) overflow */
+ exc.type = OVERFLOW;
+ exc.name = "hypot";
+ if (_LIB_VERSION == _SVID_)
+ exc.retval = HUGE;
+ else
+ exc.retval = HUGE_VAL;
+ if (_LIB_VERSION == _POSIX_)
+ *err = ERANGE;
+ else if (!fd_matherr(&exc)) {
+ *err = ERANGE;
+ }
+ break;
+ case 5:
+ /* cosh(finite) overflow */
+ exc.type = OVERFLOW;
+ exc.name = "cosh";
+ if (_LIB_VERSION == _SVID_)
+ exc.retval = HUGE;
+ else
+ exc.retval = HUGE_VAL;
+ if (_LIB_VERSION == _POSIX_)
+ *err = ERANGE;
+ else if (!fd_matherr(&exc)) {
+ *err = ERANGE;
+ }
+ break;
+ case 6:
+ /* exp(finite) overflow */
+ exc.type = OVERFLOW;
+ exc.name = "exp";
+ if (_LIB_VERSION == _SVID_)
+ exc.retval = HUGE;
+ else
+ exc.retval = HUGE_VAL;
+ if (_LIB_VERSION == _POSIX_)
+ *err = ERANGE;
+ else if (!fd_matherr(&exc)) {
+ *err = ERANGE;
+ }
+ break;
+ case 7:
+ /* exp(finite) underflow */
+ exc.type = UNDERFLOW;
+ exc.name = "exp";
+ exc.retval = zero;
+ if (_LIB_VERSION == _POSIX_)
+ *err = ERANGE;
+ else if (!fd_matherr(&exc)) {
+ *err = ERANGE;
+ }
+ break;
+ case 8:
+ /* y0(0) = -inf */
+ exc.type = DOMAIN; /* should be SING for IEEE */
+ exc.name = "y0";
+ if (_LIB_VERSION == _SVID_)
+ exc.retval = -HUGE;
+ else
+ exc.retval = -HUGE_VAL;
+ if (_LIB_VERSION == _POSIX_)
+ *err = EDOM;
+ else if (!fd_matherr(&exc)) {
+ if (_LIB_VERSION == _SVID_) {
+ (void) WRITE2("y0: DOMAIN error\n", 17);
+ }
+ *err = EDOM;
+ }
+ break;
+ case 9:
+ /* y0(x<0) = NaN */
+ exc.type = DOMAIN;
+ exc.name = "y0";
+ if (_LIB_VERSION == _SVID_)
+ exc.retval = -HUGE;
+ else
+ exc.retval = -HUGE_VAL;
+ if (_LIB_VERSION == _POSIX_)
+ *err = EDOM;
+ else if (!fd_matherr(&exc)) {
+ if (_LIB_VERSION == _SVID_) {
+ (void) WRITE2("y0: DOMAIN error\n", 17);
+ }
+ *err = EDOM;
+ }
+ break;
+ case 10:
+ /* y1(0) = -inf */
+ exc.type = DOMAIN; /* should be SING for IEEE */
+ exc.name = "y1";
+ if (_LIB_VERSION == _SVID_)
+ exc.retval = -HUGE;
+ else
+ exc.retval = -HUGE_VAL;
+ if (_LIB_VERSION == _POSIX_)
+ *err = EDOM;
+ else if (!fd_matherr(&exc)) {
+ if (_LIB_VERSION == _SVID_) {
+ (void) WRITE2("y1: DOMAIN error\n", 17);
+ }
+ *err = EDOM;
+ }
+ break;
+ case 11:
+ /* y1(x<0) = NaN */
+ exc.type = DOMAIN;
+ exc.name = "y1";
+ if (_LIB_VERSION == _SVID_)
+ exc.retval = -HUGE;
+ else
+ exc.retval = -HUGE_VAL;
+ if (_LIB_VERSION == _POSIX_)
+ *err = EDOM;
+ else if (!fd_matherr(&exc)) {
+ if (_LIB_VERSION == _SVID_) {
+ (void) WRITE2("y1: DOMAIN error\n", 17);
+ }
+ *err = EDOM;
+ }
+ break;
+ case 12:
+ /* yn(n,0) = -inf */
+ exc.type = DOMAIN; /* should be SING for IEEE */
+ exc.name = "yn";
+ if (_LIB_VERSION == _SVID_)
+ exc.retval = -HUGE;
+ else
+ exc.retval = -HUGE_VAL;
+ if (_LIB_VERSION == _POSIX_)
+ *err = EDOM;
+ else if (!fd_matherr(&exc)) {
+ if (_LIB_VERSION == _SVID_) {
+ (void) WRITE2("yn: DOMAIN error\n", 17);
+ }
+ *err = EDOM;
+ }
+ break;
+ case 13:
+ /* yn(x<0) = NaN */
+ exc.type = DOMAIN;
+ exc.name = "yn";
+ if (_LIB_VERSION == _SVID_)
+ exc.retval = -HUGE;
+ else
+ exc.retval = -HUGE_VAL;
+ if (_LIB_VERSION == _POSIX_)
+ *err = EDOM;
+ else if (!fd_matherr(&exc)) {
+ if (_LIB_VERSION == _SVID_) {
+ (void) WRITE2("yn: DOMAIN error\n", 17);
+ }
+ *err = EDOM;
+ }
+ break;
+ case 14:
+ /* lgamma(finite) overflow */
+ exc.type = OVERFLOW;
+ exc.name = "lgamma";
+ if (_LIB_VERSION == _SVID_)
+ exc.retval = HUGE;
+ else
+ exc.retval = HUGE_VAL;
+ if (_LIB_VERSION == _POSIX_)
+ *err = ERANGE;
+ else if (!fd_matherr(&exc)) {
+ *err = ERANGE;
+ }
+ break;
+ case 15:
+ /* lgamma(-integer) or lgamma(0) */
+ exc.type = SING;
+ exc.name = "lgamma";
+ if (_LIB_VERSION == _SVID_)
+ exc.retval = HUGE;
+ else
+ exc.retval = HUGE_VAL;
+ if (_LIB_VERSION == _POSIX_)
+ *err = EDOM;
+ else if (!fd_matherr(&exc)) {
+ if (_LIB_VERSION == _SVID_) {
+ (void) WRITE2("lgamma: SING error\n", 19);
+ }
+ *err = EDOM;
+ }
+ break;
+ case 16:
+ /* log(0) */
+ exc.type = SING;
+ exc.name = "log";
+ if (_LIB_VERSION == _SVID_)
+ exc.retval = -HUGE;
+ else
+ exc.retval = -HUGE_VAL;
+ if (_LIB_VERSION == _POSIX_)
+ *err = ERANGE;
+ else if (!fd_matherr(&exc)) {
+ if (_LIB_VERSION == _SVID_) {
+ (void) WRITE2("log: SING error\n", 16);
+ }
+ *err = EDOM;
+ }
+ break;
+ case 17:
+ /* log(x<0) */
+ exc.type = DOMAIN;
+ exc.name = "log";
+ if (_LIB_VERSION == _SVID_)
+ exc.retval = -HUGE;
+ else
+ exc.retval = -HUGE_VAL;
+ if (_LIB_VERSION == _POSIX_)
+ *err = EDOM;
+ else if (!fd_matherr(&exc)) {
+ if (_LIB_VERSION == _SVID_) {
+ (void) WRITE2("log: DOMAIN error\n", 18);
+ }
+ *err = EDOM;
+ }
+ break;
+ case 18:
+ /* log10(0) */
+ exc.type = SING;
+ exc.name = "log10";
+ if (_LIB_VERSION == _SVID_)
+ exc.retval = -HUGE;
+ else
+ exc.retval = -HUGE_VAL;
+ if (_LIB_VERSION == _POSIX_)
+ *err = ERANGE;
+ else if (!fd_matherr(&exc)) {
+ if (_LIB_VERSION == _SVID_) {
+ (void) WRITE2("log10: SING error\n", 18);
+ }
+ *err = EDOM;
+ }
+ break;
+ case 19:
+ /* log10(x<0) */
+ exc.type = DOMAIN;
+ exc.name = "log10";
+ if (_LIB_VERSION == _SVID_)
+ exc.retval = -HUGE;
+ else
+ exc.retval = -HUGE_VAL;
+ if (_LIB_VERSION == _POSIX_)
+ *err = EDOM;
+ else if (!fd_matherr(&exc)) {
+ if (_LIB_VERSION == _SVID_) {
+ (void) WRITE2("log10: DOMAIN error\n", 20);
+ }
+ *err = EDOM;
+ }
+ break;
+ case 20:
+ /* pow(0.0,0.0) */
+ /* error only if _LIB_VERSION == _SVID_ */
+ exc.type = DOMAIN;
+ exc.name = "pow";
+ exc.retval = zero;
+ if (_LIB_VERSION != _SVID_) exc.retval = 1.0;
+ else if (!fd_matherr(&exc)) {
+ (void) WRITE2("pow(0,0): DOMAIN error\n", 23);
+ *err = EDOM;
+ }
+ break;
+ case 21:
+ /* pow(x,y) overflow */
+ exc.type = OVERFLOW;
+ exc.name = "pow";
+ if (_LIB_VERSION == _SVID_) {
+ exc.retval = HUGE;
+ y *= 0.5;
+ if(x<zero&&fd_rint(y)!=y) exc.retval = -HUGE;
+ } else {
+ exc.retval = HUGE_VAL;
+ y *= 0.5;
+ if(x<zero&&fd_rint(y)!=y) exc.retval = -HUGE_VAL;
+ }
+ if (_LIB_VERSION == _POSIX_)
+ *err = ERANGE;
+ else if (!fd_matherr(&exc)) {
+ *err = ERANGE;
+ }
+ break;
+ case 22:
+ /* pow(x,y) underflow */
+ exc.type = UNDERFLOW;
+ exc.name = "pow";
+ exc.retval = zero;
+ if (_LIB_VERSION == _POSIX_)
+ *err = ERANGE;
+ else if (!fd_matherr(&exc)) {
+ *err = ERANGE;
+ }
+ break;
+ case 23:
+ /* 0**neg */
+ exc.type = DOMAIN;
+ exc.name = "pow";
+ if (_LIB_VERSION == _SVID_)
+ exc.retval = zero;
+ else
+ exc.retval = -HUGE_VAL;
+ if (_LIB_VERSION == _POSIX_)
+ *err = EDOM;
+ else if (!fd_matherr(&exc)) {
+ if (_LIB_VERSION == _SVID_) {
+ (void) WRITE2("pow(0,neg): DOMAIN error\n", 25);
+ }
+ *err = EDOM;
+ }
+ break;
+ case 24:
+ /* neg**non-integral */
+ exc.type = DOMAIN;
+ exc.name = "pow";
+ if (_LIB_VERSION == _SVID_)
+ exc.retval = zero;
+ else
+ exc.retval = zero/zero; /* X/Open allow NaN */
+ if (_LIB_VERSION == _POSIX_)
+ *err = EDOM;
+ else if (!fd_matherr(&exc)) {
+ if (_LIB_VERSION == _SVID_) {
+ (void) WRITE2("neg**non-integral: DOMAIN error\n", 32);
+ }
+ *err = EDOM;
+ }
+ break;
+ case 25:
+ /* sinh(finite) overflow */
+ exc.type = OVERFLOW;
+ exc.name = "sinh";
+ if (_LIB_VERSION == _SVID_)
+ exc.retval = ( (x>zero) ? HUGE : -HUGE);
+ else
+ exc.retval = ( (x>zero) ? HUGE_VAL : -HUGE_VAL);
+ if (_LIB_VERSION == _POSIX_)
+ *err = ERANGE;
+ else if (!fd_matherr(&exc)) {
+ *err = ERANGE;
+ }
+ break;
+ case 26:
+ /* sqrt(x<0) */
+ exc.type = DOMAIN;
+ exc.name = "sqrt";
+ if (_LIB_VERSION == _SVID_)
+ exc.retval = zero;
+ else
+ exc.retval = zero/zero;
+ if (_LIB_VERSION == _POSIX_)
+ *err = EDOM;
+ else if (!fd_matherr(&exc)) {
+ if (_LIB_VERSION == _SVID_) {
+ (void) WRITE2("sqrt: DOMAIN error\n", 19);
+ }
+ *err = EDOM;
+ }
+ break;
+ case 27:
+ /* fmod(x,0) */
+ exc.type = DOMAIN;
+ exc.name = "fmod";
+ if (_LIB_VERSION == _SVID_)
+ exc.retval = x;
+ else
+ exc.retval = zero/zero;
+ if (_LIB_VERSION == _POSIX_)
+ *err = EDOM;
+ else if (!fd_matherr(&exc)) {
+ if (_LIB_VERSION == _SVID_) {
+ (void) WRITE2("fmod: DOMAIN error\n", 20);
+ }
+ *err = EDOM;
+ }
+ break;
+ case 28:
+ /* remainder(x,0) */
+ exc.type = DOMAIN;
+ exc.name = "remainder";
+ exc.retval = zero/zero;
+ if (_LIB_VERSION == _POSIX_)
+ *err = EDOM;
+ else if (!fd_matherr(&exc)) {
+ if (_LIB_VERSION == _SVID_) {
+ (void) WRITE2("remainder: DOMAIN error\n", 24);
+ }
+ *err = EDOM;
+ }
+ break;
+ case 29:
+ /* acosh(x<1) */
+ exc.type = DOMAIN;
+ exc.name = "acosh";
+ exc.retval = zero/zero;
+ if (_LIB_VERSION == _POSIX_)
+ *err = EDOM;
+ else if (!fd_matherr(&exc)) {
+ if (_LIB_VERSION == _SVID_) {
+ (void) WRITE2("acosh: DOMAIN error\n", 20);
+ }
+ *err = EDOM;
+ }
+ break;
+ case 30:
+ /* atanh(|x|>1) */
+ exc.type = DOMAIN;
+ exc.name = "atanh";
+ exc.retval = zero/zero;
+ if (_LIB_VERSION == _POSIX_)
+ *err = EDOM;
+ else if (!fd_matherr(&exc)) {
+ if (_LIB_VERSION == _SVID_) {
+ (void) WRITE2("atanh: DOMAIN error\n", 20);
+ }
+ *err = EDOM;
+ }
+ break;
+ case 31:
+ /* atanh(|x|=1) */
+ exc.type = SING;
+ exc.name = "atanh";
+ exc.retval = x/zero; /* sign(x)*inf */
+ if (_LIB_VERSION == _POSIX_)
+ *err = EDOM;
+ else if (!fd_matherr(&exc)) {
+ if (_LIB_VERSION == _SVID_) {
+ (void) WRITE2("atanh: SING error\n", 18);
+ }
+ *err = EDOM;
+ }
+ break;
+ case 32:
+ /* scalb overflow; SVID also returns +-HUGE_VAL */
+ exc.type = OVERFLOW;
+ exc.name = "scalb";
+ exc.retval = x > zero ? HUGE_VAL : -HUGE_VAL;
+ if (_LIB_VERSION == _POSIX_)
+ *err = ERANGE;
+ else if (!fd_matherr(&exc)) {
+ *err = ERANGE;
+ }
+ break;
+ case 33:
+ /* scalb underflow */
+ exc.type = UNDERFLOW;
+ exc.name = "scalb";
+ exc.retval = fd_copysign(zero,x);
+ if (_LIB_VERSION == _POSIX_)
+ *err = ERANGE;
+ else if (!fd_matherr(&exc)) {
+ *err = ERANGE;
+ }
+ break;
+ case 34:
+ /* j0(|x|>X_TLOSS) */
+ exc.type = TLOSS;
+ exc.name = "j0";
+ exc.retval = zero;
+ if (_LIB_VERSION == _POSIX_)
+ *err = ERANGE;
+ else if (!fd_matherr(&exc)) {
+ if (_LIB_VERSION == _SVID_) {
+ (void) WRITE2(exc.name, 2);
+ (void) WRITE2(": TLOSS error\n", 14);
+ }
+ *err = ERANGE;
+ }
+ break;
+ case 35:
+ /* y0(x>X_TLOSS) */
+ exc.type = TLOSS;
+ exc.name = "y0";
+ exc.retval = zero;
+ if (_LIB_VERSION == _POSIX_)
+ *err = ERANGE;
+ else if (!fd_matherr(&exc)) {
+ if (_LIB_VERSION == _SVID_) {
+ (void) WRITE2(exc.name, 2);
+ (void) WRITE2(": TLOSS error\n", 14);
+ }
+ *err = ERANGE;
+ }
+ break;
+ case 36:
+ /* j1(|x|>X_TLOSS) */
+ exc.type = TLOSS;
+ exc.name = "j1";
+ exc.retval = zero;
+ if (_LIB_VERSION == _POSIX_)
+ *err = ERANGE;
+ else if (!fd_matherr(&exc)) {
+ if (_LIB_VERSION == _SVID_) {
+ (void) WRITE2(exc.name, 2);
+ (void) WRITE2(": TLOSS error\n", 14);
+ }
+ *err = ERANGE;
+ }
+ break;
+ case 37:
+ /* y1(x>X_TLOSS) */
+ exc.type = TLOSS;
+ exc.name = "y1";
+ exc.retval = zero;
+ if (_LIB_VERSION == _POSIX_)
+ *err = ERANGE;
+ else if (!fd_matherr(&exc)) {
+ if (_LIB_VERSION == _SVID_) {
+ (void) WRITE2(exc.name, 2);
+ (void) WRITE2(": TLOSS error\n", 14);
+ }
+ *err = ERANGE;
+ }
+ break;
+ case 38:
+ /* jn(|x|>X_TLOSS) */
+ exc.type = TLOSS;
+ exc.name = "jn";
+ exc.retval = zero;
+ if (_LIB_VERSION == _POSIX_)
+ *err = ERANGE;
+ else if (!fd_matherr(&exc)) {
+ if (_LIB_VERSION == _SVID_) {
+ (void) WRITE2(exc.name, 2);
+ (void) WRITE2(": TLOSS error\n", 14);
+ }
+ *err = ERANGE;
+ }
+ break;
+ case 39:
+ /* yn(x>X_TLOSS) */
+ exc.type = TLOSS;
+ exc.name = "yn";
+ exc.retval = zero;
+ if (_LIB_VERSION == _POSIX_)
+ *err = ERANGE;
+ else if (!fd_matherr(&exc)) {
+ if (_LIB_VERSION == _SVID_) {
+ (void) WRITE2(exc.name, 2);
+ (void) WRITE2(": TLOSS error\n", 14);
+ }
+ *err = ERANGE;
+ }
+ break;
+ case 40:
+ /* gamma(finite) overflow */
+ exc.type = OVERFLOW;
+ exc.name = "gamma";
+ if (_LIB_VERSION == _SVID_)
+ exc.retval = HUGE;
+ else
+ exc.retval = HUGE_VAL;
+ if (_LIB_VERSION == _POSIX_)
+ *err = ERANGE;
+ else if (!fd_matherr(&exc)) {
+ *err = ERANGE;
+ }
+ break;
+ case 41:
+ /* gamma(-integer) or gamma(0) */
+ exc.type = SING;
+ exc.name = "gamma";
+ if (_LIB_VERSION == _SVID_)
+ exc.retval = HUGE;
+ else
+ exc.retval = HUGE_VAL;
+ if (_LIB_VERSION == _POSIX_)
+ *err = EDOM;
+ else if (!fd_matherr(&exc)) {
+ if (_LIB_VERSION == _SVID_) {
+ (void) WRITE2("gamma: SING error\n", 18);
+ }
+ *err = EDOM;
+ }
+ break;
+ case 42:
+ /* pow(NaN,0.0) */
+ /* error only if _LIB_VERSION == _SVID_ & _XOPEN_ */
+ exc.type = DOMAIN;
+ exc.name = "pow";
+ exc.retval = x;
+ if (_LIB_VERSION == _IEEE_ ||
+ _LIB_VERSION == _POSIX_) exc.retval = 1.0;
+ else if (!fd_matherr(&exc)) {
+ *err = EDOM;
+ }
+ break;
+ }
+ return exc.retval;
+}
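The switch above only fills in exc.retval and *err; the calling wrapper decides what to surface. As a rough standalone illustration (not taken from this patch), the sketch below mimics that split for the sqrt(x<0) situation handled by case 26 above: a hypothetical demo_sqrt computes a fallback value, records EDOM in an out-parameter, and the caller folds it into errno. Only standard C headers are assumed.

#include <errno.h>
#include <math.h>
#include <stdio.h>

/* Hypothetical wrapper, illustration only.  A kernel-style helper reports
 * the problem through an out-parameter (as __kernel_standard does with
 * *err); the caller then folds that into errno for POSIX-style clients. */
static double demo_sqrt(double x, int *err)
{
    *err = 0;
    if (x < 0.0) {          /* mirrors "case 26: sqrt(x<0)" above */
        *err = EDOM;
        return NAN;         /* non-SVID behaviour: return NaN     */
    }
    return sqrt(x);
}

int main(void)
{
    int err;
    double r = demo_sqrt(-1.0, &err);
    if (err != 0)
        errno = err;
    printf("demo_sqrt(-1) = %g, errno = %d (EDOM = %d)\n", r, errno, EDOM);
    return 0;
}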
diff --git a/third_party/js-1.7/fdlibm/k_tan.c b/third_party/js-1.7/fdlibm/k_tan.c
new file mode 100644
index 0000000..1e7681b
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/k_tan.c
@@ -0,0 +1,170 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)k_tan.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/* __kernel_tan( x, y, k )
+ * kernel tan function on [-pi/4, pi/4], pi/4 ~ 0.7854
+ * Input x is assumed to be bounded by ~pi/4 in magnitude.
+ * Input y is the tail of x.
+ * Input k indicates whether tan (if k=1) or
+ * -1/tan (if k= -1) is returned.
+ *
+ * Algorithm
+ * 1. Since tan(-x) = -tan(x), we need only to consider positive x.
+ * 2. if x < 2^-28 (hx < 0x3e300000), return x with inexact if x != 0.
+ * 3. tan(x) is approximated by an odd polynomial of degree 27 on
+ * [0,0.67434]
+ * tan(x) ~ x + T1*x^3 + ... + T13*x^27
+ * where
+ * |tan(x)/x - (1 + T1*x^2 + T2*x^4 + ... + T13*x^26)| <= 2^-59.2
+ *
+ * Note: tan(x+y) = tan(x) + tan'(x)*y
+ * ~ tan(x) + (1+x*x)*y
+ * Therefore, for better accuracy in computing tan(x+y), let
+ * r = x^3*(T2+x^2*(T3+x^2*(...+x^2*(T12+x^2*T13))))
+ * then
+ * tan(x+y) = x + (T1*x^3 + (x^2*(r+y)+y))
+ *
+ * 4. For x in [0.67434,pi/4], let y = pi/4 - x, then
+ * tan(x) = tan(pi/4-y) = (1-tan(y))/(1+tan(y))
+ * = 1 - 2*(tan(y) - (tan(y)^2)/(1+tan(y)))
+ */
+
+#include "fdlibm.h"
+#ifdef __STDC__
+static const double
+#else
+static double
+#endif
+one = 1.00000000000000000000e+00, /* 0x3FF00000, 0x00000000 */
+pio4 = 7.85398163397448278999e-01, /* 0x3FE921FB, 0x54442D18 */
+pio4lo= 3.06161699786838301793e-17, /* 0x3C81A626, 0x33145C07 */
+T[] = {
+ 3.33333333333334091986e-01, /* 0x3FD55555, 0x55555563 */
+ 1.33333333333201242699e-01, /* 0x3FC11111, 0x1110FE7A */
+ 5.39682539762260521377e-02, /* 0x3FABA1BA, 0x1BB341FE */
+ 2.18694882948595424599e-02, /* 0x3F9664F4, 0x8406D637 */
+ 8.86323982359930005737e-03, /* 0x3F8226E3, 0xE96E8493 */
+ 3.59207910759131235356e-03, /* 0x3F6D6D22, 0xC9560328 */
+ 1.45620945432529025516e-03, /* 0x3F57DBC8, 0xFEE08315 */
+ 5.88041240820264096874e-04, /* 0x3F4344D8, 0xF2F26501 */
+ 2.46463134818469906812e-04, /* 0x3F3026F7, 0x1A8D1068 */
+ 7.81794442939557092300e-05, /* 0x3F147E88, 0xA03792A6 */
+ 7.14072491382608190305e-05, /* 0x3F12B80F, 0x32F0A7E9 */
+ -1.85586374855275456654e-05, /* 0xBEF375CB, 0xDB605373 */
+ 2.59073051863633712884e-05, /* 0x3EFB2A70, 0x74BF7AD4 */
+};
+
+#ifdef __STDC__
+ double __kernel_tan(double x, double y, int iy)
+#else
+ double __kernel_tan(x, y, iy)
+ double x,y; int iy;
+#endif
+{
+ fd_twoints u;
+ double z,r,v,w,s;
+ int ix,hx;
+ u.d = x;
+ hx = __HI(u); /* high word of x */
+ ix = hx&0x7fffffff; /* high word of |x| */
+ if(ix<0x3e300000) /* x < 2**-28 */
+ {if((int)x==0) { /* generate inexact */
+ u.d =x;
+ if(((ix|__LO(u))|(iy+1))==0) return one/fd_fabs(x);
+ else return (iy==1)? x: -one/x;
+ }
+ }
+ if(ix>=0x3FE59428) { /* |x|>=0.6744 */
+ if(hx<0) {x = -x; y = -y;}
+ z = pio4-x;
+ w = pio4lo-y;
+ x = z+w; y = 0.0;
+ }
+ z = x*x;
+ w = z*z;
+ /* Break x^5*(T[1]+x^2*T[2]+...) into
+ * x^5(T[1]+x^4*T[3]+...+x^20*T[11]) +
+ * x^5(x^2*(T[2]+x^4*T[4]+...+x^22*T[12]))
+ */
+ r = T[1]+w*(T[3]+w*(T[5]+w*(T[7]+w*(T[9]+w*T[11]))));
+ v = z*(T[2]+w*(T[4]+w*(T[6]+w*(T[8]+w*(T[10]+w*T[12])))));
+ s = z*x;
+ r = y + z*(s*(r+v)+y);
+ r += T[0]*s;
+ w = x+r;
+ if(ix>=0x3FE59428) {
+ v = (double)iy;
+ return (double)(1-((hx>>30)&2))*(v-2.0*(x-(w*w/(w+v)-r)));
+ }
+ if(iy==1) return w;
+ else { /* if allow error up to 2 ulp,
+ simply return -1.0/(x+r) here */
+ /* compute -1.0/(x+r) accurately */
+ double a,t;
+ z = w;
+ u.d = z;
+ __LO(u) = 0;
+ z = u.d;
+ v = r-(z - x); /* z+v = r+x */
+ t = a = -1.0/w; /* a = -1.0/w */
+ u.d = t;
+ __LO(u) = 0;
+ t = u.d;
+ s = 1.0+t*z;
+ return t+a*(s+t*v);
+ }
+}
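To see the core approximation at work outside the kernel, here is a small hedged sketch (not part of the vendored sources) that evaluates the truncated odd series x + T1*x^3 + ... using the first four coefficients of the T[] table above and compares it with the host libm's tan() on [0, 0.65); with only four terms the error is of course far larger than the full 13-term kernel's 2^-59.2 bound.

#include <math.h>
#include <stdio.h>

/* First four coefficients of the tan kernel's odd polynomial (T[0..3]). */
static const double T0c = 3.33333333333334091986e-01,
                    T1c = 1.33333333333201242699e-01,
                    T2c = 5.39682539762260521377e-02,
                    T3c = 2.18694882948595424599e-02;

int main(void)
{
    double x;
    for (x = 0.1; x < 0.65; x += 0.1) {
        double z = x * x;
        /* tan(x) ~ x + x^3*(T0c + z*(T1c + z*(T2c + z*T3c))) on [0,0.67434] */
        double approx = x + x * z * (T0c + z * (T1c + z * (T2c + z * T3c)));
        printf("x=%.1f  poly=%.12f  tan=%.12f  diff=%.2e\n",
               x, approx, tan(x), approx - tan(x));
    }
    return 0;
}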
diff --git a/third_party/js-1.7/fdlibm/s_asinh.c b/third_party/js-1.7/fdlibm/s_asinh.c
new file mode 100644
index 0000000..fdf70a9
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/s_asinh.c
@@ -0,0 +1,101 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)s_asinh.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/* asinh(x)
+ * Method :
+ * Based on
+ * asinh(x) = sign(x) * log [ |x| + sqrt(x*x+1) ]
+ * we have
+ * asinh(x) := x if 1+x*x=1,
+ * := sign(x)*(log(|x|)+ln2) for large |x|, else
+ * := sign(x)*log(2|x|+1/(|x|+sqrt(x*x+1))) if |x|>2, else
+ * := sign(x)*log1p(|x| + x^2/(1 + sqrt(1+x^2)))
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+static const double
+#else
+static double
+#endif
+one = 1.00000000000000000000e+00, /* 0x3FF00000, 0x00000000 */
+ln2 = 6.93147180559945286227e-01, /* 0x3FE62E42, 0xFEFA39EF */
+really_big= 1.00000000000000000000e+300;
+
+#ifdef __STDC__
+ double fd_asinh(double x)
+#else
+ double fd_asinh(x)
+ double x;
+#endif
+{
+ fd_twoints u;
+ double t,w;
+ int hx,ix;
+ u.d = x;
+ hx = __HI(u);
+ ix = hx&0x7fffffff;
+ if(ix>=0x7ff00000) return x+x; /* x is inf or NaN */
+ if(ix< 0x3e300000) { /* |x|<2**-28 */
+ if(really_big+x>one) return x; /* return x inexact except 0 */
+ }
+ if(ix>0x41b00000) { /* |x| > 2**28 */
+ w = __ieee754_log(fd_fabs(x))+ln2;
+ } else if (ix>0x40000000) { /* 2**28 > |x| > 2.0 */
+ t = fd_fabs(x);
+ w = __ieee754_log(2.0*t+one/(fd_sqrt(x*x+one)+t));
+ } else { /* 2.0 > |x| > 2**-28 */
+ t = x*x;
+ w =fd_log1p(fd_fabs(x)+t/(one+fd_sqrt(one+t)));
+ }
+ if(hx>0) return w; else return -w;
+}
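The middle branch of fd_asinh (2.0 > |x| > 2**-28) is the log1p form from the header comment. A small self-contained check of that identity, assuming the host C99 libm provides log1p() and asinh():

#include <math.h>
#include <stdio.h>

int main(void)
{
    /* 2.0 > |x| > 2**-28 branch of the header comment:
     * asinh(x) = sign(x)*log1p(|x| + x^2/(1 + sqrt(1+x^2))) */
    double xs[] = { 0.001, 0.5, -0.5, 1.9 };
    int i;
    for (i = 0; i < (int)(sizeof xs / sizeof xs[0]); i++) {
        double x = xs[i];
        double t = x * x;
        double w = log1p(fabs(x) + t / (1.0 + sqrt(1.0 + t)));
        double r = (x > 0) ? w : -w;
        printf("x=%6.3f  formula=%.17g  asinh=%.17g\n", x, r, asinh(x));
    }
    return 0;
}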
diff --git a/third_party/js-1.7/fdlibm/s_atan.c b/third_party/js-1.7/fdlibm/s_atan.c
new file mode 100644
index 0000000..99a00c6
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/s_atan.c
@@ -0,0 +1,175 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)s_atan.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ *
+ */
+
+/* atan(x)
+ * Method
+ * 1. Reduce x to positive by atan(x) = -atan(-x).
+ * 2. According to the integer k = 4t+0.25 chopped (with t = x), the argument
+ * is further reduced to one of the following intervals and the
+ * arctangent of t is evaluated by the corresponding formula:
+ *
+ * [0,7/16] atan(x) = t-t^3*(a1+t^2*(a2+...(a10+t^2*a11)...)
+ * [7/16,11/16] atan(x) = atan(1/2) + atan( (t-0.5)/(1+t/2) )
+ * [11/16,19/16] atan(x) = atan( 1 ) + atan( (t-1)/(1+t) )
+ * [19/16,39/16] atan(x) = atan(3/2) + atan( (t-1.5)/(1+1.5t) )
+ * [39/16,INF] atan(x) = atan(INF) + atan( -1/t )
+ *
+ * Constants:
+ * The hexadecimal values are the intended ones for the following
+ * constants. The decimal values may be used, provided that the
+ * compiler will convert from decimal to binary accurately enough
+ * to produce the hexadecimal values shown.
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+static const double atanhi[] = {
+#else
+static double atanhi[] = {
+#endif
+ 4.63647609000806093515e-01, /* atan(0.5)hi 0x3FDDAC67, 0x0561BB4F */
+ 7.85398163397448278999e-01, /* atan(1.0)hi 0x3FE921FB, 0x54442D18 */
+ 9.82793723247329054082e-01, /* atan(1.5)hi 0x3FEF730B, 0xD281F69B */
+ 1.57079632679489655800e+00, /* atan(inf)hi 0x3FF921FB, 0x54442D18 */
+};
+
+#ifdef __STDC__
+static const double atanlo[] = {
+#else
+static double atanlo[] = {
+#endif
+ 2.26987774529616870924e-17, /* atan(0.5)lo 0x3C7A2B7F, 0x222F65E2 */
+ 3.06161699786838301793e-17, /* atan(1.0)lo 0x3C81A626, 0x33145C07 */
+ 1.39033110312309984516e-17, /* atan(1.5)lo 0x3C700788, 0x7AF0CBBD */
+ 6.12323399573676603587e-17, /* atan(inf)lo 0x3C91A626, 0x33145C07 */
+};
+
+#ifdef __STDC__
+static const double aT[] = {
+#else
+static double aT[] = {
+#endif
+ 3.33333333333329318027e-01, /* 0x3FD55555, 0x5555550D */
+ -1.99999999998764832476e-01, /* 0xBFC99999, 0x9998EBC4 */
+ 1.42857142725034663711e-01, /* 0x3FC24924, 0x920083FF */
+ -1.11111104054623557880e-01, /* 0xBFBC71C6, 0xFE231671 */
+ 9.09088713343650656196e-02, /* 0x3FB745CD, 0xC54C206E */
+ -7.69187620504482999495e-02, /* 0xBFB3B0F2, 0xAF749A6D */
+ 6.66107313738753120669e-02, /* 0x3FB10D66, 0xA0D03D51 */
+ -5.83357013379057348645e-02, /* 0xBFADDE2D, 0x52DEFD9A */
+ 4.97687799461593236017e-02, /* 0x3FA97B4B, 0x24760DEB */
+ -3.65315727442169155270e-02, /* 0xBFA2B444, 0x2C6A6C2F */
+ 1.62858201153657823623e-02, /* 0x3F90AD3A, 0xE322DA11 */
+};
+
+#ifdef __STDC__
+ static const double
+#else
+ static double
+#endif
+one = 1.0,
+really_big = 1.0e300;
+
+#ifdef __STDC__
+ double fd_atan(double x)
+#else
+ double fd_atan(x)
+ double x;
+#endif
+{
+ fd_twoints u;
+ double w,s1,s2,z;
+ int ix,hx,id;
+
+ u.d = x;
+ hx = __HI(u);
+ ix = hx&0x7fffffff;
+ if(ix>=0x44100000) { /* if |x| >= 2^66 */
+ u.d = x;
+ if(ix>0x7ff00000||
+ (ix==0x7ff00000&&(__LO(u)!=0)))
+ return x+x; /* NaN */
+ if(hx>0) return atanhi[3]+atanlo[3];
+ else return -atanhi[3]-atanlo[3];
+ } if (ix < 0x3fdc0000) { /* |x| < 0.4375 */
+ if (ix < 0x3e200000) { /* |x| < 2^-29 */
+ if(really_big+x>one) return x; /* raise inexact */
+ }
+ id = -1;
+ } else {
+ x = fd_fabs(x);
+ if (ix < 0x3ff30000) { /* |x| < 1.1875 */
+ if (ix < 0x3fe60000) { /* 7/16 <=|x|<11/16 */
+ id = 0; x = (2.0*x-one)/(2.0+x);
+ } else { /* 11/16<=|x|< 19/16 */
+ id = 1; x = (x-one)/(x+one);
+ }
+ } else {
+ if (ix < 0x40038000) { /* |x| < 2.4375 */
+ id = 2; x = (x-1.5)/(one+1.5*x);
+ } else { /* 2.4375 <= |x| < 2^66 */
+ id = 3; x = -1.0/x;
+ }
+ }}
+ /* end of argument reduction */
+ z = x*x;
+ w = z*z;
+ /* break sum from i=0 to 10 aT[i]z**(i+1) into odd and even poly */
+ s1 = z*(aT[0]+w*(aT[2]+w*(aT[4]+w*(aT[6]+w*(aT[8]+w*aT[10])))));
+ s2 = w*(aT[1]+w*(aT[3]+w*(aT[5]+w*(aT[7]+w*aT[9]))));
+ if (id<0) return x - x*(s1+s2);
+ else {
+ z = atanhi[id] - ((x*(s1+s2) - atanlo[id]) - x);
+ return (hx<0)? -z:z;
+ }
+}
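The argument-reduction identities in the header comment can be spot-checked numerically. A minimal sketch, assuming only the host libm's atan(), for the 19/16 <= |x| < 39/16 interval:

#include <math.h>
#include <stdio.h>

int main(void)
{
    /* Reduction used for 19/16 <= |x| < 39/16:
     *   atan(x) = atan(1.5) + atan((x - 1.5)/(1 + 1.5*x)) */
    double x = 2.0;
    double reduced = atan(1.5) + atan((x - 1.5) / (1.0 + 1.5 * x));
    printf("atan(%g)     = %.17g\n", x, atan(x));
    printf("reduced form = %.17g\n", reduced);
    return 0;
}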
diff --git a/third_party/js-1.7/fdlibm/s_cbrt.c b/third_party/js-1.7/fdlibm/s_cbrt.c
new file mode 100644
index 0000000..4aed19b
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/s_cbrt.c
@@ -0,0 +1,133 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)s_cbrt.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ *
+ */
+
+#include "fdlibm.h"
+
+/* cbrt(x)
+ * Return cube root of x
+ */
+#ifdef __STDC__
+static const unsigned
+#else
+static unsigned
+#endif
+ B1 = 715094163, /* B1 = (682-0.03306235651)*2**20 */
+ B2 = 696219795; /* B2 = (664-0.03306235651)*2**20 */
+
+#ifdef __STDC__
+static const double
+#else
+static double
+#endif
+C = 5.42857142857142815906e-01, /* 19/35 = 0x3FE15F15, 0xF15F15F1 */
+D = -7.05306122448979611050e-01, /* -864/1225 = 0xBFE691DE, 0x2532C834 */
+E = 1.41428571428571436819e+00, /* 99/70 = 0x3FF6A0EA, 0x0EA0EA0F */
+F = 1.60714285714285720630e+00, /* 45/28 = 0x3FF9B6DB, 0x6DB6DB6E */
+G = 3.57142857142857150787e-01; /* 5/14 = 0x3FD6DB6D, 0xB6DB6DB7 */
+
+#ifdef __STDC__
+ double fd_cbrt(double x)
+#else
+ double fd_cbrt(x)
+ double x;
+#endif
+{
+ fd_twoints u;
+ int hx;
+ double r,s,t=0.0,w;
+ unsigned sign;
+
+ u.d = x;
+ hx = __HI(u); /* high word of x */
+ sign=hx&0x80000000; /* sign= sign(x) */
+ hx ^=sign;
+ if(hx>=0x7ff00000) return(x+x); /* cbrt(NaN,INF) is itself */
+ if((hx|__LO(u))==0) {
+ x = u.d;
+ return(x); /* cbrt(0) is itself */
+ }
+ u.d = x;
+ __HI(u) = hx; /* x <- |x| */
+ x = u.d;
+ /* rough cbrt to 5 bits */
+ if(hx<0x00100000) /* subnormal number */
+ {u.d = t; __HI(u)=0x43500000; t=u.d; /* set t= 2**54 */
+ t*=x; u.d = t; __HI(u)=__HI(u)/3+B2; t = u.d; /* propagate adjusted high word back into t */
+ }
+ else {
+ u.d = t; __HI(u)=hx/3+B1; t = u.d;
+ }
+
+
+ /* new cbrt to 23 bits, may be implemented in single precision */
+ r=t*t/x;
+ s=C+r*t;
+ t*=G+F/(s+E+D/s);
+
+ /* chopped to 20 bits and make it larger than cbrt(x) */
+ u.d = t;
+ __LO(u)=0; __HI(u)+=0x00000001;
+ t = u.d;
+
+ /* one step newton iteration to 53 bits with error less than 0.667 ulps */
+ s=t*t; /* t*t is exact */
+ r=x/s;
+ w=t+t;
+ r=(r-t)/(w+r); /* r-s is exact */
+ t=t+t*r;
+
+ /* restore the sign bit */
+ u.d = t;
+ __HI(u) |= sign;
+ t = u.d;
+ return(t);
+}
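The final correction in fd_cbrt (s=t*t; r=x/s; w=t+t; r=(r-t)/(w+r); t=t+t*r) converges very quickly even from a crude starting value. An illustrative standalone sketch, with the host libm's cbrt() used only for comparison:

#include <math.h>
#include <stdio.h>

int main(void)
{
    double x = 10.0;
    double t = 2.0;                     /* deliberately rough first guess */
    int i;
    for (i = 0; i < 3; i++) {
        /* the correction step from the end of fd_cbrt:
         * t <- t + t*(x/t^2 - t) / (2*t + x/t^2) */
        double s = t * t;
        double r = x / s;
        double w = t + t;
        r = (r - t) / (w + r);
        t = t + t * r;
        printf("iteration %d: t = %.17g\n", i + 1, t);
    }
    printf("cbrt(%g)     = %.17g\n", x, cbrt(x));
    return 0;
}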
diff --git a/third_party/js-1.7/fdlibm/s_ceil.c b/third_party/js-1.7/fdlibm/s_ceil.c
new file mode 100644
index 0000000..826bcac
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/s_ceil.c
@@ -0,0 +1,120 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)s_ceil.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * ceil(x)
+ * Return x rounded toward +inf to integral value
+ * Method:
+ * Bit twiddling.
+ * Exception:
+ * Inexact flag raised if x not equal to ceil(x).
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+static const double really_big = 1.0e300;
+#else
+static double really_big = 1.0e300;
+#endif
+
+#ifdef __STDC__
+ double fd_ceil(double x)
+#else
+ double fd_ceil(x)
+ double x;
+#endif
+{
+ fd_twoints u;
+ int i0,i1,j0;
+ unsigned i,j;
+ u.d = x;
+ i0 = __HI(u);
+ i1 = __LO(u);
+ j0 = ((i0>>20)&0x7ff)-0x3ff;
+ if(j0<20) {
+ if(j0<0) { /* raise inexact if x != 0 */
+ if(really_big+x>0.0) {/* return -0 (x<0) or 1 (x>0) if |x|<1 */
+ if(i0<0) {i0=0x80000000;i1=0;}
+ else if((i0|i1)!=0) { i0=0x3ff00000;i1=0;}
+ }
+ } else {
+ i = (0x000fffff)>>j0;
+ if(((i0&i)|i1)==0) return x; /* x is integral */
+ if(really_big+x>0.0) { /* raise inexact flag */
+ if(i0>0) i0 += (0x00100000)>>j0;
+ i0 &= (~i); i1=0;
+ }
+ }
+ } else if (j0>51) {
+ if(j0==0x400) return x+x; /* inf or NaN */
+ else return x; /* x is integral */
+ } else {
+ i = ((unsigned)(0xffffffff))>>(j0-20);
+ if((i1&i)==0) return x; /* x is integral */
+ if(really_big+x>0.0) { /* raise inexact flag */
+ if(i0>0) {
+ if(j0==20) i0+=1;
+ else {
+ j = i1 + (1<<(52-j0));
+ if((int)j<i1) i0+=1; /* got a carry */
+ i1 = j;
+ }
+ }
+ i1 &= (~i);
+ }
+ }
+ u.d = x;
+ __HI(u) = i0;
+ __LO(u) = i1;
+ x = u.d;
+ return x;
+}
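fd_ceil works purely on the IEEE-754 bit pattern: j0 = ((i0>>20)&0x7ff) - 0x3ff is the unbiased exponent, which tells how many fraction bits lie below the binary point. A small sketch of that exponent extraction on the full 64-bit view (assumes IEEE-754 doubles and standard headers only):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Extract the unbiased exponent of a double, the quantity fd_ceil derives
 * from its high word as ((i0>>20)&0x7ff) - 0x3ff. */
static int unbiased_exponent(double x)
{
    uint64_t bits;
    memcpy(&bits, &x, sizeof bits);      /* assumes IEEE-754 double */
    return (int)((bits >> 52) & 0x7ff) - 0x3ff;
}

int main(void)
{
    printf("exponent(0.75) = %d\n", unbiased_exponent(0.75));   /* -1 */
    printf("exponent(6.5)  = %d\n", unbiased_exponent(6.5));    /*  2 */
    printf("exponent(1e20) = %d\n", unbiased_exponent(1e20));   /* 66 */
    return 0;
}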
diff --git a/third_party/js-1.7/fdlibm/s_copysign.c b/third_party/js-1.7/fdlibm/s_copysign.c
new file mode 100644
index 0000000..8869a12
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/s_copysign.c
@@ -0,0 +1,72 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)s_copysign.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * copysign(double x, double y)
+ * copysign(x,y) returns a value with the magnitude of x and
+ * with the sign bit of y.
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+ double fd_copysign(double x, double y)
+#else
+ double fd_copysign(x,y)
+ double x,y;
+#endif
+{
+ fd_twoints ux, uy;
+ ux.d = x; uy.d = y;
+ __HI(ux) = (__HI(ux)&0x7fffffff)|(__HI(uy)&0x80000000);
+ x = ux.d;
+ return x;
+}
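The same sign transplant can be written on a single 64-bit view of the double; an illustrative sketch (not the patch's code), assuming IEEE-754 doubles:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Same idea as fd_copysign, expressed on the full 64-bit pattern:
 * keep x's magnitude bits, take y's sign bit. */
static double copysign_bits(double x, double y)
{
    uint64_t ux, uy;
    memcpy(&ux, &x, sizeof ux);
    memcpy(&uy, &y, sizeof uy);
    ux = (ux & 0x7fffffffffffffffULL) | (uy & 0x8000000000000000ULL);
    memcpy(&x, &ux, sizeof x);
    return x;
}

int main(void)
{
    printf("%g %g %g\n",
           copysign_bits(3.5, -1.0),    /* -3.5 */
           copysign_bits(-2.0, 1.0),    /*  2.0 */
           copysign_bits(0.0, -1.0));   /* -0   */
    return 0;
}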
diff --git a/third_party/js-1.7/fdlibm/s_cos.c b/third_party/js-1.7/fdlibm/s_cos.c
new file mode 100644
index 0000000..3fb0a6b
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/s_cos.c
@@ -0,0 +1,118 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)s_cos.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/* cos(x)
+ * Return cosine function of x.
+ *
+ * kernel function:
+ * __kernel_sin ... sine function on [-pi/4,pi/4]
+ * __kernel_cos ... cosine function on [-pi/4,pi/4]
+ * __ieee754_rem_pio2 ... argument reduction routine
+ *
+ * Method.
+ * Let S,C and T denote the sin, cos and tan respectively on
+ * [-PI/4, +PI/4]. Reduce the argument x to y1+y2 = x-k*pi/2
+ * in [-pi/4 , +pi/4], and let n = k mod 4.
+ * We have
+ *
+ * n sin(x) cos(x) tan(x)
+ * ----------------------------------------------------------
+ * 0 S C T
+ * 1 C -S -1/T
+ * 2 -S -C T
+ * 3 -C S -1/T
+ * ----------------------------------------------------------
+ *
+ * Special cases:
+ * Let trig be any of sin, cos, or tan.
+ * trig(+-INF) is NaN, with signals;
+ * trig(NaN) is that NaN;
+ *
+ * Accuracy:
+ * TRIG(x) returns trig(x) nearly rounded
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+ double fd_cos(double x)
+#else
+ double fd_cos(x)
+ double x;
+#endif
+{
+ fd_twoints u;
+ double y[2],z=0.0;
+ int n, ix;
+
+ /* High word of x. */
+ u.d = x;
+ ix = __HI(u);
+
+ /* |x| ~< pi/4 */
+ ix &= 0x7fffffff;
+ if(ix <= 0x3fe921fb) return __kernel_cos(x,z);
+
+ /* cos(Inf or NaN) is NaN */
+ else if (ix>=0x7ff00000) return x-x;
+
+ /* argument reduction needed */
+ else {
+ n = __ieee754_rem_pio2(x,y);
+ switch(n&3) {
+ case 0: return __kernel_cos(y[0],y[1]);
+ case 1: return -__kernel_sin(y[0],y[1],1);
+ case 2: return -__kernel_cos(y[0],y[1]);
+ default:
+ return __kernel_sin(y[0],y[1],1);
+ }
+ }
+}
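The quadrant table in the header comment is easy to verify numerically. A short sketch, assuming only the host libm's sin() and cos():

#include <math.h>
#include <stdio.h>

int main(void)
{
    /* Quadrant table from the header comment: with x = y + n*pi/2,
     * cos(x) is cos(y), -sin(y), -cos(y), sin(y) for n mod 4 = 0,1,2,3. */
    const double half_pi = 1.57079632679489661923;
    double y = 0.3;
    int n;
    for (n = 0; n < 4; n++) {
        double x = y + n * half_pi;
        double expected;
        switch (n & 3) {
        case 0:  expected =  cos(y); break;
        case 1:  expected = -sin(y); break;
        case 2:  expected = -cos(y); break;
        default: expected =  sin(y); break;
        }
        printf("n=%d  cos(x)=%.15f  table=%.15f\n", n, cos(x), expected);
    }
    return 0;
}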
diff --git a/third_party/js-1.7/fdlibm/s_erf.c b/third_party/js-1.7/fdlibm/s_erf.c
new file mode 100644
index 0000000..6eae8de
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/s_erf.c
@@ -0,0 +1,356 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)s_erf.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/* double erf(double x)
+ * double erfc(double x)
+ * erf(x) = (2/sqrt(pi)) * integral from 0 to x of exp(-t*t) dt
+ *
+ * erfc(x) = 1-erf(x)
+ * Note that
+ * erf(-x) = -erf(x)
+ * erfc(-x) = 2 - erfc(x)
+ *
+ * Method:
+ * 1. For |x| in [0, 0.84375]
+ * erf(x) = x + x*R(x^2)
+ * erfc(x) = 1 - erf(x) if x in [-.84375,0.25]
+ * = 0.5 + ((0.5-x)-x*R) if x in [0.25,0.84375]
+ * where R = P/Q where P is an odd poly of degree 8 and
+ * Q is an odd poly of degree 10.
+ * | R - (erf(x)-x)/x | <= 2^-57.90
+ *
+ *
+ * Remark. The formula is derived by noting
+ * erf(x) = (2/sqrt(pi))*(x - x^3/3 + x^5/10 - x^7/42 + ....)
+ * and that
+ * 2/sqrt(pi) = 1.128379167095512573896158903121545171688
+ * is close to one. The interval is chosen because the fixed
+ * point of erf(x) is near 0.6174 (i.e., erf(x)=x when x is
+ * near 0.6174), and by some experiment, 0.84375 is chosen to
+ * guarantee the error is less than one ulp for erf.
+ *
+ * 2. For |x| in [0.84375,1.25], let s = |x| - 1, and
+ * c = 0.84506291151 rounded to single (24 bits)
+ * erf(x) = sign(x) * (c + P1(s)/Q1(s))
+ * erfc(x) = (1-c) - P1(s)/Q1(s) if x > 0
+ * 1+(c+P1(s)/Q1(s)) if x < 0
+ * |P1/Q1 - (erf(|x|)-c)| <= 2**-59.06
+ * Remark: here we use the Taylor series expansion at x=1.
+ * erf(1+s) = erf(1) + s*Poly(s)
+ * = 0.845.. + P1(s)/Q1(s)
+ * That is, we use rational approximation to approximate
+ * erf(1+s) - (c = (single)0.84506291151)
+ * Note that |P1/Q1|< 0.078 for x in [0.84375,1.25]
+ * where
+ * P1(s) = degree 6 poly in s
+ * Q1(s) = degree 6 poly in s
+ *
+ * 3. For x in [1.25,1/0.35(~2.857143)],
+ * erfc(x) = (1/x)*exp(-x*x-0.5625+R1/S1)
+ * erf(x) = 1 - erfc(x)
+ * where
+ * R1(z) = degree 7 poly in z, (z=1/x^2)
+ * S1(z) = degree 8 poly in z
+ *
+ * 4. For x in [1/0.35,28]
+ * erfc(x) = (1/x)*exp(-x*x-0.5625+R2/S2) if x > 0
+ * = 2.0 - (1/x)*exp(-x*x-0.5625+R2/S2) if -6<x<0
+ * = 2.0 - tiny (if x <= -6)
+ * erf(x) = sign(x)*(1.0 - erfc(x)) if x < 6, else
+ * erf(x) = sign(x)*(1.0 - tiny)
+ * where
+ * R2(z) = degree 6 poly in z, (z=1/x^2)
+ * S2(z) = degree 7 poly in z
+ *
+ * Note1:
+ * To compute exp(-x*x-0.5625+R/S), let s be a single
+ * precision number and s := x; then
+ * -x*x = -s*s + (s-x)*(s+x)
+ * exp(-x*x-0.5626+R/S) =
+ * exp(-s*s-0.5625)*exp((s-x)*(s+x)+R/S);
+ * Note2:
+ * Here 4 and 5 make use of the asymptotic series
+ * erfc(x) ~ (exp(-x*x)/(x*sqrt(pi))) * ( 1 + Poly(1/x^2) )
+ * We use rational approximation to approximate
+ * g(s)=f(1/x^2) = log(erfc(x)*x) - x*x + 0.5625
+ * Here is the error bound for R1/S1 and R2/S2
+ * |R1/S1 - f(x)| < 2**(-62.57)
+ * |R2/S2 - f(x)| < 2**(-61.52)
+ *
+ * 5. For inf > x >= 28
+ * erf(x) = sign(x) *(1 - tiny) (raise inexact)
+ * erfc(x) = tiny*tiny (raise underflow) if x > 0
+ * = 2 - tiny if x<0
+ *
+ * 7. Special case:
+ * erf(0) = 0, erf(inf) = 1, erf(-inf) = -1,
+ * erfc(0) = 1, erfc(inf) = 0, erfc(-inf) = 2,
+ * erfc/erf(NaN) is NaN
+ */
+
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+static const double
+#else
+static double
+#endif
+tiny = 1e-300,
+half= 5.00000000000000000000e-01, /* 0x3FE00000, 0x00000000 */
+one = 1.00000000000000000000e+00, /* 0x3FF00000, 0x00000000 */
+two = 2.00000000000000000000e+00, /* 0x40000000, 0x00000000 */
+ /* c = (float)0.84506291151 */
+erx = 8.45062911510467529297e-01, /* 0x3FEB0AC1, 0x60000000 */
+/*
+ * Coefficients for approximation to erf on [0,0.84375]
+ */
+efx = 1.28379167095512586316e-01, /* 0x3FC06EBA, 0x8214DB69 */
+efx8= 1.02703333676410069053e+00, /* 0x3FF06EBA, 0x8214DB69 */
+pp0 = 1.28379167095512558561e-01, /* 0x3FC06EBA, 0x8214DB68 */
+pp1 = -3.25042107247001499370e-01, /* 0xBFD4CD7D, 0x691CB913 */
+pp2 = -2.84817495755985104766e-02, /* 0xBF9D2A51, 0xDBD7194F */
+pp3 = -5.77027029648944159157e-03, /* 0xBF77A291, 0x236668E4 */
+pp4 = -2.37630166566501626084e-05, /* 0xBEF8EAD6, 0x120016AC */
+qq1 = 3.97917223959155352819e-01, /* 0x3FD97779, 0xCDDADC09 */
+qq2 = 6.50222499887672944485e-02, /* 0x3FB0A54C, 0x5536CEBA */
+qq3 = 5.08130628187576562776e-03, /* 0x3F74D022, 0xC4D36B0F */
+qq4 = 1.32494738004321644526e-04, /* 0x3F215DC9, 0x221C1A10 */
+qq5 = -3.96022827877536812320e-06, /* 0xBED09C43, 0x42A26120 */
+/*
+ * Coefficients for approximation to erf in [0.84375,1.25]
+ */
+pa0 = -2.36211856075265944077e-03, /* 0xBF6359B8, 0xBEF77538 */
+pa1 = 4.14856118683748331666e-01, /* 0x3FDA8D00, 0xAD92B34D */
+pa2 = -3.72207876035701323847e-01, /* 0xBFD7D240, 0xFBB8C3F1 */
+pa3 = 3.18346619901161753674e-01, /* 0x3FD45FCA, 0x805120E4 */
+pa4 = -1.10894694282396677476e-01, /* 0xBFBC6398, 0x3D3E28EC */
+pa5 = 3.54783043256182359371e-02, /* 0x3FA22A36, 0x599795EB */
+pa6 = -2.16637559486879084300e-03, /* 0xBF61BF38, 0x0A96073F */
+qa1 = 1.06420880400844228286e-01, /* 0x3FBB3E66, 0x18EEE323 */
+qa2 = 5.40397917702171048937e-01, /* 0x3FE14AF0, 0x92EB6F33 */
+qa3 = 7.18286544141962662868e-02, /* 0x3FB2635C, 0xD99FE9A7 */
+qa4 = 1.26171219808761642112e-01, /* 0x3FC02660, 0xE763351F */
+qa5 = 1.36370839120290507362e-02, /* 0x3F8BEDC2, 0x6B51DD1C */
+qa6 = 1.19844998467991074170e-02, /* 0x3F888B54, 0x5735151D */
+/*
+ * Coefficients for approximation to erfc in [1.25,1/0.35]
+ */
+ra0 = -9.86494403484714822705e-03, /* 0xBF843412, 0x600D6435 */
+ra1 = -6.93858572707181764372e-01, /* 0xBFE63416, 0xE4BA7360 */
+ra2 = -1.05586262253232909814e+01, /* 0xC0251E04, 0x41B0E726 */
+ra3 = -6.23753324503260060396e+01, /* 0xC04F300A, 0xE4CBA38D */
+ra4 = -1.62396669462573470355e+02, /* 0xC0644CB1, 0x84282266 */
+ra5 = -1.84605092906711035994e+02, /* 0xC067135C, 0xEBCCABB2 */
+ra6 = -8.12874355063065934246e+01, /* 0xC0545265, 0x57E4D2F2 */
+ra7 = -9.81432934416914548592e+00, /* 0xC023A0EF, 0xC69AC25C */
+sa1 = 1.96512716674392571292e+01, /* 0x4033A6B9, 0xBD707687 */
+sa2 = 1.37657754143519042600e+02, /* 0x4061350C, 0x526AE721 */
+sa3 = 4.34565877475229228821e+02, /* 0x407B290D, 0xD58A1A71 */
+sa4 = 6.45387271733267880336e+02, /* 0x40842B19, 0x21EC2868 */
+sa5 = 4.29008140027567833386e+02, /* 0x407AD021, 0x57700314 */
+sa6 = 1.08635005541779435134e+02, /* 0x405B28A3, 0xEE48AE2C */
+sa7 = 6.57024977031928170135e+00, /* 0x401A47EF, 0x8E484A93 */
+sa8 = -6.04244152148580987438e-02, /* 0xBFAEEFF2, 0xEE749A62 */
+/*
+ * Coefficients for approximation to erfc in [1/.35,28]
+ */
+rb0 = -9.86494292470009928597e-03, /* 0xBF843412, 0x39E86F4A */
+rb1 = -7.99283237680523006574e-01, /* 0xBFE993BA, 0x70C285DE */
+rb2 = -1.77579549177547519889e+01, /* 0xC031C209, 0x555F995A */
+rb3 = -1.60636384855821916062e+02, /* 0xC064145D, 0x43C5ED98 */
+rb4 = -6.37566443368389627722e+02, /* 0xC083EC88, 0x1375F228 */
+rb5 = -1.02509513161107724954e+03, /* 0xC0900461, 0x6A2E5992 */
+rb6 = -4.83519191608651397019e+02, /* 0xC07E384E, 0x9BDC383F */
+sb1 = 3.03380607434824582924e+01, /* 0x403E568B, 0x261D5190 */
+sb2 = 3.25792512996573918826e+02, /* 0x40745CAE, 0x221B9F0A */
+sb3 = 1.53672958608443695994e+03, /* 0x409802EB, 0x189D5118 */
+sb4 = 3.19985821950859553908e+03, /* 0x40A8FFB7, 0x688C246A */
+sb5 = 2.55305040643316442583e+03, /* 0x40A3F219, 0xCEDF3BE6 */
+sb6 = 4.74528541206955367215e+02, /* 0x407DA874, 0xE79FE763 */
+sb7 = -2.24409524465858183362e+01; /* 0xC03670E2, 0x42712D62 */
+
+#ifdef __STDC__
+ double fd_erf(double x)
+#else
+ double fd_erf(x)
+ double x;
+#endif
+{
+ fd_twoints u;
+ int hx,ix,i;
+ double R,S,P,Q,s,y,z,r;
+ u.d = x;
+ hx = __HI(u);
+ ix = hx&0x7fffffff;
+ if(ix>=0x7ff00000) { /* erf(nan)=nan */
+ i = ((unsigned)hx>>31)<<1;
+ return (double)(1-i)+one/x; /* erf(+-inf)=+-1 */
+ }
+
+ if(ix < 0x3feb0000) { /* |x|<0.84375 */
+ if(ix < 0x3e300000) { /* |x|<2**-28 */
+ if (ix < 0x00800000)
+ return 0.125*(8.0*x+efx8*x); /*avoid underflow */
+ return x + efx*x;
+ }
+ z = x*x;
+ r = pp0+z*(pp1+z*(pp2+z*(pp3+z*pp4)));
+ s = one+z*(qq1+z*(qq2+z*(qq3+z*(qq4+z*qq5))));
+ y = r/s;
+ return x + x*y;
+ }
+ if(ix < 0x3ff40000) { /* 0.84375 <= |x| < 1.25 */
+ s = fd_fabs(x)-one;
+ P = pa0+s*(pa1+s*(pa2+s*(pa3+s*(pa4+s*(pa5+s*pa6)))));
+ Q = one+s*(qa1+s*(qa2+s*(qa3+s*(qa4+s*(qa5+s*qa6)))));
+ if(hx>=0) return erx + P/Q; else return -erx - P/Q;
+ }
+ if (ix >= 0x40180000) { /* inf>|x|>=6 */
+ if(hx>=0) return one-tiny; else return tiny-one;
+ }
+ x = fd_fabs(x);
+ s = one/(x*x);
+ if(ix< 0x4006DB6E) { /* |x| < 1/0.35 */
+ R=ra0+s*(ra1+s*(ra2+s*(ra3+s*(ra4+s*(
+ ra5+s*(ra6+s*ra7))))));
+ S=one+s*(sa1+s*(sa2+s*(sa3+s*(sa4+s*(
+ sa5+s*(sa6+s*(sa7+s*sa8)))))));
+ } else { /* |x| >= 1/0.35 */
+ R=rb0+s*(rb1+s*(rb2+s*(rb3+s*(rb4+s*(
+ rb5+s*rb6)))));
+ S=one+s*(sb1+s*(sb2+s*(sb3+s*(sb4+s*(
+ sb5+s*(sb6+s*sb7))))));
+ }
+ z = x;
+ u.d = z;
+ __LO(u) = 0;
+ z = u.d;
+ r = __ieee754_exp(-z*z-0.5625)*__ieee754_exp((z-x)*(z+x)+R/S);
+ if(hx>=0) return one-r/x; else return r/x-one;
+}
+
+#ifdef __STDC__
+ double erfc(double x)
+#else
+ double erfc(x)
+ double x;
+#endif
+{
+ fd_twoints u;
+ int hx,ix;
+ double R,S,P,Q,s,y,z,r;
+ u.d = x;
+ hx = __HI(u);
+ ix = hx&0x7fffffff;
+ if(ix>=0x7ff00000) { /* erfc(nan)=nan */
+ /* erfc(+-inf)=0,2 */
+ return (double)(((unsigned)hx>>31)<<1)+one/x;
+ }
+
+ if(ix < 0x3feb0000) { /* |x|<0.84375 */
+ if(ix < 0x3c700000) /* |x|<2**-56 */
+ return one-x;
+ z = x*x;
+ r = pp0+z*(pp1+z*(pp2+z*(pp3+z*pp4)));
+ s = one+z*(qq1+z*(qq2+z*(qq3+z*(qq4+z*qq5))));
+ y = r/s;
+ if(hx < 0x3fd00000) { /* x<1/4 */
+ return one-(x+x*y);
+ } else {
+ r = x*y;
+ r += (x-half);
+ return half - r ;
+ }
+ }
+ if(ix < 0x3ff40000) { /* 0.84375 <= |x| < 1.25 */
+ s = fd_fabs(x)-one;
+ P = pa0+s*(pa1+s*(pa2+s*(pa3+s*(pa4+s*(pa5+s*pa6)))));
+ Q = one+s*(qa1+s*(qa2+s*(qa3+s*(qa4+s*(qa5+s*qa6)))));
+ if(hx>=0) {
+ z = one-erx; return z - P/Q;
+ } else {
+ z = erx+P/Q; return one+z;
+ }
+ }
+ if (ix < 0x403c0000) { /* |x|<28 */
+ x = fd_fabs(x);
+ s = one/(x*x);
+ if(ix< 0x4006DB6D) { /* |x| < 1/.35 ~ 2.857143*/
+ R=ra0+s*(ra1+s*(ra2+s*(ra3+s*(ra4+s*(
+ ra5+s*(ra6+s*ra7))))));
+ S=one+s*(sa1+s*(sa2+s*(sa3+s*(sa4+s*(
+ sa5+s*(sa6+s*(sa7+s*sa8)))))));
+ } else { /* |x| >= 1/.35 ~ 2.857143 */
+ if(hx<0&&ix>=0x40180000) return two-tiny;/* x < -6 */
+ R=rb0+s*(rb1+s*(rb2+s*(rb3+s*(rb4+s*(
+ rb5+s*rb6)))));
+ S=one+s*(sb1+s*(sb2+s*(sb3+s*(sb4+s*(
+ sb5+s*(sb6+s*sb7))))));
+ }
+ z = x;
+ u.d = z;
+ __LO(u) = 0;
+ z = u.d;
+ r = __ieee754_exp(-z*z-0.5625)*
+ __ieee754_exp((z-x)*(z+x)+R/S);
+ if(hx>0) return r/x; else return two-r/x;
+ } else {
+ if(hx>0) return tiny*tiny; else return two-tiny;
+ }
+}
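The identities stated at the top of this file (erf(-x) = -erf(x), erfc(x) = 1 - erf(x)) can be spot-checked against the host C99 libm, which provides erf() and erfc() directly; a brief standalone sketch:

#include <math.h>
#include <stdio.h>

int main(void)
{
    double xs[] = { 0.5, 1.0, 2.0, 6.0 };
    int i;
    for (i = 0; i < (int)(sizeof xs / sizeof xs[0]); i++) {
        double x = xs[i];
        /* erf(-x) = -erf(x) and erfc(x) = 1 - erf(x) */
        printf("x=%.1f  erf=%.12f  -erf(-x)=%.12f  1-erfc=%.12f\n",
               x, erf(x), -erf(-x), 1.0 - erfc(x));
    }
    return 0;
}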
diff --git a/third_party/js-1.7/fdlibm/s_expm1.c b/third_party/js-1.7/fdlibm/s_expm1.c
new file mode 100644
index 0000000..578d2e1
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/s_expm1.c
@@ -0,0 +1,267 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)s_expm1.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/* expm1(x)
+ * Returns exp(x)-1, the exponential of x minus 1.
+ *
+ * Method
+ * 1. Argument reduction:
+ * Given x, find r and integer k such that
+ *
+ * x = k*ln2 + r, |r| <= 0.5*ln2 ~ 0.34658
+ *
+ * Here a correction term c will be computed to compensate
+ * the error in r when rounded to a floating-point number.
+ *
+ * 2. Approximating expm1(r) by a special rational function on
+ * the interval [0,0.34658]:
+ * Since
+ * r*(exp(r)+1)/(exp(r)-1) = 2+ r^2/6 - r^4/360 + ...
+ * we define R1(r*r) by
+ * r*(exp(r)+1)/(exp(r)-1) = 2+ r^2/6 * R1(r*r)
+ * That is,
+ * R1(r**2) = 6/r *((exp(r)+1)/(exp(r)-1) - 2/r)
+ * = 6/r * ( 1 + 2.0*(1/(exp(r)-1) - 1/r))
+ * = 1 - r^2/60 + r^4/2520 - r^6/100800 + ...
+ * We use a special Reme algorithm on [0,0.347] to generate
+ * a polynomial of degree 5 in r*r to approximate R1. The
+ * maximum error of this polynomial approximation is bounded
+ * by 2**-61. In other words,
+ * R1(z) ~ 1.0 + Q1*z + Q2*z**2 + Q3*z**3 + Q4*z**4 + Q5*z**5
+ * where Q1 = -1.6666666666666567384E-2,
+ * Q2 = 3.9682539681370365873E-4,
+ * Q3 = -9.9206344733435987357E-6,
+ * Q4 = 2.5051361420808517002E-7,
+ * Q5 = -6.2843505682382617102E-9;
+ * (where z=r*r, and the values of Q1 to Q5 are listed below)
+ * with error bounded by
+ * | 5 | -61
+ * | 1.0+Q1*z+...+Q5*z - R1(z) | <= 2
+ * | |
+ *
+ * expm1(r) = exp(r)-1 is then computed by the following
+ * specific way which minimize the accumulation rounding error:
+ * 2 3
+ * r r [ 3 - (R1 + R1*r/2) ]
+ * expm1(r) = r + --- + --- * [--------------------]
+ * 2 2 [ 6 - r*(3 - R1*r/2) ]
+ *
+ * To compensate the error in the argument reduction, we use
+ * expm1(r+c) = expm1(r) + c + expm1(r)*c
+ * ~ expm1(r) + c + r*c
+ * Thus c+r*c will be added in as the correction terms for
+ * expm1(r+c). Now rearrange the term to avoid optimization
+ * screw up:
+ * ( 2 2 )
+ * ({ ( r [ R1 - (3 - R1*r/2) ] ) } r )
+ * expm1(r+c)~r - ({r*(--- * [--------------------]-c)-c} - --- )
+ * ({ ( 2 [ 6 - r*(3 - R1*r/2) ] ) } 2 )
+ * ( )
+ *
+ * = r - E
+ * 3. Scale back to obtain expm1(x):
+ * From step 1, we have
+ * expm1(x) = either 2^k*[expm1(r)+1] - 1
+ * = or 2^k*[expm1(r) + (1-2^-k)]
+ * 4. Implementation notes:
+ * (A). To save one multiplication, we scale the coefficient Qi
+ * to Qi*2^i, and replace z by (x^2)/2.
+ * (B). To achieve maximum accuracy, we compute expm1(x) by
+ * (i) if x < -56*ln2, return -1.0, (raise inexact if x!=inf)
+ * (ii) if k=0, return r-E
+ * (iii) if k=-1, return 0.5*(r-E)-0.5
+ * (iv) if k=1 if r < -0.25, return 2*((r+0.5)- E)
+ * else return 1.0+2.0*(r-E);
+ * (v) if (k<-2||k>56) return 2^k(1-(E-r)) - 1 (or exp(x)-1)
+ * (vi) if k <= 20, return 2^k((1-2^-k)-(E-r)), else
+ * (vii) return 2^k(1-((E+2^-k)-r))
+ *
+ * Special cases:
+ * expm1(INF) is INF, expm1(NaN) is NaN;
+ * expm1(-INF) is -1, and
+ * for finite argument, only expm1(0)=0 is exact.
+ *
+ * Accuracy:
+ * according to an error analysis, the error is always less than
+ * 1 ulp (unit in the last place).
+ *
+ * Misc. info.
+ * For IEEE double
+ * if x > 7.09782712893383973096e+02 then expm1(x) overflow
+ *
+ * Constants:
+ * The hexadecimal values are the intended ones for the following
+ * constants. The decimal values may be used, provided that the
+ * compiler will convert from decimal to binary accurately enough
+ * to produce the hexadecimal values shown.
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+static const double
+#else
+static double
+#endif
+one = 1.0,
+really_big = 1.0e+300,
+tiny = 1.0e-300,
+o_threshold = 7.09782712893383973096e+02,/* 0x40862E42, 0xFEFA39EF */
+ln2_hi = 6.93147180369123816490e-01,/* 0x3fe62e42, 0xfee00000 */
+ln2_lo = 1.90821492927058770002e-10,/* 0x3dea39ef, 0x35793c76 */
+invln2 = 1.44269504088896338700e+00,/* 0x3ff71547, 0x652b82fe */
+ /* scaled coefficients related to expm1 */
+Q1 = -3.33333333333331316428e-02, /* BFA11111 111110F4 */
+Q2 = 1.58730158725481460165e-03, /* 3F5A01A0 19FE5585 */
+Q3 = -7.93650757867487942473e-05, /* BF14CE19 9EAADBB7 */
+Q4 = 4.00821782732936239552e-06, /* 3ED0CFCA 86E65239 */
+Q5 = -2.01099218183624371326e-07; /* BE8AFDB7 6E09C32D */
+
+#ifdef __STDC__
+ double fd_expm1(double x)
+#else
+ double fd_expm1(x)
+ double x;
+#endif
+{
+ fd_twoints u;
+ double y,hi,lo,c,t,e,hxs,hfx,r1;
+ int k,xsb;
+ unsigned hx;
+
+ u.d = x;
+ hx = __HI(u); /* high word of x */
+ xsb = hx&0x80000000; /* sign bit of x */
+ if(xsb==0) y=x; else y= -x; /* y = |x| */
+ hx &= 0x7fffffff; /* high word of |x| */
+
+ /* filter out huge and non-finite argument */
+ if(hx >= 0x4043687A) { /* if |x|>=56*ln2 */
+ if(hx >= 0x40862E42) { /* if |x|>=709.78... */
+ if(hx>=0x7ff00000) {
+ u.d = x;
+ if(((hx&0xfffff)|__LO(u))!=0)
+ return x+x; /* NaN */
+ else return (xsb==0)? x:-1.0;/* exp(+-inf)={inf,-1} */
+ }
+ if(x > o_threshold) return really_big*really_big; /* overflow */
+ }
+ if(xsb!=0) { /* x < -56*ln2, return -1.0 with inexact */
+ if(x+tiny<0.0) /* raise inexact */
+ return tiny-one; /* return -1 */
+ }
+ }
+
+ /* argument reduction */
+ if(hx > 0x3fd62e42) { /* if |x| > 0.5 ln2 */
+ if(hx < 0x3FF0A2B2) { /* and |x| < 1.5 ln2 */
+ if(xsb==0)
+ {hi = x - ln2_hi; lo = ln2_lo; k = 1;}
+ else
+ {hi = x + ln2_hi; lo = -ln2_lo; k = -1;}
+ } else {
+ k = (int)(invln2*x+((xsb==0)?0.5:-0.5));
+ t = k;
+ hi = x - t*ln2_hi; /* t*ln2_hi is exact here */
+ lo = t*ln2_lo;
+ }
+ x = hi - lo;
+ c = (hi-x)-lo;
+ }
+ else if(hx < 0x3c900000) { /* when |x|<2**-54, return x */
+ t = really_big+x; /* return x with inexact flags when x!=0 */
+ return x - (t-(really_big+x));
+ }
+ else k = 0;
+
+ /* x is now in primary range */
+ hfx = 0.5*x;
+ hxs = x*hfx;
+ r1 = one+hxs*(Q1+hxs*(Q2+hxs*(Q3+hxs*(Q4+hxs*Q5))));
+ t = 3.0-r1*hfx;
+ e = hxs*((r1-t)/(6.0 - x*t));
+ if(k==0) return x - (x*e-hxs); /* c is 0 */
+ else {
+ e = (x*(e-c)-c);
+ e -= hxs;
+ if(k== -1) return 0.5*(x-e)-0.5;
+ if(k==1)
+ if(x < -0.25) return -2.0*(e-(x+0.5));
+ else return one+2.0*(x-e);
+ if (k <= -2 || k>56) { /* suffice to return exp(x)-1 */
+ y = one-(e-x);
+ u.d = y;
+ __HI(u) += (k<<20); /* add k to y's exponent */
+ y = u.d;
+ return y-one;
+ }
+ t = one;
+ if(k<20) {
+ u.d = t;
+ __HI(u) = 0x3ff00000 - (0x200000>>k); /* t=1-2^-k */
+ t = u.d;
+ y = t-(e-x);
+ u.d = y;
+ __HI(u) += (k<<20); /* add k to y's exponent */
+ y = u.d;
+ } else {
+ u.d = t;
+ __HI(u) = ((0x3ff-k)<<20); /* 2^-k */
+ t = u.d;
+ y = x-(e+t);
+ y += one;
+ u.d = y;
+ __HI(u) += (k<<20); /* add k to y's exponent */
+ y = u.d;
+ }
+ }
+ return y;
+}
diff --git a/third_party/js-1.7/fdlibm/s_fabs.c b/third_party/js-1.7/fdlibm/s_fabs.c
new file mode 100644
index 0000000..6b029da
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/s_fabs.c
@@ -0,0 +1,70 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)s_fabs.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * fabs(x) returns the absolute value of x.
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+ double fd_fabs(double x)
+#else
+ double fd_fabs(x)
+ double x;
+#endif
+{
+ fd_twoints u;
+ u.d = x;
+ __HI(u) &= 0x7fffffff;
+ x = u.d;
+ return x;
+}
diff --git a/third_party/js-1.7/fdlibm/s_finite.c b/third_party/js-1.7/fdlibm/s_finite.c
new file mode 100644
index 0000000..4a0a4d3
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/s_finite.c
@@ -0,0 +1,71 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)s_finite.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * finite(x) returns 1 is x is finite, else 0;
+ * no branching!
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+ int fd_finite(double x)
+#else
+ int fd_finite(x)
+ double x;
+#endif
+{
+ fd_twoints u;
+ int hx;
+ u.d = x;
+ hx = __HI(u);
+ return (unsigned)((hx&0x7fffffff)-0x7ff00000)>>31;
+}
diff --git a/third_party/js-1.7/fdlibm/s_floor.c b/third_party/js-1.7/fdlibm/s_floor.c
new file mode 100644
index 0000000..6c23495
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/s_floor.c
@@ -0,0 +1,121 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)s_floor.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * floor(x)
+ * Return x rounded toward -inf to integral value
+ * Method:
+ * Bit twiddling.
+ * Exception:
+ * Inexact flag raised if x not equal to floor(x).
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+static const double really_big = 1.0e300;
+#else
+static double really_big = 1.0e300;
+#endif
+
+#ifdef __STDC__
+ double fd_floor(double x)
+#else
+ double fd_floor(x)
+ double x;
+#endif
+{
+ fd_twoints u;
+ int i0,i1,j0;
+ unsigned i,j;
+ u.d = x;
+ i0 = __HI(u);
+ i1 = __LO(u);
+ j0 = ((i0>>20)&0x7ff)-0x3ff;
+ if(j0<20) {
+ if(j0<0) { /* raise inexact if x != 0 */
+ if(really_big+x>0.0) {/* return 0*sign(x) if |x|<1 */
+ if(i0>=0) {i0=i1=0;}
+ else if(((i0&0x7fffffff)|i1)!=0)
+ { i0=0xbff00000;i1=0;}
+ }
+ } else {
+ i = (0x000fffff)>>j0;
+ if(((i0&i)|i1)==0) return x; /* x is integral */
+ if(really_big+x>0.0) { /* raise inexact flag */
+ if(i0<0) i0 += (0x00100000)>>j0;
+ i0 &= (~i); i1=0;
+ }
+ }
+ } else if (j0>51) {
+ if(j0==0x400) return x+x; /* inf or NaN */
+ else return x; /* x is integral */
+ } else {
+ i = ((unsigned)(0xffffffff))>>(j0-20);
+ if((i1&i)==0) return x; /* x is integral */
+ if(really_big+x>0.0) { /* raise inexact flag */
+ if(i0<0) {
+ if(j0==20) i0+=1;
+ else {
+ j = i1+(1<<(52-j0));
+ if((int)j<i1) i0 +=1 ; /* got a carry */
+ i1=j;
+ }
+ }
+ i1 &= (~i);
+ }
+ }
+ u.d = x;
+ __HI(u) = i0;
+ __LO(u) = i1;
+ x = u.d;
+ return x;
+}
diff --git a/third_party/js-1.7/fdlibm/s_frexp.c b/third_party/js-1.7/fdlibm/s_frexp.c
new file mode 100644
index 0000000..bec2ece
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/s_frexp.c
@@ -0,0 +1,99 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)s_frexp.c 1.4 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * for non-zero x
+ * x = frexp(arg,&exp);
+ * return a double fp quantity x such that 0.5 <= |x| <1.0
+ * and the corresponding binary exponent "exp". That is
+ * arg = x*2^exp.
+ * If arg is inf, 0.0, or NaN, then frexp(arg,&exp) returns arg
+ * with *exp=0.
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+static const double
+#else
+static double
+#endif
+two54 = 1.80143985094819840000e+16; /* 0x43500000, 0x00000000 */
+
+#ifdef __STDC__
+ double fd_frexp(double x, int *eptr)
+#else
+ double fd_frexp(x, eptr)
+ double x; int *eptr;
+#endif
+{
+ int hx, ix, lx;
+ fd_twoints u;
+ u.d = x;
+ hx = __HI(u);
+ ix = 0x7fffffff&hx;
+ lx = __LO(u);
+ *eptr = 0;
+ if(ix>=0x7ff00000||((ix|lx)==0)) return x; /* 0,inf,nan */
+ if (ix<0x00100000) { /* subnormal */
+ x *= two54;
+ u.d = x;
+ hx = __HI(u);
+ ix = hx&0x7fffffff;
+ *eptr = -54;
+ }
+ *eptr += (ix>>20)-1022;
+ hx = (hx&0x800fffff)|0x3fe00000;
+ u.d = x;
+ __HI(u) = hx;
+ x = u.d;
+ return x;
+}
diff --git a/third_party/js-1.7/fdlibm/s_ilogb.c b/third_party/js-1.7/fdlibm/s_ilogb.c
new file mode 100644
index 0000000..f769781
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/s_ilogb.c
@@ -0,0 +1,85 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)s_ilogb.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/* ilogb(double x)
+ * return the binary exponent of non-zero x
+ * ilogb(0) = 0x80000001
+ * ilogb(inf/NaN) = 0x7fffffff (no signal is raised)
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+ int fd_ilogb(double x)
+#else
+ int fd_ilogb(x)
+ double x;
+#endif
+{
+ int hx,lx,ix;
+ fd_twoints u;
+ u.d = x;
+ hx = (__HI(u))&0x7fffffff; /* high word of x */
+ if(hx<0x00100000) {
+ lx = __LO(u);
+ if((hx|lx)==0)
+ return 0x80000001; /* ilogb(0) = 0x80000001 */
+ else /* subnormal x */
+ if(hx==0) {
+ for (ix = -1043; lx>0; lx<<=1) ix -=1;
+ } else {
+ for (ix = -1022,hx<<=11; hx>0; hx<<=1) ix -=1;
+ }
+ return ix;
+ }
+ else if (hx<0x7ff00000) return (hx>>20)-1023;
+ else return 0x7fffffff;
+}
diff --git a/third_party/js-1.7/fdlibm/s_isnan.c b/third_party/js-1.7/fdlibm/s_isnan.c
new file mode 100644
index 0000000..52f8759
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/s_isnan.c
@@ -0,0 +1,74 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)s_isnan.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * isnan(x) returns 1 is x is nan, else 0;
+ * no branching!
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+ int fd_isnan(double x)
+#else
+ int fd_isnan(x)
+ double x;
+#endif
+{
+ fd_twoints u;
+ int hx,lx;
+ u.d = x;
+ hx = (__HI(u)&0x7fffffff);
+ lx = __LO(u);
+ hx |= (unsigned)(lx|(-lx))>>31;
+ hx = 0x7ff00000 - hx;
+ return ((unsigned)(hx))>>31;
+}
diff --git a/third_party/js-1.7/fdlibm/s_ldexp.c b/third_party/js-1.7/fdlibm/s_ldexp.c
new file mode 100644
index 0000000..9475520
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/s_ldexp.c
@@ -0,0 +1,66 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)s_ldexp.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+#include "fdlibm.h"
+#include <errno.h>
+
+#ifdef __STDC__
+ double fd_ldexp(double value, int exp)
+#else
+ double fd_ldexp(value, exp)
+ double value; int exp;
+#endif
+{
+ if(!fd_finite(value)||value==0.0) return value;
+ value = fd_scalbn(value,exp);
+ if(!fd_finite(value)||value==0.0) errno = ERANGE;
+ return value;
+}
diff --git a/third_party/js-1.7/fdlibm/s_lib_version.c b/third_party/js-1.7/fdlibm/s_lib_version.c
new file mode 100644
index 0000000..2ccf67d
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/s_lib_version.c
@@ -0,0 +1,73 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)s_lib_version.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * MACRO for standards
+ */
+
+#include "fdlibm.h"
+
+/*
+ * define and initialize _LIB_VERSION
+ */
+#ifdef _POSIX_MODE
+_LIB_VERSION_TYPE _LIB_VERSION = _POSIX_;
+#else
+#ifdef _XOPEN_MODE
+_LIB_VERSION_TYPE _LIB_VERSION = _XOPEN_;
+#else
+#ifdef _SVID3_MODE
+_LIB_VERSION_TYPE _LIB_VERSION = _SVID_;
+#else /* default _IEEE_MODE */
+_LIB_VERSION_TYPE _LIB_VERSION = _IEEE_;
+#endif
+#endif
+#endif
diff --git a/third_party/js-1.7/fdlibm/s_log1p.c b/third_party/js-1.7/fdlibm/s_log1p.c
new file mode 100644
index 0000000..1840156
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/s_log1p.c
@@ -0,0 +1,211 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)s_log1p.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/* double log1p(double x)
+ *
+ * Method :
+ * 1. Argument Reduction: find k and f such that
+ * 1+x = 2^k * (1+f),
+ * where sqrt(2)/2 < 1+f < sqrt(2) .
+ *
+ * Note. If k=0, then f=x is exact. However, if k!=0, then f
+ * may not be representable exactly. In that case, a correction
+ * term is need. Let u=1+x rounded. Let c = (1+x)-u, then
+ * log(1+x) - log(u) ~ c/u. Thus, we proceed to compute log(u),
+ * and add back the correction term c/u.
+ * (Note: when x > 2**53, one can simply return log(x))
+ *
+ * 2. Approximation of log1p(f).
+ * Let s = f/(2+f) ; based on log(1+f) = log(1+s) - log(1-s)
+ * = 2s + 2/3 s**3 + 2/5 s**5 + .....,
+ * = 2s + s*R
+ * We use a special Reme algorithm on [0,0.1716] to generate
+ * a polynomial of degree 14 to approximate R The maximum error
+ * of this polynomial approximation is bounded by 2**-58.45. In
+ * other words,
+ * 2 4 6 8 10 12 14
+ * R(z) ~ Lp1*s +Lp2*s +Lp3*s +Lp4*s +Lp5*s +Lp6*s +Lp7*s
+ * (the values of Lp1 to Lp7 are listed in the program)
+ * and
+ * | 2 14 | -58.45
+ * | Lp1*s +...+Lp7*s - R(z) | <= 2
+ * | |
+ * Note that 2s = f - s*f = f - hfsq + s*hfsq, where hfsq = f*f/2.
+ * In order to guarantee error in log below 1ulp, we compute log
+ * by
+ * log1p(f) = f - (hfsq - s*(hfsq+R)).
+ *
+ * 3. Finally, log1p(x) = k*ln2 + log1p(f).
+ * = k*ln2_hi+(f-(hfsq-(s*(hfsq+R)+k*ln2_lo)))
+ * Here ln2 is split into two floating point number:
+ * ln2_hi + ln2_lo,
+ * where n*ln2_hi is always exact for |n| < 2000.
+ *
+ * Special cases:
+ * log1p(x) is NaN with signal if x < -1 (including -INF) ;
+ * log1p(+INF) is +INF; log1p(-1) is -INF with signal;
+ * log1p(NaN) is that NaN with no signal.
+ *
+ * Accuracy:
+ * according to an error analysis, the error is always less than
+ * 1 ulp (unit in the last place).
+ *
+ * Constants:
+ * The hexadecimal values are the intended ones for the following
+ * constants. The decimal values may be used, provided that the
+ * compiler will convert from decimal to binary accurately enough
+ * to produce the hexadecimal values shown.
+ *
+ * Note: Assuming log() return accurate answer, the following
+ * algorithm can be used to compute log1p(x) to within a few ULP:
+ *
+ * u = 1+x;
+ * if(u==1.0) return x ; else
+ * return log(u)*(x/(u-1.0));
+ *
+ * See HP-15C Advanced Functions Handbook, p.193.
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+static const double
+#else
+static double
+#endif
+ln2_hi = 6.93147180369123816490e-01, /* 3fe62e42 fee00000 */
+ln2_lo = 1.90821492927058770002e-10, /* 3dea39ef 35793c76 */
+two54 = 1.80143985094819840000e+16, /* 43500000 00000000 */
+Lp1 = 6.666666666666735130e-01, /* 3FE55555 55555593 */
+Lp2 = 3.999999999940941908e-01, /* 3FD99999 9997FA04 */
+Lp3 = 2.857142874366239149e-01, /* 3FD24924 94229359 */
+Lp4 = 2.222219843214978396e-01, /* 3FCC71C5 1D8E78AF */
+Lp5 = 1.818357216161805012e-01, /* 3FC74664 96CB03DE */
+Lp6 = 1.531383769920937332e-01, /* 3FC39A09 D078C69F */
+Lp7 = 1.479819860511658591e-01; /* 3FC2F112 DF3E5244 */
+
+static double zero = 0.0;
+
+#ifdef __STDC__
+ double fd_log1p(double x)
+#else
+ double fd_log1p(x)
+ double x;
+#endif
+{
+ double hfsq,f,c,s,z,R,u;
+ int k,hx,hu,ax;
+ fd_twoints un;
+
+ un.d = x;
+ hx = __HI(un); /* high word of x */
+ ax = hx&0x7fffffff;
+
+ k = 1;
+ if (hx < 0x3FDA827A) { /* x < 0.41422 */
+ if(ax>=0x3ff00000) { /* x <= -1.0 */
+ if(x==-1.0) return -two54/zero; /* log1p(-1)=+inf */
+ else return (x-x)/(x-x); /* log1p(x<-1)=NaN */
+ }
+ if(ax<0x3e200000) { /* |x| < 2**-29 */
+ if(two54+x>zero /* raise inexact */
+ &&ax<0x3c900000) /* |x| < 2**-54 */
+ return x;
+ else
+ return x - x*x*0.5;
+ }
+ if(hx>0||hx<=((int)0xbfd2bec3)) {
+ k=0;f=x;hu=1;} /* -0.2929<x<0.41422 */
+ }
+ if (hx >= 0x7ff00000) return x+x;
+ if(k!=0) {
+ if(hx<0x43400000) {
+ u = 1.0+x;
+ un.d = u;
+ hu = __HI(un); /* high word of u */
+ k = (hu>>20)-1023;
+ c = (k>0)? 1.0-(u-x):x-(u-1.0);/* correction term */
+ c /= u;
+ } else {
+ u = x;
+ un.d = u;
+ hu = __HI(un); /* high word of u */
+ k = (hu>>20)-1023;
+ c = 0;
+ }
+ hu &= 0x000fffff;
+ if(hu<0x6a09e) {
+ un.d = u;
+ __HI(un) = hu|0x3ff00000; /* normalize u */
+ u = un.d;
+ } else {
+ k += 1;
+ un.d = u;
+ __HI(un) = hu|0x3fe00000; /* normalize u/2 */
+ u = un.d;
+ hu = (0x00100000-hu)>>2;
+ }
+ f = u-1.0;
+ }
+ hfsq=0.5*f*f;
+ if(hu==0) { /* |f| < 2**-20 */
+ if(f==zero) if(k==0) return zero;
+ else {c += k*ln2_lo; return k*ln2_hi+c;}
+ R = hfsq*(1.0-0.66666666666666666*f);
+ if(k==0) return f-R; else
+ return k*ln2_hi-((R-(k*ln2_lo+c))-f);
+ }
+ s = f/(2.0+f);
+ z = s*s;
+ R = z*(Lp1+z*(Lp2+z*(Lp3+z*(Lp4+z*(Lp5+z*(Lp6+z*Lp7))))));
+ if(k==0) return f-(hfsq-s*(hfsq+R)); else
+ return k*ln2_hi-((hfsq-(s*(hfsq+R)+(k*ln2_lo+c)))-f);
+}
diff --git a/third_party/js-1.7/fdlibm/s_logb.c b/third_party/js-1.7/fdlibm/s_logb.c
new file mode 100644
index 0000000..f885c4d
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/s_logb.c
@@ -0,0 +1,79 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)s_logb.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * double logb(x)
+ * IEEE 754 logb. Included to pass IEEE test suite. Not recommend.
+ * Use ilogb instead.
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+ double fd_logb(double x)
+#else
+ double fd_logb(x)
+ double x;
+#endif
+{
+ int lx,ix;
+ fd_twoints u;
+
+ u.d = x;
+ ix = (__HI(u))&0x7fffffff; /* high |x| */
+ lx = __LO(u); /* low x */
+ if((ix|lx)==0) return -1.0/fd_fabs(x);
+ if(ix>=0x7ff00000) return x*x;
+ if((ix>>=20)==0) /* IEEE 754 logb */
+ return -1022.0;
+ else
+ return (double) (ix-1023);
+}
diff --git a/third_party/js-1.7/fdlibm/s_matherr.c b/third_party/js-1.7/fdlibm/s_matherr.c
new file mode 100644
index 0000000..cd99ca8
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/s_matherr.c
@@ -0,0 +1,64 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)s_matherr.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+ int fd_matherr(struct exception *x)
+#else
+ int fd_matherr(x)
+ struct exception *x;
+#endif
+{
+ int n=0;
+ if(x->arg1!=x->arg1) return 0;
+ return n;
+}
diff --git a/third_party/js-1.7/fdlibm/s_modf.c b/third_party/js-1.7/fdlibm/s_modf.c
new file mode 100644
index 0000000..3b182bd
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/s_modf.c
@@ -0,0 +1,132 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)s_modf.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * modf(double x, double *iptr)
+ * return fraction part of x, and return x's integral part in *iptr.
+ * Method:
+ * Bit twiddling.
+ *
+ * Exception:
+ * No exception.
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+static const double one = 1.0;
+#else
+static double one = 1.0;
+#endif
+
+#ifdef __STDC__
+ double fd_modf(double x, double *iptr)
+#else
+ double fd_modf(x, iptr)
+ double x,*iptr;
+#endif
+{
+ int i0,i1,j0;
+ unsigned i;
+ fd_twoints u;
+ u.d = x;
+ i0 = __HI(u); /* high x */
+ i1 = __LO(u); /* low x */
+ j0 = ((i0>>20)&0x7ff)-0x3ff; /* exponent of x */
+ if(j0<20) { /* integer part in high x */
+ if(j0<0) { /* |x|<1 */
+ u.d = *iptr;
+ __HI(u) = i0&0x80000000;
+ __LO(u) = 0; /* *iptr = +-0 */
+ *iptr = u.d;
+ return x;
+ } else {
+ i = (0x000fffff)>>j0;
+ if(((i0&i)|i1)==0) { /* x is integral */
+ *iptr = x;
+ u.d = x;
+ __HI(u) &= 0x80000000;
+ __LO(u) = 0; /* return +-0 */
+ x = u.d;
+ return x;
+ } else {
+ u.d = *iptr;
+ __HI(u) = i0&(~i);
+ __LO(u) = 0;
+ *iptr = u.d;
+ return x - *iptr;
+ }
+ }
+ } else if (j0>51) { /* no fraction part */
+ *iptr = x*one;
+ u.d = x;
+ __HI(u) &= 0x80000000;
+ __LO(u) = 0; /* return +-0 */
+ x = u.d;
+ return x;
+ } else { /* fraction part in low x */
+ i = ((unsigned)(0xffffffff))>>(j0-20);
+ if((i1&i)==0) { /* x is integral */
+ *iptr = x;
+ u.d = x;
+ __HI(u) &= 0x80000000;
+ __LO(u) = 0; /* return +-0 */
+ x = u.d;
+ return x;
+ } else {
+ u.d = *iptr;
+ __HI(u) = i0;
+ __LO(u) = i1&(~i);
+ *iptr = u.d;
+ return x - *iptr;
+ }
+ }
+}
diff --git a/third_party/js-1.7/fdlibm/s_nextafter.c b/third_party/js-1.7/fdlibm/s_nextafter.c
new file mode 100644
index 0000000..f71c5c8
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/s_nextafter.c
@@ -0,0 +1,124 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)s_nextafter.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/* IEEE functions
+ * nextafter(x,y)
+ * return the next machine floating-point number of x in the
+ * direction toward y.
+ * Special cases:
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+ double fd_nextafter(double x, double y)
+#else
+ double fd_nextafter(x,y)
+ double x,y;
+#endif
+{
+ int hx,hy,ix,iy;
+ unsigned lx,ly;
+ fd_twoints ux, uy;
+
+ ux.d = x; uy.d = y;
+ hx = __HI(ux); /* high word of x */
+ lx = __LO(ux); /* low word of x */
+ hy = __HI(uy); /* high word of y */
+ ly = __LO(uy); /* low word of y */
+ ix = hx&0x7fffffff; /* |x| */
+ iy = hy&0x7fffffff; /* |y| */
+
+ if(((ix>=0x7ff00000)&&((ix-0x7ff00000)|lx)!=0) || /* x is nan */
+ ((iy>=0x7ff00000)&&((iy-0x7ff00000)|ly)!=0)) /* y is nan */
+ return x+y;
+ if(x==y) return x; /* x=y, return x */
+ if((ix|lx)==0) { /* x == 0 */
+ ux.d = x;
+ __HI(ux) = hy&0x80000000; /* return +-minsubnormal */
+ __LO(ux) = 1;
+ x = ux.d;
+ y = x*x;
+ if(y==x) return y; else return x; /* raise underflow flag */
+ }
+ if(hx>=0) { /* x > 0 */
+ if(hx>hy||((hx==hy)&&(lx>ly))) { /* x > y, x -= ulp */
+ if(lx==0) hx -= 1;
+ lx -= 1;
+ } else { /* x < y, x += ulp */
+ lx += 1;
+ if(lx==0) hx += 1;
+ }
+ } else { /* x < 0 */
+ if(hy>=0||hx>hy||((hx==hy)&&(lx>ly))){/* x < y, x -= ulp */
+ if(lx==0) hx -= 1;
+ lx -= 1;
+ } else { /* x > y, x += ulp */
+ lx += 1;
+ if(lx==0) hx += 1;
+ }
+ }
+ hy = hx&0x7ff00000;
+ if(hy>=0x7ff00000) return x+x; /* overflow */
+ if(hy<0x00100000) { /* underflow */
+ y = x*x;
+ if(y!=x) { /* raise underflow flag */
+ uy.d = y;
+ __HI(uy) = hx; __LO(uy) = lx;
+ y = uy.d;
+ return y;
+ }
+ }
+ ux.d = x;
+ __HI(ux) = hx; __LO(ux) = lx;
+ x = ux.d;
+ return x;
+}
diff --git a/third_party/js-1.7/fdlibm/s_rint.c b/third_party/js-1.7/fdlibm/s_rint.c
new file mode 100644
index 0000000..3c4fab6
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/s_rint.c
@@ -0,0 +1,131 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)s_rint.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * rint(x)
+ * Return x rounded to integral value according to the prevailing
+ * rounding mode.
+ * Method:
+ * Using floating addition.
+ * Exception:
+ * Inexact flag raised if x not equal to rint(x).
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+static const double
+#else
+static double
+#endif
+TWO52[2]={
+ 4.50359962737049600000e+15, /* 0x43300000, 0x00000000 */
+ -4.50359962737049600000e+15, /* 0xC3300000, 0x00000000 */
+};
+
+#ifdef __STDC__
+ double fd_rint(double x)
+#else
+ double fd_rint(x)
+ double x;
+#endif
+{
+ int i0,j0,sx;
+ unsigned i,i1;
+ double w,t;
+ fd_twoints u;
+
+ u.d = x;
+ i0 = __HI(u);
+ sx = (i0>>31)&1;
+ i1 = __LO(u);
+ j0 = ((i0>>20)&0x7ff)-0x3ff;
+ if(j0<20) {
+ if(j0<0) {
+ if(((i0&0x7fffffff)|i1)==0) return x;
+ i1 |= (i0&0x0fffff);
+ i0 &= 0xfffe0000;
+ i0 |= ((i1|-(int)i1)>>12)&0x80000;
+ u.d = x;
+ __HI(u)=i0;
+ x = u.d;
+ w = TWO52[sx]+x;
+ t = w-TWO52[sx];
+ u.d = t;
+ i0 = __HI(u);
+ __HI(u) = (i0&0x7fffffff)|(sx<<31);
+ t = u.d;
+ return t;
+ } else {
+ i = (0x000fffff)>>j0;
+ if(((i0&i)|i1)==0) return x; /* x is integral */
+ i>>=1;
+ if(((i0&i)|i1)!=0) {
+ if(j0==19) i1 = 0x40000000; else
+ i0 = (i0&(~i))|((0x20000)>>j0);
+ }
+ }
+ } else if (j0>51) {
+ if(j0==0x400) return x+x; /* inf or NaN */
+ else return x; /* x is integral */
+ } else {
+ i = ((unsigned)(0xffffffff))>>(j0-20);
+ if((i1&i)==0) return x; /* x is integral */
+ i>>=1;
+ if((i1&i)!=0) i1 = (i1&(~i))|((0x40000000)>>(j0-20));
+ }
+ u.d = x;
+ __HI(u) = i0;
+ __LO(u) = i1;
+ x = u.d;
+ w = TWO52[sx]+x;
+ return w-TWO52[sx];
+}
diff --git a/third_party/js-1.7/fdlibm/s_scalbn.c b/third_party/js-1.7/fdlibm/s_scalbn.c
new file mode 100644
index 0000000..3deeaa3
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/s_scalbn.c
@@ -0,0 +1,107 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)s_scalbn.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * scalbn (double x, int n)
+ * scalbn(x,n) returns x* 2**n computed by exponent
+ * manipulation rather than by actually performing an
+ * exponentiation or a multiplication.
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+static const double
+#else
+static double
+#endif
+two54 = 1.80143985094819840000e+16, /* 0x43500000, 0x00000000 */
+twom54 = 5.55111512312578270212e-17, /* 0x3C900000, 0x00000000 */
+really_big = 1.0e+300,
+tiny = 1.0e-300;
+
+#ifdef __STDC__
+ double fd_scalbn (double x, int n)
+#else
+ double fd_scalbn (x,n)
+ double x; int n;
+#endif
+{
+ fd_twoints u;
+ int k,hx,lx;
+ u.d = x;
+ hx = __HI(u);
+ lx = __LO(u);
+ k = (hx&0x7ff00000)>>20; /* extract exponent */
+ if (k==0) { /* 0 or subnormal x */
+ if ((lx|(hx&0x7fffffff))==0) return x; /* +-0 */
+ x *= two54;
+ u.d = x;
+ hx = __HI(u);
+ k = ((hx&0x7ff00000)>>20) - 54;
+ if (n< -50000) return tiny*x; /*underflow*/
+ }
+ if (k==0x7ff) return x+x; /* NaN or Inf */
+ k = k+n;
+ if (k > 0x7fe) return really_big*fd_copysign(really_big,x); /* overflow */
+ if (k > 0) /* normal result */
+ {u.d = x; __HI(u) = (hx&0x800fffff)|(k<<20); x = u.d; return x;}
+ if (k <= -54) {
+ if (n > 50000) /* in case integer overflow in n+k */
+ return really_big*fd_copysign(really_big,x); /*overflow*/
+ else return tiny*fd_copysign(tiny,x); /*underflow*/
+ }
+ k += 54; /* subnormal result */
+ u.d = x;
+ __HI(u) = (hx&0x800fffff)|(k<<20);
+ x = u.d;
+ return x*twom54;
+}
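As the comment above says, fd_scalbn multiplies by 2^n purely by exponent manipulation. For a normal, non-overflowing double that amounts to adding n to the 11-bit biased exponent field; a sketch of just that case (demo_scalbn is a hypothetical name, and zeros, subnormals, infinities, NaNs and overflow are simply passed through rather than handled as the real code does):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static double demo_scalbn(double x, int n)
{
    uint64_t bits;
    memcpy(&bits, &x, sizeof bits);
    int k = (int)((bits >> 52) & 0x7ff);        /* biased exponent */
    if (k == 0 || k == 0x7ff)
        return x;                               /* zero/subnormal/Inf/NaN: not handled here */
    k += n;                                     /* assumed to stay within (0, 0x7ff) */
    bits = (bits & ~(0x7ffULL << 52)) | ((uint64_t)k << 52);
    memcpy(&x, &bits, sizeof x);
    return x;
}

int main(void)
{
    printf("%g %g\n", demo_scalbn(3.0, 4), demo_scalbn(1.5, -2));   /* 48 0.375 */
    return 0;
}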
diff --git a/third_party/js-1.7/fdlibm/s_signgam.c b/third_party/js-1.7/fdlibm/s_signgam.c
new file mode 100644
index 0000000..4eb8ce7
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/s_signgam.c
@@ -0,0 +1,40 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+#include "fdlibm.h"
+int signgam = 0;
diff --git a/third_party/js-1.7/fdlibm/s_significand.c b/third_party/js-1.7/fdlibm/s_significand.c
new file mode 100644
index 0000000..2e1c0b2
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/s_significand.c
@@ -0,0 +1,68 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)s_significand.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * significand(x) computes just
+ * scalb(x, (double) -ilogb(x)),
+ * for exercising the fraction-part(F) IEEE 754-1985 test vector.
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+ double fd_significand(double x)
+#else
+ double fd_significand(x)
+ double x;
+#endif
+{
+ return __ieee754_scalb(x,(double) -fd_ilogb(x));
+}
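The identity in the comment is easy to check with the standard C library equivalents: stripping ilogb(x) from the exponent leaves the significand in [1, 2). A one-line illustration, using scalbn/ilogb from <math.h> rather than the fd_ versions:

#include <stdio.h>
#include <math.h>

int main(void)
{
    double x = 48.0;                            /* 48 = 1.5 * 2^5 */
    printf("%g\n", scalbn(x, -ilogb(x)));       /* prints 1.5 */
    return 0;
}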
diff --git a/third_party/js-1.7/fdlibm/s_sin.c b/third_party/js-1.7/fdlibm/s_sin.c
new file mode 100644
index 0000000..8bbc5c6
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/s_sin.c
@@ -0,0 +1,118 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)s_sin.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/* sin(x)
+ * Return sine function of x.
+ *
+ * kernel function:
+ * __kernel_sin ... sine function on [-pi/4,pi/4]
+ * __kernel_cos ... cosine function on [-pi/4,pi/4]
+ * __ieee754_rem_pio2 ... argument reduction routine
+ *
+ * Method.
+ * Let S,C and T denote the sin, cos and tan respectively on
+ * [-PI/4, +PI/4]. Reduce the argument x to y1+y2 = x-k*pi/2
+ * in [-pi/4 , +pi/4], and let n = k mod 4.
+ * We have
+ *
+ * n sin(x) cos(x) tan(x)
+ * ----------------------------------------------------------
+ * 0 S C T
+ * 1 C -S -1/T
+ * 2 -S -C T
+ * 3 -C S -1/T
+ * ----------------------------------------------------------
+ *
+ * Special cases:
+ * Let trig be any of sin, cos, or tan.
+ * trig(+-INF) is NaN, with signals;
+ * trig(NaN) is that NaN;
+ *
+ * Accuracy:
+ * TRIG(x) returns trig(x) nearly rounded
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+ double fd_sin(double x)
+#else
+ double fd_sin(x)
+ double x;
+#endif
+{
+ fd_twoints u;
+ double y[2],z=0.0;
+ int n, ix;
+
+ /* High word of x. */
+ u.d = x;
+ ix = __HI(u);
+
+ /* |x| ~< pi/4 */
+ ix &= 0x7fffffff;
+ if(ix <= 0x3fe921fb) return __kernel_sin(x,z,0);
+
+ /* sin(Inf or NaN) is NaN */
+ else if (ix>=0x7ff00000) return x-x;
+
+ /* argument reduction needed */
+ else {
+ n = __ieee754_rem_pio2(x,y);
+ switch(n&3) {
+ case 0: return __kernel_sin(y[0],y[1],1);
+ case 1: return __kernel_cos(y[0],y[1]);
+ case 2: return -__kernel_sin(y[0],y[1],1);
+ default:
+ return -__kernel_cos(y[0],y[1]);
+ }
+ }
+}
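The quadrant table in the comment above is the whole story once the argument has been reduced to y with x = k*pi/2 + y. A rough sketch of that dispatch using the C library's remquo() in place of __ieee754_rem_pio2 (the reduction here uses a rounded pi/2, so it is illustrative only, and demo_sin is a hypothetical name):

#include <stdio.h>
#include <math.h>

static double demo_sin(double x)
{
    const double PI_2 = 1.57079632679489661923; /* pi/2, rounded */
    int n;
    double y = remquo(x, PI_2, &n);             /* x ~= n*(pi/2) + y, |y| <~ pi/4 */
    switch (n & 3) {                            /* same table as the comment above */
    case 0:  return  sin(y);
    case 1:  return  cos(y);
    case 2:  return -sin(y);
    default: return -cos(y);
    }
}

int main(void)
{
    printf("%.15f\n%.15f\n", demo_sin(10.0), sin(10.0));   /* should agree closely */
    return 0;
}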
diff --git a/third_party/js-1.7/fdlibm/s_tan.c b/third_party/js-1.7/fdlibm/s_tan.c
new file mode 100644
index 0000000..ded36c1
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/s_tan.c
@@ -0,0 +1,112 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)s_tan.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/* tan(x)
+ * Return tangent function of x.
+ *
+ * kernel function:
+ * __kernel_tan ... tangent function on [-pi/4,pi/4]
+ * __ieee754_rem_pio2 ... argument reduction routine
+ *
+ * Method.
+ * Let S,C and T denote the sin, cos and tan respectively on
+ * [-PI/4, +PI/4]. Reduce the argument x to y1+y2 = x-k*pi/2
+ * in [-pi/4 , +pi/4], and let n = k mod 4.
+ * We have
+ *
+ * n sin(x) cos(x) tan(x)
+ * ----------------------------------------------------------
+ * 0 S C T
+ * 1 C -S -1/T
+ * 2 -S -C T
+ * 3 -C S -1/T
+ * ----------------------------------------------------------
+ *
+ * Special cases:
+ * Let trig be any of sin, cos, or tan.
+ * trig(+-INF) is NaN, with signals;
+ * trig(NaN) is that NaN;
+ *
+ * Accuracy:
+ * TRIG(x) returns trig(x) nearly rounded
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+ double fd_tan(double x)
+#else
+ double fd_tan(x)
+ double x;
+#endif
+{
+ fd_twoints u;
+ double y[2],z=0.0;
+ int n, ix;
+
+ /* High word of x. */
+ u.d = x;
+ ix = __HI(u);
+
+ /* |x| ~< pi/4 */
+ ix &= 0x7fffffff;
+ if(ix <= 0x3fe921fb) return __kernel_tan(x,z,1);
+
+ /* tan(Inf or NaN) is NaN */
+ else if (ix>=0x7ff00000) return x-x; /* NaN */
+
+ /* argument reduction needed */
+ else {
+ n = __ieee754_rem_pio2(x,y);
+ return __kernel_tan(y[0],y[1],1-((n&1)<<1)); /* 1 -- n even
+ -1 -- n odd */
+ }
+}
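The 1-((n&1)<<1) argument passed to __kernel_tan encodes only the parity of n: tan has period pi, so for even n the reduced tan(y) is returned as-is, while for odd n the identity tan(y + pi/2) = -1/tan(y) applies. A small illustration of that parity rule, again with remquo() standing in for the precise reduction (demo_tan is a hypothetical name):

#include <stdio.h>
#include <math.h>

static double demo_tan(double x)
{
    const double PI_2 = 1.57079632679489661923;
    int n;
    double y = remquo(x, PI_2, &n);
    return (n & 1) ? -1.0 / tan(y) : tan(y);    /* odd n: -cot(y); even n: tan(y) */
}

int main(void)
{
    printf("%.15f\n%.15f\n", demo_tan(5.0), tan(5.0));
    return 0;
}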
diff --git a/third_party/js-1.7/fdlibm/s_tanh.c b/third_party/js-1.7/fdlibm/s_tanh.c
new file mode 100644
index 0000000..aa6809f
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/s_tanh.c
@@ -0,0 +1,122 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)s_tanh.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/* Tanh(x)
+ * Return the Hyperbolic Tangent of x
+ *
+ * Method :
+ * x -x
+ * e - e
+ * 0. tanh(x) is defined to be -----------
+ * x -x
+ * e + e
+ * 1. reduce x to non-negative by tanh(-x) = -tanh(x).
+ * 2. 0 <= x <= 2**-55 : tanh(x) := x*(one+x)
+ * -t
+ * 2**-55 < x <= 1 : tanh(x) := -----; t = expm1(-2x)
+ * t + 2
+ * 2
+ * 1 <= x <= 22.0 : tanh(x) := 1- ----- ; t=expm1(2x)
+ * t + 2
+ * 22.0 < x <= INF : tanh(x) := 1.
+ *
+ * Special cases:
+ * tanh(NaN) is NaN;
+ * only tanh(0)=0 is exact for finite argument.
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+static const double one=1.0, two=2.0, tiny = 1.0e-300;
+#else
+static double one=1.0, two=2.0, tiny = 1.0e-300;
+#endif
+
+#ifdef __STDC__
+ double fd_tanh(double x)
+#else
+ double fd_tanh(x)
+ double x;
+#endif
+{
+ double t,z;
+ int jx,ix;
+ fd_twoints u;
+
+ /* High word of |x|. */
+ u.d = x;
+ jx = __HI(u);
+ ix = jx&0x7fffffff;
+
+ /* x is INF or NaN */
+ if(ix>=0x7ff00000) {
+ if (jx>=0) return one/x+one; /* tanh(+-inf)=+-1 */
+ else return one/x-one; /* tanh(NaN) = NaN */
+ }
+
+ /* |x| < 22 */
+ if (ix < 0x40360000) { /* |x|<22 */
+ if (ix<0x3c800000) /* |x|<2**-55 */
+ return x*(one+x); /* tanh(small) = small */
+ if (ix>=0x3ff00000) { /* |x|>=1 */
+ t = fd_expm1(two*fd_fabs(x));
+ z = one - two/(t+two);
+ } else {
+ t = fd_expm1(-two*fd_fabs(x));
+ z= -t/(t+two);
+ }
+ /* |x| > 22, return +-1 */
+ } else {
+ z = one - tiny; /* raised inexact flag */
+ }
+ return (jx>=0)? z: -z;
+}
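The piecewise method in the comment reduces tanh to a single expm1 call per branch. A compact sketch of those branches with the C library's expm1() (demo_tanh is a hypothetical name and the small-argument cutoff is written as a hex-float literal):

#include <stdio.h>
#include <math.h>

static double demo_tanh(double x)
{
    double ax = fabs(x), t, z;
    if (ax < 0x1p-55)                   /* |x| < 2^-55 */
        z = ax * (1.0 + ax);
    else if (ax < 1.0) {
        t = expm1(-2.0 * ax);
        z = -t / (t + 2.0);             /* tanh = -t/(t+2) */
    } else if (ax <= 22.0) {
        t = expm1(2.0 * ax);
        z = 1.0 - 2.0 / (t + 2.0);      /* tanh = 1 - 2/(t+2) */
    } else {
        z = 1.0;                        /* |x| > 22: tanh saturates */
    }
    return x < 0 ? -z : z;
}

int main(void)
{
    printf("%.15f\n%.15f\n", demo_tanh(0.5), tanh(0.5));
    return 0;
}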
diff --git a/third_party/js-1.7/fdlibm/w_acos.c b/third_party/js-1.7/fdlibm/w_acos.c
new file mode 100644
index 0000000..872c81d
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/w_acos.c
@@ -0,0 +1,78 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)w_acos.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * wrapper acos(x)
+ */
+
+#include "fdlibm.h"
+
+
+#ifdef __STDC__
+ double fd_acos(double x) /* wrapper acos */
+#else
+ double fd_acos(x) /* wrapper acos */
+ double x;
+#endif
+{
+#ifdef _IEEE_LIBM
+ return __ieee754_acos(x);
+#else
+ double z;
+ z = __ieee754_acos(x);
+ if(_LIB_VERSION == _IEEE_ || fd_isnan(x)) return z;
+ if(fd_fabs(x)>1.0) {
+ int err;
+ return __kernel_standard(x,x,1,&err); /* acos(|x|>1) */
+ } else
+ return z;
+#endif
+}
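This file sets the pattern for all the w_*.c wrappers that follow: call the __ieee754_* kernel directly in pure IEEE mode, otherwise screen the argument and route domain or range problems through __kernel_standard with a per-function error code (1 for acos here). A condensed sketch of that shape, using the standard C acos() and nan() as stand-ins and a hypothetical name:

#include <math.h>

double demo_acos_wrapper(double x)
{
    double z = acos(x);                 /* stands in for __ieee754_acos(x) */
    if (isnan(x))
        return z;                       /* NaN argument passes straight through */
    if (fabs(x) > 1.0)
        return nan("");                 /* stands in for __kernel_standard(x,x,1,&err) */
    return z;
}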
diff --git a/third_party/js-1.7/fdlibm/w_acosh.c b/third_party/js-1.7/fdlibm/w_acosh.c
new file mode 100644
index 0000000..745d402
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/w_acosh.c
@@ -0,0 +1,78 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)w_acosh.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ *
+ */
+
+/*
+ * wrapper acosh(x)
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+ double fd_acosh(double x) /* wrapper acosh */
+#else
+ double fd_acosh(x) /* wrapper acosh */
+ double x;
+#endif
+{
+#ifdef _IEEE_LIBM
+ return __ieee754_acosh(x);
+#else
+ double z;
+ z = __ieee754_acosh(x);
+ if(_LIB_VERSION == _IEEE_ || fd_isnan(x)) return z;
+ if(x<1.0) {
+ int err;
+ return __kernel_standard(x,x,29,&err); /* acosh(x<1) */
+ } else
+ return z;
+#endif
+}
diff --git a/third_party/js-1.7/fdlibm/w_asin.c b/third_party/js-1.7/fdlibm/w_asin.c
new file mode 100644
index 0000000..18aaefd
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/w_asin.c
@@ -0,0 +1,80 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)w_asin.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ *
+ */
+
+/*
+ * wrapper asin(x)
+ */
+
+
+#include "fdlibm.h"
+
+
+#ifdef __STDC__
+ double fd_asin(double x) /* wrapper asin */
+#else
+ double fd_asin(x) /* wrapper asin */
+ double x;
+#endif
+{
+#ifdef _IEEE_LIBM
+ return __ieee754_asin(x);
+#else
+ double z;
+ z = __ieee754_asin(x);
+ if(_LIB_VERSION == _IEEE_ || fd_isnan(x)) return z;
+ if(fd_fabs(x)>1.0) {
+ int err;
+ return __kernel_standard(x,x,2,&err); /* asin(|x|>1) */
+ } else
+ return z;
+#endif
+}
diff --git a/third_party/js-1.7/fdlibm/w_atan2.c b/third_party/js-1.7/fdlibm/w_atan2.c
new file mode 100644
index 0000000..8cfa4bb
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/w_atan2.c
@@ -0,0 +1,79 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)w_atan2.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ *
+ */
+
+/*
+ * wrapper atan2(y,x)
+ */
+
+#include "fdlibm.h"
+
+
+#ifdef __STDC__
+ double fd_atan2(double y, double x) /* wrapper atan2 */
+#else
+ double fd_atan2(y,x) /* wrapper atan2 */
+ double y,x;
+#endif
+{
+#ifdef _IEEE_LIBM
+ return __ieee754_atan2(y,x);
+#else
+ double z;
+ z = __ieee754_atan2(y,x);
+ if(_LIB_VERSION == _IEEE_||fd_isnan(x)||fd_isnan(y)) return z;
+ if(x==0.0&&y==0.0) {
+ int err;
+ return __kernel_standard(y,x,3,&err); /* atan2(+-0,+-0) */
+ } else
+ return z;
+#endif
+}
diff --git a/third_party/js-1.7/fdlibm/w_atanh.c b/third_party/js-1.7/fdlibm/w_atanh.c
new file mode 100644
index 0000000..6ba52d1
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/w_atanh.c
@@ -0,0 +1,81 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)w_atanh.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+/*
+ * wrapper atanh(x)
+ */
+
+#include "fdlibm.h"
+
+
+#ifdef __STDC__
+ double fd_atanh(double x) /* wrapper atanh */
+#else
+ double fd_atanh(x) /* wrapper atanh */
+ double x;
+#endif
+{
+#ifdef _IEEE_LIBM
+ return __ieee754_atanh(x);
+#else
+ double z,y;
+ z = __ieee754_atanh(x);
+ if(_LIB_VERSION == _IEEE_ || fd_isnan(x)) return z;
+ y = fd_fabs(x);
+ if(y>=1.0) {
+ int err;
+ if(y>1.0)
+ return __kernel_standard(x,x,30,&err); /* atanh(|x|>1) */
+ else
+ return __kernel_standard(x,x,31,&err); /* atanh(|x|==1) */
+ } else
+ return z;
+#endif
+}
diff --git a/third_party/js-1.7/fdlibm/w_cosh.c b/third_party/js-1.7/fdlibm/w_cosh.c
new file mode 100644
index 0000000..146449e
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/w_cosh.c
@@ -0,0 +1,77 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)w_cosh.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * wrapper cosh(x)
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+ double fd_cosh(double x) /* wrapper cosh */
+#else
+ double fd_cosh(x) /* wrapper cosh */
+ double x;
+#endif
+{
+#ifdef _IEEE_LIBM
+ return __ieee754_cosh(x);
+#else
+ double z;
+ z = __ieee754_cosh(x);
+ if(_LIB_VERSION == _IEEE_ || fd_isnan(x)) return z;
+ if(fd_fabs(x)>7.10475860073943863426e+02) {
+ int err;
+ return __kernel_standard(x,x,5,&err); /* cosh overflow */
+ } else
+ return z;
+#endif
+}
diff --git a/third_party/js-1.7/fdlibm/w_exp.c b/third_party/js-1.7/fdlibm/w_exp.c
new file mode 100644
index 0000000..f5dea0b
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/w_exp.c
@@ -0,0 +1,88 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)w_exp.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * wrapper exp(x)
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+static const double
+#else
+static double
+#endif
+o_threshold= 7.09782712893383973096e+02, /* 0x40862E42, 0xFEFA39EF */
+u_threshold= -7.45133219101941108420e+02; /* 0xc0874910, 0xD52D3051 */
+
+#ifdef __STDC__
+ double fd_exp(double x) /* wrapper exp */
+#else
+ double fd_exp(x) /* wrapper exp */
+ double x;
+#endif
+{
+#ifdef _IEEE_LIBM
+ return __ieee754_exp(x);
+#else
+ double z;
+ z = __ieee754_exp(x);
+ if(_LIB_VERSION == _IEEE_) return z;
+ if(fd_finite(x)) {
+ int err;
+ if(x>o_threshold)
+ return __kernel_standard(x,x,6,&err); /* exp overflow */
+ else if(x<u_threshold)
+ return __kernel_standard(x,x,7,&err); /* exp underflow */
+ }
+ return z;
+#endif
+}
diff --git a/third_party/js-1.7/fdlibm/w_fmod.c b/third_party/js-1.7/fdlibm/w_fmod.c
new file mode 100644
index 0000000..7686209
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/w_fmod.c
@@ -0,0 +1,78 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)w_fmod.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * wrapper fmod(x,y)
+ */
+
+#include "fdlibm.h"
+
+
+#ifdef __STDC__
+ double fd_fmod(double x, double y) /* wrapper fmod */
+#else
+ double fd_fmod(x,y) /* wrapper fmod */
+ double x,y;
+#endif
+{
+#ifdef _IEEE_LIBM
+ return __ieee754_fmod(x,y);
+#else
+ double z;
+ z = __ieee754_fmod(x,y);
+ if(_LIB_VERSION == _IEEE_ ||fd_isnan(y)||fd_isnan(x)) return z;
+ if(y==0.0) {
+ int err;
+ return __kernel_standard(x,y,27,&err); /* fmod(x,0) */
+ } else
+ return z;
+#endif
+}
diff --git a/third_party/js-1.7/fdlibm/w_gamma.c b/third_party/js-1.7/fdlibm/w_gamma.c
new file mode 100644
index 0000000..9eb8e42
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/w_gamma.c
@@ -0,0 +1,85 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)w_gamma.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ *
+ */
+
+/* double gamma(double x)
+ * Return the logarithm of the Gamma function of x.
+ *
+ * Method: call gamma_r
+ */
+
+#include "fdlibm.h"
+
+extern int signgam;
+
+#ifdef __STDC__
+ double fd_gamma(double x)
+#else
+ double fd_gamma(x)
+ double x;
+#endif
+{
+#ifdef _IEEE_LIBM
+ return __ieee754_gamma_r(x,&signgam);
+#else
+ double y;
+ y = __ieee754_gamma_r(x,&signgam);
+ if(_LIB_VERSION == _IEEE_) return y;
+ if(!fd_finite(y)&&fd_finite(x)) {
+ int err;
+ if(fd_floor(x)==x&&x<=0.0)
+ return __kernel_standard(x,x,41,&err); /* gamma pole */
+ else
+ return __kernel_standard(x,x,40,&err); /* gamma overflow */
+ } else
+ return y;
+#endif
+}
diff --git a/third_party/js-1.7/fdlibm/w_gamma_r.c b/third_party/js-1.7/fdlibm/w_gamma_r.c
new file mode 100644
index 0000000..2669b4f
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/w_gamma_r.c
@@ -0,0 +1,81 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)w_gamma_r.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * wrapper double gamma_r(double x, int *signgamp)
+ */
+
+#include "fdlibm.h"
+
+
+#ifdef __STDC__
+ double fd_gamma_r(double x, int *signgamp) /* wrapper lgamma_r */
+#else
+ double fd_gamma_r(x,signgamp) /* wrapper lgamma_r */
+ double x; int *signgamp;
+#endif
+{
+#ifdef _IEEE_LIBM
+ return __ieee754_gamma_r(x,signgamp);
+#else
+ double y;
+ y = __ieee754_gamma_r(x,signgamp);
+ if(_LIB_VERSION == _IEEE_) return y;
+ if(!fd_finite(y)&&fd_finite(x)) {
+ int err;
+ if(fd_floor(x)==x&&x<=0.0)
+ return __kernel_standard(x,x,41,&err); /* gamma pole */
+ else
+ return __kernel_standard(x,x,40,&err); /* gamma overflow */
+ } else
+ return y;
+#endif
+}
diff --git a/third_party/js-1.7/fdlibm/w_hypot.c b/third_party/js-1.7/fdlibm/w_hypot.c
new file mode 100644
index 0000000..bfaac66
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/w_hypot.c
@@ -0,0 +1,78 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)w_hypot.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * wrapper hypot(x,y)
+ */
+
+#include "fdlibm.h"
+
+
+#ifdef __STDC__
+ double fd_hypot(double x, double y)/* wrapper hypot */
+#else
+ double fd_hypot(x,y) /* wrapper hypot */
+ double x,y;
+#endif
+{
+#ifdef _IEEE_LIBM
+ return __ieee754_hypot(x,y);
+#else
+ double z;
+ z = __ieee754_hypot(x,y);
+ if(_LIB_VERSION == _IEEE_) return z;
+ if((!fd_finite(z))&&fd_finite(x)&&fd_finite(y)) {
+ int err;
+ return __kernel_standard(x,y,4,&err); /* hypot overflow */
+ } else
+ return z;
+#endif
+}
diff --git a/third_party/js-1.7/fdlibm/w_j0.c b/third_party/js-1.7/fdlibm/w_j0.c
new file mode 100644
index 0000000..5e676ff
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/w_j0.c
@@ -0,0 +1,105 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)w_j0.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * wrapper j0(double x), y0(double x)
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+ double fd_j0(double x) /* wrapper j0 */
+#else
+ double fd_j0(x) /* wrapper j0 */
+ double x;
+#endif
+{
+#ifdef _IEEE_LIBM
+ return __ieee754_j0(x);
+#else
+ double z = __ieee754_j0(x);
+ if(_LIB_VERSION == _IEEE_ || fd_isnan(x)) return z;
+ if(fd_fabs(x)>X_TLOSS) {
+ int err;
+ return __kernel_standard(x,x,34,&err); /* j0(|x|>X_TLOSS) */
+ } else
+ return z;
+#endif
+}
+
+#ifdef __STDC__
+ double y0(double x) /* wrapper y0 */
+#else
+ double y0(x) /* wrapper y0 */
+ double x;
+#endif
+{
+#ifdef _IEEE_LIBM
+ return __ieee754_y0(x);
+#else
+ double z;
+ int err;
+ z = __ieee754_y0(x);
+ if(_LIB_VERSION == _IEEE_ || fd_isnan(x) ) return z;
+ if(x <= 0.0){
+ if(x==0.0)
+ /* d= -one/(x-x); */
+ return __kernel_standard(x,x,8,&err);
+ else
+ /* d = zero/(x-x); */
+ return __kernel_standard(x,x,9,&err);
+ }
+ if(x>X_TLOSS) {
+ return __kernel_standard(x,x,35,&err); /* y0(x>X_TLOSS) */
+ } else
+ return z;
+#endif
+}
diff --git a/third_party/js-1.7/fdlibm/w_j1.c b/third_party/js-1.7/fdlibm/w_j1.c
new file mode 100644
index 0000000..86a506b
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/w_j1.c
@@ -0,0 +1,106 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)w_j1.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * wrapper of j1,y1
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+ double fd_j1(double x) /* wrapper j1 */
+#else
+ double fd_j1(x) /* wrapper j1 */
+ double x;
+#endif
+{
+#ifdef _IEEE_LIBM
+ return __ieee754_j1(x);
+#else
+ double z;
+ z = __ieee754_j1(x);
+ if(_LIB_VERSION == _IEEE_ || fd_isnan(x) ) return z;
+ if(fd_fabs(x)>X_TLOSS) {
+ int err;
+ return __kernel_standard(x,x,36,&err); /* j1(|x|>X_TLOSS) */
+ } else
+ return z;
+#endif
+}
+
+#ifdef __STDC__
+ double y1(double x) /* wrapper y1 */
+#else
+ double y1(x) /* wrapper y1 */
+ double x;
+#endif
+{
+#ifdef _IEEE_LIBM
+ return __ieee754_y1(x);
+#else
+ double z;
+ int err;
+ z = __ieee754_y1(x);
+ if(_LIB_VERSION == _IEEE_ || fd_isnan(x) ) return z;
+ if(x <= 0.0){
+ if(x==0.0)
+ /* d= -one/(x-x); */
+ return __kernel_standard(x,x,10,&err);
+ else
+ /* d = zero/(x-x); */
+ return __kernel_standard(x,x,11,&err);
+ }
+ if(x>X_TLOSS) {
+ return __kernel_standard(x,x,37,&err); /* y1(x>X_TLOSS) */
+ } else
+ return z;
+#endif
+}
diff --git a/third_party/js-1.7/fdlibm/w_jn.c b/third_party/js-1.7/fdlibm/w_jn.c
new file mode 100644
index 0000000..6926b0d
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/w_jn.c
@@ -0,0 +1,128 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)w_jn.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * wrapper jn(int n, double x), yn(int n, double x)
+ * floating point Bessel's function of the 1st and 2nd kind
+ * of order n
+ *
+ * Special cases:
+ * y0(0)=y1(0)=yn(n,0) = -inf with division by zero signal;
+ * y0(-ve)=y1(-ve)=yn(n,-ve) are NaN with invalid signal.
+ * Note 2. About jn(n,x), yn(n,x)
+ * For n=0, j0(x) is called,
+ * for n=1, j1(x) is called,
+ * for n<x, forward recursion is used starting
+ * from values of j0(x) and j1(x).
+ * for n>x, a continued fraction approximation to
+ * j(n,x)/j(n-1,x) is evaluated and then backward
+ * recursion is used starting from a supposed value
+ * for j(n,x). The resulting value of j(0,x) is
+ * compared with the actual value to correct the
+ * supposed value of j(n,x).
+ *
+ * yn(n,x) is similar in all respects, except
+ * that forward recursion is used for all
+ * values of n>1.
+ *
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+ double fd_jn(int n, double x) /* wrapper jn */
+#else
+ double fd_jn(n,x) /* wrapper jn */
+ double x; int n;
+#endif
+{
+#ifdef _IEEE_LIBM
+ return __ieee754_jn(n,x);
+#else
+ double z;
+ z = __ieee754_jn(n,x);
+ if(_LIB_VERSION == _IEEE_ || fd_isnan(x) ) return z;
+ if(fd_fabs(x)>X_TLOSS) {
+ int err;
+ return __kernel_standard((double)n,x,38,&err); /* jn(|x|>X_TLOSS,n) */
+ } else
+ return z;
+#endif
+}
+
+#ifdef __STDC__
+ double yn(int n, double x) /* wrapper yn */
+#else
+ double yn(n,x) /* wrapper yn */
+ double x; int n;
+#endif
+{
+#ifdef _IEEE_LIBM
+ return __ieee754_yn(n,x);
+#else
+ double z;
+ int err;
+ z = __ieee754_yn(n,x);
+ if(_LIB_VERSION == _IEEE_ || fd_isnan(x) ) return z;
+ if(x <= 0.0){
+ if(x==0.0)
+ /* d= -one/(x-x); */
+ return __kernel_standard((double)n,x,12,&err);
+ else
+ /* d = zero/(x-x); */
+ return __kernel_standard((double)n,x,13,&err);
+ }
+ if(x>X_TLOSS) {
+ return __kernel_standard((double)n,x,39,&err); /* yn(x>X_TLOSS,n) */
+ } else
+ return z;
+#endif
+}
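The recursion described in the header comment rests on the standard three-term recurrence J(n+1,x) = (2n/x)*J(n,x) - J(n-1,x); for n < x, forward recursion from j0 and j1 is numerically stable, which is the case the comment singles out. An illustrative sketch using the POSIX j0()/j1() in place of the __ieee754_* kernels (jn_forward is a made-up name, and x is assumed positive):

#include <math.h>    /* j0(), j1() are POSIX; stand-ins for the __ieee754_* kernels */

/* Forward recurrence J(n+1,x) = (2n/x)*J(n,x) - J(n-1,x), stable for n < x. */
double jn_forward(int n, double x)
{
    double prev = j0(x), cur = j1(x), next;
    int k;
    if (n == 0) return prev;
    if (n == 1) return cur;
    for (k = 1; k < n; k++) {
        next = (2.0 * k / x) * cur - prev;
        prev = cur;
        cur  = next;
    }
    return cur;
}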
diff --git a/third_party/js-1.7/fdlibm/w_lgamma.c b/third_party/js-1.7/fdlibm/w_lgamma.c
new file mode 100644
index 0000000..f7576e8
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/w_lgamma.c
@@ -0,0 +1,85 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)w_lgamma.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ *
+ */
+
+/* double lgamma(double x)
+ * Return the logarithm of the Gamma function of x.
+ *
+ * Method: call __ieee754_lgamma_r
+ */
+
+#include "fdlibm.h"
+
+extern int signgam;
+
+#ifdef __STDC__
+ double fd_lgamma(double x)
+#else
+ double fd_lgamma(x)
+ double x;
+#endif
+{
+#ifdef _IEEE_LIBM
+ return __ieee754_lgamma_r(x,&signgam);
+#else
+ double y;
+ y = __ieee754_lgamma_r(x,&signgam);
+ if(_LIB_VERSION == _IEEE_) return y;
+ if(!fd_finite(y)&&fd_finite(x)) {
+ int err;
+ if(fd_floor(x)==x&&x<=0.0)
+ return __kernel_standard(x,x,15,&err); /* lgamma pole */
+ else
+ return __kernel_standard(x,x,14,&err); /* lgamma overflow */
+ } else
+ return y;
+#endif
+}
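Since lgamma() returns log|Gamma(x)| and records the sign of Gamma(x) separately in the shared signgam variable (the file above exposes the same contract through fd_lgamma), a caller that needs Gamma(x) itself multiplies the sign back in. A short usage sketch against the host C library, assuming POSIX signgam:

#include <math.h>
#include <stdio.h>

int main(void)
{
    double x = -2.5;
    double lg = lgamma(x);               /* log|Gamma(x)|; also sets signgam */
    double gamma_x = signgam * exp(lg);  /* reconstruct Gamma(x) with its sign */
    printf("Gamma(%g) ~ %g\n", x, gamma_x);
    return 0;
}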
diff --git a/third_party/js-1.7/fdlibm/w_lgamma_r.c b/third_party/js-1.7/fdlibm/w_lgamma_r.c
new file mode 100644
index 0000000..ba2ad59
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/w_lgamma_r.c
@@ -0,0 +1,81 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)w_lgamma_r.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * wrapper double lgamma_r(double x, int *signgamp)
+ */
+
+#include "fdlibm.h"
+
+
+#ifdef __STDC__
+ double fd_lgamma_r(double x, int *signgamp) /* wrapper lgamma_r */
+#else
+ double fd_lgamma_r(x,signgamp) /* wrapper lgamma_r */
+ double x; int *signgamp;
+#endif
+{
+#ifdef _IEEE_LIBM
+ return __ieee754_lgamma_r(x,signgamp);
+#else
+ double y;
+ y = __ieee754_lgamma_r(x,signgamp);
+ if(_LIB_VERSION == _IEEE_) return y;
+ if(!fd_finite(y)&&fd_finite(x)) {
+ int err;
+ if(fd_floor(x)==x&&x<=0.0)
+ return __kernel_standard(x,x,15,&err); /* lgamma pole */
+ else
+ return __kernel_standard(x,x,14,&err); /* lgamma overflow */
+ } else
+ return y;
+#endif
+}
diff --git a/third_party/js-1.7/fdlibm/w_log.c b/third_party/js-1.7/fdlibm/w_log.c
new file mode 100644
index 0000000..7e358fc
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/w_log.c
@@ -0,0 +1,78 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)w_log.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * wrapper log(x)
+ */
+
+#include "fdlibm.h"
+
+
+#ifdef __STDC__
+ double fd_log(double x) /* wrapper log */
+#else
+ double fd_log(x) /* wrapper log */
+ double x;
+#endif
+{
+#ifdef _IEEE_LIBM
+ return __ieee754_log(x);
+#else
+ double z;
+ int err;
+ z = __ieee754_log(x);
+ if(_LIB_VERSION == _IEEE_ || fd_isnan(x) || x > 0.0) return z;
+ if(x==0.0)
+ return __kernel_standard(x,x,16,&err); /* log(0) */
+ else
+ return __kernel_standard(x,x,17,&err); /* log(x<0) */
+#endif
+}
diff --git a/third_party/js-1.7/fdlibm/w_log10.c b/third_party/js-1.7/fdlibm/w_log10.c
new file mode 100644
index 0000000..6b298b2
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/w_log10.c
@@ -0,0 +1,81 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)w_log10.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * wrapper log10(X)
+ */
+
+#include "fdlibm.h"
+
+
+#ifdef __STDC__
+ double fd_log10(double x) /* wrapper log10 */
+#else
+ double fd_log10(x) /* wrapper log10 */
+ double x;
+#endif
+{
+#ifdef _IEEE_LIBM
+ return __ieee754_log10(x);
+#else
+ double z;
+ z = __ieee754_log10(x);
+ if(_LIB_VERSION == _IEEE_ || fd_isnan(x)) return z;
+ if(x<=0.0) {
+ int err;
+ if(x==0.0)
+ return __kernel_standard(x,x,18,&err); /* log10(0) */
+ else
+ return __kernel_standard(x,x,19,&err); /* log10(x<0) */
+ } else
+ return z;
+#endif
+}
diff --git a/third_party/js-1.7/fdlibm/w_pow.c b/third_party/js-1.7/fdlibm/w_pow.c
new file mode 100644
index 0000000..3d2c15a
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/w_pow.c
@@ -0,0 +1,99 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+
+/* @(#)w_pow.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * wrapper pow(x,y) return x**y
+ */
+
+#include "fdlibm.h"
+
+
+#ifdef __STDC__
+ double fd_pow(double x, double y) /* wrapper pow */
+#else
+ double fd_pow(x,y) /* wrapper pow */
+ double x,y;
+#endif
+{
+#ifdef _IEEE_LIBM
+ return __ieee754_pow(x,y);
+#else
+ double z;
+ int err;
+ z=__ieee754_pow(x,y);
+ if(_LIB_VERSION == _IEEE_|| fd_isnan(y)) return z;
+ if(fd_isnan(x)) {
+ if(y==0.0)
+ return __kernel_standard(x,y,42,&err); /* pow(NaN,0.0) */
+ else
+ return z;
+ }
+ if(x==0.0){
+ if(y==0.0)
+ return __kernel_standard(x,y,20,&err); /* pow(0.0,0.0) */
+ if(fd_finite(y)&&y<0.0)
+ return __kernel_standard(x,y,23,&err); /* pow(0.0,negative) */
+ return z;
+ }
+ if(!fd_finite(z)) {
+ if(fd_finite(x)&&fd_finite(y)) {
+ if(fd_isnan(z))
+ return __kernel_standard(x,y,24,&err); /* pow neg**non-int */
+ else
+ return __kernel_standard(x,y,21,&err); /* pow overflow */
+ }
+ }
+ if(z==0.0&&fd_finite(x)&&fd_finite(y))
+ return __kernel_standard(x,y,22,&err); /* pow underflow */
+ return z;
+#endif
+}
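In SVID mode the wrapper above classifies the argument pair before trusting the kernel result; the codes handed to __kernel_standard match the cases named in its comments (42 for pow(NaN,0), 20/23 for zero bases, 24 for a negative base with a non-integer exponent, 21/22 for overflow and underflow). A small driver showing those inputs against the host libm, whose IEEE-style results differ from the SVID treatment:

#include <math.h>
#include <stdio.h>

int main(void)
{
    /* Host libm results; comments give the code fd_pow would pass to __kernel_standard. */
    printf("%g\n", pow(0.0, 0.0));     /* 1 under C99; code 20 (0**0)            */
    printf("%g\n", pow(0.0, -1.0));    /* inf;         code 23 (0**negative)     */
    printf("%g\n", pow(-2.0, 0.5));    /* nan;         code 24 (neg**non-int)    */
    printf("%g\n", pow(1e300, 3.0));   /* inf;         code 21 (overflow)        */
    printf("%g\n", pow(1e-300, 3.0));  /* 0;           code 22 (underflow)       */
    return 0;
}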
diff --git a/third_party/js-1.7/fdlibm/w_remainder.c b/third_party/js-1.7/fdlibm/w_remainder.c
new file mode 100644
index 0000000..25d1ba1
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/w_remainder.c
@@ -0,0 +1,77 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)w_remainder.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * wrapper remainder(x,p)
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+ double fd_remainder(double x, double y) /* wrapper remainder */
+#else
+ double fd_remainder(x,y) /* wrapper remainder */
+ double x,y;
+#endif
+{
+#ifdef _IEEE_LIBM
+ return __ieee754_remainder(x,y);
+#else
+ double z;
+ z = __ieee754_remainder(x,y);
+ if(_LIB_VERSION == _IEEE_ || fd_isnan(y)) return z;
+ if(y==0.0) {
+ int err;
+ return __kernel_standard(x,y,28,&err); /* remainder(x,0) */
+ } else
+ return z;
+#endif
+}
diff --git a/third_party/js-1.7/fdlibm/w_scalb.c b/third_party/js-1.7/fdlibm/w_scalb.c
new file mode 100644
index 0000000..35c16a5
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/w_scalb.c
@@ -0,0 +1,95 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)w_scalb.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * wrapper scalb(double x, double fn) is provided for
+ * passing various standard test suites. One
+ * should use scalbn() instead.
+ */
+
+#include "fdlibm.h"
+
+#include <errno.h>
+
+#ifdef __STDC__
+#ifdef _SCALB_INT
+ double fd_scalb(double x, int fn) /* wrapper scalb */
+#else
+ double fd_scalb(double x, double fn) /* wrapper scalb */
+#endif
+#else
+ double fd_scalb(x,fn) /* wrapper scalb */
+#ifdef _SCALB_INT
+ double x; int fn;
+#else
+ double x,fn;
+#endif
+#endif
+{
+#ifdef _IEEE_LIBM
+ return __ieee754_scalb(x,fn);
+#else
+ double z;
+ int err;
+ z = __ieee754_scalb(x,fn);
+ if(_LIB_VERSION == _IEEE_) return z;
+ if(!(fd_finite(z)||fd_isnan(z))&&fd_finite(x)) {
+ return __kernel_standard(x,(double)fn,32,&err); /* scalb overflow */
+ }
+ if(z==0.0&&z!=x) {
+ return __kernel_standard(x,(double)fn,33,&err); /* scalb underflow */
+ }
+#ifndef _SCALB_INT
+ if(!fd_finite(fn)) errno = ERANGE;
+#endif
+ return z;
+#endif
+}
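As the header comment notes, scalb() is kept only for test-suite compatibility and scalbn() is the preferred interface: both scale by a power of the radix (2 on binary floating point), but scalbn takes an integer exponent and so avoids the non-finite fn cases handled above. For example:

#include <math.h>
#include <stdio.h>

int main(void)
{
    printf("%g\n", scalbn(3.0, 4));    /* 3 * 2^4  = 48  */
    printf("%g\n", scalbn(3.0, -1));   /* 3 * 2^-1 = 1.5 */
    return 0;
}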
diff --git a/third_party/js-1.7/fdlibm/w_sinh.c b/third_party/js-1.7/fdlibm/w_sinh.c
new file mode 100644
index 0000000..8b04ecb
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/w_sinh.c
@@ -0,0 +1,77 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)w_sinh.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * wrapper sinh(x)
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+ double fd_sinh(double x) /* wrapper sinh */
+#else
+ double fd_sinh(x) /* wrapper sinh */
+ double x;
+#endif
+{
+#ifdef _IEEE_LIBM
+ return __ieee754_sinh(x);
+#else
+ double z;
+ z = __ieee754_sinh(x);
+ if(_LIB_VERSION == _IEEE_) return z;
+ if(!fd_finite(z)&&fd_finite(x)) {
+ int err;
+ return __kernel_standard(x,x,25,&err); /* sinh overflow */
+ } else
+ return z;
+#endif
+}
diff --git a/third_party/js-1.7/fdlibm/w_sqrt.c b/third_party/js-1.7/fdlibm/w_sqrt.c
new file mode 100644
index 0000000..462d776
--- /dev/null
+++ b/third_party/js-1.7/fdlibm/w_sqrt.c
@@ -0,0 +1,77 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)w_sqrt.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * wrapper sqrt(x)
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+ double fd_sqrt(double x) /* wrapper sqrt */
+#else
+ double fd_sqrt(x) /* wrapper sqrt */
+ double x;
+#endif
+{
+#ifdef _IEEE_LIBM
+ return __ieee754_sqrt(x);
+#else
+ double z;
+ z = __ieee754_sqrt(x);
+ if(_LIB_VERSION == _IEEE_ || fd_isnan(x)) return z;
+ if(x<0.0) {
+ int err;
+ return __kernel_standard(x,x,26,&err); /* sqrt(negative) */
+ } else
+ return z;
+#endif
+}
diff --git a/third_party/js-1.7/js.c b/third_party/js-1.7/js.c
new file mode 100644
index 0000000..fb4332f
--- /dev/null
+++ b/third_party/js-1.7/js.c
@@ -0,0 +1,3181 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sw=4 et tw=78:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * JS shell.
+ */
+#include "jsstddef.h"
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <locale.h>
+#include "jstypes.h"
+#include "jsarena.h"
+#include "jsutil.h"
+#include "jsprf.h"
+#include "jsapi.h"
+#include "jsatom.h"
+#include "jscntxt.h"
+#include "jsdbgapi.h"
+#include "jsemit.h"
+#include "jsfun.h"
+#include "jsgc.h"
+#include "jslock.h"
+#include "jsobj.h"
+#include "jsparse.h"
+#include "jsscope.h"
+#include "jsscript.h"
+
+#ifdef PERLCONNECT
+#include "perlconnect/jsperl.h"
+#endif
+
+#ifdef LIVECONNECT
+#include "jsjava.h"
+#endif
+
+#ifdef JSDEBUGGER
+#include "jsdebug.h"
+#ifdef JSDEBUGGER_JAVA_UI
+#include "jsdjava.h"
+#endif /* JSDEBUGGER_JAVA_UI */
+#ifdef JSDEBUGGER_C_UI
+#include "jsdb.h"
+#endif /* JSDEBUGGER_C_UI */
+#endif /* JSDEBUGGER */
+
+#ifdef XP_UNIX
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#endif
+
+#if defined(XP_WIN) || defined(XP_OS2)
+#include <io.h> /* for isatty() */
+#endif
+
+typedef enum JSShellExitCode {
+ EXITCODE_RUNTIME_ERROR = 3,
+ EXITCODE_FILE_NOT_FOUND = 4,
+ EXITCODE_OUT_OF_MEMORY = 5
+} JSShellExitCode;
+
+size_t gStackChunkSize = 8192;
+
+/* Assume that we cannot use more than 5e5 bytes of C stack by default. */
+static size_t gMaxStackSize = 500000;
+
+static jsuword gStackBase;
+int gExitCode = 0;
+JSBool gQuitting = JS_FALSE;
+FILE *gErrFile = NULL;
+FILE *gOutFile = NULL;
+
+#ifdef JSDEBUGGER
+static JSDContext *_jsdc;
+#ifdef JSDEBUGGER_JAVA_UI
+static JSDJContext *_jsdjc;
+#endif /* JSDEBUGGER_JAVA_UI */
+#endif /* JSDEBUGGER */
+
+static JSBool reportWarnings = JS_TRUE;
+static JSBool compileOnly = JS_FALSE;
+
+typedef enum JSShellErrNum {
+#define MSG_DEF(name, number, count, exception, format) \
+ name = number,
+#include "jsshell.msg"
+#undef MSG_DEF
+ JSShellErr_Limit
+#undef MSGDEF
+} JSShellErrNum;
+
+static const JSErrorFormatString *
+my_GetErrorMessage(void *userRef, const char *locale, const uintN errorNumber);
+static JSObject *
+split_setup(JSContext *cx);
+
+#ifdef EDITLINE
+extern char *readline(const char *prompt);
+extern void add_history(char *line);
+#endif
+
+static JSBool
+GetLine(JSContext *cx, char *bufp, FILE *file, const char *prompt) {
+#ifdef EDITLINE
+ /*
+ * Use readline only if file is stdin, because there's no way to specify
+ * another handle. Are other filehandles interactive?
+ */
+ if (file == stdin) {
+ char *linep = readline(prompt);
+ if (!linep)
+ return JS_FALSE;
+ if (linep[0] != '\0')
+ add_history(linep);
+ strcpy(bufp, linep);
+ JS_free(cx, linep);
+ bufp += strlen(bufp);
+ *bufp++ = '\n';
+ *bufp = '\0';
+ } else
+#endif
+ {
+ char line[256];
+ fprintf(gOutFile, prompt);
+ fflush(gOutFile);
+ if (!fgets(line, sizeof line, file))
+ return JS_FALSE;
+ strcpy(bufp, line);
+ }
+ return JS_TRUE;
+}
+
+static void
+Process(JSContext *cx, JSObject *obj, char *filename, JSBool forceTTY)
+{
+ JSBool ok, hitEOF;
+ JSScript *script;
+ jsval result;
+ JSString *str;
+ char buffer[4096];
+ char *bufp;
+ int lineno;
+ int startline;
+ FILE *file;
+ jsuword stackLimit;
+
+ if (forceTTY || !filename || strcmp(filename, "-") == 0) {
+ file = stdin;
+ } else {
+ file = fopen(filename, "r");
+ if (!file) {
+ JS_ReportErrorNumber(cx, my_GetErrorMessage, NULL,
+ JSSMSG_CANT_OPEN, filename, strerror(errno));
+ gExitCode = EXITCODE_FILE_NOT_FOUND;
+ return;
+ }
+ }
+
+ if (gMaxStackSize == 0) {
+ /*
+ * Disable checking for stack overflow if limit is zero.
+ */
+ stackLimit = 0;
+ } else {
+#if JS_STACK_GROWTH_DIRECTION > 0
+ stackLimit = gStackBase + gMaxStackSize;
+#else
+ stackLimit = gStackBase - gMaxStackSize;
+#endif
+ }
+ JS_SetThreadStackLimit(cx, stackLimit);
+
+ if (!forceTTY && !isatty(fileno(file))) {
+ /*
+ * It's not interactive - just execute it.
+ *
+ * Support the UNIX #! shell hack; gobble the first line if it starts
+ * with '#'. TODO - this isn't quite compatible with sharp variables,
+ * as a legal js program (using sharp variables) might start with '#'.
+ * But that would require multi-character lookahead.
+ */
+ int ch = fgetc(file);
+ if (ch == '#') {
+ while((ch = fgetc(file)) != EOF) {
+ if (ch == '\n' || ch == '\r')
+ break;
+ }
+ }
+ ungetc(ch, file);
+ script = JS_CompileFileHandle(cx, obj, filename, file);
+ if (script) {
+ if (!compileOnly)
+ (void)JS_ExecuteScript(cx, obj, script, &result);
+ JS_DestroyScript(cx, script);
+ }
+
+ return;
+ }
+
+ /* It's an interactive filehandle; drop into read-eval-print loop. */
+ lineno = 1;
+ hitEOF = JS_FALSE;
+ do {
+ bufp = buffer;
+ *bufp = '\0';
+
+ /*
+ * Accumulate lines until we get a 'compilable unit' - one that either
+ * generates an error (before running out of source) or that compiles
+ * cleanly. This should be whenever we get a complete statement that
+ * coincides with the end of a line.
+ */
+ startline = lineno;
+ do {
+ if (!GetLine(cx, bufp, file, startline == lineno ? "js> " : "")) {
+ hitEOF = JS_TRUE;
+ break;
+ }
+ bufp += strlen(bufp);
+ lineno++;
+ } while (!JS_BufferIsCompilableUnit(cx, obj, buffer, strlen(buffer)));
+
+ /* Clear any pending exception from previous failed compiles. */
+ JS_ClearPendingException(cx);
+ script = JS_CompileScript(cx, obj, buffer, strlen(buffer), "typein",
+ startline);
+ if (script) {
+ if (!compileOnly) {
+ ok = JS_ExecuteScript(cx, obj, script, &result);
+ if (ok && result != JSVAL_VOID) {
+ str = JS_ValueToString(cx, result);
+ if (str)
+ fprintf(gOutFile, "%s\n", JS_GetStringBytes(str));
+ else
+ ok = JS_FALSE;
+ }
+ }
+ JS_DestroyScript(cx, script);
+ }
+ } while (!hitEOF && !gQuitting);
+ fprintf(gOutFile, "\n");
+ return;
+}
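The interactive branch of Process() keeps appending input lines to a single buffer until JS_BufferIsCompilableUnit() accepts it, which is how multi-line statements work at the js> prompt. The same accumulate-then-test loop, reduced to a self-contained sketch in which a trivial brace-balance check stands in for the engine call (is_complete is a placeholder, not an engine API):

#include <stdio.h>
#include <string.h>

/* Toy stand-in for JS_BufferIsCompilableUnit(): balanced braces == "complete". */
static int is_complete(const char *buf)
{
    int depth = 0;
    for (; *buf; buf++) {
        if (*buf == '{') depth++;
        else if (*buf == '}') depth--;
    }
    return depth <= 0;
}

int main(void)
{
    char buffer[4096], line[256];
    for (;;) {
        buffer[0] = '\0';
        do {
            fputs(buffer[0] ? "" : "js> ", stdout);
            fflush(stdout);
            if (!fgets(line, sizeof line, stdin))
                return 0;                                /* EOF ends the loop */
            if (strlen(buffer) + strlen(line) >= sizeof buffer)
                break;                                   /* give up on oversized input */
            strcat(buffer, line);
        } while (!is_complete(buffer));
        printf("unit: %s", buffer);                      /* a real shell compiles and runs it */
    }
}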
+
+static int
+usage(void)
+{
+ fprintf(gErrFile, "%s\n", JS_GetImplementationVersion());
+ fprintf(gErrFile, "usage: js [-PswWxCi] [-b branchlimit] [-c stackchunksize] [-v version] [-f scriptfile] [-e script] [-S maxstacksize] [scriptfile] [scriptarg...]\n");
+ return 2;
+}
+
+static uint32 gBranchCount;
+static uint32 gBranchLimit;
+
+static JSBool
+my_BranchCallback(JSContext *cx, JSScript *script)
+{
+ if (++gBranchCount == gBranchLimit) {
+ if (script) {
+ if (script->filename)
+ fprintf(gErrFile, "%s:", script->filename);
+ fprintf(gErrFile, "%u: script branch callback (%u callbacks)\n",
+ script->lineno, gBranchLimit);
+ } else {
+ fprintf(gErrFile, "native branch callback (%u callbacks)\n",
+ gBranchLimit);
+ }
+ gBranchCount = 0;
+ return JS_FALSE;
+ }
+ if ((gBranchCount & 0x3fff) == 1)
+ JS_MaybeGC(cx);
+ return JS_TRUE;
+}
+
+extern JSClass global_class;
+
+static int
+ProcessArgs(JSContext *cx, JSObject *obj, char **argv, int argc)
+{
+ int i, j, length;
+ JSObject *argsObj;
+ char *filename = NULL;
+ JSBool isInteractive = JS_TRUE;
+ JSBool forceTTY = JS_FALSE;
+
+ /*
+ * Scan past all optional arguments so we can create the arguments object
+ * before processing any -f options, which must interleave properly with
+ * -v and -w options. This requires two passes, and without getopt, we'll
+ * have to keep the option logic here and in the second for loop in sync.
+ */
+ for (i = 0; i < argc; i++) {
+ if (argv[i][0] != '-' || argv[i][1] == '\0') {
+ ++i;
+ break;
+ }
+ switch (argv[i][1]) {
+ case 'b':
+ case 'c':
+ case 'f':
+ case 'e':
+ case 'v':
+ case 'S':
+ ++i;
+ break;
+ default:;
+ }
+ }
+
+ /*
+ * Create arguments early and define it to root it, so it's safe from any
+ * GC calls nested below, and so it is available to -f <file> arguments.
+ */
+ argsObj = JS_NewArrayObject(cx, 0, NULL);
+ if (!argsObj)
+ return 1;
+ if (!JS_DefineProperty(cx, obj, "arguments", OBJECT_TO_JSVAL(argsObj),
+ NULL, NULL, 0)) {
+ return 1;
+ }
+
+ length = argc - i;
+ for (j = 0; j < length; j++) {
+ JSString *str = JS_NewStringCopyZ(cx, argv[i++]);
+ if (!str)
+ return 1;
+ if (!JS_DefineElement(cx, argsObj, j, STRING_TO_JSVAL(str),
+ NULL, NULL, JSPROP_ENUMERATE)) {
+ return 1;
+ }
+ }
+
+ for (i = 0; i < argc; i++) {
+ if (argv[i][0] != '-' || argv[i][1] == '\0') {
+ filename = argv[i++];
+ isInteractive = JS_FALSE;
+ break;
+ }
+
+ switch (argv[i][1]) {
+ case 'v':
+ if (++i == argc)
+ return usage();
+
+ JS_SetVersion(cx, (JSVersion) atoi(argv[i]));
+ break;
+
+ case 'w':
+ reportWarnings = JS_TRUE;
+ break;
+
+ case 'W':
+ reportWarnings = JS_FALSE;
+ break;
+
+ case 's':
+ JS_ToggleOptions(cx, JSOPTION_STRICT);
+ break;
+
+ case 'x':
+ JS_ToggleOptions(cx, JSOPTION_XML);
+ break;
+
+ case 'P':
+ if (JS_GET_CLASS(cx, JS_GetPrototype(cx, obj)) != &global_class) {
+ JSObject *gobj;
+
+ if (!JS_SealObject(cx, obj, JS_TRUE))
+ return JS_FALSE;
+ gobj = JS_NewObject(cx, &global_class, NULL, NULL);
+ if (!gobj)
+ return JS_FALSE;
+ if (!JS_SetPrototype(cx, gobj, obj))
+ return JS_FALSE;
+ JS_SetParent(cx, gobj, NULL);
+ JS_SetGlobalObject(cx, gobj);
+ obj = gobj;
+ }
+ break;
+
+ case 'b':
+ gBranchLimit = atoi(argv[++i]);
+ JS_SetBranchCallback(cx, my_BranchCallback);
+ JS_ToggleOptions(cx, JSOPTION_NATIVE_BRANCH_CALLBACK);
+ break;
+
+ case 'c':
+ /* set stack chunk size */
+ gStackChunkSize = atoi(argv[++i]);
+ break;
+
+ case 'f':
+ if (++i == argc)
+ return usage();
+
+ Process(cx, obj, argv[i], JS_FALSE);
+
+ /*
+ * XXX: js -f foo.js should interpret foo.js and then
+ * drop into interactive mode, but that breaks the test
+ * harness. Just execute foo.js for now.
+ */
+ isInteractive = JS_FALSE;
+ break;
+
+ case 'e':
+ {
+ jsval rval;
+
+ if (++i == argc)
+ return usage();
+
+ /* Pass a filename of -e to imitate PERL */
+ JS_EvaluateScript(cx, obj, argv[i], strlen(argv[i]),
+ "-e", 1, &rval);
+
+ isInteractive = JS_FALSE;
+ break;
+
+ }
+ case 'C':
+ compileOnly = JS_TRUE;
+ isInteractive = JS_FALSE;
+ break;
+
+ case 'i':
+ isInteractive = forceTTY = JS_TRUE;
+ break;
+
+ case 'S':
+ if (++i == argc)
+ return usage();
+
+ /* Set maximum stack size. */
+ gMaxStackSize = atoi(argv[i]);
+ break;
+
+ case 'z':
+ obj = split_setup(cx);
+ break;
+
+ default:
+ return usage();
+ }
+ }
+
+ if (filename || isInteractive)
+ Process(cx, obj, filename, forceTTY);
+ return gExitCode;
+}
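ProcessArgs() walks argv twice on purpose: the first pass only locates where the script's own arguments begin (stepping over options and their operands), so the 'arguments' array can be created and rooted before a second pass actually applies -f, -e and the rest. A stripped-down sketch of that two-pass layout; takes_operand and the option letters mirror the switch above but are otherwise illustrative:

#include <stdio.h>
#include <string.h>

/* Options that consume a following operand (the shell's -b -c -f -e -v -S). */
static int takes_operand(char opt) { return strchr("bcfevS", opt) != NULL; }

int main(int argc, char **argv)
{
    int i;

    /* Pass 1: skip every option so the trailing script arguments can be collected first. */
    for (i = 1; i < argc; i++) {
        if (argv[i][0] != '-' || argv[i][1] == '\0') { i++; break; }  /* filename found */
        if (takes_operand(argv[i][1])) i++;
    }
    for (; i < argc; i++)
        printf("script arg: %s\n", argv[i]);   /* the shell defines these on 'arguments' */

    /* Pass 2 (elided here): act on the options themselves. */
    return 0;
}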
+
+
+static JSBool
+Version(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ if (argc > 0 && JSVAL_IS_INT(argv[0]))
+ *rval = INT_TO_JSVAL(JS_SetVersion(cx, (JSVersion) JSVAL_TO_INT(argv[0])));
+ else
+ *rval = INT_TO_JSVAL(JS_GetVersion(cx));
+ return JS_TRUE;
+}
+
+static struct {
+ const char *name;
+ uint32 flag;
+} js_options[] = {
+ {"strict", JSOPTION_STRICT},
+ {"werror", JSOPTION_WERROR},
+ {"atline", JSOPTION_ATLINE},
+ {"xml", JSOPTION_XML},
+ {0, 0}
+};
+
+static JSBool
+Options(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ uint32 optset, flag;
+ uintN i, j, found;
+ JSString *str;
+ const char *opt;
+ char *names;
+
+ optset = 0;
+ for (i = 0; i < argc; i++) {
+ str = JS_ValueToString(cx, argv[i]);
+ if (!str)
+ return JS_FALSE;
+ opt = JS_GetStringBytes(str);
+ for (j = 0; js_options[j].name; j++) {
+ if (strcmp(js_options[j].name, opt) == 0) {
+ optset |= js_options[j].flag;
+ break;
+ }
+ }
+ }
+ optset = JS_ToggleOptions(cx, optset);
+
+ names = NULL;
+ found = 0;
+ while (optset != 0) {
+ flag = optset;
+ optset &= optset - 1;
+ flag &= ~optset;
+ for (j = 0; js_options[j].name; j++) {
+ if (js_options[j].flag == flag) {
+ names = JS_sprintf_append(names, "%s%s",
+ names ? "," : "", js_options[j].name);
+ found++;
+ break;
+ }
+ }
+ }
+ if (!found)
+ names = strdup("");
+ if (!names) {
+ JS_ReportOutOfMemory(cx);
+ return JS_FALSE;
+ }
+
+ str = JS_NewString(cx, names, strlen(names));
+ if (!str) {
+ free(names);
+ return JS_FALSE;
+ }
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+
+static JSBool
+Load(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ uintN i;
+ JSString *str;
+ const char *filename;
+ JSScript *script;
+ JSBool ok;
+ jsval result;
+ uint32 oldopts;
+
+ for (i = 0; i < argc; i++) {
+ str = JS_ValueToString(cx, argv[i]);
+ if (!str)
+ return JS_FALSE;
+ argv[i] = STRING_TO_JSVAL(str);
+ filename = JS_GetStringBytes(str);
+ errno = 0;
+ oldopts = JS_GetOptions(cx);
+ JS_SetOptions(cx, oldopts | JSOPTION_COMPILE_N_GO);
+ script = JS_CompileFile(cx, obj, filename);
+ if (!script) {
+ ok = JS_FALSE;
+ } else {
+ ok = !compileOnly
+ ? JS_ExecuteScript(cx, obj, script, &result)
+ : JS_TRUE;
+ JS_DestroyScript(cx, script);
+ }
+ JS_SetOptions(cx, oldopts);
+ if (!ok)
+ return JS_FALSE;
+ }
+
+ return JS_TRUE;
+}
+
+/*
+ * function readline()
+ * Provides a hook for scripts to read a line from stdin.
+ */
+static JSBool
+ReadLine(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+#define BUFSIZE 256
+ FILE *from;
+ char *buf, *tmp;
+ size_t bufsize, buflength, gotlength;
+ JSString *str;
+
+ from = stdin;
+ buflength = 0;
+ bufsize = BUFSIZE;
+ buf = JS_malloc(cx, bufsize);
+ if (!buf)
+ return JS_FALSE;
+
+ while ((gotlength =
+ js_fgets(buf + buflength, bufsize - buflength, from)) > 0) {
+ buflength += gotlength;
+
+ /* Are we done? */
+ if (buf[buflength - 1] == '\n') {
+ buf[buflength - 1] = '\0';
+ break;
+ }
+
+ /* Else, grow our buffer for another pass. */
+ tmp = JS_realloc(cx, buf, bufsize * 2);
+ if (!tmp) {
+ JS_free(cx, buf);
+ return JS_FALSE;
+ }
+
+ bufsize *= 2;
+ buf = tmp;
+ }
+
+ /* Treat the empty string specially. */
+ if (buflength == 0) {
+ *rval = JS_GetEmptyStringValue(cx);
+ JS_free(cx, buf);
+ return JS_TRUE;
+ }
+
+ /* Shrink the buffer to the real size. */
+ tmp = JS_realloc(cx, buf, buflength);
+ if (!tmp) {
+ JS_free(cx, buf);
+ return JS_FALSE;
+ }
+
+ buf = tmp;
+
+ /*
+ * Turn buf into a JSString. Note that buflength includes the trailing null
+ * character.
+ */
+ str = JS_NewString(cx, buf, buflength - 1);
+ if (!str) {
+ JS_free(cx, buf);
+ return JS_FALSE;
+ }
+
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
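ReadLine() grows its buffer geometrically: read into the free tail, and if no newline has arrived yet, double the allocation and keep going, shrinking to the exact size at the end. The same pattern with plain stdio and malloc/realloc, independent of the JS allocator (read_line is an illustrative name):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Read one whole line from fp, doubling the buffer as needed; caller frees the result. */
static char *read_line(FILE *fp)
{
    size_t cap = 256, len = 0;
    char *buf = malloc(cap), *tmp;
    if (!buf) return NULL;
    while (fgets(buf + len, (int)(cap - len), fp)) {
        len += strlen(buf + len);
        if (len && buf[len - 1] == '\n') {      /* full line collected */
            buf[len - 1] = '\0';
            return buf;
        }
        tmp = realloc(buf, cap *= 2);           /* no newline yet: grow and continue */
        if (!tmp) { free(buf); return NULL; }
        buf = tmp;
    }
    if (len) return buf;                        /* final line without a newline */
    free(buf);
    return NULL;
}

int main(void)
{
    char *line = read_line(stdin);
    if (line) { printf("got: %s\n", line); free(line); }
    return 0;
}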
+
+static JSBool
+Print(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ uintN i, n;
+ JSString *str;
+
+ for (i = n = 0; i < argc; i++) {
+ str = JS_ValueToString(cx, argv[i]);
+ if (!str)
+ return JS_FALSE;
+ fprintf(gOutFile, "%s%s", i ? " " : "", JS_GetStringBytes(str));
+ }
+ n++;
+ if (n)
+ fputc('\n', gOutFile);
+ return JS_TRUE;
+}
+
+static JSBool
+Help(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval);
+
+static JSBool
+Quit(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+#ifdef LIVECONNECT
+ JSJ_SimpleShutdown();
+#endif
+
+ JS_ConvertArguments(cx, argc, argv,"/ i", &gExitCode);
+
+ gQuitting = JS_TRUE;
+ return JS_FALSE;
+}
+
+static JSBool
+GC(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSRuntime *rt;
+ uint32 preBytes;
+
+ rt = cx->runtime;
+ preBytes = rt->gcBytes;
+#ifdef GC_MARK_DEBUG
+ if (argc && JSVAL_IS_STRING(argv[0])) {
+ char *name = JS_GetStringBytes(JSVAL_TO_STRING(argv[0]));
+ FILE *file = fopen(name, "w");
+ if (!file) {
+ fprintf(gErrFile, "gc: can't open %s: %s\n", name, strerror(errno));
+ return JS_FALSE;
+ }
+ js_DumpGCHeap = file;
+ } else {
+ js_DumpGCHeap = stdout;
+ }
+#endif
+ JS_GC(cx);
+#ifdef GC_MARK_DEBUG
+ if (js_DumpGCHeap != stdout)
+ fclose(js_DumpGCHeap);
+ js_DumpGCHeap = NULL;
+#endif
+ fprintf(gOutFile, "before %lu, after %lu, break %08lx\n",
+ (unsigned long)preBytes, (unsigned long)rt->gcBytes,
+#ifdef XP_UNIX
+ (unsigned long)sbrk(0)
+#else
+ 0
+#endif
+ );
+#ifdef JS_GCMETER
+ js_DumpGCStats(rt, stdout);
+#endif
+ return JS_TRUE;
+}
+
+static JSScript *
+ValueToScript(JSContext *cx, jsval v)
+{
+ JSScript *script;
+ JSFunction *fun;
+
+ if (!JSVAL_IS_PRIMITIVE(v) &&
+ JS_GET_CLASS(cx, JSVAL_TO_OBJECT(v)) == &js_ScriptClass) {
+ script = (JSScript *) JS_GetPrivate(cx, JSVAL_TO_OBJECT(v));
+ } else {
+ fun = JS_ValueToFunction(cx, v);
+ if (!fun)
+ return NULL;
+ script = FUN_SCRIPT(fun);
+ }
+ return script;
+}
+
+static JSBool
+GetTrapArgs(JSContext *cx, uintN argc, jsval *argv, JSScript **scriptp,
+ int32 *ip)
+{
+ jsval v;
+ uintN intarg;
+ JSScript *script;
+
+ *scriptp = cx->fp->down->script;
+ *ip = 0;
+ if (argc != 0) {
+ v = argv[0];
+ intarg = 0;
+ if (!JSVAL_IS_PRIMITIVE(v) &&
+ (JS_GET_CLASS(cx, JSVAL_TO_OBJECT(v)) == &js_FunctionClass ||
+ JS_GET_CLASS(cx, JSVAL_TO_OBJECT(v)) == &js_ScriptClass)) {
+ script = ValueToScript(cx, v);
+ if (!script)
+ return JS_FALSE;
+ *scriptp = script;
+ intarg++;
+ }
+ if (argc > intarg) {
+ if (!JS_ValueToInt32(cx, argv[intarg], ip))
+ return JS_FALSE;
+ }
+ }
+ return JS_TRUE;
+}
+
+static JSTrapStatus
+TrapHandler(JSContext *cx, JSScript *script, jsbytecode *pc, jsval *rval,
+ void *closure)
+{
+ JSString *str;
+ JSStackFrame *caller;
+
+ str = (JSString *) closure;
+ caller = JS_GetScriptedCaller(cx, NULL);
+ if (!JS_EvaluateScript(cx, caller->scopeChain,
+ JS_GetStringBytes(str), JS_GetStringLength(str),
+ caller->script->filename, caller->script->lineno,
+ rval)) {
+ return JSTRAP_ERROR;
+ }
+ if (*rval != JSVAL_VOID)
+ return JSTRAP_RETURN;
+ return JSTRAP_CONTINUE;
+}
+
+static JSBool
+Trap(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSString *str;
+ JSScript *script;
+ int32 i;
+
+ if (argc == 0) {
+ JS_ReportErrorNumber(cx, my_GetErrorMessage, NULL, JSSMSG_TRAP_USAGE);
+ return JS_FALSE;
+ }
+ argc--;
+ str = JS_ValueToString(cx, argv[argc]);
+ if (!str)
+ return JS_FALSE;
+ argv[argc] = STRING_TO_JSVAL(str);
+ if (!GetTrapArgs(cx, argc, argv, &script, &i))
+ return JS_FALSE;
+ return JS_SetTrap(cx, script, script->code + i, TrapHandler, str);
+}
+
+static JSBool
+Untrap(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSScript *script;
+ int32 i;
+
+ if (!GetTrapArgs(cx, argc, argv, &script, &i))
+ return JS_FALSE;
+ JS_ClearTrap(cx, script, script->code + i, NULL, NULL);
+ return JS_TRUE;
+}
+
+static JSBool
+LineToPC(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSScript *script;
+ int32 i;
+ uintN lineno;
+ jsbytecode *pc;
+
+ if (argc == 0) {
+ JS_ReportErrorNumber(cx, my_GetErrorMessage, NULL, JSSMSG_LINE2PC_USAGE);
+ return JS_FALSE;
+ }
+ script = cx->fp->down->script;
+ if (!GetTrapArgs(cx, argc, argv, &script, &i))
+ return JS_FALSE;
+ lineno = (i == 0) ? script->lineno : (uintN)i;
+ pc = JS_LineNumberToPC(cx, script, lineno);
+ if (!pc)
+ return JS_FALSE;
+ *rval = INT_TO_JSVAL(PTRDIFF(pc, script->code, jsbytecode));
+ return JS_TRUE;
+}
+
+static JSBool
+PCToLine(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSScript *script;
+ int32 i;
+ uintN lineno;
+
+ if (!GetTrapArgs(cx, argc, argv, &script, &i))
+ return JS_FALSE;
+ lineno = JS_PCToLineNumber(cx, script, script->code + i);
+ if (!lineno)
+ return JS_FALSE;
+ *rval = INT_TO_JSVAL(lineno);
+ return JS_TRUE;
+}
+
+#ifdef DEBUG
+
+static void
+GetSwitchTableBounds(JSScript *script, uintN offset,
+ uintN *start, uintN *end)
+{
+ jsbytecode *pc;
+ JSOp op;
+ ptrdiff_t jmplen;
+ jsint low, high, n;
+
+ pc = script->code + offset;
+ op = *pc;
+ switch (op) {
+ case JSOP_TABLESWITCHX:
+ jmplen = JUMPX_OFFSET_LEN;
+ goto jump_table;
+ case JSOP_TABLESWITCH:
+ jmplen = JUMP_OFFSET_LEN;
+ jump_table:
+ pc += jmplen;
+ low = GET_JUMP_OFFSET(pc);
+ pc += JUMP_OFFSET_LEN;
+ high = GET_JUMP_OFFSET(pc);
+ pc += JUMP_OFFSET_LEN;
+ n = high - low + 1;
+ break;
+
+ case JSOP_LOOKUPSWITCHX:
+ jmplen = JUMPX_OFFSET_LEN;
+ goto lookup_table;
+ default:
+ JS_ASSERT(op == JSOP_LOOKUPSWITCH);
+ jmplen = JUMP_OFFSET_LEN;
+ lookup_table:
+ pc += jmplen;
+ n = GET_ATOM_INDEX(pc);
+ pc += ATOM_INDEX_LEN;
+ jmplen += ATOM_INDEX_LEN;
+ break;
+ }
+
+ *start = (uintN)(pc - script->code);
+ *end = *start + (uintN)(n * jmplen);
+}
+
+
+/*
+ * SrcNotes assumes that SRC_METHODBASE should be distinguished from SRC_LABEL
+ * using the bytecode the source note points to.
+ */
+JS_STATIC_ASSERT(SRC_LABEL == SRC_METHODBASE);
+
+static void
+SrcNotes(JSContext *cx, JSScript *script)
+{
+ uintN offset, delta, caseOff, switchTableStart, switchTableEnd;
+ jssrcnote *notes, *sn;
+ JSSrcNoteType type;
+ const char *name;
+ JSOp op;
+ jsatomid atomIndex;
+ JSAtom *atom;
+
+ fprintf(gOutFile, "\nSource notes:\n");
+ offset = 0;
+ notes = SCRIPT_NOTES(script);
+ switchTableEnd = switchTableStart = 0;
+ for (sn = notes; !SN_IS_TERMINATOR(sn); sn = SN_NEXT(sn)) {
+ delta = SN_DELTA(sn);
+ offset += delta;
+ type = (JSSrcNoteType) SN_TYPE(sn);
+ name = js_SrcNoteSpec[type].name;
+ if (type == SRC_LABEL) {
+ /* Heavily overloaded case. */
+ if (switchTableStart <= offset && offset < switchTableEnd) {
+ name = "case";
+ } else {
+ op = script->code[offset];
+ if (op == JSOP_GETMETHOD || op == JSOP_SETMETHOD) {
+ /* This is SRC_METHODBASE which we print as SRC_PCBASE. */
+ type = SRC_PCBASE;
+ name = "methodbase";
+ } else {
+ JS_ASSERT(op == JSOP_NOP);
+ }
+ }
+ }
+ fprintf(gOutFile, "%3u: %5u [%4u] %-8s",
+ PTRDIFF(sn, notes, jssrcnote), offset, delta, name);
+ switch (type) {
+ case SRC_SETLINE:
+ fprintf(gOutFile, " lineno %u", (uintN) js_GetSrcNoteOffset(sn, 0));
+ break;
+ case SRC_FOR:
+ fprintf(gOutFile, " cond %u update %u tail %u",
+ (uintN) js_GetSrcNoteOffset(sn, 0),
+ (uintN) js_GetSrcNoteOffset(sn, 1),
+ (uintN) js_GetSrcNoteOffset(sn, 2));
+ break;
+ case SRC_IF_ELSE:
+ fprintf(gOutFile, " else %u elseif %u",
+ (uintN) js_GetSrcNoteOffset(sn, 0),
+ (uintN) js_GetSrcNoteOffset(sn, 1));
+ break;
+ case SRC_COND:
+ case SRC_WHILE:
+ case SRC_PCBASE:
+ case SRC_PCDELTA:
+ case SRC_DECL:
+ case SRC_BRACE:
+ fprintf(gOutFile, " offset %u", (uintN) js_GetSrcNoteOffset(sn, 0));
+ break;
+ case SRC_LABEL:
+ case SRC_LABELBRACE:
+ case SRC_BREAK2LABEL:
+ case SRC_CONT2LABEL:
+ case SRC_FUNCDEF: {
+ const char *bytes;
+ JSFunction *fun;
+ JSString *str;
+
+ atomIndex = (jsatomid) js_GetSrcNoteOffset(sn, 0);
+ atom = js_GetAtom(cx, &script->atomMap, atomIndex);
+ if (type != SRC_FUNCDEF) {
+ bytes = js_AtomToPrintableString(cx, atom);
+ } else {
+ fun = (JSFunction *)
+ JS_GetPrivate(cx, ATOM_TO_OBJECT(atom));
+ str = JS_DecompileFunction(cx, fun, JS_DONT_PRETTY_PRINT);
+ bytes = str ? JS_GetStringBytes(str) : "N/A";
+ }
+ fprintf(gOutFile, " atom %u (%s)", (uintN)atomIndex, bytes);
+ break;
+ }
+ case SRC_SWITCH:
+ fprintf(gOutFile, " length %u", (uintN) js_GetSrcNoteOffset(sn, 0));
+ caseOff = (uintN) js_GetSrcNoteOffset(sn, 1);
+ if (caseOff)
+ fprintf(gOutFile, " first case offset %u", caseOff);
+ GetSwitchTableBounds(script, offset,
+ &switchTableStart, &switchTableEnd);
+ break;
+ case SRC_CATCH:
+ delta = (uintN) js_GetSrcNoteOffset(sn, 0);
+ if (delta) {
+ if (script->main[offset] == JSOP_LEAVEBLOCK)
+ fprintf(gOutFile, " stack depth %u", delta);
+ else
+ fprintf(gOutFile, " guard delta %u", delta);
+ }
+ break;
+ default:;
+ }
+ fputc('\n', gOutFile);
+ }
+}
+
+static JSBool
+Notes(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ uintN i;
+ JSScript *script;
+
+ for (i = 0; i < argc; i++) {
+ script = ValueToScript(cx, argv[i]);
+ if (!script)
+ continue;
+
+ SrcNotes(cx, script);
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+TryNotes(JSContext *cx, JSScript *script)
+{
+ JSTryNote *tn = script->trynotes;
+
+ if (!tn)
+ return JS_TRUE;
+ fprintf(gOutFile, "\nException table:\nstart\tend\tcatch\n");
+ while (tn->start && tn->catchStart) {
+ fprintf(gOutFile, " %d\t%d\t%d\n",
+ tn->start, tn->start + tn->length, tn->catchStart);
+ tn++;
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+Disassemble(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSBool lines;
+ uintN i;
+ JSScript *script;
+
+ if (argc > 0 &&
+ JSVAL_IS_STRING(argv[0]) &&
+ !strcmp(JS_GetStringBytes(JSVAL_TO_STRING(argv[0])), "-l")) {
+ lines = JS_TRUE;
+ argv++, argc--;
+ } else {
+ lines = JS_FALSE;
+ }
+ for (i = 0; i < argc; i++) {
+ script = ValueToScript(cx, argv[i]);
+ if (!script)
+ return JS_FALSE;
+
+ if (VALUE_IS_FUNCTION(cx, argv[i])) {
+ JSFunction *fun = JS_ValueToFunction(cx, argv[i]);
+ if (fun && (fun->flags & JSFUN_FLAGS_MASK)) {
+ uint16 flags = fun->flags;
+ fputs("flags:", stdout);
+
+#define SHOW_FLAG(flag) if (flags & JSFUN_##flag) fputs(" " #flag, stdout);
+
+ SHOW_FLAG(LAMBDA);
+ SHOW_FLAG(SETTER);
+ SHOW_FLAG(GETTER);
+ SHOW_FLAG(BOUND_METHOD);
+ SHOW_FLAG(HEAVYWEIGHT);
+ SHOW_FLAG(THISP_STRING);
+ SHOW_FLAG(THISP_NUMBER);
+ SHOW_FLAG(THISP_BOOLEAN);
+ SHOW_FLAG(INTERPRETED);
+
+#undef SHOW_FLAG
+ putchar('\n');
+ }
+ }
+
+ if (!js_Disassemble(cx, script, lines, stdout))
+ return JS_FALSE;
+ SrcNotes(cx, script);
+ TryNotes(cx, script);
+ }
+ return JS_TRUE;
+}
+
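+/*
+ * dissrc(): disassemble scripts, interleaving each run of bytecodes with the
+ * source lines they were compiled from. This re-reads script->filename, so it
+ * only works for scripts loaded from files.
+ */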
+static JSBool
+DisassWithSrc(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+#define LINE_BUF_LEN 512
+ uintN i, len, line1, line2, bupline;
+ JSScript *script;
+ FILE *file;
+ char linebuf[LINE_BUF_LEN];
+ jsbytecode *pc, *end;
+ static char sep[] = ";-------------------------";
+
+ for (i = 0; i < argc; i++) {
+ script = ValueToScript(cx, argv[i]);
+ if (!script)
+ return JS_FALSE;
+
+ if (!script || !script->filename) {
+ JS_ReportErrorNumber(cx, my_GetErrorMessage, NULL,
+ JSSMSG_FILE_SCRIPTS_ONLY);
+ return JS_FALSE;
+ }
+
+ file = fopen(script->filename, "r");
+ if (!file) {
+ JS_ReportErrorNumber(cx, my_GetErrorMessage, NULL,
+ JSSMSG_CANT_OPEN,
+ script->filename, strerror(errno));
+ return JS_FALSE;
+ }
+
+ pc = script->code;
+ end = pc + script->length;
+
+ /* burn the leading lines */
+ line2 = JS_PCToLineNumber(cx, script, pc);
+ for (line1 = 0; line1 < line2 - 1; line1++)
+ fgets(linebuf, LINE_BUF_LEN, file);
+
+ bupline = 0;
+ while (pc < end) {
+ line2 = JS_PCToLineNumber(cx, script, pc);
+
+ if (line2 < line1) {
+ if (bupline != line2) {
+ bupline = line2;
+ fprintf(gOutFile, "%s %3u: BACKUP\n", sep, line2);
+ }
+ } else {
+ if (bupline && line1 == line2)
+ fprintf(gOutFile, "%s %3u: RESTORE\n", sep, line2);
+ bupline = 0;
+ while (line1 < line2) {
+ if (!fgets(linebuf, LINE_BUF_LEN, file)) {
+ JS_ReportErrorNumber(cx, my_GetErrorMessage, NULL,
+ JSSMSG_UNEXPECTED_EOF,
+ script->filename);
+ goto bail;
+ }
+ line1++;
+ fprintf(gOutFile, "%s %3u: %s", sep, line1, linebuf);
+ }
+ }
+
+ len = js_Disassemble1(cx, script, pc,
+ PTRDIFF(pc, script->code, jsbytecode),
+ JS_TRUE, stdout);
+ if (!len)
+ return JS_FALSE;
+ pc += len;
+ }
+
+ bail:
+ fclose(file);
+ }
+ return JS_TRUE;
+#undef LINE_BUF_LEN
+}
+
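+/*
+ * tracing(): with no argument, report whether interpreter tracing is enabled;
+ * with a numeric or boolean argument, send trace output to stderr (truthy) or
+ * turn tracing off (falsy).
+ */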
+static JSBool
+Tracing(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSBool bval;
+ JSString *str;
+
+ if (argc == 0) {
+ *rval = BOOLEAN_TO_JSVAL(cx->tracefp != 0);
+ return JS_TRUE;
+ }
+
+ switch (JS_TypeOfValue(cx, argv[0])) {
+ case JSTYPE_NUMBER:
+ bval = JSVAL_IS_INT(argv[0])
+ ? JSVAL_TO_INT(argv[0])
+ : (jsint) *JSVAL_TO_DOUBLE(argv[0]);
+ break;
+ case JSTYPE_BOOLEAN:
+ bval = JSVAL_TO_BOOLEAN(argv[0]);
+ break;
+ default:
+ str = JS_ValueToString(cx, argv[0]);
+ if (!str)
+ return JS_FALSE;
+ fprintf(gErrFile, "tracing: illegal argument %s\n",
+ JS_GetStringBytes(str));
+ return JS_TRUE;
+ }
+ cx->tracefp = bval ? stderr : NULL;
+ return JS_TRUE;
+}
+
+typedef struct DumpAtomArgs {
+ JSContext *cx;
+ FILE *fp;
+} DumpAtomArgs;
+
+static int
+DumpAtom(JSHashEntry *he, int i, void *arg)
+{
+ DumpAtomArgs *args = (DumpAtomArgs *)arg;
+ FILE *fp = args->fp;
+ JSAtom *atom = (JSAtom *)he;
+
+ fprintf(fp, "%3d %08x %5lu ",
+ i, (uintN)he->keyHash, (unsigned long)atom->number);
+ if (ATOM_IS_STRING(atom))
+ fprintf(fp, "\"%s\"\n", js_AtomToPrintableString(args->cx, atom));
+ else if (ATOM_IS_INT(atom))
+ fprintf(fp, "%ld\n", (long)ATOM_TO_INT(atom));
+ else
+ fprintf(fp, "%.16g\n", *ATOM_TO_DOUBLE(atom));
+ return HT_ENUMERATE_NEXT;
+}
+
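+/*
+ * Walk an object's scope property chain, most recently added property first,
+ * printing each property's id, attributes, slot, flags, and shortid.
+ */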
+static void
+DumpScope(JSContext *cx, JSObject *obj, FILE *fp)
+{
+ uintN i;
+ JSScope *scope;
+ JSScopeProperty *sprop;
+
+ i = 0;
+ scope = OBJ_SCOPE(obj);
+ for (sprop = SCOPE_LAST_PROP(scope); sprop; sprop = sprop->parent) {
+ if (SCOPE_HAD_MIDDLE_DELETE(scope) && !SCOPE_HAS_PROPERTY(scope, sprop))
+ continue;
+ fprintf(fp, "%3u %p", i, (void *)sprop);
+ if (JSID_IS_INT(sprop->id)) {
+ fprintf(fp, " [%ld]", (long)JSVAL_TO_INT(sprop->id));
+ } else if (JSID_IS_ATOM(sprop->id)) {
+ JSAtom *atom = JSID_TO_ATOM(sprop->id);
+ fprintf(fp, " \"%s\"", js_AtomToPrintableString(cx, atom));
+ } else {
+ jsval v = OBJECT_TO_JSVAL(JSID_TO_OBJECT(sprop->id));
+ fprintf(fp, " \"%s\"", js_ValueToPrintableString(cx, v));
+ }
+
+#define DUMP_ATTR(name) if (sprop->attrs & JSPROP_##name) fputs(" " #name, fp)
+ DUMP_ATTR(ENUMERATE);
+ DUMP_ATTR(READONLY);
+ DUMP_ATTR(PERMANENT);
+ DUMP_ATTR(EXPORTED);
+ DUMP_ATTR(GETTER);
+ DUMP_ATTR(SETTER);
+#undef DUMP_ATTR
+
+ fprintf(fp, " slot %lu flags %x shortid %d\n",
+ (unsigned long)sprop->slot, sprop->flags, sprop->shortid);
+ }
+}
+
+static JSBool
+DumpStats(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ uintN i;
+ JSString *str;
+ const char *bytes;
+ JSAtom *atom;
+ JSObject *obj2;
+ JSProperty *prop;
+ jsval value;
+
+ for (i = 0; i < argc; i++) {
+ str = JS_ValueToString(cx, argv[i]);
+ if (!str)
+ return JS_FALSE;
+ bytes = JS_GetStringBytes(str);
+ if (strcmp(bytes, "arena") == 0) {
+#ifdef JS_ARENAMETER
+ JS_DumpArenaStats(stdout);
+#endif
+ } else if (strcmp(bytes, "atom") == 0) {
+ DumpAtomArgs args;
+
+ fprintf(gOutFile, "\natom table contents:\n");
+ args.cx = cx;
+ args.fp = stdout;
+ JS_HashTableEnumerateEntries(cx->runtime->atomState.table,
+ DumpAtom,
+ &args);
+#ifdef HASHMETER
+ JS_HashTableDumpMeter(cx->runtime->atomState.table,
+ DumpAtom,
+ stdout);
+#endif
+ } else if (strcmp(bytes, "global") == 0) {
+ DumpScope(cx, cx->globalObject, stdout);
+ } else {
+ atom = js_Atomize(cx, bytes, JS_GetStringLength(str), 0);
+ if (!atom)
+ return JS_FALSE;
+ if (!js_FindProperty(cx, ATOM_TO_JSID(atom), &obj, &obj2, &prop))
+ return JS_FALSE;
+ if (prop) {
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+ if (!OBJ_GET_PROPERTY(cx, obj, ATOM_TO_JSID(atom), &value))
+ return JS_FALSE;
+ }
+ if (!prop || !JSVAL_IS_OBJECT(value)) {
+ fprintf(gErrFile, "js: invalid stats argument %s\n",
+ bytes);
+ continue;
+ }
+ obj = JSVAL_TO_OBJECT(value);
+ if (obj)
+ DumpScope(cx, obj, stdout);
+ }
+ }
+ return JS_TRUE;
+}
+
+#endif /* DEBUG */
+
+#ifdef TEST_EXPORT
+static JSBool
+DoExport(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSAtom *atom;
+ JSObject *obj2;
+ JSProperty *prop;
+ JSBool ok;
+ uintN attrs;
+
+ if (argc != 2) {
+ JS_ReportErrorNumber(cx, my_GetErrorMessage, NULL, JSSMSG_DOEXP_USAGE);
+ return JS_FALSE;
+ }
+ if (!JS_ValueToObject(cx, argv[0], &obj))
+ return JS_FALSE;
+ argv[0] = OBJECT_TO_JSVAL(obj);
+ atom = js_ValueToStringAtom(cx, argv[1]);
+ if (!atom)
+ return JS_FALSE;
+ if (!OBJ_LOOKUP_PROPERTY(cx, obj, ATOM_TO_JSID(atom), &obj2, &prop))
+ return JS_FALSE;
+ if (!prop) {
+ ok = OBJ_DEFINE_PROPERTY(cx, obj, ATOM_TO_JSID(atom), JSVAL_VOID, NULL, NULL,
+ JSPROP_EXPORTED, NULL);
+ } else {
+ ok = OBJ_GET_ATTRIBUTES(cx, obj, ATOM_TO_JSID(atom), prop, &attrs);
+ if (ok) {
+ attrs |= JSPROP_EXPORTED;
+ ok = OBJ_SET_ATTRIBUTES(cx, obj, ATOM_TO_JSID(atom), prop, &attrs);
+ }
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+ }
+ return ok;
+}
+#endif
+
+#ifdef TEST_CVTARGS
+#include <ctype.h>
+
+static const char *
+EscapeWideString(jschar *w)
+{
+ static char enuf[80];
+ static char hex[] = "0123456789abcdef";
+ jschar u;
+ unsigned char b, c;
+ int i, j;
+
+ if (!w)
+ return "";
+ for (i = j = 0; i < sizeof enuf - 1; i++, j++) {
+ u = w[j];
+ if (u == 0)
+ break;
+ b = (unsigned char)(u >> 8);
+ c = (unsigned char)(u);
+ if (b) {
+ if (i >= sizeof enuf - 6)
+ break;
+ enuf[i++] = '\\';
+ enuf[i++] = 'u';
+ enuf[i++] = hex[b >> 4];
+ enuf[i++] = hex[b & 15];
+ enuf[i++] = hex[c >> 4];
+ enuf[i] = hex[c & 15];
+ } else if (!isprint(c)) {
+ if (i >= sizeof enuf - 4)
+ break;
+ enuf[i++] = '\\';
+ enuf[i++] = 'x';
+ enuf[i++] = hex[c >> 4];
+ enuf[i] = hex[c & 15];
+ } else {
+ enuf[i] = (char)c;
+ }
+ }
+ enuf[i] = 0;
+ return enuf;
+}
+
+#include <stdarg.h>
+
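+/*
+ * Custom argument formatter for JS_ConvertArguments: "ZZ" converts a pair of
+ * jsvals to two jsdoubles (and back), letting cvtargs() exercise
+ * JS_AddArgumentFormatter and JS_RemoveArgumentFormatter.
+ */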
+static JSBool
+ZZ_formatter(JSContext *cx, const char *format, JSBool fromJS, jsval **vpp,
+ va_list *app)
+{
+ jsval *vp;
+ va_list ap;
+ jsdouble re, im;
+
+ printf("entering ZZ_formatter");
+ vp = *vpp;
+ ap = *app;
+ if (fromJS) {
+ if (!JS_ValueToNumber(cx, vp[0], &re))
+ return JS_FALSE;
+ if (!JS_ValueToNumber(cx, vp[1], &im))
+ return JS_FALSE;
+ *va_arg(ap, jsdouble *) = re;
+ *va_arg(ap, jsdouble *) = im;
+ } else {
+ re = va_arg(ap, jsdouble);
+ im = va_arg(ap, jsdouble);
+ if (!JS_NewNumberValue(cx, re, &vp[0]))
+ return JS_FALSE;
+ if (!JS_NewNumberValue(cx, im, &vp[1]))
+ return JS_FALSE;
+ }
+ *vpp = vp + 2;
+ *app = ap;
+ printf("leaving ZZ_formatter");
+ return JS_TRUE;
+}
+
+static JSBool
+ConvertArgs(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSBool b = JS_FALSE;
+ jschar c = 0;
+ int32 i = 0, j = 0;
+ uint32 u = 0;
+ jsdouble d = 0, I = 0, re = 0, im = 0;
+ char *s = NULL;
+ JSString *str = NULL;
+ jschar *w = NULL;
+ JSObject *obj2 = NULL;
+ JSFunction *fun = NULL;
+ jsval v = JSVAL_VOID;
+ JSBool ok;
+
+ if (!JS_AddArgumentFormatter(cx, "ZZ", ZZ_formatter))
+ return JS_FALSE;
+ ok = JS_ConvertArguments(cx, argc, argv, "b/ciujdIsSWofvZZ*",
+ &b, &c, &i, &u, &j, &d, &I, &s, &str, &w, &obj2,
+ &fun, &v, &re, &im);
+ JS_RemoveArgumentFormatter(cx, "ZZ");
+ if (!ok)
+ return JS_FALSE;
+ fprintf(gOutFile,
+ "b %u, c %x (%c), i %ld, u %lu, j %ld\n",
+ b, c, (char)c, i, u, j);
+ fprintf(gOutFile,
+ "d %g, I %g, s %s, S %s, W %s, obj %s, fun %s\n"
+ "v %s, re %g, im %g\n",
+ d, I, s, str ? JS_GetStringBytes(str) : "", EscapeWideString(w),
+ JS_GetStringBytes(JS_ValueToString(cx, OBJECT_TO_JSVAL(obj2))),
+ fun ? JS_GetStringBytes(JS_DecompileFunction(cx, fun, 4)) : "",
+ JS_GetStringBytes(JS_ValueToString(cx, v)), re, im);
+ return JS_TRUE;
+}
+#endif
+
+static JSBool
+BuildDate(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ char version[20] = "\n";
+#if JS_VERSION < 150
+ sprintf(version, " for version %d\n", JS_VERSION);
+#endif
+ fprintf(gOutFile, "built on %s at %s%s", __DATE__, __TIME__, version);
+ return JS_TRUE;
+}
+
+static JSBool
+Clear(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ if (argc != 0 && !JS_ValueToObject(cx, argv[0], &obj))
+ return JS_FALSE;
+ JS_ClearScope(cx, obj);
+ return JS_TRUE;
+}
+
+static JSBool
+Intern(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSString *str;
+
+ str = JS_ValueToString(cx, argv[0]);
+ if (!str)
+ return JS_FALSE;
+ if (!JS_InternUCStringN(cx, JS_GetStringChars(str),
+ JS_GetStringLength(str))) {
+ return JS_FALSE;
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+Clone(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSFunction *fun;
+ JSObject *funobj, *parent, *clone;
+
+ fun = JS_ValueToFunction(cx, argv[0]);
+ if (!fun)
+ return JS_FALSE;
+ funobj = JS_GetFunctionObject(fun);
+ if (argc > 1) {
+ if (!JS_ValueToObject(cx, argv[1], &parent))
+ return JS_FALSE;
+ } else {
+ parent = JS_GetParent(cx, funobj);
+ }
+ clone = JS_CloneFunctionObject(cx, funobj, parent);
+ if (!clone)
+ return JS_FALSE;
+ *rval = OBJECT_TO_JSVAL(clone);
+ return JS_TRUE;
+}
+
+static JSBool
+Seal(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSObject *target;
+ JSBool deep = JS_FALSE;
+
+ if (!JS_ConvertArguments(cx, argc, argv, "o/b", &target, &deep))
+ return JS_FALSE;
+ if (!target)
+ return JS_TRUE;
+ return JS_SealObject(cx, target, deep);
+}
+
+static JSBool
+GetPDA(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSObject *vobj, *aobj, *pdobj;
+ JSBool ok;
+ JSPropertyDescArray pda;
+ JSPropertyDesc *pd;
+ uint32 i;
+ jsval v;
+
+ if (!JS_ValueToObject(cx, argv[0], &vobj))
+ return JS_FALSE;
+ if (!vobj)
+ return JS_TRUE;
+
+ aobj = JS_NewArrayObject(cx, 0, NULL);
+ if (!aobj)
+ return JS_FALSE;
+ *rval = OBJECT_TO_JSVAL(aobj);
+
+ ok = JS_GetPropertyDescArray(cx, vobj, &pda);
+ if (!ok)
+ return JS_FALSE;
+ pd = pda.array;
+ for (i = 0; i < pda.length; i++) {
+ pdobj = JS_NewObject(cx, NULL, NULL, NULL);
+ if (!pdobj) {
+ ok = JS_FALSE;
+ break;
+ }
+
+ ok = JS_SetProperty(cx, pdobj, "id", &pd->id) &&
+ JS_SetProperty(cx, pdobj, "value", &pd->value) &&
+ (v = INT_TO_JSVAL(pd->flags),
+ JS_SetProperty(cx, pdobj, "flags", &v)) &&
+ (v = INT_TO_JSVAL(pd->slot),
+ JS_SetProperty(cx, pdobj, "slot", &v)) &&
+ JS_SetProperty(cx, pdobj, "alias", &pd->alias);
+ if (!ok)
+ break;
+
+ v = OBJECT_TO_JSVAL(pdobj);
+ ok = JS_SetElement(cx, aobj, i, &v);
+ if (!ok)
+ break;
+ }
+ JS_PutPropertyDescArray(cx, &pda);
+ return ok;
+}
+
+static JSBool
+GetSLX(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSScript *script;
+
+ script = ValueToScript(cx, argv[0]);
+ if (!script)
+ return JS_FALSE;
+ *rval = INT_TO_JSVAL(js_GetScriptLineExtent(script));
+ return JS_TRUE;
+}
+
+static JSBool
+ToInt32(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ int32 i;
+
+ if (!JS_ValueToInt32(cx, argv[0], &i))
+ return JS_FALSE;
+ return JS_NewNumberValue(cx, i, rval);
+}
+
+static JSBool
+StringsAreUtf8(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ *rval = JS_CStringsAreUTF8() ? JSVAL_TRUE : JSVAL_FALSE;
+ return JS_TRUE;
+}
+
+static const char* badUtf8 = "...\xC0...";
+static const char* bigUtf8 = "...\xFB\xBF\xBF\xBF\xBF...";
+static const jschar badSurrogate[] = { 'A', 'B', 'C', 0xDEEE, 'D', 'E', 0 };
+
+static JSBool
+TestUtf8(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ intN mode = 1;
+ jschar chars[20];
+ size_t charsLength = 5;
+ char bytes[20];
+ size_t bytesLength = 20;
+ if (argc && !JS_ValueToInt32(cx, *argv, &mode))
+ return JS_FALSE;
+
+ /* The following throw errors if compiled with UTF-8. */
+ switch (mode) {
+ /* mode 1: malformed UTF-8 string. */
+ case 1:
+ JS_NewStringCopyZ(cx, badUtf8);
+ break;
+ /* mode 2: big UTF-8 character. */
+ case 2:
+ JS_NewStringCopyZ(cx, bigUtf8);
+ break;
+ /* mode 3: bad surrogate character. */
+ case 3:
+ JS_EncodeCharacters(cx, badSurrogate, 6, bytes, &bytesLength);
+ break;
+ /* mode 4: use a too small buffer. */
+ case 4:
+ JS_DecodeBytes(cx, "1234567890", 10, chars, &charsLength);
+ break;
+ default:
+ JS_ReportError(cx, "invalid mode parameter");
+ return JS_FALSE;
+ }
+ return !JS_IsExceptionPending (cx);
+}
+
+static JSBool
+ThrowError(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JS_ReportError(cx, "This is an error");
+ return JS_FALSE;
+}
+
+#define LAZY_STANDARD_CLASSES
+
+/* A class for easily testing the inner/outer object callbacks. */
+typedef struct ComplexObject {
+ JSBool isInner;
+ JSObject *inner;
+ JSObject *outer;
+} ComplexObject;
+
+static JSObject *
+split_create_outer(JSContext *cx);
+
+static JSObject *
+split_create_inner(JSContext *cx, JSObject *outer);
+
+static ComplexObject *
+split_get_private(JSContext *cx, JSObject *obj);
+
+JS_STATIC_DLL_CALLBACK(JSBool)
+split_addProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ ComplexObject *cpx;
+ jsid asId;
+
+ cpx = split_get_private(cx, obj);
+ if (!cpx)
+ return JS_TRUE;
+ if (!cpx->isInner && cpx->inner) {
+ /* Make sure to define this property on the inner object. */
+ if (!JS_ValueToId(cx, *vp, &asId))
+ return JS_FALSE;
+ return OBJ_DEFINE_PROPERTY(cx, cpx->inner, asId, *vp, NULL, NULL,
+ JSPROP_ENUMERATE, NULL);
+ }
+ return JS_TRUE;
+}
+
+JS_STATIC_DLL_CALLBACK(JSBool)
+split_getProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ ComplexObject *cpx;
+
+ cpx = split_get_private(cx, obj);
+ if (!cpx)
+ return JS_TRUE;
+ if (!cpx->isInner && cpx->inner) {
+ if (JSVAL_IS_STRING(id)) {
+ JSString *str;
+
+ str = JSVAL_TO_STRING(id);
+ return JS_GetUCProperty(cx, cpx->inner, JS_GetStringChars(str),
+ JS_GetStringLength(str), vp);
+ }
+ if (JSVAL_IS_INT(id))
+ return JS_GetElement(cx, cpx->inner, JSVAL_TO_INT(id), vp);
+ return JS_TRUE;
+ }
+
+ return JS_TRUE;
+}
+
+JS_STATIC_DLL_CALLBACK(JSBool)
+split_setProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ ComplexObject *cpx;
+
+ cpx = split_get_private(cx, obj);
+ if (!cpx)
+ return JS_TRUE;
+ if (!cpx->isInner && cpx->inner) {
+ if (JSVAL_IS_STRING(id)) {
+ JSString *str;
+
+ str = JSVAL_TO_STRING(id);
+ return JS_SetUCProperty(cx, cpx->inner, JS_GetStringChars(str),
+ JS_GetStringLength(str), vp);
+ }
+ if (JSVAL_IS_INT(id))
+ return JS_SetElement(cx, cpx->inner, JSVAL_TO_INT(id), vp);
+ return JS_TRUE;
+ }
+
+ return JS_TRUE;
+}
+
+JS_STATIC_DLL_CALLBACK(JSBool)
+split_delProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ ComplexObject *cpx;
+ jsid asId;
+
+ cpx = split_get_private(cx, obj);
+ if (!cpx)
+ return JS_TRUE;
+ if (!cpx->isInner && cpx->inner) {
+ /* Make sure to define this property on the inner object. */
+ if (!JS_ValueToId(cx, *vp, &asId))
+ return JS_FALSE;
+ return OBJ_DELETE_PROPERTY(cx, cpx->inner, asId, vp);
+ }
+ return JS_TRUE;
+}
+
+JS_STATIC_DLL_CALLBACK(JSBool)
+split_enumerate(JSContext *cx, JSObject *obj, JSIterateOp enum_op,
+ jsval *statep, jsid *idp)
+{
+ ComplexObject *cpx;
+ JSObject *iterator;
+
+ switch (enum_op) {
+ case JSENUMERATE_INIT:
+ cpx = JS_GetPrivate(cx, obj);
+
+ if (!cpx->isInner && cpx->inner)
+ obj = cpx->inner;
+
+ iterator = JS_NewPropertyIterator(cx, obj);
+ if (!iterator)
+ return JS_FALSE;
+
+ *statep = OBJECT_TO_JSVAL(iterator);
+ if (idp)
+ *idp = JSVAL_ZERO;
+ break;
+
+ case JSENUMERATE_NEXT:
+ iterator = (JSObject*)JSVAL_TO_OBJECT(*statep);
+ if (!JS_NextProperty(cx, iterator, idp))
+ return JS_FALSE;
+
+ if (*idp != JSVAL_VOID)
+ break;
+ /* Fall through. */
+
+ case JSENUMERATE_DESTROY:
+ /* Let GC at our iterator object. */
+ *statep = JSVAL_NULL;
+ break;
+ }
+
+ return JS_TRUE;
+}
+
+JS_STATIC_DLL_CALLBACK(JSBool)
+split_resolve(JSContext *cx, JSObject *obj, jsval id, uintN flags,
+ JSObject **objp)
+{
+ ComplexObject *cpx;
+
+ cpx = split_get_private(cx, obj);
+ if (!cpx)
+ return JS_TRUE;
+ if (!cpx->isInner && cpx->inner) {
+ jsid asId;
+ JSProperty *prop;
+
+ if (!JS_ValueToId(cx, id, &asId))
+ return JS_FALSE;
+
+ if (!OBJ_LOOKUP_PROPERTY(cx, cpx->inner, asId, objp, &prop))
+ return JS_FALSE;
+ if (prop)
+ OBJ_DROP_PROPERTY(cx, cpx->inner, prop);
+
+ return JS_TRUE;
+ }
+
+#ifdef LAZY_STANDARD_CLASSES
+ if (!(flags & JSRESOLVE_ASSIGNING)) {
+ JSBool resolved;
+
+ if (!JS_ResolveStandardClass(cx, obj, id, &resolved))
+ return JS_FALSE;
+
+ if (resolved) {
+ *objp = obj;
+ return JS_TRUE;
+ }
+ }
+#endif
+
+ /* XXX For additional realism, let's resolve some random property here. */
+ return JS_TRUE;
+}
+
+JS_STATIC_DLL_CALLBACK(void)
+split_finalize(JSContext *cx, JSObject *obj)
+{
+ JS_free(cx, JS_GetPrivate(cx, obj));
+}
+
+JS_STATIC_DLL_CALLBACK(uint32)
+split_mark(JSContext *cx, JSObject *obj, void *arg)
+{
+ ComplexObject *cpx;
+
+ cpx = JS_GetPrivate(cx, obj);
+
+ if (!cpx->isInner && cpx->inner) {
+ /* Mark the inner object. */
+ JS_MarkGCThing(cx, cpx->inner, "ComplexObject.inner", arg);
+ }
+
+ return 0;
+}
+
+JS_STATIC_DLL_CALLBACK(JSObject *)
+split_outerObject(JSContext *cx, JSObject *obj)
+{
+ ComplexObject *cpx;
+
+ cpx = JS_GetPrivate(cx, obj);
+ return cpx->isInner ? cpx->outer : obj;
+}
+
+JS_STATIC_DLL_CALLBACK(JSObject *)
+split_innerObject(JSContext *cx, JSObject *obj)
+{
+ ComplexObject *cpx;
+
+ cpx = JS_GetPrivate(cx, obj);
+ return !cpx->isInner ? cpx->inner : obj;
+}
+
+static JSExtendedClass split_global_class = {
+ {"split_global",
+ JSCLASS_NEW_RESOLVE | JSCLASS_HAS_PRIVATE | JSCLASS_IS_EXTENDED,
+ split_addProperty, split_delProperty,
+ split_getProperty, split_setProperty,
+ (JSEnumerateOp)split_enumerate,
+ (JSResolveOp)split_resolve,
+ JS_ConvertStub, split_finalize,
+ NULL, NULL, NULL, NULL, NULL, NULL,
+ split_mark, NULL},
+ NULL, split_outerObject, split_innerObject,
+ NULL, NULL, NULL, NULL, NULL
+};
+
+JSObject *
+split_create_outer(JSContext *cx)
+{
+ ComplexObject *cpx;
+ JSObject *obj;
+
+ cpx = JS_malloc(cx, sizeof *cpx);
+ if (!cpx)
+ return NULL;
+ cpx->outer = NULL;
+ cpx->inner = NULL;
+ cpx->isInner = JS_FALSE;
+
+ obj = JS_NewObject(cx, &split_global_class.base, NULL, NULL);
+ if (!obj) {
+ JS_free(cx, cpx);
+ return NULL;
+ }
+
+ JS_ASSERT(!JS_GetParent(cx, obj));
+ if (!JS_SetPrivate(cx, obj, cpx)) {
+ JS_free(cx, cpx);
+ return NULL;
+ }
+
+ return obj;
+}
+
+static JSObject *
+split_create_inner(JSContext *cx, JSObject *outer)
+{
+ ComplexObject *cpx, *outercpx;
+ JSObject *obj;
+
+ JS_ASSERT(JS_GET_CLASS(cx, outer) == &split_global_class.base);
+
+ cpx = JS_malloc(cx, sizeof *cpx);
+ if (!cpx)
+ return NULL;
+ cpx->outer = outer;
+ cpx->inner = NULL;
+ cpx->isInner = JS_TRUE;
+
+ obj = JS_NewObject(cx, &split_global_class.base, NULL, NULL);
+ if (!obj || !JS_SetParent(cx, obj, NULL) || !JS_SetPrivate(cx, obj, cpx)) {
+ JS_free(cx, cpx);
+ return NULL;
+ }
+
+ outercpx = JS_GetPrivate(cx, outer);
+ outercpx->inner = obj;
+
+ return obj;
+}
+
+static ComplexObject *
+split_get_private(JSContext *cx, JSObject *obj)
+{
+ do {
+ if (JS_GET_CLASS(cx, obj) == &split_global_class.base)
+ return JS_GetPrivate(cx, obj);
+ obj = JS_GetParent(cx, obj);
+ } while (obj);
+
+ return NULL;
+}
+
+static JSBool
+sandbox_enumerate(JSContext *cx, JSObject *obj)
+{
+ jsval v;
+ JSBool b;
+
+ if (!JS_GetProperty(cx, obj, "lazy", &v) || !JS_ValueToBoolean(cx, v, &b))
+ return JS_FALSE;
+ return !b || JS_EnumerateStandardClasses(cx, obj);
+}
+
+static JSBool
+sandbox_resolve(JSContext *cx, JSObject *obj, jsval id, uintN flags,
+ JSObject **objp)
+{
+ jsval v;
+ JSBool b, resolved;
+
+ if (!JS_GetProperty(cx, obj, "lazy", &v) || !JS_ValueToBoolean(cx, v, &b))
+ return JS_FALSE;
+ if (b && (flags & JSRESOLVE_ASSIGNING) == 0) {
+ if (!JS_ResolveStandardClass(cx, obj, id, &resolved))
+ return JS_FALSE;
+ if (resolved) {
+ *objp = obj;
+ return JS_TRUE;
+ }
+ }
+ *objp = NULL;
+ return JS_TRUE;
+}
+
+static JSClass sandbox_class = {
+ "sandbox",
+ JSCLASS_NEW_RESOLVE,
+ JS_PropertyStub, JS_PropertyStub,
+ JS_PropertyStub, JS_PropertyStub,
+ sandbox_enumerate, (JSResolveOp)sandbox_resolve,
+ JS_ConvertStub, JS_FinalizeStub,
+ JSCLASS_NO_OPTIONAL_MEMBERS
+};
+
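+/*
+ * evalcx(): evaluate a string in a separate context against a sandbox global.
+ * With no sandbox argument, the magic source strings "" and "lazy" skip
+ * evaluation and just return a fresh sandbox, with eagerly or lazily
+ * initialized standard classes respectively.
+ */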
+static JSBool
+EvalInContext(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSString *str;
+ JSObject *sobj;
+ JSContext *scx;
+ const jschar *src;
+ size_t srclen;
+ JSBool lazy, ok;
+ jsval v;
+ JSStackFrame *fp;
+
+ sobj = NULL;
+ if (!JS_ConvertArguments(cx, argc, argv, "S / o", &str, &sobj))
+ return JS_FALSE;
+
+ scx = JS_NewContext(JS_GetRuntime(cx), gStackChunkSize);
+ if (!scx) {
+ JS_ReportOutOfMemory(cx);
+ return JS_FALSE;
+ }
+
+ src = JS_GetStringChars(str);
+ srclen = JS_GetStringLength(str);
+ lazy = JS_FALSE;
+ if (srclen == 4 &&
+ src[0] == 'l' && src[1] == 'a' && src[2] == 'z' && src[3] == 'y') {
+ lazy = JS_TRUE;
+ srclen = 0;
+ }
+
+ if (!sobj) {
+ sobj = JS_NewObject(scx, &sandbox_class, NULL, NULL);
+ if (!sobj || (!lazy && !JS_InitStandardClasses(scx, sobj))) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ v = BOOLEAN_TO_JSVAL(lazy);
+ ok = JS_SetProperty(cx, sobj, "lazy", &v);
+ if (!ok)
+ goto out;
+ }
+
+ if (srclen == 0) {
+ *rval = OBJECT_TO_JSVAL(sobj);
+ ok = JS_TRUE;
+ } else {
+ fp = JS_GetScriptedCaller(cx, NULL);
+ ok = JS_EvaluateUCScript(scx, sobj, src, srclen,
+ fp->script->filename,
+ JS_PCToLineNumber(cx, fp->script, fp->pc),
+ rval);
+ }
+
+out:
+ JS_DestroyContext(scx);
+ return ok;
+}
+
+static JSFunctionSpec shell_functions[] = {
+ {"version", Version, 0,0,0},
+ {"options", Options, 0,0,0},
+ {"load", Load, 1,0,0},
+ {"readline", ReadLine, 0,0,0},
+ {"print", Print, 0,0,0},
+ {"help", Help, 0,0,0},
+ {"quit", Quit, 0,0,0},
+ {"gc", GC, 0,0,0},
+ {"trap", Trap, 3,0,0},
+ {"untrap", Untrap, 2,0,0},
+ {"line2pc", LineToPC, 0,0,0},
+ {"pc2line", PCToLine, 0,0,0},
+ {"stringsAreUtf8", StringsAreUtf8, 0,0,0},
+ {"testUtf8", TestUtf8, 1,0,0},
+ {"throwError", ThrowError, 0,0,0},
+#ifdef DEBUG
+ {"dis", Disassemble, 1,0,0},
+ {"dissrc", DisassWithSrc, 1,0,0},
+ {"notes", Notes, 1,0,0},
+ {"tracing", Tracing, 0,0,0},
+ {"stats", DumpStats, 1,0,0},
+#endif
+#ifdef TEST_EXPORT
+ {"xport", DoExport, 2,0,0},
+#endif
+#ifdef TEST_CVTARGS
+ {"cvtargs", ConvertArgs, 0,0,12},
+#endif
+ {"build", BuildDate, 0,0,0},
+ {"clear", Clear, 0,0,0},
+ {"intern", Intern, 1,0,0},
+ {"clone", Clone, 1,0,0},
+ {"seal", Seal, 1,0,1},
+ {"getpda", GetPDA, 1,0,0},
+ {"getslx", GetSLX, 1,0,0},
+ {"toint32", ToInt32, 1,0,0},
+ {"evalcx", EvalInContext, 1,0,0},
+ {NULL,NULL,0,0,0}
+};
+
+/* NOTE: These must be kept in sync with the above. */
+
+static char *shell_help_messages[] = {
+ "version([number]) Get or set JavaScript version number",
+ "options([option ...]) Get or toggle JavaScript options",
+ "load(['foo.js' ...]) Load files named by string arguments",
+ "readline() Read a single line from stdin",
+ "print([exp ...]) Evaluate and print expressions",
+ "help([name ...]) Display usage and help messages",
+ "quit() Quit the shell",
+ "gc() Run the garbage collector",
+ "trap([fun, [pc,]] exp) Trap bytecode execution",
+ "untrap(fun[, pc]) Remove a trap",
+ "line2pc([fun,] line) Map line number to PC",
+ "pc2line(fun[, pc]) Map PC to line number",
+ "stringsAreUTF8() Check if strings are UTF-8 encoded",
+ "testUTF8(mode) Perform UTF-8 tests (modes are 1 to 4)",
+ "throwError() Throw an error from JS_ReportError",
+#ifdef DEBUG
+ "dis([fun]) Disassemble functions into bytecodes",
+ "dissrc([fun]) Disassemble functions with source lines",
+ "notes([fun]) Show source notes for functions",
+ "tracing([toggle]) Turn tracing on or off",
+ "stats([string ...]) Dump 'arena', 'atom', 'global' stats",
+#endif
+#ifdef TEST_EXPORT
+ "xport(obj, id) Export identified property from object",
+#endif
+#ifdef TEST_CVTARGS
+ "cvtargs(b, c, ...) Test JS_ConvertArguments",
+#endif
+ "build() Show build date and time",
+ "clear([obj]) Clear properties of object",
+ "intern(str) Internalize str in the atom table",
+ "clone(fun[, scope]) Clone function object",
+ "seal(obj[, deep]) Seal object, or object graph if deep",
+ "getpda(obj) Get the property descriptors for obj",
+ "getslx(obj) Get script line extent",
+ "toint32(n) Testing hook for JS_ValueToInt32",
+ "evalcx(s[, o]) Evaluate s in optional sandbox object o\n"
+ " if (s == '' && !o) return new o with eager standard classes\n"
+ " if (s == 'lazy' && !o) return new o with lazy standard classes",
+ 0
+};
+
+static void
+ShowHelpHeader(void)
+{
+ fprintf(gOutFile, "%-14s %-22s %s\n", "Command", "Usage", "Description");
+ fprintf(gOutFile, "%-14s %-22s %s\n", "=======", "=====", "===========");
+}
+
+static void
+ShowHelpForCommand(uintN n)
+{
+ fprintf(gOutFile, "%-14.14s %s\n", shell_functions[n].name, shell_help_messages[n]);
+}
+
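+/*
+ * Build a split global for exercising the inner/outer object hooks: create an
+ * outer/inner pair, make the outer the global, define the shell functions and
+ * a dummy |arguments| array on the inner object, and return the inner object.
+ */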
+static JSObject *
+split_setup(JSContext *cx)
+{
+ JSObject *outer, *inner, *arguments;
+
+ outer = split_create_outer(cx);
+ if (!outer)
+ return NULL;
+ JS_SetGlobalObject(cx, outer);
+
+ inner = split_create_inner(cx, outer);
+ if (!inner)
+ return NULL;
+
+ if (!JS_DefineFunctions(cx, inner, shell_functions))
+ return NULL;
+ JS_ClearScope(cx, outer);
+
+ /* Create a dummy arguments object. */
+ arguments = JS_NewArrayObject(cx, 0, NULL);
+ if (!arguments ||
+ !JS_DefineProperty(cx, inner, "arguments", OBJECT_TO_JSVAL(arguments),
+ NULL, NULL, 0)) {
+ return NULL;
+ }
+
+#ifndef LAZY_STANDARD_CLASSES
+ if (!JS_InitStandardClasses(cx, inner))
+ return NULL;
+#endif
+
+ return inner;
+}
+
+static JSBool
+Help(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ uintN i, j;
+ int did_header, did_something;
+ JSType type;
+ JSFunction *fun;
+ JSString *str;
+ const char *bytes;
+
+ fprintf(gOutFile, "%s\n", JS_GetImplementationVersion());
+ if (argc == 0) {
+ ShowHelpHeader();
+ for (i = 0; shell_functions[i].name; i++)
+ ShowHelpForCommand(i);
+ } else {
+ did_header = 0;
+ for (i = 0; i < argc; i++) {
+ did_something = 0;
+ type = JS_TypeOfValue(cx, argv[i]);
+ if (type == JSTYPE_FUNCTION) {
+ fun = JS_ValueToFunction(cx, argv[i]);
+ str = fun->atom ? ATOM_TO_STRING(fun->atom) : NULL;
+ } else if (type == JSTYPE_STRING) {
+ str = JSVAL_TO_STRING(argv[i]);
+ } else {
+ str = NULL;
+ }
+ if (str) {
+ bytes = JS_GetStringBytes(str);
+ for (j = 0; shell_functions[j].name; j++) {
+ if (!strcmp(bytes, shell_functions[j].name)) {
+ if (!did_header) {
+ did_header = 1;
+ ShowHelpHeader();
+ }
+ did_something = 1;
+ ShowHelpForCommand(j);
+ break;
+ }
+ }
+ }
+ if (!did_something) {
+ str = JS_ValueToString(cx, argv[i]);
+ if (!str)
+ return JS_FALSE;
+ fprintf(gErrFile, "Sorry, no help for %s\n",
+ JS_GetStringBytes(str));
+ }
+ }
+ }
+ return JS_TRUE;
+}
+
+/*
+ * Define a JS object called "it". Give it class operations that printf why
+ * they're being called for tutorial purposes.
+ */
+enum its_tinyid {
+ ITS_COLOR, ITS_HEIGHT, ITS_WIDTH, ITS_FUNNY, ITS_ARRAY, ITS_RDONLY
+};
+
+static JSPropertySpec its_props[] = {
+ {"color", ITS_COLOR, JSPROP_ENUMERATE, NULL, NULL},
+ {"height", ITS_HEIGHT, JSPROP_ENUMERATE, NULL, NULL},
+ {"width", ITS_WIDTH, JSPROP_ENUMERATE, NULL, NULL},
+ {"funny", ITS_FUNNY, JSPROP_ENUMERATE, NULL, NULL},
+ {"array", ITS_ARRAY, JSPROP_ENUMERATE, NULL, NULL},
+ {"rdonly", ITS_RDONLY, JSPROP_READONLY, NULL, NULL},
+ {NULL,0,0,NULL,NULL}
+};
+
+static JSBool
+its_item(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ *rval = OBJECT_TO_JSVAL(obj);
+ if (argc != 0)
+ JS_SetCallReturnValue2(cx, argv[0]);
+ return JS_TRUE;
+}
+
+static JSBool
+its_bindMethod(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ char *name;
+ JSObject *method;
+
+ if (!JS_ConvertArguments(cx, argc, argv, "so", &name, &method))
+ return JS_FALSE;
+
+ *rval = OBJECT_TO_JSVAL(method);
+
+ if (JS_TypeOfValue(cx, *rval) != JSTYPE_FUNCTION) {
+ JSString *valstr = JS_ValueToString(cx, *rval);
+ if (valstr) {
+ JS_ReportError(cx, "can't bind method %s to non-callable object %s",
+ name, JS_GetStringBytes(valstr));
+ }
+ return JS_FALSE;
+ }
+
+ if (!JS_DefineProperty(cx, obj, name, *rval, NULL, NULL, JSPROP_ENUMERATE))
+ return JS_FALSE;
+
+ return JS_SetParent(cx, method, obj);
+}
+
+static JSFunctionSpec its_methods[] = {
+ {"item", its_item, 0,0,0},
+ {"bindMethod", its_bindMethod, 2,0,0},
+ {NULL,NULL,0,0,0}
+};
+
+#ifdef JSD_LOWLEVEL_SOURCE
+/*
+ * This facilitates sending source to JSD (the debugger system) in the shell
+ * where the source is loaded using the JSFILE hack in jsscan. The function
+ * below is used as a callback for the jsdbgapi JS_SetSourceHandler hook.
+ * A more normal embedding (e.g. mozilla) loads source itself and can send
+ * source directly to JSD without using this hook scheme.
+ */
+static void
+SendSourceToJSDebugger(const char *filename, uintN lineno,
+ jschar *str, size_t length,
+ void **listenerTSData, JSDContext* jsdc)
+{
+ JSDSourceText *jsdsrc = (JSDSourceText *) *listenerTSData;
+
+ if (!jsdsrc) {
+ if (!filename)
+ filename = "typein";
+ if (1 == lineno) {
+ jsdsrc = JSD_NewSourceText(jsdc, filename);
+ } else {
+ jsdsrc = JSD_FindSourceForURL(jsdc, filename);
+ if (jsdsrc && JSD_SOURCE_PARTIAL !=
+ JSD_GetSourceStatus(jsdc, jsdsrc)) {
+ jsdsrc = NULL;
+ }
+ }
+ }
+ if (jsdsrc) {
+ jsdsrc = JSD_AppendUCSourceText(jsdc,jsdsrc, str, length,
+ JSD_SOURCE_PARTIAL);
+ }
+ *listenerTSData = jsdsrc;
+}
+#endif /* JSD_LOWLEVEL_SOURCE */
+
+static JSBool its_noisy; /* whether to be noisy when finalizing it */
+
+static JSBool
+its_addProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ if (its_noisy) {
+ fprintf(gOutFile, "adding its property %s,",
+ JS_GetStringBytes(JS_ValueToString(cx, id)));
+ fprintf(gOutFile, " initial value %s\n",
+ JS_GetStringBytes(JS_ValueToString(cx, *vp)));
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+its_delProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ if (its_noisy) {
+ fprintf(gOutFile, "deleting its property %s,",
+ JS_GetStringBytes(JS_ValueToString(cx, id)));
+ fprintf(gOutFile, " current value %s\n",
+ JS_GetStringBytes(JS_ValueToString(cx, *vp)));
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+its_getProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ if (its_noisy) {
+ fprintf(gOutFile, "getting its property %s,",
+ JS_GetStringBytes(JS_ValueToString(cx, id)));
+ fprintf(gOutFile, " current value %s\n",
+ JS_GetStringBytes(JS_ValueToString(cx, *vp)));
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+its_setProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ if (its_noisy) {
+ fprintf(gOutFile, "setting its property %s,",
+ JS_GetStringBytes(JS_ValueToString(cx, id)));
+ fprintf(gOutFile, " new value %s\n",
+ JS_GetStringBytes(JS_ValueToString(cx, *vp)));
+ }
+ if (JSVAL_IS_STRING(id) &&
+ !strcmp(JS_GetStringBytes(JSVAL_TO_STRING(id)), "noisy")) {
+ return JS_ValueToBoolean(cx, *vp, &its_noisy);
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+its_enumerate(JSContext *cx, JSObject *obj)
+{
+ if (its_noisy)
+ fprintf(gOutFile, "enumerate its properties\n");
+ return JS_TRUE;
+}
+
+static JSBool
+its_resolve(JSContext *cx, JSObject *obj, jsval id, uintN flags,
+ JSObject **objp)
+{
+ if (its_noisy) {
+ fprintf(gOutFile, "resolving its property %s, flags {%s,%s,%s}\n",
+ JS_GetStringBytes(JS_ValueToString(cx, id)),
+ (flags & JSRESOLVE_QUALIFIED) ? "qualified" : "",
+ (flags & JSRESOLVE_ASSIGNING) ? "assigning" : "",
+ (flags & JSRESOLVE_DETECTING) ? "detecting" : "");
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+its_convert(JSContext *cx, JSObject *obj, JSType type, jsval *vp)
+{
+ if (its_noisy)
+ fprintf(gOutFile, "converting it to %s type\n", JS_GetTypeName(cx, type));
+ return JS_TRUE;
+}
+
+static void
+its_finalize(JSContext *cx, JSObject *obj)
+{
+ if (its_noisy)
+ fprintf(gOutFile, "finalizing it\n");
+}
+
+static JSClass its_class = {
+ "It", JSCLASS_NEW_RESOLVE,
+ its_addProperty, its_delProperty, its_getProperty, its_setProperty,
+ its_enumerate, (JSResolveOp)its_resolve,
+ its_convert, its_finalize,
+ JSCLASS_NO_OPTIONAL_MEMBERS
+};
+
+JSErrorFormatString jsShell_ErrorFormatString[JSErr_Limit] = {
+#define MSG_DEF(name, number, count, exception, format) \
+ { format, count, JSEXN_ERR } ,
+#include "jsshell.msg"
+#undef MSG_DEF
+};
+
+static const JSErrorFormatString *
+my_GetErrorMessage(void *userRef, const char *locale, const uintN errorNumber)
+{
+ if ((errorNumber > 0) && (errorNumber < JSShellErr_Limit))
+ return &jsShell_ErrorFormatString[errorNumber];
+ return NULL;
+}
+
+static void
+my_ErrorReporter(JSContext *cx, const char *message, JSErrorReport *report)
+{
+ int i, j, k, n;
+ char *prefix, *tmp;
+ const char *ctmp;
+
+ if (!report) {
+ fprintf(gErrFile, "%s\n", message);
+ return;
+ }
+
+ /* Conditionally ignore reported warnings. */
+ if (JSREPORT_IS_WARNING(report->flags) && !reportWarnings)
+ return;
+
+ prefix = NULL;
+ if (report->filename)
+ prefix = JS_smprintf("%s:", report->filename);
+ if (report->lineno) {
+ tmp = prefix;
+ prefix = JS_smprintf("%s%u: ", tmp ? tmp : "", report->lineno);
+ JS_free(cx, tmp);
+ }
+ if (JSREPORT_IS_WARNING(report->flags)) {
+ tmp = prefix;
+ prefix = JS_smprintf("%s%swarning: ",
+ tmp ? tmp : "",
+ JSREPORT_IS_STRICT(report->flags) ? "strict " : "");
+ JS_free(cx, tmp);
+ }
+
+ /* embedded newlines -- argh! */
+ while ((ctmp = strchr(message, '\n')) != 0) {
+ ctmp++;
+ if (prefix)
+ fputs(prefix, gErrFile);
+ fwrite(message, 1, ctmp - message, gErrFile);
+ message = ctmp;
+ }
+
+ /* If there were no filename or lineno, the prefix might be empty */
+ if (prefix)
+ fputs(prefix, gErrFile);
+ fputs(message, gErrFile);
+
+ if (!report->linebuf) {
+ fputc('\n', gErrFile);
+ goto out;
+ }
+
+ /* report->linebuf usually ends with a newline. */
+ n = strlen(report->linebuf);
+ fprintf(gErrFile, ":\n%s%s%s%s",
+ prefix,
+ report->linebuf,
+ (n > 0 && report->linebuf[n-1] == '\n') ? "" : "\n",
+ prefix);
+ n = PTRDIFF(report->tokenptr, report->linebuf, char);
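+ /* Pad up to the offending token, expanding tabs to 8-column stops, then print a caret under it. */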
+ for (i = j = 0; i < n; i++) {
+ if (report->linebuf[i] == '\t') {
+ for (k = (j + 8) & ~7; j < k; j++) {
+ fputc('.', gErrFile);
+ }
+ continue;
+ }
+ fputc('.', gErrFile);
+ j++;
+ }
+ fputs("^\n", gErrFile);
+ out:
+ if (!JSREPORT_IS_WARNING(report->flags)) {
+ if (report->errorNumber == JSMSG_OUT_OF_MEMORY) {
+ gExitCode = EXITCODE_OUT_OF_MEMORY;
+ } else {
+ gExitCode = EXITCODE_RUNTIME_ERROR;
+ }
+ }
+ JS_free(cx, prefix);
+}
+
+#if defined(SHELL_HACK) && defined(DEBUG) && defined(XP_UNIX)
+static JSBool
+Exec(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSFunction *fun;
+ const char *name, **nargv;
+ uintN i, nargc;
+ JSString *str;
+ pid_t pid;
+ int status;
+
+ fun = JS_ValueToFunction(cx, argv[-2]);
+ if (!fun)
+ return JS_FALSE;
+ if (!fun->atom)
+ return JS_TRUE;
+ name = JS_GetStringBytes(ATOM_TO_STRING(fun->atom));
+ nargc = 1 + argc;
+ nargv = JS_malloc(cx, (nargc + 1) * sizeof(char *));
+ if (!nargv)
+ return JS_FALSE;
+ nargv[0] = name;
+ for (i = 1; i < nargc; i++) {
+ str = JS_ValueToString(cx, argv[i-1]);
+ if (!str) {
+ JS_free(cx, nargv);
+ return JS_FALSE;
+ }
+ nargv[i] = JS_GetStringBytes(str);
+ }
+ nargv[nargc] = 0;
+ pid = fork();
+ switch (pid) {
+ case -1:
+ perror("js");
+ break;
+ case 0:
+ (void) execvp(name, (char **)nargv);
+ perror("js");
+ exit(127);
+ default:
+ while (waitpid(pid, &status, 0) < 0 && errno == EINTR)
+ continue;
+ break;
+ }
+ JS_free(cx, nargv);
+ return JS_TRUE;
+}
+#endif
+
+static JSBool
+global_enumerate(JSContext *cx, JSObject *obj)
+{
+#ifdef LAZY_STANDARD_CLASSES
+ return JS_EnumerateStandardClasses(cx, obj);
+#else
+ return JS_TRUE;
+#endif
+}
+
+static JSBool
+global_resolve(JSContext *cx, JSObject *obj, jsval id, uintN flags,
+ JSObject **objp)
+{
+#ifdef LAZY_STANDARD_CLASSES
+ JSBool resolved;
+
+ if (!JS_ResolveStandardClass(cx, obj, id, &resolved))
+ return JS_FALSE;
+ if (resolved) {
+ *objp = obj;
+ return JS_TRUE;
+ }
+#endif
+
+#if defined(SHELL_HACK) && defined(DEBUG) && defined(XP_UNIX)
+ if ((flags & (JSRESOLVE_QUALIFIED | JSRESOLVE_ASSIGNING)) == 0) {
+ /*
+ * Do this expensive hack only for unoptimized Unix builds, which are
+ * not used for benchmarking.
+ */
+ char *path, *comp, *full;
+ const char *name;
+ JSBool ok, found;
+ JSFunction *fun;
+
+ if (!JSVAL_IS_STRING(id))
+ return JS_TRUE;
+ path = getenv("PATH");
+ if (!path)
+ return JS_TRUE;
+ path = JS_strdup(cx, path);
+ if (!path)
+ return JS_FALSE;
+ name = JS_GetStringBytes(JSVAL_TO_STRING(id));
+ ok = JS_TRUE;
+ for (comp = strtok(path, ":"); comp; comp = strtok(NULL, ":")) {
+ if (*comp != '\0') {
+ full = JS_smprintf("%s/%s", comp, name);
+ if (!full) {
+ JS_ReportOutOfMemory(cx);
+ ok = JS_FALSE;
+ break;
+ }
+ } else {
+ full = (char *)name;
+ }
+ found = (access(full, X_OK) == 0);
+ if (*comp != '\0')
+ free(full);
+ if (found) {
+ fun = JS_DefineFunction(cx, obj, name, Exec, 0,
+ JSPROP_ENUMERATE);
+ ok = (fun != NULL);
+ if (ok)
+ *objp = obj;
+ break;
+ }
+ }
+ JS_free(cx, path);
+ return ok;
+ }
+#else
+ return JS_TRUE;
+#endif
+}
+
+JSClass global_class = {
+ "global", JSCLASS_NEW_RESOLVE | JSCLASS_GLOBAL_FLAGS,
+ JS_PropertyStub, JS_PropertyStub,
+ JS_PropertyStub, JS_PropertyStub,
+ global_enumerate, (JSResolveOp) global_resolve,
+ JS_ConvertStub, JS_FinalizeStub,
+ JSCLASS_NO_OPTIONAL_MEMBERS
+};
+
+static JSBool
+env_setProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+/* XXX porting may be easy, but these don't seem to supply setenv by default */
+#if !defined XP_BEOS && !defined XP_OS2 && !defined SOLARIS
+ JSString *idstr, *valstr;
+ const char *name, *value;
+ int rv;
+
+ idstr = JS_ValueToString(cx, id);
+ valstr = JS_ValueToString(cx, *vp);
+ if (!idstr || !valstr)
+ return JS_FALSE;
+ name = JS_GetStringBytes(idstr);
+ value = JS_GetStringBytes(valstr);
+#if defined XP_WIN || defined HPUX || defined OSF1 || defined IRIX
+ {
+ char *waste = JS_smprintf("%s=%s", name, value);
+ if (!waste) {
+ JS_ReportOutOfMemory(cx);
+ return JS_FALSE;
+ }
+ rv = putenv(waste);
+#ifdef XP_WIN
+ /*
+ * HPUX9 at least still has the bad old non-copying putenv.
+ *
+ * Per mail from <s.shanmuganathan@digital.com>, OSF1 also has a putenv
+ * that will crash if you pass it an auto char array (so it must place
+ * its argument directly in the char *environ[] array).
+ */
+ free(waste);
+#endif
+ }
+#else
+ rv = setenv(name, value, 1);
+#endif
+ if (rv < 0) {
+ JS_ReportError(cx, "can't set envariable %s to %s", name, value);
+ return JS_FALSE;
+ }
+ *vp = STRING_TO_JSVAL(valstr);
+#endif /* !defined XP_BEOS && !defined XP_OS2 && !defined SOLARIS */
+ return JS_TRUE;
+}
+
+static JSBool
+env_enumerate(JSContext *cx, JSObject *obj)
+{
+ static JSBool reflected;
+ char **evp, *name, *value;
+ JSString *valstr;
+ JSBool ok;
+
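+ /* Reflect the process environment (envp, saved as this object's private data in main) into JS properties only once. */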
+ if (reflected)
+ return JS_TRUE;
+
+ for (evp = (char **)JS_GetPrivate(cx, obj); (name = *evp) != NULL; evp++) {
+ value = strchr(name, '=');
+ if (!value)
+ continue;
+ *value++ = '\0';
+ valstr = JS_NewStringCopyZ(cx, value);
+ if (!valstr) {
+ ok = JS_FALSE;
+ } else {
+ ok = JS_DefineProperty(cx, obj, name, STRING_TO_JSVAL(valstr),
+ NULL, NULL, JSPROP_ENUMERATE);
+ }
+ value[-1] = '=';
+ if (!ok)
+ return JS_FALSE;
+ }
+
+ reflected = JS_TRUE;
+ return JS_TRUE;
+}
+
+static JSBool
+env_resolve(JSContext *cx, JSObject *obj, jsval id, uintN flags,
+ JSObject **objp)
+{
+ JSString *idstr, *valstr;
+ const char *name, *value;
+
+ if (flags & JSRESOLVE_ASSIGNING)
+ return JS_TRUE;
+
+ idstr = JS_ValueToString(cx, id);
+ if (!idstr)
+ return JS_FALSE;
+ name = JS_GetStringBytes(idstr);
+ value = getenv(name);
+ if (value) {
+ valstr = JS_NewStringCopyZ(cx, value);
+ if (!valstr)
+ return JS_FALSE;
+ if (!JS_DefineProperty(cx, obj, name, STRING_TO_JSVAL(valstr),
+ NULL, NULL, JSPROP_ENUMERATE)) {
+ return JS_FALSE;
+ }
+ *objp = obj;
+ }
+ return JS_TRUE;
+}
+
+static JSClass env_class = {
+ "environment", JSCLASS_HAS_PRIVATE | JSCLASS_NEW_RESOLVE,
+ JS_PropertyStub, JS_PropertyStub,
+ JS_PropertyStub, env_setProperty,
+ env_enumerate, (JSResolveOp) env_resolve,
+ JS_ConvertStub, JS_FinalizeStub,
+ JSCLASS_NO_OPTIONAL_MEMBERS
+};
+
+#ifdef NARCISSUS
+
+static JSBool
+defineProperty(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSString *str;
+ jsval value;
+ JSBool dontDelete, readOnly, dontEnum;
+ const jschar *chars;
+ size_t length;
+ uintN attrs;
+
+ dontDelete = readOnly = dontEnum = JS_FALSE;
+ if (!JS_ConvertArguments(cx, argc, argv, "Sv/bbb",
+ &str, &value, &dontDelete, &readOnly, &dontEnum)) {
+ return JS_FALSE;
+ }
+ chars = JS_GetStringChars(str);
+ length = JS_GetStringLength(str);
+ attrs = dontEnum ? 0 : JSPROP_ENUMERATE;
+ if (dontDelete)
+ attrs |= JSPROP_PERMANENT;
+ if (readOnly)
+ attrs |= JSPROP_READONLY;
+ return JS_DefineUCProperty(cx, obj, chars, length, value, NULL, NULL,
+ attrs);
+}
+
+static JSBool
+Evaluate(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ /* function evaluate(source, filename, lineno) { ... } */
+ JSString *source;
+ const char *filename = "";
+ jsuint lineno = 0;
+ uint32 oldopts;
+ JSBool ok;
+
+ if (argc == 0) {
+ *rval = JSVAL_VOID;
+ return JS_TRUE;
+ }
+
+ if (!JS_ConvertArguments(cx, argc, argv, "S/su",
+ &source, &filename, &lineno)) {
+ return JS_FALSE;
+ }
+
+ oldopts = JS_GetOptions(cx);
+ JS_SetOptions(cx, oldopts | JSOPTION_COMPILE_N_GO);
+ ok = JS_EvaluateUCScript(cx, obj, JS_GetStringChars(source),
+ JS_GetStringLength(source), filename,
+ lineno, rval);
+ JS_SetOptions(cx, oldopts);
+
+ return ok;
+}
+
+#include <fcntl.h>
+#include <sys/stat.h>
+
+/*
+ * Returns a JS_malloc'd string (that the caller needs to JS_free)
+ * containing the directory (non-leaf) part of |from| prepended to |leaf|.
+ * If |from| is empty or a leaf, MakeAbsolutePathname returns a copy of leaf.
+ * Returns NULL to indicate an error.
+ */
+static char *
+MakeAbsolutePathname(JSContext *cx, const char *from, const char *leaf)
+{
+ size_t dirlen;
+ char *dir;
+ const char *slash = NULL, *cp;
+
+ cp = from;
+ while (*cp) {
+ if (*cp == '/'
+#ifdef XP_WIN
+ || *cp == '\\'
+#endif
+ ) {
+ slash = cp;
+ }
+
+ ++cp;
+ }
+
+ if (!slash) {
+ /* We were given a leaf or |from| was empty. */
+ return JS_strdup(cx, leaf);
+ }
+
+ /* Else, we were given a real pathname, return that + the leaf. */
+ dirlen = slash - from + 1;
+ dir = JS_malloc(cx, dirlen + strlen(leaf) + 1);
+ if (!dir)
+ return NULL;
+
+ strncpy(dir, from, dirlen);
+ strcpy(dir + dirlen, leaf); /* Note: we can't use strcat here. */
+
+ return dir;
+}
+
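+/*
+ * snarf(filename): read a file, resolved relative to the directory of the
+ * currently executing script, and return its contents as a string.
+ */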
+static JSBool
+snarf(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSString *str;
+ const char *filename;
+ char *pathname;
+ JSStackFrame *fp;
+ JSBool ok;
+ off_t cc, len;
+ char *buf;
+ FILE *file;
+
+ str = JS_ValueToString(cx, argv[0]);
+ if (!str)
+ return JS_FALSE;
+ filename = JS_GetStringBytes(str);
+
+ /* Get the currently executing script's name. */
+ fp = JS_GetScriptedCaller(cx, NULL);
+ JS_ASSERT(fp && fp->script->filename);
+ pathname = MakeAbsolutePathname(cx, fp->script->filename, filename);
+ if (!pathname)
+ return JS_FALSE;
+
+ ok = JS_FALSE;
+ len = 0;
+ buf = NULL;
+ file = fopen(pathname, "rb");
+ if (!file) {
+ JS_ReportError(cx, "can't open %s: %s", pathname, strerror(errno));
+ } else {
+ if (fseek(file, 0, SEEK_END) == EOF) {
+ JS_ReportError(cx, "can't seek end of %s", pathname);
+ } else {
+ len = ftell(file);
+ if (fseek(file, 0, SEEK_SET) == EOF) {
+ JS_ReportError(cx, "can't seek start of %s", pathname);
+ } else {
+ buf = JS_malloc(cx, len + 1);
+ if (buf) {
+ cc = fread(buf, 1, len, file);
+ if (cc != len) {
+ JS_free(cx, buf);
+ JS_ReportError(cx, "can't read %s: %s", pathname,
+ (cc < 0) ? strerror(errno)
+ : "short read");
+ } else {
+ len = (size_t)cc;
+ ok = JS_TRUE;
+ }
+ }
+ }
+ }
+ fclose(file);
+ }
+ JS_free(cx, pathname);
+ if (!ok) {
+ JS_free(cx, buf);
+ return ok;
+ }
+
+ buf[len] = '\0';
+ str = JS_NewString(cx, buf, len);
+ if (!str) {
+ JS_free(cx, buf);
+ return JS_FALSE;
+ }
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+
+#endif /* NARCISSUS */
+
+int
+main(int argc, char **argv, char **envp)
+{
+ int stackDummy;
+ JSRuntime *rt;
+ JSContext *cx;
+ JSObject *glob, *it, *envobj;
+ int result;
+#ifdef LIVECONNECT
+ JavaVM *java_vm = NULL;
+#endif
+#ifdef JSDEBUGGER_JAVA_UI
+ JNIEnv *java_env;
+#endif
+
+ gStackBase = (jsuword)&stackDummy;
+
+ setlocale(LC_ALL, "");
+
+#ifdef XP_OS2
+ /* these streams are normally line buffered on OS/2 and need a \n, *
+ * so we need to unbuffer them to get a reasonable prompt */
+ setbuf(stdout,0);
+ setbuf(stderr,0);
+#endif
+
+ gErrFile = stderr;
+ gOutFile = stdout;
+
+ argc--;
+ argv++;
+
+ rt = JS_NewRuntime(64L * 1024L * 1024L);
+ if (!rt)
+ return 1;
+
+ cx = JS_NewContext(rt, gStackChunkSize);
+ if (!cx)
+ return 1;
+ JS_SetErrorReporter(cx, my_ErrorReporter);
+
+#ifdef JS_THREADSAFE
+ JS_BeginRequest(cx);
+#endif
+
+ glob = JS_NewObject(cx, &global_class, NULL, NULL);
+ if (!glob)
+ return 1;
+#ifdef LAZY_STANDARD_CLASSES
+ JS_SetGlobalObject(cx, glob);
+#else
+ if (!JS_InitStandardClasses(cx, glob))
+ return 1;
+#endif
+ if (!JS_DefineFunctions(cx, glob, shell_functions))
+ return 1;
+
+ it = JS_DefineObject(cx, glob, "it", &its_class, NULL, 0);
+ if (!it)
+ return 1;
+ if (!JS_DefineProperties(cx, it, its_props))
+ return 1;
+ if (!JS_DefineFunctions(cx, it, its_methods))
+ return 1;
+
+#ifdef PERLCONNECT
+ if (!JS_InitPerlClass(cx, glob))
+ return 1;
+#endif
+
+#ifdef JSDEBUGGER
+ /*
+ * XXX A command line option to enable debugging (or not) would be good
+ */
+ _jsdc = JSD_DebuggerOnForUser(rt, NULL, NULL);
+ if (!_jsdc)
+ return 1;
+ JSD_JSContextInUse(_jsdc, cx);
+#ifdef JSD_LOWLEVEL_SOURCE
+ JS_SetSourceHandler(rt, SendSourceToJSDebugger, _jsdc);
+#endif /* JSD_LOWLEVEL_SOURCE */
+#ifdef JSDEBUGGER_JAVA_UI
+ _jsdjc = JSDJ_CreateContext();
+ if (! _jsdjc)
+ return 1;
+ JSDJ_SetJSDContext(_jsdjc, _jsdc);
+ java_env = JSDJ_CreateJavaVMAndStartDebugger(_jsdjc);
+#ifdef LIVECONNECT
+ if (java_env)
+ (*java_env)->GetJavaVM(java_env, &java_vm);
+#endif
+ /*
+ * XXX This would be the place to wait for the debugger to start.
+ * Waiting would be nice in general, but especially when a js file
+ * is passed on the cmd line.
+ */
+#endif /* JSDEBUGGER_JAVA_UI */
+#ifdef JSDEBUGGER_C_UI
+ JSDB_InitDebugger(rt, _jsdc, 0);
+#endif /* JSDEBUGGER_C_UI */
+#endif /* JSDEBUGGER */
+
+#ifdef LIVECONNECT
+ if (!JSJ_SimpleInit(cx, glob, java_vm, getenv("CLASSPATH")))
+ return 1;
+#endif
+
+ envobj = JS_DefineObject(cx, glob, "environment", &env_class, NULL, 0);
+ if (!envobj || !JS_SetPrivate(cx, envobj, envp))
+ return 1;
+
+#ifdef NARCISSUS
+ {
+ jsval v;
+ static const char Object_prototype[] = "Object.prototype";
+
+ if (!JS_DefineFunction(cx, glob, "snarf", snarf, 1, 0))
+ return 1;
+ if (!JS_DefineFunction(cx, glob, "evaluate", Evaluate, 3, 0))
+ return 1;
+
+ if (!JS_EvaluateScript(cx, glob,
+ Object_prototype, sizeof Object_prototype - 1,
+ NULL, 0, &v)) {
+ return 1;
+ }
+ if (!JS_DefineFunction(cx, JSVAL_TO_OBJECT(v), "__defineProperty__",
+ defineProperty, 5, 0)) {
+ return 1;
+ }
+ }
+#endif
+
+ result = ProcessArgs(cx, glob, argv, argc);
+
+#ifdef JSDEBUGGER
+ if (_jsdc)
+ JSD_DebuggerOff(_jsdc);
+#endif /* JSDEBUGGER */
+
+#ifdef JS_THREADSAFE
+ JS_EndRequest(cx);
+#endif
+
+ JS_DestroyContext(cx);
+ JS_DestroyRuntime(rt);
+ JS_ShutDown();
+ return result;
+}
diff --git a/third_party/js-1.7/js.mak b/third_party/js-1.7/js.mak
new file mode 100644
index 0000000..f0f32b8
--- /dev/null
+++ b/third_party/js-1.7/js.mak
@@ -0,0 +1,4344 @@
+# Microsoft Developer Studio Generated NMAKE File, Format Version 4.20
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Console Application" 0x0103
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+# TARGTYPE "Win32 (x86) Static Library" 0x0104
+
+!IF "$(CFG)" == ""
+CFG=jsshell - Win32 Debug
+!MESSAGE No configuration specified. Defaulting to jsshell - Win32 Debug.
+!ENDIF
+
+!IF "$(CFG)" != "js - Win32 Release" && "$(CFG)" != "js - Win32 Debug" &&\
+ "$(CFG)" != "jsshell - Win32 Release" && "$(CFG)" != "jsshell - Win32 Debug" &&\
+ "$(CFG)" != "jskwgen - Win32 Release" && "$(CFG)" != "jskwgen - Win32 Debug" &&\
+ "$(CFG)" != "fdlibm - Win32 Release" && "$(CFG)" != "fdlibm - Win32 Debug"
+!MESSAGE Invalid configuration "$(CFG)" specified.
+!MESSAGE You can specify a configuration when running NMAKE on this makefile
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "js.mak" CFG="jsshell - Win32 Debug"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "js - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "js - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "jsshell - Win32 Release" (based on "Win32 (x86) Console Application")
+!MESSAGE "jsshell - Win32 Debug" (based on "Win32 (x86) Console Application")
+!MESSAGE "jskwgen - Win32 Release" (based on "Win32 (x86) Static Library")
+!MESSAGE "jskwgen - Win32 Debug" (based on "Win32 (x86) Static Library")
+!MESSAGE "fdlibm - Win32 Release" (based on "Win32 (x86) Static Library")
+!MESSAGE "fdlibm - Win32 Debug" (based on "Win32 (x86) Static Library")
+!MESSAGE
+!ERROR An invalid configuration is specified.
+!ENDIF
+
+!IF "$(OS)" == "Windows_NT"
+NULL=
+!ELSE
+NULL=nul
+!ENDIF
+################################################################################
+# Begin Project
+# PROP Target_Last_Scanned "jsshell - Win32 Debug"
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "js___Wi1"
+# PROP BASE Intermediate_Dir "js___Wi1"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Target_Dir ""
+OUTDIR=.\Release
+INTDIR=.\Release
+
+ALL : "fdlibm - Win32 Release" "jskwgen - Win32 Release" "$(OUTDIR)\js32.dll"
+
+CLEAN :
+ -@erase "$(INTDIR)\jsapi.obj"
+ -@erase "$(INTDIR)\jsarena.obj"
+ -@erase "$(INTDIR)\jsarray.obj"
+ -@erase "$(INTDIR)\jsatom.obj"
+ -@erase "$(INTDIR)\jsbool.obj"
+ -@erase "$(INTDIR)\jscntxt.obj"
+ -@erase "$(INTDIR)\jsdate.obj"
+ -@erase "$(INTDIR)\jsdbgapi.obj"
+ -@erase "$(INTDIR)\jsdhash.obj"
+ -@erase "$(INTDIR)\jsdtoa.obj"
+ -@erase "$(INTDIR)\jsemit.obj"
+ -@erase "$(INTDIR)\jsexn.obj"
+ -@erase "$(INTDIR)\jsfun.obj"
+ -@erase "$(INTDIR)\jsgc.obj"
+ -@erase "$(INTDIR)\jshash.obj"
+ -@erase "$(INTDIR)\jsinterp.obj"
+ -@erase "$(INTDIR)\jslock.obj"
+ -@erase "$(INTDIR)\jslog2.obj"
+ -@erase "$(INTDIR)\jslong.obj"
+ -@erase "$(INTDIR)\jsmath.obj"
+ -@erase "$(INTDIR)\jsnum.obj"
+ -@erase "$(INTDIR)\jsobj.obj"
+ -@erase "$(INTDIR)\jsopcode.obj"
+ -@erase "$(INTDIR)\jsparse.obj"
+ -@erase "$(INTDIR)\jsprf.obj"
+ -@erase "$(INTDIR)\jsregexp.obj"
+ -@erase "$(INTDIR)\jsscan.obj"
+ -@erase "$(INTDIR)\jsscope.obj"
+ -@erase "$(INTDIR)\jsscript.obj"
+ -@erase "$(INTDIR)\jsstr.obj"
+ -@erase "$(INTDIR)\jsutil.obj"
+ -@erase "$(INTDIR)\jsxdrapi.obj"
+ -@erase "$(INTDIR)\jsxml.obj"
+ -@erase "$(INTDIR)\prmjtime.obj"
+ -@erase "$(INTDIR)\js.pch"
+ -@erase "$(INTDIR)\jsautokw.h"
+ -@erase "$(OUTDIR)\js32.dll"
+ -@erase "$(OUTDIR)\js32.exp"
+ -@erase "$(OUTDIR)\js32.lib"
+ -@$(MAKE) /nologo /$(MAKEFLAGS) /F ".\js.mak" CFG="fdlibm - Win32 Release" clean
+ -@$(MAKE) /nologo /$(MAKEFLAGS) /F ".\js.mak" CFG="jskwgen - Win32 Release" clean
+
+"$(OUTDIR)" :
+ if not exist "$(OUTDIR)/$(NULL)" mkdir "$(OUTDIR)"
+
+CPP=cl.exe
+# ADD BASE CPP /nologo /MT /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D _X86_=1 /D "_WINDOWS" /YX /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /D "NDEBUG" /D _X86_=1 /D "_WINDOWS" /D "WIN32" /D "XP_WIN" /D "JSFILE" /D "EXPORT_JS_API" /I"$(INTDIR)" /YX /c
+CPP_PROJ=/nologo /MD /W3 /GX /O2 /D "NDEBUG" /D _X86_=1 /D "_WINDOWS" /D "WIN32" /D\
+ "XP_WIN" /D "JSFILE" /D "EXPORT_JS_API" /Fp"$(INTDIR)/js.pch" /I"$(INTDIR)" /YX\
+ /Fo"$(INTDIR)/" /c
+CPP_OBJS=.\Release/
+CPP_SBRS=.\.
+
+.c{$(CPP_OBJS)}.obj:
+ $(CPP) $(CPP_PROJ) $<
+
+.cpp{$(CPP_OBJS)}.obj:
+ $(CPP) $(CPP_PROJ) $<
+
+.cxx{$(CPP_OBJS)}.obj:
+ $(CPP) $(CPP_PROJ) $<
+
+.c{$(CPP_SBRS)}.sbr:
+ $(CPP) $(CPP_PROJ) $<
+
+.cpp{$(CPP_SBRS)}.sbr:
+ $(CPP) $(CPP_PROJ) $<
+
+.cxx{$(CPP_SBRS)}.sbr:
+ $(CPP) $(CPP_PROJ) $<
+
+MTL=mktyplib.exe
+# ADD BASE MTL /nologo /D "NDEBUG" /win32
+# ADD MTL /nologo /D "NDEBUG" /win32
+MTL_PROJ=/nologo /D "NDEBUG" /win32
+RSC=rc.exe
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+BSC32_FLAGS=/nologo /o"$(OUTDIR)/js.bsc"
+BSC32_SBRS= \
+
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:windows /dll /machine:I386
+# ADD LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:windows /dll /machine:I386 /out:"Release/js32.dll"
+# SUBTRACT LINK32 /nodefaultlib
+LINK32_FLAGS=kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib\
+ advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib\
+ odbccp32.lib /nologo /subsystem:windows /dll /incremental:no\
+ /pdb:"$(OUTDIR)/js32.pdb" /machine:I386 /out:"$(OUTDIR)/js32.dll"\
+ /implib:"$(OUTDIR)/js32.lib" /opt:ref /opt:noicf
+LINK32_OBJS= \
+ "$(INTDIR)\jsapi.obj" \
+ "$(INTDIR)\jsarena.obj" \
+ "$(INTDIR)\jsarray.obj" \
+ "$(INTDIR)\jsatom.obj" \
+ "$(INTDIR)\jsbool.obj" \
+ "$(INTDIR)\jscntxt.obj" \
+ "$(INTDIR)\jsdate.obj" \
+ "$(INTDIR)\jsdbgapi.obj" \
+ "$(INTDIR)\jsdhash.obj" \
+ "$(INTDIR)\jsdtoa.obj" \
+ "$(INTDIR)\jsemit.obj" \
+ "$(INTDIR)\jsexn.obj" \
+ "$(INTDIR)\jsfun.obj" \
+ "$(INTDIR)\jsgc.obj" \
+ "$(INTDIR)\jshash.obj" \
+ "$(INTDIR)\jsinterp.obj" \
+ "$(INTDIR)\jslock.obj" \
+ "$(INTDIR)\jslog2.obj" \
+ "$(INTDIR)\jslong.obj" \
+ "$(INTDIR)\jsmath.obj" \
+ "$(INTDIR)\jsnum.obj" \
+ "$(INTDIR)\jsobj.obj" \
+ "$(INTDIR)\jsopcode.obj" \
+ "$(INTDIR)\jsparse.obj" \
+ "$(INTDIR)\jsprf.obj" \
+ "$(INTDIR)\jsregexp.obj" \
+ "$(INTDIR)\jsscan.obj" \
+ "$(INTDIR)\jsscope.obj" \
+ "$(INTDIR)\jsscript.obj" \
+ "$(INTDIR)\jsstr.obj" \
+ "$(INTDIR)\jsutil.obj" \
+ "$(INTDIR)\jsxdrapi.obj" \
+ "$(INTDIR)\jsxml.obj" \
+ "$(INTDIR)\prmjtime.obj" \
+ "$(OUTDIR)\fdlibm.lib"
+
+"$(OUTDIR)\js32.dll" : "$(OUTDIR)" $(DEF_FILE) $(LINK32_OBJS)
+ $(LINK32) @<<
+ $(LINK32_FLAGS) $(LINK32_OBJS)
+<<
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "js___Wi2"
+# PROP BASE Intermediate_Dir "js___Wi2"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Target_Dir ""
+OUTDIR=.\Debug
+INTDIR=.\Debug
+
+ALL : "fdlibm - Win32 Debug" "jskwgen - Win32 Debug" "$(OUTDIR)\js32.dll"
+
+CLEAN :
+ -@erase "$(INTDIR)\jsapi.obj"
+ -@erase "$(INTDIR)\jsarena.obj"
+ -@erase "$(INTDIR)\jsarray.obj"
+ -@erase "$(INTDIR)\jsatom.obj"
+ -@erase "$(INTDIR)\jsbool.obj"
+ -@erase "$(INTDIR)\jscntxt.obj"
+ -@erase "$(INTDIR)\jsdate.obj"
+ -@erase "$(INTDIR)\jsdbgapi.obj"
+ -@erase "$(INTDIR)\jsdhash.obj"
+ -@erase "$(INTDIR)\jsdtoa.obj"
+ -@erase "$(INTDIR)\jsemit.obj"
+ -@erase "$(INTDIR)\jsexn.obj"
+ -@erase "$(INTDIR)\jsfun.obj"
+ -@erase "$(INTDIR)\jsgc.obj"
+ -@erase "$(INTDIR)\jshash.obj"
+ -@erase "$(INTDIR)\jsinterp.obj"
+ -@erase "$(INTDIR)\jslock.obj"
+ -@erase "$(INTDIR)\jslog2.obj"
+ -@erase "$(INTDIR)\jslong.obj"
+ -@erase "$(INTDIR)\jsmath.obj"
+ -@erase "$(INTDIR)\jsnum.obj"
+ -@erase "$(INTDIR)\jsobj.obj"
+ -@erase "$(INTDIR)\jsopcode.obj"
+ -@erase "$(INTDIR)\jsparse.obj"
+ -@erase "$(INTDIR)\jsprf.obj"
+ -@erase "$(INTDIR)\jsregexp.obj"
+ -@erase "$(INTDIR)\jsscan.obj"
+ -@erase "$(INTDIR)\jsscope.obj"
+ -@erase "$(INTDIR)\jsscript.obj"
+ -@erase "$(INTDIR)\jsstr.obj"
+ -@erase "$(INTDIR)\jsutil.obj"
+ -@erase "$(INTDIR)\jsxdrapi.obj"
+ -@erase "$(INTDIR)\jsxml.obj"
+ -@erase "$(INTDIR)\prmjtime.obj"
+ -@erase "$(INTDIR)\js.pch"
+ -@erase "$(INTDIR)\jsautokw.h"
+ -@erase "$(OUTDIR)\js32.dll"
+ -@erase "$(OUTDIR)\js32.exp"
+ -@erase "$(OUTDIR)\js32.ilk"
+ -@erase "$(OUTDIR)\js32.lib"
+ -@erase "$(OUTDIR)\js32.pdb"
+ -@$(MAKE) /nologo /$(MAKEFLAGS) /F ".\js.mak" CFG="fdlibm - Win32 Debug" clean
+ -@$(MAKE) /nologo /$(MAKEFLAGS) /F ".\js.mak" CFG="jskwgen - Win32 Debug" clean
+
+"$(OUTDIR)" :
+ if not exist "$(OUTDIR)/$(NULL)" mkdir "$(OUTDIR)"
+
+CPP=cl.exe
+# ADD BASE CPP /nologo /MTd /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D _X86_=1 /D "_WINDOWS" /YX /c
+# ADD CPP /nologo /MDd /W3 /Gm /GX /Zi /Od /D "_DEBUG" /D "DEBUG" /D _X86_=1 /D "_WINDOWS" /D "WIN32" /D "XP_WIN" /D "JSFILE" /D "EXPORT_JS_API" /I"$(INTDIR)" /YX /c
+CPP_PROJ=/nologo /MDd /W3 /Gm /GX /Zi /Od /D "_DEBUG" /D "DEBUG" /D _X86_=1 /D "_WINDOWS"\
+ /D "WIN32" /D "XP_WIN" /D "JSFILE" /D "EXPORT_JS_API" /Fp"$(INTDIR)/js.pch" /I"$(INTDIR)" /YX\
+ /Fo"$(INTDIR)/" /Fd"$(INTDIR)/" /c
+CPP_OBJS=.\Debug/
+CPP_SBRS=.\.
+
+.c{$(CPP_OBJS)}.obj:
+ $(CPP) $(CPP_PROJ) $<
+
+.cpp{$(CPP_OBJS)}.obj:
+ $(CPP) $(CPP_PROJ) $<
+
+.cxx{$(CPP_OBJS)}.obj:
+ $(CPP) $(CPP_PROJ) $<
+
+.c{$(CPP_SBRS)}.sbr:
+ $(CPP) $(CPP_PROJ) $<
+
+.cpp{$(CPP_SBRS)}.sbr:
+ $(CPP) $(CPP_PROJ) $<
+
+.cxx{$(CPP_SBRS)}.sbr:
+ $(CPP) $(CPP_PROJ) $<
+
+MTL=mktyplib.exe
+# ADD BASE MTL /nologo /D "_DEBUG" /win32
+# ADD MTL /nologo /D "_DEBUG" /win32
+MTL_PROJ=/nologo /D "_DEBUG" /win32
+RSC=rc.exe
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+BSC32_FLAGS=/nologo /o"$(OUTDIR)/js.bsc"
+BSC32_SBRS= \
+
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:windows /dll /debug /machine:I386
+# ADD LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:windows /dll /debug /machine:I386 /out:"Debug/js32.dll"
+# SUBTRACT LINK32 /nodefaultlib
+LINK32_FLAGS=kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib\
+ advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib\
+ odbccp32.lib /nologo /subsystem:windows /dll /incremental:yes\
+ /pdb:"$(OUTDIR)/js32.pdb" /debug /machine:I386 /out:"$(OUTDIR)/js32.dll"\
+ /implib:"$(OUTDIR)/js32.lib"
+LINK32_OBJS= \
+ "$(INTDIR)\jsapi.obj" \
+ "$(INTDIR)\jsarena.obj" \
+ "$(INTDIR)\jsarray.obj" \
+ "$(INTDIR)\jsatom.obj" \
+ "$(INTDIR)\jsbool.obj" \
+ "$(INTDIR)\jscntxt.obj" \
+ "$(INTDIR)\jsdate.obj" \
+ "$(INTDIR)\jsdbgapi.obj" \
+ "$(INTDIR)\jsdhash.obj" \
+ "$(INTDIR)\jsdtoa.obj" \
+ "$(INTDIR)\jsemit.obj" \
+ "$(INTDIR)\jsexn.obj" \
+ "$(INTDIR)\jsfun.obj" \
+ "$(INTDIR)\jsgc.obj" \
+ "$(INTDIR)\jshash.obj" \
+ "$(INTDIR)\jsinterp.obj" \
+ "$(INTDIR)\jslock.obj" \
+ "$(INTDIR)\jslog2.obj" \
+ "$(INTDIR)\jslong.obj" \
+ "$(INTDIR)\jsmath.obj" \
+ "$(INTDIR)\jsnum.obj" \
+ "$(INTDIR)\jsobj.obj" \
+ "$(INTDIR)\jsopcode.obj" \
+ "$(INTDIR)\jsparse.obj" \
+ "$(INTDIR)\jsprf.obj" \
+ "$(INTDIR)\jsregexp.obj" \
+ "$(INTDIR)\jsscan.obj" \
+ "$(INTDIR)\jsscope.obj" \
+ "$(INTDIR)\jsscript.obj" \
+ "$(INTDIR)\jsstr.obj" \
+ "$(INTDIR)\jsutil.obj" \
+ "$(INTDIR)\jsxdrapi.obj" \
+ "$(INTDIR)\jsxml.obj" \
+ "$(INTDIR)\prmjtime.obj" \
+ "$(OUTDIR)\fdlibm.lib"
+
+"$(OUTDIR)\js32.dll" : "$(OUTDIR)" $(DEF_FILE) $(LINK32_OBJS)
+ $(LINK32) @<<
+ $(LINK32_FLAGS) $(LINK32_OBJS)
+<<
+
+!ELSEIF "$(CFG)" == "jskwgen - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "jsshell\Release"
+# PROP BASE Intermediate_Dir "jskwgen\Release"
+# PROP BASE Target_Dir "jskwgen"
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Target_Dir "jskwgen"
+OUTDIR=.\Release
+INTDIR=.\Release
+
+ALL : "$(INTDIR)" "$(INTDIR)\host_jskwgen.exe"
+
+CLEAN :
+ -@erase "$(INTDIR)\jskwgen.obj"
+ -@erase "$(INTDIR)\jskwgen.pch"
+ -@erase "$(INTDIR)\host_jskwgen.exe"
+
+"$(INTDIR)" :
+ if not exist "$(INTDIR)/$(NULL)" mkdir "$(INTDIR)"
+
+CPP=cl.exe
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /YX /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /D "NDEBUG" /D "_CONSOLE" /D "WIN32" /D "XP_WIN" /D "JSFILE" /YX /c
+CPP_PROJ=/nologo /MD /W3 /GX /O2 /D "NDEBUG" /D "_CONSOLE" /D "WIN32" /D\
+ "XP_WIN" /D "JSFILE" /Fp"$(INTDIR)/jskwgen.pch" /YX /Fo"$(INTDIR)/" /c
+CPP_OBJS=.\Release/
+CPP_SBRS=.\.
+
+.c{$(CPP_OBJS)}.obj:
+ $(CPP) $(CPP_PROJ) $<
+
+.cpp{$(CPP_OBJS)}.obj:
+ $(CPP) $(CPP_PROJ) $<
+
+.cxx{$(CPP_OBJS)}.obj:
+ $(CPP) $(CPP_PROJ) $<
+
+.c{$(CPP_SBRS)}.sbr:
+ $(CPP) $(CPP_PROJ) $<
+
+.cpp{$(CPP_SBRS)}.sbr:
+ $(CPP) $(CPP_PROJ) $<
+
+.cxx{$(CPP_SBRS)}.sbr:
+ $(CPP) $(CPP_PROJ) $<
+
+RSC=rc.exe
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+BSC32_FLAGS=/nologo /o"$(INTDIR)/jskwgen.bsc"
+BSC32_SBRS= \
+
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+LINK32_FLAGS=kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib\
+ advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib\
+ odbccp32.lib /nologo /subsystem:console /incremental:no\
+ /pdb:"$(INTDIR)/jskwgen.pdb" /machine:I386 /out:"$(INTDIR)/host_jskwgen.exe"
+LINK32_OBJS= \
+ "$(INTDIR)\jskwgen.obj" \
+
+"$(INTDIR)\host_jskwgen.exe" : "$(INTDIR)" $(DEF_FILE) $(LINK32_OBJS)
+ $(LINK32) @<<
+ $(LINK32_FLAGS) $(LINK32_OBJS)
+<<
+
+!ELSEIF "$(CFG)" == "jskwgen - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "jsshell\Debug"
+# PROP BASE Intermediate_Dir "jskwgen\Debug"
+# PROP BASE Target_Dir "jskwgen"
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Target_Dir "jskwgen"
+OUTDIR=.\Debug
+INTDIR=.\Debug
+
+ALL : "$(INTDIR)" "$(INTDIR)\host_jskwgen.exe"
+
+CLEAN :
+ -@erase "$(INTDIR)\jskwgen.obj"
+ -@erase "$(INTDIR)\jskwgen.pch"
+ -@erase "$(INTDIR)\host_jskwgen.exe"
+
+"$(INTDIR)" :
+ if not exist "$(INTDIR)/$(NULL)" mkdir "$(INTDIR)"
+
+CPP=cl.exe
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /YX /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /D "NDEBUG" /D "_CONSOLE" /D "WIN32" /D "XP_WIN" /D "JSFILE" /YX /c
+CPP_PROJ=/nologo /MD /W3 /GX /O2 /D "NDEBUG" /D "_CONSOLE" /D "WIN32" /D\
+ "XP_WIN" /D "JSFILE" /Fp"$(INTDIR)/jskwgen.pch" /YX /Fo"$(INTDIR)/" /c
+CPP_OBJS=.\Debug/
+CPP_SBRS=.\.
+
+.c{$(CPP_OBJS)}.obj:
+ $(CPP) $(CPP_PROJ) $<
+
+.cpp{$(CPP_OBJS)}.obj:
+ $(CPP) $(CPP_PROJ) $<
+
+.cxx{$(CPP_OBJS)}.obj:
+ $(CPP) $(CPP_PROJ) $<
+
+.c{$(CPP_SBRS)}.sbr:
+ $(CPP) $(CPP_PROJ) $<
+
+.cpp{$(CPP_SBRS)}.sbr:
+ $(CPP) $(CPP_PROJ) $<
+
+.cxx{$(CPP_SBRS)}.sbr:
+ $(CPP) $(CPP_PROJ) $<
+
+RSC=rc.exe
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+BSC32_FLAGS=/nologo /o"$(INTDIR)/jskwgen.bsc"
+BSC32_SBRS= \
+
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+LINK32_FLAGS=kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib\
+ advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib\
+ odbccp32.lib /nologo /subsystem:console /incremental:no\
+ /pdb:"$(INTDIR)/jskwgen.pdb" /machine:I386 /out:"$(INTDIR)/host_jskwgen.exe"
+LINK32_OBJS= \
+ "$(INTDIR)\jskwgen.obj" \
+
+"$(INTDIR)\host_jskwgen.exe" : "$(INTDIR)" $(DEF_FILE) $(LINK32_OBJS)
+ $(LINK32) @<<
+ $(LINK32_FLAGS) $(LINK32_OBJS)
+<<
+
+!ELSEIF "$(CFG)" == "jsshell - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "jsshell\Release"
+# PROP BASE Intermediate_Dir "jsshell\Release"
+# PROP BASE Target_Dir "jsshell"
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Target_Dir "jsshell"
+OUTDIR=.\Release
+INTDIR=.\Release
+
+ALL : "js - Win32 Release" "$(OUTDIR)\jsshell.exe"
+
+CLEAN :
+ -@erase "$(INTDIR)\js.obj"
+ -@erase "$(INTDIR)\jsshell.pch"
+ -@erase "$(OUTDIR)\jsshell.exe"
+ -@$(MAKE) /nologo /$(MAKEFLAGS) /F ".\js.mak" CFG="js - Win32 Release" clean
+
+
+"$(OUTDIR)" :
+ if not exist "$(OUTDIR)/$(NULL)" mkdir "$(OUTDIR)"
+
+CPP=cl.exe
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /YX /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /D "NDEBUG" /D "_CONSOLE" /D "WIN32" /D "XP_WIN" /D "JSFILE" /I"$(INTDIR)" /YX /c
+CPP_PROJ=/nologo /MD /W3 /GX /O2 /D "NDEBUG" /D "_CONSOLE" /D "WIN32" /D\
+ "XP_WIN" /D "JSFILE" /Fp"$(INTDIR)/jsshell.pch" /I"$(INTDIR)" /YX /Fo"$(INTDIR)/" /c
+CPP_OBJS=.\Release/
+CPP_SBRS=.\.
+
+.c{$(CPP_OBJS)}.obj:
+ $(CPP) $(CPP_PROJ) $<
+
+.cpp{$(CPP_OBJS)}.obj:
+ $(CPP) $(CPP_PROJ) $<
+
+.cxx{$(CPP_OBJS)}.obj:
+ $(CPP) $(CPP_PROJ) $<
+
+.c{$(CPP_SBRS)}.sbr:
+ $(CPP) $(CPP_PROJ) $<
+
+.cpp{$(CPP_SBRS)}.sbr:
+ $(CPP) $(CPP_PROJ) $<
+
+.cxx{$(CPP_SBRS)}.sbr:
+ $(CPP) $(CPP_PROJ) $<
+
+RSC=rc.exe
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+BSC32_FLAGS=/nologo /o"$(OUTDIR)/jsshell.bsc"
+BSC32_SBRS= \
+
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+LINK32_FLAGS=kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib\
+ advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib\
+ odbccp32.lib /nologo /subsystem:console /incremental:no\
+ /pdb:"$(OUTDIR)/jsshell.pdb" /machine:I386 /out:"$(OUTDIR)/jsshell.exe"
+LINK32_OBJS= \
+ "$(INTDIR)\js.obj" \
+ "$(OUTDIR)\js32.lib"
+
+"$(OUTDIR)\jsshell.exe" : "$(OUTDIR)" $(DEF_FILE) $(LINK32_OBJS)
+ $(LINK32) @<<
+ $(LINK32_FLAGS) $(LINK32_OBJS)
+<<
+
+!ELSEIF "$(CFG)" == "jsshell - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "jsshell\jsshell_"
+# PROP BASE Intermediate_Dir "jsshell\jsshell_"
+# PROP BASE Target_Dir "jsshell"
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Target_Dir "jsshell"
+OUTDIR=.\Debug
+INTDIR=.\Debug
+
+ALL : "js - Win32 Debug" "$(OUTDIR)\jsshell.exe"
+
+CLEAN :
+ -@erase "$(INTDIR)\js.obj"
+ -@erase "$(INTDIR)\jsshell.pch"
+ -@erase "$(OUTDIR)\jsshell.exe"
+ -@erase "$(OUTDIR)\jsshell.ilk"
+ -@erase "$(OUTDIR)\jsshell.pdb"
+ -@$(MAKE) /nologo /$(MAKEFLAGS) /F ".\js.mak" CFG="js - Win32 Debug" clean
+
+"$(OUTDIR)" :
+ if not exist "$(OUTDIR)/$(NULL)" mkdir "$(OUTDIR)"
+
+CPP=cl.exe
+# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /YX /c
+# ADD CPP /nologo /MDd /W3 /Gm /GX /Zi /Od /D "_CONSOLE" /D "_DEBUG" /D "WIN32" /D "XP_WIN" /D "JSFILE" /D "DEBUG" /YX /c
+CPP_PROJ=/nologo /MDd /W3 /Gm /GX /Zi /Od /D "_CONSOLE" /D "_DEBUG" /D "WIN32"\
+ /D "XP_WIN" /D "JSFILE" /D "DEBUG" /Fp"$(INTDIR)/jsshell.pch" /YX\
+ /Fo"$(INTDIR)/" /Fd"$(INTDIR)/" /c
+CPP_OBJS=.\Debug/
+CPP_SBRS=.\.
+
+.c{$(CPP_OBJS)}.obj:
+ $(CPP) $(CPP_PROJ) $<
+
+.cpp{$(CPP_OBJS)}.obj:
+ $(CPP) $(CPP_PROJ) $<
+
+.cxx{$(CPP_OBJS)}.obj:
+ $(CPP) $(CPP_PROJ) $<
+
+.c{$(CPP_SBRS)}.sbr:
+ $(CPP) $(CPP_PROJ) $<
+
+.cpp{$(CPP_SBRS)}.sbr:
+ $(CPP) $(CPP_PROJ) $<
+
+.cxx{$(CPP_SBRS)}.sbr:
+ $(CPP) $(CPP_PROJ) $<
+
+RSC=rc.exe
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+BSC32_FLAGS=/nologo /o"$(OUTDIR)/jsshell.bsc"
+BSC32_SBRS= \
+
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386
+# ADD LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386
+LINK32_FLAGS=kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib\
+ advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib\
+ odbccp32.lib /nologo /subsystem:console /incremental:yes\
+ /pdb:"$(OUTDIR)/jsshell.pdb" /debug /machine:I386 /out:"$(OUTDIR)/jsshell.exe"
+LINK32_OBJS= \
+ "$(INTDIR)\js.obj" \
+ "$(OUTDIR)\js32.lib"
+
+"$(OUTDIR)\jsshell.exe" : "$(OUTDIR)" $(DEF_FILE) $(LINK32_OBJS)
+ $(LINK32) @<<
+ $(LINK32_FLAGS) $(LINK32_OBJS)
+<<
+
+!ELSEIF "$(CFG)" == "fdlibm - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "fdlibm\Release"
+# PROP BASE Intermediate_Dir "fdlibm\Release"
+# PROP BASE Target_Dir "fdlibm"
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Target_Dir "fdlibm"
+OUTDIR=.\Release
+INTDIR=.\Release
+
+ALL : "$(OUTDIR)\fdlibm.lib"
+
+CLEAN :
+ -@erase "$(INTDIR)\e_atan2.obj"
+ -@erase "$(INTDIR)\e_pow.obj"
+ -@erase "$(INTDIR)\e_sqrt.obj"
+ -@erase "$(INTDIR)\k_standard.obj"
+ -@erase "$(INTDIR)\s_atan.obj"
+ -@erase "$(INTDIR)\s_copysign.obj"
+ -@erase "$(INTDIR)\s_fabs.obj"
+ -@erase "$(INTDIR)\s_finite.obj"
+ -@erase "$(INTDIR)\s_isnan.obj"
+ -@erase "$(INTDIR)\s_matherr.obj"
+ -@erase "$(INTDIR)\s_rint.obj"
+ -@erase "$(INTDIR)\s_scalbn.obj"
+ -@erase "$(INTDIR)\w_atan2.obj"
+ -@erase "$(INTDIR)\w_pow.obj"
+ -@erase "$(INTDIR)\w_sqrt.obj"
+ -@erase "$(INTDIR)\fdlibm.pch"
+ -@erase "$(OUTDIR)\fdlibm.lib"
+
+"$(OUTDIR)" :
+ if not exist "$(OUTDIR)/$(NULL)" mkdir "$(OUTDIR)"
+
+CPP=cl.exe
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D _X86_=1 /D "_WINDOWS" /YX /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /D "NDEBUG" /D "WIN32" /D _X86_=1 /D "_WINDOWS" /D "_IEEE_LIBM" /YX /c
+CPP_PROJ=/nologo /MD /W3 /GX /O2 /D "NDEBUG" /D "WIN32" /D _X86_=1 /D "_WINDOWS" /D\
+ "_IEEE_LIBM" /D "XP_WIN" /I .\ /Fp"$(INTDIR)/fdlibm.pch" /YX /Fo"$(INTDIR)/" /c
+CPP_OBJS=.\Release/
+CPP_SBRS=.\.
+
+.c{$(CPP_OBJS)}.obj:
+ $(CPP) $(CPP_PROJ) $<
+
+.cpp{$(CPP_OBJS)}.obj:
+ $(CPP) $(CPP_PROJ) $<
+
+.cxx{$(CPP_OBJS)}.obj:
+ $(CPP) $(CPP_PROJ) $<
+
+.c{$(CPP_SBRS)}.sbr:
+ $(CPP) $(CPP_PROJ) $<
+
+.cpp{$(CPP_SBRS)}.sbr:
+ $(CPP) $(CPP_PROJ) $<
+
+.cxx{$(CPP_SBRS)}.sbr:
+ $(CPP) $(CPP_PROJ) $<
+
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+BSC32_FLAGS=/nologo /o"$(OUTDIR)/fdlibm.bsc"
+BSC32_SBRS= \
+
+LIB32=link.exe -lib
+# ADD BASE LIB32 /nologo
+# ADD LIB32 /nologo
+LIB32_FLAGS=/nologo /out:"$(OUTDIR)/fdlibm.lib"
+LIB32_OBJS= \
+ "$(INTDIR)\e_atan2.obj" \
+ "$(INTDIR)\e_pow.obj" \
+ "$(INTDIR)\e_sqrt.obj" \
+ "$(INTDIR)\k_standard.obj" \
+ "$(INTDIR)\s_atan.obj" \
+ "$(INTDIR)\s_copysign.obj" \
+ "$(INTDIR)\s_fabs.obj" \
+ "$(INTDIR)\s_finite.obj" \
+ "$(INTDIR)\s_isnan.obj" \
+ "$(INTDIR)\s_matherr.obj" \
+ "$(INTDIR)\s_rint.obj" \
+ "$(INTDIR)\s_scalbn.obj" \
+ "$(INTDIR)\w_atan2.obj" \
+ "$(INTDIR)\w_pow.obj" \
+ "$(INTDIR)\w_sqrt.obj"
+
+"$(OUTDIR)\fdlibm.lib" : "$(OUTDIR)" $(DEF_FILE) $(LIB32_OBJS)
+ $(LIB32) @<<
+ $(LIB32_FLAGS) $(DEF_FLAGS) $(LIB32_OBJS)
+<<
+
+!ELSEIF "$(CFG)" == "fdlibm - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "fdlibm\Debug"
+# PROP BASE Intermediate_Dir "fdlibm\Debug"
+# PROP BASE Target_Dir "fdlibm"
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Target_Dir "fdlibm"
+OUTDIR=.\Debug
+INTDIR=.\Debug
+
+ALL : "$(OUTDIR)\fdlibm.lib"
+
+CLEAN :
+ -@erase "$(INTDIR)\e_atan2.obj"
+ -@erase "$(INTDIR)\e_pow.obj"
+ -@erase "$(INTDIR)\e_sqrt.obj"
+ -@erase "$(INTDIR)\k_standard.obj"
+ -@erase "$(INTDIR)\s_atan.obj"
+ -@erase "$(INTDIR)\s_copysign.obj"
+ -@erase "$(INTDIR)\s_fabs.obj"
+ -@erase "$(INTDIR)\s_finite.obj"
+ -@erase "$(INTDIR)\s_isnan.obj"
+ -@erase "$(INTDIR)\s_matherr.obj"
+ -@erase "$(INTDIR)\s_rint.obj"
+ -@erase "$(INTDIR)\s_scalbn.obj"
+ -@erase "$(INTDIR)\w_atan2.obj"
+ -@erase "$(INTDIR)\w_pow.obj"
+ -@erase "$(INTDIR)\w_sqrt.obj"
+ -@erase "$(INTDIR)\fdlibm.pch"
+ -@erase "$(OUTDIR)\fdlibm.lib"
+
+"$(OUTDIR)" :
+ if not exist "$(OUTDIR)/$(NULL)" mkdir "$(OUTDIR)"
+
+CPP=cl.exe
+# ADD BASE CPP /nologo /W3 /GX /Z7 /Od /D "WIN32" /D "_DEBUG" /D _X86_=1 /D "_WINDOWS" /YX /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /D "_DEBUG" /D "WIN32" /D _X86_=1 /D "_WINDOWS" /D "_IEEE_LIBM" /YX /c
+CPP_PROJ=/nologo /MDd /W3 /GX /Z7 /Od /D "_DEBUG" /D "WIN32" /D _X86_=1 /D "_WINDOWS" /D\
+ "_IEEE_LIBM" /D "XP_WIN" -I .\ /Fp"$(INTDIR)/fdlibm.pch" /YX /Fo"$(INTDIR)/" /c
+CPP_OBJS=.\Debug/
+CPP_SBRS=.\.
+
+.c{$(CPP_OBJS)}.obj:
+ $(CPP) $(CPP_PROJ) $<
+
+.cpp{$(CPP_OBJS)}.obj:
+ $(CPP) $(CPP_PROJ) $<
+
+.cxx{$(CPP_OBJS)}.obj:
+ $(CPP) $(CPP_PROJ) $<
+
+.c{$(CPP_SBRS)}.sbr:
+ $(CPP) $(CPP_PROJ) $<
+
+.cpp{$(CPP_SBRS)}.sbr:
+ $(CPP) $(CPP_PROJ) $<
+
+.cxx{$(CPP_SBRS)}.sbr:
+ $(CPP) $(CPP_PROJ) $<
+
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+BSC32_FLAGS=/nologo /o"$(OUTDIR)/fdlibm.bsc"
+BSC32_SBRS= \
+
+LIB32=link.exe -lib
+# ADD BASE LIB32 /nologo
+# ADD LIB32 /nologo
+LIB32_FLAGS=/nologo /out:"$(OUTDIR)/fdlibm.lib"
+LIB32_OBJS= \
+ "$(INTDIR)\e_atan2.obj" \
+ "$(INTDIR)\e_pow.obj" \
+ "$(INTDIR)\e_sqrt.obj" \
+ "$(INTDIR)\k_standard.obj" \
+ "$(INTDIR)\s_atan.obj" \
+ "$(INTDIR)\s_copysign.obj" \
+ "$(INTDIR)\s_fabs.obj" \
+ "$(INTDIR)\s_finite.obj" \
+ "$(INTDIR)\s_isnan.obj" \
+ "$(INTDIR)\s_matherr.obj" \
+ "$(INTDIR)\s_rint.obj" \
+ "$(INTDIR)\s_scalbn.obj" \
+ "$(INTDIR)\w_atan2.obj" \
+ "$(INTDIR)\w_pow.obj" \
+ "$(INTDIR)\w_sqrt.obj"
+
+"$(OUTDIR)\fdlibm.lib" : "$(OUTDIR)" $(DEF_FILE) $(LIB32_OBJS)
+ $(LIB32) @<<
+ $(LIB32_FLAGS) $(DEF_FLAGS) $(LIB32_OBJS)
+<<
+
+!ENDIF
+
+################################################################################
+# Begin Target
+
+# Name "js - Win32 Release"
+# Name "js - Win32 Debug"
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+!ENDIF
+
+################################################################################
+# Begin Source File
+
+SOURCE=.\jsapi.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSAPI=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsarray.h"\
+ ".\jsatom.h"\
+ ".\jsbool.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsdate.h"\
+ ".\jsemit.h"\
+ ".\jsexn.h"\
+ ".\jsfile.h"\
+ ".\jsfun.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsmath.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsopcode.tbl"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsparse.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscan.h"\
+ ".\jsscope.h"\
+ ".\jsscript.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ ".\jsxml.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSAPI=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsapi.obj" : $(SOURCE) $(DEP_CPP_JSAPI) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSAPI=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsarray.h"\
+ ".\jsatom.h"\
+ ".\jsbool.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsdate.h"\
+ ".\jsemit.h"\
+ ".\jsexn.h"\
+ ".\jsfile.h"\
+ ".\jsfun.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsmath.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsopcode.tbl"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsparse.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscan.h"\
+ ".\jsscope.h"\
+ ".\jsscript.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ ".\jsxml.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSAPI=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsapi.obj" : $(SOURCE) $(DEP_CPP_JSAPI) "$(INTDIR)"
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jsarena.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSARE=\
+ ".\jsarena.h"\
+ ".\jsbit.h"\
+ ".\jscompat.h"\
+ ".\jscpucfg.h"\
+ ".\jslong.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsstddef.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSARE=\
+ ".\jsautocfg.h"\
+
+
+"$(INTDIR)\jsarena.obj" : $(SOURCE) $(DEP_CPP_JSARE) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSARE=\
+ ".\jsarena.h"\
+ ".\jsbit.h"\
+ ".\jscompat.h"\
+ ".\jscpucfg.h"\
+ ".\jslong.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsstddef.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSARE=\
+ ".\jsautocfg.h"\
+
+
+"$(INTDIR)\jsarena.obj" : $(SOURCE) $(DEP_CPP_JSARE) "$(INTDIR)"
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jsarray.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSARR=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsarray.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsfun.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSARR=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsarray.obj" : $(SOURCE) $(DEP_CPP_JSARR) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSARR=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsarray.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsfun.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSARR=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsarray.obj" : $(SOURCE) $(DEP_CPP_JSARR) "$(INTDIR)"
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jsatom.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSATO=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsopcode.tbl"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSATO=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsatom.obj" : $(SOURCE) $(DEP_CPP_JSATO) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSATO=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsopcode.tbl"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSATO=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsatom.obj" : $(SOURCE) $(DEP_CPP_JSATO) "$(INTDIR)"
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jsbool.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSBOO=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsbool.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSBOO=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsbool.obj" : $(SOURCE) $(DEP_CPP_JSBOO) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSBOO=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsbool.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSBOO=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsbool.obj" : $(SOURCE) $(DEP_CPP_JSBOO) "$(INTDIR)"
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jscntxt.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSCNT=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsdbgapi.h"\
+ ".\jsexn.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsopcode.tbl"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscan.h"\
+ ".\jsscope.h"\
+ ".\jsscript.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSCNT=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jscntxt.obj" : $(SOURCE) $(DEP_CPP_JSCNT) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSCNT=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsdbgapi.h"\
+ ".\jsexn.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsopcode.tbl"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscan.h"\
+ ".\jsscope.h"\
+ ".\jsscript.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSCNT=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jscntxt.obj" : $(SOURCE) $(DEP_CPP_JSCNT) "$(INTDIR)"
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jsdate.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSDAT=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsdate.h"\
+ ".\jsdtoa.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ ".\prmjtime.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSDAT=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsdate.obj" : $(SOURCE) $(DEP_CPP_JSDAT) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSDAT=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsdate.h"\
+ ".\jsdtoa.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ ".\prmjtime.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSDAT=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsdate.obj" : $(SOURCE) $(DEP_CPP_JSDAT) "$(INTDIR)"
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jsdbgapi.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSDBG=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsdbgapi.h"\
+ ".\jsfun.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsopcode.tbl"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsscript.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSDBG=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsdbgapi.obj" : $(SOURCE) $(DEP_CPP_JSDBG) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSDBG=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsdbgapi.h"\
+ ".\jsfun.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsopcode.tbl"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsscript.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSDBG=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsdbgapi.obj" : $(SOURCE) $(DEP_CPP_JSDBG) "$(INTDIR)"
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jsdhash.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSDHA=\
+ ".\jsbit.h"\
+ ".\jscompat.h"\
+ ".\jscpucfg.h"\
+ ".\jsdhash.h"\
+ ".\jslong.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSDHA=\
+ ".\jsautocfg.h"\
+
+
+"$(INTDIR)\jsdhash.obj" : $(SOURCE) $(DEP_CPP_JSDHA) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSDHA=\
+ ".\jsbit.h"\
+ ".\jscompat.h"\
+ ".\jscpucfg.h"\
+ ".\jsdhash.h"\
+ ".\jslong.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSDHA=\
+ ".\jsautocfg.h"\
+
+
+"$(INTDIR)\jsdhash.obj" : $(SOURCE) $(DEP_CPP_JSDHA) "$(INTDIR)"
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jsdtoa.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSDTO=\
+ ".\jscompat.h"\
+ ".\jscpucfg.h"\
+ ".\jsdtoa.h"\
+ ".\jslong.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jsstddef.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSDTO=\
+ ".\jsautocfg.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsdtoa.obj" : $(SOURCE) $(DEP_CPP_JSDTO) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSDTO=\
+ ".\jscompat.h"\
+ ".\jscpucfg.h"\
+ ".\jsdtoa.h"\
+ ".\jslong.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jsstddef.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSDTO=\
+ ".\jsautocfg.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsdtoa.obj" : $(SOURCE) $(DEP_CPP_JSDTO) "$(INTDIR)"
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jsemit.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSEMI=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsemit.h"\
+ ".\jsfun.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsopcode.tbl"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsparse.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscan.h"\
+ ".\jsscope.h"\
+ ".\jsscript.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSEMI=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsemit.obj" : $(SOURCE) $(DEP_CPP_JSEMI) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSEMI=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsemit.h"\
+ ".\jsfun.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsopcode.tbl"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsparse.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscan.h"\
+ ".\jsscope.h"\
+ ".\jsscript.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSEMI=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsemit.obj" : $(SOURCE) $(DEP_CPP_JSEMI) "$(INTDIR)"
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jsexn.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSEXN=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsexn.h"\
+ ".\jsfun.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsobj.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSEXN=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsexn.obj" : $(SOURCE) $(DEP_CPP_JSEXN) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSEXN=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsexn.h"\
+ ".\jsfun.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsobj.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSEXN=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsexn.obj" : $(SOURCE) $(DEP_CPP_JSEXN) "$(INTDIR)"
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jsfun.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSFUN=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsarray.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsfun.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsopcode.tbl"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsparse.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscan.h"\
+ ".\jsscope.h"\
+ ".\jsscript.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ ".\jsxdrapi.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSFUN=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsfun.obj" : $(SOURCE) $(DEP_CPP_JSFUN) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSFUN=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsarray.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsfun.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsopcode.tbl"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsparse.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscan.h"\
+ ".\jsscope.h"\
+ ".\jsscript.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ ".\jsxdrapi.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSFUN=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsfun.obj" : $(SOURCE) $(DEP_CPP_JSFUN) "$(INTDIR)"
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jsgc.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSGC_=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsfun.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsscript.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ ".\jsxml.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSGC_=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsgc.obj" : $(SOURCE) $(DEP_CPP_JSGC_) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSGC_=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsfun.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsscript.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ ".\jsxml.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSGC_=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsgc.obj" : $(SOURCE) $(DEP_CPP_JSGC_) "$(INTDIR)"
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jshash.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSHAS=\
+ ".\jsbit.h"\
+ ".\jscompat.h"\
+ ".\jscpucfg.h"\
+ ".\jshash.h"\
+ ".\jslong.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSHAS=\
+ ".\jsautocfg.h"\
+
+
+"$(INTDIR)\jshash.obj" : $(SOURCE) $(DEP_CPP_JSHAS) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSHAS=\
+ ".\jsbit.h"\
+ ".\jscompat.h"\
+ ".\jscpucfg.h"\
+ ".\jshash.h"\
+ ".\jslong.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSHAS=\
+ ".\jsautocfg.h"\
+
+
+"$(INTDIR)\jshash.obj" : $(SOURCE) $(DEP_CPP_JSHAS) "$(INTDIR)"
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jsinterp.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSINT=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsarray.h"\
+ ".\jsatom.h"\
+ ".\jsbool.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsdbgapi.h"\
+ ".\jsfun.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsopcode.tbl"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsscript.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ ".\jsxml.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSINT=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsinterp.obj" : $(SOURCE) $(DEP_CPP_JSINT) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSINT=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsarray.h"\
+ ".\jsatom.h"\
+ ".\jsbool.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsdbgapi.h"\
+ ".\jsfun.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsopcode.tbl"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsscript.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ ".\jsxml.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSINT=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsinterp.obj" : $(SOURCE) $(DEP_CPP_JSINT) "$(INTDIR)"
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jslock.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSLOC=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsobj.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSLOC=\
+ ".\jsautocfg.h"\
+ ".\pratom.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+ ".\prthread.h"\
+
+
+"$(INTDIR)\jslock.obj" : $(SOURCE) $(DEP_CPP_JSLOC) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSLOC=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsobj.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSLOC=\
+ ".\jsautocfg.h"\
+ ".\pratom.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+ ".\prthread.h"\
+
+
+"$(INTDIR)\jslock.obj" : $(SOURCE) $(DEP_CPP_JSLOC) "$(INTDIR)"
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jslog2.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSLOG=\
+ ".\jsbit.h"\
+ ".\jscpucfg.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jstypes.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSLOG=\
+ ".\jsautocfg.h"\
+
+
+"$(INTDIR)\jslog2.obj" : $(SOURCE) $(DEP_CPP_JSLOG) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSLOG=\
+ ".\jsbit.h"\
+ ".\jscpucfg.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jstypes.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSLOG=\
+ ".\jsautocfg.h"\
+
+
+"$(INTDIR)\jslog2.obj" : $(SOURCE) $(DEP_CPP_JSLOG) "$(INTDIR)"
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jslong.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSLON=\
+ ".\jscpucfg.h"\
+ ".\jslong.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jstypes.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSLON=\
+ ".\jsautocfg.h"\
+
+
+"$(INTDIR)\jslong.obj" : $(SOURCE) $(DEP_CPP_JSLON) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSLON=\
+ ".\jscpucfg.h"\
+ ".\jslong.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jstypes.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSLON=\
+ ".\jsautocfg.h"\
+
+
+"$(INTDIR)\jslong.obj" : $(SOURCE) $(DEP_CPP_JSLON) "$(INTDIR)"
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jsmath.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSMAT=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslibmath.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsmath.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\prmjtime.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSMAT=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsmath.obj" : $(SOURCE) $(DEP_CPP_JSMAT) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSMAT=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslibmath.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsmath.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\prmjtime.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSMAT=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsmath.obj" : $(SOURCE) $(DEP_CPP_JSMAT) "$(INTDIR)"
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jsnum.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSNUM=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsdtoa.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsopcode.tbl"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSNUM=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsnum.obj" : $(SOURCE) $(DEP_CPP_JSNUM) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSNUM=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsdtoa.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsopcode.tbl"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSNUM=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsnum.obj" : $(SOURCE) $(DEP_CPP_JSNUM) "$(INTDIR)"
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jsobj.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSOBJ=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsbool.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsdbgapi.h"\
+ ".\jsfun.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsopcode.tbl"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsscript.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ ".\jsxdrapi.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSOBJ=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsobj.obj" : $(SOURCE) $(DEP_CPP_JSOBJ) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSOBJ=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsbool.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsdbgapi.h"\
+ ".\jsfun.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsopcode.tbl"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsscript.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ ".\jsxdrapi.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSOBJ=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsobj.obj" : $(SOURCE) $(DEP_CPP_JSOBJ) "$(INTDIR)"
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jsopcode.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSOPC=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsarray.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsdbgapi.h"\
+ ".\jsdtoa.h"\
+ ".\jsemit.h"\
+ ".\jsfun.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsopcode.tbl"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsscript.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSOPC=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsopcode.obj" : $(SOURCE) $(DEP_CPP_JSOPC) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSOPC=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsarray.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsdbgapi.h"\
+ ".\jsdtoa.h"\
+ ".\jsemit.h"\
+ ".\jsfun.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsopcode.tbl"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsscript.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSOPC=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsopcode.obj" : $(SOURCE) $(DEP_CPP_JSOPC) "$(INTDIR)"
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jsparse.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSPAR=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsemit.h"\
+ ".\jsfun.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsopcode.tbl"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsparse.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscan.h"\
+ ".\jsscope.h"\
+ ".\jsscript.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSPAR=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsparse.obj" : $(SOURCE) $(DEP_CPP_JSPAR) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSPAR=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsemit.h"\
+ ".\jsfun.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsopcode.tbl"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsparse.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscan.h"\
+ ".\jsscope.h"\
+ ".\jsscript.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSPAR=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsparse.obj" : $(SOURCE) $(DEP_CPP_JSPAR) "$(INTDIR)"
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jsprf.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSPRF=\
+ ".\jscpucfg.h"\
+ ".\jslong.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSPRF=\
+ ".\jsautocfg.h"\
+
+
+"$(INTDIR)\jsprf.obj" : $(SOURCE) $(DEP_CPP_JSPRF) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSPRF=\
+ ".\jscpucfg.h"\
+ ".\jslong.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSPRF=\
+ ".\jsautocfg.h"\
+
+
+"$(INTDIR)\jsprf.obj" : $(SOURCE) $(DEP_CPP_JSPRF) "$(INTDIR)"
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jsregexp.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSREG=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsarray.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsfun.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsopcode.tbl"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ ".\jsxdrapi.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSREG=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsregexp.obj" : $(SOURCE) $(DEP_CPP_JSREG) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSREG=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsarray.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsfun.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsopcode.tbl"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ ".\jsxdrapi.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSREG=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsregexp.obj" : $(SOURCE) $(DEP_CPP_JSREG) "$(INTDIR)"
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jsscan.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSSCA=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsdtoa.h"\
+ ".\jsexn.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsopcode.tbl"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscan.h"\
+ ".\jsscope.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ ".\jsxml.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSSCA=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsscan.obj" : $(SOURCE) $(DEP_CPP_JSSCA) "$(INTDIR)" "$(INTDIR)\jsautokw.h"
+
+"$(INTDIR)\jsautokw.h" : $(INTDIR)\host_jskwgen.exe jskeyword.tbl
+ $(INTDIR)\host_jskwgen.exe $(INTDIR)\jsautokw.h
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSSCA=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsdtoa.h"\
+ ".\jsexn.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsopcode.tbl"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscan.h"\
+ ".\jsscope.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ ".\jsxml.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+ $(INTDIR)\jsautokw.h \
+
+NODEP_CPP_JSSCA=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsscan.obj" : $(SOURCE) $(DEP_CPP_JSSCA) "$(INTDIR)"
+
+"$(INTDIR)\jsautokw.h" : $(INTDIR)\host_jskwgen.exe jskeyword.tbl
+ $(INTDIR)\host_jskwgen.exe $(INTDIR)\jsautokw.h
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jskwgen.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSSCO=\
+ ".\jskwgen.c"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+
+"$(INTDIR)\jskwgen.obj" : $(SOURCE) $(DEP_CPP_JSSCO) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSSCO=\
+ ".\jskwgen.c"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+LINK32_FLAGS=kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib\
+ advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib\
+ odbccp32.lib /nologo /subsystem:console /incremental:no\
+ /pdb:"$(INTDIR)/host_jskwgen.pdb" /machine:I386 /out:"$(INTDIR)/host_jskwgen.exe"
+
+LINK32_OBJS= \
+ "$(INTDIR)\jskwgen.obj"
+
+"$(INTDIR)\host_jskwgen.exe" : "$(INTDIR)" $(SOURCE) $(DEP_CPP_JSSCO) "$(INTDIR)"
+ $(LINK32) @<<
+ $(LINK32_FLAGS) $(LINK32_OBJS)
+<<
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jsscope.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSSCO=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSSCO=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsscope.obj" : $(SOURCE) $(DEP_CPP_JSSCO) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSSCO=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSSCO=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsscope.obj" : $(SOURCE) $(DEP_CPP_JSSCO) "$(INTDIR)"
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jsscript.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSSCR=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsdbgapi.h"\
+ ".\jsemit.h"\
+ ".\jsfun.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsopcode.tbl"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsscript.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ ".\jsxdrapi.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSSCR=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsscript.obj" : $(SOURCE) $(DEP_CPP_JSSCR) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSSCR=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsdbgapi.h"\
+ ".\jsemit.h"\
+ ".\jsfun.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsopcode.tbl"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsscript.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ ".\jsxdrapi.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSSCR=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsscript.obj" : $(SOURCE) $(DEP_CPP_JSSCR) "$(INTDIR)"
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jsstr.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSSTR=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsarray.h"\
+ ".\jsatom.h"\
+ ".\jsbool.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsopcode.tbl"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSSTR=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsstr.obj" : $(SOURCE) $(DEP_CPP_JSSTR) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSSTR=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsarray.h"\
+ ".\jsatom.h"\
+ ".\jsbool.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsopcode.tbl"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSSTR=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsstr.obj" : $(SOURCE) $(DEP_CPP_JSSTR) "$(INTDIR)"
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jsutil.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSUTI=\
+ ".\jscpucfg.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSUTI=\
+ ".\jsautocfg.h"\
+
+
+"$(INTDIR)\jsutil.obj" : $(SOURCE) $(DEP_CPP_JSUTI) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSUTI=\
+ ".\jscpucfg.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSUTI=\
+ ".\jsautocfg.h"\
+
+
+"$(INTDIR)\jsutil.obj" : $(SOURCE) $(DEP_CPP_JSUTI) "$(INTDIR)"
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jsxdrapi.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSXDR=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsobj.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ ".\jsxdrapi.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSXDR=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsxdrapi.obj" : $(SOURCE) $(DEP_CPP_JSXDR) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSXDR=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsobj.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ ".\jsxdrapi.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSXDR=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsxdrapi.obj" : $(SOURCE) $(DEP_CPP_JSXDR) "$(INTDIR)"
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jsxml.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSXML=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarray.h"\
+ ".\jsatom.h"\
+ ".\jsbit.h"\
+ ".\jsbool.h"\
+ ".\jscntxt.h"\
+ ".\jsfun.h"\
+ ".\jsgc.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsparse.h"\
+ ".\jsprf.h"\
+ ".\jsscan.h"\
+ ".\jsscope.h"\
+ ".\jsscript.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ ".\jsxml.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSXML=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsxml.obj" : $(SOURCE) $(DEP_CPP_JSXML) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSXML=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarray.h"\
+ ".\jsatom.h"\
+ ".\jsbit.h"\
+ ".\jsbool.h"\
+ ".\jscntxt.h"\
+ ".\jsfun.h"\
+ ".\jsgc.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsparse.h"\
+ ".\jsscan.h"\
+ ".\jsscope.h"\
+ ".\jsscript.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ ".\jsxml.h"\
+ ".\jsprf.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSXML=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsxml.obj" : $(SOURCE) $(DEP_CPP_JSXML) "$(INTDIR)"
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\prmjtime.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_PRMJT=\
+ ".\jscompat.h"\
+ ".\jscpucfg.h"\
+ ".\jslong.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jstypes.h"\
+ ".\prmjtime.h"\
+ {$(INCLUDE)}"\sys\TIMEB.H"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_PRMJT=\
+ ".\jsautocfg.h"\
+
+
+"$(INTDIR)\prmjtime.obj" : $(SOURCE) $(DEP_CPP_PRMJT) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_PRMJT=\
+ ".\jscompat.h"\
+ ".\jscpucfg.h"\
+ ".\jslong.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jstypes.h"\
+ ".\prmjtime.h"\
+ {$(INCLUDE)}"\sys\TIMEB.H"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_PRMJT=\
+ ".\jsautocfg.h"\
+
+
+"$(INTDIR)\prmjtime.obj" : $(SOURCE) $(DEP_CPP_PRMJT) "$(INTDIR)"
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Project Dependency
+
+# Project_Dep_Name "fdlibm"
+
+!IF "$(CFG)" == "js - Win32 Debug"
+
+"fdlibm - Win32 Debug" :
+ @$(MAKE) /nologo /$(MAKEFLAGS) /F ".\js.mak" CFG="fdlibm - Win32 Debug"
+
+!ELSEIF "$(CFG)" == "js - Win32 Release"
+
+"fdlibm - Win32 Release" :
+ @$(MAKE) /nologo /$(MAKEFLAGS) /F ".\js.mak" CFG="fdlibm - Win32 Release"
+
+!ENDIF
+
+# End Project Dependency
+# End Target
+################################################################################
+# Begin Target
+
+# Name "jsshell - Win32 Release"
+# Name "jsshell - Win32 Debug"
+
+!IF "$(CFG)" == "jsshell - Win32 Release"
+
+!ELSEIF "$(CFG)" == "jsshell - Win32 Debug"
+
+!ENDIF
+
+################################################################################
+# Begin Source File
+
+SOURCE=.\js.c
+DEP_CPP_JS_C42=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsdbgapi.h"\
+ ".\jsemit.h"\
+ ".\jsfun.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsopcode.tbl"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsparse.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscan.h"\
+ ".\jsscope.h"\
+ ".\jsscript.h"\
+ ".\jsshell.msg"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JS_C42=\
+ ".\jsautocfg.h"\
+ ".\jsdb.h"\
+ ".\jsdebug.h"\
+ ".\jsdjava.h"\
+ ".\jsjava.h"\
+ ".\jsperl.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\js.obj" : $(SOURCE) $(DEP_CPP_JS_C42) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Project Dependency
+
+# Project_Dep_Name "jskwgen"
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+"jskwgen - Win32 Release" :
+ @$(MAKE) /nologo /$(MAKEFLAGS) /F ".\js.mak" CFG="jskwgen - Win32 Release"
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+"jskwgen - Win32 Debug" :
+ @$(MAKE) /nologo /$(MAKEFLAGS) /F ".\js.mak" CFG="jskwgen - Win32 Debug"
+
+!ENDIF
+
+# End Project Dependency
+# End Target
+################################################################################
+# Begin Project Dependency
+
+# Project_Dep_Name "js"
+
+!IF "$(CFG)" == "jsshell - Win32 Release"
+
+"js - Win32 Release" :
+ @$(MAKE) /nologo /$(MAKEFLAGS) /F ".\js.mak" CFG="js - Win32 Release"
+
+!ELSEIF "$(CFG)" == "jsshell - Win32 Debug"
+
+"js - Win32 Debug" :
+ @$(MAKE) /nologo /$(MAKEFLAGS) /F ".\js.mak" CFG="js - Win32 Debug"
+
+!ENDIF
+
+# End Project Dependency
+# End Target
+################################################################################
+# Begin Target
+
+# Name "fdlibm - Win32 Release"
+# Name "fdlibm - Win32 Debug"
+
+!IF "$(CFG)" == "fdlibm - Win32 Release"
+
+!ELSEIF "$(CFG)" == "fdlibm - Win32 Debug"
+
+!ENDIF
+
+################################################################################
+# Begin Source File
+
+SOURCE=.\fdlibm\w_atan2.c
+
+!IF "$(CFG)" == "fdlibm - Win32 Release"
+
+DEP_CPP_W_ATA=\
+ ".\fdlibm\fdlibm.h"\
+
+
+"$(INTDIR)\w_atan2.obj" : $(SOURCE) $(DEP_CPP_W_ATA) "$(INTDIR)"
+ $(CPP) $(CPP_PROJ) $(SOURCE)
+
+
+!ELSEIF "$(CFG)" == "fdlibm - Win32 Debug"
+
+DEP_CPP_W_ATA=\
+ ".\fdlibm\fdlibm.h"\
+
+
+"$(INTDIR)\w_atan2.obj" : $(SOURCE) $(DEP_CPP_W_ATA) "$(INTDIR)"
+ $(CPP) $(CPP_PROJ) $(SOURCE)
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\fdlibm\s_copysign.c
+
+!IF "$(CFG)" == "fdlibm - Win32 Release"
+
+DEP_CPP_S_COP=\
+ ".\fdlibm\fdlibm.h"\
+
+
+"$(INTDIR)\s_copysign.obj" : $(SOURCE) $(DEP_CPP_S_COP) "$(INTDIR)"
+ $(CPP) $(CPP_PROJ) $(SOURCE)
+
+
+!ELSEIF "$(CFG)" == "fdlibm - Win32 Debug"
+
+DEP_CPP_S_COP=\
+ ".\fdlibm\fdlibm.h"\
+
+
+"$(INTDIR)\s_copysign.obj" : $(SOURCE) $(DEP_CPP_S_COP) "$(INTDIR)"
+ $(CPP) $(CPP_PROJ) $(SOURCE)
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\fdlibm\w_pow.c
+
+!IF "$(CFG)" == "fdlibm - Win32 Release"
+
+DEP_CPP_W_POW=\
+ ".\fdlibm\fdlibm.h"\
+
+
+"$(INTDIR)\w_pow.obj" : $(SOURCE) $(DEP_CPP_W_POW) "$(INTDIR)"
+ $(CPP) $(CPP_PROJ) $(SOURCE)
+
+
+!ELSEIF "$(CFG)" == "fdlibm - Win32 Debug"
+
+DEP_CPP_W_POW=\
+ ".\fdlibm\fdlibm.h"\
+
+
+"$(INTDIR)\w_pow.obj" : $(SOURCE) $(DEP_CPP_W_POW) "$(INTDIR)"
+ $(CPP) $(CPP_PROJ) $(SOURCE)
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\fdlibm\e_pow.c
+
+!IF "$(CFG)" == "fdlibm - Win32 Release"
+
+DEP_CPP_E_POW=\
+ ".\fdlibm\fdlibm.h"\
+
+
+"$(INTDIR)\e_pow.obj" : $(SOURCE) $(DEP_CPP_E_POW) "$(INTDIR)"
+ $(CPP) $(CPP_PROJ) $(SOURCE)
+
+
+!ELSEIF "$(CFG)" == "fdlibm - Win32 Debug"
+
+DEP_CPP_E_POW=\
+ ".\fdlibm\fdlibm.h"\
+
+
+"$(INTDIR)\e_pow.obj" : $(SOURCE) $(DEP_CPP_E_POW) "$(INTDIR)"
+ $(CPP) $(CPP_PROJ) $(SOURCE)
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\fdlibm\k_standard.c
+
+!IF "$(CFG)" == "fdlibm - Win32 Release"
+
+DEP_CPP_K_STA=\
+ ".\fdlibm\fdlibm.h"\
+
+
+"$(INTDIR)\k_standard.obj" : $(SOURCE) $(DEP_CPP_K_STA) "$(INTDIR)"
+ $(CPP) $(CPP_PROJ) $(SOURCE)
+
+
+!ELSEIF "$(CFG)" == "fdlibm - Win32 Debug"
+
+DEP_CPP_K_STA=\
+ ".\fdlibm\fdlibm.h"\
+
+
+"$(INTDIR)\k_standard.obj" : $(SOURCE) $(DEP_CPP_K_STA) "$(INTDIR)"
+ $(CPP) $(CPP_PROJ) $(SOURCE)
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\fdlibm\e_atan2.c
+
+!IF "$(CFG)" == "fdlibm - Win32 Release"
+
+DEP_CPP_E_ATA=\
+ ".\fdlibm\fdlibm.h"\
+
+
+"$(INTDIR)\e_atan2.obj" : $(SOURCE) $(DEP_CPP_E_ATA) "$(INTDIR)"
+ $(CPP) $(CPP_PROJ) $(SOURCE)
+
+
+!ELSEIF "$(CFG)" == "fdlibm - Win32 Debug"
+
+DEP_CPP_E_ATA=\
+ ".\fdlibm\fdlibm.h"\
+
+
+"$(INTDIR)\e_atan2.obj" : $(SOURCE) $(DEP_CPP_E_ATA) "$(INTDIR)"
+ $(CPP) $(CPP_PROJ) $(SOURCE)
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\fdlibm\s_isnan.c
+
+!IF "$(CFG)" == "fdlibm - Win32 Release"
+
+DEP_CPP_S_ISN=\
+ ".\fdlibm\fdlibm.h"\
+
+
+"$(INTDIR)\s_isnan.obj" : $(SOURCE) $(DEP_CPP_S_ISN) "$(INTDIR)"
+ $(CPP) $(CPP_PROJ) $(SOURCE)
+
+
+!ELSEIF "$(CFG)" == "fdlibm - Win32 Debug"
+
+DEP_CPP_S_ISN=\
+ ".\fdlibm\fdlibm.h"\
+
+
+"$(INTDIR)\s_isnan.obj" : $(SOURCE) $(DEP_CPP_S_ISN) "$(INTDIR)"
+ $(CPP) $(CPP_PROJ) $(SOURCE)
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\fdlibm\s_fabs.c
+
+!IF "$(CFG)" == "fdlibm - Win32 Release"
+
+DEP_CPP_S_FAB=\
+ ".\fdlibm\fdlibm.h"\
+
+
+"$(INTDIR)\s_fabs.obj" : $(SOURCE) $(DEP_CPP_S_FAB) "$(INTDIR)"
+ $(CPP) $(CPP_PROJ) $(SOURCE)
+
+
+!ELSEIF "$(CFG)" == "fdlibm - Win32 Debug"
+
+DEP_CPP_S_FAB=\
+ ".\fdlibm\fdlibm.h"\
+
+
+"$(INTDIR)\s_fabs.obj" : $(SOURCE) $(DEP_CPP_S_FAB) "$(INTDIR)"
+ $(CPP) $(CPP_PROJ) $(SOURCE)
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\fdlibm\w_sqrt.c
+
+!IF "$(CFG)" == "fdlibm - Win32 Release"
+
+DEP_CPP_W_SQR=\
+ ".\fdlibm\fdlibm.h"\
+
+
+"$(INTDIR)\w_sqrt.obj" : $(SOURCE) $(DEP_CPP_W_SQR) "$(INTDIR)"
+ $(CPP) $(CPP_PROJ) $(SOURCE)
+
+
+!ELSEIF "$(CFG)" == "fdlibm - Win32 Debug"
+
+DEP_CPP_W_SQR=\
+ ".\fdlibm\fdlibm.h"\
+
+
+"$(INTDIR)\w_sqrt.obj" : $(SOURCE) $(DEP_CPP_W_SQR) "$(INTDIR)"
+ $(CPP) $(CPP_PROJ) $(SOURCE)
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\fdlibm\s_scalbn.c
+
+!IF "$(CFG)" == "fdlibm - Win32 Release"
+
+DEP_CPP_S_SCA=\
+ ".\fdlibm\fdlibm.h"\
+
+
+"$(INTDIR)\s_scalbn.obj" : $(SOURCE) $(DEP_CPP_S_SCA) "$(INTDIR)"
+ $(CPP) $(CPP_PROJ) $(SOURCE)
+
+
+!ELSEIF "$(CFG)" == "fdlibm - Win32 Debug"
+
+DEP_CPP_S_SCA=\
+ ".\fdlibm\fdlibm.h"\
+
+
+"$(INTDIR)\s_scalbn.obj" : $(SOURCE) $(DEP_CPP_S_SCA) "$(INTDIR)"
+ $(CPP) $(CPP_PROJ) $(SOURCE)
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\fdlibm\e_sqrt.c
+
+!IF "$(CFG)" == "fdlibm - Win32 Release"
+
+DEP_CPP_E_SQR=\
+ ".\fdlibm\fdlibm.h"\
+
+
+"$(INTDIR)\e_sqrt.obj" : $(SOURCE) $(DEP_CPP_E_SQR) "$(INTDIR)"
+ $(CPP) $(CPP_PROJ) $(SOURCE)
+
+
+!ELSEIF "$(CFG)" == "fdlibm - Win32 Debug"
+
+DEP_CPP_E_SQR=\
+ ".\fdlibm\fdlibm.h"\
+
+
+"$(INTDIR)\e_sqrt.obj" : $(SOURCE) $(DEP_CPP_E_SQR) "$(INTDIR)"
+ $(CPP) $(CPP_PROJ) $(SOURCE)
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\fdlibm\s_rint.c
+
+!IF "$(CFG)" == "fdlibm - Win32 Release"
+
+DEP_CPP_S_RIN=\
+ ".\fdlibm\fdlibm.h"\
+
+
+"$(INTDIR)\s_rint.obj" : $(SOURCE) $(DEP_CPP_S_RIN) "$(INTDIR)"
+ $(CPP) $(CPP_PROJ) $(SOURCE)
+
+
+!ELSEIF "$(CFG)" == "fdlibm - Win32 Debug"
+
+DEP_CPP_S_RIN=\
+ ".\fdlibm\fdlibm.h"\
+
+
+"$(INTDIR)\s_rint.obj" : $(SOURCE) $(DEP_CPP_S_RIN) "$(INTDIR)"
+ $(CPP) $(CPP_PROJ) $(SOURCE)
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\fdlibm\s_atan.c
+
+!IF "$(CFG)" == "fdlibm - Win32 Release"
+
+DEP_CPP_S_ATA=\
+ ".\fdlibm\fdlibm.h"\
+
+
+"$(INTDIR)\s_atan.obj" : $(SOURCE) $(DEP_CPP_S_ATA) "$(INTDIR)"
+ $(CPP) $(CPP_PROJ) $(SOURCE)
+
+
+!ELSEIF "$(CFG)" == "fdlibm - Win32 Debug"
+
+DEP_CPP_S_ATA=\
+ ".\fdlibm\fdlibm.h"\
+
+
+"$(INTDIR)\s_atan.obj" : $(SOURCE) $(DEP_CPP_S_ATA) "$(INTDIR)"
+ $(CPP) $(CPP_PROJ) $(SOURCE)
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\fdlibm\s_finite.c
+
+!IF "$(CFG)" == "fdlibm - Win32 Release"
+
+DEP_CPP_S_FIN=\
+ ".\fdlibm\fdlibm.h"\
+
+
+"$(INTDIR)\s_finite.obj" : $(SOURCE) $(DEP_CPP_S_FIN) "$(INTDIR)"
+ $(CPP) $(CPP_PROJ) $(SOURCE)
+
+
+!ELSEIF "$(CFG)" == "fdlibm - Win32 Debug"
+
+DEP_CPP_S_FIN=\
+ ".\fdlibm\fdlibm.h"\
+
+
+"$(INTDIR)\s_finite.obj" : $(SOURCE) $(DEP_CPP_S_FIN) "$(INTDIR)"
+ $(CPP) $(CPP_PROJ) $(SOURCE)
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\fdlibm\s_matherr.c
+
+!IF "$(CFG)" == "fdlibm - Win32 Release"
+
+DEP_CPP_S_MAT=\
+ ".\fdlibm\fdlibm.h"\
+
+
+"$(INTDIR)\s_matherr.obj" : $(SOURCE) $(DEP_CPP_S_MAT) "$(INTDIR)"
+ $(CPP) $(CPP_PROJ) $(SOURCE)
+
+
+!ELSEIF "$(CFG)" == "fdlibm - Win32 Debug"
+
+DEP_CPP_S_MAT=\
+ ".\fdlibm\fdlibm.h"\
+
+
+"$(INTDIR)\s_matherr.obj" : $(SOURCE) $(DEP_CPP_S_MAT) "$(INTDIR)"
+ $(CPP) $(CPP_PROJ) $(SOURCE)
+
+
+!ENDIF
+
+# End Source File
+# End Target
+# End Project
+################################################################################
diff --git a/third_party/js-1.7/js.mdp b/third_party/js-1.7/js.mdp
new file mode 100644
index 0000000..8da64fb
--- /dev/null
+++ b/third_party/js-1.7/js.mdp
Binary files differ
diff --git a/third_party/js-1.7/js.msg b/third_party/js-1.7/js.msg
new file mode 100644
index 0000000..2686af0
--- /dev/null
+++ b/third_party/js-1.7/js.msg
@@ -0,0 +1,301 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * This is the JavaScript error message file.
+ *
+ * The format for each JS error message is:
+ *
+ * MSG_DEF(<SYMBOLIC_NAME>, <ERROR_NUMBER>, <ARGUMENT_COUNT>, <EXCEPTION_NAME>,
+ * <FORMAT_STRING>)
+ *
+ * where:
+ * <SYMBOLIC_NAME> is a legal C identifier that will be used in the
+ * JS engine source.
+ *
+ * <ERROR_NUMBER> is a unique integral value identifying this error.
+ *
+ * <ARGUMENT_COUNT> is an integer literal specifying the total number of
+ * replaceable arguments in the following format string.
+ *
+ * <EXCEPTION_NAME> is an exception index from the enum in jsexn.c;
+ * JSEXN_NONE for none. The given exception index will be raised by the
+ * engine when the corresponding error occurs.
+ *
+ * <FORMAT_STRING> is a string literal, optionally containing sequences
+ * {X} where X is an integer representing the argument number that will
+ * be replaced with a string value when the error is reported.
+ *
+ * e.g.
+ *
+ *     MSG_DEF(JSMSG_NOT_A_SUBSPECIES, 73, 2, JSEXN_NONE,
+ * "{0} is not a member of the {1} family")
+ *
+ * can be used:
+ *
+ * JS_ReportErrorNumber(JSMSG_NOT_A_SUBSPECIES, "Rhino", "Monkey");
+ *
+ * to report:
+ *
+ * "Rhino is not a member of the Monkey family"
+ *
+ * Before adding a new MSG_DEF at the end, look for JSMSG_UNUSED<n> free
+ * index placeholders in the middle of the list.
+ */
+
+MSG_DEF(JSMSG_NOT_AN_ERROR, 0, 0, JSEXN_NONE, "<Error #0 is reserved>")
+MSG_DEF(JSMSG_NOT_DEFINED, 1, 1, JSEXN_REFERENCEERR, "{0} is not defined")
+MSG_DEF(JSMSG_INACTIVE, 2, 0, JSEXN_INTERNALERR, "nothing active on context")
+MSG_DEF(JSMSG_MORE_ARGS_NEEDED, 3, 3, JSEXN_TYPEERR, "{0} requires more than {1} argument{2}")
+MSG_DEF(JSMSG_BAD_CHAR, 4, 1, JSEXN_INTERNALERR, "invalid format character {0}")
+MSG_DEF(JSMSG_BAD_TYPE, 5, 1, JSEXN_TYPEERR, "unknown type {0}")
+MSG_DEF(JSMSG_CANT_LOCK, 6, 0, JSEXN_INTERNALERR, "can't lock memory")
+MSG_DEF(JSMSG_CANT_UNLOCK, 7, 0, JSEXN_INTERNALERR, "can't unlock memory")
+MSG_DEF(JSMSG_INCOMPATIBLE_PROTO, 8, 3, JSEXN_TYPEERR, "{0}.prototype.{1} called on incompatible {2}")
+MSG_DEF(JSMSG_NO_CONSTRUCTOR, 9, 1, JSEXN_TYPEERR, "{0} has no constructor")
+MSG_DEF(JSMSG_CANT_ALIAS, 10, 3, JSEXN_TYPEERR, "can't alias {0} to {1} in class {2}")
+MSG_DEF(JSMSG_NOT_SCRIPTED_FUNCTION, 11, 1, JSEXN_TYPEERR, "{0} is not a scripted function")
+MSG_DEF(JSMSG_BAD_SORT_ARG, 12, 0, JSEXN_TYPEERR, "invalid Array.prototype.sort argument")
+MSG_DEF(JSMSG_BAD_ATOMIC_NUMBER, 13, 1, JSEXN_INTERNALERR, "internal error: no index for atom {0}")
+MSG_DEF(JSMSG_TOO_MANY_LITERALS, 14, 0, JSEXN_INTERNALERR, "too many literals")
+MSG_DEF(JSMSG_CANT_WATCH, 15, 1, JSEXN_TYPEERR, "can't watch non-native objects of class {0}")
+MSG_DEF(JSMSG_STACK_UNDERFLOW, 16, 2, JSEXN_INTERNALERR, "internal error compiling {0}: stack underflow at pc {1}")
+MSG_DEF(JSMSG_NEED_DIET, 17, 1, JSEXN_INTERNALERR, "{0} too large")
+MSG_DEF(JSMSG_TOO_MANY_LOCAL_ROOTS, 18, 0, JSEXN_ERR, "out of local root space")
+MSG_DEF(JSMSG_READ_ONLY, 19, 1, JSEXN_ERR, "{0} is read-only")
+MSG_DEF(JSMSG_BAD_FORMAL, 20, 0, JSEXN_SYNTAXERR, "malformed formal parameter")
+MSG_DEF(JSMSG_BAD_ITERATOR, 21, 3, JSEXN_TYPEERR, "{0} has invalid {1} value {2}")
+MSG_DEF(JSMSG_NOT_FUNCTION, 22, 1, JSEXN_TYPEERR, "{0} is not a function")
+MSG_DEF(JSMSG_NOT_CONSTRUCTOR, 23, 1, JSEXN_TYPEERR, "{0} is not a constructor")
+MSG_DEF(JSMSG_STACK_OVERFLOW, 24, 1, JSEXN_INTERNALERR, "stack overflow in {0}")
+MSG_DEF(JSMSG_NOT_EXPORTED, 25, 1, JSEXN_TYPEERR, "{0} is not exported")
+MSG_DEF(JSMSG_OVER_RECURSED, 26, 0, JSEXN_INTERNALERR, "too much recursion")
+MSG_DEF(JSMSG_IN_NOT_OBJECT, 27, 1, JSEXN_TYPEERR, "invalid 'in' operand {0}")
+MSG_DEF(JSMSG_BAD_NEW_RESULT, 28, 1, JSEXN_TYPEERR, "invalid new expression result {0}")
+MSG_DEF(JSMSG_BAD_SHARP_DEF, 29, 1, JSEXN_ERR, "invalid sharp variable definition #{0}=")
+MSG_DEF(JSMSG_BAD_SHARP_USE, 30, 1, JSEXN_ERR, "invalid sharp variable use #{0}#")
+MSG_DEF(JSMSG_BAD_INSTANCEOF_RHS, 31, 1, JSEXN_TYPEERR, "invalid 'instanceof' operand {0}")
+MSG_DEF(JSMSG_BAD_BYTECODE, 32, 1, JSEXN_INTERNALERR, "unimplemented JavaScript bytecode {0}")
+MSG_DEF(JSMSG_BAD_RADIX, 33, 1, JSEXN_ERR, "illegal radix {0}")
+MSG_DEF(JSMSG_PAREN_BEFORE_LET, 34, 0, JSEXN_SYNTAXERR, "missing ( before let head")
+MSG_DEF(JSMSG_CANT_CONVERT, 35, 1, JSEXN_ERR, "can't convert {0} to an integer")
+MSG_DEF(JSMSG_CYCLIC_VALUE, 36, 1, JSEXN_ERR, "cyclic {0} value")
+MSG_DEF(JSMSG_PERMANENT, 37, 1, JSEXN_ERR, "{0} is permanent")
+MSG_DEF(JSMSG_CANT_CONVERT_TO, 38, 2, JSEXN_TYPEERR, "can't convert {0} to {1}")
+MSG_DEF(JSMSG_NO_PROPERTIES, 39, 1, JSEXN_TYPEERR, "{0} has no properties")
+MSG_DEF(JSMSG_CANT_FIND_CLASS, 40, 1, JSEXN_TYPEERR, "can't find class id {0}")
+MSG_DEF(JSMSG_CANT_XDR_CLASS, 41, 1, JSEXN_TYPEERR, "can't XDR class {0}")
+MSG_DEF(JSMSG_BYTECODE_TOO_BIG, 42, 2, JSEXN_INTERNALERR, "bytecode {0} too large (limit {1})")
+MSG_DEF(JSMSG_UNKNOWN_FORMAT, 43, 1, JSEXN_INTERNALERR, "unknown bytecode format {0}")
+MSG_DEF(JSMSG_TOO_MANY_CON_ARGS, 44, 0, JSEXN_SYNTAXERR, "too many constructor arguments")
+MSG_DEF(JSMSG_TOO_MANY_FUN_ARGS, 45, 0, JSEXN_SYNTAXERR, "too many function arguments")
+MSG_DEF(JSMSG_BAD_QUANTIFIER, 46, 1, JSEXN_SYNTAXERR, "invalid quantifier {0}")
+MSG_DEF(JSMSG_MIN_TOO_BIG, 47, 1, JSEXN_SYNTAXERR, "overlarge minimum {0}")
+MSG_DEF(JSMSG_MAX_TOO_BIG, 48, 1, JSEXN_SYNTAXERR, "overlarge maximum {0}")
+MSG_DEF(JSMSG_OUT_OF_ORDER, 49, 1, JSEXN_SYNTAXERR, "maximum {0} less than minimum")
+MSG_DEF(JSMSG_BAD_DESTRUCT_DECL, 50, 0, JSEXN_SYNTAXERR, "missing = in destructuring declaration")
+MSG_DEF(JSMSG_BAD_DESTRUCT_ASS, 51, 0, JSEXN_SYNTAXERR, "invalid destructuring assignment operator")
+MSG_DEF(JSMSG_PAREN_AFTER_LET, 52, 0, JSEXN_SYNTAXERR, "missing ) after let head")
+MSG_DEF(JSMSG_CURLY_AFTER_LET, 53, 0, JSEXN_SYNTAXERR, "missing } after let block")
+MSG_DEF(JSMSG_MISSING_PAREN, 54, 0, JSEXN_SYNTAXERR, "unterminated parenthetical")
+MSG_DEF(JSMSG_UNTERM_CLASS, 55, 1, JSEXN_SYNTAXERR, "unterminated character class {0}")
+MSG_DEF(JSMSG_TRAILING_SLASH, 56, 0, JSEXN_SYNTAXERR, "trailing \\ in regular expression")
+MSG_DEF(JSMSG_BAD_CLASS_RANGE, 57, 0, JSEXN_SYNTAXERR, "invalid range in character class")
+MSG_DEF(JSMSG_BAD_FLAG, 58, 1, JSEXN_SYNTAXERR, "invalid regular expression flag {0}")
+MSG_DEF(JSMSG_NO_INPUT, 59, 3, JSEXN_SYNTAXERR, "no input for /{0}/{1}{2}")
+MSG_DEF(JSMSG_CANT_OPEN, 60, 2, JSEXN_ERR, "can't open {0}: {1}")
+MSG_DEF(JSMSG_BAD_STRING_MASK, 61, 1, JSEXN_ERR, "invalid string escape mask {0}")
+MSG_DEF(JSMSG_UNMATCHED_RIGHT_PAREN, 62, 0, JSEXN_SYNTAXERR, "unmatched ) in regular expression")
+MSG_DEF(JSMSG_END_OF_DATA, 63, 0, JSEXN_INTERNALERR, "unexpected end of data")
+MSG_DEF(JSMSG_SEEK_BEYOND_START, 64, 0, JSEXN_INTERNALERR, "illegal seek beyond start")
+MSG_DEF(JSMSG_SEEK_BEYOND_END, 65, 0, JSEXN_INTERNALERR, "illegal seek beyond end")
+MSG_DEF(JSMSG_END_SEEK, 66, 0, JSEXN_INTERNALERR, "illegal end-based seek")
+MSG_DEF(JSMSG_WHITHER_WHENCE, 67, 1, JSEXN_INTERNALERR, "unknown seek whence: {0}")
+MSG_DEF(JSMSG_BAD_SCRIPT_MAGIC, 68, 0, JSEXN_INTERNALERR, "bad script XDR magic number")
+MSG_DEF(JSMSG_PAREN_BEFORE_FORMAL, 69, 0, JSEXN_SYNTAXERR, "missing ( before formal parameters")
+MSG_DEF(JSMSG_MISSING_FORMAL, 70, 0, JSEXN_SYNTAXERR, "missing formal parameter")
+MSG_DEF(JSMSG_PAREN_AFTER_FORMAL, 71, 0, JSEXN_SYNTAXERR, "missing ) after formal parameters")
+MSG_DEF(JSMSG_CURLY_BEFORE_BODY, 72, 0, JSEXN_SYNTAXERR, "missing { before function body")
+MSG_DEF(JSMSG_CURLY_AFTER_BODY, 73, 0, JSEXN_SYNTAXERR, "missing } after function body")
+MSG_DEF(JSMSG_PAREN_BEFORE_COND, 74, 0, JSEXN_SYNTAXERR, "missing ( before condition")
+MSG_DEF(JSMSG_PAREN_AFTER_COND, 75, 0, JSEXN_SYNTAXERR, "missing ) after condition")
+MSG_DEF(JSMSG_NO_IMPORT_NAME, 76, 0, JSEXN_SYNTAXERR, "missing name in import statement")
+MSG_DEF(JSMSG_NAME_AFTER_DOT, 77, 0, JSEXN_SYNTAXERR, "missing name after . operator")
+MSG_DEF(JSMSG_BRACKET_IN_INDEX, 78, 0, JSEXN_SYNTAXERR, "missing ] in index expression")
+MSG_DEF(JSMSG_NO_EXPORT_NAME, 79, 0, JSEXN_SYNTAXERR, "missing name in export statement")
+MSG_DEF(JSMSG_PAREN_BEFORE_SWITCH, 80, 0, JSEXN_SYNTAXERR, "missing ( before switch expression")
+MSG_DEF(JSMSG_PAREN_AFTER_SWITCH, 81, 0, JSEXN_SYNTAXERR, "missing ) after switch expression")
+MSG_DEF(JSMSG_CURLY_BEFORE_SWITCH, 82, 0, JSEXN_SYNTAXERR, "missing { before switch body")
+MSG_DEF(JSMSG_COLON_AFTER_CASE, 83, 0, JSEXN_SYNTAXERR, "missing : after case label")
+MSG_DEF(JSMSG_WHILE_AFTER_DO, 84, 0, JSEXN_SYNTAXERR, "missing while after do-loop body")
+MSG_DEF(JSMSG_PAREN_AFTER_FOR, 85, 0, JSEXN_SYNTAXERR, "missing ( after for")
+MSG_DEF(JSMSG_SEMI_AFTER_FOR_INIT, 86, 0, JSEXN_SYNTAXERR, "missing ; after for-loop initializer")
+MSG_DEF(JSMSG_SEMI_AFTER_FOR_COND, 87, 0, JSEXN_SYNTAXERR, "missing ; after for-loop condition")
+MSG_DEF(JSMSG_PAREN_AFTER_FOR_CTRL, 88, 0, JSEXN_SYNTAXERR, "missing ) after for-loop control")
+MSG_DEF(JSMSG_CURLY_BEFORE_TRY, 89, 0, JSEXN_SYNTAXERR, "missing { before try block")
+MSG_DEF(JSMSG_CURLY_AFTER_TRY, 90, 0, JSEXN_SYNTAXERR, "missing } after try block")
+MSG_DEF(JSMSG_PAREN_BEFORE_CATCH, 91, 0, JSEXN_SYNTAXERR, "missing ( before catch")
+MSG_DEF(JSMSG_CATCH_IDENTIFIER, 92, 0, JSEXN_SYNTAXERR, "missing identifier in catch")
+MSG_DEF(JSMSG_PAREN_AFTER_CATCH, 93, 0, JSEXN_SYNTAXERR, "missing ) after catch")
+MSG_DEF(JSMSG_CURLY_BEFORE_CATCH, 94, 0, JSEXN_SYNTAXERR, "missing { before catch block")
+MSG_DEF(JSMSG_CURLY_AFTER_CATCH, 95, 0, JSEXN_SYNTAXERR, "missing } after catch block")
+MSG_DEF(JSMSG_CURLY_BEFORE_FINALLY, 96, 0, JSEXN_SYNTAXERR, "missing { before finally block")
+MSG_DEF(JSMSG_CURLY_AFTER_FINALLY, 97, 0, JSEXN_SYNTAXERR, "missing } after finally block")
+MSG_DEF(JSMSG_CATCH_OR_FINALLY, 98, 0, JSEXN_SYNTAXERR, "missing catch or finally after try")
+MSG_DEF(JSMSG_PAREN_BEFORE_WITH, 99, 0, JSEXN_SYNTAXERR, "missing ( before with-statement object")
+MSG_DEF(JSMSG_PAREN_AFTER_WITH, 100, 0, JSEXN_SYNTAXERR, "missing ) after with-statement object")
+MSG_DEF(JSMSG_CURLY_IN_COMPOUND, 101, 0, JSEXN_SYNTAXERR, "missing } in compound statement")
+MSG_DEF(JSMSG_NO_VARIABLE_NAME, 102, 0, JSEXN_SYNTAXERR, "missing variable name")
+MSG_DEF(JSMSG_COLON_IN_COND, 103, 0, JSEXN_SYNTAXERR, "missing : in conditional expression")
+MSG_DEF(JSMSG_PAREN_AFTER_ARGS, 104, 0, JSEXN_SYNTAXERR, "missing ) after argument list")
+MSG_DEF(JSMSG_BRACKET_AFTER_LIST, 105, 0, JSEXN_SYNTAXERR, "missing ] after element list")
+MSG_DEF(JSMSG_COLON_AFTER_ID, 106, 0, JSEXN_SYNTAXERR, "missing : after property id")
+MSG_DEF(JSMSG_CURLY_AFTER_LIST, 107, 0, JSEXN_SYNTAXERR, "missing } after property list")
+MSG_DEF(JSMSG_PAREN_IN_PAREN, 108, 0, JSEXN_SYNTAXERR, "missing ) in parenthetical")
+MSG_DEF(JSMSG_SEMI_BEFORE_STMNT, 109, 0, JSEXN_SYNTAXERR, "missing ; before statement")
+MSG_DEF(JSMSG_NO_RETURN_VALUE, 110, 1, JSEXN_TYPEERR, "function {0} does not always return a value")
+MSG_DEF(JSMSG_DUPLICATE_FORMAL, 111, 1, JSEXN_TYPEERR, "duplicate formal argument {0}")
+MSG_DEF(JSMSG_EQUAL_AS_ASSIGN, 112, 1, JSEXN_SYNTAXERR, "test for equality (==) mistyped as assignment (=)?{0}")
+MSG_DEF(JSMSG_BAD_IMPORT, 113, 0, JSEXN_SYNTAXERR, "invalid import expression")
+MSG_DEF(JSMSG_TOO_MANY_DEFAULTS, 114, 0, JSEXN_SYNTAXERR, "more than one switch default")
+MSG_DEF(JSMSG_TOO_MANY_CASES, 115, 0, JSEXN_INTERNALERR, "too many switch cases")
+MSG_DEF(JSMSG_BAD_SWITCH, 116, 0, JSEXN_SYNTAXERR, "invalid switch statement")
+MSG_DEF(JSMSG_BAD_FOR_LEFTSIDE, 117, 0, JSEXN_SYNTAXERR, "invalid for/in left-hand side")
+MSG_DEF(JSMSG_CATCH_AFTER_GENERAL, 118, 0, JSEXN_SYNTAXERR, "catch after unconditional catch")
+MSG_DEF(JSMSG_CATCH_WITHOUT_TRY, 119, 0, JSEXN_SYNTAXERR, "catch without try")
+MSG_DEF(JSMSG_FINALLY_WITHOUT_TRY, 120, 0, JSEXN_SYNTAXERR, "finally without try")
+MSG_DEF(JSMSG_LABEL_NOT_FOUND, 121, 0, JSEXN_SYNTAXERR, "label not found")
+MSG_DEF(JSMSG_TOUGH_BREAK, 122, 0, JSEXN_SYNTAXERR, "invalid break")
+MSG_DEF(JSMSG_BAD_CONTINUE, 123, 0, JSEXN_SYNTAXERR, "invalid continue")
+MSG_DEF(JSMSG_BAD_RETURN_OR_YIELD, 124, 1, JSEXN_SYNTAXERR, "{0} not in function")
+MSG_DEF(JSMSG_BAD_LABEL, 125, 0, JSEXN_SYNTAXERR, "invalid label")
+MSG_DEF(JSMSG_DUPLICATE_LABEL, 126, 0, JSEXN_SYNTAXERR, "duplicate label")
+MSG_DEF(JSMSG_VAR_HIDES_ARG, 127, 1, JSEXN_TYPEERR, "variable {0} hides argument")
+MSG_DEF(JSMSG_BAD_VAR_INIT, 128, 0, JSEXN_SYNTAXERR, "invalid variable initialization")
+MSG_DEF(JSMSG_BAD_LEFTSIDE_OF_ASS, 129, 0, JSEXN_SYNTAXERR, "invalid assignment left-hand side")
+MSG_DEF(JSMSG_BAD_OPERAND, 130, 1, JSEXN_SYNTAXERR, "invalid {0} operand")
+MSG_DEF(JSMSG_BAD_PROP_ID, 131, 0, JSEXN_SYNTAXERR, "invalid property id")
+MSG_DEF(JSMSG_RESERVED_ID, 132, 1, JSEXN_SYNTAXERR, "{0} is a reserved identifier")
+MSG_DEF(JSMSG_SYNTAX_ERROR, 133, 0, JSEXN_SYNTAXERR, "syntax error")
+MSG_DEF(JSMSG_BAD_SHARP_VAR_DEF, 134, 0, JSEXN_SYNTAXERR, "invalid sharp variable definition")
+MSG_DEF(JSMSG_BAD_PROTOTYPE, 135, 1, JSEXN_TYPEERR, "'prototype' property of {0} is not an object")
+MSG_DEF(JSMSG_MISSING_EXPONENT, 136, 0, JSEXN_SYNTAXERR, "missing exponent")
+MSG_DEF(JSMSG_OUT_OF_MEMORY, 137, 0, JSEXN_ERR, "out of memory")
+MSG_DEF(JSMSG_UNTERMINATED_STRING, 138, 0, JSEXN_SYNTAXERR, "unterminated string literal")
+MSG_DEF(JSMSG_TOO_MANY_PARENS, 139, 0, JSEXN_INTERNALERR, "too many parentheses in regular expression")
+MSG_DEF(JSMSG_UNTERMINATED_COMMENT, 140, 0, JSEXN_SYNTAXERR, "unterminated comment")
+MSG_DEF(JSMSG_UNTERMINATED_REGEXP, 141, 0, JSEXN_SYNTAXERR, "unterminated regular expression literal")
+MSG_DEF(JSMSG_BAD_REGEXP_FLAG, 142, 0, JSEXN_SYNTAXERR, "invalid flag after regular expression")
+MSG_DEF(JSMSG_SHARPVAR_TOO_BIG, 143, 0, JSEXN_SYNTAXERR, "overlarge sharp variable number")
+MSG_DEF(JSMSG_ILLEGAL_CHARACTER, 144, 0, JSEXN_SYNTAXERR, "illegal character")
+MSG_DEF(JSMSG_BAD_OCTAL, 145, 1, JSEXN_SYNTAXERR, "{0} is not a legal ECMA-262 octal constant")
+MSG_DEF(JSMSG_BAD_INDIRECT_CALL, 146, 1, JSEXN_EVALERR, "function {0} must be called directly, and not by way of a function of another name")
+MSG_DEF(JSMSG_UNCAUGHT_EXCEPTION, 147, 1, JSEXN_INTERNALERR, "uncaught exception: {0}")
+MSG_DEF(JSMSG_INVALID_BACKREF, 148, 0, JSEXN_SYNTAXERR, "non-octal digit in an escape sequence that doesn't match a back-reference")
+MSG_DEF(JSMSG_BAD_BACKREF, 149, 0, JSEXN_SYNTAXERR, "back-reference exceeds number of capturing parentheses")
+MSG_DEF(JSMSG_PRECISION_RANGE, 150, 1, JSEXN_RANGEERR, "precision {0} out of range")
+MSG_DEF(JSMSG_BAD_GETTER_OR_SETTER, 151, 1, JSEXN_SYNTAXERR, "invalid {0} usage")
+MSG_DEF(JSMSG_BAD_ARRAY_LENGTH, 152, 0, JSEXN_RANGEERR, "invalid array length")
+MSG_DEF(JSMSG_CANT_DESCRIBE_PROPS, 153, 1, JSEXN_TYPEERR, "can't describe non-native properties of class {0}")
+MSG_DEF(JSMSG_BAD_APPLY_ARGS, 154, 1, JSEXN_TYPEERR, "second argument to Function.prototype.{0} must be an array")
+MSG_DEF(JSMSG_REDECLARED_VAR, 155, 2, JSEXN_TYPEERR, "redeclaration of {0} {1}")
+MSG_DEF(JSMSG_UNDECLARED_VAR, 156, 1, JSEXN_TYPEERR, "assignment to undeclared variable {0}")
+MSG_DEF(JSMSG_ANON_NO_RETURN_VALUE, 157, 0, JSEXN_TYPEERR, "anonymous function does not always return a value")
+MSG_DEF(JSMSG_DEPRECATED_USAGE, 158, 1, JSEXN_REFERENCEERR, "deprecated {0} usage")
+MSG_DEF(JSMSG_BAD_URI, 159, 0, JSEXN_URIERR, "malformed URI sequence")
+MSG_DEF(JSMSG_GETTER_ONLY, 160, 0, JSEXN_TYPEERR, "setting a property that has only a getter")
+MSG_DEF(JSMSG_TRAILING_COMMA, 161, 0, JSEXN_SYNTAXERR, "trailing comma is not legal in ECMA-262 object initializers")
+MSG_DEF(JSMSG_UNDEFINED_PROP, 162, 1, JSEXN_REFERENCEERR, "reference to undefined property {0}")
+MSG_DEF(JSMSG_USELESS_EXPR, 163, 0, JSEXN_TYPEERR, "useless expression")
+MSG_DEF(JSMSG_REDECLARED_PARAM, 164, 1, JSEXN_TYPEERR, "redeclaration of formal parameter {0}")
+MSG_DEF(JSMSG_NEWREGEXP_FLAGGED, 165, 0, JSEXN_TYPEERR, "can't supply flags when constructing one RegExp from another")
+MSG_DEF(JSMSG_RESERVED_SLOT_RANGE, 166, 0, JSEXN_RANGEERR, "reserved slot index out of range")
+MSG_DEF(JSMSG_CANT_DECODE_PRINCIPALS, 167, 0, JSEXN_INTERNALERR, "can't decode JSPrincipals")
+MSG_DEF(JSMSG_CANT_SEAL_OBJECT, 168, 1, JSEXN_ERR, "can't seal {0} objects")
+MSG_DEF(JSMSG_TOO_MANY_CATCH_VARS, 169, 0, JSEXN_SYNTAXERR, "too many catch variables")
+MSG_DEF(JSMSG_BAD_XML_MARKUP, 170, 0, JSEXN_SYNTAXERR, "invalid XML markup")
+MSG_DEF(JSMSG_BAD_XML_CHARACTER, 171, 0, JSEXN_SYNTAXERR, "illegal XML character")
+MSG_DEF(JSMSG_BAD_DEFAULT_XML_NAMESPACE,172,0,JSEXN_SYNTAXERR, "invalid default XML namespace")
+MSG_DEF(JSMSG_BAD_XML_NAME_SYNTAX, 173, 0, JSEXN_SYNTAXERR, "invalid XML name")
+MSG_DEF(JSMSG_BRACKET_AFTER_ATTR_EXPR,174, 0, JSEXN_SYNTAXERR, "missing ] after attribute expression")
+MSG_DEF(JSMSG_NESTING_GENERATOR, 175, 1, JSEXN_TYPEERR, "already executing generator {0}")
+MSG_DEF(JSMSG_CURLY_IN_XML_EXPR, 176, 0, JSEXN_SYNTAXERR, "missing } in XML expression")
+MSG_DEF(JSMSG_BAD_XML_NAMESPACE, 177, 1, JSEXN_TYPEERR, "invalid XML namespace {0}")
+MSG_DEF(JSMSG_BAD_XML_ATTR_NAME, 178, 1, JSEXN_TYPEERR, "invalid XML attribute name {0}")
+MSG_DEF(JSMSG_BAD_XML_NAME, 179, 1, JSEXN_TYPEERR, "invalid XML name {0}")
+MSG_DEF(JSMSG_BAD_XML_CONVERSION, 180, 1, JSEXN_TYPEERR, "can't convert {0} to XML")
+MSG_DEF(JSMSG_BAD_XMLLIST_CONVERSION, 181, 1, JSEXN_TYPEERR, "can't convert {0} to XMLList")
+MSG_DEF(JSMSG_BAD_GENERATOR_SEND, 182, 1, JSEXN_TYPEERR, "attempt to send {0} to newborn generator")
+MSG_DEF(JSMSG_NO_ASSIGN_IN_XML_ATTR, 183, 0, JSEXN_SYNTAXERR, "missing = in XML attribute")
+MSG_DEF(JSMSG_BAD_XML_ATTR_VALUE, 184, 0, JSEXN_SYNTAXERR, "invalid XML attribute value")
+MSG_DEF(JSMSG_XML_TAG_NAME_MISMATCH, 185, 1, JSEXN_SYNTAXERR, "XML tag name mismatch (expected {0})")
+MSG_DEF(JSMSG_BAD_XML_TAG_SYNTAX, 186, 0, JSEXN_SYNTAXERR, "invalid XML tag syntax")
+MSG_DEF(JSMSG_BAD_XML_LIST_SYNTAX, 187, 0, JSEXN_SYNTAXERR, "invalid XML list syntax")
+MSG_DEF(JSMSG_INCOMPATIBLE_METHOD, 188, 3, JSEXN_TYPEERR, "{0} {1} called on incompatible {2}")
+MSG_DEF(JSMSG_CANT_SET_XML_ATTRS, 189, 0, JSEXN_INTERNALERR, "can't set XML property attributes")
+MSG_DEF(JSMSG_END_OF_XML_SOURCE, 190, 0, JSEXN_SYNTAXERR, "unexpected end of XML source")
+MSG_DEF(JSMSG_END_OF_XML_ENTITY, 191, 0, JSEXN_SYNTAXERR, "unexpected end of XML entity")
+MSG_DEF(JSMSG_BAD_XML_QNAME, 192, 0, JSEXN_SYNTAXERR, "invalid XML qualified name")
+MSG_DEF(JSMSG_BAD_FOR_EACH_LOOP, 193, 0, JSEXN_SYNTAXERR, "invalid for each loop")
+MSG_DEF(JSMSG_BAD_XMLLIST_PUT, 194, 1, JSEXN_TYPEERR, "can't set property {0} in XMLList")
+MSG_DEF(JSMSG_UNKNOWN_XML_ENTITY, 195, 1, JSEXN_TYPEERR, "unknown XML entity {0}")
+MSG_DEF(JSMSG_BAD_XML_NCR, 196, 1, JSEXN_TYPEERR, "malformed XML character {0}")
+MSG_DEF(JSMSG_UNDEFINED_XML_NAME, 197, 1, JSEXN_REFERENCEERR, "reference to undefined XML name {0}")
+MSG_DEF(JSMSG_DUPLICATE_XML_ATTR, 198, 1, JSEXN_TYPEERR, "duplicate XML attribute {0}")
+MSG_DEF(JSMSG_TOO_MANY_FUN_VARS, 199, 0, JSEXN_SYNTAXERR, "too many local variables")
+MSG_DEF(JSMSG_ARRAY_INIT_TOO_BIG, 200, 0, JSEXN_INTERNALERR, "array initialiser too large")
+MSG_DEF(JSMSG_REGEXP_TOO_COMPLEX, 201, 0, JSEXN_INTERNALERR, "regular expression too complex")
+MSG_DEF(JSMSG_BUFFER_TOO_SMALL, 202, 0, JSEXN_INTERNALERR, "buffer too small")
+MSG_DEF(JSMSG_BAD_SURROGATE_CHAR, 203, 1, JSEXN_TYPEERR, "bad surrogate character {0}")
+MSG_DEF(JSMSG_UTF8_CHAR_TOO_LARGE, 204, 1, JSEXN_TYPEERR, "UTF-8 character {0} too large")
+MSG_DEF(JSMSG_MALFORMED_UTF8_CHAR, 205, 1, JSEXN_TYPEERR, "malformed UTF-8 character sequence at offset {0}")
+MSG_DEF(JSMSG_USER_DEFINED_ERROR, 206, 0, JSEXN_ERR, "JS_ReportError was called")
+MSG_DEF(JSMSG_WRONG_CONSTRUCTOR, 207, 1, JSEXN_TYPEERR, "wrong constructor called for {0}")
+MSG_DEF(JSMSG_BAD_GENERATOR_RETURN, 208, 1, JSEXN_TYPEERR, "generator function {0} returns a value")
+MSG_DEF(JSMSG_BAD_ANON_GENERATOR_RETURN, 209, 0, JSEXN_TYPEERR, "anonymous generator function returns a value")
+MSG_DEF(JSMSG_NAME_AFTER_FOR_PAREN, 210, 0, JSEXN_SYNTAXERR, "missing name after for (")
+MSG_DEF(JSMSG_IN_AFTER_FOR_NAME, 211, 0, JSEXN_SYNTAXERR, "missing in after for")
+MSG_DEF(JSMSG_BAD_ITERATOR_RETURN, 212, 2, JSEXN_TYPEERR, "{0}.{1} returned a primitive value")
+MSG_DEF(JSMSG_KEYWORD_NOT_NS, 213, 0, JSEXN_SYNTAXERR, "keyword is used as namespace")
+MSG_DEF(JSMSG_BAD_GENERATOR_YIELD, 214, 1, JSEXN_TYPEERR, "yield from closing generator {0}")
+MSG_DEF(JSMSG_BAD_YIELD_SYNTAX, 215, 0, JSEXN_SYNTAXERR, "yield expression must be parenthesized")
+MSG_DEF(JSMSG_ARRAY_COMP_LEFTSIDE, 216, 0, JSEXN_SYNTAXERR, "invalid array comprehension left-hand side")
+MSG_DEF(JSMSG_YIELD_FROM_FILTER, 217, 0, JSEXN_INTERNALERR, "yield not yet supported from filtering predicate")
+MSG_DEF(JSMSG_COMPILE_EXECED_SCRIPT, 218, 0, JSEXN_TYPEERR, "cannot compile over a script that is currently executing")
+MSG_DEF(JSMSG_NON_LIST_XML_METHOD, 219, 2, JSEXN_TYPEERR, "cannot call {0} method on an XML list with {1} elements")
diff --git a/third_party/js-1.7/js.pkg b/third_party/js-1.7/js.pkg
new file mode 100644
index 0000000..93185a9
--- /dev/null
+++ b/third_party/js-1.7/js.pkg
@@ -0,0 +1,2 @@
+[gecko xpi-bootstrap]
+dist/bin/@SHARED_LIBRARY@
diff --git a/third_party/js-1.7/js3240.rc b/third_party/js-1.7/js3240.rc
new file mode 100644
index 0000000..1a9f62c
--- /dev/null
+++ b/third_party/js-1.7/js3240.rc
@@ -0,0 +1,79 @@
+//Microsoft Developer Studio generated resource script.
+//
+#include "resource.h"
+
+#define APSTUDIO_READONLY_SYMBOLS
+/////////////////////////////////////////////////////////////////////////////
+//
+// Generated from the TEXTINCLUDE 2 resource.
+//
+#include "winver.h"
+
+/////////////////////////////////////////////////////////////////////////////
+#undef APSTUDIO_READONLY_SYMBOLS
+
+/////////////////////////////////////////////////////////////////////////////
+//
+// Version
+//
+
+VS_VERSION_INFO VERSIONINFO
+ FILEVERSION 4,0,0,0
+ PRODUCTVERSION 4,0,0,0
+ FILEFLAGSMASK 0x3fL
+#ifdef _DEBUG
+ FILEFLAGS 0x1L
+#else
+ FILEFLAGS 0x0L
+#endif
+ FILEOS 0x10004L
+ FILETYPE 0x2L
+ FILESUBTYPE 0x0L
+BEGIN
+ BLOCK "StringFileInfo"
+ BEGIN
+ BLOCK "040904e4"
+ BEGIN
+ VALUE "CompanyName", "Netscape Communications Corporation\0"
+ VALUE "FileDescription", "Netscape 32-bit JavaScript Module\0"
+ VALUE "FileVersion", "4.0\0"
+ VALUE "InternalName", "JS3240\0"
+ VALUE "LegalCopyright", "Copyright Netscape Communications. 1994-96\0"
+ VALUE "LegalTrademarks", "Netscape, Mozilla\0"
+ VALUE "OriginalFilename", "js3240.dll\0"
+ VALUE "ProductName", "NETSCAPE\0"
+ VALUE "ProductVersion", "4.0\0"
+ END
+ END
+ BLOCK "VarFileInfo"
+ BEGIN
+ VALUE "Translation", 0x409, 1252
+ END
+END
+
+#ifdef APSTUDIO_INVOKED
+/////////////////////////////////////////////////////////////////////////////
+//
+// TEXTINCLUDE
+//
+
+1 TEXTINCLUDE DISCARDABLE
+BEGIN
+ "resource.h\0"
+END
+
+2 TEXTINCLUDE DISCARDABLE
+BEGIN
+ "#include ""winver.h""\r\n"
+ "\0"
+END
+
+3 TEXTINCLUDE DISCARDABLE
+BEGIN
+ "\r\n"
+ "\0"
+END
+
+#endif // APSTUDIO_INVOKED
+
+///////////////////////////////////////////////////////////////////////////// \ No newline at end of file
diff --git a/third_party/js-1.7/jsOS240.def b/third_party/js-1.7/jsOS240.def
new file mode 100644
index 0000000..8f27d64
--- /dev/null
+++ b/third_party/js-1.7/jsOS240.def
@@ -0,0 +1,654 @@
+; ***** BEGIN LICENSE BLOCK *****
+; Version: MPL 1.1/GPL 2.0/LGPL 2.1
+;
+; The contents of this file are subject to the Mozilla Public License Version
+; 1.1 (the "License"); you may not use this file except in compliance with
+; the License. You may obtain a copy of the License at
+; http://www.mozilla.org/MPL/
+;
+; Software distributed under the License is distributed on an "AS IS" basis,
+; WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+; for the specific language governing rights and limitations under the
+; License.
+;
+; The Original Code is Mozilla Communicator client code, released
+; March 31, 1998.
+;
+; The Initial Developer of the Original Code is
+; Netscape Communications Corporation.
+; Portions created by the Initial Developer are Copyright (C) 1998
+; the Initial Developer. All Rights Reserved.
+;
+; Contributor(s):
+;
+; Alternatively, the contents of this file may be used under the terms of
+; either of the GNU General Public License Version 2 or later (the "GPL"),
+; or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+; in which case the provisions of the GPL or the LGPL are applicable instead
+; of those above. If you wish to allow use of your version of this file only
+; under the terms of either the GPL or the LGPL, and not to allow others to
+; use your version of this file under the terms of the MPL, indicate your
+; decision by deleting the provisions above and replace them with the notice
+; and other provisions required by the GPL or the LGPL. If you do not delete
+; the provisions above, a recipient may use your version of this file under
+; the terms of any one of the MPL, the GPL or the LGPL.
+;
+; ***** END LICENSE BLOCK *****
+
+LIBRARY JS3240 INITINSTANCE TERMINSTANCE
+PROTMODE
+
+DESCRIPTION 'Netscape OS/2 JavaScript Library'
+
+
+CODE LOADONCALL MOVEABLE DISCARDABLE
+DATA PRELOAD MOVEABLE MULTIPLE NONSHARED
+
+
+EXPORTS
+;====================== win16 exports these at least... ===========
+; JS_Init = JS_Init @2
+; JS_Finish = JS_Finish @3
+; JS_GetNaNValue
+; JS_GetNegativeInfinityValue
+; JS_GetPositiveInfinityValue
+; JS_GetEmptyStringValue
+; JS_ConvertValue
+; JS_ValueToObject
+; JS_ValueToFunction
+; JS_ValueToString
+; JS_ValueToNumber
+; JS_ValueToBoolean
+; JS_TypeOfValue
+; JS_GetTypeName
+; JS_Lock
+; JS_Unlock
+; JS_NewContext
+; JS_DestroyContext
+; JS_ContextIterator
+; JS_GetGlobalObject
+; JS_SetGlobalObject
+; JS_InitStandardClasses
+;; JS_GetStaticLink
+; JS_malloc
+; JS_realloc
+; JS_free
+; JS_strdup
+; JS_NewDouble
+; JS_NewDoubleValue
+; JS_AddRoot
+; JS_RemoveRoot
+; JS_LockGCThing
+; JS_UnlockGCThing
+; JS_GC
+; JS_PropertyStub
+; JS_EnumerateStub
+; JS_ResolveStub
+; JS_ConvertStub
+; JS_FinalizeStub
+; JS_InitClass
+; JS_GetClass
+; JS_InstanceOf
+; JS_GetPrivate
+; JS_SetPrivate
+; JS_GetInstancePrivate
+; JS_GetPrototype
+; JS_GetParent
+; JS_SetParent
+; JS_GetConstructor
+; JS_NewObject
+; JS_DefineObject
+; JS_DefineConstDoubles
+; JS_DefineProperties
+; JS_DefineProperty
+; JS_DefinePropertyWithTinyId
+; JS_AliasProperty
+; JS_LookupProperty
+; JS_GetProperty
+; JS_SetProperty
+; JS_DeleteProperty
+; JS_NewArrayObject
+; JS_DefineElement
+; JS_AliasElement
+; JS_LookupElement
+; JS_GetElement
+; JS_SetElement
+; JS_DeleteElement
+; JS_ClearScope
+; JS_NewFunction
+; JS_GetFunctionObject
+; JS_GetFunctionName
+; JS_DefineFunctions
+; JS_DefineFunction
+; JS_CompileScript
+; JS_DestroyScript
+; JS_CompileFunction
+; JS_DecompileScript
+; JS_DecompileFunction
+; JS_DecompileFunctionBody
+; JS_ExecuteScript
+; JS_EvaluateScript
+; JS_CallFunction
+; JS_CallFunctionName
+; JS_CallFunctionValue
+; JS_SetBranchCallback
+; JS_IsRunning
+; JS_IsConstructing
+; JS_SetCallReturnValue2
+; JS_NewString
+; JS_NewStringCopyN
+; JS_NewStringCopyZ
+; JS_InternString
+; JS_GetStringBytes
+; JS_GetStringLength
+; JS_CompareStrings
+; JS_ReportError
+; JS_ReportOutOfMemory
+; JS_SetErrorReporter
+; JS_NewRegExpObject
+; JS_SetRegExpInput
+; JS_ClearRegExpStatics
+;=================================================
+
+
+;00001:jsstr (OFFSET:0x00002e17, SIZE:0x0000ae17):
+; - Public Definitions:
+; js_EmptySubString
+; js_CompareStrings
+; js_HashString
+; js_ValueToString
+; js_StringToObject
+; js_FinalizeString
+; js_NewStringCopyZ
+; js_NewString
+; js_InitStringClass
+; js_NewStringCopyN
+; js_BoyerMooreHorspool
+;
+;
+;00002:jsscript (OFFSET:0x0000dc2e, SIZE:0x00003abb):
+; - Public Definitions:
+; js_LineNumberToPC
+; js_PCToLineNumber
+; js_GetSrcNote
+; js_DestroyScript
+; js_NewScript
+;
+;
+;00003:jsscope (OFFSET:0x000116e9, SIZE:0x00004f82):
+; - Public Definitions:
+; js_hash_scope_ops
+; js_list_scope_ops
+; js_DestroyProperty
+; js_NewProperty
+; js_IdToValue
+; js_HashValue
+; js_DestroyScope
+; js_MutateScope
+; js_DropScope
+; js_HoldScope
+; js_NewScope
+; js_GetMutableScope
+; js_HoldProperty
+; js_DropProperty
+;
+;
+;00004:jsscan (OFFSET:0x0001666b, SIZE:0x00008890):
+; - Public Definitions:
+; js_MatchToken
+; js_FlushNewlines
+; js_PeekTokenSameLine
+; js_UngetToken
+; js_GetToken
+; js_PeekToken
+; js_ReportCompileError
+ js_CloseTokenStream
+ js_NewBufferTokenStream
+; js_NewTokenStream
+; js_InitScanner
+;
+;
+;00005:jsregexp (OFFSET:0x0001eefb, SIZE:0x0000eee4):
+; - Public Definitions:
+; js_RegExpClass
+; reopsize
+; js_NewRegExpObject
+; js_InitRegExpClass
+; js_FreeRegExpStatics
+; js_InitRegExpStatics
+; js_ExecuteRegExp
+; js_NewRegExpOpt
+; js_DestroyRegExp
+; js_NewRegExp
+;
+;
+;00006:jsparse (OFFSET:0x0002dddf, SIZE:0x00010b71):
+; - Public Definitions:
+; js_ParseFunctionBody
+ js_Parse
+;
+;
+;00007:jsopcode (OFFSET:0x0003e950, SIZE:0x0000d362):
+; - Public Definitions:
+; js_EscapeMap
+; js_NumCodeSpecs
+; js_CodeSpec
+; js_incop_str
+; js_true_str
+; js_false_str
+; js_this_str
+; js_null_str
+; js_void_str
+; js_typeof_str
+; js_delete_str
+; js_new_str
+; js_ValueToSource
+; js_DecompileScript
+; js_DecompileCode
+; js_DecompileFunction
+; js_puts
+; js_printf
+; js_GetPrinterOutput
+; js_DestroyPrinter
+; js_NewPrinter
+; js_EscapeString
+; js_Disassemble1
+; js_Disassemble
+;
+;00008:jsobj (OFFSET:0x0004bcb2, SIZE:0x000090a4):
+; - Public Definitions:
+; js_WithClass
+; js_ObjectClass
+; js_TryValueOf
+; js_ValueToNonNullObject
+; js_TryMethod
+; js_ObjectToString
+; js_SetClassPrototype
+; js_DeleteProperty2
+; js_DeleteProperty
+; js_SetProperty
+; js_GetProperty
+; js_FindVariableScope
+; js_FindVariable
+; js_FindProperty
+; js_LookupProperty
+; js_DefineProperty
+; js_FreeSlot
+; js_AllocSlot
+; js_FinalizeObject
+; js_GetClassPrototype
+; js_NewObject
+; js_InitObjectClass
+; js_ValueToObject
+; js_obj_toString
+; js_SetSlot
+; js_GetSlot
+;
+;
+;00009:jsnum (OFFSET:0x00054d56, SIZE:0x00004f29):
+; - Public Definitions:
+; js_ValueToInt32
+; js_NumberToObject
+; js_FinalizeDouble
+; js_InitNumberClass
+; js_NumberToString
+; js_NewDoubleValue
+; js_NewDouble
+; js_ValueToNumber
+;
+;
+;00010:jsmath (OFFSET:0x00059c7f, SIZE:0x000054b6):
+; - Public Definitions:
+; js_InitMathClass
+;
+;
+;00011:jsjava (OFFSET:0x0005f135, SIZE:0x00022aad):
+; - Public Definitions:
+; js_Hooks
+; MojaSrcLog
+; finalizeTask
+ JSJ_FindCurrentJSContext
+; JSJ_GetPrincipals
+ JSJ_IsSafeMethod
+ JSJ_InitContext
+ JSJ_Init
+ js_JSErrorToJException
+ js_JavaErrorReporter
+ js_RemoveReflection
+ js_ReflectJObjectToJSObject
+ js_convertJObjectToJSValue
+ js_convertJSValueToJObject
+ js_ReflectJSObjectToJObject
+; js_ReflectJClassToJSObject
+ JSJ_ExitJS
+ JSJ_EnterJS
+ JSJ_CurrentContext
+ JSJ_IsEnabled
+;added in GA code - DSR70297
+ JSJ_Finish
+ JSJ_IsCalledFromJava
+ js_GetJSPrincipalsFromJavaCaller
+
+;
+;
+;00012:jsinterp (OFFSET:0x00081be2, SIZE:0x00012274):
+; - Public Definitions:
+; js_Call
+; js_Interpret
+; js_SetLocalVariable
+; js_GetLocalVariable
+; js_SetArgument
+; js_GetArgument
+; js_FlushPropertyCacheByProp
+; js_FlushPropertyCache
+;
+;
+;00013:jsgc (OFFSET:0x00093e56, SIZE:0x00004f8d):
+; - Public Definitions:
+; js_ForceGC
+; js_UnlockGCThing
+; js_LockGCThing
+; js_GC
+; js_AllocGCThing
+; js_RemoveRoot
+; js_AddRoot
+; js_FinishGC
+; js_InitGC
+;
+;
+;00014:jsfun (OFFSET:0x00098de3, SIZE:0x0000977c):
+; - Public Definitions:
+; js_FunctionClass
+; js_ClosureClass
+; js_CallClass
+; js_DefineFunction
+; js_NewFunction
+; js_InitCallAndClosureClasses
+; js_InitFunctionClass
+; js_ValueToFunction
+; js_SetCallVariable
+; js_GetCallVariable
+; js_PutCallObject
+; js_GetCallObject
+;
+;
+;00015:jsemit (OFFSET:0x000a255f, SIZE:0x000077be):
+; - Public Definitions:
+; js_SrcNoteName
+; js_SrcNoteArity
+ js_FinishTakingSrcNotes
+; js_MoveSrcNotes
+; js_GetSrcNoteOffset
+; js_BumpSrcNoteDelta
+; js_NewSrcNote3
+; js_NewSrcNote2
+; js_PopStatement
+; js_EmitContinue
+; js_EmitBreak
+; js_SetSrcNoteOffset
+; js_NewSrcNote
+; js_PushStatement
+; js_MoveCode
+; js_SetJumpOffset
+; js_Emit3
+; js_Emit2
+; js_Emit1
+; js_UpdateDepth
+; js_SrcNoteLength
+; js_CancelLastOpcode
+ js_InitCodeGenerator
+;
+;
+;00016:jsdbgapi (OFFSET:0x000a9d1d, SIZE:0x000057db):
+; - Public Definitions:
+; js_watchpoint_list
+; js_trap_list
+; JS_SetAnnotationInFrame
+; JS_GetAnnotationFromFrame
+; JS_GetJSPrincipalArrayFromFrame
+; JS_NextJSFrame
+; JS_InitJSFrameIterator
+ JS_LineNumberToPC
+ JS_PCToLineNumber
+ JS_ClearAllWatchPoints
+ JS_ClearWatchPoint
+ JS_SetWatchPoint
+ JS_HandleTrap
+ JS_ClearAllTraps
+ JS_ClearScriptTraps
+ JS_ClearTrap
+ JS_GetTrapOpcode
+ JS_SetTrap
+;DSR070297 - added in GA code
+ JS_FrameIterator
+ JS_GetFrameAnnotation
+ JS_GetFramePrincipalArray
+ JS_GetFrameScript
+ JS_GetScriptFilename
+ JS_SetFrameAnnotation
+ JS_GetFramePC
+ JS_GetFunctionScript
+
+;
+;
+;00017:jsdate (OFFSET:0x000af4f8, SIZE:0x00009a8e):
+; - Public Definitions:
+ js_DateGetSeconds
+ js_DateGetMinutes
+ js_DateGetHours
+ js_DateGetDate
+ js_DateGetMonth
+ js_DateGetYear
+ js_NewDateObject
+; js_InitDateClass
+;
+;
+;00018:jscntxt (OFFSET:0x000b8f86, SIZE:0x00003732):
+; - Public Definitions:
+; js_InterpreterHooks
+; js_ReportIsNotDefined
+; js_ReportErrorAgain
+; js_ReportErrorVA
+; js_ContextIterator
+; js_DestroyContext
+; js_NewContext
+; js_SetInterpreterHooks
+;
+;
+;00019:jsbool (OFFSET:0x000bc6b8, SIZE:0x00003375):
+; - Public Definitions:
+; js_BooleanToString
+; js_BooleanToObject
+; js_InitBooleanClass
+; js_ValueToBoolean
+;
+;
+;00020:jsatom (OFFSET:0x000bfa2d, SIZE:0x000058d0):
+; - Public Definitions:
+; js_valueOf_str
+; js_toString_str
+; js_length_str
+; js_eval_str
+; js_constructor_str
+; js_class_prototype_str
+; js_assign_str
+; js_anonymous_str
+; js_Object_str
+; js_Array_str
+; js_type_str
+; js_DropUnmappedAtoms
+ js_FreeAtomMap
+ js_InitAtomMap
+; js_GetAtom
+; js_DropAtom
+; js_IndexAtom
+; js_ValueToStringAtom
+; js_AtomizeString
+; js_AtomizeDouble
+; js_AtomizeInt
+; js_AtomizeBoolean
+; js_AtomizeObject
+; js_HoldAtom
+; js_MarkAtomState
+; js_FreeAtomState
+; js_Atomize
+; js_InitAtomState
+;
+;
+;00021:jsarray (OFFSET:0x000c52fd, SIZE:0x00007c86):
+; - Public Definitions:
+; js_ArrayClass
+; js_SetArrayLength
+; js_GetArrayLength
+; js_InitArrayClass
+; js_NewArrayObject
+; PR_qsort
+;
+;
+;00022:jsapi (OFFSET:0x000ccf83, SIZE:0x0000de8c):
+; - Public Definitions:
+ JS_ClearRegExpStatics
+ JS_SetRegExpInput
+ JS_NewRegExpObject
+ JS_SetErrorReporter
+ JS_CompareStrings
+ JS_GetStringLength
+ JS_GetStringBytes
+ JS_InternString
+ JS_NewStringCopyZ
+ JS_NewStringCopyN
+ JS_NewString
+ JS_IsRunning
+ JS_SetBranchCallback
+ JS_CallFunctionValue
+ JS_CallFunctionName
+ JS_CallFunction
+ JS_EvaluateScriptForPrincipals
+ JS_EvaluateScript
+ JS_ExecuteScript
+ JS_DecompileFunctionBody
+ JS_DecompileFunction
+ JS_DecompileScript
+ JS_CompileFunctionForPrincipals
+ JS_CompileFunction
+ JS_DestroyScript
+ JS_CompileScriptForPrincipals
+ JS_CompileScript
+ JS_DefineFunction
+ JS_GetFunctionName
+ JS_GetFunctionObject
+ JS_NewFunction
+ JS_ClearScope
+ JS_DeleteElement
+ JS_SetElement
+ JS_GetElement
+ JS_LookupElement
+ JS_AliasElement
+ JS_DefineElement
+ JS_SetArrayLength
+ JS_GetArrayLength
+ JS_NewArrayObject
+ JS_DeleteProperty
+ JS_SetProperty
+ JS_GetProperty
+ JS_LookupProperty
+ JS_AliasProperty
+ JS_DefinePropertyWithTinyId
+ JS_DefineProperty
+ JS_DefineConstDoubles
+ JS_DefineObject
+ JS_NewObject
+ JS_GetConstructor
+ JS_SetParent
+ JS_GetParent
+ JS_SetPrototype
+ JS_GetPrototype
+ JS_GetInstancePrivate
+ JS_SetPrivate
+ JS_GetPrivate
+ JS_InstanceOf
+ JS_GetClass
+ JS_DefineFunctions
+ JS_DefineProperties
+ JS_InitClass
+ JS_FinalizeStub
+ JS_ConvertStub
+ JS_ResolveStub
+ JS_EnumerateStub
+ JS_PropertyStub
+ JS_GC
+ JS_UnlockGCThing
+ JS_LockGCThing
+ JS_RemoveRoot
+ JS_AddRoot
+ JS_NewDoubleValue
+ JS_NewDouble
+ JS_strdup
+ JS_free
+ JS_realloc
+ JS_ReportOutOfMemory
+ JS_malloc
+ JS_GetScopeChain
+ JS_InitStandardClasses
+ JS_SetGlobalObject
+ JS_GetGlobalObject
+ JS_SetVersion
+ JS_GetVersion
+ JS_ContextIterator
+ JS_GetTaskState
+ JS_DestroyContext
+ JS_NewContext
+ JS_Unlock
+ JS_Lock
+ JS_Finish
+ JS_Init
+ JS_GetTypeName
+ JS_TypeOfValue
+ JS_ValueToBoolean
+ JS_ValueToInt32
+ JS_ValueToNumber
+ JS_ValueToString
+ JS_ValueToFunction
+ JS_ValueToObject
+ JS_ReportError
+ JS_ConvertValue
+ JS_GetEmptyStringValue
+ JS_GetPositiveInfinityValue
+ JS_GetNegativeInfinityValue
+ JS_GetNaNValue
+;DSR062897 - added for GA code
+ JS_MaybeGC
+ JS_GetScriptPrincipals
+ JS_IsAssigning
+ JS_SetCharSetInfo
+;brendan@mozilla.org, 2-Sept-2000
+ JS_SetCallReturnValue2
+ JS_SetGCCallback
+ JS_SetGCCallbackRT
+ JS_AddExternalStringFinalizer
+ JS_RemoveExternalStringFinalizer
+ JS_NewExternalString
+;
+;
+;00023:prmjtime (OFFSET:0x000dae0f, SIZE:0x00008986):
+; - Public Definitions:
+ PRMJ_FormatTimeUSEnglish
+ PRMJ_gmtime
+ PRMJ_FormatTime
+ PRMJ_mktime
+ PRMJ_ComputeTime
+ PRMJ_localtime
+ PRMJ_ExplodeTime
+ PRMJ_ToLocal
+ PRMJ_ToGMT
+ PRMJ_NowLocal
+ PRMJ_DSTOffset
+ PRMJ_NowS
+ PRMJ_NowMS
+ PRMJ_Now
+ PRMJ_ToExtendedTime
+ PRMJ_ToBaseTime
+ PRMJ_setDST
+ PRMJ_LocalGMTDifference
+
+
diff --git a/third_party/js-1.7/jsapi.c b/third_party/js-1.7/jsapi.c
new file mode 100644
index 0000000..f03fa36
--- /dev/null
+++ b/third_party/js-1.7/jsapi.c
@@ -0,0 +1,5011 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sw=4 et tw=78:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * JavaScript API.
+ */
+#include "jsstddef.h"
+#include <ctype.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <string.h>
+#include "jstypes.h"
+#include "jsarena.h" /* Added by JSIFY */
+#include "jsutil.h" /* Added by JSIFY */
+#include "jsclist.h"
+#include "jsdhash.h"
+#include "jsprf.h"
+#include "jsapi.h"
+#include "jsarray.h"
+#include "jsatom.h"
+#include "jsbool.h"
+#include "jscntxt.h"
+#include "jsconfig.h"
+#include "jsdate.h"
+#include "jsdtoa.h"
+#include "jsemit.h"
+#include "jsexn.h"
+#include "jsfun.h"
+#include "jsgc.h"
+#include "jsinterp.h"
+#include "jslock.h"
+#include "jsmath.h"
+#include "jsnum.h"
+#include "jsobj.h"
+#include "jsopcode.h"
+#include "jsparse.h"
+#include "jsregexp.h"
+#include "jsscan.h"
+#include "jsscope.h"
+#include "jsscript.h"
+#include "jsstr.h"
+#include "prmjtime.h"
+
+#if JS_HAS_FILE_OBJECT
+#include "jsfile.h"
+#endif
+
+#if JS_HAS_XML_SUPPORT
+#include "jsxml.h"
+#endif
+
+#if JS_HAS_GENERATORS
+#include "jsiter.h"
+#endif
+
+#ifdef HAVE_VA_LIST_AS_ARRAY
+#define JS_ADDRESSOF_VA_LIST(ap) ((va_list *)(ap))
+#else
+#define JS_ADDRESSOF_VA_LIST(ap) (&(ap))
+#endif
+
+#if defined(JS_PARANOID_REQUEST) && defined(JS_THREADSAFE)
+#define CHECK_REQUEST(cx) JS_ASSERT(cx->requestDepth)
+#else
+#define CHECK_REQUEST(cx) ((void)0)
+#endif
+
+JS_PUBLIC_API(int64)
+JS_Now()
+{
+ return PRMJ_Now();
+}
+
+JS_PUBLIC_API(jsval)
+JS_GetNaNValue(JSContext *cx)
+{
+ return DOUBLE_TO_JSVAL(cx->runtime->jsNaN);
+}
+
+JS_PUBLIC_API(jsval)
+JS_GetNegativeInfinityValue(JSContext *cx)
+{
+ return DOUBLE_TO_JSVAL(cx->runtime->jsNegativeInfinity);
+}
+
+JS_PUBLIC_API(jsval)
+JS_GetPositiveInfinityValue(JSContext *cx)
+{
+ return DOUBLE_TO_JSVAL(cx->runtime->jsPositiveInfinity);
+}
+
+JS_PUBLIC_API(jsval)
+JS_GetEmptyStringValue(JSContext *cx)
+{
+ return STRING_TO_JSVAL(cx->runtime->emptyString);
+}
+
+static JSBool
+TryArgumentFormatter(JSContext *cx, const char **formatp, JSBool fromJS,
+ jsval **vpp, va_list *app)
+{
+ const char *format;
+ JSArgumentFormatMap *map;
+
+ format = *formatp;
+ for (map = cx->argumentFormatMap; map; map = map->next) {
+ if (!strncmp(format, map->format, map->length)) {
+ *formatp = format + map->length;
+ return map->formatter(cx, format, fromJS, vpp, app);
+ }
+ }
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_BAD_CHAR, format);
+ return JS_FALSE;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_ConvertArguments(JSContext *cx, uintN argc, jsval *argv, const char *format,
+ ...)
+{
+ va_list ap;
+ JSBool ok;
+
+ va_start(ap, format);
+ ok = JS_ConvertArgumentsVA(cx, argc, argv, format, ap);
+ va_end(ap);
+ return ok;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_ConvertArgumentsVA(JSContext *cx, uintN argc, jsval *argv,
+ const char *format, va_list ap)
+{
+ jsval *sp;
+ JSBool required;
+ char c;
+ JSFunction *fun;
+ jsdouble d;
+ JSString *str;
+ JSObject *obj;
+
+ CHECK_REQUEST(cx);
+ sp = argv;
+ required = JS_TRUE;
+ while ((c = *format++) != '\0') {
+ if (isspace(c))
+ continue;
+ if (c == '/') {
+ required = JS_FALSE;
+ continue;
+ }
+ if (sp == argv + argc) {
+ if (required) {
+ fun = js_ValueToFunction(cx, &argv[-2], 0);
+ if (fun) {
+ char numBuf[12];
+ JS_snprintf(numBuf, sizeof numBuf, "%u", argc);
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_MORE_ARGS_NEEDED,
+ JS_GetFunctionName(fun), numBuf,
+ (argc == 1) ? "" : "s");
+ }
+ return JS_FALSE;
+ }
+ break;
+ }
+ switch (c) {
+ case 'b':
+ if (!js_ValueToBoolean(cx, *sp, va_arg(ap, JSBool *)))
+ return JS_FALSE;
+ break;
+ case 'c':
+ if (!js_ValueToUint16(cx, *sp, va_arg(ap, uint16 *)))
+ return JS_FALSE;
+ break;
+ case 'i':
+ if (!js_ValueToECMAInt32(cx, *sp, va_arg(ap, int32 *)))
+ return JS_FALSE;
+ break;
+ case 'u':
+ if (!js_ValueToECMAUint32(cx, *sp, va_arg(ap, uint32 *)))
+ return JS_FALSE;
+ break;
+ case 'j':
+ if (!js_ValueToInt32(cx, *sp, va_arg(ap, int32 *)))
+ return JS_FALSE;
+ break;
+ case 'd':
+ if (!js_ValueToNumber(cx, *sp, va_arg(ap, jsdouble *)))
+ return JS_FALSE;
+ break;
+ case 'I':
+ if (!js_ValueToNumber(cx, *sp, &d))
+ return JS_FALSE;
+ *va_arg(ap, jsdouble *) = js_DoubleToInteger(d);
+ break;
+ case 's':
+ case 'S':
+ case 'W':
+ str = js_ValueToString(cx, *sp);
+ if (!str)
+ return JS_FALSE;
+ *sp = STRING_TO_JSVAL(str);
+ if (c == 's')
+ *va_arg(ap, char **) = JS_GetStringBytes(str);
+ else if (c == 'W')
+ *va_arg(ap, jschar **) = JS_GetStringChars(str);
+ else
+ *va_arg(ap, JSString **) = str;
+ break;
+ case 'o':
+ if (!js_ValueToObject(cx, *sp, &obj))
+ return JS_FALSE;
+ *sp = OBJECT_TO_JSVAL(obj);
+ *va_arg(ap, JSObject **) = obj;
+ break;
+ case 'f':
+ obj = js_ValueToFunctionObject(cx, sp, 0);
+ if (!obj)
+ return JS_FALSE;
+ *va_arg(ap, JSFunction **) = (JSFunction *) JS_GetPrivate(cx, obj);
+ break;
+ case 'v':
+ *va_arg(ap, jsval *) = *sp;
+ break;
+ case '*':
+ break;
+ default:
+ format--;
+ if (!TryArgumentFormatter(cx, &format, JS_TRUE, &sp,
+ JS_ADDRESSOF_VA_LIST(ap))) {
+ return JS_FALSE;
+ }
+ /* NB: the formatter already updated sp, so we continue here. */
+ continue;
+ }
+ sp++;
+ }
+ return JS_TRUE;
+}
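+/*
+ * Illustrative sketch, not part of the original source: a native function
+ * might unpack its actual arguments with a format string such as "dSo/o",
+ * i.e. a required double, string and object followed by one optional
+ * object.  The variable names below are hypothetical.
+ *
+ *     jsdouble d;
+ *     JSString *str;
+ *     JSObject *obj1, *obj2 = NULL;
+ *     if (!JS_ConvertArguments(cx, argc, argv, "dSo/o", &d, &str, &obj1, &obj2))
+ *         return JS_FALSE;
+ */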
+
+JS_PUBLIC_API(jsval *)
+JS_PushArguments(JSContext *cx, void **markp, const char *format, ...)
+{
+ va_list ap;
+ jsval *argv;
+
+ va_start(ap, format);
+ argv = JS_PushArgumentsVA(cx, markp, format, ap);
+ va_end(ap);
+ return argv;
+}
+
+JS_PUBLIC_API(jsval *)
+JS_PushArgumentsVA(JSContext *cx, void **markp, const char *format, va_list ap)
+{
+ uintN argc;
+ jsval *argv, *sp;
+ char c;
+ const char *cp;
+ JSString *str;
+ JSFunction *fun;
+ JSStackHeader *sh;
+
+ CHECK_REQUEST(cx);
+ *markp = NULL;
+ argc = 0;
+ for (cp = format; (c = *cp) != '\0'; cp++) {
+ /*
+ * Count non-space non-star characters as individual jsval arguments.
+         * This may over-allocate stack, but we'll fix that below.
+ */
+ if (isspace(c) || c == '*')
+ continue;
+ argc++;
+ }
+ sp = js_AllocStack(cx, argc, markp);
+ if (!sp)
+ return NULL;
+ argv = sp;
+ while ((c = *format++) != '\0') {
+ if (isspace(c) || c == '*')
+ continue;
+ switch (c) {
+ case 'b':
+ *sp = BOOLEAN_TO_JSVAL((JSBool) va_arg(ap, int));
+ break;
+ case 'c':
+ *sp = INT_TO_JSVAL((uint16) va_arg(ap, unsigned int));
+ break;
+ case 'i':
+ case 'j':
+ if (!js_NewNumberValue(cx, (jsdouble) va_arg(ap, int32), sp))
+ goto bad;
+ break;
+ case 'u':
+ if (!js_NewNumberValue(cx, (jsdouble) va_arg(ap, uint32), sp))
+ goto bad;
+ break;
+ case 'd':
+ case 'I':
+ if (!js_NewDoubleValue(cx, va_arg(ap, jsdouble), sp))
+ goto bad;
+ break;
+ case 's':
+ str = JS_NewStringCopyZ(cx, va_arg(ap, char *));
+ if (!str)
+ goto bad;
+ *sp = STRING_TO_JSVAL(str);
+ break;
+ case 'W':
+ str = JS_NewUCStringCopyZ(cx, va_arg(ap, jschar *));
+ if (!str)
+ goto bad;
+ *sp = STRING_TO_JSVAL(str);
+ break;
+ case 'S':
+ str = va_arg(ap, JSString *);
+ *sp = STRING_TO_JSVAL(str);
+ break;
+ case 'o':
+ *sp = OBJECT_TO_JSVAL(va_arg(ap, JSObject *));
+ break;
+ case 'f':
+ fun = va_arg(ap, JSFunction *);
+ *sp = fun ? OBJECT_TO_JSVAL(fun->object) : JSVAL_NULL;
+ break;
+ case 'v':
+ *sp = va_arg(ap, jsval);
+ break;
+ default:
+ format--;
+ if (!TryArgumentFormatter(cx, &format, JS_FALSE, &sp,
+ JS_ADDRESSOF_VA_LIST(ap))) {
+ goto bad;
+ }
+ /* NB: the formatter already updated sp, so we continue here. */
+ continue;
+ }
+ sp++;
+ }
+
+ /*
+ * We may have overallocated stack due to a multi-character format code
+ * handled by a JSArgumentFormatter. Give back that stack space!
+ */
+ JS_ASSERT(sp <= argv + argc);
+ if (sp < argv + argc) {
+ /* Return slots not pushed to the current stack arena. */
+ cx->stackPool.current->avail = (jsuword)sp;
+
+ /* Reduce the count of slots the GC will scan in this stack segment. */
+ sh = cx->stackHeaders;
+ JS_ASSERT(JS_STACK_SEGMENT(sh) + sh->nslots == argv + argc);
+ sh->nslots -= argc - (sp - argv);
+ }
+ return argv;
+
+bad:
+ js_FreeStack(cx, *markp);
+ return NULL;
+}
+
+JS_PUBLIC_API(void)
+JS_PopArguments(JSContext *cx, void *mark)
+{
+ CHECK_REQUEST(cx);
+ js_FreeStack(cx, mark);
+}
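+/*
+ * Illustrative sketch, not part of the original source: the push/pop pair
+ * above is typically used to build an argv for a call API such as
+ * JS_CallFunctionValue and to release the stack afterwards.  `obj', `fval'
+ * and `rval' are hypothetical.
+ *
+ *     void *mark;
+ *     jsval *argv = JS_PushArguments(cx, &mark, "is", 42, "hello");
+ *     if (argv) {
+ *         ok = JS_CallFunctionValue(cx, obj, fval, 2, argv, &rval);
+ *         JS_PopArguments(cx, mark);
+ *     }
+ */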
+
+JS_PUBLIC_API(JSBool)
+JS_AddArgumentFormatter(JSContext *cx, const char *format,
+ JSArgumentFormatter formatter)
+{
+ size_t length;
+ JSArgumentFormatMap **mpp, *map;
+
+ length = strlen(format);
+ mpp = &cx->argumentFormatMap;
+ while ((map = *mpp) != NULL) {
+ /* Insert before any shorter string to match before prefixes. */
+ if (map->length < length)
+ break;
+ if (map->length == length && !strcmp(map->format, format))
+ goto out;
+ mpp = &map->next;
+ }
+ map = (JSArgumentFormatMap *) JS_malloc(cx, sizeof *map);
+ if (!map)
+ return JS_FALSE;
+ map->format = format;
+ map->length = length;
+ map->next = *mpp;
+ *mpp = map;
+out:
+ map->formatter = formatter;
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(void)
+JS_RemoveArgumentFormatter(JSContext *cx, const char *format)
+{
+ size_t length;
+ JSArgumentFormatMap **mpp, *map;
+
+ length = strlen(format);
+ mpp = &cx->argumentFormatMap;
+ while ((map = *mpp) != NULL) {
+ if (map->length == length && !strcmp(map->format, format)) {
+ *mpp = map->next;
+ JS_free(cx, map);
+ return;
+ }
+ mpp = &map->next;
+ }
+}
+
+JS_PUBLIC_API(JSBool)
+JS_ConvertValue(JSContext *cx, jsval v, JSType type, jsval *vp)
+{
+ JSBool ok, b;
+ JSObject *obj;
+ JSString *str;
+ jsdouble d, *dp;
+
+ CHECK_REQUEST(cx);
+ switch (type) {
+ case JSTYPE_VOID:
+ *vp = JSVAL_VOID;
+ ok = JS_TRUE;
+ break;
+ case JSTYPE_OBJECT:
+ ok = js_ValueToObject(cx, v, &obj);
+ if (ok)
+ *vp = OBJECT_TO_JSVAL(obj);
+ break;
+ case JSTYPE_FUNCTION:
+ *vp = v;
+ obj = js_ValueToFunctionObject(cx, vp, JSV2F_SEARCH_STACK);
+ ok = (obj != NULL);
+ break;
+ case JSTYPE_STRING:
+ str = js_ValueToString(cx, v);
+ ok = (str != NULL);
+ if (ok)
+ *vp = STRING_TO_JSVAL(str);
+ break;
+ case JSTYPE_NUMBER:
+ ok = js_ValueToNumber(cx, v, &d);
+ if (ok) {
+ dp = js_NewDouble(cx, d, 0);
+ ok = (dp != NULL);
+ if (ok)
+ *vp = DOUBLE_TO_JSVAL(dp);
+ }
+ break;
+ case JSTYPE_BOOLEAN:
+ ok = js_ValueToBoolean(cx, v, &b);
+ if (ok)
+ *vp = BOOLEAN_TO_JSVAL(b);
+ break;
+ default: {
+ char numBuf[12];
+ JS_snprintf(numBuf, sizeof numBuf, "%d", (int)type);
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_BAD_TYPE,
+ numBuf);
+ ok = JS_FALSE;
+ break;
+ }
+ }
+ return ok;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_ValueToObject(JSContext *cx, jsval v, JSObject **objp)
+{
+ CHECK_REQUEST(cx);
+ return js_ValueToObject(cx, v, objp);
+}
+
+JS_PUBLIC_API(JSFunction *)
+JS_ValueToFunction(JSContext *cx, jsval v)
+{
+ CHECK_REQUEST(cx);
+ return js_ValueToFunction(cx, &v, JSV2F_SEARCH_STACK);
+}
+
+JS_PUBLIC_API(JSFunction *)
+JS_ValueToConstructor(JSContext *cx, jsval v)
+{
+ CHECK_REQUEST(cx);
+ return js_ValueToFunction(cx, &v, JSV2F_SEARCH_STACK);
+}
+
+JS_PUBLIC_API(JSString *)
+JS_ValueToString(JSContext *cx, jsval v)
+{
+ CHECK_REQUEST(cx);
+ return js_ValueToString(cx, v);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_ValueToNumber(JSContext *cx, jsval v, jsdouble *dp)
+{
+ CHECK_REQUEST(cx);
+ return js_ValueToNumber(cx, v, dp);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_ValueToECMAInt32(JSContext *cx, jsval v, int32 *ip)
+{
+ CHECK_REQUEST(cx);
+ return js_ValueToECMAInt32(cx, v, ip);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_ValueToECMAUint32(JSContext *cx, jsval v, uint32 *ip)
+{
+ CHECK_REQUEST(cx);
+ return js_ValueToECMAUint32(cx, v, ip);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_ValueToInt32(JSContext *cx, jsval v, int32 *ip)
+{
+ CHECK_REQUEST(cx);
+ return js_ValueToInt32(cx, v, ip);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_ValueToUint16(JSContext *cx, jsval v, uint16 *ip)
+{
+ CHECK_REQUEST(cx);
+ return js_ValueToUint16(cx, v, ip);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_ValueToBoolean(JSContext *cx, jsval v, JSBool *bp)
+{
+ CHECK_REQUEST(cx);
+ return js_ValueToBoolean(cx, v, bp);
+}
+
+JS_PUBLIC_API(JSType)
+JS_TypeOfValue(JSContext *cx, jsval v)
+{
+ JSType type;
+ JSObject *obj;
+ JSObjectOps *ops;
+ JSClass *clasp;
+
+ CHECK_REQUEST(cx);
+ if (JSVAL_IS_OBJECT(v)) {
+ type = JSTYPE_OBJECT; /* XXXbe JSTYPE_NULL for JS2 */
+ obj = JSVAL_TO_OBJECT(v);
+ if (obj) {
+ ops = obj->map->ops;
+#if JS_HAS_XML_SUPPORT
+ if (ops == &js_XMLObjectOps.base) {
+ type = JSTYPE_XML;
+ } else
+#endif
+ {
+ /*
+ * ECMA 262, 11.4.3 says that any native object that implements
+ * [[Call]] should be of type "function". Note that RegExp and
+ * Script are both of type "function" for compatibility with
+ * older SpiderMonkeys.
+ */
+ clasp = OBJ_GET_CLASS(cx, obj);
+ if ((ops == &js_ObjectOps)
+ ? (clasp->call
+ ? (clasp == &js_RegExpClass || clasp == &js_ScriptClass)
+ : clasp == &js_FunctionClass)
+ : ops->call != NULL) {
+ type = JSTYPE_FUNCTION;
+ } else {
+#ifdef NARCISSUS
+ if (!OBJ_GET_PROPERTY(cx, obj,
+ ATOM_TO_JSID(cx->runtime->atomState
+ .callAtom),
+ &v)) {
+ JS_ClearPendingException(cx);
+ } else if (VALUE_IS_FUNCTION(cx, v)) {
+ type = JSTYPE_FUNCTION;
+ }
+#endif
+ }
+ }
+ }
+ } else if (JSVAL_IS_NUMBER(v)) {
+ type = JSTYPE_NUMBER;
+ } else if (JSVAL_IS_STRING(v)) {
+ type = JSTYPE_STRING;
+ } else if (JSVAL_IS_BOOLEAN(v)) {
+ type = JSTYPE_BOOLEAN;
+ } else {
+ type = JSTYPE_VOID;
+ }
+ return type;
+}
+
+JS_PUBLIC_API(const char *)
+JS_GetTypeName(JSContext *cx, JSType type)
+{
+ if ((uintN)type >= (uintN)JSTYPE_LIMIT)
+ return NULL;
+ return js_type_strs[type];
+}
+
+/************************************************************************/
+
+JS_PUBLIC_API(JSRuntime *)
+JS_NewRuntime(uint32 maxbytes)
+{
+ JSRuntime *rt;
+
+#ifdef DEBUG
+ static JSBool didFirstChecks;
+
+ if (!didFirstChecks) {
+ /*
+ * This code asserts that the numbers associated with the error names
+         * in js.msg are monotonically increasing. It uses values for the
+ * error names enumerated in jscntxt.c. It's not a compile-time check
+ * but it's better than nothing.
+ */
+ int errorNumber = 0;
+#define MSG_DEF(name, number, count, exception, format) \
+ JS_ASSERT(name == errorNumber++);
+#include "js.msg"
+#undef MSG_DEF
+
+#define MSG_DEF(name, number, count, exception, format) \
+ JS_BEGIN_MACRO \
+ uintN numfmtspecs = 0; \
+ const char *fmt; \
+ for (fmt = format; *fmt != '\0'; fmt++) { \
+ if (*fmt == '{' && isdigit(fmt[1])) \
+ ++numfmtspecs; \
+ } \
+ JS_ASSERT(count == numfmtspecs); \
+ JS_END_MACRO;
+#include "js.msg"
+#undef MSG_DEF
+
+ didFirstChecks = JS_TRUE;
+ }
+#endif /* DEBUG */
+
+ rt = (JSRuntime *) malloc(sizeof(JSRuntime));
+ if (!rt)
+ return NULL;
+
+ /* Initialize infallibly first, so we can goto bad and JS_DestroyRuntime. */
+ memset(rt, 0, sizeof(JSRuntime));
+ JS_INIT_CLIST(&rt->contextList);
+ JS_INIT_CLIST(&rt->trapList);
+ JS_INIT_CLIST(&rt->watchPointList);
+
+ if (!js_InitGC(rt, maxbytes))
+ goto bad;
+#ifdef JS_THREADSAFE
+ if (PR_FAILURE == PR_NewThreadPrivateIndex(&rt->threadTPIndex,
+ js_ThreadDestructorCB)) {
+ goto bad;
+ }
+ rt->gcLock = JS_NEW_LOCK();
+ if (!rt->gcLock)
+ goto bad;
+ rt->gcDone = JS_NEW_CONDVAR(rt->gcLock);
+ if (!rt->gcDone)
+ goto bad;
+ rt->requestDone = JS_NEW_CONDVAR(rt->gcLock);
+ if (!rt->requestDone)
+ goto bad;
+ /* this is asymmetric with JS_ShutDown: */
+ if (!js_SetupLocks(8, 16))
+ goto bad;
+ rt->rtLock = JS_NEW_LOCK();
+ if (!rt->rtLock)
+ goto bad;
+ rt->stateChange = JS_NEW_CONDVAR(rt->gcLock);
+ if (!rt->stateChange)
+ goto bad;
+ rt->setSlotLock = JS_NEW_LOCK();
+ if (!rt->setSlotLock)
+ goto bad;
+ rt->setSlotDone = JS_NEW_CONDVAR(rt->setSlotLock);
+ if (!rt->setSlotDone)
+ goto bad;
+ rt->scopeSharingDone = JS_NEW_CONDVAR(rt->gcLock);
+ if (!rt->scopeSharingDone)
+ goto bad;
+ rt->scopeSharingTodo = NO_SCOPE_SHARING_TODO;
+#endif
+ rt->propertyCache.empty = JS_TRUE;
+ if (!js_InitPropertyTree(rt))
+ goto bad;
+ return rt;
+
+bad:
+ JS_DestroyRuntime(rt);
+ return NULL;
+}
+
+JS_PUBLIC_API(void)
+JS_DestroyRuntime(JSRuntime *rt)
+{
+#ifdef DEBUG
+ /* Don't hurt everyone in leaky ol' Mozilla with a fatal JS_ASSERT! */
+ if (!JS_CLIST_IS_EMPTY(&rt->contextList)) {
+ JSContext *cx, *iter = NULL;
+ uintN cxcount = 0;
+ while ((cx = js_ContextIterator(rt, JS_TRUE, &iter)) != NULL)
+ cxcount++;
+ fprintf(stderr,
+"JS API usage error: %u contexts left in runtime upon JS_DestroyRuntime.\n",
+ cxcount);
+ }
+#endif
+
+ js_FreeRuntimeScriptState(rt);
+ js_FinishAtomState(&rt->atomState);
+ js_FinishGC(rt);
+#ifdef JS_THREADSAFE
+ if (rt->gcLock)
+ JS_DESTROY_LOCK(rt->gcLock);
+ if (rt->gcDone)
+ JS_DESTROY_CONDVAR(rt->gcDone);
+ if (rt->requestDone)
+ JS_DESTROY_CONDVAR(rt->requestDone);
+ if (rt->rtLock)
+ JS_DESTROY_LOCK(rt->rtLock);
+ if (rt->stateChange)
+ JS_DESTROY_CONDVAR(rt->stateChange);
+ if (rt->setSlotLock)
+ JS_DESTROY_LOCK(rt->setSlotLock);
+ if (rt->setSlotDone)
+ JS_DESTROY_CONDVAR(rt->setSlotDone);
+ if (rt->scopeSharingDone)
+ JS_DESTROY_CONDVAR(rt->scopeSharingDone);
+#else
+ GSN_CACHE_CLEAR(&rt->gsnCache);
+#endif
+ js_FinishPropertyTree(rt);
+ free(rt);
+}
+
+JS_PUBLIC_API(void)
+JS_ShutDown(void)
+{
+ js_FinishDtoa();
+#ifdef JS_THREADSAFE
+ js_CleanupLocks();
+#endif
+}
+
+JS_PUBLIC_API(void *)
+JS_GetRuntimePrivate(JSRuntime *rt)
+{
+ return rt->data;
+}
+
+JS_PUBLIC_API(void)
+JS_SetRuntimePrivate(JSRuntime *rt, void *data)
+{
+ rt->data = data;
+}
+
+#ifdef JS_THREADSAFE
+
+JS_PUBLIC_API(void)
+JS_BeginRequest(JSContext *cx)
+{
+ JSRuntime *rt;
+
+ JS_ASSERT(cx->thread->id == js_CurrentThreadId());
+ if (!cx->requestDepth) {
+ /* Wait until the GC is finished. */
+ rt = cx->runtime;
+ JS_LOCK_GC(rt);
+
+ /* NB: we use cx->thread here, not js_GetCurrentThread(). */
+ if (rt->gcThread != cx->thread) {
+ while (rt->gcLevel > 0)
+ JS_AWAIT_GC_DONE(rt);
+ }
+
+ /* Indicate that a request is running. */
+ rt->requestCount++;
+ cx->requestDepth = 1;
+ JS_UNLOCK_GC(rt);
+ return;
+ }
+ cx->requestDepth++;
+}
+
+JS_PUBLIC_API(void)
+JS_EndRequest(JSContext *cx)
+{
+ JSRuntime *rt;
+ JSScope *scope, **todop;
+ uintN nshares;
+
+ CHECK_REQUEST(cx);
+ JS_ASSERT(cx->requestDepth > 0);
+ if (cx->requestDepth == 1) {
+ /* Lock before clearing to interlock with ClaimScope, in jslock.c. */
+ rt = cx->runtime;
+ JS_LOCK_GC(rt);
+ cx->requestDepth = 0;
+
+ /* See whether cx has any single-threaded scopes to start sharing. */
+ todop = &rt->scopeSharingTodo;
+ nshares = 0;
+ while ((scope = *todop) != NO_SCOPE_SHARING_TODO) {
+ if (scope->ownercx != cx) {
+ todop = &scope->u.link;
+ continue;
+ }
+ *todop = scope->u.link;
+ scope->u.link = NULL; /* null u.link for sanity ASAP */
+
+ /*
+ * If js_DropObjectMap returns null, we held the last ref to scope.
+ * The waiting thread(s) must have been killed, after which the GC
+ * collected the object that held this scope. Unlikely, because it
+ * requires that the GC ran (e.g., from a branch callback) during
+ * this request, but possible.
+ */
+ if (js_DropObjectMap(cx, &scope->map, NULL)) {
+ js_InitLock(&scope->lock);
+ scope->u.count = 0; /* NULL may not pun as 0 */
+ js_FinishSharingScope(rt, scope); /* set ownercx = NULL */
+ nshares++;
+ }
+ }
+ if (nshares)
+ JS_NOTIFY_ALL_CONDVAR(rt->scopeSharingDone);
+
+ /* Give the GC a chance to run if this was the last request running. */
+ JS_ASSERT(rt->requestCount > 0);
+ rt->requestCount--;
+ if (rt->requestCount == 0)
+ JS_NOTIFY_REQUEST_DONE(rt);
+
+ JS_UNLOCK_GC(rt);
+ return;
+ }
+
+ cx->requestDepth--;
+}
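+/*
+ * Illustrative sketch, not part of the original source: in a JS_THREADSAFE
+ * build an embedding brackets every stretch of JSAPI activity on a context
+ * with a request, so the garbage collector can synchronize with it.  The
+ * script text and result variable below are hypothetical.
+ *
+ *     JS_BeginRequest(cx);
+ *     ok = JS_EvaluateScript(cx, global, src, strlen(src), "inline", 1, &rval);
+ *     JS_EndRequest(cx);
+ */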
+
+/* Yield to pending GC operations, regardless of request depth */
+JS_PUBLIC_API(void)
+JS_YieldRequest(JSContext *cx)
+{
+ JSRuntime *rt;
+
+ JS_ASSERT(cx->thread);
+ CHECK_REQUEST(cx);
+
+ rt = cx->runtime;
+ JS_LOCK_GC(rt);
+ JS_ASSERT(rt->requestCount > 0);
+ rt->requestCount--;
+ if (rt->requestCount == 0)
+ JS_NOTIFY_REQUEST_DONE(rt);
+ JS_UNLOCK_GC(rt);
+ /* XXXbe give the GC or another request calling it a chance to run here?
+ Assumes FIFO scheduling */
+ JS_LOCK_GC(rt);
+ if (rt->gcThread != cx->thread) {
+ while (rt->gcLevel > 0)
+ JS_AWAIT_GC_DONE(rt);
+ }
+ rt->requestCount++;
+ JS_UNLOCK_GC(rt);
+}
+
+JS_PUBLIC_API(jsrefcount)
+JS_SuspendRequest(JSContext *cx)
+{
+ jsrefcount saveDepth = cx->requestDepth;
+
+ while (cx->requestDepth)
+ JS_EndRequest(cx);
+ return saveDepth;
+}
+
+JS_PUBLIC_API(void)
+JS_ResumeRequest(JSContext *cx, jsrefcount saveDepth)
+{
+ JS_ASSERT(!cx->requestDepth);
+ while (--saveDepth >= 0)
+ JS_BeginRequest(cx);
+}
+
+#endif /* JS_THREADSAFE */
+
+JS_PUBLIC_API(void)
+JS_Lock(JSRuntime *rt)
+{
+ JS_LOCK_RUNTIME(rt);
+}
+
+JS_PUBLIC_API(void)
+JS_Unlock(JSRuntime *rt)
+{
+ JS_UNLOCK_RUNTIME(rt);
+}
+
+JS_PUBLIC_API(JSContextCallback)
+JS_SetContextCallback(JSRuntime *rt, JSContextCallback cxCallback)
+{
+ JSContextCallback old;
+
+ old = rt->cxCallback;
+ rt->cxCallback = cxCallback;
+ return old;
+}
+
+JS_PUBLIC_API(JSContext *)
+JS_NewContext(JSRuntime *rt, size_t stackChunkSize)
+{
+ return js_NewContext(rt, stackChunkSize);
+}
+
+JS_PUBLIC_API(void)
+JS_DestroyContext(JSContext *cx)
+{
+ js_DestroyContext(cx, JSDCM_FORCE_GC);
+}
+
+JS_PUBLIC_API(void)
+JS_DestroyContextNoGC(JSContext *cx)
+{
+ js_DestroyContext(cx, JSDCM_NO_GC);
+}
+
+JS_PUBLIC_API(void)
+JS_DestroyContextMaybeGC(JSContext *cx)
+{
+ js_DestroyContext(cx, JSDCM_MAYBE_GC);
+}
+
+JS_PUBLIC_API(void *)
+JS_GetContextPrivate(JSContext *cx)
+{
+ return cx->data;
+}
+
+JS_PUBLIC_API(void)
+JS_SetContextPrivate(JSContext *cx, void *data)
+{
+ cx->data = data;
+}
+
+JS_PUBLIC_API(JSRuntime *)
+JS_GetRuntime(JSContext *cx)
+{
+ return cx->runtime;
+}
+
+JS_PUBLIC_API(JSContext *)
+JS_ContextIterator(JSRuntime *rt, JSContext **iterp)
+{
+ return js_ContextIterator(rt, JS_TRUE, iterp);
+}
+
+JS_PUBLIC_API(JSVersion)
+JS_GetVersion(JSContext *cx)
+{
+ return cx->version & JSVERSION_MASK;
+}
+
+JS_PUBLIC_API(JSVersion)
+JS_SetVersion(JSContext *cx, JSVersion version)
+{
+ JSVersion oldVersion;
+
+ JS_ASSERT(version != JSVERSION_UNKNOWN);
+ JS_ASSERT((version & ~JSVERSION_MASK) == 0);
+
+ oldVersion = cx->version & JSVERSION_MASK;
+ if (version == oldVersion)
+ return oldVersion;
+
+ /* We no longer support 1.4 or below. */
+ if (version != JSVERSION_DEFAULT && version <= JSVERSION_1_4)
+ return oldVersion;
+
+ cx->version = (cx->version & ~JSVERSION_MASK) | version;
+ js_OnVersionChange(cx);
+ return oldVersion;
+}
+
+static struct v2smap {
+ JSVersion version;
+ const char *string;
+} v2smap[] = {
+ {JSVERSION_1_0, "1.0"},
+ {JSVERSION_1_1, "1.1"},
+ {JSVERSION_1_2, "1.2"},
+ {JSVERSION_1_3, "1.3"},
+ {JSVERSION_1_4, "1.4"},
+ {JSVERSION_ECMA_3, "ECMAv3"},
+ {JSVERSION_1_5, "1.5"},
+ {JSVERSION_1_6, "1.6"},
+ {JSVERSION_1_7, "1.7"},
+ {JSVERSION_DEFAULT, js_default_str},
+ {JSVERSION_UNKNOWN, NULL}, /* must be last, NULL is sentinel */
+};
+
+JS_PUBLIC_API(const char *)
+JS_VersionToString(JSVersion version)
+{
+ int i;
+
+ for (i = 0; v2smap[i].string; i++)
+ if (v2smap[i].version == version)
+ return v2smap[i].string;
+ return "unknown";
+}
+
+JS_PUBLIC_API(JSVersion)
+JS_StringToVersion(const char *string)
+{
+ int i;
+
+ for (i = 0; v2smap[i].string; i++)
+ if (strcmp(v2smap[i].string, string) == 0)
+ return v2smap[i].version;
+ return JSVERSION_UNKNOWN;
+}
+
+JS_PUBLIC_API(uint32)
+JS_GetOptions(JSContext *cx)
+{
+ return cx->options;
+}
+
+#define SYNC_OPTIONS_TO_VERSION(cx) \
+ JS_BEGIN_MACRO \
+ if ((cx)->options & JSOPTION_XML) \
+ (cx)->version |= JSVERSION_HAS_XML; \
+ else \
+ (cx)->version &= ~JSVERSION_HAS_XML; \
+ JS_END_MACRO
+
+JS_PUBLIC_API(uint32)
+JS_SetOptions(JSContext *cx, uint32 options)
+{
+ uint32 oldopts = cx->options;
+ cx->options = options;
+ SYNC_OPTIONS_TO_VERSION(cx);
+ return oldopts;
+}
+
+JS_PUBLIC_API(uint32)
+JS_ToggleOptions(JSContext *cx, uint32 options)
+{
+ uint32 oldopts = cx->options;
+ cx->options ^= options;
+ SYNC_OPTIONS_TO_VERSION(cx);
+ return oldopts;
+}
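+/*
+ * Illustrative sketch, not part of the original source: an embedding that
+ * wants E4X syntax can set the option bit (JSOPTION_XML, declared in
+ * jsapi.h), which SYNC_OPTIONS_TO_VERSION above folds into cx->version:
+ *
+ *     uint32 oldopts = JS_SetOptions(cx, JS_GetOptions(cx) | JSOPTION_XML);
+ */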
+
+JS_PUBLIC_API(const char *)
+JS_GetImplementationVersion(void)
+{
+ return "JavaScript-C 1.7.0 2007-10-03";
+}
+
+
+JS_PUBLIC_API(JSObject *)
+JS_GetGlobalObject(JSContext *cx)
+{
+ return cx->globalObject;
+}
+
+JS_PUBLIC_API(void)
+JS_SetGlobalObject(JSContext *cx, JSObject *obj)
+{
+ cx->globalObject = obj;
+
+#if JS_HAS_XML_SUPPORT
+ cx->xmlSettingFlags = 0;
+#endif
+}
+
+JSObject *
+js_InitFunctionAndObjectClasses(JSContext *cx, JSObject *obj)
+{
+ JSDHashTable *table;
+ JSBool resolving;
+ JSRuntime *rt;
+ JSResolvingKey key;
+ JSResolvingEntry *entry;
+ JSObject *fun_proto, *obj_proto;
+
+ /* If cx has no global object, use obj so prototypes can be found. */
+ if (!cx->globalObject)
+ JS_SetGlobalObject(cx, obj);
+
+ /* Record Function and Object in cx->resolvingTable, if we are resolving. */
+ table = cx->resolvingTable;
+ resolving = (table && table->entryCount);
+ rt = cx->runtime;
+ key.obj = obj;
+ if (resolving) {
+ key.id = ATOM_TO_JSID(rt->atomState.classAtoms[JSProto_Function]);
+ entry = (JSResolvingEntry *)
+ JS_DHashTableOperate(table, &key, JS_DHASH_ADD);
+ if (entry && entry->key.obj && (entry->flags & JSRESFLAG_LOOKUP)) {
+ /* Already resolving Function, record Object too. */
+ JS_ASSERT(entry->key.obj == obj);
+ key.id = ATOM_TO_JSID(rt->atomState.classAtoms[JSProto_Object]);
+ entry = (JSResolvingEntry *)
+ JS_DHashTableOperate(table, &key, JS_DHASH_ADD);
+ }
+ if (!entry) {
+ JS_ReportOutOfMemory(cx);
+ return NULL;
+ }
+ JS_ASSERT(!entry->key.obj && entry->flags == 0);
+ entry->key = key;
+ entry->flags = JSRESFLAG_LOOKUP;
+ } else {
+ key.id = ATOM_TO_JSID(rt->atomState.classAtoms[JSProto_Object]);
+ if (!js_StartResolving(cx, &key, JSRESFLAG_LOOKUP, &entry))
+ return NULL;
+
+ key.id = ATOM_TO_JSID(rt->atomState.classAtoms[JSProto_Function]);
+ if (!js_StartResolving(cx, &key, JSRESFLAG_LOOKUP, &entry)) {
+ key.id = ATOM_TO_JSID(rt->atomState.classAtoms[JSProto_Object]);
+ JS_DHashTableOperate(table, &key, JS_DHASH_REMOVE);
+ return NULL;
+ }
+
+ table = cx->resolvingTable;
+ }
+
+ /* Initialize the function class first so constructors can be made. */
+ fun_proto = js_InitFunctionClass(cx, obj);
+ if (!fun_proto)
+ goto out;
+
+ /* Initialize the object class next so Object.prototype works. */
+ obj_proto = js_InitObjectClass(cx, obj);
+ if (!obj_proto) {
+ fun_proto = NULL;
+ goto out;
+ }
+
+ /* Function.prototype and the global object delegate to Object.prototype. */
+ OBJ_SET_PROTO(cx, fun_proto, obj_proto);
+ if (!OBJ_GET_PROTO(cx, obj))
+ OBJ_SET_PROTO(cx, obj, obj_proto);
+
+out:
+ /* If resolving, remove the other entry (Object or Function) from table. */
+ JS_DHashTableOperate(table, &key, JS_DHASH_REMOVE);
+ if (!resolving) {
+ /* If not resolving, remove the first entry added above, for Object. */
+ JS_ASSERT(key.id == \
+ ATOM_TO_JSID(rt->atomState.classAtoms[JSProto_Function]));
+ key.id = ATOM_TO_JSID(rt->atomState.classAtoms[JSProto_Object]);
+ JS_DHashTableOperate(table, &key, JS_DHASH_REMOVE);
+ }
+ return fun_proto;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_InitStandardClasses(JSContext *cx, JSObject *obj)
+{
+ JSAtom *atom;
+
+ CHECK_REQUEST(cx);
+
+ /* Define a top-level property 'undefined' with the undefined value. */
+ atom = cx->runtime->atomState.typeAtoms[JSTYPE_VOID];
+ if (!OBJ_DEFINE_PROPERTY(cx, obj, ATOM_TO_JSID(atom), JSVAL_VOID,
+ NULL, NULL, JSPROP_PERMANENT, NULL)) {
+ return JS_FALSE;
+ }
+
+ /* Function and Object require cooperative bootstrapping magic. */
+ if (!js_InitFunctionAndObjectClasses(cx, obj))
+ return JS_FALSE;
+
+ /* Initialize the rest of the standard objects and functions. */
+ return js_InitArrayClass(cx, obj) &&
+ js_InitBlockClass(cx, obj) &&
+ js_InitBooleanClass(cx, obj) &&
+ js_InitCallClass(cx, obj) &&
+ js_InitExceptionClasses(cx, obj) &&
+ js_InitMathClass(cx, obj) &&
+ js_InitNumberClass(cx, obj) &&
+ js_InitRegExpClass(cx, obj) &&
+ js_InitStringClass(cx, obj) &&
+#if JS_HAS_SCRIPT_OBJECT
+ js_InitScriptClass(cx, obj) &&
+#endif
+#if JS_HAS_XML_SUPPORT
+ js_InitXMLClasses(cx, obj) &&
+#endif
+#if JS_HAS_FILE_OBJECT
+ js_InitFileClass(cx, obj) &&
+#endif
+#if JS_HAS_GENERATORS
+ js_InitIteratorClasses(cx, obj) &&
+#endif
+ js_InitDateClass(cx, obj);
+}
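+/*
+ * Illustrative sketch, not part of the original source: a minimal embedding
+ * typically creates a runtime and a context, builds a global object and then
+ * calls JS_InitStandardClasses on it.  The sizes and the `global_class'
+ * JSClass are illustrative assumptions.
+ *
+ *     JSRuntime *rt = JS_NewRuntime(8L * 1024L * 1024L);
+ *     JSContext *cx = rt ? JS_NewContext(rt, 8192) : NULL;
+ *     JSObject *global = cx ? JS_NewObject(cx, &global_class, NULL, NULL) : NULL;
+ *     if (!global || !JS_InitStandardClasses(cx, global))
+ *         ...report the failure and tear everything down...
+ */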
+
+#define ATOM_OFFSET(name) offsetof(JSAtomState,name##Atom)
+#define CLASS_ATOM_OFFSET(name) offsetof(JSAtomState,classAtoms[JSProto_##name])
+#define OFFSET_TO_ATOM(rt,off) (*(JSAtom **)((char*)&(rt)->atomState + (off)))
+#define CLASP(name) (JSClass *)&js_##name##Class
+
+#define EAGER_ATOM(name) ATOM_OFFSET(name), NULL
+#define EAGER_CLASS_ATOM(name) CLASS_ATOM_OFFSET(name), NULL
+#define EAGER_ATOM_AND_CLASP(name) EAGER_CLASS_ATOM(name), CLASP(name)
+#define LAZY_ATOM(name) ATOM_OFFSET(lazy.name), js_##name##_str
+
+typedef struct JSStdName {
+ JSObjectOp init;
+ size_t atomOffset; /* offset of atom pointer in JSAtomState */
+ const char *name; /* null if atom is pre-pinned, else name */
+ JSClass *clasp;
+} JSStdName;
+
+static JSAtom *
+StdNameToAtom(JSContext *cx, JSStdName *stdn)
+{
+ size_t offset;
+ JSAtom *atom;
+ const char *name;
+
+ offset = stdn->atomOffset;
+ atom = OFFSET_TO_ATOM(cx->runtime, offset);
+ if (!atom) {
+ name = stdn->name;
+ if (name) {
+ atom = js_Atomize(cx, name, strlen(name), ATOM_PINNED);
+ OFFSET_TO_ATOM(cx->runtime, offset) = atom;
+ }
+ }
+ return atom;
+}
+
+/*
+ * Table of class initializers and their atom offsets in rt->atomState.
+ * If you add a "standard" class, remember to update this table.
+ */
+static JSStdName standard_class_atoms[] = {
+ {js_InitFunctionAndObjectClasses, EAGER_ATOM_AND_CLASP(Function)},
+ {js_InitFunctionAndObjectClasses, EAGER_ATOM_AND_CLASP(Object)},
+ {js_InitArrayClass, EAGER_ATOM_AND_CLASP(Array)},
+ {js_InitBlockClass, EAGER_ATOM_AND_CLASP(Block)},
+ {js_InitBooleanClass, EAGER_ATOM_AND_CLASP(Boolean)},
+ {js_InitDateClass, EAGER_ATOM_AND_CLASP(Date)},
+ {js_InitMathClass, EAGER_ATOM_AND_CLASP(Math)},
+ {js_InitNumberClass, EAGER_ATOM_AND_CLASP(Number)},
+ {js_InitStringClass, EAGER_ATOM_AND_CLASP(String)},
+ {js_InitCallClass, EAGER_ATOM_AND_CLASP(Call)},
+ {js_InitExceptionClasses, EAGER_ATOM_AND_CLASP(Error)},
+ {js_InitRegExpClass, EAGER_ATOM_AND_CLASP(RegExp)},
+#if JS_HAS_SCRIPT_OBJECT
+ {js_InitScriptClass, EAGER_ATOM_AND_CLASP(Script)},
+#endif
+#if JS_HAS_XML_SUPPORT
+ {js_InitXMLClass, EAGER_ATOM_AND_CLASP(XML)},
+ {js_InitNamespaceClass, EAGER_ATOM_AND_CLASP(Namespace)},
+ {js_InitQNameClass, EAGER_ATOM_AND_CLASP(QName)},
+#endif
+#if JS_HAS_FILE_OBJECT
+ {js_InitFileClass, EAGER_ATOM_AND_CLASP(File)},
+#endif
+#if JS_HAS_GENERATORS
+ {js_InitIteratorClasses, EAGER_ATOM_AND_CLASP(StopIteration)},
+#endif
+ {NULL, 0, NULL, NULL}
+};
+
+/*
+ * Table of top-level function and constant names and their init functions.
+ * If you add a "standard" global function or property, remember to update
+ * this table.
+ */
+static JSStdName standard_class_names[] = {
+ /* ECMA requires that eval be a direct property of the global object. */
+ {js_InitObjectClass, EAGER_ATOM(eval), NULL},
+
+ /* Global properties and functions defined by the Number class. */
+ {js_InitNumberClass, LAZY_ATOM(NaN), NULL},
+ {js_InitNumberClass, LAZY_ATOM(Infinity), NULL},
+ {js_InitNumberClass, LAZY_ATOM(isNaN), NULL},
+ {js_InitNumberClass, LAZY_ATOM(isFinite), NULL},
+ {js_InitNumberClass, LAZY_ATOM(parseFloat), NULL},
+ {js_InitNumberClass, LAZY_ATOM(parseInt), NULL},
+
+ /* String global functions. */
+ {js_InitStringClass, LAZY_ATOM(escape), NULL},
+ {js_InitStringClass, LAZY_ATOM(unescape), NULL},
+ {js_InitStringClass, LAZY_ATOM(decodeURI), NULL},
+ {js_InitStringClass, LAZY_ATOM(encodeURI), NULL},
+ {js_InitStringClass, LAZY_ATOM(decodeURIComponent), NULL},
+ {js_InitStringClass, LAZY_ATOM(encodeURIComponent), NULL},
+#if JS_HAS_UNEVAL
+ {js_InitStringClass, LAZY_ATOM(uneval), NULL},
+#endif
+
+ /* Exception constructors. */
+ {js_InitExceptionClasses, EAGER_CLASS_ATOM(Error), CLASP(Error)},
+ {js_InitExceptionClasses, EAGER_CLASS_ATOM(InternalError), CLASP(Error)},
+ {js_InitExceptionClasses, EAGER_CLASS_ATOM(EvalError), CLASP(Error)},
+ {js_InitExceptionClasses, EAGER_CLASS_ATOM(RangeError), CLASP(Error)},
+ {js_InitExceptionClasses, EAGER_CLASS_ATOM(ReferenceError), CLASP(Error)},
+ {js_InitExceptionClasses, EAGER_CLASS_ATOM(SyntaxError), CLASP(Error)},
+ {js_InitExceptionClasses, EAGER_CLASS_ATOM(TypeError), CLASP(Error)},
+ {js_InitExceptionClasses, EAGER_CLASS_ATOM(URIError), CLASP(Error)},
+
+#if JS_HAS_XML_SUPPORT
+ {js_InitAnyNameClass, EAGER_ATOM_AND_CLASP(AnyName)},
+ {js_InitAttributeNameClass, EAGER_ATOM_AND_CLASP(AttributeName)},
+ {js_InitXMLClass, LAZY_ATOM(XMLList), &js_XMLClass},
+ {js_InitXMLClass, LAZY_ATOM(isXMLName), NULL},
+#endif
+
+#if JS_HAS_GENERATORS
+ {js_InitIteratorClasses, EAGER_ATOM_AND_CLASP(Iterator)},
+ {js_InitIteratorClasses, EAGER_ATOM_AND_CLASP(Generator)},
+#endif
+
+ {NULL, 0, NULL, NULL}
+};
+
+static JSStdName object_prototype_names[] = {
+ /* Object.prototype properties (global delegates to Object.prototype). */
+ {js_InitObjectClass, EAGER_ATOM(proto), NULL},
+ {js_InitObjectClass, EAGER_ATOM(parent), NULL},
+ {js_InitObjectClass, EAGER_ATOM(count), NULL},
+#if JS_HAS_TOSOURCE
+ {js_InitObjectClass, EAGER_ATOM(toSource), NULL},
+#endif
+ {js_InitObjectClass, EAGER_ATOM(toString), NULL},
+ {js_InitObjectClass, EAGER_ATOM(toLocaleString), NULL},
+ {js_InitObjectClass, EAGER_ATOM(valueOf), NULL},
+#if JS_HAS_OBJ_WATCHPOINT
+ {js_InitObjectClass, LAZY_ATOM(watch), NULL},
+ {js_InitObjectClass, LAZY_ATOM(unwatch), NULL},
+#endif
+ {js_InitObjectClass, LAZY_ATOM(hasOwnProperty), NULL},
+ {js_InitObjectClass, LAZY_ATOM(isPrototypeOf), NULL},
+ {js_InitObjectClass, LAZY_ATOM(propertyIsEnumerable), NULL},
+#if JS_HAS_GETTER_SETTER
+ {js_InitObjectClass, LAZY_ATOM(defineGetter), NULL},
+ {js_InitObjectClass, LAZY_ATOM(defineSetter), NULL},
+ {js_InitObjectClass, LAZY_ATOM(lookupGetter), NULL},
+ {js_InitObjectClass, LAZY_ATOM(lookupSetter), NULL},
+#endif
+
+ {NULL, 0, NULL, NULL}
+};
+
+JS_PUBLIC_API(JSBool)
+JS_ResolveStandardClass(JSContext *cx, JSObject *obj, jsval id,
+ JSBool *resolved)
+{
+ JSString *idstr;
+ JSRuntime *rt;
+ JSAtom *atom;
+ JSStdName *stdnm;
+ uintN i;
+
+ CHECK_REQUEST(cx);
+ *resolved = JS_FALSE;
+
+ if (!JSVAL_IS_STRING(id))
+ return JS_TRUE;
+ idstr = JSVAL_TO_STRING(id);
+ rt = cx->runtime;
+
+ /* Check whether we're resolving 'undefined', and define it if so. */
+ atom = rt->atomState.typeAtoms[JSTYPE_VOID];
+ if (idstr == ATOM_TO_STRING(atom)) {
+ *resolved = JS_TRUE;
+ return OBJ_DEFINE_PROPERTY(cx, obj, ATOM_TO_JSID(atom), JSVAL_VOID,
+ NULL, NULL, JSPROP_PERMANENT, NULL);
+ }
+
+ /* Try for class constructors/prototypes named by well-known atoms. */
+ stdnm = NULL;
+ for (i = 0; standard_class_atoms[i].init; i++) {
+ atom = OFFSET_TO_ATOM(rt, standard_class_atoms[i].atomOffset);
+ if (idstr == ATOM_TO_STRING(atom)) {
+ stdnm = &standard_class_atoms[i];
+ break;
+ }
+ }
+
+ if (!stdnm) {
+ /* Try less frequently used top-level functions and constants. */
+ for (i = 0; standard_class_names[i].init; i++) {
+ atom = StdNameToAtom(cx, &standard_class_names[i]);
+ if (!atom)
+ return JS_FALSE;
+ if (idstr == ATOM_TO_STRING(atom)) {
+ stdnm = &standard_class_names[i];
+ break;
+ }
+ }
+
+ if (!stdnm && !OBJ_GET_PROTO(cx, obj)) {
+ /*
+ * Try even less frequently used names delegated from the global
+ * object to Object.prototype, but only if the Object class hasn't
+ * yet been initialized.
+ */
+ for (i = 0; object_prototype_names[i].init; i++) {
+ atom = StdNameToAtom(cx, &object_prototype_names[i]);
+ if (!atom)
+ return JS_FALSE;
+ if (idstr == ATOM_TO_STRING(atom)) {
+                    stdnm = &object_prototype_names[i];
+ break;
+ }
+ }
+ }
+ }
+
+ if (stdnm) {
+ /*
+ * If this standard class is anonymous and obj advertises itself as a
+ * global object (in order to reserve slots for standard class object
+ * pointers), then we don't want to resolve by name.
+ *
+         * Conversely, if either id does not name a class, or id does not name
+ * an anonymous class, or the global does not reserve slots for class
+ * objects, then we must call the init hook here.
+ */
+ if (stdnm->clasp &&
+ (stdnm->clasp->flags & JSCLASS_IS_ANONYMOUS) &&
+ (OBJ_GET_CLASS(cx, obj)->flags & JSCLASS_IS_GLOBAL)) {
+ return JS_TRUE;
+ }
+
+ if (!stdnm->init(cx, obj))
+ return JS_FALSE;
+ *resolved = JS_TRUE;
+ }
+ return JS_TRUE;
+}
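+/*
+ * Illustrative sketch, not part of the original source: an embedding's
+ * global class commonly forwards lazy name lookups to the function above
+ * from a JSCLASS_NEW_RESOLVE hook.  `global_resolve' is hypothetical.
+ *
+ *     static JSBool
+ *     global_resolve(JSContext *cx, JSObject *obj, jsval id, uintN flags,
+ *                    JSObject **objp)
+ *     {
+ *         JSBool resolved;
+ *
+ *         if (!JS_ResolveStandardClass(cx, obj, id, &resolved))
+ *             return JS_FALSE;
+ *         if (resolved)
+ *             *objp = obj;
+ *         return JS_TRUE;
+ *     }
+ */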
+
+static JSBool
+AlreadyHasOwnProperty(JSContext *cx, JSObject *obj, JSAtom *atom)
+{
+ JSScopeProperty *sprop;
+ JSScope *scope;
+
+ JS_ASSERT(OBJ_IS_NATIVE(obj));
+ JS_LOCK_OBJ(cx, obj);
+ scope = OBJ_SCOPE(obj);
+ sprop = SCOPE_GET_PROPERTY(scope, ATOM_TO_JSID(atom));
+ JS_UNLOCK_SCOPE(cx, scope);
+ return sprop != NULL;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_EnumerateStandardClasses(JSContext *cx, JSObject *obj)
+{
+ JSRuntime *rt;
+ JSAtom *atom;
+ uintN i;
+
+ CHECK_REQUEST(cx);
+ rt = cx->runtime;
+
+ /* Check whether we need to bind 'undefined' and define it if so. */
+ atom = rt->atomState.typeAtoms[JSTYPE_VOID];
+ if (!AlreadyHasOwnProperty(cx, obj, atom) &&
+ !OBJ_DEFINE_PROPERTY(cx, obj, ATOM_TO_JSID(atom), JSVAL_VOID,
+ NULL, NULL, JSPROP_PERMANENT, NULL)) {
+ return JS_FALSE;
+ }
+
+ /* Initialize any classes that have not been resolved yet. */
+ for (i = 0; standard_class_atoms[i].init; i++) {
+ atom = OFFSET_TO_ATOM(rt, standard_class_atoms[i].atomOffset);
+ if (!AlreadyHasOwnProperty(cx, obj, atom) &&
+ !standard_class_atoms[i].init(cx, obj)) {
+ return JS_FALSE;
+ }
+ }
+
+ return JS_TRUE;
+}
+
+static JSIdArray *
+AddAtomToArray(JSContext *cx, JSAtom *atom, JSIdArray *ida, jsint *ip)
+{
+ jsint i, length;
+
+ i = *ip;
+ length = ida->length;
+ if (i >= length) {
+ ida = js_SetIdArrayLength(cx, ida, JS_MAX(length * 2, 8));
+ if (!ida)
+ return NULL;
+ JS_ASSERT(i < ida->length);
+ }
+ ida->vector[i] = ATOM_TO_JSID(atom);
+ *ip = i + 1;
+ return ida;
+}
+
+static JSIdArray *
+EnumerateIfResolved(JSContext *cx, JSObject *obj, JSAtom *atom, JSIdArray *ida,
+ jsint *ip, JSBool *foundp)
+{
+ *foundp = AlreadyHasOwnProperty(cx, obj, atom);
+ if (*foundp)
+ ida = AddAtomToArray(cx, atom, ida, ip);
+ return ida;
+}
+
+JS_PUBLIC_API(JSIdArray *)
+JS_EnumerateResolvedStandardClasses(JSContext *cx, JSObject *obj,
+ JSIdArray *ida)
+{
+ JSRuntime *rt;
+ jsint i, j, k;
+ JSAtom *atom;
+ JSBool found;
+ JSObjectOp init;
+
+ CHECK_REQUEST(cx);
+ rt = cx->runtime;
+ if (ida) {
+ i = ida->length;
+ } else {
+ ida = js_NewIdArray(cx, 8);
+ if (!ida)
+ return NULL;
+ i = 0;
+ }
+
+ /* Check whether 'undefined' has been resolved and enumerate it if so. */
+ atom = rt->atomState.typeAtoms[JSTYPE_VOID];
+ ida = EnumerateIfResolved(cx, obj, atom, ida, &i, &found);
+ if (!ida)
+ return NULL;
+
+ /* Enumerate only classes that *have* been resolved. */
+ for (j = 0; standard_class_atoms[j].init; j++) {
+ atom = OFFSET_TO_ATOM(rt, standard_class_atoms[j].atomOffset);
+ ida = EnumerateIfResolved(cx, obj, atom, ida, &i, &found);
+ if (!ida)
+ return NULL;
+
+ if (found) {
+ init = standard_class_atoms[j].init;
+
+ for (k = 0; standard_class_names[k].init; k++) {
+ if (standard_class_names[k].init == init) {
+ atom = StdNameToAtom(cx, &standard_class_names[k]);
+ ida = AddAtomToArray(cx, atom, ida, &i);
+ if (!ida)
+ return NULL;
+ }
+ }
+
+ if (init == js_InitObjectClass) {
+ for (k = 0; object_prototype_names[k].init; k++) {
+ atom = StdNameToAtom(cx, &object_prototype_names[k]);
+ ida = AddAtomToArray(cx, atom, ida, &i);
+ if (!ida)
+ return NULL;
+ }
+ }
+ }
+ }
+
+ /* Trim to exact length via js_SetIdArrayLength. */
+ return js_SetIdArrayLength(cx, ida, i);
+}
+
+#undef ATOM_OFFSET
+#undef CLASS_ATOM_OFFSET
+#undef OFFSET_TO_ATOM
+#undef CLASP
+
+#undef EAGER_ATOM
+#undef EAGER_CLASS_ATOM
+#undef EAGER_ATOM_AND_CLASP
+#undef LAZY_ATOM
+
+JS_PUBLIC_API(JSBool)
+JS_GetClassObject(JSContext *cx, JSObject *obj, JSProtoKey key,
+ JSObject **objp)
+{
+ CHECK_REQUEST(cx);
+ return js_GetClassObject(cx, obj, key, objp);
+}
+
+JS_PUBLIC_API(JSObject *)
+JS_GetScopeChain(JSContext *cx)
+{
+ JSStackFrame *fp;
+
+ fp = cx->fp;
+ if (!fp) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_INACTIVE);
+ return NULL;
+ }
+ return js_GetScopeChain(cx, fp);
+}
+
+JS_PUBLIC_API(void *)
+JS_malloc(JSContext *cx, size_t nbytes)
+{
+ void *p;
+
+ JS_ASSERT(nbytes != 0);
+ if (nbytes == 0)
+ nbytes = 1;
+
+ p = malloc(nbytes);
+ if (!p) {
+ JS_ReportOutOfMemory(cx);
+ return NULL;
+ }
+ js_UpdateMallocCounter(cx, nbytes);
+
+ return p;
+}
+
+JS_PUBLIC_API(void *)
+JS_realloc(JSContext *cx, void *p, size_t nbytes)
+{
+ p = realloc(p, nbytes);
+ if (!p)
+ JS_ReportOutOfMemory(cx);
+ return p;
+}
+
+JS_PUBLIC_API(void)
+JS_free(JSContext *cx, void *p)
+{
+ if (p)
+ free(p);
+}
+
+JS_PUBLIC_API(char *)
+JS_strdup(JSContext *cx, const char *s)
+{
+ size_t n;
+ void *p;
+
+ n = strlen(s) + 1;
+ p = JS_malloc(cx, n);
+ if (!p)
+ return NULL;
+ return (char *)memcpy(p, s, n);
+}
+
+JS_PUBLIC_API(jsdouble *)
+JS_NewDouble(JSContext *cx, jsdouble d)
+{
+ CHECK_REQUEST(cx);
+ return js_NewDouble(cx, d, 0);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_NewDoubleValue(JSContext *cx, jsdouble d, jsval *rval)
+{
+ CHECK_REQUEST(cx);
+ return js_NewDoubleValue(cx, d, rval);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_NewNumberValue(JSContext *cx, jsdouble d, jsval *rval)
+{
+ CHECK_REQUEST(cx);
+ return js_NewNumberValue(cx, d, rval);
+}
+
+#undef JS_AddRoot
+JS_PUBLIC_API(JSBool)
+JS_AddRoot(JSContext *cx, void *rp)
+{
+ CHECK_REQUEST(cx);
+ return js_AddRoot(cx, rp, NULL);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_AddNamedRootRT(JSRuntime *rt, void *rp, const char *name)
+{
+ return js_AddRootRT(rt, rp, name);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_RemoveRoot(JSContext *cx, void *rp)
+{
+ CHECK_REQUEST(cx);
+ return js_RemoveRoot(cx->runtime, rp);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_RemoveRootRT(JSRuntime *rt, void *rp)
+{
+ return js_RemoveRoot(rt, rp);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_AddNamedRoot(JSContext *cx, void *rp, const char *name)
+{
+ CHECK_REQUEST(cx);
+ return js_AddRoot(cx, rp, name);
+}
+
+JS_PUBLIC_API(void)
+JS_ClearNewbornRoots(JSContext *cx)
+{
+ JS_CLEAR_WEAK_ROOTS(&cx->weakRoots);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_EnterLocalRootScope(JSContext *cx)
+{
+ CHECK_REQUEST(cx);
+ return js_EnterLocalRootScope(cx);
+}
+
+JS_PUBLIC_API(void)
+JS_LeaveLocalRootScope(JSContext *cx)
+{
+ CHECK_REQUEST(cx);
+ js_LeaveLocalRootScope(cx);
+}
+
+JS_PUBLIC_API(void)
+JS_LeaveLocalRootScopeWithResult(JSContext *cx, jsval rval)
+{
+ CHECK_REQUEST(cx);
+ js_LeaveLocalRootScopeWithResult(cx, rval);
+}
+
+JS_PUBLIC_API(void)
+JS_ForgetLocalRoot(JSContext *cx, void *thing)
+{
+ CHECK_REQUEST(cx);
+ js_ForgetLocalRoot(cx, (jsval) thing);
+}
+
+#ifdef DEBUG
+
+JS_PUBLIC_API(void)
+JS_DumpNamedRoots(JSRuntime *rt,
+ void (*dump)(const char *name, void *rp, void *data),
+ void *data)
+{
+ js_DumpNamedRoots(rt, dump, data);
+}
+
+#endif /* DEBUG */
+
+JS_PUBLIC_API(uint32)
+JS_MapGCRoots(JSRuntime *rt, JSGCRootMapFun map, void *data)
+{
+ return js_MapGCRoots(rt, map, data);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_LockGCThing(JSContext *cx, void *thing)
+{
+ JSBool ok;
+
+ CHECK_REQUEST(cx);
+ ok = js_LockGCThing(cx, thing);
+ if (!ok)
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_CANT_LOCK);
+ return ok;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_LockGCThingRT(JSRuntime *rt, void *thing)
+{
+ return js_LockGCThingRT(rt, thing);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_UnlockGCThing(JSContext *cx, void *thing)
+{
+ JSBool ok;
+
+ CHECK_REQUEST(cx);
+ ok = js_UnlockGCThingRT(cx->runtime, thing);
+ if (!ok)
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_CANT_UNLOCK);
+ return ok;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_UnlockGCThingRT(JSRuntime *rt, void *thing)
+{
+ return js_UnlockGCThingRT(rt, thing);
+}
+
+JS_PUBLIC_API(void)
+JS_MarkGCThing(JSContext *cx, void *thing, const char *name, void *arg)
+{
+ JS_ASSERT(cx->runtime->gcLevel > 0);
+#ifdef JS_THREADSAFE
+ JS_ASSERT(cx->runtime->gcThread->id == js_CurrentThreadId());
+#endif
+
+ GC_MARK(cx, thing, name);
+}
+
+JS_PUBLIC_API(void)
+JS_GC(JSContext *cx)
+{
+#if JS_HAS_GENERATORS
+ /* Run previously scheduled but delayed close hooks. */
+ js_RunCloseHooks(cx);
+#endif
+
+ /* Don't nuke active arenas if executing or compiling. */
+ if (cx->stackPool.current == &cx->stackPool.first)
+ JS_FinishArenaPool(&cx->stackPool);
+ if (cx->tempPool.current == &cx->tempPool.first)
+ JS_FinishArenaPool(&cx->tempPool);
+ js_GC(cx, GC_NORMAL);
+
+#if JS_HAS_GENERATORS
+ /*
+ * Run close hooks for objects that became unreachable after the last GC.
+ */
+ js_RunCloseHooks(cx);
+#endif
+}
+
+JS_PUBLIC_API(void)
+JS_MaybeGC(JSContext *cx)
+{
+#ifdef WAY_TOO_MUCH_GC
+ JS_GC(cx);
+#else
+ JSRuntime *rt;
+ uint32 bytes, lastBytes;
+
+ rt = cx->runtime;
+ bytes = rt->gcBytes;
+ lastBytes = rt->gcLastBytes;
+
+ /*
+ * We run the GC if we used all available free GC cells and had to
+ * allocate extra 1/5 of GC arenas since the last run of GC, or if
+ * we have malloc'd more bytes through JS_malloc than we were told
+ * to allocate by JS_NewRuntime.
+ *
+ * The reason for
+ * bytes > 6/5 lastBytes
+ * condition is the following. Bug 312238 changed bytes and lastBytes
+ * to mean the total amount of memory that the GC uses now and right
+ * after the last GC.
+ *
+ * Before the bug the variables meant the size of allocated GC things
+ * now and right after the last GC. That size did not include the
+ * memory taken by free GC cells and the condition was
+ * bytes > 3/2 lastBytes.
+ * That is, we run the GC if we have half again as many bytes of
+ * GC-things as the last time we GC'd. To be compatible we need to
+ * express that condition through the new meaning of bytes and
+ * lastBytes.
+ *
+ * We write the original condition as
+ * B*(1-F) > 3/2 Bl*(1-Fl)
+ * where B is the total memory size allocated by the GC and F is the
+ * current free cell density, while Bl and Fl are the size and the
+ * density right after the last GC. The density by definition is the
+ * memory taken by free cells divided by the total amount of memory.
+ * In other words, B and Bl
+ * are bytes and lastBytes with the new meaning and B*(1-F) and
+ * Bl*(1-Fl) are bytes and lastBytes with the original meaning.
+ *
+ * Our task is to exclude F and Fl from the last statement. According
+ * to the stats from bug 331770, Fl is about 20-30% for the GC
+ * allocations that contribute to B and Bl in a typical run of the
+ * browser. This means that the original condition implied that we did
+ * not run the GC unless we exhausted the pool of free cells. Indeed,
+ * if we still have free cells, then B == Bl since we have not yet
+ * allocated any
+ * new arenas and the condition means
+ * 1 - F > 3/2 (1-Fl) or 3/2Fl > 1/2 + F
+ * That implies 3/2 Fl > 1/2, or Fl > 1/3, which cannot be fulfilled
+ * for the state described by the stats. So we can write the original
+ * condition as:
+ * F == 0 && B > 3/2 Bl(1-Fl)
+ * Again using the stats we see that Fl is about 20% when the browser
+ * starts up and when we are far from hitting rt->gcMaxBytes. With
+ * this Fl we have
+ * F == 0 && B > 3/2 Bl(1-0.2) or just B > 6/5 Bl.
+ */
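+ /*
+ * A worked instance of the heuristic above (the numbers are purely
+ * illustrative, not measurements): suppose lastBytes is 1 MB right
+ * after a GC and Fl is roughly 20%. The next GC then triggers once
+ * bytes > lastBytes + lastBytes / 5, i.e. once the GC-managed total
+ * passes 1.2 MB -- the 6/5 factor derived above -- provided bytes is
+ * also over the small 8192-byte floor, or sooner if gcMallocBytes
+ * reaches gcMaxMallocBytes.
+ */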
+ if ((bytes > 8192 && bytes > lastBytes + lastBytes / 5) ||
+ rt->gcMallocBytes >= rt->gcMaxMallocBytes) {
+ JS_GC(cx);
+ }
+#if JS_HAS_GENERATORS
+ else {
+ /* Run scheduled but not yet executed close hooks. */
+ js_RunCloseHooks(cx);
+ }
+#endif
+#endif
+}
+
+JS_PUBLIC_API(JSGCCallback)
+JS_SetGCCallback(JSContext *cx, JSGCCallback cb)
+{
+ return JS_SetGCCallbackRT(cx->runtime, cb);
+}
+
+JS_PUBLIC_API(JSGCCallback)
+JS_SetGCCallbackRT(JSRuntime *rt, JSGCCallback cb)
+{
+ JSGCCallback oldcb;
+
+ oldcb = rt->gcCallback;
+ rt->gcCallback = cb;
+ return oldcb;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_IsAboutToBeFinalized(JSContext *cx, void *thing)
+{
+ JS_ASSERT(thing);
+ return js_IsAboutToBeFinalized(cx, thing);
+}
+
+JS_PUBLIC_API(void)
+JS_SetGCParameter(JSRuntime *rt, JSGCParamKey key, uint32 value)
+{
+ switch (key) {
+ case JSGC_MAX_BYTES:
+ rt->gcMaxBytes = value;
+ break;
+ case JSGC_MAX_MALLOC_BYTES:
+ rt->gcMaxMallocBytes = value;
+ break;
+ }
+}
+
+JS_PUBLIC_API(intN)
+JS_AddExternalStringFinalizer(JSStringFinalizeOp finalizer)
+{
+ return js_ChangeExternalStringFinalizer(NULL, finalizer);
+}
+
+JS_PUBLIC_API(intN)
+JS_RemoveExternalStringFinalizer(JSStringFinalizeOp finalizer)
+{
+ return js_ChangeExternalStringFinalizer(finalizer, NULL);
+}
+
+JS_PUBLIC_API(JSString *)
+JS_NewExternalString(JSContext *cx, jschar *chars, size_t length, intN type)
+{
+ JSString *str;
+
+ CHECK_REQUEST(cx);
+ JS_ASSERT(GCX_EXTERNAL_STRING <= type && type < (intN) GCX_NTYPES);
+
+ str = (JSString *) js_NewGCThing(cx, (uintN) type, sizeof(JSString));
+ if (!str)
+ return NULL;
+ str->length = length;
+ str->chars = chars;
+ return str;
+}
+
+JS_PUBLIC_API(intN)
+JS_GetExternalStringGCType(JSRuntime *rt, JSString *str)
+{
+ uint8 type = (uint8) (*js_GetGCThingFlags(str) & GCF_TYPEMASK);
+
+ if (type >= GCX_EXTERNAL_STRING)
+ return (intN)type;
+ JS_ASSERT(type == GCX_STRING || type == GCX_MUTABLE_STRING);
+ return -1;
+}
+
+JS_PUBLIC_API(void)
+JS_SetThreadStackLimit(JSContext *cx, jsuword limitAddr)
+{
+#if JS_STACK_GROWTH_DIRECTION > 0
+ if (limitAddr == 0)
+ limitAddr = (jsuword)-1;
+#endif
+ cx->stackLimit = limitAddr;
+}
+
+/************************************************************************/
+
+JS_PUBLIC_API(void)
+JS_DestroyIdArray(JSContext *cx, JSIdArray *ida)
+{
+ JS_free(cx, ida);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_ValueToId(JSContext *cx, jsval v, jsid *idp)
+{
+ JSAtom *atom;
+
+ CHECK_REQUEST(cx);
+ if (JSVAL_IS_INT(v)) {
+ *idp = INT_JSVAL_TO_JSID(v);
+ } else {
+#if JS_HAS_XML_SUPPORT
+ if (JSVAL_IS_OBJECT(v)) {
+ *idp = OBJECT_JSVAL_TO_JSID(v);
+ return JS_TRUE;
+ }
+#endif
+ atom = js_ValueToStringAtom(cx, v);
+ if (!atom)
+ return JS_FALSE;
+ *idp = ATOM_TO_JSID(atom);
+ }
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_IdToValue(JSContext *cx, jsid id, jsval *vp)
+{
+ CHECK_REQUEST(cx);
+ *vp = ID_TO_VALUE(id);
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_PropertyStub(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_EnumerateStub(JSContext *cx, JSObject *obj)
+{
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_ResolveStub(JSContext *cx, JSObject *obj, jsval id)
+{
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_ConvertStub(JSContext *cx, JSObject *obj, JSType type, jsval *vp)
+{
+ return js_TryValueOf(cx, obj, type, vp);
+}
+
+JS_PUBLIC_API(void)
+JS_FinalizeStub(JSContext *cx, JSObject *obj)
+{
+}
+
+JS_PUBLIC_API(JSObject *)
+JS_InitClass(JSContext *cx, JSObject *obj, JSObject *parent_proto,
+ JSClass *clasp, JSNative constructor, uintN nargs,
+ JSPropertySpec *ps, JSFunctionSpec *fs,
+ JSPropertySpec *static_ps, JSFunctionSpec *static_fs)
+{
+ JSAtom *atom;
+ JSProtoKey key;
+ JSObject *proto, *ctor;
+ JSTempValueRooter tvr;
+ jsval cval, rval;
+ JSBool named;
+ JSFunction *fun;
+
+ CHECK_REQUEST(cx);
+ atom = js_Atomize(cx, clasp->name, strlen(clasp->name), 0);
+ if (!atom)
+ return NULL;
+
+ /*
+ * When initializing a standard class, if no parent_proto (grand-proto of
+ * instances of the class, parent-proto of the class's prototype object)
+ * is given, we must use Object.prototype if it is available. Otherwise,
+ * we could look up the wrong binding for a class name in obj. Example:
+ *
+ * String = Array;
+ * print("hi there".join);
+ *
+ * should print undefined, not Array.prototype.join. This is required by
+ * ECMA-262, alas. It might have been better to make String readonly and
+ * permanent in the global object, instead -- but that's too big a change
+ * to swallow at this point.
+ */
+ key = JSCLASS_CACHED_PROTO_KEY(clasp);
+ if (key != JSProto_Null &&
+ !parent_proto &&
+ !js_GetClassPrototype(cx, obj, INT_TO_JSID(JSProto_Object),
+ &parent_proto)) {
+ return NULL;
+ }
+
+ /* Create a prototype object for this class. */
+ proto = js_NewObject(cx, clasp, parent_proto, obj);
+ if (!proto)
+ return NULL;
+
+ /* After this point, control must exit via label bad or out. */
+ JS_PUSH_TEMP_ROOT_OBJECT(cx, proto, &tvr);
+
+ if (!constructor) {
+ /*
+ * Lacking a constructor, name the prototype (e.g., Math) unless this
+ * class (a) is anonymous, i.e. for internal use only; (b) the class
+ * of obj (the global object) has a reserved slot indexed by key;
+ * and (c) key is not the null key.
+ */
+ if ((clasp->flags & JSCLASS_IS_ANONYMOUS) &&
+ (OBJ_GET_CLASS(cx, obj)->flags & JSCLASS_IS_GLOBAL) &&
+ key != JSProto_Null) {
+ named = JS_FALSE;
+ } else {
+ named = OBJ_DEFINE_PROPERTY(cx, obj, ATOM_TO_JSID(atom),
+ OBJECT_TO_JSVAL(proto),
+ NULL, NULL,
+ (clasp->flags & JSCLASS_IS_ANONYMOUS)
+ ? JSPROP_READONLY | JSPROP_PERMANENT
+ : 0,
+ NULL);
+ if (!named)
+ goto bad;
+ }
+
+ ctor = proto;
+ } else {
+ /* Define the constructor function in obj's scope. */
+ fun = js_DefineFunction(cx, obj, atom, constructor, nargs, 0);
+ named = (fun != NULL);
+ if (!fun)
+ goto bad;
+
+ /*
+ * Remember the class this function is a constructor for so that
+ * we know to create an object of this class when we call the
+ * constructor.
+ */
+ fun->clasp = clasp;
+
+ /*
+ * Optionally construct the prototype object, before the class has
+ * been fully initialized. Allow the ctor to replace proto with a
+ * different object, as is done for operator new -- and as at least
+ * XML support requires.
+ */
+ ctor = fun->object;
+ if (clasp->flags & JSCLASS_CONSTRUCT_PROTOTYPE) {
+ cval = OBJECT_TO_JSVAL(ctor);
+ if (!js_InternalConstruct(cx, proto, cval, 0, NULL, &rval))
+ goto bad;
+ if (!JSVAL_IS_PRIMITIVE(rval) && JSVAL_TO_OBJECT(rval) != proto)
+ proto = JSVAL_TO_OBJECT(rval);
+ }
+
+ /* Connect constructor and prototype by named properties. */
+ if (!js_SetClassPrototype(cx, ctor, proto,
+ JSPROP_READONLY | JSPROP_PERMANENT)) {
+ goto bad;
+ }
+
+ /* Bootstrap Function.prototype (see also JS_InitStandardClasses). */
+ if (OBJ_GET_CLASS(cx, ctor) == clasp) {
+ JS_ASSERT(!OBJ_GET_PROTO(cx, ctor));
+ OBJ_SET_PROTO(cx, ctor, proto);
+ }
+ }
+
+ /* Add properties and methods to the prototype and the constructor. */
+ if ((ps && !JS_DefineProperties(cx, proto, ps)) ||
+ (fs && !JS_DefineFunctions(cx, proto, fs)) ||
+ (static_ps && !JS_DefineProperties(cx, ctor, static_ps)) ||
+ (static_fs && !JS_DefineFunctions(cx, ctor, static_fs))) {
+ goto bad;
+ }
+
+ /* If this is a standard class, cache its prototype. */
+ if (key != JSProto_Null && !js_SetClassObject(cx, obj, key, ctor))
+ goto bad;
+
+out:
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ return proto;
+
+bad:
+ if (named)
+ (void) OBJ_DELETE_PROPERTY(cx, obj, ATOM_TO_JSID(atom), &rval);
+ proto = NULL;
+ goto out;
+}
+
+#ifdef JS_THREADSAFE
+JS_PUBLIC_API(JSClass *)
+JS_GetClass(JSContext *cx, JSObject *obj)
+{
+ return (JSClass *)
+ JSVAL_TO_PRIVATE(GC_AWARE_GET_SLOT(cx, obj, JSSLOT_CLASS));
+}
+#else
+JS_PUBLIC_API(JSClass *)
+JS_GetClass(JSObject *obj)
+{
+ return LOCKED_OBJ_GET_CLASS(obj);
+}
+#endif
+
+JS_PUBLIC_API(JSBool)
+JS_InstanceOf(JSContext *cx, JSObject *obj, JSClass *clasp, jsval *argv)
+{
+ JSFunction *fun;
+
+ CHECK_REQUEST(cx);
+ if (OBJ_GET_CLASS(cx, obj) == clasp)
+ return JS_TRUE;
+ if (argv) {
+ fun = js_ValueToFunction(cx, &argv[-2], 0);
+ if (fun) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_INCOMPATIBLE_PROTO,
+ clasp->name, JS_GetFunctionName(fun),
+ OBJ_GET_CLASS(cx, obj)->name);
+ }
+ }
+ return JS_FALSE;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_HasInstance(JSContext *cx, JSObject *obj, jsval v, JSBool *bp)
+{
+ return js_HasInstance(cx, obj, v, bp);
+}
+
+JS_PUBLIC_API(void *)
+JS_GetPrivate(JSContext *cx, JSObject *obj)
+{
+ jsval v;
+
+ JS_ASSERT(OBJ_GET_CLASS(cx, obj)->flags & JSCLASS_HAS_PRIVATE);
+ v = GC_AWARE_GET_SLOT(cx, obj, JSSLOT_PRIVATE);
+ if (!JSVAL_IS_INT(v))
+ return NULL;
+ return JSVAL_TO_PRIVATE(v);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_SetPrivate(JSContext *cx, JSObject *obj, void *data)
+{
+ JS_ASSERT(OBJ_GET_CLASS(cx, obj)->flags & JSCLASS_HAS_PRIVATE);
+ OBJ_SET_SLOT(cx, obj, JSSLOT_PRIVATE, PRIVATE_TO_JSVAL(data));
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(void *)
+JS_GetInstancePrivate(JSContext *cx, JSObject *obj, JSClass *clasp,
+ jsval *argv)
+{
+ if (!JS_InstanceOf(cx, obj, clasp, argv))
+ return NULL;
+ return JS_GetPrivate(cx, obj);
+}
+
+JS_PUBLIC_API(JSObject *)
+JS_GetPrototype(JSContext *cx, JSObject *obj)
+{
+ JSObject *proto;
+
+ CHECK_REQUEST(cx);
+ proto = JSVAL_TO_OBJECT(GC_AWARE_GET_SLOT(cx, obj, JSSLOT_PROTO));
+
+ /* Beware ref to dead object (we may be called from obj's finalizer). */
+ return proto && proto->map ? proto : NULL;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_SetPrototype(JSContext *cx, JSObject *obj, JSObject *proto)
+{
+ CHECK_REQUEST(cx);
+ if (obj->map->ops->setProto)
+ return obj->map->ops->setProto(cx, obj, JSSLOT_PROTO, proto);
+ OBJ_SET_SLOT(cx, obj, JSSLOT_PROTO, OBJECT_TO_JSVAL(proto));
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(JSObject *)
+JS_GetParent(JSContext *cx, JSObject *obj)
+{
+ JSObject *parent;
+
+ parent = JSVAL_TO_OBJECT(GC_AWARE_GET_SLOT(cx, obj, JSSLOT_PARENT));
+
+ /* Beware ref to dead object (we may be called from obj's finalizer). */
+ return parent && parent->map ? parent : NULL;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_SetParent(JSContext *cx, JSObject *obj, JSObject *parent)
+{
+ CHECK_REQUEST(cx);
+ if (obj->map->ops->setParent)
+ return obj->map->ops->setParent(cx, obj, JSSLOT_PARENT, parent);
+ OBJ_SET_SLOT(cx, obj, JSSLOT_PARENT, OBJECT_TO_JSVAL(parent));
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(JSObject *)
+JS_GetConstructor(JSContext *cx, JSObject *proto)
+{
+ jsval cval;
+
+ CHECK_REQUEST(cx);
+ if (!OBJ_GET_PROPERTY(cx, proto,
+ ATOM_TO_JSID(cx->runtime->atomState.constructorAtom),
+ &cval)) {
+ return NULL;
+ }
+ if (!VALUE_IS_FUNCTION(cx, cval)) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_NO_CONSTRUCTOR,
+ OBJ_GET_CLASS(cx, proto)->name);
+ return NULL;
+ }
+ return JSVAL_TO_OBJECT(cval);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_GetObjectId(JSContext *cx, JSObject *obj, jsid *idp)
+{
+ JS_ASSERT(((jsid)obj & JSID_TAGMASK) == 0);
+ *idp = OBJECT_TO_JSID(obj);
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(JSObject *)
+JS_NewObject(JSContext *cx, JSClass *clasp, JSObject *proto, JSObject *parent)
+{
+ CHECK_REQUEST(cx);
+ if (!clasp)
+ clasp = &js_ObjectClass; /* default class is Object */
+ return js_NewObject(cx, clasp, proto, parent);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_SealObject(JSContext *cx, JSObject *obj, JSBool deep)
+{
+ JSScope *scope;
+ JSIdArray *ida;
+ uint32 nslots;
+ jsval v, *vp, *end;
+
+ if (!OBJ_IS_NATIVE(obj)) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_CANT_SEAL_OBJECT,
+ OBJ_GET_CLASS(cx, obj)->name);
+ return JS_FALSE;
+ }
+
+ scope = OBJ_SCOPE(obj);
+
+#if defined JS_THREADSAFE && defined DEBUG
+ /* Insist on scope being used exclusively by cx's thread. */
+ if (scope->ownercx != cx) {
+ JS_LOCK_OBJ(cx, obj);
+ JS_ASSERT(OBJ_SCOPE(obj) == scope);
+ JS_ASSERT(scope->ownercx == cx);
+ JS_UNLOCK_SCOPE(cx, scope);
+ }
+#endif
+
+ /* Nothing to do if obj's scope is already sealed. */
+ if (SCOPE_IS_SEALED(scope))
+ return JS_TRUE;
+
+ /* XXX Enumerate lazy properties now, as they can't be added later. */
+ ida = JS_Enumerate(cx, obj);
+ if (!ida)
+ return JS_FALSE;
+ JS_DestroyIdArray(cx, ida);
+
+ /* Ensure that obj has its own, mutable scope, and seal that scope. */
+ JS_LOCK_OBJ(cx, obj);
+ scope = js_GetMutableScope(cx, obj);
+ if (scope)
+ SCOPE_SET_SEALED(scope);
+ JS_UNLOCK_OBJ(cx, obj);
+ if (!scope)
+ return JS_FALSE;
+
+ /* If we are not sealing an entire object graph, we're done. */
+ if (!deep)
+ return JS_TRUE;
+
+ /* Walk obj->slots and if any value is a non-null object, seal it. */
+ nslots = JS_MIN(scope->map.freeslot, scope->map.nslots);
+ for (vp = obj->slots, end = vp + nslots; vp < end; vp++) {
+ v = *vp;
+ if (JSVAL_IS_PRIMITIVE(v))
+ continue;
+ if (!JS_SealObject(cx, JSVAL_TO_OBJECT(v), deep))
+ return JS_FALSE;
+ }
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(JSObject *)
+JS_ConstructObject(JSContext *cx, JSClass *clasp, JSObject *proto,
+ JSObject *parent)
+{
+ CHECK_REQUEST(cx);
+ if (!clasp)
+ clasp = &js_ObjectClass; /* default class is Object */
+ return js_ConstructObject(cx, clasp, proto, parent, 0, NULL);
+}
+
+JS_PUBLIC_API(JSObject *)
+JS_ConstructObjectWithArguments(JSContext *cx, JSClass *clasp, JSObject *proto,
+ JSObject *parent, uintN argc, jsval *argv)
+{
+ CHECK_REQUEST(cx);
+ if (!clasp)
+ clasp = &js_ObjectClass; /* default class is Object */
+ return js_ConstructObject(cx, clasp, proto, parent, argc, argv);
+}
+
+static JSBool
+DefineProperty(JSContext *cx, JSObject *obj, const char *name, jsval value,
+ JSPropertyOp getter, JSPropertyOp setter, uintN attrs,
+ uintN flags, intN tinyid)
+{
+ jsid id;
+ JSAtom *atom;
+
+ if (attrs & JSPROP_INDEX) {
+ id = INT_TO_JSID(JS_PTR_TO_INT32(name));
+ atom = NULL;
+ attrs &= ~JSPROP_INDEX;
+ } else {
+ atom = js_Atomize(cx, name, strlen(name), 0);
+ if (!atom)
+ return JS_FALSE;
+ id = ATOM_TO_JSID(atom);
+ }
+ if (flags != 0 && OBJ_IS_NATIVE(obj)) {
+ return js_DefineNativeProperty(cx, obj, id, value, getter, setter,
+ attrs, flags, tinyid, NULL);
+ }
+ return OBJ_DEFINE_PROPERTY(cx, obj, id, value, getter, setter, attrs,
+ NULL);
+}
+
+#define AUTO_NAMELEN(s,n) (((n) == (size_t)-1) ? js_strlen(s) : (n))
+
+static JSBool
+DefineUCProperty(JSContext *cx, JSObject *obj,
+ const jschar *name, size_t namelen, jsval value,
+ JSPropertyOp getter, JSPropertyOp setter, uintN attrs,
+ uintN flags, intN tinyid)
+{
+ JSAtom *atom;
+
+ atom = js_AtomizeChars(cx, name, AUTO_NAMELEN(name, namelen), 0);
+ if (!atom)
+ return JS_FALSE;
+ if (flags != 0 && OBJ_IS_NATIVE(obj)) {
+ return js_DefineNativeProperty(cx, obj, ATOM_TO_JSID(atom), value,
+ getter, setter, attrs, flags, tinyid,
+ NULL);
+ }
+ return OBJ_DEFINE_PROPERTY(cx, obj, ATOM_TO_JSID(atom), value,
+ getter, setter, attrs, NULL);
+}
+
+JS_PUBLIC_API(JSObject *)
+JS_DefineObject(JSContext *cx, JSObject *obj, const char *name, JSClass *clasp,
+ JSObject *proto, uintN attrs)
+{
+ JSObject *nobj;
+
+ CHECK_REQUEST(cx);
+ if (!clasp)
+ clasp = &js_ObjectClass; /* default class is Object */
+ nobj = js_NewObject(cx, clasp, proto, obj);
+ if (!nobj)
+ return NULL;
+ if (!DefineProperty(cx, obj, name, OBJECT_TO_JSVAL(nobj), NULL, NULL, attrs,
+ 0, 0)) {
+ cx->weakRoots.newborn[GCX_OBJECT] = NULL;
+ return NULL;
+ }
+ return nobj;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_DefineConstDoubles(JSContext *cx, JSObject *obj, JSConstDoubleSpec *cds)
+{
+ JSBool ok;
+ jsval value;
+ uintN flags;
+
+ CHECK_REQUEST(cx);
+ for (ok = JS_TRUE; cds->name; cds++) {
+ ok = js_NewNumberValue(cx, cds->dval, &value);
+ if (!ok)
+ break;
+ flags = cds->flags;
+ if (!flags)
+ flags = JSPROP_READONLY | JSPROP_PERMANENT;
+ ok = DefineProperty(cx, obj, cds->name, value, NULL, NULL, flags, 0, 0);
+ if (!ok)
+ break;
+ }
+ return ok;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_DefineProperties(JSContext *cx, JSObject *obj, JSPropertySpec *ps)
+{
+ JSBool ok;
+
+ CHECK_REQUEST(cx);
+ for (ok = JS_TRUE; ps->name; ps++) {
+ ok = DefineProperty(cx, obj, ps->name, JSVAL_VOID,
+ ps->getter, ps->setter, ps->flags,
+ SPROP_HAS_SHORTID, ps->tinyid);
+ if (!ok)
+ break;
+ }
+ return ok;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_DefineProperty(JSContext *cx, JSObject *obj, const char *name, jsval value,
+ JSPropertyOp getter, JSPropertyOp setter, uintN attrs)
+{
+ CHECK_REQUEST(cx);
+ return DefineProperty(cx, obj, name, value, getter, setter, attrs, 0, 0);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_DefinePropertyWithTinyId(JSContext *cx, JSObject *obj, const char *name,
+ int8 tinyid, jsval value,
+ JSPropertyOp getter, JSPropertyOp setter,
+ uintN attrs)
+{
+ CHECK_REQUEST(cx);
+ return DefineProperty(cx, obj, name, value, getter, setter, attrs,
+ SPROP_HAS_SHORTID, tinyid);
+}
+
+static JSBool
+LookupProperty(JSContext *cx, JSObject *obj, const char *name, JSObject **objp,
+ JSProperty **propp)
+{
+ JSAtom *atom;
+
+ atom = js_Atomize(cx, name, strlen(name), 0);
+ if (!atom)
+ return JS_FALSE;
+ return OBJ_LOOKUP_PROPERTY(cx, obj, ATOM_TO_JSID(atom), objp, propp);
+}
+
+static JSBool
+LookupUCProperty(JSContext *cx, JSObject *obj,
+ const jschar *name, size_t namelen,
+ JSObject **objp, JSProperty **propp)
+{
+ JSAtom *atom;
+
+ atom = js_AtomizeChars(cx, name, AUTO_NAMELEN(name, namelen), 0);
+ if (!atom)
+ return JS_FALSE;
+ return OBJ_LOOKUP_PROPERTY(cx, obj, ATOM_TO_JSID(atom), objp, propp);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_AliasProperty(JSContext *cx, JSObject *obj, const char *name,
+ const char *alias)
+{
+ JSObject *obj2;
+ JSProperty *prop;
+ JSAtom *atom;
+ JSBool ok;
+ JSScopeProperty *sprop;
+
+ CHECK_REQUEST(cx);
+ if (!LookupProperty(cx, obj, name, &obj2, &prop))
+ return JS_FALSE;
+ if (!prop) {
+ js_ReportIsNotDefined(cx, name);
+ return JS_FALSE;
+ }
+ if (obj2 != obj || !OBJ_IS_NATIVE(obj)) {
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_CANT_ALIAS,
+ alias, name, OBJ_GET_CLASS(cx, obj2)->name);
+ return JS_FALSE;
+ }
+ atom = js_Atomize(cx, alias, strlen(alias), 0);
+ if (!atom) {
+ ok = JS_FALSE;
+ } else {
+ sprop = (JSScopeProperty *)prop;
+ ok = (js_AddNativeProperty(cx, obj, ATOM_TO_JSID(atom),
+ sprop->getter, sprop->setter, sprop->slot,
+ sprop->attrs, sprop->flags | SPROP_IS_ALIAS,
+ sprop->shortid)
+ != NULL);
+ }
+ OBJ_DROP_PROPERTY(cx, obj, prop);
+ return ok;
+}
+
+static jsval
+LookupResult(JSContext *cx, JSObject *obj, JSObject *obj2, JSProperty *prop)
+{
+ JSScopeProperty *sprop;
+ jsval rval;
+
+ if (!prop) {
+ /* XXX bad API: no way to tell "not defined" from "void value" */
+ return JSVAL_VOID;
+ }
+ if (OBJ_IS_NATIVE(obj2)) {
+ /* Peek at the native property's slot value, without doing a Get. */
+ sprop = (JSScopeProperty *)prop;
+ rval = SPROP_HAS_VALID_SLOT(sprop, OBJ_SCOPE(obj2))
+ ? LOCKED_OBJ_GET_SLOT(obj2, sprop->slot)
+ : JSVAL_TRUE;
+ } else {
+ /* XXX bad API: no way to return "defined but value unknown" */
+ rval = JSVAL_TRUE;
+ }
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+ return rval;
+}
+
+static JSBool
+GetPropertyAttributes(JSContext *cx, JSObject *obj, JSAtom *atom,
+ uintN *attrsp, JSBool *foundp,
+ JSPropertyOp *getterp, JSPropertyOp *setterp)
+{
+ JSObject *obj2;
+ JSProperty *prop;
+ JSBool ok;
+
+ if (!atom)
+ return JS_FALSE;
+ if (!OBJ_LOOKUP_PROPERTY(cx, obj, ATOM_TO_JSID(atom), &obj2, &prop))
+ return JS_FALSE;
+
+ if (!prop || obj != obj2) {
+ *attrsp = 0;
+ *foundp = JS_FALSE;
+ if (getterp)
+ *getterp = NULL;
+ if (setterp)
+ *setterp = NULL;
+ if (prop)
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+ return JS_TRUE;
+ }
+
+ *foundp = JS_TRUE;
+ ok = OBJ_GET_ATTRIBUTES(cx, obj, ATOM_TO_JSID(atom), prop, attrsp);
+ if (ok && OBJ_IS_NATIVE(obj)) {
+ JSScopeProperty *sprop = (JSScopeProperty *) prop;
+
+ if (getterp)
+ *getterp = sprop->getter;
+ if (setterp)
+ *setterp = sprop->setter;
+ }
+ OBJ_DROP_PROPERTY(cx, obj, prop);
+ return ok;
+}
+
+static JSBool
+SetPropertyAttributes(JSContext *cx, JSObject *obj, JSAtom *atom,
+ uintN attrs, JSBool *foundp)
+{
+ JSObject *obj2;
+ JSProperty *prop;
+ JSBool ok;
+
+ if (!atom)
+ return JS_FALSE;
+ if (!OBJ_LOOKUP_PROPERTY(cx, obj, ATOM_TO_JSID(atom), &obj2, &prop))
+ return JS_FALSE;
+ if (!prop || obj != obj2) {
+ *foundp = JS_FALSE;
+ if (prop)
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+ return JS_TRUE;
+ }
+
+ *foundp = JS_TRUE;
+ ok = OBJ_SET_ATTRIBUTES(cx, obj, ATOM_TO_JSID(atom), prop, &attrs);
+ OBJ_DROP_PROPERTY(cx, obj, prop);
+ return ok;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_GetPropertyAttributes(JSContext *cx, JSObject *obj, const char *name,
+ uintN *attrsp, JSBool *foundp)
+{
+ CHECK_REQUEST(cx);
+ return GetPropertyAttributes(cx, obj,
+ js_Atomize(cx, name, strlen(name), 0),
+ attrsp, foundp, NULL, NULL);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_GetPropertyAttrsGetterAndSetter(JSContext *cx, JSObject *obj,
+ const char *name,
+ uintN *attrsp, JSBool *foundp,
+ JSPropertyOp *getterp,
+ JSPropertyOp *setterp)
+{
+ CHECK_REQUEST(cx);
+ return GetPropertyAttributes(cx, obj,
+ js_Atomize(cx, name, strlen(name), 0),
+ attrsp, foundp, getterp, setterp);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_SetPropertyAttributes(JSContext *cx, JSObject *obj, const char *name,
+ uintN attrs, JSBool *foundp)
+{
+ CHECK_REQUEST(cx);
+ return SetPropertyAttributes(cx, obj,
+ js_Atomize(cx, name, strlen(name), 0),
+ attrs, foundp);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_HasProperty(JSContext *cx, JSObject *obj, const char *name, JSBool *foundp)
+{
+ JSBool ok;
+ JSObject *obj2;
+ JSProperty *prop;
+
+ CHECK_REQUEST(cx);
+ ok = LookupProperty(cx, obj, name, &obj2, &prop);
+ if (ok) {
+ *foundp = (prop != NULL);
+ if (prop)
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+ }
+ return ok;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_LookupProperty(JSContext *cx, JSObject *obj, const char *name, jsval *vp)
+{
+ JSBool ok;
+ JSObject *obj2;
+ JSProperty *prop;
+
+ CHECK_REQUEST(cx);
+ ok = LookupProperty(cx, obj, name, &obj2, &prop);
+ if (ok)
+ *vp = LookupResult(cx, obj, obj2, prop);
+ return ok;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_LookupPropertyWithFlags(JSContext *cx, JSObject *obj, const char *name,
+ uintN flags, jsval *vp)
+{
+ JSAtom *atom;
+ JSBool ok;
+ JSObject *obj2;
+ JSProperty *prop;
+
+ CHECK_REQUEST(cx);
+ atom = js_Atomize(cx, name, strlen(name), 0);
+ if (!atom)
+ return JS_FALSE;
+ ok = OBJ_IS_NATIVE(obj)
+ ? js_LookupPropertyWithFlags(cx, obj, ATOM_TO_JSID(atom), flags,
+ &obj2, &prop)
+ : OBJ_LOOKUP_PROPERTY(cx, obj, ATOM_TO_JSID(atom), &obj2, &prop);
+ if (ok)
+ *vp = LookupResult(cx, obj, obj2, prop);
+ return ok;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_GetProperty(JSContext *cx, JSObject *obj, const char *name, jsval *vp)
+{
+ JSAtom *atom;
+
+ CHECK_REQUEST(cx);
+ atom = js_Atomize(cx, name, strlen(name), 0);
+ if (!atom)
+ return JS_FALSE;
+ return OBJ_GET_PROPERTY(cx, obj, ATOM_TO_JSID(atom), vp);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_GetMethodById(JSContext *cx, JSObject *obj, jsid id, JSObject **objp,
+ jsval *vp)
+{
+ CHECK_REQUEST(cx);
+
+#if JS_HAS_XML_SUPPORT
+ if (OBJECT_IS_XML(cx, obj)) {
+ JSXMLObjectOps *ops;
+
+ ops = (JSXMLObjectOps *) obj->map->ops;
+ obj = ops->getMethod(cx, obj, id, vp);
+ if (!obj)
+ return JS_FALSE;
+ } else
+#endif
+ {
+ if (!OBJ_GET_PROPERTY(cx, obj, id, vp))
+ return JS_FALSE;
+ }
+
+ *objp = obj;
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_GetMethod(JSContext *cx, JSObject *obj, const char *name, JSObject **objp,
+ jsval *vp)
+{
+ JSAtom *atom;
+
+ atom = js_Atomize(cx, name, strlen(name), 0);
+ if (!atom)
+ return JS_FALSE;
+ return JS_GetMethodById(cx, obj, ATOM_TO_JSID(atom), objp, vp);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_SetProperty(JSContext *cx, JSObject *obj, const char *name, jsval *vp)
+{
+ JSAtom *atom;
+
+ CHECK_REQUEST(cx);
+ atom = js_Atomize(cx, name, strlen(name), 0);
+ if (!atom)
+ return JS_FALSE;
+ return OBJ_SET_PROPERTY(cx, obj, ATOM_TO_JSID(atom), vp);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_DeleteProperty(JSContext *cx, JSObject *obj, const char *name)
+{
+ jsval junk;
+
+ CHECK_REQUEST(cx);
+ return JS_DeleteProperty2(cx, obj, name, &junk);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_DeleteProperty2(JSContext *cx, JSObject *obj, const char *name,
+ jsval *rval)
+{
+ JSAtom *atom;
+
+ CHECK_REQUEST(cx);
+ atom = js_Atomize(cx, name, strlen(name), 0);
+ if (!atom)
+ return JS_FALSE;
+ return OBJ_DELETE_PROPERTY(cx, obj, ATOM_TO_JSID(atom), rval);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_DefineUCProperty(JSContext *cx, JSObject *obj,
+ const jschar *name, size_t namelen, jsval value,
+ JSPropertyOp getter, JSPropertyOp setter,
+ uintN attrs)
+{
+ CHECK_REQUEST(cx);
+ return DefineUCProperty(cx, obj, name, namelen, value, getter, setter,
+ attrs, 0, 0);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_GetUCPropertyAttributes(JSContext *cx, JSObject *obj,
+ const jschar *name, size_t namelen,
+ uintN *attrsp, JSBool *foundp)
+{
+ CHECK_REQUEST(cx);
+ return GetPropertyAttributes(cx, obj,
+ js_AtomizeChars(cx, name, AUTO_NAMELEN(name, namelen), 0),
+ attrsp, foundp, NULL, NULL);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_GetUCPropertyAttrsGetterAndSetter(JSContext *cx, JSObject *obj,
+ const jschar *name, size_t namelen,
+ uintN *attrsp, JSBool *foundp,
+ JSPropertyOp *getterp,
+ JSPropertyOp *setterp)
+{
+ CHECK_REQUEST(cx);
+ return GetPropertyAttributes(cx, obj,
+ js_AtomizeChars(cx, name, AUTO_NAMELEN(name, namelen), 0),
+ attrsp, foundp, getterp, setterp);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_SetUCPropertyAttributes(JSContext *cx, JSObject *obj,
+ const jschar *name, size_t namelen,
+ uintN attrs, JSBool *foundp)
+{
+ CHECK_REQUEST(cx);
+ return SetPropertyAttributes(cx, obj,
+ js_AtomizeChars(cx, name, AUTO_NAMELEN(name, namelen), 0),
+ attrs, foundp);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_DefineUCPropertyWithTinyId(JSContext *cx, JSObject *obj,
+ const jschar *name, size_t namelen,
+ int8 tinyid, jsval value,
+ JSPropertyOp getter, JSPropertyOp setter,
+ uintN attrs)
+{
+ CHECK_REQUEST(cx);
+ return DefineUCProperty(cx, obj, name, namelen, value, getter, setter,
+ attrs, SPROP_HAS_SHORTID, tinyid);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_HasUCProperty(JSContext *cx, JSObject *obj,
+ const jschar *name, size_t namelen,
+ JSBool *vp)
+{
+ JSBool ok;
+ JSObject *obj2;
+ JSProperty *prop;
+
+ CHECK_REQUEST(cx);
+ ok = LookupUCProperty(cx, obj, name, namelen, &obj2, &prop);
+ if (ok) {
+ *vp = (prop != NULL);
+ if (prop)
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+ }
+ return ok;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_LookupUCProperty(JSContext *cx, JSObject *obj,
+ const jschar *name, size_t namelen,
+ jsval *vp)
+{
+ JSBool ok;
+ JSObject *obj2;
+ JSProperty *prop;
+
+ CHECK_REQUEST(cx);
+ ok = LookupUCProperty(cx, obj, name, namelen, &obj2, &prop);
+ if (ok)
+ *vp = LookupResult(cx, obj, obj2, prop);
+ return ok;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_GetUCProperty(JSContext *cx, JSObject *obj,
+ const jschar *name, size_t namelen,
+ jsval *vp)
+{
+ JSAtom *atom;
+
+ CHECK_REQUEST(cx);
+ atom = js_AtomizeChars(cx, name, AUTO_NAMELEN(name, namelen), 0);
+ if (!atom)
+ return JS_FALSE;
+ return OBJ_GET_PROPERTY(cx, obj, ATOM_TO_JSID(atom), vp);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_SetUCProperty(JSContext *cx, JSObject *obj,
+ const jschar *name, size_t namelen,
+ jsval *vp)
+{
+ JSAtom *atom;
+
+ CHECK_REQUEST(cx);
+ atom = js_AtomizeChars(cx, name, AUTO_NAMELEN(name, namelen), 0);
+ if (!atom)
+ return JS_FALSE;
+ return OBJ_SET_PROPERTY(cx, obj, ATOM_TO_JSID(atom), vp);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_DeleteUCProperty2(JSContext *cx, JSObject *obj,
+ const jschar *name, size_t namelen,
+ jsval *rval)
+{
+ JSAtom *atom;
+
+ CHECK_REQUEST(cx);
+ atom = js_AtomizeChars(cx, name, AUTO_NAMELEN(name, namelen), 0);
+ if (!atom)
+ return JS_FALSE;
+ return OBJ_DELETE_PROPERTY(cx, obj, ATOM_TO_JSID(atom), rval);
+}
+
+JS_PUBLIC_API(JSObject *)
+JS_NewArrayObject(JSContext *cx, jsint length, jsval *vector)
+{
+ CHECK_REQUEST(cx);
+ /* NB: jsuint cast does ToUint32. */
+ return js_NewArrayObject(cx, (jsuint)length, vector);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_IsArrayObject(JSContext *cx, JSObject *obj)
+{
+ return OBJ_GET_CLASS(cx, obj) == &js_ArrayClass;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_GetArrayLength(JSContext *cx, JSObject *obj, jsuint *lengthp)
+{
+ CHECK_REQUEST(cx);
+ return js_GetLengthProperty(cx, obj, lengthp);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_SetArrayLength(JSContext *cx, JSObject *obj, jsuint length)
+{
+ CHECK_REQUEST(cx);
+ return js_SetLengthProperty(cx, obj, length);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_HasArrayLength(JSContext *cx, JSObject *obj, jsuint *lengthp)
+{
+ CHECK_REQUEST(cx);
+ return js_HasLengthProperty(cx, obj, lengthp);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_DefineElement(JSContext *cx, JSObject *obj, jsint index, jsval value,
+ JSPropertyOp getter, JSPropertyOp setter, uintN attrs)
+{
+ CHECK_REQUEST(cx);
+ return OBJ_DEFINE_PROPERTY(cx, obj, INT_TO_JSID(index), value,
+ getter, setter, attrs, NULL);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_AliasElement(JSContext *cx, JSObject *obj, const char *name, jsint alias)
+{
+ JSObject *obj2;
+ JSProperty *prop;
+ JSScopeProperty *sprop;
+ JSBool ok;
+
+ CHECK_REQUEST(cx);
+ if (!LookupProperty(cx, obj, name, &obj2, &prop))
+ return JS_FALSE;
+ if (!prop) {
+ js_ReportIsNotDefined(cx, name);
+ return JS_FALSE;
+ }
+ if (obj2 != obj || !OBJ_IS_NATIVE(obj)) {
+ char numBuf[12];
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+ JS_snprintf(numBuf, sizeof numBuf, "%ld", (long)alias);
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_CANT_ALIAS,
+ numBuf, name, OBJ_GET_CLASS(cx, obj2)->name);
+ return JS_FALSE;
+ }
+ sprop = (JSScopeProperty *)prop;
+ ok = (js_AddNativeProperty(cx, obj, INT_TO_JSID(alias),
+ sprop->getter, sprop->setter, sprop->slot,
+ sprop->attrs, sprop->flags | SPROP_IS_ALIAS,
+ sprop->shortid)
+ != NULL);
+ OBJ_DROP_PROPERTY(cx, obj, prop);
+ return ok;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_HasElement(JSContext *cx, JSObject *obj, jsint index, JSBool *foundp)
+{
+ JSBool ok;
+ JSObject *obj2;
+ JSProperty *prop;
+
+ CHECK_REQUEST(cx);
+ ok = OBJ_LOOKUP_PROPERTY(cx, obj, INT_TO_JSID(index), &obj2, &prop);
+ if (ok) {
+ *foundp = (prop != NULL);
+ if (prop)
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+ }
+ return ok;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_LookupElement(JSContext *cx, JSObject *obj, jsint index, jsval *vp)
+{
+ JSBool ok;
+ JSObject *obj2;
+ JSProperty *prop;
+
+ CHECK_REQUEST(cx);
+ ok = OBJ_LOOKUP_PROPERTY(cx, obj, INT_TO_JSID(index), &obj2, &prop);
+ if (ok)
+ *vp = LookupResult(cx, obj, obj2, prop);
+ return ok;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_GetElement(JSContext *cx, JSObject *obj, jsint index, jsval *vp)
+{
+ CHECK_REQUEST(cx);
+ return OBJ_GET_PROPERTY(cx, obj, INT_TO_JSID(index), vp);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_SetElement(JSContext *cx, JSObject *obj, jsint index, jsval *vp)
+{
+ CHECK_REQUEST(cx);
+ return OBJ_SET_PROPERTY(cx, obj, INT_TO_JSID(index), vp);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_DeleteElement(JSContext *cx, JSObject *obj, jsint index)
+{
+ jsval junk;
+
+ CHECK_REQUEST(cx);
+ return JS_DeleteElement2(cx, obj, index, &junk);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_DeleteElement2(JSContext *cx, JSObject *obj, jsint index, jsval *rval)
+{
+ CHECK_REQUEST(cx);
+ return OBJ_DELETE_PROPERTY(cx, obj, INT_TO_JSID(index), rval);
+}
+
+JS_PUBLIC_API(void)
+JS_ClearScope(JSContext *cx, JSObject *obj)
+{
+ CHECK_REQUEST(cx);
+
+ if (obj->map->ops->clear)
+ obj->map->ops->clear(cx, obj);
+
+ /* Clear cached class objects on the global object. */
+ if (JS_GET_CLASS(cx, obj)->flags & JSCLASS_IS_GLOBAL) {
+ JSProtoKey key;
+
+ for (key = JSProto_Null; key < JSProto_LIMIT; key++)
+ JS_SetReservedSlot(cx, obj, key, JSVAL_VOID);
+ }
+}
+
+JS_PUBLIC_API(JSIdArray *)
+JS_Enumerate(JSContext *cx, JSObject *obj)
+{
+ jsint i, n;
+ jsval iter_state, num_properties;
+ jsid id;
+ JSIdArray *ida;
+ jsval *vector;
+
+ CHECK_REQUEST(cx);
+
+ ida = NULL;
+ iter_state = JSVAL_NULL;
+
+ /* Get the number of properties to enumerate. */
+ if (!OBJ_ENUMERATE(cx, obj, JSENUMERATE_INIT, &iter_state, &num_properties))
+ goto error;
+ if (!JSVAL_IS_INT(num_properties)) {
+ JS_ASSERT(0);
+ goto error;
+ }
+
+ /* Grow as needed if we don't know the exact amount ahead of time. */
+ n = JSVAL_TO_INT(num_properties);
+ if (n <= 0)
+ n = 8;
+
+ /* Create an array of jsids large enough to hold all the properties */
+ ida = js_NewIdArray(cx, n);
+ if (!ida)
+ goto error;
+
+ i = 0;
+ vector = &ida->vector[0];
+ for (;;) {
+ if (!OBJ_ENUMERATE(cx, obj, JSENUMERATE_NEXT, &iter_state, &id))
+ goto error;
+
+ /* No more jsids to enumerate? */
+ if (iter_state == JSVAL_NULL)
+ break;
+
+ if (i == ida->length) {
+ ida = js_SetIdArrayLength(cx, ida, ida->length * 2);
+ if (!ida)
+ goto error;
+ vector = &ida->vector[0];
+ }
+ vector[i++] = id;
+ }
+ return js_SetIdArrayLength(cx, ida, i);
+
+error:
+ if (iter_state != JSVAL_NULL)
+ OBJ_ENUMERATE(cx, obj, JSENUMERATE_DESTROY, &iter_state, 0);
+ if (ida)
+ JS_DestroyIdArray(cx, ida);
+ return NULL;
+}
+
+/*
+ * XXX reverse iterator for properties, unreverse and meld with jsinterp.c's
+ * prop_iterator_class somehow...
+ * + preserve the OBJ_ENUMERATE API while optimizing the native object case
+ * + native case here uses a JSScopeProperty *, but that iterates in reverse!
+ * + so we make non-native match, by reverse-iterating after JS_Enumerating
+ */
+#define JSSLOT_ITER_INDEX (JSSLOT_PRIVATE + 1)
+
+#if JSSLOT_ITER_INDEX >= JS_INITIAL_NSLOTS
+# error "JSSLOT_ITER_INDEX botch!"
+#endif
+
+static void
+prop_iter_finalize(JSContext *cx, JSObject *obj)
+{
+ jsval v;
+ jsint i;
+ JSIdArray *ida;
+
+ v = GC_AWARE_GET_SLOT(cx, obj, JSSLOT_ITER_INDEX);
+ if (JSVAL_IS_VOID(v))
+ return;
+
+ i = JSVAL_TO_INT(v);
+ if (i >= 0) {
+ /* Non-native case: destroy the ida enumerated when obj was created. */
+ ida = (JSIdArray *) JS_GetPrivate(cx, obj);
+ if (ida)
+ JS_DestroyIdArray(cx, ida);
+ }
+}
+
+static uint32
+prop_iter_mark(JSContext *cx, JSObject *obj, void *arg)
+{
+ jsval v;
+ jsint i, n;
+ JSScopeProperty *sprop;
+ JSIdArray *ida;
+ jsid id;
+
+ v = GC_AWARE_GET_SLOT(cx, obj, JSSLOT_PRIVATE);
+ JS_ASSERT(!JSVAL_IS_VOID(v));
+
+ i = JSVAL_TO_INT(OBJ_GET_SLOT(cx, obj, JSSLOT_ITER_INDEX));
+ if (i < 0) {
+ /* Native case: just mark the next property to visit. */
+ sprop = (JSScopeProperty *) JSVAL_TO_PRIVATE(v);
+ if (sprop)
+ MARK_SCOPE_PROPERTY(cx, sprop);
+ } else {
+ /* Non-native case: mark each id in the JSIdArray private. */
+ ida = (JSIdArray *) JSVAL_TO_PRIVATE(v);
+ for (i = 0, n = ida->length; i < n; i++) {
+ id = ida->vector[i];
+ MARK_ID(cx, id);
+ }
+ }
+ return 0;
+}
+
+static JSClass prop_iter_class = {
+ "PropertyIterator",
+ JSCLASS_HAS_PRIVATE | JSCLASS_HAS_RESERVED_SLOTS(1),
+ JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
+ JS_EnumerateStub, JS_ResolveStub, JS_ConvertStub, prop_iter_finalize,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, prop_iter_mark, NULL
+};
+
+JS_PUBLIC_API(JSObject *)
+JS_NewPropertyIterator(JSContext *cx, JSObject *obj)
+{
+ JSObject *iterobj;
+ JSScope *scope;
+ void *pdata;
+ jsint index;
+ JSIdArray *ida;
+
+ CHECK_REQUEST(cx);
+ iterobj = js_NewObject(cx, &prop_iter_class, NULL, obj);
+ if (!iterobj)
+ return NULL;
+
+ if (OBJ_IS_NATIVE(obj)) {
+ /* Native case: start with the last property in obj's own scope. */
+ scope = OBJ_SCOPE(obj);
+ pdata = (scope->object == obj) ? scope->lastProp : NULL;
+ index = -1;
+ } else {
+ JSTempValueRooter tvr;
+
+ /*
+ * Non-native case: enumerate a JSIdArray and keep it via private.
+ *
+ * Note: we have to make sure that we root iterobj around the call to
+ * JS_Enumerate to protect against multiple allocations under it.
+ */
+ JS_PUSH_SINGLE_TEMP_ROOT(cx, OBJECT_TO_JSVAL(iterobj), &tvr);
+ ida = JS_Enumerate(cx, obj);
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ if (!ida)
+ goto bad;
+ pdata = ida;
+ index = ida->length;
+ }
+
+ /* iterobj cannot escape to other threads here. */
+ iterobj->slots[JSSLOT_PRIVATE] = PRIVATE_TO_JSVAL(pdata);
+ iterobj->slots[JSSLOT_ITER_INDEX] = INT_TO_JSVAL(index);
+ return iterobj;
+
+ bad:
+ cx->weakRoots.newborn[GCX_OBJECT] = NULL;
+ return NULL;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_NextProperty(JSContext *cx, JSObject *iterobj, jsid *idp)
+{
+ jsint i;
+ JSObject *obj;
+ JSScope *scope;
+ JSScopeProperty *sprop;
+ JSIdArray *ida;
+
+ CHECK_REQUEST(cx);
+ i = JSVAL_TO_INT(OBJ_GET_SLOT(cx, iterobj, JSSLOT_ITER_INDEX));
+ if (i < 0) {
+ /* Native case: private data is a property tree node pointer. */
+ obj = OBJ_GET_PARENT(cx, iterobj);
+ JS_ASSERT(OBJ_IS_NATIVE(obj));
+ scope = OBJ_SCOPE(obj);
+ JS_ASSERT(scope->object == obj);
+ sprop = (JSScopeProperty *) JS_GetPrivate(cx, iterobj);
+
+ /*
+ * If the next property mapped by scope in the property tree ancestor
+ * line is not enumerable, or it's an alias, or one or more properties
+ * were deleted from the "middle" of the scope-mapped ancestor line
+ * and the next property was among those deleted, skip it and keep on
+ * trying to find an enumerable property that is still in scope.
+ */
+ while (sprop &&
+ (!(sprop->attrs & JSPROP_ENUMERATE) ||
+ (sprop->flags & SPROP_IS_ALIAS) ||
+ (SCOPE_HAD_MIDDLE_DELETE(scope) &&
+ !SCOPE_HAS_PROPERTY(scope, sprop)))) {
+ sprop = sprop->parent;
+ }
+
+ if (!sprop) {
+ *idp = JSVAL_VOID;
+ } else {
+ if (!JS_SetPrivate(cx, iterobj, sprop->parent))
+ return JS_FALSE;
+ *idp = sprop->id;
+ }
+ } else {
+ /* Non-native case: use the ida enumerated when iterobj was created. */
+ ida = (JSIdArray *) JS_GetPrivate(cx, iterobj);
+ JS_ASSERT(i <= ida->length);
+ if (i == 0) {
+ *idp = JSVAL_VOID;
+ } else {
+ *idp = ida->vector[--i];
+ OBJ_SET_SLOT(cx, iterobj, JSSLOT_ITER_INDEX, INT_TO_JSVAL(i));
+ }
+ }
+ return JS_TRUE;
+}
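+
+/*
+ * Typical use of the two iterator entry points above (a sketch, not
+ * code taken from this file): create an iterator for obj, then pull
+ * ids until JS_NextProperty stores JSVAL_VOID, which marks the end.
+ *
+ * JSObject *iterobj = JS_NewPropertyIterator(cx, obj);
+ * jsid id;
+ * jsval v;
+ * if (iterobj) {
+ * while (JS_NextProperty(cx, iterobj, &id) && id != JSVAL_VOID) {
+ * if (!JS_IdToValue(cx, id, &v))
+ * break;
+ * ... use v ...
+ * }
+ * }
+ *
+ * Note that both cases yield properties in reverse: the native case
+ * walks the scope's property tree from lastProp through the parent
+ * links, and the non-native case replays the JS_Enumerate ida from
+ * its end, so the two orders match (see the XXX note above).
+ */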
+
+JS_PUBLIC_API(JSBool)
+JS_CheckAccess(JSContext *cx, JSObject *obj, jsid id, JSAccessMode mode,
+ jsval *vp, uintN *attrsp)
+{
+ CHECK_REQUEST(cx);
+ return OBJ_CHECK_ACCESS(cx, obj, id, mode, vp, attrsp);
+}
+
+JS_PUBLIC_API(JSCheckAccessOp)
+JS_SetCheckObjectAccessCallback(JSRuntime *rt, JSCheckAccessOp acb)
+{
+ JSCheckAccessOp oldacb;
+
+ oldacb = rt->checkObjectAccess;
+ rt->checkObjectAccess = acb;
+ return oldacb;
+}
+
+static JSBool
+ReservedSlotIndexOK(JSContext *cx, JSObject *obj, JSClass *clasp,
+ uint32 index, uint32 limit)
+{
+ /* Check the computed, possibly per-instance, upper bound. */
+ if (clasp->reserveSlots)
+ JS_LOCK_OBJ_VOID(cx, obj, limit += clasp->reserveSlots(cx, obj));
+ if (index >= limit) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_RESERVED_SLOT_RANGE);
+ return JS_FALSE;
+ }
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_GetReservedSlot(JSContext *cx, JSObject *obj, uint32 index, jsval *vp)
+{
+ JSClass *clasp;
+ uint32 limit, slot;
+
+ CHECK_REQUEST(cx);
+ clasp = OBJ_GET_CLASS(cx, obj);
+ limit = JSCLASS_RESERVED_SLOTS(clasp);
+ if (index >= limit && !ReservedSlotIndexOK(cx, obj, clasp, index, limit))
+ return JS_FALSE;
+ slot = JSSLOT_START(clasp) + index;
+ *vp = OBJ_GET_REQUIRED_SLOT(cx, obj, slot);
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_SetReservedSlot(JSContext *cx, JSObject *obj, uint32 index, jsval v)
+{
+ JSClass *clasp;
+ uint32 limit, slot;
+
+ CHECK_REQUEST(cx);
+ clasp = OBJ_GET_CLASS(cx, obj);
+ limit = JSCLASS_RESERVED_SLOTS(clasp);
+ if (index >= limit && !ReservedSlotIndexOK(cx, obj, clasp, index, limit))
+ return JS_FALSE;
+ slot = JSSLOT_START(clasp) + index;
+ return OBJ_SET_REQUIRED_SLOT(cx, obj, slot, v);
+}
+
+#ifdef JS_THREADSAFE
+JS_PUBLIC_API(jsrefcount)
+JS_HoldPrincipals(JSContext *cx, JSPrincipals *principals)
+{
+ return JS_ATOMIC_INCREMENT(&principals->refcount);
+}
+
+JS_PUBLIC_API(jsrefcount)
+JS_DropPrincipals(JSContext *cx, JSPrincipals *principals)
+{
+ jsrefcount rc = JS_ATOMIC_DECREMENT(&principals->refcount);
+ if (rc == 0)
+ principals->destroy(cx, principals);
+ return rc;
+}
+#endif
+
+JS_PUBLIC_API(JSPrincipalsTranscoder)
+JS_SetPrincipalsTranscoder(JSRuntime *rt, JSPrincipalsTranscoder px)
+{
+ JSPrincipalsTranscoder oldpx;
+
+ oldpx = rt->principalsTranscoder;
+ rt->principalsTranscoder = px;
+ return oldpx;
+}
+
+JS_PUBLIC_API(JSObjectPrincipalsFinder)
+JS_SetObjectPrincipalsFinder(JSRuntime *rt, JSObjectPrincipalsFinder fop)
+{
+ JSObjectPrincipalsFinder oldfop;
+
+ oldfop = rt->findObjectPrincipals;
+ rt->findObjectPrincipals = fop;
+ return oldfop;
+}
+
+JS_PUBLIC_API(JSFunction *)
+JS_NewFunction(JSContext *cx, JSNative native, uintN nargs, uintN flags,
+ JSObject *parent, const char *name)
+{
+ JSAtom *atom;
+
+ CHECK_REQUEST(cx);
+
+ if (!name) {
+ atom = NULL;
+ } else {
+ atom = js_Atomize(cx, name, strlen(name), 0);
+ if (!atom)
+ return NULL;
+ }
+ return js_NewFunction(cx, NULL, native, nargs, flags, parent, atom);
+}
+
+JS_PUBLIC_API(JSObject *)
+JS_CloneFunctionObject(JSContext *cx, JSObject *funobj, JSObject *parent)
+{
+ CHECK_REQUEST(cx);
+ if (OBJ_GET_CLASS(cx, funobj) != &js_FunctionClass) {
+ /* Indicate we cannot clone this object. */
+ return funobj;
+ }
+ return js_CloneFunctionObject(cx, funobj, parent);
+}
+
+JS_PUBLIC_API(JSObject *)
+JS_GetFunctionObject(JSFunction *fun)
+{
+ return fun->object;
+}
+
+JS_PUBLIC_API(const char *)
+JS_GetFunctionName(JSFunction *fun)
+{
+ return fun->atom
+ ? JS_GetStringBytes(ATOM_TO_STRING(fun->atom))
+ : js_anonymous_str;
+}
+
+JS_PUBLIC_API(JSString *)
+JS_GetFunctionId(JSFunction *fun)
+{
+ return fun->atom ? ATOM_TO_STRING(fun->atom) : NULL;
+}
+
+JS_PUBLIC_API(uintN)
+JS_GetFunctionFlags(JSFunction *fun)
+{
+#ifdef MOZILLA_1_8_BRANCH
+ uintN flags = fun->flags;
+
+ return JSFUN_DISJOINT_FLAGS(flags) |
+ (JSFUN_GETTER_TEST(flags) ? JSFUN_GETTER : 0) |
+ (JSFUN_SETTER_TEST(flags) ? JSFUN_SETTER : 0) |
+ (JSFUN_BOUND_METHOD_TEST(flags) ? JSFUN_BOUND_METHOD : 0) |
+ (JSFUN_HEAVYWEIGHT_TEST(flags) ? JSFUN_HEAVYWEIGHT : 0);
+#else
+ return fun->flags;
+#endif
+}
+
+JS_PUBLIC_API(uint16)
+JS_GetFunctionArity(JSFunction *fun)
+{
+ return fun->nargs;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_ObjectIsFunction(JSContext *cx, JSObject *obj)
+{
+ return OBJ_GET_CLASS(cx, obj) == &js_FunctionClass;
+}
+
+JS_STATIC_DLL_CALLBACK(JSBool)
+js_generic_native_method_dispatcher(JSContext *cx, JSObject *obj,
+ uintN argc, jsval *argv, jsval *rval)
+{
+ jsval fsv;
+ JSFunctionSpec *fs;
+ JSObject *tmp;
+
+ if (!JS_GetReservedSlot(cx, JSVAL_TO_OBJECT(argv[-2]), 0, &fsv))
+ return JS_FALSE;
+ fs = (JSFunctionSpec *) JSVAL_TO_PRIVATE(fsv);
+
+ /*
+ * We know that argv[0] is valid because JS_DefineFunctions, which is our
+ * only (indirect) referrer, defined us as requiring at least one argument
+ * (notice how it passes fs->nargs + 1 as the next-to-last argument to
+ * JS_DefineFunction).
+ */
+ if (JSVAL_IS_PRIMITIVE(argv[0])) {
+ /*
+ * Make sure that this is an object or null, as required by the generic
+ * functions.
+ */
+ if (!js_ValueToObject(cx, argv[0], &tmp))
+ return JS_FALSE;
+ argv[0] = OBJECT_TO_JSVAL(tmp);
+ }
+
+ /*
+ * Copy all actual (argc) and required but missing (fs->nargs + 1 - argc)
+ * args down over our |this| parameter, argv[-1], which is almost always
+ * the class constructor object, e.g. Array. Then call the corresponding
+ * prototype native method with our first argument passed as |this|.
+ */
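+ /*
+ * Illustration (not a case taken from this file): a generic call such
+ * as Array.push(arr, v) arrives here with argv[-1] == Array (the
+ * constructor, our |this|), argv[0] == arr and argv[1] == v. After
+ * the memmove below, argv[-1] == arr and argv[0] == v, so fs->call
+ * runs the prototype-native push with arr as |this| and argc - 1 == 1
+ * remaining argument.
+ */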
+ memmove(argv - 1, argv, JS_MAX(fs->nargs + 1U, argc) * sizeof(jsval));
+
+ /*
+ * Follow Function.prototype.apply and .call by using the global object as
+ * the 'this' param if no args.
+ */
+ JS_ASSERT(cx->fp->argv == argv);
+ tmp = js_ComputeThis(cx, JSVAL_TO_OBJECT(argv[-1]), argv);
+ if (!tmp)
+ return JS_FALSE;
+ cx->fp->thisp = tmp;
+
+ /*
+ * Protect against argc - 1 underflowing below. By calling js_ComputeThis,
+ * we made it as if the static was called with one parameter.
+ */
+ if (argc == 0)
+ argc = 1;
+
+ return fs->call(cx, JSVAL_TO_OBJECT(argv[-1]), argc - 1, argv, rval);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_DefineFunctions(JSContext *cx, JSObject *obj, JSFunctionSpec *fs)
+{
+ uintN flags;
+ JSObject *ctor;
+ JSFunction *fun;
+
+ CHECK_REQUEST(cx);
+ ctor = NULL;
+ for (; fs->name; fs++) {
+
+ /* High bits of fs->extra are reserved. */
+ JS_ASSERT((fs->extra & 0xFFFF0000) == 0);
+ flags = fs->flags;
+
+ /*
+ * Define a generic arity N+1 static method for the arity N prototype
+ * method if flags contains JSFUN_GENERIC_NATIVE.
+ */
+ if (flags & JSFUN_GENERIC_NATIVE) {
+ if (!ctor) {
+ ctor = JS_GetConstructor(cx, obj);
+ if (!ctor)
+ return JS_FALSE;
+ }
+
+ flags &= ~JSFUN_GENERIC_NATIVE;
+ fun = JS_DefineFunction(cx, ctor, fs->name,
+ js_generic_native_method_dispatcher,
+ fs->nargs + 1, flags);
+ if (!fun)
+ return JS_FALSE;
+ fun->u.n.extra = (uint16)fs->extra;
+
+ /*
+ * As jsapi.h notes, fs must point to storage that lives as long
+ * as fun->object lives.
+ */
+ if (!JS_SetReservedSlot(cx, fun->object, 0, PRIVATE_TO_JSVAL(fs)))
+ return JS_FALSE;
+ }
+
+ fun = JS_DefineFunction(cx, obj, fs->name, fs->call, fs->nargs, flags);
+ if (!fun)
+ return JS_FALSE;
+ fun->u.n.extra = (uint16)fs->extra;
+ }
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(JSFunction *)
+JS_DefineFunction(JSContext *cx, JSObject *obj, const char *name, JSNative call,
+ uintN nargs, uintN attrs)
+{
+ JSAtom *atom;
+
+ CHECK_REQUEST(cx);
+ atom = js_Atomize(cx, name, strlen(name), 0);
+ if (!atom)
+ return NULL;
+ return js_DefineFunction(cx, obj, atom, call, nargs, attrs);
+}
+
+JS_PUBLIC_API(JSFunction *)
+JS_DefineUCFunction(JSContext *cx, JSObject *obj,
+ const jschar *name, size_t namelen, JSNative call,
+ uintN nargs, uintN attrs)
+{
+ JSAtom *atom;
+
+ atom = js_AtomizeChars(cx, name, AUTO_NAMELEN(name, namelen), 0);
+ if (!atom)
+ return NULL;
+ return js_DefineFunction(cx, obj, atom, call, nargs, attrs);
+}
+
+static JSScript *
+CompileTokenStream(JSContext *cx, JSObject *obj, JSTokenStream *ts,
+ void *tempMark, JSBool *eofp)
+{
+ JSBool eof;
+ JSArenaPool codePool, notePool;
+ JSCodeGenerator cg;
+ JSScript *script;
+
+ CHECK_REQUEST(cx);
+ eof = JS_FALSE;
+ JS_InitArenaPool(&codePool, "code", 1024, sizeof(jsbytecode));
+ JS_InitArenaPool(&notePool, "note", 1024, sizeof(jssrcnote));
+ if (!js_InitCodeGenerator(cx, &cg, &codePool, &notePool,
+ ts->filename, ts->lineno,
+ ts->principals)) {
+ script = NULL;
+ } else if (!js_CompileTokenStream(cx, obj, ts, &cg)) {
+ script = NULL;
+ eof = (ts->flags & TSF_EOF) != 0;
+ } else {
+ script = js_NewScriptFromCG(cx, &cg, NULL);
+ }
+ if (eofp)
+ *eofp = eof;
+ if (!js_CloseTokenStream(cx, ts)) {
+ if (script)
+ js_DestroyScript(cx, script);
+ script = NULL;
+ }
+ cg.tempMark = tempMark;
+ js_FinishCodeGenerator(cx, &cg);
+ JS_FinishArenaPool(&codePool);
+ JS_FinishArenaPool(&notePool);
+ return script;
+}
+
+JS_PUBLIC_API(JSScript *)
+JS_CompileScript(JSContext *cx, JSObject *obj,
+ const char *bytes, size_t length,
+ const char *filename, uintN lineno)
+{
+ jschar *chars;
+ JSScript *script;
+
+ CHECK_REQUEST(cx);
+ chars = js_InflateString(cx, bytes, &length);
+ if (!chars)
+ return NULL;
+ script = JS_CompileUCScript(cx, obj, chars, length, filename, lineno);
+ JS_free(cx, chars);
+ return script;
+}
+
+JS_PUBLIC_API(JSScript *)
+JS_CompileScriptForPrincipals(JSContext *cx, JSObject *obj,
+ JSPrincipals *principals,
+ const char *bytes, size_t length,
+ const char *filename, uintN lineno)
+{
+ jschar *chars;
+ JSScript *script;
+
+ CHECK_REQUEST(cx);
+ chars = js_InflateString(cx, bytes, &length);
+ if (!chars)
+ return NULL;
+ script = JS_CompileUCScriptForPrincipals(cx, obj, principals,
+ chars, length, filename, lineno);
+ JS_free(cx, chars);
+ return script;
+}
+
+JS_PUBLIC_API(JSScript *)
+JS_CompileUCScript(JSContext *cx, JSObject *obj,
+ const jschar *chars, size_t length,
+ const char *filename, uintN lineno)
+{
+ CHECK_REQUEST(cx);
+ return JS_CompileUCScriptForPrincipals(cx, obj, NULL, chars, length,
+ filename, lineno);
+}
+
+#define LAST_FRAME_EXCEPTION_CHECK(cx,result) \
+ JS_BEGIN_MACRO \
+ if (!(result) && !((cx)->options & JSOPTION_DONT_REPORT_UNCAUGHT)) \
+ js_ReportUncaughtException(cx); \
+ JS_END_MACRO
+
+#define LAST_FRAME_CHECKS(cx,result) \
+ JS_BEGIN_MACRO \
+ if (!(cx)->fp) { \
+ (cx)->weakRoots.lastInternalResult = JSVAL_NULL; \
+ LAST_FRAME_EXCEPTION_CHECK(cx, result); \
+ } \
+ JS_END_MACRO
+
+JS_PUBLIC_API(JSScript *)
+JS_CompileUCScriptForPrincipals(JSContext *cx, JSObject *obj,
+ JSPrincipals *principals,
+ const jschar *chars, size_t length,
+ const char *filename, uintN lineno)
+{
+ void *mark;
+ JSTokenStream *ts;
+ JSScript *script;
+
+ CHECK_REQUEST(cx);
+ mark = JS_ARENA_MARK(&cx->tempPool);
+ ts = js_NewTokenStream(cx, chars, length, filename, lineno, principals);
+ if (!ts)
+ return NULL;
+ script = CompileTokenStream(cx, obj, ts, mark, NULL);
+ LAST_FRAME_CHECKS(cx, script);
+ return script;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_BufferIsCompilableUnit(JSContext *cx, JSObject *obj,
+ const char *bytes, size_t length)
+{
+ jschar *chars;
+ JSBool result;
+ JSExceptionState *exnState;
+ void *tempMark;
+ JSTokenStream *ts;
+ JSErrorReporter older;
+
+ CHECK_REQUEST(cx);
+ chars = js_InflateString(cx, bytes, &length);
+ if (!chars)
+ return JS_TRUE;
+
+ /*
+ * Return true on any out-of-memory error, so our caller doesn't try to
+ * collect more buffered source.
+ */
+ result = JS_TRUE;
+ exnState = JS_SaveExceptionState(cx);
+ tempMark = JS_ARENA_MARK(&cx->tempPool);
+ ts = js_NewTokenStream(cx, chars, length, NULL, 0, NULL);
+ if (ts) {
+ older = JS_SetErrorReporter(cx, NULL);
+ if (!js_ParseTokenStream(cx, obj, ts) &&
+ (ts->flags & TSF_UNEXPECTED_EOF)) {
+ /*
+ * We ran into an error. If it was because we ran out of source,
+ * we return false, so our caller will know to try to collect more
+ * buffered source.
+ */
+ result = JS_FALSE;
+ }
+
+ JS_SetErrorReporter(cx, older);
+ js_CloseTokenStream(cx, ts);
+ JS_ARENA_RELEASE(&cx->tempPool, tempMark);
+ }
+
+ JS_free(cx, chars);
+ JS_RestoreExceptionState(cx, exnState);
+ return result;
+}
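+
+/*
+ * Sketch of the intended use of JS_BufferIsCompilableUnit in an
+ * interactive read-eval-print loop. AppendLine, ReadLine and the
+ * buffer fields are hypothetical helpers, not APIs of this engine;
+ * only the JS_* calls below are real.
+ *
+ * do {
+ * AppendLine(buffer, ReadLine(prompt));
+ * } while (!JS_BufferIsCompilableUnit(cx, global,
+ * buffer->chars, buffer->length));
+ * script = JS_CompileScript(cx, global, buffer->chars, buffer->length,
+ * "typein", firstLineno);
+ * if (script) {
+ * ok = JS_ExecuteScript(cx, global, script, &result);
+ * JS_DestroyScript(cx, script);
+ * }
+ */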
+
+JS_PUBLIC_API(JSScript *)
+JS_CompileFile(JSContext *cx, JSObject *obj, const char *filename)
+{
+ void *mark;
+ JSTokenStream *ts;
+ JSScript *script;
+
+ CHECK_REQUEST(cx);
+ mark = JS_ARENA_MARK(&cx->tempPool);
+ ts = js_NewFileTokenStream(cx, filename, stdin);
+ if (!ts)
+ return NULL;
+ script = CompileTokenStream(cx, obj, ts, mark, NULL);
+ LAST_FRAME_CHECKS(cx, script);
+ return script;
+}
+
+JS_PUBLIC_API(JSScript *)
+JS_CompileFileHandle(JSContext *cx, JSObject *obj, const char *filename,
+ FILE *file)
+{
+ return JS_CompileFileHandleForPrincipals(cx, obj, filename, file, NULL);
+}
+
+JS_PUBLIC_API(JSScript *)
+JS_CompileFileHandleForPrincipals(JSContext *cx, JSObject *obj,
+ const char *filename, FILE *file,
+ JSPrincipals *principals)
+{
+ void *mark;
+ JSTokenStream *ts;
+ JSScript *script;
+
+ CHECK_REQUEST(cx);
+ mark = JS_ARENA_MARK(&cx->tempPool);
+ ts = js_NewFileTokenStream(cx, NULL, file);
+ if (!ts)
+ return NULL;
+ ts->filename = filename;
+ /* XXXshaver js_NewFileTokenStream should do this, because it drops */
+ if (principals) {
+ ts->principals = principals;
+ JSPRINCIPALS_HOLD(cx, ts->principals);
+ }
+ script = CompileTokenStream(cx, obj, ts, mark, NULL);
+ LAST_FRAME_CHECKS(cx, script);
+ return script;
+}
+
+JS_PUBLIC_API(JSObject *)
+JS_NewScriptObject(JSContext *cx, JSScript *script)
+{
+ JSObject *obj;
+
+ obj = js_NewObject(cx, &js_ScriptClass, NULL, NULL);
+ if (!obj)
+ return NULL;
+
+ if (script) {
+ if (!JS_SetPrivate(cx, obj, script))
+ return NULL;
+ script->object = obj;
+ }
+ return obj;
+}
+
+JS_PUBLIC_API(JSObject *)
+JS_GetScriptObject(JSScript *script)
+{
+ return script->object;
+}
+
+JS_PUBLIC_API(void)
+JS_DestroyScript(JSContext *cx, JSScript *script)
+{
+ CHECK_REQUEST(cx);
+ js_DestroyScript(cx, script);
+}
+
+JS_PUBLIC_API(JSFunction *)
+JS_CompileFunction(JSContext *cx, JSObject *obj, const char *name,
+ uintN nargs, const char **argnames,
+ const char *bytes, size_t length,
+ const char *filename, uintN lineno)
+{
+ jschar *chars;
+ JSFunction *fun;
+
+ CHECK_REQUEST(cx);
+ chars = js_InflateString(cx, bytes, &length);
+ if (!chars)
+ return NULL;
+ fun = JS_CompileUCFunction(cx, obj, name, nargs, argnames, chars, length,
+ filename, lineno);
+ JS_free(cx, chars);
+ return fun;
+}
+
+JS_PUBLIC_API(JSFunction *)
+JS_CompileFunctionForPrincipals(JSContext *cx, JSObject *obj,
+ JSPrincipals *principals, const char *name,
+ uintN nargs, const char **argnames,
+ const char *bytes, size_t length,
+ const char *filename, uintN lineno)
+{
+ jschar *chars;
+ JSFunction *fun;
+
+ CHECK_REQUEST(cx);
+ chars = js_InflateString(cx, bytes, &length);
+ if (!chars)
+ return NULL;
+ fun = JS_CompileUCFunctionForPrincipals(cx, obj, principals, name,
+ nargs, argnames, chars, length,
+ filename, lineno);
+ JS_free(cx, chars);
+ return fun;
+}
+
+JS_PUBLIC_API(JSFunction *)
+JS_CompileUCFunction(JSContext *cx, JSObject *obj, const char *name,
+ uintN nargs, const char **argnames,
+ const jschar *chars, size_t length,
+ const char *filename, uintN lineno)
+{
+ CHECK_REQUEST(cx);
+ return JS_CompileUCFunctionForPrincipals(cx, obj, NULL, name,
+ nargs, argnames,
+ chars, length,
+ filename, lineno);
+}
+
+JS_PUBLIC_API(JSFunction *)
+JS_CompileUCFunctionForPrincipals(JSContext *cx, JSObject *obj,
+ JSPrincipals *principals, const char *name,
+ uintN nargs, const char **argnames,
+ const jschar *chars, size_t length,
+ const char *filename, uintN lineno)
+{
+ void *mark;
+ JSTokenStream *ts;
+ JSFunction *fun;
+ JSAtom *funAtom, *argAtom;
+ uintN i;
+
+ CHECK_REQUEST(cx);
+ mark = JS_ARENA_MARK(&cx->tempPool);
+ ts = js_NewTokenStream(cx, chars, length, filename, lineno, principals);
+ if (!ts) {
+ fun = NULL;
+ goto out;
+ }
+ if (!name) {
+ funAtom = NULL;
+ } else {
+ funAtom = js_Atomize(cx, name, strlen(name), 0);
+ if (!funAtom) {
+ fun = NULL;
+ goto out;
+ }
+ }
+ fun = js_NewFunction(cx, NULL, NULL, nargs, 0, obj, funAtom);
+ if (!fun)
+ goto out;
+ if (nargs) {
+ for (i = 0; i < nargs; i++) {
+ argAtom = js_Atomize(cx, argnames[i], strlen(argnames[i]), 0);
+ if (!argAtom)
+ break;
+ if (!js_AddHiddenProperty(cx, fun->object, ATOM_TO_JSID(argAtom),
+ js_GetArgument, js_SetArgument,
+ SPROP_INVALID_SLOT,
+ JSPROP_PERMANENT | JSPROP_SHARED,
+ SPROP_HAS_SHORTID, i)) {
+ break;
+ }
+ }
+ if (i < nargs) {
+ fun = NULL;
+ goto out;
+ }
+ }
+ if (!js_CompileFunctionBody(cx, ts, fun)) {
+ fun = NULL;
+ goto out;
+ }
+ if (obj && funAtom) {
+ if (!OBJ_DEFINE_PROPERTY(cx, obj, ATOM_TO_JSID(funAtom),
+ OBJECT_TO_JSVAL(fun->object),
+ NULL, NULL, JSPROP_ENUMERATE, NULL)) {
+ return NULL;
+ }
+ }
+out:
+ if (ts)
+ js_CloseTokenStream(cx, ts);
+ JS_ARENA_RELEASE(&cx->tempPool, mark);
+ LAST_FRAME_CHECKS(cx, fun);
+ return fun;
+}
+
+JS_PUBLIC_API(JSString *)
+JS_DecompileScript(JSContext *cx, JSScript *script, const char *name,
+ uintN indent)
+{
+ JSPrinter *jp;
+ JSString *str;
+
+ CHECK_REQUEST(cx);
+ jp = js_NewPrinter(cx, name,
+ indent & ~JS_DONT_PRETTY_PRINT,
+ !(indent & JS_DONT_PRETTY_PRINT));
+ if (!jp)
+ return NULL;
+ if (js_DecompileScript(jp, script))
+ str = js_GetPrinterOutput(jp);
+ else
+ str = NULL;
+ js_DestroyPrinter(jp);
+ return str;
+}
+
+JS_PUBLIC_API(JSString *)
+JS_DecompileFunction(JSContext *cx, JSFunction *fun, uintN indent)
+{
+ JSPrinter *jp;
+ JSString *str;
+
+ CHECK_REQUEST(cx);
+ jp = js_NewPrinter(cx, JS_GetFunctionName(fun),
+ indent & ~JS_DONT_PRETTY_PRINT,
+ !(indent & JS_DONT_PRETTY_PRINT));
+ if (!jp)
+ return NULL;
+ if (js_DecompileFunction(jp, fun))
+ str = js_GetPrinterOutput(jp);
+ else
+ str = NULL;
+ js_DestroyPrinter(jp);
+ return str;
+}
+
+JS_PUBLIC_API(JSString *)
+JS_DecompileFunctionBody(JSContext *cx, JSFunction *fun, uintN indent)
+{
+ JSPrinter *jp;
+ JSString *str;
+
+ CHECK_REQUEST(cx);
+ jp = js_NewPrinter(cx, JS_GetFunctionName(fun),
+ indent & ~JS_DONT_PRETTY_PRINT,
+ !(indent & JS_DONT_PRETTY_PRINT));
+ if (!jp)
+ return NULL;
+ if (js_DecompileFunctionBody(jp, fun))
+ str = js_GetPrinterOutput(jp);
+ else
+ str = NULL;
+ js_DestroyPrinter(jp);
+ return str;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_ExecuteScript(JSContext *cx, JSObject *obj, JSScript *script, jsval *rval)
+{
+ JSBool ok;
+
+ CHECK_REQUEST(cx);
+ ok = js_Execute(cx, obj, script, NULL, 0, rval);
+ LAST_FRAME_CHECKS(cx, ok);
+ return ok;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_ExecuteScriptPart(JSContext *cx, JSObject *obj, JSScript *script,
+ JSExecPart part, jsval *rval)
+{
+ JSScript tmp;
+ JSRuntime *rt;
+ JSBool ok;
+
+ /* Make a temporary copy of the JSScript structure and farble it a bit. */
+ tmp = *script;
+ if (part == JSEXEC_PROLOG) {
+ tmp.length = PTRDIFF(tmp.main, tmp.code, jsbytecode);
+ } else {
+ tmp.length -= PTRDIFF(tmp.main, tmp.code, jsbytecode);
+ tmp.code = tmp.main;
+ }
+
+ /* Tell the debugger about our temporary copy of the script structure. */
+ rt = cx->runtime;
+ if (rt->newScriptHook) {
+ rt->newScriptHook(cx, tmp.filename, tmp.lineno, &tmp, NULL,
+ rt->newScriptHookData);
+ }
+
+ /* Execute the farbled struct and tell the debugger to forget about it. */
+ ok = JS_ExecuteScript(cx, obj, &tmp, rval);
+ if (rt->destroyScriptHook)
+ rt->destroyScriptHook(cx, &tmp, rt->destroyScriptHookData);
+ return ok;
+}
+
+/* Ancient uintN nbytes is part of API/ABI, so use size_t length local. */
+JS_PUBLIC_API(JSBool)
+JS_EvaluateScript(JSContext *cx, JSObject *obj,
+ const char *bytes, uintN nbytes,
+ const char *filename, uintN lineno,
+ jsval *rval)
+{
+ size_t length = nbytes;
+ jschar *chars;
+ JSBool ok;
+
+ CHECK_REQUEST(cx);
+ chars = js_InflateString(cx, bytes, &length);
+ if (!chars)
+ return JS_FALSE;
+ ok = JS_EvaluateUCScript(cx, obj, chars, length, filename, lineno, rval);
+ JS_free(cx, chars);
+ return ok;
+}
+
+/* Ancient uintN nbytes is part of API/ABI, so use size_t length local. */
+JS_PUBLIC_API(JSBool)
+JS_EvaluateScriptForPrincipals(JSContext *cx, JSObject *obj,
+ JSPrincipals *principals,
+ const char *bytes, uintN nbytes,
+ const char *filename, uintN lineno,
+ jsval *rval)
+{
+ size_t length = nbytes;
+ jschar *chars;
+ JSBool ok;
+
+ CHECK_REQUEST(cx);
+ chars = js_InflateString(cx, bytes, &length);
+ if (!chars)
+ return JS_FALSE;
+ ok = JS_EvaluateUCScriptForPrincipals(cx, obj, principals, chars, length,
+ filename, lineno, rval);
+ JS_free(cx, chars);
+ return ok;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_EvaluateUCScript(JSContext *cx, JSObject *obj,
+ const jschar *chars, uintN length,
+ const char *filename, uintN lineno,
+ jsval *rval)
+{
+ CHECK_REQUEST(cx);
+ return JS_EvaluateUCScriptForPrincipals(cx, obj, NULL, chars, length,
+ filename, lineno, rval);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_EvaluateUCScriptForPrincipals(JSContext *cx, JSObject *obj,
+ JSPrincipals *principals,
+ const jschar *chars, uintN length,
+ const char *filename, uintN lineno,
+ jsval *rval)
+{
+ uint32 options;
+ JSScript *script;
+ JSBool ok;
+
+ CHECK_REQUEST(cx);
+ options = cx->options;
+ cx->options = options | JSOPTION_COMPILE_N_GO;
+ script = JS_CompileUCScriptForPrincipals(cx, obj, principals, chars, length,
+ filename, lineno);
+ cx->options = options;
+ if (!script)
+ return JS_FALSE;
+ ok = js_Execute(cx, obj, script, NULL, 0, rval);
+ LAST_FRAME_CHECKS(cx, ok);
+ JS_DestroyScript(cx, script);
+ return ok;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_CallFunction(JSContext *cx, JSObject *obj, JSFunction *fun, uintN argc,
+ jsval *argv, jsval *rval)
+{
+ JSBool ok;
+
+ CHECK_REQUEST(cx);
+ ok = js_InternalCall(cx, obj, OBJECT_TO_JSVAL(fun->object), argc, argv,
+ rval);
+ LAST_FRAME_CHECKS(cx, ok);
+ return ok;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_CallFunctionName(JSContext *cx, JSObject *obj, const char *name, uintN argc,
+ jsval *argv, jsval *rval)
+{
+ JSBool ok;
+ jsval fval;
+
+ CHECK_REQUEST(cx);
+#if JS_HAS_XML_SUPPORT
+ if (OBJECT_IS_XML(cx, obj)) {
+ JSXMLObjectOps *ops;
+ JSAtom *atom;
+
+ ops = (JSXMLObjectOps *) obj->map->ops;
+ atom = js_Atomize(cx, name, strlen(name), 0);
+ if (!atom)
+ return JS_FALSE;
+ obj = ops->getMethod(cx, obj, ATOM_TO_JSID(atom), &fval);
+ if (!obj)
+ return JS_FALSE;
+ } else
+#endif
+ if (!JS_GetProperty(cx, obj, name, &fval))
+ return JS_FALSE;
+ ok = js_InternalCall(cx, obj, fval, argc, argv, rval);
+ LAST_FRAME_CHECKS(cx, ok);
+ return ok;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_CallFunctionValue(JSContext *cx, JSObject *obj, jsval fval, uintN argc,
+ jsval *argv, jsval *rval)
+{
+ JSBool ok;
+
+ CHECK_REQUEST(cx);
+ ok = js_InternalCall(cx, obj, fval, argc, argv, rval);
+ LAST_FRAME_CHECKS(cx, ok);
+ return ok;
+}
+
+JS_PUBLIC_API(JSBranchCallback)
+JS_SetBranchCallback(JSContext *cx, JSBranchCallback cb)
+{
+ JSBranchCallback oldcb;
+
+ oldcb = cx->branchCallback;
+ cx->branchCallback = cb;
+ return oldcb;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_IsRunning(JSContext *cx)
+{
+ return cx->fp != NULL;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_IsConstructing(JSContext *cx)
+{
+ return cx->fp && (cx->fp->flags & JSFRAME_CONSTRUCTING);
+}
+
+JS_FRIEND_API(JSBool)
+JS_IsAssigning(JSContext *cx)
+{
+ JSStackFrame *fp;
+ jsbytecode *pc;
+
+ for (fp = cx->fp; fp && !fp->script; fp = fp->down)
+ continue;
+ if (!fp || !(pc = fp->pc))
+ return JS_FALSE;
+ return (js_CodeSpec[*pc].format & JOF_ASSIGNING) != 0;
+}
+
+JS_PUBLIC_API(void)
+JS_SetCallReturnValue2(JSContext *cx, jsval v)
+{
+#if JS_HAS_LVALUE_RETURN
+ cx->rval2 = v;
+ cx->rval2set = JS_TRUE;
+#endif
+}
+
+JS_PUBLIC_API(JSStackFrame *)
+JS_SaveFrameChain(JSContext *cx)
+{
+ JSStackFrame *fp;
+
+ fp = cx->fp;
+ if (!fp)
+ return fp;
+
+ JS_ASSERT(!fp->dormantNext);
+ fp->dormantNext = cx->dormantFrameChain;
+ cx->dormantFrameChain = fp;
+ cx->fp = NULL;
+ return fp;
+}
+
+JS_PUBLIC_API(void)
+JS_RestoreFrameChain(JSContext *cx, JSStackFrame *fp)
+{
+ JS_ASSERT(!cx->fp);
+ if (!fp)
+ return;
+
+ JS_ASSERT(cx->dormantFrameChain == fp);
+ cx->fp = fp;
+ cx->dormantFrameChain = fp->dormantNext;
+ fp->dormantNext = NULL;
+}
+
+/************************************************************************/
+
+JS_PUBLIC_API(JSString *)
+JS_NewString(JSContext *cx, char *bytes, size_t nbytes)
+{
+ size_t length = nbytes;
+ jschar *chars;
+ JSString *str;
+
+ CHECK_REQUEST(cx);
+
+ /* Make a UTF-16 vector from the 8-bit char codes in bytes. */
+ chars = js_InflateString(cx, bytes, &length);
+ if (!chars)
+ return NULL;
+
+ /* Free chars (but not bytes, which caller frees on error) if we fail. */
+ str = js_NewString(cx, chars, length, 0);
+ if (!str) {
+ JS_free(cx, chars);
+ return NULL;
+ }
+
+ /* Hand off bytes to the deflated string cache, if possible. */
+ if (!js_SetStringBytes(cx->runtime, str, bytes, nbytes))
+ JS_free(cx, bytes);
+ return str;
+}
+
+JS_PUBLIC_API(JSString *)
+JS_NewStringCopyN(JSContext *cx, const char *s, size_t n)
+{
+ jschar *js;
+ JSString *str;
+
+ CHECK_REQUEST(cx);
+ js = js_InflateString(cx, s, &n);
+ if (!js)
+ return NULL;
+ str = js_NewString(cx, js, n, 0);
+ if (!str)
+ JS_free(cx, js);
+ return str;
+}
+
+JS_PUBLIC_API(JSString *)
+JS_NewStringCopyZ(JSContext *cx, const char *s)
+{
+ size_t n;
+ jschar *js;
+ JSString *str;
+
+ CHECK_REQUEST(cx);
+ if (!s)
+ return cx->runtime->emptyString;
+ n = strlen(s);
+ js = js_InflateString(cx, s, &n);
+ if (!js)
+ return NULL;
+ str = js_NewString(cx, js, n, 0);
+ if (!str)
+ JS_free(cx, js);
+ return str;
+}
+
+JS_PUBLIC_API(JSString *)
+JS_InternString(JSContext *cx, const char *s)
+{
+ JSAtom *atom;
+
+ CHECK_REQUEST(cx);
+ atom = js_Atomize(cx, s, strlen(s), ATOM_INTERNED);
+ if (!atom)
+ return NULL;
+ return ATOM_TO_STRING(atom);
+}
+
+JS_PUBLIC_API(JSString *)
+JS_NewUCString(JSContext *cx, jschar *chars, size_t length)
+{
+ CHECK_REQUEST(cx);
+ return js_NewString(cx, chars, length, 0);
+}
+
+JS_PUBLIC_API(JSString *)
+JS_NewUCStringCopyN(JSContext *cx, const jschar *s, size_t n)
+{
+ CHECK_REQUEST(cx);
+ return js_NewStringCopyN(cx, s, n, 0);
+}
+
+JS_PUBLIC_API(JSString *)
+JS_NewUCStringCopyZ(JSContext *cx, const jschar *s)
+{
+ CHECK_REQUEST(cx);
+ if (!s)
+ return cx->runtime->emptyString;
+ return js_NewStringCopyZ(cx, s, 0);
+}
+
+JS_PUBLIC_API(JSString *)
+JS_InternUCStringN(JSContext *cx, const jschar *s, size_t length)
+{
+ JSAtom *atom;
+
+ CHECK_REQUEST(cx);
+ atom = js_AtomizeChars(cx, s, length, ATOM_INTERNED);
+ if (!atom)
+ return NULL;
+ return ATOM_TO_STRING(atom);
+}
+
+JS_PUBLIC_API(JSString *)
+JS_InternUCString(JSContext *cx, const jschar *s)
+{
+ return JS_InternUCStringN(cx, s, js_strlen(s));
+}
+
+JS_PUBLIC_API(char *)
+JS_GetStringBytes(JSString *str)
+{
+ JSRuntime *rt;
+ char *bytes;
+
+ rt = js_GetGCStringRuntime(str);
+ bytes = js_GetStringBytes(rt, str);
+ return bytes ? bytes : "";
+}
+
+JS_PUBLIC_API(jschar *)
+JS_GetStringChars(JSString *str)
+{
+ /*
+ * API botch (again, shades of JS_GetStringBytes): we have no cx to pass
+ * to js_UndependString (called by js_GetStringChars) for out-of-memory
+ * error reports, so js_UndependString passes NULL and suppresses errors.
+ * If it fails to convert a dependent string into an independent one, our
+ * caller will not be guaranteed a \u0000 terminator as a backstop. This
+ * may break some clients who already misbehave on embedded NULs.
+ *
+ * The gain of dependent strings, which cure quadratic and cubic growth
+ * rate bugs in string concatenation, is worth this slight loss in API
+ * compatibility.
+ */
+ jschar *chars;
+
+ chars = js_GetStringChars(str);
+ return chars ? chars : JSSTRING_CHARS(str);
+}
+
+JS_PUBLIC_API(size_t)
+JS_GetStringLength(JSString *str)
+{
+ return JSSTRING_LENGTH(str);
+}
+
+JS_PUBLIC_API(intN)
+JS_CompareStrings(JSString *str1, JSString *str2)
+{
+ return js_CompareStrings(str1, str2);
+}
+
+JS_PUBLIC_API(JSString *)
+JS_NewGrowableString(JSContext *cx, jschar *chars, size_t length)
+{
+ CHECK_REQUEST(cx);
+ return js_NewString(cx, chars, length, GCF_MUTABLE);
+}
+
+JS_PUBLIC_API(JSString *)
+JS_NewDependentString(JSContext *cx, JSString *str, size_t start,
+ size_t length)
+{
+ CHECK_REQUEST(cx);
+ return js_NewDependentString(cx, str, start, length, 0);
+}
+
+JS_PUBLIC_API(JSString *)
+JS_ConcatStrings(JSContext *cx, JSString *left, JSString *right)
+{
+ CHECK_REQUEST(cx);
+ return js_ConcatStrings(cx, left, right);
+}
+
+JS_PUBLIC_API(const jschar *)
+JS_UndependString(JSContext *cx, JSString *str)
+{
+ CHECK_REQUEST(cx);
+ return js_UndependString(cx, str);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_MakeStringImmutable(JSContext *cx, JSString *str)
+{
+ CHECK_REQUEST(cx);
+ if (!js_UndependString(cx, str))
+ return JS_FALSE;
+
+ *js_GetGCThingFlags(str) &= ~GCF_MUTABLE;
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_EncodeCharacters(JSContext *cx, const jschar *src, size_t srclen, char *dst,
+ size_t *dstlenp)
+{
+ return js_DeflateStringToBuffer(cx, src, srclen, dst, dstlenp);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_DecodeBytes(JSContext *cx, const char *src, size_t srclen, jschar *dst,
+ size_t *dstlenp)
+{
+ return js_InflateStringToBuffer(cx, src, srclen, dst, dstlenp);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_CStringsAreUTF8()
+{
+#ifdef JS_C_STRINGS_ARE_UTF8
+ return JS_TRUE;
+#else
+ return JS_FALSE;
+#endif
+}
+
+/************************************************************************/
+
+JS_PUBLIC_API(void)
+JS_ReportError(JSContext *cx, const char *format, ...)
+{
+ va_list ap;
+
+ va_start(ap, format);
+ js_ReportErrorVA(cx, JSREPORT_ERROR, format, ap);
+ va_end(ap);
+}
+
+JS_PUBLIC_API(void)
+JS_ReportErrorNumber(JSContext *cx, JSErrorCallback errorCallback,
+ void *userRef, const uintN errorNumber, ...)
+{
+ va_list ap;
+
+ va_start(ap, errorNumber);
+ js_ReportErrorNumberVA(cx, JSREPORT_ERROR, errorCallback, userRef,
+ errorNumber, JS_TRUE, ap);
+ va_end(ap);
+}
+
+JS_PUBLIC_API(void)
+JS_ReportErrorNumberUC(JSContext *cx, JSErrorCallback errorCallback,
+ void *userRef, const uintN errorNumber, ...)
+{
+ va_list ap;
+
+ va_start(ap, errorNumber);
+ js_ReportErrorNumberVA(cx, JSREPORT_ERROR, errorCallback, userRef,
+ errorNumber, JS_FALSE, ap);
+ va_end(ap);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_ReportWarning(JSContext *cx, const char *format, ...)
+{
+ va_list ap;
+ JSBool ok;
+
+ va_start(ap, format);
+ ok = js_ReportErrorVA(cx, JSREPORT_WARNING, format, ap);
+ va_end(ap);
+ return ok;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_ReportErrorFlagsAndNumber(JSContext *cx, uintN flags,
+ JSErrorCallback errorCallback, void *userRef,
+ const uintN errorNumber, ...)
+{
+ va_list ap;
+ JSBool ok;
+
+ va_start(ap, errorNumber);
+ ok = js_ReportErrorNumberVA(cx, flags, errorCallback, userRef,
+ errorNumber, JS_TRUE, ap);
+ va_end(ap);
+ return ok;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_ReportErrorFlagsAndNumberUC(JSContext *cx, uintN flags,
+ JSErrorCallback errorCallback, void *userRef,
+ const uintN errorNumber, ...)
+{
+ va_list ap;
+ JSBool ok;
+
+ va_start(ap, errorNumber);
+ ok = js_ReportErrorNumberVA(cx, flags, errorCallback, userRef,
+ errorNumber, JS_FALSE, ap);
+ va_end(ap);
+ return ok;
+}
+
+JS_PUBLIC_API(void)
+JS_ReportOutOfMemory(JSContext *cx)
+{
+ js_ReportOutOfMemory(cx);
+}
+
+JS_PUBLIC_API(JSErrorReporter)
+JS_SetErrorReporter(JSContext *cx, JSErrorReporter er)
+{
+ JSErrorReporter older;
+
+ older = cx->errorReporter;
+ cx->errorReporter = er;
+ return older;
+}
+
+/************************************************************************/
+
+/*
+ * Regular Expressions.
+ */
+JS_PUBLIC_API(JSObject *)
+JS_NewRegExpObject(JSContext *cx, char *bytes, size_t length, uintN flags)
+{
+ jschar *chars;
+ JSObject *obj;
+
+ CHECK_REQUEST(cx);
+ chars = js_InflateString(cx, bytes, &length);
+ if (!chars)
+ return NULL;
+ obj = js_NewRegExpObject(cx, NULL, chars, length, flags);
+ JS_free(cx, chars);
+ return obj;
+}
+
+JS_PUBLIC_API(JSObject *)
+JS_NewUCRegExpObject(JSContext *cx, jschar *chars, size_t length, uintN flags)
+{
+ CHECK_REQUEST(cx);
+ return js_NewRegExpObject(cx, NULL, chars, length, flags);
+}
+
+JS_PUBLIC_API(void)
+JS_SetRegExpInput(JSContext *cx, JSString *input, JSBool multiline)
+{
+ JSRegExpStatics *res;
+
+ CHECK_REQUEST(cx);
+ /* No locking required, cx is thread-private and input must be live. */
+ res = &cx->regExpStatics;
+ res->input = input;
+ res->multiline = multiline;
+ cx->runtime->gcPoke = JS_TRUE;
+}
+
+JS_PUBLIC_API(void)
+JS_ClearRegExpStatics(JSContext *cx)
+{
+ JSRegExpStatics *res;
+
+ /* No locking required, cx is thread-private and input must be live. */
+ res = &cx->regExpStatics;
+ res->input = NULL;
+ res->multiline = JS_FALSE;
+ res->parenCount = 0;
+ res->lastMatch = res->lastParen = js_EmptySubString;
+ res->leftContext = res->rightContext = js_EmptySubString;
+ cx->runtime->gcPoke = JS_TRUE;
+}
+
+JS_PUBLIC_API(void)
+JS_ClearRegExpRoots(JSContext *cx)
+{
+ JSRegExpStatics *res;
+
+ /* No locking required, cx is thread-private and input must be live. */
+ res = &cx->regExpStatics;
+ res->input = NULL;
+ cx->runtime->gcPoke = JS_TRUE;
+}
+
+/* TODO: compile, execute, get/set other statics... */
+
+/************************************************************************/
+
+JS_PUBLIC_API(void)
+JS_SetLocaleCallbacks(JSContext *cx, JSLocaleCallbacks *callbacks)
+{
+ cx->localeCallbacks = callbacks;
+}
+
+JS_PUBLIC_API(JSLocaleCallbacks *)
+JS_GetLocaleCallbacks(JSContext *cx)
+{
+ return cx->localeCallbacks;
+}
+
+/************************************************************************/
+
+JS_PUBLIC_API(JSBool)
+JS_IsExceptionPending(JSContext *cx)
+{
+ return (JSBool) cx->throwing;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_GetPendingException(JSContext *cx, jsval *vp)
+{
+ CHECK_REQUEST(cx);
+ if (!cx->throwing)
+ return JS_FALSE;
+ *vp = cx->exception;
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(void)
+JS_SetPendingException(JSContext *cx, jsval v)
+{
+ CHECK_REQUEST(cx);
+ cx->throwing = JS_TRUE;
+ cx->exception = v;
+}
+
+JS_PUBLIC_API(void)
+JS_ClearPendingException(JSContext *cx)
+{
+ cx->throwing = JS_FALSE;
+ cx->exception = JSVAL_VOID;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_ReportPendingException(JSContext *cx)
+{
+ JSBool save, ok;
+
+ CHECK_REQUEST(cx);
+
+ /*
+ * Set cx->creatingException to suppress the standard error-to-exception
+ * conversion done by all {js,JS}_Report* functions except for OOM. The
+ * cx->creatingException flag was added to suppress recursive divergence
+ * under js_ErrorToException, but it serves for our purposes here too.
+ */
+ save = cx->creatingException;
+ cx->creatingException = JS_TRUE;
+ ok = js_ReportUncaughtException(cx);
+ cx->creatingException = save;
+ return ok;
+}
+
+struct JSExceptionState {
+ JSBool throwing;
+ jsval exception;
+};
+
+JS_PUBLIC_API(JSExceptionState *)
+JS_SaveExceptionState(JSContext *cx)
+{
+ JSExceptionState *state;
+
+ CHECK_REQUEST(cx);
+ state = (JSExceptionState *) JS_malloc(cx, sizeof(JSExceptionState));
+ if (state) {
+ state->throwing = JS_GetPendingException(cx, &state->exception);
+ if (state->throwing && JSVAL_IS_GCTHING(state->exception))
+ js_AddRoot(cx, &state->exception, "JSExceptionState.exception");
+ }
+ return state;
+}
+
+JS_PUBLIC_API(void)
+JS_RestoreExceptionState(JSContext *cx, JSExceptionState *state)
+{
+ CHECK_REQUEST(cx);
+ if (state) {
+ if (state->throwing)
+ JS_SetPendingException(cx, state->exception);
+ else
+ JS_ClearPendingException(cx);
+ JS_DropExceptionState(cx, state);
+ }
+}
+
+JS_PUBLIC_API(void)
+JS_DropExceptionState(JSContext *cx, JSExceptionState *state)
+{
+ CHECK_REQUEST(cx);
+ if (state) {
+ if (state->throwing && JSVAL_IS_GCTHING(state->exception))
+ JS_RemoveRoot(cx, &state->exception);
+ JS_free(cx, state);
+ }
+}
+
+JS_PUBLIC_API(JSErrorReport *)
+JS_ErrorFromException(JSContext *cx, jsval v)
+{
+ CHECK_REQUEST(cx);
+ return js_ErrorFromException(cx, v);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_ThrowReportedError(JSContext *cx, const char *message,
+ JSErrorReport *reportp)
+{
+ return js_ErrorToException(cx, message, reportp);
+}
+
+#ifdef JS_THREADSAFE
+/*
+ * Get the owning thread id of a context. Returns 0 if the context is not
+ * owned by any thread.
+ */
+JS_PUBLIC_API(jsword)
+JS_GetContextThread(JSContext *cx)
+{
+ return JS_THREAD_ID(cx);
+}
+
+/*
+ * Set the current thread as the owning thread of a context. Returns the
+ * old owning thread id, or -1 if the operation failed.
+ */
+JS_PUBLIC_API(jsword)
+JS_SetContextThread(JSContext *cx)
+{
+ jsword old = JS_THREAD_ID(cx);
+ if (!js_SetContextThread(cx))
+ return -1;
+ return old;
+}
+
+JS_PUBLIC_API(jsword)
+JS_ClearContextThread(JSContext *cx)
+{
+ jsword old = JS_THREAD_ID(cx);
+ js_ClearContextThread(cx);
+ return old;
+}
+#endif
+
+/************************************************************************/
+
+#if defined(XP_WIN)
+#include <windows.h>
+/*
+ * Initialization routine for the JS DLL...
+ */
+
+/*
+ * Global Instance handle...
+ * In Win32 this is the module handle of the DLL.
+ *
+ * In Win16 this is the instance handle of the application
+ * which loaded the DLL.
+ */
+
+#ifdef _WIN32
+BOOL WINAPI DllMain (HINSTANCE hDLL, DWORD dwReason, LPVOID lpReserved)
+{
+ return TRUE;
+}
+
+#else /* !_WIN32 */
+
+int CALLBACK LibMain( HINSTANCE hInst, WORD wDataSeg,
+ WORD cbHeapSize, LPSTR lpszCmdLine )
+{
+ return TRUE;
+}
+
+BOOL CALLBACK __loadds WEP(BOOL fSystemExit)
+{
+ return TRUE;
+}
+
+#endif /* !_WIN32 */
+#endif /* XP_WIN */
diff --git a/third_party/js-1.7/jsapi.h b/third_party/js-1.7/jsapi.h
new file mode 100644
index 0000000..464f19f
--- /dev/null
+++ b/third_party/js-1.7/jsapi.h
@@ -0,0 +1,2220 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsapi_h___
+#define jsapi_h___
+/*
+ * JavaScript API.
+ */
+#include <stddef.h>
+#include <stdio.h>
+#include "jspubtd.h"
+
+JS_BEGIN_EXTERN_C
+
+/*
+ * Type tags stored in the low bits of a jsval.
+ */
+#define JSVAL_OBJECT 0x0 /* untagged reference to object */
+#define JSVAL_INT 0x1 /* tagged 31-bit integer value */
+#define JSVAL_DOUBLE 0x2 /* tagged reference to double */
+#define JSVAL_STRING 0x4 /* tagged reference to string */
+#define JSVAL_BOOLEAN 0x6 /* tagged boolean value */
+
+/* Type tag bitfield length and derived macros. */
+#define JSVAL_TAGBITS 3
+#define JSVAL_TAGMASK JS_BITMASK(JSVAL_TAGBITS)
+#define JSVAL_TAG(v) ((v) & JSVAL_TAGMASK)
+#define JSVAL_SETTAG(v,t) ((v) | (t))
+#define JSVAL_CLRTAG(v) ((v) & ~(jsval)JSVAL_TAGMASK)
+#define JSVAL_ALIGN JS_BIT(JSVAL_TAGBITS)
+
+/* Predicates for type testing. */
+#define JSVAL_IS_OBJECT(v) (JSVAL_TAG(v) == JSVAL_OBJECT)
+#define JSVAL_IS_NUMBER(v) (JSVAL_IS_INT(v) || JSVAL_IS_DOUBLE(v))
+#define JSVAL_IS_INT(v) (((v) & JSVAL_INT) && (v) != JSVAL_VOID)
+#define JSVAL_IS_DOUBLE(v) (JSVAL_TAG(v) == JSVAL_DOUBLE)
+#define JSVAL_IS_STRING(v) (JSVAL_TAG(v) == JSVAL_STRING)
+#define JSVAL_IS_BOOLEAN(v) (JSVAL_TAG(v) == JSVAL_BOOLEAN)
+#define JSVAL_IS_NULL(v) ((v) == JSVAL_NULL)
+#define JSVAL_IS_VOID(v) ((v) == JSVAL_VOID)
+#define JSVAL_IS_PRIMITIVE(v) (!JSVAL_IS_OBJECT(v) || JSVAL_IS_NULL(v))
+
+/* Objects, strings, and doubles are GC'ed. */
+#define JSVAL_IS_GCTHING(v) (!((v) & JSVAL_INT) && !JSVAL_IS_BOOLEAN(v))
+#define JSVAL_TO_GCTHING(v) ((void *)JSVAL_CLRTAG(v))
+#define JSVAL_TO_OBJECT(v) ((JSObject *)JSVAL_TO_GCTHING(v))
+#define JSVAL_TO_DOUBLE(v) ((jsdouble *)JSVAL_TO_GCTHING(v))
+#define JSVAL_TO_STRING(v) ((JSString *)JSVAL_TO_GCTHING(v))
+#define OBJECT_TO_JSVAL(obj) ((jsval)(obj))
+#define DOUBLE_TO_JSVAL(dp) JSVAL_SETTAG((jsval)(dp), JSVAL_DOUBLE)
+#define STRING_TO_JSVAL(str) JSVAL_SETTAG((jsval)(str), JSVAL_STRING)
+
+/* Lock and unlock the GC thing held by a jsval. */
+#define JSVAL_LOCK(cx,v) (JSVAL_IS_GCTHING(v) \
+ ? JS_LockGCThing(cx, JSVAL_TO_GCTHING(v)) \
+ : JS_TRUE)
+#define JSVAL_UNLOCK(cx,v) (JSVAL_IS_GCTHING(v) \
+ ? JS_UnlockGCThing(cx, JSVAL_TO_GCTHING(v)) \
+ : JS_TRUE)
+
+/* Domain limits for the jsval int type. */
+#define JSVAL_INT_BITS 31
+#define JSVAL_INT_POW2(n) ((jsval)1 << (n))
+#define JSVAL_INT_MIN ((jsval)1 - JSVAL_INT_POW2(30))
+#define JSVAL_INT_MAX (JSVAL_INT_POW2(30) - 1)
+#define INT_FITS_IN_JSVAL(i) ((jsuint)((i)+JSVAL_INT_MAX) <= 2*JSVAL_INT_MAX)
+#define JSVAL_TO_INT(v) ((jsint)(v) >> 1)
+#define INT_TO_JSVAL(i) (((jsval)(i) << 1) | JSVAL_INT)
+
+/* Convert between boolean and jsval. */
+#define JSVAL_TO_BOOLEAN(v) ((JSBool)((v) >> JSVAL_TAGBITS))
+#define BOOLEAN_TO_JSVAL(b) JSVAL_SETTAG((jsval)(b) << JSVAL_TAGBITS, \
+ JSVAL_BOOLEAN)
+
+/* A private data pointer (2-byte-aligned) can be stored as an int jsval. */
+#define JSVAL_TO_PRIVATE(v) ((void *)((v) & ~JSVAL_INT))
+#define PRIVATE_TO_JSVAL(p) ((jsval)(p) | JSVAL_INT)
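+
+/*
+ * Illustrative sketch (the names i, p, MyData, and my_alloc_data below are
+ * hypothetical, not part of this API): round-tripping values through the
+ * tag macros above.
+ *
+ *     jsint i = 42;
+ *     jsval v = INT_TO_JSVAL(i);
+ *
+ * after which JSVAL_IS_INT(v) holds and JSVAL_TO_INT(v) == 42; similarly,
+ * for a 2-byte-aligned private pointer p:
+ *
+ *     MyData *p = my_alloc_data();
+ *     jsval pv = PRIVATE_TO_JSVAL(p);
+ *     MyData *q = (MyData *) JSVAL_TO_PRIVATE(pv);
+ *
+ * leaves q == p.
+ */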
+
+/* Property attributes, set in JSPropertySpec and passed to API functions. */
+#define JSPROP_ENUMERATE 0x01 /* property is visible to for/in loop */
+#define JSPROP_READONLY 0x02 /* not settable: assignment is no-op */
+#define JSPROP_PERMANENT 0x04 /* property cannot be deleted */
+#define JSPROP_EXPORTED 0x08 /* property is exported from object */
+#define JSPROP_GETTER 0x10 /* property holds getter function */
+#define JSPROP_SETTER 0x20 /* property holds setter function */
+#define JSPROP_SHARED 0x40 /* don't allocate a value slot for this
+ property; don't copy the property on
+ set of the same-named property in an
+ object that delegates to a prototype
+ containing this property */
+#define JSPROP_INDEX 0x80 /* name is actually (jsint) index */
+
+/* Function flags, set in JSFunctionSpec and passed to JS_NewFunction etc. */
+#define JSFUN_LAMBDA 0x08 /* expressed, not declared, function */
+#define JSFUN_GETTER JSPROP_GETTER
+#define JSFUN_SETTER JSPROP_SETTER
+#define JSFUN_BOUND_METHOD 0x40 /* bind this to fun->object's parent */
+#define JSFUN_HEAVYWEIGHT 0x80 /* activation requires a Call object */
+
+#define JSFUN_DISJOINT_FLAGS(f) ((f) & 0x0f)
+#define JSFUN_GSFLAGS(f) ((f) & (JSFUN_GETTER | JSFUN_SETTER))
+
+#ifdef MOZILLA_1_8_BRANCH
+
+/*
+ * Squeeze three more bits into existing 8-bit flags by taking advantage of
+ * the invalid combination (JSFUN_GETTER | JSFUN_SETTER).
+ */
+#define JSFUN_GETTER_TEST(f) (JSFUN_GSFLAGS(f) == JSFUN_GETTER)
+#define JSFUN_SETTER_TEST(f) (JSFUN_GSFLAGS(f) == JSFUN_SETTER)
+#define JSFUN_FLAGS_TEST(f,t) (JSFUN_GSFLAGS(~(f)) ? (f) & (t) : 0)
+#define JSFUN_BOUND_METHOD_TEST(f) JSFUN_FLAGS_TEST(f, JSFUN_BOUND_METHOD)
+#define JSFUN_HEAVYWEIGHT_TEST(f) JSFUN_FLAGS_TEST(f, JSFUN_HEAVYWEIGHT)
+
+#define JSFUN_GSFLAG2ATTR(f) (JSFUN_GETTER_TEST(f) ? JSPROP_GETTER : \
+ JSFUN_SETTER_TEST(f) ? JSPROP_SETTER : 0)
+
+#define JSFUN_THISP_FLAGS(f) (JSFUN_GSFLAGS(~(f)) ? 0 : \
+ (f) & JSFUN_THISP_PRIMITIVE)
+#define JSFUN_THISP_TEST(f,t) ((f) == (t) || (f) == JSFUN_THISP_PRIMITIVE)
+
+#define JSFUN_THISP_STRING 0x30 /* |this| may be a primitive string */
+#define JSFUN_THISP_NUMBER 0x70 /* |this| may be a primitive number */
+#define JSFUN_THISP_BOOLEAN 0xb0 /* |this| may be a primitive boolean */
+#define JSFUN_THISP_PRIMITIVE 0xf0 /* |this| may be any primitive value */
+
+#define JSFUN_FLAGS_MASK 0xf8 /* overlay JSFUN_* attributes */
+
+#else
+
+#define JSFUN_GETTER_TEST(f) ((f) & JSFUN_GETTER)
+#define JSFUN_SETTER_TEST(f) ((f) & JSFUN_SETTER)
+#define JSFUN_BOUND_METHOD_TEST(f) ((f) & JSFUN_BOUND_METHOD)
+#define JSFUN_HEAVYWEIGHT_TEST(f) ((f) & JSFUN_HEAVYWEIGHT)
+
+#define JSFUN_GSFLAG2ATTR(f) JSFUN_GSFLAGS(f)
+
+#define JSFUN_THISP_FLAGS(f) (f)
+#define JSFUN_THISP_TEST(f,t) ((f) & t)
+
+#define JSFUN_THISP_STRING 0x0100 /* |this| may be a primitive string */
+#define JSFUN_THISP_NUMBER 0x0200 /* |this| may be a primitive number */
+#define JSFUN_THISP_BOOLEAN 0x0400 /* |this| may be a primitive boolean */
+#define JSFUN_THISP_PRIMITIVE 0x0700 /* |this| may be any primitive value */
+
+#define JSFUN_FLAGS_MASK 0x07f8 /* overlay JSFUN_* attributes --
+ note that bit #15 is used internally
+ to flag interpreted functions */
+
+#endif
+
+/*
+ * Re-use JSFUN_LAMBDA, which applies only to scripted functions, for use in
+ * JSFunctionSpec arrays that specify generic native prototype methods, i.e.,
+ * methods of a class prototype that are exposed as static methods taking an
+ * extra leading argument: the generic |this| parameter.
+ *
+ * If you set this flag in a JSFunctionSpec struct's flags initializer, then
+ * that struct must live at least as long as the native static method object
+ * created due to this flag by JS_DefineFunctions or JS_InitClass. Typically
+ * JSFunctionSpec structs are allocated in static arrays.
+ */
+#define JSFUN_GENERIC_NATIVE JSFUN_LAMBDA
+
+/*
+ * Well-known JS values. The extern'd variables are initialized when the
+ * first JSContext is created by JS_NewContext (see below).
+ */
+#define JSVAL_VOID INT_TO_JSVAL(0 - JSVAL_INT_POW2(30))
+#define JSVAL_NULL OBJECT_TO_JSVAL(0)
+#define JSVAL_ZERO INT_TO_JSVAL(0)
+#define JSVAL_ONE INT_TO_JSVAL(1)
+#define JSVAL_FALSE BOOLEAN_TO_JSVAL(JS_FALSE)
+#define JSVAL_TRUE BOOLEAN_TO_JSVAL(JS_TRUE)
+
+/*
+ * Microseconds since the epoch, midnight, January 1, 1970 UTC. See the
+ * comment in jstypes.h regarding safe int64 usage.
+ */
+extern JS_PUBLIC_API(int64)
+JS_Now();
+
+/* Don't want to export data, so provide accessors for non-inline jsvals. */
+extern JS_PUBLIC_API(jsval)
+JS_GetNaNValue(JSContext *cx);
+
+extern JS_PUBLIC_API(jsval)
+JS_GetNegativeInfinityValue(JSContext *cx);
+
+extern JS_PUBLIC_API(jsval)
+JS_GetPositiveInfinityValue(JSContext *cx);
+
+extern JS_PUBLIC_API(jsval)
+JS_GetEmptyStringValue(JSContext *cx);
+
+/*
+ * Format is a string of the following characters (spaces are insignificant),
+ * specifying the tabulated type conversions:
+ *
+ * b JSBool Boolean
+ * c uint16/jschar ECMA uint16, Unicode char
+ * i int32 ECMA int32
+ * u uint32 ECMA uint32
+ * j int32 Rounded int32 (coordinate)
+ * d jsdouble IEEE double
+ * I jsdouble Integral IEEE double
+ * s char * C string
+ * S JSString * Unicode string, accessed by a JSString pointer
+ * W jschar * Unicode character vector, 0-terminated (W for wide)
+ * o JSObject * Object reference
+ * f JSFunction * Function private
+ * v jsval Argument value (no conversion)
+ * * N/A Skip this argument (no vararg)
+ * / N/A End of required arguments
+ *
+ * The variable argument list after format must consist of &b, &c, &s, e.g.,
+ * where those variables have the types given above. For the pointer types
+ * char *, JSString *, and JSObject *, the pointed-at memory returned belongs
+ * to the JS runtime, not to the calling native code. The runtime promises
+ * to keep this memory valid so long as argv refers to allocated stack space
+ * (so long as the native function is active).
+ *
+ * Fewer arguments than format specifies may be passed only if there is a /
+ * in format after the last required argument specifier and argc is at least
+ * the number of required arguments. More arguments than format specifies
+ * may be passed without error; it is up to the caller to deal with trailing
+ * unconverted arguments.
+ */
+extern JS_PUBLIC_API(JSBool)
+JS_ConvertArguments(JSContext *cx, uintN argc, jsval *argv, const char *format,
+ ...);
+
+#ifdef va_start
+extern JS_PUBLIC_API(JSBool)
+JS_ConvertArgumentsVA(JSContext *cx, uintN argc, jsval *argv,
+ const char *format, va_list ap);
+#endif
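+
+/*
+ * Illustrative sketch (my_setName and its argument names are hypothetical):
+ * a native that requires a string and accepts an optional int32 would pass
+ * the format "s/i", using the conversion characters tabulated above. The
+ * char * filled in for "s" belongs to the JS runtime and must not be freed.
+ *
+ *     static JSBool
+ *     my_setName(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ *                jsval *rval)
+ *     {
+ *         char *name;
+ *         int32 flags = 0;
+ *
+ *         if (!JS_ConvertArguments(cx, argc, argv, "s/i", &name, &flags))
+ *             return JS_FALSE;
+ *         *rval = JSVAL_VOID;
+ *         return JS_TRUE;
+ *     }
+ */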
+
+/*
+ * Inverse of JS_ConvertArguments: scan format and convert trailing arguments
+ * into jsvals, GC-rooted if necessary by the JS stack. Return null on error,
+ * and a pointer to the new argument vector on success. Also return a stack
+ * mark on success via *markp, in which case the caller must eventually clean
+ * up by calling JS_PopArguments.
+ *
+ * Note that the number of actual arguments supplied is specified exclusively
+ * by format, so there is no argc parameter.
+ */
+extern JS_PUBLIC_API(jsval *)
+JS_PushArguments(JSContext *cx, void **markp, const char *format, ...);
+
+#ifdef va_start
+extern JS_PUBLIC_API(jsval *)
+JS_PushArgumentsVA(JSContext *cx, void **markp, const char *format, va_list ap);
+#endif
+
+extern JS_PUBLIC_API(void)
+JS_PopArguments(JSContext *cx, void *mark);
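+
+/*
+ * Illustrative sketch (the callee name "lookup" and the pushed values are
+ * hypothetical): push a string and an int32, call a method with them, then
+ * release the stack space via the mark returned through *markp.
+ *
+ *     void *mark;
+ *     jsval *argv;
+ *     jsval rval;
+ *     JSBool ok;
+ *
+ *     argv = JS_PushArguments(cx, &mark, "si", "nodes", (int32) 7);
+ *     if (!argv)
+ *         return JS_FALSE;
+ *     ok = JS_CallFunctionName(cx, obj, "lookup", 2, argv, &rval);
+ *     JS_PopArguments(cx, mark);
+ *     return ok;
+ */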
+
+#ifdef JS_ARGUMENT_FORMATTER_DEFINED
+
+/*
+ * Add and remove a format string handler for JS_{Convert,Push}Arguments{,VA}.
+ * The handler function has this signature (see jspubtd.h):
+ *
+ * JSBool MyArgumentFormatter(JSContext *cx, const char *format,
+ * JSBool fromJS, jsval **vpp, va_list *app);
+ *
+ * It should return true on success, and return false after reporting an error
+ * or detecting an already-reported error.
+ *
+ * For a given format string, for example "AA", the formatter is called from
+ * JS_ConvertArgumentsVA like so:
+ *
+ * formatter(cx, "AA...", JS_TRUE, &sp, &ap);
+ *
+ * sp points into the arguments array on the JS stack, while ap points into
+ * the stdarg.h va_list on the C stack. The JS_TRUE passed for fromJS tells
+ * the formatter to convert zero or more jsvals at sp to zero or more C values
+ * accessed via pointers-to-values at ap, updating both sp (via *vpp) and ap
+ * (via *app) to point past the converted arguments and their result pointers
+ * on the C stack.
+ *
+ * When called from JS_PushArgumentsVA, the formatter is invoked thus:
+ *
+ * formatter(cx, "AA...", JS_FALSE, &sp, &ap);
+ *
+ * where JS_FALSE for fromJS means to wrap the C values at ap according to the
+ * format specifier and store them at sp, updating ap and sp appropriately.
+ *
+ * The "..." after "AA" is the rest of the format string that was passed into
+ * JS_{Convert,Push}Arguments{,VA}. The actual format trailing substring used
+ * in each Convert or PushArguments call is passed to the formatter, so that
+ * one such function may implement several formats, in order to share code.
+ *
+ * Remove just forgets about any handler associated with format. Add does not
+ * copy format, it points at the string storage allocated by the caller, which
+ * is typically a string constant. If format is in dynamic storage, it is up
+ * to the caller to keep the string alive until Remove is called.
+ */
+extern JS_PUBLIC_API(JSBool)
+JS_AddArgumentFormatter(JSContext *cx, const char *format,
+ JSArgumentFormatter formatter);
+
+extern JS_PUBLIC_API(void)
+JS_RemoveArgumentFormatter(JSContext *cx, const char *format);
+
+#endif /* JS_ARGUMENT_FORMATTER_DEFINED */
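+
+/*
+ * Illustrative sketch (MyRect, my_ValueToRect, and the "R" format character
+ * are all hypothetical, and only the JS-to-C direction is handled): a
+ * formatter that lets JS_ConvertArguments accept a rectangle argument.
+ *
+ *     static JSBool
+ *     my_RectFormatter(JSContext *cx, const char *format, JSBool fromJS,
+ *                      jsval **vpp, va_list *app)
+ *     {
+ *         jsval *sp = *vpp;
+ *         MyRect **rectp = va_arg(*app, MyRect **);
+ *
+ *         if (!fromJS || !my_ValueToRect(cx, *sp, rectp))
+ *             return JS_FALSE;
+ *         *vpp = sp + 1;
+ *         return JS_TRUE;
+ *     }
+ *
+ * registered and used like so:
+ *
+ *     JS_AddArgumentFormatter(cx, "R", my_RectFormatter);
+ *     ok = JS_ConvertArguments(cx, argc, argv, "R", &rect);
+ *     JS_RemoveArgumentFormatter(cx, "R");
+ */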
+
+extern JS_PUBLIC_API(JSBool)
+JS_ConvertValue(JSContext *cx, jsval v, JSType type, jsval *vp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_ValueToObject(JSContext *cx, jsval v, JSObject **objp);
+
+extern JS_PUBLIC_API(JSFunction *)
+JS_ValueToFunction(JSContext *cx, jsval v);
+
+extern JS_PUBLIC_API(JSFunction *)
+JS_ValueToConstructor(JSContext *cx, jsval v);
+
+extern JS_PUBLIC_API(JSString *)
+JS_ValueToString(JSContext *cx, jsval v);
+
+extern JS_PUBLIC_API(JSBool)
+JS_ValueToNumber(JSContext *cx, jsval v, jsdouble *dp);
+
+/*
+ * Convert a value to a number, then to an int32, according to the ECMA rules
+ * for ToInt32.
+ */
+extern JS_PUBLIC_API(JSBool)
+JS_ValueToECMAInt32(JSContext *cx, jsval v, int32 *ip);
+
+/*
+ * Convert a value to a number, then to a uint32, according to the ECMA rules
+ * for ToUint32.
+ */
+extern JS_PUBLIC_API(JSBool)
+JS_ValueToECMAUint32(JSContext *cx, jsval v, uint32 *ip);
+
+/*
+ * Convert a value to a number, then to an int32 if it fits by rounding to
+ * nearest; but failing with an error report if the double is out of range
+ * or unordered.
+ */
+extern JS_PUBLIC_API(JSBool)
+JS_ValueToInt32(JSContext *cx, jsval v, int32 *ip);
+
+/*
+ * ECMA ToUint16, for mapping a jsval to a Unicode point.
+ */
+extern JS_PUBLIC_API(JSBool)
+JS_ValueToUint16(JSContext *cx, jsval v, uint16 *ip);
+
+extern JS_PUBLIC_API(JSBool)
+JS_ValueToBoolean(JSContext *cx, jsval v, JSBool *bp);
+
+extern JS_PUBLIC_API(JSType)
+JS_TypeOfValue(JSContext *cx, jsval v);
+
+extern JS_PUBLIC_API(const char *)
+JS_GetTypeName(JSContext *cx, JSType type);
+
+/************************************************************************/
+
+/*
+ * Initialization, locking, contexts, and memory allocation.
+ */
+#define JS_NewRuntime JS_Init
+#define JS_DestroyRuntime JS_Finish
+#define JS_LockRuntime JS_Lock
+#define JS_UnlockRuntime JS_Unlock
+
+extern JS_PUBLIC_API(JSRuntime *)
+JS_NewRuntime(uint32 maxbytes);
+
+extern JS_PUBLIC_API(void)
+JS_DestroyRuntime(JSRuntime *rt);
+
+extern JS_PUBLIC_API(void)
+JS_ShutDown(void);
+
+JS_PUBLIC_API(void *)
+JS_GetRuntimePrivate(JSRuntime *rt);
+
+JS_PUBLIC_API(void)
+JS_SetRuntimePrivate(JSRuntime *rt, void *data);
+
+#ifdef JS_THREADSAFE
+
+extern JS_PUBLIC_API(void)
+JS_BeginRequest(JSContext *cx);
+
+extern JS_PUBLIC_API(void)
+JS_EndRequest(JSContext *cx);
+
+/* Yield to pending GC operations, regardless of request depth */
+extern JS_PUBLIC_API(void)
+JS_YieldRequest(JSContext *cx);
+
+extern JS_PUBLIC_API(jsrefcount)
+JS_SuspendRequest(JSContext *cx);
+
+extern JS_PUBLIC_API(void)
+JS_ResumeRequest(JSContext *cx, jsrefcount saveDepth);
+
+#ifdef __cplusplus
+JS_END_EXTERN_C
+
+class JSAutoRequest {
+ public:
+ JSAutoRequest(JSContext *cx) : mContext(cx), mSaveDepth(0) {
+ JS_BeginRequest(mContext);
+ }
+ ~JSAutoRequest() {
+ JS_EndRequest(mContext);
+ }
+
+ void suspend() {
+ mSaveDepth = JS_SuspendRequest(mContext);
+ }
+ void resume() {
+ JS_ResumeRequest(mContext, mSaveDepth);
+ }
+
+ protected:
+ JSContext *mContext;
+ jsrefcount mSaveDepth;
+
+#if 0
+ private:
+ static void *operator new(size_t) CPP_THROW_NEW { return 0; };
+ static void operator delete(void *, size_t) { };
+#endif
+};
+
+JS_BEGIN_EXTERN_C
+#endif
+
+#endif /* JS_THREADSAFE */
+
+extern JS_PUBLIC_API(void)
+JS_Lock(JSRuntime *rt);
+
+extern JS_PUBLIC_API(void)
+JS_Unlock(JSRuntime *rt);
+
+extern JS_PUBLIC_API(JSContextCallback)
+JS_SetContextCallback(JSRuntime *rt, JSContextCallback cxCallback);
+
+extern JS_PUBLIC_API(JSContext *)
+JS_NewContext(JSRuntime *rt, size_t stackChunkSize);
+
+extern JS_PUBLIC_API(void)
+JS_DestroyContext(JSContext *cx);
+
+extern JS_PUBLIC_API(void)
+JS_DestroyContextNoGC(JSContext *cx);
+
+extern JS_PUBLIC_API(void)
+JS_DestroyContextMaybeGC(JSContext *cx);
+
+extern JS_PUBLIC_API(void *)
+JS_GetContextPrivate(JSContext *cx);
+
+extern JS_PUBLIC_API(void)
+JS_SetContextPrivate(JSContext *cx, void *data);
+
+extern JS_PUBLIC_API(JSRuntime *)
+JS_GetRuntime(JSContext *cx);
+
+extern JS_PUBLIC_API(JSContext *)
+JS_ContextIterator(JSRuntime *rt, JSContext **iterp);
+
+extern JS_PUBLIC_API(JSVersion)
+JS_GetVersion(JSContext *cx);
+
+extern JS_PUBLIC_API(JSVersion)
+JS_SetVersion(JSContext *cx, JSVersion version);
+
+extern JS_PUBLIC_API(const char *)
+JS_VersionToString(JSVersion version);
+
+extern JS_PUBLIC_API(JSVersion)
+JS_StringToVersion(const char *string);
+
+/*
+ * JS options are orthogonal to version, and may be freely composed with one
+ * another as well as with version.
+ *
+ * JSOPTION_VAROBJFIX is recommended -- see the comments associated with the
+ * prototypes for JS_ExecuteScript, JS_EvaluateScript, etc.
+ */
+#define JSOPTION_STRICT JS_BIT(0) /* warn on dubious practice */
+#define JSOPTION_WERROR JS_BIT(1) /* convert warning to error */
+#define JSOPTION_VAROBJFIX JS_BIT(2) /* make JS_EvaluateScript use
+ the last object on its 'obj'
+ param's scope chain as the
+ ECMA 'variables object' */
+#define JSOPTION_PRIVATE_IS_NSISUPPORTS \
+ JS_BIT(3) /* context private data points
+ to an nsISupports subclass */
+#define JSOPTION_COMPILE_N_GO JS_BIT(4) /* caller of JS_Compile*Script
+ promises to execute compiled
+ script once only; enables
+ compile-time scope chain
+ resolution of consts. */
+#define JSOPTION_ATLINE JS_BIT(5) /* //@line number ["filename"]
+ option supported for the
+ XUL preprocessor and kindred
+ beasts. */
+#define JSOPTION_XML JS_BIT(6) /* ECMAScript for XML support:
+ parse <!-- --> as a token,
+ not backward compatible with
+ the comment-hiding hack used
+ in HTML script tags. */
+#define JSOPTION_NATIVE_BRANCH_CALLBACK \
+ JS_BIT(7) /* the branch callback set by
+ JS_SetBranchCallback may be
+ called with a null script
+ parameter, by native code
+ that loops intensively */
+#define JSOPTION_DONT_REPORT_UNCAUGHT \
+ JS_BIT(8) /* When returning from the
+ outermost API call, prevent
+ uncaught exceptions from
+ being converted to error
+ reports */
+
+extern JS_PUBLIC_API(uint32)
+JS_GetOptions(JSContext *cx);
+
+extern JS_PUBLIC_API(uint32)
+JS_SetOptions(JSContext *cx, uint32 options);
+
+extern JS_PUBLIC_API(uint32)
+JS_ToggleOptions(JSContext *cx, uint32 options);
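+
+/*
+ * For example, an embedding that wants the recommended JSOPTION_VAROBJFIX
+ * behavior plus strict warnings might do (a sketch; adjust to taste):
+ *
+ *     JS_SetOptions(cx, JS_GetOptions(cx) | JSOPTION_VAROBJFIX |
+ *                       JSOPTION_STRICT);
+ */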
+
+extern JS_PUBLIC_API(const char *)
+JS_GetImplementationVersion(void);
+
+extern JS_PUBLIC_API(JSObject *)
+JS_GetGlobalObject(JSContext *cx);
+
+extern JS_PUBLIC_API(void)
+JS_SetGlobalObject(JSContext *cx, JSObject *obj);
+
+/*
+ * Initialize standard JS class constructors, prototypes, and any top-level
+ * functions and constants associated with the standard classes (e.g. isNaN
+ * for Number).
+ *
+ * NB: This sets cx's global object to obj if it was null.
+ */
+extern JS_PUBLIC_API(JSBool)
+JS_InitStandardClasses(JSContext *cx, JSObject *obj);
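+
+/*
+ * Illustrative bootstrap sketch (my_global_class is a hypothetical JSClass,
+ * typically one using JSCLASS_GLOBAL_FLAGS; the sizes are arbitrary):
+ *
+ *     JSRuntime *rt = JS_NewRuntime(8L * 1024L * 1024L);
+ *     JSContext *cx = rt ? JS_NewContext(rt, 8192) : NULL;
+ *     JSObject *global;
+ *
+ *     if (!cx)
+ *         return JS_FALSE;
+ *     global = JS_NewObject(cx, &my_global_class, NULL, NULL);
+ *     if (!global || !JS_InitStandardClasses(cx, global))
+ *         return JS_FALSE;
+ */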
+
+/*
+ * Resolve id, which must contain either a string or an int, to a standard
+ * class name in obj if possible, defining the class's constructor and/or
+ * prototype and storing true in *resolved. If id does not name a standard
+ * class or a top-level property induced by initializing a standard class,
+ * store false in *resolved and just return true. Return false on error,
+ * as usual for JSBool result-typed API entry points.
+ *
+ * This API can be called directly from a global object class's resolve op,
+ * to define standard classes lazily. The class's enumerate op should call
+ * JS_EnumerateStandardClasses(cx, obj), to define eagerly during for..in
+ * loops any classes not yet resolved lazily.
+ */
+extern JS_PUBLIC_API(JSBool)
+JS_ResolveStandardClass(JSContext *cx, JSObject *obj, jsval id,
+ JSBool *resolved);
+
+extern JS_PUBLIC_API(JSBool)
+JS_EnumerateStandardClasses(JSContext *cx, JSObject *obj);
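+
+/*
+ * Illustrative sketch of the lazy pattern described above (my_global_resolve
+ * and my_global_enumerate are hypothetical hooks wired into the global
+ * object's JSClass):
+ *
+ *     static JSBool
+ *     my_global_resolve(JSContext *cx, JSObject *obj, jsval id)
+ *     {
+ *         JSBool resolved;
+ *
+ *         return JS_ResolveStandardClass(cx, obj, id, &resolved);
+ *     }
+ *
+ *     static JSBool
+ *     my_global_enumerate(JSContext *cx, JSObject *obj)
+ *     {
+ *         return JS_EnumerateStandardClasses(cx, obj);
+ *     }
+ */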
+
+/*
+ * Enumerate any already-resolved standard class ids into ida, or into a new
+ * JSIdArray if ida is null. Return the augmented array on success, null on
+ * failure with ida (if it was non-null on entry) destroyed.
+ */
+extern JS_PUBLIC_API(JSIdArray *)
+JS_EnumerateResolvedStandardClasses(JSContext *cx, JSObject *obj,
+ JSIdArray *ida);
+
+extern JS_PUBLIC_API(JSBool)
+JS_GetClassObject(JSContext *cx, JSObject *obj, JSProtoKey key,
+ JSObject **objp);
+
+extern JS_PUBLIC_API(JSObject *)
+JS_GetScopeChain(JSContext *cx);
+
+extern JS_PUBLIC_API(void *)
+JS_malloc(JSContext *cx, size_t nbytes);
+
+extern JS_PUBLIC_API(void *)
+JS_realloc(JSContext *cx, void *p, size_t nbytes);
+
+extern JS_PUBLIC_API(void)
+JS_free(JSContext *cx, void *p);
+
+extern JS_PUBLIC_API(char *)
+JS_strdup(JSContext *cx, const char *s);
+
+extern JS_PUBLIC_API(jsdouble *)
+JS_NewDouble(JSContext *cx, jsdouble d);
+
+extern JS_PUBLIC_API(JSBool)
+JS_NewDoubleValue(JSContext *cx, jsdouble d, jsval *rval);
+
+extern JS_PUBLIC_API(JSBool)
+JS_NewNumberValue(JSContext *cx, jsdouble d, jsval *rval);
+
+/*
+ * A JS GC root is a pointer to a JSObject *, JSString *, or jsdouble * that
+ * itself points into the GC heap (more recently, we support this extension:
+ * a root may be a pointer to a jsval v for which JSVAL_IS_GCTHING(v) is true).
+ *
+ * Therefore, you never pass JSObject *obj to JS_AddRoot(cx, obj). You always
+ * call JS_AddRoot(cx, &obj), passing obj by reference. And later, before obj
+ * or the structure it is embedded within goes out of scope or is freed, you
+ * must call JS_RemoveRoot(cx, &obj).
+ *
+ * Also, use JS_AddNamedRoot(cx, &structPtr->memberObj, "structPtr->memberObj")
+ * in preference to JS_AddRoot(cx, &structPtr->memberObj), in order to identify
+ * roots by their source callsites. This way, you can find the callsite while
+ * debugging if you should fail to do JS_RemoveRoot(cx, &structPtr->memberObj)
+ * before freeing structPtr's memory.
+ */
+extern JS_PUBLIC_API(JSBool)
+JS_AddRoot(JSContext *cx, void *rp);
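+
+/*
+ * Illustrative sketch (my_cached_obj is a hypothetical embedding global):
+ * note that the *address* of the JSObject * is rooted, and the root must be
+ * removed before that storage goes away.
+ *
+ *     static JSObject *my_cached_obj;
+ *
+ *     if (!JS_AddNamedRoot(cx, &my_cached_obj, "my_cached_obj"))
+ *         return JS_FALSE;
+ *     my_cached_obj = JS_NewObject(cx, NULL, NULL, NULL);
+ *
+ * and later, when the cached object is no longer needed:
+ *
+ *     JS_RemoveRoot(cx, &my_cached_obj);
+ */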
+
+#ifdef NAME_ALL_GC_ROOTS
+#define JS_DEFINE_TO_TOKEN(def) #def
+#define JS_DEFINE_TO_STRING(def) JS_DEFINE_TO_TOKEN(def)
+#define JS_AddRoot(cx,rp) JS_AddNamedRoot((cx), (rp), (__FILE__ ":" JS_DEFINE_TO_STRING(__LINE__)))
+#endif
+
+extern JS_PUBLIC_API(JSBool)
+JS_AddNamedRoot(JSContext *cx, void *rp, const char *name);
+
+extern JS_PUBLIC_API(JSBool)
+JS_AddNamedRootRT(JSRuntime *rt, void *rp, const char *name);
+
+extern JS_PUBLIC_API(JSBool)
+JS_RemoveRoot(JSContext *cx, void *rp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_RemoveRootRT(JSRuntime *rt, void *rp);
+
+/*
+ * The last GC thing of each type (object, string, double, external string
+ * types) created on a given context is kept alive until another thing of the
+ * same type is created, using a newborn root in the context. These newborn
+ * roots help native code protect newly-created GC-things from GC invocations
+ * activated before those things can be rooted using local or global roots.
+ *
+ * However, the newborn roots can also entrain great gobs of garbage, so the
+ * JS_GC entry point clears them for the context on which GC is being forced.
+ * Embeddings may need to do likewise for all contexts.
+ *
+ * See the scoped local root API immediately below for a better way to manage
+ * newborns in cases where native hooks (functions, getters, setters, etc.)
+ * create many GC-things, potentially without connecting them to predefined
+ * local roots such as *rval or argv[i] in an active native function. Using
+ * JS_EnterLocalRootScope disables updating of the context's per-gc-thing-type
+ * newborn roots, until control flow unwinds and leaves the outermost nesting
+ * local root scope.
+ */
+extern JS_PUBLIC_API(void)
+JS_ClearNewbornRoots(JSContext *cx);
+
+/*
+ * Scoped local root management allows native functions, getter/setters, etc.
+ * to avoid worrying about the newborn root pigeon-holes, overloading local
+ * roots allocated in argv and *rval, or ending up having to call JS_Add*Root
+ * and JS_RemoveRoot to manage global roots temporarily.
+ *
+ * Instead, calling JS_EnterLocalRootScope and JS_LeaveLocalRootScope around
+ * the body of the native hook causes the engine to allocate a local root for
+ * each newborn created in between the two API calls, using a local root stack
+ * associated with cx. For example:
+ *
+ * JSBool
+ * my_GetProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+ * {
+ * JSBool ok;
+ *
+ * if (!JS_EnterLocalRootScope(cx))
+ * return JS_FALSE;
+ * ok = my_GetPropertyBody(cx, obj, id, vp);
+ * JS_LeaveLocalRootScope(cx);
+ * return ok;
+ * }
+ *
+ * NB: JS_LeaveLocalRootScope must be called once for every prior successful
+ * call to JS_EnterLocalRootScope. If JS_EnterLocalRootScope fails, you must
+ * not make the matching JS_LeaveLocalRootScope call.
+ *
+ * JS_LeaveLocalRootScopeWithResult(cx, rval) is an alternative way to leave
+ * a local root scope that protects a result or return value, by effectively
+ * pushing it in the caller's local root scope.
+ *
+ * In case a native hook allocates many objects or other GC-things, but the
+ * native protects some of those GC-things by storing them as property values
+ * in an object that is itself protected, the hook can call JS_ForgetLocalRoot
+ * to free the local root automatically pushed for the now-protected GC-thing.
+ *
+ * JS_ForgetLocalRoot works on any GC-thing allocated in the current local
+ * root scope, but it's more time-efficient when called on references to more
+ * recently created GC-things. Calling it successively on other than the most
+ * recently allocated GC-thing will tend to average the time inefficiency, and
+ * may risk O(n^2) growth rate, but in any event, you shouldn't allocate too
+ * many local roots if you can root as you go (build a tree of objects from
+ * the top down, forgetting each latest-allocated GC-thing immediately upon
+ * linking it to its parent).
+ */
+extern JS_PUBLIC_API(JSBool)
+JS_EnterLocalRootScope(JSContext *cx);
+
+extern JS_PUBLIC_API(void)
+JS_LeaveLocalRootScope(JSContext *cx);
+
+extern JS_PUBLIC_API(void)
+JS_LeaveLocalRootScopeWithResult(JSContext *cx, jsval rval);
+
+extern JS_PUBLIC_API(void)
+JS_ForgetLocalRoot(JSContext *cx, void *thing);
+
+#ifdef __cplusplus
+JS_END_EXTERN_C
+
+class JSAutoLocalRootScope {
+ public:
+ JSAutoLocalRootScope(JSContext *cx) : mContext(cx) {
+ JS_EnterLocalRootScope(mContext);
+ }
+ ~JSAutoLocalRootScope() {
+ JS_LeaveLocalRootScope(mContext);
+ }
+
+ void forget(void *thing) {
+ JS_ForgetLocalRoot(mContext, thing);
+ }
+
+ protected:
+ JSContext *mContext;
+
+#if 0
+ private:
+ static void *operator new(size_t) CPP_THROW_NEW { return 0; };
+ static void operator delete(void *, size_t) { };
+#endif
+};
+
+JS_BEGIN_EXTERN_C
+#endif
+
+#ifdef DEBUG
+extern JS_PUBLIC_API(void)
+JS_DumpNamedRoots(JSRuntime *rt,
+ void (*dump)(const char *name, void *rp, void *data),
+ void *data);
+#endif
+
+/*
+ * Call JS_MapGCRoots to map the GC's roots table using map(rp, name, data).
+ * The root is pointed at by rp; if the root is unnamed, name is null; data is
+ * supplied from the third parameter to JS_MapGCRoots.
+ *
+ * The map function should return JS_MAP_GCROOT_REMOVE to cause the currently
+ * enumerated root to be removed. To stop enumeration, set JS_MAP_GCROOT_STOP
+ * in the return value. To keep on mapping, return JS_MAP_GCROOT_NEXT. These
+ * constants are flags; you can OR them together.
+ *
+ * This function acquires and releases rt's GC lock around the mapping of the
+ * roots table, so the map function should run to completion in as few cycles
+ * as possible. Of course, map cannot call JS_GC, JS_MaybeGC, JS_BeginRequest,
+ * or any JS API entry point that acquires locks, without double-tripping or
+ * deadlocking on the GC lock.
+ *
+ * JS_MapGCRoots returns the count of roots that were successfully mapped.
+ */
+#define JS_MAP_GCROOT_NEXT 0 /* continue mapping entries */
+#define JS_MAP_GCROOT_STOP 1 /* stop mapping entries */
+#define JS_MAP_GCROOT_REMOVE 2 /* remove and free the current entry */
+
+typedef intN
+(* JS_DLL_CALLBACK JSGCRootMapFun)(void *rp, const char *name, void *data);
+
+extern JS_PUBLIC_API(uint32)
+JS_MapGCRoots(JSRuntime *rt, JSGCRootMapFun map, void *data);
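+
+/*
+ * Illustrative sketch (dump_root is a hypothetical map function): print each
+ * root to the FILE * passed through data, keeping every entry.
+ *
+ *     static intN
+ *     dump_root(void *rp, const char *name, void *data)
+ *     {
+ *         fprintf((FILE *) data, "root %p (%s)\n", rp,
+ *                 name ? name : "unnamed");
+ *         return JS_MAP_GCROOT_NEXT;
+ *     }
+ *
+ *     (void) JS_MapGCRoots(rt, dump_root, (void *) stderr);
+ */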
+
+extern JS_PUBLIC_API(JSBool)
+JS_LockGCThing(JSContext *cx, void *thing);
+
+extern JS_PUBLIC_API(JSBool)
+JS_LockGCThingRT(JSRuntime *rt, void *thing);
+
+extern JS_PUBLIC_API(JSBool)
+JS_UnlockGCThing(JSContext *cx, void *thing);
+
+extern JS_PUBLIC_API(JSBool)
+JS_UnlockGCThingRT(JSRuntime *rt, void *thing);
+
+/*
+ * For implementors of JSObjectOps.mark, to mark a GC-thing reachable via a
+ * property or other strong ref identified for debugging purposes by name.
+ * The name argument's storage needs to live only as long as the call to
+ * this routine.
+ *
+ * The final arg is used by GC_MARK_DEBUG code to build a ref path through
+ * the GC's live thing graph. Implementors of JSObjectOps.mark should pass
+ * its final arg through to this function when marking all GC-things that are
+ * directly reachable from the object being marked.
+ *
+ * See the JSMarkOp typedef in jspubtd.h, and the JSObjectOps struct below.
+ */
+extern JS_PUBLIC_API(void)
+JS_MarkGCThing(JSContext *cx, void *thing, const char *name, void *arg);
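+
+/*
+ * Illustrative sketch (MyPrivate and its cachedStr member are hypothetical):
+ * a mark hook that keeps a string stored in the object's private data alive
+ * across GC by marking it with the arg passed through from the caller.
+ *
+ *     static uint32
+ *     my_mark(JSContext *cx, JSObject *obj, void *arg)
+ *     {
+ *         MyPrivate *priv = (MyPrivate *) JS_GetPrivate(cx, obj);
+ *
+ *         if (priv && priv->cachedStr)
+ *             JS_MarkGCThing(cx, priv->cachedStr, "priv->cachedStr", arg);
+ *         return 0;
+ *     }
+ */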
+
+extern JS_PUBLIC_API(void)
+JS_GC(JSContext *cx);
+
+extern JS_PUBLIC_API(void)
+JS_MaybeGC(JSContext *cx);
+
+extern JS_PUBLIC_API(JSGCCallback)
+JS_SetGCCallback(JSContext *cx, JSGCCallback cb);
+
+extern JS_PUBLIC_API(JSGCCallback)
+JS_SetGCCallbackRT(JSRuntime *rt, JSGCCallback cb);
+
+extern JS_PUBLIC_API(JSBool)
+JS_IsAboutToBeFinalized(JSContext *cx, void *thing);
+
+typedef enum JSGCParamKey {
+ JSGC_MAX_BYTES = 0, /* maximum nominal heap before last ditch GC */
+ JSGC_MAX_MALLOC_BYTES = 1 /* # of JS_malloc bytes before last ditch GC */
+} JSGCParamKey;
+
+extern JS_PUBLIC_API(void)
+JS_SetGCParameter(JSRuntime *rt, JSGCParamKey key, uint32 value);
+
+/*
+ * Add a finalizer for external strings created by JS_NewExternalString (see
+ * below) using a type-code returned from this function, and that understands
+ * how to free or release the memory pointed at by JS_GetStringChars(str).
+ *
+ * Return a nonnegative type index if there is room for a finalizer in the
+ * global GC finalizers table, else return -1. If the engine is compiled
+ * JS_THREADSAFE and used in a multi-threaded environment, this function must
+ * be invoked on the primordial thread only, at startup -- or else the entire
+ * program must single-thread itself while loading a module that calls this
+ * function.
+ */
+extern JS_PUBLIC_API(intN)
+JS_AddExternalStringFinalizer(JSStringFinalizeOp finalizer);
+
+/*
+ * Remove finalizer from the global GC finalizers table, returning its type
+ * code if found, -1 if not found.
+ *
+ * As with JS_AddExternalStringFinalizer, there is a threading restriction
+ * if you compile the engine JS_THREADSAFE: this function may be called for a
+ * given finalizer pointer on only one thread; different threads may call to
+ * remove distinct finalizers safely.
+ *
+ * You must ensure that all strings with finalizer's type have been collected
+ * before calling this function. Otherwise, string data will be leaked by the
+ * GC, for want of a finalizer to call.
+ */
+extern JS_PUBLIC_API(intN)
+JS_RemoveExternalStringFinalizer(JSStringFinalizeOp finalizer);
+
+/*
+ * Create a new JSString whose chars member refers to external memory, i.e.,
+ * memory requiring special, type-specific finalization. The type code must
+ * be a nonnegative return value from JS_AddExternalStringFinalizer.
+ */
+extern JS_PUBLIC_API(JSString *)
+JS_NewExternalString(JSContext *cx, jschar *chars, size_t length, intN type);
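+
+/*
+ * Illustrative sketch (my_free_chars and the surrounding bookkeeping are
+ * hypothetical): register a finalizer once at startup, then hand externally
+ * allocated jschar storage to the engine.
+ *
+ *     static void
+ *     finalize_my_chars(JSContext *cx, JSString *str)
+ *     {
+ *         my_free_chars(JS_GetStringChars(str));
+ *     }
+ *
+ *     static intN my_string_type = -1;
+ *
+ *     my_string_type = JS_AddExternalStringFinalizer(finalize_my_chars);
+ *
+ * and later, for chars/length owned by the embedding:
+ *
+ *     if (my_string_type >= 0)
+ *         str = JS_NewExternalString(cx, chars, length, my_string_type);
+ */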
+
+/*
+ * Returns the external-string finalizer index for this string, or -1 if it is
+ * an "internal" (native to JS engine) string.
+ */
+extern JS_PUBLIC_API(intN)
+JS_GetExternalStringGCType(JSRuntime *rt, JSString *str);
+
+/*
+ * Sets maximum (if stack grows upward) or minimum (downward) legal stack byte
+ * address in limitAddr for the thread or process stack used by cx. To disable
+ * stack size checking, pass 0 for limitAddr.
+ */
+extern JS_PUBLIC_API(void)
+JS_SetThreadStackLimit(JSContext *cx, jsuword limitAddr);
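+
+/*
+ * A common sketch for platforms whose stack grows downward (the 512K budget
+ * is an arbitrary example; pass 0 to disable checking as noted above):
+ *
+ *     int dummy;
+ *     JS_SetThreadStackLimit(cx, (jsuword) &dummy - 512 * 1024);
+ */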
+
+/************************************************************************/
+
+/*
+ * Classes, objects, and properties.
+ */
+
+/* For detailed comments on the function pointer types, see jspubtd.h. */
+struct JSClass {
+ const char *name;
+ uint32 flags;
+
+ /* Mandatory non-null function pointer members. */
+ JSPropertyOp addProperty;
+ JSPropertyOp delProperty;
+ JSPropertyOp getProperty;
+ JSPropertyOp setProperty;
+ JSEnumerateOp enumerate;
+ JSResolveOp resolve;
+ JSConvertOp convert;
+ JSFinalizeOp finalize;
+
+ /* Optionally non-null members start here. */
+ JSGetObjectOps getObjectOps;
+ JSCheckAccessOp checkAccess;
+ JSNative call;
+ JSNative construct;
+ JSXDRObjectOp xdrObject;
+ JSHasInstanceOp hasInstance;
+ JSMarkOp mark;
+ JSReserveSlotsOp reserveSlots;
+};
+
+struct JSExtendedClass {
+ JSClass base;
+ JSEqualityOp equality;
+ JSObjectOp outerObject;
+ JSObjectOp innerObject;
+ void (*reserved0)();
+ void (*reserved1)();
+ void (*reserved2)();
+ void (*reserved3)();
+ void (*reserved4)();
+};
+
+#define JSCLASS_HAS_PRIVATE (1<<0) /* objects have private slot */
+#define JSCLASS_NEW_ENUMERATE (1<<1) /* has JSNewEnumerateOp hook */
+#define JSCLASS_NEW_RESOLVE (1<<2) /* has JSNewResolveOp hook */
+#define JSCLASS_PRIVATE_IS_NSISUPPORTS (1<<3) /* private is (nsISupports *) */
+#define JSCLASS_SHARE_ALL_PROPERTIES (1<<4) /* all properties are SHARED */
+#define JSCLASS_NEW_RESOLVE_GETS_START (1<<5) /* JSNewResolveOp gets starting
+ object in prototype chain
+ passed in via *objp in/out
+ parameter */
+#define JSCLASS_CONSTRUCT_PROTOTYPE (1<<6) /* call constructor on class
+ prototype */
+#define JSCLASS_DOCUMENT_OBSERVER (1<<7) /* DOM document observer */
+
+/*
+ * To reserve slots fetched and stored via JS_Get/SetReservedSlot, bitwise-or
+ * JSCLASS_HAS_RESERVED_SLOTS(n) into the initializer for JSClass.flags, where
+ * n is a constant in [1, 255]. Reserved slots are indexed from 0 to n-1.
+ */
+#define JSCLASS_RESERVED_SLOTS_SHIFT 8 /* room for 8 flags below */
+#define JSCLASS_RESERVED_SLOTS_WIDTH 8 /* and 16 above this field */
+#define JSCLASS_RESERVED_SLOTS_MASK JS_BITMASK(JSCLASS_RESERVED_SLOTS_WIDTH)
+#define JSCLASS_HAS_RESERVED_SLOTS(n) (((n) & JSCLASS_RESERVED_SLOTS_MASK) \
+ << JSCLASS_RESERVED_SLOTS_SHIFT)
+#define JSCLASS_RESERVED_SLOTS(clasp) (((clasp)->flags \
+ >> JSCLASS_RESERVED_SLOTS_SHIFT) \
+ & JSCLASS_RESERVED_SLOTS_MASK)
+
+#define JSCLASS_HIGH_FLAGS_SHIFT (JSCLASS_RESERVED_SLOTS_SHIFT + \
+ JSCLASS_RESERVED_SLOTS_WIDTH)
+
+/* True if JSClass is really a JSExtendedClass. */
+#define JSCLASS_IS_EXTENDED (1<<(JSCLASS_HIGH_FLAGS_SHIFT+0))
+#define JSCLASS_IS_ANONYMOUS (1<<(JSCLASS_HIGH_FLAGS_SHIFT+1))
+#define JSCLASS_IS_GLOBAL (1<<(JSCLASS_HIGH_FLAGS_SHIFT+2))
+
+/*
+ * ECMA-262 requires that most constructors used internally create objects
+ * with "the original Foo.prototype value" as their [[Prototype]] (__proto__)
+ * member initial value. The "original ... value" verbiage is there because
+ * in ECMA-262, global properties naming class objects are read/write and
+ * deleteable, for the most part.
+ *
+ * Implementing this efficiently requires that global objects have classes
+ * with the following flags. Failure to use JSCLASS_GLOBAL_FLAGS won't break
+ * anything except the ECMA-262 "original prototype value" behavior, which was
+ * broken for years in SpiderMonkey. In other words, without these flags you
+ * get backward compatibility.
+ */
+#define JSCLASS_GLOBAL_FLAGS \
+ (JSCLASS_IS_GLOBAL | JSCLASS_HAS_RESERVED_SLOTS(JSProto_LIMIT))
+
+/* Fast access to the original value of each standard class's prototype. */
+#define JSCLASS_CACHED_PROTO_SHIFT (JSCLASS_HIGH_FLAGS_SHIFT + 8)
+#define JSCLASS_CACHED_PROTO_WIDTH 8
+#define JSCLASS_CACHED_PROTO_MASK JS_BITMASK(JSCLASS_CACHED_PROTO_WIDTH)
+#define JSCLASS_HAS_CACHED_PROTO(key) ((key) << JSCLASS_CACHED_PROTO_SHIFT)
+#define JSCLASS_CACHED_PROTO_KEY(clasp) (((clasp)->flags \
+ >> JSCLASS_CACHED_PROTO_SHIFT) \
+ & JSCLASS_CACHED_PROTO_MASK)
+
+/* Initializer for unused members of statically initialized JSClass structs. */
+#define JSCLASS_NO_OPTIONAL_MEMBERS 0,0,0,0,0,0,0,0
+#define JSCLASS_NO_RESERVED_MEMBERS 0,0,0,0,0
+
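+/*
+ * Illustrative sketch (editorial addition): a statically initialized JSClass
+ * that reserves two slots, plus the matching reserved-slot accessors declared
+ * further below. The class name and hook choices are arbitrary.
+ *
+ *   static JSClass my_class = {
+ *       "MyThing", JSCLASS_HAS_PRIVATE | JSCLASS_HAS_RESERVED_SLOTS(2),
+ *       JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
+ *       JS_EnumerateStub, JS_ResolveStub, JS_ConvertStub, JS_FinalizeStub,
+ *       JSCLASS_NO_OPTIONAL_MEMBERS
+ *   };
+ *
+ *   jsval v;
+ *   JS_SetReservedSlot(cx, obj, 0, INT_TO_JSVAL(42));  // slots 0 and 1 exist
+ *   JS_GetReservedSlot(cx, obj, 0, &v);
+ */
+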
+/* For detailed comments on these function pointer types, see jspubtd.h. */
+struct JSObjectOps {
+ /* Mandatory non-null function pointer members. */
+ JSNewObjectMapOp newObjectMap;
+ JSObjectMapOp destroyObjectMap;
+ JSLookupPropOp lookupProperty;
+ JSDefinePropOp defineProperty;
+ JSPropertyIdOp getProperty;
+ JSPropertyIdOp setProperty;
+ JSAttributesOp getAttributes;
+ JSAttributesOp setAttributes;
+ JSPropertyIdOp deleteProperty;
+ JSConvertOp defaultValue;
+ JSNewEnumerateOp enumerate;
+ JSCheckAccessIdOp checkAccess;
+
+ /* Optionally non-null members start here. */
+ JSObjectOp thisObject;
+ JSPropertyRefOp dropProperty;
+ JSNative call;
+ JSNative construct;
+ JSXDRObjectOp xdrObject;
+ JSHasInstanceOp hasInstance;
+ JSSetObjectSlotOp setProto;
+ JSSetObjectSlotOp setParent;
+ JSMarkOp mark;
+ JSFinalizeOp clear;
+ JSGetRequiredSlotOp getRequiredSlot;
+ JSSetRequiredSlotOp setRequiredSlot;
+};
+
+struct JSXMLObjectOps {
+ JSObjectOps base;
+ JSGetMethodOp getMethod;
+ JSSetMethodOp setMethod;
+ JSEnumerateValuesOp enumerateValues;
+ JSEqualityOp equality;
+ JSConcatenateOp concatenate;
+};
+
+/*
+ * Classes that expose JSObjectOps via a non-null getObjectOps class hook may
+ * derive a property structure from this struct, return a pointer to it from
+ * lookupProperty and defineProperty, and use the pointer to avoid rehashing
+ * in getAttributes and setAttributes.
+ *
+ * The jsid type contains either an int jsval (see JSVAL_IS_INT above), or an
+ * internal pointer that is opaque to users of this API, but which users may
+ * convert from and to a jsval using JS_ValueToId and JS_IdToValue.
+ */
+struct JSProperty {
+ jsid id;
+};
+
+struct JSIdArray {
+ jsint length;
+ jsid vector[1]; /* actually, length jsid words */
+};
+
+extern JS_PUBLIC_API(void)
+JS_DestroyIdArray(JSContext *cx, JSIdArray *ida);
+
+extern JS_PUBLIC_API(JSBool)
+JS_ValueToId(JSContext *cx, jsval v, jsid *idp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_IdToValue(JSContext *cx, jsid id, jsval *vp);
+
+/*
+ * The magic XML namespace id is int-tagged, but not a valid integer jsval.
+ * Global object classes in embeddings that enable JS_HAS_XML_SUPPORT (E4X)
+ * should handle this id specially before converting id via JSVAL_TO_INT.
+ */
+#define JS_DEFAULT_XML_NAMESPACE_ID ((jsid) JSVAL_VOID)
+
+/*
+ * JSNewResolveOp flag bits.
+ */
+#define JSRESOLVE_QUALIFIED 0x01 /* resolve a qualified property id */
+#define JSRESOLVE_ASSIGNING 0x02 /* resolve on the left of assignment */
+#define JSRESOLVE_DETECTING 0x04 /* 'if (o.p)...' or '(o.p) ?...:...' */
+#define JSRESOLVE_DECLARING 0x08 /* var, const, or function prolog op */
+#define JSRESOLVE_CLASSNAME 0x10 /* class name used when constructing */
+
+extern JS_PUBLIC_API(JSBool)
+JS_PropertyStub(JSContext *cx, JSObject *obj, jsval id, jsval *vp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_EnumerateStub(JSContext *cx, JSObject *obj);
+
+extern JS_PUBLIC_API(JSBool)
+JS_ResolveStub(JSContext *cx, JSObject *obj, jsval id);
+
+extern JS_PUBLIC_API(JSBool)
+JS_ConvertStub(JSContext *cx, JSObject *obj, JSType type, jsval *vp);
+
+extern JS_PUBLIC_API(void)
+JS_FinalizeStub(JSContext *cx, JSObject *obj);
+
+struct JSConstDoubleSpec {
+ jsdouble dval;
+ const char *name;
+ uint8 flags;
+ uint8 spare[3];
+};
+
+/*
+ * To define an array element rather than a named property member, cast the
+ * element's index to (const char *) and initialize name with it, and set the
+ * JSPROP_INDEX bit in flags.
+ */
+struct JSPropertySpec {
+ const char *name;
+ int8 tinyid;
+ uint8 flags;
+ JSPropertyOp getter;
+ JSPropertyOp setter;
+};
+
+struct JSFunctionSpec {
+ const char *name;
+ JSNative call;
+#ifdef MOZILLA_1_8_BRANCH
+ uint8 nargs;
+ uint8 flags;
+ uint16 extra;
+#else
+ uint16 nargs;
+ uint16 flags;
+ uint32 extra; /* extra & 0xFFFF:
+ number of arg slots for local GC roots
+ extra >> 16:
+ reserved, must be zero */
+#endif
+};
+
+extern JS_PUBLIC_API(JSObject *)
+JS_InitClass(JSContext *cx, JSObject *obj, JSObject *parent_proto,
+ JSClass *clasp, JSNative constructor, uintN nargs,
+ JSPropertySpec *ps, JSFunctionSpec *fs,
+ JSPropertySpec *static_ps, JSFunctionSpec *static_fs);
+
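+/*
+ * Illustrative sketch (editorial addition): defining a class with a method
+ * table via JS_InitClass. MyThing_construct, my_class and the method are
+ * made-up examples; the JSNative signature is the one from jspubtd.h.
+ *
+ *   static JSBool
+ *   my_doubleIt(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ *               jsval *rval)
+ *   {
+ *       int32 n = 0;
+ *       if (argc > 0 && !JS_ValueToInt32(cx, argv[0], &n))
+ *           return JS_FALSE;
+ *       *rval = INT_TO_JSVAL(n * 2);
+ *       return JS_TRUE;
+ *   }
+ *
+ *   static JSFunctionSpec my_methods[] = {
+ *       {"doubleIt", my_doubleIt, 1, 0, 0},
+ *       {NULL, NULL, 0, 0, 0}
+ *   };
+ *
+ *   proto = JS_InitClass(cx, global, NULL, &my_class, MyThing_construct, 0,
+ *                        NULL, my_methods, NULL, NULL);
+ */
+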
+#ifdef JS_THREADSAFE
+extern JS_PUBLIC_API(JSClass *)
+JS_GetClass(JSContext *cx, JSObject *obj);
+
+#define JS_GET_CLASS(cx,obj) JS_GetClass(cx, obj)
+#else
+extern JS_PUBLIC_API(JSClass *)
+JS_GetClass(JSObject *obj);
+
+#define JS_GET_CLASS(cx,obj) JS_GetClass(obj)
+#endif
+
+extern JS_PUBLIC_API(JSBool)
+JS_InstanceOf(JSContext *cx, JSObject *obj, JSClass *clasp, jsval *argv);
+
+extern JS_PUBLIC_API(JSBool)
+JS_HasInstance(JSContext *cx, JSObject *obj, jsval v, JSBool *bp);
+
+extern JS_PUBLIC_API(void *)
+JS_GetPrivate(JSContext *cx, JSObject *obj);
+
+extern JS_PUBLIC_API(JSBool)
+JS_SetPrivate(JSContext *cx, JSObject *obj, void *data);
+
+extern JS_PUBLIC_API(void *)
+JS_GetInstancePrivate(JSContext *cx, JSObject *obj, JSClass *clasp,
+ jsval *argv);
+
+extern JS_PUBLIC_API(JSObject *)
+JS_GetPrototype(JSContext *cx, JSObject *obj);
+
+extern JS_PUBLIC_API(JSBool)
+JS_SetPrototype(JSContext *cx, JSObject *obj, JSObject *proto);
+
+extern JS_PUBLIC_API(JSObject *)
+JS_GetParent(JSContext *cx, JSObject *obj);
+
+extern JS_PUBLIC_API(JSBool)
+JS_SetParent(JSContext *cx, JSObject *obj, JSObject *parent);
+
+extern JS_PUBLIC_API(JSObject *)
+JS_GetConstructor(JSContext *cx, JSObject *proto);
+
+/*
+ * Get a unique identifier for obj, good for the lifetime of obj (even if it
+ * is moved by a copying GC). Return false on failure (likely out of memory),
+ * and true with *idp containing the unique id on success.
+ */
+extern JS_PUBLIC_API(JSBool)
+JS_GetObjectId(JSContext *cx, JSObject *obj, jsid *idp);
+
+extern JS_PUBLIC_API(JSObject *)
+JS_NewObject(JSContext *cx, JSClass *clasp, JSObject *proto, JSObject *parent);
+
+extern JS_PUBLIC_API(JSBool)
+JS_SealObject(JSContext *cx, JSObject *obj, JSBool deep);
+
+extern JS_PUBLIC_API(JSObject *)
+JS_ConstructObject(JSContext *cx, JSClass *clasp, JSObject *proto,
+ JSObject *parent);
+
+extern JS_PUBLIC_API(JSObject *)
+JS_ConstructObjectWithArguments(JSContext *cx, JSClass *clasp, JSObject *proto,
+ JSObject *parent, uintN argc, jsval *argv);
+
+extern JS_PUBLIC_API(JSObject *)
+JS_DefineObject(JSContext *cx, JSObject *obj, const char *name, JSClass *clasp,
+ JSObject *proto, uintN attrs);
+
+extern JS_PUBLIC_API(JSBool)
+JS_DefineConstDoubles(JSContext *cx, JSObject *obj, JSConstDoubleSpec *cds);
+
+extern JS_PUBLIC_API(JSBool)
+JS_DefineProperties(JSContext *cx, JSObject *obj, JSPropertySpec *ps);
+
+extern JS_PUBLIC_API(JSBool)
+JS_DefineProperty(JSContext *cx, JSObject *obj, const char *name, jsval value,
+ JSPropertyOp getter, JSPropertyOp setter, uintN attrs);
+
+/*
+ * Determine the attributes (JSPROP_* flags) of a property on a given object.
+ *
+ * If the object does not have a property by that name, *foundp will be
+ * JS_FALSE and the value of *attrsp is undefined.
+ */
+extern JS_PUBLIC_API(JSBool)
+JS_GetPropertyAttributes(JSContext *cx, JSObject *obj, const char *name,
+ uintN *attrsp, JSBool *foundp);
+
+/*
+ * The same, but if the property is native, return its getter and setter via
+ * *getterp and *setterp, respectively (and only if the out parameter pointer
+ * is not null).
+ */
+extern JS_PUBLIC_API(JSBool)
+JS_GetPropertyAttrsGetterAndSetter(JSContext *cx, JSObject *obj,
+ const char *name,
+ uintN *attrsp, JSBool *foundp,
+ JSPropertyOp *getterp,
+ JSPropertyOp *setterp);
+
+/*
+ * Set the attributes of a property on a given object.
+ *
+ * If the object does not have a property by that name, *foundp will be
+ * JS_FALSE and nothing will be altered.
+ */
+extern JS_PUBLIC_API(JSBool)
+JS_SetPropertyAttributes(JSContext *cx, JSObject *obj, const char *name,
+ uintN attrs, JSBool *foundp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_DefinePropertyWithTinyId(JSContext *cx, JSObject *obj, const char *name,
+ int8 tinyid, jsval value,
+ JSPropertyOp getter, JSPropertyOp setter,
+ uintN attrs);
+
+extern JS_PUBLIC_API(JSBool)
+JS_AliasProperty(JSContext *cx, JSObject *obj, const char *name,
+ const char *alias);
+
+extern JS_PUBLIC_API(JSBool)
+JS_HasProperty(JSContext *cx, JSObject *obj, const char *name, JSBool *foundp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_LookupProperty(JSContext *cx, JSObject *obj, const char *name, jsval *vp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_LookupPropertyWithFlags(JSContext *cx, JSObject *obj, const char *name,
+ uintN flags, jsval *vp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_GetProperty(JSContext *cx, JSObject *obj, const char *name, jsval *vp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_GetMethodById(JSContext *cx, JSObject *obj, jsid id, JSObject **objp,
+ jsval *vp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_GetMethod(JSContext *cx, JSObject *obj, const char *name, JSObject **objp,
+ jsval *vp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_SetProperty(JSContext *cx, JSObject *obj, const char *name, jsval *vp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_DeleteProperty(JSContext *cx, JSObject *obj, const char *name);
+
+extern JS_PUBLIC_API(JSBool)
+JS_DeleteProperty2(JSContext *cx, JSObject *obj, const char *name,
+ jsval *rval);
+
+extern JS_PUBLIC_API(JSBool)
+JS_DefineUCProperty(JSContext *cx, JSObject *obj,
+ const jschar *name, size_t namelen, jsval value,
+ JSPropertyOp getter, JSPropertyOp setter,
+ uintN attrs);
+
+/*
+ * Determine the attributes (JSPROP_* flags) of a property on a given object.
+ *
+ * If the object does not have a property by that name, *foundp will be
+ * JS_FALSE and the value of *attrsp is undefined.
+ */
+extern JS_PUBLIC_API(JSBool)
+JS_GetUCPropertyAttributes(JSContext *cx, JSObject *obj,
+ const jschar *name, size_t namelen,
+ uintN *attrsp, JSBool *foundp);
+
+/*
+ * The same, but if the property is native, return its getter and setter via
+ * *getterp and *setterp, respectively (and only if the out parameter pointer
+ * is not null).
+ */
+extern JS_PUBLIC_API(JSBool)
+JS_GetUCPropertyAttrsGetterAndSetter(JSContext *cx, JSObject *obj,
+ const jschar *name, size_t namelen,
+ uintN *attrsp, JSBool *foundp,
+ JSPropertyOp *getterp,
+ JSPropertyOp *setterp);
+
+/*
+ * Set the attributes of a property on a given object.
+ *
+ * If the object does not have a property by that name, *foundp will be
+ * JS_FALSE and nothing will be altered.
+ */
+extern JS_PUBLIC_API(JSBool)
+JS_SetUCPropertyAttributes(JSContext *cx, JSObject *obj,
+ const jschar *name, size_t namelen,
+ uintN attrs, JSBool *foundp);
+
+
+extern JS_PUBLIC_API(JSBool)
+JS_DefineUCPropertyWithTinyId(JSContext *cx, JSObject *obj,
+ const jschar *name, size_t namelen,
+ int8 tinyid, jsval value,
+ JSPropertyOp getter, JSPropertyOp setter,
+ uintN attrs);
+
+extern JS_PUBLIC_API(JSBool)
+JS_HasUCProperty(JSContext *cx, JSObject *obj,
+ const jschar *name, size_t namelen,
+ JSBool *vp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_LookupUCProperty(JSContext *cx, JSObject *obj,
+ const jschar *name, size_t namelen,
+ jsval *vp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_GetUCProperty(JSContext *cx, JSObject *obj,
+ const jschar *name, size_t namelen,
+ jsval *vp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_SetUCProperty(JSContext *cx, JSObject *obj,
+ const jschar *name, size_t namelen,
+ jsval *vp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_DeleteUCProperty2(JSContext *cx, JSObject *obj,
+ const jschar *name, size_t namelen,
+ jsval *rval);
+
+extern JS_PUBLIC_API(JSObject *)
+JS_NewArrayObject(JSContext *cx, jsint length, jsval *vector);
+
+extern JS_PUBLIC_API(JSBool)
+JS_IsArrayObject(JSContext *cx, JSObject *obj);
+
+extern JS_PUBLIC_API(JSBool)
+JS_GetArrayLength(JSContext *cx, JSObject *obj, jsuint *lengthp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_SetArrayLength(JSContext *cx, JSObject *obj, jsuint length);
+
+extern JS_PUBLIC_API(JSBool)
+JS_HasArrayLength(JSContext *cx, JSObject *obj, jsuint *lengthp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_DefineElement(JSContext *cx, JSObject *obj, jsint index, jsval value,
+ JSPropertyOp getter, JSPropertyOp setter, uintN attrs);
+
+extern JS_PUBLIC_API(JSBool)
+JS_AliasElement(JSContext *cx, JSObject *obj, const char *name, jsint alias);
+
+extern JS_PUBLIC_API(JSBool)
+JS_HasElement(JSContext *cx, JSObject *obj, jsint index, JSBool *foundp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_LookupElement(JSContext *cx, JSObject *obj, jsint index, jsval *vp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_GetElement(JSContext *cx, JSObject *obj, jsint index, jsval *vp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_SetElement(JSContext *cx, JSObject *obj, jsint index, jsval *vp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_DeleteElement(JSContext *cx, JSObject *obj, jsint index);
+
+extern JS_PUBLIC_API(JSBool)
+JS_DeleteElement2(JSContext *cx, JSObject *obj, jsint index, jsval *rval);
+
+extern JS_PUBLIC_API(void)
+JS_ClearScope(JSContext *cx, JSObject *obj);
+
+extern JS_PUBLIC_API(JSIdArray *)
+JS_Enumerate(JSContext *cx, JSObject *obj);
+
+/*
+ * Create an object to iterate over enumerable properties of obj, in arbitrary
+ * property definition order. NB: This differs from longstanding for..in loop
+ * order, which uses order of property definition in obj.
+ */
+extern JS_PUBLIC_API(JSObject *)
+JS_NewPropertyIterator(JSContext *cx, JSObject *obj);
+
+/*
+ * Return true on success with *idp containing the id of the next enumerable
+ * property to visit using iterobj, or JSVAL_VOID if there is no such property
+ * left to visit. Return false on error.
+ */
+extern JS_PUBLIC_API(JSBool)
+JS_NextProperty(JSContext *cx, JSObject *iterobj, jsid *idp);
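+
+/*
+ * Illustrative sketch (editorial addition): visiting every enumerable
+ * property id of obj with the iterator above. Rooting of iterobj and error
+ * handling are elided; a real embedding should root iterobj across the loop.
+ *
+ *   JSObject *iterobj = JS_NewPropertyIterator(cx, obj);
+ *   jsid id;
+ *   while (iterobj && JS_NextProperty(cx, iterobj, &id)) {
+ *       if ((jsval)id == JSVAL_VOID)
+ *           break;                      // no more enumerable properties
+ *       ...convert with JS_IdToValue(cx, id, &v) and use the value...
+ *   }
+ */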
+
+extern JS_PUBLIC_API(JSBool)
+JS_CheckAccess(JSContext *cx, JSObject *obj, jsid id, JSAccessMode mode,
+ jsval *vp, uintN *attrsp);
+
+extern JS_PUBLIC_API(JSCheckAccessOp)
+JS_SetCheckObjectAccessCallback(JSRuntime *rt, JSCheckAccessOp acb);
+
+extern JS_PUBLIC_API(JSBool)
+JS_GetReservedSlot(JSContext *cx, JSObject *obj, uint32 index, jsval *vp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_SetReservedSlot(JSContext *cx, JSObject *obj, uint32 index, jsval v);
+
+/************************************************************************/
+
+/*
+ * Security protocol.
+ */
+struct JSPrincipals {
+ char *codebase;
+
+ /* XXX unspecified and unused by Mozilla code -- can we remove these? */
+ void * (* JS_DLL_CALLBACK getPrincipalArray)(JSContext *cx, JSPrincipals *);
+ JSBool (* JS_DLL_CALLBACK globalPrivilegesEnabled)(JSContext *cx, JSPrincipals *);
+
+ /* Don't call "destroy"; use reference counting macros below. */
+ jsrefcount refcount;
+
+ void (* JS_DLL_CALLBACK destroy)(JSContext *cx, JSPrincipals *);
+ JSBool (* JS_DLL_CALLBACK subsume)(JSPrincipals *, JSPrincipals *);
+};
+
+#ifdef JS_THREADSAFE
+#define JSPRINCIPALS_HOLD(cx, principals) JS_HoldPrincipals(cx,principals)
+#define JSPRINCIPALS_DROP(cx, principals) JS_DropPrincipals(cx,principals)
+
+extern JS_PUBLIC_API(jsrefcount)
+JS_HoldPrincipals(JSContext *cx, JSPrincipals *principals);
+
+extern JS_PUBLIC_API(jsrefcount)
+JS_DropPrincipals(JSContext *cx, JSPrincipals *principals);
+
+#else
+#define JSPRINCIPALS_HOLD(cx, principals) (++(principals)->refcount)
+#define JSPRINCIPALS_DROP(cx, principals) \
+ ((--(principals)->refcount == 0) \
+ ? ((*(principals)->destroy)((cx), (principals)), 0) \
+ : (principals)->refcount)
+#endif
+
+extern JS_PUBLIC_API(JSPrincipalsTranscoder)
+JS_SetPrincipalsTranscoder(JSRuntime *rt, JSPrincipalsTranscoder px);
+
+extern JS_PUBLIC_API(JSObjectPrincipalsFinder)
+JS_SetObjectPrincipalsFinder(JSRuntime *rt, JSObjectPrincipalsFinder fop);
+
+/************************************************************************/
+
+/*
+ * Functions and scripts.
+ */
+extern JS_PUBLIC_API(JSFunction *)
+JS_NewFunction(JSContext *cx, JSNative call, uintN nargs, uintN flags,
+ JSObject *parent, const char *name);
+
+extern JS_PUBLIC_API(JSObject *)
+JS_GetFunctionObject(JSFunction *fun);
+
+/*
+ * Deprecated, useful only for diagnostics. Use JS_GetFunctionId instead for
+ * anonymous vs. "anonymous" disambiguation and Unicode fidelity.
+ */
+extern JS_PUBLIC_API(const char *)
+JS_GetFunctionName(JSFunction *fun);
+
+/*
+ * Return the function's identifier as a JSString, or null if fun is unnamed.
+ * The returned string lives as long as fun, so you don't need to root a saved
+ * reference to it if fun is well-connected or rooted, and provided you bound
+ * the use of the saved reference by fun's lifetime.
+ *
+ * Prefer JS_GetFunctionId over JS_GetFunctionName because it returns null for
+ * truly anonymous functions, and because it doesn't chop to ISO-Latin-1 chars
+ * from UTF-16-ish jschars.
+ */
+extern JS_PUBLIC_API(JSString *)
+JS_GetFunctionId(JSFunction *fun);
+
+/*
+ * Return JSFUN_* flags for fun.
+ */
+extern JS_PUBLIC_API(uintN)
+JS_GetFunctionFlags(JSFunction *fun);
+
+/*
+ * Return the arity (length) of fun.
+ */
+extern JS_PUBLIC_API(uint16)
+JS_GetFunctionArity(JSFunction *fun);
+
+/*
+ * Infallible predicate to test whether obj is a function object (faster than
+ * comparing obj's class name to "Function", but equivalent unless someone has
+ * overwritten the "Function" identifier with a different constructor and then
+ * created instances using that constructor that might be passed in as obj).
+ */
+extern JS_PUBLIC_API(JSBool)
+JS_ObjectIsFunction(JSContext *cx, JSObject *obj);
+
+extern JS_PUBLIC_API(JSBool)
+JS_DefineFunctions(JSContext *cx, JSObject *obj, JSFunctionSpec *fs);
+
+extern JS_PUBLIC_API(JSFunction *)
+JS_DefineFunction(JSContext *cx, JSObject *obj, const char *name, JSNative call,
+ uintN nargs, uintN attrs);
+
+extern JS_PUBLIC_API(JSFunction *)
+JS_DefineUCFunction(JSContext *cx, JSObject *obj,
+ const jschar *name, size_t namelen, JSNative call,
+ uintN nargs, uintN attrs);
+
+extern JS_PUBLIC_API(JSObject *)
+JS_CloneFunctionObject(JSContext *cx, JSObject *funobj, JSObject *parent);
+
+/*
+ * Given a buffer, return JS_FALSE if the buffer might become a valid
+ * JavaScript statement with the addition of more lines. Otherwise return
+ * JS_TRUE. The intent is to support interactive compilation - accumulate
+ * lines in a buffer until JS_BufferIsCompilableUnit is true, then pass it to
+ * the compiler.
+ */
+extern JS_PUBLIC_API(JSBool)
+JS_BufferIsCompilableUnit(JSContext *cx, JSObject *obj,
+ const char *bytes, size_t length);
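+
+/*
+ * Illustrative sketch (editorial addition): the accumulate-then-compile loop
+ * the comment above describes. buf, len, lineno, rval and readline_into are
+ * hypothetical REPL state and helpers.
+ *
+ *   do {
+ *       readline_into(buf, &len);       // append one more typed line
+ *   } while (!JS_BufferIsCompilableUnit(cx, global, buf, len));
+ *   JS_EvaluateScript(cx, global, buf, len, "typein", lineno, &rval);
+ */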
+
+/*
+ * The JSScript objects returned by the following functions refer to string and
+ * other kinds of literals, including doubles and RegExp objects. These
+ * literals are vulnerable to garbage collection; to root script objects and
+ * prevent literals from being collected, create a rootable object using
+ * JS_NewScriptObject, and root the resulting object using JS_Add[Named]Root.
+ */
+extern JS_PUBLIC_API(JSScript *)
+JS_CompileScript(JSContext *cx, JSObject *obj,
+ const char *bytes, size_t length,
+ const char *filename, uintN lineno);
+
+extern JS_PUBLIC_API(JSScript *)
+JS_CompileScriptForPrincipals(JSContext *cx, JSObject *obj,
+ JSPrincipals *principals,
+ const char *bytes, size_t length,
+ const char *filename, uintN lineno);
+
+extern JS_PUBLIC_API(JSScript *)
+JS_CompileUCScript(JSContext *cx, JSObject *obj,
+ const jschar *chars, size_t length,
+ const char *filename, uintN lineno);
+
+extern JS_PUBLIC_API(JSScript *)
+JS_CompileUCScriptForPrincipals(JSContext *cx, JSObject *obj,
+ JSPrincipals *principals,
+ const jschar *chars, size_t length,
+ const char *filename, uintN lineno);
+
+extern JS_PUBLIC_API(JSScript *)
+JS_CompileFile(JSContext *cx, JSObject *obj, const char *filename);
+
+extern JS_PUBLIC_API(JSScript *)
+JS_CompileFileHandle(JSContext *cx, JSObject *obj, const char *filename,
+ FILE *fh);
+
+extern JS_PUBLIC_API(JSScript *)
+JS_CompileFileHandleForPrincipals(JSContext *cx, JSObject *obj,
+ const char *filename, FILE *fh,
+ JSPrincipals *principals);
+
+/*
+ * NB: you must use JS_NewScriptObject and root a pointer to its return value
+ * in order to keep a JSScript and its atoms safe from garbage collection after
+ * creating the script via JS_Compile* and before a JS_ExecuteScript* call.
+ * E.g., and without error checks:
+ *
+ * JSScript *script = JS_CompileFile(cx, global, filename);
+ * JSObject *scrobj = JS_NewScriptObject(cx, script);
+ * JS_AddNamedRoot(cx, &scrobj, "scrobj");
+ * do {
+ * jsval result;
+ * JS_ExecuteScript(cx, global, script, &result);
+ * JS_GC();
+ * } while (!JSVAL_IS_BOOLEAN(result) || JSVAL_TO_BOOLEAN(result));
+ * JS_RemoveRoot(cx, &scrobj);
+ */
+extern JS_PUBLIC_API(JSObject *)
+JS_NewScriptObject(JSContext *cx, JSScript *script);
+
+/*
+ * Infallible getter for a script's object. If JS_NewScriptObject has not been
+ * called on script yet, the return value will be null.
+ */
+extern JS_PUBLIC_API(JSObject *)
+JS_GetScriptObject(JSScript *script);
+
+extern JS_PUBLIC_API(void)
+JS_DestroyScript(JSContext *cx, JSScript *script);
+
+extern JS_PUBLIC_API(JSFunction *)
+JS_CompileFunction(JSContext *cx, JSObject *obj, const char *name,
+ uintN nargs, const char **argnames,
+ const char *bytes, size_t length,
+ const char *filename, uintN lineno);
+
+extern JS_PUBLIC_API(JSFunction *)
+JS_CompileFunctionForPrincipals(JSContext *cx, JSObject *obj,
+ JSPrincipals *principals, const char *name,
+ uintN nargs, const char **argnames,
+ const char *bytes, size_t length,
+ const char *filename, uintN lineno);
+
+extern JS_PUBLIC_API(JSFunction *)
+JS_CompileUCFunction(JSContext *cx, JSObject *obj, const char *name,
+ uintN nargs, const char **argnames,
+ const jschar *chars, size_t length,
+ const char *filename, uintN lineno);
+
+extern JS_PUBLIC_API(JSFunction *)
+JS_CompileUCFunctionForPrincipals(JSContext *cx, JSObject *obj,
+ JSPrincipals *principals, const char *name,
+ uintN nargs, const char **argnames,
+ const jschar *chars, size_t length,
+ const char *filename, uintN lineno);
+
+extern JS_PUBLIC_API(JSString *)
+JS_DecompileScript(JSContext *cx, JSScript *script, const char *name,
+ uintN indent);
+
+/*
+ * API extension: OR this into indent to avoid pretty-printing the decompiled
+ * source resulting from JS_DecompileFunction{,Body}.
+ */
+#define JS_DONT_PRETTY_PRINT ((uintN)0x8000)
+
+extern JS_PUBLIC_API(JSString *)
+JS_DecompileFunction(JSContext *cx, JSFunction *fun, uintN indent);
+
+extern JS_PUBLIC_API(JSString *)
+JS_DecompileFunctionBody(JSContext *cx, JSFunction *fun, uintN indent);
+
+/*
+ * NB: JS_ExecuteScript, JS_ExecuteScriptPart, and the JS_Evaluate*Script*
+ * quadruplets all use the obj parameter as the initial scope chain header,
+ * the 'this' keyword value, and the variables object (ECMA parlance for where
+ * 'var' and 'function' bind names) of the execution context for script.
+ *
+ * Using obj as the variables object is problematic if obj's parent (which is
+ * the scope chain link; see JS_SetParent and JS_NewObject) is not null: in
+ * this case, variables created by 'var x = 0', e.g., go in obj, but variables
+ * created by assignment to an unbound id, 'x = 0', go in the last object on
+ * the scope chain linked by parent.
+ *
+ * ECMA calls that last scoping object the "global object", but note that many
+ * embeddings have several such objects. ECMA requires that "global code" be
+ * executed with the variables object equal to this global object. But these
+ * JS API entry points provide freedom to execute code against a "sub-global",
+ * i.e., a parented or scoped object, in which case the variables object will
+ * differ from the last object on the scope chain, resulting in confusing and
+ * non-ECMA explicit vs. implicit variable creation.
+ *
+ * Caveat embedders: unless you already depend on this buggy variables object
+ * binding behavior, you should call JS_SetOptions(cx, JSOPTION_VAROBJFIX) or
+ * JS_SetOptions(cx, JS_GetOptions(cx) | JSOPTION_VAROBJFIX) -- the latter if
+ * someone may have set other options on cx already -- for each context in the
+ * application, if you pass parented objects as the obj parameter, or may ever
+ * pass such objects in the future.
+ *
+ * Why a runtime option? The alternative is to add six or so new API entry
+ * points with signatures matching the following six, and that doesn't seem
+ * worth the code bloat cost. Such new entry points would probably have less
+ * obvious names, too, so would not tend to be used. The JS_SetOption call,
+ * OTOH, can be more easily hacked into existing code that does not depend on
+ * the bug; such code can continue to use the familiar JS_EvaluateScript,
+ * etc., entry points.
+ */
+extern JS_PUBLIC_API(JSBool)
+JS_ExecuteScript(JSContext *cx, JSObject *obj, JSScript *script, jsval *rval);
+
+/*
+ * Execute either the function-defining prolog of a script, or the script's
+ * main body, but not both.
+ */
+typedef enum JSExecPart { JSEXEC_PROLOG, JSEXEC_MAIN } JSExecPart;
+
+extern JS_PUBLIC_API(JSBool)
+JS_ExecuteScriptPart(JSContext *cx, JSObject *obj, JSScript *script,
+ JSExecPart part, jsval *rval);
+
+extern JS_PUBLIC_API(JSBool)
+JS_EvaluateScript(JSContext *cx, JSObject *obj,
+ const char *bytes, uintN length,
+ const char *filename, uintN lineno,
+ jsval *rval);
+
+extern JS_PUBLIC_API(JSBool)
+JS_EvaluateScriptForPrincipals(JSContext *cx, JSObject *obj,
+ JSPrincipals *principals,
+ const char *bytes, uintN length,
+ const char *filename, uintN lineno,
+ jsval *rval);
+
+extern JS_PUBLIC_API(JSBool)
+JS_EvaluateUCScript(JSContext *cx, JSObject *obj,
+ const jschar *chars, uintN length,
+ const char *filename, uintN lineno,
+ jsval *rval);
+
+extern JS_PUBLIC_API(JSBool)
+JS_EvaluateUCScriptForPrincipals(JSContext *cx, JSObject *obj,
+ JSPrincipals *principals,
+ const jschar *chars, uintN length,
+ const char *filename, uintN lineno,
+ jsval *rval);
+
+extern JS_PUBLIC_API(JSBool)
+JS_CallFunction(JSContext *cx, JSObject *obj, JSFunction *fun, uintN argc,
+ jsval *argv, jsval *rval);
+
+extern JS_PUBLIC_API(JSBool)
+JS_CallFunctionName(JSContext *cx, JSObject *obj, const char *name, uintN argc,
+ jsval *argv, jsval *rval);
+
+extern JS_PUBLIC_API(JSBool)
+JS_CallFunctionValue(JSContext *cx, JSObject *obj, jsval fval, uintN argc,
+ jsval *argv, jsval *rval);
+
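+/*
+ * Illustrative sketch (editorial addition): calling a script-defined function
+ * from native code by name. "onTick" and tick_count are arbitrary examples;
+ * root argv entries if they hold GC things rather than plain ints.
+ *
+ *   jsval argv[1], rval;
+ *   argv[0] = INT_TO_JSVAL(tick_count);
+ *   if (!JS_CallFunctionName(cx, global, "onTick", 1, argv, &rval))
+ *       ...the call failed or threw; see JS_IsExceptionPending below...
+ */
+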
+extern JS_PUBLIC_API(JSBranchCallback)
+JS_SetBranchCallback(JSContext *cx, JSBranchCallback cb);
+
+extern JS_PUBLIC_API(JSBool)
+JS_IsRunning(JSContext *cx);
+
+extern JS_PUBLIC_API(JSBool)
+JS_IsConstructing(JSContext *cx);
+
+/*
+ * Returns true if a script is executing and its current bytecode is a set
+ * (assignment) operation, even if there are native (no script) stack frames
+ * between the script and the caller to JS_IsAssigning.
+ */
+extern JS_FRIEND_API(JSBool)
+JS_IsAssigning(JSContext *cx);
+
+/*
+ * Set the second return value, which should be a string or int jsval that
+ * identifies a property in the returned object, to form an ECMA reference
+ * type value (obj, id). Only native methods can return reference types,
+ * and if the returned value is used on the left-hand side of an assignment
+ * op, the identified property will be set. If the returned value is used as
+ * an r-value, the interpreter just gets obj[id]'s value.
+ */
+extern JS_PUBLIC_API(void)
+JS_SetCallReturnValue2(JSContext *cx, jsval v);
+
+/*
+ * Saving and restoring frame chains.
+ *
+ * These two functions are used to set aside cx->fp while that frame is
+ * inactive. After a call to JS_SaveFrameChain, it looks as if there is no
+ * code running on cx. Before calling JS_RestoreFrameChain, cx's call stack
+ * must be balanced and all nested calls to JS_SaveFrameChain must have had
+ * matching JS_RestoreFrameChain calls.
+ *
+ * JS_SaveFrameChain deals with cx not having any code running on it. A null
+ * return does not signify an error and JS_RestoreFrameChain handles null
+ * frames.
+ */
+extern JS_PUBLIC_API(JSStackFrame *)
+JS_SaveFrameChain(JSContext *cx);
+
+extern JS_PUBLIC_API(void)
+JS_RestoreFrameChain(JSContext *cx, JSStackFrame *fp);
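+
+/*
+ * Illustrative sketch (editorial addition): hiding the active frame chain
+ * while running unrelated code on the same context, then restoring it.
+ *
+ *   JSStackFrame *fp = JS_SaveFrameChain(cx);
+ *   ...run other code against cx as if nothing were executing...
+ *   JS_RestoreFrameChain(cx, fp);       // a null fp is handled, per above
+ */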
+
+/************************************************************************/
+
+/*
+ * Strings.
+ *
+ * NB: JS_NewString takes ownership of bytes on success, avoiding a copy; but
+ * on error (signified by null return), it leaves bytes owned by the caller.
+ * So the caller must free bytes in the error case, if it has no use for them.
+ * In contrast, all the JS_New*StringCopy* functions do not take ownership of
+ * the character memory passed to them -- they copy it.
+ */
+extern JS_PUBLIC_API(JSString *)
+JS_NewString(JSContext *cx, char *bytes, size_t length);
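+
+/*
+ * Illustrative sketch (editorial addition): honoring the ownership rule
+ * above. The bytes come from the JS allocator here (JS_strdup uses
+ * JS_malloc) so the engine can free them later.
+ *
+ *   char *bytes = JS_strdup(cx, "hello");
+ *   JSString *str = bytes ? JS_NewString(cx, bytes, strlen(bytes)) : NULL;
+ *   if (bytes && !str)
+ *       JS_free(cx, bytes);             // engine did not take ownership
+ */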
+
+extern JS_PUBLIC_API(JSString *)
+JS_NewStringCopyN(JSContext *cx, const char *s, size_t n);
+
+extern JS_PUBLIC_API(JSString *)
+JS_NewStringCopyZ(JSContext *cx, const char *s);
+
+extern JS_PUBLIC_API(JSString *)
+JS_InternString(JSContext *cx, const char *s);
+
+extern JS_PUBLIC_API(JSString *)
+JS_NewUCString(JSContext *cx, jschar *chars, size_t length);
+
+extern JS_PUBLIC_API(JSString *)
+JS_NewUCStringCopyN(JSContext *cx, const jschar *s, size_t n);
+
+extern JS_PUBLIC_API(JSString *)
+JS_NewUCStringCopyZ(JSContext *cx, const jschar *s);
+
+extern JS_PUBLIC_API(JSString *)
+JS_InternUCStringN(JSContext *cx, const jschar *s, size_t length);
+
+extern JS_PUBLIC_API(JSString *)
+JS_InternUCString(JSContext *cx, const jschar *s);
+
+extern JS_PUBLIC_API(char *)
+JS_GetStringBytes(JSString *str);
+
+extern JS_PUBLIC_API(jschar *)
+JS_GetStringChars(JSString *str);
+
+extern JS_PUBLIC_API(size_t)
+JS_GetStringLength(JSString *str);
+
+extern JS_PUBLIC_API(intN)
+JS_CompareStrings(JSString *str1, JSString *str2);
+
+/*
+ * Mutable string support. A string's characters are never mutable in this JS
+ * implementation, but a growable string has a buffer that can be reallocated,
+ * and a dependent string is a substring of another (growable, dependent, or
+ * immutable) string. The direct data members of the (opaque to API clients)
+ * JSString struct may be changed in a single-threaded way for growable and
+ * dependent strings.
+ *
+ * Therefore mutable strings cannot be used by more than one thread at a time.
+ * You may call JS_MakeStringImmutable to convert the string from a mutable
+ * (growable or dependent) string to an immutable (and therefore thread-safe)
+ * string. The engine takes care of converting growable and dependent strings
+ * to immutable for you if you store strings in multi-threaded objects using
+ * JS_SetProperty or kindred API entry points.
+ *
+ * If you store a JSString pointer in a native data structure that is (safely)
+ * accessible to multiple threads, you must call JS_MakeStringImmutable before
+ * retiring the store.
+ */
+extern JS_PUBLIC_API(JSString *)
+JS_NewGrowableString(JSContext *cx, jschar *chars, size_t length);
+
+/*
+ * Create a dependent string, i.e., a string that owns no character storage,
+ * but that refers to a slice of another string's chars. Dependent strings
+ * are mutable by definition, so the thread safety comments above apply.
+ */
+extern JS_PUBLIC_API(JSString *)
+JS_NewDependentString(JSContext *cx, JSString *str, size_t start,
+ size_t length);
+
+/*
+ * Concatenate two strings, resulting in a new growable string. If you create
+ * the left string and pass it to JS_ConcatStrings on a single thread, try to
+ * use JS_NewGrowableString to create the left string -- doing so helps Concat
+ * avoid allocating a new buffer for the result and copying left's chars into
+ * the new buffer. See above for thread safety comments.
+ */
+extern JS_PUBLIC_API(JSString *)
+JS_ConcatStrings(JSContext *cx, JSString *left, JSString *right);
+
+/*
+ * Convert a dependent string into an independent one. This function does not
+ * change the string's mutability, so the thread safety comments above apply.
+ */
+extern JS_PUBLIC_API(const jschar *)
+JS_UndependString(JSContext *cx, JSString *str);
+
+/*
+ * Convert a mutable string (either growable or dependent) into an immutable,
+ * thread-safe one.
+ */
+extern JS_PUBLIC_API(JSBool)
+JS_MakeStringImmutable(JSContext *cx, JSString *str);
+
+/*
+ * Return JS_TRUE if C (char []) strings passed via the API and internally
+ * are UTF-8. The source must be compiled with JS_C_STRINGS_ARE_UTF8 defined
+ * to get UTF-8 support.
+ */
+JS_PUBLIC_API(JSBool)
+JS_CStringsAreUTF8();
+
+/*
+ * Character encoding support.
+ *
+ * For both JS_EncodeCharacters and JS_DecodeBytes, set *dstlenp to the size
+ * of the destination buffer before the call; on return, *dstlenp contains the
+ * number of bytes (JS_EncodeCharacters) or jschars (JS_DecodeBytes) actually
+ * stored. To determine the necessary destination buffer size, make a sizing
+ * call that passes NULL for dst.
+ *
+ * On errors, the functions report the error. In that case, *dstlenp contains
+ * the number of characters or bytes transferred so far. If cx is NULL, no
+ * error is reported on failure, and the functions simply return JS_FALSE.
+ *
+ * NB: Neither function stores an additional zero byte or jschar after the
+ * transcoded string.
+ *
+ * If the source has been compiled with the #define JS_C_STRINGS_ARE_UTF8 to
+ * enable UTF-8 interpretation of C char[] strings, then JS_EncodeCharacters
+ * encodes to UTF-8, and JS_DecodeBytes decodes from UTF-8, which may create
+ * additional errors if the character sequence is malformed. If UTF-8
+ * support is disabled, the functions deflate and inflate, respectively.
+ */
+JS_PUBLIC_API(JSBool)
+JS_EncodeCharacters(JSContext *cx, const jschar *src, size_t srclen, char *dst,
+ size_t *dstlenp);
+
+JS_PUBLIC_API(JSBool)
+JS_DecodeBytes(JSContext *cx, const char *src, size_t srclen, jschar *dst,
+ size_t *dstlenp);
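+
+/*
+ * Illustrative sketch (editorial addition): the two-call pattern described
+ * above, decoding bytes into a jschar buffer. Error handling is abbreviated.
+ *
+ *   size_t dstlen = 0;
+ *   if (!JS_DecodeBytes(cx, src, srclen, NULL, &dstlen))    // sizing call
+ *       return JS_FALSE;
+ *   dst = (jschar *) JS_malloc(cx, dstlen * sizeof(jschar));
+ *   if (!dst || !JS_DecodeBytes(cx, src, srclen, dst, &dstlen))
+ *       return JS_FALSE;
+ */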
+
+/************************************************************************/
+
+/*
+ * Locale specific string conversion and error message callbacks.
+ */
+struct JSLocaleCallbacks {
+ JSLocaleToUpperCase localeToUpperCase;
+ JSLocaleToLowerCase localeToLowerCase;
+ JSLocaleCompare localeCompare;
+ JSLocaleToUnicode localeToUnicode;
+ JSErrorCallback localeGetErrorMessage;
+};
+
+/*
+ * Establish locale callbacks. The pointer must persist as long as the
+ * JSContext. Passing NULL restores the default behaviour.
+ */
+extern JS_PUBLIC_API(void)
+JS_SetLocaleCallbacks(JSContext *cx, JSLocaleCallbacks *callbacks);
+
+/*
+ * Return the address of the current locale callbacks struct, which may
+ * be NULL.
+ */
+extern JS_PUBLIC_API(JSLocaleCallbacks *)
+JS_GetLocaleCallbacks(JSContext *cx);
+
+/************************************************************************/
+
+/*
+ * Error reporting.
+ */
+
+/*
+ * Report an exception represented by the sprintf-like conversion of format
+ * and its arguments. This exception message string is passed to a pre-set
+ * JSErrorReporter function (set by JS_SetErrorReporter; see jspubtd.h for
+ * the JSErrorReporter typedef).
+ */
+extern JS_PUBLIC_API(void)
+JS_ReportError(JSContext *cx, const char *format, ...);
+
+/*
+ * Use an errorNumber to retrieve the format string, args are char *
+ */
+extern JS_PUBLIC_API(void)
+JS_ReportErrorNumber(JSContext *cx, JSErrorCallback errorCallback,
+ void *userRef, const uintN errorNumber, ...);
+
+/*
+ * Use an errorNumber to retrieve the format string, args are jschar *
+ */
+extern JS_PUBLIC_API(void)
+JS_ReportErrorNumberUC(JSContext *cx, JSErrorCallback errorCallback,
+ void *userRef, const uintN errorNumber, ...);
+
+/*
+ * As above, but report a warning instead (JSREPORT_IS_WARNING(report.flags)).
+ * Return true if there was no error trying to issue the warning, and if the
+ * warning was not converted into an error due to the JSOPTION_WERROR option
+ * being set, false otherwise.
+ */
+extern JS_PUBLIC_API(JSBool)
+JS_ReportWarning(JSContext *cx, const char *format, ...);
+
+extern JS_PUBLIC_API(JSBool)
+JS_ReportErrorFlagsAndNumber(JSContext *cx, uintN flags,
+ JSErrorCallback errorCallback, void *userRef,
+ const uintN errorNumber, ...);
+
+extern JS_PUBLIC_API(JSBool)
+JS_ReportErrorFlagsAndNumberUC(JSContext *cx, uintN flags,
+ JSErrorCallback errorCallback, void *userRef,
+ const uintN errorNumber, ...);
+
+/*
+ * Complain when out of memory.
+ */
+extern JS_PUBLIC_API(void)
+JS_ReportOutOfMemory(JSContext *cx);
+
+struct JSErrorReport {
+ const char *filename; /* source file name, URL, etc., or null */
+ uintN lineno; /* source line number */
+ const char *linebuf; /* offending source line without final \n */
+ const char *tokenptr; /* pointer to error token in linebuf */
+ const jschar *uclinebuf; /* unicode (original) line buffer */
+ const jschar *uctokenptr; /* unicode (original) token pointer */
+ uintN flags; /* error/warning, etc. */
+ uintN errorNumber; /* the error number, e.g. see js.msg */
+ const jschar *ucmessage; /* the (default) error message */
+ const jschar **messageArgs; /* arguments for the error message */
+};
+
+/*
+ * JSErrorReport flag values. These may be freely composed.
+ */
+#define JSREPORT_ERROR 0x0 /* pseudo-flag for default case */
+#define JSREPORT_WARNING 0x1 /* reported via JS_ReportWarning */
+#define JSREPORT_EXCEPTION 0x2 /* exception was thrown */
+#define JSREPORT_STRICT 0x4 /* error or warning due to strict option */
+
+/*
+ * If JSREPORT_EXCEPTION is set, then a JavaScript-catchable exception
+ * has been thrown for this runtime error, and the host should ignore it.
+ * Exception-aware hosts should also check for JS_IsExceptionPending if
+ * JS_ExecuteScript returns failure, and signal or propagate the exception, as
+ * appropriate.
+ */
+#define JSREPORT_IS_WARNING(flags) (((flags) & JSREPORT_WARNING) != 0)
+#define JSREPORT_IS_EXCEPTION(flags) (((flags) & JSREPORT_EXCEPTION) != 0)
+#define JSREPORT_IS_STRICT(flags) (((flags) & JSREPORT_STRICT) != 0)
+
+extern JS_PUBLIC_API(JSErrorReporter)
+JS_SetErrorReporter(JSContext *cx, JSErrorReporter er);
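+
+/*
+ * Illustrative sketch (editorial addition): a minimal error reporter an
+ * embedding might install. The output format is arbitrary.
+ *
+ *   static void
+ *   my_reporter(JSContext *cx, const char *message, JSErrorReport *report)
+ *   {
+ *       fprintf(stderr, "%s:%u: %s%s\n",
+ *               report->filename ? report->filename : "<no filename>",
+ *               report->lineno,
+ *               JSREPORT_IS_WARNING(report->flags) ? "warning: " : "",
+ *               message);
+ *   }
+ *
+ *   JS_SetErrorReporter(cx, my_reporter);
+ */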
+
+/************************************************************************/
+
+/*
+ * Regular Expressions.
+ */
+#define JSREG_FOLD 0x01 /* fold uppercase to lowercase */
+#define JSREG_GLOB 0x02 /* global exec, creates array of matches */
+#define JSREG_MULTILINE 0x04 /* treat ^ and $ as begin and end of line */
+
+extern JS_PUBLIC_API(JSObject *)
+JS_NewRegExpObject(JSContext *cx, char *bytes, size_t length, uintN flags);
+
+extern JS_PUBLIC_API(JSObject *)
+JS_NewUCRegExpObject(JSContext *cx, jschar *chars, size_t length, uintN flags);
+
+extern JS_PUBLIC_API(void)
+JS_SetRegExpInput(JSContext *cx, JSString *input, JSBool multiline);
+
+extern JS_PUBLIC_API(void)
+JS_ClearRegExpStatics(JSContext *cx);
+
+extern JS_PUBLIC_API(void)
+JS_ClearRegExpRoots(JSContext *cx);
+
+/* TODO: compile, exec, get/set other statics... */
+
+/************************************************************************/
+
+extern JS_PUBLIC_API(JSBool)
+JS_IsExceptionPending(JSContext *cx);
+
+extern JS_PUBLIC_API(JSBool)
+JS_GetPendingException(JSContext *cx, jsval *vp);
+
+extern JS_PUBLIC_API(void)
+JS_SetPendingException(JSContext *cx, jsval v);
+
+extern JS_PUBLIC_API(void)
+JS_ClearPendingException(JSContext *cx);
+
+extern JS_PUBLIC_API(JSBool)
+JS_ReportPendingException(JSContext *cx);
+
+/*
+ * Save the current exception state. This takes a snapshot of cx's current
+ * exception state without making any change to that state.
+ *
+ * The returned state pointer MUST be passed later to JS_RestoreExceptionState
+ * (to restore that saved state, overriding any more recent state) or else to
+ * JS_DropExceptionState (to free the state struct in case it is not correct
+ * or desirable to restore it). Both Restore and Drop free the state struct,
+ * so callers must stop using the pointer returned from Save after calling the
+ * Restore or Drop API.
+ */
+extern JS_PUBLIC_API(JSExceptionState *)
+JS_SaveExceptionState(JSContext *cx);
+
+extern JS_PUBLIC_API(void)
+JS_RestoreExceptionState(JSContext *cx, JSExceptionState *state);
+
+extern JS_PUBLIC_API(void)
+JS_DropExceptionState(JSContext *cx, JSExceptionState *state);
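+
+/*
+ * Illustrative sketch (editorial addition): preserving a pending exception
+ * across a nested evaluation whose own errors the embedding wants to discard.
+ * bytes, length and rval are hypothetical.
+ *
+ *   JSExceptionState *state = JS_SaveExceptionState(cx);
+ *   JS_EvaluateScript(cx, obj, bytes, length, "nested", 1, &rval);
+ *   JS_RestoreExceptionState(cx, state);    // frees state, per above
+ */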
+
+/*
+ * If the given value is an exception object that originated from an error,
+ * the exception will contain an error report struct, and this API will return
+ * the address of that struct. Otherwise, it returns NULL. The lifetime of
+ * the error report struct that might be returned is the same as the lifetime
+ * of the exception object.
+ */
+extern JS_PUBLIC_API(JSErrorReport *)
+JS_ErrorFromException(JSContext *cx, jsval v);
+
+/*
+ * Given a reported error's message and JSErrorReport struct pointer, throw
+ * the corresponding exception on cx.
+ */
+extern JS_PUBLIC_API(JSBool)
+JS_ThrowReportedError(JSContext *cx, const char *message,
+ JSErrorReport *reportp);
+
+#ifdef JS_THREADSAFE
+
+/*
+ * Associate the current thread with the given context. This is done
+ * implicitly by JS_NewContext.
+ *
+ * Returns the old thread id for this context, which should be treated as
+ * an opaque value. This value is provided for comparison to 0, which
+ * indicates that ClearContextThread has been called on this context
+ * since the last SetContextThread, or non-0, which indicates the opposite.
+ */
+extern JS_PUBLIC_API(jsword)
+JS_GetContextThread(JSContext *cx);
+
+extern JS_PUBLIC_API(jsword)
+JS_SetContextThread(JSContext *cx);
+
+extern JS_PUBLIC_API(jsword)
+JS_ClearContextThread(JSContext *cx);
+
+#endif /* JS_THREADSAFE */
+
+/************************************************************************/
+
+JS_END_EXTERN_C
+
+#endif /* jsapi_h___ */
diff --git a/third_party/js-1.7/jsarena.c b/third_party/js-1.7/jsarena.c
new file mode 100644
index 0000000..ef6ccd1
--- /dev/null
+++ b/third_party/js-1.7/jsarena.c
@@ -0,0 +1,502 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * Lifetime-based fast allocation, inspired by much prior art, including
+ * "Fast Allocation and Deallocation of Memory Based on Object Lifetimes"
+ * David R. Hanson, Software -- Practice and Experience, Vol. 20(1).
+ */
+#include "jsstddef.h"
+#include <stdlib.h>
+#include <string.h>
+#include "jstypes.h"
+#include "jsbit.h"
+#include "jsarena.h" /* Added by JSIFY */
+#include "jsutil.h" /* Added by JSIFY */
+
+#ifdef JS_ARENAMETER
+static JSArenaStats *arena_stats_list;
+
+#define COUNT(pool,what) (pool)->stats.what++
+#else
+#define COUNT(pool,what) /* nothing */
+#endif
+
+#define JS_ARENA_DEFAULT_ALIGN sizeof(double)
+
+JS_PUBLIC_API(void)
+JS_InitArenaPool(JSArenaPool *pool, const char *name, size_t size, size_t align)
+{
+ if (align == 0)
+ align = JS_ARENA_DEFAULT_ALIGN;
+ pool->mask = JS_BITMASK(JS_CeilingLog2(align));
+ pool->first.next = NULL;
+ pool->first.base = pool->first.avail = pool->first.limit =
+ JS_ARENA_ALIGN(pool, &pool->first + 1);
+ pool->current = &pool->first;
+ pool->arenasize = size;
+#ifdef JS_ARENAMETER
+ memset(&pool->stats, 0, sizeof pool->stats);
+ pool->stats.name = strdup(name);
+ pool->stats.next = arena_stats_list;
+ arena_stats_list = &pool->stats;
+#endif
+}
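+
+/*
+ * Illustrative usage sketch (editorial addition, not original code): a
+ * typical pool lifetime using the jsarena.h macros built on the functions
+ * in this file.
+ *
+ *   JSArenaPool pool;
+ *   void *mark, *p;
+ *
+ *   JS_InitArenaPool(&pool, "temp", 1024, sizeof(double));
+ *   mark = JS_ARENA_MARK(&pool);
+ *   JS_ARENA_ALLOCATE(p, &pool, 64);    // p is null on malloc failure
+ *   ...use p...
+ *   JS_ARENA_RELEASE(&pool, mark);      // frees everything after mark
+ *   JS_FinishArenaPool(&pool);
+ */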
+
+/*
+ * An allocation that consumes more than pool->arenasize also has a header
+ * pointing back to its previous arena's next member. This header is not
+ * included in [a->base, a->limit), so its space can't be wrongly claimed.
+ *
+ * As the header is a pointer, it must be well-aligned. If pool->mask is
+ * greater than or equal to POINTER_MASK, the header just preceding a->base
+ * for an oversized arena a is well-aligned, because a->base is well-aligned.
+ * However, we may need to add more space to pad the JSArena ** back-pointer
+ * so that it lies just behind a->base, because a might not be aligned such
+ * that (jsuword)(a + 1) is on a pointer boundary.
+ *
+ * By how much must we pad? Let M be the alignment modulus for pool and P
+ * the modulus for a pointer. Given M >= P, the base of an oversized arena
+ * that satisfies M is well-aligned for P.
+ *
+ * On the other hand, if M < P, we must include enough space in the header
+ * size to align the back-pointer on a P boundary so that it can be found by
+ * subtracting P from a->base. This means a->base must be on a P boundary,
+ * even though subsequent allocations from a may be aligned on a lesser (M)
+ * boundary. Given powers of two M and P as above, the extra space needed
+ * when M < P is P-M or POINTER_MASK - pool->mask.
+ *
+ * The size of a header including padding is given by the HEADER_SIZE macro,
+ * below, for any pool (for any value of M).
+ *
+ * The mask to align a->base for any pool is (pool->mask | POINTER_MASK), or
+ * HEADER_BASE_MASK(pool).
+ *
+ * PTR_TO_HEADER computes the address of the back-pointer, given an oversized
+ * allocation at p. By definition, p must be a->base for the arena a that
+ * contains p. GET_HEADER and SET_HEADER operate on an oversized arena a, in
+ * the case of SET_HEADER with back-pointer ap.
+ */
+#define POINTER_MASK ((jsuword)(JS_ALIGN_OF_POINTER - 1))
+#define HEADER_SIZE(pool) (sizeof(JSArena **) \
+ + (((pool)->mask < POINTER_MASK) \
+ ? POINTER_MASK - (pool)->mask \
+ : 0))
+#define HEADER_BASE_MASK(pool) ((pool)->mask | POINTER_MASK)
+#define PTR_TO_HEADER(pool,p) (JS_ASSERT(((jsuword)(p) \
+ & HEADER_BASE_MASK(pool)) \
+ == 0), \
+ (JSArena ***)(p) - 1)
+#define GET_HEADER(pool,a) (*PTR_TO_HEADER(pool, (a)->base))
+#define SET_HEADER(pool,a,ap) (*PTR_TO_HEADER(pool, (a)->base) = (ap))
+
+JS_PUBLIC_API(void *)
+JS_ArenaAllocate(JSArenaPool *pool, size_t nb)
+{
+ JSArena **ap, *a, *b;
+ jsuword extra, hdrsz, gross;
+ void *p;
+
+ /*
+ * Search pool from current forward till we find or make enough space.
+ *
+ * NB: subtract nb from a->limit in the loop condition, instead of adding
+ * nb to a->avail, to avoid overflowing a 32-bit address space (possible
+ * when running a 32-bit program on a 64-bit system where the kernel maps
+ * the heap up against the top of the 32-bit address space).
+ *
+ * Thanks to Juergen Kreileder <jk@blackdown.de>, who brought this up in
+ * https://bugzilla.mozilla.org/show_bug.cgi?id=279273.
+ */
+ JS_ASSERT((nb & pool->mask) == 0);
+ for (a = pool->current; nb > a->limit || a->avail > a->limit - nb;
+ pool->current = a) {
+ ap = &a->next;
+ if (!*ap) {
+ /* Not enough space in pool, so we must malloc. */
+ extra = (nb > pool->arenasize) ? HEADER_SIZE(pool) : 0;
+ hdrsz = sizeof *a + extra + pool->mask;
+ gross = hdrsz + JS_MAX(nb, pool->arenasize);
+ if (gross < nb)
+ return NULL;
+ b = (JSArena *) malloc(gross);
+ if (!b)
+ return NULL;
+ b->next = NULL;
+ b->limit = (jsuword)b + gross;
+ JS_COUNT_ARENA(pool,++);
+ COUNT(pool, nmallocs);
+
+ /* If oversized, store ap in the header, just before a->base. */
+ *ap = a = b;
+ JS_ASSERT(gross <= JS_UPTRDIFF(a->limit, a));
+ if (extra) {
+ a->base = a->avail =
+ ((jsuword)a + hdrsz) & ~HEADER_BASE_MASK(pool);
+ SET_HEADER(pool, a, ap);
+ } else {
+ a->base = a->avail = JS_ARENA_ALIGN(pool, a + 1);
+ }
+ continue;
+ }
+ a = *ap; /* move to next arena */
+ }
+
+ p = (void *)a->avail;
+ a->avail += nb;
+ JS_ASSERT(a->base <= a->avail && a->avail <= a->limit);
+ return p;
+}
+
+JS_PUBLIC_API(void *)
+JS_ArenaRealloc(JSArenaPool *pool, void *p, size_t size, size_t incr)
+{
+ JSArena **ap, *a, *b;
+ jsuword boff, aoff, extra, hdrsz, gross;
+
+ /*
+ * Use the oversized-single-allocation header to avoid searching for ap.
+ * See JS_ArenaAllocate, the SET_HEADER call.
+ */
+ if (size > pool->arenasize) {
+ ap = *PTR_TO_HEADER(pool, p);
+ a = *ap;
+ } else {
+ ap = &pool->first.next;
+ while ((a = *ap) != pool->current)
+ ap = &a->next;
+ }
+
+ JS_ASSERT(a->base == (jsuword)p);
+ boff = JS_UPTRDIFF(a->base, a);
+ aoff = JS_ARENA_ALIGN(pool, size + incr);
+ JS_ASSERT(aoff > pool->arenasize);
+ extra = HEADER_SIZE(pool); /* oversized header holds ap */
+ hdrsz = sizeof *a + extra + pool->mask; /* header and alignment slop */
+ gross = hdrsz + aoff;
+ JS_ASSERT(gross > aoff);
+ a = (JSArena *) realloc(a, gross);
+ if (!a)
+ return NULL;
+#ifdef JS_ARENAMETER
+ pool->stats.nreallocs++;
+#endif
+
+ if (a != *ap) {
+ /* Oops, realloc moved the allocation: update other pointers to a. */
+ if (pool->current == *ap)
+ pool->current = a;
+ b = a->next;
+ if (b && b->avail - b->base > pool->arenasize) {
+ JS_ASSERT(GET_HEADER(pool, b) == &(*ap)->next);
+ SET_HEADER(pool, b, &a->next);
+ }
+
+ /* Now update *ap, the next link of the arena before a. */
+ *ap = a;
+ }
+
+ a->base = ((jsuword)a + hdrsz) & ~HEADER_BASE_MASK(pool);
+ a->limit = (jsuword)a + gross;
+ a->avail = a->base + aoff;
+ JS_ASSERT(a->base <= a->avail && a->avail <= a->limit);
+
+ /* Check whether realloc aligned differently, and copy if necessary. */
+ if (boff != JS_UPTRDIFF(a->base, a))
+ memmove((void *)a->base, (char *)a + boff, size);
+
+ /* Store ap in the oversized-load arena header. */
+ SET_HEADER(pool, a, ap);
+ return (void *)a->base;
+}
+
+JS_PUBLIC_API(void *)
+JS_ArenaGrow(JSArenaPool *pool, void *p, size_t size, size_t incr)
+{
+ void *newp;
+
+ /*
+ * If p points to an oversized allocation, it owns an entire arena, so we
+ * can simply realloc the arena.
+ */
+ if (size > pool->arenasize)
+ return JS_ArenaRealloc(pool, p, size, incr);
+
+ JS_ARENA_ALLOCATE(newp, pool, size + incr);
+ if (newp)
+ memcpy(newp, p, size);
+ return newp;
+}
+
+/*
+ * Free tail arenas linked after head, which may not be the true list head.
+ * Reset pool->current to point to head in case it pointed at a tail arena.
+ */
+static void
+FreeArenaList(JSArenaPool *pool, JSArena *head)
+{
+ JSArena **ap, *a;
+
+ ap = &head->next;
+ a = *ap;
+ if (!a)
+ return;
+
+#ifdef DEBUG
+ do {
+ JS_ASSERT(a->base <= a->avail && a->avail <= a->limit);
+ a->avail = a->base;
+ JS_CLEAR_UNUSED(a);
+ } while ((a = a->next) != NULL);
+ a = *ap;
+#endif
+
+ do {
+ *ap = a->next;
+ JS_CLEAR_ARENA(a);
+ JS_COUNT_ARENA(pool,--);
+ free(a);
+ } while ((a = *ap) != NULL);
+
+ pool->current = head;
+}
+
+JS_PUBLIC_API(void)
+JS_ArenaRelease(JSArenaPool *pool, char *mark)
+{
+ JSArena *a;
+
+ for (a = &pool->first; a; a = a->next) {
+ JS_ASSERT(a->base <= a->avail && a->avail <= a->limit);
+
+ if (JS_UPTRDIFF(mark, a->base) <= JS_UPTRDIFF(a->avail, a->base)) {
+ a->avail = JS_ARENA_ALIGN(pool, mark);
+ JS_ASSERT(a->avail <= a->limit);
+ FreeArenaList(pool, a);
+ return;
+ }
+ }
+}
+
+JS_PUBLIC_API(void)
+JS_ArenaFreeAllocation(JSArenaPool *pool, void *p, size_t size)
+{
+ JSArena **ap, *a, *b;
+ jsuword q;
+
+ /*
+ * If the allocation is oversized, it consumes an entire arena, and it has
+ * a header just before the allocation pointing back to its predecessor's
+ * next member. Otherwise, we have to search pool for a.
+ */
+ if (size > pool->arenasize) {
+ ap = *PTR_TO_HEADER(pool, p);
+ a = *ap;
+ } else {
+ q = (jsuword)p + size;
+ q = JS_ARENA_ALIGN(pool, q);
+ ap = &pool->first.next;
+ while ((a = *ap) != NULL) {
+ JS_ASSERT(a->base <= a->avail && a->avail <= a->limit);
+
+ if (a->avail == q) {
+ /*
+ * If a is consumed by the allocation at p, we can free it to
+ * the malloc heap.
+ */
+ if (a->base == (jsuword)p)
+ break;
+
+ /*
+ * We can't free a, but we can "retract" its avail cursor --
+                 * whether or not there are others after it in the pool.
+ */
+ a->avail = (jsuword)p;
+ return;
+ }
+ ap = &a->next;
+ }
+ }
+
+ /*
+ * At this point, a is doomed, so ensure that pool->current doesn't point
+ * at it. We must preserve LIFO order of mark/release cursors, so we use
+ * the oversized-allocation arena's back pointer (or if not oversized, we
+ * use the result of searching the entire pool) to compute the address of
+ * the arena that precedes a.
+ */
+ if (pool->current == a)
+ pool->current = (JSArena *) ((char *)ap - offsetof(JSArena, next));
+
+ /*
+ * This is a non-LIFO deallocation, so take care to fix up a->next's back
+ * pointer in its header, if a->next is oversized.
+ */
+ *ap = b = a->next;
+ if (b && b->avail - b->base > pool->arenasize) {
+ JS_ASSERT(GET_HEADER(pool, b) == &a->next);
+ SET_HEADER(pool, b, ap);
+ }
+ JS_CLEAR_ARENA(a);
+ JS_COUNT_ARENA(pool,--);
+ free(a);
+}
+
+JS_PUBLIC_API(void)
+JS_FreeArenaPool(JSArenaPool *pool)
+{
+ FreeArenaList(pool, &pool->first);
+ COUNT(pool, ndeallocs);
+}
+
+JS_PUBLIC_API(void)
+JS_FinishArenaPool(JSArenaPool *pool)
+{
+ FreeArenaList(pool, &pool->first);
+#ifdef JS_ARENAMETER
+ {
+ JSArenaStats *stats, **statsp;
+
+ if (pool->stats.name)
+ free(pool->stats.name);
+ for (statsp = &arena_stats_list; (stats = *statsp) != 0;
+ statsp = &stats->next) {
+ if (stats == &pool->stats) {
+ *statsp = stats->next;
+ return;
+ }
+ }
+ }
+#endif
+}
+
+JS_PUBLIC_API(void)
+JS_ArenaFinish()
+{
+}
+
+JS_PUBLIC_API(void)
+JS_ArenaShutDown(void)
+{
+}
+
+#ifdef JS_ARENAMETER
+JS_PUBLIC_API(void)
+JS_ArenaCountAllocation(JSArenaPool *pool, size_t nb)
+{
+ pool->stats.nallocs++;
+ pool->stats.nbytes += nb;
+ if (nb > pool->stats.maxalloc)
+ pool->stats.maxalloc = nb;
+ pool->stats.variance += nb * nb;
+}
+
+JS_PUBLIC_API(void)
+JS_ArenaCountInplaceGrowth(JSArenaPool *pool, size_t size, size_t incr)
+{
+ pool->stats.ninplace++;
+}
+
+JS_PUBLIC_API(void)
+JS_ArenaCountGrowth(JSArenaPool *pool, size_t size, size_t incr)
+{
+ pool->stats.ngrows++;
+ pool->stats.nbytes += incr;
+ pool->stats.variance -= size * size;
+ size += incr;
+ if (size > pool->stats.maxalloc)
+ pool->stats.maxalloc = size;
+ pool->stats.variance += size * size;
+}
+
+JS_PUBLIC_API(void)
+JS_ArenaCountRelease(JSArenaPool *pool, char *mark)
+{
+ pool->stats.nreleases++;
+}
+
+JS_PUBLIC_API(void)
+JS_ArenaCountRetract(JSArenaPool *pool, char *mark)
+{
+ pool->stats.nfastrels++;
+}
+
+#include <math.h>
+#include <stdio.h>
+
+JS_PUBLIC_API(void)
+JS_DumpArenaStats(FILE *fp)
+{
+ JSArenaStats *stats;
+ uint32 nallocs, nbytes;
+ double mean, variance, sigma;
+
+ for (stats = arena_stats_list; stats; stats = stats->next) {
+ nallocs = stats->nallocs;
+ if (nallocs != 0) {
+ nbytes = stats->nbytes;
+ mean = (double)nbytes / nallocs;
+ variance = stats->variance * nallocs - nbytes * nbytes;
+ if (variance < 0 || nallocs == 1)
+ variance = 0;
+ else
+ variance /= nallocs * (nallocs - 1);
+ sigma = sqrt(variance);
+ } else {
+ mean = variance = sigma = 0;
+ }
+
+ fprintf(fp, "\n%s allocation statistics:\n", stats->name);
+ fprintf(fp, " number of arenas: %u\n", stats->narenas);
+ fprintf(fp, " number of allocations: %u\n", stats->nallocs);
+ fprintf(fp, " number of free arena reclaims: %u\n", stats->nreclaims);
+ fprintf(fp, " number of malloc calls: %u\n", stats->nmallocs);
+ fprintf(fp, " number of deallocations: %u\n", stats->ndeallocs);
+ fprintf(fp, " number of allocation growths: %u\n", stats->ngrows);
+ fprintf(fp, " number of in-place growths: %u\n", stats->ninplace);
+ fprintf(fp, " number of realloc'ing growths: %u\n", stats->nreallocs);
+ fprintf(fp, "number of released allocations: %u\n", stats->nreleases);
+ fprintf(fp, " number of fast releases: %u\n", stats->nfastrels);
+ fprintf(fp, " total bytes allocated: %u\n", stats->nbytes);
+ fprintf(fp, " mean allocation size: %g\n", mean);
+ fprintf(fp, " standard deviation: %g\n", sigma);
+ fprintf(fp, " maximum allocation size: %u\n", stats->maxalloc);
+ }
+}
+#endif /* JS_ARENAMETER */
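JS_ArenaCountAllocation and JS_ArenaCountGrowth above maintain only running totals -- an allocation count, a byte sum, and a sum of squared sizes in stats.variance -- and JS_DumpArenaStats turns those into the mean and sample standard deviation it prints. The standalone sketch below reproduces the same arithmetic outside the engine so it is easy to verify; the struct and function names are illustrative only, and doubles are used throughout to sidestep the 32-bit wraparound the real counters tolerate.

#include <math.h>
#include <stdio.h>

/* Minimal stand-ins for the three accumulators JS_ARENAMETER maintains. */
struct alloc_stats {
    unsigned nallocs;   /* number of allocations           */
    double   nbytes;    /* sum of allocation sizes         */
    double   sumsq;     /* sum of squared allocation sizes */
};

static void count_allocation(struct alloc_stats *s, size_t nb)
{
    s->nallocs++;
    s->nbytes += (double) nb;
    s->sumsq  += (double) nb * (double) nb;   /* same role as stats.variance */
}

/* mean = sum/n, sample variance = (n*sum_sq - sum*sum) / (n*(n-1)) */
static void dump(const struct alloc_stats *s)
{
    double mean = 0, variance = 0;
    if (s->nallocs != 0) {
        mean = s->nbytes / s->nallocs;
        variance = s->sumsq * s->nallocs - s->nbytes * s->nbytes;
        if (variance < 0 || s->nallocs == 1)
            variance = 0;
        else
            variance /= (double) s->nallocs * (s->nallocs - 1);
    }
    printf("mean %g, sigma %g\n", mean, sqrt(variance));
}

int main(void)
{
    struct alloc_stats s = {0, 0, 0};
    size_t sizes[] = {16, 32, 32, 64};
    for (size_t i = 0; i < sizeof sizes / sizeof sizes[0]; i++)
        count_allocation(&s, sizes[i]);
    dump(&s);   /* mean 36, sigma ~20.1 */
    return 0;
}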
diff --git a/third_party/js-1.7/jsarena.h b/third_party/js-1.7/jsarena.h
new file mode 100644
index 0000000..8be15d0
--- /dev/null
+++ b/third_party/js-1.7/jsarena.h
@@ -0,0 +1,303 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsarena_h___
+#define jsarena_h___
+/*
+ * Lifetime-based fast allocation, inspired by much prior art, including
+ * "Fast Allocation and Deallocation of Memory Based on Object Lifetimes"
+ * David R. Hanson, Software -- Practice and Experience, Vol. 20(1).
+ *
+ * Also supports LIFO allocation (JS_ARENA_MARK/JS_ARENA_RELEASE).
+ */
+#include <stdlib.h>
+#include "jstypes.h"
+#include "jscompat.h"
+
+JS_BEGIN_EXTERN_C
+
+typedef struct JSArena JSArena;
+typedef struct JSArenaPool JSArenaPool;
+
+struct JSArena {
+ JSArena *next; /* next arena for this lifetime */
+ jsuword base; /* aligned base address, follows this header */
+ jsuword limit; /* one beyond last byte in arena */
+ jsuword avail; /* points to next available byte */
+};
+
+#ifdef JS_ARENAMETER
+typedef struct JSArenaStats JSArenaStats;
+
+struct JSArenaStats {
+ JSArenaStats *next; /* next in arenaStats list */
+ char *name; /* name for debugging */
+ uint32 narenas; /* number of arenas in pool */
+ uint32 nallocs; /* number of JS_ARENA_ALLOCATE() calls */
+ uint32 nmallocs; /* number of malloc() calls */
+ uint32 ndeallocs; /* number of lifetime deallocations */
+ uint32 ngrows; /* number of JS_ARENA_GROW() calls */
+ uint32 ninplace; /* number of in-place growths */
+ uint32 nreallocs; /* number of arena grow extending reallocs */
+ uint32 nreleases; /* number of JS_ARENA_RELEASE() calls */
+ uint32 nfastrels; /* number of "fast path" releases */
+ size_t nbytes; /* total bytes allocated */
+ size_t maxalloc; /* maximum allocation size in bytes */
+ double variance; /* size variance accumulator */
+};
+#endif
+
+struct JSArenaPool {
+ JSArena first; /* first arena in pool list */
+ JSArena *current; /* arena from which to allocate space */
+ size_t arenasize; /* net exact size of a new arena */
+ jsuword mask; /* alignment mask (power-of-2 - 1) */
+#ifdef JS_ARENAMETER
+ JSArenaStats stats;
+#endif
+};
+
+/*
+ * If the including .c file uses only one power-of-2 alignment, it may define
+ * JS_ARENA_CONST_ALIGN_MASK to the alignment mask and save a few instructions
+ * per ALLOCATE and GROW.
+ */
+#ifdef JS_ARENA_CONST_ALIGN_MASK
+#define JS_ARENA_ALIGN(pool, n) (((jsuword)(n) + JS_ARENA_CONST_ALIGN_MASK) \
+ & ~(jsuword)JS_ARENA_CONST_ALIGN_MASK)
+
+#define JS_INIT_ARENA_POOL(pool, name, size) \
+ JS_InitArenaPool(pool, name, size, JS_ARENA_CONST_ALIGN_MASK + 1)
+#else
+#define JS_ARENA_ALIGN(pool, n) (((jsuword)(n) + (pool)->mask) & ~(pool)->mask)
+#endif
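JS_ARENA_ALIGN rounds a value up to the pool's alignment with the usual power-of-two trick: add the mask (alignment minus one, per the JSArenaPool comment above) and clear the low bits. A self-contained check of that identity, using uintptr_t in place of jsuword:

#include <assert.h>
#include <stdint.h>

/* Round n up to a multiple of align, where align is a power of two. */
static uintptr_t align_up(uintptr_t n, uintptr_t align)
{
    uintptr_t mask = align - 1;          /* e.g. 8 -> 0x7 */
    return (n + mask) & ~mask;
}

int main(void)
{
    assert(align_up(0, 8)  == 0);
    assert(align_up(1, 8)  == 8);
    assert(align_up(8, 8)  == 8);
    assert(align_up(13, 8) == 16);
    return 0;
}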
+
+#define JS_ARENA_ALLOCATE(p, pool, nb) \
+ JS_ARENA_ALLOCATE_CAST(p, void *, pool, nb)
+
+#define JS_ARENA_ALLOCATE_TYPE(p, type, pool) \
+ JS_ARENA_ALLOCATE_COMMON(p, type *, pool, sizeof(type), 0)
+
+#define JS_ARENA_ALLOCATE_CAST(p, type, pool, nb) \
+ JS_ARENA_ALLOCATE_COMMON(p, type, pool, nb, _nb > _a->limit)
+
+/*
+ * NB: In JS_ARENA_ALLOCATE_CAST and JS_ARENA_GROW_CAST, always subtract _nb
+ * from a->limit rather than adding _nb to _p, to avoid overflowing a 32-bit
+ * address space (possible when running a 32-bit program on a 64-bit system
+ * where the kernel maps the heap up against the top of the 32-bit address
+ * space).
+ *
+ * Thanks to Juergen Kreileder <jk@blackdown.de>, who brought this up in
+ * https://bugzilla.mozilla.org/show_bug.cgi?id=279273.
+ */
+#define JS_ARENA_ALLOCATE_COMMON(p, type, pool, nb, guard) \
+ JS_BEGIN_MACRO \
+ JSArena *_a = (pool)->current; \
+ size_t _nb = JS_ARENA_ALIGN(pool, nb); \
+ jsuword _p = _a->avail; \
+ if ((guard) || _p > _a->limit - _nb) \
+ _p = (jsuword)JS_ArenaAllocate(pool, _nb); \
+ else \
+ _a->avail = _p + _nb; \
+ p = (type) _p; \
+ JS_ArenaCountAllocation(pool, nb); \
+ JS_END_MACRO
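The bounds check in JS_ARENA_ALLOCATE_COMMON is written as _p > _a->limit - _nb for the reason the NB comment gives: adding _nb to the cursor can wrap past the top of a 32-bit address space, while subtracting it from the limit cannot (the separate guard expression, _nb > _a->limit in JS_ARENA_ALLOCATE_CAST, catches requests too large for that subtraction to be meaningful). A toy demonstration of the difference, with 32-bit integers standing in for addresses:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Pretend the arena sits just below the top of a 32-bit address space. */
    uint32_t avail = UINT32_MAX - 8;   /* _p: next free byte   */
    uint32_t limit = UINT32_MAX - 4;   /* _a->limit            */
    uint32_t nb    = 16;               /* aligned request size */

    /* Naive check: avail + nb wraps, so the oversized request slips through. */
    int naive_fits = (uint32_t)(avail + nb) <= limit;

    /* Arena check: subtract from the limit instead; no wrap, request refused. */
    int arena_fits = avail <= limit - nb;

    printf("naive check says fits: %d (wrong)\n", naive_fits);
    printf("arena check says fits: %d (right)\n", arena_fits);
    return 0;
}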
+
+#define JS_ARENA_GROW(p, pool, size, incr) \
+ JS_ARENA_GROW_CAST(p, void *, pool, size, incr)
+
+#define JS_ARENA_GROW_CAST(p, type, pool, size, incr) \
+ JS_BEGIN_MACRO \
+ JSArena *_a = (pool)->current; \
+ if (_a->avail == (jsuword)(p) + JS_ARENA_ALIGN(pool, size)) { \
+ size_t _nb = (size) + (incr); \
+ _nb = JS_ARENA_ALIGN(pool, _nb); \
+ if (_a->limit >= _nb && (jsuword)(p) <= _a->limit - _nb) { \
+ _a->avail = (jsuword)(p) + _nb; \
+ JS_ArenaCountInplaceGrowth(pool, size, incr); \
+ } else if ((jsuword)(p) == _a->base) { \
+ p = (type) JS_ArenaRealloc(pool, p, size, incr); \
+ } else { \
+ p = (type) JS_ArenaGrow(pool, p, size, incr); \
+ } \
+ } else { \
+ p = (type) JS_ArenaGrow(pool, p, size, incr); \
+ } \
+ JS_ArenaCountGrowth(pool, size, incr); \
+ JS_END_MACRO
+
+#define JS_ARENA_MARK(pool) ((void *) (pool)->current->avail)
+#define JS_UPTRDIFF(p,q) ((jsuword)(p) - (jsuword)(q))
+
+#ifdef DEBUG
+#define JS_FREE_PATTERN 0xDA
+#define JS_CLEAR_UNUSED(a) (JS_ASSERT((a)->avail <= (a)->limit), \
+ memset((void*)(a)->avail, JS_FREE_PATTERN, \
+ (a)->limit - (a)->avail))
+#define JS_CLEAR_ARENA(a) memset((void*)(a), JS_FREE_PATTERN, \
+ (a)->limit - (jsuword)(a))
+#else
+#define JS_CLEAR_UNUSED(a) /* nothing */
+#define JS_CLEAR_ARENA(a) /* nothing */
+#endif
+
+#define JS_ARENA_RELEASE(pool, mark) \
+ JS_BEGIN_MACRO \
+ char *_m = (char *)(mark); \
+ JSArena *_a = (pool)->current; \
+ if (_a != &(pool)->first && \
+ JS_UPTRDIFF(_m, _a->base) <= JS_UPTRDIFF(_a->avail, _a->base)) { \
+ _a->avail = (jsuword)JS_ARENA_ALIGN(pool, _m); \
+ JS_ASSERT(_a->avail <= _a->limit); \
+ JS_CLEAR_UNUSED(_a); \
+ JS_ArenaCountRetract(pool, _m); \
+ } else { \
+ JS_ArenaRelease(pool, _m); \
+ } \
+ JS_ArenaCountRelease(pool, _m); \
+ JS_END_MACRO
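Taken together, JS_ARENA_MARK and JS_ARENA_RELEASE give cheap LIFO scratch allocation: record a mark, allocate freely, then release everything back to the mark in one step. A minimal usage sketch against this header -- the pool name, arena size, and alignment are arbitrary examples, and error handling is reduced to a NULL check:

#include "jsarena.h"   /* this header */

static void scratch_example(void)
{
    JSArenaPool pool;
    void *mark, *p;

    /* 1 KB arenas, pointer-aligned; the name is only used for metering. */
    JS_InitArenaPool(&pool, "scratch", 1024, sizeof(void *));

    mark = JS_ARENA_MARK(&pool);          /* remember the high-water mark     */
    JS_ARENA_ALLOCATE(p, &pool, 100);     /* p is scratch space, NULL if OOM  */
    if (p) {
        /* ... use p as temporary storage ... */
    }
    JS_ARENA_RELEASE(&pool, mark);        /* drop everything since the mark   */

    JS_FinishArenaPool(&pool);            /* free the arenas for good         */
}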
+
+#ifdef JS_ARENAMETER
+#define JS_COUNT_ARENA(pool,op) ((pool)->stats.narenas op)
+#else
+#define JS_COUNT_ARENA(pool,op)
+#endif
+
+#define JS_ARENA_DESTROY(pool, a, pnext) \
+ JS_BEGIN_MACRO \
+ JS_COUNT_ARENA(pool,--); \
+ if ((pool)->current == (a)) (pool)->current = &(pool)->first; \
+ *(pnext) = (a)->next; \
+ JS_CLEAR_ARENA(a); \
+ free(a); \
+ (a) = NULL; \
+ JS_END_MACRO
+
+/*
+ * Initialize an arena pool with the given name for debugging and metering,
+ * with a minimum size per arena of size bytes.
+ */
+extern JS_PUBLIC_API(void)
+JS_InitArenaPool(JSArenaPool *pool, const char *name, size_t size,
+ size_t align);
+
+/*
+ * Free the arenas in pool. The user may continue to allocate from pool
+ * after calling this function. There is no need to call JS_InitArenaPool()
+ * again unless JS_FinishArenaPool(pool) has been called.
+ */
+extern JS_PUBLIC_API(void)
+JS_FreeArenaPool(JSArenaPool *pool);
+
+/*
+ * Free the arenas in pool and finish using it altogether.
+ */
+extern JS_PUBLIC_API(void)
+JS_FinishArenaPool(JSArenaPool *pool);
+
+/*
+ * Deprecated do-nothing function.
+ */
+extern JS_PUBLIC_API(void)
+JS_ArenaFinish(void);
+
+/*
+ * Deprecated do-nothing function.
+ */
+extern JS_PUBLIC_API(void)
+JS_ArenaShutDown(void);
+
+/*
+ * Friend functions used by the JS_ARENA_*() macros.
+ */
+extern JS_PUBLIC_API(void *)
+JS_ArenaAllocate(JSArenaPool *pool, size_t nb);
+
+extern JS_PUBLIC_API(void *)
+JS_ArenaRealloc(JSArenaPool *pool, void *p, size_t size, size_t incr);
+
+extern JS_PUBLIC_API(void *)
+JS_ArenaGrow(JSArenaPool *pool, void *p, size_t size, size_t incr);
+
+extern JS_PUBLIC_API(void)
+JS_ArenaRelease(JSArenaPool *pool, char *mark);
+
+/*
+ * Function to be used directly when an allocation has likely grown to consume
+ * an entire JSArena, in which case the arena is returned to the malloc heap.
+ */
+extern JS_PUBLIC_API(void)
+JS_ArenaFreeAllocation(JSArenaPool *pool, void *p, size_t size);
+
+#ifdef JS_ARENAMETER
+
+#include <stdio.h>
+
+extern JS_PUBLIC_API(void)
+JS_ArenaCountAllocation(JSArenaPool *pool, size_t nb);
+
+extern JS_PUBLIC_API(void)
+JS_ArenaCountInplaceGrowth(JSArenaPool *pool, size_t size, size_t incr);
+
+extern JS_PUBLIC_API(void)
+JS_ArenaCountGrowth(JSArenaPool *pool, size_t size, size_t incr);
+
+extern JS_PUBLIC_API(void)
+JS_ArenaCountRelease(JSArenaPool *pool, char *mark);
+
+extern JS_PUBLIC_API(void)
+JS_ArenaCountRetract(JSArenaPool *pool, char *mark);
+
+extern JS_PUBLIC_API(void)
+JS_DumpArenaStats(FILE *fp);
+
+#else /* !JS_ARENAMETER */
+
+#define JS_ArenaCountAllocation(ap, nb) /* nothing */
+#define JS_ArenaCountInplaceGrowth(ap, size, incr) /* nothing */
+#define JS_ArenaCountGrowth(ap, size, incr) /* nothing */
+#define JS_ArenaCountRelease(ap, mark) /* nothing */
+#define JS_ArenaCountRetract(ap, mark) /* nothing */
+
+#endif /* !JS_ARENAMETER */
+
+JS_END_EXTERN_C
+
+#endif /* jsarena_h___ */
diff --git a/third_party/js-1.7/jsarray.c b/third_party/js-1.7/jsarray.c
new file mode 100644
index 0000000..532a1be
--- /dev/null
+++ b/third_party/js-1.7/jsarray.c
@@ -0,0 +1,1864 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set sw=4 ts=8 et tw=80:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * JS array class.
+ */
+#include "jsstddef.h"
+#include <stdlib.h>
+#include <string.h>
+#include "jstypes.h"
+#include "jsutil.h" /* Added by JSIFY */
+#include "jsapi.h"
+#include "jsarray.h"
+#include "jsatom.h"
+#include "jsbool.h"
+#include "jscntxt.h"
+#include "jsconfig.h"
+#include "jsfun.h"
+#include "jsgc.h"
+#include "jsinterp.h"
+#include "jslock.h"
+#include "jsnum.h"
+#include "jsobj.h"
+#include "jsstr.h"
+
+/* 2^32 - 1 as a number and a string */
+#define MAXINDEX 4294967295u
+#define MAXSTR "4294967295"
+
+/*
+ * Determine if the id represents an array index or an XML property index.
+ *
+ * An id is an array index according to ECMA by (15.4):
+ *
+ * "Array objects give special treatment to a certain class of property names.
+ * A property name P (in the form of a string value) is an array index if and
+ * only if ToString(ToUint32(P)) is equal to P and ToUint32(P) is not equal
+ * to 2^32-1."
+ *
+ * In our implementation, it would be sufficient to check for JSVAL_IS_INT(id)
+ * except that by using signed 32-bit integers we miss the top half of the
+ * valid range. This function checks the string representation itself; note
+ * that calling a standard conversion routine might allow strings such as
+ * "08" or "4.0" as array indices, which they are not.
+ */
+JSBool
+js_IdIsIndex(jsval id, jsuint *indexp)
+{
+ JSString *str;
+ jschar *cp;
+
+ if (JSVAL_IS_INT(id)) {
+ jsint i;
+ i = JSVAL_TO_INT(id);
+ if (i < 0)
+ return JS_FALSE;
+ *indexp = (jsuint)i;
+ return JS_TRUE;
+ }
+
+ /* NB: id should be a string, but jsxml.c may call us with an object id. */
+ if (!JSVAL_IS_STRING(id))
+ return JS_FALSE;
+
+ str = JSVAL_TO_STRING(id);
+ cp = JSSTRING_CHARS(str);
+ if (JS7_ISDEC(*cp) && JSSTRING_LENGTH(str) < sizeof(MAXSTR)) {
+ jsuint index = JS7_UNDEC(*cp++);
+ jsuint oldIndex = 0;
+ jsuint c = 0;
+ if (index != 0) {
+ while (JS7_ISDEC(*cp)) {
+ oldIndex = index;
+ c = JS7_UNDEC(*cp);
+ index = 10*index + c;
+ cp++;
+ }
+ }
+
+ /* Ensure that all characters were consumed and we didn't overflow. */
+ if (*cp == 0 &&
+ (oldIndex < (MAXINDEX / 10) ||
+ (oldIndex == (MAXINDEX / 10) && c < (MAXINDEX % 10))))
+ {
+ *indexp = index;
+ return JS_TRUE;
+ }
+ }
+ return JS_FALSE;
+}
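As the comment above notes, a standard conversion routine would wrongly accept strings such as "08" or "4.0", so js_IdIsIndex walks the digits itself and rejects anything non-canonical or equal to 2^32-1, using the MAXINDEX/10 comparison to detect overflow on the final digit. The same parsing logic in standalone form over ordinary char strings (the helper name is illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAXINDEX 4294967295u   /* 2^32 - 1 is not a valid array index */

/* Return 1 and store the index if s is a canonical array index, else 0. */
static int str_is_index(const char *s, uint32_t *indexp)
{
    uint32_t index, old;
    unsigned c;

    if (*s < '0' || *s > '9' || strlen(s) > 10)
        return 0;
    index = (unsigned)(*s++ - '0');
    old = 0;
    c = 0;
    if (index != 0) {                       /* "0" is fine, "01" is not */
        while (*s >= '0' && *s <= '9') {
            old = index;
            c = (unsigned)(*s++ - '0');
            index = 10 * index + c;
        }
    }
    if (*s != '\0')                         /* trailing junk, e.g. "4.0" */
        return 0;
    if (old > MAXINDEX / 10 ||              /* overflowed 32 bits        */
        (old == MAXINDEX / 10 && c >= MAXINDEX % 10))
        return 0;
    *indexp = index;
    return 1;
}

int main(void)
{
    uint32_t i;
    const char *tests[] = {"0", "42", "08", "4.0", "4294967294", "4294967295"};
    for (size_t k = 0; k < sizeof tests / sizeof tests[0]; k++)
        printf("%-10s -> %d\n", tests[k], str_is_index(tests[k], &i));
    return 0;
}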
+
+static JSBool
+ValueIsLength(JSContext *cx, jsval v, jsuint *lengthp)
+{
+ jsint i;
+ jsdouble d;
+
+ if (JSVAL_IS_INT(v)) {
+ i = JSVAL_TO_INT(v);
+ if (i < 0) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_ARRAY_LENGTH);
+ return JS_FALSE;
+ }
+ *lengthp = (jsuint) i;
+ return JS_TRUE;
+ }
+
+ if (!js_ValueToNumber(cx, v, &d)) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_ARRAY_LENGTH);
+ return JS_FALSE;
+ }
+ if (!js_DoubleToECMAUint32(cx, d, (uint32 *)lengthp)) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_ARRAY_LENGTH);
+ return JS_FALSE;
+ }
+ if (JSDOUBLE_IS_NaN(d) || d != *lengthp) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_ARRAY_LENGTH);
+ return JS_FALSE;
+ }
+ return JS_TRUE;
+}
+
+JSBool
+js_GetLengthProperty(JSContext *cx, JSObject *obj, jsuint *lengthp)
+{
+ JSTempValueRooter tvr;
+ jsid id;
+ JSBool ok;
+ jsint i;
+
+ JS_PUSH_SINGLE_TEMP_ROOT(cx, JSVAL_NULL, &tvr);
+ id = ATOM_TO_JSID(cx->runtime->atomState.lengthAtom);
+ ok = OBJ_GET_PROPERTY(cx, obj, id, &tvr.u.value);
+ if (ok) {
+ /*
+ * Short-circuit, because js_ValueToECMAUint32 fails when called
+ * during init time.
+ */
+ if (JSVAL_IS_INT(tvr.u.value)) {
+ i = JSVAL_TO_INT(tvr.u.value);
+ *lengthp = (jsuint)i; /* jsuint cast does ToUint32 */
+ } else {
+ ok = js_ValueToECMAUint32(cx, tvr.u.value, (uint32 *)lengthp);
+ }
+ }
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ return ok;
+}
+
+static JSBool
+IndexToValue(JSContext *cx, jsuint index, jsval *vp)
+{
+ if (index <= JSVAL_INT_MAX) {
+ *vp = INT_TO_JSVAL(index);
+ return JS_TRUE;
+ }
+ return js_NewDoubleValue(cx, (jsdouble)index, vp);
+}
+
+static JSBool
+BigIndexToId(JSContext *cx, JSObject *obj, jsuint index, JSBool createAtom,
+ jsid *idp)
+{
+ jschar buf[10], *start;
+ JSClass *clasp;
+ JSAtom *atom;
+ JS_STATIC_ASSERT((jsuint)-1 == 4294967295U);
+
+ JS_ASSERT(index > JSVAL_INT_MAX);
+
+ start = JS_ARRAY_END(buf);
+ do {
+ --start;
+ *start = (jschar)('0' + index % 10);
+ index /= 10;
+ } while (index != 0);
+
+ /*
+ * Skip the atomization if the class is known to store atoms corresponding
+ * to big indexes together with elements. In such a case we know that the
+ * array does not have an element at the given index if its atom does not
+ * exist.
+ */
+ if (!createAtom &&
+ ((clasp = OBJ_GET_CLASS(cx, obj)) == &js_ArrayClass ||
+ clasp == &js_ArgumentsClass ||
+ clasp == &js_ObjectClass)) {
+ atom = js_GetExistingStringAtom(cx, start, JS_ARRAY_END(buf) - start);
+ if (!atom) {
+ *idp = JSVAL_VOID;
+ return JS_TRUE;
+ }
+ } else {
+ atom = js_AtomizeChars(cx, start, JS_ARRAY_END(buf) - start, 0);
+ if (!atom)
+ return JS_FALSE;
+ }
+
+ *idp = ATOM_TO_JSID(atom);
+ return JS_TRUE;
+}
+
+/*
+ * If the property at the given index exists, get its value into the location
+ * pointed to by vp and set *hole to false. Otherwise set *hole to true and *vp
+ * to JSVAL_VOID. This function assumes that the location pointed by vp is
+ * properly rooted and can be used as GC-protected storage for temporaries.
+ */
+static JSBool
+GetArrayElement(JSContext *cx, JSObject *obj, jsuint index, JSBool *hole,
+ jsval *vp)
+{
+ jsid id;
+ JSObject *obj2;
+ JSProperty *prop;
+
+ if (index <= JSVAL_INT_MAX) {
+ id = INT_TO_JSID(index);
+ } else {
+ if (!BigIndexToId(cx, obj, index, JS_FALSE, &id))
+ return JS_FALSE;
+ if (id == JSVAL_VOID) {
+ *hole = JS_TRUE;
+ *vp = JSVAL_VOID;
+ return JS_TRUE;
+ }
+ }
+
+ if (!OBJ_LOOKUP_PROPERTY(cx, obj, id, &obj2, &prop))
+ return JS_FALSE;
+ if (!prop) {
+ *hole = JS_TRUE;
+ *vp = JSVAL_VOID;
+ } else {
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+ if (!OBJ_GET_PROPERTY(cx, obj, id, vp))
+ return JS_FALSE;
+ *hole = JS_FALSE;
+ }
+ return JS_TRUE;
+}
+
+/*
+ * Set the value of the property at the given index to v assuming v is rooted.
+ */
+static JSBool
+SetArrayElement(JSContext *cx, JSObject *obj, jsuint index, jsval v)
+{
+ jsid id;
+
+ if (index <= JSVAL_INT_MAX) {
+ id = INT_TO_JSID(index);
+ } else {
+ if (!BigIndexToId(cx, obj, index, JS_TRUE, &id))
+ return JS_FALSE;
+ JS_ASSERT(id != JSVAL_VOID);
+ }
+ return OBJ_SET_PROPERTY(cx, obj, id, &v);
+}
+
+static JSBool
+DeleteArrayElement(JSContext *cx, JSObject *obj, jsuint index)
+{
+ jsid id;
+ jsval junk;
+
+ if (index <= JSVAL_INT_MAX) {
+ id = INT_TO_JSID(index);
+ } else {
+ if (!BigIndexToId(cx, obj, index, JS_FALSE, &id))
+ return JS_FALSE;
+ if (id == JSVAL_VOID)
+ return JS_TRUE;
+ }
+ return OBJ_DELETE_PROPERTY(cx, obj, id, &junk);
+}
+
+/*
+ * When hole is true, delete the property at the given index. Otherwise set
+ * its value to v assuming v is rooted.
+ */
+static JSBool
+SetOrDeleteArrayElement(JSContext *cx, JSObject *obj, jsuint index,
+ JSBool hole, jsval v)
+{
+ if (hole) {
+ JS_ASSERT(v == JSVAL_VOID);
+ return DeleteArrayElement(cx, obj, index);
+ } else {
+ return SetArrayElement(cx, obj, index, v);
+ }
+}
+
+
+JSBool
+js_SetLengthProperty(JSContext *cx, JSObject *obj, jsuint length)
+{
+ jsval v;
+ jsid id;
+
+ if (!IndexToValue(cx, length, &v))
+ return JS_FALSE;
+ id = ATOM_TO_JSID(cx->runtime->atomState.lengthAtom);
+ return OBJ_SET_PROPERTY(cx, obj, id, &v);
+}
+
+JSBool
+js_HasLengthProperty(JSContext *cx, JSObject *obj, jsuint *lengthp)
+{
+ JSErrorReporter older;
+ JSTempValueRooter tvr;
+ jsid id;
+ JSBool ok;
+
+ older = JS_SetErrorReporter(cx, NULL);
+ JS_PUSH_SINGLE_TEMP_ROOT(cx, JSVAL_NULL, &tvr);
+ id = ATOM_TO_JSID(cx->runtime->atomState.lengthAtom);
+ ok = OBJ_GET_PROPERTY(cx, obj, id, &tvr.u.value);
+ JS_SetErrorReporter(cx, older);
+ if (ok)
+ ok = ValueIsLength(cx, tvr.u.value, lengthp);
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ return ok;
+}
+
+JSBool
+js_IsArrayLike(JSContext *cx, JSObject *obj, JSBool *answerp, jsuint *lengthp)
+{
+ JSClass *clasp;
+
+ clasp = OBJ_GET_CLASS(cx, obj);
+ *answerp = (clasp == &js_ArgumentsClass || clasp == &js_ArrayClass);
+ if (!*answerp) {
+ *lengthp = 0;
+ return JS_TRUE;
+ }
+ return js_GetLengthProperty(cx, obj, lengthp);
+}
+
+/*
+ * This get function is specific to Array.prototype.length and other array
+ * instance length properties. It calls back through the class get function
+ * in case some magic happens there (see call_getProperty in jsfun.c).
+ */
+static JSBool
+array_length_getter(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ return OBJ_GET_CLASS(cx, obj)->getProperty(cx, obj, id, vp);
+}
+
+static JSBool
+array_length_setter(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ jsuint newlen, oldlen, gap, index;
+ jsid id2;
+ jsval junk;
+ JSObject *iter;
+ JSTempValueRooter tvr;
+ JSBool ok;
+
+ if (!ValueIsLength(cx, *vp, &newlen))
+ return JS_FALSE;
+ if (!js_GetLengthProperty(cx, obj, &oldlen))
+ return JS_FALSE;
+ if (oldlen > newlen) {
+ if (oldlen - newlen < (1 << 24)) {
+ do {
+ --oldlen;
+ if (!DeleteArrayElement(cx, obj, oldlen))
+ return JS_FALSE;
+ } while (oldlen != newlen);
+ } else {
+ /*
+ * We are going to remove a lot of indexes in a presumably sparse
+ * array. So instead of looping through indexes between newlen and
+ * oldlen, we iterate through all properties and remove those that
+ * correspond to indexes from the [newlen, oldlen) range.
+ * See bug 322135.
+ */
+ iter = JS_NewPropertyIterator(cx, obj);
+ if (!iter)
+ return JS_FALSE;
+
+ /* Protect iter against GC in OBJ_DELETE_PROPERTY. */
+ JS_PUSH_TEMP_ROOT_OBJECT(cx, iter, &tvr);
+ gap = oldlen - newlen;
+ for (;;) {
+ ok = JS_NextProperty(cx, iter, &id2);
+ if (!ok)
+ break;
+ if (id2 == JSVAL_VOID)
+ break;
+ if (js_IdIsIndex(id2, &index) && index - newlen < gap) {
+ ok = OBJ_DELETE_PROPERTY(cx, obj, id2, &junk);
+ if (!ok)
+ break;
+ }
+ }
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ if (!ok)
+ return JS_FALSE;
+ }
+ }
+ return IndexToValue(cx, newlen, vp);
+}
+
+static JSBool
+array_addProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ jsuint index, length;
+
+ if (!js_IdIsIndex(id, &index))
+ return JS_TRUE;
+ if (!js_GetLengthProperty(cx, obj, &length))
+ return JS_FALSE;
+ if (index >= length) {
+ length = index + 1;
+ return js_SetLengthProperty(cx, obj, length);
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+array_convert(JSContext *cx, JSObject *obj, JSType type, jsval *vp)
+{
+ return js_TryValueOf(cx, obj, type, vp);
+}
+
+JSClass js_ArrayClass = {
+ "Array",
+ JSCLASS_HAS_CACHED_PROTO(JSProto_Array),
+ array_addProperty, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
+ JS_EnumerateStub, JS_ResolveStub, array_convert, JS_FinalizeStub,
+ JSCLASS_NO_OPTIONAL_MEMBERS
+};
+
+enum ArrayToStringOp {
+ TO_STRING,
+ TO_LOCALE_STRING,
+ TO_SOURCE
+};
+
+/*
+ * When op is TO_STRING or TO_LOCALE_STRING, sep indicates the separator to
+ * use, or "," when sep is NULL.
+ * When op is TO_SOURCE, sep must be NULL.
+ */
+static JSBool
+array_join_sub(JSContext *cx, JSObject *obj, enum ArrayToStringOp op,
+ JSString *sep, jsval *rval)
+{
+ JSBool ok, hole;
+ jsuint length, index;
+ jschar *chars, *ochars;
+ size_t nchars, growth, seplen, tmplen, extratail;
+ const jschar *sepstr;
+ JSString *str;
+ JSHashEntry *he;
+ JSTempValueRooter tvr;
+ JSAtom *atom;
+ int stackDummy;
+
+ if (!JS_CHECK_STACK_SIZE(cx, stackDummy)) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_OVER_RECURSED);
+ return JS_FALSE;
+ }
+
+ ok = js_GetLengthProperty(cx, obj, &length);
+ if (!ok)
+ return JS_FALSE;
+
+ he = js_EnterSharpObject(cx, obj, NULL, &chars);
+ if (!he)
+ return JS_FALSE;
+#ifdef DEBUG
+ growth = (size_t) -1;
+#endif
+
+ if (op == TO_SOURCE) {
+ if (IS_SHARP(he)) {
+#if JS_HAS_SHARP_VARS
+ nchars = js_strlen(chars);
+#else
+ chars[0] = '[';
+ chars[1] = ']';
+ chars[2] = 0;
+ nchars = 2;
+#endif
+ goto make_string;
+ }
+
+ /*
+ * Always allocate 2 extra chars for closing ']' and terminating 0
+ * and then preallocate 1 + extratail to include starting '['.
+ */
+ extratail = 2;
+ growth = (1 + extratail) * sizeof(jschar);
+ if (!chars) {
+ nchars = 0;
+ chars = (jschar *) malloc(growth);
+ if (!chars)
+ goto done;
+ } else {
+ MAKE_SHARP(he);
+ nchars = js_strlen(chars);
+ growth += nchars * sizeof(jschar);
+ chars = (jschar *)realloc((ochars = chars), growth);
+ if (!chars) {
+ free(ochars);
+ goto done;
+ }
+ }
+ chars[nchars++] = '[';
+ JS_ASSERT(sep == NULL);
+ sepstr = NULL; /* indicates to use ", " as separator */
+ seplen = 2;
+ } else {
+ /*
+ * Free any sharp variable definition in chars. Normally, we would
+ * MAKE_SHARP(he) so that only the first sharp variable annotation is
+ * a definition, and all the rest are references, but in the current
+ * case of (op != TO_SOURCE), we don't need chars at all.
+ */
+ if (chars)
+ JS_free(cx, chars);
+ chars = NULL;
+ nchars = 0;
+ extratail = 1; /* allocate extra char for terminating 0 */
+
+ /* Return the empty string on a cycle as well as on empty join. */
+ if (IS_BUSY(he) || length == 0) {
+ js_LeaveSharpObject(cx, NULL);
+ *rval = JS_GetEmptyStringValue(cx);
+ return ok;
+ }
+
+ /* Flag he as BUSY so we can distinguish a cycle from a join-point. */
+ MAKE_BUSY(he);
+
+ if (sep) {
+ sepstr = JSSTRING_CHARS(sep);
+ seplen = JSSTRING_LENGTH(sep);
+ } else {
+ sepstr = NULL; /* indicates to use "," as separator */
+ seplen = 1;
+ }
+ }
+
+ /* Use rval to locally root each element value as we loop and convert. */
+#define v (*rval)
+
+ for (index = 0; index < length; index++) {
+ ok = GetArrayElement(cx, obj, index, &hole, &v);
+ if (!ok)
+ goto done;
+ if (hole ||
+ (op != TO_SOURCE && (JSVAL_IS_VOID(v) || JSVAL_IS_NULL(v)))) {
+ str = cx->runtime->emptyString;
+ } else {
+ if (op == TO_LOCALE_STRING) {
+ atom = cx->runtime->atomState.toLocaleStringAtom;
+ JS_PUSH_TEMP_ROOT_OBJECT(cx, NULL, &tvr);
+ ok = js_ValueToObject(cx, v, &tvr.u.object) &&
+ js_TryMethod(cx, tvr.u.object, atom, 0, NULL, &v);
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ if (!ok)
+ goto done;
+ str = js_ValueToString(cx, v);
+ } else if (op == TO_STRING) {
+ str = js_ValueToString(cx, v);
+ } else {
+ JS_ASSERT(op == TO_SOURCE);
+ str = js_ValueToSource(cx, v);
+ }
+ if (!str) {
+ ok = JS_FALSE;
+ goto done;
+ }
+ }
+
+ /*
+ * Do not append a separator after the last element unless it is a hole
+ * and we are in toSource. In that case we append a single ",".
+ */
+ if (index + 1 == length)
+ seplen = (hole && op == TO_SOURCE) ? 1 : 0;
+
+ /* Allocate 1 at end for closing bracket and zero. */
+ tmplen = JSSTRING_LENGTH(str);
+ growth = nchars + tmplen + seplen + extratail;
+ if (nchars > growth || tmplen > growth ||
+ growth > (size_t)-1 / sizeof(jschar)) {
+ if (chars) {
+ free(chars);
+ chars = NULL;
+ }
+ goto done;
+ }
+ growth *= sizeof(jschar);
+ if (!chars) {
+ chars = (jschar *) malloc(growth);
+ if (!chars)
+ goto done;
+ } else {
+ chars = (jschar *) realloc((ochars = chars), growth);
+ if (!chars) {
+ free(ochars);
+ goto done;
+ }
+ }
+
+ js_strncpy(&chars[nchars], JSSTRING_CHARS(str), tmplen);
+ nchars += tmplen;
+
+ if (seplen) {
+ if (sepstr) {
+ js_strncpy(&chars[nchars], sepstr, seplen);
+ } else {
+ JS_ASSERT(seplen == 1 || seplen == 2);
+ chars[nchars] = ',';
+ if (seplen == 2)
+ chars[nchars + 1] = ' ';
+ }
+ nchars += seplen;
+ }
+ }
+
+ done:
+ if (op == TO_SOURCE) {
+ if (chars)
+ chars[nchars++] = ']';
+ } else {
+ CLEAR_BUSY(he);
+ }
+ js_LeaveSharpObject(cx, NULL);
+ if (!ok) {
+ if (chars)
+ free(chars);
+ return ok;
+ }
+
+#undef v
+
+ make_string:
+ if (!chars) {
+ JS_ReportOutOfMemory(cx);
+ return JS_FALSE;
+ }
+ chars[nchars] = 0;
+ JS_ASSERT(growth == (size_t)-1 || (nchars + 1) * sizeof(jschar) == growth);
+ str = js_NewString(cx, chars, nchars, 0);
+ if (!str) {
+ free(chars);
+ return JS_FALSE;
+ }
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
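array_join_sub guards every buffer growth twice: it checks that the size_t sum of character counts did not wrap (nchars > growth || tmplen > growth) and that the count still fits once scaled to bytes (growth > (size_t)-1 / sizeof(jschar)). The same overflow-checked sizing pattern in isolation, with uint16_t standing in for jschar:

#include <stdint.h>
#include <stdio.h>

/* Return 1 if (nchars + tmplen + extra) 16-bit chars can be sized safely. */
static int checked_char_count(size_t nchars, size_t tmplen, size_t extra,
                              size_t *out)
{
    size_t growth = nchars + tmplen + extra;
    if (growth < nchars || growth < tmplen)          /* size_t sum wrapped */
        return 0;
    if (growth > (size_t)-1 / sizeof(uint16_t))      /* bytes would wrap   */
        return 0;
    *out = growth;
    return 1;
}

int main(void)
{
    size_t n;
    if (checked_char_count(3, 5, 2, &n))
        printf("need %zu chars (%zu bytes)\n", n, n * sizeof(uint16_t));
    if (!checked_char_count((size_t)-1, 5, 2, &n))
        printf("overflow detected\n");
    return 0;
}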
+
+#if JS_HAS_TOSOURCE
+static JSBool
+array_toSource(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ return array_join_sub(cx, obj, TO_SOURCE, NULL, rval);
+}
+#endif
+
+static JSBool
+array_toString(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ return array_join_sub(cx, obj, TO_STRING, NULL, rval);
+}
+
+static JSBool
+array_toLocaleString(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ /*
+ * Passing comma here as the separator. Need a way to get a
+ * locale-specific version.
+ */
+ return array_join_sub(cx, obj, TO_LOCALE_STRING, NULL, rval);
+}
+
+static JSBool
+InitArrayElements(JSContext *cx, JSObject *obj, jsuint start, jsuint end,
+ jsval *vector)
+{
+ while (start != end) {
+ if (!SetArrayElement(cx, obj, start++, *vector++))
+ return JS_FALSE;
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+InitArrayObject(JSContext *cx, JSObject *obj, jsuint length, jsval *vector)
+{
+ jsval v;
+ jsid id;
+
+ if (!IndexToValue(cx, length, &v))
+ return JS_FALSE;
+ id = ATOM_TO_JSID(cx->runtime->atomState.lengthAtom);
+ if (!OBJ_DEFINE_PROPERTY(cx, obj, id, v,
+ array_length_getter, array_length_setter,
+ JSPROP_PERMANENT,
+ NULL)) {
+ return JS_FALSE;
+ }
+ if (!vector)
+ return JS_TRUE;
+ return InitArrayElements(cx, obj, 0, length, vector);
+}
+
+/*
+ * Perl-inspired join, reverse, and sort.
+ */
+static JSBool
+array_join(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSString *str;
+
+ if (JSVAL_IS_VOID(argv[0])) {
+ str = NULL;
+ } else {
+ str = js_ValueToString(cx, argv[0]);
+ if (!str)
+ return JS_FALSE;
+ argv[0] = STRING_TO_JSVAL(str);
+ }
+ return array_join_sub(cx, obj, TO_STRING, str, rval);
+}
+
+static JSBool
+array_reverse(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ jsuint len, half, i;
+ JSBool hole, hole2;
+ jsval *tmproot, *tmproot2;
+
+ if (!js_GetLengthProperty(cx, obj, &len))
+ return JS_FALSE;
+
+ /*
+ * Use argv[argc] and argv[argc + 1] as local roots to temporarily hold
+ * array elements for a GC-safe swap.
+ */
+ tmproot = argv + argc;
+ tmproot2 = argv + argc + 1;
+ half = len / 2;
+ for (i = 0; i < half; i++) {
+ if (!GetArrayElement(cx, obj, i, &hole, tmproot) ||
+ !GetArrayElement(cx, obj, len - i - 1, &hole2, tmproot2) ||
+ !SetOrDeleteArrayElement(cx, obj, len - i - 1, hole, *tmproot) ||
+ !SetOrDeleteArrayElement(cx, obj, i, hole2, *tmproot2)) {
+ return JS_FALSE;
+ }
+ }
+ *rval = OBJECT_TO_JSVAL(obj);
+ return JS_TRUE;
+}
+
+typedef struct HSortArgs {
+ void *vec;
+ size_t elsize;
+ void *pivot;
+ JSComparator cmp;
+ void *arg;
+ JSBool fastcopy;
+} HSortArgs;
+
+static JSBool
+sort_compare(void *arg, const void *a, const void *b, int *result);
+
+static int
+sort_compare_strings(void *arg, const void *a, const void *b, int *result);
+
+static JSBool
+HeapSortHelper(JSBool building, HSortArgs *hsa, size_t lo, size_t hi)
+{
+ void *pivot, *vec, *vec2, *arg, *a, *b;
+ size_t elsize;
+ JSComparator cmp;
+ JSBool fastcopy;
+ size_t j, hiDiv2;
+ int cmp_result;
+
+ pivot = hsa->pivot;
+ vec = hsa->vec;
+ elsize = hsa->elsize;
+ vec2 = (char *)vec - 2 * elsize;
+ cmp = hsa->cmp;
+ arg = hsa->arg;
+
+ fastcopy = hsa->fastcopy;
+#define MEMCPY(p,q,n) \
+ (fastcopy ? (void)(*(jsval*)(p) = *(jsval*)(q)) : (void)memcpy(p, q, n))
+#define CALL_CMP(a, b) \
+ if (!cmp(arg, (a), (b), &cmp_result)) return JS_FALSE;
+
+ if (lo == 1) {
+ j = 2;
+ b = (char *)vec + elsize;
+ if (j < hi) {
+ CALL_CMP(vec, b);
+ if (cmp_result < 0)
+ j++;
+ }
+ a = (char *)vec + (hi - 1) * elsize;
+ b = (char *)vec2 + j * elsize;
+
+ /*
+ * During the sorting phase b points to a member of the heap that cannot be
+ * bigger than the bigger of vec[0] and vec[1], and cmp(a, b, arg) <= 0
+ * always holds.
+ */
+ if (building || hi == 2) {
+ CALL_CMP(a, b);
+ if (cmp_result >= 0)
+ return JS_TRUE;
+ }
+
+ MEMCPY(pivot, a, elsize);
+ MEMCPY(a, b, elsize);
+ lo = j;
+ } else {
+ a = (char *)vec2 + lo * elsize;
+ MEMCPY(pivot, a, elsize);
+ }
+
+ hiDiv2 = hi/2;
+ while (lo <= hiDiv2) {
+ j = lo + lo;
+ a = (char *)vec2 + j * elsize;
+ b = (char *)vec + (j - 1) * elsize;
+ if (j < hi) {
+ CALL_CMP(a, b);
+ if (cmp_result < 0)
+ j++;
+ }
+ b = (char *)vec2 + j * elsize;
+ CALL_CMP(pivot, b);
+ if (cmp_result >= 0)
+ break;
+
+ a = (char *)vec2 + lo * elsize;
+ MEMCPY(a, b, elsize);
+ lo = j;
+ }
+
+ a = (char *)vec2 + lo * elsize;
+ MEMCPY(a, pivot, elsize);
+
+ return JS_TRUE;
+
+#undef CALL_CMP
+#undef MEMCPY
+
+}
+
+JSBool
+js_HeapSort(void *vec, size_t nel, void *pivot, size_t elsize,
+ JSComparator cmp, void *arg)
+{
+ HSortArgs hsa;
+ size_t i;
+
+ hsa.vec = vec;
+ hsa.elsize = elsize;
+ hsa.pivot = pivot;
+ hsa.cmp = cmp;
+ hsa.arg = arg;
+ hsa.fastcopy = (cmp == sort_compare || cmp == sort_compare_strings);
+
+ for (i = nel/2; i != 0; i--) {
+ if (!HeapSortHelper(JS_TRUE, &hsa, i, nel))
+ return JS_FALSE;
+ }
+ while (nel > 2) {
+ if (!HeapSortHelper(JS_FALSE, &hsa, 1, --nel))
+ return JS_FALSE;
+ }
+
+ return JS_TRUE;
+}
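js_HeapSort is a conventional heapsort split into the two phases its driver loop shows: a heapify pass (building == JS_TRUE, sifting down from nel/2 toward the root) and an extraction pass that repeatedly moves the current maximum to the end and re-sifts. The standalone version below keeps those two phases but drops the generic element size, the caller-supplied pivot buffer, the 1-based vec2 indexing trick, and the fallible comparator of the real code:

#include <stdio.h>

static void swap(int *a, int *b) { int t = *a; *a = *b; *b = t; }

/* Sift v[i] down within the first n elements (0-based binary heap). */
static void sift_down(int *v, size_t i, size_t n)
{
    for (;;) {
        size_t child = 2 * i + 1;
        if (child >= n)
            return;
        if (child + 1 < n && v[child + 1] > v[child])
            child++;                       /* pick the larger child */
        if (v[i] >= v[child])
            return;
        swap(&v[i], &v[child]);
        i = child;
    }
}

static void heap_sort(int *v, size_t n)
{
    /* Building phase: heapify from the last internal node to the root. */
    for (size_t i = n / 2; i-- > 0; )
        sift_down(v, i, n);
    /* Extraction phase: move the max to the end, shrink, re-sift. */
    while (n > 1) {
        swap(&v[0], &v[--n]);
        sift_down(v, 0, n);
    }
}

int main(void)
{
    int v[] = {5, 1, 4, 2, 3};
    heap_sort(v, sizeof v / sizeof v[0]);
    for (size_t i = 0; i < 5; i++)
        printf("%d ", v[i]);               /* 1 2 3 4 5 */
    putchar('\n');
    return 0;
}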
+
+typedef struct CompareArgs {
+ JSContext *context;
+ jsval fval;
+ jsval *localroot; /* need one local root, for sort_compare */
+} CompareArgs;
+
+static JSBool
+sort_compare(void *arg, const void *a, const void *b, int *result)
+{
+ jsval av = *(const jsval *)a, bv = *(const jsval *)b;
+ CompareArgs *ca = (CompareArgs *) arg;
+ JSContext *cx = ca->context;
+ jsval fval;
+ JSBool ok;
+
+ /**
+ * array_sort deals with holes and undefs on its own and they should not
+ * come here.
+ */
+ JS_ASSERT(av != JSVAL_VOID);
+ JS_ASSERT(bv != JSVAL_VOID);
+
+ *result = 0;
+ ok = JS_TRUE;
+ fval = ca->fval;
+ if (fval == JSVAL_NULL) {
+ JSString *astr, *bstr;
+
+ if (av != bv) {
+ /*
+ * Set our local root to astr in case the second js_ValueToString
+ * displaces the newborn root in cx, and the GC nests under that
+ * call. Don't bother guarding the local root store with an astr
+ * non-null test. If we tag null as a string, the GC will untag,
+ * null-test, and avoid dereferencing null.
+ */
+ astr = js_ValueToString(cx, av);
+ *ca->localroot = STRING_TO_JSVAL(astr);
+ if (astr && (bstr = js_ValueToString(cx, bv)))
+ *result = js_CompareStrings(astr, bstr);
+ else
+ ok = JS_FALSE;
+ }
+ } else {
+ jsdouble cmp;
+ jsval argv[2];
+
+ argv[0] = av;
+ argv[1] = bv;
+ ok = js_InternalCall(cx,
+ OBJ_GET_PARENT(cx, JSVAL_TO_OBJECT(fval)),
+ fval, 2, argv, ca->localroot);
+ if (ok) {
+ ok = js_ValueToNumber(cx, *ca->localroot, &cmp);
+
+ /* Clamp cmp to -1, 0, 1. */
+ if (ok) {
+ if (JSDOUBLE_IS_NaN(cmp)) {
+ /*
+ * XXX report some kind of error here? ECMA talks about
+ * 'consistent compare functions' that don't return NaN,
+ * but is silent about what the result should be. So we
+ * currently ignore it.
+ */
+ } else if (cmp != 0) {
+ *result = cmp > 0 ? 1 : -1;
+ }
+ }
+ }
+ }
+ return ok;
+}
+
+static int
+sort_compare_strings(void *arg, const void *a, const void *b, int *result)
+{
+ jsval av = *(const jsval *)a, bv = *(const jsval *)b;
+
+ *result = (int) js_CompareStrings(JSVAL_TO_STRING(av), JSVAL_TO_STRING(bv));
+ return JS_TRUE;
+}
+
+static JSBool
+array_sort(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsval fval, *vec, *pivotroot;
+ CompareArgs ca;
+ jsuint len, newlen, i, undefs;
+ JSTempValueRooter tvr;
+ JSBool hole, ok;
+
+ /*
+ * Optimize the default compare function case if all of obj's elements
+ * have values of type string.
+ */
+ JSBool all_strings;
+
+ if (argc > 0) {
+ if (JSVAL_IS_PRIMITIVE(argv[0])) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_SORT_ARG);
+ return JS_FALSE;
+ }
+ fval = argv[0];
+ all_strings = JS_FALSE; /* non-default compare function */
+ } else {
+ fval = JSVAL_NULL;
+ all_strings = JS_TRUE; /* check for all string values */
+ }
+
+ if (!js_GetLengthProperty(cx, obj, &len))
+ return JS_FALSE;
+ if (len == 0) {
+ *rval = OBJECT_TO_JSVAL(obj);
+ return JS_TRUE;
+ }
+
+ /*
+ * We need a temporary array of len jsvals to hold elements of the array.
+ * Check that its size does not overflow size_t, which would allow for
+ * indexing beyond the end of the malloc'd vector.
+ */
+ if (len > ((size_t) -1) / sizeof(jsval)) {
+ JS_ReportOutOfMemory(cx);
+ return JS_FALSE;
+ }
+
+ vec = (jsval *) JS_malloc(cx, ((size_t) len) * sizeof(jsval));
+ if (!vec)
+ return JS_FALSE;
+
+ /*
+ * Initialize vec as a root. We will clear elements of vec one by
+ * one while increasing tvr.count when we know that the property at
+ * the corresponding index exists and its value must be rooted.
+ *
+ * In this way when sorting a huge mostly sparse array we will not
+ * access the tail of vec corresponding to properties that do not
+ * exist, allowing OS to avoiding committing RAM. See bug 330812.
+ *
+ * After this point control must flow through label out: to exit.
+ */
+ JS_PUSH_TEMP_ROOT(cx, 0, vec, &tvr);
+
+ /*
+ * By ECMA 262, 15.4.4.11, a property that does not exist (which we
+ * call a "hole") is always greater than an existing property with
+ * value undefined and that is always greater than any other property.
+ * Thus to sort holes and undefs we simply count them, sort the rest
+ * of the elements, append the undefs after them, and then make holes
+ * after the undefs.
+ */
+ undefs = 0;
+ newlen = 0;
+ for (i = 0; i < len; i++) {
+ /* Clear vec[newlen] before including it in the rooted set. */
+ vec[newlen] = JSVAL_NULL;
+ tvr.count = newlen + 1;
+ ok = GetArrayElement(cx, obj, i, &hole, &vec[newlen]);
+ if (!ok)
+ goto out;
+
+ if (hole)
+ continue;
+
+ if (vec[newlen] == JSVAL_VOID) {
+ ++undefs;
+ continue;
+ }
+
+ /* We know JSVAL_IS_STRING yields 0 or 1, so avoid a branch via &=. */
+ all_strings &= JSVAL_IS_STRING(vec[newlen]);
+
+ ++newlen;
+ }
+
+ /* Here len == newlen + undefs + number_of_holes. */
+ ca.context = cx;
+ ca.fval = fval;
+ ca.localroot = argv + argc; /* local GC root for temporary string */
+ pivotroot = argv + argc + 1; /* local GC root for pivot val */
+ ok = js_HeapSort(vec, (size_t) newlen, pivotroot, sizeof(jsval),
+ all_strings ? sort_compare_strings : sort_compare,
+ &ca);
+ if (!ok)
+ goto out;
+
+ ok = InitArrayElements(cx, obj, 0, newlen, vec);
+ if (!ok)
+ goto out;
+
+ out:
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ JS_free(cx, vec);
+ if (!ok)
+ return JS_FALSE;
+
+ /* Set undefs that sorted after the rest of elements. */
+ while (undefs != 0) {
+ --undefs;
+ if (!SetArrayElement(cx, obj, newlen++, JSVAL_VOID))
+ return JS_FALSE;
+ }
+
+ /* Re-create any holes that sorted to the end of the array. */
+ while (len > newlen) {
+ if (!DeleteArrayElement(cx, obj, --len))
+ return JS_FALSE;
+ }
+ *rval = OBJECT_TO_JSVAL(obj);
+ return JS_TRUE;
+}
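The strategy the comments in array_sort describe -- per ECMA 262, 15.4.4.11, values sort before undefined and undefined sorts before holes, so the code counts undefs and holes, sorts only the real values, then appends undefs and re-creates the holes -- can be seen in miniature with plain ints. The sentinels and names below are purely illustrative:

#include <stdio.h>
#include <stdlib.h>

#define UNDEF (-1)          /* sentinel for "undefined" in this toy model */
#define HOLE  (-2)          /* sentinel for a missing element (a "hole")  */

static int cmp_int(const void *a, const void *b)
{
    int x = *(const int *)a, y = *(const int *)b;
    return (x > y) - (x < y);
}

int main(void)
{
    int v[] = {3, HOLE, UNDEF, 1, HOLE, 2};
    size_t len = sizeof v / sizeof v[0];
    size_t newlen = 0, undefs = 0, i;

    /* Compact real values to the front, counting undefs and skipping holes. */
    for (i = 0; i < len; i++) {
        if (v[i] == HOLE)
            continue;
        if (v[i] == UNDEF) {
            undefs++;
            continue;
        }
        v[newlen++] = v[i];
    }
    qsort(v, newlen, sizeof v[0], cmp_int);

    /* Append the undefs, then let holes fill whatever remains. */
    while (undefs-- != 0)
        v[newlen++] = UNDEF;
    while (newlen < len)
        v[newlen++] = HOLE;

    for (i = 0; i < len; i++)
        printf("%d ", v[i]);      /* 1 2 3 -1 -2 -2 */
    putchar('\n');
    return 0;
}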
+
+/*
+ * Perl-inspired push, pop, shift, unshift, and splice methods.
+ */
+static JSBool
+array_push(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsuint length, newlength;
+
+ if (!js_GetLengthProperty(cx, obj, &length))
+ return JS_FALSE;
+ newlength = length + argc;
+ if (!InitArrayElements(cx, obj, length, newlength, argv))
+ return JS_FALSE;
+
+ /* Per ECMA-262, return the new array length. */
+ if (!IndexToValue(cx, newlength, rval))
+ return JS_FALSE;
+ return js_SetLengthProperty(cx, obj, newlength);
+}
+
+static JSBool
+array_pop(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsuint index;
+ JSBool hole;
+
+ if (!js_GetLengthProperty(cx, obj, &index))
+ return JS_FALSE;
+ if (index > 0) {
+ index--;
+
+ /* Get the to-be-deleted property's value into rval. */
+ if (!GetArrayElement(cx, obj, index, &hole, rval))
+ return JS_FALSE;
+ if (!hole && !DeleteArrayElement(cx, obj, index))
+ return JS_FALSE;
+ }
+ return js_SetLengthProperty(cx, obj, index);
+}
+
+static JSBool
+array_shift(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsuint length, i;
+ JSBool hole;
+
+ if (!js_GetLengthProperty(cx, obj, &length))
+ return JS_FALSE;
+ if (length == 0) {
+ *rval = JSVAL_VOID;
+ } else {
+ length--;
+
+ /* Get the to-be-deleted property's value into rval ASAP. */
+ if (!GetArrayElement(cx, obj, 0, &hole, rval))
+ return JS_FALSE;
+
+ /*
+ * Slide down the array above the first element.
+ */
+ for (i = 0; i != length; i++) {
+ if (!GetArrayElement(cx, obj, i + 1, &hole, &argv[0]))
+ return JS_FALSE;
+ if (!SetOrDeleteArrayElement(cx, obj, i, hole, argv[0]))
+ return JS_FALSE;
+ }
+
+ /* Delete the only or last element when it exists. */
+ if (!hole && !DeleteArrayElement(cx, obj, length))
+ return JS_FALSE;
+ }
+ return js_SetLengthProperty(cx, obj, length);
+}
+
+static JSBool
+array_unshift(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ jsuint length, last;
+ jsval *vp;
+ JSBool hole;
+
+ if (!js_GetLengthProperty(cx, obj, &length))
+ return JS_FALSE;
+ if (argc > 0) {
+ /* Slide up the array to make room for argc at the bottom. */
+ if (length > 0) {
+ last = length;
+ vp = argv + argc; /* local root */
+ do {
+ --last;
+ if (!GetArrayElement(cx, obj, last, &hole, vp) ||
+ !SetOrDeleteArrayElement(cx, obj, last + argc, hole, *vp)) {
+ return JS_FALSE;
+ }
+ } while (last != 0);
+ }
+
+ /* Copy from argv to the bottom of the array. */
+ if (!InitArrayElements(cx, obj, 0, argc, argv))
+ return JS_FALSE;
+
+ length += argc;
+ if (!js_SetLengthProperty(cx, obj, length))
+ return JS_FALSE;
+ }
+
+ /* Follow Perl by returning the new array length. */
+ return IndexToValue(cx, length, rval);
+}
+
+static JSBool
+array_splice(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsval *vp;
+ jsuint length, begin, end, count, delta, last;
+ jsdouble d;
+ JSBool hole;
+ JSObject *obj2;
+
+ /*
+ * Nothing to do if no args. Otherwise point vp at our one explicit local
+ * root and get length.
+ */
+ if (argc == 0)
+ return JS_TRUE;
+ vp = argv + argc;
+ if (!js_GetLengthProperty(cx, obj, &length))
+ return JS_FALSE;
+
+ /* Convert the first argument into a starting index. */
+ if (!js_ValueToNumber(cx, *argv, &d))
+ return JS_FALSE;
+ d = js_DoubleToInteger(d);
+ if (d < 0) {
+ d += length;
+ if (d < 0)
+ d = 0;
+ } else if (d > length) {
+ d = length;
+ }
+ begin = (jsuint)d; /* d has been clamped to uint32 */
+ argc--;
+ argv++;
+
+ /* Convert the second argument from a count into a fencepost index. */
+ delta = length - begin;
+ if (argc == 0) {
+ count = delta;
+ end = length;
+ } else {
+ if (!js_ValueToNumber(cx, *argv, &d))
+ return JS_FALSE;
+ d = js_DoubleToInteger(d);
+ if (d < 0)
+ d = 0;
+ else if (d > delta)
+ d = delta;
+ count = (jsuint)d;
+ end = begin + count;
+ argc--;
+ argv++;
+ }
+
+
+ /*
+ * Create a new array value to return. Our ECMA v2 proposal specs
+ * that splice always returns an array value, even when given no
+ * arguments. We think this is best because it eliminates the need
+ * for callers to do an extra test to handle the empty splice case.
+ */
+ obj2 = js_NewArrayObject(cx, 0, NULL);
+ if (!obj2)
+ return JS_FALSE;
+ *rval = OBJECT_TO_JSVAL(obj2);
+
+ /* If there are elements to remove, put them into the return value. */
+ if (count > 0) {
+ for (last = begin; last < end; last++) {
+ if (!GetArrayElement(cx, obj, last, &hole, vp))
+ return JS_FALSE;
+
+ /* Copy *vp to new array unless it's a hole. */
+ if (!hole && !SetArrayElement(cx, obj2, last - begin, *vp))
+ return JS_FALSE;
+ }
+
+ if (!js_SetLengthProperty(cx, obj2, end - begin))
+ return JS_FALSE;
+ }
+
+ /* Find the direction (up or down) to copy and make way for argv. */
+ if (argc > count) {
+ delta = (jsuint)argc - count;
+ last = length;
+ /* (uint) end could be 0, so can't use vanilla >= test */
+ while (last-- > end) {
+ if (!GetArrayElement(cx, obj, last, &hole, vp) ||
+ !SetOrDeleteArrayElement(cx, obj, last + delta, hole, *vp)) {
+ return JS_FALSE;
+ }
+ }
+ length += delta;
+ } else if (argc < count) {
+ delta = count - (jsuint)argc;
+ for (last = end; last < length; last++) {
+ if (!GetArrayElement(cx, obj, last, &hole, vp) ||
+ !SetOrDeleteArrayElement(cx, obj, last - delta, hole, *vp)) {
+ return JS_FALSE;
+ }
+ }
+ length -= delta;
+ }
+
+ /* Copy from argv into the hole to complete the splice. */
+ if (!InitArrayElements(cx, obj, begin, begin + argc, argv))
+ return JS_FALSE;
+
+ /* Update length in case we deleted elements from the end. */
+ return js_SetLengthProperty(cx, obj, length);
+}
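The index handling at the top of array_splice follows the usual ECMA clamping: a negative start counts back from the length and is clamped to [0, length], and the delete count is clamped to [0, length - begin]. A small standalone version of that arithmetic (js_DoubleToInteger's truncation step is omitted; the function names are illustrative):

#include <assert.h>
#include <stdint.h>

/* Clamp a possibly-negative start index to [0, length], ECMA-style. */
static uint32_t clamp_start(double d, uint32_t length)
{
    if (d < 0) {
        d += length;
        if (d < 0)
            d = 0;
    } else if (d > length) {
        d = length;
    }
    return (uint32_t) d;
}

/* Clamp a delete count to [0, length - begin]. */
static uint32_t clamp_count(double d, uint32_t length, uint32_t begin)
{
    double delta = (double) length - begin;
    if (d < 0)
        d = 0;
    else if (d > delta)
        d = delta;
    return (uint32_t) d;
}

int main(void)
{
    assert(clamp_start(-2, 5) == 3);    /* splice(-2, ...) on length 5       */
    assert(clamp_start(-9, 5) == 0);
    assert(clamp_start(7, 5)  == 5);
    assert(clamp_count(10, 5, 3) == 2); /* can delete at most 2 from index 3 */
    return 0;
}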
+
+/*
+ * Python-esque sequence operations.
+ */
+static JSBool
+array_concat(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsval *vp, v;
+ JSObject *nobj, *aobj;
+ jsuint length, alength, slot;
+ uintN i;
+ JSBool hole;
+
+ /* Hoist the explicit local root address computation. */
+ vp = argv + argc;
+
+ /* Treat obj as the first argument; see ECMA 15.4.4.4. */
+ --argv;
+ JS_ASSERT(obj == JSVAL_TO_OBJECT(argv[0]));
+
+ /* Create a new Array object and store it in the rval local root. */
+ nobj = js_NewArrayObject(cx, 0, NULL);
+ if (!nobj)
+ return JS_FALSE;
+ *rval = OBJECT_TO_JSVAL(nobj);
+
+ /* Loop over [0, argc] to concat args into nobj, expanding all Arrays. */
+ length = 0;
+ for (i = 0; i <= argc; i++) {
+ v = argv[i];
+ if (JSVAL_IS_OBJECT(v)) {
+ aobj = JSVAL_TO_OBJECT(v);
+ if (aobj && OBJ_GET_CLASS(cx, aobj) == &js_ArrayClass) {
+ if (!OBJ_GET_PROPERTY(cx, aobj,
+ ATOM_TO_JSID(cx->runtime->atomState
+ .lengthAtom),
+ vp)) {
+ return JS_FALSE;
+ }
+ if (!ValueIsLength(cx, *vp, &alength))
+ return JS_FALSE;
+ for (slot = 0; slot < alength; slot++) {
+ if (!GetArrayElement(cx, aobj, slot, &hole, vp))
+ return JS_FALSE;
+
+ /*
+ * Per ECMA 262, 15.4.4.4, step 9, ignore non-existent
+ * properties.
+ */
+ if (!hole && !SetArrayElement(cx, nobj, length + slot, *vp))
+ return JS_FALSE;
+ }
+ length += alength;
+ continue;
+ }
+ }
+
+ if (!SetArrayElement(cx, nobj, length, v))
+ return JS_FALSE;
+ length++;
+ }
+
+ return js_SetLengthProperty(cx, nobj, length);
+}
+
+static JSBool
+array_slice(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsval *vp;
+ JSObject *nobj;
+ jsuint length, begin, end, slot;
+ jsdouble d;
+ JSBool hole;
+
+ /* Hoist the explicit local root address computation. */
+ vp = argv + argc;
+
+ /* Create a new Array object and store it in the rval local root. */
+ nobj = js_NewArrayObject(cx, 0, NULL);
+ if (!nobj)
+ return JS_FALSE;
+ *rval = OBJECT_TO_JSVAL(nobj);
+
+ if (!js_GetLengthProperty(cx, obj, &length))
+ return JS_FALSE;
+ begin = 0;
+ end = length;
+
+ if (argc > 0) {
+ if (!js_ValueToNumber(cx, argv[0], &d))
+ return JS_FALSE;
+ d = js_DoubleToInteger(d);
+ if (d < 0) {
+ d += length;
+ if (d < 0)
+ d = 0;
+ } else if (d > length) {
+ d = length;
+ }
+ begin = (jsuint)d;
+
+ if (argc > 1) {
+ if (!js_ValueToNumber(cx, argv[1], &d))
+ return JS_FALSE;
+ d = js_DoubleToInteger(d);
+ if (d < 0) {
+ d += length;
+ if (d < 0)
+ d = 0;
+ } else if (d > length) {
+ d = length;
+ }
+ end = (jsuint)d;
+ }
+ }
+
+ if (begin > end)
+ begin = end;
+
+ for (slot = begin; slot < end; slot++) {
+ if (!GetArrayElement(cx, obj, slot, &hole, vp))
+ return JS_FALSE;
+ if (!hole && !SetArrayElement(cx, nobj, slot - begin, *vp))
+ return JS_FALSE;
+ }
+ return js_SetLengthProperty(cx, nobj, end - begin);
+}
+
+#if JS_HAS_ARRAY_EXTRAS
+
+static JSBool
+array_indexOfHelper(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval, JSBool isLast)
+{
+ jsuint length, i, stop;
+ jsint direction;
+ JSBool hole;
+
+ if (!js_GetLengthProperty(cx, obj, &length))
+ return JS_FALSE;
+ if (length == 0)
+ goto not_found;
+
+ if (argc <= 1) {
+ i = isLast ? length - 1 : 0;
+ } else {
+ jsdouble start;
+
+ if (!js_ValueToNumber(cx, argv[1], &start))
+ return JS_FALSE;
+ start = js_DoubleToInteger(start);
+ if (start < 0) {
+ start += length;
+ if (start < 0) {
+ if (isLast)
+ goto not_found;
+ i = 0;
+ } else {
+ i = (jsuint)start;
+ }
+ } else if (start >= length) {
+ if (!isLast)
+ goto not_found;
+ i = length - 1;
+ } else {
+ i = (jsuint)start;
+ }
+ }
+
+ if (isLast) {
+ stop = 0;
+ direction = -1;
+ } else {
+ stop = length - 1;
+ direction = 1;
+ }
+
+ for (;;) {
+ if (!GetArrayElement(cx, obj, (jsuint)i, &hole, rval))
+ return JS_FALSE;
+ if (!hole && js_StrictlyEqual(*rval, argv[0]))
+ return js_NewNumberValue(cx, i, rval);
+ if (i == stop)
+ goto not_found;
+ i += direction;
+ }
+
+ not_found:
+ *rval = INT_TO_JSVAL(-1);
+ return JS_TRUE;
+}
+
+static JSBool
+array_indexOf(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ return array_indexOfHelper(cx, obj, argc, argv, rval, JS_FALSE);
+}
+
+static JSBool
+array_lastIndexOf(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ return array_indexOfHelper(cx, obj, argc, argv, rval, JS_TRUE);
+}
+
+/* Order is important; extras that use a caller's predicate must follow MAP. */
+typedef enum ArrayExtraMode {
+ FOREACH,
+ MAP,
+ FILTER,
+ SOME,
+ EVERY
+} ArrayExtraMode;
+
+static JSBool
+array_extra(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval,
+ ArrayExtraMode mode)
+{
+ jsval *vp, *sp, *origsp, *oldsp;
+ jsuint length, newlen, i;
+ JSObject *callable, *thisp, *newarr;
+ void *mark;
+ JSStackFrame *fp;
+ JSBool ok, cond, hole;
+
+ /* Hoist the explicit local root address computation. */
+ vp = argv + argc;
+
+ if (!js_GetLengthProperty(cx, obj, &length))
+ return JS_FALSE;
+
+ /*
+ * First, get or compute our callee, so that we error out consistently
+ * when passed a non-callable object.
+ */
+ callable = js_ValueToCallableObject(cx, &argv[0], JSV2F_SEARCH_STACK);
+ if (!callable)
+ return JS_FALSE;
+
+ /*
+ * Set our initial return condition, used for zero-length array cases
+ * (and pre-size our map return to match our known length, for all cases).
+ */
+#ifdef __GNUC__ /* quell GCC overwarning */
+ newlen = 0;
+ newarr = NULL;
+ ok = JS_TRUE;
+#endif
+ switch (mode) {
+ case MAP:
+ case FILTER:
+ newlen = (mode == MAP) ? length : 0;
+ newarr = js_NewArrayObject(cx, newlen, NULL);
+ if (!newarr)
+ return JS_FALSE;
+ *rval = OBJECT_TO_JSVAL(newarr);
+ break;
+ case SOME:
+ *rval = JSVAL_FALSE;
+ break;
+ case EVERY:
+ *rval = JSVAL_TRUE;
+ break;
+ case FOREACH:
+ break;
+ }
+
+ if (length == 0)
+ return JS_TRUE;
+
+ if (argc > 1) {
+ if (!js_ValueToObject(cx, argv[1], &thisp))
+ return JS_FALSE;
+ argv[1] = OBJECT_TO_JSVAL(thisp);
+ } else {
+ thisp = NULL;
+ }
+
+ /* We call with 3 args (value, index, array), plus room for rval. */
+ origsp = js_AllocStack(cx, 2 + 3 + 1, &mark);
+ if (!origsp)
+ return JS_FALSE;
+
+ /* Lift current frame to include our args. */
+ fp = cx->fp;
+ oldsp = fp->sp;
+
+ for (i = 0; i < length; i++) {
+ ok = GetArrayElement(cx, obj, i, &hole, vp);
+ if (!ok)
+ break;
+ if (hole)
+ continue;
+
+ /*
+ * Push callable and 'this', then args. We must do this for every
+ * iteration around the loop since js_Invoke uses origsp[0] for rval
+ * storage and some native functions use origsp[1] for local rooting.
+ */
+ sp = origsp;
+ *sp++ = OBJECT_TO_JSVAL(callable);
+ *sp++ = OBJECT_TO_JSVAL(thisp);
+ *sp++ = *vp;
+ *sp++ = INT_TO_JSVAL(i);
+ *sp++ = OBJECT_TO_JSVAL(obj);
+
+ /* Do the call. */
+ fp->sp = sp;
+ ok = js_Invoke(cx, 3, JSINVOKE_INTERNAL);
+ vp[1] = fp->sp[-1];
+ fp->sp = oldsp;
+ if (!ok)
+ break;
+
+ if (mode > MAP) {
+ if (vp[1] == JSVAL_NULL) {
+ cond = JS_FALSE;
+ } else if (JSVAL_IS_BOOLEAN(vp[1])) {
+ cond = JSVAL_TO_BOOLEAN(vp[1]);
+ } else {
+ ok = js_ValueToBoolean(cx, vp[1], &cond);
+ if (!ok)
+ goto out;
+ }
+ }
+
+ switch (mode) {
+ case FOREACH:
+ break;
+ case MAP:
+ ok = SetArrayElement(cx, newarr, i, vp[1]);
+ if (!ok)
+ goto out;
+ break;
+ case FILTER:
+ if (!cond)
+ break;
+ /* Filter passed *vp, push as result. */
+ ok = SetArrayElement(cx, newarr, newlen++, *vp);
+ if (!ok)
+ goto out;
+ break;
+ case SOME:
+ if (cond) {
+ *rval = JSVAL_TRUE;
+ goto out;
+ }
+ break;
+ case EVERY:
+ if (!cond) {
+ *rval = JSVAL_FALSE;
+ goto out;
+ }
+ break;
+ }
+ }
+
+ out:
+ js_FreeStack(cx, mark);
+ if (ok && mode == FILTER)
+ ok = js_SetLengthProperty(cx, newarr, newlen);
+ return ok;
+}
+
+static JSBool
+array_forEach(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ return array_extra(cx, obj, argc, argv, rval, FOREACH);
+}
+
+static JSBool
+array_map(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ return array_extra(cx, obj, argc, argv, rval, MAP);
+}
+
+static JSBool
+array_filter(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ return array_extra(cx, obj, argc, argv, rval, FILTER);
+}
+
+static JSBool
+array_some(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ return array_extra(cx, obj, argc, argv, rval, SOME);
+}
+
+static JSBool
+array_every(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ return array_extra(cx, obj, argc, argv, rval, EVERY);
+}
+#endif
+
+static JSFunctionSpec array_methods[] = {
+#if JS_HAS_TOSOURCE
+ {js_toSource_str, array_toSource, 0,0,0},
+#endif
+ {js_toString_str, array_toString, 0,0,0},
+ {js_toLocaleString_str, array_toLocaleString, 0,0,0},
+
+ /* Perl-ish methods. */
+ {"join", array_join, 1,JSFUN_GENERIC_NATIVE,0},
+ {"reverse", array_reverse, 0,JSFUN_GENERIC_NATIVE,2},
+ {"sort", array_sort, 1,JSFUN_GENERIC_NATIVE,2},
+ {"push", array_push, 1,JSFUN_GENERIC_NATIVE,0},
+ {"pop", array_pop, 0,JSFUN_GENERIC_NATIVE,0},
+ {"shift", array_shift, 0,JSFUN_GENERIC_NATIVE,1},
+ {"unshift", array_unshift, 1,JSFUN_GENERIC_NATIVE,1},
+ {"splice", array_splice, 2,JSFUN_GENERIC_NATIVE,1},
+
+ /* Python-esque sequence methods. */
+ {"concat", array_concat, 1,JSFUN_GENERIC_NATIVE,1},
+ {"slice", array_slice, 2,JSFUN_GENERIC_NATIVE,1},
+
+#if JS_HAS_ARRAY_EXTRAS
+ {"indexOf", array_indexOf, 1,JSFUN_GENERIC_NATIVE,0},
+ {"lastIndexOf", array_lastIndexOf, 1,JSFUN_GENERIC_NATIVE,0},
+ {"forEach", array_forEach, 1,JSFUN_GENERIC_NATIVE,2},
+ {"map", array_map, 1,JSFUN_GENERIC_NATIVE,2},
+ {"filter", array_filter, 1,JSFUN_GENERIC_NATIVE,2},
+ {"some", array_some, 1,JSFUN_GENERIC_NATIVE,2},
+ {"every", array_every, 1,JSFUN_GENERIC_NATIVE,2},
+#endif
+
+ {0,0,0,0,0}
+};
+
+static JSBool
+Array(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsuint length;
+ jsval *vector;
+
+ /* If called without new, replace obj with a new Array object. */
+ if (!(cx->fp->flags & JSFRAME_CONSTRUCTING)) {
+ obj = js_NewObject(cx, &js_ArrayClass, NULL, NULL);
+ if (!obj)
+ return JS_FALSE;
+ *rval = OBJECT_TO_JSVAL(obj);
+ }
+
+ if (argc == 0) {
+ length = 0;
+ vector = NULL;
+ } else if (argc > 1) {
+ length = (jsuint) argc;
+ vector = argv;
+ } else if (!JSVAL_IS_NUMBER(argv[0])) {
+ length = 1;
+ vector = argv;
+ } else {
+ if (!ValueIsLength(cx, argv[0], &length))
+ return JS_FALSE;
+ vector = NULL;
+ }
+ return InitArrayObject(cx, obj, length, vector);
+}
+
+JSObject *
+js_InitArrayClass(JSContext *cx, JSObject *obj)
+{
+ JSObject *proto;
+
+ proto = JS_InitClass(cx, obj, NULL, &js_ArrayClass, Array, 1,
+ NULL, array_methods, NULL, NULL);
+
+ /* Initialize the Array prototype object so it gets a length property. */
+ if (!proto || !InitArrayObject(cx, proto, 0, NULL))
+ return NULL;
+ return proto;
+}
+
+JSObject *
+js_NewArrayObject(JSContext *cx, jsuint length, jsval *vector)
+{
+ JSTempValueRooter tvr;
+ JSObject *obj;
+
+ obj = js_NewObject(cx, &js_ArrayClass, NULL, NULL);
+ if (!obj)
+ return NULL;
+
+ JS_PUSH_TEMP_ROOT_OBJECT(cx, obj, &tvr);
+ if (!InitArrayObject(cx, obj, length, vector))
+ obj = NULL;
+ JS_POP_TEMP_ROOT(cx, &tvr);
+
+ /* Set/clear newborn root, in case we lost it. */
+ cx->weakRoots.newborn[GCX_OBJECT] = (JSGCThing *) obj;
+ return obj;
+}
diff --git a/third_party/js-1.7/jsarray.h b/third_party/js-1.7/jsarray.h
new file mode 100644
index 0000000..a89561b
--- /dev/null
+++ b/third_party/js-1.7/jsarray.h
@@ -0,0 +1,95 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsarray_h___
+#define jsarray_h___
+/*
+ * JS Array interface.
+ */
+#include "jsprvtd.h"
+#include "jspubtd.h"
+
+JS_BEGIN_EXTERN_C
+
+/* Generous sanity-bound on length (in elements) of array initialiser. */
+#define ARRAY_INIT_LIMIT JS_BIT(24)
+
+extern JSBool
+js_IdIsIndex(jsval id, jsuint *indexp);
+
+extern JSClass js_ArrayClass;
+
+extern JSObject *
+js_InitArrayClass(JSContext *cx, JSObject *obj);
+
+extern JSObject *
+js_NewArrayObject(JSContext *cx, jsuint length, jsval *vector);
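+
+/*
+ * Illustrative usage sketch (hypothetical helper; names are examples only):
+ * build a two-element array from a C vector of jsvals with js_NewArrayObject
+ * and read the length back with js_GetLengthProperty, declared just below.
+ */
+#if 0
+static JSBool
+example_make_pair(JSContext *cx, jsval a, jsval b, JSObject **arrayp)
+{
+    jsval vec[2];
+    jsuint length;
+
+    vec[0] = a;
+    vec[1] = b;
+    *arrayp = js_NewArrayObject(cx, 2, vec);
+    if (!*arrayp)
+        return JS_FALSE;                /* error already reported */
+    return js_GetLengthProperty(cx, *arrayp, &length);  /* length becomes 2 */
+}
+#endif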
+
+extern JSBool
+js_GetLengthProperty(JSContext *cx, JSObject *obj, jsuint *lengthp);
+
+extern JSBool
+js_SetLengthProperty(JSContext *cx, JSObject *obj, jsuint length);
+
+extern JSBool
+js_HasLengthProperty(JSContext *cx, JSObject *obj, jsuint *lengthp);
+
+/*
+ * Test whether an object is "array-like". Currently this means whether obj
+ * is an Array or an arguments object. We would like an API, and probably a
+ * way in the language, to bless other objects as array-like: having indexed
+ * properties, and a 'length' property of uint32 value equal to one more than
+ * the greatest index.
+ */
+extern JSBool
+js_IsArrayLike(JSContext *cx, JSObject *obj, JSBool *answerp, jsuint *lengthp);
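+
+/*
+ * Illustrative usage sketch (hypothetical caller): test array-likeness and
+ * fetch the length in one call; the zero fallback is the caller's own choice.
+ */
+#if 0
+static JSBool
+example_get_length(JSContext *cx, JSObject *obj, jsuint *lengthp)
+{
+    JSBool isArrayLike;
+
+    if (!js_IsArrayLike(cx, obj, &isArrayLike, lengthp))
+        return JS_FALSE;        /* error already reported */
+    if (!isArrayLike)
+        *lengthp = 0;           /* caller-defined fallback */
+    return JS_TRUE;
+}
+#endif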
+
+/*
+ * JS-specific heap sort function.
+ */
+typedef JSBool (*JSComparator)(void *arg, const void *a, const void *b,
+ int *result);
+
+extern JSBool
+js_HeapSort(void *vec, size_t nel, void *pivot, size_t elsize,
+ JSComparator cmp, void *arg);
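+
+/*
+ * Illustrative usage sketch (hypothetical comparator). The pivot argument is
+ * assumed to be caller-supplied scratch space for one element of elsize
+ * bytes, and returning JS_FALSE from the comparator is assumed to abort the
+ * sort.
+ */
+#if 0
+static JSBool
+example_cmp_jsint(void *arg, const void *a, const void *b, int *result)
+{
+    jsint x = *(const jsint *) a;
+    jsint y = *(const jsint *) b;
+
+    *result = (x < y) ? -1 : (x > y) ? 1 : 0;
+    return JS_TRUE;
+}
+
+static JSBool
+example_sort(void)
+{
+    jsint vec[] = { 3, 1, 2 };
+    jsint scratch;
+
+    return js_HeapSort(vec, 3, &scratch, sizeof(jsint),
+                       example_cmp_jsint, NULL);
+}
+#endif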
+
+JS_END_EXTERN_C
+
+#endif /* jsarray_h___ */
diff --git a/third_party/js-1.7/jsatom.c b/third_party/js-1.7/jsatom.c
new file mode 100644
index 0000000..02ee250
--- /dev/null
+++ b/third_party/js-1.7/jsatom.c
@@ -0,0 +1,999 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * JS atom table.
+ */
+#include "jsstddef.h"
+#include <stdlib.h>
+#include <string.h>
+#include "jstypes.h"
+#include "jsutil.h" /* Added by JSIFY */
+#include "jshash.h" /* Added by JSIFY */
+#include "jsprf.h"
+#include "jsapi.h"
+#include "jsatom.h"
+#include "jscntxt.h"
+#include "jsconfig.h"
+#include "jsgc.h"
+#include "jslock.h"
+#include "jsnum.h"
+#include "jsscan.h"
+#include "jsstr.h"
+
+JS_FRIEND_API(const char *)
+js_AtomToPrintableString(JSContext *cx, JSAtom *atom)
+{
+ return js_ValueToPrintableString(cx, ATOM_KEY(atom));
+}
+
+/*
+ * Keep this in sync with jspubtd.h -- an assertion below will insist that
+ * its length match the JSType enum's JSTYPE_LIMIT limit value.
+ */
+const char *js_type_strs[] = {
+ "undefined",
+ js_object_str,
+ "function",
+ "string",
+ "number",
+ "boolean",
+ "null",
+ "xml",
+};
+
+JS_STATIC_ASSERT(JSTYPE_LIMIT ==
+ sizeof js_type_strs / sizeof js_type_strs[0]);
+
+const char *js_boolean_strs[] = {
+ js_false_str,
+ js_true_str
+};
+
+#define JS_PROTO(name,code,init) const char js_##name##_str[] = #name;
+#include "jsproto.tbl"
+#undef JS_PROTO
+
+const char *js_proto_strs[JSProto_LIMIT] = {
+#define JS_PROTO(name,code,init) js_##name##_str,
+#include "jsproto.tbl"
+#undef JS_PROTO
+};
+
+const char js_anonymous_str[] = "anonymous";
+const char js_arguments_str[] = "arguments";
+const char js_arity_str[] = "arity";
+const char js_callee_str[] = "callee";
+const char js_caller_str[] = "caller";
+const char js_class_prototype_str[] = "prototype";
+const char js_constructor_str[] = "constructor";
+const char js_count_str[] = "__count__";
+const char js_each_str[] = "each";
+const char js_eval_str[] = "eval";
+const char js_fileName_str[] = "fileName";
+const char js_get_str[] = "get";
+const char js_getter_str[] = "getter";
+const char js_index_str[] = "index";
+const char js_input_str[] = "input";
+const char js_iterator_str[] = "__iterator__";
+const char js_length_str[] = "length";
+const char js_lineNumber_str[] = "lineNumber";
+const char js_message_str[] = "message";
+const char js_name_str[] = "name";
+const char js_next_str[] = "next";
+const char js_noSuchMethod_str[] = "__noSuchMethod__";
+const char js_object_str[] = "object";
+const char js_parent_str[] = "__parent__";
+const char js_proto_str[] = "__proto__";
+const char js_setter_str[] = "setter";
+const char js_set_str[] = "set";
+const char js_stack_str[] = "stack";
+const char js_toSource_str[] = "toSource";
+const char js_toString_str[] = "toString";
+const char js_toLocaleString_str[] = "toLocaleString";
+const char js_valueOf_str[] = "valueOf";
+
+#if JS_HAS_XML_SUPPORT
+const char js_etago_str[] = "</";
+const char js_namespace_str[] = "namespace";
+const char js_ptagc_str[] = "/>";
+const char js_qualifier_str[] = "::";
+const char js_space_str[] = " ";
+const char js_stago_str[] = "<";
+const char js_star_str[] = "*";
+const char js_starQualifier_str[] = "*::";
+const char js_tagc_str[] = ">";
+const char js_xml_str[] = "xml";
+#endif
+
+#if JS_HAS_GENERATORS
+const char js_close_str[] = "close";
+const char js_send_str[] = "send";
+#endif
+
+#ifdef NARCISSUS
+const char js_call_str[] = "__call__";
+const char js_construct_str[] = "__construct__";
+const char js_hasInstance_str[] = "__hasInstance__";
+const char js_ExecutionContext_str[] = "ExecutionContext";
+const char js_current_str[] = "current";
+#endif
+
+#define HASH_OBJECT(o) (JS_PTR_TO_UINT32(o) >> JSVAL_TAGBITS)
+#define HASH_INT(i) ((JSHashNumber)(i))
+#define HASH_DOUBLE(dp) ((JSDOUBLE_HI32(*dp) ^ JSDOUBLE_LO32(*dp)))
+#define HASH_BOOLEAN(b) ((JSHashNumber)(b))
+
+JS_STATIC_DLL_CALLBACK(JSHashNumber)
+js_hash_atom_key(const void *key)
+{
+ jsval v;
+ jsdouble *dp;
+
+ /* Order JSVAL_IS_* tests by likelihood of success. */
+ v = (jsval)key;
+ if (JSVAL_IS_STRING(v))
+ return js_HashString(JSVAL_TO_STRING(v));
+ if (JSVAL_IS_INT(v))
+ return HASH_INT(JSVAL_TO_INT(v));
+ if (JSVAL_IS_DOUBLE(v)) {
+ dp = JSVAL_TO_DOUBLE(v);
+ return HASH_DOUBLE(dp);
+ }
+ if (JSVAL_IS_OBJECT(v))
+ return HASH_OBJECT(JSVAL_TO_OBJECT(v));
+ if (JSVAL_IS_BOOLEAN(v))
+ return HASH_BOOLEAN(JSVAL_TO_BOOLEAN(v));
+ return (JSHashNumber)v;
+}
+
+JS_STATIC_DLL_CALLBACK(intN)
+js_compare_atom_keys(const void *k1, const void *k2)
+{
+ jsval v1, v2;
+
+ v1 = (jsval)k1, v2 = (jsval)k2;
+ if (JSVAL_IS_STRING(v1) && JSVAL_IS_STRING(v2))
+ return js_EqualStrings(JSVAL_TO_STRING(v1), JSVAL_TO_STRING(v2));
+ if (JSVAL_IS_DOUBLE(v1) && JSVAL_IS_DOUBLE(v2)) {
+ double d1 = *JSVAL_TO_DOUBLE(v1);
+ double d2 = *JSVAL_TO_DOUBLE(v2);
+ if (JSDOUBLE_IS_NaN(d1))
+ return JSDOUBLE_IS_NaN(d2);
+#if defined(XP_WIN)
+ /* XXX MSVC miscompiles such that (NaN == 0) */
+ if (JSDOUBLE_IS_NaN(d2))
+ return JS_FALSE;
+#endif
+ return d1 == d2;
+ }
+ return v1 == v2;
+}
+
+JS_STATIC_DLL_CALLBACK(int)
+js_compare_stub(const void *v1, const void *v2)
+{
+ return 1;
+}
+
+/* These next two are exported to jsscript.c and used similarly there. */
+void * JS_DLL_CALLBACK
+js_alloc_table_space(void *priv, size_t size)
+{
+ return malloc(size);
+}
+
+void JS_DLL_CALLBACK
+js_free_table_space(void *priv, void *item)
+{
+ free(item);
+}
+
+JS_STATIC_DLL_CALLBACK(JSHashEntry *)
+js_alloc_atom(void *priv, const void *key)
+{
+ JSAtomState *state = (JSAtomState *) priv;
+ JSAtom *atom;
+
+ atom = (JSAtom *) malloc(sizeof(JSAtom));
+ if (!atom)
+ return NULL;
+#ifdef JS_THREADSAFE
+ state->tablegen++;
+#endif
+ atom->entry.key = key;
+ atom->entry.value = NULL;
+ atom->flags = 0;
+ atom->number = state->number++;
+ return &atom->entry;
+}
+
+JS_STATIC_DLL_CALLBACK(void)
+js_free_atom(void *priv, JSHashEntry *he, uintN flag)
+{
+ if (flag != HT_FREE_ENTRY)
+ return;
+#ifdef JS_THREADSAFE
+ ((JSAtomState *)priv)->tablegen++;
+#endif
+ free(he);
+}
+
+static JSHashAllocOps atom_alloc_ops = {
+ js_alloc_table_space, js_free_table_space,
+ js_alloc_atom, js_free_atom
+};
+
+#define JS_ATOM_HASH_SIZE 1024
+
+JSBool
+js_InitAtomState(JSContext *cx, JSAtomState *state)
+{
+ state->table = JS_NewHashTable(JS_ATOM_HASH_SIZE, js_hash_atom_key,
+ js_compare_atom_keys, js_compare_stub,
+ &atom_alloc_ops, state);
+ if (!state->table) {
+ JS_ReportOutOfMemory(cx);
+ return JS_FALSE;
+ }
+
+ state->runtime = cx->runtime;
+#ifdef JS_THREADSAFE
+ js_InitLock(&state->lock);
+ state->tablegen = 0;
+#endif
+
+ if (!js_InitPinnedAtoms(cx, state)) {
+ js_FreeAtomState(cx, state);
+ return JS_FALSE;
+ }
+ return JS_TRUE;
+}
+
+JSBool
+js_InitPinnedAtoms(JSContext *cx, JSAtomState *state)
+{
+ uintN i;
+
+#define FROB(lval,str) \
+ JS_BEGIN_MACRO \
+ if (!(state->lval = js_Atomize(cx, str, strlen(str), ATOM_PINNED))) \
+ return JS_FALSE; \
+ JS_END_MACRO
+
+ for (i = 0; i < JSTYPE_LIMIT; i++)
+ FROB(typeAtoms[i], js_type_strs[i]);
+
+ for (i = 0; i < JSProto_LIMIT; i++)
+ FROB(classAtoms[i], js_proto_strs[i]);
+
+ FROB(booleanAtoms[0], js_false_str);
+ FROB(booleanAtoms[1], js_true_str);
+ FROB(nullAtom, js_null_str);
+
+ FROB(anonymousAtom, js_anonymous_str);
+ FROB(argumentsAtom, js_arguments_str);
+ FROB(arityAtom, js_arity_str);
+ FROB(calleeAtom, js_callee_str);
+ FROB(callerAtom, js_caller_str);
+ FROB(classPrototypeAtom, js_class_prototype_str);
+ FROB(constructorAtom, js_constructor_str);
+ FROB(countAtom, js_count_str);
+ FROB(eachAtom, js_each_str);
+ FROB(evalAtom, js_eval_str);
+ FROB(fileNameAtom, js_fileName_str);
+ FROB(getAtom, js_get_str);
+ FROB(getterAtom, js_getter_str);
+ FROB(indexAtom, js_index_str);
+ FROB(inputAtom, js_input_str);
+ FROB(iteratorAtom, js_iterator_str);
+ FROB(lengthAtom, js_length_str);
+ FROB(lineNumberAtom, js_lineNumber_str);
+ FROB(messageAtom, js_message_str);
+ FROB(nameAtom, js_name_str);
+ FROB(nextAtom, js_next_str);
+ FROB(noSuchMethodAtom, js_noSuchMethod_str);
+ FROB(parentAtom, js_parent_str);
+ FROB(protoAtom, js_proto_str);
+ FROB(setAtom, js_set_str);
+ FROB(setterAtom, js_setter_str);
+ FROB(stackAtom, js_stack_str);
+ FROB(toSourceAtom, js_toSource_str);
+ FROB(toStringAtom, js_toString_str);
+ FROB(toLocaleStringAtom, js_toLocaleString_str);
+ FROB(valueOfAtom, js_valueOf_str);
+
+#if JS_HAS_XML_SUPPORT
+ FROB(etagoAtom, js_etago_str);
+ FROB(namespaceAtom, js_namespace_str);
+ FROB(ptagcAtom, js_ptagc_str);
+ FROB(qualifierAtom, js_qualifier_str);
+ FROB(spaceAtom, js_space_str);
+ FROB(stagoAtom, js_stago_str);
+ FROB(starAtom, js_star_str);
+ FROB(starQualifierAtom, js_starQualifier_str);
+ FROB(tagcAtom, js_tagc_str);
+ FROB(xmlAtom, js_xml_str);
+#endif
+
+#if JS_HAS_GENERATORS
+ FROB(closeAtom, js_close_str);
+#endif
+
+#ifdef NARCISSUS
+ FROB(callAtom, js_call_str);
+ FROB(constructAtom, js_construct_str);
+ FROB(hasInstanceAtom, js_hasInstance_str);
+ FROB(ExecutionContextAtom, js_ExecutionContext_str);
+ FROB(currentAtom, js_current_str);
+#endif
+
+#undef FROB
+
+ memset(&state->lazy, 0, sizeof state->lazy);
+ return JS_TRUE;
+}
+
+/* NB: cx unused; js_FinishAtomState calls us with null cx. */
+void
+js_FreeAtomState(JSContext *cx, JSAtomState *state)
+{
+ if (state->table)
+ JS_HashTableDestroy(state->table);
+#ifdef JS_THREADSAFE
+ js_FinishLock(&state->lock);
+#endif
+ memset(state, 0, sizeof *state);
+}
+
+typedef struct UninternArgs {
+ JSRuntime *rt;
+ jsatomid leaks;
+} UninternArgs;
+
+JS_STATIC_DLL_CALLBACK(intN)
+js_atom_uninterner(JSHashEntry *he, intN i, void *arg)
+{
+ JSAtom *atom;
+ UninternArgs *args;
+
+ atom = (JSAtom *)he;
+ args = (UninternArgs *)arg;
+ if (ATOM_IS_STRING(atom))
+ js_FinalizeStringRT(args->rt, ATOM_TO_STRING(atom));
+ else if (ATOM_IS_OBJECT(atom))
+ args->leaks++;
+ return HT_ENUMERATE_NEXT;
+}
+
+void
+js_FinishAtomState(JSAtomState *state)
+{
+ UninternArgs args;
+
+ if (!state->table)
+ return;
+ args.rt = state->runtime;
+ args.leaks = 0;
+ JS_HashTableEnumerateEntries(state->table, js_atom_uninterner, &args);
+#ifdef DEBUG
+ if (args.leaks != 0) {
+ fprintf(stderr,
+"JS engine warning: %lu atoms remain after destroying the JSRuntime.\n"
+" These atoms may point to freed memory. Things reachable\n"
+" through them have not been finalized.\n",
+ (unsigned long) args.leaks);
+ }
+#endif
+ js_FreeAtomState(NULL, state);
+}
+
+typedef struct MarkArgs {
+ JSBool keepAtoms;
+ JSGCThingMarker mark;
+ void *data;
+} MarkArgs;
+
+JS_STATIC_DLL_CALLBACK(intN)
+js_atom_marker(JSHashEntry *he, intN i, void *arg)
+{
+ JSAtom *atom;
+ MarkArgs *args;
+ jsval key;
+
+ atom = (JSAtom *)he;
+ args = (MarkArgs *)arg;
+ if ((atom->flags & (ATOM_PINNED | ATOM_INTERNED)) || args->keepAtoms) {
+ atom->flags |= ATOM_MARK;
+ key = ATOM_KEY(atom);
+ if (JSVAL_IS_GCTHING(key))
+ args->mark(JSVAL_TO_GCTHING(key), args->data);
+ }
+ return HT_ENUMERATE_NEXT;
+}
+
+void
+js_MarkAtomState(JSAtomState *state, JSBool keepAtoms, JSGCThingMarker mark,
+ void *data)
+{
+ MarkArgs args;
+
+ if (!state->table)
+ return;
+ args.keepAtoms = keepAtoms;
+ args.mark = mark;
+ args.data = data;
+ JS_HashTableEnumerateEntries(state->table, js_atom_marker, &args);
+}
+
+JS_STATIC_DLL_CALLBACK(intN)
+js_atom_sweeper(JSHashEntry *he, intN i, void *arg)
+{
+ JSAtom *atom;
+ JSAtomState *state;
+
+ atom = (JSAtom *)he;
+ if (atom->flags & ATOM_MARK) {
+ atom->flags &= ~ATOM_MARK;
+ state = (JSAtomState *)arg;
+ state->liveAtoms++;
+ return HT_ENUMERATE_NEXT;
+ }
+ JS_ASSERT((atom->flags & (ATOM_PINNED | ATOM_INTERNED)) == 0);
+ atom->entry.key = atom->entry.value = NULL;
+ atom->flags = 0;
+ return HT_ENUMERATE_REMOVE;
+}
+
+void
+js_SweepAtomState(JSAtomState *state)
+{
+ state->liveAtoms = 0;
+ if (state->table)
+ JS_HashTableEnumerateEntries(state->table, js_atom_sweeper, state);
+}
+
+JS_STATIC_DLL_CALLBACK(intN)
+js_atom_unpinner(JSHashEntry *he, intN i, void *arg)
+{
+ JSAtom *atom;
+
+ atom = (JSAtom *)he;
+ atom->flags &= ~ATOM_PINNED;
+ return HT_ENUMERATE_NEXT;
+}
+
+void
+js_UnpinPinnedAtoms(JSAtomState *state)
+{
+ if (state->table)
+ JS_HashTableEnumerateEntries(state->table, js_atom_unpinner, NULL);
+}
+
+static JSAtom *
+js_AtomizeHashedKey(JSContext *cx, jsval key, JSHashNumber keyHash, uintN flags)
+{
+ JSAtomState *state;
+ JSHashTable *table;
+ JSHashEntry *he, **hep;
+ JSAtom *atom;
+
+ state = &cx->runtime->atomState;
+ JS_LOCK(&state->lock, cx);
+ table = state->table;
+ hep = JS_HashTableRawLookup(table, keyHash, (void *)key);
+ if ((he = *hep) == NULL) {
+ he = JS_HashTableRawAdd(table, hep, keyHash, (void *)key, NULL);
+ if (!he) {
+ JS_ReportOutOfMemory(cx);
+ atom = NULL;
+ goto out;
+ }
+ }
+
+ atom = (JSAtom *)he;
+ atom->flags |= flags;
+ cx->weakRoots.lastAtom = atom;
+out:
+ JS_UNLOCK(&state->lock,cx);
+ return atom;
+}
+
+JSAtom *
+js_AtomizeObject(JSContext *cx, JSObject *obj, uintN flags)
+{
+ jsval key;
+ JSHashNumber keyHash;
+
+ /* XXX must be set in the following order or MSVC1.52 will crash */
+ keyHash = HASH_OBJECT(obj);
+ key = OBJECT_TO_JSVAL(obj);
+ return js_AtomizeHashedKey(cx, key, keyHash, flags);
+}
+
+JSAtom *
+js_AtomizeBoolean(JSContext *cx, JSBool b, uintN flags)
+{
+ jsval key;
+ JSHashNumber keyHash;
+
+ key = BOOLEAN_TO_JSVAL(b);
+ keyHash = HASH_BOOLEAN(b);
+ return js_AtomizeHashedKey(cx, key, keyHash, flags);
+}
+
+JSAtom *
+js_AtomizeInt(JSContext *cx, jsint i, uintN flags)
+{
+ jsval key;
+ JSHashNumber keyHash;
+
+ key = INT_TO_JSVAL(i);
+ keyHash = HASH_INT(i);
+ return js_AtomizeHashedKey(cx, key, keyHash, flags);
+}
+
+/* Worst-case alignment grain and aligning macro for 2x-sized buffer. */
+#define ALIGNMENT(t) JS_MAX(JSVAL_ALIGN, sizeof(t))
+#define ALIGN(b,t) ((t*) &(b)[ALIGNMENT(t) - (jsuword)(b) % ALIGNMENT(t)])
+
+JSAtom *
+js_AtomizeDouble(JSContext *cx, jsdouble d, uintN flags)
+{
+ jsdouble *dp;
+ JSHashNumber keyHash;
+ jsval key;
+ JSAtomState *state;
+ JSHashTable *table;
+ JSHashEntry *he, **hep;
+ JSAtom *atom;
+ char buf[2 * ALIGNMENT(double)];
+
+ dp = ALIGN(buf, double);
+ *dp = d;
+ keyHash = HASH_DOUBLE(dp);
+ key = DOUBLE_TO_JSVAL(dp);
+ state = &cx->runtime->atomState;
+ JS_LOCK(&state->lock, cx);
+ table = state->table;
+ hep = JS_HashTableRawLookup(table, keyHash, (void *)key);
+ if ((he = *hep) == NULL) {
+#ifdef JS_THREADSAFE
+ uint32 gen = state->tablegen;
+#endif
+ JS_UNLOCK(&state->lock,cx);
+ if (!js_NewDoubleValue(cx, d, &key))
+ return NULL;
+ JS_LOCK(&state->lock, cx);
+#ifdef JS_THREADSAFE
+ if (state->tablegen != gen) {
+ hep = JS_HashTableRawLookup(table, keyHash, (void *)key);
+ if ((he = *hep) != NULL) {
+ atom = (JSAtom *)he;
+ goto out;
+ }
+ }
+#endif
+ he = JS_HashTableRawAdd(table, hep, keyHash, (void *)key, NULL);
+ if (!he) {
+ JS_ReportOutOfMemory(cx);
+ atom = NULL;
+ goto out;
+ }
+ }
+
+ atom = (JSAtom *)he;
+ atom->flags |= flags;
+ cx->weakRoots.lastAtom = atom;
+out:
+ JS_UNLOCK(&state->lock,cx);
+ return atom;
+}
+
+/*
+ * To put an atom into the hidden subspace, XOR its keyHash with this value,
+ * which is (sqrt(2)-1) in 32-bit fixed point.
+ */
+#define HIDDEN_ATOM_SUBSPACE_KEYHASH 0x6A09E667
+
+JSAtom *
+js_AtomizeString(JSContext *cx, JSString *str, uintN flags)
+{
+ JSHashNumber keyHash;
+ jsval key;
+ JSAtomState *state;
+ JSHashTable *table;
+ JSHashEntry *he, **hep;
+ JSAtom *atom;
+
+ keyHash = js_HashString(str);
+ if (flags & ATOM_HIDDEN)
+ keyHash ^= HIDDEN_ATOM_SUBSPACE_KEYHASH;
+ key = STRING_TO_JSVAL(str);
+ state = &cx->runtime->atomState;
+ JS_LOCK(&state->lock, cx);
+ table = state->table;
+ hep = JS_HashTableRawLookup(table, keyHash, (void *)key);
+ if ((he = *hep) == NULL) {
+#ifdef JS_THREADSAFE
+ uint32 gen = state->tablegen;
+ JS_UNLOCK(&state->lock, cx);
+#endif
+
+ if (flags & ATOM_TMPSTR) {
+ str = (flags & ATOM_NOCOPY)
+ ? js_NewString(cx, str->chars, str->length, 0)
+ : js_NewStringCopyN(cx, str->chars, str->length, 0);
+ if (!str)
+ return NULL;
+ key = STRING_TO_JSVAL(str);
+ } else {
+ if (!JS_MakeStringImmutable(cx, str))
+ return NULL;
+ }
+
+#ifdef JS_THREADSAFE
+ JS_LOCK(&state->lock, cx);
+ if (state->tablegen != gen) {
+ hep = JS_HashTableRawLookup(table, keyHash, (void *)key);
+ if ((he = *hep) != NULL) {
+ atom = (JSAtom *)he;
+ if (flags & ATOM_NOCOPY)
+ str->chars = NULL;
+ goto out;
+ }
+ }
+#endif
+
+ he = JS_HashTableRawAdd(table, hep, keyHash, (void *)key, NULL);
+ if (!he) {
+ JS_ReportOutOfMemory(cx);
+ atom = NULL;
+ goto out;
+ }
+ }
+
+ atom = (JSAtom *)he;
+ atom->flags |= flags & (ATOM_PINNED | ATOM_INTERNED | ATOM_HIDDEN);
+ cx->weakRoots.lastAtom = atom;
+out:
+ JS_UNLOCK(&state->lock,cx);
+ return atom;
+}
+
+JS_FRIEND_API(JSAtom *)
+js_Atomize(JSContext *cx, const char *bytes, size_t length, uintN flags)
+{
+ jschar *chars;
+ JSString *str;
+ JSAtom *atom;
+ char buf[2 * ALIGNMENT(JSString)];
+
+ /*
+ * Avoiding the malloc in js_InflateString on shorter strings saves us
+ * over 20,000 malloc calls on mozilla browser startup. This compares to
+ * only 131 calls where the string is longer than a 31 char (net) buffer.
+ * The vast majority of atomized strings are already in the hashtable. So
+ * js_AtomizeString rarely has to copy the temp string we make.
+ */
+#define ATOMIZE_BUF_MAX 32
+ jschar inflated[ATOMIZE_BUF_MAX];
+ size_t inflatedLength = ATOMIZE_BUF_MAX - 1;
+
+ if (length < ATOMIZE_BUF_MAX) {
+ js_InflateStringToBuffer(cx, bytes, length, inflated, &inflatedLength);
+ inflated[inflatedLength] = 0;
+ chars = inflated;
+ } else {
+ inflatedLength = length;
+ chars = js_InflateString(cx, bytes, &inflatedLength);
+ if (!chars)
+ return NULL;
+ flags |= ATOM_NOCOPY;
+ }
+
+ str = ALIGN(buf, JSString);
+
+ str->chars = chars;
+ str->length = inflatedLength;
+ atom = js_AtomizeString(cx, str, ATOM_TMPSTR | flags);
+ if (chars != inflated && (!atom || ATOM_TO_STRING(atom)->chars != chars))
+ JS_free(cx, chars);
+ return atom;
+}
+
+JS_FRIEND_API(JSAtom *)
+js_AtomizeChars(JSContext *cx, const jschar *chars, size_t length, uintN flags)
+{
+ JSString *str;
+ char buf[2 * ALIGNMENT(JSString)];
+
+ str = ALIGN(buf, JSString);
+ str->chars = (jschar *)chars;
+ str->length = length;
+ return js_AtomizeString(cx, str, ATOM_TMPSTR | flags);
+}
+
+JSAtom *
+js_GetExistingStringAtom(JSContext *cx, const jschar *chars, size_t length)
+{
+ JSString *str;
+ char buf[2 * ALIGNMENT(JSString)];
+ JSHashNumber keyHash;
+ jsval key;
+ JSAtomState *state;
+ JSHashTable *table;
+ JSHashEntry **hep;
+
+ str = ALIGN(buf, JSString);
+ str->chars = (jschar *)chars;
+ str->length = length;
+ keyHash = js_HashString(str);
+ key = STRING_TO_JSVAL(str);
+ state = &cx->runtime->atomState;
+ JS_LOCK(&state->lock, cx);
+ table = state->table;
+ hep = JS_HashTableRawLookup(table, keyHash, (void *)key);
+ JS_UNLOCK(&state->lock, cx);
+ return (hep) ? (JSAtom *)*hep : NULL;
+}
+
+JSAtom *
+js_AtomizeValue(JSContext *cx, jsval value, uintN flags)
+{
+ if (JSVAL_IS_STRING(value))
+ return js_AtomizeString(cx, JSVAL_TO_STRING(value), flags);
+ if (JSVAL_IS_INT(value))
+ return js_AtomizeInt(cx, JSVAL_TO_INT(value), flags);
+ if (JSVAL_IS_DOUBLE(value))
+ return js_AtomizeDouble(cx, *JSVAL_TO_DOUBLE(value), flags);
+ if (JSVAL_IS_OBJECT(value))
+ return js_AtomizeObject(cx, JSVAL_TO_OBJECT(value), flags);
+ if (JSVAL_IS_BOOLEAN(value))
+ return js_AtomizeBoolean(cx, JSVAL_TO_BOOLEAN(value), flags);
+ return js_AtomizeHashedKey(cx, value, (JSHashNumber)value, flags);
+}
+
+JSAtom *
+js_ValueToStringAtom(JSContext *cx, jsval v)
+{
+ JSString *str;
+
+ str = js_ValueToString(cx, v);
+ if (!str)
+ return NULL;
+ return js_AtomizeString(cx, str, 0);
+}
+
+JS_STATIC_DLL_CALLBACK(JSHashNumber)
+js_hash_atom_ptr(const void *key)
+{
+ const JSAtom *atom = key;
+ return atom->number;
+}
+
+JS_STATIC_DLL_CALLBACK(void *)
+js_alloc_temp_space(void *priv, size_t size)
+{
+ JSContext *cx = priv;
+ void *space;
+
+ JS_ARENA_ALLOCATE(space, &cx->tempPool, size);
+ if (!space)
+ JS_ReportOutOfMemory(cx);
+ return space;
+}
+
+JS_STATIC_DLL_CALLBACK(void)
+js_free_temp_space(void *priv, void *item)
+{
+}
+
+JS_STATIC_DLL_CALLBACK(JSHashEntry *)
+js_alloc_temp_entry(void *priv, const void *key)
+{
+ JSContext *cx = priv;
+ JSAtomListElement *ale;
+
+ JS_ARENA_ALLOCATE_TYPE(ale, JSAtomListElement, &cx->tempPool);
+ if (!ale) {
+ JS_ReportOutOfMemory(cx);
+ return NULL;
+ }
+ return &ale->entry;
+}
+
+JS_STATIC_DLL_CALLBACK(void)
+js_free_temp_entry(void *priv, JSHashEntry *he, uintN flag)
+{
+}
+
+static JSHashAllocOps temp_alloc_ops = {
+ js_alloc_temp_space, js_free_temp_space,
+ js_alloc_temp_entry, js_free_temp_entry
+};
+
+JSAtomListElement *
+js_IndexAtom(JSContext *cx, JSAtom *atom, JSAtomList *al)
+{
+ JSAtomListElement *ale, *ale2, *next;
+ JSHashEntry **hep;
+
+ ATOM_LIST_LOOKUP(ale, hep, al, atom);
+ if (!ale) {
+ if (al->count < 10) {
+ /* Few enough for linear search, no hash table needed. */
+ JS_ASSERT(!al->table);
+ ale = (JSAtomListElement *)js_alloc_temp_entry(cx, atom);
+ if (!ale)
+ return NULL;
+ ALE_SET_ATOM(ale, atom);
+ ALE_SET_NEXT(ale, al->list);
+ al->list = ale;
+ } else {
+ /* We want to hash. Have we already made a hash table? */
+ if (!al->table) {
+ /* No hash table yet, so hep had better be null! */
+ JS_ASSERT(!hep);
+ al->table = JS_NewHashTable(al->count + 1, js_hash_atom_ptr,
+ JS_CompareValues, JS_CompareValues,
+ &temp_alloc_ops, cx);
+ if (!al->table)
+ return NULL;
+
+ /*
+ * Set ht->nentries explicitly, because we are moving entries
+ * from al to ht, not calling JS_HashTable(Raw|)Add.
+ */
+ al->table->nentries = al->count;
+
+ /* Insert each ale on al->list into the new hash table. */
+ for (ale2 = al->list; ale2; ale2 = next) {
+ next = ALE_NEXT(ale2);
+ ale2->entry.keyHash = ALE_ATOM(ale2)->number;
+ hep = JS_HashTableRawLookup(al->table, ale2->entry.keyHash,
+ ale2->entry.key);
+ ALE_SET_NEXT(ale2, *hep);
+ *hep = &ale2->entry;
+ }
+ al->list = NULL;
+
+ /* Set hep for insertion of atom's ale, immediately below. */
+ hep = JS_HashTableRawLookup(al->table, atom->number, atom);
+ }
+
+ /* Finally, add an entry for atom into the hash bucket at hep. */
+ ale = (JSAtomListElement *)
+ JS_HashTableRawAdd(al->table, hep, atom->number, atom, NULL);
+ if (!ale)
+ return NULL;
+ }
+
+ ALE_SET_INDEX(ale, al->count++);
+ }
+ return ale;
+}
+
+JS_FRIEND_API(JSAtom *)
+js_GetAtom(JSContext *cx, JSAtomMap *map, jsatomid i)
+{
+ JSAtom *atom;
+ static JSAtom dummy;
+
+ JS_ASSERT(map->vector && i < map->length);
+ if (!map->vector || i >= map->length) {
+ char numBuf[12];
+ JS_snprintf(numBuf, sizeof numBuf, "%lu", (unsigned long)i);
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_ATOMIC_NUMBER, numBuf);
+ return &dummy;
+ }
+ atom = map->vector[i];
+ JS_ASSERT(atom);
+ return atom;
+}
+
+JS_STATIC_DLL_CALLBACK(intN)
+js_map_atom(JSHashEntry *he, intN i, void *arg)
+{
+ JSAtomListElement *ale = (JSAtomListElement *)he;
+ JSAtom **vector = arg;
+
+ vector[ALE_INDEX(ale)] = ALE_ATOM(ale);
+ return HT_ENUMERATE_NEXT;
+}
+
+#ifdef DEBUG
+static jsrefcount js_atom_map_count;
+static jsrefcount js_atom_map_hash_table_count;
+#endif
+
+JS_FRIEND_API(JSBool)
+js_InitAtomMap(JSContext *cx, JSAtomMap *map, JSAtomList *al)
+{
+ JSAtom **vector;
+ JSAtomListElement *ale;
+ uint32 count;
+
+#ifdef DEBUG
+ JS_ATOMIC_INCREMENT(&js_atom_map_count);
+#endif
+ ale = al->list;
+ if (!ale && !al->table) {
+ map->vector = NULL;
+ map->length = 0;
+ return JS_TRUE;
+ }
+
+ count = al->count;
+ if (count >= ATOM_INDEX_LIMIT) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_TOO_MANY_LITERALS);
+ return JS_FALSE;
+ }
+ vector = (JSAtom **) JS_malloc(cx, (size_t) count * sizeof *vector);
+ if (!vector)
+ return JS_FALSE;
+
+ if (al->table) {
+#ifdef DEBUG
+ JS_ATOMIC_INCREMENT(&js_atom_map_hash_table_count);
+#endif
+ JS_HashTableEnumerateEntries(al->table, js_map_atom, vector);
+ } else {
+ do {
+ vector[ALE_INDEX(ale)] = ALE_ATOM(ale);
+ } while ((ale = ALE_NEXT(ale)) != NULL);
+ }
+ ATOM_LIST_INIT(al);
+
+ map->vector = vector;
+ map->length = (jsatomid)count;
+ return JS_TRUE;
+}
+
+JS_FRIEND_API(void)
+js_FreeAtomMap(JSContext *cx, JSAtomMap *map)
+{
+ if (map->vector) {
+ JS_free(cx, map->vector);
+ map->vector = NULL;
+ }
+ map->length = 0;
+}
diff --git a/third_party/js-1.7/jsatom.h b/third_party/js-1.7/jsatom.h
new file mode 100644
index 0000000..4fb3d8d
--- /dev/null
+++ b/third_party/js-1.7/jsatom.h
@@ -0,0 +1,456 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsatom_h___
+#define jsatom_h___
+/*
+ * JS atom table.
+ */
+#include <stddef.h>
+#include "jstypes.h"
+#include "jshash.h" /* Added by JSIFY */
+#include "jsapi.h"
+#include "jsprvtd.h"
+#include "jspubtd.h"
+
+#ifdef JS_THREADSAFE
+#include "jslock.h"
+#endif
+
+JS_BEGIN_EXTERN_C
+
+#define ATOM_PINNED 0x01 /* atom is pinned against GC */
+#define ATOM_INTERNED 0x02 /* pinned variant for JS_Intern* API */
+#define ATOM_MARK 0x04 /* atom is reachable via GC */
+#define ATOM_HIDDEN 0x08 /* atom is in special hidden subspace */
+#define ATOM_NOCOPY 0x40 /* don't copy atom string bytes */
+#define ATOM_TMPSTR 0x80 /* internal, to avoid extra string */
+
+struct JSAtom {
+ JSHashEntry entry; /* key is jsval or unhidden atom
+ if ATOM_HIDDEN */
+ uint32 flags; /* pinned, interned, and mark flags */
+ jsatomid number; /* atom serial number and hash code */
+};
+
+#define ATOM_KEY(atom) ((jsval)(atom)->entry.key)
+#define ATOM_IS_OBJECT(atom) JSVAL_IS_OBJECT(ATOM_KEY(atom))
+#define ATOM_TO_OBJECT(atom) JSVAL_TO_OBJECT(ATOM_KEY(atom))
+#define ATOM_IS_INT(atom) JSVAL_IS_INT(ATOM_KEY(atom))
+#define ATOM_TO_INT(atom) JSVAL_TO_INT(ATOM_KEY(atom))
+#define ATOM_IS_DOUBLE(atom) JSVAL_IS_DOUBLE(ATOM_KEY(atom))
+#define ATOM_TO_DOUBLE(atom) JSVAL_TO_DOUBLE(ATOM_KEY(atom))
+#define ATOM_IS_STRING(atom) JSVAL_IS_STRING(ATOM_KEY(atom))
+#define ATOM_TO_STRING(atom) JSVAL_TO_STRING(ATOM_KEY(atom))
+#define ATOM_IS_BOOLEAN(atom) JSVAL_IS_BOOLEAN(ATOM_KEY(atom))
+#define ATOM_TO_BOOLEAN(atom) JSVAL_TO_BOOLEAN(ATOM_KEY(atom))
+
+/*
+ * Return a printable, lossless char[] representation of a string-type atom.
+ * The lifetime of the result extends at least until the next GC activation,
+ * longer if cx's string newborn root is not overwritten.
+ */
+extern JS_FRIEND_API(const char *)
+js_AtomToPrintableString(JSContext *cx, JSAtom *atom);
+
+struct JSAtomListElement {
+ JSHashEntry entry;
+};
+
+#define ALE_ATOM(ale) ((JSAtom *) (ale)->entry.key)
+#define ALE_INDEX(ale) ((jsatomid) JS_PTR_TO_UINT32((ale)->entry.value))
+#define ALE_JSOP(ale) ((JSOp) (ale)->entry.value)
+#define ALE_VALUE(ale) ((jsval) (ale)->entry.value)
+#define ALE_NEXT(ale) ((JSAtomListElement *) (ale)->entry.next)
+
+#define ALE_SET_ATOM(ale,atom) ((ale)->entry.key = (const void *)(atom))
+#define ALE_SET_INDEX(ale,index)((ale)->entry.value = JS_UINT32_TO_PTR(index))
+#define ALE_SET_JSOP(ale,op) ((ale)->entry.value = JS_UINT32_TO_PTR(op))
+#define ALE_SET_VALUE(ale,val) ((ale)->entry.value = (JSHashEntry *)(val))
+#define ALE_SET_NEXT(ale,link) ((ale)->entry.next = (JSHashEntry *)(link))
+
+struct JSAtomList {
+ JSAtomListElement *list; /* literals indexed for mapping */
+ JSHashTable *table; /* hash table if list gets too long */
+ jsuint count; /* count of indexed literals */
+};
+
+#define ATOM_LIST_INIT(al) ((al)->list = NULL, (al)->table = NULL, \
+ (al)->count = 0)
+
+#define ATOM_LIST_SEARCH(_ale,_al,_atom) \
+ JS_BEGIN_MACRO \
+ JSHashEntry **_hep; \
+ ATOM_LIST_LOOKUP(_ale, _hep, _al, _atom); \
+ JS_END_MACRO
+
+#define ATOM_LIST_LOOKUP(_ale,_hep,_al,_atom) \
+ JS_BEGIN_MACRO \
+ if ((_al)->table) { \
+ _hep = JS_HashTableRawLookup((_al)->table, _atom->number, _atom); \
+ _ale = *_hep ? (JSAtomListElement *) *_hep : NULL; \
+ } else { \
+ JSAtomListElement **_alep = &(_al)->list; \
+ _hep = NULL; \
+ while ((_ale = *_alep) != NULL) { \
+ if (ALE_ATOM(_ale) == (_atom)) { \
+ /* Hit, move atom's element to the front of the list. */ \
+ *_alep = ALE_NEXT(_ale); \
+ ALE_SET_NEXT(_ale, (_al)->list); \
+ (_al)->list = _ale; \
+ break; \
+ } \
+ _alep = (JSAtomListElement **)&_ale->entry.next; \
+ } \
+ } \
+ JS_END_MACRO
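+
+/*
+ * Illustrative usage sketch (hypothetical caller): initialize a JSAtomList,
+ * index an atom into it with js_IndexAtom (declared later in this header),
+ * then find it again via ATOM_LIST_SEARCH, which also moves the hit to the
+ * front of the list.
+ */
+#if 0
+static JSBool
+example_index_and_find(JSContext *cx, JSAtom *atom)
+{
+    JSAtomList al;
+    JSAtomListElement *ale;
+
+    ATOM_LIST_INIT(&al);
+    if (!js_IndexAtom(cx, atom, &al))
+        return JS_FALSE;        /* OOM already reported */
+    ATOM_LIST_SEARCH(ale, &al, atom);
+    return ale != NULL && ALE_ATOM(ale) == atom;
+}
+#endif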
+
+struct JSAtomMap {
+ JSAtom **vector; /* array of ptrs to indexed atoms */
+ jsatomid length; /* count of (to-be-)indexed atoms */
+};
+
+struct JSAtomState {
+ JSRuntime *runtime; /* runtime that owns us */
+ JSHashTable *table; /* hash table containing all atoms */
+ jsatomid number; /* one beyond greatest atom number */
+ jsatomid liveAtoms; /* number of live atoms after last GC */
+
+ /* The rt->emptyString atom, see jsstr.c's js_InitRuntimeStringState. */
+ JSAtom *emptyAtom;
+
+ /* Type names and value literals. */
+ JSAtom *typeAtoms[JSTYPE_LIMIT];
+ JSAtom *booleanAtoms[2];
+ JSAtom *nullAtom;
+
+ /* Standard class constructor or prototype names. */
+ JSAtom *classAtoms[JSProto_LIMIT];
+
+ /* Various built-in or commonly-used atoms, pinned on first context. */
+ JSAtom *anonymousAtom;
+ JSAtom *argumentsAtom;
+ JSAtom *arityAtom;
+ JSAtom *calleeAtom;
+ JSAtom *callerAtom;
+ JSAtom *classPrototypeAtom;
+ JSAtom *closeAtom;
+ JSAtom *constructorAtom;
+ JSAtom *countAtom;
+ JSAtom *eachAtom;
+ JSAtom *etagoAtom;
+ JSAtom *evalAtom;
+ JSAtom *fileNameAtom;
+ JSAtom *getAtom;
+ JSAtom *getterAtom;
+ JSAtom *indexAtom;
+ JSAtom *inputAtom;
+ JSAtom *iteratorAtom;
+ JSAtom *lengthAtom;
+ JSAtom *lineNumberAtom;
+ JSAtom *messageAtom;
+ JSAtom *nameAtom;
+ JSAtom *namespaceAtom;
+ JSAtom *nextAtom;
+ JSAtom *noSuchMethodAtom;
+ JSAtom *parentAtom;
+ JSAtom *protoAtom;
+ JSAtom *ptagcAtom;
+ JSAtom *qualifierAtom;
+ JSAtom *setAtom;
+ JSAtom *setterAtom;
+ JSAtom *spaceAtom;
+ JSAtom *stackAtom;
+ JSAtom *stagoAtom;
+ JSAtom *starAtom;
+ JSAtom *starQualifierAtom;
+ JSAtom *tagcAtom;
+ JSAtom *toLocaleStringAtom;
+ JSAtom *toSourceAtom;
+ JSAtom *toStringAtom;
+ JSAtom *valueOfAtom;
+ JSAtom *xmlAtom;
+
+ /* Less frequently used atoms, pinned lazily by JS_ResolveStandardClass. */
+ struct {
+ JSAtom *InfinityAtom;
+ JSAtom *NaNAtom;
+ JSAtom *XMLListAtom;
+ JSAtom *decodeURIAtom;
+ JSAtom *decodeURIComponentAtom;
+ JSAtom *defineGetterAtom;
+ JSAtom *defineSetterAtom;
+ JSAtom *encodeURIAtom;
+ JSAtom *encodeURIComponentAtom;
+ JSAtom *escapeAtom;
+ JSAtom *functionNamespaceURIAtom;
+ JSAtom *hasOwnPropertyAtom;
+ JSAtom *isFiniteAtom;
+ JSAtom *isNaNAtom;
+ JSAtom *isPrototypeOfAtom;
+ JSAtom *isXMLNameAtom;
+ JSAtom *lookupGetterAtom;
+ JSAtom *lookupSetterAtom;
+ JSAtom *parseFloatAtom;
+ JSAtom *parseIntAtom;
+ JSAtom *propertyIsEnumerableAtom;
+ JSAtom *unescapeAtom;
+ JSAtom *unevalAtom;
+ JSAtom *unwatchAtom;
+ JSAtom *watchAtom;
+ } lazy;
+
+#ifdef JS_THREADSAFE
+ JSThinLock lock;
+ volatile uint32 tablegen;
+#endif
+#ifdef NARCISSUS
+ JSAtom *callAtom;
+ JSAtom *constructAtom;
+ JSAtom *hasInstanceAtom;
+ JSAtom *ExecutionContextAtom;
+ JSAtom *currentAtom;
+#endif
+};
+
+#define CLASS_ATOM(cx,name) \
+ ((cx)->runtime->atomState.classAtoms[JSProto_##name])
+
+/* Well-known predefined strings and their atoms. */
+extern const char *js_type_strs[];
+extern const char *js_boolean_strs[];
+extern const char *js_proto_strs[];
+
+#define JS_PROTO(name,code,init) extern const char js_##name##_str[];
+#include "jsproto.tbl"
+#undef JS_PROTO
+
+extern const char js_anonymous_str[];
+extern const char js_arguments_str[];
+extern const char js_arity_str[];
+extern const char js_callee_str[];
+extern const char js_caller_str[];
+extern const char js_class_prototype_str[];
+extern const char js_close_str[];
+extern const char js_constructor_str[];
+extern const char js_count_str[];
+extern const char js_etago_str[];
+extern const char js_each_str[];
+extern const char js_eval_str[];
+extern const char js_fileName_str[];
+extern const char js_get_str[];
+extern const char js_getter_str[];
+extern const char js_index_str[];
+extern const char js_input_str[];
+extern const char js_iterator_str[];
+extern const char js_length_str[];
+extern const char js_lineNumber_str[];
+extern const char js_message_str[];
+extern const char js_name_str[];
+extern const char js_namespace_str[];
+extern const char js_next_str[];
+extern const char js_noSuchMethod_str[];
+extern const char js_object_str[];
+extern const char js_parent_str[];
+extern const char js_private_str[];
+extern const char js_proto_str[];
+extern const char js_ptagc_str[];
+extern const char js_qualifier_str[];
+extern const char js_send_str[];
+extern const char js_setter_str[];
+extern const char js_set_str[];
+extern const char js_space_str[];
+extern const char js_stack_str[];
+extern const char js_stago_str[];
+extern const char js_star_str[];
+extern const char js_starQualifier_str[];
+extern const char js_tagc_str[];
+extern const char js_toSource_str[];
+extern const char js_toString_str[];
+extern const char js_toLocaleString_str[];
+extern const char js_valueOf_str[];
+extern const char js_xml_str[];
+
+#ifdef NARCISSUS
+extern const char js_call_str[];
+extern const char js_construct_str[];
+extern const char js_hasInstance_str[];
+extern const char js_ExecutionContext_str[];
+extern const char js_current_str[];
+#endif
+
+/*
+ * Initialize atom state. Return true on success, false with an out of
+ * memory error report on failure.
+ */
+extern JSBool
+js_InitAtomState(JSContext *cx, JSAtomState *state);
+
+/*
+ * Free and clear atom state (except for any interned string atoms).
+ */
+extern void
+js_FreeAtomState(JSContext *cx, JSAtomState *state);
+
+/*
+ * Interned strings are atoms that live until state's runtime is destroyed.
+ * This function frees all interned string atoms, and then frees and clears
+ * state's members (just as js_FreeAtomState does), unless there aren't any
+ * interned strings in state -- in which case state must be "free" already.
+ *
+ * NB: js_FreeAtomState is called for each "last" context being destroyed in
+ * a runtime, where there may yet be another context created in the runtime;
+ * whereas js_FinishAtomState is called from JS_DestroyRuntime, when we know
+ * that no more contexts will be created. Thus we minimize garbage during
+ * context-free episodes on a runtime, while preserving atoms created by the
+ * JS_Intern*String APIs for the life of the runtime.
+ */
+extern void
+js_FinishAtomState(JSAtomState *state);
+
+/*
+ * Atom garbage collection hooks.
+ */
+typedef void
+(*JSGCThingMarker)(void *thing, void *data);
+
+extern void
+js_MarkAtomState(JSAtomState *state, JSBool keepAtoms, JSGCThingMarker mark,
+ void *data);
+
+extern void
+js_SweepAtomState(JSAtomState *state);
+
+extern JSBool
+js_InitPinnedAtoms(JSContext *cx, JSAtomState *state);
+
+extern void
+js_UnpinPinnedAtoms(JSAtomState *state);
+
+/*
+ * Find or create the atom for an object. If we create a new atom, give it the
+ * type indicated in flags. Return 0 on failure to allocate memory.
+ */
+extern JSAtom *
+js_AtomizeObject(JSContext *cx, JSObject *obj, uintN flags);
+
+/*
+ * Find or create the atom for a Boolean value. If we create a new atom, give
+ * it the type indicated in flags. Return 0 on failure to allocate memory.
+ */
+extern JSAtom *
+js_AtomizeBoolean(JSContext *cx, JSBool b, uintN flags);
+
+/*
+ * Find or create the atom for an integer value. If we create a new atom, give
+ * it the type indicated in flags. Return 0 on failure to allocate memory.
+ */
+extern JSAtom *
+js_AtomizeInt(JSContext *cx, jsint i, uintN flags);
+
+/*
+ * Find or create the atom for a double value. If we create a new atom, give
+ * it the type indicated in flags. Return 0 on failure to allocate memory.
+ */
+extern JSAtom *
+js_AtomizeDouble(JSContext *cx, jsdouble d, uintN flags);
+
+/*
+ * Find or create the atom for a string. If we create a new atom, give it the
+ * type indicated in flags. Return 0 on failure to allocate memory.
+ */
+extern JSAtom *
+js_AtomizeString(JSContext *cx, JSString *str, uintN flags);
+
+extern JS_FRIEND_API(JSAtom *)
+js_Atomize(JSContext *cx, const char *bytes, size_t length, uintN flags);
+
+extern JS_FRIEND_API(JSAtom *)
+js_AtomizeChars(JSContext *cx, const jschar *chars, size_t length, uintN flags);
+
+/*
+ * Return an existing atom for the given char array or null if the char
+ * sequence is currently not atomized.
+ */
+extern JSAtom *
+js_GetExistingStringAtom(JSContext *cx, const jschar *chars, size_t length);
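+
+/*
+ * Illustrative usage sketch (hypothetical caller): js_Atomize interns a C
+ * string; the same characters can then be looked up again with
+ * js_GetExistingStringAtom. JSSTRING_CHARS and JSSTRING_LENGTH are assumed
+ * to be the string accessors from jsstr.h.
+ */
+#if 0
+static JSBool
+example_intern_and_find(JSContext *cx)
+{
+    JSAtom *atom;
+    JSString *str;
+
+    atom = js_Atomize(cx, "answer", 6, 0);
+    if (!atom)
+        return JS_FALSE;        /* OOM already reported */
+    str = ATOM_TO_STRING(atom);
+    return js_GetExistingStringAtom(cx, JSSTRING_CHARS(str),
+                                    JSSTRING_LENGTH(str)) == atom;
+}
+#endif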
+
+/*
+ * This variant handles all value tag types.
+ */
+extern JSAtom *
+js_AtomizeValue(JSContext *cx, jsval value, uintN flags);
+
+/*
+ * Convert v to an atomized string.
+ */
+extern JSAtom *
+js_ValueToStringAtom(JSContext *cx, jsval v);
+
+/*
+ * Assign atom an index and insert it on al.
+ */
+extern JSAtomListElement *
+js_IndexAtom(JSContext *cx, JSAtom *atom, JSAtomList *al);
+
+/*
+ * Get the atom with index i from map.
+ */
+extern JS_FRIEND_API(JSAtom *)
+js_GetAtom(JSContext *cx, JSAtomMap *map, jsatomid i);
+
+/*
+ * For all unmapped atoms recorded in al, add a mapping from the atom's index
+ * to its address. The GC must not run until all indexed atoms in atomLists
+ * have been mapped by scripts connected to live objects (Function and Script
+ * class objects have scripts as/in their private data -- the GC knows about
+ * these two classes).
+ */
+extern JS_FRIEND_API(JSBool)
+js_InitAtomMap(JSContext *cx, JSAtomMap *map, JSAtomList *al);
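+
+/*
+ * Illustrative usage sketch (hypothetical caller): index one atom into a
+ * JSAtomList, flatten the list into a JSAtomMap with js_InitAtomMap, read
+ * the atom back by its index, then free the map.
+ */
+#if 0
+static JSBool
+example_map_one_atom(JSContext *cx, JSAtom *atom)
+{
+    JSAtomList al;
+    JSAtomListElement *ale;
+    JSAtomMap map;
+    JSBool ok;
+
+    ATOM_LIST_INIT(&al);
+    ale = js_IndexAtom(cx, atom, &al);
+    if (!ale)
+        return JS_FALSE;        /* OOM already reported */
+    if (!js_InitAtomMap(cx, &map, &al))
+        return JS_FALSE;
+    ok = (js_GetAtom(cx, &map, ALE_INDEX(ale)) == atom);
+    js_FreeAtomMap(cx, &map);
+    return ok;
+}
+#endif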
+
+/*
+ * Free map->vector and clear map.
+ */
+extern JS_FRIEND_API(void)
+js_FreeAtomMap(JSContext *cx, JSAtomMap *map);
+
+JS_END_EXTERN_C
+
+#endif /* jsatom_h___ */
diff --git a/third_party/js-1.7/jsbit.h b/third_party/js-1.7/jsbit.h
new file mode 100644
index 0000000..87bb047
--- /dev/null
+++ b/third_party/js-1.7/jsbit.h
@@ -0,0 +1,195 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsbit_h___
+#define jsbit_h___
+
+#include "jstypes.h"
+#include "jsutil.h"
+
+JS_BEGIN_EXTERN_C
+
+/*
+** A jsbitmap_t is a long integer that can be used for bitmaps
+*/
+typedef JSUword jsbitmap_t; /* NSPR name, a la Unix system types */
+typedef jsbitmap_t jsbitmap; /* JS-style scalar typedef name */
+
+#define JS_TEST_BIT(_map,_bit) \
+ ((_map)[(_bit)>>JS_BITS_PER_WORD_LOG2] & (1L << ((_bit) & (JS_BITS_PER_WORD-1))))
+#define JS_SET_BIT(_map,_bit) \
+ ((_map)[(_bit)>>JS_BITS_PER_WORD_LOG2] |= (1L << ((_bit) & (JS_BITS_PER_WORD-1))))
+#define JS_CLEAR_BIT(_map,_bit) \
+ ((_map)[(_bit)>>JS_BITS_PER_WORD_LOG2] &= ~(1L << ((_bit) & (JS_BITS_PER_WORD-1))))
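+
+/*
+ * Illustrative usage sketch: a 128-bit bitmap stored as an array of jsbitmap
+ * words, exercised with the macros above; the word count is computed by hand
+ * rather than assuming a helper macro.
+ */
+#if 0
+static JSBool
+example_bitmap(void)
+{
+    jsbitmap map[(128 + JS_BITS_PER_WORD - 1) / JS_BITS_PER_WORD] = { 0 };
+
+    JS_SET_BIT(map, 5);
+    JS_CLEAR_BIT(map, 5);
+    return !JS_TEST_BIT(map, 5);    /* bit 5 is clear again */
+}
+#endif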
+
+/*
+** Compute the log of the least power of 2 greater than or equal to n
+*/
+extern JS_PUBLIC_API(JSIntn) JS_CeilingLog2(JSUint32 i);
+
+/*
+** Compute the log of the greatest power of 2 less than or equal to n
+*/
+extern JS_PUBLIC_API(JSIntn) JS_FloorLog2(JSUint32 i);
+
+/*
+ * Check whether __builtin_clz is available; it first appeared in GCC 3.4.
+ * The built-in speeds up the ceiling/floor log2 calculations below
+ * (see bug 327129).
+ */
+#if __GNUC__ >= 4 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)
+# define JS_HAS_GCC_BUILTIN_CLZ
+#endif
+
+/*
+** Macro version of JS_CeilingLog2: Compute the log of the least power of
+** 2 greater than or equal to _n. The result is returned in _log2.
+*/
+#ifdef JS_HAS_GCC_BUILTIN_CLZ
+/*
+ * Use __builtin_clz or count-leading-zeros to calculate ceil(log2(_n)).
+ * The macro checks for "n <= 1" and not "n != 0" as __builtin_clz(0) is
+ * undefined.
+ */
+# define JS_CEILING_LOG2(_log2,_n) \
+ JS_BEGIN_MACRO \
+ JS_STATIC_ASSERT(sizeof(unsigned int) == sizeof(JSUint32)); \
+ unsigned int j_ = (unsigned int)(_n); \
+ (_log2) = (j_ <= 1 ? 0 : 32 - __builtin_clz(j_ - 1)); \
+ JS_END_MACRO
+#else
+# define JS_CEILING_LOG2(_log2,_n) \
+ JS_BEGIN_MACRO \
+ JSUint32 j_ = (JSUint32)(_n); \
+ (_log2) = 0; \
+ if ((j_) & ((j_)-1)) \
+ (_log2) += 1; \
+ if ((j_) >> 16) \
+ (_log2) += 16, (j_) >>= 16; \
+ if ((j_) >> 8) \
+ (_log2) += 8, (j_) >>= 8; \
+ if ((j_) >> 4) \
+ (_log2) += 4, (j_) >>= 4; \
+ if ((j_) >> 2) \
+ (_log2) += 2, (j_) >>= 2; \
+ if ((j_) >> 1) \
+ (_log2) += 1; \
+ JS_END_MACRO
+#endif
+
+/*
+** Macro version of JS_FloorLog2: Compute the log of the greatest power of
+** 2 less than or equal to _n. The result is returned in _log2.
+**
+** This is equivalent to finding the highest set bit in the word.
+*/
+#ifdef JS_HAS_GCC_BUILTIN_CLZ
+/*
+ * Use __builtin_clz or count-leading-zeros to calculate floor(log2(_n)).
+ * Since __builtin_clz(0) is undefined, the macro sets the lowest bit to 1
+ * to ensure a 0 result when _n == 0.
+ */
+# define JS_FLOOR_LOG2(_log2,_n) \
+ JS_BEGIN_MACRO \
+ JS_STATIC_ASSERT(sizeof(unsigned int) == sizeof(JSUint32)); \
+ (_log2) = 31 - __builtin_clz(((unsigned int)(_n)) | 1); \
+ JS_END_MACRO
+#else
+# define JS_FLOOR_LOG2(_log2,_n) \
+ JS_BEGIN_MACRO \
+ JSUint32 j_ = (JSUint32)(_n); \
+ (_log2) = 0; \
+ if ((j_) >> 16) \
+ (_log2) += 16, (j_) >>= 16; \
+ if ((j_) >> 8) \
+ (_log2) += 8, (j_) >>= 8; \
+ if ((j_) >> 4) \
+ (_log2) += 4, (j_) >>= 4; \
+ if ((j_) >> 2) \
+ (_log2) += 2, (j_) >>= 2; \
+ if ((j_) >> 1) \
+ (_log2) += 1; \
+ JS_END_MACRO
+#endif
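+
+/*
+ * Illustrative usage sketch: the macro forms store their result through the
+ * first argument, so they are used as statements rather than expressions.
+ */
+#if 0
+static void
+example_log2(void)
+{
+    JSUint32 ceilLog2, floorLog2;
+
+    JS_CEILING_LOG2(ceilLog2, 10);  /* least power of 2 >= 10 is 16, so 4 */
+    JS_FLOOR_LOG2(floorLog2, 10);   /* greatest power of 2 <= 10 is 8, so 3 */
+    (void) ceilLog2;
+    (void) floorLog2;
+}
+#endif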
+
+/*
+ * Internal macro.
+ * Compute the log of the least power of 2 greater than or equal to n.
+ * This is a version of JS_CeilingLog2 that operates on jsuword with
+ * CPU-dependent size.
+ */
+#define JS_CEILING_LOG2W(n) ((n) <= 1 ? 0 : 1 + JS_FLOOR_LOG2W((n) - 1))
+
+/*
+ * Internal macro.
+ * Compute the log of the greatest power of 2 less than or equal to n.
+ * This is a version of JS_FloorLog2 that operates on jsuword with
+ * CPU-dependent size and requires that n != 0.
+ */
+#define JS_FLOOR_LOG2W(n) (JS_ASSERT((n) != 0), js_FloorLog2wImpl(n))
+
+#ifdef JS_HAS_GCC_BUILTIN_CLZ
+
+# if JS_BYTES_PER_WORD == 4
+JS_STATIC_ASSERT(sizeof(unsigned) == sizeof(JSUword));
+# define js_FloorLog2wImpl(n) \
+ ((JSUword)(JS_BITS_PER_WORD - 1 - __builtin_clz(n)))
+# elif JS_BYTES_PER_WORD == 8
+JS_STATIC_ASSERT(sizeof(unsigned long long) == sizeof(JSUword));
+# define js_FloorLog2wImpl(n) \
+ ((JSUword)(JS_BITS_PER_WORD - 1 - __builtin_clzll(n)))
+# else
+# error "NOT SUPPORTED"
+# endif
+
+#else
+
+# if JS_BYTES_PER_WORD == 4
+# define js_FloorLog2wImpl(n) ((JSUword)JS_FloorLog2(n))
+# elif JS_BYTES_PER_WORD == 8
+extern JSUword
+js_FloorLog2wImpl(JSUword n);
+# else
+# error "NOT SUPPORTED"
+# endif
+
+#endif
+
+
+JS_END_EXTERN_C
+#endif /* jsbit_h___ */
diff --git a/third_party/js-1.7/jsbool.c b/third_party/js-1.7/jsbool.c
new file mode 100644
index 0000000..543b4f3
--- /dev/null
+++ b/third_party/js-1.7/jsbool.c
@@ -0,0 +1,227 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * JS boolean implementation.
+ */
+#include "jsstddef.h"
+#include "jstypes.h"
+#include "jsutil.h" /* Added by JSIFY */
+#include "jsapi.h"
+#include "jsatom.h"
+#include "jsbool.h"
+#include "jscntxt.h"
+#include "jsconfig.h"
+#include "jsinterp.h"
+#include "jslock.h"
+#include "jsnum.h"
+#include "jsobj.h"
+#include "jsstr.h"
+
+JSClass js_BooleanClass = {
+ "Boolean",
+ JSCLASS_HAS_PRIVATE | JSCLASS_HAS_CACHED_PROTO(JSProto_Boolean),
+ JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
+ JS_EnumerateStub, JS_ResolveStub, JS_ConvertStub, JS_FinalizeStub,
+ JSCLASS_NO_OPTIONAL_MEMBERS
+};
+
+#if JS_HAS_TOSOURCE
+#include "jsprf.h"
+
+static JSBool
+bool_toSource(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ jsval v;
+ char buf[32];
+ JSString *str;
+
+ if (JSVAL_IS_BOOLEAN((jsval)obj)) {
+ v = (jsval)obj;
+ } else {
+ if (!JS_InstanceOf(cx, obj, &js_BooleanClass, argv))
+ return JS_FALSE;
+ v = OBJ_GET_SLOT(cx, obj, JSSLOT_PRIVATE);
+ if (!JSVAL_IS_BOOLEAN(v))
+ return js_obj_toSource(cx, obj, argc, argv, rval);
+ }
+ JS_snprintf(buf, sizeof buf, "(new %s(%s))",
+ js_BooleanClass.name,
+ js_boolean_strs[JSVAL_TO_BOOLEAN(v) ? 1 : 0]);
+ str = JS_NewStringCopyZ(cx, buf);
+ if (!str)
+ return JS_FALSE;
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+#endif
+
+static JSBool
+bool_toString(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ jsval v;
+ JSAtom *atom;
+ JSString *str;
+
+ if (JSVAL_IS_BOOLEAN((jsval)obj)) {
+ v = (jsval)obj;
+ } else {
+ if (!JS_InstanceOf(cx, obj, &js_BooleanClass, argv))
+ return JS_FALSE;
+ v = OBJ_GET_SLOT(cx, obj, JSSLOT_PRIVATE);
+ if (!JSVAL_IS_BOOLEAN(v))
+ return js_obj_toString(cx, obj, argc, argv, rval);
+ }
+ atom = cx->runtime->atomState.booleanAtoms[JSVAL_TO_BOOLEAN(v) ? 1 : 0];
+ str = ATOM_TO_STRING(atom);
+ if (!str)
+ return JS_FALSE;
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+
+static JSBool
+bool_valueOf(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ if (JSVAL_IS_BOOLEAN((jsval)obj)) {
+ *rval = (jsval)obj;
+ return JS_TRUE;
+ }
+ if (!JS_InstanceOf(cx, obj, &js_BooleanClass, argv))
+ return JS_FALSE;
+ *rval = OBJ_GET_SLOT(cx, obj, JSSLOT_PRIVATE);
+ return JS_TRUE;
+}
+
+static JSFunctionSpec boolean_methods[] = {
+#if JS_HAS_TOSOURCE
+ {js_toSource_str, bool_toSource, 0,JSFUN_THISP_BOOLEAN,0},
+#endif
+ {js_toString_str, bool_toString, 0,JSFUN_THISP_BOOLEAN,0},
+ {js_valueOf_str, bool_valueOf, 0,JSFUN_THISP_BOOLEAN,0},
+ {0,0,0,0,0}
+};
+
+static JSBool
+Boolean(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSBool b;
+ jsval bval;
+
+ if (argc != 0) {
+ if (!js_ValueToBoolean(cx, argv[0], &b))
+ return JS_FALSE;
+ bval = BOOLEAN_TO_JSVAL(b);
+ } else {
+ bval = JSVAL_FALSE;
+ }
+ if (!(cx->fp->flags & JSFRAME_CONSTRUCTING)) {
+ *rval = bval;
+ return JS_TRUE;
+ }
+ OBJ_SET_SLOT(cx, obj, JSSLOT_PRIVATE, bval);
+ return JS_TRUE;
+}
+
+JSObject *
+js_InitBooleanClass(JSContext *cx, JSObject *obj)
+{
+ JSObject *proto;
+
+ proto = JS_InitClass(cx, obj, NULL, &js_BooleanClass, Boolean, 1,
+ NULL, boolean_methods, NULL, NULL);
+ if (!proto)
+ return NULL;
+ OBJ_SET_SLOT(cx, proto, JSSLOT_PRIVATE, JSVAL_FALSE);
+ return proto;
+}
+
+JSObject *
+js_BooleanToObject(JSContext *cx, JSBool b)
+{
+ JSObject *obj;
+
+ obj = js_NewObject(cx, &js_BooleanClass, NULL, NULL);
+ if (!obj)
+ return NULL;
+ OBJ_SET_SLOT(cx, obj, JSSLOT_PRIVATE, BOOLEAN_TO_JSVAL(b));
+ return obj;
+}
+
+JSString *
+js_BooleanToString(JSContext *cx, JSBool b)
+{
+ return ATOM_TO_STRING(cx->runtime->atomState.booleanAtoms[b ? 1 : 0]);
+}
+
+JSBool
+js_ValueToBoolean(JSContext *cx, jsval v, JSBool *bp)
+{
+ JSBool b;
+ jsdouble d;
+
+ if (JSVAL_IS_NULL(v) || JSVAL_IS_VOID(v)) {
+ b = JS_FALSE;
+ } else if (JSVAL_IS_OBJECT(v)) {
+ if (!JS_VERSION_IS_ECMA(cx)) {
+ if (!OBJ_DEFAULT_VALUE(cx, JSVAL_TO_OBJECT(v), JSTYPE_BOOLEAN, &v))
+ return JS_FALSE;
+ if (!JSVAL_IS_BOOLEAN(v))
+ v = JSVAL_TRUE; /* non-null object is true */
+ b = JSVAL_TO_BOOLEAN(v);
+ } else {
+ b = JS_TRUE;
+ }
+ } else if (JSVAL_IS_STRING(v)) {
+ b = JSSTRING_LENGTH(JSVAL_TO_STRING(v)) ? JS_TRUE : JS_FALSE;
+ } else if (JSVAL_IS_INT(v)) {
+ b = JSVAL_TO_INT(v) ? JS_TRUE : JS_FALSE;
+ } else if (JSVAL_IS_DOUBLE(v)) {
+ d = *JSVAL_TO_DOUBLE(v);
+ b = (!JSDOUBLE_IS_NaN(d) && d != 0) ? JS_TRUE : JS_FALSE;
+ } else {
+ JS_ASSERT(JSVAL_IS_BOOLEAN(v));
+ b = JSVAL_TO_BOOLEAN(v);
+ }
+
+ *bp = b;
+ return JS_TRUE;
+}
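
The branches of js_ValueToBoolean above implement the ECMA ToBoolean rules: null/undefined are false, objects are true (in ECMA versions), a string is true unless empty, and a number is true unless it is zero or NaN. A standalone sketch of the numeric and string cases, with plain C types standing in for jsvals (the helper names and main() are illustrative only, not part of the patch):

    #include <math.h>
    #include <stdio.h>
    #include <string.h>

    /* Mirrors the JSVAL_IS_DOUBLE branch: false only for NaN and zero. */
    static int to_boolean_double(double d)      { return !isnan(d) && d != 0; }

    /* Mirrors the JSVAL_IS_STRING branch: false only for the empty string. */
    static int to_boolean_string(const char *s) { return strlen(s) != 0; }

    int main(void)
    {
        printf("%d %d %d %d\n",
               to_boolean_double(nan("")),   /* NaN  -> 0 */
               to_boolean_double(-3.5),      /* -3.5 -> 1 */
               to_boolean_string(""),        /* ""   -> 0 */
               to_boolean_string("false"));  /* any non-empty string -> 1 */
        return 0;
    }
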
diff --git a/third_party/js-1.7/jsbool.h b/third_party/js-1.7/jsbool.h
new file mode 100644
index 0000000..8dbd218
--- /dev/null
+++ b/third_party/js-1.7/jsbool.h
@@ -0,0 +1,76 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsbool_h___
+#define jsbool_h___
+/*
+ * JS boolean interface.
+ */
+
+JS_BEGIN_EXTERN_C
+
+/*
+ * Crypto-booleans, not visible to script but used internally by the engine.
+ *
+ * JSVAL_HOLE is a useful value for identifying a hole in an array. It's also
+ * used in the interpreter to represent "no exception pending". In general it
+ * can be used to represent "no value".
+ *
+ * JSVAL_ARETURN is used to throw an asynchronous return for generator.close().
+ */
+#define JSVAL_HOLE BOOLEAN_TO_JSVAL(2)
+#define JSVAL_ARETURN BOOLEAN_TO_JSVAL(3)
+
+extern JSClass js_BooleanClass;
+
+extern JSObject *
+js_InitBooleanClass(JSContext *cx, JSObject *obj);
+
+extern JSObject *
+js_BooleanToObject(JSContext *cx, JSBool b);
+
+extern JSString *
+js_BooleanToString(JSContext *cx, JSBool b);
+
+extern JSBool
+js_ValueToBoolean(JSContext *cx, jsval v, JSBool *bp);
+
+JS_END_EXTERN_C
+
+#endif /* jsbool_h___ */
diff --git a/third_party/js-1.7/jsclist.h b/third_party/js-1.7/jsclist.h
new file mode 100644
index 0000000..604ec0e
--- /dev/null
+++ b/third_party/js-1.7/jsclist.h
@@ -0,0 +1,139 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsclist_h___
+#define jsclist_h___
+
+#include "jstypes.h"
+
+/*
+** Circular linked list
+*/
+typedef struct JSCListStr {
+ struct JSCListStr *next;
+ struct JSCListStr *prev;
+} JSCList;
+
+/*
+** Insert element "_e" into the list, before "_l".
+*/
+#define JS_INSERT_BEFORE(_e,_l) \
+ JS_BEGIN_MACRO \
+ (_e)->next = (_l); \
+ (_e)->prev = (_l)->prev; \
+ (_l)->prev->next = (_e); \
+ (_l)->prev = (_e); \
+ JS_END_MACRO
+
+/*
+** Insert element "_e" into the list, after "_l".
+*/
+#define JS_INSERT_AFTER(_e,_l) \
+ JS_BEGIN_MACRO \
+ (_e)->next = (_l)->next; \
+ (_e)->prev = (_l); \
+ (_l)->next->prev = (_e); \
+ (_l)->next = (_e); \
+ JS_END_MACRO
+
+/*
+** Return the element following element "_e"
+*/
+#define JS_NEXT_LINK(_e) \
+ ((_e)->next)
+/*
+** Return the element preceding element "_e"
+*/
+#define JS_PREV_LINK(_e) \
+ ((_e)->prev)
+
+/*
+** Append an element "_e" to the end of the list "_l"
+*/
+#define JS_APPEND_LINK(_e,_l) JS_INSERT_BEFORE(_e,_l)
+
+/*
+** Insert an element "_e" at the head of the list "_l"
+*/
+#define JS_INSERT_LINK(_e,_l) JS_INSERT_AFTER(_e,_l)
+
+/* Return the head/tail of the list */
+#define JS_LIST_HEAD(_l) (_l)->next
+#define JS_LIST_TAIL(_l) (_l)->prev
+
+/*
+** Remove the element "_e" from it's circular list.
+*/
+#define JS_REMOVE_LINK(_e) \
+ JS_BEGIN_MACRO \
+ (_e)->prev->next = (_e)->next; \
+ (_e)->next->prev = (_e)->prev; \
+ JS_END_MACRO
+
+/*
+** Remove the element "_e" from it's circular list. Also initializes the
+** linkage.
+*/
+#define JS_REMOVE_AND_INIT_LINK(_e) \
+ JS_BEGIN_MACRO \
+ (_e)->prev->next = (_e)->next; \
+ (_e)->next->prev = (_e)->prev; \
+ (_e)->next = (_e); \
+ (_e)->prev = (_e); \
+ JS_END_MACRO
+
+/*
+** Return non-zero if the given circular list "_l" is empty, zero if the
+** circular list is not empty
+*/
+#define JS_CLIST_IS_EMPTY(_l) \
+ ((_l)->next == (_l))
+
+/*
+** Initialize a circular list
+*/
+#define JS_INIT_CLIST(_l) \
+ JS_BEGIN_MACRO \
+ (_l)->next = (_l); \
+ (_l)->prev = (_l); \
+ JS_END_MACRO
+
+#define JS_INIT_STATIC_CLIST(_l) \
+ {(_l), (_l)}
+
+#endif /* jsclist_h___ */
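
The engine uses these macros by embedding a JSCList as the first member of a larger struct (JSContext does this with its links/threadLinks members) and casting a link pointer back to its container. A self-contained usage sketch, assuming the js-1.7 headers are on the include path and the usual platform define (e.g. -DXP_UNIX) is set; the Node type and main() are hypothetical:

    #include <stdio.h>
    #include "jsclist.h"

    typedef struct Node {
        JSCList links;              /* first member, so a JSCList * can be cast back */
        int     value;
    } Node;

    int main(void)
    {
        JSCList head;
        JSCList *link;
        Node a, b;

        JS_INIT_CLIST(&head);               /* empty: head.next == head.prev == &head */
        a.value = 1;
        b.value = 2;
        JS_APPEND_LINK(&a.links, &head);    /* insert before the sentinel == append */
        JS_APPEND_LINK(&b.links, &head);

        for (link = JS_LIST_HEAD(&head); link != &head; link = JS_NEXT_LINK(link))
            printf("%d\n", ((Node *) link)->value);    /* prints 1, then 2 */

        JS_REMOVE_AND_INIT_LINK(&a.links);  /* b is still linked */
        return JS_CLIST_IS_EMPTY(&head) ? 1 : 0;
    }
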
diff --git a/third_party/js-1.7/jscntxt.c b/third_party/js-1.7/jscntxt.c
new file mode 100644
index 0000000..139ad9b
--- /dev/null
+++ b/third_party/js-1.7/jscntxt.c
@@ -0,0 +1,1229 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sw=4 et tw=80:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * JS execution context.
+ */
+#include "jsstddef.h"
+#include <stdarg.h>
+#include <stdlib.h>
+#include <string.h>
+#include "jstypes.h"
+#include "jsarena.h" /* Added by JSIFY */
+#include "jsutil.h" /* Added by JSIFY */
+#include "jsclist.h"
+#include "jsprf.h"
+#include "jsatom.h"
+#include "jscntxt.h"
+#include "jsconfig.h"
+#include "jsdbgapi.h"
+#include "jsexn.h"
+#include "jsgc.h"
+#include "jslock.h"
+#include "jsnum.h"
+#include "jsobj.h"
+#include "jsopcode.h"
+#include "jsscan.h"
+#include "jsscope.h"
+#include "jsscript.h"
+#include "jsstr.h"
+
+#ifdef JS_THREADSAFE
+
+/*
+ * Callback function to delete a JSThread info when the thread that owns it
+ * is destroyed.
+ */
+void JS_DLL_CALLBACK
+js_ThreadDestructorCB(void *ptr)
+{
+ JSThread *thread = (JSThread *)ptr;
+
+ if (!thread)
+ return;
+ while (!JS_CLIST_IS_EMPTY(&thread->contextList)) {
+ /* NB: use a temporary, as the macro evaluates its args many times. */
+ JSCList *link = thread->contextList.next;
+
+ JS_REMOVE_AND_INIT_LINK(link);
+ }
+ GSN_CACHE_CLEAR(&thread->gsnCache);
+ free(thread);
+}
+
+/*
+ * Get current thread-local JSThread info, creating one if it doesn't exist.
+ * Each thread has a unique JSThread pointer.
+ *
+ * Since we are dealing with thread-local data, no lock is needed.
+ *
+ * Return a pointer to the thread-local info, or NULL if the system runs out
+ * of memory or fails to set thread-private data (neither case is very
+ * likely; both are probably due to out-of-memory). It is up to the caller
+ * to report an error, if possible.
+ */
+JSThread *
+js_GetCurrentThread(JSRuntime *rt)
+{
+ JSThread *thread;
+
+ thread = (JSThread *)PR_GetThreadPrivate(rt->threadTPIndex);
+ if (!thread) {
+ thread = (JSThread *) calloc(1, sizeof(JSThread));
+ if (!thread)
+ return NULL;
+
+ if (PR_FAILURE == PR_SetThreadPrivate(rt->threadTPIndex, thread)) {
+ free(thread);
+ return NULL;
+ }
+
+ JS_INIT_CLIST(&thread->contextList);
+ thread->id = js_CurrentThreadId();
+
+ /* js_SetContextThread initializes gcFreeLists as necessary. */
+#ifdef DEBUG
+ memset(thread->gcFreeLists, JS_FREE_PATTERN,
+ sizeof(thread->gcFreeLists));
+#endif
+ }
+ return thread;
+}
+
+/*
+ * Sets current thread as owning thread of a context by assigning the
+ * thread-private info to the context. If the current thread doesn't have
+ * private JSThread info, create one.
+ */
+JSBool
+js_SetContextThread(JSContext *cx)
+{
+ JSThread *thread = js_GetCurrentThread(cx->runtime);
+
+ if (!thread) {
+ JS_ReportOutOfMemory(cx);
+ return JS_FALSE;
+ }
+
+ /*
+ * Clear gcFreeLists on each transition from 0 to 1 context active on the
+ * current thread. See bug 351602.
+ */
+ if (JS_CLIST_IS_EMPTY(&thread->contextList))
+ memset(thread->gcFreeLists, 0, sizeof(thread->gcFreeLists));
+
+ cx->thread = thread;
+ JS_REMOVE_LINK(&cx->threadLinks);
+ JS_APPEND_LINK(&cx->threadLinks, &thread->contextList);
+ return JS_TRUE;
+}
+
+/* Remove the owning thread info of a context. */
+void
+js_ClearContextThread(JSContext *cx)
+{
+ JS_REMOVE_AND_INIT_LINK(&cx->threadLinks);
+#ifdef DEBUG
+ if (JS_CLIST_IS_EMPTY(&cx->thread->contextList)) {
+ memset(cx->thread->gcFreeLists, JS_FREE_PATTERN,
+ sizeof(cx->thread->gcFreeLists));
+ }
+#endif
+ cx->thread = NULL;
+}
+
+#endif /* JS_THREADSAFE */
+
+void
+js_OnVersionChange(JSContext *cx)
+{
+#ifdef DEBUG
+ JSVersion version = JSVERSION_NUMBER(cx);
+
+ JS_ASSERT(version == JSVERSION_DEFAULT || version >= JSVERSION_ECMA_3);
+#endif
+}
+
+void
+js_SetVersion(JSContext *cx, JSVersion version)
+{
+ cx->version = version;
+ js_OnVersionChange(cx);
+}
+
+JSContext *
+js_NewContext(JSRuntime *rt, size_t stackChunkSize)
+{
+ JSContext *cx;
+ JSBool ok, first;
+ JSContextCallback cxCallback;
+
+ cx = (JSContext *) malloc(sizeof *cx);
+ if (!cx)
+ return NULL;
+ memset(cx, 0, sizeof *cx);
+
+ cx->runtime = rt;
+#if JS_STACK_GROWTH_DIRECTION > 0
+ cx->stackLimit = (jsuword)-1;
+#endif
+#ifdef JS_THREADSAFE
+ JS_INIT_CLIST(&cx->threadLinks);
+ js_SetContextThread(cx);
+#endif
+
+ JS_LOCK_GC(rt);
+ for (;;) {
+ first = (rt->contextList.next == &rt->contextList);
+ if (rt->state == JSRTS_UP) {
+ JS_ASSERT(!first);
+ break;
+ }
+ if (rt->state == JSRTS_DOWN) {
+ JS_ASSERT(first);
+ rt->state = JSRTS_LAUNCHING;
+ break;
+ }
+ JS_WAIT_CONDVAR(rt->stateChange, JS_NO_TIMEOUT);
+ }
+ JS_APPEND_LINK(&cx->links, &rt->contextList);
+ JS_UNLOCK_GC(rt);
+
+ /*
+ * First we do the infallible, every-time per-context initializations.
+ * Should a later, fallible initialization (js_InitRegExpStatics, e.g.,
+ * or the stuff under 'if (first)' below) fail, at least the version
+ * and arena-pools will be valid and safe to use (say, from the last GC
+ * done by js_DestroyContext).
+ */
+ cx->version = JSVERSION_DEFAULT;
+ cx->jsop_eq = JSOP_EQ;
+ cx->jsop_ne = JSOP_NE;
+ JS_InitArenaPool(&cx->stackPool, "stack", stackChunkSize, sizeof(jsval));
+ JS_InitArenaPool(&cx->tempPool, "temp", 1024, sizeof(jsdouble));
+
+ if (!js_InitRegExpStatics(cx, &cx->regExpStatics)) {
+ js_DestroyContext(cx, JSDCM_NEW_FAILED);
+ return NULL;
+ }
+
+ /*
+ * If cx is the first context on this runtime, initialize well-known atoms,
+ * keywords, numbers, and strings. If one of these steps should fail, the
+ * runtime will be left in a partially initialized state, with zeroes and
+ * nulls stored in the default-initialized remainder of the struct. We'll
+ * clean the runtime up under js_DestroyContext, because cx will be "last"
+ * as well as "first".
+ */
+ if (first) {
+#ifdef JS_THREADSAFE
+ JS_BeginRequest(cx);
+#endif
+ /*
+ * Both atomState and the scriptFilenameTable may be left over from a
+ * previous episode of non-zero contexts alive in rt, so don't re-init
+ * either table if it's not necessary. Just repopulate atomState with
+ * well-known internal atoms, and with the reserved identifiers added
+ * by the scanner.
+ */
+ ok = (rt->atomState.liveAtoms == 0)
+ ? js_InitAtomState(cx, &rt->atomState)
+ : js_InitPinnedAtoms(cx, &rt->atomState);
+ if (ok && !rt->scriptFilenameTable)
+ ok = js_InitRuntimeScriptState(rt);
+ if (ok)
+ ok = js_InitRuntimeNumberState(cx);
+ if (ok)
+ ok = js_InitRuntimeStringState(cx);
+#ifdef JS_THREADSAFE
+ JS_EndRequest(cx);
+#endif
+ if (!ok) {
+ js_DestroyContext(cx, JSDCM_NEW_FAILED);
+ return NULL;
+ }
+
+ JS_LOCK_GC(rt);
+ rt->state = JSRTS_UP;
+ JS_NOTIFY_ALL_CONDVAR(rt->stateChange);
+ JS_UNLOCK_GC(rt);
+ }
+
+ cxCallback = rt->cxCallback;
+ if (cxCallback && !cxCallback(cx, JSCONTEXT_NEW)) {
+ js_DestroyContext(cx, JSDCM_NEW_FAILED);
+ return NULL;
+ }
+ return cx;
+}
+
+void
+js_DestroyContext(JSContext *cx, JSDestroyContextMode mode)
+{
+ JSRuntime *rt;
+ JSContextCallback cxCallback;
+ JSBool last;
+ JSArgumentFormatMap *map;
+ JSLocalRootStack *lrs;
+ JSLocalRootChunk *lrc;
+
+ rt = cx->runtime;
+
+ if (mode != JSDCM_NEW_FAILED) {
+ cxCallback = rt->cxCallback;
+ if (cxCallback) {
+ /*
+ * JSCONTEXT_DESTROY callback is not allowed to fail and must
+ * return true.
+ */
+#ifdef DEBUG
+ JSBool callbackStatus =
+#endif
+ cxCallback(cx, JSCONTEXT_DESTROY);
+ JS_ASSERT(callbackStatus);
+ }
+ }
+
+ /* Remove cx from context list first. */
+ JS_LOCK_GC(rt);
+ JS_ASSERT(rt->state == JSRTS_UP || rt->state == JSRTS_LAUNCHING);
+ JS_REMOVE_LINK(&cx->links);
+ last = (rt->contextList.next == &rt->contextList);
+ if (last)
+ rt->state = JSRTS_LANDING;
+ JS_UNLOCK_GC(rt);
+
+ if (last) {
+#ifdef JS_THREADSAFE
+ /*
+ * If cx is not in a request already, begin one now so that we wait
+ * for any racing GC started on a not-last context to finish, before
+ * we plow ahead and unpin atoms. Note that even though we begin a
+ * request here if necessary, we end all requests on cx below before
+ * forcing a final GC. This lets any not-last context destruction
+ * racing in another thread try to force or maybe run the GC, but by
+ * that point, rt->state will not be JSRTS_UP, and that GC attempt
+ * will return early.
+ */
+ if (cx->requestDepth == 0)
+ JS_BeginRequest(cx);
+#endif
+
+ /* Unpin all pinned atoms before final GC. */
+ js_UnpinPinnedAtoms(&rt->atomState);
+
+ /* Unlock and clear GC things held by runtime pointers. */
+ js_FinishRuntimeNumberState(cx);
+ js_FinishRuntimeStringState(cx);
+
+ /* Clear debugging state to remove GC roots. */
+ JS_ClearAllTraps(cx);
+ JS_ClearAllWatchPoints(cx);
+ }
+
+ /*
+ * Remove more GC roots in regExpStatics, then collect garbage.
+ * XXX anti-modularity alert: we rely on the call to js_RemoveRoot within
+ * XXX this function call to wait for any racing GC to complete, in the
+ * XXX case where JS_DestroyContext is called outside of a request on cx
+ */
+ js_FreeRegExpStatics(cx, &cx->regExpStatics);
+
+#ifdef JS_THREADSAFE
+ /*
+ * Destroying a context implicitly calls JS_EndRequest(). Also, we must
+ * end our request here in case we are "last" -- in that event, another
+ * js_DestroyContext that was not last might be waiting in the GC for our
+ * request to end. We'll let it run below, just before we do the truly
+ * final GC and then free atom state.
+ *
+ * At this point, cx must be inaccessible to other threads. It's off the
+ * rt->contextList, and it should not be reachable via any object private
+ * data structure.
+ */
+ while (cx->requestDepth != 0)
+ JS_EndRequest(cx);
+#endif
+
+ if (last) {
+ js_GC(cx, GC_LAST_CONTEXT);
+
+ /* Try to free atom state, now that no unrooted scripts survive. */
+ if (rt->atomState.liveAtoms == 0)
+ js_FreeAtomState(cx, &rt->atomState);
+
+ /* Also free the script filename table if it exists and is empty. */
+ if (rt->scriptFilenameTable && rt->scriptFilenameTable->nentries == 0)
+ js_FinishRuntimeScriptState(rt);
+
+ /*
+ * Free the deflated string cache, but only after the last GC has
+ * collected all unleaked strings.
+ */
+ js_FinishDeflatedStringCache(rt);
+
+ /* Take the runtime down, now that it has no contexts or atoms. */
+ JS_LOCK_GC(rt);
+ rt->state = JSRTS_DOWN;
+ JS_NOTIFY_ALL_CONDVAR(rt->stateChange);
+ JS_UNLOCK_GC(rt);
+ } else {
+ if (mode == JSDCM_FORCE_GC)
+ js_GC(cx, GC_NORMAL);
+ else if (mode == JSDCM_MAYBE_GC)
+ JS_MaybeGC(cx);
+ }
+
+ /* Free the stuff hanging off of cx. */
+ JS_FinishArenaPool(&cx->stackPool);
+ JS_FinishArenaPool(&cx->tempPool);
+
+ if (cx->lastMessage)
+ free(cx->lastMessage);
+
+ /* Remove any argument formatters. */
+ map = cx->argumentFormatMap;
+ while (map) {
+ JSArgumentFormatMap *temp = map;
+ map = map->next;
+ JS_free(cx, temp);
+ }
+
+ /* Destroy the resolve recursion damper. */
+ if (cx->resolvingTable) {
+ JS_DHashTableDestroy(cx->resolvingTable);
+ cx->resolvingTable = NULL;
+ }
+
+ lrs = cx->localRootStack;
+ if (lrs) {
+ while ((lrc = lrs->topChunk) != &lrs->firstChunk) {
+ lrs->topChunk = lrc->down;
+ JS_free(cx, lrc);
+ }
+ JS_free(cx, lrs);
+ }
+
+#ifdef JS_THREADSAFE
+ js_ClearContextThread(cx);
+#endif
+
+ /* Finally, free cx itself. */
+ free(cx);
+}
+
+JSBool
+js_ValidContextPointer(JSRuntime *rt, JSContext *cx)
+{
+ JSCList *cl;
+
+ for (cl = rt->contextList.next; cl != &rt->contextList; cl = cl->next) {
+ if (cl == &cx->links)
+ return JS_TRUE;
+ }
+ JS_RUNTIME_METER(rt, deadContexts);
+ return JS_FALSE;
+}
+
+JSContext *
+js_ContextIterator(JSRuntime *rt, JSBool unlocked, JSContext **iterp)
+{
+ JSContext *cx = *iterp;
+
+ if (unlocked)
+ JS_LOCK_GC(rt);
+ if (!cx)
+ cx = (JSContext *)&rt->contextList;
+ cx = (JSContext *)cx->links.next;
+ if (&cx->links == &rt->contextList)
+ cx = NULL;
+ *iterp = cx;
+ if (unlocked)
+ JS_UNLOCK_GC(rt);
+ return cx;
+}
+
+JS_STATIC_DLL_CALLBACK(const void *)
+resolving_GetKey(JSDHashTable *table, JSDHashEntryHdr *hdr)
+{
+ JSResolvingEntry *entry = (JSResolvingEntry *)hdr;
+
+ return &entry->key;
+}
+
+JS_STATIC_DLL_CALLBACK(JSDHashNumber)
+resolving_HashKey(JSDHashTable *table, const void *ptr)
+{
+ const JSResolvingKey *key = (const JSResolvingKey *)ptr;
+
+ return ((JSDHashNumber)JS_PTR_TO_UINT32(key->obj) >> JSVAL_TAGBITS) ^ key->id;
+}
+
+JS_PUBLIC_API(JSBool)
+resolving_MatchEntry(JSDHashTable *table,
+ const JSDHashEntryHdr *hdr,
+ const void *ptr)
+{
+ const JSResolvingEntry *entry = (const JSResolvingEntry *)hdr;
+ const JSResolvingKey *key = (const JSResolvingKey *)ptr;
+
+ return entry->key.obj == key->obj && entry->key.id == key->id;
+}
+
+static const JSDHashTableOps resolving_dhash_ops = {
+ JS_DHashAllocTable,
+ JS_DHashFreeTable,
+ resolving_GetKey,
+ resolving_HashKey,
+ resolving_MatchEntry,
+ JS_DHashMoveEntryStub,
+ JS_DHashClearEntryStub,
+ JS_DHashFinalizeStub,
+ NULL
+};
+
+JSBool
+js_StartResolving(JSContext *cx, JSResolvingKey *key, uint32 flag,
+ JSResolvingEntry **entryp)
+{
+ JSDHashTable *table;
+ JSResolvingEntry *entry;
+
+ table = cx->resolvingTable;
+ if (!table) {
+ table = JS_NewDHashTable(&resolving_dhash_ops, NULL,
+ sizeof(JSResolvingEntry),
+ JS_DHASH_MIN_SIZE);
+ if (!table)
+ goto outofmem;
+ cx->resolvingTable = table;
+ }
+
+ entry = (JSResolvingEntry *)
+ JS_DHashTableOperate(table, key, JS_DHASH_ADD);
+ if (!entry)
+ goto outofmem;
+
+ if (entry->flags & flag) {
+ /* An entry for (key, flag) exists already -- dampen recursion. */
+ entry = NULL;
+ } else {
+ /* Fill in key if we were the first to add entry, then set flag. */
+ if (!entry->key.obj)
+ entry->key = *key;
+ entry->flags |= flag;
+ }
+ *entryp = entry;
+ return JS_TRUE;
+
+outofmem:
+ JS_ReportOutOfMemory(cx);
+ return JS_FALSE;
+}
+
+void
+js_StopResolving(JSContext *cx, JSResolvingKey *key, uint32 flag,
+ JSResolvingEntry *entry, uint32 generation)
+{
+ JSDHashTable *table;
+
+ /*
+ * Clear flag from entry->flags and return early if other flags remain.
+ * We must take care to re-lookup entry if the table has changed since
+ * it was found by js_StartResolving.
+ */
+ table = cx->resolvingTable;
+ if (!entry || table->generation != generation) {
+ entry = (JSResolvingEntry *)
+ JS_DHashTableOperate(table, key, JS_DHASH_LOOKUP);
+ }
+ JS_ASSERT(JS_DHASH_ENTRY_IS_BUSY(&entry->hdr));
+ entry->flags &= ~flag;
+ if (entry->flags)
+ return;
+
+ /*
+ * Do a raw remove only if fewer entries were removed than would cause
+ * alpha to be less than .5 (alpha is at most .75). Otherwise, we just
+ * call JS_DHashTableOperate to re-lookup the key and remove its entry,
+ * compressing or shrinking the table as needed.
+ */
+ if (table->removedCount < JS_DHASH_TABLE_SIZE(table) >> 2)
+ JS_DHashTableRawRemove(table, &entry->hdr);
+ else
+ JS_DHashTableOperate(table, key, JS_DHASH_REMOVE);
+}
+
+JSBool
+js_EnterLocalRootScope(JSContext *cx)
+{
+ JSLocalRootStack *lrs;
+ int mark;
+
+ lrs = cx->localRootStack;
+ if (!lrs) {
+ lrs = (JSLocalRootStack *) JS_malloc(cx, sizeof *lrs);
+ if (!lrs)
+ return JS_FALSE;
+ lrs->scopeMark = JSLRS_NULL_MARK;
+ lrs->rootCount = 0;
+ lrs->topChunk = &lrs->firstChunk;
+ lrs->firstChunk.down = NULL;
+ cx->localRootStack = lrs;
+ }
+
+ /* Push lrs->scopeMark to save it for restore when leaving. */
+ mark = js_PushLocalRoot(cx, lrs, INT_TO_JSVAL(lrs->scopeMark));
+ if (mark < 0)
+ return JS_FALSE;
+ lrs->scopeMark = (uint32) mark;
+ return JS_TRUE;
+}
+
+void
+js_LeaveLocalRootScopeWithResult(JSContext *cx, jsval rval)
+{
+ JSLocalRootStack *lrs;
+ uint32 mark, m, n;
+ JSLocalRootChunk *lrc;
+
+ /* Defend against buggy native callers. */
+ lrs = cx->localRootStack;
+ JS_ASSERT(lrs && lrs->rootCount != 0);
+ if (!lrs || lrs->rootCount == 0)
+ return;
+
+ mark = lrs->scopeMark;
+ JS_ASSERT(mark != JSLRS_NULL_MARK);
+ if (mark == JSLRS_NULL_MARK)
+ return;
+
+ /* Free any chunks being popped by this leave operation. */
+ m = mark >> JSLRS_CHUNK_SHIFT;
+ n = (lrs->rootCount - 1) >> JSLRS_CHUNK_SHIFT;
+ while (n > m) {
+ lrc = lrs->topChunk;
+ JS_ASSERT(lrc != &lrs->firstChunk);
+ lrs->topChunk = lrc->down;
+ JS_free(cx, lrc);
+ --n;
+ }
+
+ /*
+ * Pop the scope, restoring lrs->scopeMark. If rval is a GC-thing, push
+ * it on the caller's scope, or store it in lastInternalResult if we are
+ * leaving the outermost scope. We don't need to allocate a new lrc
+ * because we can overwrite the old mark's slot with rval.
+ */
+ lrc = lrs->topChunk;
+ m = mark & JSLRS_CHUNK_MASK;
+ lrs->scopeMark = (uint32) JSVAL_TO_INT(lrc->roots[m]);
+ if (JSVAL_IS_GCTHING(rval) && !JSVAL_IS_NULL(rval)) {
+ if (mark == 0) {
+ cx->weakRoots.lastInternalResult = rval;
+ } else {
+ /*
+ * Increment m to avoid the "else if (m == 0)" case below. If
+ * rval is not a GC-thing, that case would take care of freeing
+ * any chunk that contained only the old mark. Since rval *is*
+ * a GC-thing here, we want to reuse that old mark's slot.
+ */
+ lrc->roots[m++] = rval;
+ ++mark;
+ }
+ }
+ lrs->rootCount = (uint32) mark;
+
+ /*
+ * Free the stack eagerly, risking malloc churn. The alternative would
+ * require an lrs->entryCount member, maintained by Enter and Leave, and
+ * tested by the GC in addition to the cx->localRootStack non-null test.
+ *
+ * That approach would risk hoarding 264 bytes (net) per context. Right
+ * now it seems better to give fresh (dirty in CPU write-back cache, and
+ * the data is no longer needed) memory back to the malloc heap.
+ */
+ if (mark == 0) {
+ cx->localRootStack = NULL;
+ JS_free(cx, lrs);
+ } else if (m == 0) {
+ lrs->topChunk = lrc->down;
+ JS_free(cx, lrc);
+ }
+}
+
+void
+js_ForgetLocalRoot(JSContext *cx, jsval v)
+{
+ JSLocalRootStack *lrs;
+ uint32 i, j, m, n, mark;
+ JSLocalRootChunk *lrc, *lrc2;
+ jsval top;
+
+ lrs = cx->localRootStack;
+ JS_ASSERT(lrs && lrs->rootCount);
+ if (!lrs || lrs->rootCount == 0)
+ return;
+
+ /* Prepare to pop the top-most value from the stack. */
+ n = lrs->rootCount - 1;
+ m = n & JSLRS_CHUNK_MASK;
+ lrc = lrs->topChunk;
+ top = lrc->roots[m];
+
+ /* Be paranoid about calls on an empty scope. */
+ mark = lrs->scopeMark;
+ JS_ASSERT(mark < n);
+ if (mark >= n)
+ return;
+
+ /* If v was not the last root pushed in the top scope, find it. */
+ if (top != v) {
+ /* Search downward in case v was recently pushed. */
+ i = n;
+ j = m;
+ lrc2 = lrc;
+ while (--i > mark) {
+ if (j == 0)
+ lrc2 = lrc2->down;
+ j = i & JSLRS_CHUNK_MASK;
+ if (lrc2->roots[j] == v)
+ break;
+ }
+
+ /* If we didn't find v in this scope, assert and bail out. */
+ JS_ASSERT(i != mark);
+ if (i == mark)
+ return;
+
+ /* Swap top and v so common tail code can pop v. */
+ lrc2->roots[j] = top;
+ }
+
+ /* Pop the last value from the stack. */
+ lrc->roots[m] = JSVAL_NULL;
+ lrs->rootCount = n;
+ if (m == 0) {
+ JS_ASSERT(n != 0);
+ JS_ASSERT(lrc != &lrs->firstChunk);
+ lrs->topChunk = lrc->down;
+ JS_free(cx, lrc);
+ }
+}
+
+int
+js_PushLocalRoot(JSContext *cx, JSLocalRootStack *lrs, jsval v)
+{
+ uint32 n, m;
+ JSLocalRootChunk *lrc;
+
+ n = lrs->rootCount;
+ m = n & JSLRS_CHUNK_MASK;
+ if (n == 0 || m != 0) {
+ /*
+ * At start of first chunk, or not at start of a non-first top chunk.
+ * Check for lrs->rootCount overflow.
+ */
+ if ((uint32)(n + 1) == 0) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_TOO_MANY_LOCAL_ROOTS);
+ return -1;
+ }
+ lrc = lrs->topChunk;
+ JS_ASSERT(n != 0 || lrc == &lrs->firstChunk);
+ } else {
+ /*
+ * After lrs->firstChunk, trying to index at a power-of-two chunk
+ * boundary: need a new chunk.
+ */
+ lrc = (JSLocalRootChunk *) JS_malloc(cx, sizeof *lrc);
+ if (!lrc)
+ return -1;
+ lrc->down = lrs->topChunk;
+ lrs->topChunk = lrc;
+ }
+ lrs->rootCount = n + 1;
+ lrc->roots[m] = v;
+ return (int) n;
+}
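
The chunk bookkeeping in js_PushLocalRoot turns entirely on the low bits of rootCount: the slot within the top chunk is n & JSLRS_CHUNK_MASK, and a new chunk is needed exactly when n is non-zero and that slot wraps to 0 (the negation of the `n == 0 || m != 0` test above). A standalone sketch of the arithmetic, reusing the shift of 8 that jscntxt.h gives JSLRS_CHUNK_SHIFT; the renamed constants and main() are illustrative only:

    #include <stdio.h>

    #define CHUNK_SHIFT 8                   /* same as JSLRS_CHUNK_SHIFT */
    #define CHUNK_SIZE  (1u << CHUNK_SHIFT) /* 256 roots per chunk */
    #define CHUNK_MASK  (CHUNK_SIZE - 1)

    int main(void)
    {
        unsigned counts[] = { 0, 1, 255, 256, 257, 512 };
        size_t i;

        for (i = 0; i < sizeof counts / sizeof counts[0]; i++) {
            unsigned n = counts[i];               /* rootCount before the push */
            unsigned m = n & CHUNK_MASK;          /* slot index in the top chunk */
            int needs_chunk = (n != 0 && m == 0); /* allocate a new chunk? */
            printf("n=%3u  slot=%3u  new chunk=%d\n", n, m, needs_chunk);
        }
        return 0;
    }
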
+
+void
+js_MarkLocalRoots(JSContext *cx, JSLocalRootStack *lrs)
+{
+ uint32 n, m, mark;
+ JSLocalRootChunk *lrc;
+
+ n = lrs->rootCount;
+ if (n == 0)
+ return;
+
+ mark = lrs->scopeMark;
+ lrc = lrs->topChunk;
+ do {
+ while (--n > mark) {
+#ifdef GC_MARK_DEBUG
+ char name[22];
+ JS_snprintf(name, sizeof name, "<local root %u>", n);
+#endif
+ m = n & JSLRS_CHUNK_MASK;
+ JS_ASSERT(JSVAL_IS_GCTHING(lrc->roots[m]));
+ GC_MARK(cx, JSVAL_TO_GCTHING(lrc->roots[m]), name);
+ if (m == 0)
+ lrc = lrc->down;
+ }
+ m = n & JSLRS_CHUNK_MASK;
+ mark = JSVAL_TO_INT(lrc->roots[m]);
+ if (m == 0)
+ lrc = lrc->down;
+ } while (n != 0);
+ JS_ASSERT(!lrc);
+}
+
+static void
+ReportError(JSContext *cx, const char *message, JSErrorReport *reportp)
+{
+ /*
+ * Check the error report, and set a JavaScript-catchable exception
+ * if the error is defined to have an associated exception. If an
+ * exception is thrown, then the JSREPORT_EXCEPTION flag will be set
+ * on the error report, and exception-aware hosts should ignore it.
+ */
+ JS_ASSERT(reportp);
+ if (reportp->errorNumber == JSMSG_UNCAUGHT_EXCEPTION)
+ reportp->flags |= JSREPORT_EXCEPTION;
+
+ /*
+ * Call the error reporter only if an exception wasn't raised.
+ *
+ * If an exception was raised, then we call the debugErrorHook
+ * (if present) to give it a chance to see the error before it
+ * propagates out of scope. This is needed for compatibility
+ * with the old scheme.
+ */
+ if (!js_ErrorToException(cx, message, reportp)) {
+ js_ReportErrorAgain(cx, message, reportp);
+ } else if (cx->runtime->debugErrorHook && cx->errorReporter) {
+ JSDebugErrorHook hook = cx->runtime->debugErrorHook;
+ /* test local in case debugErrorHook changed on another thread */
+ if (hook)
+ hook(cx, message, reportp, cx->runtime->debugErrorHookData);
+ }
+}
+
+/*
+ * We don't post an exception in this case, since doing so runs into
+ * complications of pre-allocating an exception object, which would require
+ * running the Exception class initializer early, etc.
+ * Instead we just invoke the errorReporter with an "Out Of Memory"
+ * type message, and then hope the process ends swiftly.
+ */
+void
+js_ReportOutOfMemory(JSContext *cx)
+{
+ JSStackFrame *fp;
+ JSErrorReport report;
+ JSErrorReporter onError = cx->errorReporter;
+
+ /* Get the message for this error, but we won't expand any arguments. */
+ const JSErrorFormatString *efs =
+ js_GetLocalizedErrorMessage(cx, NULL, NULL, JSMSG_OUT_OF_MEMORY);
+ const char *msg = efs ? efs->format : "Out of memory";
+
+ /* Fill out the report, but don't do anything that requires allocation. */
+ memset(&report, 0, sizeof (struct JSErrorReport));
+ report.flags = JSREPORT_ERROR;
+ report.errorNumber = JSMSG_OUT_OF_MEMORY;
+
+ /*
+ * Walk stack until we find a frame that is associated with some script
+ * rather than a native frame.
+ */
+ for (fp = cx->fp; fp; fp = fp->down) {
+ if (fp->script && fp->pc) {
+ report.filename = fp->script->filename;
+ report.lineno = js_PCToLineNumber(cx, fp->script, fp->pc);
+ break;
+ }
+ }
+
+ /*
+ * If debugErrorHook is present then we give it a chance to veto
+ * sending the error on to the regular ErrorReporter.
+ */
+ if (onError) {
+ JSDebugErrorHook hook = cx->runtime->debugErrorHook;
+ if (hook &&
+ !hook(cx, msg, &report, cx->runtime->debugErrorHookData)) {
+ onError = NULL;
+ }
+ }
+
+ if (onError)
+ onError(cx, msg, &report);
+}
+
+JSBool
+js_ReportErrorVA(JSContext *cx, uintN flags, const char *format, va_list ap)
+{
+ char *message;
+ jschar *ucmessage;
+ size_t messagelen;
+ JSStackFrame *fp;
+ JSErrorReport report;
+ JSBool warning;
+
+ if ((flags & JSREPORT_STRICT) && !JS_HAS_STRICT_OPTION(cx))
+ return JS_TRUE;
+
+ message = JS_vsmprintf(format, ap);
+ if (!message)
+ return JS_FALSE;
+ messagelen = strlen(message);
+
+ memset(&report, 0, sizeof (struct JSErrorReport));
+ report.flags = flags;
+ report.errorNumber = JSMSG_USER_DEFINED_ERROR;
+ report.ucmessage = ucmessage = js_InflateString(cx, message, &messagelen);
+
+ /* Find the top-most active script frame, for best line number blame. */
+ for (fp = cx->fp; fp; fp = fp->down) {
+ if (fp->script && fp->pc) {
+ report.filename = fp->script->filename;
+ report.lineno = js_PCToLineNumber(cx, fp->script, fp->pc);
+ break;
+ }
+ }
+
+ warning = JSREPORT_IS_WARNING(report.flags);
+ if (warning && JS_HAS_WERROR_OPTION(cx)) {
+ report.flags &= ~JSREPORT_WARNING;
+ warning = JS_FALSE;
+ }
+
+ ReportError(cx, message, &report);
+ free(message);
+ JS_free(cx, ucmessage);
+ return warning;
+}
+
+/*
+ * The arguments from ap need to be packaged up into an array and stored
+ * into the report struct.
+ *
+ * The format string addressed by the error number may contain operands
+ * identified by the format {N}, where N is a decimal digit. Each of these
+ * is to be replaced by the Nth argument from the va_list. The complete
+ * message is placed into reportp->ucmessage converted to a JSString.
+ *
+ * Returns true if the expansion succeeds (can fail if out of memory).
+ */
+JSBool
+js_ExpandErrorArguments(JSContext *cx, JSErrorCallback callback,
+ void *userRef, const uintN errorNumber,
+ char **messagep, JSErrorReport *reportp,
+ JSBool *warningp, JSBool charArgs, va_list ap)
+{
+ const JSErrorFormatString *efs;
+ int i;
+ int argCount;
+
+ *warningp = JSREPORT_IS_WARNING(reportp->flags);
+ if (*warningp && JS_HAS_WERROR_OPTION(cx)) {
+ reportp->flags &= ~JSREPORT_WARNING;
+ *warningp = JS_FALSE;
+ }
+
+ *messagep = NULL;
+
+ /* Most calls supply js_GetErrorMessage; if this is so, assume NULL. */
+ if (!callback || callback == js_GetErrorMessage)
+ efs = js_GetLocalizedErrorMessage(cx, userRef, NULL, errorNumber);
+ else
+ efs = callback(userRef, NULL, errorNumber);
+ if (efs) {
+ size_t totalArgsLength = 0;
+ size_t argLengths[10]; /* only {0} thru {9} supported */
+ argCount = efs->argCount;
+ JS_ASSERT(argCount <= 10);
+ if (argCount > 0) {
+ /*
+ * Gather the arguments into an array, and accumulate
+ * their sizes. We allocate 1 more than necessary and
+ * null it out to act as the caboose when we free the
+ * pointers later.
+ */
+ reportp->messageArgs = (const jschar **)
+ JS_malloc(cx, sizeof(jschar *) * (argCount + 1));
+ if (!reportp->messageArgs)
+ return JS_FALSE;
+ reportp->messageArgs[argCount] = NULL;
+ for (i = 0; i < argCount; i++) {
+ if (charArgs) {
+ char *charArg = va_arg(ap, char *);
+ size_t charArgLength = strlen(charArg);
+ reportp->messageArgs[i]
+ = js_InflateString(cx, charArg, &charArgLength);
+ if (!reportp->messageArgs[i])
+ goto error;
+ } else {
+ reportp->messageArgs[i] = va_arg(ap, jschar *);
+ }
+ argLengths[i] = js_strlen(reportp->messageArgs[i]);
+ totalArgsLength += argLengths[i];
+ }
+ /* NULL-terminate for easy copying. */
+ reportp->messageArgs[i] = NULL;
+ }
+ /*
+ * Parse the error format, substituting the argument X
+ * for {X} in the format.
+ */
+ if (argCount > 0) {
+ if (efs->format) {
+ jschar *buffer, *fmt, *out;
+ int expandedArgs = 0;
+ size_t expandedLength;
+ size_t len = strlen(efs->format);
+
+ buffer = fmt = js_InflateString (cx, efs->format, &len);
+ if (!buffer)
+ goto error;
+ expandedLength = len
+ - (3 * argCount) /* exclude the {n} */
+ + totalArgsLength;
+
+ /*
+ * Note - the above calculation assumes that each argument
+ * is used once and only once in the expansion !!!
+ */
+ reportp->ucmessage = out = (jschar *)
+ JS_malloc(cx, (expandedLength + 1) * sizeof(jschar));
+ if (!out) {
+ JS_free (cx, buffer);
+ goto error;
+ }
+ while (*fmt) {
+ if (*fmt == '{') {
+ if (isdigit(fmt[1])) {
+ int d = JS7_UNDEC(fmt[1]);
+ JS_ASSERT(d < argCount);
+ js_strncpy(out, reportp->messageArgs[d],
+ argLengths[d]);
+ out += argLengths[d];
+ fmt += 3;
+ expandedArgs++;
+ continue;
+ }
+ }
+ *out++ = *fmt++;
+ }
+ JS_ASSERT(expandedArgs == argCount);
+ *out = 0;
+ JS_free (cx, buffer);
+ *messagep =
+ js_DeflateString(cx, reportp->ucmessage,
+ (size_t)(out - reportp->ucmessage));
+ if (!*messagep)
+ goto error;
+ }
+ } else {
+ /*
+ * Zero arguments: the format string (if it exists) is the
+ * entire message.
+ */
+ if (efs->format) {
+ size_t len;
+ *messagep = JS_strdup(cx, efs->format);
+ if (!*messagep)
+ goto error;
+ len = strlen(*messagep);
+ reportp->ucmessage = js_InflateString(cx, *messagep, &len);
+ if (!reportp->ucmessage)
+ goto error;
+ }
+ }
+ }
+ if (*messagep == NULL) {
+ /* where's the right place for this ??? */
+ const char *defaultErrorMessage
+ = "No error message available for error number %d";
+ size_t nbytes = strlen(defaultErrorMessage) + 16;
+ *messagep = (char *)JS_malloc(cx, nbytes);
+ if (!*messagep)
+ goto error;
+ JS_snprintf(*messagep, nbytes, defaultErrorMessage, errorNumber);
+ }
+ return JS_TRUE;
+
+error:
+ if (reportp->messageArgs) {
+ /* free the arguments only if we allocated them */
+ if (charArgs) {
+ i = 0;
+ while (reportp->messageArgs[i])
+ JS_free(cx, (void *)reportp->messageArgs[i++]);
+ }
+ JS_free(cx, (void *)reportp->messageArgs);
+ reportp->messageArgs = NULL;
+ }
+ if (reportp->ucmessage) {
+ JS_free(cx, (void *)reportp->ucmessage);
+ reportp->ucmessage = NULL;
+ }
+ if (*messagep) {
+ JS_free(cx, (void *)*messagep);
+ *messagep = NULL;
+ }
+ return JS_FALSE;
+}
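
The heart of js_ExpandErrorArguments is the loop that copies the format string while replacing each "{N}" with the Nth argument. A narrow-char sketch of that substitution step follows; the real code above works on inflated jschar strings, pre-computes argument lengths, and sizes the output exactly, so everything here is illustrative only:

    #include <ctype.h>
    #include <stdio.h>
    #include <string.h>

    /* Expand "{0}".."{9}" in fmt using args[], writing at most outsz-1 chars. */
    static void expand(const char *fmt, const char **args, char *out, size_t outsz)
    {
        size_t used = 0;

        while (*fmt && used + 1 < outsz) {
            if (fmt[0] == '{' && isdigit((unsigned char) fmt[1]) && fmt[2] == '}') {
                const char *arg = args[fmt[1] - '0'];
                size_t len = strlen(arg);
                if (used + len >= outsz)
                    break;                       /* argument would overflow out */
                memcpy(out + used, arg, len);
                used += len;
                fmt += 3;                        /* skip the "{N}" */
            } else {
                out[used++] = *fmt++;
            }
        }
        out[used] = '\0';
    }

    int main(void)
    {
        const char *args[] = { "foo" };
        char buf[64];

        expand("{0} is not defined", args, buf, sizeof buf);
        puts(buf);                               /* prints "foo is not defined" */
        return 0;
    }
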
+
+JSBool
+js_ReportErrorNumberVA(JSContext *cx, uintN flags, JSErrorCallback callback,
+ void *userRef, const uintN errorNumber,
+ JSBool charArgs, va_list ap)
+{
+ JSStackFrame *fp;
+ JSErrorReport report;
+ char *message;
+ JSBool warning;
+
+ if ((flags & JSREPORT_STRICT) && !JS_HAS_STRICT_OPTION(cx))
+ return JS_TRUE;
+
+ memset(&report, 0, sizeof (struct JSErrorReport));
+ report.flags = flags;
+ report.errorNumber = errorNumber;
+
+ /*
+ * If we can't find out where the error was based on the current frame,
+ * see if the next frame has a script/pc combo we can use.
+ */
+ for (fp = cx->fp; fp; fp = fp->down) {
+ if (fp->script && fp->pc) {
+ report.filename = fp->script->filename;
+ report.lineno = js_PCToLineNumber(cx, fp->script, fp->pc);
+ break;
+ }
+ }
+
+ if (!js_ExpandErrorArguments(cx, callback, userRef, errorNumber,
+ &message, &report, &warning, charArgs, ap)) {
+ return JS_FALSE;
+ }
+
+ ReportError(cx, message, &report);
+
+ if (message)
+ JS_free(cx, message);
+ if (report.messageArgs) {
+ /*
+ * js_ExpandErrorArguments owns its messageArgs only if it had to
+ * inflate the arguments (from regular |char *|s).
+ */
+ if (charArgs) {
+ int i = 0;
+ while (report.messageArgs[i])
+ JS_free(cx, (void *)report.messageArgs[i++]);
+ }
+ JS_free(cx, (void *)report.messageArgs);
+ }
+ if (report.ucmessage)
+ JS_free(cx, (void *)report.ucmessage);
+
+ return warning;
+}
+
+JS_FRIEND_API(void)
+js_ReportErrorAgain(JSContext *cx, const char *message, JSErrorReport *reportp)
+{
+ JSErrorReporter onError;
+
+ if (!message)
+ return;
+
+ if (cx->lastMessage)
+ free(cx->lastMessage);
+ cx->lastMessage = JS_strdup(cx, message);
+ if (!cx->lastMessage)
+ return;
+ onError = cx->errorReporter;
+
+ /*
+ * If debugErrorHook is present then we give it a chance to veto
+ * sending the error on to the regular ErrorReporter.
+ */
+ if (onError) {
+ JSDebugErrorHook hook = cx->runtime->debugErrorHook;
+ if (hook &&
+ !hook(cx, cx->lastMessage, reportp,
+ cx->runtime->debugErrorHookData)) {
+ onError = NULL;
+ }
+ }
+ if (onError)
+ onError(cx, cx->lastMessage, reportp);
+}
+
+void
+js_ReportIsNotDefined(JSContext *cx, const char *name)
+{
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_NOT_DEFINED, name);
+}
+
+#if defined DEBUG && defined XP_UNIX
+/* For gdb usage. */
+void js_traceon(JSContext *cx) { cx->tracefp = stderr; }
+void js_traceoff(JSContext *cx) { cx->tracefp = NULL; }
+#endif
+
+JSErrorFormatString js_ErrorFormatString[JSErr_Limit] = {
+#define MSG_DEF(name, number, count, exception, format) \
+ { format, count, exception } ,
+#include "js.msg"
+#undef MSG_DEF
+};
+
+const JSErrorFormatString *
+js_GetErrorMessage(void *userRef, const char *locale, const uintN errorNumber)
+{
+ if ((errorNumber > 0) && (errorNumber < JSErr_Limit))
+ return &js_ErrorFormatString[errorNumber];
+ return NULL;
+}
diff --git a/third_party/js-1.7/jscntxt.h b/third_party/js-1.7/jscntxt.h
new file mode 100644
index 0000000..7ca678e
--- /dev/null
+++ b/third_party/js-1.7/jscntxt.h
@@ -0,0 +1,1013 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sw=4 et tw=78:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jscntxt_h___
+#define jscntxt_h___
+/*
+ * JS execution context.
+ */
+#include "jsarena.h" /* Added by JSIFY */
+#include "jsclist.h"
+#include "jslong.h"
+#include "jsatom.h"
+#include "jsconfig.h"
+#include "jsdhash.h"
+#include "jsgc.h"
+#include "jsinterp.h"
+#include "jsobj.h"
+#include "jsprvtd.h"
+#include "jspubtd.h"
+#include "jsregexp.h"
+#include "jsutil.h"
+
+JS_BEGIN_EXTERN_C
+
+/*
+ * js_GetSrcNote cache to avoid O(n^2) growth in finding a source note for a
+ * given pc in a script.
+ */
+typedef struct JSGSNCache {
+ JSScript *script;
+ JSDHashTable table;
+#ifdef JS_GSNMETER
+ uint32 hits;
+ uint32 misses;
+ uint32 fills;
+ uint32 clears;
+# define GSN_CACHE_METER(cache,cnt) (++(cache)->cnt)
+#else
+# define GSN_CACHE_METER(cache,cnt) /* nothing */
+#endif
+} JSGSNCache;
+
+#define GSN_CACHE_CLEAR(cache) \
+ JS_BEGIN_MACRO \
+ (cache)->script = NULL; \
+ if ((cache)->table.ops) { \
+ JS_DHashTableFinish(&(cache)->table); \
+ (cache)->table.ops = NULL; \
+ } \
+ GSN_CACHE_METER(cache, clears); \
+ JS_END_MACRO
+
+/* These helper macros take a cx as parameter and operate on its GSN cache. */
+#define JS_CLEAR_GSN_CACHE(cx) GSN_CACHE_CLEAR(&JS_GSN_CACHE(cx))
+#define JS_METER_GSN_CACHE(cx,cnt) GSN_CACHE_METER(&JS_GSN_CACHE(cx), cnt)
+
+#ifdef JS_THREADSAFE
+
+/*
+ * Structure uniquely representing a thread. It holds thread-private data
+ * that can be accessed without a global lock.
+ */
+struct JSThread {
+ /* Linked list of all contexts active on this thread. */
+ JSCList contextList;
+
+ /* Opaque thread-id, from NSPR's PR_GetCurrentThread(). */
+ jsword id;
+
+ /* Thread-local gc free lists array. */
+ JSGCThing *gcFreeLists[GC_NUM_FREELISTS];
+
+ /*
+ * Thread-local version of JSRuntime.gcMallocBytes to avoid taking
+ * locks on each JS_malloc.
+ */
+ uint32 gcMallocBytes;
+
+#if JS_HAS_GENERATORS
+ /* Flag indicating that the current thread is executing close hooks. */
+ JSBool gcRunningCloseHooks;
+#endif
+
+ /*
+ * Store the GSN cache in struct JSThread, not struct JSContext, both to
+ * save space and to simplify cleanup in js_GC. Any embedding (Firefox
+ * or another Gecko application) that uses many contexts per thread is
+ * unlikely to interleave js_GetSrcNote-intensive loops in the decompiler
+ * among two or more contexts running script in one thread.
+ */
+ JSGSNCache gsnCache;
+};
+
+#define JS_GSN_CACHE(cx) ((cx)->thread->gsnCache)
+
+extern void JS_DLL_CALLBACK
+js_ThreadDestructorCB(void *ptr);
+
+extern JSBool
+js_SetContextThread(JSContext *cx);
+
+extern void
+js_ClearContextThread(JSContext *cx);
+
+extern JSThread *
+js_GetCurrentThread(JSRuntime *rt);
+
+#endif /* JS_THREADSAFE */
+
+typedef enum JSDestroyContextMode {
+ JSDCM_NO_GC,
+ JSDCM_MAYBE_GC,
+ JSDCM_FORCE_GC,
+ JSDCM_NEW_FAILED
+} JSDestroyContextMode;
+
+typedef enum JSRuntimeState {
+ JSRTS_DOWN,
+ JSRTS_LAUNCHING,
+ JSRTS_UP,
+ JSRTS_LANDING
+} JSRuntimeState;
+
+typedef struct JSPropertyTreeEntry {
+ JSDHashEntryHdr hdr;
+ JSScopeProperty *child;
+} JSPropertyTreeEntry;
+
+/*
+ * Forward declaration for opaque JSRuntime.nativeIteratorStates.
+ */
+typedef struct JSNativeIteratorState JSNativeIteratorState;
+
+struct JSRuntime {
+ /* Runtime state, synchronized by the stateChange/gcLock condvar/lock. */
+ JSRuntimeState state;
+
+ /* Context create/destroy callback. */
+ JSContextCallback cxCallback;
+
+ /* Garbage collector state, used by jsgc.c. */
+ JSGCArenaList gcArenaList[GC_NUM_FREELISTS];
+ JSDHashTable gcRootsHash;
+ JSDHashTable *gcLocksHash;
+ jsrefcount gcKeepAtoms;
+ uint32 gcBytes;
+ uint32 gcLastBytes;
+ uint32 gcMaxBytes;
+ uint32 gcMaxMallocBytes;
+ uint32 gcLevel;
+ uint32 gcNumber;
+
+ /*
+ * NB: do not pack another flag here by claiming gcPadding unless the new
+ * flag is written only by the GC thread. Atomic updates to packed bytes
+ * are not guaranteed, so stores issued by one thread may be lost due to
+ * unsynchronized read-modify-write cycles on other threads.
+ */
+ JSPackedBool gcPoke;
+ JSPackedBool gcRunning;
+ uint16 gcPadding;
+
+ JSGCCallback gcCallback;
+ uint32 gcMallocBytes;
+ JSGCArena *gcUnscannedArenaStackTop;
+#ifdef DEBUG
+ size_t gcUnscannedBagSize;
+#endif
+
+ /*
+ * API compatibility requires keeping GCX_PRIVATE bytes separate from the
+ * original GC types' byte tally. Otherwise embeddings that configure a
+ * good limit for pre-GCX_PRIVATE versions of the engine will see memory
+ * over-pressure too often, possibly leading to failed last-ditch GCs.
+ *
+ * The new XML GC-thing types do add to gcBytes, and they're larger than
+ * the original GC-thing type size (8 bytes on most architectures). So a
+ * user who enables E4X may want to increase the maxbytes value passed to
+ * JS_NewRuntime. TODO: Note this in the API docs.
+ */
+ uint32 gcPrivateBytes;
+
+ /*
+ * Table for tracking iterators to ensure that we close an iterator's state
+ * before finalizing the iterable object.
+ */
+ JSPtrTable gcIteratorTable;
+
+#if JS_HAS_GENERATORS
+ /* Runtime state to support close hooks. */
+ JSGCCloseState gcCloseState;
+#endif
+
+#ifdef JS_GCMETER
+ JSGCStats gcStats;
+#endif
+
+ /* Literal table maintained by jsatom.c functions. */
+ JSAtomState atomState;
+
+ /* Random number generator state, used by jsmath.c. */
+ JSBool rngInitialized;
+ int64 rngMultiplier;
+ int64 rngAddend;
+ int64 rngMask;
+ int64 rngSeed;
+ jsdouble rngDscale;
+
+ /* Well-known numbers held for use by this runtime's contexts. */
+ jsdouble *jsNaN;
+ jsdouble *jsNegativeInfinity;
+ jsdouble *jsPositiveInfinity;
+
+#ifdef JS_THREADSAFE
+ JSLock *deflatedStringCacheLock;
+#endif
+ JSHashTable *deflatedStringCache;
+#ifdef DEBUG
+ uint32 deflatedStringCacheBytes;
+#endif
+
+ /* Empty string held for use by this runtime's contexts. */
+ JSString *emptyString;
+
+ /* List of active contexts sharing this runtime; protected by gcLock. */
+ JSCList contextList;
+
+ /* These are used for debugging -- see jsprvtd.h and jsdbgapi.h. */
+ JSTrapHandler interruptHandler;
+ void *interruptHandlerData;
+ JSNewScriptHook newScriptHook;
+ void *newScriptHookData;
+ JSDestroyScriptHook destroyScriptHook;
+ void *destroyScriptHookData;
+ JSTrapHandler debuggerHandler;
+ void *debuggerHandlerData;
+ JSSourceHandler sourceHandler;
+ void *sourceHandlerData;
+ JSInterpreterHook executeHook;
+ void *executeHookData;
+ JSInterpreterHook callHook;
+ void *callHookData;
+ JSObjectHook objectHook;
+ void *objectHookData;
+ JSTrapHandler throwHook;
+ void *throwHookData;
+ JSDebugErrorHook debugErrorHook;
+ void *debugErrorHookData;
+
+ /* More debugging state, see jsdbgapi.c. */
+ JSCList trapList;
+ JSCList watchPointList;
+
+ /* Weak links to properties, indexed by quickened get/set opcodes. */
+ /* XXX must come after JSCLists or MSVC alignment bug bites empty lists */
+ JSPropertyCache propertyCache;
+
+ /* Client opaque pointer */
+ void *data;
+
+#ifdef JS_THREADSAFE
+ /* These combine to interlock the GC and new requests. */
+ PRLock *gcLock;
+ PRCondVar *gcDone;
+ PRCondVar *requestDone;
+ uint32 requestCount;
+ JSThread *gcThread;
+
+ /* Lock and owning thread pointer for JS_LOCK_RUNTIME. */
+ PRLock *rtLock;
+#ifdef DEBUG
+ jsword rtLockOwner;
+#endif
+
+ /* Used to synchronize down/up state change; protected by gcLock. */
+ PRCondVar *stateChange;
+
+ /* Used to serialize cycle checks when setting __proto__ or __parent__. */
+ PRLock *setSlotLock;
+ PRCondVar *setSlotDone;
+ JSBool setSlotBusy;
+ JSScope *setSlotScope; /* deadlock avoidance, see jslock.c */
+
+ /*
+ * State for sharing single-threaded scopes, once a second thread tries to
+ * lock a scope. The scopeSharingDone condvar is protected by rt->gcLock,
+ * to minimize number of locks taken in JS_EndRequest.
+ *
+ * The scopeSharingTodo linked list is likewise "global" per runtime, not
+ * one-list-per-context, to conserve space over all contexts, optimizing
+ * for the likely case that scopes become shared rarely, and among a very
+ * small set of threads (contexts).
+ */
+ PRCondVar *scopeSharingDone;
+ JSScope *scopeSharingTodo;
+
+/*
+ * Magic terminator for the rt->scopeSharingTodo linked list, threaded through
+ * scope->u.link. This hack allows us to test whether a scope is on the list
+ * by asking whether scope->u.link is non-null. We use a large, likely bogus
+ * pointer here to distinguish this value from any valid u.count (small int)
+ * value.
+ */
+#define NO_SCOPE_SHARING_TODO ((JSScope *) 0xfeedbeef)
+
+ /*
+ * The index for JSThread info, returned by PR_NewThreadPrivateIndex.
+ * The value is visible and shared by all threads, but the data is
+ * private to each thread.
+ */
+ PRUintn threadTPIndex;
+#endif /* JS_THREADSAFE */
+
+ /*
+ * Check property accessibility for objects of arbitrary class. Used at
+ * present to check f.caller accessibility for any function object f.
+ */
+ JSCheckAccessOp checkObjectAccess;
+
+ /* Security principals serialization support. */
+ JSPrincipalsTranscoder principalsTranscoder;
+
+ /* Optional hook to find principals for an object in this runtime. */
+ JSObjectPrincipalsFinder findObjectPrincipals;
+
+ /*
+ * Shared scope property tree, and arena-pool for allocating its nodes.
+ * The propertyRemovals counter is incremented for every js_ClearScope,
+ * and for each js_RemoveScopeProperty that frees a slot in an object.
+ * See js_NativeGet and js_NativeSet in jsobj.c.
+ */
+ JSDHashTable propertyTreeHash;
+ JSScopeProperty *propertyFreeList;
+ JSArenaPool propertyArenaPool;
+ int32 propertyRemovals;
+
+ /* Script filename table. */
+ struct JSHashTable *scriptFilenameTable;
+ JSCList scriptFilenamePrefixes;
+#ifdef JS_THREADSAFE
+ PRLock *scriptFilenameTableLock;
+#endif
+
+ /* Number localization, used by jsnum.c */
+ const char *thousandsSeparator;
+ const char *decimalSeparator;
+ const char *numGrouping;
+
+ /*
+ * Weak references to lazily-created, well-known XML singletons.
+ *
+ * NB: Singleton objects must be carefully disconnected from the rest of
+ * the object graph usually associated with a JSContext's global object,
+ * including the set of standard class objects. See jsxml.c for details.
+ */
+ JSObject *anynameObject;
+ JSObject *functionNamespaceObject;
+
+ /*
+ * A helper list for the GC, so it can mark native iterator states. See
+ * js_MarkNativeIteratorStates for details.
+ */
+ JSNativeIteratorState *nativeIteratorStates;
+
+#ifndef JS_THREADSAFE
+ /*
+ * For thread-unsafe embeddings, the GSN cache lives in the runtime and
+ * not each context, since we expect it to be filled once when decompiling
+ * a longer script, then hit repeatedly as js_GetSrcNote is called during
+ * the decompiler activation that filled it.
+ */
+ JSGSNCache gsnCache;
+
+#define JS_GSN_CACHE(cx) ((cx)->runtime->gsnCache)
+#endif
+
+#ifdef DEBUG
+ /* Function invocation metering. */
+ jsrefcount inlineCalls;
+ jsrefcount nativeCalls;
+ jsrefcount nonInlineCalls;
+ jsrefcount constructs;
+
+ /* Scope lock and property metering. */
+ jsrefcount claimAttempts;
+ jsrefcount claimedScopes;
+ jsrefcount deadContexts;
+ jsrefcount deadlocksAvoided;
+ jsrefcount liveScopes;
+ jsrefcount sharedScopes;
+ jsrefcount totalScopes;
+ jsrefcount badUndependStrings;
+ jsrefcount liveScopeProps;
+ jsrefcount totalScopeProps;
+ jsrefcount livePropTreeNodes;
+ jsrefcount duplicatePropTreeNodes;
+ jsrefcount totalPropTreeNodes;
+ jsrefcount propTreeKidsChunks;
+ jsrefcount middleDeleteFixups;
+
+ /* String instrumentation. */
+ jsrefcount liveStrings;
+ jsrefcount totalStrings;
+ jsrefcount liveDependentStrings;
+ jsrefcount totalDependentStrings;
+ double lengthSum;
+ double lengthSquaredSum;
+ double strdepLengthSum;
+ double strdepLengthSquaredSum;
+#endif
+};
+
+#ifdef DEBUG
+# define JS_RUNTIME_METER(rt, which) JS_ATOMIC_INCREMENT(&(rt)->which)
+# define JS_RUNTIME_UNMETER(rt, which) JS_ATOMIC_DECREMENT(&(rt)->which)
+#else
+# define JS_RUNTIME_METER(rt, which) /* nothing */
+# define JS_RUNTIME_UNMETER(rt, which) /* nothing */
+#endif
+
+#define JS_KEEP_ATOMS(rt) JS_ATOMIC_INCREMENT(&(rt)->gcKeepAtoms);
+#define JS_UNKEEP_ATOMS(rt) JS_ATOMIC_DECREMENT(&(rt)->gcKeepAtoms);
+
+#ifdef JS_ARGUMENT_FORMATTER_DEFINED
+/*
+ * Linked list mapping format strings for JS_{Convert,Push}Arguments{,VA} to
+ * formatter functions. Elements are sorted in non-increasing format string
+ * length order.
+ */
+struct JSArgumentFormatMap {
+ const char *format;
+ size_t length;
+ JSArgumentFormatter formatter;
+ JSArgumentFormatMap *next;
+};
+#endif
+
+struct JSStackHeader {
+ uintN nslots;
+ JSStackHeader *down;
+};
+
+#define JS_STACK_SEGMENT(sh) ((jsval *)(sh) + 2)
+
+/*
+ * Key and entry types for the JSContext.resolvingTable hash table, typedef'd
+ * here because all consumers need to see these declarations (and not just the
+ * typedef names, as would be the case for an opaque pointer-to-typedef'd-type
+ * declaration), along with cx->resolvingTable.
+ */
+typedef struct JSResolvingKey {
+ JSObject *obj;
+ jsid id;
+} JSResolvingKey;
+
+typedef struct JSResolvingEntry {
+ JSDHashEntryHdr hdr;
+ JSResolvingKey key;
+ uint32 flags;
+} JSResolvingEntry;
+
+#define JSRESFLAG_LOOKUP 0x1 /* resolving id from lookup */
+#define JSRESFLAG_WATCH 0x2 /* resolving id from watch */
+
+typedef struct JSLocalRootChunk JSLocalRootChunk;
+
+#define JSLRS_CHUNK_SHIFT 8
+#define JSLRS_CHUNK_SIZE JS_BIT(JSLRS_CHUNK_SHIFT)
+#define JSLRS_CHUNK_MASK JS_BITMASK(JSLRS_CHUNK_SHIFT)
+
+struct JSLocalRootChunk {
+ jsval roots[JSLRS_CHUNK_SIZE];
+ JSLocalRootChunk *down;
+};
+
+typedef struct JSLocalRootStack {
+ uint32 scopeMark;
+ uint32 rootCount;
+ JSLocalRootChunk *topChunk;
+ JSLocalRootChunk firstChunk;
+} JSLocalRootStack;
+
+#define JSLRS_NULL_MARK ((uint32) -1)
+
+typedef struct JSTempValueRooter JSTempValueRooter;
+typedef void
+(* JS_DLL_CALLBACK JSTempValueMarker)(JSContext *cx, JSTempValueRooter *tvr);
+
+typedef union JSTempValueUnion {
+ jsval value;
+ JSObject *object;
+ JSString *string;
+ void *gcthing;
+ JSTempValueMarker marker;
+ JSScopeProperty *sprop;
+ JSWeakRoots *weakRoots;
+ jsval *array;
+} JSTempValueUnion;
+
+/*
+ * The following static asserts check the size assumptions that allow
+ * JSTempValueUnion.object to be reinterpreted as a jsval, using the tagging
+ * property of a generic jsval described below.
+ */
+JS_STATIC_ASSERT(sizeof(JSTempValueUnion) == sizeof(jsval));
+JS_STATIC_ASSERT(sizeof(JSTempValueUnion) == sizeof(JSObject *));
+
+/*
+ * Context-linked stack of temporary GC roots.
+ *
+ * If count is -1, then u.value contains the single value or GC-thing to root.
+ * If count is -2, then u.marker holds a mark hook called to mark the values.
+ * If count is -3, then u.sprop points to the property tree node to mark.
+ * If count is -4, then u.weakRoots points to saved weak roots.
+ * If count >= 0, then u.array points to a stack-allocated vector of jsvals.
+ *
+ * To root a single GC-thing pointer, which need not be tagged and stored as a
+ * jsval, use JS_PUSH_TEMP_ROOT_GCTHING. The macro reinterprets an arbitrary
+ * GC-thing as jsval. It works because a GC-thing is aligned on a 0 mod 8
+ * boundary, and object has the 0 jsval tag. So any GC-thing may be tagged as
+ * if it were an object and untagged, if it's then used only as an opaque
+ * pointer until discriminated by other means than tag bits (this is how the
+ * GC mark function uses its |thing| parameter -- it consults GC-thing flags
+ * stored separately from the thing to decide the type of thing).
+ *
+ * JS_PUSH_TEMP_ROOT_OBJECT and JS_PUSH_TEMP_ROOT_STRING are type-safe
+ * alternatives to JS_PUSH_TEMP_ROOT_GCTHING for JSObject and JSString. They
+ * also provide a simple way to get a single pointer to rooted JSObject or
+ * JSString via JS_PUSH_TEMP_ROOT_(OBJECT|STRING)(cx, NULL, &tvr). Then
+ * &tvr.u.object or &tvr.u.string gives the necessary pointer, which puns
+ * tvr.u.value safely because JSObject * and JSString * are GC-things and, as
+ * such, their tag bits are all zeroes.
+ *
+ * If you need to protect a result value that flows out of a C function across
+ * several layers of other functions, use the js_LeaveLocalRootScopeWithResult
+ * internal API (see further below) instead.
+ */
+struct JSTempValueRooter {
+ JSTempValueRooter *down;
+ ptrdiff_t count;
+ JSTempValueUnion u;
+};
+
+#define JSTVU_SINGLE (-1)
+#define JSTVU_MARKER (-2)
+#define JSTVU_SPROP (-3)
+#define JSTVU_WEAK_ROOTS (-4)
+
+#define JS_PUSH_TEMP_ROOT_COMMON(cx,tvr) \
+ JS_BEGIN_MACRO \
+ JS_ASSERT((cx)->tempValueRooters != (tvr)); \
+ (tvr)->down = (cx)->tempValueRooters; \
+ (cx)->tempValueRooters = (tvr); \
+ JS_END_MACRO
+
+#define JS_PUSH_SINGLE_TEMP_ROOT(cx,val,tvr) \
+ JS_BEGIN_MACRO \
+ (tvr)->count = JSTVU_SINGLE; \
+ (tvr)->u.value = val; \
+ JS_PUSH_TEMP_ROOT_COMMON(cx, tvr); \
+ JS_END_MACRO
+
+#define JS_PUSH_TEMP_ROOT(cx,cnt,arr,tvr) \
+ JS_BEGIN_MACRO \
+ JS_ASSERT((ptrdiff_t)(cnt) >= 0); \
+ (tvr)->count = (ptrdiff_t)(cnt); \
+ (tvr)->u.array = (arr); \
+ JS_PUSH_TEMP_ROOT_COMMON(cx, tvr); \
+ JS_END_MACRO
+
+#define JS_PUSH_TEMP_ROOT_MARKER(cx,marker_,tvr) \
+ JS_BEGIN_MACRO \
+ (tvr)->count = JSTVU_MARKER; \
+ (tvr)->u.marker = (marker_); \
+ JS_PUSH_TEMP_ROOT_COMMON(cx, tvr); \
+ JS_END_MACRO
+
+#define JS_PUSH_TEMP_ROOT_OBJECT(cx,obj,tvr) \
+ JS_BEGIN_MACRO \
+ (tvr)->count = JSTVU_SINGLE; \
+ (tvr)->u.object = (obj); \
+ JS_PUSH_TEMP_ROOT_COMMON(cx, tvr); \
+ JS_END_MACRO
+
+#define JS_PUSH_TEMP_ROOT_STRING(cx,str,tvr) \
+ JS_BEGIN_MACRO \
+ (tvr)->count = JSTVU_SINGLE; \
+ (tvr)->u.string = (str); \
+ JS_PUSH_TEMP_ROOT_COMMON(cx, tvr); \
+ JS_END_MACRO
+
+#define JS_PUSH_TEMP_ROOT_GCTHING(cx,thing,tvr) \
+ JS_BEGIN_MACRO \
+ JS_ASSERT(JSVAL_IS_OBJECT((jsval)thing)); \
+ (tvr)->count = JSTVU_SINGLE; \
+ (tvr)->u.gcthing = (thing); \
+ JS_PUSH_TEMP_ROOT_COMMON(cx, tvr); \
+ JS_END_MACRO
+
+#define JS_POP_TEMP_ROOT(cx,tvr) \
+ JS_BEGIN_MACRO \
+ JS_ASSERT((cx)->tempValueRooters == (tvr)); \
+ (cx)->tempValueRooters = (tvr)->down; \
+ JS_END_MACRO
+
+#define JS_TEMP_ROOT_EVAL(cx,cnt,val,expr) \
+ JS_BEGIN_MACRO \
+ JSTempValueRooter tvr; \
+ JS_PUSH_TEMP_ROOT(cx, cnt, val, &tvr); \
+ (expr); \
+ JS_POP_TEMP_ROOT(cx, &tvr); \
+ JS_END_MACRO
+
+#define JS_PUSH_TEMP_ROOT_SPROP(cx,sprop_,tvr) \
+ JS_BEGIN_MACRO \
+ (tvr)->count = JSTVU_SPROP; \
+ (tvr)->u.sprop = (sprop_); \
+ JS_PUSH_TEMP_ROOT_COMMON(cx, tvr); \
+ JS_END_MACRO
+
+#define JS_PUSH_TEMP_ROOT_WEAK_COPY(cx,weakRoots_,tvr) \
+ JS_BEGIN_MACRO \
+ (tvr)->count = JSTVU_WEAK_ROOTS; \
+ (tvr)->u.weakRoots = (weakRoots_); \
+ JS_PUSH_TEMP_ROOT_COMMON(cx, tvr); \
+ JS_END_MACRO
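+
+/*
+ * Illustrative sketch only (not part of the engine): a native of the classic
+ * JSNative shape that keeps a newborn object reachable across a call that
+ * can trigger GC.  MakeSomeObject and DoSomethingThatCanGC are hypothetical
+ * helpers used purely for illustration.
+ */
+#if 0
+static JSBool
+ExampleNative(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+              jsval *rval)
+{
+    JSTempValueRooter tvr;
+    JSObject *newborn;
+
+    newborn = MakeSomeObject(cx);               /* hypothetical allocator */
+    if (!newborn)
+        return JS_FALSE;
+    JS_PUSH_TEMP_ROOT_OBJECT(cx, newborn, &tvr);
+    DoSomethingThatCanGC(cx);                   /* newborn stays rooted */
+    *rval = OBJECT_TO_JSVAL(newborn);
+    JS_POP_TEMP_ROOT(cx, &tvr);
+    return JS_TRUE;
+}
+#endif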
+
+struct JSContext {
+ /* JSRuntime contextList linkage. */
+ JSCList links;
+
+ /* Interpreter activation count. */
+ uintN interpLevel;
+
+ /* Limit pointer for checking stack consumption during recursion. */
+ jsuword stackLimit;
+
+ /* Runtime version control identifier and equality operators. */
+ uint16 version;
+ jsbytecode jsop_eq;
+ jsbytecode jsop_ne;
+
+ /* Data shared by threads in an address space. */
+ JSRuntime *runtime;
+
+ /* Stack arena pool and frame pointer register. */
+ JSArenaPool stackPool;
+ JSStackFrame *fp;
+
+ /* Temporary arena pool used while compiling and decompiling. */
+ JSArenaPool tempPool;
+
+ /* Top-level object and pointer to top stack frame's scope chain. */
+ JSObject *globalObject;
+
+ /* Storage to root recently allocated GC things and script result. */
+ JSWeakRoots weakRoots;
+
+ /* Regular expression class statics (XXX not shared globally). */
+ JSRegExpStatics regExpStatics;
+
+ /* State for object and array toSource conversion. */
+ JSSharpObjectMap sharpObjectMap;
+
+ /* Argument formatter support for JS_{Convert,Push}Arguments{,VA}. */
+ JSArgumentFormatMap *argumentFormatMap;
+
+ /* Last message string and trace file for debugging. */
+ char *lastMessage;
+#ifdef DEBUG
+ void *tracefp;
+#endif
+
+ /* Per-context optional user callbacks. */
+ JSBranchCallback branchCallback;
+ JSErrorReporter errorReporter;
+
+ /* Client opaque pointer */
+ void *data;
+
+ /* GC and thread-safe state. */
+ JSStackFrame *dormantFrameChain; /* dormant stack frame to scan */
+#ifdef JS_THREADSAFE
+ JSThread *thread;
+ jsrefcount requestDepth;
+ JSScope *scopeToShare; /* weak reference, see jslock.c */
+ JSScope *lockedSealedScope; /* weak ref, for low-cost sealed
+ scope locking */
+ JSCList threadLinks; /* JSThread contextList linkage */
+
+#define CX_FROM_THREAD_LINKS(tl) \
+ ((JSContext *)((char *)(tl) - offsetof(JSContext, threadLinks)))
+#endif
+
+#if JS_HAS_LVALUE_RETURN
+ /*
+ * Secondary return value from native method called on the left-hand side
+ * of an assignment operator. The native should store the object in which
+ * to set a property in *rval, and return the property's id expressed as a
+ * jsval by calling JS_SetCallReturnValue2(cx, idval).
+ */
+ jsval rval2;
+ JSPackedBool rval2set;
+#endif
+
+#if JS_HAS_XML_SUPPORT
+ /*
+ * Bit-set formed from binary exponentials of the XML_* tiny-ids defined
+ * for boolean settings in jsxml.c, plus an XSF_CACHE_VALID bit. Together
+ * these act as a cache of the boolean XML.ignore* and XML.prettyPrinting
+ * property values associated with this context's global object.
+ */
+ uint8 xmlSettingFlags;
+#endif
+
+ /*
+ * True if creating an exception object, to prevent runaway recursion.
+ * NB: creatingException packs with rval2set, #if JS_HAS_LVALUE_RETURN;
+ * with xmlSettingFlags, #if JS_HAS_XML_SUPPORT; and with throwing below.
+ */
+ JSPackedBool creatingException;
+
+ /*
+ * Exception state -- the exception member is a GC root by definition.
+ * NB: throwing packs with creatingException and rval2set, above.
+ */
+ JSPackedBool throwing; /* is there a pending exception? */
+ jsval exception; /* most-recently-thrown exception */
+ /* Flag to indicate that we run inside gcCallback(cx, JSGC_MARK_END). */
+ JSPackedBool insideGCMarkCallback;
+
+ /* Per-context options. */
+ uint32 options; /* see jsapi.h for JSOPTION_* */
+
+ /* Locale specific callbacks for string conversion. */
+ JSLocaleCallbacks *localeCallbacks;
+
+ /*
+ * cx->resolvingTable is non-null and non-empty if we are initializing
+ * standard classes lazily, or if we are otherwise recursing indirectly
+ * from js_LookupProperty through a JSClass.resolve hook. It is used to
+ * limit runaway recursion (see jsapi.c and jsobj.c).
+ */
+ JSDHashTable *resolvingTable;
+
+ /* PDL of stack headers describing stack slots not rooted by argv, etc. */
+ JSStackHeader *stackHeaders;
+
+ /* Optional stack of heap-allocated scoped local GC roots. */
+ JSLocalRootStack *localRootStack;
+
+ /* Stack of thread-stack-allocated temporary GC roots. */
+ JSTempValueRooter *tempValueRooters;
+
+#ifdef GC_MARK_DEBUG
+ /* Top of the GC mark stack. */
+ void *gcCurrentMarkNode;
+#endif
+};
+
+#ifdef JS_THREADSAFE
+# define JS_THREAD_ID(cx) ((cx)->thread ? (cx)->thread->id : 0)
+#endif
+
+#ifdef __cplusplus
+/* FIXME(bug 332648): Move this into a public header. */
+class JSAutoTempValueRooter
+{
+ public:
+ JSAutoTempValueRooter(JSContext *cx, size_t len, jsval *vec)
+ : mContext(cx) {
+ JS_PUSH_TEMP_ROOT(mContext, len, vec, &mTvr);
+ }
+ JSAutoTempValueRooter(JSContext *cx, jsval v)
+ : mContext(cx) {
+ JS_PUSH_SINGLE_TEMP_ROOT(mContext, v, &mTvr);
+ }
+
+ ~JSAutoTempValueRooter() {
+ JS_POP_TEMP_ROOT(mContext, &mTvr);
+ }
+
+ private:
+ static void *operator new(size_t);
+ static void operator delete(void *, size_t);
+
+ JSContext *mContext;
+ JSTempValueRooter mTvr;
+};
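+
+/*
+ * Illustrative sketch only: how an embedding might use the RAII helper above
+ * to keep a jsval alive for the duration of a C++ scope.  EvaluateSomething
+ * is a hypothetical stand-in for any call that can trigger GC.
+ */
+#if 0
+static JSBool
+ExampleRootedCall(JSContext *cx, jsval v)
+{
+    JSAutoTempValueRooter tvr(cx, v);   /* pushes a single temp root */
+    EvaluateSomething(cx);              /* v stays reachable across GC */
+    return JS_TRUE;
+}                                       /* destructor pops the root */
+#endif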
+#endif
+
+/*
+ * Slightly more readable macros for testing per-context option settings (also
+ * to hide bitset implementation detail).
+ *
+ * JSOPTION_XML must be handled specially in order to propagate from compile-
+ * to run-time (from cx->options to script->version/cx->version). To do that,
+ * we copy JSOPTION_XML from cx->options into cx->version as JSVERSION_HAS_XML
+ * whenever options are set, and preserve this XML flag across version number
+ * changes done via the JS_SetVersion API.
+ *
+ * But when executing a script or scripted function, the interpreter changes
+ * cx->version, including the XML flag, to script->version. Thus JSOPTION_XML
+ * is a compile-time option that causes a run-time version change during each
+ * activation of the compiled script. That version change has the effect of
+ * changing JS_HAS_XML_OPTION, so that any compiling done via eval enables XML
+ * support. If an XML-enabled script or function calls a non-XML function,
+ * the flag bit will be cleared during the callee's activation.
+ *
+ * Note that JS_SetVersion API calls never pass JSVERSION_HAS_XML or'd into
+ * that API's version parameter.
+ *
+ * Note also that script->version must contain this XML option flag in order
+ * for XDR'ed scripts to serialize and deserialize with that option preserved
+ * for detection at run-time. We can't copy other compile-time options into
+ * script->version because that would break backward compatibility (certain
+ * other options, e.g. JSOPTION_VAROBJFIX, are analogous to JSOPTION_XML).
+ */
+#define JS_HAS_OPTION(cx,option) (((cx)->options & (option)) != 0)
+#define JS_HAS_STRICT_OPTION(cx) JS_HAS_OPTION(cx, JSOPTION_STRICT)
+#define JS_HAS_WERROR_OPTION(cx) JS_HAS_OPTION(cx, JSOPTION_WERROR)
+#define JS_HAS_COMPILE_N_GO_OPTION(cx) JS_HAS_OPTION(cx, JSOPTION_COMPILE_N_GO)
+#define JS_HAS_ATLINE_OPTION(cx) JS_HAS_OPTION(cx, JSOPTION_ATLINE)
+
+#define JSVERSION_MASK 0x0FFF /* see JSVersion in jspubtd.h */
+#define JSVERSION_HAS_XML 0x1000 /* flag induced by XML option */
+
+#define JSVERSION_NUMBER(cx) ((cx)->version & JSVERSION_MASK)
+#define JS_HAS_XML_OPTION(cx) ((cx)->version & JSVERSION_HAS_XML || \
+ JSVERSION_NUMBER(cx) >= JSVERSION_1_6)
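+
+/*
+ * Illustrative sketch (hypothetical embedding code): testing per-context
+ * options and the effective version with the macros above.
+ */
+#if 0
+static void
+ExampleOptionChecks(JSContext *cx)
+{
+    if (JS_HAS_STRICT_OPTION(cx)) {
+        /* emit extra warnings during compilation */
+    }
+    if (JS_HAS_XML_OPTION(cx)) {
+        /* E4X syntax is available, via the option or a version >= 1.6 */
+    }
+}
+#endif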
+
+#define JS_HAS_NATIVE_BRANCH_CALLBACK_OPTION(cx) \
+ JS_HAS_OPTION(cx, JSOPTION_NATIVE_BRANCH_CALLBACK)
+
+/*
+ * Wrappers for the JSVERSION_IS_* macros from jspubtd.h taking JSContext *cx
+ * and masking off the XML flag and any other high order bits.
+ */
+#define JS_VERSION_IS_ECMA(cx) JSVERSION_IS_ECMA(JSVERSION_NUMBER(cx))
+
+/*
+ * Common subroutine of JS_SetVersion and js_SetVersion, to update per-context
+ * data that depends on version.
+ */
+extern void
+js_OnVersionChange(JSContext *cx);
+
+/*
+ * Unlike the JS_SetVersion API, this function stores JSVERSION_HAS_XML and
+ * any future non-version-number flags induced by compiler options.
+ */
+extern void
+js_SetVersion(JSContext *cx, JSVersion version);
+
+/*
+ * Create and destroy functions for JSContext, which is manually allocated
+ * and exclusively owned.
+ */
+extern JSContext *
+js_NewContext(JSRuntime *rt, size_t stackChunkSize);
+
+extern void
+js_DestroyContext(JSContext *cx, JSDestroyContextMode mode);
+
+/*
+ * Return true if cx points to a context in rt->contextList, else return false.
+ * NB: the caller (see jslock.c:ClaimScope) must hold rt->gcLock.
+ */
+extern JSBool
+js_ValidContextPointer(JSRuntime *rt, JSContext *cx);
+
+/*
+ * If unlocked, acquire and release rt->gcLock around *iterp update; otherwise
+ * the caller must be holding rt->gcLock.
+ */
+extern JSContext *
+js_ContextIterator(JSRuntime *rt, JSBool unlocked, JSContext **iterp);
+
+/*
+ * JSClass.resolve and watchpoint recursion damping machinery.
+ */
+extern JSBool
+js_StartResolving(JSContext *cx, JSResolvingKey *key, uint32 flag,
+ JSResolvingEntry **entryp);
+
+extern void
+js_StopResolving(JSContext *cx, JSResolvingKey *key, uint32 flag,
+ JSResolvingEntry *entry, uint32 generation);
+
+/*
+ * Local root set management.
+ *
+ * NB: the jsval parameters below may be properly tagged jsvals, or GC-thing
+ * pointers cast to (jsval). This relies on JSObject's tag being zero, but
+ * on the up side it lets us push int-jsval-encoded scopeMark values on the
+ * local root stack.
+ */
+extern JSBool
+js_EnterLocalRootScope(JSContext *cx);
+
+#define js_LeaveLocalRootScope(cx) \
+ js_LeaveLocalRootScopeWithResult(cx, JSVAL_NULL)
+
+extern void
+js_LeaveLocalRootScopeWithResult(JSContext *cx, jsval rval);
+
+extern void
+js_ForgetLocalRoot(JSContext *cx, jsval v);
+
+extern int
+js_PushLocalRoot(JSContext *cx, JSLocalRootStack *lrs, jsval v);
+
+extern void
+js_MarkLocalRoots(JSContext *cx, JSLocalRootStack *lrs);
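+
+/*
+ * Illustrative sketch only: the usual pairing of the local root scope API in
+ * a routine that allocates several GC-things and returns one of them.  The
+ * helper NewSomeString is hypothetical.
+ */
+#if 0
+static JSBool
+ExampleLocalRoots(JSContext *cx, jsval *rval)
+{
+    JSString *str;
+
+    if (!js_EnterLocalRootScope(cx))
+        return JS_FALSE;
+    str = NewSomeString(cx);    /* hypothetical; newborns live in the scope */
+    if (!str) {
+        js_LeaveLocalRootScope(cx);
+        return JS_FALSE;
+    }
+    *rval = STRING_TO_JSVAL(str);
+    /* keep the result alive while the scope's roots are popped */
+    js_LeaveLocalRootScopeWithResult(cx, *rval);
+    return JS_TRUE;
+}
+#endif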
+
+/*
+ * Report an exception, which is currently realized as a printf-style format
+ * string and its arguments.
+ */
+typedef enum JSErrNum {
+#define MSG_DEF(name, number, count, exception, format) \
+ name = number,
+#include "js.msg"
+#undef MSG_DEF
+ JSErr_Limit
+} JSErrNum;
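+
+/*
+ * Illustrative note: each MSG_DEF line in js.msg expands above to one enum
+ * initializer.  A hypothetical entry such as
+ *
+ *   MSG_DEF(JSMSG_EXAMPLE_ERROR, 7, 1, JSEXN_ERR, "bad thing: {0}")
+ *
+ * would become "JSMSG_EXAMPLE_ERROR = 7," here, with its format string and
+ * argument count later retrievable through js_GetErrorMessage.
+ */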
+
+extern const JSErrorFormatString *
+js_GetErrorMessage(void *userRef, const char *locale, const uintN errorNumber);
+
+#ifdef va_start
+extern JSBool
+js_ReportErrorVA(JSContext *cx, uintN flags, const char *format, va_list ap);
+
+extern JSBool
+js_ReportErrorNumberVA(JSContext *cx, uintN flags, JSErrorCallback callback,
+ void *userRef, const uintN errorNumber,
+ JSBool charArgs, va_list ap);
+
+extern JSBool
+js_ExpandErrorArguments(JSContext *cx, JSErrorCallback callback,
+ void *userRef, const uintN errorNumber,
+ char **message, JSErrorReport *reportp,
+ JSBool *warningp, JSBool charArgs, va_list ap);
+#endif
+
+extern void
+js_ReportOutOfMemory(JSContext *cx);
+
+/*
+ * Report an exception using a previously composed JSErrorReport.
+ * XXXbe remove from "friend" API
+ */
+extern JS_FRIEND_API(void)
+js_ReportErrorAgain(JSContext *cx, const char *message, JSErrorReport *report);
+
+extern void
+js_ReportIsNotDefined(JSContext *cx, const char *name);
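+
+/*
+ * Illustrative sketch only: typical failure paths using the reporting
+ * helpers declared above.
+ */
+#if 0
+static JSBool
+ExampleReporting(JSContext *cx, void *allocation, JSBool found,
+                 const char *name)
+{
+    if (!allocation) {
+        js_ReportOutOfMemory(cx);
+        return JS_FALSE;
+    }
+    if (!found) {
+        js_ReportIsNotDefined(cx, name);
+        return JS_FALSE;
+    }
+    return JS_TRUE;
+}
+#endif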
+
+extern JSErrorFormatString js_ErrorFormatString[JSErr_Limit];
+
+/*
+ * See JS_SetThreadStackLimit in jsapi.c, where we check that the stack grows
+ * in the expected direction. On Unix-y systems, JS_STACK_GROWTH_DIRECTION is
+ * computed on the build host by jscpucfg.c and written into jsautocfg.h. The
+ * macro is hardcoded in jscpucfg.h on Windows and Mac systems (for historical
+ * reasons pre-dating autoconf usage).
+ */
+#if JS_STACK_GROWTH_DIRECTION > 0
+# define JS_CHECK_STACK_SIZE(cx, lval) ((jsuword)&(lval) < (cx)->stackLimit)
+#else
+# define JS_CHECK_STACK_SIZE(cx, lval) ((jsuword)&(lval) > (cx)->stackLimit)
+#endif
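+
+/*
+ * Illustrative sketch only: guarding a recursive routine with the macro
+ * above.  Reporting JSMSG_OVER_RECURSED through JS_ReportErrorNumber is the
+ * conventional response; treat those details as assumptions of this sketch.
+ */
+#if 0
+static JSBool
+ExampleRecurse(JSContext *cx, int depth)
+{
+    int stackDummy;
+
+    if (!JS_CHECK_STACK_SIZE(cx, stackDummy)) {
+        JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+                             JSMSG_OVER_RECURSED);
+        return JS_FALSE;
+    }
+    return depth == 0 ? JS_TRUE : ExampleRecurse(cx, depth - 1);
+}
+#endif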
+
+JS_END_EXTERN_C
+
+#endif /* jscntxt_h___ */
diff --git a/third_party/js-1.7/jscompat.h b/third_party/js-1.7/jscompat.h
new file mode 100644
index 0000000..80d8605
--- /dev/null
+++ b/third_party/js-1.7/jscompat.h
@@ -0,0 +1,57 @@
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998-1999
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* -*- Mode: C; tab-width: 8 -*-
+ * Copyright (C) 1996-1999 Netscape Communications Corporation, All Rights Reserved.
+ */
+#ifndef jscompat_h___
+#define jscompat_h___
+/*
+ * Compatibility glue for various NSPR versions. We must always define int8,
+ * int16, jsword, and so on to minimize differences with js/ref, no matter what
+ * the NSPR typedef names may be.
+ */
+#include "jstypes.h"
+#include "jslong.h"
+
+typedef JSIntn intN;
+typedef JSUintn uintN;
+typedef JSUword jsuword;
+typedef JSWord jsword;
+typedef float float32;
+#define allocPriv allocPool
+#endif /* jscompat_h___ */
diff --git a/third_party/js-1.7/jsconfig.h b/third_party/js-1.7/jsconfig.h
new file mode 100644
index 0000000..d61e802
--- /dev/null
+++ b/third_party/js-1.7/jsconfig.h
@@ -0,0 +1,208 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * JS configuration macros.
+ */
+#ifndef JS_VERSION
+#define JS_VERSION 170
+#endif
+
+/*
+ * Compile-time JS version configuration. The JS version numbers lie on the
+ * number line like so:
+ *
+ * 1.0 1.1 1.2 1.3 1.4 ECMAv3 1.5 1.6
+ * ^ ^
+ * | |
+ * basis for ECMAv1 close to ECMAv2
+ *
+ * where ECMAv3 stands for ECMA-262 Edition 3. See the runtime version enum
+ * JSVersion in jspubtd.h. Code in the engine can therefore count on version
+ * <= JSVERSION_1_4 to mean "before the Third Edition of ECMA-262" and version
+ * > JSVERSION_1_4 to mean "at or after the Third Edition".
+ *
+ * In the (likely?) event that SpiderMonkey grows to implement JavaScript 2.0,
+ * or ECMA-262 Edition 4 (JS2 without certain extensions), the version number
+ * to use would be near 200, or greater.
+ *
+ * The JS_VERSION_ECMA_3 version is the minimal configuration conforming to
+ * the ECMA-262 Edition 3 specification. Use it for minimal embeddings, where
+ * you're sure you don't need any of the extensions disabled in this version.
+ * In order to facilitate testing, JS_HAS_OBJ_PROTO_PROP is defined as part of
+ * the JS_VERSION_ECMA_3_TEST version.
+ *
+ * To keep things sane in the modern age, where we need exceptions in order to
+ * implement, e.g., iterators and generators, we are dropping support for all
+ * versions <= 1.4.
+ */
+#define JS_VERSION_ECMA_3 148
+#define JS_VERSION_ECMA_3_TEST 149
+
+#if JS_VERSION == JS_VERSION_ECMA_3 || \
+ JS_VERSION == JS_VERSION_ECMA_3_TEST
+
+#define JS_HAS_STR_HTML_HELPERS 0 /* has str.anchor, str.bold, etc. */
+#define JS_HAS_PERL_SUBSTR 0 /* has str.substr */
+#if JS_VERSION == JS_VERSION_ECMA_3_TEST
+#define JS_HAS_OBJ_PROTO_PROP 1 /* has o.__proto__ etc. */
+#else
+#define JS_HAS_OBJ_PROTO_PROP 0 /* has o.__proto__ etc. */
+#endif
+#define JS_HAS_OBJ_WATCHPOINT 0 /* has o.watch and o.unwatch */
+#define JS_HAS_EXPORT_IMPORT 0 /* has export fun; import obj.fun */
+#define JS_HAS_EVAL_THIS_SCOPE 0 /* Math.eval is same as with (Math) */
+#define JS_HAS_SHARP_VARS 0 /* has #n=, #n# for object literals */
+#define JS_HAS_SCRIPT_OBJECT 0 /* has (new Script("x++")).exec() */
+#define JS_HAS_XDR 0 /* has XDR API and internal support */
+#define JS_HAS_XDR_FREEZE_THAW 0 /* has XDR freeze/thaw script methods */
+#define JS_HAS_TOSOURCE 0 /* has Object/Array toSource method */
+#define JS_HAS_DEBUGGER_KEYWORD 0 /* has hook for debugger keyword */
+#define JS_HAS_CATCH_GUARD 0 /* has exception handling catch guard */
+#define JS_HAS_SPARSE_ARRAYS 0 /* array methods preserve empty elems */
+#define JS_HAS_GETTER_SETTER 0 /* has JS2 getter/setter functions */
+#define JS_HAS_UNEVAL 0 /* has uneval() top-level function */
+#define JS_HAS_CONST 0 /* has JS2 const as alternative var */
+#define JS_HAS_FUN_EXPR_STMT 0 /* has function expression statement */
+#define JS_HAS_LVALUE_RETURN 1 /* has o.item(i) = j; for native item */
+#define JS_HAS_NO_SUCH_METHOD 0 /* has o.__noSuchMethod__ handler */
+#define JS_HAS_XML_SUPPORT 0 /* has ECMAScript for XML support */
+#define JS_HAS_ARRAY_EXTRAS 0 /* has indexOf and Lispy extras */
+#define JS_HAS_GENERATORS 0 /* has yield in generator function */
+#define JS_HAS_BLOCK_SCOPE 0 /* has block scope via let/arraycomp */
+#define JS_HAS_DESTRUCTURING 0 /* has [a,b] = ... or {p:a,q:b} = ... */
+
+#elif JS_VERSION < 150
+
+#error "unsupported JS_VERSION"
+
+#elif JS_VERSION == 150
+
+#define JS_HAS_STR_HTML_HELPERS 1 /* has str.anchor, str.bold, etc. */
+#define JS_HAS_PERL_SUBSTR 1 /* has str.substr */
+#define JS_HAS_OBJ_PROTO_PROP 1 /* has o.__proto__ etc. */
+#define JS_HAS_OBJ_WATCHPOINT 1 /* has o.watch and o.unwatch */
+#define JS_HAS_EXPORT_IMPORT 1 /* has export fun; import obj.fun */
+#define JS_HAS_EVAL_THIS_SCOPE 1 /* Math.eval is same as with (Math) */
+#define JS_HAS_SHARP_VARS 1 /* has #n=, #n# for object literals */
+#define JS_HAS_SCRIPT_OBJECT 1 /* has (new Script("x++")).exec() */
+#define JS_HAS_XDR 1 /* has XDR API and internal support */
+#define JS_HAS_XDR_FREEZE_THAW 0 /* has XDR freeze/thaw script methods */
+#define JS_HAS_TOSOURCE 1 /* has Object/Array toSource method */
+#define JS_HAS_DEBUGGER_KEYWORD 1 /* has hook for debugger keyword */
+#define JS_HAS_CATCH_GUARD 1 /* has exception handling catch guard */
+#define JS_HAS_SPARSE_ARRAYS 0 /* array methods preserve empty elems */
+#define JS_HAS_GETTER_SETTER 1 /* has JS2 getter/setter functions */
+#define JS_HAS_UNEVAL 1 /* has uneval() top-level function */
+#define JS_HAS_CONST 1 /* has JS2 const as alternative var */
+#define JS_HAS_FUN_EXPR_STMT 1 /* has function expression statement */
+#define JS_HAS_LVALUE_RETURN 1 /* has o.item(i) = j; for native item */
+#define JS_HAS_NO_SUCH_METHOD 1 /* has o.__noSuchMethod__ handler */
+#define JS_HAS_XML_SUPPORT 0 /* has ECMAScript for XML support */
+#define JS_HAS_ARRAY_EXTRAS 0 /* has indexOf and Lispy extras */
+#define JS_HAS_GENERATORS 0 /* has yield in generator function */
+#define JS_HAS_BLOCK_SCOPE 0 /* has block scope via let/arraycomp */
+#define JS_HAS_DESTRUCTURING 0 /* has [a,b] = ... or {p:a,q:b} = ... */
+
+#elif JS_VERSION == 160
+
+#define JS_HAS_STR_HTML_HELPERS 1 /* has str.anchor, str.bold, etc. */
+#define JS_HAS_PERL_SUBSTR 1 /* has str.substr */
+#define JS_HAS_OBJ_PROTO_PROP 1 /* has o.__proto__ etc. */
+#define JS_HAS_OBJ_WATCHPOINT 1 /* has o.watch and o.unwatch */
+#define JS_HAS_EXPORT_IMPORT 1 /* has export fun; import obj.fun */
+#define JS_HAS_EVAL_THIS_SCOPE 1 /* Math.eval is same as with (Math) */
+#define JS_HAS_SHARP_VARS 1 /* has #n=, #n# for object literals */
+#define JS_HAS_SCRIPT_OBJECT 1 /* has (new Script("x++")).exec() */
+#define JS_HAS_XDR 1 /* has XDR API and internal support */
+#define JS_HAS_XDR_FREEZE_THAW 0 /* has XDR freeze/thaw script methods */
+#define JS_HAS_TOSOURCE 1 /* has Object/Array toSource method */
+#define JS_HAS_DEBUGGER_KEYWORD 1 /* has hook for debugger keyword */
+#define JS_HAS_CATCH_GUARD 1 /* has exception handling catch guard */
+#define JS_HAS_SPARSE_ARRAYS 0 /* array methods preserve empty elems */
+#define JS_HAS_GETTER_SETTER 1 /* has JS2 getter/setter functions */
+#define JS_HAS_UNEVAL 1 /* has uneval() top-level function */
+#define JS_HAS_CONST 1 /* has JS2 const as alternative var */
+#define JS_HAS_FUN_EXPR_STMT 1 /* has function expression statement */
+#define JS_HAS_LVALUE_RETURN 1 /* has o.item(i) = j; for native item */
+#define JS_HAS_NO_SUCH_METHOD 1 /* has o.__noSuchMethod__ handler */
+#define JS_HAS_XML_SUPPORT 1 /* has ECMAScript for XML support */
+#define JS_HAS_ARRAY_EXTRAS 1 /* has indexOf and Lispy extras */
+#define JS_HAS_GENERATORS 0 /* has yield in generator function */
+#define JS_HAS_BLOCK_SCOPE 0 /* has block scope via let/arraycomp */
+#define JS_HAS_DESTRUCTURING 0 /* has [a,b] = ... or {p:a,q:b} = ... */
+
+#elif JS_VERSION == 170
+
+#define JS_HAS_STR_HTML_HELPERS 1 /* has str.anchor, str.bold, etc. */
+#define JS_HAS_PERL_SUBSTR 1 /* has str.substr */
+#define JS_HAS_OBJ_PROTO_PROP 1 /* has o.__proto__ etc. */
+#define JS_HAS_OBJ_WATCHPOINT 1 /* has o.watch and o.unwatch */
+#define JS_HAS_EXPORT_IMPORT 1 /* has export fun; import obj.fun */
+#define JS_HAS_EVAL_THIS_SCOPE 1 /* Math.eval is same as with (Math) */
+#define JS_HAS_SHARP_VARS 1 /* has #n=, #n# for object literals */
+#define JS_HAS_SCRIPT_OBJECT 1 /* has (new Script("x++")).exec() */
+#define JS_HAS_XDR 1 /* has XDR API and internal support */
+#define JS_HAS_XDR_FREEZE_THAW 0 /* has XDR freeze/thaw script methods */
+#define JS_HAS_TOSOURCE 1 /* has Object/Array toSource method */
+#define JS_HAS_DEBUGGER_KEYWORD 1 /* has hook for debugger keyword */
+#define JS_HAS_CATCH_GUARD 1 /* has exception handling catch guard */
+#define JS_HAS_SPARSE_ARRAYS 0 /* array methods preserve empty elems */
+#define JS_HAS_GETTER_SETTER 1 /* has JS2 getter/setter functions */
+#define JS_HAS_UNEVAL 1 /* has uneval() top-level function */
+#define JS_HAS_CONST 1 /* has JS2 const as alternative var */
+#define JS_HAS_FUN_EXPR_STMT 1 /* has function expression statement */
+#define JS_HAS_LVALUE_RETURN 1 /* has o.item(i) = j; for native item */
+#define JS_HAS_NO_SUCH_METHOD 1 /* has o.__noSuchMethod__ handler */
+#define JS_HAS_XML_SUPPORT 1 /* has ECMAScript for XML support */
+#define JS_HAS_ARRAY_EXTRAS 1 /* has indexOf and Lispy extras */
+#define JS_HAS_GENERATORS 1 /* has yield in generator function */
+#define JS_HAS_BLOCK_SCOPE 1 /* has block scope via let/arraycomp */
+#define JS_HAS_DESTRUCTURING 1 /* has [a,b] = ... or {p:a,q:b} = ... */
+
+#else
+
+#error "unknown JS_VERSION"
+
+#endif
+
+/* Features that are present in all versions. */
+#define JS_HAS_RESERVED_JAVA_KEYWORDS 1
+#define JS_HAS_RESERVED_ECMA_KEYWORDS 1
+
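+/*
+ * Illustrative note: engine and embedding code normally branch on the
+ * feature macros above rather than on JS_VERSION directly, e.g.
+ *
+ *   #if JS_HAS_GENERATORS
+ *   ... generator-specific code ...
+ *   #endif
+ *
+ * and because the default JS_VERSION is wrapped in #ifndef above, a minimal
+ * build can select the ECMA-3 configuration by defining JS_VERSION=148
+ * (JS_VERSION_ECMA_3) on the compiler command line.
+ */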
diff --git a/third_party/js-1.7/jsconfig.mk b/third_party/js-1.7/jsconfig.mk
new file mode 100644
index 0000000..a3b8867
--- /dev/null
+++ b/third_party/js-1.7/jsconfig.mk
@@ -0,0 +1,181 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998-1999
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either of the GNU General Public License Version 2 or later (the "GPL"),
+# or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+ifndef OBJDIR
+ ifdef OBJDIR_NAME
+ OBJDIR = $(OBJDIR_NAME)
+ endif
+endif
+
+NSPR_VERSION = v4.0
+NSPR_LIBSUFFIX = 4
+
+NSPR_LOCAL = $(MOZ_DEPTH)/dist/$(OBJDIR)/nspr
+NSPR_DIST = $(MOZ_DEPTH)/dist/$(OBJDIR)
+NSPR_OBJDIR = $(OBJDIR)
+ifeq ($(OS_ARCH), SunOS)
+ NSPR_OBJDIR := $(subst _sparc,,$(NSPR_OBJDIR))
+endif
+ifeq ($(OS_ARCH), Linux)
+ LINUX_REL := $(shell uname -r)
+ ifneq (,$(findstring 2.0,$(LINUX_REL)))
+ NSPR_OBJDIR := $(subst _All,2.0_x86_glibc_PTH,$(NSPR_OBJDIR))
+ else
+ NSPR_OBJDIR := $(subst _All,2.2_x86_glibc_PTH,$(NSPR_OBJDIR))
+ endif
+endif
+ifeq ($(OS_ARCH), AIX)
+ NSPR_OBJDIR := $(subst 4.1,4.2,$(NSPR_OBJDIR))
+endif
+ifeq ($(OS_CONFIG), IRIX6.2)
+ NSPR_OBJDIR := $(subst 6.2,6.2_n32_PTH,$(NSPR_OBJDIR))
+endif
+ifeq ($(OS_CONFIG), IRIX6.5)
+ NSPR_OBJDIR := $(subst 6.5,6.5_n32_PTH,$(NSPR_OBJDIR))
+endif
+ifeq ($(OS_ARCH), WINNT)
+ ifeq ($(OBJDIR), WIN32_D.OBJ)
+ NSPR_OBJDIR = WINNT4.0_DBG.OBJ
+ endif
+ ifeq ($(OBJDIR), WIN32_O.OBJ)
+ NSPR_OBJDIR = WINNT4.0_OPT.OBJ
+ endif
+endif
+NSPR_SHARED = /share/builds/components/nspr20/$(NSPR_VERSION)/$(NSPR_OBJDIR)
+ifeq ($(OS_ARCH), WINNT)
+ NSPR_SHARED = nspr20/$(NSPR_VERSION)/$(NSPR_OBJDIR)
+endif
+NSPR_VERSIONFILE = $(NSPR_LOCAL)/Version
+NSPR_CURVERSION := $(shell cat $(NSPR_VERSIONFILE))
+
+get_nspr:
+ @echo "Grabbing NSPR component..."
+ifeq ($(NSPR_VERSION), $(NSPR_CURVERSION))
+ @echo "No need, NSPR is up to date in this tree (ver=$(NSPR_VERSION))."
+else
+ mkdir -p $(NSPR_LOCAL)
+ mkdir -p $(NSPR_DIST)
+ ifneq ($(OS_ARCH), WINNT)
+ cp $(NSPR_SHARED)/*.jar $(NSPR_LOCAL)
+ else
+ sh $(MOZ_DEPTH)/../reltools/compftp.sh $(NSPR_SHARED) $(NSPR_LOCAL) *.jar
+ endif
+ unzip -o $(NSPR_LOCAL)/mdbinary.jar -d $(NSPR_DIST)
+ mkdir -p $(NSPR_DIST)/include
+ unzip -o $(NSPR_LOCAL)/mdheader.jar -d $(NSPR_DIST)/include
+ rm -rf $(NSPR_DIST)/META-INF
+ rm -rf $(NSPR_DIST)/include/META-INF
+ echo $(NSPR_VERSION) > $(NSPR_VERSIONFILE)
+endif
+
+SHIP_DIST = $(MOZ_DEPTH)/dist/$(OBJDIR)
+SHIP_DIR = $(SHIP_DIST)/SHIP
+
+SHIP_LIBS = libjs.$(SO_SUFFIX) libjs.a
+ifdef JS_LIVECONNECT
+ SHIP_LIBS += libjsj.$(SO_SUFFIX) libjsj.a
+endif
+ifeq ($(OS_ARCH), WINNT)
+ SHIP_LIBS = js32.dll js32.lib
+ ifdef JS_LIVECONNECT
+ SHIP_LIBS += jsj.dll jsj.lib
+ endif
+endif
+SHIP_LIBS += $(LCJAR)
+SHIP_LIBS := $(addprefix $(SHIP_DIST)/lib/, $(SHIP_LIBS))
+
+SHIP_INCS = js*.h prmjtime.h resource.h *.msg *.tbl
+ifdef JS_LIVECONNECT
+ SHIP_INCS += netscape*.h nsC*.h nsI*.h
+endif
+SHIP_INCS := $(addprefix $(SHIP_DIST)/include/, $(SHIP_INCS))
+
+SHIP_BINS = js
+ifdef JS_LIVECONNECT
+ SHIP_BINS += lcshell
+endif
+ifeq ($(OS_ARCH), WINNT)
+ SHIP_BINS := $(addsuffix .exe, $(SHIP_BINS))
+endif
+SHIP_BINS := $(addprefix $(SHIP_DIST)/bin/, $(SHIP_BINS))
+
+ifdef BUILD_OPT
+ JSREFJAR = jsref_opt.jar
+else
+ifdef BUILD_IDG
+ JSREFJAR = jsref_idg.jar
+else
+ JSREFJAR = jsref_dbg.jar
+endif
+endif
+
+ship:
+ mkdir -p $(SHIP_DIR)/$(LIBDIR)
+ mkdir -p $(SHIP_DIR)/include
+ mkdir -p $(SHIP_DIR)/bin
+ cp $(SHIP_LIBS) $(SHIP_DIR)/$(LIBDIR)
+ cp $(SHIP_INCS) $(SHIP_DIR)/include
+ cp $(SHIP_BINS) $(SHIP_DIR)/bin
+ cd $(SHIP_DIR); \
+ zip -r $(JSREFJAR) bin lib include
+ifdef BUILD_SHIP
+ cp $(SHIP_DIR)/$(JSREFJAR) $(BUILD_SHIP)
+endif
+
+CWD = $(shell pwd)
+shipSource: $(SHIP_DIR)/jsref_src.lst .FORCE
+ mkdir -p $(SHIP_DIR)
+ cd $(MOZ_DEPTH)/.. ; \
+ zip $(CWD)/$(SHIP_DIR)/jsref_src.jar -@ < $(CWD)/$(SHIP_DIR)/jsref_src.lst
+ifdef BUILD_SHIP
+ cp $(SHIP_DIR)/jsref_src.jar $(BUILD_SHIP)
+endif
+
+JSREFSRCDIRS := $(shell cat $(DEPTH)/SpiderMonkey.rsp)
+$(SHIP_DIR)/jsref_src.lst: .FORCE
+ mkdir -p $(SHIP_DIR)
+ rm -f $@
+ touch $@
+ for d in $(JSREFSRCDIRS); do \
+ cd $(MOZ_DEPTH)/..; \
+ ls -1 -d $$d | grep -v CVS | grep -v \.OBJ >> $(CWD)/$@; \
+ cd $(CWD); \
+ done
+
+.FORCE:
diff --git a/third_party/js-1.7/jscpucfg.c b/third_party/js-1.7/jscpucfg.c
new file mode 100644
index 0000000..daa9121
--- /dev/null
+++ b/third_party/js-1.7/jscpucfg.c
@@ -0,0 +1,380 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ * Roland Mainz <roland.mainz@informatik.med.uni-giessen.de>
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * Generate CPU-specific bit-size and similar #defines.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+
+#ifdef CROSS_COMPILE
+#include <prtypes.h>
+#define INT64 PRInt64
+#else
+
+/************************************************************************/
+
+/* Generate cpucfg.h */
+
+#if defined(XP_WIN) || defined(XP_OS2)
+#ifdef WIN32
+#if defined(__GNUC__)
+#define INT64 long long
+#else
+#define INT64 _int64
+#endif /* __GNUC__ */
+#else
+#define INT64 long
+#endif
+#else
+#if defined(HPUX) || defined(__QNX__) || defined(_SCO_DS) || defined(UNIXWARE)
+#define INT64 long
+#else
+#define INT64 long long
+#endif
+#endif
+
+#endif /* CROSS_COMPILE */
+
+#ifdef __GNUC__
+#define NS_NEVER_INLINE __attribute__((noinline))
+#else
+#define NS_NEVER_INLINE
+#endif
+
+#ifdef __SUNPRO_C
+static int StackGrowthDirection(int *dummy1addr);
+#pragma no_inline(StackGrowthDirection)
+#endif
+
+typedef void *prword;
+
+struct align_short {
+ char c;
+ short a;
+};
+struct align_int {
+ char c;
+ int a;
+};
+struct align_long {
+ char c;
+ long a;
+};
+struct align_int64 {
+ char c;
+ INT64 a;
+};
+struct align_fakelonglong {
+ char c;
+ struct {
+ long hi, lo;
+ } a;
+};
+struct align_float {
+ char c;
+ float a;
+};
+struct align_double {
+ char c;
+ double a;
+};
+struct align_pointer {
+ char c;
+ void *a;
+};
+struct align_prword {
+ char c;
+ prword a;
+};
+
+#define ALIGN_OF(type) \
+ (((char*)&(((struct align_##type *)0)->a)) - ((char*)0))
+
+unsigned int bpb;
+
+static int Log2(unsigned int n)
+{
+ int log2 = 0;
+
+ if (n & (n-1))
+ log2++;
+ if (n >> 16)
+ log2 += 16, n >>= 16;
+ if (n >> 8)
+ log2 += 8, n >>= 8;
+ if (n >> 4)
+ log2 += 4, n >>= 4;
+ if (n >> 2)
+ log2 += 2, n >>= 2;
+ if (n >> 1)
+ log2++;
+ return log2;
+}
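+
+/*
+ * Illustrative note: Log2 rounds up for non-powers of two, so Log2(8) == 3
+ * but Log2(12) == 4.  The callers below pass power-of-two byte and bit
+ * sizes on typical platforms, where the result is exact.
+ */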
+
+/*
+ * Conceivably this could actually be used, but there is lots of code out
+ * there with ands and shifts in it that assumes a byte is exactly 8 bits,
+ * so forget about porting THIS code to all those non 8 bit byte machines.
+ */
+static void BitsPerByte(void)
+{
+ bpb = 8;
+}
+
+static int NS_NEVER_INLINE StackGrowthDirection(int *dummy1addr)
+{
+ int dummy2;
+
+ return (&dummy2 < dummy1addr) ? -1 : 1;
+}
+
+int main(int argc, char **argv)
+{
+ int sizeof_char, sizeof_short, sizeof_int, sizeof_int64, sizeof_long,
+ sizeof_float, sizeof_double, sizeof_word, sizeof_dword;
+ int bits_per_int64_log2, align_of_short, align_of_int, align_of_long,
+ align_of_int64, align_of_float, align_of_double, align_of_pointer,
+ align_of_word;
+ int dummy1;
+
+ BitsPerByte();
+
+ printf("#ifndef js_cpucfg___\n");
+ printf("#define js_cpucfg___\n\n");
+
+ printf("/* AUTOMATICALLY GENERATED - DO NOT EDIT */\n\n");
+
+#ifdef CROSS_COMPILE
+#if defined(IS_LITTLE_ENDIAN)
+ printf("#define IS_LITTLE_ENDIAN 1\n");
+ printf("#undef IS_BIG_ENDIAN\n\n");
+#elif defined(IS_BIG_ENDIAN)
+ printf("#undef IS_LITTLE_ENDIAN\n");
+ printf("#define IS_BIG_ENDIAN 1\n\n");
+#else
+#error "Endianness not defined."
+#endif
+
+ sizeof_char = PR_BYTES_PER_BYTE;
+ sizeof_short = PR_BYTES_PER_SHORT;
+ sizeof_int = PR_BYTES_PER_INT;
+ sizeof_int64 = PR_BYTES_PER_INT64;
+ sizeof_long = PR_BYTES_PER_LONG;
+ sizeof_float = PR_BYTES_PER_FLOAT;
+ sizeof_double = PR_BYTES_PER_DOUBLE;
+ sizeof_word = PR_BYTES_PER_WORD;
+ sizeof_dword = PR_BYTES_PER_DWORD;
+
+ bits_per_int64_log2 = PR_BITS_PER_INT64_LOG2;
+
+ align_of_short = PR_ALIGN_OF_SHORT;
+ align_of_int = PR_ALIGN_OF_INT;
+ align_of_long = PR_ALIGN_OF_LONG;
+ align_of_int64 = PR_ALIGN_OF_INT64;
+ align_of_float = PR_ALIGN_OF_FLOAT;
+ align_of_double = PR_ALIGN_OF_DOUBLE;
+ align_of_pointer = PR_ALIGN_OF_POINTER;
+ align_of_word = PR_ALIGN_OF_WORD;
+
+#else /* !CROSS_COMPILE */
+
+ /*
+ * We don't handle PDP-endian or similar orders: if a short is big-endian,
+ * so must int and long be big-endian for us to generate the IS_BIG_ENDIAN
+ * #define and the IS_LITTLE_ENDIAN #undef.
+ */
+ {
+ int big_endian = 0, little_endian = 0, ntests = 0;
+
+ if (sizeof(short) == 2) {
+ /* force |volatile| here to get rid of any compiler optimisations
+             * (var in register etc.) which may be applied to |auto| vars -
+ * even those in |union|s...
+ * (|static| is used to get the same functionality for compilers
+ * which do not honor |volatile|...).
+ */
+ volatile static union {
+ short i;
+ char c[2];
+ } u;
+
+ u.i = 0x0102;
+ big_endian += (u.c[0] == 0x01 && u.c[1] == 0x02);
+ little_endian += (u.c[0] == 0x02 && u.c[1] == 0x01);
+ ntests++;
+ }
+
+ if (sizeof(int) == 4) {
+ /* force |volatile| here ... */
+ volatile static union {
+ int i;
+ char c[4];
+ } u;
+
+ u.i = 0x01020304;
+ big_endian += (u.c[0] == 0x01 && u.c[1] == 0x02 &&
+ u.c[2] == 0x03 && u.c[3] == 0x04);
+ little_endian += (u.c[0] == 0x04 && u.c[1] == 0x03 &&
+ u.c[2] == 0x02 && u.c[3] == 0x01);
+ ntests++;
+ }
+
+ if (sizeof(long) == 8) {
+ /* force |volatile| here ... */
+ volatile static union {
+ long i;
+ char c[8];
+ } u;
+
+ /*
+ * Write this as portably as possible: avoid 0x0102030405060708L
+ * and <<= 32.
+ */
+ u.i = 0x01020304;
+ u.i <<= 16, u.i <<= 16;
+ u.i |= 0x05060708;
+ big_endian += (u.c[0] == 0x01 && u.c[1] == 0x02 &&
+ u.c[2] == 0x03 && u.c[3] == 0x04 &&
+ u.c[4] == 0x05 && u.c[5] == 0x06 &&
+ u.c[6] == 0x07 && u.c[7] == 0x08);
+ little_endian += (u.c[0] == 0x08 && u.c[1] == 0x07 &&
+ u.c[2] == 0x06 && u.c[3] == 0x05 &&
+ u.c[4] == 0x04 && u.c[5] == 0x03 &&
+ u.c[6] == 0x02 && u.c[7] == 0x01);
+ ntests++;
+ }
+
+ if (big_endian && big_endian == ntests) {
+ printf("#undef IS_LITTLE_ENDIAN\n");
+ printf("#define IS_BIG_ENDIAN 1\n\n");
+ } else if (little_endian && little_endian == ntests) {
+ printf("#define IS_LITTLE_ENDIAN 1\n");
+ printf("#undef IS_BIG_ENDIAN\n\n");
+ } else {
+            fprintf(stderr, "%s: unknown byte order "
+ "(big_endian=%d, little_endian=%d, ntests=%d)!\n",
+ argv[0], big_endian, little_endian, ntests);
+ return EXIT_FAILURE;
+ }
+ }
+
+ sizeof_char = sizeof(char);
+ sizeof_short = sizeof(short);
+ sizeof_int = sizeof(int);
+ sizeof_int64 = 8;
+ sizeof_long = sizeof(long);
+ sizeof_float = sizeof(float);
+ sizeof_double = sizeof(double);
+ sizeof_word = sizeof(prword);
+ sizeof_dword = 8;
+
+ bits_per_int64_log2 = 6;
+
+ align_of_short = ALIGN_OF(short);
+ align_of_int = ALIGN_OF(int);
+ align_of_long = ALIGN_OF(long);
+ if (sizeof(INT64) < 8) {
+ /* this machine doesn't actually support int64's */
+ align_of_int64 = ALIGN_OF(fakelonglong);
+ } else {
+ align_of_int64 = ALIGN_OF(int64);
+ }
+ align_of_float = ALIGN_OF(float);
+ align_of_double = ALIGN_OF(double);
+ align_of_pointer = ALIGN_OF(pointer);
+ align_of_word = ALIGN_OF(prword);
+
+#endif /* CROSS_COMPILE */
+
+ printf("#define JS_BYTES_PER_BYTE %dL\n", sizeof_char);
+ printf("#define JS_BYTES_PER_SHORT %dL\n", sizeof_short);
+ printf("#define JS_BYTES_PER_INT %dL\n", sizeof_int);
+ printf("#define JS_BYTES_PER_INT64 %dL\n", sizeof_int64);
+ printf("#define JS_BYTES_PER_LONG %dL\n", sizeof_long);
+ printf("#define JS_BYTES_PER_FLOAT %dL\n", sizeof_float);
+ printf("#define JS_BYTES_PER_DOUBLE %dL\n", sizeof_double);
+ printf("#define JS_BYTES_PER_WORD %dL\n", sizeof_word);
+ printf("#define JS_BYTES_PER_DWORD %dL\n", sizeof_dword);
+ printf("\n");
+
+ printf("#define JS_BITS_PER_BYTE %dL\n", bpb);
+ printf("#define JS_BITS_PER_SHORT %dL\n", bpb * sizeof_short);
+ printf("#define JS_BITS_PER_INT %dL\n", bpb * sizeof_int);
+ printf("#define JS_BITS_PER_INT64 %dL\n", bpb * sizeof_int64);
+ printf("#define JS_BITS_PER_LONG %dL\n", bpb * sizeof_long);
+ printf("#define JS_BITS_PER_FLOAT %dL\n", bpb * sizeof_float);
+ printf("#define JS_BITS_PER_DOUBLE %dL\n", bpb * sizeof_double);
+ printf("#define JS_BITS_PER_WORD %dL\n", bpb * sizeof_word);
+ printf("\n");
+
+ printf("#define JS_BITS_PER_BYTE_LOG2 %dL\n", Log2(bpb));
+ printf("#define JS_BITS_PER_SHORT_LOG2 %dL\n", Log2(bpb * sizeof_short));
+ printf("#define JS_BITS_PER_INT_LOG2 %dL\n", Log2(bpb * sizeof_int));
+ printf("#define JS_BITS_PER_INT64_LOG2 %dL\n", bits_per_int64_log2);
+ printf("#define JS_BITS_PER_LONG_LOG2 %dL\n", Log2(bpb * sizeof_long));
+ printf("#define JS_BITS_PER_FLOAT_LOG2 %dL\n", Log2(bpb * sizeof_float));
+ printf("#define JS_BITS_PER_DOUBLE_LOG2 %dL\n", Log2(bpb * sizeof_double));
+ printf("#define JS_BITS_PER_WORD_LOG2 %dL\n", Log2(bpb * sizeof_word));
+ printf("\n");
+
+ printf("#define JS_ALIGN_OF_SHORT %dL\n", align_of_short);
+ printf("#define JS_ALIGN_OF_INT %dL\n", align_of_int);
+ printf("#define JS_ALIGN_OF_LONG %dL\n", align_of_long);
+ printf("#define JS_ALIGN_OF_INT64 %dL\n", align_of_int64);
+ printf("#define JS_ALIGN_OF_FLOAT %dL\n", align_of_float);
+ printf("#define JS_ALIGN_OF_DOUBLE %dL\n", align_of_double);
+ printf("#define JS_ALIGN_OF_POINTER %dL\n", align_of_pointer);
+ printf("#define JS_ALIGN_OF_WORD %dL\n", align_of_word);
+ printf("\n");
+
+ printf("#define JS_BYTES_PER_WORD_LOG2 %dL\n", Log2(sizeof_word));
+ printf("#define JS_BYTES_PER_DWORD_LOG2 %dL\n", Log2(sizeof_dword));
+ printf("#define JS_WORDS_PER_DWORD_LOG2 %dL\n", Log2(sizeof_dword/sizeof_word));
+ printf("\n");
+
+ printf("#define JS_STACK_GROWTH_DIRECTION (%d)\n", StackGrowthDirection(&dummy1));
+ printf("\n");
+
+ printf("#endif /* js_cpucfg___ */\n");
+
+ return EXIT_SUCCESS;
+}
+
diff --git a/third_party/js-1.7/jscpucfg.h b/third_party/js-1.7/jscpucfg.h
new file mode 100644
index 0000000..63ef932
--- /dev/null
+++ b/third_party/js-1.7/jscpucfg.h
@@ -0,0 +1,212 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef js_cpucfg___
+#define js_cpucfg___
+
+#include "jsosdep.h"
+
+#if defined(XP_WIN) || defined(XP_OS2) || defined(WINCE)
+
+#if defined(_WIN64)
+
+#if defined(_M_X64) || defined(_M_AMD64) || defined(_AMD64_)
+#define IS_LITTLE_ENDIAN 1
+#undef IS_BIG_ENDIAN
+
+#define JS_BYTES_PER_BYTE 1L
+#define JS_BYTES_PER_SHORT 2L
+#define JS_BYTES_PER_INT 4L
+#define JS_BYTES_PER_INT64 8L
+#define JS_BYTES_PER_LONG 4L
+#define JS_BYTES_PER_FLOAT 4L
+#define JS_BYTES_PER_DOUBLE 8L
+#define JS_BYTES_PER_WORD 8L
+#define JS_BYTES_PER_DWORD 8L
+
+#define JS_BITS_PER_BYTE 8L
+#define JS_BITS_PER_SHORT 16L
+#define JS_BITS_PER_INT 32L
+#define JS_BITS_PER_INT64 64L
+#define JS_BITS_PER_LONG 32L
+#define JS_BITS_PER_FLOAT 32L
+#define JS_BITS_PER_DOUBLE 64L
+#define JS_BITS_PER_WORD 64L
+
+#define JS_BITS_PER_BYTE_LOG2 3L
+#define JS_BITS_PER_SHORT_LOG2 4L
+#define JS_BITS_PER_INT_LOG2 5L
+#define JS_BITS_PER_INT64_LOG2 6L
+#define JS_BITS_PER_LONG_LOG2 5L
+#define JS_BITS_PER_FLOAT_LOG2 5L
+#define JS_BITS_PER_DOUBLE_LOG2 6L
+#define JS_BITS_PER_WORD_LOG2 6L
+
+#define JS_ALIGN_OF_SHORT 2L
+#define JS_ALIGN_OF_INT 4L
+#define JS_ALIGN_OF_LONG 4L
+#define JS_ALIGN_OF_INT64 8L
+#define JS_ALIGN_OF_FLOAT 4L
+#define JS_ALIGN_OF_DOUBLE 8L
+#define JS_ALIGN_OF_POINTER 8L
+#define JS_ALIGN_OF_WORD 8L
+
+#define JS_BYTES_PER_WORD_LOG2 3L
+#define JS_BYTES_PER_DWORD_LOG2 3L
+#define PR_WORDS_PER_DWORD_LOG2 0L
+#else /* !(defined(_M_X64) || defined(_M_AMD64) || defined(_AMD64_)) */
+#error "CPU type is unknown"
+#endif /* !(defined(_M_X64) || defined(_M_AMD64) || defined(_AMD64_)) */
+
+#elif defined(_WIN32) || defined(XP_OS2) || defined(WINCE)
+
+#ifdef __WATCOMC__
+#define HAVE_VA_LIST_AS_ARRAY
+#endif
+
+#define IS_LITTLE_ENDIAN 1
+#undef IS_BIG_ENDIAN
+
+#define JS_BYTES_PER_BYTE 1L
+#define JS_BYTES_PER_SHORT 2L
+#define JS_BYTES_PER_INT 4L
+#define JS_BYTES_PER_INT64 8L
+#define JS_BYTES_PER_LONG 4L
+#define JS_BYTES_PER_FLOAT 4L
+#define JS_BYTES_PER_DOUBLE 8L
+#define JS_BYTES_PER_WORD 4L
+#define JS_BYTES_PER_DWORD 8L
+
+#define JS_BITS_PER_BYTE 8L
+#define JS_BITS_PER_SHORT 16L
+#define JS_BITS_PER_INT 32L
+#define JS_BITS_PER_INT64 64L
+#define JS_BITS_PER_LONG 32L
+#define JS_BITS_PER_FLOAT 32L
+#define JS_BITS_PER_DOUBLE 64L
+#define JS_BITS_PER_WORD 32L
+
+#define JS_BITS_PER_BYTE_LOG2 3L
+#define JS_BITS_PER_SHORT_LOG2 4L
+#define JS_BITS_PER_INT_LOG2 5L
+#define JS_BITS_PER_INT64_LOG2 6L
+#define JS_BITS_PER_LONG_LOG2 5L
+#define JS_BITS_PER_FLOAT_LOG2 5L
+#define JS_BITS_PER_DOUBLE_LOG2 6L
+#define JS_BITS_PER_WORD_LOG2 5L
+
+#define JS_ALIGN_OF_SHORT 2L
+#define JS_ALIGN_OF_INT 4L
+#define JS_ALIGN_OF_LONG 4L
+#define JS_ALIGN_OF_INT64 8L
+#define JS_ALIGN_OF_FLOAT 4L
+#define JS_ALIGN_OF_DOUBLE 4L
+#define JS_ALIGN_OF_POINTER 4L
+#define JS_ALIGN_OF_WORD 4L
+
+#define JS_BYTES_PER_WORD_LOG2 2L
+#define JS_BYTES_PER_DWORD_LOG2 3L
+#define PR_WORDS_PER_DWORD_LOG2 1L
+#endif /* _WIN32 || XP_OS2 || WINCE*/
+
+#if defined(_WINDOWS) && !defined(_WIN32) /* WIN16 */
+
+#define IS_LITTLE_ENDIAN 1
+#undef IS_BIG_ENDIAN
+
+#define JS_BYTES_PER_BYTE 1L
+#define JS_BYTES_PER_SHORT 2L
+#define JS_BYTES_PER_INT 2L
+#define JS_BYTES_PER_INT64 8L
+#define JS_BYTES_PER_LONG 4L
+#define JS_BYTES_PER_FLOAT 4L
+#define JS_BYTES_PER_DOUBLE 8L
+#define JS_BYTES_PER_WORD 4L
+#define JS_BYTES_PER_DWORD 8L
+
+#define JS_BITS_PER_BYTE 8L
+#define JS_BITS_PER_SHORT 16L
+#define JS_BITS_PER_INT 16L
+#define JS_BITS_PER_INT64 64L
+#define JS_BITS_PER_LONG 32L
+#define JS_BITS_PER_FLOAT 32L
+#define JS_BITS_PER_DOUBLE 64L
+#define JS_BITS_PER_WORD 32L
+
+#define JS_BITS_PER_BYTE_LOG2 3L
+#define JS_BITS_PER_SHORT_LOG2 4L
+#define JS_BITS_PER_INT_LOG2 4L
+#define JS_BITS_PER_INT64_LOG2 6L
+#define JS_BITS_PER_LONG_LOG2 5L
+#define JS_BITS_PER_FLOAT_LOG2 5L
+#define JS_BITS_PER_DOUBLE_LOG2 6L
+#define JS_BITS_PER_WORD_LOG2 5L
+
+#define JS_ALIGN_OF_SHORT 2L
+#define JS_ALIGN_OF_INT 2L
+#define JS_ALIGN_OF_LONG 2L
+#define JS_ALIGN_OF_INT64 2L
+#define JS_ALIGN_OF_FLOAT 2L
+#define JS_ALIGN_OF_DOUBLE 2L
+#define JS_ALIGN_OF_POINTER 2L
+#define JS_ALIGN_OF_WORD 2L
+
+#define JS_BYTES_PER_WORD_LOG2 2L
+#define JS_BYTES_PER_DWORD_LOG2 3L
+#define PR_WORDS_PER_DWORD_LOG2 1L
+
+#endif /* defined(_WINDOWS) && !defined(_WIN32) */
+
+#elif defined(XP_UNIX) || defined(XP_BEOS)
+
+#error "This file is supposed to be auto-generated on UNIX platforms, but the"
+#error "static version for Mac and Windows platforms is being used."
+#error "Something's probably wrong with paths/headers/dependencies/Makefiles."
+
+#else
+
+#error "Must define one of XP_BEOS, XP_OS2, XP_WIN, or XP_UNIX"
+
+#endif
+
+#ifndef JS_STACK_GROWTH_DIRECTION
+#define JS_STACK_GROWTH_DIRECTION (-1)
+#endif
+
+#endif /* js_cpucfg___ */
diff --git a/third_party/js-1.7/jsdate.c b/third_party/js-1.7/jsdate.c
new file mode 100644
index 0000000..9e6697f
--- /dev/null
+++ b/third_party/js-1.7/jsdate.c
@@ -0,0 +1,2371 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * JS date methods.
+ */
+
+/*
+ * "For example, OS/360 devotes 26 bytes of the permanently
+ * resident date-turnover routine to the proper handling of
+ * December 31 on leap years (when it is Day 366). That
+ * might have been left to the operator."
+ *
+ * Frederick Brooks, 'The Second-System Effect'.
+ */
+
+#include "jsstddef.h"
+#include <ctype.h>
+#include <locale.h>
+#include <math.h>
+#include <stdlib.h>
+#include <string.h>
+#include "jstypes.h"
+#include "jsprf.h"
+#include "prmjtime.h"
+#include "jsutil.h" /* Added by JSIFY */
+#include "jsapi.h"
+#include "jsconfig.h"
+#include "jscntxt.h"
+#include "jsdate.h"
+#include "jsinterp.h"
+#include "jsnum.h"
+#include "jsobj.h"
+#include "jsstr.h"
+
+/*
+ * The JS 'Date' object is patterned after the Java 'Date' object.
+ * Here is an example script:
+ *
+ * today = new Date();
+ *
+ * print(today.toLocaleString());
+ *
+ * weekDay = today.getDay();
+ *
+ *
+ * These Java (and ECMA-262) methods are supported:
+ *
+ * UTC
+ * getDate (getUTCDate)
+ * getDay (getUTCDay)
+ * getHours (getUTCHours)
+ * getMinutes (getUTCMinutes)
+ * getMonth (getUTCMonth)
+ * getSeconds (getUTCSeconds)
+ * getMilliseconds (getUTCMilliseconds)
+ * getTime
+ * getTimezoneOffset
+ * getYear
+ * getFullYear (getUTCFullYear)
+ * parse
+ * setDate (setUTCDate)
+ * setHours (setUTCHours)
+ * setMinutes (setUTCMinutes)
+ * setMonth (setUTCMonth)
+ * setSeconds (setUTCSeconds)
+ * setMilliseconds (setUTCMilliseconds)
+ * setTime
+ * setYear (setFullYear, setUTCFullYear)
+ * toGMTString (toUTCString)
+ * toLocaleString
+ * toString
+ *
+ *
+ * These Java methods are not supported:
+ *
+ * setDay
+ * before
+ * after
+ * equals
+ * hashCode
+ */
+
+/*
+ * 11/97 - jsdate.c has been rewritten to conform to the ECMA-262 language
+ * definition and reduce dependence on NSPR. NSPR is used to get the current
+ * time in milliseconds, the time zone offset, and the daylight savings time
+ * offset for a given time. NSPR is also used for Date.toLocaleString(), for
+ * locale-specific formatting, and to get a string representing the timezone.
+ * (Which turns out to be platform-dependent.)
+ *
+ * To do:
+ * (I did some performance tests by timing how long it took to run what
+ * I had of the js ECMA conformance tests.)
+ *
+ * - look at saving results across multiple calls to supporting
+ * functions; the toString functions compute some of the same values
+ * multiple times. Although - I took a quick stab at this, and I lost
+ * rather than gained. (Fractionally.) Hard to tell what compilers/processors
+ * are doing these days.
+ *
+ * - look at tweaking function return types to return double instead
+ * of int; this seems to make things run slightly faster sometimes.
+ * (though it could be architecture-dependent.) It'd be good to see
+ * how this does on win32. (Tried it on irix.) Types could use a
+ * general going-over.
+ */
+
+/*
+ * Supporting functions - ECMA 15.9.1.*
+ */
+
+#define HalfTimeDomain 8.64e15
+#define HoursPerDay 24.0
+#define MinutesPerDay (HoursPerDay * MinutesPerHour)
+#define MinutesPerHour 60.0
+#define SecondsPerDay (MinutesPerDay * SecondsPerMinute)
+#define SecondsPerHour (MinutesPerHour * SecondsPerMinute)
+#define SecondsPerMinute 60.0
+
+#if defined(XP_WIN) || defined(XP_OS2)
+/* Work around msvc double optimization bug by making these runtime values; if
+ * they're available at compile time, msvc optimizes division by them by
+ * computing the reciprocal and multiplying instead of dividing - this loses
+ * when the reciprocal isn't representable in a double.
+ */
+static jsdouble msPerSecond = 1000.0;
+static jsdouble msPerDay = SecondsPerDay * 1000.0;
+static jsdouble msPerHour = SecondsPerHour * 1000.0;
+static jsdouble msPerMinute = SecondsPerMinute * 1000.0;
+#else
+#define msPerDay (SecondsPerDay * msPerSecond)
+#define msPerHour (SecondsPerHour * msPerSecond)
+#define msPerMinute (SecondsPerMinute * msPerSecond)
+#define msPerSecond 1000.0
+#endif
+
+#define Day(t) floor((t) / msPerDay)
+
+static jsdouble
+TimeWithinDay(jsdouble t)
+{
+ jsdouble result;
+ result = fmod(t, msPerDay);
+ if (result < 0)
+ result += msPerDay;
+ return result;
+}
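+
+/*
+ * A small sketch (kept under #if 0, so it is never compiled) of how Day()
+ * and TimeWithinDay() split a time value: the day number is floored, and
+ * the time-of-day is folded back into [0, msPerDay) even for times before
+ * the epoch, which is why TimeWithinDay() needs its "result < 0" branch.
+ */
+#if 0
+static void
+ExampleSplitTimeValue(void)
+{
+    /* 25 hours after the epoch: day 1, 01:00:00.000 within that day. */
+    jsdouble a_day = Day(90000000.0);           /* == 1 */
+    jsdouble a_tod = TimeWithinDay(90000000.0); /* == 3600000 (1 hour) */
+
+    /* 1 hour before the epoch: day -1, 23:00:00.000 within that day. */
+    jsdouble b_day = Day(-3600000.0);           /* == -1 */
+    jsdouble b_tod = TimeWithinDay(-3600000.0); /* == 82800000 (23 hours) */
+}
+#endif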
+
+#define DaysInYear(y) ((y) % 4 == 0 && ((y) % 100 || ((y) % 400 == 0)) \
+ ? 366 : 365)
+
+/* math here has to be floating point, because we need
+ * floor((1968 - 1969) / 4) == -1
+ */
+#define DayFromYear(y) (365 * ((y)-1970) + floor(((y)-1969)/4.0) \
+ - floor(((y)-1901)/100.0) + floor(((y)-1601)/400.0))
+#define TimeFromYear(y) (DayFromYear(y) * msPerDay)
+
+static jsint
+YearFromTime(jsdouble t)
+{
+ jsint y = (jsint) floor(t /(msPerDay*365.2425)) + 1970;
+ jsdouble t2 = (jsdouble) TimeFromYear(y);
+
+ if (t2 > t) {
+ y--;
+ } else {
+ if (t2 + msPerDay * DaysInYear(y) <= t)
+ y++;
+ }
+ return y;
+}
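+
+/*
+ * Note on YearFromTime(): the 365.2425-day average year gives an estimate
+ * that can land one year high or low near a year boundary (for example, the
+ * last millisecond of 2000 first estimates as 2001 and is corrected
+ * downward); the two branches above nudge the estimate into the right year.
+ */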
+
+#define InLeapYear(t) (JSBool) (DaysInYear(YearFromTime(t)) == 366)
+
+#define DayWithinYear(t, year) ((intN) (Day(t) - DayFromYear(year)))
+
+/*
+ * The following array contains the day of year for the first day of
+ * each month, where index 0 is January, and day 0 is January 1.
+ */
+static jsdouble firstDayOfMonth[2][12] = {
+ {0.0, 31.0, 59.0, 90.0, 120.0, 151.0, 181.0, 212.0, 243.0, 273.0, 304.0, 334.0},
+ {0.0, 31.0, 60.0, 91.0, 121.0, 152.0, 182.0, 213.0, 244.0, 274.0, 305.0, 335.0}
+};
+
+#define DayFromMonth(m, leap) firstDayOfMonth[leap][(intN)m]
+
+static intN
+MonthFromTime(jsdouble t)
+{
+ intN d, step;
+ jsint year = YearFromTime(t);
+ d = DayWithinYear(t, year);
+
+ if (d < (step = 31))
+ return 0;
+ step += (InLeapYear(t) ? 29 : 28);
+ if (d < step)
+ return 1;
+ if (d < (step += 31))
+ return 2;
+ if (d < (step += 30))
+ return 3;
+ if (d < (step += 31))
+ return 4;
+ if (d < (step += 30))
+ return 5;
+ if (d < (step += 31))
+ return 6;
+ if (d < (step += 31))
+ return 7;
+ if (d < (step += 30))
+ return 8;
+ if (d < (step += 31))
+ return 9;
+ if (d < (step += 30))
+ return 10;
+ return 11;
+}
+
+static intN
+DateFromTime(jsdouble t)
+{
+ intN d, step, next;
+ jsint year = YearFromTime(t);
+ d = DayWithinYear(t, year);
+
+ if (d <= (next = 30))
+ return d + 1;
+ step = next;
+ next += (InLeapYear(t) ? 29 : 28);
+ if (d <= next)
+ return d - step;
+ step = next;
+ if (d <= (next += 31))
+ return d - step;
+ step = next;
+ if (d <= (next += 30))
+ return d - step;
+ step = next;
+ if (d <= (next += 31))
+ return d - step;
+ step = next;
+ if (d <= (next += 30))
+ return d - step;
+ step = next;
+ if (d <= (next += 31))
+ return d - step;
+ step = next;
+ if (d <= (next += 31))
+ return d - step;
+ step = next;
+ if (d <= (next += 30))
+ return d - step;
+ step = next;
+ if (d <= (next += 31))
+ return d - step;
+ step = next;
+ if (d <= (next += 30))
+ return d - step;
+ step = next;
+ return d - step;
+}
+
+static intN
+WeekDay(jsdouble t)
+{
+ jsint result;
+ result = (jsint) Day(t) + 4;
+ result = result % 7;
+ if (result < 0)
+ result += 7;
+ return (intN) result;
+}
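+
+/*
+ * Day 0 (1 Jan 1970) was a Thursday; the "+ 4" above shifts the result so
+ * that 0 means Sunday, matching Date.prototype.getDay().
+ */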
+
+#define MakeTime(hour, min, sec, ms) \
+((((hour) * MinutesPerHour + (min)) * SecondsPerMinute + (sec)) * msPerSecond + (ms))
+
+static jsdouble
+MakeDay(jsdouble year, jsdouble month, jsdouble date)
+{
+ JSBool leap;
+ jsdouble yearday;
+ jsdouble monthday;
+
+ year += floor(month / 12);
+
+ month = fmod(month, 12.0);
+ if (month < 0)
+ month += 12;
+
+ leap = (DaysInYear((jsint) year) == 366);
+
+ yearday = floor(TimeFromYear(year) / msPerDay);
+ monthday = DayFromMonth(month, leap);
+
+ return yearday + monthday + date - 1;
+}
+
+#define MakeDate(day, time) ((day) * msPerDay + (time))
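+
+/*
+ * A small sketch (under #if 0, never compiled) of MakeDay()/MakeDate()
+ * composing a day number and a time-of-day back into a time value; note
+ * that MakeDay() first carries an out-of-range month into the year.
+ */
+#if 0
+static void
+ExampleMakeDate(void)
+{
+    /* 1 Jan 1998 00:00:00.000 UTC */
+    jsdouble day  = MakeDay(1998, 0, 1);  /* == 10227 */
+    jsdouble when = MakeDate(day, 0.0);   /* == 883612800000 */
+
+    /* month 12 of 1997 carries into January 1998 */
+    jsdouble same = MakeDay(1997, 12, 1); /* == 10227 as well */
+}
+#endif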
+
+/*
+ * Years and leap years on which Jan 1 is a Sunday, Monday, etc.
+ *
+ * yearStartingWith[0][i] is an example non-leap year where
+ * Jan 1 appears on Sunday (i == 0), Monday (i == 1), etc.
+ *
+ * yearStartingWith[1][i] is an example leap year where
+ * Jan 1 appears on Sunday (i == 0), Monday (i == 1), etc.
+ */
+static jsint yearStartingWith[2][7] = {
+ {1978, 1973, 1974, 1975, 1981, 1971, 1977},
+ {1984, 1996, 1980, 1992, 1976, 1988, 1972}
+};
+
+/*
+ * Find a year for which any given date will fall on the same weekday.
+ *
+ * This function should be used with caution for anything other than
+ * determining DST; it hasn't been proven not to produce an
+ * incorrect year for times near year boundaries.
+ */
+static jsint
+EquivalentYearForDST(jsint year)
+{
+ jsint day;
+ JSBool isLeapYear;
+
+ day = (jsint) DayFromYear(year) + 4;
+ day = day % 7;
+ if (day < 0)
+ day += 7;
+
+ isLeapYear = (DaysInYear(year) == 366);
+
+ return yearStartingWith[isLeapYear][day];
+}
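+
+/*
+ * The table above picks a year in 1971-1996 with the same leap-year status
+ * and the same weekday for 1 Jan, so the substitute year has an identical
+ * calendar layout and, one hopes, the same OS DST rules. For instance, 2039
+ * is a non-leap year starting on a Saturday, so it maps to 1977.
+ */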
+
+/* LocalTZA gets set by js_InitDateClass() */
+static jsdouble LocalTZA;
+
+static jsdouble
+DaylightSavingTA(jsdouble t)
+{
+ volatile int64 PR_t;
+ int64 ms2us;
+ int64 offset;
+ jsdouble result;
+
+ /* abort if NaN */
+ if (JSDOUBLE_IS_NaN(t))
+ return t;
+
+ /*
+ * If earlier than 1970 or after 2038, potentially beyond the ken of
+ * many OSes, map it to an equivalent year before asking.
+ */
+ if (t < 0.0 || t > 2145916800000.0) {
+ jsint year;
+ jsdouble day;
+
+ year = EquivalentYearForDST(YearFromTime(t));
+ day = MakeDay(year, MonthFromTime(t), DateFromTime(t));
+ t = MakeDate(day, TimeWithinDay(t));
+ }
+
+ /* put our t in an LL, and map it to usec for prtime */
+ JSLL_D2L(PR_t, t);
+ JSLL_I2L(ms2us, PRMJ_USEC_PER_MSEC);
+ JSLL_MUL(PR_t, PR_t, ms2us);
+
+ offset = PRMJ_DSTOffset(PR_t);
+
+ JSLL_DIV(offset, offset, ms2us);
+ JSLL_L2D(result, offset);
+ return result;
+}
+
+
+#define AdjustTime(t) fmod(LocalTZA + DaylightSavingTA(t), msPerDay)
+
+#define LocalTime(t) ((t) + AdjustTime(t))
+
+static jsdouble
+UTC(jsdouble t)
+{
+ return t - AdjustTime(t - LocalTZA);
+}
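+
+/*
+ * LocalTime() converts UTC to local time by adding the standard offset
+ * (LocalTZA) plus whatever DST offset is in effect; UTC() inverts that,
+ * first backing out LocalTZA so the DST lookup is made with a value that is
+ * already approximately UTC. The round trip is only approximate for the
+ * hour or so around a DST transition, which is inherent to the conversion.
+ */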
+
+static intN
+HourFromTime(jsdouble t)
+{
+ intN result = (intN) fmod(floor(t/msPerHour), HoursPerDay);
+ if (result < 0)
+ result += (intN)HoursPerDay;
+ return result;
+}
+
+static intN
+MinFromTime(jsdouble t)
+{
+ intN result = (intN) fmod(floor(t / msPerMinute), MinutesPerHour);
+ if (result < 0)
+ result += (intN)MinutesPerHour;
+ return result;
+}
+
+static intN
+SecFromTime(jsdouble t)
+{
+ intN result = (intN) fmod(floor(t / msPerSecond), SecondsPerMinute);
+ if (result < 0)
+ result += (intN)SecondsPerMinute;
+ return result;
+}
+
+static intN
+msFromTime(jsdouble t)
+{
+ intN result = (intN) fmod(t, msPerSecond);
+ if (result < 0)
+ result += (intN)msPerSecond;
+ return result;
+}
+
+#define TIMECLIP(d) ((JSDOUBLE_IS_FINITE(d) \
+ && !((d < 0 ? -d : d) > HalfTimeDomain)) \
+ ? js_DoubleToInteger(d + (+0.)) : *cx->runtime->jsNaN)
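+
+/*
+ * TIMECLIP follows ECMA-262 TimeClip: values more than 8.64e15 ms
+ * (100,000,000 days) from the epoch become NaN, and anything in range is
+ * truncated to an integral number of milliseconds; the "+ (+0.)" appears to
+ * be there to turn a negative zero into +0.
+ */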
+
+/**
+ * end of ECMA 'support' functions
+ */
+
+/*
+ * Other Support routines and definitions
+ */
+
+JSClass js_DateClass = {
+ js_Date_str,
+ JSCLASS_HAS_PRIVATE | JSCLASS_HAS_CACHED_PROTO(JSProto_Date),
+ JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
+ JS_EnumerateStub, JS_ResolveStub, JS_ConvertStub, JS_FinalizeStub,
+ JSCLASS_NO_OPTIONAL_MEMBERS
+};
+
+/* for use by date_parse */
+
+static const char* wtb[] = {
+ "am", "pm",
+ "monday", "tuesday", "wednesday", "thursday", "friday",
+ "saturday", "sunday",
+ "january", "february", "march", "april", "may", "june",
+ "july", "august", "september", "october", "november", "december",
+ "gmt", "ut", "utc",
+ "est", "edt",
+ "cst", "cdt",
+ "mst", "mdt",
+ "pst", "pdt"
+ /* time zone table needs to be expanded */
+};
+
+static int ttb[] = {
+ -1, -2, 0, 0, 0, 0, 0, 0, 0, /* AM/PM */
+ 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+ 10000 + 0, 10000 + 0, 10000 + 0, /* GMT/UT/UTC */
+ 10000 + 5 * 60, 10000 + 4 * 60, /* EST/EDT */
+ 10000 + 6 * 60, 10000 + 5 * 60, /* CST/CDT */
+ 10000 + 7 * 60, 10000 + 6 * 60, /* MST/MDT */
+ 10000 + 8 * 60, 10000 + 7 * 60 /* PST/PDT */
+};
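+
+/*
+ * ttb[] entries parallel wtb[] and encode the parser action: negative
+ * values mark am (-1) and pm (-2), values 2..13 are month names (action - 2
+ * is the zero-based month), and 10000 + n means a time zone n minutes west
+ * of GMT (so EST is 10000 + 5 * 60).
+ */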
+
+/* helper for date_parse */
+static JSBool
+date_regionMatches(const char* s1, int s1off, const jschar* s2, int s2off,
+ int count, int ignoreCase)
+{
+ JSBool result = JS_FALSE;
+ /* return true if matches, otherwise, false */
+
+ while (count > 0 && s1[s1off] && s2[s2off]) {
+ if (ignoreCase) {
+ if (JS_TOLOWER((jschar)s1[s1off]) != JS_TOLOWER(s2[s2off])) {
+ break;
+ }
+ } else {
+ if ((jschar)s1[s1off] != s2[s2off]) {
+ break;
+ }
+ }
+ s1off++;
+ s2off++;
+ count--;
+ }
+
+ if (count == 0) {
+ result = JS_TRUE;
+ }
+
+ return result;
+}
+
+/* find UTC time from given date... no 1900 correction! */
+static jsdouble
+date_msecFromDate(jsdouble year, jsdouble mon, jsdouble mday, jsdouble hour,
+ jsdouble min, jsdouble sec, jsdouble msec)
+{
+ jsdouble day;
+ jsdouble msec_time;
+ jsdouble result;
+
+ day = MakeDay(year, mon, mday);
+ msec_time = MakeTime(hour, min, sec, msec);
+ result = MakeDate(day, msec_time);
+ return result;
+}
+
+/*
+ * See ECMA 15.9.4.[3-10];
+ */
+/* XXX this function must be above date_parseString to avoid a
+ horrid bug in the Win16 1.52 compiler */
+#define MAXARGS 7
+static JSBool
+date_UTC(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsdouble array[MAXARGS];
+ uintN loop;
+ jsdouble d;
+
+ for (loop = 0; loop < MAXARGS; loop++) {
+ if (loop < argc) {
+ if (!js_ValueToNumber(cx, argv[loop], &d))
+ return JS_FALSE;
+ /* return NaN if any arg is NaN */
+ if (!JSDOUBLE_IS_FINITE(d)) {
+ return js_NewNumberValue(cx, d, rval);
+ }
+ array[loop] = floor(d);
+ } else {
+ array[loop] = 0;
+ }
+ }
+
+ /* adjust 2-digit years into the 20th century */
+ if (array[0] >= 0 && array[0] <= 99)
+ array[0] += 1900;
+
+ /* if we got a 0 for 'date' (which is out of range)
+ * pretend it's a 1. (So Date.UTC(1972, 5) works) */
+ if (array[2] < 1)
+ array[2] = 1;
+
+ d = date_msecFromDate(array[0], array[1], array[2],
+ array[3], array[4], array[5], array[6]);
+ d = TIMECLIP(d);
+
+ return js_NewNumberValue(cx, d, rval);
+}
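+
+/*
+ * Seen from script, the fix-ups above mean that Date.UTC(98, 0, 1) is
+ * treated as 1 Jan 1998 (two-digit years map into 1900-1999), and
+ * Date.UTC(1972, 5) works even though the omitted day-of-month defaults to
+ * 0, because a zero "date" is bumped to 1.
+ */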
+
+static JSBool
+date_parseString(JSString *str, jsdouble *result)
+{
+ jsdouble msec;
+
+ const jschar *s = JSSTRING_CHARS(str);
+ size_t limit = JSSTRING_LENGTH(str);
+ size_t i = 0;
+ int year = -1;
+ int mon = -1;
+ int mday = -1;
+ int hour = -1;
+ int min = -1;
+ int sec = -1;
+ int c = -1;
+ int n = -1;
+ jsdouble tzoffset = -1; /* was an int, overflowed on win16!!! */
+ int prevc = 0;
+ JSBool seenplusminus = JS_FALSE;
+ int temp;
+ JSBool seenmonthname = JS_FALSE;
+
+ if (limit == 0)
+ goto syntax;
+ while (i < limit) {
+ c = s[i];
+ i++;
+ if (c <= ' ' || c == ',' || c == '-') {
+ if (c == '-' && '0' <= s[i] && s[i] <= '9') {
+ prevc = c;
+ }
+ continue;
+ }
+ if (c == '(') { /* comments) */
+ int depth = 1;
+ while (i < limit) {
+ c = s[i];
+ i++;
+ if (c == '(') depth++;
+ else if (c == ')')
+ if (--depth <= 0)
+ break;
+ }
+ continue;
+ }
+ if ('0' <= c && c <= '9') {
+ n = c - '0';
+ while (i < limit && '0' <= (c = s[i]) && c <= '9') {
+ n = n * 10 + c - '0';
+ i++;
+ }
+
+ /* allow TZA before the year, so
+ * 'Wed Nov 05 21:49:11 GMT-0800 1997'
+ * works */
+
+ /* uses of seenplusminus allow : in TZA, so Java
+ * no-timezone style of GMT+4:30 works
+ */
+
+ if ((prevc == '+' || prevc == '-')/* && year>=0 */) {
+ /* make ':' case below change tzoffset */
+ seenplusminus = JS_TRUE;
+
+ /* offset */
+ if (n < 24)
+ n = n * 60; /* EG. "GMT-3" */
+ else
+ n = n % 100 + n / 100 * 60; /* eg "GMT-0430" */
+ if (prevc == '+') /* plus means east of GMT */
+ n = -n;
+ if (tzoffset != 0 && tzoffset != -1)
+ goto syntax;
+ tzoffset = n;
+ } else if (prevc == '/' && mon >= 0 && mday >= 0 && year < 0) {
+ if (c <= ' ' || c == ',' || c == '/' || i >= limit)
+ year = n;
+ else
+ goto syntax;
+ } else if (c == ':') {
+ if (hour < 0)
+ hour = /*byte*/ n;
+ else if (min < 0)
+ min = /*byte*/ n;
+ else
+ goto syntax;
+ } else if (c == '/') {
+ /* until it is determined that mon is the actual
+ month, keep it as 1-based rather than 0-based */
+ if (mon < 0)
+ mon = /*byte*/ n;
+ else if (mday < 0)
+ mday = /*byte*/ n;
+ else
+ goto syntax;
+ } else if (i < limit && c != ',' && c > ' ' && c != '-' && c != '(') {
+ goto syntax;
+ } else if (seenplusminus && n < 60) { /* handle GMT-3:30 */
+ if (tzoffset < 0)
+ tzoffset -= n;
+ else
+ tzoffset += n;
+ } else if (hour >= 0 && min < 0) {
+ min = /*byte*/ n;
+ } else if (prevc == ':' && min >= 0 && sec < 0) {
+ sec = /*byte*/ n;
+ } else if (mon < 0) {
+ mon = /*byte*/n;
+ } else if (mon >= 0 && mday < 0) {
+ mday = /*byte*/ n;
+ } else if (mon >= 0 && mday >= 0 && year < 0) {
+ year = n;
+ } else {
+ goto syntax;
+ }
+ prevc = 0;
+ } else if (c == '/' || c == ':' || c == '+' || c == '-') {
+ prevc = c;
+ } else {
+ size_t st = i - 1;
+ int k;
+ while (i < limit) {
+ c = s[i];
+ if (!(('A' <= c && c <= 'Z') || ('a' <= c && c <= 'z')))
+ break;
+ i++;
+ }
+ if (i <= st + 1)
+ goto syntax;
+ for (k = (sizeof(wtb)/sizeof(char*)); --k >= 0;)
+ if (date_regionMatches(wtb[k], 0, s, st, i-st, 1)) {
+ int action = ttb[k];
+ if (action != 0) {
+ if (action < 0) {
+ /*
+ * AM/PM. Count 12:30 AM as 00:30, 12:30 PM as
+ * 12:30, instead of blindly adding 12 if PM.
+ */
+ JS_ASSERT(action == -1 || action == -2);
+ if (hour > 12 || hour < 0) {
+ goto syntax;
+ } else {
+ if (action == -1 && hour == 12) { /* am */
+ hour = 0;
+ } else if (action == -2 && hour != 12) { /* pm */
+ hour += 12;
+ }
+ }
+ } else if (action <= 13) { /* month! */
+ /* Adjust mon to be 1-based until the final values
+ for mon, mday and year are adjusted below */
+ if (seenmonthname) {
+ goto syntax;
+ }
+ seenmonthname = JS_TRUE;
+ temp = /*byte*/ (action - 2) + 1;
+
+ if (mon < 0) {
+ mon = temp;
+ } else if (mday < 0) {
+ mday = mon;
+ mon = temp;
+ } else if (year < 0) {
+ year = mon;
+ mon = temp;
+ } else {
+ goto syntax;
+ }
+ } else {
+ tzoffset = action - 10000;
+ }
+ }
+ break;
+ }
+ if (k < 0)
+ goto syntax;
+ prevc = 0;
+ }
+ }
+ if (year < 0 || mon < 0 || mday < 0)
+ goto syntax;
+ /*
+ Case 1. The input string contains an English month name.
+ The form of the string can be month f l, or f month l, or
+ f l month which each evaluate to the same date.
+ If f and l are both greater than or equal to 70, or
+ both less than 70, the date is invalid.
+ The year is taken to be the greater of the values f, l.
+ If the year is greater than or equal to 70 and less than 100,
+ it is considered to be the number of years after 1900.
+ Case 2. The input string is of the form "f/m/l" where f, m and l are
+ integers, e.g. 7/16/45.
+ Adjust the mon, mday and year values to achieve 100% MSIE
+ compatibility.
+ a. If 0 <= f < 70, f/m/l is interpreted as month/day/year.
+ i. If year < 100, it is the number of years after 1900
+ ii. If year >= 100, it is the number of years after 0.
+ b. If 70 <= f < 100
+ i. If m < 70, f/m/l is interpreted as
+ year/month/day where year is the number of years after
+ 1900.
+ ii. If m >= 70, the date is invalid.
+ c. If f >= 100
+ i. If m < 70, f/m/l is interpreted as
+ year/month/day where year is the number of years after 0.
+ ii. If m >= 70, the date is invalid.
+ */
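+    /*
+     * Worked examples of the numeric cases above: "7/16/45" parses as
+     * 16 Jul 1945 (case a), "98/7/16" as 16 Jul 1998 (case b.i), and
+     * "2045/7/16" as 16 Jul 2045 (case c.i).
+     */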
+ if (seenmonthname) {
+ if ((mday >= 70 && year >= 70) || (mday < 70 && year < 70)) {
+ goto syntax;
+ }
+ if (mday > year) {
+ temp = year;
+ year = mday;
+ mday = temp;
+ }
+ if (year >= 70 && year < 100) {
+ year += 1900;
+ }
+ } else if (mon < 70) { /* (a) month/day/year */
+ if (year < 100) {
+ year += 1900;
+ }
+ } else if (mon < 100) { /* (b) year/month/day */
+ if (mday < 70) {
+ temp = year;
+ year = mon + 1900;
+ mon = mday;
+ mday = temp;
+ } else {
+ goto syntax;
+ }
+ } else { /* (c) year/month/day */
+ if (mday < 70) {
+ temp = year;
+ year = mon;
+ mon = mday;
+ mday = temp;
+ } else {
+ goto syntax;
+ }
+ }
+ mon -= 1; /* convert month to 0-based */
+ if (sec < 0)
+ sec = 0;
+ if (min < 0)
+ min = 0;
+ if (hour < 0)
+ hour = 0;
+ if (tzoffset == -1) { /* no time zone specified, have to use local */
+ jsdouble msec_time;
+ msec_time = date_msecFromDate(year, mon, mday, hour, min, sec, 0);
+
+ *result = UTC(msec_time);
+ return JS_TRUE;
+ }
+
+ msec = date_msecFromDate(year, mon, mday, hour, min, sec, 0);
+ msec += tzoffset * msPerMinute;
+ *result = msec;
+ return JS_TRUE;
+
+syntax:
+ /* syntax error */
+ *result = 0;
+ return JS_FALSE;
+}
+
+static JSBool
+date_parse(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSString *str;
+ jsdouble result;
+
+ str = js_ValueToString(cx, argv[0]);
+ if (!str)
+ return JS_FALSE;
+ if (!date_parseString(str, &result)) {
+ *rval = DOUBLE_TO_JSVAL(cx->runtime->jsNaN);
+ return JS_TRUE;
+ }
+
+ result = TIMECLIP(result);
+ return js_NewNumberValue(cx, result, rval);
+}
+
+static JSBool
+date_now(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ int64 us, ms, us2ms;
+ jsdouble msec_time;
+
+ us = PRMJ_Now();
+ JSLL_UI2L(us2ms, PRMJ_USEC_PER_MSEC);
+ JSLL_DIV(ms, us, us2ms);
+ JSLL_L2D(msec_time, ms);
+
+ return js_NewDoubleValue(cx, msec_time, rval);
+}
+
+/*
+ * Check that obj is an object of class Date, and get the date value.
+ * Return NULL on failure.
+ */
+static jsdouble *
+date_getProlog(JSContext *cx, JSObject *obj, jsval *argv)
+{
+ if (!JS_InstanceOf(cx, obj, &js_DateClass, argv))
+ return NULL;
+ return JSVAL_TO_DOUBLE(OBJ_GET_SLOT(cx, obj, JSSLOT_PRIVATE));
+}
+
+/*
+ * See ECMA 15.9.5.4 thru 15.9.5.23
+ */
+static JSBool
+date_getTime(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsdouble *date = date_getProlog(cx, obj, argv);
+ if (!date)
+ return JS_FALSE;
+
+ return js_NewNumberValue(cx, *date, rval);
+}
+
+static JSBool
+date_getYear(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsdouble *date;
+ jsdouble result;
+
+ date = date_getProlog(cx, obj, argv);
+ if (!date)
+ return JS_FALSE;
+
+ result = *date;
+ if (!JSDOUBLE_IS_FINITE(result))
+ return js_NewNumberValue(cx, result, rval);
+
+ result = YearFromTime(LocalTime(result));
+
+ /* Follow ECMA-262 to the letter, contrary to IE JScript. */
+ result -= 1900;
+ return js_NewNumberValue(cx, result, rval);
+}
+
+static JSBool
+date_getFullYear(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ jsdouble result;
+ jsdouble *date = date_getProlog(cx, obj, argv);
+ if (!date)
+ return JS_FALSE;
+ result = *date;
+
+ if (!JSDOUBLE_IS_FINITE(result))
+ return js_NewNumberValue(cx, result, rval);
+
+ result = YearFromTime(LocalTime(result));
+ return js_NewNumberValue(cx, result, rval);
+}
+
+static JSBool
+date_getUTCFullYear(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ jsdouble result;
+ jsdouble *date = date_getProlog(cx, obj, argv);
+ if (!date)
+ return JS_FALSE;
+ result = *date;
+
+ if (!JSDOUBLE_IS_FINITE(result))
+ return js_NewNumberValue(cx, result, rval);
+
+ result = YearFromTime(result);
+ return js_NewNumberValue(cx, result, rval);
+}
+
+static JSBool
+date_getMonth(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ jsdouble result;
+ jsdouble *date = date_getProlog(cx, obj, argv);
+ if (!date)
+ return JS_FALSE;
+ result = *date;
+
+ if (!JSDOUBLE_IS_FINITE(result))
+ return js_NewNumberValue(cx, result, rval);
+
+ result = MonthFromTime(LocalTime(result));
+ return js_NewNumberValue(cx, result, rval);
+}
+
+static JSBool
+date_getUTCMonth(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ jsdouble result;
+ jsdouble *date = date_getProlog(cx, obj, argv);
+ if (!date)
+ return JS_FALSE;
+ result = *date;
+
+ if (!JSDOUBLE_IS_FINITE(result))
+ return js_NewNumberValue(cx, result, rval);
+
+ result = MonthFromTime(result);
+ return js_NewNumberValue(cx, result, rval);
+}
+
+static JSBool
+date_getDate(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsdouble result;
+ jsdouble *date = date_getProlog(cx, obj, argv);
+ if (!date)
+ return JS_FALSE;
+ result = *date;
+
+ if (!JSDOUBLE_IS_FINITE(result))
+ return js_NewNumberValue(cx, result, rval);
+
+ result = LocalTime(result);
+ result = DateFromTime(result);
+ return js_NewNumberValue(cx, result, rval);
+}
+
+static JSBool
+date_getUTCDate(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ jsdouble result;
+ jsdouble *date = date_getProlog(cx, obj, argv);
+ if (!date)
+ return JS_FALSE;
+ result = *date;
+
+ if (!JSDOUBLE_IS_FINITE(result))
+ return js_NewNumberValue(cx, result, rval);
+
+ result = DateFromTime(result);
+ return js_NewNumberValue(cx, result, rval);
+}
+
+static JSBool
+date_getDay(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsdouble result;
+ jsdouble *date = date_getProlog(cx, obj, argv);
+ if (!date)
+ return JS_FALSE;
+ result = *date;
+
+ if (!JSDOUBLE_IS_FINITE(result))
+ return js_NewNumberValue(cx, result, rval);
+
+ result = LocalTime(result);
+ result = WeekDay(result);
+ return js_NewNumberValue(cx, result, rval);
+}
+
+static JSBool
+date_getUTCDay(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ jsdouble result;
+ jsdouble *date = date_getProlog(cx, obj, argv);
+ if (!date)
+ return JS_FALSE;
+ result = *date;
+
+ if (!JSDOUBLE_IS_FINITE(result))
+ return js_NewNumberValue(cx, result, rval);
+
+ result = WeekDay(result);
+ return js_NewNumberValue(cx, result, rval);
+}
+
+static JSBool
+date_getHours(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ jsdouble result;
+ jsdouble *date = date_getProlog(cx, obj, argv);
+ if (!date)
+ return JS_FALSE;
+ result = *date;
+
+ if (!JSDOUBLE_IS_FINITE(result))
+ return js_NewNumberValue(cx, result, rval);
+
+ result = HourFromTime(LocalTime(result));
+ return js_NewNumberValue(cx, result, rval);
+}
+
+static JSBool
+date_getUTCHours(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ jsdouble result;
+ jsdouble *date = date_getProlog(cx, obj, argv);
+ if (!date)
+ return JS_FALSE;
+ result = *date;
+
+ if (!JSDOUBLE_IS_FINITE(result))
+ return js_NewNumberValue(cx, result, rval);
+
+ result = HourFromTime(result);
+ return js_NewNumberValue(cx, result, rval);
+}
+
+static JSBool
+date_getMinutes(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ jsdouble result;
+ jsdouble *date = date_getProlog(cx, obj, argv);
+ if (!date)
+ return JS_FALSE;
+ result = *date;
+
+ if (!JSDOUBLE_IS_FINITE(result))
+ return js_NewNumberValue(cx, result, rval);
+
+ result = MinFromTime(LocalTime(result));
+ return js_NewNumberValue(cx, result, rval);
+}
+
+static JSBool
+date_getUTCMinutes(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ jsdouble result;
+ jsdouble *date = date_getProlog(cx, obj, argv);
+ if (!date)
+ return JS_FALSE;
+ result = *date;
+
+ if (!JSDOUBLE_IS_FINITE(result))
+ return js_NewNumberValue(cx, result, rval);
+
+ result = MinFromTime(result);
+ return js_NewNumberValue(cx, result, rval);
+}
+
+/* Date.getSeconds is mapped to getUTCSeconds */
+
+static JSBool
+date_getUTCSeconds(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ jsdouble result;
+ jsdouble *date = date_getProlog(cx, obj, argv);
+ if (!date)
+ return JS_FALSE;
+ result = *date;
+
+ if (!JSDOUBLE_IS_FINITE(result))
+ return js_NewNumberValue(cx, result, rval);
+
+ result = SecFromTime(result);
+ return js_NewNumberValue(cx, result, rval);
+}
+
+/* Date.getMilliseconds is mapped to getUTCMilliseconds */
+
+static JSBool
+date_getUTCMilliseconds(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ jsdouble result;
+ jsdouble *date = date_getProlog(cx, obj, argv);
+ if (!date)
+ return JS_FALSE;
+ result = *date;
+
+ if (!JSDOUBLE_IS_FINITE(result))
+ return js_NewNumberValue(cx, result, rval);
+
+ result = msFromTime(result);
+ return js_NewNumberValue(cx, result, rval);
+}
+
+static JSBool
+date_getTimezoneOffset(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ jsdouble result;
+ jsdouble *date = date_getProlog(cx, obj, argv);
+ if (!date)
+ return JS_FALSE;
+ result = *date;
+
+ /*
+ * Return the time zone offset in minutes for the current locale
+ * that is appropriate for this time. This value would be a
+ * constant except for daylight savings time.
+ */
+ result = (result - LocalTime(result)) / msPerMinute;
+ return js_NewNumberValue(cx, result, rval);
+}
+
+static JSBool
+date_setTime(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsdouble result;
+ jsdouble *date = date_getProlog(cx, obj, argv);
+ if (!date)
+ return JS_FALSE;
+
+ if (!js_ValueToNumber(cx, argv[0], &result))
+ return JS_FALSE;
+
+ result = TIMECLIP(result);
+
+ *date = result;
+ return js_NewNumberValue(cx, result, rval);
+}
+
+static JSBool
+date_makeTime(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ uintN maxargs, JSBool local, jsval *rval)
+{
+ uintN i;
+ jsdouble args[4], *argp, *stop;
+ jsdouble hour, min, sec, msec;
+ jsdouble lorutime; /* Local or UTC version of *date */
+
+ jsdouble msec_time;
+ jsdouble result;
+
+ jsdouble *date = date_getProlog(cx, obj, argv);
+ if (!date)
+ return JS_FALSE;
+
+ result = *date;
+
+ /* just return NaN if the date is already NaN */
+ if (!JSDOUBLE_IS_FINITE(result))
+ return js_NewNumberValue(cx, result, rval);
+
+ /* Satisfy the ECMA rule that if a function is called with
+ * fewer arguments than the specified formal arguments, the
+ * remaining arguments are set to undefined. Seems like all
+ * the Date.setWhatever functions in ECMA are only varargs
+ * beyond the first argument; this should be set to undefined
+ * if it's not given. This means that "d = new Date();
+ * d.setMilliseconds()" returns NaN. Blech.
+ */
+ if (argc == 0)
+ argc = 1; /* should be safe, because length of all setters is 1 */
+ else if (argc > maxargs)
+ argc = maxargs; /* clamp argc */
+
+ for (i = 0; i < argc; i++) {
+ if (!js_ValueToNumber(cx, argv[i], &args[i]))
+ return JS_FALSE;
+ if (!JSDOUBLE_IS_FINITE(args[i])) {
+ *date = *cx->runtime->jsNaN;
+ return js_NewNumberValue(cx, *date, rval);
+ }
+ args[i] = js_DoubleToInteger(args[i]);
+ }
+
+ if (local)
+ lorutime = LocalTime(result);
+ else
+ lorutime = result;
+
+ argp = args;
+ stop = argp + argc;
+ if (maxargs >= 4 && argp < stop)
+ hour = *argp++;
+ else
+ hour = HourFromTime(lorutime);
+
+ if (maxargs >= 3 && argp < stop)
+ min = *argp++;
+ else
+ min = MinFromTime(lorutime);
+
+ if (maxargs >= 2 && argp < stop)
+ sec = *argp++;
+ else
+ sec = SecFromTime(lorutime);
+
+ if (maxargs >= 1 && argp < stop)
+ msec = *argp;
+ else
+ msec = msFromTime(lorutime);
+
+ msec_time = MakeTime(hour, min, sec, msec);
+ result = MakeDate(Day(lorutime), msec_time);
+
+/* fprintf(stderr, "%f\n", result); */
+
+ if (local)
+ result = UTC(result);
+
+/* fprintf(stderr, "%f\n", result); */
+
+ *date = TIMECLIP(result);
+ return js_NewNumberValue(cx, *date, rval);
+}
+
+static JSBool
+date_setMilliseconds(JSContext *cx, JSObject *obj, uintN argc,
+ jsval *argv, jsval *rval)
+{
+ return date_makeTime(cx, obj, argc, argv, 1, JS_TRUE, rval);
+}
+
+static JSBool
+date_setUTCMilliseconds(JSContext *cx, JSObject *obj, uintN argc,
+ jsval *argv, jsval *rval)
+{
+ return date_makeTime(cx, obj, argc, argv, 1, JS_FALSE, rval);
+}
+
+static JSBool
+date_setSeconds(JSContext *cx, JSObject *obj, uintN argc,
+ jsval *argv, jsval *rval)
+{
+ return date_makeTime(cx, obj, argc, argv, 2, JS_TRUE, rval);
+}
+
+static JSBool
+date_setUTCSeconds(JSContext *cx, JSObject *obj, uintN argc,
+ jsval *argv, jsval *rval)
+{
+ return date_makeTime(cx, obj, argc, argv, 2, JS_FALSE, rval);
+}
+
+static JSBool
+date_setMinutes(JSContext *cx, JSObject *obj, uintN argc,
+ jsval *argv, jsval *rval)
+{
+ return date_makeTime(cx, obj, argc, argv, 3, JS_TRUE, rval);
+}
+
+static JSBool
+date_setUTCMinutes(JSContext *cx, JSObject *obj, uintN argc,
+ jsval *argv, jsval *rval)
+{
+ return date_makeTime(cx, obj, argc, argv, 3, JS_FALSE, rval);
+}
+
+static JSBool
+date_setHours(JSContext *cx, JSObject *obj, uintN argc,
+ jsval *argv, jsval *rval)
+{
+ return date_makeTime(cx, obj, argc, argv, 4, JS_TRUE, rval);
+}
+
+static JSBool
+date_setUTCHours(JSContext *cx, JSObject *obj, uintN argc,
+ jsval *argv, jsval *rval)
+{
+ return date_makeTime(cx, obj, argc, argv, 4, JS_FALSE, rval);
+}
+
+static JSBool
+date_makeDate(JSContext *cx, JSObject *obj, uintN argc,
+ jsval *argv, uintN maxargs, JSBool local, jsval *rval)
+{
+ uintN i;
+ jsdouble lorutime; /* local or UTC version of *date */
+ jsdouble args[3], *argp, *stop;
+ jsdouble year, month, day;
+ jsdouble result;
+
+ jsdouble *date = date_getProlog(cx, obj, argv);
+ if (!date)
+ return JS_FALSE;
+
+ result = *date;
+
+ /* see complaint about ECMA in date_MakeTime */
+ if (argc == 0)
+ argc = 1; /* should be safe, because length of all setters is 1 */
+ else if (argc > maxargs)
+ argc = maxargs; /* clamp argc */
+
+ for (i = 0; i < argc; i++) {
+ if (!js_ValueToNumber(cx, argv[i], &args[i]))
+ return JS_FALSE;
+ if (!JSDOUBLE_IS_FINITE(args[i])) {
+ *date = *cx->runtime->jsNaN;
+ return js_NewNumberValue(cx, *date, rval);
+ }
+ args[i] = js_DoubleToInteger(args[i]);
+ }
+
+ /* return NaN if date is NaN and we're not setting the year,
+ * If we are, use 0 as the time. */
+ if (!(JSDOUBLE_IS_FINITE(result))) {
+ if (maxargs < 3)
+ return js_NewNumberValue(cx, result, rval);
+ else
+ lorutime = +0.;
+ } else {
+ if (local)
+ lorutime = LocalTime(result);
+ else
+ lorutime = result;
+ }
+
+ argp = args;
+ stop = argp + argc;
+ if (maxargs >= 3 && argp < stop)
+ year = *argp++;
+ else
+ year = YearFromTime(lorutime);
+
+ if (maxargs >= 2 && argp < stop)
+ month = *argp++;
+ else
+ month = MonthFromTime(lorutime);
+
+ if (maxargs >= 1 && argp < stop)
+ day = *argp++;
+ else
+ day = DateFromTime(lorutime);
+
+ day = MakeDay(year, month, day); /* day within year */
+ result = MakeDate(day, TimeWithinDay(lorutime));
+
+ if (local)
+ result = UTC(result);
+
+ *date = TIMECLIP(result);
+ return js_NewNumberValue(cx, *date, rval);
+}
+
+static JSBool
+date_setDate(JSContext *cx, JSObject *obj, uintN argc,
+ jsval *argv, jsval *rval)
+{
+ return date_makeDate(cx, obj, argc, argv, 1, JS_TRUE, rval);
+}
+
+static JSBool
+date_setUTCDate(JSContext *cx, JSObject *obj, uintN argc,
+ jsval *argv, jsval *rval)
+{
+ return date_makeDate(cx, obj, argc, argv, 1, JS_FALSE, rval);
+}
+
+static JSBool
+date_setMonth(JSContext *cx, JSObject *obj, uintN argc,
+ jsval *argv, jsval *rval)
+{
+ return date_makeDate(cx, obj, argc, argv, 2, JS_TRUE, rval);
+}
+
+static JSBool
+date_setUTCMonth(JSContext *cx, JSObject *obj, uintN argc,
+ jsval *argv, jsval *rval)
+{
+ return date_makeDate(cx, obj, argc, argv, 2, JS_FALSE, rval);
+}
+
+static JSBool
+date_setFullYear(JSContext *cx, JSObject *obj, uintN argc,
+ jsval *argv, jsval *rval)
+{
+ return date_makeDate(cx, obj, argc, argv, 3, JS_TRUE, rval);
+}
+
+static JSBool
+date_setUTCFullYear(JSContext *cx, JSObject *obj, uintN argc,
+ jsval *argv, jsval *rval)
+{
+ return date_makeDate(cx, obj, argc, argv, 3, JS_FALSE, rval);
+}
+
+static JSBool
+date_setYear(JSContext *cx, JSObject *obj, uintN argc,
+ jsval *argv, jsval *rval)
+{
+ jsdouble t;
+ jsdouble year;
+ jsdouble day;
+ jsdouble result;
+
+ jsdouble *date = date_getProlog(cx, obj, argv);
+ if (!date)
+ return JS_FALSE;
+
+ result = *date;
+
+ if (!js_ValueToNumber(cx, argv[0], &year))
+ return JS_FALSE;
+ if (!JSDOUBLE_IS_FINITE(year)) {
+ *date = *cx->runtime->jsNaN;
+ return js_NewNumberValue(cx, *date, rval);
+ }
+
+ year = js_DoubleToInteger(year);
+
+ if (!JSDOUBLE_IS_FINITE(result)) {
+ t = +0.0;
+ } else {
+ t = LocalTime(result);
+ }
+
+ if (year >= 0 && year <= 99)
+ year += 1900;
+
+ day = MakeDay(year, MonthFromTime(t), DateFromTime(t));
+ result = MakeDate(day, TimeWithinDay(t));
+ result = UTC(result);
+
+ *date = TIMECLIP(result);
+ return js_NewNumberValue(cx, *date, rval);
+}
+
+/* constants for toString, toUTCString */
+static char js_NaN_date_str[] = "Invalid Date";
+static const char* days[] =
+{
+ "Sun","Mon","Tue","Wed","Thu","Fri","Sat"
+};
+static const char* months[] =
+{
+ "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"
+};
+
+static JSBool
+date_toGMTString(JSContext *cx, JSObject *obj, uintN argc,
+ jsval *argv, jsval *rval)
+{
+ char buf[100];
+ JSString *str;
+ jsdouble *date = date_getProlog(cx, obj, argv);
+ if (!date)
+ return JS_FALSE;
+
+ if (!JSDOUBLE_IS_FINITE(*date)) {
+ JS_snprintf(buf, sizeof buf, js_NaN_date_str);
+ } else {
+ jsdouble temp = *date;
+
+ /* Avoid dependence on PRMJ_FormatTimeUSEnglish, because it
+ * requires a PRMJTime... which only has 16-bit years. Sub-ECMA.
+ */
+ JS_snprintf(buf, sizeof buf, "%s, %.2d %s %.4d %.2d:%.2d:%.2d GMT",
+ days[WeekDay(temp)],
+ DateFromTime(temp),
+ months[MonthFromTime(temp)],
+ YearFromTime(temp),
+ HourFromTime(temp),
+ MinFromTime(temp),
+ SecFromTime(temp));
+ }
+ str = JS_NewStringCopyZ(cx, buf);
+ if (!str)
+ return JS_FALSE;
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+
+/* for Date.toLocaleString; interface to PRMJTime date struct.
+ * If findEquivalent is true, then try to map the year to an equivalent year
+ * that's in range.
+ */
+static void
+new_explode(jsdouble timeval, PRMJTime *split, JSBool findEquivalent)
+{
+ jsint year = YearFromTime(timeval);
+ int16 adjustedYear;
+
+ /* If the year doesn't fit in a PRMJTime, find something to do about it. */
+ if (year > 32767 || year < -32768) {
+ if (findEquivalent) {
+ /* We're really just trying to get a timezone string; map the year
+ * to some equivalent year in the range 0 to 2800. Borrowed from
+ * A. D. Olsen.
+ */
+ jsint cycles;
+#define CYCLE_YEARS 2800L
+ cycles = (year >= 0) ? year / CYCLE_YEARS
+ : -1 - (-1 - year) / CYCLE_YEARS;
+ adjustedYear = (int16)(year - cycles * CYCLE_YEARS);
+ } else {
+ /* Clamp it to the nearest representable year. */
+ adjustedYear = (int16)((year > 0) ? 32767 : - 32768);
+ }
+ } else {
+ adjustedYear = (int16)year;
+ }
+
+ split->tm_usec = (int32) msFromTime(timeval) * 1000;
+ split->tm_sec = (int8) SecFromTime(timeval);
+ split->tm_min = (int8) MinFromTime(timeval);
+ split->tm_hour = (int8) HourFromTime(timeval);
+ split->tm_mday = (int8) DateFromTime(timeval);
+ split->tm_mon = (int8) MonthFromTime(timeval);
+ split->tm_wday = (int8) WeekDay(timeval);
+ split->tm_year = (int16) adjustedYear;
+ split->tm_yday = (int16) DayWithinYear(timeval, year);
+
+ /* not sure how this affects things, but it doesn't seem
+ to matter. */
+ split->tm_isdst = (DaylightSavingTA(timeval) != 0);
+}
+
+typedef enum formatspec {
+ FORMATSPEC_FULL, FORMATSPEC_DATE, FORMATSPEC_TIME
+} formatspec;
+
+/* helper function */
+static JSBool
+date_format(JSContext *cx, jsdouble date, formatspec format, jsval *rval)
+{
+ char buf[100];
+ JSString *str;
+ char tzbuf[100];
+ JSBool usetz;
+ size_t i, tzlen;
+ PRMJTime split;
+
+ if (!JSDOUBLE_IS_FINITE(date)) {
+ JS_snprintf(buf, sizeof buf, js_NaN_date_str);
+ } else {
+ jsdouble local = LocalTime(date);
+
+ /* offset from GMT in minutes. The offset includes daylight savings,
+ if it applies. */
+ jsint minutes = (jsint) floor(AdjustTime(date) / msPerMinute);
+
+ /* map 510 minutes to 0830 hours */
+ intN offset = (minutes / 60) * 100 + minutes % 60;
+
+ /* print as "Wed Nov 05 19:38:03 GMT-0800 (PST) 1997" The TZA is
+ * printed as 'GMT-0800' rather than as 'PST' to avoid
+ * operating-system dependence on strftime (which
+ * PRMJ_FormatTimeUSEnglish calls, for %Z only.) win32 prints
+ * PST as 'Pacific Standard Time.' This way we always know
+ * what we're getting, and can parse it if we produce it.
+ * The OS TZA string is included as a comment.
+ */
+
+ /* get a timezone string from the OS to include as a
+ comment. */
+ new_explode(date, &split, JS_TRUE);
+ if (PRMJ_FormatTime(tzbuf, sizeof tzbuf, "(%Z)", &split) != 0) {
+
+ /* Decide whether to use the resulting timezone string.
+ *
+ * Reject it if it contains any non-ASCII, non-alphanumeric
+ * characters. It's then likely in some other character
+ * encoding, and we probably won't display it correctly.
+ */
+ usetz = JS_TRUE;
+ tzlen = strlen(tzbuf);
+ if (tzlen > 100) {
+ usetz = JS_FALSE;
+ } else {
+ for (i = 0; i < tzlen; i++) {
+ jschar c = tzbuf[i];
+ if (c > 127 ||
+ !(isalpha(c) || isdigit(c) ||
+ c == ' ' || c == '(' || c == ')')) {
+ usetz = JS_FALSE;
+ }
+ }
+ }
+
+ /* Also reject it if it's not parenthesized or if it's '()'. */
+ if (tzbuf[0] != '(' || tzbuf[1] == ')')
+ usetz = JS_FALSE;
+ } else
+ usetz = JS_FALSE;
+
+ switch (format) {
+ case FORMATSPEC_FULL:
+ /*
+ * Avoid dependence on PRMJ_FormatTimeUSEnglish, because it
+ * requires a PRMJTime... which only has 16-bit years. Sub-ECMA.
+ */
+ /* Tue Oct 31 2000 09:41:40 GMT-0800 (PST) */
+ JS_snprintf(buf, sizeof buf,
+ "%s %s %.2d %.4d %.2d:%.2d:%.2d GMT%+.4d%s%s",
+ days[WeekDay(local)],
+ months[MonthFromTime(local)],
+ DateFromTime(local),
+ YearFromTime(local),
+ HourFromTime(local),
+ MinFromTime(local),
+ SecFromTime(local),
+ offset,
+ usetz ? " " : "",
+ usetz ? tzbuf : "");
+ break;
+ case FORMATSPEC_DATE:
+ /* Tue Oct 31 2000 */
+ JS_snprintf(buf, sizeof buf,
+ "%s %s %.2d %.4d",
+ days[WeekDay(local)],
+ months[MonthFromTime(local)],
+ DateFromTime(local),
+ YearFromTime(local));
+ break;
+ case FORMATSPEC_TIME:
+ /* 09:41:40 GMT-0800 (PST) */
+ JS_snprintf(buf, sizeof buf,
+ "%.2d:%.2d:%.2d GMT%+.4d%s%s",
+ HourFromTime(local),
+ MinFromTime(local),
+ SecFromTime(local),
+ offset,
+ usetz ? " " : "",
+ usetz ? tzbuf : "");
+ break;
+ }
+ }
+
+ str = JS_NewStringCopyZ(cx, buf);
+ if (!str)
+ return JS_FALSE;
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+
+static JSBool
+date_toLocaleHelper(JSContext *cx, JSObject *obj, uintN argc,
+ jsval *argv, jsval *rval, char *format)
+{
+ char buf[100];
+ JSString *str;
+ PRMJTime split;
+ jsdouble *date = date_getProlog(cx, obj, argv);
+ if (!date)
+ return JS_FALSE;
+
+ if (!JSDOUBLE_IS_FINITE(*date)) {
+ JS_snprintf(buf, sizeof buf, js_NaN_date_str);
+ } else {
+ intN result_len;
+ jsdouble local = LocalTime(*date);
+ new_explode(local, &split, JS_FALSE);
+
+ /* let PRMJTime format it. */
+ result_len = PRMJ_FormatTime(buf, sizeof buf, format, &split);
+
+ /* If it failed, default to toString. */
+ if (result_len == 0)
+ return date_format(cx, *date, FORMATSPEC_FULL, rval);
+
+ /* Hacked check against undesired 2-digit year 00/00/00 form. */
+ if (strcmp(format, "%x") == 0 && result_len >= 6 &&
+ /* Format %x means use OS settings, which may have 2-digit yr, so
+ hack end of 3/11/22 or 11.03.22 or 11Mar22 to use 4-digit yr...*/
+ !isdigit(buf[result_len - 3]) &&
+ isdigit(buf[result_len - 2]) && isdigit(buf[result_len - 1]) &&
+ /* ...but not if starts with 4-digit year, like 2022/3/11. */
+ !(isdigit(buf[0]) && isdigit(buf[1]) &&
+ isdigit(buf[2]) && isdigit(buf[3]))) {
+ JS_snprintf(buf + (result_len - 2), (sizeof buf) - (result_len - 2),
+ "%d", js_DateGetYear(cx, obj));
+ }
+
+ }
+
+ if (cx->localeCallbacks && cx->localeCallbacks->localeToUnicode)
+ return cx->localeCallbacks->localeToUnicode(cx, buf, rval);
+
+ str = JS_NewStringCopyZ(cx, buf);
+ if (!str)
+ return JS_FALSE;
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+
+static JSBool
+date_toLocaleString(JSContext *cx, JSObject *obj, uintN argc,
+ jsval *argv, jsval *rval)
+{
+ /* Use '%#c' for windows, because '%c' is
+ * backward-compatible and non-y2k with msvc; '%#c' requests that a
+ * full year be used in the result string.
+ */
+ return date_toLocaleHelper(cx, obj, argc, argv, rval,
+#if defined(_WIN32) && !defined(__MWERKS__)
+ "%#c"
+#else
+ "%c"
+#endif
+ );
+}
+
+static JSBool
+date_toLocaleDateString(JSContext *cx, JSObject *obj, uintN argc,
+ jsval *argv, jsval *rval)
+{
+ /* Use '%#x' for windows, because '%x' is
+ * backward-compatible and non-y2k with msvc; '%#x' requests that a
+ * full year be used in the result string.
+ */
+ return date_toLocaleHelper(cx, obj, argc, argv, rval,
+#if defined(_WIN32) && !defined(__MWERKS__)
+ "%#x"
+#else
+ "%x"
+#endif
+ );
+}
+
+static JSBool
+date_toLocaleTimeString(JSContext *cx, JSObject *obj, uintN argc,
+ jsval *argv, jsval *rval)
+{
+ return date_toLocaleHelper(cx, obj, argc, argv, rval, "%X");
+}
+
+static JSBool
+date_toLocaleFormat(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSString *fmt;
+
+ if (argc == 0)
+ return date_toLocaleString(cx, obj, argc, argv, rval);
+
+ fmt = JS_ValueToString(cx, argv[0]);
+ if (!fmt)
+ return JS_FALSE;
+
+ return date_toLocaleHelper(cx, obj, argc, argv, rval,
+ JS_GetStringBytes(fmt));
+}
+
+static JSBool
+date_toTimeString(JSContext *cx, JSObject *obj, uintN argc,
+ jsval *argv, jsval *rval)
+{
+ jsdouble *date = date_getProlog(cx, obj, argv);
+ if (!date)
+ return JS_FALSE;
+ return date_format(cx, *date, FORMATSPEC_TIME, rval);
+}
+
+static JSBool
+date_toDateString(JSContext *cx, JSObject *obj, uintN argc,
+ jsval *argv, jsval *rval)
+{
+ jsdouble *date = date_getProlog(cx, obj, argv);
+ if (!date)
+ return JS_FALSE;
+ return date_format(cx, *date, FORMATSPEC_DATE, rval);
+}
+
+#if JS_HAS_TOSOURCE
+#include <string.h>
+#include "jsdtoa.h"
+
+static JSBool
+date_toSource(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ jsdouble *date;
+ char buf[DTOSTR_STANDARD_BUFFER_SIZE], *numStr, *bytes;
+ JSString *str;
+
+ date = date_getProlog(cx, obj, argv);
+ if (!date)
+ return JS_FALSE;
+
+ numStr = JS_dtostr(buf, sizeof buf, DTOSTR_STANDARD, 0, *date);
+ if (!numStr) {
+ JS_ReportOutOfMemory(cx);
+ return JS_FALSE;
+ }
+
+ bytes = JS_smprintf("(new %s(%s))", js_Date_str, numStr);
+ if (!bytes) {
+ JS_ReportOutOfMemory(cx);
+ return JS_FALSE;
+ }
+
+ str = JS_NewString(cx, bytes, strlen(bytes));
+ if (!str) {
+ free(bytes);
+ return JS_FALSE;
+ }
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+#endif
+
+static JSBool
+date_toString(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ jsdouble *date = date_getProlog(cx, obj, argv);
+ if (!date)
+ return JS_FALSE;
+ return date_format(cx, *date, FORMATSPEC_FULL, rval);
+}
+
+static JSBool
+date_valueOf(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ /* It is an error to call date_valueOf on a non-date object, but we don't
+ * need to check for that explicitly here because every path calls
+ * date_getProlog, which does the check.
+ */
+
+ /* If called directly with no arguments, convert to a time number. */
+ if (argc == 0)
+ return date_getTime(cx, obj, argc, argv, rval);
+
+ /* Convert to number only if the hint was given, otherwise favor string. */
+ if (argc == 1) {
+ JSString *str, *str2;
+
+ str = js_ValueToString(cx, argv[0]);
+ if (!str)
+ return JS_FALSE;
+ str2 = ATOM_TO_STRING(cx->runtime->atomState.typeAtoms[JSTYPE_NUMBER]);
+ if (js_EqualStrings(str, str2))
+ return date_getTime(cx, obj, argc, argv, rval);
+ }
+ return date_toString(cx, obj, argc, argv, rval);
+}
+
+
+/*
+ * creation and destruction
+ */
+
+static JSFunctionSpec date_static_methods[] = {
+ {"UTC", date_UTC, MAXARGS,0,0 },
+ {"parse", date_parse, 1,0,0 },
+ {"now", date_now, 0,0,0 },
+ {0,0,0,0,0}
+};
+
+static JSFunctionSpec date_methods[] = {
+ {"getTime", date_getTime, 0,0,0 },
+ {"getTimezoneOffset", date_getTimezoneOffset, 0,0,0 },
+ {"getYear", date_getYear, 0,0,0 },
+ {"getFullYear", date_getFullYear, 0,0,0 },
+ {"getUTCFullYear", date_getUTCFullYear, 0,0,0 },
+ {"getMonth", date_getMonth, 0,0,0 },
+ {"getUTCMonth", date_getUTCMonth, 0,0,0 },
+ {"getDate", date_getDate, 0,0,0 },
+ {"getUTCDate", date_getUTCDate, 0,0,0 },
+ {"getDay", date_getDay, 0,0,0 },
+ {"getUTCDay", date_getUTCDay, 0,0,0 },
+ {"getHours", date_getHours, 0,0,0 },
+ {"getUTCHours", date_getUTCHours, 0,0,0 },
+ {"getMinutes", date_getMinutes, 0,0,0 },
+ {"getUTCMinutes", date_getUTCMinutes, 0,0,0 },
+ {"getSeconds", date_getUTCSeconds, 0,0,0 },
+ {"getUTCSeconds", date_getUTCSeconds, 0,0,0 },
+ {"getMilliseconds", date_getUTCMilliseconds,0,0,0 },
+ {"getUTCMilliseconds", date_getUTCMilliseconds,0,0,0 },
+ {"setTime", date_setTime, 1,0,0 },
+ {"setYear", date_setYear, 1,0,0 },
+ {"setFullYear", date_setFullYear, 3,0,0 },
+ {"setUTCFullYear", date_setUTCFullYear, 3,0,0 },
+ {"setMonth", date_setMonth, 2,0,0 },
+ {"setUTCMonth", date_setUTCMonth, 2,0,0 },
+ {"setDate", date_setDate, 1,0,0 },
+ {"setUTCDate", date_setUTCDate, 1,0,0 },
+ {"setHours", date_setHours, 4,0,0 },
+ {"setUTCHours", date_setUTCHours, 4,0,0 },
+ {"setMinutes", date_setMinutes, 3,0,0 },
+ {"setUTCMinutes", date_setUTCMinutes, 3,0,0 },
+ {"setSeconds", date_setSeconds, 2,0,0 },
+ {"setUTCSeconds", date_setUTCSeconds, 2,0,0 },
+ {"setMilliseconds", date_setMilliseconds, 1,0,0 },
+ {"setUTCMilliseconds", date_setUTCMilliseconds,1,0,0 },
+ {"toUTCString", date_toGMTString, 0,0,0 },
+ {js_toLocaleString_str, date_toLocaleString, 0,0,0 },
+ {"toLocaleDateString", date_toLocaleDateString,0,0,0 },
+ {"toLocaleTimeString", date_toLocaleTimeString,0,0,0 },
+ {"toLocaleFormat", date_toLocaleFormat, 1,0,0 },
+ {"toDateString", date_toDateString, 0,0,0 },
+ {"toTimeString", date_toTimeString, 0,0,0 },
+#if JS_HAS_TOSOURCE
+ {js_toSource_str, date_toSource, 0,0,0 },
+#endif
+ {js_toString_str, date_toString, 0,0,0 },
+ {js_valueOf_str, date_valueOf, 0,0,0 },
+ {0,0,0,0,0}
+};
+
+static jsdouble *
+date_constructor(JSContext *cx, JSObject* obj)
+{
+ jsdouble *date;
+
+ date = js_NewDouble(cx, 0.0, 0);
+ if (!date)
+ return NULL;
+ OBJ_SET_SLOT(cx, obj, JSSLOT_PRIVATE, DOUBLE_TO_JSVAL(date));
+ return date;
+}
+
+static JSBool
+Date(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsdouble *date;
+ JSString *str;
+ jsdouble d;
+
+ /* Date called as function. */
+ if (!(cx->fp->flags & JSFRAME_CONSTRUCTING)) {
+ int64 us, ms, us2ms;
+ jsdouble msec_time;
+
+ /* NSPR 2.0 docs say 'We do not support PRMJ_NowMS and PRMJ_NowS',
+ * so compute ms from PRMJ_Now.
+ */
+ us = PRMJ_Now();
+ JSLL_UI2L(us2ms, PRMJ_USEC_PER_MSEC);
+ JSLL_DIV(ms, us, us2ms);
+ JSLL_L2D(msec_time, ms);
+
+ return date_format(cx, msec_time, FORMATSPEC_FULL, rval);
+ }
+
+ /* Date called as constructor. */
+ if (argc == 0) {
+ int64 us, ms, us2ms;
+ jsdouble msec_time;
+
+ date = date_constructor(cx, obj);
+ if (!date)
+ return JS_FALSE;
+
+ us = PRMJ_Now();
+ JSLL_UI2L(us2ms, PRMJ_USEC_PER_MSEC);
+ JSLL_DIV(ms, us, us2ms);
+ JSLL_L2D(msec_time, ms);
+
+ *date = msec_time;
+ } else if (argc == 1) {
+ if (!JSVAL_IS_STRING(argv[0])) {
+ /* the argument is a millisecond number */
+ if (!js_ValueToNumber(cx, argv[0], &d))
+ return JS_FALSE;
+ date = date_constructor(cx, obj);
+ if (!date)
+ return JS_FALSE;
+ *date = TIMECLIP(d);
+ } else {
+ /* the argument is a string; parse it. */
+ date = date_constructor(cx, obj);
+ if (!date)
+ return JS_FALSE;
+
+ str = js_ValueToString(cx, argv[0]);
+ if (!str)
+ return JS_FALSE;
+
+ if (!date_parseString(str, date))
+ *date = *cx->runtime->jsNaN;
+ *date = TIMECLIP(*date);
+ }
+ } else {
+ jsdouble array[MAXARGS];
+ uintN loop;
+ jsdouble double_arg;
+ jsdouble day;
+ jsdouble msec_time;
+
+ for (loop = 0; loop < MAXARGS; loop++) {
+ if (loop < argc) {
+ if (!js_ValueToNumber(cx, argv[loop], &double_arg))
+ return JS_FALSE;
+ /* if any arg is NaN, make a NaN date object
+ and return */
+ if (!JSDOUBLE_IS_FINITE(double_arg)) {
+ date = date_constructor(cx, obj);
+ if (!date)
+ return JS_FALSE;
+ *date = *cx->runtime->jsNaN;
+ return JS_TRUE;
+ }
+ array[loop] = js_DoubleToInteger(double_arg);
+ } else {
+ if (loop == 2) {
+ array[loop] = 1; /* Default the date argument to 1. */
+ } else {
+ array[loop] = 0;
+ }
+ }
+ }
+
+ date = date_constructor(cx, obj);
+ if (!date)
+ return JS_FALSE;
+
+ /* adjust 2-digit years into the 20th century */
+ if (array[0] >= 0 && array[0] <= 99)
+ array[0] += 1900;
+
+ day = MakeDay(array[0], array[1], array[2]);
+ msec_time = MakeTime(array[3], array[4], array[5], array[6]);
+ msec_time = MakeDate(day, msec_time);
+ msec_time = UTC(msec_time);
+ *date = TIMECLIP(msec_time);
+ }
+ return JS_TRUE;
+}
+
+JSObject *
+js_InitDateClass(JSContext *cx, JSObject *obj)
+{
+ JSObject *proto;
+ jsdouble *proto_date;
+
+ /* set static LocalTZA */
+ LocalTZA = -(PRMJ_LocalGMTDifference() * msPerSecond);
+ proto = JS_InitClass(cx, obj, NULL, &js_DateClass, Date, MAXARGS,
+ NULL, date_methods, NULL, date_static_methods);
+ if (!proto)
+ return NULL;
+
+ /* Alias toUTCString with toGMTString. (ECMA B.2.6) */
+ if (!JS_AliasProperty(cx, proto, "toUTCString", "toGMTString"))
+ return NULL;
+
+ /* Set the value of the Date.prototype date to NaN */
+ proto_date = date_constructor(cx, proto);
+ if (!proto_date)
+ return NULL;
+ *proto_date = *cx->runtime->jsNaN;
+
+ return proto;
+}
+
+JS_FRIEND_API(JSObject *)
+js_NewDateObjectMsec(JSContext *cx, jsdouble msec_time)
+{
+ JSObject *obj;
+ jsdouble *date;
+
+ obj = js_NewObject(cx, &js_DateClass, NULL, NULL);
+ if (!obj)
+ return NULL;
+
+ date = date_constructor(cx, obj);
+ if (!date)
+ return NULL;
+
+ *date = msec_time;
+ return obj;
+}
+
+JS_FRIEND_API(JSObject *)
+js_NewDateObject(JSContext* cx, int year, int mon, int mday,
+ int hour, int min, int sec)
+{
+ JSObject *obj;
+ jsdouble msec_time;
+
+ msec_time = date_msecFromDate(year, mon, mday, hour, min, sec, 0);
+ obj = js_NewDateObjectMsec(cx, UTC(msec_time));
+ return obj;
+}
+
+JS_FRIEND_API(JSBool)
+js_DateIsValid(JSContext *cx, JSObject* obj)
+{
+ jsdouble *date = date_getProlog(cx, obj, NULL);
+
+ if (!date || JSDOUBLE_IS_NaN(*date))
+ return JS_FALSE;
+ else
+ return JS_TRUE;
+}
+
+JS_FRIEND_API(int)
+js_DateGetYear(JSContext *cx, JSObject* obj)
+{
+ jsdouble *date = date_getProlog(cx, obj, NULL);
+
+ /* Preserve legacy API behavior of returning 0 for invalid dates. */
+ if (!date || JSDOUBLE_IS_NaN(*date))
+ return 0;
+ return (int) YearFromTime(LocalTime(*date));
+}
+
+JS_FRIEND_API(int)
+js_DateGetMonth(JSContext *cx, JSObject* obj)
+{
+ jsdouble *date = date_getProlog(cx, obj, NULL);
+
+ if (!date || JSDOUBLE_IS_NaN(*date))
+ return 0;
+ return (int) MonthFromTime(LocalTime(*date));
+}
+
+JS_FRIEND_API(int)
+js_DateGetDate(JSContext *cx, JSObject* obj)
+{
+ jsdouble *date = date_getProlog(cx, obj, NULL);
+
+ if (!date || JSDOUBLE_IS_NaN(*date))
+ return 0;
+ return (int) DateFromTime(LocalTime(*date));
+}
+
+JS_FRIEND_API(int)
+js_DateGetHours(JSContext *cx, JSObject* obj)
+{
+ jsdouble *date = date_getProlog(cx, obj, NULL);
+
+ if (!date || JSDOUBLE_IS_NaN(*date))
+ return 0;
+ return (int) HourFromTime(LocalTime(*date));
+}
+
+JS_FRIEND_API(int)
+js_DateGetMinutes(JSContext *cx, JSObject* obj)
+{
+ jsdouble *date = date_getProlog(cx, obj, NULL);
+
+ if (!date || JSDOUBLE_IS_NaN(*date))
+ return 0;
+ return (int) MinFromTime(LocalTime(*date));
+}
+
+JS_FRIEND_API(int)
+js_DateGetSeconds(JSContext *cx, JSObject* obj)
+{
+ jsdouble *date = date_getProlog(cx, obj, NULL);
+
+ if (!date || JSDOUBLE_IS_NaN(*date))
+ return 0;
+ return (int) SecFromTime(*date);
+}
+
+JS_FRIEND_API(void)
+js_DateSetYear(JSContext *cx, JSObject *obj, int year)
+{
+ jsdouble local;
+ jsdouble *date = date_getProlog(cx, obj, NULL);
+ if (!date)
+ return;
+ local = LocalTime(*date);
+ /* reset date if it was NaN */
+ if (JSDOUBLE_IS_NaN(local))
+ local = 0;
+ local = date_msecFromDate(year,
+ MonthFromTime(local),
+ DateFromTime(local),
+ HourFromTime(local),
+ MinFromTime(local),
+ SecFromTime(local),
+ msFromTime(local));
+ *date = UTC(local);
+}
+
+JS_FRIEND_API(void)
+js_DateSetMonth(JSContext *cx, JSObject *obj, int month)
+{
+ jsdouble local;
+ jsdouble *date = date_getProlog(cx, obj, NULL);
+ if (!date)
+ return;
+ local = LocalTime(*date);
+ /* bail if date was NaN */
+ if (JSDOUBLE_IS_NaN(local))
+ return;
+ local = date_msecFromDate(YearFromTime(local),
+ month,
+ DateFromTime(local),
+ HourFromTime(local),
+ MinFromTime(local),
+ SecFromTime(local),
+ msFromTime(local));
+ *date = UTC(local);
+}
+
+JS_FRIEND_API(void)
+js_DateSetDate(JSContext *cx, JSObject *obj, int date)
+{
+ jsdouble local;
+ jsdouble *datep = date_getProlog(cx, obj, NULL);
+ if (!datep)
+ return;
+ local = LocalTime(*datep);
+ if (JSDOUBLE_IS_NaN(local))
+ return;
+ local = date_msecFromDate(YearFromTime(local),
+ MonthFromTime(local),
+ date,
+ HourFromTime(local),
+ MinFromTime(local),
+ SecFromTime(local),
+ msFromTime(local));
+ *datep = UTC(local);
+}
+
+JS_FRIEND_API(void)
+js_DateSetHours(JSContext *cx, JSObject *obj, int hours)
+{
+ jsdouble local;
+ jsdouble *date = date_getProlog(cx, obj, NULL);
+ if (!date)
+ return;
+ local = LocalTime(*date);
+ if (JSDOUBLE_IS_NaN(local))
+ return;
+ local = date_msecFromDate(YearFromTime(local),
+ MonthFromTime(local),
+ DateFromTime(local),
+ hours,
+ MinFromTime(local),
+ SecFromTime(local),
+ msFromTime(local));
+ *date = UTC(local);
+}
+
+JS_FRIEND_API(void)
+js_DateSetMinutes(JSContext *cx, JSObject *obj, int minutes)
+{
+ jsdouble local;
+ jsdouble *date = date_getProlog(cx, obj, NULL);
+ if (!date)
+ return;
+ local = LocalTime(*date);
+ if (JSDOUBLE_IS_NaN(local))
+ return;
+ local = date_msecFromDate(YearFromTime(local),
+ MonthFromTime(local),
+ DateFromTime(local),
+ HourFromTime(local),
+ minutes,
+ SecFromTime(local),
+ msFromTime(local));
+ *date = UTC(local);
+}
+
+JS_FRIEND_API(void)
+js_DateSetSeconds(JSContext *cx, JSObject *obj, int seconds)
+{
+ jsdouble local;
+ jsdouble *date = date_getProlog(cx, obj, NULL);
+ if (!date)
+ return;
+ local = LocalTime(*date);
+ if (JSDOUBLE_IS_NaN(local))
+ return;
+ local = date_msecFromDate(YearFromTime(local),
+ MonthFromTime(local),
+ DateFromTime(local),
+ HourFromTime(local),
+ MinFromTime(local),
+ seconds,
+ msFromTime(local));
+ *date = UTC(local);
+}
+
+JS_FRIEND_API(jsdouble)
+js_DateGetMsecSinceEpoch(JSContext *cx, JSObject *obj)
+{
+ jsdouble *date = date_getProlog(cx, obj, NULL);
+ if (!date || JSDOUBLE_IS_NaN(*date))
+ return 0;
+ return (*date);
+}
diff --git a/third_party/js-1.7/jsdate.h b/third_party/js-1.7/jsdate.h
new file mode 100644
index 0000000..88bd5f5
--- /dev/null
+++ b/third_party/js-1.7/jsdate.h
@@ -0,0 +1,120 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * JS Date class interface.
+ */
+
+#ifndef jsdate_h___
+#define jsdate_h___
+
+JS_BEGIN_EXTERN_C
+
+extern JSClass js_DateClass;
+
+extern JSObject *
+js_InitDateClass(JSContext *cx, JSObject *obj);
+
+/*
+ * These functions provide a C interface to the date/time object
+ */
+
+/*
+ * Construct a new Date Object from a time value given in milliseconds UTC
+ * since the epoch.
+ */
+extern JS_FRIEND_API(JSObject*)
+js_NewDateObjectMsec(JSContext* cx, jsdouble msec_time);
+
+/*
+ * Construct a new Date Object from an exploded local time value.
+ */
+extern JS_FRIEND_API(JSObject*)
+js_NewDateObject(JSContext* cx, int year, int mon, int mday,
+ int hour, int min, int sec);
+
+/*
+ * Detect whether the internal date value is NaN. (Because failure is
+ * out-of-band for js_DateGet*)
+ */
+extern JS_FRIEND_API(JSBool)
+js_DateIsValid(JSContext *cx, JSObject* obj);
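+
+/*
+ * Usage sketch, assuming the embedding already holds a live JSContext *cx;
+ * the variable names are illustrative. Construct a Date, confirm the value
+ * is not NaN with js_DateIsValid, then read fields with the js_DateGet*
+ * accessors, which report local time (month is 0-based, as in the JS Date
+ * object) and return 0 on failure:
+ *
+ *   JSObject *dobj = js_NewDateObjectMsec(cx, 0.0);
+ *   if (dobj && js_DateIsValid(cx, dobj)) {
+ *       int year = js_DateGetYear(cx, dobj);
+ *       int month = js_DateGetMonth(cx, dobj);
+ *       int mday = js_DateGetDate(cx, dobj);
+ *   }
+ */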
+
+extern JS_FRIEND_API(int)
+js_DateGetYear(JSContext *cx, JSObject* obj);
+
+extern JS_FRIEND_API(int)
+js_DateGetMonth(JSContext *cx, JSObject* obj);
+
+extern JS_FRIEND_API(int)
+js_DateGetDate(JSContext *cx, JSObject* obj);
+
+extern JS_FRIEND_API(int)
+js_DateGetHours(JSContext *cx, JSObject* obj);
+
+extern JS_FRIEND_API(int)
+js_DateGetMinutes(JSContext *cx, JSObject* obj);
+
+extern JS_FRIEND_API(int)
+js_DateGetSeconds(JSContext *cx, JSObject* obj);
+
+extern JS_FRIEND_API(void)
+js_DateSetYear(JSContext *cx, JSObject *obj, int year);
+
+extern JS_FRIEND_API(void)
+js_DateSetMonth(JSContext *cx, JSObject *obj, int month);
+
+extern JS_FRIEND_API(void)
+js_DateSetDate(JSContext *cx, JSObject *obj, int date);
+
+extern JS_FRIEND_API(void)
+js_DateSetHours(JSContext *cx, JSObject *obj, int hours);
+
+extern JS_FRIEND_API(void)
+js_DateSetMinutes(JSContext *cx, JSObject *obj, int minutes);
+
+extern JS_FRIEND_API(void)
+js_DateSetSeconds(JSContext *cx, JSObject *obj, int seconds);
+
+extern JS_FRIEND_API(jsdouble)
+js_DateGetMsecSinceEpoch(JSContext *cx, JSObject *obj);
+
+JS_END_EXTERN_C
+
+#endif /* jsdate_h___ */
diff --git a/third_party/js-1.7/jsdbgapi.c b/third_party/js-1.7/jsdbgapi.c
new file mode 100644
index 0000000..8fa0e68
--- /dev/null
+++ b/third_party/js-1.7/jsdbgapi.c
@@ -0,0 +1,1439 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sw=4 et tw=78:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * JS debugging API.
+ */
+#include "jsstddef.h"
+#include <string.h>
+#include "jstypes.h"
+#include "jsutil.h" /* Added by JSIFY */
+#include "jsclist.h"
+#include "jsapi.h"
+#include "jscntxt.h"
+#include "jsconfig.h"
+#include "jsdbgapi.h"
+#include "jsfun.h"
+#include "jsgc.h"
+#include "jsinterp.h"
+#include "jslock.h"
+#include "jsobj.h"
+#include "jsopcode.h"
+#include "jsscope.h"
+#include "jsscript.h"
+#include "jsstr.h"
+
+typedef struct JSTrap {
+ JSCList links;
+ JSScript *script;
+ jsbytecode *pc;
+ JSOp op;
+ JSTrapHandler handler;
+ void *closure;
+} JSTrap;
+
+static JSTrap *
+FindTrap(JSRuntime *rt, JSScript *script, jsbytecode *pc)
+{
+ JSTrap *trap;
+
+ for (trap = (JSTrap *)rt->trapList.next;
+ trap != (JSTrap *)&rt->trapList;
+ trap = (JSTrap *)trap->links.next) {
+ if (trap->script == script && trap->pc == pc)
+ return trap;
+ }
+ return NULL;
+}
+
+void
+js_PatchOpcode(JSContext *cx, JSScript *script, jsbytecode *pc, JSOp op)
+{
+ JSTrap *trap;
+
+ trap = FindTrap(cx->runtime, script, pc);
+ if (trap)
+ trap->op = op;
+ else
+ *pc = (jsbytecode)op;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_SetTrap(JSContext *cx, JSScript *script, jsbytecode *pc,
+ JSTrapHandler handler, void *closure)
+{
+ JSRuntime *rt;
+ JSTrap *trap;
+
+ rt = cx->runtime;
+ trap = FindTrap(rt, script, pc);
+ if (trap) {
+ JS_ASSERT(trap->script == script && trap->pc == pc);
+ JS_ASSERT(*pc == JSOP_TRAP);
+ } else {
+ trap = (JSTrap *) JS_malloc(cx, sizeof *trap);
+ if (!trap || !js_AddRoot(cx, &trap->closure, "trap->closure")) {
+ if (trap)
+ JS_free(cx, trap);
+ return JS_FALSE;
+ }
+ JS_APPEND_LINK(&trap->links, &rt->trapList);
+ trap->script = script;
+ trap->pc = pc;
+ trap->op = (JSOp)*pc;
+ *pc = JSOP_TRAP;
+ }
+ trap->handler = handler;
+ trap->closure = closure;
+ return JS_TRUE;
+}
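+
+/*
+ * Minimal usage sketch, assuming a debugger embedding that already holds cx,
+ * a compiled script, and a line number of interest; the handler name and the
+ * lineno variable are illustrative. The handler signature mirrors the call
+ * made from JS_HandleTrap below, and returning JSTRAP_CONTINUE lets the
+ * original opcode execute.
+ *
+ *   static JSTrapStatus JS_DLL_CALLBACK
+ *   MyBreakpoint(JSContext *cx, JSScript *script, jsbytecode *pc,
+ *                jsval *rval, void *closure)
+ *   {
+ *       return JSTRAP_CONTINUE;
+ *   }
+ *
+ *   jsbytecode *pc = JS_LineNumberToPC(cx, script, lineno);
+ *   if (pc && !JS_SetTrap(cx, script, pc, MyBreakpoint, NULL))
+ *       return JS_FALSE;
+ */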
+
+JS_PUBLIC_API(JSOp)
+JS_GetTrapOpcode(JSContext *cx, JSScript *script, jsbytecode *pc)
+{
+ JSTrap *trap;
+
+ trap = FindTrap(cx->runtime, script, pc);
+ if (!trap) {
+ JS_ASSERT(0); /* XXX can't happen */
+ return JSOP_LIMIT;
+ }
+ return trap->op;
+}
+
+static void
+DestroyTrap(JSContext *cx, JSTrap *trap)
+{
+ JS_REMOVE_LINK(&trap->links);
+ *trap->pc = (jsbytecode)trap->op;
+ js_RemoveRoot(cx->runtime, &trap->closure);
+ JS_free(cx, trap);
+}
+
+JS_PUBLIC_API(void)
+JS_ClearTrap(JSContext *cx, JSScript *script, jsbytecode *pc,
+ JSTrapHandler *handlerp, void **closurep)
+{
+ JSTrap *trap;
+
+ trap = FindTrap(cx->runtime, script, pc);
+ if (handlerp)
+ *handlerp = trap ? trap->handler : NULL;
+ if (closurep)
+ *closurep = trap ? trap->closure : NULL;
+ if (trap)
+ DestroyTrap(cx, trap);
+}
+
+JS_PUBLIC_API(void)
+JS_ClearScriptTraps(JSContext *cx, JSScript *script)
+{
+ JSRuntime *rt;
+ JSTrap *trap, *next;
+
+ rt = cx->runtime;
+ for (trap = (JSTrap *)rt->trapList.next;
+ trap != (JSTrap *)&rt->trapList;
+ trap = next) {
+ next = (JSTrap *)trap->links.next;
+ if (trap->script == script)
+ DestroyTrap(cx, trap);
+ }
+}
+
+JS_PUBLIC_API(void)
+JS_ClearAllTraps(JSContext *cx)
+{
+ JSRuntime *rt;
+ JSTrap *trap, *next;
+
+ rt = cx->runtime;
+ for (trap = (JSTrap *)rt->trapList.next;
+ trap != (JSTrap *)&rt->trapList;
+ trap = next) {
+ next = (JSTrap *)trap->links.next;
+ DestroyTrap(cx, trap);
+ }
+}
+
+JS_PUBLIC_API(JSTrapStatus)
+JS_HandleTrap(JSContext *cx, JSScript *script, jsbytecode *pc, jsval *rval)
+{
+ JSTrap *trap;
+ JSTrapStatus status;
+ jsint op;
+
+ trap = FindTrap(cx->runtime, script, pc);
+ if (!trap) {
+ JS_ASSERT(0); /* XXX can't happen */
+ return JSTRAP_ERROR;
+ }
+ /*
+ * It's important that we not use 'trap->' after calling the callback --
+ * the callback might remove the trap!
+ */
+ op = (jsint)trap->op;
+ status = trap->handler(cx, script, pc, rval, trap->closure);
+ if (status == JSTRAP_CONTINUE) {
+ /* By convention, return the true op to the interpreter in rval. */
+ *rval = INT_TO_JSVAL(op);
+ }
+ return status;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_SetInterrupt(JSRuntime *rt, JSTrapHandler handler, void *closure)
+{
+ rt->interruptHandler = handler;
+ rt->interruptHandlerData = closure;
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_ClearInterrupt(JSRuntime *rt, JSTrapHandler *handlerp, void **closurep)
+{
+ if (handlerp)
+ *handlerp = (JSTrapHandler)rt->interruptHandler;
+ if (closurep)
+ *closurep = rt->interruptHandlerData;
+ rt->interruptHandler = 0;
+ rt->interruptHandlerData = 0;
+ return JS_TRUE;
+}
+
+/************************************************************************/
+
+typedef struct JSWatchPoint {
+ JSCList links;
+ JSObject *object; /* weak link, see js_FinalizeObject */
+ JSScopeProperty *sprop;
+ JSPropertyOp setter;
+ JSWatchPointHandler handler;
+ void *closure;
+ uintN flags;
+} JSWatchPoint;
+
+#define JSWP_LIVE 0x1 /* live because set and not cleared */
+#define JSWP_HELD 0x2 /* held while running handler/setter */
+
+static JSBool
+DropWatchPoint(JSContext *cx, JSWatchPoint *wp, uintN flag)
+{
+ JSBool ok;
+ JSScopeProperty *sprop;
+ JSObject *pobj;
+ JSProperty *prop;
+ JSPropertyOp setter;
+
+ ok = JS_TRUE;
+ wp->flags &= ~flag;
+ if (wp->flags != 0)
+ return JS_TRUE;
+
+ /*
+ * Remove wp from the list, then if there are no other watchpoints for
+ * wp->sprop in any scope, restore wp->sprop->setter from wp.
+ */
+ JS_REMOVE_LINK(&wp->links);
+ sprop = wp->sprop;
+
+ /*
+ * If js_ChangeNativePropertyAttrs fails, propagate failure after removing
+ * wp->closure's root and freeing wp.
+ */
+ setter = js_GetWatchedSetter(cx->runtime, NULL, sprop);
+ if (!setter) {
+ ok = js_LookupProperty(cx, wp->object, sprop->id, &pobj, &prop);
+
+ /*
+ * If the property wasn't found on wp->object or didn't exist, then
+ * someone else has dealt with this sprop, and we don't need to change
+ * the property attributes.
+ */
+ if (ok && prop) {
+ if (pobj == wp->object) {
+ JS_ASSERT(OBJ_SCOPE(pobj)->object == pobj);
+
+ sprop = js_ChangeScopePropertyAttrs(cx, OBJ_SCOPE(pobj), sprop,
+ 0, sprop->attrs,
+ sprop->getter,
+ wp->setter);
+ if (!sprop)
+ ok = JS_FALSE;
+ }
+ OBJ_DROP_PROPERTY(cx, pobj, prop);
+ }
+ }
+
+ js_RemoveRoot(cx->runtime, &wp->closure);
+ JS_free(cx, wp);
+ return ok;
+}
+
+void
+js_MarkWatchPoints(JSContext *cx)
+{
+ JSRuntime *rt;
+ JSWatchPoint *wp;
+
+ rt = cx->runtime;
+ for (wp = (JSWatchPoint *)rt->watchPointList.next;
+ wp != (JSWatchPoint *)&rt->watchPointList;
+ wp = (JSWatchPoint *)wp->links.next) {
+ MARK_SCOPE_PROPERTY(cx, wp->sprop);
+ if (wp->sprop->attrs & JSPROP_SETTER)
+ JS_MarkGCThing(cx, wp->setter, "wp->setter", NULL);
+ }
+}
+
+static JSWatchPoint *
+FindWatchPoint(JSRuntime *rt, JSScope *scope, jsid id)
+{
+ JSWatchPoint *wp;
+
+ for (wp = (JSWatchPoint *)rt->watchPointList.next;
+ wp != (JSWatchPoint *)&rt->watchPointList;
+ wp = (JSWatchPoint *)wp->links.next) {
+ if (wp->object == scope->object && wp->sprop->id == id)
+ return wp;
+ }
+ return NULL;
+}
+
+JSScopeProperty *
+js_FindWatchPoint(JSRuntime *rt, JSScope *scope, jsid id)
+{
+ JSWatchPoint *wp;
+
+ wp = FindWatchPoint(rt, scope, id);
+ if (!wp)
+ return NULL;
+ return wp->sprop;
+}
+
+JSPropertyOp
+js_GetWatchedSetter(JSRuntime *rt, JSScope *scope,
+ const JSScopeProperty *sprop)
+{
+ JSWatchPoint *wp;
+
+ for (wp = (JSWatchPoint *)rt->watchPointList.next;
+ wp != (JSWatchPoint *)&rt->watchPointList;
+ wp = (JSWatchPoint *)wp->links.next) {
+ if ((!scope || wp->object == scope->object) && wp->sprop == sprop)
+ return wp->setter;
+ }
+ return NULL;
+}
+
+JSBool JS_DLL_CALLBACK
+js_watch_set(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ JSRuntime *rt;
+ JSWatchPoint *wp;
+ JSScopeProperty *sprop;
+ jsval propid, userid;
+ JSScope *scope;
+ JSBool ok;
+
+ rt = cx->runtime;
+ for (wp = (JSWatchPoint *)rt->watchPointList.next;
+ wp != (JSWatchPoint *)&rt->watchPointList;
+ wp = (JSWatchPoint *)wp->links.next) {
+ sprop = wp->sprop;
+ if (wp->object == obj && SPROP_USERID(sprop) == id &&
+ !(wp->flags & JSWP_HELD)) {
+ wp->flags |= JSWP_HELD;
+
+ JS_LOCK_OBJ(cx, obj);
+ propid = ID_TO_VALUE(sprop->id);
+ userid = (sprop->flags & SPROP_HAS_SHORTID)
+ ? INT_TO_JSVAL(sprop->shortid)
+ : propid;
+ scope = OBJ_SCOPE(obj);
+ JS_UNLOCK_OBJ(cx, obj);
+
+ /* NB: wp is held, so we can safely dereference it still. */
+ ok = wp->handler(cx, obj, propid,
+ SPROP_HAS_VALID_SLOT(sprop, scope)
+ ? OBJ_GET_SLOT(cx, obj, sprop->slot)
+ : JSVAL_VOID,
+ vp, wp->closure);
+ if (ok) {
+ /*
+ * Create a pseudo-frame for the setter invocation so that any
+ * stack-walking security code under the setter will correctly
+ * identify the guilty party. So that the watcher appears to
+ * be active to obj_eval and other such code, point frame.pc
+ * at the JSOP_STOP at the end of the script.
+ */
+ JSObject *closure;
+ JSClass *clasp;
+ JSFunction *fun;
+ JSScript *script;
+ uintN nslots;
+ jsval smallv[5];
+ jsval *argv;
+ JSStackFrame frame;
+
+ closure = (JSObject *) wp->closure;
+ clasp = OBJ_GET_CLASS(cx, closure);
+ if (clasp == &js_FunctionClass) {
+ fun = (JSFunction *) JS_GetPrivate(cx, closure);
+ script = FUN_SCRIPT(fun);
+ } else if (clasp == &js_ScriptClass) {
+ fun = NULL;
+ script = (JSScript *) JS_GetPrivate(cx, closure);
+ } else {
+ fun = NULL;
+ script = NULL;
+ }
+
+ nslots = 2;
+ if (fun) {
+ nslots += fun->nargs;
+ if (FUN_NATIVE(fun))
+ nslots += fun->u.n.extra;
+ }
+
+ if (nslots <= JS_ARRAY_LENGTH(smallv)) {
+ argv = smallv;
+ } else {
+ argv = JS_malloc(cx, nslots * sizeof(jsval));
+ if (!argv) {
+ DropWatchPoint(cx, wp, JSWP_HELD);
+ return JS_FALSE;
+ }
+ }
+
+ argv[0] = OBJECT_TO_JSVAL(closure);
+ argv[1] = JSVAL_NULL;
+ memset(argv + 2, 0, (nslots - 2) * sizeof(jsval));
+
+ memset(&frame, 0, sizeof(frame));
+ frame.script = script;
+ if (script) {
+ JS_ASSERT(script->length >= JSOP_STOP_LENGTH);
+ frame.pc = script->code + script->length
+ - JSOP_STOP_LENGTH;
+ }
+ frame.fun = fun;
+ frame.argv = argv + 2;
+ frame.down = cx->fp;
+ frame.scopeChain = OBJ_GET_PARENT(cx, closure);
+
+ cx->fp = &frame;
+ ok = !wp->setter ||
+ ((sprop->attrs & JSPROP_SETTER)
+ ? js_InternalCall(cx, obj, OBJECT_TO_JSVAL(wp->setter),
+ 1, vp, vp)
+ : wp->setter(cx, OBJ_THIS_OBJECT(cx, obj), userid, vp));
+ cx->fp = frame.down;
+ if (argv != smallv)
+ JS_free(cx, argv);
+ }
+ return DropWatchPoint(cx, wp, JSWP_HELD) && ok;
+ }
+ }
+ return JS_TRUE;
+}
+
+JSBool JS_DLL_CALLBACK
+js_watch_set_wrapper(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSObject *funobj;
+ JSFunction *wrapper;
+ jsval userid;
+
+ funobj = JSVAL_TO_OBJECT(argv[-2]);
+ JS_ASSERT(OBJ_GET_CLASS(cx, funobj) == &js_FunctionClass);
+ wrapper = (JSFunction *) JS_GetPrivate(cx, funobj);
+ userid = ATOM_KEY(wrapper->atom);
+ *rval = argv[0];
+ return js_watch_set(cx, obj, userid, rval);
+}
+
+JSPropertyOp
+js_WrapWatchedSetter(JSContext *cx, jsid id, uintN attrs, JSPropertyOp setter)
+{
+ JSAtom *atom;
+ JSFunction *wrapper;
+
+ if (!(attrs & JSPROP_SETTER))
+ return &js_watch_set; /* & to silence schoolmarmish MSVC */
+
+ if (JSID_IS_ATOM(id)) {
+ atom = JSID_TO_ATOM(id);
+ } else if (JSID_IS_INT(id)) {
+ atom = js_AtomizeInt(cx, JSID_TO_INT(id), 0);
+ if (!atom)
+ return NULL;
+ } else {
+ atom = NULL;
+ }
+ wrapper = js_NewFunction(cx, NULL, js_watch_set_wrapper, 1, 0,
+ OBJ_GET_PARENT(cx, (JSObject *)setter),
+ atom);
+ if (!wrapper)
+ return NULL;
+ return (JSPropertyOp) wrapper->object;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_SetWatchPoint(JSContext *cx, JSObject *obj, jsval id,
+ JSWatchPointHandler handler, void *closure)
+{
+ JSAtom *atom;
+ jsid propid;
+ JSObject *pobj;
+ JSProperty *prop;
+ JSScopeProperty *sprop;
+ JSRuntime *rt;
+ JSBool ok;
+ JSWatchPoint *wp;
+ JSPropertyOp watcher;
+
+ if (!OBJ_IS_NATIVE(obj)) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_CANT_WATCH,
+ OBJ_GET_CLASS(cx, obj)->name);
+ return JS_FALSE;
+ }
+
+ if (JSVAL_IS_INT(id)) {
+ propid = INT_JSVAL_TO_JSID(id);
+ atom = NULL;
+ } else {
+ atom = js_ValueToStringAtom(cx, id);
+ if (!atom)
+ return JS_FALSE;
+ propid = ATOM_TO_JSID(atom);
+ }
+
+ if (!js_LookupProperty(cx, obj, propid, &pobj, &prop))
+ return JS_FALSE;
+ sprop = (JSScopeProperty *) prop;
+ rt = cx->runtime;
+ if (!sprop) {
+ /* Check for a deleted symbol watchpoint, which holds its property. */
+ sprop = js_FindWatchPoint(rt, OBJ_SCOPE(obj), propid);
+ if (!sprop) {
+ /* Make a new property in obj so we can watch for the first set. */
+ if (!js_DefineProperty(cx, obj, propid, JSVAL_VOID,
+ NULL, NULL, JSPROP_ENUMERATE,
+ &prop)) {
+ return JS_FALSE;
+ }
+ sprop = (JSScopeProperty *) prop;
+ }
+ } else if (pobj != obj) {
+ /* Clone the prototype property so we can watch the right object. */
+ jsval value;
+ JSPropertyOp getter, setter;
+ uintN attrs, flags;
+ intN shortid;
+
+ if (OBJ_IS_NATIVE(pobj)) {
+ value = SPROP_HAS_VALID_SLOT(sprop, OBJ_SCOPE(pobj))
+ ? LOCKED_OBJ_GET_SLOT(pobj, sprop->slot)
+ : JSVAL_VOID;
+ getter = sprop->getter;
+ setter = sprop->setter;
+ attrs = sprop->attrs;
+ flags = sprop->flags;
+ shortid = sprop->shortid;
+ } else {
+ if (!OBJ_GET_PROPERTY(cx, pobj, id, &value) ||
+ !OBJ_GET_ATTRIBUTES(cx, pobj, id, prop, &attrs)) {
+ OBJ_DROP_PROPERTY(cx, pobj, prop);
+ return JS_FALSE;
+ }
+ getter = setter = NULL;
+ flags = 0;
+ shortid = 0;
+ }
+ OBJ_DROP_PROPERTY(cx, pobj, prop);
+
+ /* Recall that obj is native, whether or not pobj is native. */
+ if (!js_DefineNativeProperty(cx, obj, propid, value, getter, setter,
+ attrs, flags, shortid, &prop)) {
+ return JS_FALSE;
+ }
+ sprop = (JSScopeProperty *) prop;
+ }
+
+ /*
+ * At this point, prop/sprop exists in obj, obj is locked, and we must
+ * OBJ_DROP_PROPERTY(cx, obj, prop) before returning.
+ */
+ ok = JS_TRUE;
+ wp = FindWatchPoint(rt, OBJ_SCOPE(obj), propid);
+ if (!wp) {
+ watcher = js_WrapWatchedSetter(cx, propid, sprop->attrs, sprop->setter);
+ if (!watcher) {
+ ok = JS_FALSE;
+ goto out;
+ }
+
+ wp = (JSWatchPoint *) JS_malloc(cx, sizeof *wp);
+ if (!wp) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ wp->handler = NULL;
+ wp->closure = NULL;
+ ok = js_AddRoot(cx, &wp->closure, "wp->closure");
+ if (!ok) {
+ JS_free(cx, wp);
+ goto out;
+ }
+ wp->object = obj;
+ JS_ASSERT(sprop->setter != js_watch_set || pobj != obj);
+ wp->setter = sprop->setter;
+ wp->flags = JSWP_LIVE;
+
+ /* XXXbe nest in obj lock here */
+ sprop = js_ChangeNativePropertyAttrs(cx, obj, sprop, 0, sprop->attrs,
+ sprop->getter, watcher);
+ if (!sprop) {
+ /* Self-link so DropWatchPoint can JS_REMOVE_LINK it. */
+ JS_INIT_CLIST(&wp->links);
+ DropWatchPoint(cx, wp, JSWP_LIVE);
+ ok = JS_FALSE;
+ goto out;
+ }
+ wp->sprop = sprop;
+
+ /*
+ * Now that wp is fully initialized, append it to rt's wp list.
+ * Because obj is locked we know that no other thread could have added
+ * a watchpoint for (obj, propid).
+ */
+ JS_ASSERT(!FindWatchPoint(rt, OBJ_SCOPE(obj), propid));
+ JS_APPEND_LINK(&wp->links, &rt->watchPointList);
+ }
+ wp->handler = handler;
+ wp->closure = closure;
+
+out:
+ OBJ_DROP_PROPERTY(cx, obj, prop);
+ return ok;
+}
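+
+/*
+ * Minimal usage sketch, assuming an embedding with a live cx and a native
+ * obj; the handler and property names are illustrative. The handler
+ * signature matches the call made from js_watch_set above, and returning
+ * JS_TRUE lets the underlying assignment proceed.
+ *
+ *   static JSBool JS_DLL_CALLBACK
+ *   MyWatcher(JSContext *cx, JSObject *obj, jsval id, jsval old,
+ *             jsval *newp, void *closure)
+ *   {
+ *       return JS_TRUE;
+ *   }
+ *
+ *   JSString *name = JS_InternString(cx, "watched");
+ *   if (!name || !JS_SetWatchPoint(cx, obj, STRING_TO_JSVAL(name),
+ *                                  MyWatcher, NULL))
+ *       return JS_FALSE;
+ */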
+
+JS_PUBLIC_API(JSBool)
+JS_ClearWatchPoint(JSContext *cx, JSObject *obj, jsval id,
+ JSWatchPointHandler *handlerp, void **closurep)
+{
+ JSRuntime *rt;
+ JSWatchPoint *wp;
+
+ rt = cx->runtime;
+ for (wp = (JSWatchPoint *)rt->watchPointList.next;
+ wp != (JSWatchPoint *)&rt->watchPointList;
+ wp = (JSWatchPoint *)wp->links.next) {
+ if (wp->object == obj && SPROP_USERID(wp->sprop) == id) {
+ if (handlerp)
+ *handlerp = wp->handler;
+ if (closurep)
+ *closurep = wp->closure;
+ return DropWatchPoint(cx, wp, JSWP_LIVE);
+ }
+ }
+ if (handlerp)
+ *handlerp = NULL;
+ if (closurep)
+ *closurep = NULL;
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_ClearWatchPointsForObject(JSContext *cx, JSObject *obj)
+{
+ JSRuntime *rt;
+ JSWatchPoint *wp, *next;
+
+ rt = cx->runtime;
+ for (wp = (JSWatchPoint *)rt->watchPointList.next;
+ wp != (JSWatchPoint *)&rt->watchPointList;
+ wp = next) {
+ next = (JSWatchPoint *)wp->links.next;
+ if (wp->object == obj && !DropWatchPoint(cx, wp, JSWP_LIVE))
+ return JS_FALSE;
+ }
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_ClearAllWatchPoints(JSContext *cx)
+{
+ JSRuntime *rt;
+ JSWatchPoint *wp, *next;
+
+ rt = cx->runtime;
+ for (wp = (JSWatchPoint *)rt->watchPointList.next;
+ wp != (JSWatchPoint *)&rt->watchPointList;
+ wp = next) {
+ next = (JSWatchPoint *)wp->links.next;
+ if (!DropWatchPoint(cx, wp, JSWP_LIVE))
+ return JS_FALSE;
+ }
+ return JS_TRUE;
+}
+
+/************************************************************************/
+
+JS_PUBLIC_API(uintN)
+JS_PCToLineNumber(JSContext *cx, JSScript *script, jsbytecode *pc)
+{
+ return js_PCToLineNumber(cx, script, pc);
+}
+
+JS_PUBLIC_API(jsbytecode *)
+JS_LineNumberToPC(JSContext *cx, JSScript *script, uintN lineno)
+{
+ return js_LineNumberToPC(script, lineno);
+}
+
+JS_PUBLIC_API(JSScript *)
+JS_GetFunctionScript(JSContext *cx, JSFunction *fun)
+{
+ return FUN_SCRIPT(fun);
+}
+
+JS_PUBLIC_API(JSNative)
+JS_GetFunctionNative(JSContext *cx, JSFunction *fun)
+{
+ return FUN_NATIVE(fun);
+}
+
+JS_PUBLIC_API(JSPrincipals *)
+JS_GetScriptPrincipals(JSContext *cx, JSScript *script)
+{
+ return script->principals;
+}
+
+/************************************************************************/
+
+/*
+ * Stack Frame Iterator
+ */
+JS_PUBLIC_API(JSStackFrame *)
+JS_FrameIterator(JSContext *cx, JSStackFrame **iteratorp)
+{
+ *iteratorp = (*iteratorp == NULL) ? cx->fp : (*iteratorp)->down;
+ return *iteratorp;
+}
+
+JS_PUBLIC_API(JSScript *)
+JS_GetFrameScript(JSContext *cx, JSStackFrame *fp)
+{
+ return fp->script;
+}
+
+JS_PUBLIC_API(jsbytecode *)
+JS_GetFramePC(JSContext *cx, JSStackFrame *fp)
+{
+ return fp->pc;
+}
+
+JS_PUBLIC_API(JSStackFrame *)
+JS_GetScriptedCaller(JSContext *cx, JSStackFrame *fp)
+{
+ if (!fp)
+ fp = cx->fp;
+ while ((fp = fp->down) != NULL) {
+ if (fp->script)
+ return fp;
+ }
+ return NULL;
+}
+
+JS_PUBLIC_API(JSPrincipals *)
+JS_StackFramePrincipals(JSContext *cx, JSStackFrame *fp)
+{
+ if (fp->fun) {
+ JSRuntime *rt = cx->runtime;
+
+ if (rt->findObjectPrincipals) {
+ JSObject *callee = JSVAL_TO_OBJECT(fp->argv[-2]);
+
+ if (fp->fun->object != callee)
+ return rt->findObjectPrincipals(cx, callee);
+ /* FALL THROUGH */
+ }
+ }
+ if (fp->script)
+ return fp->script->principals;
+ return NULL;
+}
+
+JS_PUBLIC_API(JSPrincipals *)
+JS_EvalFramePrincipals(JSContext *cx, JSStackFrame *fp, JSStackFrame *caller)
+{
+ JSRuntime *rt;
+ JSObject *callee;
+ JSPrincipals *principals, *callerPrincipals;
+
+ rt = cx->runtime;
+ if (rt->findObjectPrincipals) {
+ callee = JSVAL_TO_OBJECT(fp->argv[-2]);
+ principals = rt->findObjectPrincipals(cx, callee);
+ } else {
+ principals = NULL;
+ }
+ if (!caller)
+ return principals;
+ callerPrincipals = JS_StackFramePrincipals(cx, caller);
+ return (callerPrincipals && principals &&
+ callerPrincipals->subsume(callerPrincipals, principals))
+ ? principals
+ : callerPrincipals;
+}
+
+JS_PUBLIC_API(void *)
+JS_GetFrameAnnotation(JSContext *cx, JSStackFrame *fp)
+{
+ if (fp->annotation && fp->script) {
+ JSPrincipals *principals = JS_StackFramePrincipals(cx, fp);
+
+ if (principals && principals->globalPrivilegesEnabled(cx, principals)) {
+ /*
+ * Give out an annotation only if privileges have not been revoked
+ * or disabled globally.
+ */
+ return fp->annotation;
+ }
+ }
+
+ return NULL;
+}
+
+JS_PUBLIC_API(void)
+JS_SetFrameAnnotation(JSContext *cx, JSStackFrame *fp, void *annotation)
+{
+ fp->annotation = annotation;
+}
+
+JS_PUBLIC_API(void *)
+JS_GetFramePrincipalArray(JSContext *cx, JSStackFrame *fp)
+{
+ JSPrincipals *principals;
+
+ principals = JS_StackFramePrincipals(cx, fp);
+ if (!principals)
+ return NULL;
+ return principals->getPrincipalArray(cx, principals);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_IsNativeFrame(JSContext *cx, JSStackFrame *fp)
+{
+ return !fp->script;
+}
+
+/* this is deprecated, use JS_GetFrameScopeChain instead */
+JS_PUBLIC_API(JSObject *)
+JS_GetFrameObject(JSContext *cx, JSStackFrame *fp)
+{
+ return fp->scopeChain;
+}
+
+JS_PUBLIC_API(JSObject *)
+JS_GetFrameScopeChain(JSContext *cx, JSStackFrame *fp)
+{
+ /* Force creation of argument and call objects if not yet created */
+ (void) JS_GetFrameCallObject(cx, fp);
+ return js_GetScopeChain(cx, fp);
+}
+
+JS_PUBLIC_API(JSObject *)
+JS_GetFrameCallObject(JSContext *cx, JSStackFrame *fp)
+{
+ if (! fp->fun)
+ return NULL;
+
+ /* Force creation of argument object if not yet created */
+ (void) js_GetArgsObject(cx, fp);
+
+ /*
+ * XXX ill-defined: null return here means error was reported, unlike a
+ * null returned above or in the #else
+ */
+ return js_GetCallObject(cx, fp, NULL);
+}
+
+JS_PUBLIC_API(JSObject *)
+JS_GetFrameThis(JSContext *cx, JSStackFrame *fp)
+{
+ return fp->thisp;
+}
+
+JS_PUBLIC_API(JSFunction *)
+JS_GetFrameFunction(JSContext *cx, JSStackFrame *fp)
+{
+ return fp->fun;
+}
+
+JS_PUBLIC_API(JSObject *)
+JS_GetFrameFunctionObject(JSContext *cx, JSStackFrame *fp)
+{
+ return fp->argv && fp->fun ? JSVAL_TO_OBJECT(fp->argv[-2]) : NULL;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_IsConstructorFrame(JSContext *cx, JSStackFrame *fp)
+{
+ return (fp->flags & JSFRAME_CONSTRUCTING) != 0;
+}
+
+JS_PUBLIC_API(JSObject *)
+JS_GetFrameCalleeObject(JSContext *cx, JSStackFrame *fp)
+{
+ return fp->argv ? JSVAL_TO_OBJECT(fp->argv[-2]) : NULL;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_IsDebuggerFrame(JSContext *cx, JSStackFrame *fp)
+{
+ return (fp->flags & JSFRAME_DEBUGGER) != 0;
+}
+
+JS_PUBLIC_API(jsval)
+JS_GetFrameReturnValue(JSContext *cx, JSStackFrame *fp)
+{
+ return fp->rval;
+}
+
+JS_PUBLIC_API(void)
+JS_SetFrameReturnValue(JSContext *cx, JSStackFrame *fp, jsval rval)
+{
+ fp->rval = rval;
+}
+
+/************************************************************************/
+
+JS_PUBLIC_API(const char *)
+JS_GetScriptFilename(JSContext *cx, JSScript *script)
+{
+ return script->filename;
+}
+
+JS_PUBLIC_API(uintN)
+JS_GetScriptBaseLineNumber(JSContext *cx, JSScript *script)
+{
+ return script->lineno;
+}
+
+JS_PUBLIC_API(uintN)
+JS_GetScriptLineExtent(JSContext *cx, JSScript *script)
+{
+ return js_GetScriptLineExtent(script);
+}
+
+JS_PUBLIC_API(JSVersion)
+JS_GetScriptVersion(JSContext *cx, JSScript *script)
+{
+ return script->version & JSVERSION_MASK;
+}
+
+/***************************************************************************/
+
+JS_PUBLIC_API(void)
+JS_SetNewScriptHook(JSRuntime *rt, JSNewScriptHook hook, void *callerdata)
+{
+ rt->newScriptHook = hook;
+ rt->newScriptHookData = callerdata;
+}
+
+JS_PUBLIC_API(void)
+JS_SetDestroyScriptHook(JSRuntime *rt, JSDestroyScriptHook hook,
+ void *callerdata)
+{
+ rt->destroyScriptHook = hook;
+ rt->destroyScriptHookData = callerdata;
+}
+
+/***************************************************************************/
+
+JS_PUBLIC_API(JSBool)
+JS_EvaluateUCInStackFrame(JSContext *cx, JSStackFrame *fp,
+ const jschar *chars, uintN length,
+ const char *filename, uintN lineno,
+ jsval *rval)
+{
+ JSObject *scobj;
+ uint32 flags, options;
+ JSScript *script;
+ JSBool ok;
+
+ scobj = JS_GetFrameScopeChain(cx, fp);
+ if (!scobj)
+ return JS_FALSE;
+
+ /*
+ * XXX Hack around ancient compiler API to propagate the JSFRAME_SPECIAL
+ * flags to the code generator (see js_EmitTree's TOK_SEMI case).
+ */
+ flags = fp->flags;
+ fp->flags |= JSFRAME_DEBUGGER | JSFRAME_EVAL;
+ options = cx->options;
+ cx->options = options | JSOPTION_COMPILE_N_GO;
+ script = JS_CompileUCScriptForPrincipals(cx, scobj,
+ JS_StackFramePrincipals(cx, fp),
+ chars, length, filename, lineno);
+ fp->flags = flags;
+ cx->options = options;
+ if (!script)
+ return JS_FALSE;
+
+ ok = js_Execute(cx, scobj, script, fp, JSFRAME_DEBUGGER | JSFRAME_EVAL,
+ rval);
+ js_DestroyScript(cx, script);
+ return ok;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_EvaluateInStackFrame(JSContext *cx, JSStackFrame *fp,
+ const char *bytes, uintN length,
+ const char *filename, uintN lineno,
+ jsval *rval)
+{
+ jschar *chars;
+ JSBool ok;
+ size_t len = length;
+
+ chars = js_InflateString(cx, bytes, &len);
+ if (!chars)
+ return JS_FALSE;
+ length = (uintN) len;
+ ok = JS_EvaluateUCInStackFrame(cx, fp, chars, length, filename, lineno,
+ rval);
+ JS_free(cx, chars);
+
+ return ok;
+}
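+
+/*
+ * Usage sketch, assuming a debugger hook that has already obtained a
+ * scripted frame fp (for example via JS_GetScriptedCaller); the expression
+ * string and filename label are illustrative.
+ *
+ *   jsval rval;
+ *   const char *expr = "someLocal + 1";
+ *   if (!JS_EvaluateInStackFrame(cx, fp, expr, (uintN) strlen(expr),
+ *                                "debugger eval", 1, &rval))
+ *       return JS_FALSE;
+ */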
+
+/************************************************************************/
+
+/* XXXbe this all needs to be reworked to avoid requiring JSScope types. */
+
+JS_PUBLIC_API(JSScopeProperty *)
+JS_PropertyIterator(JSObject *obj, JSScopeProperty **iteratorp)
+{
+ JSScopeProperty *sprop;
+ JSScope *scope;
+
+ sprop = *iteratorp;
+ scope = OBJ_SCOPE(obj);
+
+ /* XXXbe minor(?) incompatibility: iterate in reverse definition order */
+ if (!sprop) {
+ sprop = SCOPE_LAST_PROP(scope);
+ } else {
+ while ((sprop = sprop->parent) != NULL) {
+ if (!SCOPE_HAD_MIDDLE_DELETE(scope))
+ break;
+ if (SCOPE_HAS_PROPERTY(scope, sprop))
+ break;
+ }
+ }
+ *iteratorp = sprop;
+ return sprop;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_GetPropertyDesc(JSContext *cx, JSObject *obj, JSScopeProperty *sprop,
+ JSPropertyDesc *pd)
+{
+ JSPropertyOp getter;
+ JSScope *scope;
+ JSScopeProperty *aprop;
+ jsval lastException;
+ JSBool wasThrowing;
+
+ pd->id = ID_TO_VALUE(sprop->id);
+
+ wasThrowing = cx->throwing;
+ if (wasThrowing) {
+ lastException = cx->exception;
+ if (JSVAL_IS_GCTHING(lastException) &&
+ !js_AddRoot(cx, &lastException, "lastException")) {
+ return JS_FALSE;
+ }
+ cx->throwing = JS_FALSE;
+ }
+
+ if (!js_GetProperty(cx, obj, sprop->id, &pd->value)) {
+ if (!cx->throwing) {
+ pd->flags = JSPD_ERROR;
+ pd->value = JSVAL_VOID;
+ } else {
+ pd->flags = JSPD_EXCEPTION;
+ pd->value = cx->exception;
+ }
+ } else {
+ pd->flags = 0;
+ }
+
+ cx->throwing = wasThrowing;
+ if (wasThrowing) {
+ cx->exception = lastException;
+ if (JSVAL_IS_GCTHING(lastException))
+ js_RemoveRoot(cx->runtime, &lastException);
+ }
+
+ getter = sprop->getter;
+ pd->flags |= ((sprop->attrs & JSPROP_ENUMERATE) ? JSPD_ENUMERATE : 0)
+ | ((sprop->attrs & JSPROP_READONLY) ? JSPD_READONLY : 0)
+ | ((sprop->attrs & JSPROP_PERMANENT) ? JSPD_PERMANENT : 0)
+ | ((getter == js_GetCallVariable) ? JSPD_VARIABLE : 0)
+ | ((getter == js_GetArgument) ? JSPD_ARGUMENT : 0)
+ | ((getter == js_GetLocalVariable) ? JSPD_VARIABLE : 0);
+
+ /* For a Call object, the 'real' getter isn't passed in to us. */
+ if (OBJ_GET_CLASS(cx, obj) == &js_CallClass &&
+ getter == js_CallClass.getProperty) {
+ /*
+ * Property of a heavyweight function's variable object having the
+ * class-default getter. It's either an argument if permanent, or a
+ * nested function if impermanent. Local variables have a special
+ * getter (js_GetCallVariable, tested above) and setter, and not the
+ * class default.
+ */
+ pd->flags |= (sprop->attrs & JSPROP_PERMANENT)
+ ? JSPD_ARGUMENT
+ : JSPD_VARIABLE;
+ }
+
+ pd->spare = 0;
+ pd->slot = (pd->flags & (JSPD_ARGUMENT | JSPD_VARIABLE))
+ ? sprop->shortid
+ : 0;
+ pd->alias = JSVAL_VOID;
+ scope = OBJ_SCOPE(obj);
+ if (SPROP_HAS_VALID_SLOT(sprop, scope)) {
+ for (aprop = SCOPE_LAST_PROP(scope); aprop; aprop = aprop->parent) {
+ if (aprop != sprop && aprop->slot == sprop->slot) {
+ pd->alias = ID_TO_VALUE(aprop->id);
+ break;
+ }
+ }
+ }
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_GetPropertyDescArray(JSContext *cx, JSObject *obj, JSPropertyDescArray *pda)
+{
+ JSClass *clasp;
+ JSScope *scope;
+ uint32 i, n;
+ JSPropertyDesc *pd;
+ JSScopeProperty *sprop;
+
+ clasp = OBJ_GET_CLASS(cx, obj);
+ if (!OBJ_IS_NATIVE(obj) || (clasp->flags & JSCLASS_NEW_ENUMERATE)) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_CANT_DESCRIBE_PROPS, clasp->name);
+ return JS_FALSE;
+ }
+ if (!clasp->enumerate(cx, obj))
+ return JS_FALSE;
+
+ /* No props, or the object's scope has not mutated from that of its proto. */
+ scope = OBJ_SCOPE(obj);
+ if (scope->object != obj || scope->entryCount == 0) {
+ pda->length = 0;
+ pda->array = NULL;
+ return JS_TRUE;
+ }
+
+ n = scope->entryCount;
+ if (n > scope->map.nslots)
+ n = scope->map.nslots;
+ pd = (JSPropertyDesc *) JS_malloc(cx, (size_t)n * sizeof(JSPropertyDesc));
+ if (!pd)
+ return JS_FALSE;
+ i = 0;
+ for (sprop = SCOPE_LAST_PROP(scope); sprop; sprop = sprop->parent) {
+ if (SCOPE_HAD_MIDDLE_DELETE(scope) && !SCOPE_HAS_PROPERTY(scope, sprop))
+ continue;
+ if (!js_AddRoot(cx, &pd[i].id, NULL))
+ goto bad;
+ if (!js_AddRoot(cx, &pd[i].value, NULL))
+ goto bad;
+ if (!JS_GetPropertyDesc(cx, obj, sprop, &pd[i]))
+ goto bad;
+ if ((pd[i].flags & JSPD_ALIAS) && !js_AddRoot(cx, &pd[i].alias, NULL))
+ goto bad;
+ if (++i == n)
+ break;
+ }
+ pda->length = i;
+ pda->array = pd;
+ return JS_TRUE;
+
+bad:
+ pda->length = i + 1;
+ pda->array = pd;
+ JS_PutPropertyDescArray(cx, pda);
+ return JS_FALSE;
+}
+
+JS_PUBLIC_API(void)
+JS_PutPropertyDescArray(JSContext *cx, JSPropertyDescArray *pda)
+{
+ JSPropertyDesc *pd;
+ uint32 i;
+
+ pd = pda->array;
+ for (i = 0; i < pda->length; i++) {
+ js_RemoveRoot(cx->runtime, &pd[i].id);
+ js_RemoveRoot(cx->runtime, &pd[i].value);
+ if (pd[i].flags & JSPD_ALIAS)
+ js_RemoveRoot(cx->runtime, &pd[i].alias);
+ }
+ JS_free(cx, pd);
+}
+
+/************************************************************************/
+
+JS_PUBLIC_API(JSBool)
+JS_SetDebuggerHandler(JSRuntime *rt, JSTrapHandler handler, void *closure)
+{
+ rt->debuggerHandler = handler;
+ rt->debuggerHandlerData = closure;
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_SetSourceHandler(JSRuntime *rt, JSSourceHandler handler, void *closure)
+{
+ rt->sourceHandler = handler;
+ rt->sourceHandlerData = closure;
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_SetExecuteHook(JSRuntime *rt, JSInterpreterHook hook, void *closure)
+{
+ rt->executeHook = hook;
+ rt->executeHookData = closure;
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_SetCallHook(JSRuntime *rt, JSInterpreterHook hook, void *closure)
+{
+ rt->callHook = hook;
+ rt->callHookData = closure;
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_SetObjectHook(JSRuntime *rt, JSObjectHook hook, void *closure)
+{
+ rt->objectHook = hook;
+ rt->objectHookData = closure;
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_SetThrowHook(JSRuntime *rt, JSTrapHandler hook, void *closure)
+{
+ rt->throwHook = hook;
+ rt->throwHookData = closure;
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_SetDebugErrorHook(JSRuntime *rt, JSDebugErrorHook hook, void *closure)
+{
+ rt->debugErrorHook = hook;
+ rt->debugErrorHookData = closure;
+ return JS_TRUE;
+}
+
+/************************************************************************/
+
+JS_PUBLIC_API(size_t)
+JS_GetObjectTotalSize(JSContext *cx, JSObject *obj)
+{
+ size_t nbytes;
+ JSScope *scope;
+
+ nbytes = sizeof *obj + obj->map->nslots * sizeof obj->slots[0];
+ if (OBJ_IS_NATIVE(obj)) {
+ scope = OBJ_SCOPE(obj);
+ if (scope->object == obj) {
+ nbytes += sizeof *scope;
+ nbytes += SCOPE_CAPACITY(scope) * sizeof(JSScopeProperty *);
+ }
+ }
+ return nbytes;
+}
+
+static size_t
+GetAtomTotalSize(JSContext *cx, JSAtom *atom)
+{
+ size_t nbytes;
+
+ nbytes = sizeof *atom;
+ if (ATOM_IS_STRING(atom)) {
+ nbytes += sizeof(JSString);
+ nbytes += (ATOM_TO_STRING(atom)->length + 1) * sizeof(jschar);
+ } else if (ATOM_IS_DOUBLE(atom)) {
+ nbytes += sizeof(jsdouble);
+ } else if (ATOM_IS_OBJECT(atom)) {
+ nbytes += JS_GetObjectTotalSize(cx, ATOM_TO_OBJECT(atom));
+ }
+ return nbytes;
+}
+
+JS_PUBLIC_API(size_t)
+JS_GetFunctionTotalSize(JSContext *cx, JSFunction *fun)
+{
+ size_t nbytes;
+
+ nbytes = sizeof *fun;
+ if (fun->object)
+ nbytes += JS_GetObjectTotalSize(cx, fun->object);
+ if (FUN_INTERPRETED(fun))
+ nbytes += JS_GetScriptTotalSize(cx, fun->u.i.script);
+ if (fun->atom)
+ nbytes += GetAtomTotalSize(cx, fun->atom);
+ return nbytes;
+}
+
+#include "jsemit.h"
+
+JS_PUBLIC_API(size_t)
+JS_GetScriptTotalSize(JSContext *cx, JSScript *script)
+{
+ size_t nbytes, pbytes;
+ JSObject *obj;
+ jsatomid i;
+ jssrcnote *sn, *notes;
+ JSTryNote *tn, *tnotes;
+ JSPrincipals *principals;
+
+ nbytes = sizeof *script;
+ obj = script->object;
+ if (obj)
+ nbytes += JS_GetObjectTotalSize(cx, obj);
+
+ nbytes += script->length * sizeof script->code[0];
+ nbytes += script->atomMap.length * sizeof script->atomMap.vector[0];
+ for (i = 0; i < script->atomMap.length; i++)
+ nbytes += GetAtomTotalSize(cx, script->atomMap.vector[i]);
+
+ if (script->filename)
+ nbytes += strlen(script->filename) + 1;
+
+ notes = SCRIPT_NOTES(script);
+ for (sn = notes; !SN_IS_TERMINATOR(sn); sn = SN_NEXT(sn))
+ continue;
+ nbytes += (sn - notes + 1) * sizeof *sn;
+
+ tnotes = script->trynotes;
+ if (tnotes) {
+ for (tn = tnotes; tn->catchStart; tn++)
+ continue;
+ nbytes += (tn - tnotes + 1) * sizeof *tn;
+ }
+
+ principals = script->principals;
+ if (principals) {
+ JS_ASSERT(principals->refcount);
+ pbytes = sizeof *principals;
+ if (principals->refcount > 1)
+ pbytes = JS_HOWMANY(pbytes, principals->refcount);
+ nbytes += pbytes;
+ }
+
+ return nbytes;
+}
+
+JS_PUBLIC_API(uint32)
+JS_GetTopScriptFilenameFlags(JSContext *cx, JSStackFrame *fp)
+{
+ if (!fp)
+ fp = cx->fp;
+ while (fp) {
+ if (fp->script) {
+ return JS_GetScriptFilenameFlags(fp->script);
+ }
+ fp = fp->down;
+ }
+ return 0;
+}
+
+JS_PUBLIC_API(uint32)
+JS_GetScriptFilenameFlags(JSScript *script)
+{
+ JS_ASSERT(script);
+ if (!script->filename)
+ return JSFILENAME_NULL;
+ return js_GetScriptFilenameFlags(script->filename);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_FlagScriptFilenamePrefix(JSRuntime *rt, const char *prefix, uint32 flags)
+{
+ if (!js_SaveScriptFilenameRT(rt, prefix, flags))
+ return JS_FALSE;
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_IsSystemObject(JSContext *cx, JSObject *obj)
+{
+ return (*js_GetGCThingFlags(obj) & GCF_SYSTEM) != 0;
+}
+
+JS_PUBLIC_API(void)
+JS_FlagSystemObject(JSContext *cx, JSObject *obj)
+{
+ uint8 *flagp;
+
+ flagp = js_GetGCThingFlags(obj);
+ *flagp |= GCF_SYSTEM;
+}
diff --git a/third_party/js-1.7/jsdbgapi.h b/third_party/js-1.7/jsdbgapi.h
new file mode 100644
index 0000000..d2e1f1c
--- /dev/null
+++ b/third_party/js-1.7/jsdbgapi.h
@@ -0,0 +1,406 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsdbgapi_h___
+#define jsdbgapi_h___
+/*
+ * JS debugger API.
+ */
+#include "jsapi.h"
+#include "jsopcode.h"
+#include "jsprvtd.h"
+
+JS_BEGIN_EXTERN_C
+
+extern void
+js_PatchOpcode(JSContext *cx, JSScript *script, jsbytecode *pc, JSOp op);
+
+extern JS_PUBLIC_API(JSBool)
+JS_SetTrap(JSContext *cx, JSScript *script, jsbytecode *pc,
+ JSTrapHandler handler, void *closure);
+
+extern JS_PUBLIC_API(JSOp)
+JS_GetTrapOpcode(JSContext *cx, JSScript *script, jsbytecode *pc);
+
+extern JS_PUBLIC_API(void)
+JS_ClearTrap(JSContext *cx, JSScript *script, jsbytecode *pc,
+ JSTrapHandler *handlerp, void **closurep);
+
+extern JS_PUBLIC_API(void)
+JS_ClearScriptTraps(JSContext *cx, JSScript *script);
+
+extern JS_PUBLIC_API(void)
+JS_ClearAllTraps(JSContext *cx);
+
+extern JS_PUBLIC_API(JSTrapStatus)
+JS_HandleTrap(JSContext *cx, JSScript *script, jsbytecode *pc, jsval *rval);
+
+extern JS_PUBLIC_API(JSBool)
+JS_SetInterrupt(JSRuntime *rt, JSTrapHandler handler, void *closure);
+
+extern JS_PUBLIC_API(JSBool)
+JS_ClearInterrupt(JSRuntime *rt, JSTrapHandler *handlerp, void **closurep);
+
+/************************************************************************/
+
+extern JS_PUBLIC_API(JSBool)
+JS_SetWatchPoint(JSContext *cx, JSObject *obj, jsval id,
+ JSWatchPointHandler handler, void *closure);
+
+extern JS_PUBLIC_API(JSBool)
+JS_ClearWatchPoint(JSContext *cx, JSObject *obj, jsval id,
+ JSWatchPointHandler *handlerp, void **closurep);
+
+extern JS_PUBLIC_API(JSBool)
+JS_ClearWatchPointsForObject(JSContext *cx, JSObject *obj);
+
+extern JS_PUBLIC_API(JSBool)
+JS_ClearAllWatchPoints(JSContext *cx);
+
+#ifdef JS_HAS_OBJ_WATCHPOINT
+/*
+ * Hide these non-API function prototypes by testing whether the internal
+ * header file "jsconfig.h" has been included.
+ */
+extern void
+js_MarkWatchPoints(JSContext *cx);
+
+extern JSScopeProperty *
+js_FindWatchPoint(JSRuntime *rt, JSScope *scope, jsid id);
+
+extern JSPropertyOp
+js_GetWatchedSetter(JSRuntime *rt, JSScope *scope,
+ const JSScopeProperty *sprop);
+
+extern JSBool JS_DLL_CALLBACK
+js_watch_set(JSContext *cx, JSObject *obj, jsval id, jsval *vp);
+
+extern JSBool JS_DLL_CALLBACK
+js_watch_set_wrapper(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval);
+
+extern JSPropertyOp
+js_WrapWatchedSetter(JSContext *cx, jsid id, uintN attrs, JSPropertyOp setter);
+
+#endif /* JS_HAS_OBJ_WATCHPOINT */
+
+/************************************************************************/
+
+extern JS_PUBLIC_API(uintN)
+JS_PCToLineNumber(JSContext *cx, JSScript *script, jsbytecode *pc);
+
+extern JS_PUBLIC_API(jsbytecode *)
+JS_LineNumberToPC(JSContext *cx, JSScript *script, uintN lineno);
+
+extern JS_PUBLIC_API(JSScript *)
+JS_GetFunctionScript(JSContext *cx, JSFunction *fun);
+
+extern JS_PUBLIC_API(JSNative)
+JS_GetFunctionNative(JSContext *cx, JSFunction *fun);
+
+extern JS_PUBLIC_API(JSPrincipals *)
+JS_GetScriptPrincipals(JSContext *cx, JSScript *script);
+
+/*
+ * Stack Frame Iterator
+ *
+ * Used to iterate through the JS stack frames to extract
+ * information from the frames.
+ */
+
+extern JS_PUBLIC_API(JSStackFrame *)
+JS_FrameIterator(JSContext *cx, JSStackFrame **iteratorp);
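+
+/*
+ * Usage sketch, assuming a live cx: start with a null frame pointer and call
+ * JS_FrameIterator repeatedly; it walks from the newest frame down the stack
+ * and returns NULL when the stack is exhausted. The accessors used below are
+ * declared elsewhere in this header.
+ *
+ *   JSStackFrame *fp = NULL;
+ *   while (JS_FrameIterator(cx, &fp) != NULL) {
+ *       JSScript *script = JS_GetFrameScript(cx, fp);
+ *       if (script) {
+ *           const char *file = JS_GetScriptFilename(cx, script);
+ *           uintN line = JS_PCToLineNumber(cx, script,
+ *                                          JS_GetFramePC(cx, fp));
+ *       }
+ *   }
+ */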
+
+extern JS_PUBLIC_API(JSScript *)
+JS_GetFrameScript(JSContext *cx, JSStackFrame *fp);
+
+extern JS_PUBLIC_API(jsbytecode *)
+JS_GetFramePC(JSContext *cx, JSStackFrame *fp);
+
+/*
+ * Get the closest scripted frame below fp. If fp is null, start from cx->fp.
+ */
+extern JS_PUBLIC_API(JSStackFrame *)
+JS_GetScriptedCaller(JSContext *cx, JSStackFrame *fp);
+
+/*
+ * Return a weak reference to fp's principals. A null return does not denote
+ * an error, it means there are no principals.
+ */
+extern JS_PUBLIC_API(JSPrincipals *)
+JS_StackFramePrincipals(JSContext *cx, JSStackFrame *fp);
+
+/*
+ * This API is like JS_StackFramePrincipals(cx, caller), except that if
+ * cx->runtime->findObjectPrincipals is non-null, it returns the weaker of
+ * the caller's principals and the object principals of fp's callee function
+ * object (fp->argv[-2]), which is eval, Function, or a similar eval-like
+ * method. The caller parameter should be JS_GetScriptedCaller(cx, fp).
+ *
+ * All eval-like methods must use JS_EvalFramePrincipals to acquire a weak
+ * reference to the correct principals for the eval call to be secure, given
+ * an embedding that calls JS_SetObjectPrincipalsFinder (see jsapi.h).
+ */
+extern JS_PUBLIC_API(JSPrincipals *)
+JS_EvalFramePrincipals(JSContext *cx, JSStackFrame *fp, JSStackFrame *caller);
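+
+/*
+ * Usage sketch for an eval-like native, with illustrative variable names:
+ * pair the frame with its scripted caller so that the weaker of the two
+ * sets of principals is the one returned.
+ *
+ *   JSStackFrame *caller = JS_GetScriptedCaller(cx, fp);
+ *   JSPrincipals *principals = JS_EvalFramePrincipals(cx, fp, caller);
+ */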
+
+extern JS_PUBLIC_API(void *)
+JS_GetFrameAnnotation(JSContext *cx, JSStackFrame *fp);
+
+extern JS_PUBLIC_API(void)
+JS_SetFrameAnnotation(JSContext *cx, JSStackFrame *fp, void *annotation);
+
+extern JS_PUBLIC_API(void *)
+JS_GetFramePrincipalArray(JSContext *cx, JSStackFrame *fp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_IsNativeFrame(JSContext *cx, JSStackFrame *fp);
+
+/* this is deprecated, use JS_GetFrameScopeChain instead */
+extern JS_PUBLIC_API(JSObject *)
+JS_GetFrameObject(JSContext *cx, JSStackFrame *fp);
+
+extern JS_PUBLIC_API(JSObject *)
+JS_GetFrameScopeChain(JSContext *cx, JSStackFrame *fp);
+
+extern JS_PUBLIC_API(JSObject *)
+JS_GetFrameCallObject(JSContext *cx, JSStackFrame *fp);
+
+extern JS_PUBLIC_API(JSObject *)
+JS_GetFrameThis(JSContext *cx, JSStackFrame *fp);
+
+extern JS_PUBLIC_API(JSFunction *)
+JS_GetFrameFunction(JSContext *cx, JSStackFrame *fp);
+
+extern JS_PUBLIC_API(JSObject *)
+JS_GetFrameFunctionObject(JSContext *cx, JSStackFrame *fp);
+
+/* XXXrginda Initially published with typo */
+#define JS_IsContructorFrame JS_IsConstructorFrame
+extern JS_PUBLIC_API(JSBool)
+JS_IsConstructorFrame(JSContext *cx, JSStackFrame *fp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_IsDebuggerFrame(JSContext *cx, JSStackFrame *fp);
+
+extern JS_PUBLIC_API(jsval)
+JS_GetFrameReturnValue(JSContext *cx, JSStackFrame *fp);
+
+extern JS_PUBLIC_API(void)
+JS_SetFrameReturnValue(JSContext *cx, JSStackFrame *fp, jsval rval);
+
+/**
+ * Return fp's callee function object (fp->argv[-2]) if it has one.
+ */
+extern JS_PUBLIC_API(JSObject *)
+JS_GetFrameCalleeObject(JSContext *cx, JSStackFrame *fp);
+
+/************************************************************************/
+
+extern JS_PUBLIC_API(const char *)
+JS_GetScriptFilename(JSContext *cx, JSScript *script);
+
+extern JS_PUBLIC_API(uintN)
+JS_GetScriptBaseLineNumber(JSContext *cx, JSScript *script);
+
+extern JS_PUBLIC_API(uintN)
+JS_GetScriptLineExtent(JSContext *cx, JSScript *script);
+
+extern JS_PUBLIC_API(JSVersion)
+JS_GetScriptVersion(JSContext *cx, JSScript *script);
+
+/************************************************************************/
+
+/*
+ * Hook setters for script creation and destruction, see jsprvtd.h for the
+ * typedefs. These macros provide binary compatibility and newer, shorter
+ * synonyms.
+ */
+#define JS_SetNewScriptHook JS_SetNewScriptHookProc
+#define JS_SetDestroyScriptHook JS_SetDestroyScriptHookProc
+
+extern JS_PUBLIC_API(void)
+JS_SetNewScriptHook(JSRuntime *rt, JSNewScriptHook hook, void *callerdata);
+
+extern JS_PUBLIC_API(void)
+JS_SetDestroyScriptHook(JSRuntime *rt, JSDestroyScriptHook hook,
+ void *callerdata);
+
+/************************************************************************/
+
+extern JS_PUBLIC_API(JSBool)
+JS_EvaluateUCInStackFrame(JSContext *cx, JSStackFrame *fp,
+ const jschar *chars, uintN length,
+ const char *filename, uintN lineno,
+ jsval *rval);
+
+extern JS_PUBLIC_API(JSBool)
+JS_EvaluateInStackFrame(JSContext *cx, JSStackFrame *fp,
+ const char *bytes, uintN length,
+ const char *filename, uintN lineno,
+ jsval *rval);
+
+/************************************************************************/
+
+typedef struct JSPropertyDesc {
+ jsval id; /* primary id, a string or int */
+ jsval value; /* property value */
+ uint8 flags; /* flags, see below */
+ uint8 spare; /* unused */
+ uint16 slot; /* argument/variable slot */
+ jsval alias; /* alias id if JSPD_ALIAS flag */
+} JSPropertyDesc;
+
+#define JSPD_ENUMERATE 0x01 /* visible to for/in loop */
+#define JSPD_READONLY 0x02 /* assignment is error */
+#define JSPD_PERMANENT 0x04 /* property cannot be deleted */
+#define JSPD_ALIAS 0x08 /* property has an alias id */
+#define JSPD_ARGUMENT 0x10 /* argument to function */
+#define JSPD_VARIABLE 0x20 /* local variable in function */
+#define JSPD_EXCEPTION 0x40 /* exception occurred fetching the property, */
+ /* value is exception */
+#define JSPD_ERROR 0x80 /* native getter returned JS_FALSE without */
+ /* throwing an exception */
+
+typedef struct JSPropertyDescArray {
+ uint32 length; /* number of elements in array */
+ JSPropertyDesc *array; /* alloc'd by Get, freed by Put */
+} JSPropertyDescArray;
+
+extern JS_PUBLIC_API(JSScopeProperty *)
+JS_PropertyIterator(JSObject *obj, JSScopeProperty **iteratorp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_GetPropertyDesc(JSContext *cx, JSObject *obj, JSScopeProperty *sprop,
+ JSPropertyDesc *pd);
+
+extern JS_PUBLIC_API(JSBool)
+JS_GetPropertyDescArray(JSContext *cx, JSObject *obj, JSPropertyDescArray *pda);
+
+extern JS_PUBLIC_API(void)
+JS_PutPropertyDescArray(JSContext *cx, JSPropertyDescArray *pda);
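+
+/*
+ * Usage sketch, assuming a live cx and a native obj; UseProperty stands in
+ * for whatever the embedding does with each descriptor. Every successful Get
+ * must be balanced by a Put, which drops the GC roots and frees the array.
+ *
+ *   JSPropertyDescArray pda;
+ *   uint32 i;
+ *   if (JS_GetPropertyDescArray(cx, obj, &pda)) {
+ *       for (i = 0; i < pda.length; i++) {
+ *           JSPropertyDesc *pd = &pda.array[i];
+ *           if (pd->flags & JSPD_ENUMERATE)
+ *               UseProperty(cx, pd->id, pd->value);
+ *       }
+ *       JS_PutPropertyDescArray(cx, &pda);
+ *   }
+ */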
+
+/************************************************************************/
+
+extern JS_PUBLIC_API(JSBool)
+JS_SetDebuggerHandler(JSRuntime *rt, JSTrapHandler handler, void *closure);
+
+extern JS_PUBLIC_API(JSBool)
+JS_SetSourceHandler(JSRuntime *rt, JSSourceHandler handler, void *closure);
+
+extern JS_PUBLIC_API(JSBool)
+JS_SetExecuteHook(JSRuntime *rt, JSInterpreterHook hook, void *closure);
+
+extern JS_PUBLIC_API(JSBool)
+JS_SetCallHook(JSRuntime *rt, JSInterpreterHook hook, void *closure);
+
+extern JS_PUBLIC_API(JSBool)
+JS_SetObjectHook(JSRuntime *rt, JSObjectHook hook, void *closure);
+
+extern JS_PUBLIC_API(JSBool)
+JS_SetThrowHook(JSRuntime *rt, JSTrapHandler hook, void *closure);
+
+extern JS_PUBLIC_API(JSBool)
+JS_SetDebugErrorHook(JSRuntime *rt, JSDebugErrorHook hook, void *closure);
+
+/************************************************************************/
+
+extern JS_PUBLIC_API(size_t)
+JS_GetObjectTotalSize(JSContext *cx, JSObject *obj);
+
+extern JS_PUBLIC_API(size_t)
+JS_GetFunctionTotalSize(JSContext *cx, JSFunction *fun);
+
+extern JS_PUBLIC_API(size_t)
+JS_GetScriptTotalSize(JSContext *cx, JSScript *script);
+
+/*
+ * Get the top-most running script on cx starting from fp, or from the top of
+ * cx's frame stack if fp is null, and return its script filename flags. If
+ * the script has a null filename member, return JSFILENAME_NULL.
+ */
+extern JS_PUBLIC_API(uint32)
+JS_GetTopScriptFilenameFlags(JSContext *cx, JSStackFrame *fp);
+
+/*
+ * Get the script filename flags for the script. If the script doesn't have a
+ * filename, return JSFILENAME_NULL.
+ */
+extern JS_PUBLIC_API(uint32)
+JS_GetScriptFilenameFlags(JSScript *script);
+
+/*
+ * Associate flags with a script filename prefix in rt, so that any subsequent
+ * script compilation will inherit those flags if the script's filename is the
+ * same as prefix, or if prefix is a substring of the script's filename.
+ *
+ * The API defines only one flag bit, JSFILENAME_SYSTEM, leaving the remaining
+ * 31 bits up to the API client to define. The union of all 32 bits must not
+ * be a legal combination, however, in order to preserve JSFILENAME_NULL as a
+ * unique value. API clients may depend on JSFILENAME_SYSTEM being a set bit
+ * in JSFILENAME_NULL -- a script with a null filename member is presumed to
+ * be a "system" script.
+ */
+extern JS_PUBLIC_API(JSBool)
+JS_FlagScriptFilenamePrefix(JSRuntime *rt, const char *prefix, uint32 flags);
+
+#define JSFILENAME_NULL 0xffffffff /* null script filename */
+#define JSFILENAME_SYSTEM 0x00000001 /* "system" script, see below */
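+
+/*
+ * Usage sketch: flag a filename prefix at startup, then test the flags of
+ * the top running script when making a policy decision. The "trusted:"
+ * prefix and GrantPrivileges are illustrative, embedding-defined names.
+ *
+ *   if (!JS_FlagScriptFilenamePrefix(rt, "trusted:", JSFILENAME_SYSTEM))
+ *       return JS_FALSE;
+ *
+ *   if (JS_GetTopScriptFilenameFlags(cx, NULL) & JSFILENAME_SYSTEM)
+ *       GrantPrivileges(cx);
+ */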
+
+/*
+ * Return true if obj is a "system" object, that is, one flagged by a prior
+ * call to JS_FlagSystemObject(cx, obj). What "system" means is up to the API
+ * client, but it can be used to coordinate access control policies based on
+ * script filenames and their prefixes, using JS_FlagScriptFilenamePrefix and
+ * JS_GetTopScriptFilenameFlags.
+ */
+extern JS_PUBLIC_API(JSBool)
+JS_IsSystemObject(JSContext *cx, JSObject *obj);
+
+/*
+ * Flag obj as a "system" object. The API client can flag system objects to
+ * optimize access control checks. The engine stores but does not interpret
+ * the per-object flag set by this call.
+ */
+extern JS_PUBLIC_API(void)
+JS_FlagSystemObject(JSContext *cx, JSObject *obj);
+
+JS_END_EXTERN_C
+
+#endif /* jsdbgapi_h___ */
diff --git a/third_party/js-1.7/jsdhash.c b/third_party/js-1.7/jsdhash.c
new file mode 100644
index 0000000..295883b
--- /dev/null
+++ b/third_party/js-1.7/jsdhash.c
@@ -0,0 +1,826 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla JavaScript code.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1999-2001
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ * Brendan Eich <brendan@mozilla.org> (Original Author)
+ * Chris Waterson <waterson@netscape.com>
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * Double hashing implementation.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "jsbit.h"
+#include "jsdhash.h"
+#include "jsutil.h" /* for JS_ASSERT */
+
+#ifdef JS_DHASHMETER
+# if defined MOZILLA_CLIENT && defined DEBUG_XXXbrendan
+# include "nsTraceMalloc.h"
+# endif
+# define METER(x) x
+#else
+# define METER(x) /* nothing */
+#endif
+
+/*
+ * The following DEBUG-only code is used to assert that calls to one of
+ * table->ops or to an enumerator do not cause re-entry into a call that
+ * can mutate the table. The recursion level is stored in additional
+ * space allocated at the end of the entry store rather than in a new
+ * JSDHashTable member, because growing the struct in DEBUG builds would
+ * break binary compatibility between DEBUG and non-DEBUG components.
+ */
+#ifdef DEBUG
+
+#define RECURSION_LEVEL(table_) (*(uint32*)(table_->entryStore + \
+ JS_DHASH_TABLE_SIZE(table_) * \
+ table_->entrySize))
+
+#define ENTRY_STORE_EXTRA sizeof(uint32)
+#define INCREMENT_RECURSION_LEVEL(table_) (++RECURSION_LEVEL(table_))
+#define DECREMENT_RECURSION_LEVEL(table_) (--RECURSION_LEVEL(table_))
+
+#else
+
+#define ENTRY_STORE_EXTRA 0
+#define INCREMENT_RECURSION_LEVEL(table_) ((void)1)
+#define DECREMENT_RECURSION_LEVEL(table_) ((void)0)
+
+#endif /* defined(DEBUG) */
+
+JS_PUBLIC_API(void *)
+JS_DHashAllocTable(JSDHashTable *table, uint32 nbytes)
+{
+ return malloc(nbytes);
+}
+
+JS_PUBLIC_API(void)
+JS_DHashFreeTable(JSDHashTable *table, void *ptr)
+{
+ free(ptr);
+}
+
+JS_PUBLIC_API(JSDHashNumber)
+JS_DHashStringKey(JSDHashTable *table, const void *key)
+{
+ JSDHashNumber h;
+ const unsigned char *s;
+
+ h = 0;
+ for (s = key; *s != '\0'; s++)
+ h = (h >> (JS_DHASH_BITS - 4)) ^ (h << 4) ^ *s;
+ return h;
+}
+
+JS_PUBLIC_API(const void *)
+JS_DHashGetKeyStub(JSDHashTable *table, JSDHashEntryHdr *entry)
+{
+ JSDHashEntryStub *stub = (JSDHashEntryStub *)entry;
+
+ return stub->key;
+}
+
+JS_PUBLIC_API(JSDHashNumber)
+JS_DHashVoidPtrKeyStub(JSDHashTable *table, const void *key)
+{
+ return (JSDHashNumber)(unsigned long)key >> 2;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_DHashMatchEntryStub(JSDHashTable *table,
+ const JSDHashEntryHdr *entry,
+ const void *key)
+{
+ const JSDHashEntryStub *stub = (const JSDHashEntryStub *)entry;
+
+ return stub->key == key;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_DHashMatchStringKey(JSDHashTable *table,
+ const JSDHashEntryHdr *entry,
+ const void *key)
+{
+ const JSDHashEntryStub *stub = (const JSDHashEntryStub *)entry;
+
+ /* XXX tolerate null keys on account of sloppy Mozilla callers. */
+ return stub->key == key ||
+ (stub->key && key && strcmp(stub->key, key) == 0);
+}
+
+JS_PUBLIC_API(void)
+JS_DHashMoveEntryStub(JSDHashTable *table,
+ const JSDHashEntryHdr *from,
+ JSDHashEntryHdr *to)
+{
+ memcpy(to, from, table->entrySize);
+}
+
+JS_PUBLIC_API(void)
+JS_DHashClearEntryStub(JSDHashTable *table, JSDHashEntryHdr *entry)
+{
+ memset(entry, 0, table->entrySize);
+}
+
+JS_PUBLIC_API(void)
+JS_DHashFreeStringKey(JSDHashTable *table, JSDHashEntryHdr *entry)
+{
+ const JSDHashEntryStub *stub = (const JSDHashEntryStub *)entry;
+
+ free((void *) stub->key);
+ memset(entry, 0, table->entrySize);
+}
+
+JS_PUBLIC_API(void)
+JS_DHashFinalizeStub(JSDHashTable *table)
+{
+}
+
+static const JSDHashTableOps stub_ops = {
+ JS_DHashAllocTable,
+ JS_DHashFreeTable,
+ JS_DHashGetKeyStub,
+ JS_DHashVoidPtrKeyStub,
+ JS_DHashMatchEntryStub,
+ JS_DHashMoveEntryStub,
+ JS_DHashClearEntryStub,
+ JS_DHashFinalizeStub,
+ NULL
+};
+
+JS_PUBLIC_API(const JSDHashTableOps *)
+JS_DHashGetStubOps(void)
+{
+ return &stub_ops;
+}
+
+JS_PUBLIC_API(JSDHashTable *)
+JS_NewDHashTable(const JSDHashTableOps *ops, void *data, uint32 entrySize,
+ uint32 capacity)
+{
+ JSDHashTable *table;
+
+ table = (JSDHashTable *) malloc(sizeof *table);
+ if (!table)
+ return NULL;
+ if (!JS_DHashTableInit(table, ops, data, entrySize, capacity)) {
+ free(table);
+ return NULL;
+ }
+ return table;
+}
+
+JS_PUBLIC_API(void)
+JS_DHashTableDestroy(JSDHashTable *table)
+{
+ JS_DHashTableFinish(table);
+ free(table);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_DHashTableInit(JSDHashTable *table, const JSDHashTableOps *ops, void *data,
+ uint32 entrySize, uint32 capacity)
+{
+ int log2;
+ uint32 nbytes;
+
+#ifdef DEBUG
+ if (entrySize > 10 * sizeof(void *)) {
+ fprintf(stderr,
+ "jsdhash: for the table at address %p, the given entrySize"
+ " of %lu %s favors chaining over double hashing.\n",
+ (void *)table,
+ (unsigned long) entrySize,
+ (entrySize > 16 * sizeof(void*)) ? "definitely" : "probably");
+ }
+#endif
+
+ table->ops = ops;
+ table->data = data;
+ if (capacity < JS_DHASH_MIN_SIZE)
+ capacity = JS_DHASH_MIN_SIZE;
+
+ JS_CEILING_LOG2(log2, capacity);
+
+ capacity = JS_BIT(log2);
+ if (capacity >= JS_DHASH_SIZE_LIMIT)
+ return JS_FALSE;
+ table->hashShift = JS_DHASH_BITS - log2;
+ table->maxAlphaFrac = 0xC0; /* .75 */
+ table->minAlphaFrac = 0x40; /* .25 */
+ table->entrySize = entrySize;
+ table->entryCount = table->removedCount = 0;
+ table->generation = 0;
+ nbytes = capacity * entrySize;
+
+ table->entryStore = ops->allocTable(table, nbytes + ENTRY_STORE_EXTRA);
+ if (!table->entryStore)
+ return JS_FALSE;
+ memset(table->entryStore, 0, nbytes);
+ METER(memset(&table->stats, 0, sizeof table->stats));
+
+#ifdef DEBUG
+ RECURSION_LEVEL(table) = 0;
+#endif
+
+ return JS_TRUE;
+}
+
+/*
+ * Compute max and min load numbers (entry counts) from table params.
+ */
+#define MAX_LOAD(table, size) (((table)->maxAlphaFrac * (size)) >> 8)
+#define MIN_LOAD(table, size) (((table)->minAlphaFrac * (size)) >> 8)
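+
+/*
+ * Worked example (illustrative): with the defaults set in JS_DHashTableInit,
+ * maxAlphaFrac = 0xC0 (.75) and minAlphaFrac = 0x40 (.25), a table of size 32
+ * has MAX_LOAD = (0xC0 * 32) >> 8 = 24 and MIN_LOAD = (0x40 * 32) >> 8 = 8,
+ * so JS_DHASH_ADD grows it once entryCount + removedCount reaches 24, and
+ * JS_DHASH_REMOVE may shrink it when entryCount drops to 8.
+ */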
+
+JS_PUBLIC_API(void)
+JS_DHashTableSetAlphaBounds(JSDHashTable *table,
+ float maxAlpha,
+ float minAlpha)
+{
+ uint32 size;
+
+ /*
+ * Reject obviously insane bounds, rather than trying to guess what the
+ * buggy caller intended.
+ */
+ JS_ASSERT(0.5 <= maxAlpha && maxAlpha < 1 && 0 <= minAlpha);
+ if (maxAlpha < 0.5 || 1 <= maxAlpha || minAlpha < 0)
+ return;
+
+ /*
+ * Ensure that at least one entry will always be free. If maxAlpha at
+ * minimum size leaves no entries free, reduce maxAlpha based on minimum
+ * size and the precision limit of maxAlphaFrac's fixed point format.
+ */
+ JS_ASSERT(JS_DHASH_MIN_SIZE - (maxAlpha * JS_DHASH_MIN_SIZE) >= 1);
+ if (JS_DHASH_MIN_SIZE - (maxAlpha * JS_DHASH_MIN_SIZE) < 1) {
+ maxAlpha = (float)
+ (JS_DHASH_MIN_SIZE - JS_MAX(JS_DHASH_MIN_SIZE / 256, 1))
+ / JS_DHASH_MIN_SIZE;
+ }
+
+ /*
+ * Ensure that minAlpha is strictly less than half maxAlpha. Take care
+ * not to truncate an entry's worth of alpha when storing in minAlphaFrac
+ * (8-bit fixed point format).
+ */
+ JS_ASSERT(minAlpha < maxAlpha / 2);
+ if (minAlpha >= maxAlpha / 2) {
+ size = JS_DHASH_TABLE_SIZE(table);
+ minAlpha = (size * maxAlpha - JS_MAX(size / 256, 1)) / (2 * size);
+ }
+
+ table->maxAlphaFrac = (uint8)(maxAlpha * 256);
+ table->minAlphaFrac = (uint8)(minAlpha * 256);
+}
+
+/*
+ * Double hashing needs the second hash code to be relatively prime to table
+ * size, so we simply make hash2 odd.
+ */
+#define HASH1(hash0, shift) ((hash0) >> (shift))
+#define HASH2(hash0,log2,shift) ((((hash0) << (log2)) >> (shift)) | 1)
+
+/*
+ * Reserve keyHash 0 for free entries and 1 for removed-entry sentinels. Note
+ * that a removed-entry sentinel need be stored only if the removed entry had
+ * a colliding entry added after it. Therefore we can use 1 as the collision
+ * flag in addition to the removed-entry sentinel value. Multiplicative hash
+ * uses the high order bits of keyHash, so this least-significant reservation
+ * should not hurt the hash function's effectiveness much.
+ *
+ * If you change any of these magic numbers, also update JS_DHASH_ENTRY_IS_LIVE
+ * in jsdhash.h. It used to be private to jsdhash.c, but then became public to
+ * assist iterator writers who inspect table->entryStore directly.
+ */
+#define COLLISION_FLAG ((JSDHashNumber) 1)
+#define MARK_ENTRY_FREE(entry) ((entry)->keyHash = 0)
+#define MARK_ENTRY_REMOVED(entry) ((entry)->keyHash = 1)
+#define ENTRY_IS_REMOVED(entry) ((entry)->keyHash == 1)
+#define ENTRY_IS_LIVE(entry) JS_DHASH_ENTRY_IS_LIVE(entry)
+#define ENSURE_LIVE_KEYHASH(hash0) if (hash0 < 2) hash0 -= 2; else (void)0
+
+/* Match an entry's keyHash against an unstored one computed from a key. */
+#define MATCH_ENTRY_KEYHASH(entry,hash0) \
+ (((entry)->keyHash & ~COLLISION_FLAG) == (hash0))
+
+/* Compute the address of the indexed entry in table. */
+#define ADDRESS_ENTRY(table, index) \
+ ((JSDHashEntryHdr *)((table)->entryStore + (index) * (table)->entrySize))
+
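+/*
+ * Worked example (illustrative): for a table of size 16 (sizeLog2 = 4,
+ * hashShift = 28), HASH1 is the top 4 bits of keyHash and HASH2 is the next
+ * 4 bits with the low bit forced on, so hash2 is odd and hence relatively
+ * prime to the power-of-two table size.  SearchTable then probes
+ * hash1, (hash1 - hash2) & 15, (hash1 - 2*hash2) & 15, ... until it hits a
+ * free entry or a matching key.
+ */
+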
+JS_PUBLIC_API(void)
+JS_DHashTableFinish(JSDHashTable *table)
+{
+ char *entryAddr, *entryLimit;
+ uint32 entrySize;
+ JSDHashEntryHdr *entry;
+
+#ifdef DEBUG_XXXbrendan
+ static FILE *dumpfp = NULL;
+ if (!dumpfp) dumpfp = fopen("/tmp/jsdhash.bigdump", "w");
+ if (dumpfp) {
+#ifdef MOZILLA_CLIENT
+ NS_TraceStack(1, dumpfp);
+#endif
+ JS_DHashTableDumpMeter(table, NULL, dumpfp);
+ fputc('\n', dumpfp);
+ }
+#endif
+
+ INCREMENT_RECURSION_LEVEL(table);
+
+ /* Call finalize before clearing entries, so it can enumerate them. */
+ table->ops->finalize(table);
+
+ /* Clear any remaining live entries. */
+ entryAddr = table->entryStore;
+ entrySize = table->entrySize;
+ entryLimit = entryAddr + JS_DHASH_TABLE_SIZE(table) * entrySize;
+ while (entryAddr < entryLimit) {
+ entry = (JSDHashEntryHdr *)entryAddr;
+ if (ENTRY_IS_LIVE(entry)) {
+ METER(table->stats.removeEnums++);
+ table->ops->clearEntry(table, entry);
+ }
+ entryAddr += entrySize;
+ }
+
+ DECREMENT_RECURSION_LEVEL(table);
+ JS_ASSERT(RECURSION_LEVEL(table) == 0);
+
+ /* Free entry storage last. */
+ table->ops->freeTable(table, table->entryStore);
+}
+
+static JSDHashEntryHdr * JS_DHASH_FASTCALL
+SearchTable(JSDHashTable *table, const void *key, JSDHashNumber keyHash,
+ JSDHashOperator op)
+{
+ JSDHashNumber hash1, hash2;
+ int hashShift, sizeLog2;
+ JSDHashEntryHdr *entry, *firstRemoved;
+ JSDHashMatchEntry matchEntry;
+ uint32 sizeMask;
+
+ METER(table->stats.searches++);
+ JS_ASSERT(!(keyHash & COLLISION_FLAG));
+
+ /* Compute the primary hash address. */
+ hashShift = table->hashShift;
+ hash1 = HASH1(keyHash, hashShift);
+ entry = ADDRESS_ENTRY(table, hash1);
+
+ /* Miss: return space for a new entry. */
+ if (JS_DHASH_ENTRY_IS_FREE(entry)) {
+ METER(table->stats.misses++);
+ return entry;
+ }
+
+ /* Hit: return entry. */
+ matchEntry = table->ops->matchEntry;
+ if (MATCH_ENTRY_KEYHASH(entry, keyHash) && matchEntry(table, entry, key)) {
+ METER(table->stats.hits++);
+ return entry;
+ }
+
+ /* Collision: double hash. */
+ sizeLog2 = JS_DHASH_BITS - table->hashShift;
+ hash2 = HASH2(keyHash, sizeLog2, hashShift);
+ sizeMask = JS_BITMASK(sizeLog2);
+
+ /* Save the first removed entry pointer so JS_DHASH_ADD can recycle it. */
+ if (ENTRY_IS_REMOVED(entry)) {
+ firstRemoved = entry;
+ } else {
+ firstRemoved = NULL;
+ if (op == JS_DHASH_ADD)
+ entry->keyHash |= COLLISION_FLAG;
+ }
+
+ for (;;) {
+ METER(table->stats.steps++);
+ hash1 -= hash2;
+ hash1 &= sizeMask;
+
+ entry = ADDRESS_ENTRY(table, hash1);
+ if (JS_DHASH_ENTRY_IS_FREE(entry)) {
+ METER(table->stats.misses++);
+ return (firstRemoved && op == JS_DHASH_ADD) ? firstRemoved : entry;
+ }
+
+ if (MATCH_ENTRY_KEYHASH(entry, keyHash) &&
+ matchEntry(table, entry, key)) {
+ METER(table->stats.hits++);
+ return entry;
+ }
+
+ if (ENTRY_IS_REMOVED(entry)) {
+ if (!firstRemoved)
+ firstRemoved = entry;
+ } else {
+ if (op == JS_DHASH_ADD)
+ entry->keyHash |= COLLISION_FLAG;
+ }
+ }
+
+ /* NOTREACHED */
+ return NULL;
+}
+
+static JSBool
+ChangeTable(JSDHashTable *table, int deltaLog2)
+{
+ int oldLog2, newLog2;
+ uint32 oldCapacity, newCapacity;
+ char *newEntryStore, *oldEntryStore, *oldEntryAddr;
+ uint32 entrySize, i, nbytes;
+ JSDHashEntryHdr *oldEntry, *newEntry;
+ JSDHashGetKey getKey;
+ JSDHashMoveEntry moveEntry;
+#ifdef DEBUG
+ uint32 recursionLevel;
+#endif
+
+ /* Look, but don't touch, until we succeed in getting new entry store. */
+ oldLog2 = JS_DHASH_BITS - table->hashShift;
+ newLog2 = oldLog2 + deltaLog2;
+ oldCapacity = JS_BIT(oldLog2);
+ newCapacity = JS_BIT(newLog2);
+ if (newCapacity >= JS_DHASH_SIZE_LIMIT)
+ return JS_FALSE;
+ entrySize = table->entrySize;
+ nbytes = newCapacity * entrySize;
+
+ newEntryStore = table->ops->allocTable(table, nbytes + ENTRY_STORE_EXTRA);
+ if (!newEntryStore)
+ return JS_FALSE;
+
+ /* We can't fail from here on, so update table parameters. */
+#ifdef DEBUG
+ recursionLevel = RECURSION_LEVEL(table);
+#endif
+ table->hashShift = JS_DHASH_BITS - newLog2;
+ table->removedCount = 0;
+ table->generation++;
+
+ /* Assign the new entry store to table. */
+ memset(newEntryStore, 0, nbytes);
+ oldEntryAddr = oldEntryStore = table->entryStore;
+ table->entryStore = newEntryStore;
+ getKey = table->ops->getKey;
+ moveEntry = table->ops->moveEntry;
+#ifdef DEBUG
+ RECURSION_LEVEL(table) = recursionLevel;
+#endif
+
+ /* Copy only live entries, leaving removed ones behind. */
+ for (i = 0; i < oldCapacity; i++) {
+ oldEntry = (JSDHashEntryHdr *)oldEntryAddr;
+ if (ENTRY_IS_LIVE(oldEntry)) {
+ oldEntry->keyHash &= ~COLLISION_FLAG;
+ newEntry = SearchTable(table, getKey(table, oldEntry),
+ oldEntry->keyHash, JS_DHASH_ADD);
+ JS_ASSERT(JS_DHASH_ENTRY_IS_FREE(newEntry));
+ moveEntry(table, oldEntry, newEntry);
+ newEntry->keyHash = oldEntry->keyHash;
+ }
+ oldEntryAddr += entrySize;
+ }
+
+ table->ops->freeTable(table, oldEntryStore);
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(JSDHashEntryHdr *) JS_DHASH_FASTCALL
+JS_DHashTableOperate(JSDHashTable *table, const void *key, JSDHashOperator op)
+{
+ JSDHashNumber keyHash;
+ JSDHashEntryHdr *entry;
+ uint32 size;
+ int deltaLog2;
+
+ JS_ASSERT(op == JS_DHASH_LOOKUP || RECURSION_LEVEL(table) == 0);
+ INCREMENT_RECURSION_LEVEL(table);
+
+ keyHash = table->ops->hashKey(table, key);
+ keyHash *= JS_DHASH_GOLDEN_RATIO;
+
+ /* Avoid 0 and 1 hash codes, they indicate free and removed entries. */
+ ENSURE_LIVE_KEYHASH(keyHash);
+ keyHash &= ~COLLISION_FLAG;
+
+ switch (op) {
+ case JS_DHASH_LOOKUP:
+ METER(table->stats.lookups++);
+ entry = SearchTable(table, key, keyHash, op);
+ break;
+
+ case JS_DHASH_ADD:
+ /*
+ * If alpha is >= .75, grow or compress the table. If key is already
+ * in the table, we may grow once more than necessary, but only if we
+ * are on the edge of being overloaded.
+ */
+ size = JS_DHASH_TABLE_SIZE(table);
+ if (table->entryCount + table->removedCount >= MAX_LOAD(table, size)) {
+ /* Compress if a quarter or more of all entries are removed. */
+ if (table->removedCount >= size >> 2) {
+ METER(table->stats.compresses++);
+ deltaLog2 = 0;
+ } else {
+ METER(table->stats.grows++);
+ deltaLog2 = 1;
+ }
+
+ /*
+ * Grow or compress table, returning null if ChangeTable fails and
+ * falling through might claim the last free entry.
+ */
+ if (!ChangeTable(table, deltaLog2) &&
+ table->entryCount + table->removedCount == size - 1) {
+ METER(table->stats.addFailures++);
+ entry = NULL;
+ break;
+ }
+ }
+
+ /*
+ * Look for entry after possibly growing, so we don't have to add it,
+ * then skip it while growing the table and re-add it after.
+ */
+ entry = SearchTable(table, key, keyHash, op);
+ if (!ENTRY_IS_LIVE(entry)) {
+ /* Initialize the entry, indicating that it's no longer free. */
+ METER(table->stats.addMisses++);
+ if (ENTRY_IS_REMOVED(entry)) {
+ METER(table->stats.addOverRemoved++);
+ table->removedCount--;
+ keyHash |= COLLISION_FLAG;
+ }
+ if (table->ops->initEntry &&
+ !table->ops->initEntry(table, entry, key)) {
+ /* We haven't claimed entry yet; fail with null return. */
+ memset(entry + 1, 0, table->entrySize - sizeof *entry);
+ entry = NULL;
+ break;
+ }
+ entry->keyHash = keyHash;
+ table->entryCount++;
+ }
+ METER(else table->stats.addHits++);
+ break;
+
+ case JS_DHASH_REMOVE:
+ entry = SearchTable(table, key, keyHash, op);
+ if (ENTRY_IS_LIVE(entry)) {
+ /* Clear this entry and mark it as "removed". */
+ METER(table->stats.removeHits++);
+ JS_DHashTableRawRemove(table, entry);
+
+ /* Shrink if alpha is <= .25 and table isn't too small already. */
+ size = JS_DHASH_TABLE_SIZE(table);
+ if (size > JS_DHASH_MIN_SIZE &&
+ table->entryCount <= MIN_LOAD(table, size)) {
+ METER(table->stats.shrinks++);
+ (void) ChangeTable(table, -1);
+ }
+ }
+ METER(else table->stats.removeMisses++);
+ entry = NULL;
+ break;
+
+ default:
+ JS_ASSERT(0);
+ entry = NULL;
+ }
+
+ DECREMENT_RECURSION_LEVEL(table);
+
+ return entry;
+}
+
+JS_PUBLIC_API(void)
+JS_DHashTableRawRemove(JSDHashTable *table, JSDHashEntryHdr *entry)
+{
+ JSDHashNumber keyHash; /* load first in case clearEntry goofs it */
+
+ JS_ASSERT(JS_DHASH_ENTRY_IS_LIVE(entry));
+ keyHash = entry->keyHash;
+ table->ops->clearEntry(table, entry);
+ if (keyHash & COLLISION_FLAG) {
+ MARK_ENTRY_REMOVED(entry);
+ table->removedCount++;
+ } else {
+ METER(table->stats.removeFrees++);
+ MARK_ENTRY_FREE(entry);
+ }
+ table->entryCount--;
+}
+
+JS_PUBLIC_API(uint32)
+JS_DHashTableEnumerate(JSDHashTable *table, JSDHashEnumerator etor, void *arg)
+{
+ char *entryAddr, *entryLimit;
+ uint32 i, capacity, entrySize, ceiling;
+ JSBool didRemove;
+ JSDHashEntryHdr *entry;
+ JSDHashOperator op;
+
+ INCREMENT_RECURSION_LEVEL(table);
+
+ entryAddr = table->entryStore;
+ entrySize = table->entrySize;
+ capacity = JS_DHASH_TABLE_SIZE(table);
+ entryLimit = entryAddr + capacity * entrySize;
+ i = 0;
+ didRemove = JS_FALSE;
+ while (entryAddr < entryLimit) {
+ entry = (JSDHashEntryHdr *)entryAddr;
+ if (ENTRY_IS_LIVE(entry)) {
+ op = etor(table, entry, i++, arg);
+ if (op & JS_DHASH_REMOVE) {
+ METER(table->stats.removeEnums++);
+ JS_DHashTableRawRemove(table, entry);
+ didRemove = JS_TRUE;
+ }
+ if (op & JS_DHASH_STOP)
+ break;
+ }
+ entryAddr += entrySize;
+ }
+
+ JS_ASSERT(!didRemove || RECURSION_LEVEL(table) == 1);
+
+ /*
+ * Shrink or compress if a quarter or more of all entries are removed, or
+ * if the table is underloaded according to the configured minimum alpha,
+ * and is not minimal-size already. Do this only if we removed above, so
+ * non-removing enumerations can count on stable table->entryStore until
+ * the next non-lookup-Operate or removing-Enumerate.
+ */
+ if (didRemove &&
+ (table->removedCount >= capacity >> 2 ||
+ (capacity > JS_DHASH_MIN_SIZE &&
+ table->entryCount <= MIN_LOAD(table, capacity)))) {
+ METER(table->stats.enumShrinks++);
+ capacity = table->entryCount;
+ capacity += capacity >> 1;
+ if (capacity < JS_DHASH_MIN_SIZE)
+ capacity = JS_DHASH_MIN_SIZE;
+
+ JS_CEILING_LOG2(ceiling, capacity);
+ ceiling -= JS_DHASH_BITS - table->hashShift;
+
+ (void) ChangeTable(table, ceiling);
+ }
+
+ DECREMENT_RECURSION_LEVEL(table);
+
+ return i;
+}
+
+#ifdef JS_DHASHMETER
+#include <math.h>
+
+JS_PUBLIC_API(void)
+JS_DHashTableDumpMeter(JSDHashTable *table, JSDHashEnumerator dump, FILE *fp)
+{
+ char *entryAddr;
+ uint32 entrySize, entryCount;
+ int hashShift, sizeLog2;
+ uint32 i, tableSize, sizeMask, chainLen, maxChainLen, chainCount;
+ JSDHashNumber hash1, hash2, saveHash1, maxChainHash1, maxChainHash2;
+ double sqsum, mean, variance, sigma;
+ JSDHashEntryHdr *entry, *probe;
+
+ entryAddr = table->entryStore;
+ entrySize = table->entrySize;
+ hashShift = table->hashShift;
+ sizeLog2 = JS_DHASH_BITS - hashShift;
+ tableSize = JS_DHASH_TABLE_SIZE(table);
+ sizeMask = JS_BITMASK(sizeLog2);
+ chainCount = maxChainLen = 0;
+ hash2 = 0;
+ sqsum = 0;
+
+ for (i = 0; i < tableSize; i++) {
+ entry = (JSDHashEntryHdr *)entryAddr;
+ entryAddr += entrySize;
+ if (!ENTRY_IS_LIVE(entry))
+ continue;
+ hash1 = HASH1(entry->keyHash & ~COLLISION_FLAG, hashShift);
+ saveHash1 = hash1;
+ probe = ADDRESS_ENTRY(table, hash1);
+ chainLen = 1;
+ if (probe == entry) {
+ /* Start of a (possibly unit-length) chain. */
+ chainCount++;
+ } else {
+ hash2 = HASH2(entry->keyHash & ~COLLISION_FLAG, sizeLog2,
+ hashShift);
+ do {
+ chainLen++;
+ hash1 -= hash2;
+ hash1 &= sizeMask;
+ probe = ADDRESS_ENTRY(table, hash1);
+ } while (probe != entry);
+ }
+ sqsum += chainLen * chainLen;
+ if (chainLen > maxChainLen) {
+ maxChainLen = chainLen;
+ maxChainHash1 = saveHash1;
+ maxChainHash2 = hash2;
+ }
+ }
+
+ entryCount = table->entryCount;
+ if (entryCount && chainCount) {
+ mean = (double)entryCount / chainCount;
+ variance = chainCount * sqsum - entryCount * entryCount;
+ if (variance < 0 || chainCount == 1)
+ variance = 0;
+ else
+ variance /= chainCount * (chainCount - 1);
+ sigma = sqrt(variance);
+ } else {
+ mean = sigma = 0;
+ }
+
+ fprintf(fp, "Double hashing statistics:\n");
+ fprintf(fp, " table size (in entries): %u\n", tableSize);
+ fprintf(fp, " number of entries: %u\n", table->entryCount);
+ fprintf(fp, " number of removed entries: %u\n", table->removedCount);
+ fprintf(fp, " number of searches: %u\n", table->stats.searches);
+ fprintf(fp, " number of hits: %u\n", table->stats.hits);
+ fprintf(fp, " number of misses: %u\n", table->stats.misses);
+ fprintf(fp, " mean steps per search: %g\n", table->stats.searches ?
+ (double)table->stats.steps
+ / table->stats.searches :
+ 0.);
+ fprintf(fp, " mean hash chain length: %g\n", mean);
+ fprintf(fp, " standard deviation: %g\n", sigma);
+ fprintf(fp, " maximum hash chain length: %u\n", maxChainLen);
+ fprintf(fp, " number of lookups: %u\n", table->stats.lookups);
+ fprintf(fp, " adds that made a new entry: %u\n", table->stats.addMisses);
+ fprintf(fp, "adds that recycled removeds: %u\n", table->stats.addOverRemoved);
+ fprintf(fp, " adds that found an entry: %u\n", table->stats.addHits);
+ fprintf(fp, " add failures: %u\n", table->stats.addFailures);
+ fprintf(fp, " useful removes: %u\n", table->stats.removeHits);
+ fprintf(fp, " useless removes: %u\n", table->stats.removeMisses);
+ fprintf(fp, "removes that freed an entry: %u\n", table->stats.removeFrees);
+ fprintf(fp, " removes while enumerating: %u\n", table->stats.removeEnums);
+ fprintf(fp, " number of grows: %u\n", table->stats.grows);
+ fprintf(fp, " number of shrinks: %u\n", table->stats.shrinks);
+ fprintf(fp, " number of compresses: %u\n", table->stats.compresses);
+ fprintf(fp, "number of enumerate shrinks: %u\n", table->stats.enumShrinks);
+
+ if (dump && maxChainLen && hash2) {
+ fputs("Maximum hash chain:\n", fp);
+ hash1 = maxChainHash1;
+ hash2 = maxChainHash2;
+ entry = ADDRESS_ENTRY(table, hash1);
+ i = 0;
+ do {
+ if (dump(table, entry, i++, fp) != JS_DHASH_NEXT)
+ break;
+ hash1 -= hash2;
+ hash1 &= sizeMask;
+ entry = ADDRESS_ENTRY(table, hash1);
+ } while (JS_DHASH_ENTRY_IS_BUSY(entry));
+ }
+}
+#endif /* JS_DHASHMETER */
diff --git a/third_party/js-1.7/jsdhash.h b/third_party/js-1.7/jsdhash.h
new file mode 100644
index 0000000..76867e5
--- /dev/null
+++ b/third_party/js-1.7/jsdhash.h
@@ -0,0 +1,581 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla JavaScript code.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1999-2001
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ * Brendan Eich <brendan@mozilla.org> (Original Author)
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsdhash_h___
+#define jsdhash_h___
+/*
+ * Double hashing, a la Knuth 6.
+ */
+#include "jstypes.h"
+
+JS_BEGIN_EXTERN_C
+
+#if defined(__GNUC__) && defined(__i386__) && (__GNUC__ >= 3) && !defined(XP_OS2)
+#define JS_DHASH_FASTCALL __attribute__ ((regparm (3),stdcall))
+#elif defined(XP_WIN)
+#define JS_DHASH_FASTCALL __fastcall
+#else
+#define JS_DHASH_FASTCALL
+#endif
+
+#ifdef DEBUG_XXXbrendan
+#define JS_DHASHMETER 1
+#endif
+
+/* Table size limit, do not equal or exceed (see min&maxAlphaFrac, below). */
+#undef JS_DHASH_SIZE_LIMIT
+#define JS_DHASH_SIZE_LIMIT JS_BIT(24)
+
+/* Minimum table size, or gross entry count (net is at most .75 loaded). */
+#ifndef JS_DHASH_MIN_SIZE
+#define JS_DHASH_MIN_SIZE 16
+#elif (JS_DHASH_MIN_SIZE & (JS_DHASH_MIN_SIZE - 1)) != 0
+#error "JS_DHASH_MIN_SIZE must be a power of two!"
+#endif
+
+/*
+ * Multiplicative hash uses an unsigned 32 bit integer and the golden ratio,
+ * expressed as a fixed-point 32-bit fraction.
+ */
+#define JS_DHASH_BITS 32
+#define JS_DHASH_GOLDEN_RATIO 0x9E3779B9U
+
+/* Primitive and forward-struct typedefs. */
+typedef uint32 JSDHashNumber;
+typedef struct JSDHashEntryHdr JSDHashEntryHdr;
+typedef struct JSDHashEntryStub JSDHashEntryStub;
+typedef struct JSDHashTable JSDHashTable;
+typedef struct JSDHashTableOps JSDHashTableOps;
+
+/*
+ * Table entry header structure.
+ *
+ * In order to allow in-line allocation of key and value, we do not declare
+ * either here. Instead, the API uses const void *key as a formal parameter,
+ * and asks each entry for its key when necessary via a getKey callback, used
+ * when growing or shrinking the table. Other callback types are defined
+ * below and grouped into the JSDHashTableOps structure, for single static
+ * initialization per hash table sub-type.
+ *
+ * Each hash table sub-type should nest the JSDHashEntryHdr structure at the
+ * front of its particular entry type. The keyHash member contains the result
+ * of multiplying the hash code returned from the hashKey callback (see below)
+ * by JS_DHASH_GOLDEN_RATIO, then constraining the result to avoid the magic 0
+ * and 1 values. The stored keyHash value is table size invariant, and it is
+ * maintained automatically by JS_DHashTableOperate -- users should never set
+ * it, and its only uses should be via the entry macros below.
+ *
+ * The JS_DHASH_ENTRY_IS_LIVE macro tests whether entry is neither free nor
+ * removed. An entry may be either busy or free; if busy, it may be live or
+ * removed. Consumers of this API should not access members of entries that
+ * are not live.
+ *
+ * However, use JS_DHASH_ENTRY_IS_BUSY for faster liveness testing of entries
+ * returned by JS_DHashTableOperate, as JS_DHashTableOperate never returns a
+ * non-live, busy (i.e., removed) entry pointer to its caller. See below for
+ * more details on JS_DHashTableOperate's calling rules.
+ */
+struct JSDHashEntryHdr {
+ JSDHashNumber keyHash; /* every entry must begin like this */
+};
+
+#define JS_DHASH_ENTRY_IS_FREE(entry) ((entry)->keyHash == 0)
+#define JS_DHASH_ENTRY_IS_BUSY(entry) (!JS_DHASH_ENTRY_IS_FREE(entry))
+#define JS_DHASH_ENTRY_IS_LIVE(entry) ((entry)->keyHash >= 2)
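+
+/*
+ * Summary (illustrative): keyHash == 0 marks a free entry, keyHash == 1 marks
+ * a removed-entry sentinel (busy but not live), and any keyHash >= 2
+ * identifies a live entry; see the keyHash reservation comments in jsdhash.c.
+ */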
+
+/*
+ * A JSDHashTable is currently 8 words (without the JS_DHASHMETER overhead)
+ * on most architectures, and may be allocated on the stack or within another
+ * structure or class (see below for the Init and Finish functions to use).
+ *
+ * To decide whether to use double hashing vs. chaining, we need to develop a
+ * trade-off relation, as follows:
+ *
+ * Let alpha be the load factor, esize the entry size in words, count the
+ * entry count, and pow2 the power-of-two table size in entries.
+ *
+ * (JSDHashTable overhead) > (JSHashTable overhead)
+ * (unused table entry space) > (malloc and .next overhead per entry) +
+ * (buckets overhead)
+ * (1 - alpha) * esize * pow2 > 2 * count + pow2
+ *
+ * Notice that alpha is by definition (count / pow2):
+ *
+ * (1 - alpha) * esize * pow2 > 2 * alpha * pow2 + pow2
+ * (1 - alpha) * esize > 2 * alpha + 1
+ *
+ * esize > (1 + 2 * alpha) / (1 - alpha)
+ *
+ * This assumes both tables must keep keyHash, key, and value for each entry,
+ * where key and value point to separately allocated strings or structures.
+ * If key and value can be combined into one pointer, then the trade-off is:
+ *
+ * esize > (1 + 3 * alpha) / (1 - alpha)
+ *
+ * If the entry value can be a subtype of JSDHashEntryHdr, rather than a type
+ * that must be allocated separately and referenced by an entry.value pointer
+ * member, and provided key's allocation can be fused with its entry's, then
+ * k (the words wasted per entry with chaining) is 4.
+ *
+ * To see these curves, feed gnuplot input like so:
+ *
+ * gnuplot> f(x,k) = (1 + k * x) / (1 - x)
+ * gnuplot> plot [0:.75] f(x,2), f(x,3), f(x,4)
+ *
+ * For k of 2 and a well-loaded table (alpha > .5), esize must be more than 4
+ * words for chaining to be more space-efficient than double hashing.
+ *
+ * Solving for alpha helps us decide when to shrink an underloaded table:
+ *
+ * esize > (1 + k * alpha) / (1 - alpha)
+ * esize - alpha * esize > 1 + k * alpha
+ * esize - 1 > (k + esize) * alpha
+ * (esize - 1) / (k + esize) > alpha
+ *
+ * alpha < (esize - 1) / (esize + k)
+ *
+ * Therefore double hashing should keep alpha >= (esize - 1) / (esize + k),
+ * assuming esize is not too large (in which case, chaining should probably be
+ * used for any alpha). For esize=2 and k=3, we want alpha >= .2; for esize=3
+ * and k=2, we want alpha >= .4. For k=4, esize could be 6, and alpha >= .5
+ * would still obtain. See the JS_DHASH_MIN_ALPHA macro further below.
+ *
+ * The current implementation uses a configurable lower bound on alpha, which
+ * defaults to .25, when deciding to shrink the table (while still respecting
+ * JS_DHASH_MIN_SIZE).
+ *
+ * Note a qualitative difference between chaining and double hashing: under
+ * chaining, entry addresses are stable across table shrinks and grows. With
+ * double hashing, you can't safely hold an entry pointer and use it after an
+ * ADD or REMOVE operation, unless you sample table->generation before adding
+ * or removing, and compare the sample after, dereferencing the entry pointer
+ * only if table->generation has not changed.
+ *
+ * The moral of this story: there is no one-size-fits-all hash table scheme,
+ * but for small table entry size, and assuming entry address stability is not
+ * required, double hashing wins.
+ */
+struct JSDHashTable {
+ const JSDHashTableOps *ops; /* virtual operations, see below */
+ void *data; /* ops- and instance-specific data */
+ int16 hashShift; /* multiplicative hash shift */
+ uint8 maxAlphaFrac; /* 8-bit fixed point max alpha */
+ uint8 minAlphaFrac; /* 8-bit fixed point min alpha */
+ uint32 entrySize; /* number of bytes in an entry */
+ uint32 entryCount; /* number of entries in table */
+ uint32 removedCount; /* removed entry sentinels in table */
+ uint32 generation; /* entry storage generation number */
+ char *entryStore; /* entry storage */
+#ifdef JS_DHASHMETER
+ struct JSDHashStats {
+ uint32 searches; /* total number of table searches */
+ uint32 steps; /* hash chain links traversed */
+ uint32 hits; /* searches that found key */
+ uint32 misses; /* searches that didn't find key */
+ uint32 lookups; /* number of JS_DHASH_LOOKUPs */
+ uint32 addMisses; /* adds that miss, and do work */
+ uint32 addOverRemoved; /* adds that recycled a removed entry */
+ uint32 addHits; /* adds that hit an existing entry */
+ uint32 addFailures; /* out-of-memory during add growth */
+ uint32 removeHits; /* removes that hit, and do work */
+ uint32 removeMisses; /* useless removes that miss */
+ uint32 removeFrees; /* removes that freed entry directly */
+ uint32 removeEnums; /* removes done by Enumerate */
+ uint32 grows; /* table expansions */
+ uint32 shrinks; /* table contractions */
+ uint32 compresses; /* table compressions */
+ uint32 enumShrinks; /* contractions after Enumerate */
+ } stats;
+#endif
+};
+
+/*
+ * Size in entries (gross, not net of free and removed sentinels) for table.
+ * We store hashShift rather than sizeLog2 to optimize the collision-free case
+ * in SearchTable.
+ */
+#define JS_DHASH_TABLE_SIZE(table) JS_BIT(JS_DHASH_BITS - (table)->hashShift)
+
+/*
+ * Table space at entryStore is allocated and freed using these callbacks.
+ * The allocator should return null on error only (not if called with nbytes
+ * equal to 0; but note that jsdhash.c code will never call with 0 nbytes).
+ */
+typedef void *
+(* JS_DLL_CALLBACK JSDHashAllocTable)(JSDHashTable *table, uint32 nbytes);
+
+typedef void
+(* JS_DLL_CALLBACK JSDHashFreeTable) (JSDHashTable *table, void *ptr);
+
+/*
+ * When a table grows or shrinks, each entry is queried for its key using this
+ * callback. NB: in that event, entry is not in table any longer; it's in the
+ * old entryStore vector, which is due to be freed once all entries have been
+ * moved via moveEntry callbacks.
+ */
+typedef const void *
+(* JS_DLL_CALLBACK JSDHashGetKey) (JSDHashTable *table,
+ JSDHashEntryHdr *entry);
+
+/*
+ * Compute the hash code for a given key to be looked up, added, or removed
+ * from table. A hash code may have any JSDHashNumber value.
+ */
+typedef JSDHashNumber
+(* JS_DLL_CALLBACK JSDHashHashKey) (JSDHashTable *table, const void *key);
+
+/*
+ * Compare the key identifying entry in table with the provided key parameter.
+ * Return JS_TRUE if keys match, JS_FALSE otherwise.
+ */
+typedef JSBool
+(* JS_DLL_CALLBACK JSDHashMatchEntry)(JSDHashTable *table,
+ const JSDHashEntryHdr *entry,
+ const void *key);
+
+/*
+ * Copy the data starting at from to the new entry storage at to. Do not add
+ * reference counts for any strong references in the entry, however, as this
+ * is a "move" operation: the old entry storage at from will be freed without
+ * any reference-decrementing callback shortly.
+ */
+typedef void
+(* JS_DLL_CALLBACK JSDHashMoveEntry)(JSDHashTable *table,
+ const JSDHashEntryHdr *from,
+ JSDHashEntryHdr *to);
+
+/*
+ * Clear the entry and drop any strong references it holds. This callback is
+ * invoked during a JS_DHASH_REMOVE operation (see below for operation codes),
+ * but only if the given key is found in the table.
+ */
+typedef void
+(* JS_DLL_CALLBACK JSDHashClearEntry)(JSDHashTable *table,
+ JSDHashEntryHdr *entry);
+
+/*
+ * Called when a table (whether allocated dynamically by itself, or nested in
+ * a larger structure, or allocated on the stack) is finished. This callback
+ * allows table->ops-specific code to finalize table->data.
+ */
+typedef void
+(* JS_DLL_CALLBACK JSDHashFinalize) (JSDHashTable *table);
+
+/*
+ * Initialize a new entry, apart from keyHash. This function is called when
+ * JS_DHashTableOperate's JS_DHASH_ADD case finds no existing entry for the
+ * given key, and must add a new one. At that point, entry->keyHash is not
+ * set yet, to avoid claiming the last free entry in a severely overloaded
+ * table.
+ */
+typedef JSBool
+(* JS_DLL_CALLBACK JSDHashInitEntry)(JSDHashTable *table,
+ JSDHashEntryHdr *entry,
+ const void *key);
+
+/*
+ * Finally, the "vtable" structure for JSDHashTable. The first eight hooks
+ * must be provided by implementations; they're called unconditionally by the
+ * generic jsdhash.c code. Hooks after these may be null.
+ *
+ * Summary of allocation-related hook usage with C++ placement new emphasis:
+ * allocTable Allocate raw bytes with malloc, no ctors run.
+ * freeTable Free raw bytes with free, no dtors run.
+ * initEntry Call placement new using default key-based ctor.
+ * Return JS_TRUE on success, JS_FALSE on error.
+ * moveEntry Call placement new using copy ctor, run dtor on old
+ * entry storage.
+ * clearEntry Run dtor on entry.
+ * finalize Stub unless table->data was initialized and needs to
+ * be finalized.
+ *
+ * Note why initEntry is optional: the default hooks (stubs) clear entry
+ * storage, so on successful JS_DHashTableOperate(tbl, key, JS_DHASH_ADD),
+ * the returned entry pointer addresses an entry struct whose keyHash member
+ * has been set non-zero, but all other entry members are still clear (null).
+ * JS_DHASH_ADD callers can test such members to see whether the entry was
+ * newly created by the JS_DHASH_ADD call that just succeeded. If placement
+ * new or similar initialization is required, define an initEntry hook. Of
+ * course, the clearEntry hook must zero or null appropriately.
+ *
+ * XXX assumes 0 is null for pointer types.
+ */
+struct JSDHashTableOps {
+ /* Mandatory hooks. All implementations must provide these. */
+ JSDHashAllocTable allocTable;
+ JSDHashFreeTable freeTable;
+ JSDHashGetKey getKey;
+ JSDHashHashKey hashKey;
+ JSDHashMatchEntry matchEntry;
+ JSDHashMoveEntry moveEntry;
+ JSDHashClearEntry clearEntry;
+ JSDHashFinalize finalize;
+
+ /* Optional hooks start here. If null, these are not called. */
+ JSDHashInitEntry initEntry;
+};
+
+/*
+ * Default implementations for the above ops.
+ */
+extern JS_PUBLIC_API(void *)
+JS_DHashAllocTable(JSDHashTable *table, uint32 nbytes);
+
+extern JS_PUBLIC_API(void)
+JS_DHashFreeTable(JSDHashTable *table, void *ptr);
+
+extern JS_PUBLIC_API(JSDHashNumber)
+JS_DHashStringKey(JSDHashTable *table, const void *key);
+
+/* A minimal entry contains a keyHash header and a void key pointer. */
+struct JSDHashEntryStub {
+ JSDHashEntryHdr hdr;
+ const void *key;
+};
+
+extern JS_PUBLIC_API(const void *)
+JS_DHashGetKeyStub(JSDHashTable *table, JSDHashEntryHdr *entry);
+
+extern JS_PUBLIC_API(JSDHashNumber)
+JS_DHashVoidPtrKeyStub(JSDHashTable *table, const void *key);
+
+extern JS_PUBLIC_API(JSBool)
+JS_DHashMatchEntryStub(JSDHashTable *table,
+ const JSDHashEntryHdr *entry,
+ const void *key);
+
+extern JS_PUBLIC_API(JSBool)
+JS_DHashMatchStringKey(JSDHashTable *table,
+ const JSDHashEntryHdr *entry,
+ const void *key);
+
+extern JS_PUBLIC_API(void)
+JS_DHashMoveEntryStub(JSDHashTable *table,
+ const JSDHashEntryHdr *from,
+ JSDHashEntryHdr *to);
+
+extern JS_PUBLIC_API(void)
+JS_DHashClearEntryStub(JSDHashTable *table, JSDHashEntryHdr *entry);
+
+extern JS_PUBLIC_API(void)
+JS_DHashFreeStringKey(JSDHashTable *table, JSDHashEntryHdr *entry);
+
+extern JS_PUBLIC_API(void)
+JS_DHashFinalizeStub(JSDHashTable *table);
+
+/*
+ * If you use JSDHashEntryStub or a subclass of it as your entry struct, and
+ * if your entries move via memcpy and clear via memset(0), you can use these
+ * stub operations.
+ */
+extern JS_PUBLIC_API(const JSDHashTableOps *)
+JS_DHashGetStubOps(void);
+
+/*
+ * Dynamically allocate a new JSDHashTable using malloc, initialize it using
+ * JS_DHashTableInit, and return its address. Return null on malloc failure.
+ * Note that the entry storage at table->entryStore will be allocated using
+ * the ops->allocTable callback.
+ */
+extern JS_PUBLIC_API(JSDHashTable *)
+JS_NewDHashTable(const JSDHashTableOps *ops, void *data, uint32 entrySize,
+ uint32 capacity);
+
+/*
+ * Finalize table's data, free its entry storage (via table->ops->freeTable),
+ * and return the memory starting at table to the malloc heap.
+ */
+extern JS_PUBLIC_API(void)
+JS_DHashTableDestroy(JSDHashTable *table);
+
+/*
+ * Initialize table with ops, data, entrySize, and capacity. Capacity is a
+ * guess for the smallest table size at which the table will usually be less
+ * than 75% loaded (the table will grow or shrink as needed; capacity serves
+ * only to avoid inevitable early growth from JS_DHASH_MIN_SIZE).
+ */
+extern JS_PUBLIC_API(JSBool)
+JS_DHashTableInit(JSDHashTable *table, const JSDHashTableOps *ops, void *data,
+ uint32 entrySize, uint32 capacity);
+
+/*
+ * Set maximum and minimum alpha for table. The defaults are 0.75 and 0.25.
+ * maxAlpha must be in [0.5, 0.9375] for the default JS_DHASH_MIN_SIZE; or if
+ * MinSize=JS_DHASH_MIN_SIZE <= 256, in [0.5, (float)(MinSize-1)/MinSize]; or
+ * else in [0.5, 255.0/256]. minAlpha must be in [0, maxAlpha / 2), so that
+ * we don't shrink on the very next remove after growing a table upon adding
+ * an entry that brings entryCount past maxAlpha * tableSize.
+ */
+extern JS_PUBLIC_API(void)
+JS_DHashTableSetAlphaBounds(JSDHashTable *table,
+ float maxAlpha,
+ float minAlpha);
+
+/*
+ * Call this macro with k, the number of pointer-sized words wasted per entry
+ * under chaining, to compute the minimum alpha at which double hashing still
+ * beats chaining.
+ */
+#define JS_DHASH_MIN_ALPHA(table, k) \
+ ((float)((table)->entrySize / sizeof(void *) - 1) \
+ / ((table)->entrySize / sizeof(void *) + (k)))
+
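+/*
+ * Usage sketch (hypothetical numbers, not from the original header): let a
+ * table fill to 7/8 before growing, and shrink only below the point where
+ * double hashing still beats chaining that wastes k == 3 words per entry:
+ *
+ *   JS_DHashTableSetAlphaBounds(&table, 0.875f,
+ *                               JS_DHASH_MIN_ALPHA(&table, 3));
+ *
+ * Note that JS_DHashTableSetAlphaBounds adjusts or rejects bounds that fall
+ * outside the ranges documented above.
+ */
+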
+/*
+ * Finalize table's data, free its entry storage using table->ops->freeTable,
+ * and leave its members unchanged from their last live values (which leaves
+ * pointers dangling). If you want to burn cycles clearing table, it's up to
+ * your code to call memset.
+ */
+extern JS_PUBLIC_API(void)
+JS_DHashTableFinish(JSDHashTable *table);
+
+/*
+ * To consolidate keyHash computation and table grow/shrink code, we use a
+ * single entry point for lookup, add, and remove operations. The operation
+ * codes are declared here, along with codes returned by JSDHashEnumerator
+ * functions, which control JS_DHashTableEnumerate's behavior.
+ */
+typedef enum JSDHashOperator {
+ JS_DHASH_LOOKUP = 0, /* lookup entry */
+ JS_DHASH_ADD = 1, /* add entry */
+ JS_DHASH_REMOVE = 2, /* remove entry, or enumerator says remove */
+ JS_DHASH_NEXT = 0, /* enumerator says continue */
+ JS_DHASH_STOP = 1 /* enumerator says stop */
+} JSDHashOperator;
+
+/*
+ * To lookup a key in table, call:
+ *
+ * entry = JS_DHashTableOperate(table, key, JS_DHASH_LOOKUP);
+ *
+ * If JS_DHASH_ENTRY_IS_BUSY(entry) is true, key was found and it identifies
+ * entry. If JS_DHASH_ENTRY_IS_FREE(entry) is true, key was not found.
+ *
+ * To add an entry identified by key to table, call:
+ *
+ * entry = JS_DHashTableOperate(table, key, JS_DHASH_ADD);
+ *
+ * If entry is null upon return, then either the table is severely overloaded
+ * and memory can't be allocated for entry storage via table->ops->allocTable,
+ * or table->ops->initEntry is non-null and that hook returned false.
+ *
+ * Otherwise, entry->keyHash has been set so that JS_DHASH_ENTRY_IS_BUSY(entry)
+ * is true, and it is up to the caller to initialize the key and value parts
+ * of the entry sub-type, if they have not been set already (i.e. if entry was
+ * not already in the table, and if the optional initEntry hook was not used).
+ *
+ * To remove an entry identified by key from table, call:
+ *
+ * (void) JS_DHashTableOperate(table, key, JS_DHASH_REMOVE);
+ *
+ * If key's entry is found, it is cleared (via table->ops->clearEntry) and
+ * marked either free or as a removed-entry sentinel, so it is no longer
+ * JS_DHASH_ENTRY_IS_LIVE(entry).  This operation returns null
+ * unconditionally; you should ignore its return value.
+ */
+extern JS_PUBLIC_API(JSDHashEntryHdr *) JS_DHASH_FASTCALL
+JS_DHashTableOperate(JSDHashTable *table, const void *key, JSDHashOperator op);
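+
+/*
+ * Usage sketch (not part of the original header; table setup and error
+ * handling are assumptions): a minimal add/lookup/remove round trip over a
+ * stub table keyed by pointer identity, kept under #if 0 as illustration.
+ */
+#if 0
+static JSBool
+StubTableRoundTrip(void *key)
+{
+    JSDHashTable *table;
+    JSDHashEntryStub *stub;
+
+    table = JS_NewDHashTable(JS_DHashGetStubOps(), NULL,
+                             sizeof(JSDHashEntryStub), JS_DHASH_MIN_SIZE);
+    if (!table)
+        return JS_FALSE;
+
+    /* ADD: null means the add failed (overloaded table or OOM). */
+    stub = (JSDHashEntryStub *)
+           JS_DHashTableOperate(table, key, JS_DHASH_ADD);
+    if (!stub) {
+        JS_DHashTableDestroy(table);
+        return JS_FALSE;
+    }
+    if (!stub->key)                 /* newly created entry: fill in its key */
+        stub->key = key;
+
+    /* LOOKUP: a busy entry means found, a free entry means not found. */
+    stub = (JSDHashEntryStub *)
+           JS_DHashTableOperate(table, key, JS_DHASH_LOOKUP);
+    if (JS_DHASH_ENTRY_IS_BUSY(&stub->hdr)) {
+        /* ... use stub->key here ... */
+    }
+
+    /* REMOVE: always returns null; ignore the return value. */
+    (void) JS_DHashTableOperate(table, key, JS_DHASH_REMOVE);
+
+    JS_DHashTableDestroy(table);
+    return JS_TRUE;
+}
+#endif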
+
+/*
+ * Remove an entry already accessed via LOOKUP or ADD.
+ *
+ * NB: this is a "raw" or low-level routine, intended to be used only where
+ * the inefficiency of a full JS_DHashTableOperate (which rehashes in order
+ * to find the entry given its key) is not tolerable. This function does not
+ * shrink the table if it is underloaded. It does not update stats #ifdef
+ * JS_DHASHMETER, either.
+ */
+extern JS_PUBLIC_API(void)
+JS_DHashTableRawRemove(JSDHashTable *table, JSDHashEntryHdr *entry);
+
+/*
+ * Enumerate entries in table using etor:
+ *
+ * count = JS_DHashTableEnumerate(table, etor, arg);
+ *
+ * JS_DHashTableEnumerate calls etor like so:
+ *
+ * op = etor(table, entry, number, arg);
+ *
+ * where number is a zero-based ordinal assigned to live entries according to
+ * their order in table->entryStore.
+ *
+ * The return value, op, is treated as a set of flags. If op is JS_DHASH_NEXT,
+ * then continue enumerating. If op contains JS_DHASH_REMOVE, then clear (via
+ * table->ops->clearEntry) and free entry. Then we check whether op contains
+ * JS_DHASH_STOP; if so, stop enumerating and return the number of live entries
+ * that were enumerated so far. Return the total number of live entries when
+ * enumeration completes normally.
+ *
+ * If etor calls JS_DHashTableOperate on table with op != JS_DHASH_LOOKUP, it
+ * must return JS_DHASH_STOP; otherwise undefined behavior results.
+ *
+ * If any enumerator returns JS_DHASH_REMOVE, table->entryStore may be shrunk
+ * or compressed after enumeration, but before JS_DHashTableEnumerate returns.
+ * Such an enumerator therefore can't safely set aside entry pointers, but an
+ * enumerator that never returns JS_DHASH_REMOVE can set pointers to entries
+ * aside, e.g., to avoid copying live entries into an array of the entry type.
+ * Copying entry pointers is cheaper, and safe so long as the caller of such a
+ * "stable" Enumerate doesn't use the set-aside pointers after any call either
+ * to JS_DHashTableOperate, or to an "unstable" form of Enumerate, which might
+ * grow or shrink entryStore.
+ *
+ * If your enumerator wants to remove certain entries, but set aside pointers
+ * to other entries that it retains, it can use JS_DHashTableRawRemove on the
+ * entries to be removed, returning JS_DHASH_NEXT to skip them. Likewise, if
+ * you want to remove entries, but for some reason you do not want entryStore
+ * to be shrunk or compressed, you can call JS_DHashTableRawRemove safely on
+ * the entry being enumerated, rather than returning JS_DHASH_REMOVE.
+ */
+typedef JSDHashOperator
+(* JS_DLL_CALLBACK JSDHashEnumerator)(JSDHashTable *table, JSDHashEntryHdr *hdr,
+ uint32 number, void *arg);
+
+extern JS_PUBLIC_API(uint32)
+JS_DHashTableEnumerate(JSDHashTable *table, JSDHashEnumerator etor, void *arg);
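+
+/*
+ * Usage sketch (hypothetical enumerator, not part of the original header;
+ * the JS_DLL_CALLBACK calling-convention macro is omitted for brevity):
+ * remove every stub entry whose key equals arg and keep enumerating.
+ */
+#if 0
+static JSDHashOperator
+RemoveMatchingKeys(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 number,
+                   void *arg)
+{
+    JSDHashEntryStub *stub = (JSDHashEntryStub *)hdr;
+
+    if (stub->key == arg)
+        return JS_DHASH_REMOVE;     /* cleared and freed by Enumerate */
+    return JS_DHASH_NEXT;           /* continue with the next live entry */
+}
+
+/* count = JS_DHashTableEnumerate(table, RemoveMatchingKeys, someKey); */
+#endif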
+
+#ifdef JS_DHASHMETER
+#include <stdio.h>
+
+extern JS_PUBLIC_API(void)
+JS_DHashTableDumpMeter(JSDHashTable *table, JSDHashEnumerator dump, FILE *fp);
+#endif
+
+JS_END_EXTERN_C
+
+#endif /* jsdhash_h___ */
diff --git a/third_party/js-1.7/jsdtoa.c b/third_party/js-1.7/jsdtoa.c
new file mode 100644
index 0000000..5b0b09f
--- /dev/null
+++ b/third_party/js-1.7/jsdtoa.c
@@ -0,0 +1,3132 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * Portable double to alphanumeric string and back converters.
+ */
+#include "jsstddef.h"
+#include "jslibmath.h"
+#include "jstypes.h"
+#include "jsdtoa.h"
+#include "jsprf.h"
+#include "jsutil.h" /* Added by JSIFY */
+#include "jspubtd.h"
+#include "jsnum.h"
+
+#ifdef JS_THREADSAFE
+#include "prlock.h"
+#endif
+
+/****************************************************************
+ *
+ * The author of this software is David M. Gay.
+ *
+ * Copyright (c) 1991 by Lucent Technologies.
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose without fee is hereby granted, provided that this entire notice
+ * is included in all copies of any software which is or includes a copy
+ * or modification of this software and in all copies of the supporting
+ * documentation for such software.
+ *
+ * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED
+ * WARRANTY. IN PARTICULAR, NEITHER THE AUTHOR NOR LUCENT MAKES ANY
+ * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE MERCHANTABILITY
+ * OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR PURPOSE.
+ *
+ ***************************************************************/
+
+/* Please send bug reports to
+ David M. Gay
+ Bell Laboratories, Room 2C-463
+ 600 Mountain Avenue
+ Murray Hill, NJ 07974-0636
+ U.S.A.
+ dmg@bell-labs.com
+ */
+
+/* On a machine with IEEE extended-precision registers, it is
+ * necessary to specify double-precision (53-bit) rounding precision
+ * before invoking strtod or dtoa. If the machine uses (the equivalent
+ * of) Intel 80x87 arithmetic, the call
+ * _control87(PC_53, MCW_PC);
+ * does this with many compilers. Whether this or another call is
+ * appropriate depends on the compiler; for this to work, it may be
+ * necessary to #include "float.h" or another system-dependent header
+ * file.
+ */
+
+/* strtod for IEEE-arithmetic machines.
+ *
+ * This strtod returns a nearest machine number to the input decimal
+ * string (or sets err to JS_DTOA_ERANGE or JS_DTOA_ENOMEM). With IEEE
+ * arithmetic, ties are broken by the IEEE round-even rule. Otherwise
+ * ties are broken by biased rounding (add half and chop).
+ *
+ * Inspired loosely by William D. Clinger's paper "How to Read Floating
+ * Point Numbers Accurately" [Proc. ACM SIGPLAN '90, pp. 92-101].
+ *
+ * Modifications:
+ *
+ * 1. We only require IEEE double-precision
+ * arithmetic (not IEEE double-extended).
+ * 2. We get by with floating-point arithmetic in a case that
+ * Clinger missed -- when we're computing d * 10^n
+ * for a small integer d and the integer n is not too
+ * much larger than 22 (the maximum integer k for which
+ * we can represent 10^k exactly), we may be able to
+ * compute (d*10^k) * 10^(e-k) with just one roundoff.
+ * 3. Rather than a bit-at-a-time adjustment of the binary
+ * result in the hard case, we use floating-point
+ * arithmetic to determine the adjustment to within
+ * one bit; only in really hard cases do we need to
+ * compute a second residual.
+ * 4. Because of 3., we don't need a large table of powers of 10
+ * for ten-to-e (just some small tables, e.g. of 10^k
+ * for 0 <= k <= 22).
+ */
+
+/*
+ * #define IEEE_8087 for IEEE-arithmetic machines where the least
+ * significant byte has the lowest address.
+ * #define IEEE_MC68k for IEEE-arithmetic machines where the most
+ * significant byte has the lowest address.
+ * #define Long int on machines with 32-bit ints and 64-bit longs.
+ * #define Sudden_Underflow for IEEE-format machines without gradual
+ * underflow (i.e., that flush to zero on underflow).
+ * #define No_leftright to omit left-right logic in fast floating-point
+ * computation of js_dtoa.
+ * #define Check_FLT_ROUNDS if FLT_ROUNDS can assume the values 2 or 3.
+ * #define RND_PRODQUOT to use rnd_prod and rnd_quot (assembly routines
+ * that use extended-precision instructions to compute rounded
+ * products and quotients) with IBM.
+ * #define ROUND_BIASED for IEEE-format with biased rounding.
+ * #define Inaccurate_Divide for IEEE-format with correctly rounded
+ * products but inaccurate quotients, e.g., for Intel i860.
+ * #define JS_HAVE_LONG_LONG on machines that have a "long long"
+ * integer type (of >= 64 bits). If long long is available and the name is
+ * something other than "long long", #define Llong to be the name,
+ * and if "unsigned Llong" does not work as an unsigned version of
+ * Llong, #define ULLong to be the corresponding unsigned type.
+ * #define Bad_float_h if your system lacks a float.h or if it does not
+ * define some or all of DBL_DIG, DBL_MAX_10_EXP, DBL_MAX_EXP,
+ * FLT_RADIX, FLT_ROUNDS, and DBL_MAX.
+ * #define MALLOC your_malloc, where your_malloc(n) acts like malloc(n)
+ * if memory is available and otherwise does something you deem
+ * appropriate. If MALLOC is undefined, malloc will be invoked
+ * directly -- and assumed always to succeed.
+ * #define Omit_Private_Memory to omit logic (added Jan. 1998) for making
+ * memory allocations from a private pool of memory when possible.
+ * When used, the private pool is PRIVATE_MEM bytes long: 2000 bytes,
+ * unless #defined to be a different length. This default length
+ * suffices to get rid of MALLOC calls except for unusual cases,
+ * such as decimal-to-binary conversion of a very long string of
+ * digits.
+ * #define INFNAN_CHECK on IEEE systems to cause strtod to check for
+ * Infinity and NaN (case insensitively). On some systems (e.g.,
+ * some HP systems), it may be necessary to #define NAN_WORD0
+ * appropriately -- to the most significant word of a quiet NaN.
+ * (On HP Series 700/800 machines, -DNAN_WORD0=0x7ff40000 works.)
+ * #define MULTIPLE_THREADS if the system offers preemptively scheduled
+ * multiple threads. In this case, you must provide (or suitably
+ * #define) two locks, acquired by ACQUIRE_DTOA_LOCK() and released
+ * by RELEASE_DTOA_LOCK(). (The second lock, accessed
+ * in pow5mult, ensures lazy evaluation of only one copy of high
+ * powers of 5; omitting this lock would introduce a small
+ * probability of wasting memory, but would otherwise be harmless.)
+ * You must also invoke freedtoa(s) to free the value s returned by
+ * dtoa. You may do so whether or not MULTIPLE_THREADS is #defined.
+ * #define NO_IEEE_Scale to disable new (Feb. 1997) logic in strtod that
+ * avoids underflows on inputs whose result does not underflow.
+ */
+#ifdef IS_LITTLE_ENDIAN
+#define IEEE_8087
+#else
+#define IEEE_MC68k
+#endif
+
+#ifndef Long
+#define Long int32
+#endif
+
+#ifndef ULong
+#define ULong uint32
+#endif
+
+#define Bug(errorMessageString) JS_ASSERT(!errorMessageString)
+
+#include "stdlib.h"
+#include "string.h"
+
+#ifdef MALLOC
+extern void *MALLOC(size_t);
+#else
+#define MALLOC malloc
+#endif
+
+#define Omit_Private_Memory
+/* Private memory currently doesn't work with JS_THREADSAFE */
+#ifndef Omit_Private_Memory
+#ifndef PRIVATE_MEM
+#define PRIVATE_MEM 2000
+#endif
+#define PRIVATE_mem ((PRIVATE_MEM+sizeof(double)-1)/sizeof(double))
+static double private_mem[PRIVATE_mem], *pmem_next = private_mem;
+#endif
+
+#ifdef Bad_float_h
+#undef __STDC__
+
+#define DBL_DIG 15
+#define DBL_MAX_10_EXP 308
+#define DBL_MAX_EXP 1024
+#define FLT_RADIX 2
+#define FLT_ROUNDS 1
+#define DBL_MAX 1.7976931348623157e+308
+
+
+
+#ifndef LONG_MAX
+#define LONG_MAX 2147483647
+#endif
+
+#else /* ifndef Bad_float_h */
+#include "float.h"
+#endif /* Bad_float_h */
+
+#ifndef __MATH_H__
+#include "math.h"
+#endif
+
+#ifndef CONST
+#define CONST const
+#endif
+
+#if defined(IEEE_8087) + defined(IEEE_MC68k) != 1
+Exactly one of IEEE_8087 or IEEE_MC68k should be defined.
+#endif
+
+#define word0(x) JSDOUBLE_HI32(x)
+#define set_word0(x, y) JSDOUBLE_SET_HI32(x, y)
+#define word1(x) JSDOUBLE_LO32(x)
+#define set_word1(x, y) JSDOUBLE_SET_LO32(x, y)
+
+#define Storeinc(a,b,c) (*(a)++ = (b) << 16 | (c) & 0xffff)
+
+/* #define P DBL_MANT_DIG */
+/* Ten_pmax = floor(P*log(2)/log(5)) */
+/* Bletch = (highest power of 2 < DBL_MAX_10_EXP) / 16 */
+/* Quick_max = floor((P-1)*log(FLT_RADIX)/log(10) - 1) */
+/* Int_max = floor(P*log(FLT_RADIX)/log(10) - 1) */
+
+#define Exp_shift 20
+#define Exp_shift1 20
+#define Exp_msk1 0x100000
+#define Exp_msk11 0x100000
+#define Exp_mask 0x7ff00000
+#define P 53
+#define Bias 1023
+#define Emin (-1022)
+#define Exp_1 0x3ff00000
+#define Exp_11 0x3ff00000
+#define Ebits 11
+#define Frac_mask 0xfffff
+#define Frac_mask1 0xfffff
+#define Ten_pmax 22
+#define Bletch 0x10
+#define Bndry_mask 0xfffff
+#define Bndry_mask1 0xfffff
+#define LSB 1
+#define Sign_bit 0x80000000
+#define Log2P 1
+#define Tiny0 0
+#define Tiny1 1
+#define Quick_max 14
+#define Int_max 14
+#define Infinite(x) (word0(x) == 0x7ff00000) /* sufficient test for here */
+#ifndef NO_IEEE_Scale
+#define Avoid_Underflow
+#endif
+
+
+
+#ifdef RND_PRODQUOT
+#define rounded_product(a,b) a = rnd_prod(a, b)
+#define rounded_quotient(a,b) a = rnd_quot(a, b)
+extern double rnd_prod(double, double), rnd_quot(double, double);
+#else
+#define rounded_product(a,b) a *= b
+#define rounded_quotient(a,b) a /= b
+#endif
+
+#define Big0 (Frac_mask1 | Exp_msk1*(DBL_MAX_EXP+Bias-1))
+#define Big1 0xffffffff
+
+#ifndef JS_HAVE_LONG_LONG
+#undef ULLong
+#else /* long long available */
+#ifndef Llong
+#define Llong JSInt64
+#endif
+#ifndef ULLong
+#define ULLong JSUint64
+#endif
+#endif /* JS_HAVE_LONG_LONG */
+
+#ifdef JS_THREADSAFE
+#define MULTIPLE_THREADS
+static PRLock *freelist_lock;
+#define ACQUIRE_DTOA_LOCK() \
+ JS_BEGIN_MACRO \
+ if (!initialized) \
+ InitDtoa(); \
+ PR_Lock(freelist_lock); \
+ JS_END_MACRO
+#define RELEASE_DTOA_LOCK() PR_Unlock(freelist_lock)
+#else
+#undef MULTIPLE_THREADS
+#define ACQUIRE_DTOA_LOCK() /*nothing*/
+#define RELEASE_DTOA_LOCK() /*nothing*/
+#endif
+
+#define Kmax 15
+
+struct Bigint {
+ struct Bigint *next; /* Free list link */
+ int32 k; /* lg2(maxwds) */
+ int32 maxwds; /* Number of words allocated for x */
+ int32 sign; /* Zero if positive, 1 if negative. Ignored by most Bigint routines! */
+ int32 wds; /* Actual number of words. If value is nonzero, the most significant word must be nonzero. */
+ ULong x[1]; /* wds words of number in little endian order */
+};
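+
+/*
+ * Editor's illustration (not part of the original dtoa sources): a Bigint
+ * holding the value 2^40 + 7 would have wds == 2, x[0] == 7 and x[1] == 0x100,
+ * i.e. value == x[0] + x[1]*2^32. k and maxwds only describe the capacity of
+ * the allocation (maxwds == 1 << k), not the stored value.
+ */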
+
+#ifdef ENABLE_OOM_TESTING
+/* Out-of-memory testing. Use a good testcase (over and over) and then use
+ * these routines to cause a memory failure on every possible Balloc allocation,
+ * to make sure that all out-of-memory paths can be followed. See bug 14044.
+ */
+
+static int allocationNum; /* which allocation is next? */
+static int desiredFailure; /* which allocation should fail? */
+
+/**
+ * js_BigintTestingReset
+ *
+ * Call at the beginning of a test run to set the allocation failure position.
+ * (Set to 0 to just have the engine count allocations without failing.)
+ */
+JS_PUBLIC_API(void)
+js_BigintTestingReset(int newFailure)
+{
+ allocationNum = 0;
+ desiredFailure = newFailure;
+}
+
+/**
+ * js_BigintTestingWhere
+ *
+ * Report the current allocation position. This is really only useful when you
+ * want to learn how many allocations a test run has.
+ */
+JS_PUBLIC_API(int)
+js_BigintTestingWhere()
+{
+ return allocationNum;
+}
+
+
+/*
+ * So here's what you do: Set up a fantastic test case that exercises the
+ * elements of the code you wish. Set the failure point at 0 and run the test,
+ * then get the allocation position. This number is the number of allocations
+ * your test makes. Now loop from 1 to that number, setting the failure point
+ * at each loop count, and run the test over and over, causing failures at each
+ * step. Any memory failure *should* cause an Out-Of-Memory exception; if it
+ * doesn't, then there's still an error here (see the compiled-out sketch of
+ * this sweep just below).
+ */
+#endif
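+
+/*
+ * Editor's sketch of the sweep described in the comment above; it is an
+ * illustration only and is compiled out. run_testcase() is a hypothetical
+ * stand-in for whatever test harness drives the engine.
+ */
+#if 0
+static void run_oom_sweep(void)
+{
+    int total, i;
+
+    js_BigintTestingReset(0);            /* count allocations without failing */
+    run_testcase();
+    total = js_BigintTestingWhere();     /* number of allocations the test makes */
+
+    for (i = 1; i <= total; i++) {
+        js_BigintTestingReset(i);        /* force the i-th allocation to fail */
+        run_testcase();                  /* should fail cleanly with OOM */
+    }
+}
+#endif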
+
+typedef struct Bigint Bigint;
+
+static Bigint *freelist[Kmax+1];
+
+/*
+ * Allocate a Bigint with room for 2^k words.
+ * This is not threadsafe; the caller must hold the dtoa lock
+ * (see ACQUIRE_DTOA_LOCK above).
+ */
+static Bigint *Balloc(int32 k)
+{
+ int32 x;
+ Bigint *rv;
+#ifndef Omit_Private_Memory
+ uint32 len;
+#endif
+
+#ifdef ENABLE_OOM_TESTING
+ if (++allocationNum == desiredFailure) {
+ printf("Forced Failing Allocation number %d\n", allocationNum);
+ return NULL;
+ }
+#endif
+
+ if ((rv = freelist[k]) != NULL)
+ freelist[k] = rv->next;
+ if (rv == NULL) {
+ x = 1 << k;
+#ifdef Omit_Private_Memory
+ rv = (Bigint *)MALLOC(sizeof(Bigint) + (x-1)*sizeof(ULong));
+#else
+ len = (sizeof(Bigint) + (x-1)*sizeof(ULong) + sizeof(double) - 1)
+ /sizeof(double);
+ if (pmem_next - private_mem + len <= PRIVATE_mem) {
+ rv = (Bigint*)pmem_next;
+ pmem_next += len;
+ }
+ else
+ rv = (Bigint*)MALLOC(len*sizeof(double));
+#endif
+ if (!rv)
+ return NULL;
+ rv->k = k;
+ rv->maxwds = x;
+ }
+ rv->sign = rv->wds = 0;
+ return rv;
+}
+
+static void Bfree(Bigint *v)
+{
+ if (v) {
+ v->next = freelist[v->k];
+ freelist[v->k] = v;
+ }
+}
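+
+/*
+ * Editor's sketch (not part of the original source): the intended
+ * Balloc/Bfree pairing. Compiled out; the function name is illustrative.
+ * Note that Bfree() only returns the node to freelist[] and never calls
+ * free(); js_FinishDtoa() releases the freelist at shutdown.
+ */
+#if 0
+static void example_balloc_usage(void)
+{
+    Bigint *b;
+
+    ACQUIRE_DTOA_LOCK();        /* freelist[] is shared state */
+    b = Balloc(2);              /* capacity 1 << 2 == 4 words */
+    if (b) {
+        b->x[0] = 42;           /* the value 42 */
+        b->wds = 1;
+        Bfree(b);               /* back onto freelist[2] */
+    }
+    RELEASE_DTOA_LOCK();
+}
+#endif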
+
+#define Bcopy(x,y) memcpy((char *)&x->sign, (char *)&y->sign, \
+ y->wds*sizeof(Long) + 2*sizeof(int32))
+
+/* Return b*m + a. The old b is consumed: it is either reused in place or
+ * deallocated, so callers must only use the returned pointer. Both a and m
+ * must be between 0 and 65535 inclusive. NOTE: old b is also deallocated on
+ * memory failure.
+ */
+static Bigint *multadd(Bigint *b, int32 m, int32 a)
+{
+ int32 i, wds;
+#ifdef ULLong
+ ULong *x;
+ ULLong carry, y;
+#else
+ ULong carry, *x, y;
+ ULong xi, z;
+#endif
+ Bigint *b1;
+
+#ifdef ENABLE_OOM_TESTING
+ if (++allocationNum == desiredFailure) {
+ /* Faux allocation, because I'm not getting all of the failure paths
+ * without it.
+ */
+ printf("Forced Failing Allocation number %d\n", allocationNum);
+ Bfree(b);
+ return NULL;
+ }
+#endif
+
+ wds = b->wds;
+ x = b->x;
+ i = 0;
+ carry = a;
+ do {
+#ifdef ULLong
+ y = *x * (ULLong)m + carry;
+ carry = y >> 32;
+ *x++ = (ULong)(y & 0xffffffffUL);
+#else
+ xi = *x;
+ y = (xi & 0xffff) * m + carry;
+ z = (xi >> 16) * m + (y >> 16);
+ carry = z >> 16;
+ *x++ = (z << 16) + (y & 0xffff);
+#endif
+ }
+ while(++i < wds);
+ if (carry) {
+ if (wds >= b->maxwds) {
+ b1 = Balloc(b->k+1);
+ if (!b1) {
+ Bfree(b);
+ return NULL;
+ }
+ Bcopy(b1, b);
+ Bfree(b);
+ b = b1;
+ }
+ b->x[wds++] = (ULong)carry;
+ b->wds = wds;
+ }
+ return b;
+}
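+
+/*
+ * Editor's note (added): s2b() below rebuilds a decimal mantissa with repeated
+ * calls of the form b = multadd(b, 10, digit), i.e. a Horner evaluation of the
+ * digit string in base 10.
+ */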
+
+static Bigint *s2b(CONST char *s, int32 nd0, int32 nd, ULong y9)
+{
+ Bigint *b;
+ int32 i, k;
+ Long x, y;
+
+ x = (nd + 8) / 9;
+ for(k = 0, y = 1; x > y; y <<= 1, k++) ;
+ b = Balloc(k);
+ if (!b)
+ return NULL;
+ b->x[0] = y9;
+ b->wds = 1;
+
+ i = 9;
+ if (9 < nd0) {
+ s += 9;
+ do {
+ b = multadd(b, 10, *s++ - '0');
+ if (!b)
+ return NULL;
+ } while(++i < nd0);
+ s++;
+ }
+ else
+ s += 10;
+ for(; i < nd; i++) {
+ b = multadd(b, 10, *s++ - '0');
+ if (!b)
+ return NULL;
+ }
+ return b;
+}
+
+
+/* Return the number (0 through 32) of leading (most significant) zero bits in x. */
+static int32 hi0bits(register ULong x)
+{
+ register int32 k = 0;
+
+ if (!(x & 0xffff0000)) {
+ k = 16;
+ x <<= 16;
+ }
+ if (!(x & 0xff000000)) {
+ k += 8;
+ x <<= 8;
+ }
+ if (!(x & 0xf0000000)) {
+ k += 4;
+ x <<= 4;
+ }
+ if (!(x & 0xc0000000)) {
+ k += 2;
+ x <<= 2;
+ }
+ if (!(x & 0x80000000)) {
+ k++;
+ if (!(x & 0x40000000))
+ return 32;
+ }
+ return k;
+}
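+
+/*
+ * Editor's example (added): hi0bits(0x00001000) == 19, since only bit 12 is
+ * set and the 19 bits above it are zero; hi0bits(0) == 32.
+ */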
+
+
+/* Return the number (0 through 32) of least significant zero bits in y.
+ * Also shift y to the right past these 0 through 32 zeros so that y's
+ * least significant bit will be set unless y was originally zero. */
+static int32 lo0bits(ULong *y)
+{
+ register int32 k;
+ register ULong x = *y;
+
+ if (x & 7) {
+ if (x & 1)
+ return 0;
+ if (x & 2) {
+ *y = x >> 1;
+ return 1;
+ }
+ *y = x >> 2;
+ return 2;
+ }
+ k = 0;
+ if (!(x & 0xffff)) {
+ k = 16;
+ x >>= 16;
+ }
+ if (!(x & 0xff)) {
+ k += 8;
+ x >>= 8;
+ }
+ if (!(x & 0xf)) {
+ k += 4;
+ x >>= 4;
+ }
+ if (!(x & 0x3)) {
+ k += 2;
+ x >>= 2;
+ }
+ if (!(x & 1)) {
+ k++;
+ x >>= 1;
+ if (!x)
+ return 32;
+ }
+ *y = x;
+ return k;
+}
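+
+/*
+ * Editor's example (added): for y == 0x00001000, lo0bits(&y) returns 12 and
+ * leaves y == 1; for y == 0 it returns 32.
+ */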
+
+/* Return a new Bigint with the given integer value, which must be nonnegative. */
+static Bigint *i2b(int32 i)
+{
+ Bigint *b;
+
+ b = Balloc(1);
+ if (!b)
+ return NULL;
+ b->x[0] = i;
+ b->wds = 1;
+ return b;
+}
+
+/* Return a newly allocated product of a and b. */
+static Bigint *mult(CONST Bigint *a, CONST Bigint *b)
+{
+ CONST Bigint *t;
+ Bigint *c;
+ int32 k, wa, wb, wc;
+ ULong y;
+ ULong *xc, *xc0, *xce;
+ CONST ULong *x, *xa, *xae, *xb, *xbe;
+#ifdef ULLong
+ ULLong carry, z;
+#else
+ ULong carry, z;
+ ULong z2;
+#endif
+
+ if (a->wds < b->wds) {
+ t = a;
+ a = b;
+ b = t;
+ }
+ k = a->k;
+ wa = a->wds;
+ wb = b->wds;
+ wc = wa + wb;
+ if (wc > a->maxwds)
+ k++;
+ c = Balloc(k);
+ if (!c)
+ return NULL;
+ for(xc = c->x, xce = xc + wc; xc < xce; xc++)
+ *xc = 0;
+ xa = a->x;
+ xae = xa + wa;
+ xb = b->x;
+ xbe = xb + wb;
+ xc0 = c->x;
+#ifdef ULLong
+ for(; xb < xbe; xc0++) {
+ if ((y = *xb++) != 0) {
+ x = xa;
+ xc = xc0;
+ carry = 0;
+ do {
+ z = *x++ * (ULLong)y + *xc + carry;
+ carry = z >> 32;
+ *xc++ = (ULong)(z & 0xffffffffUL);
+ }
+ while(x < xae);
+ *xc = (ULong)carry;
+ }
+ }
+#else
+ for(; xb < xbe; xb++, xc0++) {
+ if ((y = *xb & 0xffff) != 0) {
+ x = xa;
+ xc = xc0;
+ carry = 0;
+ do {
+ z = (*x & 0xffff) * y + (*xc & 0xffff) + carry;
+ carry = z >> 16;
+ z2 = (*x++ >> 16) * y + (*xc >> 16) + carry;
+ carry = z2 >> 16;
+ Storeinc(xc, z2, z);
+ }
+ while(x < xae);
+ *xc = carry;
+ }
+ if ((y = *xb >> 16) != 0) {
+ x = xa;
+ xc = xc0;
+ carry = 0;
+ z2 = *xc;
+ do {
+ z = (*x & 0xffff) * y + (*xc >> 16) + carry;
+ carry = z >> 16;
+ Storeinc(xc, z, z2);
+ z2 = (*x++ >> 16) * y + (*xc & 0xffff) + carry;
+ carry = z2 >> 16;
+ }
+ while(x < xae);
+ *xc = z2;
+ }
+ }
+#endif
+ for(xc0 = c->x, xc = xc0 + wc; wc > 0 && !*--xc; --wc) ;
+ c->wds = wc;
+ return c;
+}
+
+/*
+ * 'p5s' points to a linked list of Bigints that are powers of 5.
+ * This list grows on demand, and it can only grow: it won't change
+ * in any other way. So if we read 'p5s' or the 'next' field of
+ * some Bigint on the list, and it is not NULL, we know it won't
+ * change to NULL or some other value. Only when the value of
+ * 'p5s' or 'next' is NULL do we need to acquire the lock and add
+ * a new Bigint to the list.
+ */
+
+static Bigint *p5s;
+
+#ifdef JS_THREADSAFE
+static PRLock *p5s_lock;
+#endif
+
+/* Return b * 5^k. Deallocate the old b. k must be nonnegative. */
+/* NOTE: old b is deallocated on memory failure. */
+static Bigint *pow5mult(Bigint *b, int32 k)
+{
+ Bigint *b1, *p5, *p51;
+ int32 i;
+ static CONST int32 p05[3] = { 5, 25, 125 };
+
+ if ((i = k & 3) != 0) {
+ b = multadd(b, p05[i-1], 0);
+ if (!b)
+ return NULL;
+ }
+
+ if (!(k >>= 2))
+ return b;
+ if (!(p5 = p5s)) {
+#ifdef JS_THREADSAFE
+ /*
+ * We take great care to not call i2b() and Bfree()
+ * while holding the lock.
+ */
+ Bigint *wasted_effort = NULL;
+ p5 = i2b(625);
+ if (!p5) {
+ Bfree(b);
+ return NULL;
+ }
+ /* lock and check again */
+ PR_Lock(p5s_lock);
+ if (!p5s) {
+ /* first time */
+ p5s = p5;
+ p5->next = 0;
+ } else {
+ /* some other thread just beat us */
+ wasted_effort = p5;
+ p5 = p5s;
+ }
+ PR_Unlock(p5s_lock);
+ if (wasted_effort) {
+ Bfree(wasted_effort);
+ }
+#else
+ /* first time */
+ p5 = p5s = i2b(625);
+ if (!p5) {
+ Bfree(b);
+ return NULL;
+ }
+ p5->next = 0;
+#endif
+ }
+ for(;;) {
+ if (k & 1) {
+ b1 = mult(b, p5);
+ Bfree(b);
+ if (!b1)
+ return NULL;
+ b = b1;
+ }
+ if (!(k >>= 1))
+ break;
+ if (!(p51 = p5->next)) {
+#ifdef JS_THREADSAFE
+ Bigint *wasted_effort = NULL;
+ p51 = mult(p5, p5);
+ if (!p51) {
+ Bfree(b);
+ return NULL;
+ }
+ PR_Lock(p5s_lock);
+ if (!p5->next) {
+ p5->next = p51;
+ p51->next = 0;
+ } else {
+ wasted_effort = p51;
+ p51 = p5->next;
+ }
+ PR_Unlock(p5s_lock);
+ if (wasted_effort) {
+ Bfree(wasted_effort);
+ }
+#else
+ p51 = mult(p5,p5);
+ if (!p51) {
+ Bfree(b);
+ return NULL;
+ }
+ p51->next = 0;
+ p5->next = p51;
+#endif
+ }
+ p5 = p51;
+ }
+ return b;
+}
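+
+/*
+ * Editor's example (added): pow5mult(b, 13) multiplies b by 5 (from p05, since
+ * 13 & 3 == 1) and then by 5^4 and 5^8 taken from the cached p5s chain,
+ * i.e. 5^13 == 5 * 5^4 * 5^8.
+ */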
+
+/* Return b * 2^k. Deallocate the old b. k must be nonnegative.
+ * NOTE: on memory failure, old b is deallocated. */
+static Bigint *lshift(Bigint *b, int32 k)
+{
+ int32 i, k1, n, n1;
+ Bigint *b1;
+ ULong *x, *x1, *xe, z;
+
+ n = k >> 5;
+ k1 = b->k;
+ n1 = n + b->wds + 1;
+ for(i = b->maxwds; n1 > i; i <<= 1)
+ k1++;
+ b1 = Balloc(k1);
+ if (!b1)
+ goto done;
+ x1 = b1->x;
+ for(i = 0; i < n; i++)
+ *x1++ = 0;
+ x = b->x;
+ xe = x + b->wds;
+ if (k &= 0x1f) {
+ k1 = 32 - k;
+ z = 0;
+ do {
+ *x1++ = *x << k | z;
+ z = *x++ >> k1;
+ }
+ while(x < xe);
+ if ((*x1 = z) != 0)
+ ++n1;
+ }
+ else do
+ *x1++ = *x++;
+ while(x < xe);
+ b1->wds = n1 - 1;
+done:
+ Bfree(b);
+ return b1;
+}
+
+/* Return -1, 0, or 1 depending on whether a<b, a==b, or a>b, respectively. */
+static int32 cmp(Bigint *a, Bigint *b)
+{
+ ULong *xa, *xa0, *xb, *xb0;
+ int32 i, j;
+
+ i = a->wds;
+ j = b->wds;
+#ifdef DEBUG
+ if (i > 1 && !a->x[i-1])
+ Bug("cmp called with a->x[a->wds-1] == 0");
+ if (j > 1 && !b->x[j-1])
+ Bug("cmp called with b->x[b->wds-1] == 0");
+#endif
+ if (i -= j)
+ return i;
+ xa0 = a->x;
+ xa = xa0 + j;
+ xb0 = b->x;
+ xb = xb0 + j;
+ for(;;) {
+ if (*--xa != *--xb)
+ return *xa < *xb ? -1 : 1;
+ if (xa <= xa0)
+ break;
+ }
+ return 0;
+}
+
+static Bigint *diff(Bigint *a, Bigint *b)
+{
+ Bigint *c;
+ int32 i, wa, wb;
+ ULong *xa, *xae, *xb, *xbe, *xc;
+#ifdef ULLong
+ ULLong borrow, y;
+#else
+ ULong borrow, y;
+ ULong z;
+#endif
+
+ i = cmp(a,b);
+ if (!i) {
+ c = Balloc(0);
+ if (!c)
+ return NULL;
+ c->wds = 1;
+ c->x[0] = 0;
+ return c;
+ }
+ if (i < 0) {
+ c = a;
+ a = b;
+ b = c;
+ i = 1;
+ }
+ else
+ i = 0;
+ c = Balloc(a->k);
+ if (!c)
+ return NULL;
+ c->sign = i;
+ wa = a->wds;
+ xa = a->x;
+ xae = xa + wa;
+ wb = b->wds;
+ xb = b->x;
+ xbe = xb + wb;
+ xc = c->x;
+ borrow = 0;
+#ifdef ULLong
+ do {
+ y = (ULLong)*xa++ - *xb++ - borrow;
+ borrow = y >> 32 & 1UL;
+ *xc++ = (ULong)(y & 0xffffffffUL);
+ }
+ while(xb < xbe);
+ while(xa < xae) {
+ y = *xa++ - borrow;
+ borrow = y >> 32 & 1UL;
+ *xc++ = (ULong)(y & 0xffffffffUL);
+ }
+#else
+ do {
+ y = (*xa & 0xffff) - (*xb & 0xffff) - borrow;
+ borrow = (y & 0x10000) >> 16;
+ z = (*xa++ >> 16) - (*xb++ >> 16) - borrow;
+ borrow = (z & 0x10000) >> 16;
+ Storeinc(xc, z, y);
+ }
+ while(xb < xbe);
+ while(xa < xae) {
+ y = (*xa & 0xffff) - borrow;
+ borrow = (y & 0x10000) >> 16;
+ z = (*xa++ >> 16) - borrow;
+ borrow = (z & 0x10000) >> 16;
+ Storeinc(xc, z, y);
+ }
+#endif
+ while(!*--xc)
+ wa--;
+ c->wds = wa;
+ return c;
+}
+
+/* Return the absolute difference between x and the adjacent greater-magnitude double number (ignoring exponent overflows). */
+static double ulp(double x)
+{
+ register Long L;
+ double a = 0;
+
+ L = (word0(x) & Exp_mask) - (P-1)*Exp_msk1;
+#ifndef Sudden_Underflow
+ if (L > 0) {
+#endif
+ set_word0(a, L);
+ set_word1(a, 0);
+#ifndef Sudden_Underflow
+ }
+ else {
+ L = -L >> Exp_shift;
+ if (L < Exp_shift) {
+ set_word0(a, 0x80000 >> L);
+ set_word1(a, 0);
+ }
+ else {
+ set_word0(a, 0);
+ L -= Exp_shift;
+ set_word1(a, L >= 31 ? 1 : 1 << (31 - L));
+ }
+ }
+#endif
+ return a;
+}
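+
+/*
+ * Editor's example (added): ulp(1.0) == 2^-52 == DBL_EPSILON, the distance
+ * from 1.0 to the next larger representable double.
+ */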
+
+
+static double b2d(Bigint *a, int32 *e)
+{
+ ULong *xa, *xa0, w, y, z;
+ int32 k;
+ double d = 0;
+#define d0 word0(d)
+#define d1 word1(d)
+#define set_d0(x) set_word0(d, x)
+#define set_d1(x) set_word1(d, x)
+
+ xa0 = a->x;
+ xa = xa0 + a->wds;
+ y = *--xa;
+#ifdef DEBUG
+ if (!y) Bug("zero y in b2d");
+#endif
+ k = hi0bits(y);
+ *e = 32 - k;
+ if (k < Ebits) {
+ set_d0(Exp_1 | y >> (Ebits - k));
+ w = xa > xa0 ? *--xa : 0;
+ set_d1(y << (32-Ebits + k) | w >> (Ebits - k));
+ goto ret_d;
+ }
+ z = xa > xa0 ? *--xa : 0;
+ if (k -= Ebits) {
+ set_d0(Exp_1 | y << k | z >> (32 - k));
+ y = xa > xa0 ? *--xa : 0;
+ set_d1(z << k | y >> (32 - k));
+ }
+ else {
+ set_d0(Exp_1 | y);
+ set_d1(z);
+ }
+ ret_d:
+#undef d0
+#undef d1
+#undef set_d0
+#undef set_d1
+ return d;
+}
+
+
+/* Convert d into the form b*2^e, where b is an odd integer. b is the returned
+ * Bigint, the binary exponent is stored in *e, and the number of significant
+ * bits of b is stored in *bits. d must be finite and nonzero. */
+static Bigint *d2b(double d, int32 *e, int32 *bits)
+{
+ Bigint *b;
+ int32 de, i, k;
+ ULong *x, y, z;
+#define d0 word0(d)
+#define d1 word1(d)
+#define set_d0(x) set_word0(d, x)
+#define set_d1(x) set_word1(d, x)
+
+ b = Balloc(1);
+ if (!b)
+ return NULL;
+ x = b->x;
+
+ z = d0 & Frac_mask;
+ set_d0(d0 & 0x7fffffff); /* clear sign bit, which we ignore */
+#ifdef Sudden_Underflow
+ de = (int32)(d0 >> Exp_shift);
+ z |= Exp_msk11;
+#else
+ if ((de = (int32)(d0 >> Exp_shift)) != 0)
+ z |= Exp_msk1;
+#endif
+ if ((y = d1) != 0) {
+ if ((k = lo0bits(&y)) != 0) {
+ x[0] = y | z << (32 - k);
+ z >>= k;
+ }
+ else
+ x[0] = y;
+ i = b->wds = (x[1] = z) ? 2 : 1;
+ }
+ else {
+ JS_ASSERT(z);
+ k = lo0bits(&z);
+ x[0] = z;
+ i = b->wds = 1;
+ k += 32;
+ }
+#ifndef Sudden_Underflow
+ if (de) {
+#endif
+ *e = de - Bias - (P-1) + k;
+ *bits = P - k;
+#ifndef Sudden_Underflow
+ }
+ else {
+ *e = de - Bias - (P-1) + 1 + k;
+ *bits = 32*i - hi0bits(x[i-1]);
+ }
+#endif
+ return b;
+}
+#undef d0
+#undef d1
+#undef set_d0
+#undef set_d1
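+
+/*
+ * Editor's example (added): d2b(3.0, &e, &bits) yields b == 3, e == 0 and
+ * bits == 2 (3.0 == 3 * 2^0 with two significant bits); d2b(0.5, &e, &bits)
+ * yields b == 1, e == -1 and bits == 1.
+ */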
+
+
+static double ratio(Bigint *a, Bigint *b)
+{
+ double da, db;
+ int32 k, ka, kb;
+
+ da = b2d(a, &ka);
+ db = b2d(b, &kb);
+ k = ka - kb + 32*(a->wds - b->wds);
+ if (k > 0)
+ set_word0(da, word0(da) + k*Exp_msk1);
+ else {
+ k = -k;
+ set_word0(db, word0(db) + k*Exp_msk1);
+ }
+ return da / db;
+}
+
+static CONST double
+tens[] = {
+ 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9,
+ 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19,
+ 1e20, 1e21, 1e22
+};
+
+static CONST double bigtens[] = { 1e16, 1e32, 1e64, 1e128, 1e256 };
+static CONST double tinytens[] = { 1e-16, 1e-32, 1e-64, 1e-128,
+#ifdef Avoid_Underflow
+ 9007199254740992.e-256
+#else
+ 1e-256
+#endif
+ };
+/* The factor of 2^53 in tinytens[4] helps us avoid setting the underflow */
+/* flag unnecessarily. It leads to a song and dance at the end of strtod. */
+#define Scale_Bit 0x10
+#define n_bigtens 5
+
+
+#ifdef INFNAN_CHECK
+
+#ifndef NAN_WORD0
+#define NAN_WORD0 0x7ff80000
+#endif
+
+#ifndef NAN_WORD1
+#define NAN_WORD1 0
+#endif
+
+static int match(CONST char **sp, char *t)
+{
+ int c, d;
+ CONST char *s = *sp;
+
+ while(d = *t++) {
+ if ((c = *++s) >= 'A' && c <= 'Z')
+ c += 'a' - 'A';
+ if (c != d)
+ return 0;
+ }
+ *sp = s + 1;
+ return 1;
+ }
+#endif /* INFNAN_CHECK */
+
+
+#ifdef JS_THREADSAFE
+static JSBool initialized = JS_FALSE;
+
+/* hacked replica of nspr _PR_InitDtoa */
+static void InitDtoa(void)
+{
+ freelist_lock = PR_NewLock();
+ p5s_lock = PR_NewLock();
+ initialized = JS_TRUE;
+}
+#endif
+
+void js_FinishDtoa(void)
+{
+ int count;
+ Bigint *temp;
+
+#ifdef JS_THREADSAFE
+ if (initialized == JS_TRUE) {
+ PR_DestroyLock(freelist_lock);
+ PR_DestroyLock(p5s_lock);
+ initialized = JS_FALSE;
+ }
+#endif
+
+ /* clear down the freelist array and p5s */
+
+ /* static Bigint *freelist[Kmax+1]; */
+ for (count = 0; count <= Kmax; count++) {
+ Bigint **listp = &freelist[count];
+ while ((temp = *listp) != NULL) {
+ *listp = temp->next;
+ free(temp);
+ }
+ freelist[count] = NULL;
+ }
+
+ /* static Bigint *p5s; */
+ while (p5s) {
+ temp = p5s;
+ p5s = p5s->next;
+ free(temp);
+ }
+}
+
+/* nspr2 watcom bug ifdef omitted */
+
+JS_FRIEND_API(double)
+JS_strtod(CONST char *s00, char **se, int *err)
+{
+ int32 scale;
+ int32 bb2, bb5, bbe, bd2, bd5, bbbits, bs2, c, dsign,
+ e, e1, esign, i, j, k, nd, nd0, nf, nz, nz0, sign;
+ CONST char *s, *s0, *s1;
+ double aadj, aadj1, adj, rv, rv0;
+ Long L;
+ ULong y, z;
+ Bigint *bb, *bb1, *bd, *bd0, *bs, *delta;
+
+ *err = 0;
+
+ bb = bd = bs = delta = NULL;
+ sign = nz0 = nz = 0;
+ rv = 0.;
+
+ /* Locking for Balloc's shared buffers that will be used in this block */
+ ACQUIRE_DTOA_LOCK();
+
+ for(s = s00;;s++) switch(*s) {
+ case '-':
+ sign = 1;
+ /* no break */
+ case '+':
+ if (*++s)
+ goto break2;
+ /* no break */
+ case 0:
+ s = s00;
+ goto ret;
+ case '\t':
+ case '\n':
+ case '\v':
+ case '\f':
+ case '\r':
+ case ' ':
+ continue;
+ default:
+ goto break2;
+ }
+break2:
+
+ if (*s == '0') {
+ nz0 = 1;
+ while(*++s == '0') ;
+ if (!*s)
+ goto ret;
+ }
+ s0 = s;
+ y = z = 0;
+ for(nd = nf = 0; (c = *s) >= '0' && c <= '9'; nd++, s++)
+ if (nd < 9)
+ y = 10*y + c - '0';
+ else if (nd < 16)
+ z = 10*z + c - '0';
+ nd0 = nd;
+ if (c == '.') {
+ c = *++s;
+ if (!nd) {
+ for(; c == '0'; c = *++s)
+ nz++;
+ if (c > '0' && c <= '9') {
+ s0 = s;
+ nf += nz;
+ nz = 0;
+ goto have_dig;
+ }
+ goto dig_done;
+ }
+ for(; c >= '0' && c <= '9'; c = *++s) {
+ have_dig:
+ nz++;
+ if (c -= '0') {
+ nf += nz;
+ for(i = 1; i < nz; i++)
+ if (nd++ < 9)
+ y *= 10;
+ else if (nd <= DBL_DIG + 1)
+ z *= 10;
+ if (nd++ < 9)
+ y = 10*y + c;
+ else if (nd <= DBL_DIG + 1)
+ z = 10*z + c;
+ nz = 0;
+ }
+ }
+ }
+dig_done:
+ e = 0;
+ if (c == 'e' || c == 'E') {
+ if (!nd && !nz && !nz0) {
+ s = s00;
+ goto ret;
+ }
+ s00 = s;
+ esign = 0;
+ switch(c = *++s) {
+ case '-':
+ esign = 1;
+ case '+':
+ c = *++s;
+ }
+ if (c >= '0' && c <= '9') {
+ while(c == '0')
+ c = *++s;
+ if (c > '0' && c <= '9') {
+ L = c - '0';
+ s1 = s;
+ while((c = *++s) >= '0' && c <= '9')
+ L = 10*L + c - '0';
+ if (s - s1 > 8 || L > 19999)
+ /* Avoid confusion from exponents
+ * so large that e might overflow.
+ */
+ e = 19999; /* safe for 16 bit ints */
+ else
+ e = (int32)L;
+ if (esign)
+ e = -e;
+ }
+ else
+ e = 0;
+ }
+ else
+ s = s00;
+ }
+ if (!nd) {
+ if (!nz && !nz0) {
+#ifdef INFNAN_CHECK
+ /* Check for Nan and Infinity */
+ switch(c) {
+ case 'i':
+ case 'I':
+ if (match(&s,"nfinity")) {
+ set_word0(rv, 0x7ff00000);
+ set_word1(rv, 0);
+ goto ret;
+ }
+ break;
+ case 'n':
+ case 'N':
+ if (match(&s, "an")) {
+ set_word0(rv, NAN_WORD0);
+ set_word1(rv, NAN_WORD1);
+ goto ret;
+ }
+ }
+#endif /* INFNAN_CHECK */
+ s = s00;
+ }
+ goto ret;
+ }
+ e1 = e -= nf;
+
+ /* Now we have nd0 digits, starting at s0, followed by a
+ * decimal point, followed by nd-nd0 digits. The number we're
+ * after is the integer represented by those digits times
+ * 10**e */
+
+ if (!nd0)
+ nd0 = nd;
+ k = nd < DBL_DIG + 1 ? nd : DBL_DIG + 1;
+ rv = y;
+ if (k > 9)
+ rv = tens[k - 9] * rv + z;
+ bd0 = 0;
+ if (nd <= DBL_DIG
+#ifndef RND_PRODQUOT
+ && FLT_ROUNDS == 1
+#endif
+ ) {
+ if (!e)
+ goto ret;
+ if (e > 0) {
+ if (e <= Ten_pmax) {
+ /* rv = */ rounded_product(rv, tens[e]);
+ goto ret;
+ }
+ i = DBL_DIG - nd;
+ if (e <= Ten_pmax + i) {
+ /* A fancier test would sometimes let us do
+ * this for larger i values.
+ */
+ e -= i;
+ rv *= tens[i];
+ /* rv = */ rounded_product(rv, tens[e]);
+ goto ret;
+ }
+ }
+#ifndef Inaccurate_Divide
+ else if (e >= -Ten_pmax) {
+ /* rv = */ rounded_quotient(rv, tens[-e]);
+ goto ret;
+ }
+#endif
+ }
+ e1 += nd - k;
+
+ scale = 0;
+
+ /* Get starting approximation = rv * 10**e1 */
+
+ if (e1 > 0) {
+ if ((i = e1 & 15) != 0)
+ rv *= tens[i];
+ if (e1 &= ~15) {
+ if (e1 > DBL_MAX_10_EXP) {
+ ovfl:
+ *err = JS_DTOA_ERANGE;
+#ifdef __STDC__
+ rv = HUGE_VAL;
+#else
+ /* Can't trust HUGE_VAL */
+ set_word0(rv, Exp_mask);
+ set_word1(rv, 0);
+#endif
+ if (bd0)
+ goto retfree;
+ goto ret;
+ }
+ e1 >>= 4;
+ for(j = 0; e1 > 1; j++, e1 >>= 1)
+ if (e1 & 1)
+ rv *= bigtens[j];
+ /* The last multiplication could overflow. */
+ set_word0(rv, word0(rv) - P*Exp_msk1);
+ rv *= bigtens[j];
+ if ((z = word0(rv) & Exp_mask) > Exp_msk1*(DBL_MAX_EXP+Bias-P))
+ goto ovfl;
+ if (z > Exp_msk1*(DBL_MAX_EXP+Bias-1-P)) {
+ /* set to largest number */
+ /* (Can't trust DBL_MAX) */
+ set_word0(rv, Big0);
+ set_word1(rv, Big1);
+ }
+ else
+ set_word0(rv, word0(rv) + P*Exp_msk1);
+ }
+ }
+ else if (e1 < 0) {
+ e1 = -e1;
+ if ((i = e1 & 15) != 0)
+ rv /= tens[i];
+ if (e1 &= ~15) {
+ e1 >>= 4;
+ if (e1 >= 1 << n_bigtens)
+ goto undfl;
+#ifdef Avoid_Underflow
+ if (e1 & Scale_Bit)
+ scale = P;
+ for(j = 0; e1 > 0; j++, e1 >>= 1)
+ if (e1 & 1)
+ rv *= tinytens[j];
+ if (scale && (j = P + 1 - ((word0(rv) & Exp_mask)
+ >> Exp_shift)) > 0) {
+ /* scaled rv is denormal; zap j low bits */
+ if (j >= 32) {
+ set_word1(rv, 0);
+ set_word0(rv, word0(rv) & (0xffffffff << (j-32)));
+ if (!word0(rv))
+ set_word0(rv, 1);
+ }
+ else
+ set_word1(rv, word1(rv) & (0xffffffff << j));
+ }
+#else
+ for(j = 0; e1 > 1; j++, e1 >>= 1)
+ if (e1 & 1)
+ rv *= tinytens[j];
+ /* The last multiplication could underflow. */
+ rv0 = rv;
+ rv *= tinytens[j];
+ if (!rv) {
+ rv = 2.*rv0;
+ rv *= tinytens[j];
+#endif
+ if (!rv) {
+ undfl:
+ rv = 0.;
+ *err = JS_DTOA_ERANGE;
+ if (bd0)
+ goto retfree;
+ goto ret;
+ }
+#ifndef Avoid_Underflow
+ set_word0(rv, Tiny0);
+ set_word1(rv, Tiny1);
+ /* The refinement below will clean
+ * this approximation up.
+ */
+ }
+#endif
+ }
+ }
+
+ /* Now the hard part -- adjusting rv to the correct value.*/
+
+ /* Put digits into bd: true value = bd * 10^e */
+
+ bd0 = s2b(s0, nd0, nd, y);
+ if (!bd0)
+ goto nomem;
+
+ for(;;) {
+ bd = Balloc(bd0->k);
+ if (!bd)
+ goto nomem;
+ Bcopy(bd, bd0);
+ bb = d2b(rv, &bbe, &bbbits); /* rv = bb * 2^bbe */
+ if (!bb)
+ goto nomem;
+ bs = i2b(1);
+ if (!bs)
+ goto nomem;
+
+ if (e >= 0) {
+ bb2 = bb5 = 0;
+ bd2 = bd5 = e;
+ }
+ else {
+ bb2 = bb5 = -e;
+ bd2 = bd5 = 0;
+ }
+ if (bbe >= 0)
+ bb2 += bbe;
+ else
+ bd2 -= bbe;
+ bs2 = bb2;
+#ifdef Sudden_Underflow
+ j = P + 1 - bbbits;
+#else
+#ifdef Avoid_Underflow
+ j = bbe - scale;
+#else
+ j = bbe;
+#endif
+ i = j + bbbits - 1; /* logb(rv) */
+ if (i < Emin) /* denormal */
+ j += P - Emin;
+ else
+ j = P + 1 - bbbits;
+#endif
+ bb2 += j;
+ bd2 += j;
+#ifdef Avoid_Underflow
+ bd2 += scale;
+#endif
+ i = bb2 < bd2 ? bb2 : bd2;
+ if (i > bs2)
+ i = bs2;
+ if (i > 0) {
+ bb2 -= i;
+ bd2 -= i;
+ bs2 -= i;
+ }
+ if (bb5 > 0) {
+ bs = pow5mult(bs, bb5);
+ if (!bs)
+ goto nomem;
+ bb1 = mult(bs, bb);
+ if (!bb1)
+ goto nomem;
+ Bfree(bb);
+ bb = bb1;
+ }
+ if (bb2 > 0) {
+ bb = lshift(bb, bb2);
+ if (!bb)
+ goto nomem;
+ }
+ if (bd5 > 0) {
+ bd = pow5mult(bd, bd5);
+ if (!bd)
+ goto nomem;
+ }
+ if (bd2 > 0) {
+ bd = lshift(bd, bd2);
+ if (!bd)
+ goto nomem;
+ }
+ if (bs2 > 0) {
+ bs = lshift(bs, bs2);
+ if (!bs)
+ goto nomem;
+ }
+ delta = diff(bb, bd);
+ if (!delta)
+ goto nomem;
+ dsign = delta->sign;
+ delta->sign = 0;
+ i = cmp(delta, bs);
+ if (i < 0) {
+ /* Error is less than half an ulp -- check for
+ * special case of mantissa a power of two.
+ */
+ if (dsign || word1(rv) || word0(rv) & Bndry_mask
+#ifdef Avoid_Underflow
+ || (word0(rv) & Exp_mask) <= Exp_msk1 + P*Exp_msk1
+#else
+ || (word0(rv) & Exp_mask) <= Exp_msk1
+#endif
+ ) {
+#ifdef Avoid_Underflow
+ if (!delta->x[0] && delta->wds == 1)
+ dsign = 2;
+#endif
+ break;
+ }
+ delta = lshift(delta,Log2P);
+ if (!delta)
+ goto nomem;
+ if (cmp(delta, bs) > 0)
+ goto drop_down;
+ break;
+ }
+ if (i == 0) {
+ /* exactly half-way between */
+ if (dsign) {
+ if ((word0(rv) & Bndry_mask1) == Bndry_mask1
+ && word1(rv) == 0xffffffff) {
+ /*boundary case -- increment exponent*/
+ set_word0(rv, (word0(rv) & Exp_mask) + Exp_msk1);
+ set_word1(rv, 0);
+#ifdef Avoid_Underflow
+ dsign = 0;
+#endif
+ break;
+ }
+ }
+ else if (!(word0(rv) & Bndry_mask) && !word1(rv)) {
+#ifdef Avoid_Underflow
+ dsign = 2;
+#endif
+ drop_down:
+ /* boundary case -- decrement exponent */
+#ifdef Sudden_Underflow
+ L = word0(rv) & Exp_mask;
+ if (L <= Exp_msk1)
+ goto undfl;
+ L -= Exp_msk1;
+#else
+ L = (word0(rv) & Exp_mask) - Exp_msk1;
+#endif
+ set_word0(rv, L | Bndry_mask1);
+ set_word1(rv, 0xffffffff);
+ break;
+ }
+#ifndef ROUND_BIASED
+ if (!(word1(rv) & LSB))
+ break;
+#endif
+ if (dsign)
+ rv += ulp(rv);
+#ifndef ROUND_BIASED
+ else {
+ rv -= ulp(rv);
+#ifndef Sudden_Underflow
+ if (!rv)
+ goto undfl;
+#endif
+ }
+#ifdef Avoid_Underflow
+ dsign = 1 - dsign;
+#endif
+#endif
+ break;
+ }
+ if ((aadj = ratio(delta, bs)) <= 2.) {
+ if (dsign)
+ aadj = aadj1 = 1.;
+ else if (word1(rv) || word0(rv) & Bndry_mask) {
+#ifndef Sudden_Underflow
+ if (word1(rv) == Tiny1 && !word0(rv))
+ goto undfl;
+#endif
+ aadj = 1.;
+ aadj1 = -1.;
+ }
+ else {
+ /* special case -- power of FLT_RADIX to be */
+ /* rounded down... */
+
+ if (aadj < 2./FLT_RADIX)
+ aadj = 1./FLT_RADIX;
+ else
+ aadj *= 0.5;
+ aadj1 = -aadj;
+ }
+ }
+ else {
+ aadj *= 0.5;
+ aadj1 = dsign ? aadj : -aadj;
+#ifdef Check_FLT_ROUNDS
+ switch(FLT_ROUNDS) {
+ case 2: /* towards +infinity */
+ aadj1 -= 0.5;
+ break;
+ case 0: /* towards 0 */
+ case 3: /* towards -infinity */
+ aadj1 += 0.5;
+ }
+#else
+ if (FLT_ROUNDS == 0)
+ aadj1 += 0.5;
+#endif
+ }
+ y = word0(rv) & Exp_mask;
+
+ /* Check for overflow */
+
+ if (y == Exp_msk1*(DBL_MAX_EXP+Bias-1)) {
+ rv0 = rv;
+ set_word0(rv, word0(rv) - P*Exp_msk1);
+ adj = aadj1 * ulp(rv);
+ rv += adj;
+ if ((word0(rv) & Exp_mask) >=
+ Exp_msk1*(DBL_MAX_EXP+Bias-P)) {
+ if (word0(rv0) == Big0 && word1(rv0) == Big1)
+ goto ovfl;
+ set_word0(rv, Big0);
+ set_word1(rv, Big1);
+ goto cont;
+ }
+ else
+ set_word0(rv, word0(rv) + P*Exp_msk1);
+ }
+ else {
+#ifdef Sudden_Underflow
+ if ((word0(rv) & Exp_mask) <= P*Exp_msk1) {
+ rv0 = rv;
+ set_word0(rv, word0(rv) + P*Exp_msk1);
+ adj = aadj1 * ulp(rv);
+ rv += adj;
+ if ((word0(rv) & Exp_mask) <= P*Exp_msk1)
+ {
+ if (word0(rv0) == Tiny0
+ && word1(rv0) == Tiny1)
+ goto undfl;
+ set_word0(rv, Tiny0);
+ set_word1(rv, Tiny1);
+ goto cont;
+ }
+ else
+ set_word0(rv, word0(rv) - P*Exp_msk1);
+ }
+ else {
+ adj = aadj1 * ulp(rv);
+ rv += adj;
+ }
+#else
+ /* Compute adj so that the IEEE rounding rules will
+ * correctly round rv + adj in some half-way cases.
+ * If rv * ulp(rv) is denormalized (i.e.,
+ * y <= (P-1)*Exp_msk1), we must adjust aadj to avoid
+ * trouble from bits lost to denormalization;
+ * example: 1.2e-307 .
+ */
+#ifdef Avoid_Underflow
+ if (y <= P*Exp_msk1 && aadj > 1.)
+#else
+ if (y <= (P-1)*Exp_msk1 && aadj > 1.)
+#endif
+ {
+ aadj1 = (double)(int32)(aadj + 0.5);
+ if (!dsign)
+ aadj1 = -aadj1;
+ }
+#ifdef Avoid_Underflow
+ if (scale && y <= P*Exp_msk1)
+ set_word0(aadj1, word0(aadj1) + (P+1)*Exp_msk1 - y);
+#endif
+ adj = aadj1 * ulp(rv);
+ rv += adj;
+#endif
+ }
+ z = word0(rv) & Exp_mask;
+#ifdef Avoid_Underflow
+ if (!scale)
+#endif
+ if (y == z) {
+ /* Can we stop now? */
+ L = (Long)aadj;
+ aadj -= L;
+ /* The tolerances below are conservative. */
+ if (dsign || word1(rv) || word0(rv) & Bndry_mask) {
+ if (aadj < .4999999 || aadj > .5000001)
+ break;
+ }
+ else if (aadj < .4999999/FLT_RADIX)
+ break;
+ }
+ cont:
+ Bfree(bb);
+ Bfree(bd);
+ Bfree(bs);
+ Bfree(delta);
+ bb = bd = bs = delta = NULL;
+ }
+#ifdef Avoid_Underflow
+ if (scale) {
+ rv0 = 0.;
+ set_word0(rv0, Exp_1 - P*Exp_msk1);
+ set_word1(rv0, 0);
+ if ((word0(rv) & Exp_mask) <= P*Exp_msk1
+ && word1(rv) & 1
+ && dsign != 2) {
+ if (dsign) {
+#ifdef Sudden_Underflow
+ /* rv will be 0, but this would give the */
+ /* right result if only rv *= rv0 worked. */
+ set_word0(rv, word0(rv) + P*Exp_msk1);
+ set_word0(rv0, Exp_1 - 2*P*Exp_msk1);
+#endif
+ rv += ulp(rv);
+ }
+ else
+ set_word1(rv, word1(rv) & ~1);
+ }
+ rv *= rv0;
+ }
+#endif /* Avoid_Underflow */
+retfree:
+ Bfree(bb);
+ Bfree(bd);
+ Bfree(bs);
+ Bfree(bd0);
+ Bfree(delta);
+ret:
+ RELEASE_DTOA_LOCK();
+ if (se)
+ *se = (char *)s;
+ return sign ? -rv : rv;
+
+nomem:
+ Bfree(bb);
+ Bfree(bd);
+ Bfree(bs);
+ Bfree(bd0);
+ Bfree(delta);
+ RELEASE_DTOA_LOCK();
+ *err = JS_DTOA_ENOMEM;
+ return 0;
+}
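+
+/*
+ * Editor's usage sketch (not part of the original source): how a caller might
+ * drive JS_strtod. Compiled out; the function name is illustrative.
+ */
+#if 0
+static double example_strtod(void)
+{
+    CONST char *text = "  3.14159e2xyz";
+    char *end;
+    int err;
+    double v;
+
+    v = JS_strtod(text, &end, &err);
+    /* On success err == 0, v is (approximately) 314.159 and end points at
+     * "xyz". err becomes JS_DTOA_ERANGE on overflow/underflow and
+     * JS_DTOA_ENOMEM if a Bigint allocation fails. */
+    return err ? 0.0 : v;
+}
+#endif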
+
+
+/* Return floor(b/2^k) and set b to be the remainder. The returned quotient must be less than 2^32. */
+static uint32 quorem2(Bigint *b, int32 k)
+{
+ ULong mask;
+ ULong result;
+ ULong *bx, *bxe;
+ int32 w;
+ int32 n = k >> 5;
+ k &= 0x1F;
+ mask = (1<<k) - 1;
+
+ w = b->wds - n;
+ if (w <= 0)
+ return 0;
+ JS_ASSERT(w <= 2);
+ bx = b->x;
+ bxe = bx + n;
+ result = *bxe >> k;
+ *bxe &= mask;
+ if (w == 2) {
+ JS_ASSERT(!(bxe[1] & ~mask));
+ if (k)
+ result |= bxe[1] << (32 - k);
+ }
+ n++;
+ while (!*bxe && bxe != bx) {
+ n--;
+ bxe--;
+ }
+ b->wds = n;
+ return result;
+}
+
+/* Return floor(b/S) and set b to be the remainder. As added restrictions, b must not have
+ * more words than S, the most significant word of S must not start with a 1 bit, and the
+ * returned quotient must be less than 36. */
+static int32 quorem(Bigint *b, Bigint *S)
+{
+ int32 n;
+ ULong *bx, *bxe, q, *sx, *sxe;
+#ifdef ULLong
+ ULLong borrow, carry, y, ys;
+#else
+ ULong borrow, carry, y, ys;
+ ULong si, z, zs;
+#endif
+
+ n = S->wds;
+ JS_ASSERT(b->wds <= n);
+ if (b->wds < n)
+ return 0;
+ sx = S->x;
+ sxe = sx + --n;
+ bx = b->x;
+ bxe = bx + n;
+ JS_ASSERT(*sxe <= 0x7FFFFFFF);
+ q = *bxe / (*sxe + 1); /* ensure q <= true quotient */
+ JS_ASSERT(q < 36);
+ if (q) {
+ borrow = 0;
+ carry = 0;
+ do {
+#ifdef ULLong
+ ys = *sx++ * (ULLong)q + carry;
+ carry = ys >> 32;
+ y = *bx - (ys & 0xffffffffUL) - borrow;
+ borrow = y >> 32 & 1UL;
+ *bx++ = (ULong)(y & 0xffffffffUL);
+#else
+ si = *sx++;
+ ys = (si & 0xffff) * q + carry;
+ zs = (si >> 16) * q + (ys >> 16);
+ carry = zs >> 16;
+ y = (*bx & 0xffff) - (ys & 0xffff) - borrow;
+ borrow = (y & 0x10000) >> 16;
+ z = (*bx >> 16) - (zs & 0xffff) - borrow;
+ borrow = (z & 0x10000) >> 16;
+ Storeinc(bx, z, y);
+#endif
+ }
+ while(sx <= sxe);
+ if (!*bxe) {
+ bx = b->x;
+ while(--bxe > bx && !*bxe)
+ --n;
+ b->wds = n;
+ }
+ }
+ if (cmp(b, S) >= 0) {
+ q++;
+ borrow = 0;
+ carry = 0;
+ bx = b->x;
+ sx = S->x;
+ do {
+#ifdef ULLong
+ ys = *sx++ + carry;
+ carry = ys >> 32;
+ y = *bx - (ys & 0xffffffffUL) - borrow;
+ borrow = y >> 32 & 1UL;
+ *bx++ = (ULong)(y & 0xffffffffUL);
+#else
+ si = *sx++;
+ ys = (si & 0xffff) + carry;
+ zs = (si >> 16) + (ys >> 16);
+ carry = zs >> 16;
+ y = (*bx & 0xffff) - (ys & 0xffff) - borrow;
+ borrow = (y & 0x10000) >> 16;
+ z = (*bx >> 16) - (zs & 0xffff) - borrow;
+ borrow = (z & 0x10000) >> 16;
+ Storeinc(bx, z, y);
+#endif
+ } while(sx <= sxe);
+ bx = b->x;
+ bxe = bx + n;
+ if (!*bxe) {
+ while(--bxe > bx && !*bxe)
+ --n;
+ b->wds = n;
+ }
+ }
+ return (int32)q;
+}
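+
+/*
+ * Editor's note (added): in the digit-generation loops of js_dtoa() below,
+ * quorem(b, S) extracts one output digit from the fraction b/S and leaves the
+ * remainder in b; b is then multiplied by 10 for the next digit.
+ */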
+
+/* dtoa for IEEE arithmetic (dmg): convert double to ASCII string.
+ *
+ * Inspired by "How to Print Floating-Point Numbers Accurately" by
+ * Guy L. Steele, Jr. and Jon L. White [Proc. ACM SIGPLAN '90, pp. 92-101].
+ *
+ * Modifications:
+ * 1. Rather than iterating, we use a simple numeric overestimate
+ * to determine k = floor(log10(d)). We scale relevant
+ * quantities using O(log2(k)) rather than O(k) multiplications.
+ * 2. For some modes > 2 (corresponding to ecvt and fcvt), we don't
+ * try to generate digits strictly left to right. Instead, we
+ * compute with fewer bits and propagate the carry if necessary
+ * when rounding the final digit up. This is often faster.
+ * 3. Under the assumption that input will be rounded nearest,
+ * mode 0 renders 1e23 as 1e23 rather than 9.999999999999999e22.
+ * That is, we allow equality in stopping tests when the
+ * round-nearest rule will give the same floating-point value
+ * as would satisfaction of the stopping test with strict
+ * inequality.
+ * 4. We remove common factors of powers of 2 from relevant
+ * quantities.
+ * 5. When converting floating-point integers less than 1e16,
+ * we use floating-point arithmetic rather than resorting
+ * to multiple-precision integers.
+ * 6. When asked to produce fewer than 15 digits, we first try
+ * to get by with floating-point arithmetic; we resort to
+ * multiple-precision integer arithmetic only if we cannot
+ * guarantee that the floating-point calculation has given
+ * the correctly rounded result. For k requested digits and
+ * "uniformly" distributed input, the probability is
+ * something like 10^(k-15) that we must resort to the Long
+ * calculation.
+ */
+
+/* Always emits at least one digit. */
+/* If biasUp is set, then rounding in modes 2 and 3 will round away from zero
+ * when the number is exactly halfway between two representable values. For example,
+ * rounding 2.5 to zero digits after the decimal point will return 3 and not 2.
+ * 2.49 will still round to 2, and 2.51 will still round to 3. */
+/* bufsize should be at least 20 for modes 0 and 1. For the other modes,
+ * bufsize should be two greater than the maximum number of output characters expected. */
+static JSBool
+js_dtoa(double d, int mode, JSBool biasUp, int ndigits,
+ int *decpt, int *sign, char **rve, char *buf, size_t bufsize)
+{
+ /* Arguments ndigits, decpt, sign are similar to those
+ of ecvt and fcvt; trailing zeros are suppressed from
+ the returned string. If not null, *rve is set to point
+ to the end of the return value. If d is +-Infinity or NaN,
+ then *decpt is set to 9999.
+
+ mode:
+ 0 ==> shortest string that yields d when read in
+ and rounded to nearest.
+ 1 ==> like 0, but with Steele & White stopping rule;
+ e.g. with IEEE P754 arithmetic, mode 0 gives
+ 1e23 whereas mode 1 gives 9.999999999999999e22.
+ 2 ==> max(1,ndigits) significant digits. This gives a
+ return value similar to that of ecvt, except
+ that trailing zeros are suppressed.
+ 3 ==> through ndigits past the decimal point. This
+ gives a return value similar to that from fcvt,
+ except that trailing zeros are suppressed, and
+ ndigits can be negative.
+ 4-9 should give the same return values as 2-3, i.e.,
+ 4 <= mode <= 9 ==> same return as mode
+ 2 + (mode & 1). These modes are mainly for
+ debugging; often they run slower but sometimes
+ faster than modes 2-3.
+ 4,5,8,9 ==> left-to-right digit generation.
+ 6-9 ==> don't try fast floating-point estimate
+ (if applicable).
+
+ Values of mode other than 0-9 are treated as mode 0.
+
+ Sufficient space is allocated to the return value
+ to hold the suppressed trailing zeros.
+ */
+
+ int32 bbits, b2, b5, be, dig, i, ieps, ilim, ilim0, ilim1,
+ j, j1, k, k0, k_check, leftright, m2, m5, s2, s5,
+ spec_case, try_quick;
+ Long L;
+#ifndef Sudden_Underflow
+ int32 denorm;
+ ULong x;
+#endif
+ Bigint *b, *b1, *delta, *mlo, *mhi, *S;
+ double d2, ds, eps;
+ char *s;
+
+ if (word0(d) & Sign_bit) {
+ /* set sign for everything, including 0's and NaNs */
+ *sign = 1;
+ set_word0(d, word0(d) & ~Sign_bit); /* clear sign bit */
+ }
+ else
+ *sign = 0;
+
+ if ((word0(d) & Exp_mask) == Exp_mask) {
+ /* Infinity or NaN */
+ *decpt = 9999;
+ s = !word1(d) && !(word0(d) & Frac_mask) ? "Infinity" : "NaN";
+ if ((s[0] == 'I' && bufsize < 9) || (s[0] == 'N' && bufsize < 4)) {
+ JS_ASSERT(JS_FALSE);
+/* JS_SetError(JS_BUFFER_OVERFLOW_ERROR, 0); */
+ return JS_FALSE;
+ }
+ strcpy(buf, s);
+ if (rve) {
+ *rve = buf[3] ? buf + 8 : buf + 3;
+ JS_ASSERT(**rve == '\0');
+ }
+ return JS_TRUE;
+ }
+
+ b = NULL; /* initialize for abort protection */
+ S = NULL;
+ mlo = mhi = NULL;
+
+ if (!d) {
+ no_digits:
+ *decpt = 1;
+ if (bufsize < 2) {
+ JS_ASSERT(JS_FALSE);
+/* JS_SetError(JS_BUFFER_OVERFLOW_ERROR, 0); */
+ return JS_FALSE;
+ }
+ buf[0] = '0'; buf[1] = '\0'; /* copy "0" to buffer */
+ if (rve)
+ *rve = buf + 1;
+ /* We might have jumped to "no_digits" from below, so we need
+ * to be sure to free the potentially allocated Bigints to avoid
+ * memory leaks. */
+ Bfree(b);
+ Bfree(S);
+ if (mlo != mhi)
+ Bfree(mlo);
+ Bfree(mhi);
+ return JS_TRUE;
+ }
+
+ b = d2b(d, &be, &bbits);
+ if (!b)
+ goto nomem;
+#ifdef Sudden_Underflow
+ i = (int32)(word0(d) >> Exp_shift1 & (Exp_mask>>Exp_shift1));
+#else
+ if ((i = (int32)(word0(d) >> Exp_shift1 & (Exp_mask>>Exp_shift1))) != 0) {
+#endif
+ d2 = d;
+ set_word0(d2, word0(d2) & Frac_mask1);
+ set_word0(d2, word0(d2) | Exp_11);
+
+ /* log(x) ~=~ log(1.5) + (x-1.5)/1.5
+ * log10(x) = log(x) / log(10)
+ * ~=~ log(1.5)/log(10) + (x-1.5)/(1.5*log(10))
+ * log10(d) = (i-Bias)*log(2)/log(10) + log10(d2)
+ *
+ * This suggests computing an approximation k to log10(d) by
+ *
+ * k = (i - Bias)*0.301029995663981
+ * + ( (d2-1.5)*0.289529654602168 + 0.176091259055681 );
+ *
+ * We want k to be too large rather than too small.
+ * The error in the first-order Taylor series approximation
+ * is in our favor, so we just round up the constant enough
+ * to compensate for any error in the multiplication of
+ * (i - Bias) by 0.301029995663981; since |i - Bias| <= 1077,
+ * and 1077 * 0.30103 * 2^-52 ~=~ 7.2e-14,
+ * adding 1e-13 to the constant term more than suffices.
+ * Hence we adjust the constant term to 0.1760912590558.
+ * (We could get a more accurate k by invoking log10,
+ * but this is probably not worthwhile.)
+ */
+
+ i -= Bias;
+#ifndef Sudden_Underflow
+ denorm = 0;
+ }
+ else {
+ /* d is denormalized */
+
+ i = bbits + be + (Bias + (P-1) - 1);
+ x = i > 32 ? word0(d) << (64 - i) | word1(d) >> (i - 32) : word1(d) << (32 - i);
+ d2 = x;
+ set_word0(d2, word0(d2) - 31*Exp_msk1); /* adjust exponent */
+ i -= (Bias + (P-1) - 1) + 1;
+ denorm = 1;
+ }
+#endif
+ /* At this point d = f*2^i, where 1 <= f < 2. d2 is an approximation of f. */
+ ds = (d2-1.5)*0.289529654602168 + 0.1760912590558 + i*0.301029995663981;
+ k = (int32)ds;
+ if (ds < 0. && ds != k)
+ k--; /* want k = floor(ds) */
+ k_check = 1;
+ if (k >= 0 && k <= Ten_pmax) {
+ if (d < tens[k])
+ k--;
+ k_check = 0;
+ }
+ /* At this point floor(log10(d)) <= k <= floor(log10(d))+1.
+ If k_check is zero, we're guaranteed that k = floor(log10(d)). */
+ j = bbits - i - 1;
+ /* At this point d = b/2^j, where b is an odd integer. */
+ if (j >= 0) {
+ b2 = 0;
+ s2 = j;
+ }
+ else {
+ b2 = -j;
+ s2 = 0;
+ }
+ if (k >= 0) {
+ b5 = 0;
+ s5 = k;
+ s2 += k;
+ }
+ else {
+ b2 -= k;
+ b5 = -k;
+ s5 = 0;
+ }
+ /* At this point d/10^k = (b * 2^b2 * 5^b5) / (2^s2 * 5^s5), where b is an odd integer,
+ b2 >= 0, b5 >= 0, s2 >= 0, and s5 >= 0. */
+ if (mode < 0 || mode > 9)
+ mode = 0;
+ try_quick = 1;
+ if (mode > 5) {
+ mode -= 4;
+ try_quick = 0;
+ }
+ leftright = 1;
+ ilim = ilim1 = 0;
+ switch(mode) {
+ case 0:
+ case 1:
+ ilim = ilim1 = -1;
+ i = 18;
+ ndigits = 0;
+ break;
+ case 2:
+ leftright = 0;
+ /* no break */
+ case 4:
+ if (ndigits <= 0)
+ ndigits = 1;
+ ilim = ilim1 = i = ndigits;
+ break;
+ case 3:
+ leftright = 0;
+ /* no break */
+ case 5:
+ i = ndigits + k + 1;
+ ilim = i;
+ ilim1 = i - 1;
+ if (i <= 0)
+ i = 1;
+ }
+ /* ilim is the maximum number of significant digits we want, based on k and ndigits. */
+ /* ilim1 is the maximum number of significant digits we want, based on k and ndigits,
+ when it turns out that k was computed too high by one. */
+
+ /* Ensure space for at least i+1 characters, including trailing null. */
+ if (bufsize <= (size_t)i) {
+ Bfree(b);
+ JS_ASSERT(JS_FALSE);
+ return JS_FALSE;
+ }
+ s = buf;
+
+ if (ilim >= 0 && ilim <= Quick_max && try_quick) {
+
+ /* Try to get by with floating-point arithmetic. */
+
+ i = 0;
+ d2 = d;
+ k0 = k;
+ ilim0 = ilim;
+ ieps = 2; /* conservative */
+ /* Divide d by 10^k, keeping track of the roundoff error and avoiding overflows. */
+ if (k > 0) {
+ ds = tens[k&0xf];
+ j = k >> 4;
+ if (j & Bletch) {
+ /* prevent overflows */
+ j &= Bletch - 1;
+ d /= bigtens[n_bigtens-1];
+ ieps++;
+ }
+ for(; j; j >>= 1, i++)
+ if (j & 1) {
+ ieps++;
+ ds *= bigtens[i];
+ }
+ d /= ds;
+ }
+ else if ((j1 = -k) != 0) {
+ d *= tens[j1 & 0xf];
+ for(j = j1 >> 4; j; j >>= 1, i++)
+ if (j & 1) {
+ ieps++;
+ d *= bigtens[i];
+ }
+ }
+ /* Check that k was computed correctly. */
+ if (k_check && d < 1. && ilim > 0) {
+ if (ilim1 <= 0)
+ goto fast_failed;
+ ilim = ilim1;
+ k--;
+ d *= 10.;
+ ieps++;
+ }
+ /* eps bounds the cumulative error. */
+ eps = ieps*d + 7.;
+ set_word0(eps, word0(eps) - (P-1)*Exp_msk1);
+ if (ilim == 0) {
+ S = mhi = 0;
+ d -= 5.;
+ if (d > eps)
+ goto one_digit;
+ if (d < -eps)
+ goto no_digits;
+ goto fast_failed;
+ }
+#ifndef No_leftright
+ if (leftright) {
+ /* Use Steele & White method of only
+ * generating digits needed.
+ */
+ eps = 0.5/tens[ilim-1] - eps;
+ for(i = 0;;) {
+ L = (Long)d;
+ d -= L;
+ *s++ = '0' + (char)L;
+ if (d < eps)
+ goto ret1;
+ if (1. - d < eps)
+ goto bump_up;
+ if (++i >= ilim)
+ break;
+ eps *= 10.;
+ d *= 10.;
+ }
+ }
+ else {
+#endif
+ /* Generate ilim digits, then fix them up. */
+ eps *= tens[ilim-1];
+ for(i = 1;; i++, d *= 10.) {
+ L = (Long)d;
+ d -= L;
+ *s++ = '0' + (char)L;
+ if (i == ilim) {
+ if (d > 0.5 + eps)
+ goto bump_up;
+ else if (d < 0.5 - eps) {
+ while(*--s == '0') ;
+ s++;
+ goto ret1;
+ }
+ break;
+ }
+ }
+#ifndef No_leftright
+ }
+#endif
+ fast_failed:
+ s = buf;
+ d = d2;
+ k = k0;
+ ilim = ilim0;
+ }
+
+ /* Do we have a "small" integer? */
+
+ if (be >= 0 && k <= Int_max) {
+ /* Yes. */
+ ds = tens[k];
+ if (ndigits < 0 && ilim <= 0) {
+ S = mhi = 0;
+ if (ilim < 0 || d < 5*ds || (!biasUp && d == 5*ds))
+ goto no_digits;
+ goto one_digit;
+ }
+
+ /* Use true number of digits to limit looping. */
+ for(i = 1; i<=k+1; i++) {
+ L = (Long) (d / ds);
+ d -= L*ds;
+#ifdef Check_FLT_ROUNDS
+ /* If FLT_ROUNDS == 2, L will usually be high by 1 */
+ if (d < 0) {
+ L--;
+ d += ds;
+ }
+#endif
+ *s++ = '0' + (char)L;
+ if (i == ilim) {
+ d += d;
+ if ((d > ds) || (d == ds && (L & 1 || biasUp))) {
+ bump_up:
+ while(*--s == '9')
+ if (s == buf) {
+ k++;
+ *s = '0';
+ break;
+ }
+ ++*s++;
+ }
+ break;
+ }
+ d *= 10.;
+ }
+ goto ret1;
+ }
+
+ m2 = b2;
+ m5 = b5;
+ if (leftright) {
+ if (mode < 2) {
+ i =
+#ifndef Sudden_Underflow
+ denorm ? be + (Bias + (P-1) - 1 + 1) :
+#endif
+ 1 + P - bbits;
+ /* i is 1 plus the number of trailing zero bits in d's significand. Thus,
+ (2^m2 * 5^m5) / (2^(s2+i) * 5^s5) = (1/2 lsb of d)/10^k. */
+ }
+ else {
+ j = ilim - 1;
+ if (m5 >= j)
+ m5 -= j;
+ else {
+ s5 += j -= m5;
+ b5 += j;
+ m5 = 0;
+ }
+ if ((i = ilim) < 0) {
+ m2 -= i;
+ i = 0;
+ }
+ /* (2^m2 * 5^m5) / (2^(s2+i) * 5^s5) = (1/2 * 10^(1-ilim))/10^k. */
+ }
+ b2 += i;
+ s2 += i;
+ mhi = i2b(1);
+ if (!mhi)
+ goto nomem;
+ /* (mhi * 2^m2 * 5^m5) / (2^s2 * 5^s5) = one-half of last printed (when mode >= 2) or
+ input (when mode < 2) significant digit, divided by 10^k. */
+ }
+ /* We still have d/10^k = (b * 2^b2 * 5^b5) / (2^s2 * 5^s5). Reduce common factors in
+ b2, m2, and s2 without changing the equalities. */
+ if (m2 > 0 && s2 > 0) {
+ i = m2 < s2 ? m2 : s2;
+ b2 -= i;
+ m2 -= i;
+ s2 -= i;
+ }
+
+ /* Fold b5 into b and m5 into mhi. */
+ if (b5 > 0) {
+ if (leftright) {
+ if (m5 > 0) {
+ mhi = pow5mult(mhi, m5);
+ if (!mhi)
+ goto nomem;
+ b1 = mult(mhi, b);
+ if (!b1)
+ goto nomem;
+ Bfree(b);
+ b = b1;
+ }
+ if ((j = b5 - m5) != 0) {
+ b = pow5mult(b, j);
+ if (!b)
+ goto nomem;
+ }
+ }
+ else {
+ b = pow5mult(b, b5);
+ if (!b)
+ goto nomem;
+ }
+ }
+ /* Now we have d/10^k = (b * 2^b2) / (2^s2 * 5^s5) and
+ (mhi * 2^m2) / (2^s2 * 5^s5) = one-half of last printed or input significant digit, divided by 10^k. */
+
+ S = i2b(1);
+ if (!S)
+ goto nomem;
+ if (s5 > 0) {
+ S = pow5mult(S, s5);
+ if (!S)
+ goto nomem;
+ }
+ /* Now we have d/10^k = (b * 2^b2) / (S * 2^s2) and
+ (mhi * 2^m2) / (S * 2^s2) = one-half of last printed or input significant digit, divided by 10^k. */
+
+ /* Check for special case that d is a normalized power of 2. */
+ spec_case = 0;
+ if (mode < 2) {
+ if (!word1(d) && !(word0(d) & Bndry_mask)
+#ifndef Sudden_Underflow
+ && word0(d) & (Exp_mask & Exp_mask << 1)
+#endif
+ ) {
+ /* The special case. Here we want to be within a quarter of the last input
+ significant digit instead of one half of it when the decimal output string's value is less than d. */
+ b2 += Log2P;
+ s2 += Log2P;
+ spec_case = 1;
+ }
+ }
+
+ /* Arrange for convenient computation of quotients:
+ * shift left if necessary so divisor has 4 leading 0 bits.
+ *
+ * Perhaps we should just compute leading 28 bits of S once
+ * and for all and pass them and a shift to quorem, so it
+ * can do shifts and ors to compute the numerator for q.
+ */
+ if ((i = ((s5 ? 32 - hi0bits(S->x[S->wds-1]) : 1) + s2) & 0x1f) != 0)
+ i = 32 - i;
+ /* i is the number of leading zero bits in the most significant word of S*2^s2. */
+ if (i > 4) {
+ i -= 4;
+ b2 += i;
+ m2 += i;
+ s2 += i;
+ }
+ else if (i < 4) {
+ i += 28;
+ b2 += i;
+ m2 += i;
+ s2 += i;
+ }
+ /* Now S*2^s2 has exactly four leading zero bits in its most significant word. */
+ if (b2 > 0) {
+ b = lshift(b, b2);
+ if (!b)
+ goto nomem;
+ }
+ if (s2 > 0) {
+ S = lshift(S, s2);
+ if (!S)
+ goto nomem;
+ }
+ /* Now we have d/10^k = b/S and
+ (mhi * 2^m2) / S = maximum acceptable error, divided by 10^k. */
+ if (k_check) {
+ if (cmp(b,S) < 0) {
+ k--;
+ b = multadd(b, 10, 0); /* we botched the k estimate */
+ if (!b)
+ goto nomem;
+ if (leftright) {
+ mhi = multadd(mhi, 10, 0);
+ if (!mhi)
+ goto nomem;
+ }
+ ilim = ilim1;
+ }
+ }
+ /* At this point 1 <= d/10^k = b/S < 10. */
+
+ if (ilim <= 0 && mode > 2) {
+ /* We're doing fixed-mode output and d is less than the minimum nonzero output in this mode.
+ Output either zero or the minimum nonzero output depending on which is closer to d. */
+ if (ilim < 0)
+ goto no_digits;
+ S = multadd(S,5,0);
+ if (!S)
+ goto nomem;
+ i = cmp(b,S);
+ if (i < 0 || (i == 0 && !biasUp)) {
+ /* Always emit at least one digit. If the number appears to be zero
+ using the current mode, then emit one '0' digit and set decpt to 1. */
+ /*no_digits:
+ k = -1 - ndigits;
+ goto ret; */
+ goto no_digits;
+ }
+ one_digit:
+ *s++ = '1';
+ k++;
+ goto ret;
+ }
+ if (leftright) {
+ if (m2 > 0) {
+ mhi = lshift(mhi, m2);
+ if (!mhi)
+ goto nomem;
+ }
+
+ /* Compute mlo -- check for special case
+ * that d is a normalized power of 2.
+ */
+
+ mlo = mhi;
+ if (spec_case) {
+ mhi = Balloc(mhi->k);
+ if (!mhi)
+ goto nomem;
+ Bcopy(mhi, mlo);
+ mhi = lshift(mhi, Log2P);
+ if (!mhi)
+ goto nomem;
+ }
+ /* mlo/S = maximum acceptable error, divided by 10^k, if the output is less than d. */
+ /* mhi/S = maximum acceptable error, divided by 10^k, if the output is greater than d. */
+
+ for(i = 1;;i++) {
+ dig = quorem(b,S) + '0';
+ /* Do we yet have the shortest decimal string
+ * that will round to d?
+ */
+ j = cmp(b, mlo);
+ /* j is b/S compared with mlo/S. */
+ delta = diff(S, mhi);
+ if (!delta)
+ goto nomem;
+ j1 = delta->sign ? 1 : cmp(b, delta);
+ Bfree(delta);
+ /* j1 is b/S compared with 1 - mhi/S. */
+#ifndef ROUND_BIASED
+ if (j1 == 0 && !mode && !(word1(d) & 1)) {
+ if (dig == '9')
+ goto round_9_up;
+ if (j > 0)
+ dig++;
+ *s++ = (char)dig;
+ goto ret;
+ }
+#endif
+ if ((j < 0) || (j == 0 && !mode
+#ifndef ROUND_BIASED
+ && !(word1(d) & 1)
+#endif
+ )) {
+ if (j1 > 0) {
+ /* Either dig or dig+1 would work here as the least significant decimal digit.
+ Use whichever would produce a decimal value closer to d. */
+ b = lshift(b, 1);
+ if (!b)
+ goto nomem;
+ j1 = cmp(b, S);
+ if (((j1 > 0) || (j1 == 0 && (dig & 1 || biasUp)))
+ && (dig++ == '9'))
+ goto round_9_up;
+ }
+ *s++ = (char)dig;
+ goto ret;
+ }
+ if (j1 > 0) {
+ if (dig == '9') { /* possible if i == 1 */
+ round_9_up:
+ *s++ = '9';
+ goto roundoff;
+ }
+ *s++ = (char)dig + 1;
+ goto ret;
+ }
+ *s++ = (char)dig;
+ if (i == ilim)
+ break;
+ b = multadd(b, 10, 0);
+ if (!b)
+ goto nomem;
+ if (mlo == mhi) {
+ mlo = mhi = multadd(mhi, 10, 0);
+ if (!mhi)
+ goto nomem;
+ }
+ else {
+ mlo = multadd(mlo, 10, 0);
+ if (!mlo)
+ goto nomem;
+ mhi = multadd(mhi, 10, 0);
+ if (!mhi)
+ goto nomem;
+ }
+ }
+ }
+ else
+ for(i = 1;; i++) {
+ *s++ = (char)(dig = quorem(b,S) + '0');
+ if (i >= ilim)
+ break;
+ b = multadd(b, 10, 0);
+ if (!b)
+ goto nomem;
+ }
+
+ /* Round off last digit */
+
+ b = lshift(b, 1);
+ if (!b)
+ goto nomem;
+ j = cmp(b, S);
+ if ((j > 0) || (j == 0 && (dig & 1 || biasUp))) {
+ roundoff:
+ while(*--s == '9')
+ if (s == buf) {
+ k++;
+ *s++ = '1';
+ goto ret;
+ }
+ ++*s++;
+ }
+ else {
+ /* Strip trailing zeros */
+ while(*--s == '0') ;
+ s++;
+ }
+ ret:
+ Bfree(S);
+ if (mhi) {
+ if (mlo && mlo != mhi)
+ Bfree(mlo);
+ Bfree(mhi);
+ }
+ ret1:
+ Bfree(b);
+ JS_ASSERT(s < buf + bufsize);
+ *s = '\0';
+ if (rve)
+ *rve = s;
+ *decpt = k + 1;
+ return JS_TRUE;
+
+nomem:
+ Bfree(S);
+ if (mhi) {
+ if (mlo && mlo != mhi)
+ Bfree(mlo);
+ Bfree(mhi);
+ }
+ Bfree(b);
+ return JS_FALSE;
+}
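+
+/*
+ * Editor's example (added, assuming shortest-round-trip behaviour): for
+ * d == 0.1 in mode 0, js_dtoa fills buf with "1", sets *decpt to 0 and *sign
+ * to 0; the caller (e.g. JS_dtostr below) reinserts the decimal point and
+ * sign to reconstruct "0.1".
+ */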
+
+
+/* Mapping of JSDToStrMode -> js_dtoa mode */
+static const int dtoaModes[] = {
+ 0, /* DTOSTR_STANDARD */
+ 0, /* DTOSTR_STANDARD_EXPONENTIAL, */
+ 3, /* DTOSTR_FIXED, */
+ 2, /* DTOSTR_EXPONENTIAL, */
+ 2}; /* DTOSTR_PRECISION */
+
+JS_FRIEND_API(char *)
+JS_dtostr(char *buffer, size_t bufferSize, JSDToStrMode mode, int precision, double d)
+{
+ int decPt; /* Position of decimal point relative to first digit returned by js_dtoa */
+ int sign; /* Nonzero if the sign bit was set in d */
+ int nDigits; /* Number of significand digits returned by js_dtoa */
+ char *numBegin = buffer+2; /* Pointer to the digits returned by js_dtoa; the +2 leaves space for */
+ /* the sign and/or decimal point */
+ char *numEnd; /* Pointer past the digits returned by js_dtoa */
+ JSBool dtoaRet;
+
+ JS_ASSERT(bufferSize >= (size_t)(mode <= DTOSTR_STANDARD_EXPONENTIAL ? DTOSTR_STANDARD_BUFFER_SIZE :
+ DTOSTR_VARIABLE_BUFFER_SIZE(precision)));
+
+ if (mode == DTOSTR_FIXED && (d >= 1e21 || d <= -1e21))
+ mode = DTOSTR_STANDARD; /* Change mode here rather than below because the buffer may not be large enough to hold a large integer. */
+
+ /* Locking for Balloc's shared buffers */
+ ACQUIRE_DTOA_LOCK();
+ dtoaRet = js_dtoa(d, dtoaModes[mode], mode >= DTOSTR_FIXED, precision, &decPt, &sign, &numEnd, numBegin, bufferSize-2);
+ RELEASE_DTOA_LOCK();
+ if (!dtoaRet)
+ return 0;
+
+ nDigits = numEnd - numBegin;
+
+ /* If Infinity, -Infinity, or NaN, return the string regardless of the mode. */
+ if (decPt != 9999) {
+ JSBool exponentialNotation = JS_FALSE;
+ int minNDigits = 0; /* Minimum number of significand digits required by mode and precision */
+ char *p;
+ char *q;
+
+ switch (mode) {
+ case DTOSTR_STANDARD:
+ if (decPt < -5 || decPt > 21)
+ exponentialNotation = JS_TRUE;
+ else
+ minNDigits = decPt;
+ break;
+
+ case DTOSTR_FIXED:
+ if (precision >= 0)
+ minNDigits = decPt + precision;
+ else
+ minNDigits = decPt;
+ break;
+
+ case DTOSTR_EXPONENTIAL:
+ JS_ASSERT(precision > 0);
+ minNDigits = precision;
+ /* Fall through */
+ case DTOSTR_STANDARD_EXPONENTIAL:
+ exponentialNotation = JS_TRUE;
+ break;
+
+ case DTOSTR_PRECISION:
+ JS_ASSERT(precision > 0);
+ minNDigits = precision;
+ if (decPt < -5 || decPt > precision)
+ exponentialNotation = JS_TRUE;
+ break;
+ }
+
+ /* If the number has fewer than minNDigits, pad it with zeros at the end */
+ if (nDigits < minNDigits) {
+ p = numBegin + minNDigits;
+ nDigits = minNDigits;
+ do {
+ *numEnd++ = '0';
+ } while (numEnd != p);
+ *numEnd = '\0';
+ }
+
+ if (exponentialNotation) {
+ /* Insert a decimal point if more than one significand digit */
+ if (nDigits != 1) {
+ numBegin--;
+ numBegin[0] = numBegin[1];
+ numBegin[1] = '.';
+ }
+ JS_snprintf(numEnd, bufferSize - (numEnd - buffer), "e%+d", decPt-1);
+ } else if (decPt != nDigits) {
+ /* Some kind of a fraction in fixed notation */
+ JS_ASSERT(decPt <= nDigits);
+ if (decPt > 0) {
+ /* dd...dd . dd...dd */
+ p = --numBegin;
+ do {
+ *p = p[1];
+ p++;
+ } while (--decPt);
+ *p = '.';
+ } else {
+ /* 0 . 00...00dd...dd */
+ p = numEnd;
+ numEnd += 1 - decPt;
+ q = numEnd;
+ JS_ASSERT(numEnd < buffer + bufferSize);
+ *numEnd = '\0';
+ while (p != numBegin)
+ *--q = *--p;
+ for (p = numBegin + 1; p != q; p++)
+ *p = '0';
+ *numBegin = '.';
+ *--numBegin = '0';
+ }
+ }
+ }
+
+ /* If negative and neither -0.0 nor NaN, output a leading '-'. */
+ if (sign &&
+ !(word0(d) == Sign_bit && word1(d) == 0) &&
+ !((word0(d) & Exp_mask) == Exp_mask &&
+ (word1(d) || (word0(d) & Frac_mask)))) {
+ *--numBegin = '-';
+ }
+ return numBegin;
+}
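+
+/*
+ * Editor's usage sketch (not part of the original source): formatting a
+ * double with JS_dtostr. Compiled out; the function name is illustrative.
+ */
+#if 0
+static void example_dtostr(void)
+{
+    char buf[DTOSTR_STANDARD_BUFFER_SIZE];
+    char *s;
+
+    s = JS_dtostr(buf, sizeof buf, DTOSTR_STANDARD, 0, 123.456);
+    if (s) {
+        /* s points into buf (the first two bytes are reserved for a sign
+         * and/or decimal point) and holds a NUL-terminated decimal string,
+         * here "123.456". s is NULL if js_dtoa failed (e.g. out of memory). */
+    }
+}
+#endif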
+
+
+/* Let b = floor(b / divisor), and return the remainder. b must be nonnegative.
+ * divisor must be between 1 and 65536.
+ * This function cannot run out of memory. */
+static uint32
+divrem(Bigint *b, uint32 divisor)
+{
+ int32 n = b->wds;
+ uint32 remainder = 0;
+ ULong *bx;
+ ULong *bp;
+
+ JS_ASSERT(divisor > 0 && divisor <= 65536);
+
+ if (!n)
+ return 0; /* b is zero */
+ bx = b->x;
+ bp = bx + n;
+ do {
+ ULong a = *--bp;
+ ULong dividend = remainder << 16 | a >> 16;
+ ULong quotientHi = dividend / divisor;
+ ULong quotientLo;
+
+ remainder = dividend - quotientHi*divisor;
+ JS_ASSERT(quotientHi <= 0xFFFF && remainder < divisor);
+ dividend = remainder << 16 | (a & 0xFFFF);
+ quotientLo = dividend / divisor;
+ remainder = dividend - quotientLo*divisor;
+ JS_ASSERT(quotientLo <= 0xFFFF && remainder < divisor);
+ *bp = quotientHi << 16 | quotientLo;
+ } while (bp != bx);
+ /* Decrease the size of the number if its most significant word is now zero. */
+ if (bx[n-1] == 0)
+ b->wds--;
+ return remainder;
+}
+
+
+/* "-0.0000...(1073 zeros after decimal point)...0001\0" is the longest string that we could produce,
+ * which occurs when printing -5e-324 in binary. We could compute a better estimate of the size of
+ * the output string and malloc fewer bytes depending on d and base, but why bother? */
+#define DTOBASESTR_BUFFER_SIZE 1078
+#define BASEDIGIT(digit) ((char)(((digit) >= 10) ? 'a' - 10 + (digit) : '0' + (digit)))
+
+JS_FRIEND_API(char *)
+JS_dtobasestr(int base, double d)
+{
+ char *buffer; /* The output string */
+ char *p; /* Pointer to current position in the buffer */
+ char *pInt; /* Pointer to the beginning of the integer part of the string */
+ char *q;
+ uint32 digit;
+ double di; /* d truncated to an integer */
+ double df; /* The fractional part of d */
+
+ JS_ASSERT(base >= 2 && base <= 36);
+
+ buffer = (char*) malloc(DTOBASESTR_BUFFER_SIZE);
+ if (buffer) {
+ p = buffer;
+ if (d < 0.0
+#if defined(XP_WIN) || defined(XP_OS2)
+ && !((word0(d) & Exp_mask) == Exp_mask && ((word0(d) & Frac_mask) || word1(d))) /* Visual C++ doesn't know how to compare against NaN */
+#endif
+ ) {
+ *p++ = '-';
+ d = -d;
+ }
+
+ /* Check for Infinity and NaN */
+ if ((word0(d) & Exp_mask) == Exp_mask) {
+ strcpy(p, !word1(d) && !(word0(d) & Frac_mask) ? "Infinity" : "NaN");
+ return buffer;
+ }
+
+ /* Locking for Balloc's shared buffers */
+ ACQUIRE_DTOA_LOCK();
+
+ /* Output the integer part of d with the digits in reverse order. */
+ pInt = p;
+ di = fd_floor(d);
+ if (di <= 4294967295.0) {
+ uint32 n = (uint32)di;
+ if (n)
+ do {
+ uint32 m = n / base;
+ digit = n - m*base;
+ n = m;
+ JS_ASSERT(digit < (uint32)base);
+ *p++ = BASEDIGIT(digit);
+ } while (n);
+ else *p++ = '0';
+ } else {
+ int32 e;
+ int32 bits; /* Number of significant bits in di; not used. */
+ Bigint *b = d2b(di, &e, &bits);
+ if (!b)
+ goto nomem1;
+ b = lshift(b, e);
+ if (!b) {
+ nomem1:
+ Bfree(b);
+ RELEASE_DTOA_LOCK();
+ free(buffer);
+ return NULL;
+ }
+ do {
+ digit = divrem(b, base);
+ JS_ASSERT(digit < (uint32)base);
+ *p++ = BASEDIGIT(digit);
+ } while (b->wds);
+ Bfree(b);
+ }
+ /* Reverse the digits of the integer part of d. */
+ q = p-1;
+ while (q > pInt) {
+ char ch = *pInt;
+ *pInt++ = *q;
+ *q-- = ch;
+ }
+
+ df = d - di;
+ if (df != 0.0) {
+ /* We have a fraction. */
+ int32 e, bbits, s2, done;
+ Bigint *b, *s, *mlo, *mhi;
+
+ b = s = mlo = mhi = NULL;
+
+ *p++ = '.';
+ b = d2b(df, &e, &bbits);
+ if (!b) {
+ nomem2:
+ Bfree(b);
+ Bfree(s);
+ if (mlo != mhi)
+ Bfree(mlo);
+ Bfree(mhi);
+ RELEASE_DTOA_LOCK();
+ free(buffer);
+ return NULL;
+ }
+ JS_ASSERT(e < 0);
+ /* At this point df = b * 2^e. e must be less than zero because 0 < df < 1. */
+
+ s2 = -(int32)(word0(d) >> Exp_shift1 & Exp_mask>>Exp_shift1);
+#ifndef Sudden_Underflow
+ if (!s2)
+ s2 = -1;
+#endif
+ s2 += Bias + P;
+ /* 1/2^s2 = (nextDouble(d) - d)/2 */
+ JS_ASSERT(-s2 < e);
+ mlo = i2b(1);
+ if (!mlo)
+ goto nomem2;
+ mhi = mlo;
+ if (!word1(d) && !(word0(d) & Bndry_mask)
+#ifndef Sudden_Underflow
+ && word0(d) & (Exp_mask & Exp_mask << 1)
+#endif
+ ) {
+ /* The special case. Here we want to be within a quarter of the last input
+ significant digit instead of one half of it when the output string's value is less than d. */
+ s2 += Log2P;
+ mhi = i2b(1<<Log2P);
+ if (!mhi)
+ goto nomem2;
+ }
+ b = lshift(b, e + s2);
+ if (!b)
+ goto nomem2;
+ s = i2b(1);
+ if (!s)
+ goto nomem2;
+ s = lshift(s, s2);
+ if (!s)
+ goto nomem2;
+ /* At this point we have the following:
+ * s = 2^s2;
+ * 1 > df = b/2^s2 > 0;
+ * (d - prevDouble(d))/2 = mlo/2^s2;
+ * (nextDouble(d) - d)/2 = mhi/2^s2. */
+
+ done = JS_FALSE;
+ do {
+ int32 j, j1;
+ Bigint *delta;
+
+ b = multadd(b, base, 0);
+ if (!b)
+ goto nomem2;
+ digit = quorem2(b, s2);
+ if (mlo == mhi) {
+ mlo = mhi = multadd(mlo, base, 0);
+ if (!mhi)
+ goto nomem2;
+ }
+ else {
+ mlo = multadd(mlo, base, 0);
+ if (!mlo)
+ goto nomem2;
+ mhi = multadd(mhi, base, 0);
+ if (!mhi)
+ goto nomem2;
+ }
+
+ /* Do we yet have the shortest string that will round to d? */
+ j = cmp(b, mlo);
+ /* j is b/2^s2 compared with mlo/2^s2. */
+ delta = diff(s, mhi);
+ if (!delta)
+ goto nomem2;
+ j1 = delta->sign ? 1 : cmp(b, delta);
+ Bfree(delta);
+ /* j1 is b/2^s2 compared with 1 - mhi/2^s2. */
+
+#ifndef ROUND_BIASED
+ if (j1 == 0 && !(word1(d) & 1)) {
+ if (j > 0)
+ digit++;
+ done = JS_TRUE;
+ } else
+#endif
+ if (j < 0 || (j == 0
+#ifndef ROUND_BIASED
+ && !(word1(d) & 1)
+#endif
+ )) {
+ if (j1 > 0) {
+                    /* Either digit or digit+1 would work here as the least significant digit.
+ Use whichever would produce an output value closer to d. */
+ b = lshift(b, 1);
+ if (!b)
+ goto nomem2;
+ j1 = cmp(b, s);
+ if (j1 > 0) /* The even test (|| (j1 == 0 && (digit & 1))) is not here because it messes up odd base output
+ * such as 3.5 in base 3. */
+ digit++;
+ }
+ done = JS_TRUE;
+ } else if (j1 > 0) {
+ digit++;
+ done = JS_TRUE;
+ }
+ JS_ASSERT(digit < (uint32)base);
+ *p++ = BASEDIGIT(digit);
+ } while (!done);
+ Bfree(b);
+ Bfree(s);
+ if (mlo != mhi)
+ Bfree(mlo);
+ Bfree(mhi);
+ }
+ JS_ASSERT(p < buffer + DTOBASESTR_BUFFER_SIZE);
+ *p = '\0';
+ RELEASE_DTOA_LOCK();
+ }
+ return buffer;
+}
diff --git a/third_party/js-1.7/jsdtoa.h b/third_party/js-1.7/jsdtoa.h
new file mode 100644
index 0000000..409f454
--- /dev/null
+++ b/third_party/js-1.7/jsdtoa.h
@@ -0,0 +1,130 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsdtoa_h___
+#define jsdtoa_h___
+/*
+ * Public interface to the portable package for converting double-precision
+ * floating-point numbers to strings and back.
+ */
+
+#include "jscompat.h"
+
+JS_BEGIN_EXTERN_C
+
+/*
+ * JS_strtod() returns as a double-precision floating-point number
+ * the value represented by the character string pointed to by
+ * s00. The string is scanned up to the first unrecognized
+ * character.
+ * If the value of se is not (char **)NULL, a pointer to
+ * the character terminating the scan is returned in the location pointed
+ * to by se. If no number can be formed, *se is set to s00, and
+ * zero is returned.
+ *
+ * *err is set to zero on success; it's set to JS_DTOA_ERANGE on range
+ * errors and JS_DTOA_ENOMEM on memory failure.
+ */
+#define JS_DTOA_ERANGE 1
+#define JS_DTOA_ENOMEM 2
+JS_FRIEND_API(double)
+JS_strtod(const char *s00, char **se, int *err);
+
+/*
+ * Modes for converting floating-point numbers to strings.
+ *
+ * Some of the modes can round-trip; this means that if the number is converted to
+ * a string using one of these modes and then converted back to a number, the result
+ * will be identical to the original number (except that, due to ECMA, -0 will get converted
+ * to +0). These round-trip modes return the minimum number of significand digits that
+ * permit the round trip.
+ *
+ * Some of the modes take an integer parameter <precision>.
+ */
+/* NB: Keep this in sync with number_constants[]. */
+typedef enum JSDToStrMode {
+ DTOSTR_STANDARD, /* Either fixed or exponential format; round-trip */
+ DTOSTR_STANDARD_EXPONENTIAL, /* Always exponential format; round-trip */
+ DTOSTR_FIXED, /* Round to <precision> digits after the decimal point; exponential if number is large */
+ DTOSTR_EXPONENTIAL, /* Always exponential format; <precision> significant digits */
+ DTOSTR_PRECISION /* Either fixed or exponential format; <precision> significant digits */
+} JSDToStrMode;
+
+
+/* Maximum number of characters (including trailing null) that a DTOSTR_STANDARD or DTOSTR_STANDARD_EXPONENTIAL
+ * conversion can produce. This maximum is reached for a number like -0.0000012345678901234567. */
+#define DTOSTR_STANDARD_BUFFER_SIZE 26
+
+/* Maximum number of characters (including trailing null) that one of the other conversions
+ * can produce. This maximum is reached for DTOSTR_FIXED, which can generate up to 21 digits before the decimal point. */
+#define DTOSTR_VARIABLE_BUFFER_SIZE(precision) ((precision)+24 > DTOSTR_STANDARD_BUFFER_SIZE ? (precision)+24 : DTOSTR_STANDARD_BUFFER_SIZE)
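+
+/* For example, DTOSTR_VARIABLE_BUFFER_SIZE(1) is 26 (the standard size wins, since
+ * 1+24 < 26), while DTOSTR_VARIABLE_BUFFER_SIZE(100) is 124. */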
+
+/*
+ * Convert dval according to the given mode and return a pointer to the resulting ASCII string.
+ * The result is held somewhere in buffer, but not necessarily at the beginning. The size of
+ * buffer is given in bufferSize, and must be at least as large as given by the above macros.
+ *
+ * Return NULL if out of memory.
+ */
+JS_FRIEND_API(char *)
+JS_dtostr(char *buffer, size_t bufferSize, JSDToStrMode mode, int precision, double dval);
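+
+/* A sketch of a hypothetical caller (not part of this interface); precision is
+ * passed as 0 on the assumption that the round-trip DTOSTR_STANDARD mode does
+ * not consume it:
+ *
+ *     char buf[DTOSTR_STANDARD_BUFFER_SIZE];
+ *     char *s = JS_dtostr(buf, sizeof buf, DTOSTR_STANDARD, 0, 3.25);
+ *     if (s)           // s lies somewhere within buf, not necessarily at buf[0]
+ *         puts(s);     // expected to print "3.25"
+ */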
+
+/*
+ * Convert d to a string in the given base. The integral part of d will be printed exactly
+ * in that base, regardless of how large it is, because there is no exponential notation for non-base-ten
+ * numbers. The fractional part will be rounded to as few digits as possible while still preserving
+ * the round-trip property (analogous to that of printing decimal numbers). In other words, if one were
+ * to read the resulting string in via a hypothetical base-number-reading routine that rounds to the nearest
+ * IEEE double (and to an even significand if there are two equally near doubles), then the result would
+ * equal d (except for -0.0, which converts to "0", and NaN, which is not equal to itself).
+ *
+ * Return NULL if out of memory. If the result is not NULL, it must be released via free().
+ */
+JS_FRIEND_API(char *)
+JS_dtobasestr(int base, double d);
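+
+/* A sketch of a hypothetical caller (not part of this interface):
+ *
+ *     char *s = JS_dtobasestr(16, 255.5);
+ *     if (s) {
+ *         puts(s);     // expected to print "ff.8"
+ *         free(s);     // the result must be released via free()
+ *     }
+ */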
+
+/*
+ * Clean up any persistent RAM allocated during the execution of DtoA
+ * routines, and remove any locks that might have been created.
+ */
+extern void js_FinishDtoa(void);
+
+JS_END_EXTERN_C
+
+#endif /* jsdtoa_h___ */
diff --git a/third_party/js-1.7/jsemit.c b/third_party/js-1.7/jsemit.c
new file mode 100644
index 0000000..f8a06be
--- /dev/null
+++ b/third_party/js-1.7/jsemit.c
@@ -0,0 +1,6845 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sw=4 et tw=78:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * JS bytecode generation.
+ */
+#include "jsstddef.h"
+#ifdef HAVE_MEMORY_H
+#include <memory.h>
+#endif
+#include <string.h>
+#include "jstypes.h"
+#include "jsarena.h" /* Added by JSIFY */
+#include "jsutil.h" /* Added by JSIFY */
+#include "jsbit.h"
+#include "jsprf.h"
+#include "jsapi.h"
+#include "jsatom.h"
+#include "jscntxt.h"
+#include "jsconfig.h"
+#include "jsemit.h"
+#include "jsfun.h"
+#include "jsnum.h"
+#include "jsopcode.h"
+#include "jsparse.h"
+#include "jsregexp.h"
+#include "jsscan.h"
+#include "jsscope.h"
+#include "jsscript.h"
+
+/* Allocation chunk counts, must be powers of two in general. */
+#define BYTECODE_CHUNK 256 /* code allocation increment */
+#define SRCNOTE_CHUNK 64 /* initial srcnote allocation increment */
+#define TRYNOTE_CHUNK 64 /* trynote allocation increment */
+
+/* Macros to compute byte sizes from typed element counts. */
+#define BYTECODE_SIZE(n) ((n) * sizeof(jsbytecode))
+#define SRCNOTE_SIZE(n) ((n) * sizeof(jssrcnote))
+#define TRYNOTE_SIZE(n) ((n) * sizeof(JSTryNote))
+
+JS_FRIEND_API(JSBool)
+js_InitCodeGenerator(JSContext *cx, JSCodeGenerator *cg,
+ JSArenaPool *codePool, JSArenaPool *notePool,
+ const char *filename, uintN lineno,
+ JSPrincipals *principals)
+{
+ memset(cg, 0, sizeof *cg);
+ TREE_CONTEXT_INIT(&cg->treeContext);
+ cg->treeContext.flags |= TCF_COMPILING;
+ cg->codePool = codePool;
+ cg->notePool = notePool;
+ cg->codeMark = JS_ARENA_MARK(codePool);
+ cg->noteMark = JS_ARENA_MARK(notePool);
+ cg->tempMark = JS_ARENA_MARK(&cx->tempPool);
+ cg->current = &cg->main;
+ cg->filename = filename;
+ cg->firstLine = cg->prolog.currentLine = cg->main.currentLine = lineno;
+ cg->principals = principals;
+ ATOM_LIST_INIT(&cg->atomList);
+ cg->prolog.noteMask = cg->main.noteMask = SRCNOTE_CHUNK - 1;
+ ATOM_LIST_INIT(&cg->constList);
+ return JS_TRUE;
+}
+
+JS_FRIEND_API(void)
+js_FinishCodeGenerator(JSContext *cx, JSCodeGenerator *cg)
+{
+ TREE_CONTEXT_FINISH(&cg->treeContext);
+ JS_ARENA_RELEASE(cg->codePool, cg->codeMark);
+ JS_ARENA_RELEASE(cg->notePool, cg->noteMark);
+ JS_ARENA_RELEASE(&cx->tempPool, cg->tempMark);
+}
+
+static ptrdiff_t
+EmitCheck(JSContext *cx, JSCodeGenerator *cg, JSOp op, ptrdiff_t delta)
+{
+ jsbytecode *base, *limit, *next;
+ ptrdiff_t offset, length;
+ size_t incr, size;
+
+ base = CG_BASE(cg);
+ next = CG_NEXT(cg);
+ limit = CG_LIMIT(cg);
+ offset = PTRDIFF(next, base, jsbytecode);
+ if (next + delta > limit) {
+ length = offset + delta;
+ length = (length <= BYTECODE_CHUNK)
+ ? BYTECODE_CHUNK
+ : JS_BIT(JS_CeilingLog2(length));
+ incr = BYTECODE_SIZE(length);
+ if (!base) {
+ JS_ARENA_ALLOCATE_CAST(base, jsbytecode *, cg->codePool, incr);
+ } else {
+ size = BYTECODE_SIZE(PTRDIFF(limit, base, jsbytecode));
+ incr -= size;
+ JS_ARENA_GROW_CAST(base, jsbytecode *, cg->codePool, size, incr);
+ }
+ if (!base) {
+ JS_ReportOutOfMemory(cx);
+ return -1;
+ }
+ CG_BASE(cg) = base;
+ CG_LIMIT(cg) = base + length;
+ CG_NEXT(cg) = base + offset;
+ }
+ return offset;
+}
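+
+/* Note: bytecode space grows in power-of-two chunks, so a request that raises the
+ * needed length to, say, 300 bytecodes grows the pool to JS_BIT(JS_CeilingLog2(300))
+ * = 512; OptimizeSpanDeps later cuts to fit rather than rounding up again. */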
+
+static void
+UpdateDepth(JSContext *cx, JSCodeGenerator *cg, ptrdiff_t target)
+{
+ jsbytecode *pc;
+ const JSCodeSpec *cs;
+ intN nuses;
+
+ pc = CG_CODE(cg, target);
+ cs = &js_CodeSpec[pc[0]];
+ nuses = cs->nuses;
+ if (nuses < 0)
+ nuses = 2 + GET_ARGC(pc); /* stack: fun, this, [argc arguments] */
+ cg->stackDepth -= nuses;
+ JS_ASSERT(cg->stackDepth >= 0);
+ if (cg->stackDepth < 0) {
+ char numBuf[12];
+ JS_snprintf(numBuf, sizeof numBuf, "%d", target);
+ JS_ReportErrorFlagsAndNumber(cx, JSREPORT_WARNING,
+ js_GetErrorMessage, NULL,
+ JSMSG_STACK_UNDERFLOW,
+ cg->filename ? cg->filename : "stdin",
+ numBuf);
+ }
+ cg->stackDepth += cs->ndefs;
+ if ((uintN)cg->stackDepth > cg->maxStackDepth)
+ cg->maxStackDepth = cg->stackDepth;
+}
+
+ptrdiff_t
+js_Emit1(JSContext *cx, JSCodeGenerator *cg, JSOp op)
+{
+ ptrdiff_t offset = EmitCheck(cx, cg, op, 1);
+
+ if (offset >= 0) {
+ *CG_NEXT(cg)++ = (jsbytecode)op;
+ UpdateDepth(cx, cg, offset);
+ }
+ return offset;
+}
+
+ptrdiff_t
+js_Emit2(JSContext *cx, JSCodeGenerator *cg, JSOp op, jsbytecode op1)
+{
+ ptrdiff_t offset = EmitCheck(cx, cg, op, 2);
+
+ if (offset >= 0) {
+ jsbytecode *next = CG_NEXT(cg);
+ next[0] = (jsbytecode)op;
+ next[1] = op1;
+ CG_NEXT(cg) = next + 2;
+ UpdateDepth(cx, cg, offset);
+ }
+ return offset;
+}
+
+ptrdiff_t
+js_Emit3(JSContext *cx, JSCodeGenerator *cg, JSOp op, jsbytecode op1,
+ jsbytecode op2)
+{
+ ptrdiff_t offset = EmitCheck(cx, cg, op, 3);
+
+ if (offset >= 0) {
+ jsbytecode *next = CG_NEXT(cg);
+ next[0] = (jsbytecode)op;
+ next[1] = op1;
+ next[2] = op2;
+ CG_NEXT(cg) = next + 3;
+ UpdateDepth(cx, cg, offset);
+ }
+ return offset;
+}
+
+ptrdiff_t
+js_EmitN(JSContext *cx, JSCodeGenerator *cg, JSOp op, size_t extra)
+{
+ ptrdiff_t length = 1 + (ptrdiff_t)extra;
+ ptrdiff_t offset = EmitCheck(cx, cg, op, length);
+
+ if (offset >= 0) {
+ jsbytecode *next = CG_NEXT(cg);
+ *next = (jsbytecode)op;
+ memset(next + 1, 0, BYTECODE_SIZE(extra));
+ CG_NEXT(cg) = next + length;
+ UpdateDepth(cx, cg, offset);
+ }
+ return offset;
+}
+
+/* XXX too many "... statement" L10N gaffes below -- fix via js.msg! */
+const char js_with_statement_str[] = "with statement";
+const char js_finally_block_str[] = "finally block";
+const char js_script_str[] = "script";
+
+static const char *statementName[] = {
+ "label statement", /* LABEL */
+ "if statement", /* IF */
+ "else statement", /* ELSE */
+ "switch statement", /* SWITCH */
+ "block", /* BLOCK */
+ js_with_statement_str, /* WITH */
+ "catch block", /* CATCH */
+ "try block", /* TRY */
+ js_finally_block_str, /* FINALLY */
+ js_finally_block_str, /* SUBROUTINE */
+ "do loop", /* DO_LOOP */
+ "for loop", /* FOR_LOOP */
+ "for/in loop", /* FOR_IN_LOOP */
+ "while loop", /* WHILE_LOOP */
+};
+
+static const char *
+StatementName(JSCodeGenerator *cg)
+{
+ if (!cg->treeContext.topStmt)
+ return js_script_str;
+ return statementName[cg->treeContext.topStmt->type];
+}
+
+static void
+ReportStatementTooLarge(JSContext *cx, JSCodeGenerator *cg)
+{
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_NEED_DIET,
+ StatementName(cg));
+}
+
+/**
+ Span-dependent instructions in JS bytecode consist of the jump (JOF_JUMP)
+ and switch (JOF_LOOKUPSWITCH, JOF_TABLESWITCH) format opcodes, subdivided
+ into unconditional (gotos and gosubs), and conditional jumps or branches
+ (which pop a value, test it, and jump depending on its value). Most jumps
+ have just one immediate operand, a signed offset from the jump opcode's pc
+ to the target bytecode. The lookup and table switch opcodes may contain
+ many jump offsets.
+
+ Mozilla bug #80981 (http://bugzilla.mozilla.org/show_bug.cgi?id=80981) was
+ fixed by adding extended "X" counterparts to the opcodes/formats (NB: X is
+ suffixed to prefer JSOP_ORX thereby avoiding a JSOP_XOR name collision for
+ the extended form of the JSOP_OR branch opcode). The unextended or short
+ formats have 16-bit signed immediate offset operands, the extended or long
+ formats have 32-bit signed immediates. The span-dependency problem consists
+ of selecting as few long instructions as possible, or about as few -- since
+ jumps can span other jumps, extending one jump may cause another to need to
+ be extended.
+
+ Most JS scripts are short, so need no extended jumps. We optimize for this
+ case by generating short jumps until we know a long jump is needed. After
+ that point, we keep generating short jumps, but each jump's 16-bit immediate
+ offset operand is actually an unsigned index into cg->spanDeps, an array of
+ JSSpanDep structs. Each struct tells the top offset in the script of the
+ opcode, the "before" offset of the jump (which will be the same as top for
+ simplex jumps, but which will index further into the bytecode array for a
+ non-initial jump offset in a lookup or table switch), the after "offset"
+ adjusted during span-dependent instruction selection (initially the same
+ value as the "before" offset), and the jump target (more below).
+
+ Since we generate cg->spanDeps lazily, from within js_SetJumpOffset, we must
+ ensure that all bytecode generated so far can be inspected to discover where
+ the jump offset immediate operands lie within CG_CODE(cg). But the bonus is
+ that we generate span-dependency records sorted by their offsets, so we can
+ binary-search when trying to find a JSSpanDep for a given bytecode offset,
+ or the nearest JSSpanDep at or above a given pc.
+
+ To avoid limiting scripts to 64K jumps, if the cg->spanDeps index overflows
+ 65534, we store SPANDEP_INDEX_HUGE in the jump's immediate operand. This
+ tells us that we need to binary-search for the cg->spanDeps entry by the
+ jump opcode's bytecode offset (sd->before).
+
+ Jump targets need to be maintained in a data structure that lets us look
+ up an already-known target by its address (jumps may have a common target),
+ and that also lets us update the addresses (script-relative, a.k.a. absolute
+ offsets) of targets that come after a jump target (for when a jump below
+ that target needs to be extended). We use an AVL tree, implemented using
+ recursion, but with some tricky optimizations to its height-balancing code
+ (see http://www.cmcrossroads.com/bradapp/ftp/src/libs/C++/AvlTrees.html).
+
+ A final wrinkle: backpatch chains are linked by jump-to-jump offsets with
+ positive sign, even though they link "backward" (i.e., toward lower bytecode
+ address). We don't want to waste space and search time in the AVL tree for
+ such temporary backpatch deltas, so we use a single-bit wildcard scheme to
+ tag true JSJumpTarget pointers and encode untagged, signed (positive) deltas
+ in JSSpanDep.target pointers, depending on whether the JSSpanDep has a known
+ target, or is still awaiting backpatching.
+
+ Note that backpatch chains would present a problem for BuildSpanDepTable,
+ which inspects bytecode to build cg->spanDeps on demand, when the first
+ short jump offset overflows. To solve this temporary problem, we emit a
+ proxy bytecode (JSOP_BACKPATCH; JSOP_BACKPATCH_POP for branch ops) whose
+ nuses/ndefs counts help keep the stack balanced, but whose opcode format
+ distinguishes its backpatch delta immediate operand from a normal jump
+ offset.
+ */
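+/* Concretely: a JSOP_GOTO whose target ends up more than JUMP_OFFSET_MAX bytes away
+ * cannot be encoded in its 16-bit immediate, so OptimizeSpanDeps (below) rewrites it
+ * to JSOP_GOTOX with a 32-bit immediate, bumping later offsets and jump targets by
+ * JUMPX_OFFSET_LEN - JUMP_OFFSET_LEN bytes for each jump so extended. */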
+static int
+BalanceJumpTargets(JSJumpTarget **jtp)
+{
+ JSJumpTarget *jt, *jt2, *root;
+ int dir, otherDir, heightChanged;
+ JSBool doubleRotate;
+
+ jt = *jtp;
+ JS_ASSERT(jt->balance != 0);
+
+ if (jt->balance < -1) {
+ dir = JT_RIGHT;
+ doubleRotate = (jt->kids[JT_LEFT]->balance > 0);
+ } else if (jt->balance > 1) {
+ dir = JT_LEFT;
+ doubleRotate = (jt->kids[JT_RIGHT]->balance < 0);
+ } else {
+ return 0;
+ }
+
+ otherDir = JT_OTHER_DIR(dir);
+ if (doubleRotate) {
+ jt2 = jt->kids[otherDir];
+ *jtp = root = jt2->kids[dir];
+
+ jt->kids[otherDir] = root->kids[dir];
+ root->kids[dir] = jt;
+
+ jt2->kids[dir] = root->kids[otherDir];
+ root->kids[otherDir] = jt2;
+
+ heightChanged = 1;
+ root->kids[JT_LEFT]->balance = -JS_MAX(root->balance, 0);
+ root->kids[JT_RIGHT]->balance = -JS_MIN(root->balance, 0);
+ root->balance = 0;
+ } else {
+ *jtp = root = jt->kids[otherDir];
+ jt->kids[otherDir] = root->kids[dir];
+ root->kids[dir] = jt;
+
+ heightChanged = (root->balance != 0);
+ jt->balance = -((dir == JT_LEFT) ? --root->balance : ++root->balance);
+ }
+
+ return heightChanged;
+}
+
+typedef struct AddJumpTargetArgs {
+ JSContext *cx;
+ JSCodeGenerator *cg;
+ ptrdiff_t offset;
+ JSJumpTarget *node;
+} AddJumpTargetArgs;
+
+static int
+AddJumpTarget(AddJumpTargetArgs *args, JSJumpTarget **jtp)
+{
+ JSJumpTarget *jt;
+ int balanceDelta;
+
+ jt = *jtp;
+ if (!jt) {
+ JSCodeGenerator *cg = args->cg;
+
+ jt = cg->jtFreeList;
+ if (jt) {
+ cg->jtFreeList = jt->kids[JT_LEFT];
+ } else {
+ JS_ARENA_ALLOCATE_CAST(jt, JSJumpTarget *, &args->cx->tempPool,
+ sizeof *jt);
+ if (!jt) {
+ JS_ReportOutOfMemory(args->cx);
+ return 0;
+ }
+ }
+ jt->offset = args->offset;
+ jt->balance = 0;
+ jt->kids[JT_LEFT] = jt->kids[JT_RIGHT] = NULL;
+ cg->numJumpTargets++;
+ args->node = jt;
+ *jtp = jt;
+ return 1;
+ }
+
+ if (jt->offset == args->offset) {
+ args->node = jt;
+ return 0;
+ }
+
+ if (args->offset < jt->offset)
+ balanceDelta = -AddJumpTarget(args, &jt->kids[JT_LEFT]);
+ else
+ balanceDelta = AddJumpTarget(args, &jt->kids[JT_RIGHT]);
+ if (!args->node)
+ return 0;
+
+ jt->balance += balanceDelta;
+ return (balanceDelta && jt->balance)
+ ? 1 - BalanceJumpTargets(jtp)
+ : 0;
+}
+
+#ifdef DEBUG_brendan
+static int AVLCheck(JSJumpTarget *jt)
+{
+ int lh, rh;
+
+ if (!jt) return 0;
+ JS_ASSERT(-1 <= jt->balance && jt->balance <= 1);
+ lh = AVLCheck(jt->kids[JT_LEFT]);
+ rh = AVLCheck(jt->kids[JT_RIGHT]);
+ JS_ASSERT(jt->balance == rh - lh);
+ return 1 + JS_MAX(lh, rh);
+}
+#endif
+
+static JSBool
+SetSpanDepTarget(JSContext *cx, JSCodeGenerator *cg, JSSpanDep *sd,
+ ptrdiff_t off)
+{
+ AddJumpTargetArgs args;
+
+ if (off < JUMPX_OFFSET_MIN || JUMPX_OFFSET_MAX < off) {
+ ReportStatementTooLarge(cx, cg);
+ return JS_FALSE;
+ }
+
+ args.cx = cx;
+ args.cg = cg;
+ args.offset = sd->top + off;
+ args.node = NULL;
+ AddJumpTarget(&args, &cg->jumpTargets);
+ if (!args.node)
+ return JS_FALSE;
+
+#ifdef DEBUG_brendan
+ AVLCheck(cg->jumpTargets);
+#endif
+
+ SD_SET_TARGET(sd, args.node);
+ return JS_TRUE;
+}
+
+#define SPANDEPS_MIN 256
+#define SPANDEPS_SIZE(n) ((n) * sizeof(JSSpanDep))
+#define SPANDEPS_SIZE_MIN SPANDEPS_SIZE(SPANDEPS_MIN)
+
+static JSBool
+AddSpanDep(JSContext *cx, JSCodeGenerator *cg, jsbytecode *pc, jsbytecode *pc2,
+ ptrdiff_t off)
+{
+ uintN index;
+ JSSpanDep *sdbase, *sd;
+ size_t size;
+
+ index = cg->numSpanDeps;
+ if (index + 1 == 0) {
+ ReportStatementTooLarge(cx, cg);
+ return JS_FALSE;
+ }
+
+ if ((index & (index - 1)) == 0 &&
+ (!(sdbase = cg->spanDeps) || index >= SPANDEPS_MIN)) {
+ if (!sdbase) {
+ size = SPANDEPS_SIZE_MIN;
+ JS_ARENA_ALLOCATE_CAST(sdbase, JSSpanDep *, &cx->tempPool, size);
+ } else {
+ size = SPANDEPS_SIZE(index);
+ JS_ARENA_GROW_CAST(sdbase, JSSpanDep *, &cx->tempPool, size, size);
+ }
+ if (!sdbase)
+ return JS_FALSE;
+ cg->spanDeps = sdbase;
+ }
+
+ cg->numSpanDeps = index + 1;
+ sd = cg->spanDeps + index;
+ sd->top = PTRDIFF(pc, CG_BASE(cg), jsbytecode);
+ sd->offset = sd->before = PTRDIFF(pc2, CG_BASE(cg), jsbytecode);
+
+ if (js_CodeSpec[*pc].format & JOF_BACKPATCH) {
+ /* Jump offset will be backpatched if off is a non-zero "bpdelta". */
+ if (off != 0) {
+ JS_ASSERT(off >= 1 + JUMP_OFFSET_LEN);
+ if (off > BPDELTA_MAX) {
+ ReportStatementTooLarge(cx, cg);
+ return JS_FALSE;
+ }
+ }
+ SD_SET_BPDELTA(sd, off);
+ } else if (off == 0) {
+ /* Jump offset will be patched directly, without backpatch chaining. */
+ SD_SET_TARGET(sd, NULL);
+ } else {
+ /* The jump offset in off is non-zero, therefore it's already known. */
+ if (!SetSpanDepTarget(cx, cg, sd, off))
+ return JS_FALSE;
+ }
+
+ if (index > SPANDEP_INDEX_MAX)
+ index = SPANDEP_INDEX_HUGE;
+ SET_SPANDEP_INDEX(pc2, index);
+ return JS_TRUE;
+}
+
+static JSBool
+BuildSpanDepTable(JSContext *cx, JSCodeGenerator *cg)
+{
+ jsbytecode *pc, *end;
+ JSOp op;
+ const JSCodeSpec *cs;
+ ptrdiff_t len, off;
+
+ pc = CG_BASE(cg) + cg->spanDepTodo;
+ end = CG_NEXT(cg);
+ while (pc < end) {
+ op = (JSOp)*pc;
+ cs = &js_CodeSpec[op];
+ len = (ptrdiff_t)cs->length;
+
+ switch (cs->format & JOF_TYPEMASK) {
+ case JOF_JUMP:
+ off = GET_JUMP_OFFSET(pc);
+ if (!AddSpanDep(cx, cg, pc, pc, off))
+ return JS_FALSE;
+ break;
+
+ case JOF_TABLESWITCH:
+ {
+ jsbytecode *pc2;
+ jsint i, low, high;
+
+ pc2 = pc;
+ off = GET_JUMP_OFFSET(pc2);
+ if (!AddSpanDep(cx, cg, pc, pc2, off))
+ return JS_FALSE;
+ pc2 += JUMP_OFFSET_LEN;
+ low = GET_JUMP_OFFSET(pc2);
+ pc2 += JUMP_OFFSET_LEN;
+ high = GET_JUMP_OFFSET(pc2);
+ pc2 += JUMP_OFFSET_LEN;
+ for (i = low; i <= high; i++) {
+ off = GET_JUMP_OFFSET(pc2);
+ if (!AddSpanDep(cx, cg, pc, pc2, off))
+ return JS_FALSE;
+ pc2 += JUMP_OFFSET_LEN;
+ }
+ len = 1 + pc2 - pc;
+ break;
+ }
+
+ case JOF_LOOKUPSWITCH:
+ {
+ jsbytecode *pc2;
+ jsint npairs;
+
+ pc2 = pc;
+ off = GET_JUMP_OFFSET(pc2);
+ if (!AddSpanDep(cx, cg, pc, pc2, off))
+ return JS_FALSE;
+ pc2 += JUMP_OFFSET_LEN;
+ npairs = (jsint) GET_ATOM_INDEX(pc2);
+ pc2 += ATOM_INDEX_LEN;
+ while (npairs) {
+ pc2 += ATOM_INDEX_LEN;
+ off = GET_JUMP_OFFSET(pc2);
+ if (!AddSpanDep(cx, cg, pc, pc2, off))
+ return JS_FALSE;
+ pc2 += JUMP_OFFSET_LEN;
+ npairs--;
+ }
+ len = 1 + pc2 - pc;
+ break;
+ }
+ }
+
+ JS_ASSERT(len > 0);
+ pc += len;
+ }
+
+ return JS_TRUE;
+}
+
+static JSSpanDep *
+GetSpanDep(JSCodeGenerator *cg, jsbytecode *pc)
+{
+ uintN index;
+ ptrdiff_t offset;
+ int lo, hi, mid;
+ JSSpanDep *sd;
+
+ index = GET_SPANDEP_INDEX(pc);
+ if (index != SPANDEP_INDEX_HUGE)
+ return cg->spanDeps + index;
+
+ offset = PTRDIFF(pc, CG_BASE(cg), jsbytecode);
+ lo = 0;
+ hi = cg->numSpanDeps - 1;
+ while (lo <= hi) {
+ mid = (lo + hi) / 2;
+ sd = cg->spanDeps + mid;
+ if (sd->before == offset)
+ return sd;
+ if (sd->before < offset)
+ lo = mid + 1;
+ else
+ hi = mid - 1;
+ }
+
+ JS_ASSERT(0);
+ return NULL;
+}
+
+static JSBool
+SetBackPatchDelta(JSContext *cx, JSCodeGenerator *cg, jsbytecode *pc,
+ ptrdiff_t delta)
+{
+ JSSpanDep *sd;
+
+ JS_ASSERT(delta >= 1 + JUMP_OFFSET_LEN);
+ if (!cg->spanDeps && delta < JUMP_OFFSET_MAX) {
+ SET_JUMP_OFFSET(pc, delta);
+ return JS_TRUE;
+ }
+
+ if (delta > BPDELTA_MAX) {
+ ReportStatementTooLarge(cx, cg);
+ return JS_FALSE;
+ }
+
+ if (!cg->spanDeps && !BuildSpanDepTable(cx, cg))
+ return JS_FALSE;
+
+ sd = GetSpanDep(cg, pc);
+ JS_ASSERT(SD_GET_BPDELTA(sd) == 0);
+ SD_SET_BPDELTA(sd, delta);
+ return JS_TRUE;
+}
+
+static void
+UpdateJumpTargets(JSJumpTarget *jt, ptrdiff_t pivot, ptrdiff_t delta)
+{
+ if (jt->offset > pivot) {
+ jt->offset += delta;
+ if (jt->kids[JT_LEFT])
+ UpdateJumpTargets(jt->kids[JT_LEFT], pivot, delta);
+ }
+ if (jt->kids[JT_RIGHT])
+ UpdateJumpTargets(jt->kids[JT_RIGHT], pivot, delta);
+}
+
+static JSSpanDep *
+FindNearestSpanDep(JSCodeGenerator *cg, ptrdiff_t offset, int lo,
+ JSSpanDep *guard)
+{
+ int num, hi, mid;
+ JSSpanDep *sdbase, *sd;
+
+ num = cg->numSpanDeps;
+ JS_ASSERT(num > 0);
+ hi = num - 1;
+ sdbase = cg->spanDeps;
+ while (lo <= hi) {
+ mid = (lo + hi) / 2;
+ sd = sdbase + mid;
+ if (sd->before == offset)
+ return sd;
+ if (sd->before < offset)
+ lo = mid + 1;
+ else
+ hi = mid - 1;
+ }
+ if (lo == num)
+ return guard;
+ sd = sdbase + lo;
+ JS_ASSERT(sd->before >= offset && (lo == 0 || sd[-1].before < offset));
+ return sd;
+}
+
+static void
+FreeJumpTargets(JSCodeGenerator *cg, JSJumpTarget *jt)
+{
+ if (jt->kids[JT_LEFT])
+ FreeJumpTargets(cg, jt->kids[JT_LEFT]);
+ if (jt->kids[JT_RIGHT])
+ FreeJumpTargets(cg, jt->kids[JT_RIGHT]);
+ jt->kids[JT_LEFT] = cg->jtFreeList;
+ cg->jtFreeList = jt;
+}
+
+static JSBool
+OptimizeSpanDeps(JSContext *cx, JSCodeGenerator *cg)
+{
+ jsbytecode *pc, *oldpc, *base, *limit, *next;
+ JSSpanDep *sd, *sd2, *sdbase, *sdlimit, *sdtop, guard;
+ ptrdiff_t offset, growth, delta, top, pivot, span, length, target;
+ JSBool done;
+ JSOp op;
+ uint32 type;
+ size_t size, incr;
+ jssrcnote *sn, *snlimit;
+ JSSrcNoteSpec *spec;
+ uintN i, n, noteIndex;
+ JSTryNote *tn, *tnlimit;
+#ifdef DEBUG_brendan
+ int passes = 0;
+#endif
+
+ base = CG_BASE(cg);
+ sdbase = cg->spanDeps;
+ sdlimit = sdbase + cg->numSpanDeps;
+ offset = CG_OFFSET(cg);
+ growth = 0;
+
+ do {
+ done = JS_TRUE;
+ delta = 0;
+ top = pivot = -1;
+ sdtop = NULL;
+ pc = NULL;
+ op = JSOP_NOP;
+ type = 0;
+#ifdef DEBUG_brendan
+ passes++;
+#endif
+
+ for (sd = sdbase; sd < sdlimit; sd++) {
+ JS_ASSERT(JT_HAS_TAG(sd->target));
+ sd->offset += delta;
+
+ if (sd->top != top) {
+ sdtop = sd;
+ top = sd->top;
+ JS_ASSERT(top == sd->before);
+ pivot = sd->offset;
+ pc = base + top;
+ op = (JSOp) *pc;
+ type = (js_CodeSpec[op].format & JOF_TYPEMASK);
+ if (JOF_TYPE_IS_EXTENDED_JUMP(type)) {
+ /*
+ * We already extended all the jump offset operands for
+ * the opcode at sd->top. Jumps and branches have only
+ * one jump offset operand, but switches have many, all
+ * of which are adjacent in cg->spanDeps.
+ */
+ continue;
+ }
+
+ JS_ASSERT(type == JOF_JUMP ||
+ type == JOF_TABLESWITCH ||
+ type == JOF_LOOKUPSWITCH);
+ }
+
+ if (!JOF_TYPE_IS_EXTENDED_JUMP(type)) {
+ span = SD_SPAN(sd, pivot);
+ if (span < JUMP_OFFSET_MIN || JUMP_OFFSET_MAX < span) {
+ ptrdiff_t deltaFromTop = 0;
+
+ done = JS_FALSE;
+
+ switch (op) {
+ case JSOP_GOTO: op = JSOP_GOTOX; break;
+ case JSOP_IFEQ: op = JSOP_IFEQX; break;
+ case JSOP_IFNE: op = JSOP_IFNEX; break;
+ case JSOP_OR: op = JSOP_ORX; break;
+ case JSOP_AND: op = JSOP_ANDX; break;
+ case JSOP_GOSUB: op = JSOP_GOSUBX; break;
+ case JSOP_CASE: op = JSOP_CASEX; break;
+ case JSOP_DEFAULT: op = JSOP_DEFAULTX; break;
+ case JSOP_TABLESWITCH: op = JSOP_TABLESWITCHX; break;
+ case JSOP_LOOKUPSWITCH: op = JSOP_LOOKUPSWITCHX; break;
+ default:
+ ReportStatementTooLarge(cx, cg);
+ return JS_FALSE;
+ }
+ *pc = (jsbytecode) op;
+
+ for (sd2 = sdtop; sd2 < sdlimit && sd2->top == top; sd2++) {
+ if (sd2 <= sd) {
+ /*
+ * sd2->offset already includes delta as it stood
+ * before we entered this loop, but it must also
+ * include the delta relative to top due to all the
+ * extended jump offset immediates for the opcode
+ * starting at top, which we extend in this loop.
+ *
+ * If there is only one extended jump offset, then
+ * sd2->offset won't change and this for loop will
+ * iterate once only.
+ */
+ sd2->offset += deltaFromTop;
+ deltaFromTop += JUMPX_OFFSET_LEN - JUMP_OFFSET_LEN;
+ } else {
+ /*
+ * sd2 comes after sd, and won't be revisited by
+ * the outer for loop, so we have to increase its
+ * offset by delta, not merely by deltaFromTop.
+ */
+ sd2->offset += delta;
+ }
+
+ delta += JUMPX_OFFSET_LEN - JUMP_OFFSET_LEN;
+ UpdateJumpTargets(cg->jumpTargets, sd2->offset,
+ JUMPX_OFFSET_LEN - JUMP_OFFSET_LEN);
+ }
+ sd = sd2 - 1;
+ }
+ }
+ }
+
+ growth += delta;
+ } while (!done);
+
+ if (growth) {
+#ifdef DEBUG_brendan
+ printf("%s:%u: %u/%u jumps extended in %d passes (%d=%d+%d)\n",
+ cg->filename ? cg->filename : "stdin", cg->firstLine,
+ growth / (JUMPX_OFFSET_LEN - JUMP_OFFSET_LEN), cg->numSpanDeps,
+ passes, offset + growth, offset, growth);
+#endif
+
+ /*
+ * Ensure that we have room for the extended jumps, but don't round up
+ * to a power of two -- we're done generating code, so we cut to fit.
+ */
+ limit = CG_LIMIT(cg);
+ length = offset + growth;
+ next = base + length;
+ if (next > limit) {
+ JS_ASSERT(length > BYTECODE_CHUNK);
+ size = BYTECODE_SIZE(PTRDIFF(limit, base, jsbytecode));
+ incr = BYTECODE_SIZE(length) - size;
+ JS_ARENA_GROW_CAST(base, jsbytecode *, cg->codePool, size, incr);
+ if (!base) {
+ JS_ReportOutOfMemory(cx);
+ return JS_FALSE;
+ }
+ CG_BASE(cg) = base;
+ CG_LIMIT(cg) = next = base + length;
+ }
+ CG_NEXT(cg) = next;
+
+ /*
+ * Set up a fake span dependency record to guard the end of the code
+ * being generated. This guard record is returned as a fencepost by
+ * FindNearestSpanDep if there is no real spandep at or above a given
+ * unextended code offset.
+ */
+ guard.top = -1;
+ guard.offset = offset + growth;
+ guard.before = offset;
+ guard.target = NULL;
+ }
+
+ /*
+ * Now work backwards through the span dependencies, copying chunks of
+ * bytecode between each extended jump toward the end of the grown code
+ * space, and restoring immediate offset operands for all jump bytecodes.
+ * The first chunk of bytecodes, starting at base and ending at the first
+ * extended jump offset (NB: this chunk includes the operation bytecode
+ * just before that immediate jump offset), doesn't need to be copied.
+ */
+ JS_ASSERT(sd == sdlimit);
+ top = -1;
+ while (--sd >= sdbase) {
+ if (sd->top != top) {
+ top = sd->top;
+ op = (JSOp) base[top];
+ type = (js_CodeSpec[op].format & JOF_TYPEMASK);
+
+ for (sd2 = sd - 1; sd2 >= sdbase && sd2->top == top; sd2--)
+ continue;
+ sd2++;
+ pivot = sd2->offset;
+ JS_ASSERT(top == sd2->before);
+ }
+
+ oldpc = base + sd->before;
+ span = SD_SPAN(sd, pivot);
+
+ /*
+ * If this jump didn't need to be extended, restore its span immediate
+ * offset operand now, overwriting the index of sd within cg->spanDeps
+ * that was stored temporarily after *pc when BuildSpanDepTable ran.
+ *
+ * Note that span might fit in 16 bits even for an extended jump op,
+ * if the op has multiple span operands, not all of which overflowed
+ * (e.g. JSOP_LOOKUPSWITCH or JSOP_TABLESWITCH where some cases are in
+ * range for a short jump, but others are not).
+ */
+ if (!JOF_TYPE_IS_EXTENDED_JUMP(type)) {
+ JS_ASSERT(JUMP_OFFSET_MIN <= span && span <= JUMP_OFFSET_MAX);
+ SET_JUMP_OFFSET(oldpc, span);
+ continue;
+ }
+
+ /*
+ * Set up parameters needed to copy the next run of bytecode starting
+ * at offset (which is a cursor into the unextended, original bytecode
+ * vector), down to sd->before (a cursor of the same scale as offset,
+ * it's the index of the original jump pc). Reuse delta to count the
+ * nominal number of bytes to copy.
+ */
+ pc = base + sd->offset;
+ delta = offset - sd->before;
+ JS_ASSERT(delta >= 1 + JUMP_OFFSET_LEN);
+
+ /*
+ * Don't bother copying the jump offset we're about to reset, but do
+ * copy the bytecode at oldpc (which comes just before its immediate
+ * jump offset operand), on the next iteration through the loop, by
+ * including it in offset's new value.
+ */
+ offset = sd->before + 1;
+ size = BYTECODE_SIZE(delta - (1 + JUMP_OFFSET_LEN));
+ if (size) {
+ memmove(pc + 1 + JUMPX_OFFSET_LEN,
+ oldpc + 1 + JUMP_OFFSET_LEN,
+ size);
+ }
+
+ SET_JUMPX_OFFSET(pc, span);
+ }
+
+ if (growth) {
+ /*
+ * Fix source note deltas. Don't hardwire the delta fixup adjustment,
+ * even though currently it must be JUMPX_OFFSET_LEN - JUMP_OFFSET_LEN
+ * at each sd that moved. The future may bring different offset sizes
+ * for span-dependent instruction operands. However, we fix only main
+ * notes here, not prolog notes -- we know that prolog opcodes are not
+ * span-dependent, and aren't likely ever to be.
+ */
+ offset = growth = 0;
+ sd = sdbase;
+ for (sn = cg->main.notes, snlimit = sn + cg->main.noteCount;
+ sn < snlimit;
+ sn = SN_NEXT(sn)) {
+ /*
+ * Recall that the offset of a given note includes its delta, and
+ * tells the offset of the annotated bytecode from the main entry
+ * point of the script.
+ */
+ offset += SN_DELTA(sn);
+ while (sd < sdlimit && sd->before < offset) {
+ /*
+ * To compute the delta to add to sn, we need to look at the
+ * spandep after sd, whose offset - (before + growth) tells by
+ * how many bytes sd's instruction grew.
+ */
+ sd2 = sd + 1;
+ if (sd2 == sdlimit)
+ sd2 = &guard;
+ delta = sd2->offset - (sd2->before + growth);
+ if (delta > 0) {
+ JS_ASSERT(delta == JUMPX_OFFSET_LEN - JUMP_OFFSET_LEN);
+ sn = js_AddToSrcNoteDelta(cx, cg, sn, delta);
+ if (!sn)
+ return JS_FALSE;
+ snlimit = cg->main.notes + cg->main.noteCount;
+ growth += delta;
+ }
+ sd++;
+ }
+
+ /*
+ * If sn has span-dependent offset operands, check whether each
+ * covers further span-dependencies, and increase those operands
+ * accordingly. Some source notes measure offset not from the
+ * annotated pc, but from that pc plus some small bias. NB: we
+ * assume that spec->offsetBias can't itself span span-dependent
+ * instructions!
+ */
+ spec = &js_SrcNoteSpec[SN_TYPE(sn)];
+ if (spec->isSpanDep) {
+ pivot = offset + spec->offsetBias;
+ n = spec->arity;
+ for (i = 0; i < n; i++) {
+ span = js_GetSrcNoteOffset(sn, i);
+ if (span == 0)
+ continue;
+ target = pivot + span * spec->isSpanDep;
+ sd2 = FindNearestSpanDep(cg, target,
+ (target >= pivot)
+ ? sd - sdbase
+ : 0,
+ &guard);
+
+ /*
+ * Increase target by sd2's before-vs-after offset delta,
+ * which is absolute (i.e., relative to start of script,
+ * as is target). Recompute the span by subtracting its
+ * adjusted pivot from target.
+ */
+ target += sd2->offset - sd2->before;
+ span = target - (pivot + growth);
+ span *= spec->isSpanDep;
+ noteIndex = sn - cg->main.notes;
+ if (!js_SetSrcNoteOffset(cx, cg, noteIndex, i, span))
+ return JS_FALSE;
+ sn = cg->main.notes + noteIndex;
+ snlimit = cg->main.notes + cg->main.noteCount;
+ }
+ }
+ }
+ cg->main.lastNoteOffset += growth;
+
+ /*
+ * Fix try/catch notes (O(numTryNotes * log2(numSpanDeps)), but it's
+ * not clear how we can beat that).
+ */
+ for (tn = cg->tryBase, tnlimit = cg->tryNext; tn < tnlimit; tn++) {
+ /*
+ * First, look for the nearest span dependency at/above tn->start.
+ * There may not be any such spandep, in which case the guard will
+ * be returned.
+ */
+ offset = tn->start;
+ sd = FindNearestSpanDep(cg, offset, 0, &guard);
+ delta = sd->offset - sd->before;
+ tn->start = offset + delta;
+
+ /*
+ * Next, find the nearest spandep at/above tn->start + tn->length.
+ * Use its delta minus tn->start's delta to increase tn->length.
+ */
+ length = tn->length;
+ sd2 = FindNearestSpanDep(cg, offset + length, sd - sdbase, &guard);
+ if (sd2 != sd)
+ tn->length = length + sd2->offset - sd2->before - delta;
+
+ /*
+ * Finally, adjust tn->catchStart upward only if it is non-zero,
+ * and provided there are spandeps below it that grew.
+ */
+ offset = tn->catchStart;
+ if (offset != 0) {
+ sd = FindNearestSpanDep(cg, offset, sd2 - sdbase, &guard);
+ tn->catchStart = offset + sd->offset - sd->before;
+ }
+ }
+ }
+
+#ifdef DEBUG_brendan
+ {
+ uintN bigspans = 0;
+ top = -1;
+ for (sd = sdbase; sd < sdlimit; sd++) {
+ offset = sd->offset;
+
+ /* NB: sd->top cursors into the original, unextended bytecode vector. */
+ if (sd->top != top) {
+ JS_ASSERT(top == -1 ||
+ !JOF_TYPE_IS_EXTENDED_JUMP(type) ||
+ bigspans != 0);
+ bigspans = 0;
+ top = sd->top;
+ JS_ASSERT(top == sd->before);
+ op = (JSOp) base[offset];
+ type = (js_CodeSpec[op].format & JOF_TYPEMASK);
+ JS_ASSERT(type == JOF_JUMP ||
+ type == JOF_JUMPX ||
+ type == JOF_TABLESWITCH ||
+ type == JOF_TABLESWITCHX ||
+ type == JOF_LOOKUPSWITCH ||
+ type == JOF_LOOKUPSWITCHX);
+ pivot = offset;
+ }
+
+ pc = base + offset;
+ if (JOF_TYPE_IS_EXTENDED_JUMP(type)) {
+ span = GET_JUMPX_OFFSET(pc);
+ if (span < JUMP_OFFSET_MIN || JUMP_OFFSET_MAX < span) {
+ bigspans++;
+ } else {
+ JS_ASSERT(type == JOF_TABLESWITCHX ||
+ type == JOF_LOOKUPSWITCHX);
+ }
+ } else {
+ span = GET_JUMP_OFFSET(pc);
+ }
+ JS_ASSERT(SD_SPAN(sd, pivot) == span);
+ }
+ JS_ASSERT(!JOF_TYPE_IS_EXTENDED_JUMP(type) || bigspans != 0);
+ }
+#endif
+
+ /*
+ * Reset so we optimize at most once -- cg may be used for further code
+ * generation of successive, independent, top-level statements. No jump
+ * can span top-level statements, because JS lacks goto.
+ */
+ size = SPANDEPS_SIZE(JS_BIT(JS_CeilingLog2(cg->numSpanDeps)));
+ JS_ArenaFreeAllocation(&cx->tempPool, cg->spanDeps,
+ JS_MAX(size, SPANDEPS_SIZE_MIN));
+ cg->spanDeps = NULL;
+ FreeJumpTargets(cg, cg->jumpTargets);
+ cg->jumpTargets = NULL;
+ cg->numSpanDeps = cg->numJumpTargets = 0;
+ cg->spanDepTodo = CG_OFFSET(cg);
+ return JS_TRUE;
+}
+
+static JSBool
+EmitJump(JSContext *cx, JSCodeGenerator *cg, JSOp op, ptrdiff_t off)
+{
+ JSBool extend;
+ ptrdiff_t jmp;
+ jsbytecode *pc;
+
+ extend = off < JUMP_OFFSET_MIN || JUMP_OFFSET_MAX < off;
+ if (extend && !cg->spanDeps && !BuildSpanDepTable(cx, cg))
+ return JS_FALSE;
+
+ jmp = js_Emit3(cx, cg, op, JUMP_OFFSET_HI(off), JUMP_OFFSET_LO(off));
+ if (jmp >= 0 && (extend || cg->spanDeps)) {
+ pc = CG_CODE(cg, jmp);
+ if (!AddSpanDep(cx, cg, pc, pc, off))
+ return JS_FALSE;
+ }
+ return jmp;
+}
+
+static ptrdiff_t
+GetJumpOffset(JSCodeGenerator *cg, jsbytecode *pc)
+{
+ JSSpanDep *sd;
+ JSJumpTarget *jt;
+ ptrdiff_t top;
+
+ if (!cg->spanDeps)
+ return GET_JUMP_OFFSET(pc);
+
+ sd = GetSpanDep(cg, pc);
+ jt = sd->target;
+ if (!JT_HAS_TAG(jt))
+ return JT_TO_BPDELTA(jt);
+
+ top = sd->top;
+ while (--sd >= cg->spanDeps && sd->top == top)
+ continue;
+ sd++;
+ return JT_CLR_TAG(jt)->offset - sd->offset;
+}
+
+JSBool
+js_SetJumpOffset(JSContext *cx, JSCodeGenerator *cg, jsbytecode *pc,
+ ptrdiff_t off)
+{
+ if (!cg->spanDeps) {
+ if (JUMP_OFFSET_MIN <= off && off <= JUMP_OFFSET_MAX) {
+ SET_JUMP_OFFSET(pc, off);
+ return JS_TRUE;
+ }
+
+ if (!BuildSpanDepTable(cx, cg))
+ return JS_FALSE;
+ }
+
+ return SetSpanDepTarget(cx, cg, GetSpanDep(cg, pc), off);
+}
+
+JSBool
+js_InStatement(JSTreeContext *tc, JSStmtType type)
+{
+ JSStmtInfo *stmt;
+
+ for (stmt = tc->topStmt; stmt; stmt = stmt->down) {
+ if (stmt->type == type)
+ return JS_TRUE;
+ }
+ return JS_FALSE;
+}
+
+JSBool
+js_IsGlobalReference(JSTreeContext *tc, JSAtom *atom, JSBool *loopyp)
+{
+ JSStmtInfo *stmt;
+ JSObject *obj;
+ JSScope *scope;
+
+ *loopyp = JS_FALSE;
+ for (stmt = tc->topStmt; stmt; stmt = stmt->down) {
+ if (stmt->type == STMT_WITH)
+ return JS_FALSE;
+ if (STMT_IS_LOOP(stmt)) {
+ *loopyp = JS_TRUE;
+ continue;
+ }
+ if (stmt->flags & SIF_SCOPE) {
+ obj = ATOM_TO_OBJECT(stmt->atom);
+ JS_ASSERT(LOCKED_OBJ_GET_CLASS(obj) == &js_BlockClass);
+ scope = OBJ_SCOPE(obj);
+ if (SCOPE_GET_PROPERTY(scope, ATOM_TO_JSID(atom)))
+ return JS_FALSE;
+ }
+ }
+ return JS_TRUE;
+}
+
+void
+js_PushStatement(JSTreeContext *tc, JSStmtInfo *stmt, JSStmtType type,
+ ptrdiff_t top)
+{
+ stmt->type = type;
+ stmt->flags = 0;
+ SET_STATEMENT_TOP(stmt, top);
+ stmt->atom = NULL;
+ stmt->down = tc->topStmt;
+ tc->topStmt = stmt;
+ if (STMT_LINKS_SCOPE(stmt)) {
+ stmt->downScope = tc->topScopeStmt;
+ tc->topScopeStmt = stmt;
+ } else {
+ stmt->downScope = NULL;
+ }
+}
+
+void
+js_PushBlockScope(JSTreeContext *tc, JSStmtInfo *stmt, JSAtom *blockAtom,
+ ptrdiff_t top)
+{
+ JSObject *blockObj;
+
+ js_PushStatement(tc, stmt, STMT_BLOCK, top);
+ stmt->flags |= SIF_SCOPE;
+ blockObj = ATOM_TO_OBJECT(blockAtom);
+ blockObj->slots[JSSLOT_PARENT] = OBJECT_TO_JSVAL(tc->blockChain);
+ stmt->downScope = tc->topScopeStmt;
+ tc->topScopeStmt = stmt;
+ tc->blockChain = blockObj;
+ stmt->atom = blockAtom;
+}
+
+/*
+ * Emit a backpatch op with offset pointing to the previous jump of this type,
+ * so that we can walk back up the chain fixing up the op and jump offset.
+ */
+static ptrdiff_t
+EmitBackPatchOp(JSContext *cx, JSCodeGenerator *cg, JSOp op, ptrdiff_t *lastp)
+{
+ ptrdiff_t offset, delta;
+
+ offset = CG_OFFSET(cg);
+ delta = offset - *lastp;
+ *lastp = offset;
+ JS_ASSERT(delta > 0);
+ return EmitJump(cx, cg, op, delta);
+}
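+
+/* The delta chain built here is consumed by BackPatch (below), which walks pc -= delta
+ * from the most recent backpatch op to the first, rewriting each delta into the real
+ * jump offset once the common target is known. */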
+
+/*
+ * Macro to emit a bytecode followed by a uint16 immediate operand stored in
+ * big-endian order, used for arg and var numbers as well as for atomIndexes.
+ * NB: We use cx and cg from our caller's lexical environment, and return
+ * false on error.
+ */
+#define EMIT_UINT16_IMM_OP(op, i) \
+ JS_BEGIN_MACRO \
+ if (js_Emit3(cx, cg, op, UINT16_HI(i), UINT16_LO(i)) < 0) \
+ return JS_FALSE; \
+ JS_END_MACRO
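+
+/* For example, EmitNonLocalJumpFixup below uses EMIT_UINT16_IMM_OP(JSOP_LEAVEBLOCK, i)
+ * to pop a block scope's i locals while unwinding a non-local jump. */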
+
+/* Emit additional bytecode(s) for non-local jumps. */
+static JSBool
+EmitNonLocalJumpFixup(JSContext *cx, JSCodeGenerator *cg, JSStmtInfo *toStmt,
+ JSOp *returnop)
+{
+ intN depth;
+ JSStmtInfo *stmt;
+ ptrdiff_t jmp;
+
+ /*
+ * Return from within a try block that has a finally clause must be split
+ * into two ops: JSOP_SETRVAL, to pop the r.v. and store it in fp->rval;
+ * and JSOP_RETRVAL, which makes control flow go back to the caller, who
+ * picks up fp->rval as usual. Otherwise, the stack will be unbalanced
+ * when executing the finally clause.
+ *
+ * We mutate *returnop once only if we find an enclosing try-block (viz,
+ * STMT_FINALLY) to ensure that we emit just one JSOP_SETRVAL before one
+ * or more JSOP_GOSUBs and other fixup opcodes emitted by this function.
+ * Our caller (the TOK_RETURN case of js_EmitTree) then emits *returnop.
+ * The fixup opcodes and gosubs must interleave in the proper order, from
+ * inner statement to outer, so that finally clauses run at the correct
+ * stack depth.
+ */
+ if (returnop) {
+ JS_ASSERT(*returnop == JSOP_RETURN);
+ for (stmt = cg->treeContext.topStmt; stmt != toStmt;
+ stmt = stmt->down) {
+ if (stmt->type == STMT_FINALLY ||
+ ((cg->treeContext.flags & TCF_FUN_HEAVYWEIGHT) &&
+ STMT_MAYBE_SCOPE(stmt))) {
+ if (js_Emit1(cx, cg, JSOP_SETRVAL) < 0)
+ return JS_FALSE;
+ *returnop = JSOP_RETRVAL;
+ break;
+ }
+ }
+
+ /*
+ * If there are no try-with-finally blocks open around this return
+ * statement, we can generate a return forthwith and skip generating
+ * any fixup code.
+ */
+ if (*returnop == JSOP_RETURN)
+ return JS_TRUE;
+ }
+
+ /*
+ * The non-local jump fixup we emit will unbalance cg->stackDepth, because
+ * the fixup replicates balanced code such as JSOP_LEAVEWITH emitted at the
+ * end of a with statement, so we save cg->stackDepth here and restore it
+ * just before a successful return.
+ */
+ depth = cg->stackDepth;
+ for (stmt = cg->treeContext.topStmt; stmt != toStmt; stmt = stmt->down) {
+ switch (stmt->type) {
+ case STMT_FINALLY:
+ if (js_NewSrcNote(cx, cg, SRC_HIDDEN) < 0)
+ return JS_FALSE;
+ jmp = EmitBackPatchOp(cx, cg, JSOP_BACKPATCH, &GOSUBS(*stmt));
+ if (jmp < 0)
+ return JS_FALSE;
+ break;
+
+ case STMT_WITH:
+ /* There's a With object on the stack that we need to pop. */
+ if (js_NewSrcNote(cx, cg, SRC_HIDDEN) < 0)
+ return JS_FALSE;
+ if (js_Emit1(cx, cg, JSOP_LEAVEWITH) < 0)
+ return JS_FALSE;
+ break;
+
+ case STMT_FOR_IN_LOOP:
+ /*
+ * The iterator and the object being iterated need to be popped.
+ */
+ if (js_NewSrcNote(cx, cg, SRC_HIDDEN) < 0)
+ return JS_FALSE;
+ if (js_Emit1(cx, cg, JSOP_ENDITER) < 0)
+ return JS_FALSE;
+ break;
+
+ case STMT_SUBROUTINE:
+ /*
+             * There's an [exception or hole, retsub pc-index] pair on the
+ * stack that we need to pop.
+ */
+ if (js_NewSrcNote(cx, cg, SRC_HIDDEN) < 0)
+ return JS_FALSE;
+ if (js_Emit1(cx, cg, JSOP_POP2) < 0)
+ return JS_FALSE;
+ break;
+
+ default:;
+ }
+
+ if (stmt->flags & SIF_SCOPE) {
+ uintN i;
+
+ /* There is a Block object with locals on the stack to pop. */
+ if (js_NewSrcNote(cx, cg, SRC_HIDDEN) < 0)
+ return JS_FALSE;
+ i = OBJ_BLOCK_COUNT(cx, ATOM_TO_OBJECT(stmt->atom));
+ EMIT_UINT16_IMM_OP(JSOP_LEAVEBLOCK, i);
+ }
+ }
+
+ cg->stackDepth = depth;
+ return JS_TRUE;
+}
+
+static ptrdiff_t
+EmitGoto(JSContext *cx, JSCodeGenerator *cg, JSStmtInfo *toStmt,
+ ptrdiff_t *lastp, JSAtomListElement *label, JSSrcNoteType noteType)
+{
+ intN index;
+
+ if (!EmitNonLocalJumpFixup(cx, cg, toStmt, NULL))
+ return -1;
+
+ if (label)
+ index = js_NewSrcNote2(cx, cg, noteType, (ptrdiff_t) ALE_INDEX(label));
+ else if (noteType != SRC_NULL)
+ index = js_NewSrcNote(cx, cg, noteType);
+ else
+ index = 0;
+ if (index < 0)
+ return -1;
+
+ return EmitBackPatchOp(cx, cg, JSOP_BACKPATCH, lastp);
+}
+
+static JSBool
+BackPatch(JSContext *cx, JSCodeGenerator *cg, ptrdiff_t last,
+ jsbytecode *target, jsbytecode op)
+{
+ jsbytecode *pc, *stop;
+ ptrdiff_t delta, span;
+
+ pc = CG_CODE(cg, last);
+ stop = CG_CODE(cg, -1);
+ while (pc != stop) {
+ delta = GetJumpOffset(cg, pc);
+ span = PTRDIFF(target, pc, jsbytecode);
+ CHECK_AND_SET_JUMP_OFFSET(cx, cg, pc, span);
+
+ /*
+ * Set *pc after jump offset in case bpdelta didn't overflow, but span
+ * does (if so, CHECK_AND_SET_JUMP_OFFSET might call BuildSpanDepTable
+ * and need to see the JSOP_BACKPATCH* op at *pc).
+ */
+ *pc = op;
+ pc -= delta;
+ }
+ return JS_TRUE;
+}
+
+void
+js_PopStatement(JSTreeContext *tc)
+{
+ JSStmtInfo *stmt;
+ JSObject *blockObj;
+
+ stmt = tc->topStmt;
+ tc->topStmt = stmt->down;
+ if (STMT_LINKS_SCOPE(stmt)) {
+ tc->topScopeStmt = stmt->downScope;
+ if (stmt->flags & SIF_SCOPE) {
+ blockObj = ATOM_TO_OBJECT(stmt->atom);
+ tc->blockChain = JSVAL_TO_OBJECT(blockObj->slots[JSSLOT_PARENT]);
+ }
+ }
+}
+
+JSBool
+js_PopStatementCG(JSContext *cx, JSCodeGenerator *cg)
+{
+ JSStmtInfo *stmt;
+
+ stmt = cg->treeContext.topStmt;
+ if (!STMT_IS_TRYING(stmt) &&
+ (!BackPatch(cx, cg, stmt->breaks, CG_NEXT(cg), JSOP_GOTO) ||
+ !BackPatch(cx, cg, stmt->continues, CG_CODE(cg, stmt->update),
+ JSOP_GOTO))) {
+ return JS_FALSE;
+ }
+ js_PopStatement(&cg->treeContext);
+ return JS_TRUE;
+}
+
+JSBool
+js_DefineCompileTimeConstant(JSContext *cx, JSCodeGenerator *cg, JSAtom *atom,
+ JSParseNode *pn)
+{
+ jsdouble dval;
+ jsint ival;
+ JSAtom *valueAtom;
+ JSAtomListElement *ale;
+
+ /* XXX just do numbers for now */
+ if (pn->pn_type == TOK_NUMBER) {
+ dval = pn->pn_dval;
+ valueAtom = (JSDOUBLE_IS_INT(dval, ival) && INT_FITS_IN_JSVAL(ival))
+ ? js_AtomizeInt(cx, ival, 0)
+ : js_AtomizeDouble(cx, dval, 0);
+ if (!valueAtom)
+ return JS_FALSE;
+ ale = js_IndexAtom(cx, atom, &cg->constList);
+ if (!ale)
+ return JS_FALSE;
+ ALE_SET_VALUE(ale, ATOM_KEY(valueAtom));
+ }
+ return JS_TRUE;
+}
+
+JSStmtInfo *
+js_LexicalLookup(JSTreeContext *tc, JSAtom *atom, jsint *slotp, JSBool letdecl)
+{
+ JSStmtInfo *stmt;
+ JSObject *obj;
+ JSScope *scope;
+ JSScopeProperty *sprop;
+ jsval v;
+
+ for (stmt = tc->topScopeStmt; stmt; stmt = stmt->downScope) {
+ if (stmt->type == STMT_WITH) {
+ /* Ignore with statements enclosing a single let declaration. */
+ if (letdecl)
+ continue;
+ break;
+ }
+
+ /* Skip "maybe scope" statements that don't contain let bindings. */
+ if (!(stmt->flags & SIF_SCOPE))
+ continue;
+
+ obj = ATOM_TO_OBJECT(stmt->atom);
+ JS_ASSERT(LOCKED_OBJ_GET_CLASS(obj) == &js_BlockClass);
+ scope = OBJ_SCOPE(obj);
+ sprop = SCOPE_GET_PROPERTY(scope, ATOM_TO_JSID(atom));
+ if (sprop) {
+ JS_ASSERT(sprop->flags & SPROP_HAS_SHORTID);
+
+ if (slotp) {
+ /*
+ * Use LOCKED_OBJ_GET_SLOT since we know obj is single-
+ * threaded and owned by this compiler activation.
+ */
+ v = LOCKED_OBJ_GET_SLOT(obj, JSSLOT_BLOCK_DEPTH);
+ JS_ASSERT(JSVAL_IS_INT(v) && JSVAL_TO_INT(v) >= 0);
+ *slotp = JSVAL_TO_INT(v) + sprop->shortid;
+ }
+ return stmt;
+ }
+ }
+
+ if (slotp)
+ *slotp = -1;
+ return stmt;
+}
+
+JSBool
+js_LookupCompileTimeConstant(JSContext *cx, JSCodeGenerator *cg, JSAtom *atom,
+ jsval *vp)
+{
+ JSBool ok;
+ JSStackFrame *fp;
+ JSStmtInfo *stmt;
+ jsint slot;
+ JSAtomListElement *ale;
+ JSObject *obj, *pobj;
+ JSProperty *prop;
+ uintN attrs;
+
+ /*
+ * fp chases cg down the stack, but only until we reach the outermost cg.
+ * This enables propagating consts from top-level into switch cases in a
+ * function compiled along with the top-level script. All stack frames
+ * with matching code generators should be flagged with JSFRAME_COMPILING;
+ * we check sanity here.
+ */
+ *vp = JSVAL_VOID;
+ ok = JS_TRUE;
+ fp = cx->fp;
+ do {
+ JS_ASSERT(fp->flags & JSFRAME_COMPILING);
+
+ obj = fp->varobj;
+ if (obj == fp->scopeChain) {
+ /* XXX this will need revising when 'let const' is added. */
+ stmt = js_LexicalLookup(&cg->treeContext, atom, &slot, JS_FALSE);
+ if (stmt)
+ return JS_TRUE;
+
+ ATOM_LIST_SEARCH(ale, &cg->constList, atom);
+ if (ale) {
+ *vp = ALE_VALUE(ale);
+ return JS_TRUE;
+ }
+
+ /*
+ * Try looking in the variable object for a direct property that
+ * is readonly and permanent. We know such a property can't be
+ * shadowed by another property on obj's prototype chain, or a
+ * with object or catch variable; nor can prop's value be changed,
+ * nor can prop be deleted.
+ */
+ prop = NULL;
+ if (OBJ_GET_CLASS(cx, obj) == &js_FunctionClass) {
+ ok = js_LookupHiddenProperty(cx, obj, ATOM_TO_JSID(atom),
+ &pobj, &prop);
+ if (!ok)
+ break;
+ if (prop) {
+#ifdef DEBUG
+ JSScopeProperty *sprop = (JSScopeProperty *)prop;
+
+ /*
+ * Any hidden property must be a formal arg or local var,
+ * which will shadow a global const of the same name.
+ */
+ JS_ASSERT(sprop->getter == js_GetArgument ||
+ sprop->getter == js_GetLocalVariable);
+#endif
+ OBJ_DROP_PROPERTY(cx, pobj, prop);
+ break;
+ }
+ }
+
+ ok = OBJ_LOOKUP_PROPERTY(cx, obj, ATOM_TO_JSID(atom), &pobj, &prop);
+ if (ok) {
+ if (pobj == obj &&
+ (fp->flags & (JSFRAME_EVAL | JSFRAME_COMPILE_N_GO))) {
+ /*
+ * We're compiling code that will be executed immediately,
+ * not re-executed against a different scope chain and/or
+ * variable object. Therefore we can get constant values
+ * from our variable object here.
+ */
+ ok = OBJ_GET_ATTRIBUTES(cx, obj, ATOM_TO_JSID(atom), prop,
+ &attrs);
+ if (ok && !(~attrs & (JSPROP_READONLY | JSPROP_PERMANENT)))
+ ok = OBJ_GET_PROPERTY(cx, obj, ATOM_TO_JSID(atom), vp);
+ }
+ if (prop)
+ OBJ_DROP_PROPERTY(cx, pobj, prop);
+ }
+ if (!ok || prop)
+ break;
+ }
+ fp = fp->down;
+ } while ((cg = cg->parent) != NULL);
+ return ok;
+}
+
+/*
+ * Allocate an index invariant for all activations of the code being compiled
+ * in cg, that can be used to store and fetch a reference to a cloned RegExp
+ * object that shares the same JSRegExp private data created for the object
+ * literal in pn->pn_atom. We need clones to hold lastIndex and other direct
+ * properties that should not be shared among threads sharing a precompiled
+ * function or script.
+ *
+ * If the code being compiled is function code, allocate a reserved slot in
+ * the cloned function object that shares its precompiled script with other
+ * cloned function objects and with the compiler-created clone-parent. There
+ * are fun->nregexps such reserved slots in each function object cloned from
+ * fun->object. NB: during compilation, funobj slots must never be allocated,
+ * because js_AllocSlot could hand out one of the slots that should be given
+ * to a regexp clone.
+ *
+ * If the code being compiled is global code, reserve the fp->vars slot at
+ * ALE_INDEX(ale), by ensuring that cg->treeContext.numGlobalVars is at least
+ * one more than this index. For global code, fp->vars is parallel to the
+ * global script->atomMap.vector array, but possibly shorter for the common
+ * case (where var declarations and regexp literals cluster toward the front
+ * of the script or function body).
+ *
+ * Global variable name literals in script->atomMap have fast-global slot
+ * numbers (stored as int-tagged jsvals) in the corresponding fp->vars array
+ * element. The atomIndex for a regexp object literal thus also addresses an
+ * fp->vars element that is not used by any optimized global variable, so we
+ * use that GC-scanned element to keep the regexp object clone alive, as well
+ * as to lazily create and find it at run-time for the JSOP_REGEXP bytecode.
+ *
+ * In no case can cx->fp->varobj be a Call object here, because that implies
+ * we are compiling eval code, in which case (cx->fp->flags & JSFRAME_EVAL)
+ * is true, and js_GetToken will have already selected JSOP_OBJECT instead of
+ * JSOP_REGEXP, to avoid all this RegExp object cloning business.
+ *
+ * Why clone regexp objects? ECMA specifies that when a regular expression
+ * literal is scanned, a RegExp object is created. In the spec, compilation
+ * and execution happen indivisibly, but in this implementation and many of
+ * its embeddings, code is precompiled early and re-executed in multiple
+ * threads, or using multiple global objects, or both, for efficiency.
+ *
+ * In such cases, naively following ECMA leads to wrongful sharing of RegExp
+ * objects, which makes for collisions on the lastIndex property (especially
+ * for global regexps) and on any ad-hoc properties. Also, __proto__ and
+ * __parent__ refer to the pre-compilation prototype and global objects, a
+ * pigeon-hole problem for instanceof tests.
+ */
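+/*
+ * For example, with a precompiled function such as
+ *
+ *   function f(s) { return /x/g.exec(s); }
+ *
+ * a single shared RegExp object for /x/g would let one caller's update of
+ * lastIndex (or of any ad-hoc property) be observed by callers on other
+ * threads or under other globals; the per-activation clone indexed here
+ * keeps that state private.
+ */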
+static JSBool
+IndexRegExpClone(JSContext *cx, JSParseNode *pn, JSAtomListElement *ale,
+ JSCodeGenerator *cg)
+{
+ JSObject *varobj, *reobj;
+ JSClass *clasp;
+ JSFunction *fun;
+ JSRegExp *re;
+ uint16 *countPtr;
+ uintN cloneIndex;
+
+ JS_ASSERT(!(cx->fp->flags & (JSFRAME_EVAL | JSFRAME_COMPILE_N_GO)));
+
+ varobj = cx->fp->varobj;
+ clasp = OBJ_GET_CLASS(cx, varobj);
+ if (clasp == &js_FunctionClass) {
+ fun = (JSFunction *) JS_GetPrivate(cx, varobj);
+ countPtr = &fun->u.i.nregexps;
+ cloneIndex = *countPtr;
+ } else {
+ JS_ASSERT(clasp != &js_CallClass);
+ countPtr = &cg->treeContext.numGlobalVars;
+ cloneIndex = ALE_INDEX(ale);
+ }
+
+ if ((cloneIndex + 1) >> 16) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_NEED_DIET, js_script_str);
+ return JS_FALSE;
+ }
+ if (cloneIndex >= *countPtr)
+ *countPtr = cloneIndex + 1;
+
+ reobj = ATOM_TO_OBJECT(pn->pn_atom);
+ JS_ASSERT(OBJ_GET_CLASS(cx, reobj) == &js_RegExpClass);
+ re = (JSRegExp *) JS_GetPrivate(cx, reobj);
+ re->cloneIndex = cloneIndex;
+ return JS_TRUE;
+}
+
+/*
+ * Emit a bytecode and its 2-byte constant (atom) index immediate operand.
+ * If the atomIndex requires more than 2 bytes, emit a prefix op whose 24-bit
+ * immediate operand indexes the atom in script->atomMap.
+ *
+ * If op has JOF_NAME mode, emit JSOP_FINDNAME to find and push the object in
+ * the scope chain in which the literal name was found, followed by the name
+ * as a string. This enables us to use the JOF_ELEM counterpart to op.
+ *
+ * Otherwise, if op has JOF_PROP mode, emit JSOP_LITERAL before op, to push
+ * the atom's value key. For JOF_PROP ops, the object being operated on has
+ * already been pushed, and JSOP_LITERAL will push the id, leaving the stack
+ * in the proper state for a JOF_ELEM counterpart.
+ *
+ * Otherwise, emit JSOP_LITOPX to push the atom index, then perform a special
+ * dispatch on op, but getting op's atom index from the stack instead of from
+ * an unsigned 16-bit immediate operand.
+ */
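+/*
+ * In outline, once a script's atom map has grown past the 64K limit, a name
+ * use that would normally be JSOP_NAME <atomIndex> is emitted instead as
+ * JSOP_FINDNAME <atomIndex24> followed by JSOP_GETELEM, and a property get
+ * such as obj.p becomes JSOP_LITERAL <atomIndex24> followed by JSOP_GETELEM,
+ * per the switch below.
+ */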
+static JSBool
+EmitAtomIndexOp(JSContext *cx, JSOp op, jsatomid atomIndex, JSCodeGenerator *cg)
+{
+ uint32 mode;
+ JSOp prefixOp;
+ ptrdiff_t off;
+ jsbytecode *pc;
+
+ if (atomIndex >= JS_BIT(16)) {
+ mode = (js_CodeSpec[op].format & JOF_MODEMASK);
+ if (op != JSOP_SETNAME) {
+ prefixOp = ((mode != JOF_NAME && mode != JOF_PROP) ||
+#if JS_HAS_XML_SUPPORT
+ op == JSOP_GETMETHOD ||
+ op == JSOP_SETMETHOD ||
+#endif
+ op == JSOP_SETCONST)
+ ? JSOP_LITOPX
+ : (mode == JOF_NAME)
+ ? JSOP_FINDNAME
+ : JSOP_LITERAL;
+ off = js_EmitN(cx, cg, prefixOp, 3);
+ if (off < 0)
+ return JS_FALSE;
+ pc = CG_CODE(cg, off);
+ SET_LITERAL_INDEX(pc, atomIndex);
+ }
+
+ switch (op) {
+ case JSOP_DECNAME: op = JSOP_DECELEM; break;
+ case JSOP_DECPROP: op = JSOP_DECELEM; break;
+ case JSOP_DELNAME: op = JSOP_DELELEM; break;
+ case JSOP_DELPROP: op = JSOP_DELELEM; break;
+ case JSOP_FORNAME: op = JSOP_FORELEM; break;
+ case JSOP_FORPROP: op = JSOP_FORELEM; break;
+ case JSOP_GETPROP: op = JSOP_GETELEM; break;
+ case JSOP_GETXPROP: op = JSOP_GETXELEM; break;
+ case JSOP_IMPORTPROP: op = JSOP_IMPORTELEM; break;
+ case JSOP_INCNAME: op = JSOP_INCELEM; break;
+ case JSOP_INCPROP: op = JSOP_INCELEM; break;
+ case JSOP_INITPROP: op = JSOP_INITELEM; break;
+ case JSOP_NAME: op = JSOP_GETELEM; break;
+ case JSOP_NAMEDEC: op = JSOP_ELEMDEC; break;
+ case JSOP_NAMEINC: op = JSOP_ELEMINC; break;
+ case JSOP_PROPDEC: op = JSOP_ELEMDEC; break;
+ case JSOP_PROPINC: op = JSOP_ELEMINC; break;
+ case JSOP_BINDNAME: return JS_TRUE;
+ case JSOP_SETNAME: op = JSOP_SETELEM; break;
+ case JSOP_SETPROP: op = JSOP_SETELEM; break;
+#if JS_HAS_EXPORT_IMPORT
+ case JSOP_EXPORTNAME:
+ ReportStatementTooLarge(cx, cg);
+ return JS_FALSE;
+#endif
+ default:
+#if JS_HAS_XML_SUPPORT
+ JS_ASSERT(mode == 0 || op == JSOP_SETCONST ||
+ op == JSOP_GETMETHOD || op == JSOP_SETMETHOD);
+#else
+ JS_ASSERT(mode == 0 || op == JSOP_SETCONST);
+#endif
+ break;
+ }
+
+ return js_Emit1(cx, cg, op) >= 0;
+ }
+
+ EMIT_UINT16_IMM_OP(op, atomIndex);
+ return JS_TRUE;
+}
+
+/*
+ * Slight sugar for EmitAtomIndexOp, again accessing cx and cg from the macro
+ * caller's lexical environment, and embedding a false return on error.
+ * XXXbe hey, who checks for fun->nvars and fun->nargs overflow?!
+ */
+#define EMIT_ATOM_INDEX_OP(op, atomIndex) \
+ JS_BEGIN_MACRO \
+ if (!EmitAtomIndexOp(cx, op, atomIndex, cg)) \
+ return JS_FALSE; \
+ JS_END_MACRO
+
+static JSBool
+EmitAtomOp(JSContext *cx, JSParseNode *pn, JSOp op, JSCodeGenerator *cg)
+{
+ JSAtomListElement *ale;
+
+ ale = js_IndexAtom(cx, pn->pn_atom, &cg->atomList);
+ if (!ale)
+ return JS_FALSE;
+ if (op == JSOP_REGEXP && !IndexRegExpClone(cx, pn, ale, cg))
+ return JS_FALSE;
+ return EmitAtomIndexOp(cx, op, ALE_INDEX(ale), cg);
+}
+
+/*
+ * This routine tries to optimize name gets and sets to stack slot loads and
+ * stores, given the variables object and scope chain in cx's top frame, the
+ * compile-time context in tc, and a TOK_NAME node pn. It returns false on
+ * error, true on success.
+ *
+ * The caller can inspect pn->pn_slot for a non-negative slot number to tell
+ * whether optimization occurred, in which case BindNameToSlot also updated
+ * pn->pn_op. If pn->pn_slot is still -1 on return, pn->pn_op nevertheless
+ * may have been optimized, e.g., from JSOP_NAME to JSOP_ARGUMENTS. Whether
+ * or not pn->pn_op was modified, if this function finds an argument or local
+ * variable name, pn->pn_attrs will contain the property's attributes after a
+ * successful return.
+ *
+ * NB: if you add more opcodes specialized from JSOP_NAME, etc., don't forget
+ * to update the TOK_FOR (for-in) and TOK_ASSIGN (op=, e.g. +=) special cases
+ * in js_EmitTree.
+ */
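+/*
+ * For example, in a lightweight function such as
+ *
+ *   function f(a) { var v = a; return v; }
+ *
+ * the use of a is rewritten from JSOP_NAME to JSOP_GETARG and the uses of v
+ * to JSOP_SETVAR and JSOP_GETVAR, with pn_slot set to the argument or local
+ * slot; in global code with enough global uses the same rewriting targets
+ * the JSOP_*GVAR ops instead.
+ */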
+static JSBool
+BindNameToSlot(JSContext *cx, JSTreeContext *tc, JSParseNode *pn,
+ JSBool letdecl)
+{
+ JSAtom *atom;
+ JSStmtInfo *stmt;
+ jsint slot;
+ JSOp op;
+ JSStackFrame *fp;
+ JSObject *obj, *pobj;
+ JSClass *clasp;
+ JSBool optimizeGlobals;
+ JSPropertyOp getter;
+ uintN attrs;
+ JSAtomListElement *ale;
+ JSProperty *prop;
+ JSScopeProperty *sprop;
+
+ JS_ASSERT(pn->pn_type == TOK_NAME);
+ if (pn->pn_slot >= 0 || pn->pn_op == JSOP_ARGUMENTS)
+ return JS_TRUE;
+
+ /* QNAME references can never be optimized to use arg/var storage. */
+ if (pn->pn_op == JSOP_QNAMEPART)
+ return JS_TRUE;
+
+ /*
+ * We can't optimize if we are compiling a with statement and its body,
+ * or we're in a catch block whose exception variable has the same name
+ * as this node. FIXME: we should be able to optimize catch vars to be
+ * block-locals.
+ */
+ atom = pn->pn_atom;
+ stmt = js_LexicalLookup(tc, atom, &slot, letdecl);
+ if (stmt) {
+ if (stmt->type == STMT_WITH)
+ return JS_TRUE;
+
+ JS_ASSERT(stmt->flags & SIF_SCOPE);
+ JS_ASSERT(slot >= 0);
+ op = pn->pn_op;
+ switch (op) {
+ case JSOP_NAME: op = JSOP_GETLOCAL; break;
+ case JSOP_SETNAME: op = JSOP_SETLOCAL; break;
+ case JSOP_INCNAME: op = JSOP_INCLOCAL; break;
+ case JSOP_NAMEINC: op = JSOP_LOCALINC; break;
+ case JSOP_DECNAME: op = JSOP_DECLOCAL; break;
+ case JSOP_NAMEDEC: op = JSOP_LOCALDEC; break;
+ case JSOP_FORNAME: op = JSOP_FORLOCAL; break;
+ case JSOP_DELNAME: op = JSOP_FALSE; break;
+ default: JS_ASSERT(0);
+ }
+ if (op != pn->pn_op) {
+ pn->pn_op = op;
+ pn->pn_slot = slot;
+ }
+ return JS_TRUE;
+ }
+
+ /*
+ * A Script object can be used to split an eval into a compile step done
+ * at construction time, and an execute step done separately, possibly in
+ * a different scope altogether. We therefore cannot do any name-to-slot
+ * optimizations, but must look up names at runtime. Note that script_exec
+ * ensures that its caller's frame has a Call object, so arg and var name
+ * lookups will succeed.
+ */
+ fp = cx->fp;
+ if (fp->flags & JSFRAME_SCRIPT_OBJECT)
+ return JS_TRUE;
+
+ /*
+ * We can't optimize if var and closure (a local function not in a larger
+ * expression and not at top-level within another function's body) collide.
+ * XXX suboptimal: keep track of colliding names and deoptimize only those
+ */
+ if (tc->flags & TCF_FUN_CLOSURE_VS_VAR)
+ return JS_TRUE;
+
+ /*
+ * We can't optimize if we're not compiling a function body, whether via
+ * eval, or directly when compiling a function statement or expression.
+ */
+ obj = fp->varobj;
+ clasp = OBJ_GET_CLASS(cx, obj);
+ if (clasp != &js_FunctionClass && clasp != &js_CallClass) {
+ /* Check for an eval or debugger frame. */
+ if (fp->flags & JSFRAME_SPECIAL)
+ return JS_TRUE;
+
+ /*
+ * Optimize global variable accesses if there are at least 100 uses
+ * in unambiguous contexts, or failing that, if at least half of all the
+ * uses of global vars/consts/functions are in loops.
+ */
+ optimizeGlobals = (tc->globalUses >= 100 ||
+ (tc->loopyGlobalUses &&
+ tc->loopyGlobalUses >= tc->globalUses / 2));
+ if (!optimizeGlobals)
+ return JS_TRUE;
+ } else {
+ optimizeGlobals = JS_FALSE;
+ }
+
+ /*
+ * We can't optimize if we are in an eval called inside a with statement.
+ */
+ if (fp->scopeChain != obj)
+ return JS_TRUE;
+
+ op = pn->pn_op;
+ getter = NULL;
+#ifdef __GNUC__
+ attrs = slot = 0; /* quell GCC overwarning */
+#endif
+ if (optimizeGlobals) {
+ /*
+ * We are optimizing global variables, and there is no pre-existing
+ * global property named atom. If atom was declared via const or var,
+ * optimize pn to access fp->vars using the appropriate JOF_QVAR op.
+ */
+ ATOM_LIST_SEARCH(ale, &tc->decls, atom);
+ if (!ale) {
+ /* Use precedes declaration, or name is never declared. */
+ return JS_TRUE;
+ }
+
+ attrs = (ALE_JSOP(ale) == JSOP_DEFCONST)
+ ? JSPROP_ENUMERATE | JSPROP_READONLY | JSPROP_PERMANENT
+ : JSPROP_ENUMERATE | JSPROP_PERMANENT;
+
+ /* Index atom so we can map fast global number to name. */
+ JS_ASSERT(tc->flags & TCF_COMPILING);
+ ale = js_IndexAtom(cx, atom, &((JSCodeGenerator *) tc)->atomList);
+ if (!ale)
+ return JS_FALSE;
+
+ /* Defend against tc->numGlobalVars 16-bit overflow. */
+ slot = ALE_INDEX(ale);
+ if ((slot + 1) >> 16)
+ return JS_TRUE;
+
+ if ((uint16)(slot + 1) > tc->numGlobalVars)
+ tc->numGlobalVars = (uint16)(slot + 1);
+ } else {
+ /*
+ * We may be able to optimize name to stack slot. Look for an argument
+ * or variable property in the function, or its call object, not found
+ * in any prototype object. Rewrite pn_op and update pn accordingly.
+ * NB: We know that JSOP_DELNAME on an argument or variable evaluates
+ * to false, due to JSPROP_PERMANENT.
+ */
+ if (!js_LookupHiddenProperty(cx, obj, ATOM_TO_JSID(atom), &pobj, &prop))
+ return JS_FALSE;
+ sprop = (JSScopeProperty *) prop;
+ if (sprop) {
+ if (pobj == obj) {
+ getter = sprop->getter;
+ attrs = sprop->attrs;
+ slot = (sprop->flags & SPROP_HAS_SHORTID) ? sprop->shortid : -1;
+ }
+ OBJ_DROP_PROPERTY(cx, pobj, prop);
+ }
+ }
+
+ if (optimizeGlobals || getter) {
+ if (optimizeGlobals) {
+ switch (op) {
+ case JSOP_NAME: op = JSOP_GETGVAR; break;
+ case JSOP_SETNAME: op = JSOP_SETGVAR; break;
+ case JSOP_SETCONST: /* NB: no change */ break;
+ case JSOP_INCNAME: op = JSOP_INCGVAR; break;
+ case JSOP_NAMEINC: op = JSOP_GVARINC; break;
+ case JSOP_DECNAME: op = JSOP_DECGVAR; break;
+ case JSOP_NAMEDEC: op = JSOP_GVARDEC; break;
+ case JSOP_FORNAME: /* NB: no change */ break;
+ case JSOP_DELNAME: /* NB: no change */ break;
+ default: JS_ASSERT(0);
+ }
+ } else if (getter == js_GetLocalVariable ||
+ getter == js_GetCallVariable) {
+ switch (op) {
+ case JSOP_NAME: op = JSOP_GETVAR; break;
+ case JSOP_SETNAME: op = JSOP_SETVAR; break;
+ case JSOP_SETCONST: op = JSOP_SETVAR; break;
+ case JSOP_INCNAME: op = JSOP_INCVAR; break;
+ case JSOP_NAMEINC: op = JSOP_VARINC; break;
+ case JSOP_DECNAME: op = JSOP_DECVAR; break;
+ case JSOP_NAMEDEC: op = JSOP_VARDEC; break;
+ case JSOP_FORNAME: op = JSOP_FORVAR; break;
+ case JSOP_DELNAME: op = JSOP_FALSE; break;
+ default: JS_ASSERT(0);
+ }
+ } else if (getter == js_GetArgument ||
+ (getter == js_CallClass.getProperty &&
+ fp->fun && (uintN) slot < fp->fun->nargs)) {
+ switch (op) {
+ case JSOP_NAME: op = JSOP_GETARG; break;
+ case JSOP_SETNAME: op = JSOP_SETARG; break;
+ case JSOP_INCNAME: op = JSOP_INCARG; break;
+ case JSOP_NAMEINC: op = JSOP_ARGINC; break;
+ case JSOP_DECNAME: op = JSOP_DECARG; break;
+ case JSOP_NAMEDEC: op = JSOP_ARGDEC; break;
+ case JSOP_FORNAME: op = JSOP_FORARG; break;
+ case JSOP_DELNAME: op = JSOP_FALSE; break;
+ default: JS_ASSERT(0);
+ }
+ }
+ if (op != pn->pn_op) {
+ pn->pn_op = op;
+ pn->pn_slot = slot;
+ }
+ pn->pn_attrs = attrs;
+ }
+
+ if (pn->pn_slot < 0) {
+ /*
+ * We couldn't optimize pn, so it's not a global or local slot name.
+ * Now we must check for the predefined arguments variable. It may be
+ * overridden by assignment, in which case the function is heavyweight
+ * and the interpreter will look up 'arguments' in the function's call
+ * object.
+ */
+ if (pn->pn_op == JSOP_NAME &&
+ atom == cx->runtime->atomState.argumentsAtom) {
+ pn->pn_op = JSOP_ARGUMENTS;
+ return JS_TRUE;
+ }
+
+ tc->flags |= TCF_FUN_USES_NONLOCALS;
+ }
+ return JS_TRUE;
+}
+
+/*
+ * If pn contains a useful expression, return true with *answer set to true.
+ * If pn contains a useless expression, return true with *answer set to false.
+ * Return false on error.
+ *
+ * The caller should initialize *answer to false and invoke this function on
+ * an expression statement or similar subtree to decide whether the tree could
+ * produce code that has any side effects. For an expression statement, we
+ * define useless code as code with no side effects, because the main effect,
+ * the value left on the stack after the code executes, will be discarded by a
+ * pop bytecode.
+ */
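+/*
+ * For example, an expression statement such as 'a + b;' where a and b are
+ * formal arguments is reported useless, while 'f();' or 'o.p;' is reported
+ * useful, since a call or a dotted property get may run arbitrary code (the
+ * callee or a getter).
+ */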
+static JSBool
+CheckSideEffects(JSContext *cx, JSTreeContext *tc, JSParseNode *pn,
+ JSBool *answer)
+{
+ JSBool ok;
+ JSFunction *fun;
+ JSParseNode *pn2;
+
+ ok = JS_TRUE;
+ if (!pn || *answer)
+ return ok;
+
+ switch (pn->pn_arity) {
+ case PN_FUNC:
+ /*
+ * A named function is presumed useful: we can't yet know that it is
+ * not called. The side effects are the creation of a scope object
+ * to parent this function object, and the binding of the function's
+ * name in that scope object. See comments at case JSOP_NAMEDFUNOBJ:
+ * in jsinterp.c.
+ */
+ fun = (JSFunction *) JS_GetPrivate(cx, ATOM_TO_OBJECT(pn->pn_funAtom));
+ if (fun->atom)
+ *answer = JS_TRUE;
+ break;
+
+ case PN_LIST:
+ if (pn->pn_type == TOK_NEW ||
+ pn->pn_type == TOK_LP ||
+ pn->pn_type == TOK_LB ||
+ pn->pn_type == TOK_RB ||
+ pn->pn_type == TOK_RC) {
+ /*
+ * All invocation operations (construct: TOK_NEW, call: TOK_LP)
+ * are presumed to be useful, because they may have side effects
+ * even if their main effect (their return value) is discarded.
+ *
+ * TOK_LB binary trees of 3 or more nodes are flattened into lists
+ * to avoid too much recursion. All such lists must be presumed
+ * to be useful because each index operation could invoke a getter
+ * (the JSOP_ARGUMENTS special case below, in the PN_BINARY case,
+ * does not apply here: arguments[i][j] might invoke a getter).
+ *
+ * Array and object initializers (TOK_RB and TOK_RC lists) must be
+ * considered useful, because they are sugar for constructor calls
+ * (to Array and Object, respectively).
+ */
+ *answer = JS_TRUE;
+ } else {
+ for (pn2 = pn->pn_head; pn2; pn2 = pn2->pn_next)
+ ok &= CheckSideEffects(cx, tc, pn2, answer);
+ }
+ break;
+
+ case PN_TERNARY:
+ ok = CheckSideEffects(cx, tc, pn->pn_kid1, answer) &&
+ CheckSideEffects(cx, tc, pn->pn_kid2, answer) &&
+ CheckSideEffects(cx, tc, pn->pn_kid3, answer);
+ break;
+
+ case PN_BINARY:
+ if (pn->pn_type == TOK_ASSIGN) {
+ /*
+ * Assignment is presumed to be useful, even if the next operation
+ * is another assignment overwriting this one's ostensible effect,
+ * because the left operand may be a property with a setter that
+ * has side effects.
+ *
+ * The only exception is assignment of a useless value to a const
+ * declared in the function currently being compiled.
+ */
+ pn2 = pn->pn_left;
+ if (pn2->pn_type != TOK_NAME) {
+ *answer = JS_TRUE;
+ } else {
+ if (!BindNameToSlot(cx, tc, pn2, JS_FALSE))
+ return JS_FALSE;
+ if (!CheckSideEffects(cx, tc, pn->pn_right, answer))
+ return JS_FALSE;
+ if (!*answer &&
+ (pn2->pn_slot < 0 || !(pn2->pn_attrs & JSPROP_READONLY))) {
+ *answer = JS_TRUE;
+ }
+ }
+ } else {
+ if (pn->pn_type == TOK_LB) {
+ pn2 = pn->pn_left;
+ if (pn2->pn_type == TOK_NAME &&
+ !BindNameToSlot(cx, tc, pn2, JS_FALSE)) {
+ return JS_FALSE;
+ }
+ if (pn2->pn_op != JSOP_ARGUMENTS) {
+ /*
+ * Any indexed property reference could call a getter with
+ * side effects, except for arguments[i] where arguments is
+ * unambiguous.
+ */
+ *answer = JS_TRUE;
+ }
+ }
+ ok = CheckSideEffects(cx, tc, pn->pn_left, answer) &&
+ CheckSideEffects(cx, tc, pn->pn_right, answer);
+ }
+ break;
+
+ case PN_UNARY:
+ if (pn->pn_type == TOK_INC || pn->pn_type == TOK_DEC ||
+ pn->pn_type == TOK_THROW ||
+#if JS_HAS_GENERATORS
+ pn->pn_type == TOK_YIELD ||
+#endif
+ pn->pn_type == TOK_DEFSHARP) {
+ /* All these operations have effects that we must commit. */
+ *answer = JS_TRUE;
+ } else if (pn->pn_type == TOK_DELETE) {
+ pn2 = pn->pn_kid;
+ switch (pn2->pn_type) {
+ case TOK_NAME:
+ case TOK_DOT:
+#if JS_HAS_XML_SUPPORT
+ case TOK_DBLDOT:
+#endif
+#if JS_HAS_LVALUE_RETURN
+ case TOK_LP:
+#endif
+ case TOK_LB:
+ /* All these delete addressing modes have effects too. */
+ *answer = JS_TRUE;
+ break;
+ default:
+ ok = CheckSideEffects(cx, tc, pn2, answer);
+ break;
+ }
+ } else {
+ ok = CheckSideEffects(cx, tc, pn->pn_kid, answer);
+ }
+ break;
+
+ case PN_NAME:
+ /*
+ * Take care to avoid trying to bind a label name (labels, both for
+ * statements and property values in object initialisers, have pn_op
+ * defaulted to JSOP_NOP).
+ */
+ if (pn->pn_type == TOK_NAME && pn->pn_op != JSOP_NOP) {
+ if (!BindNameToSlot(cx, tc, pn, JS_FALSE))
+ return JS_FALSE;
+ if (pn->pn_slot < 0 && pn->pn_op != JSOP_ARGUMENTS) {
+ /*
+ * Not an argument or local variable use, so this expression
+ * could invoke a getter that has side effects.
+ */
+ *answer = JS_TRUE;
+ }
+ }
+ pn2 = pn->pn_expr;
+ if (pn->pn_type == TOK_DOT) {
+ if (pn2->pn_type == TOK_NAME &&
+ !BindNameToSlot(cx, tc, pn2, JS_FALSE)) {
+ return JS_FALSE;
+ }
+ if (!(pn2->pn_op == JSOP_ARGUMENTS &&
+ pn->pn_atom == cx->runtime->atomState.lengthAtom)) {
+ /*
+ * Any dotted property reference could call a getter, except
+ * for arguments.length where arguments is unambiguous.
+ */
+ *answer = JS_TRUE;
+ }
+ }
+ ok = CheckSideEffects(cx, tc, pn2, answer);
+ break;
+
+ case PN_NULLARY:
+ if (pn->pn_type == TOK_DEBUGGER)
+ *answer = JS_TRUE;
+ break;
+ }
+ return ok;
+}
+
+/*
+ * Secret handshake with js_EmitTree's TOK_LP/TOK_NEW case logic, to flag all
+ * uses of JSOP_GETMETHOD that implicitly qualify the method property's name
+ * with a function:: prefix. All other JSOP_GETMETHOD and JSOP_SETMETHOD uses
+ * must be explicit, so we need a distinct source note (SRC_METHODBASE rather
+ * than SRC_PCBASE) for round-tripping through the beloved decompiler.
+ */
+#define JSPROP_IMPLICIT_FUNCTION_NAMESPACE 0x100
+
+static jssrcnote
+SrcNoteForPropOp(JSParseNode *pn, JSOp op)
+{
+ return ((op == JSOP_GETMETHOD &&
+ !(pn->pn_attrs & JSPROP_IMPLICIT_FUNCTION_NAMESPACE)) ||
+ op == JSOP_SETMETHOD)
+ ? SRC_METHODBASE
+ : SRC_PCBASE;
+}
+
+static JSBool
+EmitPropOp(JSContext *cx, JSParseNode *pn, JSOp op, JSCodeGenerator *cg)
+{
+ JSParseNode *pn2, *pndot, *pnup, *pndown;
+ ptrdiff_t top;
+
+ pn2 = pn->pn_expr;
+ if (op == JSOP_GETPROP &&
+ pn->pn_type == TOK_DOT &&
+ pn2->pn_type == TOK_NAME) {
+ /* Try to optimize arguments.length into JSOP_ARGCNT. */
+ if (!BindNameToSlot(cx, &cg->treeContext, pn2, JS_FALSE))
+ return JS_FALSE;
+ if (pn2->pn_op == JSOP_ARGUMENTS &&
+ pn->pn_atom == cx->runtime->atomState.lengthAtom) {
+ return js_Emit1(cx, cg, JSOP_ARGCNT) >= 0;
+ }
+ }
+
+ /*
+ * If the object operand is also a dotted property reference, reverse the
+ * list linked via pn_expr temporarily so we can iterate over it from the
+ * bottom up (reversing again as we go), to avoid excessive recursion.
+ */
+ if (pn2->pn_type == TOK_DOT) {
+ pndot = pn2;
+ pnup = NULL;
+ top = CG_OFFSET(cg);
+ for (;;) {
+ /* Reverse pndot->pn_expr to point up, not down. */
+ pndot->pn_offset = top;
+ pndown = pndot->pn_expr;
+ pndot->pn_expr = pnup;
+ if (pndown->pn_type != TOK_DOT)
+ break;
+ pnup = pndot;
+ pndot = pndown;
+ }
+
+ /* pndown is a primary expression, not a dotted property reference. */
+ if (!js_EmitTree(cx, cg, pndown))
+ return JS_FALSE;
+
+ do {
+ /* Walk back up the list, emitting annotated name ops. */
+ if (js_NewSrcNote2(cx, cg, SrcNoteForPropOp(pndot, pndot->pn_op),
+ CG_OFFSET(cg) - pndown->pn_offset) < 0) {
+ return JS_FALSE;
+ }
+ if (!EmitAtomOp(cx, pndot, pndot->pn_op, cg))
+ return JS_FALSE;
+
+ /* Reverse the pn_expr link again. */
+ pnup = pndot->pn_expr;
+ pndot->pn_expr = pndown;
+ pndown = pndot;
+ } while ((pndot = pnup) != NULL);
+ } else {
+ if (!js_EmitTree(cx, cg, pn2))
+ return JS_FALSE;
+ }
+
+ if (js_NewSrcNote2(cx, cg, SrcNoteForPropOp(pn, op),
+ CG_OFFSET(cg) - pn2->pn_offset) < 0) {
+ return JS_FALSE;
+ }
+ if (!pn->pn_atom) {
+ JS_ASSERT(op == JSOP_IMPORTALL);
+ if (js_Emit1(cx, cg, op) < 0)
+ return JS_FALSE;
+ } else {
+ if (!EmitAtomOp(cx, pn, op, cg))
+ return JS_FALSE;
+ }
+ return JS_TRUE;
+}
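+
+/*
+ * Net effect of EmitPropOp: a chain such as a.b.c is emitted iteratively
+ * (code for a, then annotated gets of b and of c, with no recursion on the
+ * object operand), and arguments.length collapses to a single JSOP_ARGCNT
+ * when arguments is unambiguous.
+ */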
+
+static JSBool
+EmitElemOp(JSContext *cx, JSParseNode *pn, JSOp op, JSCodeGenerator *cg)
+{
+ ptrdiff_t top;
+ JSParseNode *left, *right, *next, ltmp, rtmp;
+ jsint slot;
+
+ top = CG_OFFSET(cg);
+ if (pn->pn_arity == PN_LIST) {
+ /* Left-associative operator chain to avoid too much recursion. */
+ JS_ASSERT(pn->pn_op == JSOP_GETELEM || pn->pn_op == JSOP_IMPORTELEM);
+ JS_ASSERT(pn->pn_count >= 3);
+ left = pn->pn_head;
+ right = PN_LAST(pn);
+ next = left->pn_next;
+ JS_ASSERT(next != right);
+
+ /*
+ * Try to optimize arguments[0][j]... into JSOP_ARGSUB<0> followed by
+ * one or more index expression and JSOP_GETELEM op pairs.
+ */
+ if (left->pn_type == TOK_NAME && next->pn_type == TOK_NUMBER) {
+ if (!BindNameToSlot(cx, &cg->treeContext, left, JS_FALSE))
+ return JS_FALSE;
+ if (left->pn_op == JSOP_ARGUMENTS &&
+ JSDOUBLE_IS_INT(next->pn_dval, slot) &&
+ (jsuint)slot < JS_BIT(16)) {
+ left->pn_offset = next->pn_offset = top;
+ EMIT_UINT16_IMM_OP(JSOP_ARGSUB, (jsatomid)slot);
+ left = next;
+ next = left->pn_next;
+ }
+ }
+
+ /*
+ * Check whether we generated JSOP_ARGSUB, just above, and have only
+ * one more index expression to emit. Given arguments[0][j], we must
+ * skip the while loop altogether, falling through to emit code for j
+ * (in the subtree referenced by right), followed by the annotated op,
+ * at the bottom of this function.
+ */
+ JS_ASSERT(next != right || pn->pn_count == 3);
+ if (left == pn->pn_head) {
+ if (!js_EmitTree(cx, cg, left))
+ return JS_FALSE;
+ }
+ while (next != right) {
+ if (!js_EmitTree(cx, cg, next))
+ return JS_FALSE;
+ if (js_NewSrcNote2(cx, cg, SRC_PCBASE, CG_OFFSET(cg) - top) < 0)
+ return JS_FALSE;
+ if (js_Emit1(cx, cg, JSOP_GETELEM) < 0)
+ return JS_FALSE;
+ next = next->pn_next;
+ }
+ } else {
+ if (pn->pn_arity == PN_NAME) {
+ /*
+ * Set left and right so pn appears to be a TOK_LB node, instead
+ * of a TOK_DOT node. See the TOK_FOR/IN case in js_EmitTree, and
+ * EmitDestructuringOps nearer below. In the destructuring case,
+ * the base expression (pn_expr) of the name may be null, which
+ * means we have to emit a JSOP_BINDNAME.
+ */
+ left = pn->pn_expr;
+ if (!left) {
+ left = &ltmp;
+ left->pn_type = TOK_OBJECT;
+ left->pn_op = JSOP_BINDNAME;
+ left->pn_arity = PN_NULLARY;
+ left->pn_pos = pn->pn_pos;
+ left->pn_atom = pn->pn_atom;
+ }
+ right = &rtmp;
+ right->pn_type = TOK_STRING;
+ JS_ASSERT(ATOM_IS_STRING(pn->pn_atom));
+ right->pn_op = js_IsIdentifier(ATOM_TO_STRING(pn->pn_atom))
+ ? JSOP_QNAMEPART
+ : JSOP_STRING;
+ right->pn_arity = PN_NULLARY;
+ right->pn_pos = pn->pn_pos;
+ right->pn_atom = pn->pn_atom;
+ } else {
+ JS_ASSERT(pn->pn_arity == PN_BINARY);
+ left = pn->pn_left;
+ right = pn->pn_right;
+ }
+
+ /* Try to optimize arguments[0] (e.g.) into JSOP_ARGSUB<0>. */
+ if (op == JSOP_GETELEM &&
+ left->pn_type == TOK_NAME &&
+ right->pn_type == TOK_NUMBER) {
+ if (!BindNameToSlot(cx, &cg->treeContext, left, JS_FALSE))
+ return JS_FALSE;
+ if (left->pn_op == JSOP_ARGUMENTS &&
+ JSDOUBLE_IS_INT(right->pn_dval, slot) &&
+ (jsuint)slot < JS_BIT(16)) {
+ left->pn_offset = right->pn_offset = top;
+ EMIT_UINT16_IMM_OP(JSOP_ARGSUB, (jsatomid)slot);
+ return JS_TRUE;
+ }
+ }
+
+ if (!js_EmitTree(cx, cg, left))
+ return JS_FALSE;
+ }
+
+ /* The right side of the descendant operator is implicitly quoted. */
+ JS_ASSERT(op != JSOP_DESCENDANTS || right->pn_type != TOK_STRING ||
+ right->pn_op == JSOP_QNAMEPART);
+ if (!js_EmitTree(cx, cg, right))
+ return JS_FALSE;
+ if (js_NewSrcNote2(cx, cg, SRC_PCBASE, CG_OFFSET(cg) - top) < 0)
+ return JS_FALSE;
+ return js_Emit1(cx, cg, op) >= 0;
+}
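+
+/*
+ * Net effect of EmitElemOp: arguments[0] with a constant index below 2^16
+ * becomes a single JSOP_ARGSUB, while a flattened chain such as a[i][j][k]
+ * is emitted left to right, interleaving index expressions with
+ * JSOP_GETELEM rather than recursing on the left operand.
+ */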
+
+static JSBool
+EmitNumberOp(JSContext *cx, jsdouble dval, JSCodeGenerator *cg)
+{
+ jsint ival;
+ jsatomid atomIndex;
+ ptrdiff_t off;
+ jsbytecode *pc;
+ JSAtom *atom;
+ JSAtomListElement *ale;
+
+ if (JSDOUBLE_IS_INT(dval, ival) && INT_FITS_IN_JSVAL(ival)) {
+ if (ival == 0)
+ return js_Emit1(cx, cg, JSOP_ZERO) >= 0;
+ if (ival == 1)
+ return js_Emit1(cx, cg, JSOP_ONE) >= 0;
+
+ atomIndex = (jsatomid)ival;
+ if (atomIndex < JS_BIT(16)) {
+ EMIT_UINT16_IMM_OP(JSOP_UINT16, atomIndex);
+ return JS_TRUE;
+ }
+
+ if (atomIndex < JS_BIT(24)) {
+ off = js_EmitN(cx, cg, JSOP_UINT24, 3);
+ if (off < 0)
+ return JS_FALSE;
+ pc = CG_CODE(cg, off);
+ SET_LITERAL_INDEX(pc, atomIndex);
+ return JS_TRUE;
+ }
+
+ atom = js_AtomizeInt(cx, ival, 0);
+ } else {
+ atom = js_AtomizeDouble(cx, dval, 0);
+ }
+ if (!atom)
+ return JS_FALSE;
+
+ ale = js_IndexAtom(cx, atom, &cg->atomList);
+ if (!ale)
+ return JS_FALSE;
+ return EmitAtomIndexOp(cx, JSOP_NUMBER, ALE_INDEX(ale), cg);
+}
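+
+/*
+ * Net effect of EmitNumberOp: 0 and 1 get JSOP_ZERO and JSOP_ONE, other
+ * non-negative ints below 2^16 get JSOP_UINT16, those below 2^24 get
+ * JSOP_UINT24, and everything else (large, negative, or fractional values)
+ * is atomized and referenced through JSOP_NUMBER.
+ */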
+
+static JSBool
+EmitSwitch(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn,
+ JSStmtInfo *stmtInfo)
+{
+ JSOp switchOp;
+ JSBool ok, hasDefault, constPropagated;
+ ptrdiff_t top, off, defaultOffset;
+ JSParseNode *pn2, *pn3, *pn4;
+ uint32 caseCount, tableLength;
+ JSParseNode **table;
+ jsdouble d;
+ jsint i, low, high;
+ jsval v;
+ JSAtom *atom;
+ JSAtomListElement *ale;
+ intN noteIndex;
+ size_t switchSize, tableSize;
+ jsbytecode *pc, *savepc;
+#if JS_HAS_BLOCK_SCOPE
+ JSObject *obj;
+ jsint count;
+#endif
+
+ /*
+ * Try the most optimal op, JSOP_TABLESWITCH, first; fall back to
+ * JSOP_LOOKUPSWITCH if the cases are not dense ints, and to JSOP_CONDSWITCH
+ * (per ECMAv2) if they are not all compile-time constants.
+ */
+ switchOp = JSOP_TABLESWITCH;
+ ok = JS_TRUE;
+ hasDefault = constPropagated = JS_FALSE;
+ defaultOffset = -1;
+
+ /*
+ * If the switch contains let variables scoped by its body, model the
+ * resulting block on the stack first, before emitting the discriminant's
+ * bytecode (in case the discriminant contains a stack-model dependency
+ * such as a let expression).
+ */
+ pn2 = pn->pn_right;
+#if JS_HAS_BLOCK_SCOPE
+ if (pn2->pn_type == TOK_LEXICALSCOPE) {
+ atom = pn2->pn_atom;
+ obj = ATOM_TO_OBJECT(atom);
+ OBJ_SET_BLOCK_DEPTH(cx, obj, cg->stackDepth);
+
+ /*
+ * Push the body's block scope before discriminant code-gen for proper
+ * static block scope linkage in case the discriminant contains a let
+ * expression. The block's locals must lie under the discriminant on
+ * the stack so that case-dispatch bytecodes can find the discriminant
+ * on top of stack.
+ */
+ js_PushBlockScope(&cg->treeContext, stmtInfo, atom, -1);
+ stmtInfo->type = STMT_SWITCH;
+
+ count = OBJ_BLOCK_COUNT(cx, obj);
+ cg->stackDepth += count;
+ if ((uintN)cg->stackDepth > cg->maxStackDepth)
+ cg->maxStackDepth = cg->stackDepth;
+
+ /* Emit JSOP_ENTERBLOCK before code to evaluate the discriminant. */
+ ale = js_IndexAtom(cx, atom, &cg->atomList);
+ if (!ale)
+ return JS_FALSE;
+ EMIT_ATOM_INDEX_OP(JSOP_ENTERBLOCK, ALE_INDEX(ale));
+
+ /*
+ * Pop the switch's statement info around discriminant code-gen. Note
+ * how this leaves cg->treeContext.blockChain referencing the switch's
+ * block scope object, which is necessary for correct block parenting
+ * in the case where the discriminant contains a let expression.
+ */
+ cg->treeContext.topStmt = stmtInfo->down;
+ cg->treeContext.topScopeStmt = stmtInfo->downScope;
+ }
+#ifdef __GNUC__
+ else {
+ atom = NULL;
+ count = -1;
+ }
+#endif
+#endif
+
+ /*
+ * Emit code for the discriminant first (or nearly first, in the case of a
+ * switch whose body is a block scope).
+ */
+ if (!js_EmitTree(cx, cg, pn->pn_left))
+ return JS_FALSE;
+
+ /* Switch bytecodes run from here till end of final case. */
+ top = CG_OFFSET(cg);
+#if !JS_HAS_BLOCK_SCOPE
+ js_PushStatement(&cg->treeContext, stmtInfo, STMT_SWITCH, top);
+#else
+ if (pn2->pn_type == TOK_LC) {
+ js_PushStatement(&cg->treeContext, stmtInfo, STMT_SWITCH, top);
+ } else {
+ /* Re-push the switch's statement info record. */
+ cg->treeContext.topStmt = cg->treeContext.topScopeStmt = stmtInfo;
+
+ /* Set the statement info record's idea of top. */
+ stmtInfo->update = top;
+
+ /* Advance pn2 to refer to the switch case list. */
+ pn2 = pn2->pn_expr;
+ }
+#endif
+
+ caseCount = pn2->pn_count;
+ tableLength = 0;
+ table = NULL;
+
+ if (caseCount == 0 ||
+ (caseCount == 1 &&
+ (hasDefault = (pn2->pn_head->pn_type == TOK_DEFAULT)))) {
+ caseCount = 0;
+ low = 0;
+ high = -1;
+ } else {
+#define INTMAP_LENGTH 256
+ jsbitmap intmap_space[INTMAP_LENGTH];
+ jsbitmap *intmap = NULL;
+ int32 intmap_bitlen = 0;
+
+ low = JSVAL_INT_MAX;
+ high = JSVAL_INT_MIN;
+
+ for (pn3 = pn2->pn_head; pn3; pn3 = pn3->pn_next) {
+ if (pn3->pn_type == TOK_DEFAULT) {
+ hasDefault = JS_TRUE;
+ caseCount--; /* one of the "cases" was the default */
+ continue;
+ }
+
+ JS_ASSERT(pn3->pn_type == TOK_CASE);
+ if (switchOp == JSOP_CONDSWITCH)
+ continue;
+
+ pn4 = pn3->pn_left;
+ switch (pn4->pn_type) {
+ case TOK_NUMBER:
+ d = pn4->pn_dval;
+ if (JSDOUBLE_IS_INT(d, i) && INT_FITS_IN_JSVAL(i)) {
+ pn3->pn_val = INT_TO_JSVAL(i);
+ } else {
+ atom = js_AtomizeDouble(cx, d, 0);
+ if (!atom) {
+ ok = JS_FALSE;
+ goto release;
+ }
+ pn3->pn_val = ATOM_KEY(atom);
+ }
+ break;
+ case TOK_STRING:
+ pn3->pn_val = ATOM_KEY(pn4->pn_atom);
+ break;
+ case TOK_NAME:
+ if (!pn4->pn_expr) {
+ ok = js_LookupCompileTimeConstant(cx, cg, pn4->pn_atom, &v);
+ if (!ok)
+ goto release;
+ if (!JSVAL_IS_VOID(v)) {
+ pn3->pn_val = v;
+ constPropagated = JS_TRUE;
+ break;
+ }
+ }
+ /* FALL THROUGH */
+ case TOK_PRIMARY:
+ if (pn4->pn_op == JSOP_TRUE) {
+ pn3->pn_val = JSVAL_TRUE;
+ break;
+ }
+ if (pn4->pn_op == JSOP_FALSE) {
+ pn3->pn_val = JSVAL_FALSE;
+ break;
+ }
+ /* FALL THROUGH */
+ default:
+ switchOp = JSOP_CONDSWITCH;
+ continue;
+ }
+
+ JS_ASSERT(JSVAL_IS_NUMBER(pn3->pn_val) ||
+ JSVAL_IS_STRING(pn3->pn_val) ||
+ JSVAL_IS_BOOLEAN(pn3->pn_val));
+
+ if (switchOp != JSOP_TABLESWITCH)
+ continue;
+ if (!JSVAL_IS_INT(pn3->pn_val)) {
+ switchOp = JSOP_LOOKUPSWITCH;
+ continue;
+ }
+ i = JSVAL_TO_INT(pn3->pn_val);
+ if ((jsuint)(i + (jsint)JS_BIT(15)) >= (jsuint)JS_BIT(16)) {
+ switchOp = JSOP_LOOKUPSWITCH;
+ continue;
+ }
+ if (i < low)
+ low = i;
+ if (high < i)
+ high = i;
+
+ /*
+ * Check for duplicates, which require a JSOP_LOOKUPSWITCH.
+ * We bias i by 65536 if it's negative, and hope that's a rare
+ * case (because it requires a malloc'd bitmap).
+ */
+ if (i < 0)
+ i += JS_BIT(16);
+ if (i >= intmap_bitlen) {
+ if (!intmap &&
+ i < (INTMAP_LENGTH << JS_BITS_PER_WORD_LOG2)) {
+ intmap = intmap_space;
+ intmap_bitlen = INTMAP_LENGTH << JS_BITS_PER_WORD_LOG2;
+ } else {
+ /* Just grab 8K for the worst-case bitmap. */
+ intmap_bitlen = JS_BIT(16);
+ intmap = (jsbitmap *)
+ JS_malloc(cx,
+ (JS_BIT(16) >> JS_BITS_PER_WORD_LOG2)
+ * sizeof(jsbitmap));
+ if (!intmap) {
+ JS_ReportOutOfMemory(cx);
+ return JS_FALSE;
+ }
+ }
+ memset(intmap, 0, intmap_bitlen >> JS_BITS_PER_BYTE_LOG2);
+ }
+ if (JS_TEST_BIT(intmap, i)) {
+ switchOp = JSOP_LOOKUPSWITCH;
+ continue;
+ }
+ JS_SET_BIT(intmap, i);
+ }
+
+ release:
+ if (intmap && intmap != intmap_space)
+ JS_free(cx, intmap);
+ if (!ok)
+ return JS_FALSE;
+
+ /*
+ * Compute table length and select lookup instead if overlarge or
+ * more than half-sparse.
+ */
+ if (switchOp == JSOP_TABLESWITCH) {
+ tableLength = (uint32)(high - low + 1);
+ if (tableLength >= JS_BIT(16) || tableLength > 2 * caseCount)
+ switchOp = JSOP_LOOKUPSWITCH;
+ } else if (switchOp == JSOP_LOOKUPSWITCH) {
+ /*
+ * Lookup switch supports only atom indexes below the 64K limit.
+ * Conservatively estimate the maximum possible index during
+ * switch generation and use conditional switch if it exceeds
+ * the limit.
+ */
+ if (caseCount + cg->atomList.count > JS_BIT(16))
+ switchOp = JSOP_CONDSWITCH;
+ }
+ }
+
+ /*
+ * Emit a note with two offsets: first tells total switch code length,
+ * second tells offset to first JSOP_CASE if condswitch.
+ */
+ noteIndex = js_NewSrcNote3(cx, cg, SRC_SWITCH, 0, 0);
+ if (noteIndex < 0)
+ return JS_FALSE;
+
+ if (switchOp == JSOP_CONDSWITCH) {
+ /*
+ * 0 bytes of immediate for unoptimized ECMAv2 switch.
+ */
+ switchSize = 0;
+ } else if (switchOp == JSOP_TABLESWITCH) {
+ /*
+ * 3 offsets (len, low, high) before the table, 1 per entry.
+ */
+ switchSize = (size_t)(JUMP_OFFSET_LEN * (3 + tableLength));
+ } else {
+ /*
+ * JSOP_LOOKUPSWITCH:
+ * 1 offset (len) and 1 atom index (npairs) before the table,
+ * 1 atom index and 1 jump offset per entry.
+ */
+ switchSize = (size_t)(JUMP_OFFSET_LEN + ATOM_INDEX_LEN +
+ (ATOM_INDEX_LEN + JUMP_OFFSET_LEN) * caseCount);
+ }
+
+ /*
+ * Emit switchOp followed by switchSize bytes of jump or lookup table.
+ *
+ * If switchOp is JSOP_LOOKUPSWITCH or JSOP_TABLESWITCH, it is crucial
+ * to emit the immediate operand(s) by which bytecode readers such as
+ * BuildSpanDepTable discover the length of the switch opcode *before*
+ * calling js_SetJumpOffset (which may call BuildSpanDepTable). It's
+ * also important to zero all unknown jump offset immediate operands,
+ * so they can be converted to span dependencies with null targets to
+ * be computed later (js_EmitN zeros switchSize bytes after switchOp).
+ */
+ if (js_EmitN(cx, cg, switchOp, switchSize) < 0)
+ return JS_FALSE;
+
+ off = -1;
+ if (switchOp == JSOP_CONDSWITCH) {
+ intN caseNoteIndex = -1;
+ JSBool beforeCases = JS_TRUE;
+
+ /* Emit code for evaluating cases and jumping to case statements. */
+ for (pn3 = pn2->pn_head; pn3; pn3 = pn3->pn_next) {
+ pn4 = pn3->pn_left;
+ if (pn4 && !js_EmitTree(cx, cg, pn4))
+ return JS_FALSE;
+ if (caseNoteIndex >= 0) {
+ /* off is the previous JSOP_CASE's bytecode offset. */
+ if (!js_SetSrcNoteOffset(cx, cg, (uintN)caseNoteIndex, 0,
+ CG_OFFSET(cg) - off)) {
+ return JS_FALSE;
+ }
+ }
+ if (!pn4) {
+ JS_ASSERT(pn3->pn_type == TOK_DEFAULT);
+ continue;
+ }
+ caseNoteIndex = js_NewSrcNote2(cx, cg, SRC_PCDELTA, 0);
+ if (caseNoteIndex < 0)
+ return JS_FALSE;
+ off = EmitJump(cx, cg, JSOP_CASE, 0);
+ if (off < 0)
+ return JS_FALSE;
+ pn3->pn_offset = off;
+ if (beforeCases) {
+ uintN noteCount, noteCountDelta;
+
+ /* Switch note's second offset is to first JSOP_CASE. */
+ noteCount = CG_NOTE_COUNT(cg);
+ if (!js_SetSrcNoteOffset(cx, cg, (uintN)noteIndex, 1,
+ off - top)) {
+ return JS_FALSE;
+ }
+ noteCountDelta = CG_NOTE_COUNT(cg) - noteCount;
+ if (noteCountDelta != 0)
+ caseNoteIndex += noteCountDelta;
+ beforeCases = JS_FALSE;
+ }
+ }
+
+ /*
+ * If we didn't have an explicit default (which could fall in between
+ * cases, preventing us from fusing this js_SetSrcNoteOffset with the
+ * call in the loop above), link the last case to the implicit default
+ * for the decompiler.
+ */
+ if (!hasDefault &&
+ caseNoteIndex >= 0 &&
+ !js_SetSrcNoteOffset(cx, cg, (uintN)caseNoteIndex, 0,
+ CG_OFFSET(cg) - off)) {
+ return JS_FALSE;
+ }
+
+ /* Emit default even if no explicit default statement. */
+ defaultOffset = EmitJump(cx, cg, JSOP_DEFAULT, 0);
+ if (defaultOffset < 0)
+ return JS_FALSE;
+ } else {
+ pc = CG_CODE(cg, top + JUMP_OFFSET_LEN);
+
+ if (switchOp == JSOP_TABLESWITCH) {
+ /* Fill in switch bounds, which we know fit in 16-bit offsets. */
+ SET_JUMP_OFFSET(pc, low);
+ pc += JUMP_OFFSET_LEN;
+ SET_JUMP_OFFSET(pc, high);
+ pc += JUMP_OFFSET_LEN;
+
+ /*
+ * Use malloc to avoid arena bloat for programs with many switches.
+ * We free table if non-null at label out, so all control flow must
+ * exit this function through goto out or goto bad.
+ */
+ if (tableLength != 0) {
+ tableSize = (size_t)tableLength * sizeof *table;
+ table = (JSParseNode **) JS_malloc(cx, tableSize);
+ if (!table)
+ return JS_FALSE;
+ memset(table, 0, tableSize);
+ for (pn3 = pn2->pn_head; pn3; pn3 = pn3->pn_next) {
+ if (pn3->pn_type == TOK_DEFAULT)
+ continue;
+ i = JSVAL_TO_INT(pn3->pn_val);
+ i -= low;
+ JS_ASSERT((uint32)i < tableLength);
+ table[i] = pn3;
+ }
+ }
+ } else {
+ JS_ASSERT(switchOp == JSOP_LOOKUPSWITCH);
+
+ /* Fill in the number of cases. */
+ SET_ATOM_INDEX(pc, caseCount);
+ pc += ATOM_INDEX_LEN;
+ }
+
+ /*
+ * After this point, all control flow involving JSOP_TABLESWITCH
+ * must set ok and goto out to exit this function. To keep things
+ * simple, all switchOp cases exit that way.
+ */
+ if (constPropagated) {
+ /*
+ * Skip switchOp, as we are not setting jump offsets in the two
+ * for loops below. We'll restore CG_NEXT(cg) from savepc after,
+ * unless there was an error.
+ */
+ savepc = CG_NEXT(cg);
+ CG_NEXT(cg) = pc + 1;
+ if (switchOp == JSOP_TABLESWITCH) {
+ for (i = 0; i < (jsint)tableLength; i++) {
+ pn3 = table[i];
+ if (pn3 &&
+ (pn4 = pn3->pn_left) != NULL &&
+ pn4->pn_type == TOK_NAME) {
+ /* Note a propagated constant with the const's name. */
+ JS_ASSERT(!pn4->pn_expr);
+ ale = js_IndexAtom(cx, pn4->pn_atom, &cg->atomList);
+ if (!ale)
+ goto bad;
+ CG_NEXT(cg) = pc;
+ if (js_NewSrcNote2(cx, cg, SRC_LABEL, (ptrdiff_t)
+ ALE_INDEX(ale)) < 0) {
+ goto bad;
+ }
+ }
+ pc += JUMP_OFFSET_LEN;
+ }
+ } else {
+ for (pn3 = pn2->pn_head; pn3; pn3 = pn3->pn_next) {
+ pn4 = pn3->pn_left;
+ if (pn4 && pn4->pn_type == TOK_NAME) {
+ /* Note a propagated constant with the const's name. */
+ JS_ASSERT(!pn4->pn_expr);
+ ale = js_IndexAtom(cx, pn4->pn_atom, &cg->atomList);
+ if (!ale)
+ goto bad;
+ CG_NEXT(cg) = pc;
+ if (js_NewSrcNote2(cx, cg, SRC_LABEL, (ptrdiff_t)
+ ALE_INDEX(ale)) < 0) {
+ goto bad;
+ }
+ }
+ pc += ATOM_INDEX_LEN + JUMP_OFFSET_LEN;
+ }
+ }
+ CG_NEXT(cg) = savepc;
+ }
+ }
+
+ /* Emit code for each case's statements, copying pn_offset up to pn3. */
+ for (pn3 = pn2->pn_head; pn3; pn3 = pn3->pn_next) {
+ if (switchOp == JSOP_CONDSWITCH && pn3->pn_type != TOK_DEFAULT)
+ CHECK_AND_SET_JUMP_OFFSET_AT(cx, cg, pn3->pn_offset);
+ pn4 = pn3->pn_right;
+ ok = js_EmitTree(cx, cg, pn4);
+ if (!ok)
+ goto out;
+ pn3->pn_offset = pn4->pn_offset;
+ if (pn3->pn_type == TOK_DEFAULT)
+ off = pn3->pn_offset - top;
+ }
+
+ if (!hasDefault) {
+ /* If no default case, offset for default is to end of switch. */
+ off = CG_OFFSET(cg) - top;
+ }
+
+ /* We better have set "off" by now. */
+ JS_ASSERT(off != -1);
+
+ /* Set the default offset (to end of switch if no default). */
+ if (switchOp == JSOP_CONDSWITCH) {
+ pc = NULL;
+ JS_ASSERT(defaultOffset != -1);
+ ok = js_SetJumpOffset(cx, cg, CG_CODE(cg, defaultOffset),
+ off - (defaultOffset - top));
+ if (!ok)
+ goto out;
+ } else {
+ pc = CG_CODE(cg, top);
+ ok = js_SetJumpOffset(cx, cg, pc, off);
+ if (!ok)
+ goto out;
+ pc += JUMP_OFFSET_LEN;
+ }
+
+ /* Set the SRC_SWITCH note's offset operand to tell end of switch. */
+ off = CG_OFFSET(cg) - top;
+ ok = js_SetSrcNoteOffset(cx, cg, (uintN)noteIndex, 0, off);
+ if (!ok)
+ goto out;
+
+ if (switchOp == JSOP_TABLESWITCH) {
+ /* Skip over the already-initialized switch bounds. */
+ pc += 2 * JUMP_OFFSET_LEN;
+
+ /* Fill in the jump table, if there is one. */
+ for (i = 0; i < (jsint)tableLength; i++) {
+ pn3 = table[i];
+ off = pn3 ? pn3->pn_offset - top : 0;
+ ok = js_SetJumpOffset(cx, cg, pc, off);
+ if (!ok)
+ goto out;
+ pc += JUMP_OFFSET_LEN;
+ }
+ } else if (switchOp == JSOP_LOOKUPSWITCH) {
+ /* Skip over the already-initialized number of cases. */
+ pc += ATOM_INDEX_LEN;
+
+ for (pn3 = pn2->pn_head; pn3; pn3 = pn3->pn_next) {
+ if (pn3->pn_type == TOK_DEFAULT)
+ continue;
+ atom = js_AtomizeValue(cx, pn3->pn_val, 0);
+ if (!atom)
+ goto bad;
+ ale = js_IndexAtom(cx, atom, &cg->atomList);
+ if (!ale)
+ goto bad;
+ SET_ATOM_INDEX(pc, ALE_INDEX(ale));
+ pc += ATOM_INDEX_LEN;
+
+ off = pn3->pn_offset - top;
+ ok = js_SetJumpOffset(cx, cg, pc, off);
+ if (!ok)
+ goto out;
+ pc += JUMP_OFFSET_LEN;
+ }
+ }
+
+out:
+ if (table)
+ JS_free(cx, table);
+ if (ok) {
+ ok = js_PopStatementCG(cx, cg);
+
+#if JS_HAS_BLOCK_SCOPE
+ if (ok && pn->pn_right->pn_type == TOK_LEXICALSCOPE) {
+ EMIT_UINT16_IMM_OP(JSOP_LEAVEBLOCK, count);
+ cg->stackDepth -= count;
+ }
+#endif
+ }
+ return ok;
+
+bad:
+ ok = JS_FALSE;
+ goto out;
+}
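+
+/*
+ * Net effect of EmitSwitch: dense, duplicate-free sets of small int cases
+ * emit JSOP_TABLESWITCH; constant but sparse or non-int cases emit
+ * JSOP_LOOKUPSWITCH; any non-constant case expression (or a lookup table
+ * that would overflow the 64K atom-index limit) falls back to
+ * JSOP_CONDSWITCH, which evaluates and compares each case at run time.
+ */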
+
+JSBool
+js_EmitFunctionBytecode(JSContext *cx, JSCodeGenerator *cg, JSParseNode *body)
+{
+ if (!js_AllocTryNotes(cx, cg))
+ return JS_FALSE;
+
+ if (cg->treeContext.flags & TCF_FUN_IS_GENERATOR) {
+ /* JSOP_GENERATOR must be the first instruction. */
+ CG_SWITCH_TO_PROLOG(cg);
+ JS_ASSERT(CG_NEXT(cg) == CG_BASE(cg));
+ if (js_Emit1(cx, cg, JSOP_GENERATOR) < 0)
+ return JS_FALSE;
+ CG_SWITCH_TO_MAIN(cg);
+ }
+
+ return js_EmitTree(cx, cg, body) &&
+ js_Emit1(cx, cg, JSOP_STOP) >= 0;
+}
+
+JSBool
+js_EmitFunctionBody(JSContext *cx, JSCodeGenerator *cg, JSParseNode *body,
+ JSFunction *fun)
+{
+ JSStackFrame *fp, frame;
+ JSObject *funobj;
+ JSBool ok;
+
+ fp = cx->fp;
+ funobj = fun->object;
+ JS_ASSERT(!fp || (fp->fun != fun && fp->varobj != funobj &&
+ fp->scopeChain != funobj));
+ memset(&frame, 0, sizeof frame);
+ frame.fun = fun;
+ frame.varobj = frame.scopeChain = funobj;
+ frame.down = fp;
+ frame.flags = JS_HAS_COMPILE_N_GO_OPTION(cx)
+ ? JSFRAME_COMPILING | JSFRAME_COMPILE_N_GO
+ : JSFRAME_COMPILING;
+ cx->fp = &frame;
+ ok = js_EmitFunctionBytecode(cx, cg, body);
+ cx->fp = fp;
+ if (!ok)
+ return JS_FALSE;
+
+ if (!js_NewScriptFromCG(cx, cg, fun))
+ return JS_FALSE;
+
+ JS_ASSERT(FUN_INTERPRETED(fun));
+ return JS_TRUE;
+}
+
+/* A macro for inlining at the top of js_EmitTree (whence it came). */
+#define UPDATE_LINE_NUMBER_NOTES(cx, cg, pn) \
+ JS_BEGIN_MACRO \
+ uintN line_ = (pn)->pn_pos.begin.lineno; \
+ uintN delta_ = line_ - CG_CURRENT_LINE(cg); \
+ if (delta_ != 0) { \
+ /* \
+ * Encode any change in the current source line number by using \
+ * either several SRC_NEWLINE notes or just one SRC_SETLINE note, \
+ * whichever consumes less space. \
+ * \
+ * NB: We handle backward line number deltas (possible with for \
+ * loops where the update part is emitted after the body, but its \
+ * line number is <= any line number in the body) here by letting \
+ * unsigned delta_ wrap to a very large number, which triggers a \
+ * SRC_SETLINE. \
+ */ \
+ CG_CURRENT_LINE(cg) = line_; \
+ if (delta_ >= (uintN)(2 + ((line_ > SN_3BYTE_OFFSET_MASK)<<1))) { \
+ if (js_NewSrcNote2(cx, cg, SRC_SETLINE, (ptrdiff_t)line_) < 0)\
+ return JS_FALSE; \
+ } else { \
+ do { \
+ if (js_NewSrcNote(cx, cg, SRC_NEWLINE) < 0) \
+ return JS_FALSE; \
+ } while (--delta_ != 0); \
+ } \
+ } \
+ JS_END_MACRO
+
+/* A function, so that we avoid macro-bloating all the other callsites. */
+static JSBool
+UpdateLineNumberNotes(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn)
+{
+ UPDATE_LINE_NUMBER_NOTES(cx, cg, pn);
+ return JS_TRUE;
+}
+
+static JSBool
+MaybeEmitVarDecl(JSContext *cx, JSCodeGenerator *cg, JSOp prologOp,
+ JSParseNode *pn, jsatomid *result)
+{
+ jsatomid atomIndex;
+ JSAtomListElement *ale;
+
+ if (pn->pn_slot >= 0) {
+ atomIndex = (jsatomid) pn->pn_slot;
+ } else {
+ ale = js_IndexAtom(cx, pn->pn_atom, &cg->atomList);
+ if (!ale)
+ return JS_FALSE;
+ atomIndex = ALE_INDEX(ale);
+ }
+
+ if ((js_CodeSpec[pn->pn_op].format & JOF_TYPEMASK) == JOF_CONST &&
+ (!(cg->treeContext.flags & TCF_IN_FUNCTION) ||
+ (cg->treeContext.flags & TCF_FUN_HEAVYWEIGHT))) {
+ /* Emit a prolog bytecode to predefine the variable. */
+ CG_SWITCH_TO_PROLOG(cg);
+ if (!UpdateLineNumberNotes(cx, cg, pn))
+ return JS_FALSE;
+ EMIT_ATOM_INDEX_OP(prologOp, atomIndex);
+ CG_SWITCH_TO_MAIN(cg);
+ }
+
+ if (result)
+ *result = atomIndex;
+ return JS_TRUE;
+}
+
+#if JS_HAS_DESTRUCTURING
+
+typedef JSBool
+(*DestructuringDeclEmitter)(JSContext *cx, JSCodeGenerator *cg, JSOp prologOp,
+ JSParseNode *pn);
+
+static JSBool
+EmitDestructuringDecl(JSContext *cx, JSCodeGenerator *cg, JSOp prologOp,
+ JSParseNode *pn)
+{
+ JS_ASSERT(pn->pn_type == TOK_NAME);
+ if (!BindNameToSlot(cx, &cg->treeContext, pn, prologOp == JSOP_NOP))
+ return JS_FALSE;
+
+ JS_ASSERT(pn->pn_op != JSOP_ARGUMENTS);
+ return MaybeEmitVarDecl(cx, cg, prologOp, pn, NULL);
+}
+
+static JSBool
+EmitDestructuringDecls(JSContext *cx, JSCodeGenerator *cg, JSOp prologOp,
+ JSParseNode *pn)
+{
+ JSParseNode *pn2, *pn3;
+ DestructuringDeclEmitter emitter;
+
+ if (pn->pn_type == TOK_RB) {
+ for (pn2 = pn->pn_head; pn2; pn2 = pn2->pn_next) {
+ if (pn2->pn_type == TOK_COMMA)
+ continue;
+ emitter = (pn2->pn_type == TOK_NAME)
+ ? EmitDestructuringDecl
+ : EmitDestructuringDecls;
+ if (!emitter(cx, cg, prologOp, pn2))
+ return JS_FALSE;
+ }
+ } else {
+ JS_ASSERT(pn->pn_type == TOK_RC);
+ for (pn2 = pn->pn_head; pn2; pn2 = pn2->pn_next) {
+ pn3 = pn2->pn_right;
+ emitter = (pn3->pn_type == TOK_NAME)
+ ? EmitDestructuringDecl
+ : EmitDestructuringDecls;
+ if (!emitter(cx, cg, prologOp, pn3))
+ return JS_FALSE;
+ }
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+EmitDestructuringOpsHelper(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn);
+
+static JSBool
+EmitDestructuringLHS(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn,
+ JSBool wantpop)
+{
+ jsuint slot;
+
+ /* Skip any parenthesization. */
+ while (pn->pn_type == TOK_RP)
+ pn = pn->pn_kid;
+
+ /*
+ * Now emit the lvalue opcode sequence. If the lvalue is a nested
+ * destructuring initialiser-form, call ourselves to handle it, then
+ * pop the matched value. Otherwise emit an lvalue bytecode sequence
+ * ending with a JSOP_ENUMELEM or equivalent op.
+ */
+ if (pn->pn_type == TOK_RB || pn->pn_type == TOK_RC) {
+ if (!EmitDestructuringOpsHelper(cx, cg, pn))
+ return JS_FALSE;
+ if (wantpop && js_Emit1(cx, cg, JSOP_POP) < 0)
+ return JS_FALSE;
+ } else {
+ if (pn->pn_type == TOK_NAME &&
+ !BindNameToSlot(cx, &cg->treeContext, pn, JS_FALSE)) {
+ return JS_FALSE;
+ }
+
+ switch (pn->pn_op) {
+ case JSOP_SETNAME:
+ /*
+ * NB: pn is a PN_NAME node, not a PN_BINARY. Nevertheless,
+ * we want to emit JSOP_ENUMELEM, which has format JOF_ELEM.
+ * So here and for JSOP_ENUMCONSTELEM, we use EmitElemOp.
+ */
+ if (!EmitElemOp(cx, pn, JSOP_ENUMELEM, cg))
+ return JS_FALSE;
+ break;
+
+ case JSOP_SETCONST:
+ if (!EmitElemOp(cx, pn, JSOP_ENUMCONSTELEM, cg))
+ return JS_FALSE;
+ break;
+
+ case JSOP_SETLOCAL:
+ if (wantpop) {
+ slot = (jsuint) pn->pn_slot;
+ EMIT_UINT16_IMM_OP(JSOP_SETLOCALPOP, slot);
+ break;
+ }
+ /* FALL THROUGH */
+
+ case JSOP_SETARG:
+ case JSOP_SETVAR:
+ case JSOP_SETGVAR:
+ slot = (jsuint) pn->pn_slot;
+ EMIT_UINT16_IMM_OP(pn->pn_op, slot);
+ if (wantpop && js_Emit1(cx, cg, JSOP_POP) < 0)
+ return JS_FALSE;
+ break;
+
+ default:
+#if JS_HAS_LVALUE_RETURN || JS_HAS_XML_SUPPORT
+ {
+ ptrdiff_t top;
+
+ top = CG_OFFSET(cg);
+ if (!js_EmitTree(cx, cg, pn))
+ return JS_FALSE;
+ if (js_NewSrcNote2(cx, cg, SRC_PCBASE, CG_OFFSET(cg) - top) < 0)
+ return JS_FALSE;
+ if (js_Emit1(cx, cg, JSOP_ENUMELEM) < 0)
+ return JS_FALSE;
+ break;
+ }
+#endif
+ case JSOP_ENUMELEM:
+ JS_ASSERT(0);
+ }
+ }
+
+ return JS_TRUE;
+}
+
+/*
+ * Recursive helper for EmitDestructuringOps.
+ *
+ * Given a value to destructure on the stack, walk over an object or array
+ * initialiser at pn, emitting bytecodes to match property values and store
+ * them in the lvalues identified by the matched property names.
+ */
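+/*
+ * For an array pattern such as [a, b], the sequence is: JSOP_DUP the value,
+ * push index 0, JSOP_GETELEM, store into a (popping); then JSOP_DUP again,
+ * push index 1, JSOP_GETELEM, store into b. Object patterns fetch by
+ * property name via JSOP_GETPROP instead of an indexed JSOP_GETELEM.
+ */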
+static JSBool
+EmitDestructuringOpsHelper(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn)
+{
+ jsuint index;
+ JSParseNode *pn2, *pn3;
+ JSBool doElemOp;
+
+#ifdef DEBUG
+ intN stackDepth = cg->stackDepth;
+ JS_ASSERT(stackDepth != 0);
+ JS_ASSERT(pn->pn_arity == PN_LIST);
+ JS_ASSERT(pn->pn_type == TOK_RB || pn->pn_type == TOK_RC);
+#endif
+
+ if (pn->pn_count == 0) {
+ /* Emit a DUP;POP sequence for the decompiler. */
+ return js_Emit1(cx, cg, JSOP_DUP) >= 0 &&
+ js_Emit1(cx, cg, JSOP_POP) >= 0;
+ }
+
+ index = 0;
+ for (pn2 = pn->pn_head; pn2; pn2 = pn2->pn_next) {
+ /*
+ * Duplicate the value being destructured to use as a reference base.
+ */
+ if (js_Emit1(cx, cg, JSOP_DUP) < 0)
+ return JS_FALSE;
+
+ /*
+ * Now push the property name currently being matched, which is either
+ * the array initialiser's current index, or the current property name
+ * "label" on the left of a colon in the object initialiser. Set pn3
+ * to the lvalue node, which is in the value-initializing position.
+ */
+ doElemOp = JS_TRUE;
+ if (pn->pn_type == TOK_RB) {
+ if (!EmitNumberOp(cx, index, cg))
+ return JS_FALSE;
+ pn3 = pn2;
+ } else {
+ JS_ASSERT(pn->pn_type == TOK_RC);
+ JS_ASSERT(pn2->pn_type == TOK_COLON);
+ pn3 = pn2->pn_left;
+ if (pn3->pn_type == TOK_NUMBER) {
+ /*
+ * If we are emitting an object destructuring initialiser,
+ * annotate the index op with SRC_INITPROP so we know we are
+ * not decompiling an array initialiser.
+ */
+ if (js_NewSrcNote(cx, cg, SRC_INITPROP) < 0)
+ return JS_FALSE;
+ if (!EmitNumberOp(cx, pn3->pn_dval, cg))
+ return JS_FALSE;
+ } else {
+ JS_ASSERT(pn3->pn_type == TOK_STRING ||
+ pn3->pn_type == TOK_NAME);
+ if (!EmitAtomOp(cx, pn3, JSOP_GETPROP, cg))
+ return JS_FALSE;
+ doElemOp = JS_FALSE;
+ }
+ pn3 = pn2->pn_right;
+ }
+
+ if (doElemOp) {
+ /*
+ * Ok, get the value of the matching property name. This leaves
+ * that value on top of the value being destructured, so the stack
+ * is one deeper than when we started.
+ */
+ if (js_Emit1(cx, cg, JSOP_GETELEM) < 0)
+ return JS_FALSE;
+ JS_ASSERT(cg->stackDepth == stackDepth + 1);
+ }
+
+ /* Nullary comma node makes a hole in the array destructurer. */
+ if (pn3->pn_type == TOK_COMMA && pn3->pn_arity == PN_NULLARY) {
+ JS_ASSERT(pn->pn_type == TOK_RB);
+ JS_ASSERT(pn2 == pn3);
+ if (js_Emit1(cx, cg, JSOP_POP) < 0)
+ return JS_FALSE;
+ } else {
+ if (!EmitDestructuringLHS(cx, cg, pn3, JS_TRUE))
+ return JS_FALSE;
+ }
+
+ JS_ASSERT(cg->stackDepth == stackDepth);
+ ++index;
+ }
+
+ return JS_TRUE;
+}
+
+static ptrdiff_t
+OpToDeclType(JSOp op)
+{
+ switch (op) {
+ case JSOP_NOP:
+ return SRC_DECL_LET;
+ case JSOP_DEFCONST:
+ return SRC_DECL_CONST;
+ case JSOP_DEFVAR:
+ return SRC_DECL_VAR;
+ default:
+ return SRC_DECL_NONE;
+ }
+}
+
+static JSBool
+EmitDestructuringOps(JSContext *cx, JSCodeGenerator *cg, JSOp declOp,
+ JSParseNode *pn)
+{
+ /*
+ * If we're called from a variable declaration, help the decompiler by
+ * annotating the first JSOP_DUP that EmitDestructuringOpsHelper emits.
+ * If the destructuring initialiser is empty, our helper will emit a
+ * JSOP_DUP followed by a JSOP_POP for the decompiler.
+ */
+ if (js_NewSrcNote2(cx, cg, SRC_DESTRUCT, OpToDeclType(declOp)) < 0)
+ return JS_FALSE;
+
+ /*
+ * Call our recursive helper to emit the destructuring assignments and
+ * related stack manipulations.
+ */
+ return EmitDestructuringOpsHelper(cx, cg, pn);
+}
+
+static JSBool
+EmitGroupAssignment(JSContext *cx, JSCodeGenerator *cg, JSOp declOp,
+ JSParseNode *lhs, JSParseNode *rhs)
+{
+ jsuint depth, limit, slot;
+ JSParseNode *pn;
+
+ depth = limit = (uintN) cg->stackDepth;
+ for (pn = rhs->pn_head; pn; pn = pn->pn_next) {
+ if (limit == JS_BIT(16)) {
+ js_ReportCompileErrorNumber(cx, rhs,
+ JSREPORT_PN | JSREPORT_ERROR,
+ JSMSG_ARRAY_INIT_TOO_BIG);
+ return JS_FALSE;
+ }
+
+ if (pn->pn_type == TOK_COMMA) {
+ if (js_Emit1(cx, cg, JSOP_PUSH) < 0)
+ return JS_FALSE;
+ } else {
+ JS_ASSERT(pn->pn_type != TOK_DEFSHARP);
+ if (!js_EmitTree(cx, cg, pn))
+ return JS_FALSE;
+ }
+ ++limit;
+ }
+
+ if (js_NewSrcNote2(cx, cg, SRC_GROUPASSIGN, OpToDeclType(declOp)) < 0)
+ return JS_FALSE;
+
+ slot = depth;
+ for (pn = lhs->pn_head; pn; pn = pn->pn_next) {
+ if (slot < limit) {
+ EMIT_UINT16_IMM_OP(JSOP_GETLOCAL, slot);
+ } else {
+ if (js_Emit1(cx, cg, JSOP_PUSH) < 0)
+ return JS_FALSE;
+ }
+ if (pn->pn_type == TOK_COMMA && pn->pn_arity == PN_NULLARY) {
+ if (js_Emit1(cx, cg, JSOP_POP) < 0)
+ return JS_FALSE;
+ } else {
+ if (!EmitDestructuringLHS(cx, cg, pn, pn->pn_next != NULL))
+ return JS_FALSE;
+ }
+ ++slot;
+ }
+
+ EMIT_UINT16_IMM_OP(JSOP_SETSP, (jsatomid)depth);
+ cg->stackDepth = (uintN) depth;
+ return JS_TRUE;
+}
+
+/*
+ * Helper called with pop out param initialized to a JSOP_POP* opcode. If we
+ * can emit a group assignment sequence, which results in 0 stack depth delta,
+ * we set *pop to JSOP_NOP so callers can veto emitting pn followed by a pop.
+ */
+static JSBool
+MaybeEmitGroupAssignment(JSContext *cx, JSCodeGenerator *cg, JSOp declOp,
+ JSParseNode *pn, JSOp *pop)
+{
+ JSParseNode *lhs, *rhs;
+
+ JS_ASSERT(pn->pn_type == TOK_ASSIGN);
+ JS_ASSERT(*pop == JSOP_POP || *pop == JSOP_POPV);
+ lhs = pn->pn_left;
+ rhs = pn->pn_right;
+ if (lhs->pn_type == TOK_RB && rhs->pn_type == TOK_RB &&
+ lhs->pn_count <= rhs->pn_count &&
+ (rhs->pn_count == 0 ||
+ rhs->pn_head->pn_type != TOK_DEFSHARP)) {
+ if (!EmitGroupAssignment(cx, cg, declOp, lhs, rhs))
+ return JS_FALSE;
+ *pop = JSOP_NOP;
+ }
+ return JS_TRUE;
+}
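+
+/*
+ * For example, [a, b] = [b, a] qualifies: the right-hand elements are pushed
+ * as temporaries, each left-hand lvalue is stored from its JSOP_GETLOCAL
+ * slot, and JSOP_SETSP restores the original stack depth, so the caller's
+ * trailing pop is vetoed via *pop = JSOP_NOP.
+ */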
+
+#endif /* JS_HAS_DESTRUCTURING */
+
+static JSBool
+EmitVariables(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn,
+ JSBool inLetHead, ptrdiff_t *headNoteIndex)
+{
+ JSTreeContext *tc;
+ JSBool let, forInVar;
+#if JS_HAS_BLOCK_SCOPE
+ JSBool forInLet, popScope;
+ JSStmtInfo *stmt, *scopeStmt;
+#endif
+ ptrdiff_t off, noteIndex, tmp;
+ JSParseNode *pn2, *pn3;
+ JSOp op;
+ jsatomid atomIndex;
+ uintN oldflags;
+
+ /* Default in case of JS_HAS_BLOCK_SCOPE early return, below. */
+ *headNoteIndex = -1;
+
+ /*
+ * Let blocks and expressions have a parenthesized head in which the new
+ * scope is not yet open. Initializer evaluation uses the parent node's
+ * lexical scope. If popScope is true below, then we hide the top lexical
+ * block from any calls to BindNameToSlot hiding in pn2->pn_expr so that
+ * it won't find any names in the new let block.
+ *
+ * The same goes for let declarations in the head of any kind of for loop.
+ * Unlike a let declaration 'let x = i' within a block, where x is hoisted
+ * to the start of the block, a 'for (let x = i...) ...' loop evaluates i
+ * in the containing scope, and puts x in the loop body's scope.
+ */
+ tc = &cg->treeContext;
+ let = (pn->pn_op == JSOP_NOP);
+ forInVar = (pn->pn_extra & PNX_FORINVAR) != 0;
+#if JS_HAS_BLOCK_SCOPE
+ forInLet = let && forInVar;
+ popScope = (inLetHead || (let && (tc->flags & TCF_IN_FOR_INIT)));
+ JS_ASSERT(!popScope || let);
+#endif
+
+ off = noteIndex = -1;
+ for (pn2 = pn->pn_head; ; pn2 = pn2->pn_next) {
+#if JS_HAS_DESTRUCTURING
+ if (pn2->pn_type != TOK_NAME) {
+ if (pn2->pn_type == TOK_RB || pn2->pn_type == TOK_RC) {
+ /*
+ * Emit variable binding ops, but not destructuring ops.
+ * The parser (see Variables, jsparse.c) has ensured that
+ * our caller will be the TOK_FOR/TOK_IN case in js_EmitTree,
+ * and that case will emit the destructuring code only after
+ * emitting an enumerating opcode and a branch that tests
+ * whether the enumeration ended.
+ */
+ JS_ASSERT(forInVar);
+ JS_ASSERT(pn->pn_count == 1);
+ if (!EmitDestructuringDecls(cx, cg, pn->pn_op, pn2))
+ return JS_FALSE;
+ break;
+ }
+
+ /*
+ * A destructuring initialiser assignment preceded by var is
+ * always evaluated promptly, even if it is to the left of 'in'
+ * in a for-in loop. As with 'for (var x = i in o)...', this
+ * will cause the entire 'var [a, b] = i' to be hoisted out of
+ * the head of the loop.
+ */
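+            /*
+             * For example, 'for (var [a, b] = i in o) ...' behaves as if
+             * written 'var [a, b] = i; for ([a, b] in o) ...', matching the
+             * treatment of 'for (var x = i in o) ...' described above.
+             */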
+ JS_ASSERT(pn2->pn_type == TOK_ASSIGN);
+ if (pn->pn_count == 1 && !forInLet) {
+ /*
+ * If this is the only destructuring assignment in the list,
+ * try to optimize to a group assignment. If we're in a let
+ * head, pass JSOP_POP rather than the pseudo-prolog JSOP_NOP
+ * in pn->pn_op, to suppress a second (and misplaced) 'let'.
+ */
+ JS_ASSERT(noteIndex < 0 && !pn2->pn_next);
+ op = JSOP_POP;
+ if (!MaybeEmitGroupAssignment(cx, cg,
+ inLetHead ? JSOP_POP : pn->pn_op,
+ pn2, &op)) {
+ return JS_FALSE;
+ }
+ if (op == JSOP_NOP) {
+ pn->pn_extra = (pn->pn_extra & ~PNX_POPVAR) | PNX_GROUPINIT;
+ break;
+ }
+ }
+
+ pn3 = pn2->pn_left;
+ if (!EmitDestructuringDecls(cx, cg, pn->pn_op, pn3))
+ return JS_FALSE;
+
+#if JS_HAS_BLOCK_SCOPE
+ /*
+ * If this is a 'for (let [x, y] = i in o) ...' let declaration,
+ * throw away i if it is a useless expression.
+ */
+ if (forInLet) {
+ JSBool useful = JS_FALSE;
+
+ JS_ASSERT(pn->pn_count == 1);
+ if (!CheckSideEffects(cx, tc, pn2->pn_right, &useful))
+ return JS_FALSE;
+ if (!useful)
+ return JS_TRUE;
+ }
+#endif
+
+ if (!js_EmitTree(cx, cg, pn2->pn_right))
+ return JS_FALSE;
+
+#if JS_HAS_BLOCK_SCOPE
+ /*
+ * The expression i in 'for (let [x, y] = i in o) ...', which is
+ * pn2->pn_right above, appears to have side effects. We've just
+ * emitted code to evaluate i, but we must not destructure i yet.
+ * Let the TOK_FOR: code in js_EmitTree do the destructuring to
+ * emit the right combination of source notes and bytecode for the
+ * decompiler.
+ *
+ * This has the effect of hoisting the evaluation of i out of the
+ * for-in loop, without hoisting the let variables, which must of
+ * course be scoped by the loop. Set PNX_POPVAR to cause JSOP_POP
+ * to be emitted, just before returning from this function.
+ */
+ if (forInVar) {
+ pn->pn_extra |= PNX_POPVAR;
+ if (forInLet)
+ break;
+ }
+#endif
+
+ /*
+ * Veto pn->pn_op if inLetHead to avoid emitting a SRC_DESTRUCT
+ * that's redundant with respect to the SRC_DECL/SRC_DECL_LET that
+ * we will emit at the bottom of this function.
+ */
+ if (!EmitDestructuringOps(cx, cg,
+ inLetHead ? JSOP_POP : pn->pn_op,
+ pn3)) {
+ return JS_FALSE;
+ }
+ goto emit_note_pop;
+ }
+#else
+ JS_ASSERT(pn2->pn_type == TOK_NAME);
+#endif
+
+ if (!BindNameToSlot(cx, &cg->treeContext, pn2, let))
+ return JS_FALSE;
+ JS_ASSERT(pn2->pn_slot >= 0 || !let);
+
+ op = pn2->pn_op;
+ if (op == JSOP_ARGUMENTS) {
+ /* JSOP_ARGUMENTS => no initializer */
+ JS_ASSERT(!pn2->pn_expr && !let);
+ pn3 = NULL;
+#ifdef __GNUC__
+ atomIndex = 0; /* quell GCC overwarning */
+#endif
+ } else {
+ if (!MaybeEmitVarDecl(cx, cg, pn->pn_op, pn2, &atomIndex))
+ return JS_FALSE;
+
+ pn3 = pn2->pn_expr;
+ if (pn3) {
+#if JS_HAS_BLOCK_SCOPE
+ /*
+ * If this is a 'for (let x = i in o) ...' let declaration,
+ * throw away i if it is a useless expression.
+ */
+ if (forInLet) {
+ JSBool useful = JS_FALSE;
+
+ JS_ASSERT(pn->pn_count == 1);
+ if (!CheckSideEffects(cx, tc, pn3, &useful))
+ return JS_FALSE;
+ if (!useful)
+ return JS_TRUE;
+ }
+#endif
+
+ if (op == JSOP_SETNAME) {
+ JS_ASSERT(!let);
+ EMIT_ATOM_INDEX_OP(JSOP_BINDNAME, atomIndex);
+ }
+ if (pn->pn_op == JSOP_DEFCONST &&
+ !js_DefineCompileTimeConstant(cx, cg, pn2->pn_atom,
+ pn3)) {
+ return JS_FALSE;
+ }
+
+#if JS_HAS_BLOCK_SCOPE
+ /* Evaluate expr in the outer lexical scope if requested. */
+ if (popScope) {
+ stmt = tc->topStmt;
+ scopeStmt = tc->topScopeStmt;
+
+ tc->topStmt = stmt->down;
+ tc->topScopeStmt = scopeStmt->downScope;
+ }
+#ifdef __GNUC__
+ else {
+ stmt = scopeStmt = NULL; /* quell GCC overwarning */
+ }
+#endif
+#endif
+
+ oldflags = cg->treeContext.flags;
+ cg->treeContext.flags &= ~TCF_IN_FOR_INIT;
+ if (!js_EmitTree(cx, cg, pn3))
+ return JS_FALSE;
+ cg->treeContext.flags |= oldflags & TCF_IN_FOR_INIT;
+
+#if JS_HAS_BLOCK_SCOPE
+ if (popScope) {
+ tc->topStmt = stmt;
+ tc->topScopeStmt = scopeStmt;
+ }
+#endif
+ }
+ }
+
+ /*
+ * 'for (var x in o) ...' and 'for (var x = i in o) ...' call the
+ * TOK_VAR case, but only the initialized case (a strange one that
+ * falls out of ECMA-262's grammar) wants to run past this point.
+ * Both cases must conditionally emit a JSOP_DEFVAR, above. Note
+ * that the parser error-checks to ensure that pn->pn_count is 1.
+ *
+ * 'for (let x = i in o) ...' must evaluate i before the loop, and
+ * subject it to useless expression elimination. The variable list
+ * in pn is a single let declaration if pn_op == JSOP_NOP. We test
+ * the let local in order to break early in this case, as well as in
+ * the 'for (var x in o)' case.
+ *
+ * XXX Narcissus keeps track of variable declarations in the node
+ * for the script being compiled, so there's no need to share any
+ * conditional prolog code generation there. We could do likewise,
+ * but it's a big change, requiring extra allocation, so probably
+ * not worth the trouble for SpiderMonkey.
+ */
+ JS_ASSERT(pn3 == pn2->pn_expr);
+ if (forInVar && (!pn3 || let)) {
+ JS_ASSERT(pn->pn_count == 1);
+ break;
+ }
+
+ if (pn2 == pn->pn_head &&
+ !inLetHead &&
+ js_NewSrcNote2(cx, cg, SRC_DECL,
+ (pn->pn_op == JSOP_DEFCONST)
+ ? SRC_DECL_CONST
+ : (pn->pn_op == JSOP_DEFVAR)
+ ? SRC_DECL_VAR
+ : SRC_DECL_LET) < 0) {
+ return JS_FALSE;
+ }
+ if (op == JSOP_ARGUMENTS) {
+ if (js_Emit1(cx, cg, op) < 0)
+ return JS_FALSE;
+ } else if (pn2->pn_slot >= 0) {
+ EMIT_UINT16_IMM_OP(op, atomIndex);
+ } else {
+ EMIT_ATOM_INDEX_OP(op, atomIndex);
+ }
+
+#if JS_HAS_DESTRUCTURING
+ emit_note_pop:
+#endif
+ tmp = CG_OFFSET(cg);
+ if (noteIndex >= 0) {
+ if (!js_SetSrcNoteOffset(cx, cg, (uintN)noteIndex, 0, tmp-off))
+ return JS_FALSE;
+ }
+ if (!pn2->pn_next)
+ break;
+ off = tmp;
+ noteIndex = js_NewSrcNote2(cx, cg, SRC_PCDELTA, 0);
+ if (noteIndex < 0 || js_Emit1(cx, cg, JSOP_POP) < 0)
+ return JS_FALSE;
+ }
+
+ /* If this is a let head, emit and return a srcnote on the pop. */
+ if (inLetHead) {
+ *headNoteIndex = js_NewSrcNote(cx, cg, SRC_DECL);
+ if (*headNoteIndex < 0)
+ return JS_FALSE;
+ if (!(pn->pn_extra & PNX_POPVAR))
+ return js_Emit1(cx, cg, JSOP_NOP) >= 0;
+ }
+
+ return !(pn->pn_extra & PNX_POPVAR) || js_Emit1(cx, cg, JSOP_POP) >= 0;
+}
+
+#if defined DEBUG_brendan || defined DEBUG_mrbkap
+static JSBool
+GettableNoteForNextOp(JSCodeGenerator *cg)
+{
+ ptrdiff_t offset, target;
+ jssrcnote *sn, *end;
+
+ offset = 0;
+ target = CG_OFFSET(cg);
+ for (sn = CG_NOTES(cg), end = sn + CG_NOTE_COUNT(cg); sn < end;
+ sn = SN_NEXT(sn)) {
+ if (offset == target && SN_IS_GETTABLE(sn))
+ return JS_TRUE;
+ offset += SN_DELTA(sn);
+ }
+ return JS_FALSE;
+}
+#endif
+
+JSBool
+js_EmitTree(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn)
+{
+ JSBool ok, useful, wantval;
+ JSStmtInfo *stmt, stmtInfo;
+ ptrdiff_t top, off, tmp, beq, jmp;
+ JSParseNode *pn2, *pn3;
+ JSAtom *atom;
+ JSAtomListElement *ale;
+ jsatomid atomIndex;
+ ptrdiff_t noteIndex;
+ JSSrcNoteType noteType;
+ jsbytecode *pc;
+ JSOp op;
+ JSTokenType type;
+ uint32 argc;
+ int stackDummy;
+
+ if (!JS_CHECK_STACK_SIZE(cx, stackDummy)) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_OVER_RECURSED);
+ return JS_FALSE;
+ }
+
+ ok = JS_TRUE;
+ cg->emitLevel++;
+ pn->pn_offset = top = CG_OFFSET(cg);
+
+ /* Emit notes to tell the current bytecode's source line number. */
+ UPDATE_LINE_NUMBER_NOTES(cx, cg, pn);
+
+ switch (pn->pn_type) {
+ case TOK_FUNCTION:
+ {
+ void *cg2mark;
+ JSCodeGenerator *cg2;
+ JSFunction *fun;
+
+#if JS_HAS_XML_SUPPORT
+ if (pn->pn_arity == PN_NULLARY) {
+ if (js_Emit1(cx, cg, JSOP_GETFUNNS) < 0)
+ return JS_FALSE;
+ break;
+ }
+#endif
+
+ /* Generate code for the function's body. */
+ cg2mark = JS_ARENA_MARK(&cx->tempPool);
+ JS_ARENA_ALLOCATE_TYPE(cg2, JSCodeGenerator, &cx->tempPool);
+ if (!cg2) {
+ JS_ReportOutOfMemory(cx);
+ return JS_FALSE;
+ }
+ if (!js_InitCodeGenerator(cx, cg2, cg->codePool, cg->notePool,
+ cg->filename, pn->pn_pos.begin.lineno,
+ cg->principals)) {
+ return JS_FALSE;
+ }
+ cg2->treeContext.flags = (uint16) (pn->pn_flags | TCF_IN_FUNCTION);
+ cg2->treeContext.tryCount = pn->pn_tryCount;
+ cg2->parent = cg;
+ fun = (JSFunction *) JS_GetPrivate(cx, ATOM_TO_OBJECT(pn->pn_funAtom));
+ if (!js_EmitFunctionBody(cx, cg2, pn->pn_body, fun))
+ return JS_FALSE;
+
+ /*
+ * We need an activation object if an inner peeks out, or if such
+ * inner-peeking caused one of our inners to become heavyweight.
+ */
+ if (cg2->treeContext.flags &
+ (TCF_FUN_USES_NONLOCALS | TCF_FUN_HEAVYWEIGHT)) {
+ cg->treeContext.flags |= TCF_FUN_HEAVYWEIGHT;
+ }
+ js_FinishCodeGenerator(cx, cg2);
+ JS_ARENA_RELEASE(&cx->tempPool, cg2mark);
+
+ /* Make the function object a literal in the outer script's pool. */
+ ale = js_IndexAtom(cx, pn->pn_funAtom, &cg->atomList);
+ if (!ale)
+ return JS_FALSE;
+ atomIndex = ALE_INDEX(ale);
+
+ /* Emit a bytecode pointing to the closure object in its immediate. */
+ if (pn->pn_op != JSOP_NOP) {
+ EMIT_ATOM_INDEX_OP(pn->pn_op, atomIndex);
+ break;
+ }
+
+ /* Top-level named functions need a nop for decompilation. */
+ noteIndex = js_NewSrcNote2(cx, cg, SRC_FUNCDEF, (ptrdiff_t)atomIndex);
+ if (noteIndex < 0 ||
+ js_Emit1(cx, cg, JSOP_NOP) < 0) {
+ return JS_FALSE;
+ }
+
+ /*
+ * Top-levels also need a prolog op to predefine their names in the
+ * variable object, or if local, to fill their stack slots.
+ */
+ CG_SWITCH_TO_PROLOG(cg);
+
+ if (cg->treeContext.flags & TCF_IN_FUNCTION) {
+ JSObject *obj, *pobj;
+ JSProperty *prop;
+ JSScopeProperty *sprop;
+ uintN slot;
+
+ obj = OBJ_GET_PARENT(cx, fun->object);
+ if (!js_LookupHiddenProperty(cx, obj, ATOM_TO_JSID(fun->atom),
+ &pobj, &prop)) {
+ return JS_FALSE;
+ }
+
+ JS_ASSERT(prop && pobj == obj);
+ sprop = (JSScopeProperty *) prop;
+ JS_ASSERT(sprop->getter == js_GetLocalVariable);
+ slot = sprop->shortid;
+ OBJ_DROP_PROPERTY(cx, pobj, prop);
+
+ /*
+ * If this local function is declared in a body block induced by
+ * let declarations, reparent fun->object to the compiler-created
+ * body block object so that JSOP_DEFLOCALFUN can clone that block
+ * into the runtime scope chain.
+ */
+ stmt = cg->treeContext.topStmt;
+ if (stmt && stmt->type == STMT_BLOCK &&
+ stmt->down && stmt->down->type == STMT_BLOCK &&
+ (stmt->down->flags & SIF_SCOPE)) {
+ obj = ATOM_TO_OBJECT(stmt->down->atom);
+ JS_ASSERT(LOCKED_OBJ_GET_CLASS(obj) == &js_BlockClass);
+ OBJ_SET_PARENT(cx, fun->object, obj);
+ }
+
+ if (atomIndex >= JS_BIT(16)) {
+ /*
+ * Lots of literals in the outer function, so we have to emit
+ * [JSOP_LITOPX, atomIndex, JSOP_DEFLOCALFUN, var slot].
+ */
+ off = js_EmitN(cx, cg, JSOP_LITOPX, 3);
+ if (off < 0)
+ return JS_FALSE;
+ pc = CG_CODE(cg, off);
+ SET_LITERAL_INDEX(pc, atomIndex);
+ EMIT_UINT16_IMM_OP(JSOP_DEFLOCALFUN, slot);
+ } else {
+ /* Emit [JSOP_DEFLOCALFUN, var slot, atomIndex]. */
+ off = js_EmitN(cx, cg, JSOP_DEFLOCALFUN,
+ VARNO_LEN + ATOM_INDEX_LEN);
+ if (off < 0)
+ return JS_FALSE;
+ pc = CG_CODE(cg, off);
+ SET_VARNO(pc, slot);
+ pc += VARNO_LEN;
+ SET_ATOM_INDEX(pc, atomIndex);
+ }
+ } else {
+ JS_ASSERT(!cg->treeContext.topStmt);
+ EMIT_ATOM_INDEX_OP(JSOP_DEFFUN, atomIndex);
+ }
+
+ CG_SWITCH_TO_MAIN(cg);
+ break;
+ }
+
+#if JS_HAS_EXPORT_IMPORT
+ case TOK_EXPORT:
+ pn2 = pn->pn_head;
+ if (pn2->pn_type == TOK_STAR) {
+ /*
+ * 'export *' must have no other elements in the list (what would
+ * be the point?).
+ */
+ if (js_Emit1(cx, cg, JSOP_EXPORTALL) < 0)
+ return JS_FALSE;
+ } else {
+ /*
+ * If not 'export *', the list consists of NAME nodes identifying
+ * properties of the variables object to flag as exported.
+ */
+ do {
+ ale = js_IndexAtom(cx, pn2->pn_atom, &cg->atomList);
+ if (!ale)
+ return JS_FALSE;
+ EMIT_ATOM_INDEX_OP(JSOP_EXPORTNAME, ALE_INDEX(ale));
+ } while ((pn2 = pn2->pn_next) != NULL);
+ }
+ break;
+
+ case TOK_IMPORT:
+ for (pn2 = pn->pn_head; pn2; pn2 = pn2->pn_next) {
+ /*
+ * Each subtree on an import list is rooted by a DOT or LB node.
+ * A DOT may have a null pn_atom member, in which case pn_op must
+ * be JSOP_IMPORTALL -- see EmitPropOp above.
+ */
+ if (!js_EmitTree(cx, cg, pn2))
+ return JS_FALSE;
+ }
+ break;
+#endif /* JS_HAS_EXPORT_IMPORT */
+
+ case TOK_IF:
+ /* Initialize so we can detect else-if chains and avoid recursion. */
+ stmtInfo.type = STMT_IF;
+ beq = jmp = -1;
+ noteIndex = -1;
+
+ if_again:
+ /* Emit code for the condition before pushing stmtInfo. */
+ if (!js_EmitTree(cx, cg, pn->pn_kid1))
+ return JS_FALSE;
+ top = CG_OFFSET(cg);
+ if (stmtInfo.type == STMT_IF) {
+ js_PushStatement(&cg->treeContext, &stmtInfo, STMT_IF, top);
+ } else {
+ /*
+ * We came here from the goto further below that detects else-if
+ * chains, so we must mutate stmtInfo back into a STMT_IF record.
+ * Also (see below for why) we need a note offset for SRC_IF_ELSE
+ * to help the decompiler. Actually, we need two offsets, one for
+ * decompiling any else clause and the second for decompiling an
+ * else-if chain without bracing, overindenting, or incorrectly
+ * scoping let declarations.
+ */
+ JS_ASSERT(stmtInfo.type == STMT_ELSE);
+ stmtInfo.type = STMT_IF;
+ stmtInfo.update = top;
+ if (!js_SetSrcNoteOffset(cx, cg, noteIndex, 0, jmp - beq))
+ return JS_FALSE;
+ if (!js_SetSrcNoteOffset(cx, cg, noteIndex, 1, top - jmp))
+ return JS_FALSE;
+ }
+
+ /* Emit an annotated branch-if-false around the then part. */
+ pn3 = pn->pn_kid3;
+ noteIndex = js_NewSrcNote(cx, cg, pn3 ? SRC_IF_ELSE : SRC_IF);
+ if (noteIndex < 0)
+ return JS_FALSE;
+ beq = EmitJump(cx, cg, JSOP_IFEQ, 0);
+ if (beq < 0)
+ return JS_FALSE;
+
+ /* Emit code for the then and optional else parts. */
+ if (!js_EmitTree(cx, cg, pn->pn_kid2))
+ return JS_FALSE;
+ if (pn3) {
+ /* Modify stmtInfo so we know we're in the else part. */
+ stmtInfo.type = STMT_ELSE;
+
+ /*
+ * Emit a JSOP_BACKPATCH op to jump from the end of our then part
+ * around the else part. The js_PopStatementCG call at the bottom
+ * of this switch case will fix up the backpatch chain linked from
+ * stmtInfo.breaks.
+ */
+ jmp = EmitGoto(cx, cg, &stmtInfo, &stmtInfo.breaks, NULL, SRC_NULL);
+ if (jmp < 0)
+ return JS_FALSE;
+
+ /* Ensure the branch-if-false comes here, then emit the else. */
+ CHECK_AND_SET_JUMP_OFFSET_AT(cx, cg, beq);
+ if (pn3->pn_type == TOK_IF) {
+ pn = pn3;
+ goto if_again;
+ }
+
+ if (!js_EmitTree(cx, cg, pn3))
+ return JS_FALSE;
+
+ /*
+ * Annotate SRC_IF_ELSE with the offset from branch to jump, for
+ * the decompiler's benefit. We can't just "back up" from the pc
+ * of the else clause, because we don't know whether an extended
+ * jump was required to leap from the end of the then clause over
+ * the else clause.
+ */
+ if (!js_SetSrcNoteOffset(cx, cg, noteIndex, 0, jmp - beq))
+ return JS_FALSE;
+ } else {
+ /* No else part, fixup the branch-if-false to come here. */
+ CHECK_AND_SET_JUMP_OFFSET_AT(cx, cg, beq);
+ }
+ ok = js_PopStatementCG(cx, cg);
+ break;
+
+ case TOK_SWITCH:
+ /* Out of line to avoid bloating js_EmitTree's stack frame size. */
+ ok = EmitSwitch(cx, cg, pn, &stmtInfo);
+ break;
+
+ case TOK_WHILE:
+ js_PushStatement(&cg->treeContext, &stmtInfo, STMT_WHILE_LOOP, top);
+ if (!js_EmitTree(cx, cg, pn->pn_left))
+ return JS_FALSE;
+ noteIndex = js_NewSrcNote(cx, cg, SRC_WHILE);
+ if (noteIndex < 0)
+ return JS_FALSE;
+ beq = EmitJump(cx, cg, JSOP_IFEQ, 0);
+ if (beq < 0)
+ return JS_FALSE;
+ if (!js_EmitTree(cx, cg, pn->pn_right))
+ return JS_FALSE;
+ jmp = EmitJump(cx, cg, JSOP_GOTO, top - CG_OFFSET(cg));
+ if (jmp < 0)
+ return JS_FALSE;
+ CHECK_AND_SET_JUMP_OFFSET_AT(cx, cg, beq);
+ if (!js_SetSrcNoteOffset(cx, cg, noteIndex, 0, jmp - beq))
+ return JS_FALSE;
+ ok = js_PopStatementCG(cx, cg);
+ break;
+
+ case TOK_DO:
+ /* Emit an annotated nop so we know to decompile a 'do' keyword. */
+ if (js_NewSrcNote(cx, cg, SRC_WHILE) < 0 ||
+ js_Emit1(cx, cg, JSOP_NOP) < 0) {
+ return JS_FALSE;
+ }
+
+ /* Compile the loop body. */
+ top = CG_OFFSET(cg);
+ js_PushStatement(&cg->treeContext, &stmtInfo, STMT_DO_LOOP, top);
+ if (!js_EmitTree(cx, cg, pn->pn_left))
+ return JS_FALSE;
+
+ /* Set loop and enclosing label update offsets, for continue. */
+ stmt = &stmtInfo;
+ do {
+ stmt->update = CG_OFFSET(cg);
+ } while ((stmt = stmt->down) != NULL && stmt->type == STMT_LABEL);
+
+ /* Compile the loop condition, now that continues know where to go. */
+ if (!js_EmitTree(cx, cg, pn->pn_right))
+ return JS_FALSE;
+
+ /*
+ * No source note needed, because JSOP_IFNE is used only for do-while.
+ * If we ever use JSOP_IFNE for other purposes, we can still avoid yet
+ * another note here, by storing (jmp - top) in the SRC_WHILE note's
+ * offset, and fetching that delta in order to decompile recursively.
+ */
+ if (EmitJump(cx, cg, JSOP_IFNE, top - CG_OFFSET(cg)) < 0)
+ return JS_FALSE;
+ ok = js_PopStatementCG(cx, cg);
+ break;
+
+ case TOK_FOR:
+ beq = 0; /* suppress gcc warnings */
+ pn2 = pn->pn_left;
+ js_PushStatement(&cg->treeContext, &stmtInfo, STMT_FOR_LOOP, top);
+
+ if (pn2->pn_type == TOK_IN) {
+ JSBool emitIFEQ;
+
+ /* Set stmtInfo type for later testing. */
+ stmtInfo.type = STMT_FOR_IN_LOOP;
+ noteIndex = -1;
+
+ /*
+ * If the left part is 'var x', emit code to define x if necessary
+ * using a prolog opcode, but do not emit a pop. If the left part
+ * is 'var x = i', emit prolog code to define x if necessary; then
+ * emit code to evaluate i, assign the result to x, and pop the
+ * result off the stack.
+ *
+ * All the logic to do this is implemented in the outer switch's
+ * TOK_VAR case, conditioned on pn_extra flags set by the parser.
+ *
+ * In the 'for (var x = i in o) ...' case, the js_EmitTree(...pn3)
+ * called here will generate the proper note for the assignment
+ * op that sets x = i, hoisting the initialized var declaration
+ * out of the loop: 'var x = i; for (x in o) ...'.
+ *
+ * In the 'for (var x in o) ...' case, nothing but the prolog op
+ * (if needed) should be generated here, we must emit the note
+ * just before the JSOP_FOR* opcode in the switch on pn3->pn_type
+ * a bit below, so nothing is hoisted: 'for (var x in o) ...'.
+ *
+ * A 'for (let x = i in o)' loop must not be hoisted, since in
+ * this form the let variable is scoped by the loop body (but not
+ * the head). The initializer expression i must be evaluated for
+ * any side effects. So we hoist only i in the let case.
+ */
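+            /*
+             * In short:
+             *
+             *     for (var x in o) ...        nothing is hoisted
+             *     for (var x = i in o) ...    'var x = i' is hoisted
+             *     for (let x = i in o) ...    only i is hoisted; x stays
+             *                                 scoped to the loop body
+             */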
+ pn3 = pn2->pn_left;
+ type = pn3->pn_type;
+ cg->treeContext.flags |= TCF_IN_FOR_INIT;
+ if (TOKEN_TYPE_IS_DECL(type) && !js_EmitTree(cx, cg, pn3))
+ return JS_FALSE;
+ cg->treeContext.flags &= ~TCF_IN_FOR_INIT;
+
+ /* Emit a push to allocate the iterator. */
+ if (js_Emit1(cx, cg, JSOP_STARTITER) < 0)
+ return JS_FALSE;
+
+ /* Compile the object expression to the right of 'in'. */
+ if (!js_EmitTree(cx, cg, pn2->pn_right))
+ return JS_FALSE;
+
+ /*
+ * Emit a bytecode to convert top of stack value to the iterator
+ * object depending on the loop variant (for-in, for-each-in, or
+ * destructuring for-in).
+ */
+#if JS_HAS_DESTRUCTURING
+ JS_ASSERT(pn->pn_op == JSOP_FORIN ||
+ pn->pn_op == JSOP_FOREACHKEYVAL ||
+ pn->pn_op == JSOP_FOREACH);
+#else
+ JS_ASSERT(pn->pn_op == JSOP_FORIN || pn->pn_op == JSOP_FOREACH);
+#endif
+ if (js_Emit1(cx, cg, pn->pn_op) < 0)
+ return JS_FALSE;
+
+ top = CG_OFFSET(cg);
+ SET_STATEMENT_TOP(&stmtInfo, top);
+
+ /*
+ * Compile a JSOP_FOR* bytecode based on the left hand side.
+ *
+ * Initialize op to JSOP_SETNAME in case of |for ([a, b] in o)...|
+ * or similar, to signify assignment, rather than declaration, to
+ * the decompiler. EmitDestructuringOps takes a prolog bytecode
+ * parameter and emits the appropriate source note, defaulting to
+ * assignment, so JSOP_SETNAME is not critical here; many similar
+ * ops could be used -- just not JSOP_NOP (which means 'let').
+ */
+ emitIFEQ = JS_TRUE;
+ op = JSOP_SETNAME;
+ switch (type) {
+#if JS_HAS_BLOCK_SCOPE
+ case TOK_LET:
+#endif
+ case TOK_VAR:
+ JS_ASSERT(pn3->pn_arity == PN_LIST && pn3->pn_count == 1);
+ pn3 = pn3->pn_head;
+#if JS_HAS_DESTRUCTURING
+ if (pn3->pn_type == TOK_ASSIGN) {
+ pn3 = pn3->pn_left;
+ JS_ASSERT(pn3->pn_type == TOK_RB || pn3->pn_type == TOK_RC);
+ }
+ if (pn3->pn_type == TOK_RB || pn3->pn_type == TOK_RC) {
+ op = pn2->pn_left->pn_op;
+ goto destructuring_for;
+ }
+#else
+ JS_ASSERT(pn3->pn_type == TOK_NAME);
+#endif
+ /*
+ * Always annotate JSOP_FORLOCAL if given input of the form
+                 * 'for (let x in o)' -- the decompiler must not hoist the
+ * 'let x' out of the loop head, or x will be bound in the
+ * wrong scope. Likewise, but in this case only for the sake
+                 * of higher decompilation fidelity, do not hoist 'var x'
+ * when given 'for (var x in o)'. But 'for (var x = i in o)'
+ * requires hoisting in order to preserve the initializer i.
+ * The decompiler can only handle so much!
+ */
+ if ((
+#if JS_HAS_BLOCK_SCOPE
+ type == TOK_LET ||
+#endif
+ !pn3->pn_expr) &&
+ js_NewSrcNote2(cx, cg, SRC_DECL,
+ type == TOK_VAR
+ ? SRC_DECL_VAR
+ : SRC_DECL_LET) < 0) {
+ return JS_FALSE;
+ }
+ /* FALL THROUGH */
+ case TOK_NAME:
+ if (pn3->pn_slot >= 0) {
+ op = pn3->pn_op;
+ switch (op) {
+ case JSOP_GETARG: /* FALL THROUGH */
+ case JSOP_SETARG: op = JSOP_FORARG; break;
+ case JSOP_GETVAR: /* FALL THROUGH */
+ case JSOP_SETVAR: op = JSOP_FORVAR; break;
+ case JSOP_GETGVAR: /* FALL THROUGH */
+ case JSOP_SETGVAR: op = JSOP_FORNAME; break;
+ case JSOP_GETLOCAL: /* FALL THROUGH */
+ case JSOP_SETLOCAL: op = JSOP_FORLOCAL; break;
+ default: JS_ASSERT(0);
+ }
+ } else {
+ pn3->pn_op = JSOP_FORNAME;
+ if (!BindNameToSlot(cx, &cg->treeContext, pn3, JS_FALSE))
+ return JS_FALSE;
+ op = pn3->pn_op;
+ }
+ if (pn3->pn_slot >= 0) {
+ if (pn3->pn_attrs & JSPROP_READONLY) {
+ JS_ASSERT(op == JSOP_FORVAR);
+ op = JSOP_GETVAR;
+ }
+ atomIndex = (jsatomid) pn3->pn_slot;
+ EMIT_UINT16_IMM_OP(op, atomIndex);
+ } else {
+ if (!EmitAtomOp(cx, pn3, op, cg))
+ return JS_FALSE;
+ }
+ break;
+
+ case TOK_DOT:
+ useful = JS_FALSE;
+ if (!CheckSideEffects(cx, &cg->treeContext, pn3->pn_expr,
+ &useful)) {
+ return JS_FALSE;
+ }
+ if (!useful) {
+ if (!EmitPropOp(cx, pn3, JSOP_FORPROP, cg))
+ return JS_FALSE;
+ break;
+ }
+ /* FALL THROUGH */
+
+#if JS_HAS_DESTRUCTURING
+ case TOK_RB:
+ case TOK_RC:
+ destructuring_for:
+#endif
+#if JS_HAS_XML_SUPPORT
+ case TOK_UNARYOP:
+#endif
+#if JS_HAS_LVALUE_RETURN
+ case TOK_LP:
+#endif
+ case TOK_LB:
+ /*
+ * We separate the first/next bytecode from the enumerator
+ * variable binding to avoid any side-effects in the index
+ * expression (e.g., for (x[i++] in {}) should not bind x[i]
+ * or increment i at all).
+ */
+ emitIFEQ = JS_FALSE;
+                if (js_Emit1(cx, cg, JSOP_FORELEM) < 0)
+ return JS_FALSE;
+
+ /*
+ * Emit a SRC_WHILE note with offset telling the distance to
+ * the loop-closing jump (we can't reckon from the branch at
+ * the top of the loop, because the loop-closing jump might
+ * need to be an extended jump, independent of whether the
+ * branch is short or long).
+ */
+ noteIndex = js_NewSrcNote(cx, cg, SRC_WHILE);
+ if (noteIndex < 0)
+ return JS_FALSE;
+ beq = EmitJump(cx, cg, JSOP_IFEQ, 0);
+ if (beq < 0)
+ return JS_FALSE;
+
+#if JS_HAS_DESTRUCTURING
+ if (pn3->pn_type == TOK_RB || pn3->pn_type == TOK_RC) {
+ if (!EmitDestructuringOps(cx, cg, op, pn3))
+ return JS_FALSE;
+ if (js_Emit1(cx, cg, JSOP_POP) < 0)
+ return JS_FALSE;
+ break;
+ }
+#endif
+#if JS_HAS_LVALUE_RETURN
+ if (pn3->pn_type == TOK_LP) {
+ JS_ASSERT(pn3->pn_op == JSOP_SETCALL);
+ if (!js_EmitTree(cx, cg, pn3))
+ return JS_FALSE;
+                    if (js_Emit1(cx, cg, JSOP_ENUMELEM) < 0)
+ return JS_FALSE;
+ break;
+ }
+#endif
+#if JS_HAS_XML_SUPPORT
+ if (pn3->pn_type == TOK_UNARYOP) {
+ JS_ASSERT(pn3->pn_op == JSOP_BINDXMLNAME);
+ if (!js_EmitTree(cx, cg, pn3))
+ return JS_FALSE;
+                    if (js_Emit1(cx, cg, JSOP_ENUMELEM) < 0)
+ return JS_FALSE;
+ break;
+ }
+#endif
+
+ /* Now that we're safely past the IFEQ, commit side effects. */
+ if (!EmitElemOp(cx, pn3, JSOP_ENUMELEM, cg))
+ return JS_FALSE;
+ break;
+
+ default:
+ JS_ASSERT(0);
+ }
+
+ if (emitIFEQ) {
+ /* Annotate so the decompiler can find the loop-closing jump. */
+ noteIndex = js_NewSrcNote(cx, cg, SRC_WHILE);
+ if (noteIndex < 0)
+ return JS_FALSE;
+
+ /* Pop and test the loop condition generated by JSOP_FOR*. */
+ beq = EmitJump(cx, cg, JSOP_IFEQ, 0);
+ if (beq < 0)
+ return JS_FALSE;
+ }
+ } else {
+ op = JSOP_POP;
+ if (!pn2->pn_kid1) {
+ /* No initializer: emit an annotated nop for the decompiler. */
+ op = JSOP_NOP;
+ } else {
+ cg->treeContext.flags |= TCF_IN_FOR_INIT;
+#if JS_HAS_DESTRUCTURING
+ pn3 = pn2->pn_kid1;
+ if (pn3->pn_type == TOK_ASSIGN &&
+ !MaybeEmitGroupAssignment(cx, cg, op, pn3, &op)) {
+ return JS_FALSE;
+ }
+#endif
+ if (op == JSOP_POP) {
+ if (!js_EmitTree(cx, cg, pn3))
+ return JS_FALSE;
+ if (TOKEN_TYPE_IS_DECL(pn3->pn_type)) {
+ /*
+ * Check whether a destructuring-initialized var decl
+ * was optimized to a group assignment. If so, we do
+ * not need to emit a pop below, so switch to a nop,
+ * just for the decompiler.
+ */
+ JS_ASSERT(pn3->pn_arity == PN_LIST);
+ if (pn3->pn_extra & PNX_GROUPINIT)
+ op = JSOP_NOP;
+ }
+ }
+ cg->treeContext.flags &= ~TCF_IN_FOR_INIT;
+ }
+ noteIndex = js_NewSrcNote(cx, cg, SRC_FOR);
+ if (noteIndex < 0 ||
+ js_Emit1(cx, cg, op) < 0) {
+ return JS_FALSE;
+ }
+
+ top = CG_OFFSET(cg);
+ SET_STATEMENT_TOP(&stmtInfo, top);
+ if (!pn2->pn_kid2) {
+ /* No loop condition: flag this fact in the source notes. */
+ if (!js_SetSrcNoteOffset(cx, cg, (uintN)noteIndex, 0, 0))
+ return JS_FALSE;
+ } else {
+ if (!js_EmitTree(cx, cg, pn2->pn_kid2))
+ return JS_FALSE;
+ if (!js_SetSrcNoteOffset(cx, cg, (uintN)noteIndex, 0,
+ CG_OFFSET(cg) - top)) {
+ return JS_FALSE;
+ }
+ beq = EmitJump(cx, cg, JSOP_IFEQ, 0);
+ if (beq < 0)
+ return JS_FALSE;
+ }
+
+ /* Set pn3 (used below) here to avoid spurious gcc warnings. */
+ pn3 = pn2->pn_kid3;
+ }
+
+ /* Emit code for the loop body. */
+ if (!js_EmitTree(cx, cg, pn->pn_right))
+ return JS_FALSE;
+
+ if (pn2->pn_type != TOK_IN) {
+ /* Set the second note offset so we can find the update part. */
+ JS_ASSERT(noteIndex != -1);
+ if (!js_SetSrcNoteOffset(cx, cg, (uintN)noteIndex, 1,
+ CG_OFFSET(cg) - top)) {
+ return JS_FALSE;
+ }
+
+ if (pn3) {
+ /* Set loop and enclosing "update" offsets, for continue. */
+ stmt = &stmtInfo;
+ do {
+ stmt->update = CG_OFFSET(cg);
+ } while ((stmt = stmt->down) != NULL &&
+ stmt->type == STMT_LABEL);
+
+ op = JSOP_POP;
+#if JS_HAS_DESTRUCTURING
+ if (pn3->pn_type == TOK_ASSIGN &&
+ !MaybeEmitGroupAssignment(cx, cg, op, pn3, &op)) {
+ return JS_FALSE;
+ }
+#endif
+ if (op == JSOP_POP) {
+ if (!js_EmitTree(cx, cg, pn3))
+ return JS_FALSE;
+ if (js_Emit1(cx, cg, op) < 0)
+ return JS_FALSE;
+ }
+
+ /* Restore the absolute line number for source note readers. */
+ off = (ptrdiff_t) pn->pn_pos.end.lineno;
+ if (CG_CURRENT_LINE(cg) != (uintN) off) {
+ if (js_NewSrcNote2(cx, cg, SRC_SETLINE, off) < 0)
+ return JS_FALSE;
+ CG_CURRENT_LINE(cg) = (uintN) off;
+ }
+ }
+
+ /* The third note offset helps us find the loop-closing jump. */
+ if (!js_SetSrcNoteOffset(cx, cg, (uintN)noteIndex, 2,
+ CG_OFFSET(cg) - top)) {
+ return JS_FALSE;
+ }
+ }
+
+ /* Emit the loop-closing jump and fixup all jump offsets. */
+ jmp = EmitJump(cx, cg, JSOP_GOTO, top - CG_OFFSET(cg));
+ if (jmp < 0)
+ return JS_FALSE;
+ if (beq > 0)
+ CHECK_AND_SET_JUMP_OFFSET_AT(cx, cg, beq);
+ if (pn2->pn_type == TOK_IN) {
+ /* Set the SRC_WHILE note offset so we can find the closing jump. */
+ JS_ASSERT(noteIndex != -1);
+ if (!js_SetSrcNoteOffset(cx, cg, (uintN)noteIndex, 0, jmp - beq))
+ return JS_FALSE;
+ }
+
+ /* Now fixup all breaks and continues (before for/in's JSOP_ENDITER). */
+ if (!js_PopStatementCG(cx, cg))
+ return JS_FALSE;
+
+ if (pn2->pn_type == TOK_IN) {
+ if (js_Emit1(cx, cg, JSOP_ENDITER) < 0)
+ return JS_FALSE;
+ }
+ break;
+
+ case TOK_BREAK:
+ stmt = cg->treeContext.topStmt;
+ atom = pn->pn_atom;
+ if (atom) {
+ ale = js_IndexAtom(cx, atom, &cg->atomList);
+ if (!ale)
+ return JS_FALSE;
+ while (stmt->type != STMT_LABEL || stmt->atom != atom)
+ stmt = stmt->down;
+ noteType = SRC_BREAK2LABEL;
+ } else {
+ ale = NULL;
+ while (!STMT_IS_LOOP(stmt) && stmt->type != STMT_SWITCH)
+ stmt = stmt->down;
+ noteType = SRC_NULL;
+ }
+
+ if (EmitGoto(cx, cg, stmt, &stmt->breaks, ale, noteType) < 0)
+ return JS_FALSE;
+ break;
+
+ case TOK_CONTINUE:
+ stmt = cg->treeContext.topStmt;
+ atom = pn->pn_atom;
+ if (atom) {
+ /* Find the loop statement enclosed by the matching label. */
+ JSStmtInfo *loop = NULL;
+ ale = js_IndexAtom(cx, atom, &cg->atomList);
+ if (!ale)
+ return JS_FALSE;
+ while (stmt->type != STMT_LABEL || stmt->atom != atom) {
+ if (STMT_IS_LOOP(stmt))
+ loop = stmt;
+ stmt = stmt->down;
+ }
+ stmt = loop;
+ noteType = SRC_CONT2LABEL;
+ } else {
+ ale = NULL;
+ while (!STMT_IS_LOOP(stmt))
+ stmt = stmt->down;
+ noteType = SRC_CONTINUE;
+ }
+
+ if (EmitGoto(cx, cg, stmt, &stmt->continues, ale, noteType) < 0)
+ return JS_FALSE;
+ break;
+
+ case TOK_WITH:
+ if (!js_EmitTree(cx, cg, pn->pn_left))
+ return JS_FALSE;
+ js_PushStatement(&cg->treeContext, &stmtInfo, STMT_WITH, CG_OFFSET(cg));
+ if (js_Emit1(cx, cg, JSOP_ENTERWITH) < 0)
+ return JS_FALSE;
+ if (!js_EmitTree(cx, cg, pn->pn_right))
+ return JS_FALSE;
+ if (js_Emit1(cx, cg, JSOP_LEAVEWITH) < 0)
+ return JS_FALSE;
+ ok = js_PopStatementCG(cx, cg);
+ break;
+
+ case TOK_TRY:
+ {
+ ptrdiff_t start, end, catchJump, catchStart, finallyCatch;
+ intN depth;
+ JSParseNode *lastCatch;
+
+ catchJump = catchStart = finallyCatch = -1;
+
+ /*
+ * Push stmtInfo to track jumps-over-catches and gosubs-to-finally
+ * for later fixup.
+ *
+ * When a finally block is 'active' (STMT_FINALLY on the treeContext),
+ * non-local jumps (including jumps-over-catches) result in a GOSUB
+ * being written into the bytecode stream and fixed-up later (c.f.
+ * EmitBackPatchOp and BackPatch).
+ */
+ js_PushStatement(&cg->treeContext, &stmtInfo,
+ pn->pn_kid3 ? STMT_FINALLY : STMT_TRY,
+ CG_OFFSET(cg));
+
+ /*
+ * About JSOP_SETSP: an exception can be thrown while the stack is in
+ * an unbalanced state, and this imbalance causes problems with things
+ * like function invocation later on.
+ *
+ * To fix this, we compute the 'balanced' stack depth upon try entry,
+ * and then restore the stack to this depth when we hit the first catch
+ * or finally block. We can't just zero the stack, because things like
+ * for/in and with that are active upon entry to the block keep state
+ * variables on the stack.
+ */
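+        /*
+         * For example, a try block nested inside 'with (q)' or 'for (p in o)'
+         * is entered with that statement's state still occupying stack slots,
+         * so the handler restores the stack to the recorded depth rather than
+         * to zero.
+         */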
+ depth = cg->stackDepth;
+
+ /* Mark try location for decompilation, then emit try block. */
+ if (js_Emit1(cx, cg, JSOP_TRY) < 0)
+ return JS_FALSE;
+ start = CG_OFFSET(cg);
+ if (!js_EmitTree(cx, cg, pn->pn_kid1))
+ return JS_FALSE;
+
+ /* GOSUB to finally, if present. */
+ if (pn->pn_kid3) {
+ if (js_NewSrcNote(cx, cg, SRC_HIDDEN) < 0)
+ return JS_FALSE;
+ jmp = EmitBackPatchOp(cx, cg, JSOP_BACKPATCH, &GOSUBS(stmtInfo));
+ if (jmp < 0)
+ return JS_FALSE;
+
+ /* JSOP_RETSUB pops the return pc-index, balancing the stack. */
+ cg->stackDepth = depth;
+ }
+
+ /* Emit (hidden) jump over catch and/or finally. */
+ if (js_NewSrcNote(cx, cg, SRC_HIDDEN) < 0)
+ return JS_FALSE;
+ jmp = EmitBackPatchOp(cx, cg, JSOP_BACKPATCH, &catchJump);
+ if (jmp < 0)
+ return JS_FALSE;
+
+ end = CG_OFFSET(cg);
+
+ /* If this try has a catch block, emit it. */
+ pn2 = pn->pn_kid2;
+ lastCatch = NULL;
+ if (pn2) {
+ jsint count = 0; /* previous catch block's population */
+
+ catchStart = end;
+
+ /*
+ * The emitted code for a catch block looks like:
+ *
+ * [throwing] only if 2nd+ catch block
+ * [leaveblock] only if 2nd+ catch block
+ * enterblock with SRC_CATCH
+ * exception
+ * [dup] only if catchguard
+ * setlocalpop <slot> or destructuring code
+ * [< catchguard code >] if there's a catchguard
+ * [ifeq <offset to next catch block>] " "
+ * [pop] only if catchguard
+ * < catch block contents >
+ * leaveblock
+ * goto <end of catch blocks> non-local; finally applies
+ *
+ * If there's no catch block without a catchguard, the last
+ * <offset to next catch block> points to rethrow code. This
+ * code will [gosub] to the finally code if appropriate, and is
+ * also used for the catch-all trynote for capturing exceptions
+ * thrown from catch{} blocks.
+ */
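+            /*
+             * For example, with the catchguard extension:
+             *
+             *     try {
+             *         f();
+             *     } catch (e if e instanceof TypeError) {
+             *         g(e);
+             *     } catch (e) {
+             *         h(e);
+             *     }
+             *
+             * the guarded block's ifeq targets the unguarded block, so no
+             * rethrow sequence is needed for a guard mismatch.
+             */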
+ for (pn3 = pn2->pn_head; pn3; pn3 = pn3->pn_next) {
+ ptrdiff_t guardJump, catchNote;
+
+ guardJump = GUARDJUMP(stmtInfo);
+ if (guardJump == -1) {
+ /* Set stack to original depth (see SETSP comment above). */
+ EMIT_UINT16_IMM_OP(JSOP_SETSP, (jsatomid)depth);
+ cg->stackDepth = depth;
+ } else {
+ /* Fix up and clean up previous catch block. */
+ CHECK_AND_SET_JUMP_OFFSET_AT(cx, cg, guardJump);
+
+ /*
+ * Account for the pushed exception object that we still
+                     * have after jumping from the previous guard.
+ */
+ JS_ASSERT(cg->stackDepth == depth);
+ cg->stackDepth = depth + 1;
+
+ /*
+ * Move exception back to cx->exception to prepare for
+ * the next catch. We hide [throwing] from the decompiler
+ * since it compensates for the hidden JSOP_DUP at the
+ * start of the previous guarded catch.
+ */
+ if (js_NewSrcNote(cx, cg, SRC_HIDDEN) < 0 ||
+ js_Emit1(cx, cg, JSOP_THROWING) < 0) {
+ return JS_FALSE;
+ }
+
+ /*
+ * Emit an unbalanced [leaveblock] for the previous catch,
+ * whose block object count is saved below.
+ */
+ if (js_NewSrcNote(cx, cg, SRC_HIDDEN) < 0)
+ return JS_FALSE;
+ JS_ASSERT(count >= 0);
+ EMIT_UINT16_IMM_OP(JSOP_LEAVEBLOCK, count);
+ }
+
+ /*
+ * Annotate the JSOP_ENTERBLOCK that's about to be generated
+ * by the call to js_EmitTree immediately below. Save this
+ * source note's index in stmtInfo for use by the TOK_CATCH:
+ * case, where the length of the catch guard is set as the
+ * note's offset.
+ */
+ catchNote = js_NewSrcNote2(cx, cg, SRC_CATCH, 0);
+ if (catchNote < 0)
+ return JS_FALSE;
+ CATCHNOTE(stmtInfo) = catchNote;
+
+ /*
+ * Emit the lexical scope and catch body. Save the catch's
+ * block object population via count, for use when targeting
+ * guardJump at the next catch (the guard mismatch case).
+ */
+ JS_ASSERT(pn3->pn_type == TOK_LEXICALSCOPE);
+ count = OBJ_BLOCK_COUNT(cx, ATOM_TO_OBJECT(pn3->pn_atom));
+ if (!js_EmitTree(cx, cg, pn3))
+ return JS_FALSE;
+
+ /* gosub <finally>, if required */
+ if (pn->pn_kid3) {
+ jmp = EmitBackPatchOp(cx, cg, JSOP_BACKPATCH,
+ &GOSUBS(stmtInfo));
+ if (jmp < 0)
+ return JS_FALSE;
+ JS_ASSERT(cg->stackDepth == depth);
+ }
+
+ /*
+ * Jump over the remaining catch blocks. This will get fixed
+ * up to jump to after catch/finally.
+ */
+ if (js_NewSrcNote(cx, cg, SRC_HIDDEN) < 0)
+ return JS_FALSE;
+ jmp = EmitBackPatchOp(cx, cg, JSOP_BACKPATCH, &catchJump);
+ if (jmp < 0)
+ return JS_FALSE;
+
+ /*
+ * Save a pointer to the last catch node to handle try-finally
+ * and try-catch(guard)-finally special cases.
+ */
+ lastCatch = pn3->pn_expr;
+ }
+ }
+
+ /*
+ * Last catch guard jumps to the rethrow code sequence if none of the
+ * guards match. Target guardJump at the beginning of the rethrow
+ * sequence, just in case a guard expression throws and leaves the
+ * stack unbalanced.
+ */
+ if (lastCatch && lastCatch->pn_kid2) {
+ CHECK_AND_SET_JUMP_OFFSET_AT(cx, cg, GUARDJUMP(stmtInfo));
+
+ /* Sync the stack to take into account pushed exception. */
+ JS_ASSERT(cg->stackDepth == depth);
+ cg->stackDepth = depth + 1;
+
+ /*
+             * Rethrow the exception, delegating execution of the finally
+             * block, if any, to the exception handler.
+ */
+ if (js_NewSrcNote(cx, cg, SRC_HIDDEN) < 0 ||
+ js_Emit1(cx, cg, JSOP_THROW) < 0) {
+ return JS_FALSE;
+ }
+ }
+
+ JS_ASSERT(cg->stackDepth == depth);
+
+ /* Emit finally handler if any. */
+ if (pn->pn_kid3) {
+ /*
+ * We emit [setsp][gosub] to call try-finally when an exception is
+ * thrown from try or try-catch blocks. The [gosub] and [retsub]
+ * opcodes will take care of stacking and rethrowing any exception
+ * pending across the finally.
+ */
+ finallyCatch = CG_OFFSET(cg);
+ EMIT_UINT16_IMM_OP(JSOP_SETSP, (jsatomid)depth);
+
+ jmp = EmitBackPatchOp(cx, cg, JSOP_BACKPATCH,
+ &GOSUBS(stmtInfo));
+ if (jmp < 0)
+ return JS_FALSE;
+
+ JS_ASSERT(cg->stackDepth == depth);
+ JS_ASSERT((uintN)depth <= cg->maxStackDepth);
+
+ /*
+ * Fix up the gosubs that might have been emitted before non-local
+ * jumps to the finally code.
+ */
+ if (!BackPatch(cx, cg, GOSUBS(stmtInfo), CG_NEXT(cg), JSOP_GOSUB))
+ return JS_FALSE;
+
+ /*
+ * The stack budget must be balanced at this point. All [gosub]
+ * calls emitted before this point will push two stack slots, one
+ * for the pending exception (or JSVAL_HOLE if there is no pending
+ * exception) and one for the [retsub] pc-index.
+ */
+ JS_ASSERT(cg->stackDepth == depth);
+ cg->stackDepth += 2;
+ if ((uintN)cg->stackDepth > cg->maxStackDepth)
+ cg->maxStackDepth = cg->stackDepth;
+
+ /* Now indicate that we're emitting a subroutine body. */
+ stmtInfo.type = STMT_SUBROUTINE;
+ if (!UpdateLineNumberNotes(cx, cg, pn->pn_kid3))
+ return JS_FALSE;
+ if (js_Emit1(cx, cg, JSOP_FINALLY) < 0 ||
+ !js_EmitTree(cx, cg, pn->pn_kid3) ||
+ js_Emit1(cx, cg, JSOP_RETSUB) < 0) {
+ return JS_FALSE;
+ }
+
+ /* Restore stack depth budget to its balanced state. */
+ JS_ASSERT(cg->stackDepth == depth + 2);
+ cg->stackDepth = depth;
+ }
+ if (!js_PopStatementCG(cx, cg))
+ return JS_FALSE;
+
+ if (js_NewSrcNote(cx, cg, SRC_ENDBRACE) < 0 ||
+ js_Emit1(cx, cg, JSOP_NOP) < 0) {
+ return JS_FALSE;
+ }
+
+ /* Fix up the end-of-try/catch jumps to come here. */
+ if (!BackPatch(cx, cg, catchJump, CG_NEXT(cg), JSOP_GOTO))
+ return JS_FALSE;
+
+ /*
+ * Add the try note last, to let post-order give us the right ordering
+ * (first to last for a given nesting level, inner to outer by level).
+ */
+ if (pn->pn_kid2) {
+ JS_ASSERT(end != -1 && catchStart != -1);
+ if (!js_NewTryNote(cx, cg, start, end, catchStart))
+ return JS_FALSE;
+ }
+
+ /*
+ * If we've got a finally, mark try+catch region with additional
+ * trynote to catch exceptions (re)thrown from a catch block or
+ * for the try{}finally{} case.
+ */
+ if (pn->pn_kid3) {
+ JS_ASSERT(finallyCatch != -1);
+ if (!js_NewTryNote(cx, cg, start, finallyCatch, finallyCatch))
+ return JS_FALSE;
+ }
+ break;
+ }
+
+ case TOK_CATCH:
+ {
+ ptrdiff_t catchStart, guardJump;
+
+ /*
+ * Morph STMT_BLOCK to STMT_CATCH, note the block entry code offset,
+ * and save the block object atom.
+ */
+ stmt = cg->treeContext.topStmt;
+ JS_ASSERT(stmt->type == STMT_BLOCK && (stmt->flags & SIF_SCOPE));
+ stmt->type = STMT_CATCH;
+ catchStart = stmt->update;
+ atom = stmt->atom;
+
+ /* Go up one statement info record to the TRY or FINALLY record. */
+ stmt = stmt->down;
+ JS_ASSERT(stmt->type == STMT_TRY || stmt->type == STMT_FINALLY);
+
+ /* Pick up the pending exception and bind it to the catch variable. */
+ if (js_Emit1(cx, cg, JSOP_EXCEPTION) < 0)
+ return JS_FALSE;
+
+ /*
+         * Dup the exception object if there is a guard, so that the copy can
+         * be used later when rethrowing or in other catches.
+ */
+ if (pn->pn_kid2) {
+ if (js_NewSrcNote(cx, cg, SRC_HIDDEN) < 0 ||
+ js_Emit1(cx, cg, JSOP_DUP) < 0) {
+ return JS_FALSE;
+ }
+ }
+
+ pn2 = pn->pn_kid1;
+ switch (pn2->pn_type) {
+#if JS_HAS_DESTRUCTURING
+ case TOK_RB:
+ case TOK_RC:
+ if (!EmitDestructuringOps(cx, cg, JSOP_NOP, pn2))
+ return JS_FALSE;
+ if (js_Emit1(cx, cg, JSOP_POP) < 0)
+ return JS_FALSE;
+ break;
+#endif
+
+ case TOK_NAME:
+ /* Inline BindNameToSlot, adding block depth to pn2->pn_slot. */
+ pn2->pn_slot += OBJ_BLOCK_DEPTH(cx, ATOM_TO_OBJECT(atom));
+ EMIT_UINT16_IMM_OP(JSOP_SETLOCALPOP, pn2->pn_slot);
+ break;
+
+ default:
+ JS_ASSERT(0);
+ }
+
+ /* Emit the guard expression, if there is one. */
+ if (pn->pn_kid2) {
+ if (!js_EmitTree(cx, cg, pn->pn_kid2))
+ return JS_FALSE;
+ if (!js_SetSrcNoteOffset(cx, cg, CATCHNOTE(*stmt), 0,
+ CG_OFFSET(cg) - catchStart)) {
+ return JS_FALSE;
+ }
+ /* ifeq <next block> */
+ guardJump = EmitJump(cx, cg, JSOP_IFEQ, 0);
+ if (guardJump < 0)
+ return JS_FALSE;
+ GUARDJUMP(*stmt) = guardJump;
+
+ /* Pop duplicated exception object as we no longer need it. */
+ if (js_NewSrcNote(cx, cg, SRC_HIDDEN) < 0 ||
+ js_Emit1(cx, cg, JSOP_POP) < 0) {
+ return JS_FALSE;
+ }
+ }
+
+ /* Emit the catch body. */
+ if (!js_EmitTree(cx, cg, pn->pn_kid3))
+ return JS_FALSE;
+
+ /*
+ * Annotate the JSOP_LEAVEBLOCK that will be emitted as we unwind via
+ * our TOK_LEXICALSCOPE parent, so the decompiler knows to pop.
+ */
+ off = cg->stackDepth;
+ if (js_NewSrcNote2(cx, cg, SRC_CATCH, off) < 0)
+ return JS_FALSE;
+ break;
+ }
+
+ case TOK_VAR:
+ if (!EmitVariables(cx, cg, pn, JS_FALSE, &noteIndex))
+ return JS_FALSE;
+ break;
+
+ case TOK_RETURN:
+ /* Push a return value */
+ pn2 = pn->pn_kid;
+ if (pn2) {
+ if (!js_EmitTree(cx, cg, pn2))
+ return JS_FALSE;
+ } else {
+ if (js_Emit1(cx, cg, JSOP_PUSH) < 0)
+ return JS_FALSE;
+ }
+
+ /*
+ * EmitNonLocalJumpFixup mutates op to JSOP_RETRVAL after emitting a
+ * JSOP_SETRVAL if there are open try blocks having finally clauses.
+ * We can't simply transfer control flow to our caller in that case,
+ * because we must gosub to those clauses from inner to outer, with
+ * the correct stack pointer (i.e., after popping any with, for/in,
+ * etc., slots nested inside the finally's try).
+ */
+ op = JSOP_RETURN;
+ if (!EmitNonLocalJumpFixup(cx, cg, NULL, &op))
+ return JS_FALSE;
+ if (js_Emit1(cx, cg, op) < 0)
+ return JS_FALSE;
+ break;
+
+#if JS_HAS_GENERATORS
+ case TOK_YIELD:
+ if (pn->pn_kid) {
+ if (!js_EmitTree(cx, cg, pn->pn_kid))
+ return JS_FALSE;
+ } else {
+ if (js_Emit1(cx, cg, JSOP_PUSH) < 0)
+ return JS_FALSE;
+ }
+ if (js_Emit1(cx, cg, JSOP_YIELD) < 0)
+ return JS_FALSE;
+ break;
+#endif
+
+ case TOK_LC:
+#if JS_HAS_XML_SUPPORT
+ if (pn->pn_arity == PN_UNARY) {
+ if (!js_EmitTree(cx, cg, pn->pn_kid))
+ return JS_FALSE;
+ if (js_Emit1(cx, cg, pn->pn_op) < 0)
+ return JS_FALSE;
+ break;
+ }
+#endif
+
+ JS_ASSERT(pn->pn_arity == PN_LIST);
+
+ noteIndex = -1;
+ tmp = CG_OFFSET(cg);
+ if (pn->pn_extra & PNX_NEEDBRACES) {
+ noteIndex = js_NewSrcNote2(cx, cg, SRC_BRACE, 0);
+ if (noteIndex < 0 || js_Emit1(cx, cg, JSOP_NOP) < 0)
+ return JS_FALSE;
+ }
+
+ js_PushStatement(&cg->treeContext, &stmtInfo, STMT_BLOCK, top);
+ for (pn2 = pn->pn_head; pn2; pn2 = pn2->pn_next) {
+ if (!js_EmitTree(cx, cg, pn2))
+ return JS_FALSE;
+ }
+
+ if (noteIndex >= 0 &&
+ !js_SetSrcNoteOffset(cx, cg, (uintN)noteIndex, 0,
+ CG_OFFSET(cg) - tmp)) {
+ return JS_FALSE;
+ }
+
+ ok = js_PopStatementCG(cx, cg);
+ break;
+
+ case TOK_BODY:
+ JS_ASSERT(pn->pn_arity == PN_LIST);
+ js_PushStatement(&cg->treeContext, &stmtInfo, STMT_BODY, top);
+ for (pn2 = pn->pn_head; pn2; pn2 = pn2->pn_next) {
+ if (!js_EmitTree(cx, cg, pn2))
+ return JS_FALSE;
+ }
+ ok = js_PopStatementCG(cx, cg);
+ break;
+
+ case TOK_SEMI:
+ pn2 = pn->pn_kid;
+ if (pn2) {
+ /*
+ * Top-level or called-from-a-native JS_Execute/EvaluateScript,
+ * debugger, and eval frames may need the value of the ultimate
+ * expression statement as the script's result, despite the fact
+ * that it appears useless to the compiler.
+ */
+ useful = wantval = !cx->fp->fun ||
+ !FUN_INTERPRETED(cx->fp->fun) ||
+ (cx->fp->flags & JSFRAME_SPECIAL);
+ if (!useful) {
+ if (!CheckSideEffects(cx, &cg->treeContext, pn2, &useful))
+ return JS_FALSE;
+ }
+
+ /*
+ * Don't eliminate apparently useless expressions if they are
+ * labeled expression statements. The tc->topStmt->update test
+ * catches the case where we are nesting in js_EmitTree for a
+ * labeled compound statement.
+ */
+ if (!useful &&
+ (!cg->treeContext.topStmt ||
+ cg->treeContext.topStmt->type != STMT_LABEL ||
+ cg->treeContext.topStmt->update < CG_OFFSET(cg))) {
+ CG_CURRENT_LINE(cg) = pn2->pn_pos.begin.lineno;
+ if (!js_ReportCompileErrorNumber(cx, cg,
+ JSREPORT_CG |
+ JSREPORT_WARNING |
+ JSREPORT_STRICT,
+ JSMSG_USELESS_EXPR)) {
+ return JS_FALSE;
+ }
+ } else {
+ op = wantval ? JSOP_POPV : JSOP_POP;
+#if JS_HAS_DESTRUCTURING
+ if (!wantval &&
+ pn2->pn_type == TOK_ASSIGN &&
+ !MaybeEmitGroupAssignment(cx, cg, op, pn2, &op)) {
+ return JS_FALSE;
+ }
+#endif
+ if (op != JSOP_NOP) {
+ if (!js_EmitTree(cx, cg, pn2))
+ return JS_FALSE;
+ if (js_Emit1(cx, cg, op) < 0)
+ return JS_FALSE;
+ }
+ }
+ }
+ break;
+
+ case TOK_COLON:
+ /* Emit an annotated nop so we know to decompile a label. */
+ atom = pn->pn_atom;
+ ale = js_IndexAtom(cx, atom, &cg->atomList);
+ if (!ale)
+ return JS_FALSE;
+ pn2 = pn->pn_expr;
+ noteType = (pn2->pn_type == TOK_LC ||
+ (pn2->pn_type == TOK_LEXICALSCOPE &&
+ pn2->pn_expr->pn_type == TOK_LC))
+ ? SRC_LABELBRACE
+ : SRC_LABEL;
+ noteIndex = js_NewSrcNote2(cx, cg, noteType,
+ (ptrdiff_t) ALE_INDEX(ale));
+ if (noteIndex < 0 ||
+ js_Emit1(cx, cg, JSOP_NOP) < 0) {
+ return JS_FALSE;
+ }
+
+ /* Emit code for the labeled statement. */
+ js_PushStatement(&cg->treeContext, &stmtInfo, STMT_LABEL,
+ CG_OFFSET(cg));
+ stmtInfo.atom = atom;
+ if (!js_EmitTree(cx, cg, pn2))
+ return JS_FALSE;
+ if (!js_PopStatementCG(cx, cg))
+ return JS_FALSE;
+
+ /* If the statement was compound, emit a note for the end brace. */
+ if (noteType == SRC_LABELBRACE) {
+ if (js_NewSrcNote(cx, cg, SRC_ENDBRACE) < 0 ||
+ js_Emit1(cx, cg, JSOP_NOP) < 0) {
+ return JS_FALSE;
+ }
+ }
+ break;
+
+ case TOK_COMMA:
+ /*
+ * Emit SRC_PCDELTA notes on each JSOP_POP between comma operands.
+ * These notes help the decompiler bracket the bytecodes generated
+ * from each sub-expression that follows a comma.
+ */
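+        /*
+         * For example, 'a(), b(), c()' emits the code for a() and b(), each
+         * followed by an annotated JSOP_POP, then the code for c(), whose
+         * value is left on the stack as the result of the whole expression.
+         */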
+ off = noteIndex = -1;
+ for (pn2 = pn->pn_head; ; pn2 = pn2->pn_next) {
+ if (!js_EmitTree(cx, cg, pn2))
+ return JS_FALSE;
+ tmp = CG_OFFSET(cg);
+ if (noteIndex >= 0) {
+ if (!js_SetSrcNoteOffset(cx, cg, (uintN)noteIndex, 0, tmp-off))
+ return JS_FALSE;
+ }
+ if (!pn2->pn_next)
+ break;
+ off = tmp;
+ noteIndex = js_NewSrcNote2(cx, cg, SRC_PCDELTA, 0);
+ if (noteIndex < 0 ||
+ js_Emit1(cx, cg, JSOP_POP) < 0) {
+ return JS_FALSE;
+ }
+ }
+ break;
+
+ case TOK_ASSIGN:
+ /*
+ * Check left operand type and generate specialized code for it.
+ * Specialize to avoid ECMA "reference type" values on the operand
+ * stack, which impose pervasive runtime "GetValue" costs.
+ */
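+        /*
+         * For example, 'o.p += v' becomes: evaluate o, JSOP_DUP,
+         * JSOP_GETPROP p, evaluate v, JSOP_ADD (with a SRC_ASSIGNOP note),
+         * then JSOP_SETPROP p -- no Reference value ever reaches the stack.
+         */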
+ pn2 = pn->pn_left;
+ JS_ASSERT(pn2->pn_type != TOK_RP);
+ atomIndex = (jsatomid) -1;
+ switch (pn2->pn_type) {
+ case TOK_NAME:
+ if (!BindNameToSlot(cx, &cg->treeContext, pn2, JS_FALSE))
+ return JS_FALSE;
+ if (pn2->pn_slot >= 0) {
+ atomIndex = (jsatomid) pn2->pn_slot;
+ } else {
+ ale = js_IndexAtom(cx, pn2->pn_atom, &cg->atomList);
+ if (!ale)
+ return JS_FALSE;
+ atomIndex = ALE_INDEX(ale);
+ EMIT_ATOM_INDEX_OP(JSOP_BINDNAME, atomIndex);
+ }
+ break;
+ case TOK_DOT:
+ if (!js_EmitTree(cx, cg, pn2->pn_expr))
+ return JS_FALSE;
+ ale = js_IndexAtom(cx, pn2->pn_atom, &cg->atomList);
+ if (!ale)
+ return JS_FALSE;
+ atomIndex = ALE_INDEX(ale);
+ break;
+ case TOK_LB:
+ JS_ASSERT(pn2->pn_arity == PN_BINARY);
+ if (!js_EmitTree(cx, cg, pn2->pn_left))
+ return JS_FALSE;
+ if (!js_EmitTree(cx, cg, pn2->pn_right))
+ return JS_FALSE;
+ break;
+#if JS_HAS_DESTRUCTURING
+ case TOK_RB:
+ case TOK_RC:
+ break;
+#endif
+#if JS_HAS_LVALUE_RETURN
+ case TOK_LP:
+ if (!js_EmitTree(cx, cg, pn2))
+ return JS_FALSE;
+ break;
+#endif
+#if JS_HAS_XML_SUPPORT
+ case TOK_UNARYOP:
+ JS_ASSERT(pn2->pn_op == JSOP_SETXMLNAME);
+ if (!js_EmitTree(cx, cg, pn2->pn_kid))
+ return JS_FALSE;
+ if (js_Emit1(cx, cg, JSOP_BINDXMLNAME) < 0)
+ return JS_FALSE;
+ break;
+#endif
+ default:
+ JS_ASSERT(0);
+ }
+
+ op = pn->pn_op;
+#if JS_HAS_GETTER_SETTER
+ if (op == JSOP_GETTER || op == JSOP_SETTER) {
+ /* We'll emit these prefix bytecodes after emitting the r.h.s. */
+ if (atomIndex != (jsatomid) -1 && atomIndex >= JS_BIT(16)) {
+ ReportStatementTooLarge(cx, cg);
+ return JS_FALSE;
+ }
+ } else
+#endif
+ /* If += or similar, dup the left operand and get its value. */
+ if (op != JSOP_NOP) {
+ switch (pn2->pn_type) {
+ case TOK_NAME:
+ if (pn2->pn_op != JSOP_SETNAME) {
+ EMIT_UINT16_IMM_OP((pn2->pn_op == JSOP_SETGVAR)
+ ? JSOP_GETGVAR
+ : (pn2->pn_op == JSOP_SETARG)
+ ? JSOP_GETARG
+ : (pn2->pn_op == JSOP_SETLOCAL)
+ ? JSOP_GETLOCAL
+ : JSOP_GETVAR,
+ atomIndex);
+ break;
+ }
+ /* FALL THROUGH */
+ case TOK_DOT:
+ if (js_Emit1(cx, cg, JSOP_DUP) < 0)
+ return JS_FALSE;
+ EMIT_ATOM_INDEX_OP((pn2->pn_type == TOK_NAME)
+ ? JSOP_GETXPROP
+ : JSOP_GETPROP,
+ atomIndex);
+ break;
+ case TOK_LB:
+#if JS_HAS_LVALUE_RETURN
+ case TOK_LP:
+#endif
+#if JS_HAS_XML_SUPPORT
+ case TOK_UNARYOP:
+#endif
+ if (js_Emit1(cx, cg, JSOP_DUP2) < 0)
+ return JS_FALSE;
+ if (js_Emit1(cx, cg, JSOP_GETELEM) < 0)
+ return JS_FALSE;
+ break;
+ default:;
+ }
+ }
+
+ /* Now emit the right operand (it may affect the namespace). */
+ if (!js_EmitTree(cx, cg, pn->pn_right))
+ return JS_FALSE;
+
+ /* If += etc., emit the binary operator with a decompiler note. */
+ if (op != JSOP_NOP) {
+ /*
+ * Take care to avoid SRC_ASSIGNOP if the left-hand side is a
+ * const declared in a function (i.e., with non-negative pn_slot
+ * and JSPROP_READONLY in pn_attrs), as in this case (just a bit
+ * further below) we will avoid emitting the assignment op.
+ */
+ if (pn2->pn_type != TOK_NAME ||
+ pn2->pn_slot < 0 ||
+ !(pn2->pn_attrs & JSPROP_READONLY)) {
+ if (js_NewSrcNote(cx, cg, SRC_ASSIGNOP) < 0)
+ return JS_FALSE;
+ }
+ if (js_Emit1(cx, cg, op) < 0)
+ return JS_FALSE;
+ }
+
+ /* Left parts such as a.b.c and a[b].c need a decompiler note. */
+ if (pn2->pn_type != TOK_NAME &&
+#if JS_HAS_DESTRUCTURING
+ pn2->pn_type != TOK_RB &&
+ pn2->pn_type != TOK_RC &&
+#endif
+ js_NewSrcNote2(cx, cg, SrcNoteForPropOp(pn2, pn2->pn_op),
+ CG_OFFSET(cg) - top) < 0) {
+ return JS_FALSE;
+ }
+
+ /* Finally, emit the specialized assignment bytecode. */
+ switch (pn2->pn_type) {
+ case TOK_NAME:
+ if (pn2->pn_slot < 0 || !(pn2->pn_attrs & JSPROP_READONLY)) {
+ if (pn2->pn_slot >= 0) {
+ EMIT_UINT16_IMM_OP(pn2->pn_op, atomIndex);
+ } else {
+ case TOK_DOT:
+ EMIT_ATOM_INDEX_OP(pn2->pn_op, atomIndex);
+ }
+ }
+ break;
+ case TOK_LB:
+#if JS_HAS_LVALUE_RETURN
+ case TOK_LP:
+#endif
+ if (js_Emit1(cx, cg, JSOP_SETELEM) < 0)
+ return JS_FALSE;
+ break;
+#if JS_HAS_DESTRUCTURING
+ case TOK_RB:
+ case TOK_RC:
+ if (!EmitDestructuringOps(cx, cg, JSOP_SETNAME, pn2))
+ return JS_FALSE;
+ break;
+#endif
+#if JS_HAS_XML_SUPPORT
+ case TOK_UNARYOP:
+ if (js_Emit1(cx, cg, JSOP_SETXMLNAME) < 0)
+ return JS_FALSE;
+ break;
+#endif
+ default:
+ JS_ASSERT(0);
+ }
+ break;
+
+ case TOK_HOOK:
+ /* Emit the condition, then branch if false to the else part. */
+ if (!js_EmitTree(cx, cg, pn->pn_kid1))
+ return JS_FALSE;
+ noteIndex = js_NewSrcNote(cx, cg, SRC_COND);
+ if (noteIndex < 0)
+ return JS_FALSE;
+ beq = EmitJump(cx, cg, JSOP_IFEQ, 0);
+ if (beq < 0 || !js_EmitTree(cx, cg, pn->pn_kid2))
+ return JS_FALSE;
+
+ /* Jump around else, fixup the branch, emit else, fixup jump. */
+ jmp = EmitJump(cx, cg, JSOP_GOTO, 0);
+ if (jmp < 0)
+ return JS_FALSE;
+ CHECK_AND_SET_JUMP_OFFSET_AT(cx, cg, beq);
+
+ /*
+ * Because each branch pushes a single value, but our stack budgeting
+ * analysis ignores branches, we now have to adjust cg->stackDepth to
+ * ignore the value pushed by the first branch. Execution will follow
+ * only one path, so we must decrement cg->stackDepth.
+ *
+ * Failing to do this will foil code, such as the try/catch/finally
+ * exception handling code generator, that samples cg->stackDepth for
+ * use at runtime (JSOP_SETSP), or in let expression and block code
+ * generation, which must use the stack depth to compute local stack
+ * indexes correctly.
+ */
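+        /*
+         * For example, in 'c ? a : b' each arm pushes one value but only one
+         * arm executes, so without this decrement the model would count two
+         * pushes for a single result.
+         */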
+ JS_ASSERT(cg->stackDepth > 0);
+ cg->stackDepth--;
+ if (!js_EmitTree(cx, cg, pn->pn_kid3))
+ return JS_FALSE;
+ CHECK_AND_SET_JUMP_OFFSET_AT(cx, cg, jmp);
+ if (!js_SetSrcNoteOffset(cx, cg, noteIndex, 0, jmp - beq))
+ return JS_FALSE;
+ break;
+
+ case TOK_OR:
+ case TOK_AND:
+ /*
+ * JSOP_OR converts the operand on the stack to boolean, and if true,
+ * leaves the original operand value on the stack and jumps; otherwise
+ * it pops and falls into the next bytecode, which evaluates the right
+ * operand. The jump goes around the right operand evaluation.
+ *
+ * JSOP_AND converts the operand on the stack to boolean, and if false,
+ * leaves the original operand value on the stack and jumps; otherwise
+ * it pops and falls into the right operand's bytecode.
+ *
+ * Avoid tail recursion for long ||...|| expressions and long &&...&&
+ * expressions or long mixtures of ||'s and &&'s that can easily blow
+ * the stack, by forward-linking and then backpatching all the JSOP_OR
+ * and JSOP_AND bytecodes' immediate jump-offset operands.
+ */
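+        /*
+         * For example, 'a || b || c' emits the code for a, a backpatch jump,
+         * the code for b, another backpatch jump, then the code for c; the
+         * loop below rewrites each backpatch as JSOP_OR with its offset
+         * pointing just past c.
+         */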
+ pn3 = pn;
+ if (!js_EmitTree(cx, cg, pn->pn_left))
+ return JS_FALSE;
+ top = EmitJump(cx, cg, JSOP_BACKPATCH_POP, 0);
+ if (top < 0)
+ return JS_FALSE;
+ jmp = top;
+ pn2 = pn->pn_right;
+ while (pn2->pn_type == TOK_OR || pn2->pn_type == TOK_AND) {
+ pn = pn2;
+ if (!js_EmitTree(cx, cg, pn->pn_left))
+ return JS_FALSE;
+ off = EmitJump(cx, cg, JSOP_BACKPATCH_POP, 0);
+ if (off < 0)
+ return JS_FALSE;
+ if (!SetBackPatchDelta(cx, cg, CG_CODE(cg, jmp), off - jmp))
+ return JS_FALSE;
+ jmp = off;
+ pn2 = pn->pn_right;
+ }
+ if (!js_EmitTree(cx, cg, pn2))
+ return JS_FALSE;
+ off = CG_OFFSET(cg);
+ do {
+ pc = CG_CODE(cg, top);
+ tmp = GetJumpOffset(cg, pc);
+ CHECK_AND_SET_JUMP_OFFSET(cx, cg, pc, off - top);
+ *pc = pn3->pn_op;
+ top += tmp;
+ } while ((pn3 = pn3->pn_right) != pn2);
+ break;
+
+ case TOK_BITOR:
+ case TOK_BITXOR:
+ case TOK_BITAND:
+ case TOK_EQOP:
+ case TOK_RELOP:
+ case TOK_IN:
+ case TOK_INSTANCEOF:
+ case TOK_SHOP:
+ case TOK_PLUS:
+ case TOK_MINUS:
+ case TOK_STAR:
+ case TOK_DIVOP:
+ if (pn->pn_arity == PN_LIST) {
+ /* Left-associative operator chain: avoid too much recursion. */
+ pn2 = pn->pn_head;
+ if (!js_EmitTree(cx, cg, pn2))
+ return JS_FALSE;
+ op = pn->pn_op;
+ while ((pn2 = pn2->pn_next) != NULL) {
+ if (!js_EmitTree(cx, cg, pn2))
+ return JS_FALSE;
+ if (js_Emit1(cx, cg, op) < 0)
+ return JS_FALSE;
+ }
+ } else {
+#if JS_HAS_XML_SUPPORT
+ uintN oldflags;
+
+ case TOK_DBLCOLON:
+ if (pn->pn_arity == PN_NAME) {
+ if (!js_EmitTree(cx, cg, pn->pn_expr))
+ return JS_FALSE;
+ if (!EmitAtomOp(cx, pn, pn->pn_op, cg))
+ return JS_FALSE;
+ break;
+ }
+
+ /*
+ * Binary :: has a right operand that brackets arbitrary code,
+ * possibly including a let (a = b) ... expression. We must clear
+ * TCF_IN_FOR_INIT to avoid mis-compiling such beasts.
+ */
+ oldflags = cg->treeContext.flags;
+ cg->treeContext.flags &= ~TCF_IN_FOR_INIT;
+#endif
+
+ /* Binary operators that evaluate both operands unconditionally. */
+ if (!js_EmitTree(cx, cg, pn->pn_left))
+ return JS_FALSE;
+ if (!js_EmitTree(cx, cg, pn->pn_right))
+ return JS_FALSE;
+#if JS_HAS_XML_SUPPORT
+ cg->treeContext.flags |= oldflags & TCF_IN_FOR_INIT;
+#endif
+ if (js_Emit1(cx, cg, pn->pn_op) < 0)
+ return JS_FALSE;
+ }
+ break;
+
+ case TOK_THROW:
+#if JS_HAS_XML_SUPPORT
+ case TOK_AT:
+ case TOK_DEFAULT:
+ JS_ASSERT(pn->pn_arity == PN_UNARY);
+ /* FALL THROUGH */
+#endif
+ case TOK_UNARYOP:
+ {
+ uintN oldflags;
+
+ /* Unary op, including unary +/-. */
+ pn2 = pn->pn_kid;
+ op = pn->pn_op;
+ if (op == JSOP_TYPEOF) {
+ for (pn3 = pn2; pn3->pn_type == TOK_RP; pn3 = pn3->pn_kid)
+ continue;
+ if (pn3->pn_type != TOK_NAME)
+ op = JSOP_TYPEOFEXPR;
+ }
+ oldflags = cg->treeContext.flags;
+ cg->treeContext.flags &= ~TCF_IN_FOR_INIT;
+ if (!js_EmitTree(cx, cg, pn2))
+ return JS_FALSE;
+ cg->treeContext.flags |= oldflags & TCF_IN_FOR_INIT;
+#if JS_HAS_XML_SUPPORT
+ if (op == JSOP_XMLNAME &&
+ js_NewSrcNote2(cx, cg, SRC_PCBASE,
+ CG_OFFSET(cg) - pn2->pn_offset) < 0) {
+ return JS_FALSE;
+ }
+#endif
+ if (js_Emit1(cx, cg, op) < 0)
+ return JS_FALSE;
+ break;
+ }
+
+ case TOK_INC:
+ case TOK_DEC:
+ {
+ intN depth;
+
+ /* Emit lvalue-specialized code for ++/-- operators. */
+ pn2 = pn->pn_kid;
+ JS_ASSERT(pn2->pn_type != TOK_RP);
+ op = pn->pn_op;
+ depth = cg->stackDepth;
+ switch (pn2->pn_type) {
+ case TOK_NAME:
+ pn2->pn_op = op;
+ if (!BindNameToSlot(cx, &cg->treeContext, pn2, JS_FALSE))
+ return JS_FALSE;
+ op = pn2->pn_op;
+ if (pn2->pn_slot >= 0) {
+ if (pn2->pn_attrs & JSPROP_READONLY) {
+ /* Incrementing a declared const: just get its value. */
+ op = ((js_CodeSpec[op].format & JOF_TYPEMASK) == JOF_CONST)
+ ? JSOP_GETGVAR
+ : JSOP_GETVAR;
+ }
+ atomIndex = (jsatomid) pn2->pn_slot;
+ EMIT_UINT16_IMM_OP(op, atomIndex);
+ } else {
+ if (!EmitAtomOp(cx, pn2, op, cg))
+ return JS_FALSE;
+ }
+ break;
+ case TOK_DOT:
+ if (!EmitPropOp(cx, pn2, op, cg))
+ return JS_FALSE;
+ ++depth;
+ break;
+ case TOK_LB:
+ if (!EmitElemOp(cx, pn2, op, cg))
+ return JS_FALSE;
+ depth += 2;
+ break;
+#if JS_HAS_LVALUE_RETURN
+ case TOK_LP:
+ if (!js_EmitTree(cx, cg, pn2))
+ return JS_FALSE;
+ depth = cg->stackDepth;
+ if (js_NewSrcNote2(cx, cg, SRC_PCBASE,
+ CG_OFFSET(cg) - pn2->pn_offset) < 0) {
+ return JS_FALSE;
+ }
+ if (js_Emit1(cx, cg, op) < 0)
+ return JS_FALSE;
+ break;
+#endif
+#if JS_HAS_XML_SUPPORT
+ case TOK_UNARYOP:
+ JS_ASSERT(pn2->pn_op == JSOP_SETXMLNAME);
+ if (!js_EmitTree(cx, cg, pn2->pn_kid))
+ return JS_FALSE;
+ if (js_Emit1(cx, cg, JSOP_BINDXMLNAME) < 0)
+ return JS_FALSE;
+ depth = cg->stackDepth;
+ if (js_Emit1(cx, cg, op) < 0)
+ return JS_FALSE;
+ break;
+#endif
+ default:
+ JS_ASSERT(0);
+ }
+
+ /*
+ * Allocate another stack slot for GC protection in case the initial
+ * value being post-incremented or -decremented is not a number, but
+ * converts to a jsdouble. In the TOK_NAME cases, op has 0 operand
+ * uses and 1 definition, so we don't need an extra stack slot -- we
+ * can use the one allocated for the def.
+ */
+ if (pn2->pn_type != TOK_NAME &&
+ (js_CodeSpec[op].format & JOF_POST) &&
+ (uintN)depth == cg->maxStackDepth) {
+ ++cg->maxStackDepth;
+ }
+ break;
+ }
+
+ case TOK_DELETE:
+ /*
+ * Under ECMA 3, deleting a non-reference returns true -- but alas we
+ * must evaluate the operand if it appears it might have side effects.
+ */
+ pn2 = pn->pn_kid;
+ switch (pn2->pn_type) {
+ case TOK_NAME:
+ pn2->pn_op = JSOP_DELNAME;
+ if (!BindNameToSlot(cx, &cg->treeContext, pn2, JS_FALSE))
+ return JS_FALSE;
+ op = pn2->pn_op;
+ if (op == JSOP_FALSE) {
+ if (js_Emit1(cx, cg, op) < 0)
+ return JS_FALSE;
+ } else {
+ if (!EmitAtomOp(cx, pn2, op, cg))
+ return JS_FALSE;
+ }
+ break;
+ case TOK_DOT:
+ if (!EmitPropOp(cx, pn2, JSOP_DELPROP, cg))
+ return JS_FALSE;
+ break;
+#if JS_HAS_XML_SUPPORT
+ case TOK_DBLDOT:
+ if (!EmitElemOp(cx, pn2, JSOP_DELDESC, cg))
+ return JS_FALSE;
+ break;
+#endif
+#if JS_HAS_LVALUE_RETURN
+ case TOK_LP:
+ if (pn2->pn_op != JSOP_SETCALL) {
+ JS_ASSERT(pn2->pn_op == JSOP_CALL || pn2->pn_op == JSOP_EVAL);
+ pn2->pn_op = JSOP_SETCALL;
+ }
+ top = CG_OFFSET(cg);
+ if (!js_EmitTree(cx, cg, pn2))
+ return JS_FALSE;
+ if (js_NewSrcNote2(cx, cg, SRC_PCBASE, CG_OFFSET(cg) - top) < 0)
+ return JS_FALSE;
+ if (js_Emit1(cx, cg, JSOP_DELELEM) < 0)
+ return JS_FALSE;
+ break;
+#endif
+ case TOK_LB:
+ if (!EmitElemOp(cx, pn2, JSOP_DELELEM, cg))
+ return JS_FALSE;
+ break;
+ default:
+ /*
+ * If useless, just emit JSOP_TRUE; otherwise convert delete foo()
+ * to foo(), true (a comma expression, requiring SRC_PCDELTA).
+ */
+ useful = JS_FALSE;
+ if (!CheckSideEffects(cx, &cg->treeContext, pn2, &useful))
+ return JS_FALSE;
+ if (!useful) {
+ off = noteIndex = -1;
+ } else {
+ if (!js_EmitTree(cx, cg, pn2))
+ return JS_FALSE;
+ off = CG_OFFSET(cg);
+ noteIndex = js_NewSrcNote2(cx, cg, SRC_PCDELTA, 0);
+ if (noteIndex < 0 || js_Emit1(cx, cg, JSOP_POP) < 0)
+ return JS_FALSE;
+ }
+ if (js_Emit1(cx, cg, JSOP_TRUE) < 0)
+ return JS_FALSE;
+ if (noteIndex >= 0) {
+ tmp = CG_OFFSET(cg);
+ if (!js_SetSrcNoteOffset(cx, cg, (uintN)noteIndex, 0, tmp-off))
+ return JS_FALSE;
+ }
+ }
+ break;
+
+#if JS_HAS_XML_SUPPORT
+ case TOK_FILTER:
+ if (!js_EmitTree(cx, cg, pn->pn_left))
+ return JS_FALSE;
+ jmp = js_Emit3(cx, cg, JSOP_FILTER, 0, 0);
+ if (jmp < 0)
+ return JS_FALSE;
+ if (!js_EmitTree(cx, cg, pn->pn_right))
+ return JS_FALSE;
+ if (js_Emit1(cx, cg, JSOP_ENDFILTER) < 0)
+ return JS_FALSE;
+ CHECK_AND_SET_JUMP_OFFSET_AT(cx, cg, jmp);
+ break;
+#endif
+
+ case TOK_DOT:
+ /*
+ * Pop a stack operand, convert it to object, get a property named by
+ * this bytecode's immediate-indexed atom operand, and push its value
+ * (not a reference to it). This bytecode sets the virtual machine's
+ * "obj" register to the left operand's ToObject conversion result,
+ * for use by JSOP_PUSHOBJ.
+ */
+ ok = EmitPropOp(cx, pn, pn->pn_op, cg);
+ break;
+
+ case TOK_LB:
+#if JS_HAS_XML_SUPPORT
+ case TOK_DBLDOT:
+#endif
+ /*
+ * Pop two operands, convert the left one to object and the right one
+ * to property name (atom or tagged int), get the named property, and
+ * push its value. Set the "obj" register to the result of ToObject
+ * on the left operand.
+ */
+ ok = EmitElemOp(cx, pn, pn->pn_op, cg);
+ break;
+
+ case TOK_NEW:
+ case TOK_LP:
+ {
+ uintN oldflags;
+
+ /*
+ * Emit function call or operator new (constructor call) code.
+ * First, emit code for the left operand to evaluate the callable or
+ * constructable object expression.
+ *
+ * For E4X, if this expression is a dotted member reference, select
+ * JSOP_GETMETHOD instead of JSOP_GETPROP. ECMA-357 separates XML
+ * method lookup from the normal property id lookup done for native
+ * objects.
+ */
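+        /*
+         * Editor's illustrative note (not part of the original source): for
+         * a plain call f(x, y), the code below emits roughly
+         *
+         *   <code for f>               (sets the "obj" register)
+         *   JSOP_PUSHOBJ
+         *   <code for x>  <code for y>
+         *   pn->pn_op, argc == 2       (JSOP_CALL here, JSOP_NEW for TOK_NEW)
+         *
+         * where argc is pn->pn_count - 1, i.e. the list length minus the
+         * callee.
+         */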
+ pn2 = pn->pn_head;
+#if JS_HAS_XML_SUPPORT
+ if (pn2->pn_type == TOK_DOT && pn2->pn_op != JSOP_GETMETHOD) {
+ JS_ASSERT(pn2->pn_op == JSOP_GETPROP);
+ pn2->pn_op = JSOP_GETMETHOD;
+ pn2->pn_attrs |= JSPROP_IMPLICIT_FUNCTION_NAMESPACE;
+ }
+#endif
+ if (!js_EmitTree(cx, cg, pn2))
+ return JS_FALSE;
+
+ /*
+ * Push the virtual machine's "obj" register, which was set by a
+ * name, property, or element get (or set) bytecode.
+ */
+ if (js_Emit1(cx, cg, JSOP_PUSHOBJ) < 0)
+ return JS_FALSE;
+
+ /* Remember start of callable-object bytecode for decompilation hint. */
+ off = top;
+
+ /*
+ * Emit code for each argument in order, then emit the JSOP_*CALL or
+ * JSOP_NEW bytecode with a two-byte immediate telling how many args
+ * were pushed on the operand stack.
+ */
+ oldflags = cg->treeContext.flags;
+ cg->treeContext.flags &= ~TCF_IN_FOR_INIT;
+ for (pn2 = pn2->pn_next; pn2; pn2 = pn2->pn_next) {
+ if (!js_EmitTree(cx, cg, pn2))
+ return JS_FALSE;
+ }
+ cg->treeContext.flags |= oldflags & TCF_IN_FOR_INIT;
+ if (js_NewSrcNote2(cx, cg, SRC_PCBASE, CG_OFFSET(cg) - off) < 0)
+ return JS_FALSE;
+
+ argc = pn->pn_count - 1;
+ if (js_Emit3(cx, cg, pn->pn_op, ARGC_HI(argc), ARGC_LO(argc)) < 0)
+ return JS_FALSE;
+ break;
+ }
+
+ case TOK_LEXICALSCOPE:
+ {
+ JSObject *obj;
+ jsint count;
+
+ atom = pn->pn_atom;
+ obj = ATOM_TO_OBJECT(atom);
+ js_PushBlockScope(&cg->treeContext, &stmtInfo, atom, CG_OFFSET(cg));
+
+ OBJ_SET_BLOCK_DEPTH(cx, obj, cg->stackDepth);
+ count = OBJ_BLOCK_COUNT(cx, obj);
+ cg->stackDepth += count;
+ if ((uintN)cg->stackDepth > cg->maxStackDepth)
+ cg->maxStackDepth = cg->stackDepth;
+
+ /*
+ * If this lexical scope is not for a catch block, let block or let
+ * expression, or any kind of for loop (where the scope starts in the
+ * head after the first part if for (;;), else in the body if for-in);
+ * and if our container is top-level but not a function body, or else
+ * a block statement; then emit a SRC_BRACE note. All other container
+ * statements get braces by default from the decompiler.
+ */
+ noteIndex = -1;
+ type = pn->pn_expr->pn_type;
+ if (type != TOK_CATCH && type != TOK_LET && type != TOK_FOR &&
+ (!(stmt = stmtInfo.down)
+ ? !(cg->treeContext.flags & TCF_IN_FUNCTION)
+ : stmt->type == STMT_BLOCK)) {
+#if defined DEBUG_brendan || defined DEBUG_mrbkap
+ /* There must be no source note already output for the next op. */
+ JS_ASSERT(CG_NOTE_COUNT(cg) == 0 ||
+ CG_LAST_NOTE_OFFSET(cg) != CG_OFFSET(cg) ||
+ !GettableNoteForNextOp(cg));
+#endif
+ noteIndex = js_NewSrcNote2(cx, cg, SRC_BRACE, 0);
+ if (noteIndex < 0)
+ return JS_FALSE;
+ }
+
+ ale = js_IndexAtom(cx, atom, &cg->atomList);
+ if (!ale)
+ return JS_FALSE;
+ JS_ASSERT(CG_OFFSET(cg) == top);
+ EMIT_ATOM_INDEX_OP(JSOP_ENTERBLOCK, ALE_INDEX(ale));
+
+ if (!js_EmitTree(cx, cg, pn->pn_expr))
+ return JS_FALSE;
+
+ op = pn->pn_op;
+ if (op == JSOP_LEAVEBLOCKEXPR) {
+ if (js_NewSrcNote2(cx, cg, SRC_PCBASE, CG_OFFSET(cg) - top) < 0)
+ return JS_FALSE;
+ } else {
+ if (noteIndex >= 0 &&
+ !js_SetSrcNoteOffset(cx, cg, (uintN)noteIndex, 0,
+ CG_OFFSET(cg) - top)) {
+ return JS_FALSE;
+ }
+ }
+
+ /* Emit the JSOP_LEAVEBLOCK or JSOP_LEAVEBLOCKEXPR opcode. */
+ EMIT_UINT16_IMM_OP(op, count);
+ cg->stackDepth -= count;
+
+ ok = js_PopStatementCG(cx, cg);
+ break;
+ }
+
+#if JS_HAS_BLOCK_SCOPE
+ case TOK_LET:
+ /* Let statements have their variable declarations on the left. */
+ if (pn->pn_arity == PN_BINARY) {
+ pn2 = pn->pn_right;
+ pn = pn->pn_left;
+ } else {
+ pn2 = NULL;
+ }
+
+ /* Non-null pn2 means that pn is the variable list from a let head. */
+ JS_ASSERT(pn->pn_arity == PN_LIST);
+ if (!EmitVariables(cx, cg, pn, pn2 != NULL, &noteIndex))
+ return JS_FALSE;
+
+ /* Thus non-null pn2 is the body of the let block or expression. */
+ tmp = CG_OFFSET(cg);
+ if (pn2 && !js_EmitTree(cx, cg, pn2))
+ return JS_FALSE;
+
+ if (noteIndex >= 0 &&
+ !js_SetSrcNoteOffset(cx, cg, (uintN)noteIndex, 0,
+ CG_OFFSET(cg) - tmp)) {
+ return JS_FALSE;
+ }
+ break;
+#endif /* JS_HAS_BLOCK_SCOPE */
+
+#if JS_HAS_GENERATORS
+ case TOK_ARRAYPUSH:
+ /*
+ * The array object's stack index is in cg->arrayCompSlot. See below
+ * under the array initialiser code generator for array comprehension
+ * special casing.
+ */
+ if (!js_EmitTree(cx, cg, pn->pn_kid))
+ return JS_FALSE;
+ EMIT_UINT16_IMM_OP(pn->pn_op, cg->arrayCompSlot);
+ break;
+#endif
+
+ case TOK_RB:
+#if JS_HAS_GENERATORS
+ case TOK_ARRAYCOMP:
+#endif
+ /*
+ * Emit code for [a, b, c] of the form:
+ * t = new Array; t[0] = a; t[1] = b; t[2] = c; t;
+ * but use a stack slot for t and avoid dup'ing and popping it via
+ * the JSOP_NEWINIT and JSOP_INITELEM bytecodes.
+ */
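+        /*
+         * Editor's illustrative note (not part of the original source): for
+         * [a, b, c] the code below therefore emits, roughly,
+         *
+         *   JSOP_NAME "Array"  JSOP_PUSHOBJ  JSOP_NEWINIT
+         *   <index 0> <code for a> JSOP_INITELEM
+         *   <index 1> <code for b> JSOP_INITELEM
+         *   <index 2> <code for c> JSOP_INITELEM
+         *   JSOP_ENDINIT
+         *
+         * with each <index i> produced by EmitNumberOp and holes in sparse
+         * initialisers filled by JSOP_PUSH.
+         */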
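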
+ ale = js_IndexAtom(cx, CLASS_ATOM(cx, Array), &cg->atomList);
+ if (!ale)
+ return JS_FALSE;
+ EMIT_ATOM_INDEX_OP(JSOP_NAME, ALE_INDEX(ale));
+ if (js_Emit1(cx, cg, JSOP_PUSHOBJ) < 0)
+ return JS_FALSE;
+ if (js_Emit1(cx, cg, JSOP_NEWINIT) < 0)
+ return JS_FALSE;
+
+ pn2 = pn->pn_head;
+#if JS_HAS_SHARP_VARS
+ if (pn2 && pn2->pn_type == TOK_DEFSHARP) {
+ EMIT_UINT16_IMM_OP(JSOP_DEFSHARP, (jsatomid)pn2->pn_num);
+ pn2 = pn2->pn_next;
+ }
+#endif
+
+#if JS_HAS_GENERATORS
+ if (pn->pn_type == TOK_ARRAYCOMP) {
+ uintN saveSlot;
+
+ /*
+ * Pass the new array's stack index to the TOK_ARRAYPUSH case by
+             * storing it in cg->arrayCompSlot, then simply traverse the TOK_FOR
+ * node and its kids under pn2 to generate this comprehension.
+ */
+ JS_ASSERT(cg->stackDepth > 0);
+ saveSlot = cg->arrayCompSlot;
+ cg->arrayCompSlot = (uint32) (cg->stackDepth - 1);
+ if (!js_EmitTree(cx, cg, pn2))
+ return JS_FALSE;
+ cg->arrayCompSlot = saveSlot;
+
+ /* Emit the usual op needed for decompilation. */
+ if (js_Emit1(cx, cg, JSOP_ENDINIT) < 0)
+ return JS_FALSE;
+ break;
+ }
+#endif /* JS_HAS_GENERATORS */
+
+ for (atomIndex = 0; pn2; atomIndex++, pn2 = pn2->pn_next) {
+ if (!EmitNumberOp(cx, atomIndex, cg))
+ return JS_FALSE;
+
+ /* FIXME 260106: holes in a sparse initializer are void-filled. */
+ if (pn2->pn_type == TOK_COMMA) {
+ if (js_Emit1(cx, cg, JSOP_PUSH) < 0)
+ return JS_FALSE;
+ } else {
+ if (!js_EmitTree(cx, cg, pn2))
+ return JS_FALSE;
+ }
+
+ if (js_Emit1(cx, cg, JSOP_INITELEM) < 0)
+ return JS_FALSE;
+ }
+
+ if (pn->pn_extra & PNX_ENDCOMMA) {
+ /* Emit a source note so we know to decompile an extra comma. */
+ if (js_NewSrcNote(cx, cg, SRC_CONTINUE) < 0)
+ return JS_FALSE;
+ }
+
+ /* Emit an op for sharp array cleanup and decompilation. */
+ if (js_Emit1(cx, cg, JSOP_ENDINIT) < 0)
+ return JS_FALSE;
+ break;
+
+ case TOK_RC:
+ /*
+ * Emit code for {p:a, '%q':b, 2:c} of the form:
+ * t = new Object; t.p = a; t['%q'] = b; t[2] = c; t;
+ * but use a stack slot for t and avoid dup'ing and popping it via
+ * the JSOP_NEWINIT and JSOP_INITELEM bytecodes.
+ */
+ ale = js_IndexAtom(cx, CLASS_ATOM(cx, Object), &cg->atomList);
+ if (!ale)
+ return JS_FALSE;
+ EMIT_ATOM_INDEX_OP(JSOP_NAME, ALE_INDEX(ale));
+
+ if (js_Emit1(cx, cg, JSOP_PUSHOBJ) < 0)
+ return JS_FALSE;
+ if (js_Emit1(cx, cg, JSOP_NEWINIT) < 0)
+ return JS_FALSE;
+
+ pn2 = pn->pn_head;
+#if JS_HAS_SHARP_VARS
+ if (pn2 && pn2->pn_type == TOK_DEFSHARP) {
+ EMIT_UINT16_IMM_OP(JSOP_DEFSHARP, (jsatomid)pn2->pn_num);
+ pn2 = pn2->pn_next;
+ }
+#endif
+
+ for (; pn2; pn2 = pn2->pn_next) {
+ /* Emit an index for t[2], else map an atom for t.p or t['%q']. */
+ pn3 = pn2->pn_left;
+ switch (pn3->pn_type) {
+ case TOK_NUMBER:
+ if (!EmitNumberOp(cx, pn3->pn_dval, cg))
+ return JS_FALSE;
+ break;
+ case TOK_NAME:
+ case TOK_STRING:
+ ale = js_IndexAtom(cx, pn3->pn_atom, &cg->atomList);
+ if (!ale)
+ return JS_FALSE;
+ break;
+ default:
+ JS_ASSERT(0);
+ }
+
+ /* Emit code for the property initializer. */
+ if (!js_EmitTree(cx, cg, pn2->pn_right))
+ return JS_FALSE;
+
+#if JS_HAS_GETTER_SETTER
+ op = pn2->pn_op;
+ if (op == JSOP_GETTER || op == JSOP_SETTER) {
+ if (pn3->pn_type != TOK_NUMBER &&
+ ALE_INDEX(ale) >= JS_BIT(16)) {
+ ReportStatementTooLarge(cx, cg);
+ return JS_FALSE;
+ }
+ if (js_Emit1(cx, cg, op) < 0)
+ return JS_FALSE;
+ }
+#endif
+ /* Annotate JSOP_INITELEM so we decompile 2:c and not just c. */
+ if (pn3->pn_type == TOK_NUMBER) {
+ if (js_NewSrcNote(cx, cg, SRC_INITPROP) < 0)
+ return JS_FALSE;
+ if (js_Emit1(cx, cg, JSOP_INITELEM) < 0)
+ return JS_FALSE;
+ } else {
+ EMIT_ATOM_INDEX_OP(JSOP_INITPROP, ALE_INDEX(ale));
+ }
+ }
+
+ /* Emit an op for sharpArray cleanup and decompilation. */
+ if (js_Emit1(cx, cg, JSOP_ENDINIT) < 0)
+ return JS_FALSE;
+ break;
+
+#if JS_HAS_SHARP_VARS
+ case TOK_DEFSHARP:
+ if (!js_EmitTree(cx, cg, pn->pn_kid))
+ return JS_FALSE;
+ EMIT_UINT16_IMM_OP(JSOP_DEFSHARP, (jsatomid) pn->pn_num);
+ break;
+
+ case TOK_USESHARP:
+ EMIT_UINT16_IMM_OP(JSOP_USESHARP, (jsatomid) pn->pn_num);
+ break;
+#endif /* JS_HAS_SHARP_VARS */
+
+ case TOK_RP:
+ {
+ uintN oldflags;
+
+ /*
+ * The node for (e) has e as its kid, enabling users who want to nest
+ * assignment expressions in conditions to avoid the error correction
+ * done by Condition (from x = y to x == y) by double-parenthesizing.
+ */
+ oldflags = cg->treeContext.flags;
+ cg->treeContext.flags &= ~TCF_IN_FOR_INIT;
+ if (!js_EmitTree(cx, cg, pn->pn_kid))
+ return JS_FALSE;
+ cg->treeContext.flags |= oldflags & TCF_IN_FOR_INIT;
+ if (js_Emit1(cx, cg, JSOP_GROUP) < 0)
+ return JS_FALSE;
+ break;
+ }
+
+ case TOK_NAME:
+ if (!BindNameToSlot(cx, &cg->treeContext, pn, JS_FALSE))
+ return JS_FALSE;
+ op = pn->pn_op;
+ if (op == JSOP_ARGUMENTS) {
+ if (js_Emit1(cx, cg, op) < 0)
+ return JS_FALSE;
+ break;
+ }
+ if (pn->pn_slot >= 0) {
+ atomIndex = (jsatomid) pn->pn_slot;
+ EMIT_UINT16_IMM_OP(op, atomIndex);
+ break;
+ }
+ /* FALL THROUGH */
+
+#if JS_HAS_XML_SUPPORT
+ case TOK_XMLATTR:
+ case TOK_XMLSPACE:
+ case TOK_XMLTEXT:
+ case TOK_XMLCDATA:
+ case TOK_XMLCOMMENT:
+#endif
+ case TOK_STRING:
+ case TOK_OBJECT:
+ /*
+ * The scanner and parser associate JSOP_NAME with TOK_NAME, although
+ * other bytecodes may result instead (JSOP_BINDNAME/JSOP_SETNAME,
+ * JSOP_FORNAME, etc.). Among JSOP_*NAME* variants, only JSOP_NAME
+ * may generate the first operand of a call or new expression, so only
+ * it sets the "obj" virtual machine register to the object along the
+ * scope chain in which the name was found.
+ *
+ * Token types for STRING and OBJECT have corresponding bytecode ops
+ * in pn_op and emit the same format as NAME, so they share this code.
+ */
+ ok = EmitAtomOp(cx, pn, pn->pn_op, cg);
+ break;
+
+ case TOK_NUMBER:
+ ok = EmitNumberOp(cx, pn->pn_dval, cg);
+ break;
+
+#if JS_HAS_XML_SUPPORT
+ case TOK_ANYNAME:
+#endif
+ case TOK_PRIMARY:
+ if (js_Emit1(cx, cg, pn->pn_op) < 0)
+ return JS_FALSE;
+ break;
+
+#if JS_HAS_DEBUGGER_KEYWORD
+ case TOK_DEBUGGER:
+ if (js_Emit1(cx, cg, JSOP_DEBUGGER) < 0)
+ return JS_FALSE;
+ break;
+#endif /* JS_HAS_DEBUGGER_KEYWORD */
+
+#if JS_HAS_XML_SUPPORT
+ case TOK_XMLELEM:
+ case TOK_XMLLIST:
+ if (pn->pn_op == JSOP_XMLOBJECT) {
+ ok = EmitAtomOp(cx, pn, pn->pn_op, cg);
+ break;
+ }
+
+ JS_ASSERT(pn->pn_type == TOK_XMLLIST || pn->pn_count != 0);
+ switch (pn->pn_head ? pn->pn_head->pn_type : TOK_XMLLIST) {
+ case TOK_XMLETAGO:
+ JS_ASSERT(0);
+ /* FALL THROUGH */
+ case TOK_XMLPTAGC:
+ case TOK_XMLSTAGO:
+ break;
+ default:
+ if (js_Emit1(cx, cg, JSOP_STARTXML) < 0)
+ return JS_FALSE;
+ }
+
+ for (pn2 = pn->pn_head; pn2; pn2 = pn2->pn_next) {
+ if (pn2->pn_type == TOK_LC &&
+ js_Emit1(cx, cg, JSOP_STARTXMLEXPR) < 0) {
+ return JS_FALSE;
+ }
+ if (!js_EmitTree(cx, cg, pn2))
+ return JS_FALSE;
+ if (pn2 != pn->pn_head && js_Emit1(cx, cg, JSOP_ADD) < 0)
+ return JS_FALSE;
+ }
+
+ if (pn->pn_extra & PNX_XMLROOT) {
+ if (pn->pn_count == 0) {
+ JS_ASSERT(pn->pn_type == TOK_XMLLIST);
+ atom = cx->runtime->atomState.emptyAtom;
+ ale = js_IndexAtom(cx, atom, &cg->atomList);
+ if (!ale)
+ return JS_FALSE;
+ EMIT_ATOM_INDEX_OP(JSOP_STRING, ALE_INDEX(ale));
+ }
+ if (js_Emit1(cx, cg, pn->pn_op) < 0)
+ return JS_FALSE;
+ }
+#ifdef DEBUG
+ else
+ JS_ASSERT(pn->pn_count != 0);
+#endif
+ break;
+
+ case TOK_XMLPTAGC:
+ if (pn->pn_op == JSOP_XMLOBJECT) {
+ ok = EmitAtomOp(cx, pn, pn->pn_op, cg);
+ break;
+ }
+ /* FALL THROUGH */
+
+ case TOK_XMLSTAGO:
+ case TOK_XMLETAGO:
+ {
+ uint32 i;
+
+ if (js_Emit1(cx, cg, JSOP_STARTXML) < 0)
+ return JS_FALSE;
+
+ ale = js_IndexAtom(cx,
+ (pn->pn_type == TOK_XMLETAGO)
+ ? cx->runtime->atomState.etagoAtom
+ : cx->runtime->atomState.stagoAtom,
+ &cg->atomList);
+ if (!ale)
+ return JS_FALSE;
+ EMIT_ATOM_INDEX_OP(JSOP_STRING, ALE_INDEX(ale));
+
+ JS_ASSERT(pn->pn_count != 0);
+ pn2 = pn->pn_head;
+ if (pn2->pn_type == TOK_LC && js_Emit1(cx, cg, JSOP_STARTXMLEXPR) < 0)
+ return JS_FALSE;
+ if (!js_EmitTree(cx, cg, pn2))
+ return JS_FALSE;
+ if (js_Emit1(cx, cg, JSOP_ADD) < 0)
+ return JS_FALSE;
+
+ for (pn2 = pn2->pn_next, i = 0; pn2; pn2 = pn2->pn_next, i++) {
+ if (pn2->pn_type == TOK_LC &&
+ js_Emit1(cx, cg, JSOP_STARTXMLEXPR) < 0) {
+ return JS_FALSE;
+ }
+ if (!js_EmitTree(cx, cg, pn2))
+ return JS_FALSE;
+ if ((i & 1) && pn2->pn_type == TOK_LC) {
+ if (js_Emit1(cx, cg, JSOP_TOATTRVAL) < 0)
+ return JS_FALSE;
+ }
+ if (js_Emit1(cx, cg,
+ (i & 1) ? JSOP_ADDATTRVAL : JSOP_ADDATTRNAME) < 0) {
+ return JS_FALSE;
+ }
+ }
+
+ ale = js_IndexAtom(cx,
+ (pn->pn_type == TOK_XMLPTAGC)
+ ? cx->runtime->atomState.ptagcAtom
+ : cx->runtime->atomState.tagcAtom,
+ &cg->atomList);
+ if (!ale)
+ return JS_FALSE;
+ EMIT_ATOM_INDEX_OP(JSOP_STRING, ALE_INDEX(ale));
+ if (js_Emit1(cx, cg, JSOP_ADD) < 0)
+ return JS_FALSE;
+
+ if ((pn->pn_extra & PNX_XMLROOT) && js_Emit1(cx, cg, pn->pn_op) < 0)
+ return JS_FALSE;
+ break;
+ }
+
+ case TOK_XMLNAME:
+ if (pn->pn_arity == PN_LIST) {
+ JS_ASSERT(pn->pn_count != 0);
+ for (pn2 = pn->pn_head; pn2; pn2 = pn2->pn_next) {
+ if (!js_EmitTree(cx, cg, pn2))
+ return JS_FALSE;
+ if (pn2 != pn->pn_head && js_Emit1(cx, cg, JSOP_ADD) < 0)
+ return JS_FALSE;
+ }
+ } else {
+ JS_ASSERT(pn->pn_arity == PN_NULLARY);
+ ok = EmitAtomOp(cx, pn, pn->pn_op, cg);
+ }
+ break;
+
+ case TOK_XMLPI:
+ ale = js_IndexAtom(cx, pn->pn_atom2, &cg->atomList);
+ if (!ale)
+ return JS_FALSE;
+ if (!EmitAtomIndexOp(cx, JSOP_QNAMEPART, ALE_INDEX(ale), cg))
+ return JS_FALSE;
+ if (!EmitAtomOp(cx, pn, JSOP_XMLPI, cg))
+ return JS_FALSE;
+ break;
+#endif /* JS_HAS_XML_SUPPORT */
+
+ default:
+ JS_ASSERT(0);
+ }
+
+ if (ok && --cg->emitLevel == 0 && cg->spanDeps)
+ ok = OptimizeSpanDeps(cx, cg);
+
+ return ok;
+}
+
+/* XXX get rid of offsetBias, it's used only by SRC_FOR and SRC_DECL */
+JS_FRIEND_DATA(JSSrcNoteSpec) js_SrcNoteSpec[] = {
+ {"null", 0, 0, 0},
+ {"if", 0, 0, 0},
+ {"if-else", 2, 0, 1},
+ {"while", 1, 0, 1},
+ {"for", 3, 1, 1},
+ {"continue", 0, 0, 0},
+ {"decl", 1, 1, 1},
+ {"pcdelta", 1, 0, 1},
+ {"assignop", 0, 0, 0},
+ {"cond", 1, 0, 1},
+ {"brace", 1, 0, 1},
+ {"hidden", 0, 0, 0},
+ {"pcbase", 1, 0, -1},
+ {"label", 1, 0, 0},
+ {"labelbrace", 1, 0, 0},
+ {"endbrace", 0, 0, 0},
+ {"break2label", 1, 0, 0},
+ {"cont2label", 1, 0, 0},
+ {"switch", 2, 0, 1},
+ {"funcdef", 1, 0, 0},
+ {"catch", 1, 0, 1},
+ {"extended", -1, 0, 0},
+ {"newline", 0, 0, 0},
+ {"setline", 1, 0, 0},
+ {"xdelta", 0, 0, 0},
+};
+
+static intN
+AllocSrcNote(JSContext *cx, JSCodeGenerator *cg)
+{
+ intN index;
+ JSArenaPool *pool;
+ size_t size;
+
+ index = CG_NOTE_COUNT(cg);
+ if (((uintN)index & CG_NOTE_MASK(cg)) == 0) {
+ pool = cg->notePool;
+ size = SRCNOTE_SIZE(CG_NOTE_MASK(cg) + 1);
+ if (!CG_NOTES(cg)) {
+ /* Allocate the first note array lazily; leave noteMask alone. */
+ JS_ARENA_ALLOCATE_CAST(CG_NOTES(cg), jssrcnote *, pool, size);
+ } else {
+ /* Grow by doubling note array size; update noteMask on success. */
+ JS_ARENA_GROW_CAST(CG_NOTES(cg), jssrcnote *, pool, size, size);
+ if (CG_NOTES(cg))
+ CG_NOTE_MASK(cg) = (CG_NOTE_MASK(cg) << 1) | 1;
+ }
+ if (!CG_NOTES(cg)) {
+ JS_ReportOutOfMemory(cx);
+ return -1;
+ }
+ }
+
+ CG_NOTE_COUNT(cg) = index + 1;
+ return index;
+}
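+
+/*
+ * Editor's illustrative note (not part of the original source): the note
+ * array capacity is always a power of two and CG_NOTE_MASK(cg) is that
+ * capacity minus one.  If, say, the mask is 0xff (256 notes) and index 256
+ * is requested, (256 & 0xff) == 0 takes the growth path above: the arena
+ * block grows by another 256 * sizeof(jssrcnote) bytes and the mask becomes
+ * 0x1ff, doubling the capacity to 512.
+ */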
+
+intN
+js_NewSrcNote(JSContext *cx, JSCodeGenerator *cg, JSSrcNoteType type)
+{
+ intN index, n;
+ jssrcnote *sn;
+ ptrdiff_t offset, delta, xdelta;
+
+ /*
+ * Claim a note slot in CG_NOTES(cg) by growing it if necessary and then
+ * incrementing CG_NOTE_COUNT(cg).
+ */
+ index = AllocSrcNote(cx, cg);
+ if (index < 0)
+ return -1;
+ sn = &CG_NOTES(cg)[index];
+
+ /*
+ * Compute delta from the last annotated bytecode's offset. If it's too
+ * big to fit in sn, allocate one or more xdelta notes and reset sn.
+ */
+ offset = CG_OFFSET(cg);
+ delta = offset - CG_LAST_NOTE_OFFSET(cg);
+ CG_LAST_NOTE_OFFSET(cg) = offset;
+ if (delta >= SN_DELTA_LIMIT) {
+ do {
+ xdelta = JS_MIN(delta, SN_XDELTA_MASK);
+ SN_MAKE_XDELTA(sn, xdelta);
+ delta -= xdelta;
+ index = AllocSrcNote(cx, cg);
+ if (index < 0)
+ return -1;
+ sn = &CG_NOTES(cg)[index];
+ } while (delta >= SN_DELTA_LIMIT);
+ }
+
+ /*
+ * Initialize type and delta, then allocate the minimum number of notes
+ * needed for type's arity. Usually, we won't need more, but if an offset
+ * does take two bytes, js_SetSrcNoteOffset will grow CG_NOTES(cg).
+ */
+ SN_MAKE_NOTE(sn, type, delta);
+ for (n = (intN)js_SrcNoteSpec[type].arity; n > 0; n--) {
+ if (js_NewSrcNote(cx, cg, SRC_NULL) < 0)
+ return -1;
+ }
+ return index;
+}
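+
+/*
+ * Editor's illustrative note (not part of the original source): with the
+ * limits defined in jsemit.h (SN_DELTA_LIMIT == 8, SN_XDELTA_MASK == 0x3f),
+ * a note whose delta from the last annotated bytecode is 150 comes out as
+ * three SRC_XDELTA notes carrying 63, 63 and 24, followed by the real note
+ * with a delta of 0: the loop above peels off at most 63 per xdelta until
+ * the remainder drops below 8.
+ */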
+
+intN
+js_NewSrcNote2(JSContext *cx, JSCodeGenerator *cg, JSSrcNoteType type,
+ ptrdiff_t offset)
+{
+ intN index;
+
+ index = js_NewSrcNote(cx, cg, type);
+ if (index >= 0) {
+ if (!js_SetSrcNoteOffset(cx, cg, index, 0, offset))
+ return -1;
+ }
+ return index;
+}
+
+intN
+js_NewSrcNote3(JSContext *cx, JSCodeGenerator *cg, JSSrcNoteType type,
+ ptrdiff_t offset1, ptrdiff_t offset2)
+{
+ intN index;
+
+ index = js_NewSrcNote(cx, cg, type);
+ if (index >= 0) {
+ if (!js_SetSrcNoteOffset(cx, cg, index, 0, offset1))
+ return -1;
+ if (!js_SetSrcNoteOffset(cx, cg, index, 1, offset2))
+ return -1;
+ }
+ return index;
+}
+
+static JSBool
+GrowSrcNotes(JSContext *cx, JSCodeGenerator *cg)
+{
+ JSArenaPool *pool;
+ size_t size;
+
+ /* Grow by doubling note array size; update noteMask on success. */
+ pool = cg->notePool;
+ size = SRCNOTE_SIZE(CG_NOTE_MASK(cg) + 1);
+ JS_ARENA_GROW_CAST(CG_NOTES(cg), jssrcnote *, pool, size, size);
+ if (!CG_NOTES(cg)) {
+ JS_ReportOutOfMemory(cx);
+ return JS_FALSE;
+ }
+ CG_NOTE_MASK(cg) = (CG_NOTE_MASK(cg) << 1) | 1;
+ return JS_TRUE;
+}
+
+jssrcnote *
+js_AddToSrcNoteDelta(JSContext *cx, JSCodeGenerator *cg, jssrcnote *sn,
+ ptrdiff_t delta)
+{
+ ptrdiff_t base, limit, newdelta, diff;
+ intN index;
+
+ /*
+ * Called only from OptimizeSpanDeps and js_FinishTakingSrcNotes to add to
+ * main script note deltas, and only by a small positive amount.
+ */
+ JS_ASSERT(cg->current == &cg->main);
+ JS_ASSERT((unsigned) delta < (unsigned) SN_XDELTA_LIMIT);
+
+ base = SN_DELTA(sn);
+ limit = SN_IS_XDELTA(sn) ? SN_XDELTA_LIMIT : SN_DELTA_LIMIT;
+ newdelta = base + delta;
+ if (newdelta < limit) {
+ SN_SET_DELTA(sn, newdelta);
+ } else {
+ index = sn - cg->main.notes;
+ if ((cg->main.noteCount & cg->main.noteMask) == 0) {
+ if (!GrowSrcNotes(cx, cg))
+ return NULL;
+ sn = cg->main.notes + index;
+ }
+ diff = cg->main.noteCount - index;
+ cg->main.noteCount++;
+ memmove(sn + 1, sn, SRCNOTE_SIZE(diff));
+ SN_MAKE_XDELTA(sn, delta);
+ sn++;
+ }
+ return sn;
+}
+
+JS_FRIEND_API(uintN)
+js_SrcNoteLength(jssrcnote *sn)
+{
+ uintN arity;
+ jssrcnote *base;
+
+ arity = (intN)js_SrcNoteSpec[SN_TYPE(sn)].arity;
+ for (base = sn++; arity; sn++, arity--) {
+ if (*sn & SN_3BYTE_OFFSET_FLAG)
+ sn += 2;
+ }
+ return sn - base;
+}
+
+JS_FRIEND_API(ptrdiff_t)
+js_GetSrcNoteOffset(jssrcnote *sn, uintN which)
+{
+ /* Find the offset numbered which (i.e., skip exactly which offsets). */
+ JS_ASSERT(SN_TYPE(sn) != SRC_XDELTA);
+ JS_ASSERT(which < js_SrcNoteSpec[SN_TYPE(sn)].arity);
+ for (sn++; which; sn++, which--) {
+ if (*sn & SN_3BYTE_OFFSET_FLAG)
+ sn += 2;
+ }
+ if (*sn & SN_3BYTE_OFFSET_FLAG) {
+ return (ptrdiff_t)(((uint32)(sn[0] & SN_3BYTE_OFFSET_MASK) << 16)
+ | (sn[1] << 8)
+ | sn[2]);
+ }
+ return (ptrdiff_t)*sn;
+}
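+
+/*
+ * Editor's illustrative note (not part of the original source): an offset of
+ * 0x45 occupies a single note byte, while an offset of 0x1234 is written by
+ * js_SetSrcNoteOffset below as the three bytes 0x80, 0x12, 0x34 (flag bit
+ * plus high, middle and low offset bytes); the branch above reassembles it
+ * as ((0x80 & 0x7f) << 16) | (0x12 << 8) | 0x34 == 0x1234.
+ */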
+
+JSBool
+js_SetSrcNoteOffset(JSContext *cx, JSCodeGenerator *cg, uintN index,
+ uintN which, ptrdiff_t offset)
+{
+ jssrcnote *sn;
+ ptrdiff_t diff;
+
+ if ((jsuword)offset >= (jsuword)((ptrdiff_t)SN_3BYTE_OFFSET_FLAG << 16)) {
+ ReportStatementTooLarge(cx, cg);
+ return JS_FALSE;
+ }
+
+ /* Find the offset numbered which (i.e., skip exactly which offsets). */
+ sn = &CG_NOTES(cg)[index];
+ JS_ASSERT(SN_TYPE(sn) != SRC_XDELTA);
+ JS_ASSERT(which < js_SrcNoteSpec[SN_TYPE(sn)].arity);
+ for (sn++; which; sn++, which--) {
+ if (*sn & SN_3BYTE_OFFSET_FLAG)
+ sn += 2;
+ }
+
+ /* See if the new offset requires three bytes. */
+ if (offset > (ptrdiff_t)SN_3BYTE_OFFSET_MASK) {
+ /* Maybe this offset was already set to a three-byte value. */
+ if (!(*sn & SN_3BYTE_OFFSET_FLAG)) {
+ /* Losing, need to insert another two bytes for this offset. */
+ index = PTRDIFF(sn, CG_NOTES(cg), jssrcnote);
+
+ /*
+ * Simultaneously test to see if the source note array must grow to
+             * accommodate either the first or second byte of additional storage
+ * required by this 3-byte offset.
+ */
+ if (((CG_NOTE_COUNT(cg) + 1) & CG_NOTE_MASK(cg)) <= 1) {
+ if (!GrowSrcNotes(cx, cg))
+ return JS_FALSE;
+ sn = CG_NOTES(cg) + index;
+ }
+ CG_NOTE_COUNT(cg) += 2;
+
+ diff = CG_NOTE_COUNT(cg) - (index + 3);
+ JS_ASSERT(diff >= 0);
+ if (diff > 0)
+ memmove(sn + 3, sn + 1, SRCNOTE_SIZE(diff));
+ }
+ *sn++ = (jssrcnote)(SN_3BYTE_OFFSET_FLAG | (offset >> 16));
+ *sn++ = (jssrcnote)(offset >> 8);
+ }
+ *sn = (jssrcnote)offset;
+ return JS_TRUE;
+}
+
+#ifdef DEBUG_notme
+#define DEBUG_srcnotesize
+#endif
+
+#ifdef DEBUG_srcnotesize
+#define NBINS 10
+static uint32 hist[NBINS];
+
+void DumpSrcNoteSizeHist()
+{
+ static FILE *fp;
+ int i, n;
+
+ if (!fp) {
+ fp = fopen("/tmp/srcnotes.hist", "w");
+ if (!fp)
+ return;
+ setvbuf(fp, NULL, _IONBF, 0);
+ }
+ fprintf(fp, "SrcNote size histogram:\n");
+ for (i = 0; i < NBINS; i++) {
+ fprintf(fp, "%4u %4u ", JS_BIT(i), hist[i]);
+ for (n = (int) JS_HOWMANY(hist[i], 10); n > 0; --n)
+ fputc('*', fp);
+ fputc('\n', fp);
+ }
+ fputc('\n', fp);
+}
+#endif
+
+/*
+ * Fill in the storage at notes with prolog and main srcnotes; the space at
+ * notes was allocated using the CG_COUNT_FINAL_SRCNOTES macro from jsemit.h.
+ * SO DON'T CHANGE THIS FUNCTION WITHOUT AT LEAST CHECKING WHETHER jsemit.h's
+ * CG_COUNT_FINAL_SRCNOTES MACRO NEEDS CORRESPONDING CHANGES!
+ */
+JSBool
+js_FinishTakingSrcNotes(JSContext *cx, JSCodeGenerator *cg, jssrcnote *notes)
+{
+ uintN prologCount, mainCount, totalCount;
+ ptrdiff_t offset, delta;
+ jssrcnote *sn;
+
+ JS_ASSERT(cg->current == &cg->main);
+
+ prologCount = cg->prolog.noteCount;
+ if (prologCount && cg->prolog.currentLine != cg->firstLine) {
+ CG_SWITCH_TO_PROLOG(cg);
+ if (js_NewSrcNote2(cx, cg, SRC_SETLINE, (ptrdiff_t)cg->firstLine) < 0)
+ return JS_FALSE;
+ prologCount = cg->prolog.noteCount;
+ CG_SWITCH_TO_MAIN(cg);
+ } else {
+ /*
+ * Either no prolog srcnotes, or no line number change over prolog.
+ * We don't need a SRC_SETLINE, but we may need to adjust the offset
+ * of the first main note, by adding to its delta and possibly even
+ * prepending SRC_XDELTA notes to it to account for prolog bytecodes
+ * that came at and after the last annotated bytecode.
+ */
+ offset = CG_PROLOG_OFFSET(cg) - cg->prolog.lastNoteOffset;
+ JS_ASSERT(offset >= 0);
+ if (offset > 0 && cg->main.noteCount != 0) {
+ /* NB: Use as much of the first main note's delta as we can. */
+ sn = cg->main.notes;
+ delta = SN_IS_XDELTA(sn)
+ ? SN_XDELTA_MASK - (*sn & SN_XDELTA_MASK)
+ : SN_DELTA_MASK - (*sn & SN_DELTA_MASK);
+ if (offset < delta)
+ delta = offset;
+ for (;;) {
+ if (!js_AddToSrcNoteDelta(cx, cg, sn, delta))
+ return JS_FALSE;
+ offset -= delta;
+ if (offset == 0)
+ break;
+ delta = JS_MIN(offset, SN_XDELTA_MASK);
+ sn = cg->main.notes;
+ }
+ }
+ }
+
+ mainCount = cg->main.noteCount;
+ totalCount = prologCount + mainCount;
+ if (prologCount)
+ memcpy(notes, cg->prolog.notes, SRCNOTE_SIZE(prologCount));
+ memcpy(notes + prologCount, cg->main.notes, SRCNOTE_SIZE(mainCount));
+ SN_MAKE_TERMINATOR(&notes[totalCount]);
+
+#ifdef DEBUG_notme
+ { int bin = JS_CeilingLog2(totalCount);
+ if (bin >= NBINS)
+ bin = NBINS - 1;
+ ++hist[bin];
+ }
+#endif
+ return JS_TRUE;
+}
+
+JSBool
+js_AllocTryNotes(JSContext *cx, JSCodeGenerator *cg)
+{
+ size_t size, incr;
+ ptrdiff_t delta;
+
+ size = TRYNOTE_SIZE(cg->treeContext.tryCount);
+ if (size <= cg->tryNoteSpace)
+ return JS_TRUE;
+
+ /*
+ * Allocate trynotes from cx->tempPool.
+ * XXX Too much growing and we bloat, as other tempPool allocators block
+ * in-place growth, and we never recycle old free space in an arena.
+ * YYY But once we consume an entire arena, we'll realloc it, letting the
+ * malloc heap recycle old space, while still freeing _en masse_ via the
+ * arena pool.
+ */
+ if (!cg->tryBase) {
+ size = JS_ROUNDUP(size, TRYNOTE_SIZE(TRYNOTE_CHUNK));
+ JS_ARENA_ALLOCATE_CAST(cg->tryBase, JSTryNote *, &cx->tempPool, size);
+ if (!cg->tryBase)
+ return JS_FALSE;
+ cg->tryNoteSpace = size;
+ cg->tryNext = cg->tryBase;
+ } else {
+ delta = PTRDIFF((char *)cg->tryNext, (char *)cg->tryBase, char);
+ incr = size - cg->tryNoteSpace;
+ incr = JS_ROUNDUP(incr, TRYNOTE_SIZE(TRYNOTE_CHUNK));
+ size = cg->tryNoteSpace;
+ JS_ARENA_GROW_CAST(cg->tryBase, JSTryNote *, &cx->tempPool, size, incr);
+ if (!cg->tryBase)
+ return JS_FALSE;
+ cg->tryNoteSpace = size + incr;
+ cg->tryNext = (JSTryNote *)((char *)cg->tryBase + delta);
+ }
+ return JS_TRUE;
+}
+
+JSTryNote *
+js_NewTryNote(JSContext *cx, JSCodeGenerator *cg, ptrdiff_t start,
+ ptrdiff_t end, ptrdiff_t catchStart)
+{
+ JSTryNote *tn;
+
+ JS_ASSERT(cg->tryBase <= cg->tryNext);
+ JS_ASSERT(catchStart >= 0);
+ tn = cg->tryNext++;
+ tn->start = start;
+ tn->length = end - start;
+ tn->catchStart = catchStart;
+ return tn;
+}
+
+void
+js_FinishTakingTryNotes(JSContext *cx, JSCodeGenerator *cg, JSTryNote *notes)
+{
+ uintN count;
+
+ count = PTRDIFF(cg->tryNext, cg->tryBase, JSTryNote);
+ if (!count)
+ return;
+
+ memcpy(notes, cg->tryBase, TRYNOTE_SIZE(count));
+ notes[count].start = 0;
+ notes[count].length = CG_OFFSET(cg);
+ notes[count].catchStart = 0;
+}
diff --git a/third_party/js-1.7/jsemit.h b/third_party/js-1.7/jsemit.h
new file mode 100644
index 0000000..90709c2
--- /dev/null
+++ b/third_party/js-1.7/jsemit.h
@@ -0,0 +1,743 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sw=4 et tw=78:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsemit_h___
+#define jsemit_h___
+/*
+ * JS bytecode generation.
+ */
+
+#include "jsstddef.h"
+#include "jstypes.h"
+#include "jsatom.h"
+#include "jsopcode.h"
+#include "jsprvtd.h"
+#include "jspubtd.h"
+
+JS_BEGIN_EXTERN_C
+
+/*
+ * NB: If you add enumerators for scope statements, add them between STMT_WITH
+ * and STMT_CATCH, or you will break the STMT_TYPE_IS_SCOPE macro. If you add
+ * non-looping statement enumerators, add them before STMT_DO_LOOP or you will
+ * break the STMT_TYPE_IS_LOOP macro.
+ *
+ * Also remember to keep the statementName array in jsemit.c in sync.
+ */
+typedef enum JSStmtType {
+ STMT_LABEL, /* labeled statement: L: s */
+ STMT_IF, /* if (then) statement */
+ STMT_ELSE, /* else clause of if statement */
+ STMT_BODY, /* synthetic body of function with
+ destructuring formal parameters */
+ STMT_BLOCK, /* compound statement: { s1[;... sN] } */
+ STMT_SWITCH, /* switch statement */
+ STMT_WITH, /* with statement */
+ STMT_CATCH, /* catch block */
+ STMT_TRY, /* try block */
+ STMT_FINALLY, /* finally block */
+ STMT_SUBROUTINE, /* gosub-target subroutine body */
+ STMT_DO_LOOP, /* do/while loop statement */
+ STMT_FOR_LOOP, /* for loop statement */
+ STMT_FOR_IN_LOOP, /* for/in loop statement */
+ STMT_WHILE_LOOP /* while loop statement */
+} JSStmtType;
+
+#define STMT_TYPE_IN_RANGE(t,b,e) ((uint)((t) - (b)) <= (uintN)((e) - (b)))
+
+/*
+ * A comment on the encoding of the JSStmtType enum and type-testing macros:
+ *
+ * STMT_TYPE_MAYBE_SCOPE tells whether a statement type is always, or may
+ * become, a lexical scope. It therefore includes block and switch (the two
+ * low-numbered "maybe" scope types) and excludes with (with has dynamic scope
+ * pending the "reformed with" in ES4/JS2). It includes all try-catch-finally
+ * types, which are high-numbered maybe-scope types.
+ *
+ * STMT_TYPE_LINKS_SCOPE tells whether a JSStmtInfo of the given type eagerly
+ * links to other scoping statement info records. It excludes the two early
+ * "maybe" types, block and switch, as well as the try and both finally types,
+ * since try and the other trailing maybe-scope types don't need block scope
+ * unless they contain let declarations.
+ *
+ * We treat with as a static scope because it prevents lexical binding from
+ * continuing further up the static scope chain. With the "reformed with"
+ * proposal for JS2, we'll be able to model it statically, too.
+ */
+#define STMT_TYPE_MAYBE_SCOPE(type) \
+ (type != STMT_WITH && \
+ STMT_TYPE_IN_RANGE(type, STMT_BLOCK, STMT_SUBROUTINE))
+
+#define STMT_TYPE_LINKS_SCOPE(type) \
+ STMT_TYPE_IN_RANGE(type, STMT_WITH, STMT_CATCH)
+
+#define STMT_TYPE_IS_TRYING(type) \
+ STMT_TYPE_IN_RANGE(type, STMT_TRY, STMT_SUBROUTINE)
+
+#define STMT_TYPE_IS_LOOP(type) ((type) >= STMT_DO_LOOP)
+
+#define STMT_MAYBE_SCOPE(stmt) STMT_TYPE_MAYBE_SCOPE((stmt)->type)
+#define STMT_LINKS_SCOPE(stmt) (STMT_TYPE_LINKS_SCOPE((stmt)->type) || \
+ ((stmt)->flags & SIF_SCOPE))
+#define STMT_IS_TRYING(stmt) STMT_TYPE_IS_TRYING((stmt)->type)
+#define STMT_IS_LOOP(stmt) STMT_TYPE_IS_LOOP((stmt)->type)
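+
+/*
+ * Editor's illustrative note (not part of the original header): given the
+ * enum ordering above, STMT_TYPE_MAYBE_SCOPE holds for STMT_SWITCH and
+ * STMT_TRY (both lie in [STMT_BLOCK, STMT_SUBROUTINE]) but not for STMT_WITH,
+ * which the macro excludes explicitly, while STMT_TYPE_IS_LOOP holds exactly
+ * for the four *_LOOP enumerators at the end.  This is why the comment above
+ * the enum insists that new scope and loop types be inserted in particular
+ * ranges.
+ */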
+
+typedef struct JSStmtInfo JSStmtInfo;
+
+struct JSStmtInfo {
+ uint16 type; /* statement type */
+ uint16 flags; /* flags, see below */
+ ptrdiff_t update; /* loop update offset (top if none) */
+ ptrdiff_t breaks; /* offset of last break in loop */
+ ptrdiff_t continues; /* offset of last continue in loop */
+ JSAtom *atom; /* name of LABEL, or block scope object */
+ JSStmtInfo *down; /* info for enclosing statement */
+ JSStmtInfo *downScope; /* next enclosing lexical scope */
+};
+
+#define SIF_SCOPE 0x0001 /* statement has its own lexical scope */
+#define SIF_BODY_BLOCK 0x0002 /* STMT_BLOCK type is a function body */
+
+/*
+ * To reuse space in JSStmtInfo, rename breaks and continues for use during
+ * try/catch/finally code generation and backpatching. To match most common
+ * use cases, the macro argument is a struct, not a struct pointer. Only a
+ * loop, switch, or label statement info record can have breaks and continues,
+ * and only a for loop has an update backpatch chain, so it's safe to overlay
+ * these for the "trying" JSStmtTypes.
+ */
+#define CATCHNOTE(stmt) ((stmt).update)
+#define GOSUBS(stmt) ((stmt).breaks)
+#define GUARDJUMP(stmt) ((stmt).continues)
+
+#define AT_TOP_LEVEL(tc) \
+ (!(tc)->topStmt || ((tc)->topStmt->flags & SIF_BODY_BLOCK))
+
+#define SET_STATEMENT_TOP(stmt, top) \
+ ((stmt)->update = (top), (stmt)->breaks = (stmt)->continues = (-1))
+
+struct JSTreeContext { /* tree context for semantic checks */
+ uint16 flags; /* statement state flags, see below */
+ uint16 numGlobalVars; /* max. no. of global variables/regexps */
+ uint32 tryCount; /* total count of try statements parsed */
+ uint32 globalUses; /* optimizable global var uses in total */
+ uint32 loopyGlobalUses;/* optimizable global var uses in loops */
+ JSStmtInfo *topStmt; /* top of statement info stack */
+ JSStmtInfo *topScopeStmt; /* top lexical scope statement */
+ JSObject *blockChain; /* compile time block scope chain (NB: one
+ deeper than the topScopeStmt/downScope
+ chain when in head of let block/expr) */
+ JSParseNode *blockNode; /* parse node for a lexical scope.
+ XXX combine with blockChain? */
+ JSAtomList decls; /* function, const, and var declarations */
+ JSParseNode *nodeList; /* list of recyclable parse-node structs */
+};
+
+#define TCF_COMPILING 0x01 /* generating bytecode; this tc is a cg */
+#define TCF_IN_FUNCTION 0x02 /* parsing inside function body */
+#define TCF_RETURN_EXPR 0x04 /* function has 'return expr;' */
+#define TCF_RETURN_VOID 0x08 /* function has 'return;' */
+#define TCF_RETURN_FLAGS 0x0C /* propagate these out of blocks */
+#define TCF_IN_FOR_INIT 0x10 /* parsing init expr of for; exclude 'in' */
+#define TCF_FUN_CLOSURE_VS_VAR 0x20 /* function and var with same name */
+#define TCF_FUN_USES_NONLOCALS 0x40 /* function refers to non-local names */
+#define TCF_FUN_HEAVYWEIGHT 0x80 /* function needs Call object per call */
+#define TCF_FUN_IS_GENERATOR 0x100 /* parsed yield statement in function */
+#define TCF_FUN_FLAGS 0x1E0 /* flags to propagate from FunctionBody */
+#define TCF_HAS_DEFXMLNS 0x200 /* default xml namespace = ...; parsed */
+#define TCF_HAS_FUNCTION_STMT 0x400 /* block contains a function statement */
+
+#define TREE_CONTEXT_INIT(tc) \
+ ((tc)->flags = (tc)->numGlobalVars = 0, \
+ (tc)->tryCount = (tc)->globalUses = (tc)->loopyGlobalUses = 0, \
+ (tc)->topStmt = (tc)->topScopeStmt = NULL, \
+ (tc)->blockChain = NULL, \
+ ATOM_LIST_INIT(&(tc)->decls), \
+ (tc)->nodeList = NULL, (tc)->blockNode = NULL)
+
+#define TREE_CONTEXT_FINISH(tc) \
+ ((void)0)
+
+/*
+ * Span-dependent instructions are jumps whose span (from the jump bytecode to
+ * the jump target) may require 2 or 4 bytes of immediate operand.
+ */
+typedef struct JSSpanDep JSSpanDep;
+typedef struct JSJumpTarget JSJumpTarget;
+
+struct JSSpanDep {
+ ptrdiff_t top; /* offset of first bytecode in an opcode */
+ ptrdiff_t offset; /* offset - 1 within opcode of jump operand */
+ ptrdiff_t before; /* original offset - 1 of jump operand */
+ JSJumpTarget *target; /* tagged target pointer or backpatch delta */
+};
+
+/*
+ * Jump targets are stored in an AVL tree, for O(log(n)) lookup with targets
+ * sorted by offset from left to right, so that targets after a span-dependent
+ * instruction whose jump offset operand must be extended can be found quickly
+ * and adjusted upward (toward higher offsets).
+ */
+struct JSJumpTarget {
+ ptrdiff_t offset; /* offset of span-dependent jump target */
+ int balance; /* AVL tree balance number */
+ JSJumpTarget *kids[2]; /* left and right AVL tree child pointers */
+};
+
+#define JT_LEFT 0
+#define JT_RIGHT 1
+#define JT_OTHER_DIR(dir) (1 - (dir))
+#define JT_IMBALANCE(dir) (((dir) << 1) - 1)
+#define JT_DIR(imbalance) (((imbalance) + 1) >> 1)
+
+/*
+ * Backpatch deltas are encoded in JSSpanDep.target if JT_TAG_BIT is clear,
+ * so we can maintain backpatch chains when using span dependency records to
+ * hold jump offsets that overflow 16 bits.
+ */
+#define JT_TAG_BIT ((jsword) 1)
+#define JT_UNTAG_SHIFT 1
+#define JT_SET_TAG(jt) ((JSJumpTarget *)((jsword)(jt) | JT_TAG_BIT))
+#define JT_CLR_TAG(jt) ((JSJumpTarget *)((jsword)(jt) & ~JT_TAG_BIT))
+#define JT_HAS_TAG(jt) ((jsword)(jt) & JT_TAG_BIT)
+
+#define BITS_PER_PTRDIFF (sizeof(ptrdiff_t) * JS_BITS_PER_BYTE)
+#define BITS_PER_BPDELTA (BITS_PER_PTRDIFF - 1 - JT_UNTAG_SHIFT)
+#define BPDELTA_MAX (((ptrdiff_t)1 << BITS_PER_BPDELTA) - 1)
+#define BPDELTA_TO_JT(bp) ((JSJumpTarget *)((bp) << JT_UNTAG_SHIFT))
+#define JT_TO_BPDELTA(jt) ((ptrdiff_t)((jsword)(jt) >> JT_UNTAG_SHIFT))
+
+#define SD_SET_TARGET(sd,jt) ((sd)->target = JT_SET_TAG(jt))
+#define SD_GET_TARGET(sd) (JS_ASSERT(JT_HAS_TAG((sd)->target)), \
+ JT_CLR_TAG((sd)->target))
+#define SD_SET_BPDELTA(sd,bp) ((sd)->target = BPDELTA_TO_JT(bp))
+#define SD_GET_BPDELTA(sd) (JS_ASSERT(!JT_HAS_TAG((sd)->target)), \
+ JT_TO_BPDELTA((sd)->target))
+
+/* Avoid asserting twice by expanding SD_GET_TARGET in the "then" clause. */
+#define SD_SPAN(sd,pivot) (SD_GET_TARGET(sd) \
+ ? JT_CLR_TAG((sd)->target)->offset - (pivot) \
+ : 0)
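+
+/*
+ * Editor's illustrative note (not part of the original header): the tagging
+ * scheme relies on JSJumpTarget pointers being at least two-byte aligned, so
+ * bit 0 is free.  A resolved span dependency stores its JSJumpTarget pointer
+ * with bit 0 set (SD_SET_TARGET); an unresolved one on a backpatch chain
+ * stores its delta shifted left by one with bit 0 clear, e.g. a backpatch
+ * delta of 5 is kept as the word value 10 and SD_GET_BPDELTA recovers it as
+ * 10 >> 1 == 5.
+ */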
+
+struct JSCodeGenerator {
+ JSTreeContext treeContext; /* base state: statement info stack, etc. */
+
+ JSArenaPool *codePool; /* pointer to thread code arena pool */
+ JSArenaPool *notePool; /* pointer to thread srcnote arena pool */
+ void *codeMark; /* low watermark in cg->codePool */
+ void *noteMark; /* low watermark in cg->notePool */
+ void *tempMark; /* low watermark in cx->tempPool */
+
+ struct {
+ jsbytecode *base; /* base of JS bytecode vector */
+ jsbytecode *limit; /* one byte beyond end of bytecode */
+ jsbytecode *next; /* pointer to next free bytecode */
+ jssrcnote *notes; /* source notes, see below */
+ uintN noteCount; /* number of source notes so far */
+ uintN noteMask; /* growth increment for notes */
+ ptrdiff_t lastNoteOffset; /* code offset for last source note */
+ uintN currentLine; /* line number for tree-based srcnote gen */
+ } prolog, main, *current;
+
+ const char *filename; /* null or weak link to source filename */
+ uintN firstLine; /* first line, for js_NewScriptFromCG */
+ JSPrincipals *principals; /* principals for constant folding eval */
+ JSAtomList atomList; /* literals indexed for mapping */
+
+ intN stackDepth; /* current stack depth in script frame */
+ uintN maxStackDepth; /* maximum stack depth so far */
+
+ JSTryNote *tryBase; /* first exception handling note */
+ JSTryNote *tryNext; /* next available note */
+ size_t tryNoteSpace; /* # of bytes allocated at tryBase */
+
+ JSSpanDep *spanDeps; /* span dependent instruction records */
+ JSJumpTarget *jumpTargets; /* AVL tree of jump target offsets */
+ JSJumpTarget *jtFreeList; /* JT_LEFT-linked list of free structs */
+ uintN numSpanDeps; /* number of span dependencies */
+ uintN numJumpTargets; /* number of jump targets */
+ ptrdiff_t spanDepTodo; /* offset from main.base of potentially
+ unoptimized spandeps */
+
+ uintN arrayCompSlot; /* stack slot of array in comprehension */
+
+ uintN emitLevel; /* js_EmitTree recursion level */
+ JSAtomList constList; /* compile time constants */
+ JSCodeGenerator *parent; /* Enclosing function or global context */
+};
+
+#define CG_BASE(cg) ((cg)->current->base)
+#define CG_LIMIT(cg) ((cg)->current->limit)
+#define CG_NEXT(cg) ((cg)->current->next)
+#define CG_CODE(cg,offset) (CG_BASE(cg) + (offset))
+#define CG_OFFSET(cg) PTRDIFF(CG_NEXT(cg), CG_BASE(cg), jsbytecode)
+
+#define CG_NOTES(cg) ((cg)->current->notes)
+#define CG_NOTE_COUNT(cg) ((cg)->current->noteCount)
+#define CG_NOTE_MASK(cg) ((cg)->current->noteMask)
+#define CG_LAST_NOTE_OFFSET(cg) ((cg)->current->lastNoteOffset)
+#define CG_CURRENT_LINE(cg) ((cg)->current->currentLine)
+
+#define CG_PROLOG_BASE(cg) ((cg)->prolog.base)
+#define CG_PROLOG_LIMIT(cg) ((cg)->prolog.limit)
+#define CG_PROLOG_NEXT(cg) ((cg)->prolog.next)
+#define CG_PROLOG_CODE(cg,poff) (CG_PROLOG_BASE(cg) + (poff))
+#define CG_PROLOG_OFFSET(cg) PTRDIFF(CG_PROLOG_NEXT(cg), CG_PROLOG_BASE(cg),\
+ jsbytecode)
+
+#define CG_SWITCH_TO_MAIN(cg) ((cg)->current = &(cg)->main)
+#define CG_SWITCH_TO_PROLOG(cg) ((cg)->current = &(cg)->prolog)
+
+/*
+ * Initialize cg to allocate bytecode space from codePool, source note space
+ * from notePool, and all other arena-allocated temporaries from cx->tempPool.
+ * Return true on success. Report an error and return false if the initial
+ * code segment can't be allocated.
+ */
+extern JS_FRIEND_API(JSBool)
+js_InitCodeGenerator(JSContext *cx, JSCodeGenerator *cg,
+ JSArenaPool *codePool, JSArenaPool *notePool,
+ const char *filename, uintN lineno,
+ JSPrincipals *principals);
+
+/*
+ * Release cg->codePool, cg->notePool, and cx->tempPool to marks set by
+ * js_InitCodeGenerator. Note that cgs are magic: they own the arena pool
+ * "tops-of-stack" space above their codeMark, noteMark, and tempMark points.
+ * This means you cannot alloc from tempPool and save the pointer beyond the
+ * next JS_FinishCodeGenerator.
+ */
+extern JS_FRIEND_API(void)
+js_FinishCodeGenerator(JSContext *cx, JSCodeGenerator *cg);
+
+/*
+ * Emit one bytecode.
+ */
+extern ptrdiff_t
+js_Emit1(JSContext *cx, JSCodeGenerator *cg, JSOp op);
+
+/*
+ * Emit two bytecodes, an opcode (op) with a byte of immediate operand (op1).
+ */
+extern ptrdiff_t
+js_Emit2(JSContext *cx, JSCodeGenerator *cg, JSOp op, jsbytecode op1);
+
+/*
+ * Emit three bytecodes, an opcode with two bytes of immediate operands.
+ */
+extern ptrdiff_t
+js_Emit3(JSContext *cx, JSCodeGenerator *cg, JSOp op, jsbytecode op1,
+ jsbytecode op2);
+
+/*
+ * Emit (1 + extra) bytecodes, for N bytes of op and its immediate operand.
+ */
+extern ptrdiff_t
+js_EmitN(JSContext *cx, JSCodeGenerator *cg, JSOp op, size_t extra);
+
+/*
+ * Unsafe macro to call js_SetJumpOffset and return JS_FALSE if it fails.
+ */
+#define CHECK_AND_SET_JUMP_OFFSET(cx,cg,pc,off) \
+ JS_BEGIN_MACRO \
+ if (!js_SetJumpOffset(cx, cg, pc, off)) \
+ return JS_FALSE; \
+ JS_END_MACRO
+
+#define CHECK_AND_SET_JUMP_OFFSET_AT(cx,cg,off) \
+ CHECK_AND_SET_JUMP_OFFSET(cx, cg, CG_CODE(cg,off), CG_OFFSET(cg) - (off))
+
+extern JSBool
+js_SetJumpOffset(JSContext *cx, JSCodeGenerator *cg, jsbytecode *pc,
+ ptrdiff_t off);
+
+/* Test whether we're in a statement of given type. */
+extern JSBool
+js_InStatement(JSTreeContext *tc, JSStmtType type);
+
+/* Test whether we're in a with statement. */
+#define js_InWithStatement(tc) js_InStatement(tc, STMT_WITH)
+
+/*
+ * Test whether atom refers to a global variable (or is a reference error).
+ * Return true in *loopyp if any loops enclose the lexical reference, false
+ * otherwise.
+ */
+extern JSBool
+js_IsGlobalReference(JSTreeContext *tc, JSAtom *atom, JSBool *loopyp);
+
+/*
+ * Push the C-stack-allocated struct at stmt onto the stmtInfo stack.
+ */
+extern void
+js_PushStatement(JSTreeContext *tc, JSStmtInfo *stmt, JSStmtType type,
+ ptrdiff_t top);
+
+/*
+ * Push a block scope statement and link blockAtom's object-valued key into
+ * tc->blockChain. To pop this statement info record, use js_PopStatement as
+ * usual, or if appropriate (if generating code), js_PopStatementCG.
+ */
+extern void
+js_PushBlockScope(JSTreeContext *tc, JSStmtInfo *stmt, JSAtom *blockAtom,
+ ptrdiff_t top);
+
+/*
+ * Pop tc->topStmt. If the top JSStmtInfo struct is not stack-allocated, it
+ * is up to the caller to free it.
+ */
+extern void
+js_PopStatement(JSTreeContext *tc);
+
+/*
+ * Like js_PopStatement(&cg->treeContext), also patch breaks and continues
+ * unless the top statement info record represents a try-catch-finally suite.
+ * May fail if a jump offset overflows.
+ */
+extern JSBool
+js_PopStatementCG(JSContext *cx, JSCodeGenerator *cg);
+
+/*
+ * Define and lookup a primitive jsval associated with the const named by atom.
+ * js_DefineCompileTimeConstant analyzes the constant-folded initializer at pn
+ * and saves the const's value in cg->constList, if it can be used at compile
+ * time. It returns true unless an error occurred.
+ *
+ * If the initializer's value could not be saved, js_LookupCompileTimeConstant
+ * calls will return the undefined value. js_LookupCompileTimeConstant tries
+ * to find a const value memorized for atom, returning true with *vp set to a
+ * value other than undefined if the constant was found, true with *vp set to
+ * JSVAL_VOID if not found, and false on error.
+ */
+extern JSBool
+js_DefineCompileTimeConstant(JSContext *cx, JSCodeGenerator *cg, JSAtom *atom,
+ JSParseNode *pn);
+
+extern JSBool
+js_LookupCompileTimeConstant(JSContext *cx, JSCodeGenerator *cg, JSAtom *atom,
+ jsval *vp);
+
+/*
+ * Find a lexically scoped variable (one declared by let, catch, or an array
+ * comprehension) named by atom, looking in tc's compile-time scopes.
+ *
+ * If a WITH statement is reached along the scope stack, return its statement
+ * info record, so callers can tell that atom is ambiguous. If slotp is not
+ * null, then if atom is found, set *slotp to its stack slot, otherwise to -1.
+ * This means that if slotp is not null, all the block objects on the lexical
+ * scope chain must have had their depth slots computed by the code generator,
+ * so the caller must be under js_EmitTree.
+ *
+ * In any event, directly return the statement info record in which atom was
+ * found. Otherwise return null.
+ */
+extern JSStmtInfo *
+js_LexicalLookup(JSTreeContext *tc, JSAtom *atom, jsint *slotp,
+ JSBool letdecl);
+
+/*
+ * Emit code into cg for the tree rooted at pn.
+ */
+extern JSBool
+js_EmitTree(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn);
+
+/*
+ * Emit function code into cg for the tree rooted at body.
+ */
+extern JSBool
+js_EmitFunctionBytecode(JSContext *cx, JSCodeGenerator *cg, JSParseNode *body);
+
+/*
+ * Emit code into cg for the tree rooted at body, then create a persistent
+ * script for fun from cg.
+ */
+extern JSBool
+js_EmitFunctionBody(JSContext *cx, JSCodeGenerator *cg, JSParseNode *body,
+ JSFunction *fun);
+
+/*
+ * Source notes generated along with bytecode for decompiling and debugging.
+ * A source note is a uint8 with 5 bits of type and 3 of offset from the pc of
+ * the previous note. If 3 bits of offset aren't enough, extended delta notes
+ * (SRC_XDELTA) consisting of 2 set high order bits followed by 6 offset bits
+ * are emitted before the next note. Some notes have operand offsets encoded
+ * immediately after them, in note bytes or byte-triples.
+ *
+ * Source Note Extended Delta
+ * +7-6-5-4-3+2-1-0+ +7-6-5+4-3-2-1-0+
+ * |note-type|delta| |1 1| ext-delta |
+ * +---------+-----+ +---+-----------+
+ *
+ * At most one "gettable" note (i.e., a note of type other than SRC_NEWLINE,
+ * SRC_SETLINE, and SRC_XDELTA) applies to a given bytecode.
+ *
+ * NB: the js_SrcNoteSpec array in jsemit.c is indexed by this enum, so its
+ * initializers need to match the order here.
+ *
+ * Note on adding new source notes: every pair of bytecodes (A, B) where A and
+ * B have disjoint sets of source notes that could apply to each bytecode may
+ * reuse the same note type value for two notes (snA, snB) that have the same
+ * arity, offsetBias, and isSpanDep initializers in js_SrcNoteSpec. This is
+ * why SRC_IF and SRC_INITPROP have the same value below. For bad historical
+ * reasons, some bytecodes below that could be overlayed have not been, but
+ * before using SRC_EXTENDED, consider compressing the existing note types.
+ *
+ * Don't forget to update JSXDR_BYTECODE_VERSION in jsxdrapi.h for all such
+ * incompatible source note or other bytecode changes.
+ */
+typedef enum JSSrcNoteType {
+ SRC_NULL = 0, /* terminates a note vector */
+ SRC_IF = 1, /* JSOP_IFEQ bytecode is from an if-then */
+ SRC_INITPROP = 1, /* disjoint meaning applied to JSOP_INITELEM or
+ to an index label in a regular (structuring)
+ or a destructuring object initialiser */
+ SRC_IF_ELSE = 2, /* JSOP_IFEQ bytecode is from an if-then-else */
+ SRC_WHILE = 3, /* JSOP_IFEQ is from a while loop */
+ SRC_FOR = 4, /* JSOP_NOP or JSOP_POP in for loop head */
+ SRC_CONTINUE = 5, /* JSOP_GOTO is a continue, not a break;
+ also used on JSOP_ENDINIT if extra comma
+ at end of array literal: [1,2,,] */
+ SRC_DECL = 6, /* type of a declaration (var, const, let*) */
+ SRC_DESTRUCT = 6, /* JSOP_DUP starting a destructuring assignment
+ operation, with SRC_DECL_* offset operand */
+ SRC_PCDELTA = 7, /* distance forward from comma-operator to
+ next POP, or from CONDSWITCH to first CASE
+ opcode, etc. -- always a forward delta */
+ SRC_GROUPASSIGN = 7, /* SRC_DESTRUCT variant for [a, b] = [c, d] */
+ SRC_ASSIGNOP = 8, /* += or another assign-op follows */
+ SRC_COND = 9, /* JSOP_IFEQ is from conditional ?: operator */
+ SRC_BRACE = 10, /* mandatory brace, for scope or to avoid
+ dangling else */
+ SRC_HIDDEN = 11, /* opcode shouldn't be decompiled */
+ SRC_PCBASE = 12, /* distance back from annotated getprop or
+ setprop op to left-most obj.prop.subprop
+ bytecode -- always a backward delta */
+ SRC_METHODBASE = 13, /* SRC_PCBASE variant for obj.function::foo
+ gets and sets; disjoint from SRC_LABEL by
+ bytecode to which it applies */
+ SRC_LABEL = 13, /* JSOP_NOP for label: with atomid immediate */
+ SRC_LABELBRACE = 14, /* JSOP_NOP for label: {...} begin brace */
+ SRC_ENDBRACE = 15, /* JSOP_NOP for label: {...} end brace */
+ SRC_BREAK2LABEL = 16, /* JSOP_GOTO for 'break label' with atomid */
+ SRC_CONT2LABEL = 17, /* JSOP_GOTO for 'continue label' with atomid */
+ SRC_SWITCH = 18, /* JSOP_*SWITCH with offset to end of switch,
+ 2nd off to first JSOP_CASE if condswitch */
+ SRC_FUNCDEF = 19, /* JSOP_NOP for function f() with atomid */
+ SRC_CATCH = 20, /* catch block has guard */
+ SRC_EXTENDED = 21, /* extended source note, 32-159, in next byte */
+ SRC_NEWLINE = 22, /* bytecode follows a source newline */
+ SRC_SETLINE = 23, /* a file-absolute source line number note */
+ SRC_XDELTA = 24 /* 24-31 are for extended delta notes */
+} JSSrcNoteType;
+
+/*
+ * Constants for the SRC_DECL source note. Note that span-dependent bytecode
+ * selection means that any SRC_DECL offset greater than SRC_DECL_LET may need
+ * to be adjusted, but these "offsets" are too small to span a span-dependent
+ * instruction, so can be used to denote distinct declaration syntaxes to the
+ * decompiler.
+ *
+ * NB: the var_prefix array in jsopcode.c depends on these dense indexes from
+ * SRC_DECL_VAR through SRC_DECL_LET.
+ */
+#define SRC_DECL_VAR 0
+#define SRC_DECL_CONST 1
+#define SRC_DECL_LET 2
+#define SRC_DECL_NONE 3
+
+#define SN_TYPE_BITS 5
+#define SN_DELTA_BITS 3
+#define SN_XDELTA_BITS 6
+#define SN_TYPE_MASK (JS_BITMASK(SN_TYPE_BITS) << SN_DELTA_BITS)
+#define SN_DELTA_MASK ((ptrdiff_t)JS_BITMASK(SN_DELTA_BITS))
+#define SN_XDELTA_MASK ((ptrdiff_t)JS_BITMASK(SN_XDELTA_BITS))
+
+#define SN_MAKE_NOTE(sn,t,d) (*(sn) = (jssrcnote) \
+ (((t) << SN_DELTA_BITS) \
+ | ((d) & SN_DELTA_MASK)))
+#define SN_MAKE_XDELTA(sn,d) (*(sn) = (jssrcnote) \
+ ((SRC_XDELTA << SN_DELTA_BITS) \
+ | ((d) & SN_XDELTA_MASK)))
+
+#define SN_IS_XDELTA(sn) ((*(sn) >> SN_DELTA_BITS) >= SRC_XDELTA)
+#define SN_TYPE(sn) (SN_IS_XDELTA(sn) ? SRC_XDELTA \
+ : *(sn) >> SN_DELTA_BITS)
+#define SN_SET_TYPE(sn,type) SN_MAKE_NOTE(sn, type, SN_DELTA(sn))
+#define SN_IS_GETTABLE(sn) (SN_TYPE(sn) < SRC_NEWLINE)
+
+#define SN_DELTA(sn) ((ptrdiff_t)(SN_IS_XDELTA(sn) \
+ ? *(sn) & SN_XDELTA_MASK \
+ : *(sn) & SN_DELTA_MASK))
+#define SN_SET_DELTA(sn,delta) (SN_IS_XDELTA(sn) \
+ ? SN_MAKE_XDELTA(sn, delta) \
+ : SN_MAKE_NOTE(sn, SN_TYPE(sn), delta))
+
+#define SN_DELTA_LIMIT ((ptrdiff_t)JS_BIT(SN_DELTA_BITS))
+#define SN_XDELTA_LIMIT ((ptrdiff_t)JS_BIT(SN_XDELTA_BITS))
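+
+/*
+ * Editor's illustrative note (not part of the original header): SN_MAKE_NOTE
+ * packs type and delta into a single byte, so a SRC_WHILE (3) note with a
+ * delta of 5 is the byte (3 << 3) | 5 == 0x1d.  An extended delta of 37 is
+ * (SRC_XDELTA << 3) | 37 == 0xe5, whose top two bits are set as in the
+ * "Extended Delta" diagram above; SN_IS_XDELTA recognises it because
+ * 0xe5 >> 3 == 28, which is >= SRC_XDELTA.
+ */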
+
+/*
+ * Offset fields follow certain notes and are frequency-encoded: an offset in
+ * [0,0x7f] consumes one byte, an offset in [0x80,0x7fffff] takes three, and
+ * the high bit of the first byte is set.
+ */
+#define SN_3BYTE_OFFSET_FLAG 0x80
+#define SN_3BYTE_OFFSET_MASK 0x7f
+
+typedef struct JSSrcNoteSpec {
+ const char *name; /* name for disassembly/debugging output */
+ uint8 arity; /* number of offset operands */
+ uint8 offsetBias; /* bias of offset(s) from annotated pc */
+ int8 isSpanDep; /* 1 or -1 if offsets could span extended ops,
+ 0 otherwise; sign tells span direction */
+} JSSrcNoteSpec;
+
+extern JS_FRIEND_DATA(JSSrcNoteSpec) js_SrcNoteSpec[];
+extern JS_FRIEND_API(uintN) js_SrcNoteLength(jssrcnote *sn);
+
+#define SN_LENGTH(sn) ((js_SrcNoteSpec[SN_TYPE(sn)].arity == 0) ? 1 \
+ : js_SrcNoteLength(sn))
+#define SN_NEXT(sn) ((sn) + SN_LENGTH(sn))
+
+/* A source note array is terminated by an all-zero element. */
+#define SN_MAKE_TERMINATOR(sn) (*(sn) = SRC_NULL)
+#define SN_IS_TERMINATOR(sn) (*(sn) == SRC_NULL)
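+
+/*
+ * Illustrative sketch (editorial addition, not part of the original header):
+ * walking a finished note array with the macros above. SN_NEXT skips any
+ * offset operands and the all-zero terminator ends the loop; the function
+ * name is hypothetical.
+ */
+#if 0
+static void
+ExampleWalkNotes(jssrcnote *notes)
+{
+ jssrcnote *sn;
+
+ for (sn = notes; !SN_IS_TERMINATOR(sn); sn = SN_NEXT(sn)) {
+ /* SN_TYPE(sn) indexes js_SrcNoteSpec; SN_DELTA(sn) is the pc delta. */
+ }
+}
+#endif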
+
+/*
+ * Append a new source note of the given type (and therefore size) to cg's
+ * notes dynamic array, updating cg->noteCount. Return the new note's index
+ * within the array pointed at by cg->current->notes. Return -1 if out of
+ * memory.
+ */
+extern intN
+js_NewSrcNote(JSContext *cx, JSCodeGenerator *cg, JSSrcNoteType type);
+
+extern intN
+js_NewSrcNote2(JSContext *cx, JSCodeGenerator *cg, JSSrcNoteType type,
+ ptrdiff_t offset);
+
+extern intN
+js_NewSrcNote3(JSContext *cx, JSCodeGenerator *cg, JSSrcNoteType type,
+ ptrdiff_t offset1, ptrdiff_t offset2);
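+
+/*
+ * Illustrative sketch (editorial addition, not part of the original header)
+ * of typical emitter usage inside a code-generation function: annotate a
+ * declaration with a SRC_DECL note whose single offset operand records the
+ * declaration kind. All three functions return the new note's index, or -1
+ * if out of memory.
+ */
+#if 0
+ if (js_NewSrcNote2(cx, cg, SRC_DECL, SRC_DECL_VAR) < 0)
+ return JS_FALSE;
+#endif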
+
+/*
+ * NB: this function can add at most one extra extended delta note.
+ */
+extern jssrcnote *
+js_AddToSrcNoteDelta(JSContext *cx, JSCodeGenerator *cg, jssrcnote *sn,
+ ptrdiff_t delta);
+
+/*
+ * Get and set the offset operand identified by which (0 for the first, etc.).
+ */
+extern JS_FRIEND_API(ptrdiff_t)
+js_GetSrcNoteOffset(jssrcnote *sn, uintN which);
+
+extern JSBool
+js_SetSrcNoteOffset(JSContext *cx, JSCodeGenerator *cg, uintN index,
+ uintN which, ptrdiff_t offset);
+
+/*
+ * Finish taking source notes in cx's notePool, copying final notes to the new
+ * stable store allocated by the caller and passed in via notes. Return false
+ * on malloc failure, which means this function reported an error.
+ *
+ * To compute the number of jssrcnotes to allocate and pass in via notes, use
+ * the CG_COUNT_FINAL_SRCNOTES macro. This macro knows a lot about details of
+ * js_FinishTakingSrcNotes, SO DON'T CHANGE jsemit.c's js_FinishTakingSrcNotes
+ * FUNCTION WITHOUT CHECKING WHETHER THIS MACRO NEEDS CORRESPONDING CHANGES!
+ */
+#define CG_COUNT_FINAL_SRCNOTES(cg, cnt) \
+ JS_BEGIN_MACRO \
+ ptrdiff_t diff_ = CG_PROLOG_OFFSET(cg) - (cg)->prolog.lastNoteOffset; \
+ cnt = (cg)->prolog.noteCount + (cg)->main.noteCount + 1; \
+ if ((cg)->prolog.noteCount && \
+ (cg)->prolog.currentLine != (cg)->firstLine) { \
+ if (diff_ > SN_DELTA_MASK) \
+ cnt += JS_HOWMANY(diff_ - SN_DELTA_MASK, SN_XDELTA_MASK); \
+ cnt += 2 + (((cg)->firstLine > SN_3BYTE_OFFSET_MASK) << 1); \
+ } else if (diff_ > 0) { \
+ if (cg->main.noteCount) { \
+ jssrcnote *sn_ = (cg)->main.notes; \
+ diff_ -= SN_IS_XDELTA(sn_) \
+ ? SN_XDELTA_MASK - (*sn_ & SN_XDELTA_MASK) \
+ : SN_DELTA_MASK - (*sn_ & SN_DELTA_MASK); \
+ } \
+ if (diff_ > 0) \
+ cnt += JS_HOWMANY(diff_, SN_XDELTA_MASK); \
+ } \
+ JS_END_MACRO
+
+extern JSBool
+js_FinishTakingSrcNotes(JSContext *cx, JSCodeGenerator *cg, jssrcnote *notes);
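+
+/*
+ * Illustrative sketch (editorial addition, not part of the original header)
+ * of the caller pattern described above: size the stable note store with
+ * CG_COUNT_FINAL_SRCNOTES, allocate it, then hand it to
+ * js_FinishTakingSrcNotes, which reports its own error on failure.
+ */
+#if 0
+ uintN count;
+ jssrcnote *notes;
+
+ CG_COUNT_FINAL_SRCNOTES(cg, count);
+ notes = (jssrcnote *) JS_malloc(cx, count * sizeof(jssrcnote));
+ if (!notes || !js_FinishTakingSrcNotes(cx, cg, notes))
+ return JS_FALSE;
+#endif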
+
+/*
+ * Allocate cg->treeContext.tryCount notes (plus one for the end sentinel)
+ * from cx->tempPool and set up cg->tryBase/tryNext for exactly tryCount
+ * js_NewTryNote calls. The storage is freed by js_FinishCodeGenerator.
+ */
+extern JSBool
+js_AllocTryNotes(JSContext *cx, JSCodeGenerator *cg);
+
+/*
+ * Grab the next trynote slot in cg, filling it in appropriately.
+ */
+extern JSTryNote *
+js_NewTryNote(JSContext *cx, JSCodeGenerator *cg, ptrdiff_t start,
+ ptrdiff_t end, ptrdiff_t catchStart);
+
+/*
+ * Finish generating exception information into the space at notes. As with
+ * js_FinishTakingSrcNotes, the caller must use CG_COUNT_FINAL_TRYNOTES(cg, cnt) to
+ * preallocate enough space in a JSTryNote[] to pass as the notes parameter of
+ * js_FinishTakingTryNotes.
+ */
+#define CG_COUNT_FINAL_TRYNOTES(cg, cnt) \
+ JS_BEGIN_MACRO \
+ cnt = ((cg)->tryNext > (cg)->tryBase) \
+ ? PTRDIFF(cg->tryNext, cg->tryBase, JSTryNote) + 1 \
+ : 0; \
+ JS_END_MACRO
+
+extern void
+js_FinishTakingTryNotes(JSContext *cx, JSCodeGenerator *cg, JSTryNote *notes);
+
+JS_END_EXTERN_C
+
+#endif /* jsemit_h___ */
diff --git a/third_party/js-1.7/jsexn.c b/third_party/js-1.7/jsexn.c
new file mode 100644
index 0000000..e60f85e
--- /dev/null
+++ b/third_party/js-1.7/jsexn.c
@@ -0,0 +1,1348 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sw=4 et tw=78:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * JS standard exception implementation.
+ */
+
+#include "jsstddef.h"
+#include <stdlib.h>
+#include <string.h>
+#include "jstypes.h"
+#include "jsbit.h"
+#include "jsutil.h" /* Added by JSIFY */
+#include "jsprf.h"
+#include "jsapi.h"
+#include "jscntxt.h"
+#include "jsconfig.h"
+#include "jsdbgapi.h"
+#include "jsexn.h"
+#include "jsfun.h"
+#include "jsinterp.h"
+#include "jsnum.h"
+#include "jsopcode.h"
+#include "jsscript.h"
+
+/* Forward declarations for js_ErrorClass's initializer. */
+static JSBool
+Exception(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval);
+
+static void
+exn_finalize(JSContext *cx, JSObject *obj);
+
+static uint32
+exn_mark(JSContext *cx, JSObject *obj, void *arg);
+
+static void
+exn_finalize(JSContext *cx, JSObject *obj);
+
+static JSBool
+exn_enumerate(JSContext *cx, JSObject *obj);
+
+static JSBool
+exn_resolve(JSContext *cx, JSObject *obj, jsval id, uintN flags,
+ JSObject **objp);
+
+JSClass js_ErrorClass = {
+ js_Error_str,
+ JSCLASS_HAS_PRIVATE | JSCLASS_NEW_RESOLVE |
+ JSCLASS_HAS_CACHED_PROTO(JSProto_Error),
+ JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
+ exn_enumerate, (JSResolveOp)exn_resolve, JS_ConvertStub, exn_finalize,
+ NULL, NULL, NULL, Exception,
+ NULL, NULL, exn_mark, NULL
+};
+
+typedef struct JSStackTraceElem {
+ JSString *funName;
+ size_t argc;
+ const char *filename;
+ uintN ulineno;
+} JSStackTraceElem;
+
+typedef struct JSExnPrivate {
+ /* A copy of the JSErrorReport originally generated. */
+ JSErrorReport *errorReport;
+ JSString *message;
+ JSString *filename;
+ uintN lineno;
+ size_t stackDepth;
+ JSStackTraceElem stackElems[1];
+} JSExnPrivate;
+
+static JSString *
+StackTraceToString(JSContext *cx, JSExnPrivate *priv);
+
+static JSErrorReport *
+CopyErrorReport(JSContext *cx, JSErrorReport *report)
+{
+ /*
+ * We use a single malloc block to make a deep copy of JSErrorReport with
+ * the following layout:
+ * JSErrorReport
+ * array of copies of report->messageArgs
+ * jschar array with characters for all messageArgs
+ * jschar array with characters for ucmessage
+ * jschar array with characters for uclinebuf and uctokenptr
+ * char array with characters for linebuf and tokenptr
+ * char array with characters for filename
+ * Such layout together with the properties enforced by the following
+ * asserts does not need any extra alignment padding.
+ */
+ JS_STATIC_ASSERT(sizeof(JSErrorReport) % sizeof(const char *) == 0);
+ JS_STATIC_ASSERT(sizeof(const char *) % sizeof(jschar) == 0);
+
+ size_t filenameSize;
+ size_t linebufSize;
+ size_t uclinebufSize;
+ size_t ucmessageSize;
+ size_t i, argsArraySize, argsCopySize, argSize;
+ size_t mallocSize;
+ JSErrorReport *copy;
+ uint8 *cursor;
+
+#define JS_CHARS_SIZE(jschars) ((js_strlen(jschars) + 1) * sizeof(jschar))
+
+ filenameSize = report->filename ? strlen(report->filename) + 1 : 0;
+ linebufSize = report->linebuf ? strlen(report->linebuf) + 1 : 0;
+ uclinebufSize = report->uclinebuf ? JS_CHARS_SIZE(report->uclinebuf) : 0;
+ ucmessageSize = 0;
+ argsArraySize = 0;
+ argsCopySize = 0;
+ if (report->ucmessage) {
+ ucmessageSize = JS_CHARS_SIZE(report->ucmessage);
+ if (report->messageArgs) {
+ for (i = 0; report->messageArgs[i]; ++i)
+ argsCopySize += JS_CHARS_SIZE(report->messageArgs[i]);
+
+ /* Non-null messageArgs should have at least one non-null arg. */
+ JS_ASSERT(i != 0);
+ argsArraySize = (i + 1) * sizeof(const jschar *);
+ }
+ }
+
+ /*
+ * The mallocSize can not overflow since it represents the sum of the
+ * sizes of already allocated objects.
+ */
+ mallocSize = sizeof(JSErrorReport) + argsArraySize + argsCopySize +
+ ucmessageSize + uclinebufSize + linebufSize + filenameSize;
+ cursor = (uint8 *)JS_malloc(cx, mallocSize);
+ if (!cursor)
+ return NULL;
+
+ copy = (JSErrorReport *)cursor;
+ memset(cursor, 0, sizeof(JSErrorReport));
+ cursor += sizeof(JSErrorReport);
+
+ if (argsArraySize != 0) {
+ copy->messageArgs = (const jschar **)cursor;
+ cursor += argsArraySize;
+ for (i = 0; report->messageArgs[i]; ++i) {
+ copy->messageArgs[i] = (const jschar *)cursor;
+ argSize = JS_CHARS_SIZE(report->messageArgs[i]);
+ memcpy(cursor, report->messageArgs[i], argSize);
+ cursor += argSize;
+ }
+ copy->messageArgs[i] = NULL;
+ JS_ASSERT(cursor == (uint8 *)copy->messageArgs[0] + argsCopySize);
+ }
+
+ if (report->ucmessage) {
+ copy->ucmessage = (const jschar *)cursor;
+ memcpy(cursor, report->ucmessage, ucmessageSize);
+ cursor += ucmessageSize;
+ }
+
+ if (report->uclinebuf) {
+ copy->uclinebuf = (const jschar *)cursor;
+ memcpy(cursor, report->uclinebuf, uclinebufSize);
+ cursor += uclinebufSize;
+ if (report->uctokenptr) {
+ copy->uctokenptr = copy->uclinebuf + (report->uctokenptr -
+ report->uclinebuf);
+ }
+ }
+
+ if (report->linebuf) {
+ copy->linebuf = (const char *)cursor;
+ memcpy(cursor, report->linebuf, linebufSize);
+ cursor += linebufSize;
+ if (report->tokenptr) {
+ copy->tokenptr = copy->linebuf + (report->tokenptr -
+ report->linebuf);
+ }
+ }
+
+ if (report->filename) {
+ copy->filename = (const char *)cursor;
+ memcpy(cursor, report->filename, filenameSize);
+ }
+ JS_ASSERT(cursor + filenameSize == (uint8 *)copy + mallocSize);
+
+ /* Copy non-pointer members. */
+ copy->lineno = report->lineno;
+ copy->errorNumber = report->errorNumber;
+
+ /* Note that this is before it gets flagged with JSREPORT_EXCEPTION */
+ copy->flags = report->flags;
+
+#undef JS_CHARS_SIZE
+ return copy;
+}
+
+static jsval *
+GetStackTraceValueBuffer(JSExnPrivate *priv)
+{
+ /*
+ * We use extra memory after JSExnPrivate.stackElems to store jsvals that
+ * help to produce more informative stack traces. The following
+ * assert allows us to assume that no gap after stackElems is necessary to
+ * align the buffer properly.
+ */
+ JS_STATIC_ASSERT(sizeof(JSStackTraceElem) % sizeof(jsval) == 0);
+
+ return (jsval *)(priv->stackElems + priv->stackDepth);
+}
+
+static JSBool
+InitExnPrivate(JSContext *cx, JSObject *exnObject, JSString *message,
+ JSString *filename, uintN lineno, JSErrorReport *report)
+{
+ JSCheckAccessOp checkAccess;
+ JSErrorReporter older;
+ JSExceptionState *state;
+ jsval callerid, v;
+ JSStackFrame *fp, *fpstop;
+ size_t stackDepth, valueCount, size;
+ JSBool overflow;
+ JSExnPrivate *priv;
+ JSStackTraceElem *elem;
+ jsval *values;
+
+ JS_ASSERT(OBJ_GET_CLASS(cx, exnObject) == &js_ErrorClass);
+
+ /*
+ * Prepare stack trace data.
+ *
+ * Set aside any error reporter for cx and save its exception state
+ * so we can suppress any checkAccess failures. Such failures should stop
+ * the backtrace procedure, not result in a failure of this constructor.
+ */
+ checkAccess = cx->runtime->checkObjectAccess;
+ older = JS_SetErrorReporter(cx, NULL);
+ state = JS_SaveExceptionState(cx);
+
+ callerid = ATOM_KEY(cx->runtime->atomState.callerAtom);
+ stackDepth = 0;
+ valueCount = 0;
+ for (fp = cx->fp; fp; fp = fp->down) {
+ if (fp->fun && fp->argv) {
+ if (checkAccess) {
+ v = fp->argv[-2];
+ if (!JSVAL_IS_PRIMITIVE(v) &&
+ !checkAccess(cx, JSVAL_TO_OBJECT(v), callerid,
+ JSACC_READ, &v /* ignored */)) {
+ break;
+ }
+ }
+ valueCount += fp->argc;
+ }
+ ++stackDepth;
+ }
+ JS_RestoreExceptionState(cx, state);
+ JS_SetErrorReporter(cx, older);
+ fpstop = fp;
+
+ size = offsetof(JSExnPrivate, stackElems);
+ overflow = (stackDepth > ((size_t)-1 - size) / sizeof(JSStackTraceElem));
+ size += stackDepth * sizeof(JSStackTraceElem);
+ overflow |= (valueCount > ((size_t)-1 - size) / sizeof(jsval));
+ size += valueCount * sizeof(jsval);
+ if (overflow) {
+ JS_ReportOutOfMemory(cx);
+ return JS_FALSE;
+ }
+ priv = (JSExnPrivate *)JS_malloc(cx, size);
+ if (!priv)
+ return JS_FALSE;
+
+ /*
+ * We initialize errorReport with a copy of report only after setting the
+ * private slot; to keep the GC from seeing a junk value in the meantime,
+ * we clear the field here.
+ */
+ priv->errorReport = NULL;
+ priv->message = message;
+ priv->filename = filename;
+ priv->lineno = lineno;
+ priv->stackDepth = stackDepth;
+
+ values = GetStackTraceValueBuffer(priv);
+ elem = priv->stackElems;
+ for (fp = cx->fp; fp != fpstop; fp = fp->down) {
+ if (!fp->fun) {
+ elem->funName = NULL;
+ elem->argc = 0;
+ } else {
+ elem->funName = fp->fun->atom
+ ? ATOM_TO_STRING(fp->fun->atom)
+ : cx->runtime->emptyString;
+ elem->argc = fp->argc;
+ memcpy(values, fp->argv, fp->argc * sizeof(jsval));
+ values += fp->argc;
+ }
+ elem->ulineno = 0;
+ elem->filename = NULL;
+ if (fp->script) {
+ elem->filename = fp->script->filename;
+ if (fp->pc)
+ elem->ulineno = js_PCToLineNumber(cx, fp->script, fp->pc);
+ }
+ ++elem;
+ }
+ JS_ASSERT(priv->stackElems + stackDepth == elem);
+ JS_ASSERT(GetStackTraceValueBuffer(priv) + valueCount == values);
+
+ OBJ_SET_SLOT(cx, exnObject, JSSLOT_PRIVATE, PRIVATE_TO_JSVAL(priv));
+
+ if (report) {
+ /*
+ * Construct a new copy of the error report struct. We can't use the
+ * error report struct that was passed in, because it's allocated on
+ * the stack, and also because it may point to transient data in the
+ * JSTokenStream.
+ */
+ priv->errorReport = CopyErrorReport(cx, report);
+ if (!priv->errorReport) {
+ /* The finalizer releases priv since it is in the private slot. */
+ return JS_FALSE;
+ }
+ }
+
+ return JS_TRUE;
+}
+
+static JSExnPrivate *
+GetExnPrivate(JSContext *cx, JSObject *obj)
+{
+ jsval privateValue;
+ JSExnPrivate *priv;
+
+ JS_ASSERT(OBJ_GET_CLASS(cx, obj) == &js_ErrorClass);
+ privateValue = OBJ_GET_SLOT(cx, obj, JSSLOT_PRIVATE);
+ if (JSVAL_IS_VOID(privateValue))
+ return NULL;
+ priv = (JSExnPrivate *)JSVAL_TO_PRIVATE(privateValue);
+ JS_ASSERT(priv);
+ return priv;
+}
+
+static uint32
+exn_mark(JSContext *cx, JSObject *obj, void *arg)
+{
+ JSExnPrivate *priv;
+ JSStackTraceElem *elem;
+ size_t vcount, i;
+ jsval *vp, v;
+
+ priv = GetExnPrivate(cx, obj);
+ if (priv) {
+ GC_MARK(cx, priv->message, "exception message");
+ GC_MARK(cx, priv->filename, "exception filename");
+ elem = priv->stackElems;
+ for (vcount = i = 0; i != priv->stackDepth; ++i, ++elem) {
+ if (elem->funName)
+ GC_MARK(cx, elem->funName, "stack trace function name");
+ if (elem->filename)
+ js_MarkScriptFilename(elem->filename);
+ vcount += elem->argc;
+ }
+ vp = GetStackTraceValueBuffer(priv);
+ for (i = 0; i != vcount; ++i, ++vp) {
+ v = *vp;
+ if (JSVAL_IS_GCTHING(v))
+ GC_MARK(cx, JSVAL_TO_GCTHING(v), "stack trace argument");
+ }
+ }
+ return 0;
+}
+
+static void
+exn_finalize(JSContext *cx, JSObject *obj)
+{
+ JSExnPrivate *priv;
+
+ priv = GetExnPrivate(cx, obj);
+ if (priv) {
+ if (priv->errorReport)
+ JS_free(cx, priv->errorReport);
+ JS_free(cx, priv);
+ }
+}
+
+static JSBool
+exn_enumerate(JSContext *cx, JSObject *obj)
+{
+ JSAtomState *atomState;
+ uintN i;
+ JSAtom *atom;
+ JSObject *pobj;
+ JSProperty *prop;
+
+ JS_STATIC_ASSERT(sizeof(JSAtomState) <= (size_t)(uint16)-1);
+ static const uint16 offsets[] = {
+ (uint16)offsetof(JSAtomState, messageAtom),
+ (uint16)offsetof(JSAtomState, fileNameAtom),
+ (uint16)offsetof(JSAtomState, lineNumberAtom),
+ (uint16)offsetof(JSAtomState, stackAtom),
+ };
+
+ atomState = &cx->runtime->atomState;
+ for (i = 0; i != JS_ARRAY_LENGTH(offsets); ++i) {
+ atom = *(JSAtom **)((uint8 *)atomState + offsets[i]);
+ if (!js_LookupProperty(cx, obj, ATOM_TO_JSID(atom), &pobj, &prop))
+ return JS_FALSE;
+ if (prop)
+ OBJ_DROP_PROPERTY(cx, pobj, prop);
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+exn_resolve(JSContext *cx, JSObject *obj, jsval id, uintN flags,
+ JSObject **objp)
+{
+ JSExnPrivate *priv;
+ JSString *str;
+ JSAtom *atom;
+ JSString *stack;
+ const char *prop;
+ jsval v;
+
+ *objp = NULL;
+ priv = GetExnPrivate(cx, obj);
+ if (priv && JSVAL_IS_STRING(id)) {
+ str = JSVAL_TO_STRING(id);
+
+ atom = cx->runtime->atomState.messageAtom;
+ if (str == ATOM_TO_STRING(atom)) {
+ prop = js_message_str;
+ v = STRING_TO_JSVAL(priv->message);
+ goto define;
+ }
+
+ atom = cx->runtime->atomState.fileNameAtom;
+ if (str == ATOM_TO_STRING(atom)) {
+ prop = js_fileName_str;
+ v = STRING_TO_JSVAL(priv->filename);
+ goto define;
+ }
+
+ atom = cx->runtime->atomState.lineNumberAtom;
+ if (str == ATOM_TO_STRING(atom)) {
+ prop = js_lineNumber_str;
+ v = INT_TO_JSVAL(priv->lineno);
+ goto define;
+ }
+
+ atom = cx->runtime->atomState.stackAtom;
+ if (str == ATOM_TO_STRING(atom)) {
+ stack = StackTraceToString(cx, priv);
+ if (!stack)
+ return JS_FALSE;
+
+ /* Allow the GC to collect everything used to build the stack trace. */
+ priv->stackDepth = 0;
+ prop = js_stack_str;
+ v = STRING_TO_JSVAL(stack);
+ goto define;
+ }
+ }
+ return JS_TRUE;
+
+ define:
+ if (!JS_DefineProperty(cx, obj, prop, v, NULL, NULL, JSPROP_ENUMERATE))
+ return JS_FALSE;
+ *objp = obj;
+ return JS_TRUE;
+}
+
+JSErrorReport *
+js_ErrorFromException(JSContext *cx, jsval exn)
+{
+ JSObject *obj;
+ JSExnPrivate *priv;
+
+ if (JSVAL_IS_PRIMITIVE(exn))
+ return NULL;
+ obj = JSVAL_TO_OBJECT(exn);
+ if (OBJ_GET_CLASS(cx, obj) != &js_ErrorClass)
+ return NULL;
+ priv = GetExnPrivate(cx, obj);
+ if (!priv)
+ return NULL;
+ return priv->errorReport;
+}
+
+struct JSExnSpec {
+ int protoIndex;
+ const char *name;
+ JSProtoKey key;
+ JSNative native;
+};
+
+/*
+ * All *Error constructors share the same JSClass, js_ErrorClass. But each
+ * constructor function for an *Error class must have a distinct native 'call'
+ * function pointer, in order for instanceof to work properly across multiple
+ * standard class sets. See jsfun.c:fun_hasInstance.
+ */
+#define MAKE_EXCEPTION_CTOR(name) \
+static JSBool \
+name(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) \
+{ \
+ return Exception(cx, obj, argc, argv, rval); \
+}
+
+MAKE_EXCEPTION_CTOR(Error)
+MAKE_EXCEPTION_CTOR(InternalError)
+MAKE_EXCEPTION_CTOR(EvalError)
+MAKE_EXCEPTION_CTOR(RangeError)
+MAKE_EXCEPTION_CTOR(ReferenceError)
+MAKE_EXCEPTION_CTOR(SyntaxError)
+MAKE_EXCEPTION_CTOR(TypeError)
+MAKE_EXCEPTION_CTOR(URIError)
+
+#undef MAKE_EXCEPTION_CTOR
+
+static struct JSExnSpec exceptions[] = {
+ {JSEXN_NONE, js_Error_str, JSProto_Error, Error},
+ {JSEXN_ERR, js_InternalError_str, JSProto_InternalError, InternalError},
+ {JSEXN_ERR, js_EvalError_str, JSProto_EvalError, EvalError},
+ {JSEXN_ERR, js_RangeError_str, JSProto_RangeError, RangeError},
+ {JSEXN_ERR, js_ReferenceError_str, JSProto_ReferenceError, ReferenceError},
+ {JSEXN_ERR, js_SyntaxError_str, JSProto_SyntaxError, SyntaxError},
+ {JSEXN_ERR, js_TypeError_str, JSProto_TypeError, TypeError},
+ {JSEXN_ERR, js_URIError_str, JSProto_URIError, URIError},
+ {0, NULL, JSProto_Null, NULL}
+};
+
+static JSString *
+ValueToShortSource(JSContext *cx, jsval v)
+{
+ JSString *str;
+
+ /* Avoid toSource bloat and fallibility for object types. */
+ if (JSVAL_IS_PRIMITIVE(v)) {
+ str = js_ValueToSource(cx, v);
+ } else if (VALUE_IS_FUNCTION(cx, v)) {
+ /*
+ * XXX Avoid function decompilation bloat for now.
+ */
+ str = JS_GetFunctionId(JS_ValueToFunction(cx, v));
+ if (!str && !(str = js_ValueToSource(cx, v))) {
+ /*
+ * Continue to soldier on if the function couldn't be
+ * converted into a string.
+ */
+ JS_ClearPendingException(cx);
+ str = JS_NewStringCopyZ(cx, "[unknown function]");
+ }
+ } else {
+ /*
+ * XXX Avoid toString on objects, it takes too long and uses too much
+ * memory, for too many classes (see Mozilla bug 166743).
+ */
+ char buf[100];
+ JS_snprintf(buf, sizeof buf, "[object %s]",
+ OBJ_GET_CLASS(cx, JSVAL_TO_OBJECT(v))->name);
+ str = JS_NewStringCopyZ(cx, buf);
+ }
+ return str;
+}
+
+static JSString *
+StackTraceToString(JSContext *cx, JSExnPrivate *priv)
+{
+ jschar *stackbuf;
+ size_t stacklen, stackmax;
+ JSStackTraceElem *elem, *endElem;
+ jsval *values;
+ size_t i;
+ JSString *str;
+ const char *cp;
+ char ulnbuf[11];
+
+ /* After this point, failing control flow must goto bad. */
+ stackbuf = NULL;
+ stacklen = stackmax = 0;
+
+/* Limit the stackbuf length to a reasonable value to avoid overflow checks. */
+#define STACK_LENGTH_LIMIT JS_BIT(20)
+
+#define APPEND_CHAR_TO_STACK(c) \
+ JS_BEGIN_MACRO \
+ if (stacklen == stackmax) { \
+ void *ptr_; \
+ if (stackmax >= STACK_LENGTH_LIMIT) \
+ goto done; \
+ stackmax = stackmax ? 2 * stackmax : 64; \
+ ptr_ = JS_realloc(cx, stackbuf, (stackmax+1) * sizeof(jschar)); \
+ if (!ptr_) \
+ goto bad; \
+ stackbuf = ptr_; \
+ } \
+ stackbuf[stacklen++] = (c); \
+ JS_END_MACRO
+
+#define APPEND_STRING_TO_STACK(str) \
+ JS_BEGIN_MACRO \
+ JSString *str_ = str; \
+ size_t length_ = JSSTRING_LENGTH(str_); \
+ if (length_ > stackmax - stacklen) { \
+ void *ptr_; \
+ if (stackmax >= STACK_LENGTH_LIMIT || \
+ length_ >= STACK_LENGTH_LIMIT - stacklen) { \
+ goto done; \
+ } \
+ stackmax = JS_BIT(JS_CeilingLog2(stacklen + length_)); \
+ ptr_ = JS_realloc(cx, stackbuf, (stackmax+1) * sizeof(jschar)); \
+ if (!ptr_) \
+ goto bad; \
+ stackbuf = ptr_; \
+ } \
+ js_strncpy(stackbuf + stacklen, JSSTRING_CHARS(str_), length_); \
+ stacklen += length_; \
+ JS_END_MACRO
+
+ values = GetStackTraceValueBuffer(priv);
+ elem = priv->stackElems;
+ for (endElem = elem + priv->stackDepth; elem != endElem; elem++) {
+ if (elem->funName) {
+ APPEND_STRING_TO_STACK(elem->funName);
+ APPEND_CHAR_TO_STACK('(');
+ for (i = 0; i != elem->argc; i++, values++) {
+ if (i > 0)
+ APPEND_CHAR_TO_STACK(',');
+ str = ValueToShortSource(cx, *values);
+ if (!str)
+ goto bad;
+ APPEND_STRING_TO_STACK(str);
+ }
+ APPEND_CHAR_TO_STACK(')');
+ }
+ APPEND_CHAR_TO_STACK('@');
+ if (elem->filename) {
+ for (cp = elem->filename; *cp; cp++)
+ APPEND_CHAR_TO_STACK(*cp);
+ }
+ APPEND_CHAR_TO_STACK(':');
+ JS_snprintf(ulnbuf, sizeof ulnbuf, "%u", elem->ulineno);
+ for (cp = ulnbuf; *cp; cp++)
+ APPEND_CHAR_TO_STACK(*cp);
+ APPEND_CHAR_TO_STACK('\n');
+ }
+#undef APPEND_CHAR_TO_STACK
+#undef APPEND_STRING_TO_STACK
+#undef STACK_LENGTH_LIMIT
+
+ done:
+ if (stacklen == 0) {
+ JS_ASSERT(!stackbuf);
+ return cx->runtime->emptyString;
+ }
+ if (stacklen < stackmax) {
+ /*
+ * Realloc can fail when shrinking on some FreeBSD versions; if it does,
+ * simply let the oversized allocation be owned by the string in that
+ * rare case.
+ */
+ void *shrunk = JS_realloc(cx, stackbuf, (stacklen+1) * sizeof(jschar));
+ if (shrunk)
+ stackbuf = shrunk;
+ }
+
+ stackbuf[stacklen] = 0;
+ str = js_NewString(cx, stackbuf, stacklen, 0);
+ if (str)
+ return str;
+
+ bad:
+ if (stackbuf)
+ JS_free(cx, stackbuf);
+ return NULL;
+}
+
+/* XXXbe Consolidate the ugly truth that we don't treat filename as UTF-8
+ with these two functions. */
+static JSString *
+FilenameToString(JSContext *cx, const char *filename)
+{
+ return JS_NewStringCopyZ(cx, filename);
+}
+
+static const char *
+StringToFilename(JSContext *cx, JSString *str)
+{
+ return JS_GetStringBytes(str);
+}
+
+static JSBool
+Exception(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSBool ok;
+ uint32 lineno;
+ JSString *message, *filename;
+ JSStackFrame *fp;
+
+ if (cx->creatingException)
+ return JS_FALSE;
+ cx->creatingException = JS_TRUE;
+
+ if (!(cx->fp->flags & JSFRAME_CONSTRUCTING)) {
+ /*
+ * ECMA ed. 3, 15.11.1 requires Error, etc., to construct even when
+ * called as functions, without operator new. But as we do not give
+ * each constructor a distinct JSClass, whose .name member is used by
+ * js_NewObject to find the class prototype, we must get the class
+ * prototype ourselves.
+ */
+ ok = OBJ_GET_PROPERTY(cx, JSVAL_TO_OBJECT(argv[-2]),
+ ATOM_TO_JSID(cx->runtime->atomState
+ .classPrototypeAtom),
+ rval);
+ if (!ok)
+ goto out;
+ obj = js_NewObject(cx, &js_ErrorClass, JSVAL_TO_OBJECT(*rval), NULL);
+ if (!obj) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ *rval = OBJECT_TO_JSVAL(obj);
+ }
+
+ /*
+ * If it's a new object of class Exception, then null out the private
+ * data so that the finalizer doesn't attempt to free it.
+ */
+ if (OBJ_GET_CLASS(cx, obj) == &js_ErrorClass)
+ OBJ_SET_SLOT(cx, obj, JSSLOT_PRIVATE, JSVAL_VOID);
+
+ /* Set the 'message' property. */
+ if (argc != 0) {
+ message = js_ValueToString(cx, argv[0]);
+ if (!message) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ argv[0] = STRING_TO_JSVAL(message);
+ } else {
+ message = cx->runtime->emptyString;
+ }
+
+ /* Set the 'fileName' property. */
+ if (argc > 1) {
+ filename = js_ValueToString(cx, argv[1]);
+ if (!filename) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ argv[1] = STRING_TO_JSVAL(filename);
+ fp = NULL;
+ } else {
+ fp = JS_GetScriptedCaller(cx, NULL);
+ if (fp) {
+ filename = FilenameToString(cx, fp->script->filename);
+ if (!filename) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ } else {
+ filename = cx->runtime->emptyString;
+ }
+ }
+
+ /* Set the 'lineNumber' property. */
+ if (argc > 2) {
+ ok = js_ValueToECMAUint32(cx, argv[2], &lineno);
+ if (!ok)
+ goto out;
+ } else {
+ if (!fp)
+ fp = JS_GetScriptedCaller(cx, NULL);
+ lineno = (fp && fp->pc) ? js_PCToLineNumber(cx, fp->script, fp->pc) : 0;
+ }
+
+ ok = (OBJ_GET_CLASS(cx, obj) != &js_ErrorClass) ||
+ InitExnPrivate(cx, obj, message, filename, lineno, NULL);
+
+ out:
+ cx->creatingException = JS_FALSE;
+ return ok;
+}
+
+/*
+ * Convert to string.
+ *
+ * This method only uses JavaScript-modifiable properties name, message. It
+ * is left to the host to check for private data and report filename and line
+ * number information along with this message.
+ */
+static JSBool
+exn_toString(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsval v;
+ JSString *name, *message, *result;
+ jschar *chars, *cp;
+ size_t name_length, message_length, length;
+
+ if (!OBJ_GET_PROPERTY(cx, obj,
+ ATOM_TO_JSID(cx->runtime->atomState.nameAtom),
+ &v)) {
+ return JS_FALSE;
+ }
+ name = JSVAL_IS_STRING(v) ? JSVAL_TO_STRING(v) : cx->runtime->emptyString;
+ *rval = STRING_TO_JSVAL(name);
+
+ if (!JS_GetProperty(cx, obj, js_message_str, &v))
+ return JS_FALSE;
+ message = JSVAL_IS_STRING(v) ? JSVAL_TO_STRING(v)
+ : cx->runtime->emptyString;
+
+ if (JSSTRING_LENGTH(message) != 0) {
+ name_length = JSSTRING_LENGTH(name);
+ message_length = JSSTRING_LENGTH(message);
+ length = (name_length ? name_length + 2 : 0) + message_length;
+ cp = chars = (jschar*) JS_malloc(cx, (length + 1) * sizeof(jschar));
+ if (!chars)
+ return JS_FALSE;
+
+ if (name_length) {
+ js_strncpy(cp, JSSTRING_CHARS(name), name_length);
+ cp += name_length;
+ *cp++ = ':'; *cp++ = ' ';
+ }
+ js_strncpy(cp, JSSTRING_CHARS(message), message_length);
+ cp += message_length;
+ *cp = 0;
+
+ result = js_NewString(cx, chars, length, 0);
+ if (!result) {
+ JS_free(cx, chars);
+ return JS_FALSE;
+ }
+ } else {
+ result = name;
+ }
+
+ *rval = STRING_TO_JSVAL(result);
+ return JS_TRUE;
+}
+
+#if JS_HAS_TOSOURCE
+/*
+ * Return a string that may eval to something similar to the original object.
+ */
+static JSBool
+exn_toSource(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsval *vp;
+ JSString *name, *message, *filename, *lineno_as_str, *result;
+ uint32 lineno;
+ size_t lineno_length, name_length, message_length, filename_length, length;
+ jschar *chars, *cp;
+
+ vp = argv + argc; /* beginning of explicit local roots */
+
+ if (!OBJ_GET_PROPERTY(cx, obj,
+ ATOM_TO_JSID(cx->runtime->atomState.nameAtom),
+ rval)) {
+ return JS_FALSE;
+ }
+ name = js_ValueToString(cx, *rval);
+ if (!name)
+ return JS_FALSE;
+ *rval = STRING_TO_JSVAL(name);
+
+ if (!JS_GetProperty(cx, obj, js_message_str, &vp[0]) ||
+ !(message = js_ValueToSource(cx, vp[0]))) {
+ return JS_FALSE;
+ }
+ vp[0] = STRING_TO_JSVAL(message);
+
+ if (!JS_GetProperty(cx, obj, js_fileName_str, &vp[1]) ||
+ !(filename = js_ValueToSource(cx, vp[1]))) {
+ return JS_FALSE;
+ }
+ vp[1] = STRING_TO_JSVAL(filename);
+
+ if (!JS_GetProperty(cx, obj, js_lineNumber_str, &vp[2]) ||
+ !js_ValueToECMAUint32 (cx, vp[2], &lineno)) {
+ return JS_FALSE;
+ }
+
+ if (lineno != 0) {
+ lineno_as_str = js_ValueToString(cx, vp[2]);
+ if (!lineno_as_str)
+ return JS_FALSE;
+ lineno_length = JSSTRING_LENGTH(lineno_as_str);
+ } else {
+ lineno_as_str = NULL;
+ lineno_length = 0;
+ }
+
+ /* Magic 8, for the characters in ``(new ())''. */
+ name_length = JSSTRING_LENGTH(name);
+ message_length = JSSTRING_LENGTH(message);
+ length = 8 + name_length + message_length;
+
+ filename_length = JSSTRING_LENGTH(filename);
+ if (filename_length != 0) {
+ /* append filename as ``, {filename}'' */
+ length += 2 + filename_length;
+ if (lineno_as_str) {
+ /* append lineno as ``, {lineno_as_str}'' */
+ length += 2 + lineno_length;
+ }
+ } else {
+ if (lineno_as_str) {
+ /*
+ * no filename, but have line number,
+ * need to append ``, "", {lineno_as_str}''
+ */
+ length += 6 + lineno_length;
+ }
+ }
+
+ cp = chars = (jschar*) JS_malloc(cx, (length + 1) * sizeof(jschar));
+ if (!chars)
+ return JS_FALSE;
+
+ *cp++ = '('; *cp++ = 'n'; *cp++ = 'e'; *cp++ = 'w'; *cp++ = ' ';
+ js_strncpy(cp, JSSTRING_CHARS(name), name_length);
+ cp += name_length;
+ *cp++ = '(';
+ if (message_length != 0) {
+ js_strncpy(cp, JSSTRING_CHARS(message), message_length);
+ cp += message_length;
+ }
+
+ if (filename_length != 0) {
+ /* append filename as ``, {filename}'' */
+ *cp++ = ','; *cp++ = ' ';
+ js_strncpy(cp, JSSTRING_CHARS(filename), filename_length);
+ cp += filename_length;
+ } else {
+ if (lineno_as_str) {
+ /*
+ * no filename, but have line number,
+ * need to append ``, "", {lineno_as_str}''
+ */
+ *cp++ = ','; *cp++ = ' '; *cp++ = '"'; *cp++ = '"';
+ }
+ }
+ if (lineno_as_str) {
+ /* append lineno as ``, {lineno_as_str}'' */
+ *cp++ = ','; *cp++ = ' ';
+ js_strncpy(cp, JSSTRING_CHARS(lineno_as_str), lineno_length);
+ cp += lineno_length;
+ }
+
+ *cp++ = ')'; *cp++ = ')'; *cp = 0;
+
+ result = js_NewString(cx, chars, length, 0);
+ if (!result) {
+ JS_free(cx, chars);
+ return JS_FALSE;
+ }
+ *rval = STRING_TO_JSVAL(result);
+ return JS_TRUE;
+}
+#endif
+
+static JSFunctionSpec exception_methods[] = {
+#if JS_HAS_TOSOURCE
+ {js_toSource_str, exn_toSource, 0,0,3},
+#endif
+ {js_toString_str, exn_toString, 0,0,0},
+ {0,0,0,0,0}
+};
+
+JSObject *
+js_InitExceptionClasses(JSContext *cx, JSObject *obj)
+{
+ JSObject *obj_proto, *protos[JSEXN_LIMIT];
+ int i;
+
+ /*
+ * If lazy class initialization occurs for any Error subclass, then all
+ * classes are initialized, starting with Error. To avoid reentry and
+ * redundant initialization, we must not pass a null proto parameter to
+ * js_NewObject below, when called for the Error superclass. We need to
+ * ensure that Object.prototype is the proto of Error.prototype.
+ *
+ * See the equivalent code to ensure that parent_proto is non-null when
+ * JS_InitClass calls js_NewObject, in jsapi.c.
+ */
+ if (!js_GetClassPrototype(cx, obj, INT_TO_JSID(JSProto_Object),
+ &obj_proto)) {
+ return NULL;
+ }
+
+ if (!js_EnterLocalRootScope(cx))
+ return NULL;
+
+ /* Initialize the prototypes first. */
+ for (i = 0; exceptions[i].name != 0; i++) {
+ JSAtom *atom;
+ JSFunction *fun;
+ JSObject *funobj;
+ JSString *nameString;
+ int protoIndex = exceptions[i].protoIndex;
+
+ /* Make the prototype for the current constructor name. */
+ protos[i] = js_NewObject(cx, &js_ErrorClass,
+ (protoIndex != JSEXN_NONE)
+ ? protos[protoIndex]
+ : obj_proto,
+ obj);
+ if (!protos[i])
+ break;
+
+ /* So exn_finalize knows whether to destroy private data. */
+ OBJ_SET_SLOT(cx, protos[i], JSSLOT_PRIVATE, JSVAL_VOID);
+
+ /* Make a constructor function for the current name. */
+ atom = cx->runtime->atomState.classAtoms[exceptions[i].key];
+ fun = js_DefineFunction(cx, obj, atom, exceptions[i].native, 3, 0);
+ if (!fun)
+ break;
+
+ /* Make this constructor make objects of class Exception. */
+ fun->clasp = &js_ErrorClass;
+
+ /* Extract the constructor object. */
+ funobj = fun->object;
+
+ /* Make the prototype and constructor links. */
+ if (!js_SetClassPrototype(cx, funobj, protos[i],
+ JSPROP_READONLY | JSPROP_PERMANENT)) {
+ break;
+ }
+
+ /* proto bootstrap bit from JS_InitClass omitted. */
+ nameString = JS_NewStringCopyZ(cx, exceptions[i].name);
+ if (!nameString)
+ break;
+
+ /* Add the name property to the prototype. */
+ if (!JS_DefineProperty(cx, protos[i], js_name_str,
+ STRING_TO_JSVAL(nameString),
+ NULL, NULL,
+ JSPROP_ENUMERATE)) {
+ break;
+ }
+
+ /* Finally, stash the constructor for later uses. */
+ if (!js_SetClassObject(cx, obj, exceptions[i].key, funobj))
+ break;
+ }
+
+ js_LeaveLocalRootScope(cx);
+ if (exceptions[i].name)
+ return NULL;
+
+ /*
+ * Add an empty message property. (To Exception.prototype only,
+ * because this property will be the same for all the exception
+ * protos.)
+ */
+ if (!JS_DefineProperty(cx, protos[0], js_message_str,
+ STRING_TO_JSVAL(cx->runtime->emptyString),
+ NULL, NULL, JSPROP_ENUMERATE)) {
+ return NULL;
+ }
+ if (!JS_DefineProperty(cx, protos[0], js_fileName_str,
+ STRING_TO_JSVAL(cx->runtime->emptyString),
+ NULL, NULL, JSPROP_ENUMERATE)) {
+ return NULL;
+ }
+ if (!JS_DefineProperty(cx, protos[0], js_lineNumber_str,
+ INT_TO_JSVAL(0),
+ NULL, NULL, JSPROP_ENUMERATE)) {
+ return NULL;
+ }
+
+ /*
+ * Add methods only to Exception.prototype, because ostensibly all
+ * exception types delegate to that.
+ */
+ if (!JS_DefineFunctions(cx, protos[0], exception_methods))
+ return NULL;
+
+ return protos[0];
+}
+
+const JSErrorFormatString*
+js_GetLocalizedErrorMessage(JSContext* cx, void *userRef, const char *locale, const uintN errorNumber)
+{
+ const JSErrorFormatString *errorString = NULL;
+
+ if (cx->localeCallbacks && cx->localeCallbacks->localeGetErrorMessage) {
+ errorString = cx->localeCallbacks
+ ->localeGetErrorMessage(userRef, locale, errorNumber);
+ }
+ if (!errorString)
+ errorString = js_GetErrorMessage(userRef, locale, errorNumber);
+ return errorString;
+}
+
+#if defined ( DEBUG_mccabe ) && defined ( PRINTNAMES )
+/* For use below... get character strings for error name and exception name */
+static struct exnname { char *name; char *exception; } errortoexnname[] = {
+#define MSG_DEF(name, number, count, exception, format) \
+ {#name, #exception},
+#include "js.msg"
+#undef MSG_DEF
+};
+#endif /* DEBUG_mccabe && PRINTNAMES */
+
+JSBool
+js_ErrorToException(JSContext *cx, const char *message, JSErrorReport *reportp)
+{
+ JSErrNum errorNumber;
+ const JSErrorFormatString *errorString;
+ JSExnType exn;
+ jsval tv[4];
+ JSTempValueRooter tvr;
+ JSBool ok;
+ JSObject *errProto, *errObject;
+ JSString *messageStr, *filenameStr;
+
+ /*
+ * Tell our caller to report immediately if cx has no active frames, or if
+ * this report is just a warning.
+ */
+ JS_ASSERT(reportp);
+ if (!cx->fp || JSREPORT_IS_WARNING(reportp->flags))
+ return JS_FALSE;
+
+ /* Find the exception index associated with this error. */
+ errorNumber = (JSErrNum) reportp->errorNumber;
+ errorString = js_GetLocalizedErrorMessage(cx, NULL, NULL, errorNumber);
+ exn = errorString ? errorString->exnType : JSEXN_NONE;
+ JS_ASSERT(exn < JSEXN_LIMIT);
+
+#if defined( DEBUG_mccabe ) && defined ( PRINTNAMES )
+ /* Print the error name and the associated exception name to stderr */
+ fprintf(stderr, "%s\t%s\n",
+ errortoexnname[errorNumber].name,
+ errortoexnname[errorNumber].exception);
+#endif
+
+ /*
+ * Return false (no exception raised) if no exception is associated
+ * with the given error number.
+ */
+ if (exn == JSEXN_NONE)
+ return JS_FALSE;
+
+ /*
+ * Prevent runaway recursion, just as the Exception native constructor
+ * must do, via cx->creatingException. If an out-of-memory error occurs,
+ * no exception object will be created, but we don't assume that OOM is
+ * the only kind of error that subroutines of this function called below
+ * might raise.
+ */
+ if (cx->creatingException)
+ return JS_FALSE;
+
+ /* After this point the control must flow through the label out. */
+ cx->creatingException = JS_TRUE;
+
+ /* Protect the newly-created strings below from nesting GCs. */
+ memset(tv, 0, sizeof tv);
+ JS_PUSH_TEMP_ROOT(cx, sizeof tv / sizeof tv[0], tv, &tvr);
+
+ /*
+ * Try to get an appropriate prototype by looking up the corresponding
+ * exception constructor name in the scope chain of the current context's
+ * top stack frame, or in the global object if no frame is active.
+ */
+ ok = js_GetClassPrototype(cx, NULL, INT_TO_JSID(exceptions[exn].key),
+ &errProto);
+ if (!ok)
+ goto out;
+ tv[0] = OBJECT_TO_JSVAL(errProto);
+
+ errObject = js_NewObject(cx, &js_ErrorClass, errProto, NULL);
+ if (!errObject) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ tv[1] = OBJECT_TO_JSVAL(errObject);
+
+ messageStr = JS_NewStringCopyZ(cx, message);
+ if (!messageStr) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ tv[2] = STRING_TO_JSVAL(messageStr);
+
+ filenameStr = JS_NewStringCopyZ(cx, reportp->filename);
+ if (!filenameStr) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ tv[3] = STRING_TO_JSVAL(filenameStr);
+
+ ok = InitExnPrivate(cx, errObject, messageStr, filenameStr,
+ reportp->lineno, reportp);
+ if (!ok)
+ goto out;
+
+ JS_SetPendingException(cx, OBJECT_TO_JSVAL(errObject));
+
+ /* Flag the error report passed in to indicate an exception was raised. */
+ reportp->flags |= JSREPORT_EXCEPTION;
+
+out:
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ cx->creatingException = JS_FALSE;
+ return ok;
+}
+
+JSBool
+js_ReportUncaughtException(JSContext *cx)
+{
+ jsval exn;
+ JSObject *exnObject;
+ jsval vp[5];
+ JSTempValueRooter tvr;
+ JSErrorReport *reportp, report;
+ JSString *str;
+ const char *bytes;
+ JSBool ok;
+
+ if (!JS_IsExceptionPending(cx))
+ return JS_TRUE;
+
+ if (!JS_GetPendingException(cx, &exn))
+ return JS_FALSE;
+
+ /*
+ * Because js_ValueToString below could error and an exception object
+ * could become unrooted, we must root exnObject. Later, if exnObject is
+ * non-null, we need to root other intermediates, so push a temporary
+ * root array (vp) wide enough to protect all of these values.
+ */
+ if (JSVAL_IS_PRIMITIVE(exn)) {
+ exnObject = NULL;
+ } else {
+ exnObject = JSVAL_TO_OBJECT(exn);
+ vp[0] = exn;
+ memset(vp + 1, 0, sizeof vp - sizeof vp[0]);
+ JS_PUSH_TEMP_ROOT(cx, JS_ARRAY_LENGTH(vp), vp, &tvr);
+ }
+
+ JS_ClearPendingException(cx);
+ reportp = js_ErrorFromException(cx, exn);
+
+ /* XXX L10N angels cry once again (see also jsemit.c, /L10N gaffes/) */
+ str = js_ValueToString(cx, exn);
+ if (!str) {
+ bytes = "unknown (can't convert to string)";
+ } else {
+ if (exnObject)
+ vp[1] = STRING_TO_JSVAL(str);
+ bytes = js_GetStringBytes(cx->runtime, str);
+ }
+ ok = JS_TRUE;
+
+ if (!reportp &&
+ exnObject &&
+ OBJ_GET_CLASS(cx, exnObject) == &js_ErrorClass) {
+ const char *filename;
+ uint32 lineno;
+
+ ok = JS_GetProperty(cx, exnObject, js_message_str, &vp[2]);
+ if (!ok)
+ goto out;
+ if (JSVAL_IS_STRING(vp[2]))
+ bytes = JS_GetStringBytes(JSVAL_TO_STRING(vp[2]));
+
+ ok = JS_GetProperty(cx, exnObject, js_fileName_str, &vp[3]);
+ if (!ok)
+ goto out;
+ str = js_ValueToString(cx, vp[3]);
+ if (!str) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ filename = StringToFilename(cx, str);
+
+ ok = JS_GetProperty(cx, exnObject, js_lineNumber_str, &vp[4]);
+ if (!ok)
+ goto out;
+ ok = js_ValueToECMAUint32 (cx, vp[4], &lineno);
+ if (!ok)
+ goto out;
+
+ reportp = &report;
+ memset(&report, 0, sizeof report);
+ report.filename = filename;
+ report.lineno = (uintN) lineno;
+ }
+
+ if (!reportp) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_UNCAUGHT_EXCEPTION, bytes);
+ } else {
+ /* Flag the error as an exception. */
+ reportp->flags |= JSREPORT_EXCEPTION;
+ js_ReportErrorAgain(cx, bytes, reportp);
+ }
+
+out:
+ if (exnObject)
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ return ok;
+}
diff --git a/third_party/js-1.7/jsexn.h b/third_party/js-1.7/jsexn.h
new file mode 100644
index 0000000..58cb984
--- /dev/null
+++ b/third_party/js-1.7/jsexn.h
@@ -0,0 +1,96 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * JS runtime exception classes.
+ */
+
+#ifndef jsexn_h___
+#define jsexn_h___
+
+JS_BEGIN_EXTERN_C
+
+extern JSClass js_ErrorClass;
+
+/*
+ * Initialize the exception constructor/prototype hierarchy.
+ */
+extern JSObject *
+js_InitExceptionClasses(JSContext *cx, JSObject *obj);
+
+/*
+ * Given a JSErrorReport, check to see if there is an exception associated with
+ * the error number. If there is, then create an appropriate exception object,
+ * set it as the pending exception, and set the JSREPORT_EXCEPTION flag on the
+ * error report. Exception-aware host error reporters should probably ignore
+ * error reports so flagged. Returns JS_TRUE if an associated exception is
+ * found and set, JS_FALSE otherwise.
+ */
+extern JSBool
+js_ErrorToException(JSContext *cx, const char *message, JSErrorReport *reportp);
+
+/*
+ * Called if a JS API call to js_Execute or js_InternalCall fails; calls the
+ * error reporter with the error report associated with any uncaught exception
+ * that has been raised. Returns true if there was an exception pending, and
+ * the error reporter was actually called.
+ *
+ * The JSErrorReport * that the error reporter is called with is currently
+ * associated with a JavaScript object, and is not guaranteed to persist after
+ * the object is collected. Any persistent uses of the JSErrorReport contents
+ * should make their own copy.
+ *
+ * The flags field of the JSErrorReport will have the JSREPORT_EXCEPTION flag
+ * set; embeddings that want to silently propagate JavaScript exceptions to
+ * other contexts may want to use an error reporter that ignores errors with
+ * this flag.
+ */
+extern JSBool
+js_ReportUncaughtException(JSContext *cx);
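+
+/*
+ * Illustrative sketch (editorial addition, not part of the original header)
+ * of an embedding error reporter that ignores reports already converted to
+ * exceptions, as the comment above suggests; the function name is
+ * hypothetical and <stdio.h> is assumed.
+ */
+#if 0
+static void
+ExampleErrorReporter(JSContext *cx, const char *message, JSErrorReport *report)
+{
+ if (report && (report->flags & JSREPORT_EXCEPTION))
+ return; /* script code will see or rethrow the exception */
+ fprintf(stderr, "%s:%u: %s\n",
+ (report && report->filename) ? report->filename : "<no filename>",
+ report ? (unsigned) report->lineno : 0,
+ message);
+}
+#endif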
+
+extern JSErrorReport *
+js_ErrorFromException(JSContext *cx, jsval exn);
+
+extern const JSErrorFormatString *
+js_GetLocalizedErrorMessage(JSContext* cx, void *userRef, const char *locale,
+ const uintN errorNumber);
+
+JS_END_EXTERN_C
+
+#endif /* jsexn_h___ */
diff --git a/third_party/js-1.7/jsfile.c b/third_party/js-1.7/jsfile.c
new file mode 100644
index 0000000..ed1c4e8
--- /dev/null
+++ b/third_party/js-1.7/jsfile.c
@@ -0,0 +1,2735 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sw=4 et tw=80:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * JS File object
+ */
+#if JS_HAS_FILE_OBJECT
+
+#include "jsstddef.h"
+#include "jsfile.h"
+
+/* ----------------- Platform-specific includes and defines ----------------- */
+#if defined(XP_WIN) || defined(XP_OS2)
+# include <direct.h>
+# include <io.h>
+# include <sys/types.h>
+# include <sys/stat.h>
+# define FILESEPARATOR '\\'
+# define FILESEPARATOR2 '/'
+# define CURRENT_DIR "c:\\"
+# define POPEN _popen
+# define PCLOSE _pclose
+#elif defined(XP_UNIX) || defined(XP_BEOS)
+# include <strings.h>
+# include <stdio.h>
+# include <stdlib.h>
+# include <unistd.h>
+# define FILESEPARATOR '/'
+# define FILESEPARATOR2 '\0'
+# define CURRENT_DIR "/"
+# define POPEN popen
+# define PCLOSE pclose
+#endif
+
+/* --------------- Platform-independent includes and defines ---------------- */
+#include "jsapi.h"
+#include "jsatom.h"
+#include "jscntxt.h"
+#include "jsdate.h"
+#include "jsdbgapi.h"
+#include "jsemit.h"
+#include "jsfun.h"
+#include "jslock.h"
+#include "jsobj.h"
+#include "jsparse.h"
+#include "jsscan.h"
+#include "jsscope.h"
+#include "jsscript.h"
+#include "jsstr.h"
+#include "jsutil.h" /* Added by JSIFY */
+#include <string.h>
+
+/* NSPR dependencies */
+#include "prio.h"
+#include "prerror.h"
+
+#define SPECIAL_FILE_STRING "Special File"
+#define CURRENTDIR_PROPERTY "currentDir"
+#define SEPARATOR_PROPERTY "separator"
+#define FILE_CONSTRUCTOR "File"
+#define PIPE_SYMBOL '|'
+
+#define ASCII 0
+#define UTF8 1
+#define UCS2 2
+
+#define asciistring "text"
+#define utfstring "binary"
+#define unicodestring "unicode"
+
+#define MAX_PATH_LENGTH 1024
+#define MODE_SIZE 256
+#define NUMBER_SIZE 32
+#define MAX_LINE_LENGTH 256
+#define URL_PREFIX "file://"
+
+#define STDINPUT_NAME "Standard input stream"
+#define STDOUTPUT_NAME "Standard output stream"
+#define STDERROR_NAME "Standard error stream"
+
+#define RESOLVE_PATH js_canonicalPath /* js_absolutePath */
+
+/* Error handling */
+typedef enum JSFileErrNum {
+#define MSG_DEF(name, number, count, exception, format) \
+ name = number,
+#include "jsfile.msg"
+#undef MSG_DEF
+ JSFileErr_Limit
+#undef MSGDEF
+} JSFileErrNum;
+
+#define JSFILE_HAS_DFLT_MSG_STRINGS 1
+
+JSErrorFormatString JSFile_ErrorFormatString[JSFileErr_Limit] = {
+#if JSFILE_HAS_DFLT_MSG_STRINGS
+#define MSG_DEF(name, number, count, exception, format) \
+ { format, count },
+#else
+#define MSG_DEF(name, number, count, exception, format) \
+ { NULL, count },
+#endif
+#include "jsfile.msg"
+#undef MSG_DEF
+};
+
+const JSErrorFormatString *
+JSFile_GetErrorMessage(void *userRef, const char *locale,
+ const uintN errorNumber)
+{
+ if ((errorNumber > 0) && (errorNumber < JSFileErr_Limit))
+ return &JSFile_ErrorFormatString[errorNumber];
+ else
+ return NULL;
+}
+
+#define JSFILE_CHECK_NATIVE(op) \
+ if (file->isNative) { \
+ JS_ReportWarning(cx, "Cannot call or access \"%s\" on native file %s",\
+ op, file->path); \
+ goto out; \
+ }
+
+#define JSFILE_CHECK_WRITE \
+ if (!file->isOpen) { \
+ JS_ReportWarning(cx, \
+ "File %s is closed, will open it for writing, proceeding", \
+ file->path); \
+ js_FileOpen(cx, obj, file, "write,append,create"); \
+ } \
+ if (!js_canWrite(cx, file)) { \
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL, \
+ JSFILEMSG_CANNOT_WRITE, file->path); \
+ goto out; \
+ }
+
+#define JSFILE_CHECK_READ \
+ if (!file->isOpen) { \
+ JS_ReportWarning(cx, \
+ "File %s is closed, will open it for reading, proceeding", \
+ file->path); \
+ js_FileOpen(cx, obj, file, "read"); \
+ } \
+ if (!js_canRead(cx, file)) { \
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL, \
+ JSFILEMSG_CANNOT_READ, file->path); \
+ goto out; \
+ }
+
+#define JSFILE_CHECK_OPEN(op) \
+ if (!file->isOpen) { \
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL, \
+ JSFILEMSG_FILE_MUST_BE_CLOSED, op); \
+ goto out; \
+ }
+
+#define JSFILE_CHECK_CLOSED(op) \
+ if (file->isOpen) { \
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL, \
+ JSFILEMSG_FILE_MUST_BE_OPEN, op); \
+ goto out; \
+ }
+
+#define JSFILE_CHECK_ONE_ARG(op) \
+ if (argc != 1) { \
+ char str[NUMBER_SIZE]; \
+ sprintf(str, "%d", argc); \
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL, \
+ JSFILEMSG_EXPECTS_ONE_ARG_ERROR, op, str); \
+ goto out; \
+ }
+
+
+/*
+ Security mechanism, should define a callback for this.
+ The parameters are as follows:
+ SECURITY_CHECK(JSContext *cx, JSPrincipals *ps, char *op_name, JSFile *file)
+ XXX Should this be a real function returning a JSBool result (and getting
+ some type-safety help from the compiler)?
+*/
+#define SECURITY_CHECK(cx, ps, op, file) \
+ /* Define a callback here... */
+
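+/*
+ * Illustrative sketch (editorial addition, not part of the original file) of
+ * the kind of callback the comment above anticipates; the name, signature,
+ * and always-allow behaviour are hypothetical.
+ */
+#if 0
+static JSBool
+ExampleFileSecurityCheck(JSContext *cx, JSPrincipals *ps, char *op_name,
+ JSFile *file)
+{
+ /* A real embedding would consult its security policy here. */
+ return JS_TRUE;
+}
+#endif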
+
+/* Structure representing the file internally */
+typedef struct JSFile {
+ char *path; /* the path to the file. */
+ JSBool isOpen;
+ int32 mode; /* mode used to open the file: read, write, append, create, etc.. */
+ int32 type; /* ASCII, UTF8, or UCS2 */
+ char byteBuffer[3]; /* bytes read in advance by js_FileRead ( UTF8 encoding ) */
+ jsint nbBytesInBuf; /* number of bytes stored in the buffer above */
+ jschar charBuffer; /* character read in advance by readln ( mac files only ) */
+ JSBool charBufferUsed; /* flag indicating if the buffer above is being used */
+ JSBool hasRandomAccess;/* can the file be randomly accessed? false for stdin, and
+ UTF-encoded files. */
+ JSBool hasAutoflush; /* should we force a flush for each line break? */
+ JSBool isNative; /* if the file is using OS-specific file FILE type */
+ /* We can actually put the following two in a union since they should never be used at the same time */
+ PRFileDesc *handle; /* the handle for the file, if open. */
+ FILE *nativehandle; /* native handle, for stuff NSPR doesn't do. */
+ JSBool isPipe; /* if the file is really an OS pipe */
+} JSFile;
+
+/* a few forward declarations... */
+JS_PUBLIC_API(JSObject*) js_NewFileObject(JSContext *cx, char *filename);
+static JSBool file_open(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval);
+static JSBool file_close(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval);
+
+/* New filename manipulation procedures */
+/* assumes we don't have leading/trailing spaces */
+static JSBool
+js_filenameHasAPipe(const char *filename)
+{
+ if (!filename)
+ return JS_FALSE;
+
+ return filename[0] == PIPE_SYMBOL ||
+ filename[strlen(filename) - 1] == PIPE_SYMBOL;
+}
+
+static JSBool
+js_isAbsolute(const char *name)
+{
+#if defined(XP_WIN) || defined(XP_OS2)
+ return *name && name[1] == ':';
+#else
+ return (name[0]
+# if defined(XP_UNIX) || defined(XP_BEOS)
+ ==
+# else
+ !=
+# endif
+ FILESEPARATOR);
+#endif
+}
+
+/*
+ * Concatenates base and name to produce a valid filename.
+ * Returned string must be freed.
+ */
+static char*
+js_combinePath(JSContext *cx, const char *base, const char *name)
+{
+ int len = strlen(base);
+ char* result = JS_malloc(cx, len + strlen(name) + 2);
+
+ if (!result)
+ return NULL;
+
+ strcpy(result, base);
+
+ if (base[len - 1] != FILESEPARATOR && base[len - 1] != FILESEPARATOR2) {
+ result[len] = FILESEPARATOR;
+ result[len + 1] = '\0';
+ }
+ strcat(result, name);
+ return result;
+}
+
+/* Extract the last component from a path name. Returned string must be freed */
+static char *
+js_fileBaseName(JSContext *cx, const char *pathname)
+{
+ jsint index, aux;
+ char *result;
+
+ index = strlen(pathname)-1;
+
+ /* Chop off trailing separators. */
+ while (index > 0 && (pathname[index]==FILESEPARATOR ||
+ pathname[index]==FILESEPARATOR2)) {
+ --index;
+ }
+
+ aux = index;
+
+ /* Now find the next separator. */
+ while (index >= 0 && pathname[index] != FILESEPARATOR &&
+ pathname[index] != FILESEPARATOR2) {
+ --index;
+ }
+
+ /* Allocate and copy. */
+ result = JS_malloc(cx, aux - index + 1);
+ if (!result)
+ return NULL;
+ strncpy(result, pathname + index + 1, aux - index);
+ result[aux - index] = '\0';
+ return result;
+}
+
+/*
+ * Returns everything but the last component from a path name.
+ * Returned string must be freed.
+ */
+static char *
+js_fileDirectoryName(JSContext *cx, const char *pathname)
+{
+ char *result;
+ const char *cp, *end;
+ size_t pathsize;
+
+ end = pathname + strlen(pathname);
+ cp = end - 1;
+
+ /* If this is already a directory, chop off the trailing /s. */
+ while (cp >= pathname) {
+ if (*cp != FILESEPARATOR && *cp != FILESEPARATOR2)
+ break;
+ --cp;
+ }
+
+ if (cp < pathname && end != pathname) {
+ /* There were just /s, return the root. */
+ result = JS_malloc(cx, 1 + 1); /* The separator + trailing NUL. */
+ if (!result)
+ return NULL;
+ result[0] = FILESEPARATOR;
+ result[1] = '\0';
+ return result;
+ }
+
+ /* Now chop off the last portion. */
+ while (cp >= pathname) {
+ if (*cp == FILESEPARATOR || *cp == FILESEPARATOR2)
+ break;
+ --cp;
+ }
+
+ /* Check if this is a leaf. */
+ if (cp < pathname) {
+ /* It is, return "pathname/". */
+ if (end[-1] == FILESEPARATOR || end[-1] == FILESEPARATOR2) {
+ /* Already has its terminating /. */
+ return JS_strdup(cx, pathname);
+ }
+
+ pathsize = end - pathname + 1;
+ result = JS_malloc(cx, pathsize + 1);
+ if (!result)
+ return NULL;
+
+ strcpy(result, pathname);
+ result[pathsize - 1] = FILESEPARATOR;
+ result[pathsize] = '\0';
+
+ return result;
+ }
+
+ /* Return everything up to and including the separator. */
+ pathsize = cp - pathname + 1;
+ result = JS_malloc(cx, pathsize + 1);
+ if (!result)
+ return NULL;
+
+ strncpy(result, pathname, pathsize);
+ result[pathsize] = '\0';
+
+ return result;
+}
+
+static char *
+js_absolutePath(JSContext *cx, const char * path)
+{
+ JSObject *obj;
+ JSString *str;
+ jsval prop;
+
+ if (js_isAbsolute(path)) {
+ return JS_strdup(cx, path);
+ } else {
+ obj = JS_GetGlobalObject(cx);
+ if (!JS_GetProperty(cx, obj, FILE_CONSTRUCTOR, &prop)) {
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_FILE_CONSTRUCTOR_UNDEFINED_ERROR);
+ return JS_strdup(cx, path);
+ }
+
+ obj = JSVAL_TO_OBJECT(prop);
+ if (!JS_GetProperty(cx, obj, CURRENTDIR_PROPERTY, &prop)) {
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_FILE_CURRENTDIR_UNDEFINED_ERROR);
+ return JS_strdup(cx, path);
+ }
+
+ str = JS_ValueToString(cx, prop);
+ if (!str)
+ return JS_strdup(cx, path);
+
+ /* should we have an array of curr dirs indexed by drive for windows? */
+ return js_combinePath(cx, JS_GetStringBytes(str), path);
+ }
+}
+
+/* Side effect: will remove spaces in the beginning/end of the filename */
+static char *
+js_canonicalPath(JSContext *cx, char *oldpath)
+{
+ char *tmp;
+ char *path = oldpath;
+ char *base, *dir, *current, *result;
+ jsint c;
+ jsint back = 0;
+ unsigned int i = 0, j = strlen(path)-1;
+
+ /* This step is probably optional: trim spaces at the beginning and end. */
+ while (i < j && path[i] == ' ')
+ i++;
+ while (j >= 0 && path[j] == ' ')
+ j--;
+
+ tmp = JS_malloc(cx, j-i+2);
+ if (!tmp)
+ return NULL;
+
+ strncpy(tmp, path + i, j - i + 1);
+ tmp[j - i + 1] = '\0';
+
+ path = tmp;
+
+ /* Pipe support. */
+ if (js_filenameHasAPipe(path))
+ return path;
+
+ /* file:// support. */
+ if (!strncmp(path, URL_PREFIX, strlen(URL_PREFIX))) {
+ tmp = js_canonicalPath(cx, path + strlen(URL_PREFIX));
+ JS_free(cx, path);
+ return tmp;
+ }
+
+ if (!js_isAbsolute(path)) {
+ tmp = js_absolutePath(cx, path);
+ if (!tmp)
+ return NULL;
+ path = tmp;
+ }
+
+ result = JS_strdup(cx, "");
+
+ current = path;
+
+ base = js_fileBaseName(cx, current);
+ dir = js_fileDirectoryName(cx, current);
+
+ while (strcmp(dir, current)) {
+ if (!strcmp(base, "..")) {
+ back++;
+ } else {
+ if (back > 0) {
+ back--;
+ } else {
+ tmp = result;
+ result = JS_malloc(cx, strlen(base) + 1 + strlen(tmp) + 1);
+ if (!result)
+ goto out;
+
+ strcpy(result, base);
+ c = strlen(result);
+ if (*tmp) {
+ result[c] = FILESEPARATOR;
+ result[c + 1] = '\0';
+ strcat(result, tmp);
+ }
+ JS_free(cx, tmp);
+ }
+ }
+ JS_free(cx, current);
+ JS_free(cx, base);
+ current = dir;
+ base = js_fileBaseName(cx, current);
+ dir = js_fileDirectoryName(cx, current);
+ }
+
+ tmp = result;
+ result = JS_malloc(cx, strlen(dir)+1+strlen(tmp)+1);
+ if (!result)
+ goto out;
+
+ strcpy(result, dir);
+ c = strlen(result);
+ if (tmp[0]!='\0') {
+ if ((result[c-1]!=FILESEPARATOR)&&(result[c-1]!=FILESEPARATOR2)) {
+ result[c] = FILESEPARATOR;
+ result[c+1] = '\0';
+ }
+ strcat(result, tmp);
+ }
+
+out:
+ if (tmp)
+ JS_free(cx, tmp);
+ if (dir)
+ JS_free(cx, dir);
+ if (base)
+ JS_free(cx, base);
+ if (current)
+ JS_free(cx, current);
+
+ return result;
+}
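+
+/*
+ * Illustrative sketch (hypothetical input): " /a/b/../c " is trimmed to
+ * "/a/b/../c" and then rebuilt leaf-to-root, each ".." component cancelling
+ * the next real component, yielding "/a/c". Pipe names and file:// URLs are
+ * handled before the component walk.
+ */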
+
+/* -------------------------- Text conversion ------------------------------- */
+/* The following is ripped from libi18n/unicvt.c and its include files. */
+
+/*
+ * UTF8 defines and macros
+ */
+#define ONE_OCTET_BASE 0x00 /* 0xxxxxxx */
+#define ONE_OCTET_MASK 0x7F /* x1111111 */
+#define CONTINUING_OCTET_BASE 0x80 /* 10xxxxxx */
+#define CONTINUING_OCTET_MASK 0x3F /* 00111111 */
+#define TWO_OCTET_BASE 0xC0 /* 110xxxxx */
+#define TWO_OCTET_MASK 0x1F /* 00011111 */
+#define THREE_OCTET_BASE 0xE0 /* 1110xxxx */
+#define THREE_OCTET_MASK 0x0F /* 00001111 */
+#define FOUR_OCTET_BASE 0xF0 /* 11110xxx */
+#define FOUR_OCTET_MASK 0x07 /* 00000111 */
+#define FIVE_OCTET_BASE 0xF8 /* 111110xx */
+#define FIVE_OCTET_MASK 0x03 /* 00000011 */
+#define SIX_OCTET_BASE 0xFC /* 1111110x */
+#define SIX_OCTET_MASK 0x01 /* 00000001 */
+
+#define IS_UTF8_1ST_OF_1(x) (( (x)&~ONE_OCTET_MASK ) == ONE_OCTET_BASE)
+#define IS_UTF8_1ST_OF_2(x) (( (x)&~TWO_OCTET_MASK ) == TWO_OCTET_BASE)
+#define IS_UTF8_1ST_OF_3(x) (( (x)&~THREE_OCTET_MASK) == THREE_OCTET_BASE)
+#define IS_UTF8_1ST_OF_4(x) (( (x)&~FOUR_OCTET_MASK ) == FOUR_OCTET_BASE)
+#define IS_UTF8_1ST_OF_5(x) (( (x)&~FIVE_OCTET_MASK ) == FIVE_OCTET_BASE)
+#define IS_UTF8_1ST_OF_6(x) (( (x)&~SIX_OCTET_MASK ) == SIX_OCTET_BASE)
+#define IS_UTF8_2ND_THRU_6TH(x) \
+ (( (x)&~CONTINUING_OCTET_MASK ) == CONTINUING_OCTET_BASE)
+#define IS_UTF8_1ST_OF_UCS2(x) \
+ IS_UTF8_1ST_OF_1(x) \
+ || IS_UTF8_1ST_OF_2(x) \
+ || IS_UTF8_1ST_OF_3(x)
+
+
+#define MAX_UCS2 0xFFFF
+#define DEFAULT_CHAR 0x003F /* Default char is "?" */
+#define BYTE_MASK 0xBF
+#define BYTE_MARK 0x80
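+
+/*
+ * Worked example for the macros above: the two-octet sequence 0xC3 0xA9
+ * (U+00E9). The lead octet satisfies IS_UTF8_1ST_OF_2 since
+ * (0xC3 & ~TWO_OCTET_MASK) == 0xC0, the trailer satisfies
+ * IS_UTF8_2ND_THRU_6TH since (0xA9 & ~CONTINUING_OCTET_MASK) == 0x80, and the
+ * payload is ((0xC3 & 0x1F) << 6) | (0xA9 & 0x3F) == 0x00E9.
+ */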
+
+
+/* Function: one_ucs2_to_utf8_char
+ *
+ * Function takes one UCS-2 char and writes it to a UTF-8 buffer.
+ * We need a UTF-8 buffer because we don't know before this
+ * function how many bytes of utf-8 data will be written. It also
+ * takes a pointer to the end of the UTF-8 buffer so that we don't
+ * overwrite data. This function returns the number of UTF-8 bytes
+ * of data written, or -1 if the buffer would have been overrun.
+ */
+
+#define LINE_SEPARATOR 0x2028
+#define PARAGRAPH_SEPARATOR 0x2029
+static int16 one_ucs2_to_utf8_char(unsigned char *tobufp,
+ unsigned char *tobufendp,
+ uint16 onechar)
+{
+ int16 numUTF8bytes = 0;
+
+ if (onechar == LINE_SEPARATOR || onechar == PARAGRAPH_SEPARATOR) {
+ strcpy((char*)tobufp, "\n");
+ return strlen((char*)tobufp);
+ }
+
+ if (onechar < 0x80) {
+ numUTF8bytes = 1;
+ } else if (onechar < 0x800) {
+ numUTF8bytes = 2;
+ } else {
+ /* 0x800 <= onechar <= MAX_UCS2 */
+ numUTF8bytes = 3;
+ }
+
+ tobufp += numUTF8bytes;
+
+ /* return error if we don't have space for the whole character */
+ if (tobufp > tobufendp) {
+ return(-1);
+ }
+
+ switch(numUTF8bytes) {
+ case 3: *--tobufp = (onechar | BYTE_MARK) & BYTE_MASK; onechar >>=6;
+ *--tobufp = (onechar | BYTE_MARK) & BYTE_MASK; onechar >>=6;
+ *--tobufp = onechar | THREE_OCTET_BASE;
+ break;
+
+ case 2: *--tobufp = (onechar | BYTE_MARK) & BYTE_MASK; onechar >>=6;
+ *--tobufp = onechar | TWO_OCTET_BASE;
+ break;
+
+ case 1: *--tobufp = (unsigned char)onechar;
+ break;
+ }
+
+ return numUTF8bytes;
+}
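+
+/*
+ * Illustrative call: with onechar == 0x20AC and at least three bytes of room,
+ * the bytes 0xE2 0x82 0xAC are stored and 3 is returned; with less room the
+ * function returns -1 without writing past tobufendp.
+ */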
+
+/*
+ * utf8_to_ucs2_char
+ *
+ * Convert a utf8 multibyte character to ucs2
+ *
+ * inputs: pointer to utf8 character(s)
+ * length of utf8 buffer ("read" length limit)
+ * pointer to return ucs2 character
+ *
+ * outputs: number of bytes in the utf8 character
+ * -1 if not a valid utf8 character sequence
+ * -2 if the buffer is too short
+ */
+static int16
+utf8_to_ucs2_char(const unsigned char *utf8p, int16 buflen, uint16 *ucs2p)
+{
+ uint16 lead, cont1, cont2;
+
+ /*
+ * Check for minimum buffer length
+ */
+ if ((buflen < 1) || (utf8p == NULL)) {
+ return -2;
+ }
+ lead = (uint16) (*utf8p);
+
+ /*
+ * Check for a one octet sequence
+ */
+ if (IS_UTF8_1ST_OF_1(lead)) {
+ *ucs2p = lead & ONE_OCTET_MASK;
+ return 1;
+ }
+
+ /*
+ * Check for a two octet sequence
+ */
+ if (IS_UTF8_1ST_OF_2(*utf8p)) {
+ if (buflen < 2)
+ return -2;
+ cont1 = (uint16) *(utf8p+1);
+ if (!IS_UTF8_2ND_THRU_6TH(cont1))
+ return -1;
+ *ucs2p = (lead & TWO_OCTET_MASK) << 6;
+ *ucs2p |= cont1 & CONTINUING_OCTET_MASK;
+ return 2;
+ }
+
+ /*
+ * Check for a three octet sequence
+ */
+ else if (IS_UTF8_1ST_OF_3(lead)) {
+ if (buflen < 3)
+ return -2;
+ cont1 = (uint16) *(utf8p+1);
+ cont2 = (uint16) *(utf8p+2);
+ if ( (!IS_UTF8_2ND_THRU_6TH(cont1))
+ || (!IS_UTF8_2ND_THRU_6TH(cont2)))
+ return -1;
+ *ucs2p = (lead & THREE_OCTET_MASK) << 12;
+ *ucs2p |= (cont1 & CONTINUING_OCTET_MASK) << 6;
+ *ucs2p |= cont2 & CONTINUING_OCTET_MASK;
+ return 3;
+ }
+ else { /* not a valid utf8/ucs2 character */
+ return -1;
+ }
+}
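+
+/*
+ * Illustrative calls: decoding the bytes 0xC3 0xA9 with buflen 2 sets *ucs2p
+ * to 0x00E9 and returns 2; the same bytes with buflen 1 return -2 (buffer too
+ * short); a lone continuation byte such as 0x80 returns -1 (invalid lead).
+ */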
+
+/* ----------------------------- Helper functions --------------------------- */
+/* Ripped off from lm_win.c. */
+/*
+ * The option matching below is case sensitive for now: strcasecmp lives in
+ * strings.h, but on Windows it is called _stricmp, so switching to it would
+ * need an #ifdef.
+ */
+
+static int32
+js_FileHasOption(JSContext *cx, const char *oldoptions, const char *name)
+{
+ char *comma, *equal, *current;
+ char *options = JS_strdup(cx, oldoptions);
+ int32 found = 0;
+
+ current = options;
+ for (;;) {
+ comma = strchr(current, ',');
+ if (comma) *comma = '\0';
+ equal = strchr(current, '=');
+ if (equal) *equal = '\0';
+ if (strcmp(current, name) == 0) {
+ if (!equal || strcmp(equal + 1, "yes") == 0)
+ found = 1;
+ else
+ found = atoi(equal + 1);
+ }
+ if (equal) *equal = '=';
+ if (comma) *comma = ',';
+ if (found || !comma)
+ break;
+ current = comma + 1;
+ }
+ JS_free(cx, options);
+ return found;
+}
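+
+/*
+ * Illustrative calls with oldoptions == "readWrite,create,autoflush=1"
+ * (a hypothetical mode string): "create" yields 1, "autoflush" yields
+ * atoi("1") == 1, and "replace" yields 0.
+ */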
+
+/* empty the buffer */
+static void
+js_ResetBuffers(JSFile * file)
+{
+ file->charBufferUsed = JS_FALSE;
+ file->nbBytesInBuf = 0;
+}
+
+/* Reset file attributes */
+static void
+js_ResetAttributes(JSFile * file)
+{
+ file->mode = file->type = 0;
+ file->isOpen = JS_FALSE;
+ file->handle = NULL;
+ file->nativehandle = NULL;
+ file->hasRandomAccess = JS_TRUE; /* Innocent until proven guilty. */
+ file->hasAutoflush = JS_FALSE;
+ file->isNative = JS_FALSE;
+ file->isPipe = JS_FALSE;
+
+ js_ResetBuffers(file);
+}
+
+static JSBool
+js_FileOpen(JSContext *cx, JSObject *obj, JSFile *file, char *mode){
+ JSString *type, *mask;
+ jsval v[2];
+ jsval rval;
+
+ type = JS_InternString(cx, asciistring);
+ mask = JS_NewStringCopyZ(cx, mode);
+ v[0] = STRING_TO_JSVAL(mask);
+ v[1] = STRING_TO_JSVAL(type);
+
+ if (!file_open(cx, obj, 2, v, &rval))
+ return JS_FALSE;
+ return JS_TRUE;
+}
+
+/* Buffered version of PR_Read. Used by js_FileRead */
+static int32
+js_BufferedRead(JSFile *f, unsigned char *buf, int32 len)
+{
+ int32 count = 0;
+
+ while (f->nbBytesInBuf>0&&len>0) {
+ buf[0] = f->byteBuffer[0];
+ f->byteBuffer[0] = f->byteBuffer[1];
+ f->byteBuffer[1] = f->byteBuffer[2];
+ f->nbBytesInBuf--;
+ len--;
+ buf+=1;
+ count++;
+ }
+
+ if (len > 0) {
+ count += (!f->isNative)
+ ? PR_Read(f->handle, buf, len)
+ : fread(buf, 1, len, f->nativehandle);
+ }
+ return count;
+}
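+
+/*
+ * Note: byteBuffer holds the few bytes pushed back by the UTF-8 paths in
+ * js_FileRead/js_FileSeek; those are drained first, then the remainder of the
+ * request goes to PR_Read or fread depending on file->isNative.
+ */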
+
+static int32
+js_FileRead(JSContext *cx, JSFile *file, jschar *buf, int32 len, int32 mode)
+{
+ unsigned char *aux;
+ int32 count = 0, i;
+ jsint remainder;
+ unsigned char utfbuf[3];
+
+ if (file->charBufferUsed) {
+ buf[0] = file->charBuffer;
+ buf++;
+ len--;
+ file->charBufferUsed = JS_FALSE;
+ }
+
+ switch (mode) {
+ case ASCII:
+ aux = (unsigned char*)JS_malloc(cx, len);
+ if (!aux)
+ return 0;
+
+ count = js_BufferedRead(file, aux, len);
+ if (count == -1) {
+ JS_free(cx, aux);
+ return 0;
+ }
+
+ for (i = 0; i < len; i++)
+ buf[i] = (jschar)aux[i];
+
+ JS_free(cx, aux);
+ break;
+
+ case UTF8:
+ remainder = 0;
+ for (count = 0;count<len;count++) {
+ i = js_BufferedRead(file, utfbuf+remainder, 3-remainder);
+ if (i<=0) {
+ return count;
+ }
+ i = utf8_to_ucs2_char(utfbuf, (int16)i, &buf[count] );
+ if (i<0) {
+ return count;
+ } else {
+ if (i==1) {
+ utfbuf[0] = utfbuf[1];
+ utfbuf[1] = utfbuf[2];
+ remainder = 2;
+ } else if (i==2) {
+ utfbuf[0] = utfbuf[2];
+ remainder = 1;
+ } else if (i==3) {
+ remainder = 0;
+ }
+ }
+ }
+ while (remainder>0) {
+ file->byteBuffer[file->nbBytesInBuf] = utfbuf[0];
+ file->nbBytesInBuf++;
+ utfbuf[0] = utfbuf[1];
+ utfbuf[1] = utfbuf[2];
+ remainder--;
+ }
+ break;
+
+ case UCS2:
+ count = js_BufferedRead(file, (unsigned char *)buf, len * 2) >> 1;
+ if (count == -1)
+ return 0;
+
+ break;
+
+ default:
+ /* Not reached. */
+ JS_ASSERT(0);
+ }
+
+ if(count == -1) {
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_OP_FAILED, "read", file->path);
+ }
+
+ return count;
+}
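+
+/*
+ * Note on units: the return value counts jschars delivered to buf, not bytes;
+ * in UTF8 mode up to two undecoded bytes are pushed back into byteBuffer for
+ * the next call.
+ */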
+
+static int32
+js_FileSeek(JSContext *cx, JSFile *file, int32 len, int32 mode)
+{
+ int32 count = 0, i;
+ jsint remainder;
+ unsigned char utfbuf[3];
+ jschar tmp;
+
+ switch (mode) {
+ case ASCII:
+ count = PR_Seek(file->handle, len, PR_SEEK_CUR);
+ break;
+
+ case UTF8:
+ remainder = 0;
+ for (count = 0;count<len;count++) {
+ i = js_BufferedRead(file, utfbuf+remainder, 3-remainder);
+ if (i<=0) {
+ return 0;
+ }
+ i = utf8_to_ucs2_char(utfbuf, (int16)i, &tmp );
+ if (i<0) {
+ return 0;
+ } else {
+ if (i==1) {
+ utfbuf[0] = utfbuf[1];
+ utfbuf[1] = utfbuf[2];
+ remainder = 2;
+ } else if (i==2) {
+ utfbuf[0] = utfbuf[2];
+ remainder = 1;
+ } else if (i==3) {
+ remainder = 0;
+ }
+ }
+ }
+ while (remainder>0) {
+ file->byteBuffer[file->nbBytesInBuf] = utfbuf[0];
+ file->nbBytesInBuf++;
+ utfbuf[0] = utfbuf[1];
+ utfbuf[1] = utfbuf[2];
+ remainder--;
+ }
+ break;
+
+ case UCS2:
+ count = PR_Seek(file->handle, len*2, PR_SEEK_CUR)/2;
+ break;
+
+ default:
+ /* Not reached. */
+ JS_ASSERT(0);
+ }
+
+ if(count == -1) {
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_OP_FAILED, "seek", file->path);
+ }
+
+ return count;
+}
+
+static int32
+js_FileWrite(JSContext *cx, JSFile *file, jschar *buf, int32 len, int32 mode)
+{
+ unsigned char *aux;
+ int32 count = 0, i, j;
+ unsigned char *utfbuf;
+
+ switch (mode) {
+ case ASCII:
+ aux = (unsigned char*)JS_malloc(cx, len);
+ if (!aux)
+ return 0;
+
+ for (i = 0; i<len; i++)
+ aux[i] = buf[i] % 256;
+
+ count = (!file->isNative)
+ ? PR_Write(file->handle, aux, len)
+ : fwrite(aux, 1, len, file->nativehandle);
+
+ if (count==-1) {
+ JS_free(cx, aux);
+ return 0;
+ }
+
+ JS_free(cx, aux);
+ break;
+
+ case UTF8:
+ utfbuf = (unsigned char*)JS_malloc(cx, len*3);
+ if (!utfbuf) return 0;
+ i = 0;
+ for (count = 0;count<len;count++) {
+ j = one_ucs2_to_utf8_char(utfbuf+i, utfbuf+len*3, buf[count]);
+ if (j==-1) {
+ JS_free(cx, utfbuf);
+ return 0;
+ }
+ i+=j;
+ }
+ j = (!file->isNative)
+ ? PR_Write(file->handle, utfbuf, i)
+ : fwrite(utfbuf, 1, i, file->nativehandle);
+
+ if (j<i) {
+ JS_free(cx, utfbuf);
+ return 0;
+ }
+ JS_free(cx, utfbuf);
+ break;
+
+ case UCS2:
+ count = (!file->isNative)
+ ? PR_Write(file->handle, buf, len*2) >> 1
+ : fwrite(buf, 1, len*2, file->nativehandle) >> 1;
+
+ if (count == -1)
+ return 0;
+ break;
+
+ default:
+ /* Not reached. */
+ JS_ASSERT(0);
+ }
+
+ if(count == -1) {
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_OP_FAILED, "write", file->path);
+ }
+
+ return count;
+}
+
+/* ----------------------------- Property checkers -------------------------- */
+static JSBool
+js_exists(JSContext *cx, JSFile *file)
+{
+ if (file->isNative) {
+ /* It doesn't make sense for a pipe or a standard stream. */
+ return JS_FALSE;
+ }
+
+ return PR_Access(file->path, PR_ACCESS_EXISTS) == PR_SUCCESS;
+}
+
+static JSBool
+js_canRead(JSContext *cx, JSFile *file)
+{
+ if (!file->isNative) {
+ if (file->isOpen && !(file->mode & PR_RDONLY))
+ return JS_FALSE;
+ return PR_Access(file->path, PR_ACCESS_READ_OK) == PR_SUCCESS;
+ }
+
+ if (file->isPipe) {
+ /* Is this pipe open for reading? */
+ return file->path[0] == PIPE_SYMBOL;
+ }
+
+ return !strcmp(file->path, STDINPUT_NAME);
+}
+
+static JSBool
+js_canWrite(JSContext *cx, JSFile *file)
+{
+ if (!file->isNative) {
+ if (file->isOpen && !(file->mode & PR_WRONLY))
+ return JS_FALSE;
+ return PR_Access(file->path, PR_ACCESS_WRITE_OK) == PR_SUCCESS;
+ }
+
+ if(file->isPipe) {
+ /* Is this pipe open for writing? */
+ return file->path[strlen(file->path)-1] == PIPE_SYMBOL;
+ }
+
+ return !strcmp(file->path, STDOUTPUT_NAME) ||
+ !strcmp(file->path, STDERROR_NAME);
+}
+
+static JSBool
+js_isFile(JSContext *cx, JSFile *file)
+{
+ if (!file->isNative) {
+ PRFileInfo info;
+
+ if (file->isOpen
+ ? PR_GetOpenFileInfo(file->handle, &info)
+ : PR_GetFileInfo(file->path, &info) != PR_SUCCESS) {
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_CANNOT_ACCESS_FILE_STATUS, file->path);
+ return JS_FALSE;
+ }
+
+ return info.type == PR_FILE_FILE;
+ }
+
+ /* This doesn't make sense for a pipe or a standard stream. */
+ return JS_FALSE;
+}
+
+static JSBool
+js_isDirectory(JSContext *cx, JSFile *file)
+{
+ if(!file->isNative){
+ PRFileInfo info;
+
+ /* Hack needed to get get_property to work. */
+ if (!js_exists(cx, file))
+ return JS_FALSE;
+
+ if (file->isOpen
+ ? PR_GetOpenFileInfo(file->handle, &info)
+ : PR_GetFileInfo(file->path, &info) != PR_SUCCESS) {
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_CANNOT_ACCESS_FILE_STATUS, file->path);
+ return JS_FALSE;
+ }
+
+ return info.type == PR_FILE_DIRECTORY;
+ }
+
+ /* This doesn't make sense for a pipe or a standard stream. */
+ return JS_FALSE;
+}
+
+static jsval
+js_size(JSContext *cx, JSFile *file)
+{
+ PRFileInfo info;
+
+ JSFILE_CHECK_NATIVE("size");
+
+ if (file->isOpen
+ ? PR_GetOpenFileInfo(file->handle, &info)
+ : PR_GetFileInfo(file->path, &info) != PR_SUCCESS) {
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_CANNOT_ACCESS_FILE_STATUS, file->path);
+ return JSVAL_VOID;
+ }
+
+ return INT_TO_JSVAL(info.size);
+
+out:
+ return JSVAL_VOID;
+}
+
+/*
+ * Return the parent object
+ */
+static JSBool
+js_parent(JSContext *cx, JSFile *file, jsval *resultp)
+{
+ char *str;
+
+ /* Pipes and native files have no parent; return void. */
+ if (file->isNative) {
+ *resultp = JSVAL_VOID;
+ return JS_TRUE;
+ }
+
+ str = js_fileDirectoryName(cx, file->path);
+ if (!str)
+ return JS_FALSE;
+
+ /* If the directory is equal to the original path, we're at the root. */
+ if (!strcmp(file->path, str)) {
+ *resultp = JSVAL_NULL;
+ } else {
+ JSObject *obj = js_NewFileObject(cx, str);
+ if (!obj) {
+ JS_free(cx, str);
+ return JS_FALSE;
+ }
+ *resultp = OBJECT_TO_JSVAL(obj);
+ }
+
+ JS_free(cx, str);
+ return JS_TRUE;
+}
+
+static JSBool
+js_name(JSContext *cx, JSFile *file, jsval *vp)
+{
+ char *name;
+ JSString *str;
+
+ if (file->isPipe) {
+ *vp = JSVAL_VOID;
+ return JS_TRUE;
+ }
+
+ name = js_fileBaseName(cx, file->path);
+ if (!name)
+ return JS_FALSE;
+
+ str = JS_NewString(cx, name, strlen(name));
+ if (!str) {
+ JS_free(cx, name);
+ return JS_FALSE;
+ }
+
+ *vp = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+
+/* ------------------------------ File object methods ---------------------------- */
+static JSBool
+file_open(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSFile *file = JS_GetInstancePrivate(cx, obj, &js_FileClass, NULL);
+ JSString *strmode, *strtype;
+ char *ctype, *mode;
+ int32 mask, type;
+ int len;
+
+ mode = NULL;
+
+ SECURITY_CHECK(cx, NULL, "open", file);
+
+ /* A native file that is already open */
+ if(file->isOpen && file->isNative) {
+ JS_ReportWarning(cx, "Native file %s is already open, proceeding",
+ file->path);
+ goto good;
+ }
+
+ /* Close before proceeding */
+ if (file->isOpen) {
+ JS_ReportWarning(cx, "File %s is already open, we will close it and "
+ "reopen, proceeding", file->path);
+ if(!file_close(cx, obj, 0, NULL, rval))
+ goto out;
+ }
+
+ if (js_isDirectory(cx, file)) {
+ JS_ReportWarning(cx, "%s seems to be a directory, there is no point in "
+ "trying to open it, proceeding", file->path);
+ goto good;
+ }
+
+ /* Path must be defined at this point */
+ len = strlen(file->path);
+
+ /* Mode */
+ if (argc >= 1) {
+ strmode = JS_ValueToString(cx, argv[0]);
+ if (!strmode) {
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_FIRST_ARGUMENT_OPEN_NOT_STRING_ERROR,
+ argv[0]);
+ goto out;
+ }
+ mode = JS_strdup(cx, JS_GetStringBytes(strmode));
+ } else {
+ if(file->path[0]==PIPE_SYMBOL) {
+ /* pipe default mode */
+ mode = JS_strdup(cx, "read");
+ } else if(file->path[len-1]==PIPE_SYMBOL) {
+ /* pipe default mode */
+ mode = JS_strdup(cx, "write");
+ } else {
+ /* non-destructive, permissive defaults. */
+ mode = JS_strdup(cx, "readWrite,append,create");
+ }
+ }
+
+ /* Process the mode */
+ mask = 0;
+ /* TODO: this is pretty ugly, we walk through the string too many times. */
+ mask |= js_FileHasOption(cx, mode, "read") ? PR_RDONLY : 0;
+ mask |= js_FileHasOption(cx, mode, "write") ? PR_WRONLY : 0;
+ mask |= js_FileHasOption(cx, mode, "readWrite")? PR_RDWR : 0;
+ mask |= js_FileHasOption(cx, mode, "append") ? PR_APPEND : 0;
+ mask |= js_FileHasOption(cx, mode, "create") ? PR_CREATE_FILE : 0;
+ mask |= js_FileHasOption(cx, mode, "replace") ? PR_TRUNCATE : 0;
+
+ if (mask & PR_RDWR)
+ mask |= (PR_RDONLY | PR_WRONLY);
+ if ((mask & PR_RDONLY) && (mask & PR_WRONLY))
+ mask |= PR_RDWR;
+
+ file->hasAutoflush |= js_FileHasOption(cx, mode, "autoflush");
+
+ /* Type */
+ if (argc > 1) {
+ strtype = JS_ValueToString(cx, argv[1]);
+ if (!strtype) {
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_SECOND_ARGUMENT_OPEN_NOT_STRING_ERROR,
+ argv[1]);
+ goto out;
+ }
+ ctype = JS_GetStringBytes(strtype);
+
+ if(!strcmp(ctype, utfstring)) {
+ type = UTF8;
+ } else if (!strcmp(ctype, unicodestring)) {
+ type = UCS2;
+ } else {
+ if (strcmp(ctype, asciistring)) {
+ JS_ReportWarning(cx, "File type %s is not supported, using "
+ "'text' instead, proceeding", ctype);
+ }
+ type = ASCII;
+ }
+ } else {
+ type = ASCII;
+ }
+
+ /* Save the relevant fields */
+ file->type = type;
+ file->mode = mask;
+ file->nativehandle = NULL;
+ file->hasRandomAccess = (type != UTF8);
+
+ /*
+ * Deal with pipes here. We can't use NSPR for pipes, so we have to use
+ * POPEN.
+ */
+ if (file->path[0]==PIPE_SYMBOL || file->path[len-1]==PIPE_SYMBOL) {
+ if (file->path[0] == PIPE_SYMBOL && file->path[len-1] == PIPE_SYMBOL) {
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_BIDIRECTIONAL_PIPE_NOT_SUPPORTED);
+ goto out;
+ } else {
+ int i = 0;
+ char pipemode[3];
+ SECURITY_CHECK(cx, NULL, "pipe_open", file);
+
+ if(file->path[0] == PIPE_SYMBOL){
+ if(mask & (PR_WRONLY | PR_APPEND | PR_CREATE_FILE | PR_TRUNCATE)){
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_OPEN_MODE_NOT_SUPPORTED_WITH_PIPES,
+ mode, file->path);
+ goto out;
+ }
+ /* open(SPOOLER, "| cat -v | lpr -h 2>/dev/null") -- pipe for writing */
+ pipemode[i++] = 'r';
+#ifndef XP_UNIX
+ pipemode[i++] = file->type==UTF8 ? 'b' : 't';
+#endif
+ pipemode[i++] = '\0';
+ file->nativehandle = POPEN(&file->path[1], pipemode);
+ } else if(file->path[len-1] == PIPE_SYMBOL) {
+ char *command = JS_malloc(cx, len);
+
+ strncpy(command, file->path, len-1);
+ command[len-1] = '\0';
+ /* open(STATUS, "netstat -an 2>&1 |") */
+ pipemode[i++] = 'w';
+#ifndef XP_UNIX
+ pipemode[i++] = file->type==UTF8 ? 'b' : 't';
+#endif
+ pipemode[i++] = '\0';
+ file->nativehandle = POPEN(command, pipemode);
+ JS_free(cx, command);
+ }
+ /* set the flags */
+ file->isNative = JS_TRUE;
+ file->isPipe = JS_TRUE;
+ file->hasRandomAccess = JS_FALSE;
+ }
+ } else {
+ /* TODO: what about the permissions?? Java ignores the problem... */
+ file->handle = PR_Open(file->path, mask, 0644);
+ }
+
+ js_ResetBuffers(file);
+ JS_free(cx, mode);
+ mode = NULL;
+
+ /* Set the open flag and return result */
+ if (file->handle == NULL && file->nativehandle == NULL) {
+ file->isOpen = JS_FALSE;
+
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_OP_FAILED, "open", file->path);
+ goto out;
+ }
+
+good:
+ file->isOpen = JS_TRUE;
+ *rval = JSVAL_TRUE;
+ return JS_TRUE;
+
+out:
+ if(mode)
+ JS_free(cx, mode);
+ return JS_FALSE;
+}
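+
+/*
+ * Illustrative mapping (hypothetical script-level call): opening with the mode
+ * string "readWrite,create" sets PR_RDWR and PR_CREATE_FILE in the mask, while
+ * a path such as "|sort" (read pipe) or "netstat -an|" (write pipe) bypasses
+ * PR_Open() and goes through POPEN() instead.
+ */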
+
+static JSBool
+file_close(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSFile *file = JS_GetInstancePrivate(cx, obj, &js_FileClass, NULL);
+
+ SECURITY_CHECK(cx, NULL, "close", file);
+
+ if(!file->isOpen){
+ JS_ReportWarning(cx, "File %s is not open, can't close it, proceeding",
+ file->path);
+ goto out;
+ }
+
+ if(!file->isPipe){
+ if(file->isNative){
+ JS_ReportWarning(cx, "Unable to close native file %s, proceeding", file->path);
+ goto out;
+ }else{
+ if(file->handle && PR_Close(file->handle)){
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_OP_FAILED, "close", file->path);
+
+ goto out;
+ }
+ }
+ }else{
+ if(PCLOSE(file->nativehandle)==-1){
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_OP_FAILED, "pclose", file->path);
+ goto out;
+ }
+ }
+
+ js_ResetAttributes(file);
+ *rval = JSVAL_TRUE;
+ return JS_TRUE;
+
+out:
+ return JS_FALSE;
+}
+
+
+static JSBool
+file_remove(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSFile *file = JS_GetInstancePrivate(cx, obj, &js_FileClass, NULL);
+
+ SECURITY_CHECK(cx, NULL, "remove", file);
+ JSFILE_CHECK_NATIVE("remove");
+ JSFILE_CHECK_CLOSED("remove");
+
+ if ((js_isDirectory(cx, file) ?
+ PR_RmDir(file->path) : PR_Delete(file->path))==PR_SUCCESS) {
+ js_ResetAttributes(file);
+ *rval = JSVAL_TRUE;
+ return JS_TRUE;
+ } else {
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_OP_FAILED, "remove", file->path);
+ goto out;
+ }
+out:
+ *rval = JSVAL_FALSE;
+ return JS_FALSE;
+}
+
+/* Raw PR-based function. No text processing. Just raw data copying. */
+static JSBool
+file_copyTo(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSFile *file = JS_GetInstancePrivate(cx, obj, &js_FileClass, NULL);
+ char *dest = NULL;
+ PRFileDesc *handle = NULL;
+ char *buffer;
+ jsval count, size;
+ JSBool fileInitiallyOpen=JS_FALSE;
+
+ SECURITY_CHECK(cx, NULL, "copyTo", file); /* may need a second argument!*/
+ JSFILE_CHECK_ONE_ARG("copyTo");
+ JSFILE_CHECK_NATIVE("copyTo");
+ /* remember the state */
+ fileInitiallyOpen = file->isOpen;
+ JSFILE_CHECK_READ;
+
+ dest = JS_GetStringBytes(JS_ValueToString(cx, argv[0]));
+
+ /* make sure we are not reading a file open for writing */
+ if (file->isOpen && !js_canRead(cx, file)) {
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_CANNOT_COPY_FILE_OPEN_FOR_WRITING_ERROR, file->path);
+ goto out;
+ }
+
+ if (file->handle==NULL){
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_OP_FAILED, "open", file->path);
+ goto out;
+ }
+
+ handle = PR_Open(dest, PR_WRONLY|PR_CREATE_FILE|PR_TRUNCATE, 0644);
+
+ if(!handle){
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_OP_FAILED, "open", dest);
+ goto out;
+ }
+
+ if ((size=js_size(cx, file))==JSVAL_VOID) {
+ goto out;
+ }
+
+ buffer = JS_malloc(cx, JSVAL_TO_INT(size));
+ if (!buffer)
+ goto out;
+
+ count = INT_TO_JSVAL(PR_Read(file->handle, buffer, JSVAL_TO_INT(size)));
+
+ /* reading panic */
+ if (count!=size) {
+ JS_free(cx, buffer);
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_COPY_READ_ERROR, file->path);
+ goto out;
+ }
+
+ count = INT_TO_JSVAL(PR_Write(handle, buffer, JSVAL_TO_INT(size)));
+
+ /* writing panic */
+ if (count!=size) {
+ JS_free(cx, buffer);
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_COPY_WRITE_ERROR, file->path);
+ goto out;
+ }
+
+ JS_free(cx, buffer);
+
+ if(!fileInitiallyOpen){
+ if(!file_close(cx, obj, 0, NULL, rval)) goto out;
+ }
+
+ if(PR_Close(handle)!=PR_SUCCESS){
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_OP_FAILED, "close", dest);
+ goto out;
+ }
+
+ *rval = JSVAL_TRUE;
+ return JS_TRUE;
+out:
+ if(file->isOpen && !fileInitiallyOpen){
+ if(PR_Close(file->handle)!=PR_SUCCESS){
+ JS_ReportWarning(cx, "Can't close %s, proceeding", file->path);
+ }
+ }
+
+ if(handle && PR_Close(handle)!=PR_SUCCESS){
+ JS_ReportWarning(cx, "Can't close %s, proceeding", dest);
+ }
+
+ *rval = JSVAL_FALSE;
+ return JS_FALSE;
+}
+
+static JSBool
+file_renameTo(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSFile *file = JS_GetInstancePrivate(cx, obj, &js_FileClass, NULL);
+ char *dest;
+
+ SECURITY_CHECK(cx, NULL, "renameTo", file); /* may need a second argument!*/
+ JSFILE_CHECK_ONE_ARG("renameTo");
+ JSFILE_CHECK_NATIVE("renameTo");
+ JSFILE_CHECK_CLOSED("renameTo");
+
+ dest = RESOLVE_PATH(cx, JS_GetStringBytes(JS_ValueToString(cx, argv[0])));
+
+ if (PR_Rename(file->path, dest)==PR_SUCCESS){
+ /* copy the new filename */
+ JS_free(cx, file->path);
+ file->path = dest;
+ *rval = JSVAL_TRUE;
+ return JS_TRUE;
+ }else{
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_RENAME_FAILED, file->path, dest);
+ goto out;
+ }
+out:
+ *rval = JSVAL_FALSE;
+ return JS_FALSE;
+}
+
+static JSBool
+file_flush(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSFile *file = JS_GetInstancePrivate(cx, obj, &js_FileClass, NULL);
+
+ SECURITY_CHECK(cx, NULL, "flush", file);
+ JSFILE_CHECK_NATIVE("flush");
+ JSFILE_CHECK_OPEN("flush");
+
+ if (PR_Sync(file->handle)==PR_SUCCESS){
+ *rval = JSVAL_TRUE;
+ return JS_TRUE;
+ }else{
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_OP_FAILED, "flush", file->path);
+ goto out;
+ }
+out:
+ *rval = JSVAL_FALSE;
+ return JS_FALSE;
+}
+
+static JSBool
+file_write(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSFile *file = JS_GetInstancePrivate(cx, obj, &js_FileClass, NULL);
+ JSString *str;
+ int32 count;
+ uintN i;
+
+ SECURITY_CHECK(cx, NULL, "write", file);
+ JSFILE_CHECK_WRITE;
+
+ for (i = 0; i<argc; i++) {
+ str = JS_ValueToString(cx, argv[i]);
+ count = js_FileWrite(cx, file, JS_GetStringChars(str),
+ JS_GetStringLength(str), file->type);
+ if (count==-1){
+ *rval = JSVAL_FALSE;
+ return JS_FALSE;
+ }
+ }
+
+ *rval = JSVAL_TRUE;
+ return JS_TRUE;
+out:
+ *rval = JSVAL_FALSE;
+ return JS_FALSE;
+}
+
+static JSBool
+file_writeln(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSFile *file = JS_GetInstancePrivate(cx, obj, &js_FileClass, NULL);
+ JSString *str;
+
+ SECURITY_CHECK(cx, NULL, "writeln", file);
+ JSFILE_CHECK_WRITE;
+
+ /* don't report an error here */
+ if(!file_write(cx, obj, argc, argv, rval)) return JS_FALSE;
+ /* don't do security here -- we passed the check in file_write */
+ str = JS_NewStringCopyZ(cx, "\n");
+
+ if (js_FileWrite(cx, file, JS_GetStringChars(str), JS_GetStringLength(str),
+ file->type)==-1){
+ *rval = JSVAL_FALSE;
+ return JS_FALSE;
+ }
+
+ /* eol causes flush if hasAutoflush is turned on */
+ if (file->hasAutoflush)
+ file_flush(cx, obj, 0, NULL, rval);
+
+ *rval = JSVAL_TRUE;
+ return JS_TRUE;
+out:
+ *rval = JSVAL_FALSE;
+ return JS_FALSE;
+}
+
+static JSBool
+file_writeAll(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSFile *file = JS_GetInstancePrivate(cx, obj, &js_FileClass, NULL);
+ jsuint i;
+ jsuint limit;
+ JSObject *array;
+ JSObject *elem;
+ jsval elemval;
+
+ SECURITY_CHECK(cx, NULL, "writeAll", file);
+ JSFILE_CHECK_ONE_ARG("writeAll");
+ JSFILE_CHECK_WRITE;
+
+ if (!JS_IsArrayObject(cx, JSVAL_TO_OBJECT(argv[0]))) {
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_FIRST_ARGUMENT_WRITEALL_NOT_ARRAY_ERROR);
+ goto out;
+ }
+
+ array = JSVAL_TO_OBJECT(argv[0]);
+
+ JS_GetArrayLength(cx, array, &limit);
+
+ for (i = 0; i<limit; i++) {
+ if (!JS_GetElement(cx, array, i, &elemval)) return JS_FALSE;
+ elem = JSVAL_TO_OBJECT(elemval);
+ file_writeln(cx, obj, 1, &elemval, rval);
+ }
+
+ *rval = JSVAL_TRUE;
+ return JS_TRUE;
+out:
+ *rval = JSVAL_FALSE;
+ return JS_FALSE;
+}
+
+static JSBool
+file_read(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSFile *file = JS_GetInstancePrivate(cx, obj, &js_FileClass, NULL);
+ JSString *str;
+ int32 want, count;
+ jschar *buf;
+
+ SECURITY_CHECK(cx, NULL, "read", file);
+ JSFILE_CHECK_ONE_ARG("read");
+ JSFILE_CHECK_READ;
+
+ if (!JS_ValueToInt32(cx, argv[0], &want)){
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_FIRST_ARGUMENT_MUST_BE_A_NUMBER, "read", argv[0]);
+ goto out;
+ }
+
+ /* want = (want>262144)?262144:want; * arbitrary size limitation */
+
+ buf = JS_malloc(cx, want*sizeof buf[0]);
+ if (!buf) goto out;
+
+ count = js_FileRead(cx, file, buf, want, file->type);
+ if (count>0) {
+ str = JS_NewUCStringCopyN(cx, buf, count);
+ *rval = STRING_TO_JSVAL(str);
+ JS_free(cx, buf);
+ return JS_TRUE;
+ } else {
+ JS_free(cx, buf);
+ goto out;
+ }
+out:
+ *rval = JSVAL_FALSE;
+ return JS_FALSE;
+}
+
+static JSBool
+file_readln(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSFile *file = JS_GetInstancePrivate(cx, obj, &js_FileClass, NULL);
+ JSString *str;
+ jschar *buf = NULL, *tmp;
+ int32 offset, read;
+ intN room;
+ jschar data, data2;
+
+ SECURITY_CHECK(cx, NULL, "readln", file);
+ JSFILE_CHECK_READ;
+
+ buf = JS_malloc(cx, MAX_LINE_LENGTH * sizeof data);
+ if (!buf)
+ return JS_FALSE;
+
+ room = MAX_LINE_LENGTH - 1;
+ offset = 0;
+
+ for (;;) {
+ read = js_FileRead(cx, file, &data, 1, file->type);
+ if (read < 0)
+ goto out;
+ if (read == 0)
+ goto eof;
+
+ switch (data) {
+ case '\r':
+ read = js_FileRead(cx, file, &data2, 1, file->type);
+ if (read < 0)
+ goto out;
+
+ if (read == 1 && data2 != '\n') {
+ /* We read one char too far. Buffer it. */
+ file->charBuffer = data2;
+ file->charBufferUsed = JS_TRUE;
+ }
+
+ /* Fall through. */
+ case '\n':
+ goto done;
+
+ default:
+ if (--room < 0) {
+ tmp = JS_realloc(cx, buf,
+ (offset + MAX_LINE_LENGTH) * sizeof data);
+ if (!tmp)
+ goto out;
+
+ room = MAX_LINE_LENGTH - 1;
+ buf = tmp;
+ }
+
+ buf[offset++] = data;
+ break;
+ }
+ }
+
+eof:
+ if (offset == 0) {
+ *rval = JSVAL_NULL;
+ return JS_TRUE;
+ }
+
+done:
+ buf[offset] = 0;
+ tmp = JS_realloc(cx, buf, (offset + 1) * sizeof data);
+ if (!tmp)
+ goto out;
+
+ str = JS_NewUCString(cx, tmp, offset);
+ if (!str)
+ goto out;
+
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+
+out:
+ if (buf)
+ JS_free(cx, buf);
+
+ return JS_FALSE;
+}
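+
+/*
+ * Line-ending behaviour above: '\n', '\r', and "\r\n" all end a line; when
+ * '\r' is followed by any other character, that character is parked in
+ * file->charBuffer and picked up by the next read.
+ */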
+
+static JSBool
+file_readAll(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSFile *file = JS_GetInstancePrivate(cx, obj, &js_FileClass, NULL);
+ JSObject *array;
+ jsint len;
+ jsval line;
+ JSBool lineok = JS_FALSE;
+
+ SECURITY_CHECK(cx, NULL, "readAll", file);
+ JSFILE_CHECK_READ;
+
+ array = JS_NewArrayObject(cx, 0, NULL);
+ if (!array)
+ return JS_FALSE;
+ *rval = OBJECT_TO_JSVAL(array);
+
+ len = 0;
+
+ lineok = file_readln(cx, obj, 0, NULL, &line);
+ while (lineok && !JSVAL_IS_NULL(line)) {
+ JS_SetElement(cx, array, len++, &line);
+ lineok = file_readln(cx, obj, 0, NULL, &line);
+ }
+
+out:
+ return lineok;
+}
+
+static JSBool
+file_seek(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSFile *file = JS_GetInstancePrivate(cx, obj, &js_FileClass, NULL);
+ int32 toskip;
+ int32 pos;
+
+ SECURITY_CHECK(cx, NULL, "seek", file);
+ JSFILE_CHECK_ONE_ARG("seek");
+ JSFILE_CHECK_NATIVE("seek");
+ JSFILE_CHECK_READ;
+
+ if (!JS_ValueToInt32(cx, argv[0], &toskip)){
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_FIRST_ARGUMENT_MUST_BE_A_NUMBER, "seek", argv[0]);
+ goto out;
+ }
+
+ if(!file->hasRandomAccess){
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_NO_RANDOM_ACCESS, file->path);
+ goto out;
+ }
+
+ if(js_isDirectory(cx, file)){
+ JS_ReportWarning(cx,"Seek on directories is not supported, proceeding");
+ goto out;
+ }
+
+ pos = js_FileSeek(cx, file, toskip, file->type);
+
+ if (pos!=-1) {
+ *rval = INT_TO_JSVAL(pos);
+ return JS_TRUE;
+ }
+out:
+ *rval = JSVAL_VOID;
+ return JS_FALSE;
+}
+
+static JSBool
+file_list(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ PRDir *dir;
+ PRDirEntry *entry;
+ JSFile *file = JS_GetInstancePrivate(cx, obj, &js_FileClass, NULL);
+ JSObject *array;
+ JSObject *eachFile;
+ jsint len;
+ jsval v;
+ JSRegExp *re = NULL;
+ JSFunction *func = NULL;
+ JSString *str;
+ jsval args[1];
+ char *filePath;
+
+ SECURITY_CHECK(cx, NULL, "list", file);
+ JSFILE_CHECK_NATIVE("list");
+
+ if (argc==1) {
+ if (JSVAL_IS_REGEXP(cx, argv[0])) {
+ re = JS_GetPrivate(cx, JSVAL_TO_OBJECT(argv[0]));
+ }else
+ if (VALUE_IS_FUNCTION(cx, argv[0])) {
+ func = JS_GetPrivate(cx, JSVAL_TO_OBJECT(argv[0]));
+ }else{
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_FIRST_ARGUMENT_MUST_BE_A_FUNCTION_OR_REGEX, argv[0]);
+ goto out;
+ }
+ }
+
+ if (!js_isDirectory(cx, file)) {
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_CANNOT_DO_LIST_ON_A_FILE, file->path);
+ goto out;
+ }
+
+ dir = PR_OpenDir(file->path);
+ if(!dir){
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_OP_FAILED, "open", file->path);
+ goto out;
+ }
+
+ /* create JSArray here... */
+ array = JS_NewArrayObject(cx, 0, NULL);
+ len = 0;
+
+ while ((entry = PR_ReadDir(dir, PR_SKIP_BOTH))!=NULL) {
+ /* first, check if we have a regexp */
+ if (re!=NULL) {
+ size_t index = 0;
+
+ str = JS_NewStringCopyZ(cx, entry->name);
+ if(!js_ExecuteRegExp(cx, re, str, &index, JS_TRUE, &v)){
+ /* don't report anything here */
+ goto out;
+ }
+ /* not matched! */
+ if (JSVAL_IS_NULL(v)) {
+ continue;
+ }
+ }else
+ if (func!=NULL) {
+ str = JS_NewStringCopyZ(cx, entry->name);
+ args[0] = STRING_TO_JSVAL(str);
+ if(!JS_CallFunction(cx, obj, func, 1, args, &v)){
+ goto out;
+ }
+
+ if (v==JSVAL_FALSE) {
+ continue;
+ }
+ }
+
+ filePath = js_combinePath(cx, file->path, (char*)entry->name);
+
+ eachFile = js_NewFileObject(cx, filePath);
+ if (!eachFile){
+ JS_ReportWarning(cx, "File %s cannot be retrieved", filePath);
+ JS_free(cx, filePath);
+ continue;
+ }
+ JS_free(cx, filePath);
+ v = OBJECT_TO_JSVAL(eachFile);
+ JS_SetElement(cx, array, len, &v);
+ JS_SetProperty(cx, array, entry->name, &v);
+ len++;
+ }
+
+ if(PR_CloseDir(dir)!=PR_SUCCESS){
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_OP_FAILED, "close", file->path);
+ goto out;
+ }
+ *rval = OBJECT_TO_JSVAL(array);
+ return JS_TRUE;
+out:
+ *rval = JSVAL_NULL;
+ return JS_FALSE;
+}
+
+static JSBool
+file_mkdir(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSFile *file = JS_GetInstancePrivate(cx, obj, &js_FileClass, NULL);
+
+ SECURITY_CHECK(cx, NULL, "mkdir", file);
+ JSFILE_CHECK_ONE_ARG("mkdir");
+ JSFILE_CHECK_NATIVE("mkdir");
+
+ /* if the current file is not a directory, find out the directory name */
+ if (!js_isDirectory(cx, file)) {
+ char *dir = js_fileDirectoryName(cx, file->path);
+ JSObject *dirObj = js_NewFileObject(cx, dir);
+
+ JS_free(cx, dir);
+
+ /* call file_mkdir with the right set of parameters if needed */
+ if (file_mkdir(cx, dirObj, argc, argv, rval))
+ return JS_TRUE;
+ else
+ goto out;
+ }else{
+ char *dirName = JS_GetStringBytes(JS_ValueToString(cx, argv[0]));
+ char *fullName;
+
+ fullName = js_combinePath(cx, file->path, dirName);
+ if (PR_MkDir(fullName, 0755)==PR_SUCCESS){
+ *rval = JSVAL_TRUE;
+ JS_free(cx, fullName);
+ return JS_TRUE;
+ }else{
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_OP_FAILED, "mkdir", fullName);
+ JS_free(cx, fullName);
+ goto out;
+ }
+ }
+out:
+ *rval = JSVAL_FALSE;
+ return JS_FALSE;
+}
+
+static JSBool
+file_toString(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval*rval)
+{
+ JSFile *file = JS_GetInstancePrivate(cx, obj, &js_FileClass, NULL);
+ JSString *str;
+
+ str = JS_NewStringCopyZ(cx, file->path);
+ if (!str)
+ return JS_FALSE;
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+
+static JSBool
+file_toURL(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSFile *file = JS_GetInstancePrivate(cx, obj, &js_FileClass, NULL);
+ char url[MAX_PATH_LENGTH];
+ jschar *urlChars;
+ size_t len;
+ JSString *str;
+
+ JSFILE_CHECK_NATIVE("toURL");
+
+ sprintf(url, "file://%s", file->path);
+
+ len = strlen(url);
+ urlChars = js_InflateString(cx, url, &len);
+ if (!urlChars)
+ return JS_FALSE;
+ str = js_NewString(cx, urlChars, len, 0);
+ if (!str) {
+ JS_free(cx, urlChars);
+ return JS_FALSE;
+ }
+ *rval = STRING_TO_JSVAL(str);
+
+ /* TODO: js_escape in jsstr.h may go away at some point */
+ return js_str_escape(cx, obj, 0, rval, rval);
+
+out:
+ *rval = JSVAL_VOID;
+ return JS_FALSE;
+}
+
+
+static void
+file_finalize(JSContext *cx, JSObject *obj)
+{
+ JSFile *file = JS_GetInstancePrivate(cx, obj, &js_FileClass, NULL);
+
+ if(file) {
+ /* Close the file before exiting. */
+ if(file->isOpen && !file->isNative) {
+ jsval vp;
+ file_close(cx, obj, 0, NULL, &vp);
+ }
+
+ if (file->path)
+ JS_free(cx, file->path);
+
+ JS_free(cx, file);
+ }
+}
+
+/*
+ * Allocates memory for the file object, sets fields to defaults.
+ */
+static JSFile*
+file_init(JSContext *cx, JSObject *obj, char *bytes)
+{
+ JSFile *file;
+
+ file = JS_malloc(cx, sizeof *file);
+ if (!file)
+ return NULL;
+ memset(file, 0 , sizeof *file);
+
+ js_ResetAttributes(file);
+
+ file->path = RESOLVE_PATH(cx, bytes);
+
+ if (!JS_SetPrivate(cx, obj, file)) {
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_CANNOT_SET_PRIVATE_FILE, file->path);
+ JS_free(cx, file);
+ return NULL;
+ }
+
+ return file;
+}
+
+/* Returns a JSObject. This function is globally visible */
+JS_PUBLIC_API(JSObject*)
+js_NewFileObject(JSContext *cx, char *filename)
+{
+ JSObject *obj;
+ JSFile *file;
+
+ obj = JS_NewObject(cx, &js_FileClass, NULL, NULL);
+ if (!obj){
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_OBJECT_CREATION_FAILED, "js_NewFileObject");
+ return NULL;
+ }
+ file = file_init(cx, obj, filename);
+ if(!file) return NULL;
+ return obj;
+}
+
+/* Internal function, used for cases that NSPR file support doesn't cover */
+JSObject*
+js_NewFileObjectFromFILE(JSContext *cx, FILE *nativehandle, char *filename,
+ int32 mode, JSBool open, JSBool randomAccess)
+{
+ JSObject *obj;
+ JSFile *file;
+
+ obj = JS_NewObject(cx, &js_FileClass, NULL, NULL);
+ if (!obj){
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_OBJECT_CREATION_FAILED, "js_NewFileObjectFromFILE");
+ return NULL;
+ }
+ file = file_init(cx, obj, filename);
+ if(!file) return NULL;
+
+ file->nativehandle = nativehandle;
+
+ /* free result of RESOLVE_PATH from file_init. */
+ JS_ASSERT(file->path != NULL);
+ JS_free(cx, file->path);
+
+ file->path = strdup(filename);
+ file->isOpen = open;
+ file->mode = mode;
+ file->hasRandomAccess = randomAccess;
+ file->isNative = JS_TRUE;
+ return obj;
+}
+
+/*
+ * Real file constructor that is called from JavaScript.
+ * Basically, does error processing and calls file_init.
+ */
+static JSBool
+file_constructor(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSString *str;
+ JSFile *file;
+
+ if (!(cx->fp->flags & JSFRAME_CONSTRUCTING)) {
+ /* Replace obj with a new File object. */
+ obj = JS_NewObject(cx, &js_FileClass, NULL, NULL);
+ if (!obj)
+ return JS_FALSE;
+ *rval = OBJECT_TO_JSVAL(obj);
+ }
+
+ str = (argc == 0)
+ ? JS_InternString(cx, "")
+ : JS_ValueToString(cx, argv[0]);
+
+ if (!str) {
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_FIRST_ARGUMENT_CONSTRUCTOR_NOT_STRING_ERROR,
+ argv[0]);
+ return JS_FALSE;
+ }
+
+ file = file_init(cx, obj, JS_GetStringBytes(str));
+ if (!file)
+ return JS_FALSE;
+
+ SECURITY_CHECK(cx, NULL, "constructor", file);
+
+ return JS_TRUE;
+}
+
+/* -------------------- File methods and properties ------------------------- */
+static JSFunctionSpec file_functions[] = {
+ { "open", file_open, 0},
+ { "close", file_close, 0},
+ { "remove", file_remove, 0},
+ { "copyTo", file_copyTo, 0},
+ { "renameTo", file_renameTo, 0},
+ { "flush", file_flush, 0},
+ { "seek", file_seek, 0},
+ { "read", file_read, 0},
+ { "readln", file_readln, 0},
+ { "readAll", file_readAll, 0},
+ { "write", file_write, 0},
+ { "writeln", file_writeln, 0},
+ { "writeAll", file_writeAll, 0},
+ { "list", file_list, 0},
+ { "mkdir", file_mkdir, 0},
+ { "toString", file_toString, 0},
+ { "toURL", file_toURL, 0},
+ {0}
+};
+
+enum file_tinyid {
+ FILE_LENGTH = -2,
+ FILE_PARENT = -3,
+ FILE_PATH = -4,
+ FILE_NAME = -5,
+ FILE_ISDIR = -6,
+ FILE_ISFILE = -7,
+ FILE_EXISTS = -8,
+ FILE_CANREAD = -9,
+ FILE_CANWRITE = -10,
+ FILE_OPEN = -11,
+ FILE_TYPE = -12,
+ FILE_MODE = -13,
+ FILE_CREATED = -14,
+ FILE_MODIFIED = -15,
+ FILE_SIZE = -16,
+ FILE_RANDOMACCESS = -17,
+ FILE_POSITION = -18,
+ FILE_APPEND = -19,
+ FILE_REPLACE = -20,
+ FILE_AUTOFLUSH = -21,
+ FILE_ISNATIVE = -22,
+};
+
+static JSPropertySpec file_props[] = {
+ {"length", FILE_LENGTH, JSPROP_ENUMERATE | JSPROP_READONLY },
+ {"parent", FILE_PARENT, JSPROP_ENUMERATE | JSPROP_READONLY },
+ {"path", FILE_PATH, JSPROP_ENUMERATE | JSPROP_READONLY },
+ {"name", FILE_NAME, JSPROP_ENUMERATE | JSPROP_READONLY },
+ {"isDirectory", FILE_ISDIR, JSPROP_ENUMERATE | JSPROP_READONLY },
+ {"isFile", FILE_ISFILE, JSPROP_ENUMERATE | JSPROP_READONLY },
+ {"exists", FILE_EXISTS, JSPROP_ENUMERATE | JSPROP_READONLY },
+ {"canRead", FILE_CANREAD, JSPROP_ENUMERATE | JSPROP_READONLY },
+ {"canWrite", FILE_CANWRITE, JSPROP_ENUMERATE | JSPROP_READONLY },
+ {"canAppend", FILE_APPEND, JSPROP_ENUMERATE | JSPROP_READONLY },
+ {"canReplace", FILE_REPLACE, JSPROP_ENUMERATE | JSPROP_READONLY },
+ {"isOpen", FILE_OPEN, JSPROP_ENUMERATE | JSPROP_READONLY },
+ {"type", FILE_TYPE, JSPROP_ENUMERATE | JSPROP_READONLY },
+ {"mode", FILE_MODE, JSPROP_ENUMERATE | JSPROP_READONLY },
+ {"creationTime", FILE_CREATED, JSPROP_ENUMERATE | JSPROP_READONLY },
+ {"lastModified", FILE_MODIFIED, JSPROP_ENUMERATE | JSPROP_READONLY },
+ {"size", FILE_SIZE, JSPROP_ENUMERATE | JSPROP_READONLY },
+ {"hasRandomAccess", FILE_RANDOMACCESS, JSPROP_ENUMERATE | JSPROP_READONLY },
+ {"hasAutoFlush", FILE_AUTOFLUSH, JSPROP_ENUMERATE | JSPROP_READONLY },
+ {"position", FILE_POSITION, JSPROP_ENUMERATE },
+ {"isNative", FILE_ISNATIVE, JSPROP_ENUMERATE | JSPROP_READONLY },
+ {0}
+};
+
+/* ------------------------- Property getter/setter ------------------------- */
+static JSBool
+file_getProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ JSFile *file = JS_GetInstancePrivate(cx, obj, &js_FileClass, NULL);
+ char *bytes;
+ JSString *str;
+ jsint tiny;
+ PRFileInfo info;
+ JSBool flag;
+ PRExplodedTime expandedTime;
+
+ tiny = JSVAL_TO_INT(id);
+ if (!file)
+ return JS_TRUE;
+
+ switch (tiny) {
+ case FILE_PARENT:
+ SECURITY_CHECK(cx, NULL, "parent", file);
+ if (!js_parent(cx, file, vp))
+ return JS_FALSE;
+ break;
+ case FILE_PATH:
+ str = JS_NewStringCopyZ(cx, file->path);
+ if (!str)
+ return JS_FALSE;
+ *vp = STRING_TO_JSVAL(str);
+ break;
+ case FILE_NAME:
+ if (!js_name(cx, file, vp))
+ return JS_FALSE;
+ break;
+ case FILE_ISDIR:
+ SECURITY_CHECK(cx, NULL, "isDirectory", file);
+ *vp = BOOLEAN_TO_JSVAL(js_isDirectory(cx, file));
+ break;
+ case FILE_ISFILE:
+ SECURITY_CHECK(cx, NULL, "isFile", file);
+ *vp = BOOLEAN_TO_JSVAL(js_isFile(cx, file));
+ break;
+ case FILE_EXISTS:
+ SECURITY_CHECK(cx, NULL, "exists", file);
+ *vp = BOOLEAN_TO_JSVAL(js_exists(cx, file));
+ break;
+ case FILE_ISNATIVE:
+ SECURITY_CHECK(cx, NULL, "isNative", file);
+ *vp = BOOLEAN_TO_JSVAL(file->isNative);
+ break;
+ case FILE_CANREAD:
+ SECURITY_CHECK(cx, NULL, "canRead", file);
+ *vp = BOOLEAN_TO_JSVAL(js_canRead(cx, file));
+ break;
+ case FILE_CANWRITE:
+ SECURITY_CHECK(cx, NULL, "canWrite", file);
+ *vp = BOOLEAN_TO_JSVAL(js_canWrite(cx, file));
+ break;
+ case FILE_OPEN:
+ SECURITY_CHECK(cx, NULL, "isOpen", file);
+ *vp = BOOLEAN_TO_JSVAL(file->isOpen);
+ break;
+ case FILE_APPEND :
+ SECURITY_CHECK(cx, NULL, "canAppend", file);
+ JSFILE_CHECK_OPEN("canAppend");
+ *vp = BOOLEAN_TO_JSVAL(!file->isNative &&
+ (file->mode&PR_APPEND)==PR_APPEND);
+ break;
+ case FILE_REPLACE :
+ SECURITY_CHECK(cx, NULL, "canReplace", file);
+ JSFILE_CHECK_OPEN("canReplace");
+ *vp = BOOLEAN_TO_JSVAL(!file->isNative &&
+ (file->mode&PR_TRUNCATE)==PR_TRUNCATE);
+ break;
+ case FILE_AUTOFLUSH :
+ SECURITY_CHECK(cx, NULL, "hasAutoFlush", file);
+ JSFILE_CHECK_OPEN("hasAutoFlush");
+ *vp = BOOLEAN_TO_JSVAL(!file->isNative && file->hasAutoflush);
+ break;
+ case FILE_TYPE:
+ SECURITY_CHECK(cx, NULL, "type", file);
+ JSFILE_CHECK_OPEN("type");
+ if(js_isDirectory(cx, file)){
+ *vp = JSVAL_VOID;
+ break;
+ }
+
+ switch (file->type) {
+ case ASCII:
+ *vp = STRING_TO_JSVAL(JS_NewStringCopyZ(cx, asciistring));
+ break;
+ case UTF8:
+ *vp = STRING_TO_JSVAL(JS_NewStringCopyZ(cx, utfstring));
+ break;
+ case UCS2:
+ *vp = STRING_TO_JSVAL(JS_NewStringCopyZ(cx, unicodestring));
+ break;
+ default:
+ JS_ReportWarning(cx, "Unsupported file type %d, proceeding",
+ file->type);
+ }
+ break;
+ case FILE_MODE:
+ SECURITY_CHECK(cx, NULL, "mode", file);
+ JSFILE_CHECK_OPEN("mode");
+ bytes = JS_malloc(cx, MODE_SIZE);
+ bytes[0] = '\0';
+ flag = JS_FALSE;
+
+ if ((file->mode&PR_RDONLY)==PR_RDONLY) {
+ if (flag) strcat(bytes, ",");
+ strcat(bytes, "read");
+ flag = JS_TRUE;
+ }
+ if ((file->mode&PR_WRONLY)==PR_WRONLY) {
+ if (flag) strcat(bytes, ",");
+ strcat(bytes, "write");
+ flag = JS_TRUE;
+ }
+ if ((file->mode&PR_RDWR)==PR_RDWR) {
+ if (flag) strcat(bytes, ",");
+ strcat(bytes, "readWrite");
+ flag = JS_TRUE;
+ }
+ if ((file->mode&PR_APPEND)==PR_APPEND) {
+ if (flag) strcat(bytes, ",");
+ strcat(bytes, "append");
+ flag = JS_TRUE;
+ }
+ if ((file->mode&PR_CREATE_FILE)==PR_CREATE_FILE) {
+ if (flag) strcat(bytes, ",");
+ strcat(bytes, "create");
+ flag = JS_TRUE;
+ }
+ if ((file->mode&PR_TRUNCATE)==PR_TRUNCATE) {
+ if (flag) strcat(bytes, ",");
+ strcat(bytes, "replace");
+ flag = JS_TRUE;
+ }
+ if (file->hasAutoflush) {
+ if (flag) strcat(bytes, ",");
+ strcat(bytes, "hasAutoFlush");
+ flag = JS_TRUE;
+ }
+ *vp = STRING_TO_JSVAL(JS_NewStringCopyZ(cx, bytes));
+ JS_free(cx, bytes);
+ break;
+ case FILE_CREATED:
+ SECURITY_CHECK(cx, NULL, "creationTime", file);
+ JSFILE_CHECK_NATIVE("creationTime");
+ if(((file->isOpen)?
+ PR_GetOpenFileInfo(file->handle, &info):
+ PR_GetFileInfo(file->path, &info))!=PR_SUCCESS){
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_CANNOT_ACCESS_FILE_STATUS, file->path);
+ goto out;
+ }
+
+ PR_ExplodeTime(info.creationTime, PR_LocalTimeParameters,&expandedTime);
+ *vp = OBJECT_TO_JSVAL(js_NewDateObject(cx, expandedTime.tm_year,
+ expandedTime.tm_month,
+ expandedTime.tm_mday,
+ expandedTime.tm_hour,
+ expandedTime.tm_min,
+ expandedTime.tm_sec));
+ break;
+ case FILE_MODIFIED:
+ SECURITY_CHECK(cx, NULL, "lastModified", file);
+ JSFILE_CHECK_NATIVE("lastModified");
+ if(((file->isOpen)?
+ PR_GetOpenFileInfo(file->handle, &info):
+ PR_GetFileInfo(file->path, &info))!=PR_SUCCESS){
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_CANNOT_ACCESS_FILE_STATUS, file->path);
+ goto out;
+ }
+
+ PR_ExplodeTime(info.modifyTime, PR_LocalTimeParameters, &expandedTime);
+ *vp = OBJECT_TO_JSVAL(js_NewDateObject(cx, expandedTime.tm_year,
+ expandedTime.tm_month,
+ expandedTime.tm_mday,
+ expandedTime.tm_hour,
+ expandedTime.tm_min,
+ expandedTime.tm_sec));
+ break;
+ case FILE_SIZE:
+ SECURITY_CHECK(cx, NULL, "size", file);
+ *vp = js_size(cx, file);
+ break;
+ case FILE_LENGTH:
+ SECURITY_CHECK(cx, NULL, "length", file);
+ JSFILE_CHECK_NATIVE("length");
+
+ if (js_isDirectory(cx, file)) { /* XXX debug me */
+ PRDir *dir;
+ PRDirEntry *entry;
+ jsint count = 0;
+
+ if(!(dir = PR_OpenDir(file->path))){
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_CANNOT_OPEN_DIR, file->path);
+ goto out;
+ }
+
+ while ((entry = PR_ReadDir(dir, PR_SKIP_BOTH))) {
+ count++;
+ }
+
+ if(!PR_CloseDir(dir)){
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_OP_FAILED, "close", file->path);
+
+ goto out;
+ }
+
+ *vp = INT_TO_JSVAL(count);
+ break;
+ }else{
+ /* return file size */
+ *vp = js_size(cx, file);
+ }
+ break;
+ case FILE_RANDOMACCESS:
+ SECURITY_CHECK(cx, NULL, "hasRandomAccess", file);
+ JSFILE_CHECK_OPEN("hasRandomAccess");
+ *vp = BOOLEAN_TO_JSVAL(file->hasRandomAccess);
+ break;
+ case FILE_POSITION:
+ SECURITY_CHECK(cx, NULL, "position", file);
+ JSFILE_CHECK_NATIVE("position");
+ JSFILE_CHECK_OPEN("position");
+
+ if(!file->hasRandomAccess){
+ JS_ReportWarning(cx, "File %s doesn't support random access, can't "
+ "report the position, proceeding", file->path);
+ *vp = JSVAL_VOID;
+ break;
+ }
+
+ if (file->isOpen && js_isFile(cx, file)) {
+ int pos = PR_Seek(file->handle, 0, PR_SEEK_CUR);
+ if(pos!=-1){
+ *vp = INT_TO_JSVAL(pos);
+ }else{
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_CANNOT_REPORT_POSITION, file->path);
+ goto out;
+ }
+ }else {
+ JS_ReportWarning(cx, "File %s is closed or not a plain file,"
+ " can't report position, proceeding", file->path);
+ goto out;
+ }
+ break;
+ default:
+ SECURITY_CHECK(cx, NULL, "file_access", file);
+
+ /* this is some other property -- try to use the dir["file"] syntax */
+ if (js_isDirectory(cx, file)) {
+ PRDir *dir = NULL;
+ PRDirEntry *entry = NULL;
+ char *prop_name;
+
+ str = JS_ValueToString(cx, id);
+ if (!str)
+ return JS_FALSE;
+
+ prop_name = JS_GetStringBytes(str);
+
+ /* no native files past this point */
+ dir = PR_OpenDir(file->path);
+ if(!dir) {
+ /* This is probably not a directory */
+ JS_ReportWarning(cx, "Can't open directory %s", file->path);
+ return JS_FALSE;
+ }
+
+ while ((entry = PR_ReadDir(dir, PR_SKIP_NONE)) != NULL) {
+ if (!strcmp(entry->name, prop_name)){
+ bytes = js_combinePath(cx, file->path, prop_name);
+ *vp = OBJECT_TO_JSVAL(js_NewFileObject(cx, bytes));
+ PR_CloseDir(dir);
+ JS_free(cx, bytes);
+ return !JSVAL_IS_NULL(*vp);
+ }
+ }
+ PR_CloseDir(dir);
+ }
+ }
+ return JS_TRUE;
+
+out:
+ return JS_FALSE;
+}
+
+static JSBool
+file_setProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ JSFile *file = JS_GetInstancePrivate(cx, obj, &js_FileClass, NULL);
+ jsint slot;
+
+ if (JSVAL_IS_STRING(id)){
+ return JS_TRUE;
+ }
+
+ slot = JSVAL_TO_INT(id);
+
+ switch (slot) {
+ /* File.position = 10 */
+ case FILE_POSITION:
+ SECURITY_CHECK(cx, NULL, "set_position", file);
+ JSFILE_CHECK_NATIVE("set_position");
+
+ if(!file->hasRandomAccess){
+ JS_ReportWarning(cx, "File %s doesn't support random access, can't "
+ "set the position, proceeding", file->path);
+ goto out;
+ }
+
+ if (file->isOpen && js_isFile(cx, file)) {
+ int32 pos;
+ int32 offset;
+
+ if (!JS_ValueToInt32(cx, *vp, &offset)){
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_FIRST_ARGUMENT_MUST_BE_A_NUMBER, "position", *vp);
+ goto out;
+ }
+
+ pos = PR_Seek(file->handle, offset, PR_SEEK_SET);
+
+ if(pos!=-1){
+ *vp = INT_TO_JSVAL(pos);
+ }else{
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_CANNOT_SET_POSITION, file->path);
+ goto out;
+ }
+ } else {
+ JS_ReportWarning(cx, "File %s is closed or not a file, can't set "
+ "position, proceeding", file->path);
+ goto out;
+ }
+ }
+
+ return JS_TRUE;
+out:
+ return JS_FALSE;
+}
+
+/*
+ * File.currentDir = new File("D:\") or File.currentDir = "D:\"
+ */
+static JSBool
+file_currentDirSetter(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ JSFile *file;
+
+ file = JS_GetInstancePrivate(cx, obj, &js_FileClass, NULL);
+
+ /* Look at the rhs and extract a file object from it */
+ if (JSVAL_IS_OBJECT(*vp)) {
+ if (JS_InstanceOf(cx, obj, &js_FileClass, NULL)) {
+ /* Braindamaged rhs -- just return the old value */
+ if (file && (!js_exists(cx, file) || !js_isDirectory(cx, file))) {
+ JS_GetProperty(cx, obj, CURRENTDIR_PROPERTY, vp);
+ return JS_FALSE;
+ } else {
+ chdir(file->path);
+ return JS_TRUE;
+ }
+ } else {
+ return JS_FALSE;
+ }
+ } else {
+ JSObject *rhsObject;
+ char *path;
+
+ path = JS_GetStringBytes(JS_ValueToString(cx, *vp));
+ rhsObject = js_NewFileObject(cx, path);
+ if (!rhsObject)
+ return JS_FALSE;
+
+ if (!file || !js_exists(cx, file) || !js_isDirectory(cx, file)){
+ JS_GetProperty(cx, obj, CURRENTDIR_PROPERTY, vp);
+ } else {
+ *vp = OBJECT_TO_JSVAL(rhsObject);
+ chdir(path);
+ }
+ }
+
+ return JS_TRUE;
+}
+
+/* Declare class */
+JSClass js_FileClass = {
+ "File", JSCLASS_HAS_PRIVATE | JSCLASS_HAS_CACHED_PROTO(JSProto_File),
+ JS_PropertyStub, JS_PropertyStub, file_getProperty, file_setProperty,
+ JS_EnumerateStub, JS_ResolveStub, JS_ConvertStub, file_finalize
+};
+
+/* -------------------- Functions exposed to the outside -------------------- */
+JS_PUBLIC_API(JSObject*)
+js_InitFileClass(JSContext *cx, JSObject* obj)
+{
+ JSObject *file, *ctor, *afile;
+ jsval vp;
+ char *currentdir;
+ char separator[2];
+
+ file = JS_InitClass(cx, obj, NULL, &js_FileClass, file_constructor, 1,
+ file_props, file_functions, NULL, NULL);
+ if (!file) {
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_INIT_FAILED);
+ return NULL;
+ }
+
+ ctor = JS_GetConstructor(cx, file);
+ if (!ctor) return NULL;
+
+ /* Define CURRENTDIR property. We are doing this to get a
+ slash at the end of the current dir */
+ afile = js_NewFileObject(cx, CURRENT_DIR);
+ currentdir = JS_malloc(cx, MAX_PATH_LENGTH);
+ currentdir = getcwd(currentdir, MAX_PATH_LENGTH);
+ afile = js_NewFileObject(cx, currentdir);
+ JS_free(cx, currentdir);
+ vp = OBJECT_TO_JSVAL(afile);
+ JS_DefinePropertyWithTinyId(cx, ctor, CURRENTDIR_PROPERTY, 0, vp,
+ JS_PropertyStub, file_currentDirSetter,
+ JSPROP_ENUMERATE | JSPROP_READONLY );
+
+ /* Define input */
+ vp = OBJECT_TO_JSVAL(js_NewFileObjectFromFILE(cx, stdin,
+ STDINPUT_NAME, PR_RDONLY, JS_TRUE, JS_FALSE));
+ JS_SetProperty(cx, ctor, "input", &vp);
+
+ /* Define output */
+ vp = OBJECT_TO_JSVAL(js_NewFileObjectFromFILE(cx, stdout,
+ STDOUTPUT_NAME, PR_WRONLY, JS_TRUE, JS_FALSE));
+ JS_SetProperty(cx, ctor, "output", &vp);
+
+ /* Define error */
+ vp = OBJECT_TO_JSVAL(js_NewFileObjectFromFILE(cx, stderr,
+ STDERROR_NAME, PR_WRONLY, JS_TRUE, JS_FALSE));
+ JS_SetProperty(cx, ctor, "error", &vp);
+
+ separator[0] = FILESEPARATOR;
+ separator[1] = '\0';
+ vp = STRING_TO_JSVAL(JS_NewStringCopyZ(cx, separator));
+ JS_DefinePropertyWithTinyId(cx, ctor, SEPARATOR_PROPERTY, 0, vp,
+ JS_PropertyStub, JS_PropertyStub,
+ JSPROP_ENUMERATE | JSPROP_READONLY );
+ return file;
+}
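+
+/*
+ * Minimal embedding sketch (non-authoritative; assumes cx and a global object
+ * were created elsewhere):
+ *
+ *     if (!js_InitFileClass(cx, global))        <- installs the File class
+ *         return JS_FALSE;
+ *     fileObj = js_NewFileObject(cx, "/tmp/x"); <- wraps a hypothetical path
+ */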
+#endif /* JS_HAS_FILE_OBJECT */
diff --git a/third_party/js-1.7/jsfile.h b/third_party/js-1.7/jsfile.h
new file mode 100644
index 0000000..78707e8
--- /dev/null
+++ b/third_party/js-1.7/jsfile.h
@@ -0,0 +1,56 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef _jsfile_h__
+#define _jsfile_h__
+
+#if JS_HAS_FILE_OBJECT
+
+#include "jsobj.h"
+
+extern JS_PUBLIC_API(JSObject*)
+js_InitFileClass(JSContext *cx, JSObject* obj);
+
+extern JS_PUBLIC_API(JSObject*)
+js_NewFileObject(JSContext *cx, char *bytes);
+
+extern JSClass js_FileClass;
+
+#endif /* JS_HAS_FILE_OBJECT */
+#endif /* _jsfile_h__ */
diff --git a/third_party/js-1.7/jsfile.msg b/third_party/js-1.7/jsfile.msg
new file mode 100644
index 0000000..137b35d
--- /dev/null
+++ b/third_party/js-1.7/jsfile.msg
@@ -0,0 +1,90 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ Error messages for jsfile.c. See js.msg for format specification.
+*/
+
+MSG_DEF(JSFILEMSG_NOT_AN_ERROR, 0, 0, JSEXN_NONE, "<Error #0 is reserved>")
+MSG_DEF(JSFILEMSG_FILE_CONSTRUCTOR_UNDEFINED_ERROR, 1, 0, JSEXN_NONE, "File constructor is undefined")
+MSG_DEF(JSFILEMSG_FILE_CURRENTDIR_UNDEFINED_ERROR, 2, 0, JSEXN_NONE, "File.currentDir is undefined")
+MSG_DEF(JSFILEMSG_FIRST_ARGUMENT_OPEN_NOT_STRING_ERROR, 3, 1, JSEXN_NONE, "The first argument {0} to file.open must be a string")
+MSG_DEF(JSFILEMSG_SECOND_ARGUMENT_OPEN_NOT_STRING_ERROR, 4, 0, JSEXN_NONE, "The second argument to file.open must be a string")
+MSG_DEF(JSFILEMSG_CANNOT_COPY_FILE_OPEN_FOR_WRITING_ERROR, 5, 1, JSEXN_NONE, "Cannot copy file {0} open for writing")
+MSG_DEF(JSFILEMSG_CANNOT_ACCESS_FILE_INFO_ERROR, 6, 1, JSEXN_NONE, "Cannot access file information for {0}")
+MSG_DEF(JSFILEMSG_COPY_READ_ERROR, 7, 1, JSEXN_NONE, "An error occurred while attempting to read a file {0} to copy")
+MSG_DEF(JSFILEMSG_COPY_WRITE_ERROR, 8, 1, JSEXN_NONE, "An error occurred while attempting to copy into file {0}")
+MSG_DEF(JSFILEMSG_EXPECTS_ONE_ARG_ERROR, 9, 0, JSEXN_NONE, "Operation {0} expects one argument, not {1}")
+MSG_DEF(JSFILEMSG_CANNOT_FLUSH_CLOSE_FILE_ERROR, 10, 1, JSEXN_NONE, "Cannot flush closed file {0}")
+MSG_DEF(JSFILEMSG_CANNOT_OPEN_WRITING_ERROR, 11, 1, JSEXN_NONE, "Cannot open file {0} for writing")
+MSG_DEF(JSFILEMSG_WRITEALL_EXPECTS_ONE_ARG_ERROR, 12, 0, JSEXN_NONE, "writeAll expects one argument")
+MSG_DEF(JSFILEMSG_FIRST_ARGUMENT_WRITEALL_NOT_ARRAY_ERROR, 13, 0, JSEXN_NONE, "writeAll expects an array as an argument")
+MSG_DEF(JSFILEMSG_UNUSED0, 14, 0, JSEXN_NONE, "Unused error message slot")
+MSG_DEF(JSFILEMSG_CANNOT_OPEN_FILE_ERROR, 15, 1, JSEXN_NONE, "Cannot open file {0}")
+MSG_DEF(JSFILEMSG_FIRST_ARGUMENT_CONSTRUCTOR_NOT_STRING_ERROR, 16, 1, JSEXN_NONE, "The argument to the File constructor {0} must be a string")
+MSG_DEF(JSFILEMSG_BIDIRECTIONAL_PIPE_NOT_SUPPORTED, 17, 0, JSEXN_NONE, "Bidirectional pipes are not supported")
+MSG_DEF(JSFILEMSG_OPEN_MODE_NOT_SUPPORTED_WITH_PIPES, 18, 2, JSEXN_NONE, "The opening mode you have chosen {0} is not supported by the pipe you are trying to open: {1}")
+MSG_DEF(JSFILEMSG_OPEN_FAILED, 19, 1, JSEXN_NONE, "open on file {0} failed")
+MSG_DEF(JSFILEMSG_CLOSE_FAILED, 20, 1, JSEXN_NONE, "close on file {0} failed")
+MSG_DEF(JSFILEMSG_PCLOSE_FAILED, 21, 1, JSEXN_NONE, "pclose on file {0} failed")
+MSG_DEF(JSFILEMSG_REMOVE_FAILED, 22, 1, JSEXN_NONE, "remove on file {0} failed")
+MSG_DEF(JSFILEMSG_CANNOT_ACCESS_FILE_STATUS, 23, 1, JSEXN_NONE, "Cannot access file status for {0}")
+MSG_DEF(JSFILEMSG_RENAME_FAILED, 24, 2, JSEXN_NONE, "Cannot rename {0} to {1}")
+MSG_DEF(JSFILEMSG_WRITE_FAILED, 25, 1, JSEXN_NONE, "Write failed on file {0}")
+MSG_DEF(JSFILEMSG_READ_FAILED, 26, 1, JSEXN_NONE, "Read failed on file {0}")
+MSG_DEF(JSFILEMSG_SKIP_FAILED, 27, 1, JSEXN_NONE, "Skip failed on file {0}")
+MSG_DEF(JSFILEMSG_FIRST_ARGUMENT_MUST_BE_A_FUNCTION_OR_REGEX, 28, 1, JSEXN_NONE, "The first argument to file.list must be a function or a regex")
+MSG_DEF(JSFILEMSG_CANNOT_DO_LIST_ON_A_FILE, 29, 1, JSEXN_NONE, "{0} must be a directory, cannot do list")
+MSG_DEF(JSFILEMSG_NATIVE_OPERATION_IS_NOT_SUPPORTED, 30, 2, JSEXN_NONE, "Native operation {0} is not supported on {1}")
+MSG_DEF(JSFILEMSG_CANNOT_SET_PRIVATE_FILE, 31, 1, JSEXN_NONE, "Cannot set private data for file {0}")
+MSG_DEF(JSFILEMSG_FIRST_ARGUMENT_MUST_BE_A_NUMBER, 32, 2, JSEXN_NONE, "First argument to {0} must be a number, not {1}")
+MSG_DEF(JSFILEMSG_CANNOT_WRITE, 33, 1, JSEXN_NONE, "Cannot write to {0}, file mode is different")
+MSG_DEF(JSFILEMSG_CANNOT_READ, 34, 1, JSEXN_NONE, "Cannot read from {0}, file mode is different")
+MSG_DEF(JSFILEMSG_CANNOT_FLUSH, 35, 1, JSEXN_NONE, "Flush failed on {0}")
+MSG_DEF(JSFILEMSG_OP_FAILED, 36, 1, JSEXN_NONE, "File operation {0} failed")
+MSG_DEF(JSFILEMSG_FILE_MUST_BE_OPEN, 37, 1, JSEXN_NONE, "File must be open for {0}")
+MSG_DEF(JSFILEMSG_FILE_MUST_BE_CLOSED, 38, 1, JSEXN_NONE, "File must be closed for {0}")
+MSG_DEF(JSFILEMSG_NO_RANDOM_ACCESS, 39, 1, JSEXN_NONE, "File {0} doesn't allow random access")
+MSG_DEF(JSFILEMSG_OBJECT_CREATION_FAILED, 40, 1, JSEXN_NONE, "Couldn't create {0}")
+MSG_DEF(JSFILEMSG_CANNOT_OPEN_DIR, 41, 1, JSEXN_NONE, "Couldn't open directory {0}")
+MSG_DEF(JSFILEMSG_CANNOT_REPORT_POSITION, 42, 1, JSEXN_NONE, "Couldn't report position for {0}")
+MSG_DEF(JSFILEMSG_CANNOT_SET_POSITION, 43, 1, JSEXN_NONE, "Couldn't set position for {0}")
+MSG_DEF(JSFILEMSG_INIT_FAILED, 44, 0, JSEXN_NONE, "File class initialization failed")
+
+
diff --git a/third_party/js-1.7/jsfun.c b/third_party/js-1.7/jsfun.c
new file mode 100644
index 0000000..2a2df53
--- /dev/null
+++ b/third_party/js-1.7/jsfun.c
@@ -0,0 +1,2330 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sw=4 et tw=78:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * JS function support.
+ */
+#include "jsstddef.h"
+#include <string.h>
+#include "jstypes.h"
+#include "jsbit.h"
+#include "jsutil.h" /* Added by JSIFY */
+#include "jsapi.h"
+#include "jsarray.h"
+#include "jsatom.h"
+#include "jscntxt.h"
+#include "jsconfig.h"
+#include "jsdbgapi.h"
+#include "jsfun.h"
+#include "jsgc.h"
+#include "jsinterp.h"
+#include "jslock.h"
+#include "jsnum.h"
+#include "jsobj.h"
+#include "jsopcode.h"
+#include "jsparse.h"
+#include "jsscan.h"
+#include "jsscope.h"
+#include "jsscript.h"
+#include "jsstr.h"
+#include "jsexn.h"
+
+#if JS_HAS_GENERATORS
+# include "jsiter.h"
+#endif
+
+/* Generic function/call/arguments tinyids -- also reflected bit numbers. */
+enum {
+ CALL_ARGUMENTS = -1, /* predefined arguments local variable */
+ CALL_CALLEE = -2, /* reference to active function's object */
+ ARGS_LENGTH = -3, /* number of actual args, arity if inactive */
+ ARGS_CALLEE = -4, /* reference from arguments to active funobj */
+ FUN_ARITY = -5, /* number of formal parameters; desired argc */
+ FUN_NAME = -6, /* function name, "" if anonymous */
+ FUN_CALLER = -7 /* Function.prototype.caller, backward compat */
+};
+
+#if JSFRAME_OVERRIDE_BITS < 8
+# error "not enough override bits in JSStackFrame.flags!"
+#endif
+
+#define TEST_OVERRIDE_BIT(fp, tinyid) \
+ ((fp)->flags & JS_BIT(JSFRAME_OVERRIDE_SHIFT - ((tinyid) + 1)))
+
+#define SET_OVERRIDE_BIT(fp, tinyid) \
+ ((fp)->flags |= JS_BIT(JSFRAME_OVERRIDE_SHIFT - ((tinyid) + 1)))
+
+JSBool
+js_GetArgsValue(JSContext *cx, JSStackFrame *fp, jsval *vp)
+{
+ JSObject *argsobj;
+
+ if (TEST_OVERRIDE_BIT(fp, CALL_ARGUMENTS)) {
+ JS_ASSERT(fp->callobj);
+ return OBJ_GET_PROPERTY(cx, fp->callobj,
+ ATOM_TO_JSID(cx->runtime->atomState
+ .argumentsAtom),
+ vp);
+ }
+ argsobj = js_GetArgsObject(cx, fp);
+ if (!argsobj)
+ return JS_FALSE;
+ *vp = OBJECT_TO_JSVAL(argsobj);
+ return JS_TRUE;
+}
+
+static JSBool
+MarkArgDeleted(JSContext *cx, JSStackFrame *fp, uintN slot)
+{
+ JSObject *argsobj;
+ jsval bmapval, bmapint;
+ size_t nbits, nbytes;
+ jsbitmap *bitmap;
+
+ argsobj = fp->argsobj;
+ (void) JS_GetReservedSlot(cx, argsobj, 0, &bmapval);
+ nbits = fp->argc;
+ JS_ASSERT(slot < nbits);
+ if (JSVAL_IS_VOID(bmapval)) {
+ if (nbits <= JSVAL_INT_BITS) {
+ bmapint = 0;
+ bitmap = (jsbitmap *) &bmapint;
+ } else {
+ nbytes = JS_HOWMANY(nbits, JS_BITS_PER_WORD) * sizeof(jsbitmap);
+ bitmap = (jsbitmap *) JS_malloc(cx, nbytes);
+ if (!bitmap)
+ return JS_FALSE;
+ memset(bitmap, 0, nbytes);
+ bmapval = PRIVATE_TO_JSVAL(bitmap);
+ JS_SetReservedSlot(cx, argsobj, 0, bmapval);
+ }
+ } else {
+ if (nbits <= JSVAL_INT_BITS) {
+ bmapint = JSVAL_TO_INT(bmapval);
+ bitmap = (jsbitmap *) &bmapint;
+ } else {
+ bitmap = (jsbitmap *) JSVAL_TO_PRIVATE(bmapval);
+ }
+ }
+ JS_SET_BIT(bitmap, slot);
+ if (bitmap == (jsbitmap *) &bmapint) {
+ bmapval = INT_TO_JSVAL(bmapint);
+ JS_SetReservedSlot(cx, argsobj, 0, bmapval);
+ }
+ return JS_TRUE;
+}
+
+/* NB: Infallible predicate, false does not mean error/exception. */
+static JSBool
+ArgWasDeleted(JSContext *cx, JSStackFrame *fp, uintN slot)
+{
+ JSObject *argsobj;
+ jsval bmapval, bmapint;
+ jsbitmap *bitmap;
+
+ argsobj = fp->argsobj;
+ (void) JS_GetReservedSlot(cx, argsobj, 0, &bmapval);
+ if (JSVAL_IS_VOID(bmapval))
+ return JS_FALSE;
+ if (fp->argc <= JSVAL_INT_BITS) {
+ bmapint = JSVAL_TO_INT(bmapval);
+ bitmap = (jsbitmap *) &bmapint;
+ } else {
+ bitmap = (jsbitmap *) JSVAL_TO_PRIVATE(bmapval);
+ }
+ return JS_TEST_BIT(bitmap, slot) != 0;
+}
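MarkArgDeleted and ArgWasDeleted track 'delete arguments[i]' in a bitmap kept in the arguments object's reserved slot (packed into an int-tagged jsval when argc fits, heap-allocated otherwise). A hedged sketch of the script-visible effect this implements:

    function f(a, b) {
        delete arguments[0];                 // args_delProperty -> MarkArgDeleted(cx, fp, 0)
        a = 99;                              // no longer reflected: slot 0 is flagged deleted
        return [arguments[0], arguments[1], a];
    }
    f(1, 2);                                 // [undefined, 2, 99]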
+
+JSBool
+js_GetArgsProperty(JSContext *cx, JSStackFrame *fp, jsid id,
+ JSObject **objp, jsval *vp)
+{
+ jsval val;
+ JSObject *obj;
+ uintN slot;
+
+ if (TEST_OVERRIDE_BIT(fp, CALL_ARGUMENTS)) {
+ JS_ASSERT(fp->callobj);
+ if (!OBJ_GET_PROPERTY(cx, fp->callobj,
+ ATOM_TO_JSID(cx->runtime->atomState
+ .argumentsAtom),
+ &val)) {
+ return JS_FALSE;
+ }
+ if (JSVAL_IS_PRIMITIVE(val)) {
+ obj = js_ValueToNonNullObject(cx, val);
+ if (!obj)
+ return JS_FALSE;
+ } else {
+ obj = JSVAL_TO_OBJECT(val);
+ }
+ *objp = obj;
+ return OBJ_GET_PROPERTY(cx, obj, id, vp);
+ }
+
+ *objp = NULL;
+ *vp = JSVAL_VOID;
+ if (JSID_IS_INT(id)) {
+ slot = (uintN) JSID_TO_INT(id);
+ if (slot < fp->argc) {
+ if (fp->argsobj && ArgWasDeleted(cx, fp, slot))
+ return OBJ_GET_PROPERTY(cx, fp->argsobj, id, vp);
+ *vp = fp->argv[slot];
+ } else {
+ /*
+ * Per ECMA-262 Ed. 3, 10.1.8, last bulleted item, do not share
+ * storage between the formal parameter and arguments[k] for all
+ * k >= fp->argc && k < fp->fun->nargs. For example, in
+ *
+ * function f(x) { x = 42; return arguments[0]; }
+ * f();
+ *
+ * the call to f should return undefined, not 42. If fp->argsobj
+ * is null at this point, as it would be in the example, return
+ * undefined in *vp.
+ */
+ if (fp->argsobj)
+ return OBJ_GET_PROPERTY(cx, fp->argsobj, id, vp);
+ }
+ } else {
+ if (id == ATOM_TO_JSID(cx->runtime->atomState.lengthAtom)) {
+ if (fp->argsobj && TEST_OVERRIDE_BIT(fp, ARGS_LENGTH))
+ return OBJ_GET_PROPERTY(cx, fp->argsobj, id, vp);
+ *vp = INT_TO_JSVAL((jsint) fp->argc);
+ }
+ }
+ return JS_TRUE;
+}
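The comment above covers the out-of-range case; for contrast, a brief sketch of both sides of the ECMA-262 Ed. 3, 10.1.8 rule that js_GetArgsProperty implements:

    function f(x) { x = 42; return arguments[0]; }
    f();       // undefined -- no actual argument 0, so no storage is shared with the formal
    f(7);      // 42 -- x and arguments[0] alias the same fp->argv[0] slot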
+
+JSObject *
+js_GetArgsObject(JSContext *cx, JSStackFrame *fp)
+{
+ JSObject *argsobj, *global, *parent;
+
+ /*
+ * We must be in a function activation; the function must be lightweight
+ * or else fp must have a variable object.
+ */
+ JS_ASSERT(fp->fun && (!(fp->fun->flags & JSFUN_HEAVYWEIGHT) || fp->varobj));
+
+ /* Skip eval and debugger frames. */
+ while (fp->flags & JSFRAME_SPECIAL)
+ fp = fp->down;
+
+ /* Create an arguments object for fp only if it lacks one. */
+ argsobj = fp->argsobj;
+ if (argsobj)
+ return argsobj;
+
+ /* Link the new object to fp so it can get actual argument values. */
+ argsobj = js_NewObject(cx, &js_ArgumentsClass, NULL, NULL);
+ if (!argsobj || !JS_SetPrivate(cx, argsobj, fp)) {
+ cx->weakRoots.newborn[GCX_OBJECT] = NULL;
+ return NULL;
+ }
+
+ /*
+ * Give arguments an intrinsic scope chain link to fp's global object.
+ * Since the arguments object lacks a prototype because js_ArgumentsClass
+ * is not initialized, js_NewObject won't assign a default parent to it.
+ *
+ * Therefore if arguments is used as the head of an eval scope chain (via
+ * a direct or indirect call to eval(program, arguments)), any reference
+ * to a standard class object in the program will fail to resolve due to
+ * js_GetClassPrototype not being able to find a global object containing
+ * the standard prototype by starting from arguments and following parent.
+ */
+ global = fp->scopeChain;
+ while ((parent = OBJ_GET_PARENT(cx, global)) != NULL)
+ global = parent;
+ argsobj->slots[JSSLOT_PARENT] = OBJECT_TO_JSVAL(global);
+ fp->argsobj = argsobj;
+ return argsobj;
+}
+
+static JSBool
+args_enumerate(JSContext *cx, JSObject *obj);
+
+JSBool
+js_PutArgsObject(JSContext *cx, JSStackFrame *fp)
+{
+ JSObject *argsobj;
+ jsval bmapval, rval;
+ JSBool ok;
+ JSRuntime *rt;
+
+ /*
+ * Reuse args_enumerate here to reflect fp's actual arguments as indexed
+ * elements of argsobj. Do this first, before clearing and freeing the
+ * deleted argument slot bitmap, because args_enumerate depends on that.
+ */
+ argsobj = fp->argsobj;
+ ok = args_enumerate(cx, argsobj);
+
+ /*
+ * Now clear the deleted argument number bitmap slot and free the bitmap,
+ * if one was actually created due to 'delete arguments[0]' or similar.
+ */
+ (void) JS_GetReservedSlot(cx, argsobj, 0, &bmapval);
+ if (!JSVAL_IS_VOID(bmapval)) {
+ JS_SetReservedSlot(cx, argsobj, 0, JSVAL_VOID);
+ if (fp->argc > JSVAL_INT_BITS)
+ JS_free(cx, JSVAL_TO_PRIVATE(bmapval));
+ }
+
+ /*
+ * Now get the prototype properties so we snapshot fp->fun and fp->argc
+ * before fp goes away.
+ */
+ rt = cx->runtime;
+ ok &= js_GetProperty(cx, argsobj, ATOM_TO_JSID(rt->atomState.calleeAtom),
+ &rval);
+ ok &= js_SetProperty(cx, argsobj, ATOM_TO_JSID(rt->atomState.calleeAtom),
+ &rval);
+ ok &= js_GetProperty(cx, argsobj, ATOM_TO_JSID(rt->atomState.lengthAtom),
+ &rval);
+ ok &= js_SetProperty(cx, argsobj, ATOM_TO_JSID(rt->atomState.lengthAtom),
+ &rval);
+
+ /*
+ * Clear the private pointer to fp, which is about to go away (js_Invoke).
+ * Do this last because the args_enumerate and js_GetProperty calls above
+ * need to follow the private slot to find fp.
+ */
+ ok &= JS_SetPrivate(cx, argsobj, NULL);
+ fp->argsobj = NULL;
+ return ok;
+}
+
+static JSBool
+args_delProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ jsint slot;
+ JSStackFrame *fp;
+
+ if (!JSVAL_IS_INT(id))
+ return JS_TRUE;
+ fp = (JSStackFrame *)
+ JS_GetInstancePrivate(cx, obj, &js_ArgumentsClass, NULL);
+ if (!fp)
+ return JS_TRUE;
+ JS_ASSERT(fp->argsobj);
+
+ slot = JSVAL_TO_INT(id);
+ switch (slot) {
+ case ARGS_CALLEE:
+ case ARGS_LENGTH:
+ SET_OVERRIDE_BIT(fp, slot);
+ break;
+
+ default:
+ if ((uintN)slot < fp->argc && !MarkArgDeleted(cx, fp, slot))
+ return JS_FALSE;
+ break;
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+args_getProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ jsint slot;
+ JSStackFrame *fp;
+
+ if (!JSVAL_IS_INT(id))
+ return JS_TRUE;
+ fp = (JSStackFrame *)
+ JS_GetInstancePrivate(cx, obj, &js_ArgumentsClass, NULL);
+ if (!fp)
+ return JS_TRUE;
+ JS_ASSERT(fp->argsobj);
+
+ slot = JSVAL_TO_INT(id);
+ switch (slot) {
+ case ARGS_CALLEE:
+ if (!TEST_OVERRIDE_BIT(fp, slot))
+ *vp = fp->argv ? fp->argv[-2] : OBJECT_TO_JSVAL(fp->fun->object);
+ break;
+
+ case ARGS_LENGTH:
+ if (!TEST_OVERRIDE_BIT(fp, slot))
+ *vp = INT_TO_JSVAL((jsint)fp->argc);
+ break;
+
+ default:
+ if ((uintN)slot < fp->argc && !ArgWasDeleted(cx, fp, slot))
+ *vp = fp->argv[slot];
+ break;
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+args_setProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ JSStackFrame *fp;
+ jsint slot;
+
+ if (!JSVAL_IS_INT(id))
+ return JS_TRUE;
+ fp = (JSStackFrame *)
+ JS_GetInstancePrivate(cx, obj, &js_ArgumentsClass, NULL);
+ if (!fp)
+ return JS_TRUE;
+ JS_ASSERT(fp->argsobj);
+
+ slot = JSVAL_TO_INT(id);
+ switch (slot) {
+ case ARGS_CALLEE:
+ case ARGS_LENGTH:
+ SET_OVERRIDE_BIT(fp, slot);
+ break;
+
+ default:
+ if (FUN_INTERPRETED(fp->fun) &&
+ (uintN)slot < fp->argc &&
+ !ArgWasDeleted(cx, fp, slot)) {
+ fp->argv[slot] = *vp;
+ }
+ break;
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+args_resolve(JSContext *cx, JSObject *obj, jsval id, uintN flags,
+ JSObject **objp)
+{
+ JSStackFrame *fp;
+ uintN slot;
+ JSString *str;
+ JSAtom *atom;
+ intN tinyid;
+ jsval value;
+
+ *objp = NULL;
+ fp = (JSStackFrame *)
+ JS_GetInstancePrivate(cx, obj, &js_ArgumentsClass, NULL);
+ if (!fp)
+ return JS_TRUE;
+ JS_ASSERT(fp->argsobj);
+
+ if (JSVAL_IS_INT(id)) {
+ slot = JSVAL_TO_INT(id);
+ if (slot < fp->argc && !ArgWasDeleted(cx, fp, slot)) {
+ /* XXX ECMA specs DontEnum, contrary to other array-like objects */
+ if (!js_DefineProperty(cx, obj, INT_JSVAL_TO_JSID(id),
+ fp->argv[slot],
+ args_getProperty, args_setProperty,
+ JS_VERSION_IS_ECMA(cx)
+ ? 0
+ : JSPROP_ENUMERATE,
+ NULL)) {
+ return JS_FALSE;
+ }
+ *objp = obj;
+ }
+ } else {
+ str = JSVAL_TO_STRING(id);
+ atom = cx->runtime->atomState.lengthAtom;
+ if (str == ATOM_TO_STRING(atom)) {
+ tinyid = ARGS_LENGTH;
+ value = INT_TO_JSVAL(fp->argc);
+ } else {
+ atom = cx->runtime->atomState.calleeAtom;
+ if (str == ATOM_TO_STRING(atom)) {
+ tinyid = ARGS_CALLEE;
+ value = fp->argv ? fp->argv[-2]
+ : OBJECT_TO_JSVAL(fp->fun->object);
+ } else {
+ atom = NULL;
+
+ /* Quell GCC overwarnings. */
+ tinyid = 0;
+ value = JSVAL_NULL;
+ }
+ }
+
+ if (atom && !TEST_OVERRIDE_BIT(fp, tinyid)) {
+ if (!js_DefineNativeProperty(cx, obj, ATOM_TO_JSID(atom), value,
+ args_getProperty, args_setProperty, 0,
+ SPROP_HAS_SHORTID, tinyid, NULL)) {
+ return JS_FALSE;
+ }
+ *objp = obj;
+ }
+ }
+
+ return JS_TRUE;
+}
+
+static JSBool
+args_enumerate(JSContext *cx, JSObject *obj)
+{
+ JSStackFrame *fp;
+ JSObject *pobj;
+ JSProperty *prop;
+ uintN slot, argc;
+
+ fp = (JSStackFrame *)
+ JS_GetInstancePrivate(cx, obj, &js_ArgumentsClass, NULL);
+ if (!fp)
+ return JS_TRUE;
+ JS_ASSERT(fp->argsobj);
+
+ /*
+ * Trigger reflection with value snapshot in args_resolve using a series
+ * of js_LookupProperty calls. We handle length, callee, and the indexed
+ * argument properties. We know that args_resolve covers all these cases
+ * and creates direct properties of obj, but that it may fail to resolve
+ * length or callee if overridden.
+ */
+ if (!js_LookupProperty(cx, obj,
+ ATOM_TO_JSID(cx->runtime->atomState.lengthAtom),
+ &pobj, &prop)) {
+ return JS_FALSE;
+ }
+ if (prop)
+ OBJ_DROP_PROPERTY(cx, pobj, prop);
+
+ if (!js_LookupProperty(cx, obj,
+ ATOM_TO_JSID(cx->runtime->atomState.calleeAtom),
+ &pobj, &prop)) {
+ return JS_FALSE;
+ }
+ if (prop)
+ OBJ_DROP_PROPERTY(cx, pobj, prop);
+
+ argc = fp->argc;
+ for (slot = 0; slot < argc; slot++) {
+ if (!js_LookupProperty(cx, obj, INT_TO_JSID((jsint)slot), &pobj, &prop))
+ return JS_FALSE;
+ if (prop)
+ OBJ_DROP_PROPERTY(cx, pobj, prop);
+ }
+ return JS_TRUE;
+}
+
+#if JS_HAS_GENERATORS
+/*
+ * If a generator-iterator's arguments or call object escapes, it needs to
+ * mark its generator object.
+ */
+static uint32
+args_or_call_mark(JSContext *cx, JSObject *obj, void *arg)
+{
+ JSStackFrame *fp;
+
+ fp = JS_GetPrivate(cx, obj);
+ if (fp && (fp->flags & JSFRAME_GENERATOR))
+ GC_MARK(cx, FRAME_TO_GENERATOR(fp)->obj, "FRAME_TO_GENERATOR(fp)->obj");
+ return 0;
+}
+#else
+# define args_or_call_mark NULL
+#endif
+
+/*
+ * The Arguments class is not initialized via JS_InitClass, and must not be,
+ * because its name is "Object". Per ECMA, that causes instances of it to
+ * delegate to the object named by Object.prototype. It also ensures that
+ * arguments.toString() returns "[object Object]".
+ *
+ * The JSClass functions below collaborate to lazily reflect and synchronize
+ * actual argument values, argument count, and callee function object stored
+ * in a JSStackFrame with their corresponding property values in the frame's
+ * arguments object.
+ */
+JSClass js_ArgumentsClass = {
+ js_Object_str,
+ JSCLASS_HAS_PRIVATE | JSCLASS_NEW_RESOLVE | JSCLASS_HAS_RESERVED_SLOTS(1) |
+ JSCLASS_HAS_CACHED_PROTO(JSProto_Object),
+ JS_PropertyStub, args_delProperty,
+ args_getProperty, args_setProperty,
+ args_enumerate, (JSResolveOp) args_resolve,
+ JS_ConvertStub, JS_FinalizeStub,
+ NULL, NULL,
+ NULL, NULL,
+ NULL, NULL,
+ args_or_call_mark, NULL
+};
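As the comment above notes, the class deliberately reuses the name "Object"; a short sketch of what that means at the script level:

    function f() { return Object.prototype.toString.call(arguments); }
    f();                       // "[object Object]" -- the arguments class name is js_Object_str
    function g() { return arguments instanceof Object; }
    g();                       // true -- arguments delegates to Object.prototype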
+
+JSObject *
+js_GetCallObject(JSContext *cx, JSStackFrame *fp, JSObject *parent)
+{
+ JSObject *callobj, *funobj;
+
+ /* Create a call object for fp only if it lacks one. */
+ JS_ASSERT(fp->fun);
+ callobj = fp->callobj;
+ if (callobj)
+ return callobj;
+ JS_ASSERT(fp->fun);
+
+ /* The default call parent is its function's parent (static link). */
+ if (!parent) {
+ funobj = fp->argv ? JSVAL_TO_OBJECT(fp->argv[-2]) : fp->fun->object;
+ if (funobj)
+ parent = OBJ_GET_PARENT(cx, funobj);
+ }
+
+ /* Create the call object and link it to its stack frame. */
+ callobj = js_NewObject(cx, &js_CallClass, NULL, parent);
+ if (!callobj || !JS_SetPrivate(cx, callobj, fp)) {
+ cx->weakRoots.newborn[GCX_OBJECT] = NULL;
+ return NULL;
+ }
+ fp->callobj = callobj;
+
+ /* Make callobj be the scope chain and the variables object. */
+ JS_ASSERT(fp->scopeChain == parent);
+ fp->scopeChain = callobj;
+ fp->varobj = callobj;
+ return callobj;
+}
+
+static JSBool
+call_enumerate(JSContext *cx, JSObject *obj);
+
+JSBool
+js_PutCallObject(JSContext *cx, JSStackFrame *fp)
+{
+ JSObject *callobj;
+ JSBool ok;
+ jsid argsid;
+ jsval aval;
+
+ /*
+ * Reuse call_enumerate here to reflect all actual args and vars into the
+ * call object from fp.
+ */
+ callobj = fp->callobj;
+ if (!callobj)
+ return JS_TRUE;
+ ok = call_enumerate(cx, callobj);
+
+ /*
+ * Get the arguments object to snapshot fp's actual argument values.
+ */
+ if (fp->argsobj) {
+ argsid = ATOM_TO_JSID(cx->runtime->atomState.argumentsAtom);
+ ok &= js_GetProperty(cx, callobj, argsid, &aval);
+ ok &= js_SetProperty(cx, callobj, argsid, &aval);
+ ok &= js_PutArgsObject(cx, fp);
+ }
+
+ /*
+ * Clear the private pointer to fp, which is about to go away (js_Invoke).
+ * Do this last because the call_enumerate and js_GetProperty calls above
+ * need to follow the private slot to find fp.
+ */
+ ok &= JS_SetPrivate(cx, callobj, NULL);
+ fp->callobj = NULL;
+ return ok;
+}
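js_GetCallObject and js_PutCallObject are what keep closures working after a frame is popped: call_enumerate reflects the live args and vars into the Call object before the private fp pointer is cleared. A hedged sketch of the effect:

    function make(x) {
        var y = x + 1;
        return function () { return x + y; };   // inner function's scope chain starts at make's Call object
    }
    var f = make(2);
    f();    // 5 -- x and y were snapshotted into the Call object by js_PutCallObject when make returned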
+
+static JSPropertySpec call_props[] = {
+ {js_arguments_str, CALL_ARGUMENTS, JSPROP_PERMANENT,0,0},
+ {"__callee__", CALL_CALLEE, 0,0,0},
+ {0,0,0,0,0}
+};
+
+static JSBool
+call_getProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ JSStackFrame *fp;
+ jsint slot;
+
+ if (!JSVAL_IS_INT(id))
+ return JS_TRUE;
+ fp = (JSStackFrame *) JS_GetPrivate(cx, obj);
+ if (!fp)
+ return JS_TRUE;
+ JS_ASSERT(fp->fun);
+
+ slot = JSVAL_TO_INT(id);
+ switch (slot) {
+ case CALL_ARGUMENTS:
+ if (!TEST_OVERRIDE_BIT(fp, slot)) {
+ JSObject *argsobj = js_GetArgsObject(cx, fp);
+ if (!argsobj)
+ return JS_FALSE;
+ *vp = OBJECT_TO_JSVAL(argsobj);
+ }
+ break;
+
+ case CALL_CALLEE:
+ if (!TEST_OVERRIDE_BIT(fp, slot))
+ *vp = fp->argv ? fp->argv[-2] : OBJECT_TO_JSVAL(fp->fun->object);
+ break;
+
+ default:
+ if ((uintN)slot < JS_MAX(fp->argc, fp->fun->nargs))
+ *vp = fp->argv[slot];
+ break;
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+call_setProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ JSStackFrame *fp;
+ jsint slot;
+
+ if (!JSVAL_IS_INT(id))
+ return JS_TRUE;
+ fp = (JSStackFrame *) JS_GetPrivate(cx, obj);
+ if (!fp)
+ return JS_TRUE;
+ JS_ASSERT(fp->fun);
+
+ slot = JSVAL_TO_INT(id);
+ switch (slot) {
+ case CALL_ARGUMENTS:
+ case CALL_CALLEE:
+ SET_OVERRIDE_BIT(fp, slot);
+ break;
+
+ default:
+ if ((uintN)slot < JS_MAX(fp->argc, fp->fun->nargs))
+ fp->argv[slot] = *vp;
+ break;
+ }
+ return JS_TRUE;
+}
+
+JSBool
+js_GetCallVariable(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ JSStackFrame *fp;
+
+ JS_ASSERT(JSVAL_IS_INT(id));
+ fp = (JSStackFrame *) JS_GetPrivate(cx, obj);
+ if (fp) {
+ /* XXX no jsint slot commoning here to avoid MSVC1.52 crashes */
+ if ((uintN)JSVAL_TO_INT(id) < fp->nvars)
+ *vp = fp->vars[JSVAL_TO_INT(id)];
+ }
+ return JS_TRUE;
+}
+
+JSBool
+js_SetCallVariable(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ JSStackFrame *fp;
+
+ JS_ASSERT(JSVAL_IS_INT(id));
+ fp = (JSStackFrame *) JS_GetPrivate(cx, obj);
+ if (fp) {
+ /* XXX jsint slot is block-local here to avoid MSVC1.52 crashes */
+ jsint slot = JSVAL_TO_INT(id);
+ if ((uintN)slot < fp->nvars)
+ fp->vars[slot] = *vp;
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+call_enumerate(JSContext *cx, JSObject *obj)
+{
+ JSStackFrame *fp;
+ JSObject *funobj, *pobj;
+ JSScope *scope;
+ JSScopeProperty *sprop, *cprop;
+ JSPropertyOp getter;
+ jsval *vec;
+ JSAtom *atom;
+ JSProperty *prop;
+
+ fp = (JSStackFrame *) JS_GetPrivate(cx, obj);
+ if (!fp)
+ return JS_TRUE;
+
+ /*
+ * Do not enumerate a cloned function object at fp->argv[-2], it may have
+ * gained its own (mutable) scope (e.g., a brutally-shared XUL script sets
+ * the clone's prototype property). We must enumerate the function object
+ * that was decorated with parameter and local variable properties by the
+ * compiler when the compiler created fp->fun, namely fp->fun->object.
+ *
+ * Contrast with call_resolve, where we prefer fp->argv[-2], because we'll
+ * use js_LookupProperty to find any overridden properties in that object,
+ * if it was a mutated clone; and if not, we will search its prototype,
+ * fp->fun->object, to find compiler-created params and locals.
+ */
+ funobj = fp->fun->object;
+ if (!funobj)
+ return JS_TRUE;
+
+ /*
+ * Reflect actual args from fp->argv for formal parameters, and local vars
+ * and functions in fp->vars for declared variables and nested-at-top-level
+ * local functions.
+ */
+ scope = OBJ_SCOPE(funobj);
+ for (sprop = SCOPE_LAST_PROP(scope); sprop; sprop = sprop->parent) {
+ getter = sprop->getter;
+ if (getter == js_GetArgument)
+ vec = fp->argv;
+ else if (getter == js_GetLocalVariable)
+ vec = fp->vars;
+ else
+ continue;
+
+ /* Trigger reflection by looking up the unhidden atom for sprop->id. */
+ JS_ASSERT(JSID_IS_ATOM(sprop->id));
+ atom = JSID_TO_ATOM(sprop->id);
+ JS_ASSERT(atom->flags & ATOM_HIDDEN);
+ atom = atom->entry.value;
+
+ if (!js_LookupProperty(cx, obj, ATOM_TO_JSID(atom), &pobj, &prop))
+ return JS_FALSE;
+
+ /*
+ * If we found the property in a different object, don't try sticking
+ * it into the wrong slots vector. This can occur because we have a mutable
+ * __proto__ slot, and cloned function objects rely on their __proto__
+ * to delegate to the object that contains the var and arg properties.
+ */
+ if (!prop || pobj != obj) {
+ if (prop)
+ OBJ_DROP_PROPERTY(cx, pobj, prop);
+ continue;
+ }
+ cprop = (JSScopeProperty *)prop;
+ LOCKED_OBJ_SET_SLOT(obj, cprop->slot, vec[(uint16) sprop->shortid]);
+ OBJ_DROP_PROPERTY(cx, obj, prop);
+ }
+
+ return JS_TRUE;
+}
+
+static JSBool
+call_resolve(JSContext *cx, JSObject *obj, jsval id, uintN flags,
+ JSObject **objp)
+{
+ JSStackFrame *fp;
+ JSObject *funobj;
+ JSString *str;
+ JSAtom *atom;
+ JSObject *obj2;
+ JSProperty *prop;
+ JSScopeProperty *sprop;
+ JSPropertyOp getter, setter;
+ uintN attrs, slot, nslots, spflags;
+ jsval *vp, value;
+ intN shortid;
+
+ fp = (JSStackFrame *) JS_GetPrivate(cx, obj);
+ if (!fp)
+ return JS_TRUE;
+ JS_ASSERT(fp->fun);
+
+ if (!JSVAL_IS_STRING(id))
+ return JS_TRUE;
+
+ funobj = fp->argv ? JSVAL_TO_OBJECT(fp->argv[-2]) : fp->fun->object;
+ if (!funobj)
+ return JS_TRUE;
+ JS_ASSERT((JSFunction *) JS_GetPrivate(cx, funobj) == fp->fun);
+
+ str = JSVAL_TO_STRING(id);
+ atom = js_AtomizeString(cx, str, 0);
+ if (!atom)
+ return JS_FALSE;
+ if (!js_LookupHiddenProperty(cx, funobj, ATOM_TO_JSID(atom), &obj2, &prop))
+ return JS_FALSE;
+
+ if (prop) {
+ if (!OBJ_IS_NATIVE(obj2)) {
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+ return JS_TRUE;
+ }
+
+ sprop = (JSScopeProperty *) prop;
+ getter = sprop->getter;
+ attrs = sprop->attrs & ~JSPROP_SHARED;
+ slot = (uintN) sprop->shortid;
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+
+ /* Ensure we found an arg or var property for the same function. */
+ if ((sprop->flags & SPROP_IS_HIDDEN) &&
+ (obj2 == funobj ||
+ (JSFunction *) JS_GetPrivate(cx, obj2) == fp->fun)) {
+ if (getter == js_GetArgument) {
+ vp = fp->argv;
+ nslots = JS_MAX(fp->argc, fp->fun->nargs);
+ getter = setter = NULL;
+ } else {
+ JS_ASSERT(getter == js_GetLocalVariable);
+ vp = fp->vars;
+ nslots = fp->nvars;
+ getter = js_GetCallVariable;
+ setter = js_SetCallVariable;
+ }
+ if (slot < nslots) {
+ value = vp[slot];
+ spflags = SPROP_HAS_SHORTID;
+ shortid = (intN) slot;
+ } else {
+ value = JSVAL_VOID;
+ spflags = 0;
+ shortid = 0;
+ }
+ if (!js_DefineNativeProperty(cx, obj, ATOM_TO_JSID(atom), value,
+ getter, setter, attrs,
+ spflags, shortid, NULL)) {
+ return JS_FALSE;
+ }
+ *objp = obj;
+ }
+ }
+
+ return JS_TRUE;
+}
+
+static JSBool
+call_convert(JSContext *cx, JSObject *obj, JSType type, jsval *vp)
+{
+ JSStackFrame *fp;
+
+ if (type == JSTYPE_FUNCTION) {
+ fp = (JSStackFrame *) JS_GetPrivate(cx, obj);
+ if (fp) {
+ JS_ASSERT(fp->fun);
+ *vp = fp->argv ? fp->argv[-2] : OBJECT_TO_JSVAL(fp->fun->object);
+ }
+ }
+ return JS_TRUE;
+}
+
+JSClass js_CallClass = {
+ js_Call_str,
+ JSCLASS_HAS_PRIVATE | JSCLASS_NEW_RESOLVE | JSCLASS_IS_ANONYMOUS |
+ JSCLASS_HAS_CACHED_PROTO(JSProto_Call),
+ JS_PropertyStub, JS_PropertyStub,
+ call_getProperty, call_setProperty,
+ call_enumerate, (JSResolveOp)call_resolve,
+ call_convert, JS_FinalizeStub,
+ NULL, NULL,
+ NULL, NULL,
+ NULL, NULL,
+ args_or_call_mark, NULL,
+};
+
+/*
+ * ECMA-262 specifies that length is a property of function object instances,
+ * but we can avoid that space cost by delegating to a prototype property that
+ * is JSPROP_PERMANENT and JSPROP_SHARED. Each fun_getProperty call computes
+ * a fresh length value based on the arity of the individual function object's
+ * private data.
+ *
+ * The extensions below other than length, i.e., the ones not in ECMA-262,
+ * are neither JSPROP_READONLY nor JSPROP_SHARED, because for compatibility
+ * with ECMA we must allow a delegating object to override them.
+ */
+#define LENGTH_PROP_ATTRS (JSPROP_READONLY|JSPROP_PERMANENT|JSPROP_SHARED)
+
+static JSPropertySpec function_props[] = {
+ {js_arguments_str, CALL_ARGUMENTS, JSPROP_PERMANENT, 0,0},
+ {js_arity_str, FUN_ARITY, JSPROP_PERMANENT, 0,0},
+ {js_caller_str, FUN_CALLER, JSPROP_PERMANENT, 0,0},
+ {js_length_str, ARGS_LENGTH, LENGTH_PROP_ATTRS, 0,0},
+ {js_name_str, FUN_NAME, JSPROP_PERMANENT, 0,0},
+ {0,0,0,0,0}
+};
+
+static JSBool
+fun_getProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ jsint slot;
+ JSFunction *fun;
+ JSStackFrame *fp;
+
+ if (!JSVAL_IS_INT(id))
+ return JS_TRUE;
+ slot = JSVAL_TO_INT(id);
+
+ /*
+ * Loop because getter and setter can be delegated from another class,
+ * but loop only for ARGS_LENGTH because we must pretend that f.length
+ * is in each function instance f, per ECMA-262, instead of only in the
+ * Function.prototype object (we use JSPROP_PERMANENT with JSPROP_SHARED
+ * to make it appear so).
+ *
+ * This code couples tightly to the attributes for the function_props[]
+ * initializers above, and to js_SetProperty and js_HasOwnPropertyHelper.
+ *
+ * It's important to allow delegating objects, even though they inherit
+ * this getter (fun_getProperty), to override arguments, arity, caller,
+ * and name. If we didn't return early for slot != ARGS_LENGTH, we would
+ * clobber *vp with the native property value, instead of letting script
+ * override that value in delegating objects.
+ *
+ * Note how that clobbering is what simulates JSPROP_READONLY for all of
+ * the non-standard properties when the directly addressed object (obj)
+ * is a function object (i.e., when this loop does not iterate).
+ */
+ while (!(fun = (JSFunction *)
+ JS_GetInstancePrivate(cx, obj, &js_FunctionClass, NULL))) {
+ if (slot != ARGS_LENGTH)
+ return JS_TRUE;
+ obj = OBJ_GET_PROTO(cx, obj);
+ if (!obj)
+ return JS_TRUE;
+ }
+
+ /* Find fun's top-most activation record. */
+ for (fp = cx->fp; fp && (fp->fun != fun || (fp->flags & JSFRAME_SPECIAL));
+ fp = fp->down) {
+ continue;
+ }
+
+ switch (slot) {
+ case CALL_ARGUMENTS:
+ /* Warn if strict about f.arguments or equivalent unqualified uses. */
+ if (!JS_ReportErrorFlagsAndNumber(cx,
+ JSREPORT_WARNING | JSREPORT_STRICT,
+ js_GetErrorMessage, NULL,
+ JSMSG_DEPRECATED_USAGE,
+ js_arguments_str)) {
+ return JS_FALSE;
+ }
+ if (fp) {
+ if (!js_GetArgsValue(cx, fp, vp))
+ return JS_FALSE;
+ } else {
+ *vp = JSVAL_NULL;
+ }
+ break;
+
+ case ARGS_LENGTH:
+ case FUN_ARITY:
+ *vp = INT_TO_JSVAL((jsint)fun->nargs);
+ break;
+
+ case FUN_NAME:
+ *vp = fun->atom
+ ? ATOM_KEY(fun->atom)
+ : STRING_TO_JSVAL(cx->runtime->emptyString);
+ break;
+
+ case FUN_CALLER:
+ while (fp && (fp->flags & JSFRAME_SKIP_CALLER) && fp->down)
+ fp = fp->down;
+ if (fp && fp->down && fp->down->fun && fp->down->argv)
+ *vp = fp->down->argv[-2];
+ else
+ *vp = JSVAL_NULL;
+ if (!JSVAL_IS_PRIMITIVE(*vp) && cx->runtime->checkObjectAccess) {
+ id = ATOM_KEY(cx->runtime->atomState.callerAtom);
+ if (!cx->runtime->checkObjectAccess(cx, obj, id, JSACC_READ, vp))
+ return JS_FALSE;
+ }
+ break;
+
+ default:
+ /* XXX fun[0] and fun.arguments[0] are equivalent. */
+ if (fp && fp->fun && (uintN)slot < fp->fun->nargs)
+ *vp = fp->argv[slot];
+ break;
+ }
+
+ return JS_TRUE;
+}
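The properties reflected by fun_getProperty are script-visible; a few examples of the values it computes (length and arity both come from fun->nargs, name from fun->atom):

    function example(a, b, c) {}
    example.length;            // 3 -- shared, read-only prototype property, recomputed per instance
    example.arity;             // 3 -- same source, via the FUN_ARITY tinyid
    example.name;              // "example"
    (function () {}).name;     // "" for an anonymous function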
+
+static JSBool
+fun_enumerate(JSContext *cx, JSObject *obj)
+{
+ jsid prototypeId;
+ JSObject *pobj;
+ JSProperty *prop;
+
+ prototypeId = ATOM_TO_JSID(cx->runtime->atomState.classPrototypeAtom);
+ if (!OBJ_LOOKUP_PROPERTY(cx, obj, prototypeId, &pobj, &prop))
+ return JS_FALSE;
+ if (prop)
+ OBJ_DROP_PROPERTY(cx, pobj, prop);
+ return JS_TRUE;
+}
+
+static JSBool
+fun_resolve(JSContext *cx, JSObject *obj, jsval id, uintN flags,
+ JSObject **objp)
+{
+ JSFunction *fun;
+ JSString *str;
+ JSAtom *prototypeAtom;
+
+ /*
+ * No need to reflect fun.prototype in 'fun.prototype = ...' or in an
+ * unqualified reference to prototype, which the emitter looks up as a
+ * hidden atom when attempting to bind to a formal parameter or local
+ * variable slot.
+ */
+ if (flags & (JSRESOLVE_ASSIGNING | JSRESOLVE_HIDDEN))
+ return JS_TRUE;
+
+ if (!JSVAL_IS_STRING(id))
+ return JS_TRUE;
+
+ /* No valid function object should lack private data, but check anyway. */
+ fun = (JSFunction *)JS_GetInstancePrivate(cx, obj, &js_FunctionClass, NULL);
+ if (!fun || !fun->object)
+ return JS_TRUE;
+
+ /*
+ * Ok, check whether id is 'prototype' and bootstrap the function object's
+ * prototype property.
+ */
+ str = JSVAL_TO_STRING(id);
+ prototypeAtom = cx->runtime->atomState.classPrototypeAtom;
+ if (str == ATOM_TO_STRING(prototypeAtom)) {
+ JSObject *proto, *parentProto;
+ jsval pval;
+
+ proto = parentProto = NULL;
+ if (fun->object != obj && fun->object) {
+ /*
+ * Clone of a function: make its prototype property value have the
+ * same class as the clone-parent's prototype.
+ */
+ if (!OBJ_GET_PROPERTY(cx, fun->object, ATOM_TO_JSID(prototypeAtom),
+ &pval)) {
+ return JS_FALSE;
+ }
+ if (!JSVAL_IS_PRIMITIVE(pval)) {
+ /*
+ * We are about to allocate a new object, so hack the newborn
+ * root until then to protect pval in case it is figuratively
+ * up in the air, with no strong refs protecting it.
+ */
+ cx->weakRoots.newborn[GCX_OBJECT] = JSVAL_TO_GCTHING(pval);
+ parentProto = JSVAL_TO_OBJECT(pval);
+ }
+ }
+
+ /*
+ * Beware of the wacky case of a user function named Object -- trying
+ * to find a prototype for that will recur back here _ad perniciem_.
+ */
+ if (!parentProto && fun->atom == CLASS_ATOM(cx, Object))
+ return JS_TRUE;
+
+ /*
+ * If resolving "prototype" in a clone, clone the parent's prototype.
+ * Pass the constructor's (obj's) parent as the prototype parent, to
+ * avoid defaulting to parentProto.constructor.__parent__.
+ */
+ proto = js_NewObject(cx, &js_ObjectClass, parentProto,
+ OBJ_GET_PARENT(cx, obj));
+ if (!proto)
+ return JS_FALSE;
+
+ /*
+ * ECMA (15.3.5.2) says that constructor.prototype is DontDelete for
+ * user-defined functions, but DontEnum | ReadOnly | DontDelete for
+ * native "system" constructors such as Object or Function. So lazily
+ * set the former here in fun_resolve, but eagerly define the latter
+ * in JS_InitClass, with the right attributes.
+ */
+ if (!js_SetClassPrototype(cx, obj, proto,
+ JSPROP_ENUMERATE | JSPROP_PERMANENT)) {
+ cx->weakRoots.newborn[GCX_OBJECT] = NULL;
+ return JS_FALSE;
+ }
+ *objp = obj;
+ }
+
+ return JS_TRUE;
+}
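fun_resolve creates .prototype lazily on first use. A sketch of the observable result (the constructor back-link is installed by js_SetClassPrototype, which lives in jsobj.c and is not shown in this hunk):

    function Ctor() {}
    var proto = Ctor.prototype;    // resolved here: a fresh Object parented like Ctor
    var o = new Ctor();
    o.__proto__ === proto;         // true
    proto.constructor === Ctor;    // true, assuming js_SetClassPrototype behaves as described above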
+
+static JSBool
+fun_convert(JSContext *cx, JSObject *obj, JSType type, jsval *vp)
+{
+ switch (type) {
+ case JSTYPE_FUNCTION:
+ *vp = OBJECT_TO_JSVAL(obj);
+ return JS_TRUE;
+ default:
+ return js_TryValueOf(cx, obj, type, vp);
+ }
+}
+
+static void
+fun_finalize(JSContext *cx, JSObject *obj)
+{
+ JSFunction *fun;
+ JSScript *script;
+
+ /* No valid function object should lack private data, but check anyway. */
+ fun = (JSFunction *) JS_GetPrivate(cx, obj);
+ if (!fun)
+ return;
+ if (fun->object == obj)
+ fun->object = NULL;
+
+ /* Null-check required since the parser sets interpreted very early. */
+ if (FUN_INTERPRETED(fun) && fun->u.i.script &&
+ js_IsAboutToBeFinalized(cx, fun))
+ {
+ script = fun->u.i.script;
+ fun->u.i.script = NULL;
+ js_DestroyScript(cx, script);
+ }
+}
+
+#if JS_HAS_XDR
+
+#include "jsxdrapi.h"
+
+enum {
+ JSXDR_FUNARG = 1,
+ JSXDR_FUNVAR = 2,
+ JSXDR_FUNCONST = 3
+};
+
+/* XXX store parent and proto, if defined */
+static JSBool
+fun_xdrObject(JSXDRState *xdr, JSObject **objp)
+{
+ JSContext *cx;
+ JSFunction *fun;
+ uint32 nullAtom; /* flag to indicate if fun->atom is NULL */
+ JSTempValueRooter tvr;
+ uint32 flagsword; /* originally only flags was JS_XDRUint8'd */
+ uint16 extraUnused; /* variable for no longer used field */
+ JSAtom *propAtom;
+ JSScopeProperty *sprop;
+ uint32 userid; /* NB: holds a signed int-tagged jsval */
+ uintN i, n, dupflag;
+ uint32 type;
+ JSBool ok;
+#ifdef DEBUG
+ uintN nvars = 0, nargs = 0;
+#endif
+
+ cx = xdr->cx;
+ if (xdr->mode == JSXDR_ENCODE) {
+ /*
+ * No valid function object should lack private data, but fail soft
+ * (return true, no error report) in case one does due to API pilot
+ * or internal error.
+ */
+ fun = (JSFunction *) JS_GetPrivate(cx, *objp);
+ if (!fun)
+ return JS_TRUE;
+ if (!FUN_INTERPRETED(fun)) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_NOT_SCRIPTED_FUNCTION,
+ JS_GetFunctionName(fun));
+ return JS_FALSE;
+ }
+ nullAtom = !fun->atom;
+ flagsword = ((uint32)fun->u.i.nregexps << 16) | fun->flags;
+ extraUnused = 0;
+ } else {
+ fun = js_NewFunction(cx, NULL, NULL, 0, 0, NULL, NULL);
+ if (!fun)
+ return JS_FALSE;
+ }
+
+ /* From here on, control flow must flow through label out. */
+ JS_PUSH_TEMP_ROOT_OBJECT(cx, fun->object, &tvr);
+ ok = JS_TRUE;
+
+ if (!JS_XDRUint32(xdr, &nullAtom))
+ goto bad;
+ if (!nullAtom && !js_XDRStringAtom(xdr, &fun->atom))
+ goto bad;
+
+ if (!JS_XDRUint16(xdr, &fun->nargs) ||
+ !JS_XDRUint16(xdr, &extraUnused) ||
+ !JS_XDRUint16(xdr, &fun->u.i.nvars) ||
+ !JS_XDRUint32(xdr, &flagsword)) {
+ goto bad;
+ }
+
+ /* Assert that all previous writes of extraUnused were writes of 0. */
+ JS_ASSERT(extraUnused == 0);
+
+ /* do arguments and local vars */
+ if (fun->object) {
+ n = fun->nargs + fun->u.i.nvars;
+ if (xdr->mode == JSXDR_ENCODE) {
+ JSScope *scope;
+ JSScopeProperty **spvec, *auto_spvec[8];
+ void *mark;
+
+ if (n <= sizeof auto_spvec / sizeof auto_spvec[0]) {
+ spvec = auto_spvec;
+ mark = NULL;
+ } else {
+ mark = JS_ARENA_MARK(&cx->tempPool);
+ JS_ARENA_ALLOCATE_CAST(spvec, JSScopeProperty **, &cx->tempPool,
+ n * sizeof(JSScopeProperty *));
+ if (!spvec) {
+ JS_ReportOutOfMemory(cx);
+ goto bad;
+ }
+ }
+ scope = OBJ_SCOPE(fun->object);
+ for (sprop = SCOPE_LAST_PROP(scope); sprop;
+ sprop = sprop->parent) {
+ if (sprop->getter == js_GetArgument) {
+ JS_ASSERT(nargs++ <= fun->nargs);
+ spvec[sprop->shortid] = sprop;
+ } else if (sprop->getter == js_GetLocalVariable) {
+ JS_ASSERT(nvars++ <= fun->u.i.nvars);
+ spvec[fun->nargs + sprop->shortid] = sprop;
+ }
+ }
+ for (i = 0; i < n; i++) {
+ sprop = spvec[i];
+ JS_ASSERT(sprop->flags & SPROP_HAS_SHORTID);
+ type = (i < fun->nargs)
+ ? JSXDR_FUNARG
+ : (sprop->attrs & JSPROP_READONLY)
+ ? JSXDR_FUNCONST
+ : JSXDR_FUNVAR;
+ userid = INT_TO_JSVAL(sprop->shortid);
+ propAtom = JSID_TO_ATOM(sprop->id);
+ if (!JS_XDRUint32(xdr, &type) ||
+ !JS_XDRUint32(xdr, &userid) ||
+ !js_XDRCStringAtom(xdr, &propAtom)) {
+ if (mark)
+ JS_ARENA_RELEASE(&cx->tempPool, mark);
+ goto bad;
+ }
+ }
+ if (mark)
+ JS_ARENA_RELEASE(&cx->tempPool, mark);
+ } else {
+ JSPropertyOp getter, setter;
+
+ for (i = n; i != 0; i--) {
+ uintN attrs = JSPROP_PERMANENT;
+
+ if (!JS_XDRUint32(xdr, &type) ||
+ !JS_XDRUint32(xdr, &userid) ||
+ !js_XDRCStringAtom(xdr, &propAtom)) {
+ goto bad;
+ }
+ JS_ASSERT(type == JSXDR_FUNARG || type == JSXDR_FUNVAR ||
+ type == JSXDR_FUNCONST);
+ if (type == JSXDR_FUNARG) {
+ getter = js_GetArgument;
+ setter = js_SetArgument;
+ JS_ASSERT(nargs++ <= fun->nargs);
+ } else if (type == JSXDR_FUNVAR || type == JSXDR_FUNCONST) {
+ getter = js_GetLocalVariable;
+ setter = js_SetLocalVariable;
+ if (type == JSXDR_FUNCONST)
+ attrs |= JSPROP_READONLY;
+ JS_ASSERT(nvars++ <= fun->u.i.nvars);
+ } else {
+ getter = NULL;
+ setter = NULL;
+ }
+
+ /* Flag duplicate argument if atom is bound in fun->object. */
+ dupflag = SCOPE_GET_PROPERTY(OBJ_SCOPE(fun->object),
+ ATOM_TO_JSID(propAtom))
+ ? SPROP_IS_DUPLICATE
+ : 0;
+
+ if (!js_AddHiddenProperty(cx, fun->object,
+ ATOM_TO_JSID(propAtom),
+ getter, setter, SPROP_INVALID_SLOT,
+ attrs | JSPROP_SHARED,
+ dupflag | SPROP_HAS_SHORTID,
+ JSVAL_TO_INT(userid))) {
+ goto bad;
+ }
+ }
+ }
+ }
+
+ if (!js_XDRScript(xdr, &fun->u.i.script, NULL))
+ goto bad;
+
+ if (xdr->mode == JSXDR_DECODE) {
+ fun->flags = (uint16) flagsword | JSFUN_INTERPRETED;
+ fun->u.i.nregexps = (uint16) (flagsword >> 16);
+
+ *objp = fun->object;
+ js_CallNewScriptHook(cx, fun->u.i.script, fun);
+ }
+
+out:
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ return ok;
+
+bad:
+ ok = JS_FALSE;
+ goto out;
+}
+
+#else /* !JS_HAS_XDR */
+
+#define fun_xdrObject NULL
+
+#endif /* !JS_HAS_XDR */
+
+/*
+ * [[HasInstance]] internal method for Function objects: fetches the .prototype
+ * property of its 'this' parameter and walks the prototype chain of v (only
+ * if v is an object), returning true if .prototype is found there.
+ */
+static JSBool
+fun_hasInstance(JSContext *cx, JSObject *obj, jsval v, JSBool *bp)
+{
+ jsval pval;
+ JSString *str;
+
+ if (!OBJ_GET_PROPERTY(cx, obj,
+ ATOM_TO_JSID(cx->runtime->atomState
+ .classPrototypeAtom),
+ &pval)) {
+ return JS_FALSE;
+ }
+
+ if (JSVAL_IS_PRIMITIVE(pval)) {
+ /*
+ * Throw a runtime error if instanceof is called on a function that
+ * has a non-object as its .prototype value.
+ */
+ str = js_DecompileValueGenerator(cx, -1, OBJECT_TO_JSVAL(obj), NULL);
+ if (str) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_PROTOTYPE, JS_GetStringBytes(str));
+ }
+ return JS_FALSE;
+ }
+
+ return js_IsDelegate(cx, JSVAL_TO_OBJECT(pval), v, bp);
+}
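fun_hasInstance is the engine side of the instanceof operator on functions; a short sketch of the cases it distinguishes:

    function F() {}
    var o = new F();
    o instanceof F;        // true -- F.prototype is found on o's prototype chain
    F.prototype = {};
    o instanceof F;        // false -- the replacement prototype is not on o's chain
    F.prototype = 5;
    o instanceof F;        // reports an error: .prototype is a primitive (JSMSG_BAD_PROTOTYPE)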
+
+static uint32
+fun_mark(JSContext *cx, JSObject *obj, void *arg)
+{
+ JSFunction *fun;
+
+ fun = (JSFunction *) JS_GetPrivate(cx, obj);
+ if (fun) {
+ GC_MARK(cx, fun, "private");
+ if (fun->atom)
+ GC_MARK_ATOM(cx, fun->atom);
+ if (FUN_INTERPRETED(fun) && fun->u.i.script)
+ js_MarkScript(cx, fun->u.i.script);
+ }
+ return 0;
+}
+
+static uint32
+fun_reserveSlots(JSContext *cx, JSObject *obj)
+{
+ JSFunction *fun;
+
+ fun = (JSFunction *) JS_GetPrivate(cx, obj);
+ return (fun && FUN_INTERPRETED(fun)) ? fun->u.i.nregexps : 0;
+}
+
+/*
+ * Reserve two slots in all function objects for XPConnect. Note that this
+ * does not bloat every instance, only those on which reserved slots are set,
+ * and those on which ad-hoc properties are defined.
+ */
+JS_FRIEND_DATA(JSClass) js_FunctionClass = {
+ js_Function_str,
+ JSCLASS_HAS_PRIVATE | JSCLASS_NEW_RESOLVE | JSCLASS_HAS_RESERVED_SLOTS(2) |
+ JSCLASS_HAS_CACHED_PROTO(JSProto_Function),
+ JS_PropertyStub, JS_PropertyStub,
+ fun_getProperty, JS_PropertyStub,
+ fun_enumerate, (JSResolveOp)fun_resolve,
+ fun_convert, fun_finalize,
+ NULL, NULL,
+ NULL, NULL,
+ fun_xdrObject, fun_hasInstance,
+ fun_mark, fun_reserveSlots
+};
+
+JSBool
+js_fun_toString(JSContext *cx, JSObject *obj, uint32 indent,
+ uintN argc, jsval *argv, jsval *rval)
+{
+ jsval fval;
+ JSFunction *fun;
+ JSString *str;
+
+ if (!argv) {
+ JS_ASSERT(JS_ObjectIsFunction(cx, obj));
+ } else {
+ fval = argv[-1];
+ if (!VALUE_IS_FUNCTION(cx, fval)) {
+ /*
+ * If we don't have a function to start off with, try converting
+ * the object to a function. If that doesn't work, complain.
+ */
+ if (JSVAL_IS_OBJECT(fval)) {
+ obj = JSVAL_TO_OBJECT(fval);
+ if (!OBJ_GET_CLASS(cx, obj)->convert(cx, obj, JSTYPE_FUNCTION,
+ &fval)) {
+ return JS_FALSE;
+ }
+ argv[-1] = fval;
+ }
+ if (!VALUE_IS_FUNCTION(cx, fval)) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_INCOMPATIBLE_PROTO,
+ js_Function_str, js_toString_str,
+ JS_GetTypeName(cx,
+ JS_TypeOfValue(cx, fval)));
+ return JS_FALSE;
+ }
+ }
+
+ obj = JSVAL_TO_OBJECT(fval);
+ }
+
+ fun = (JSFunction *) JS_GetPrivate(cx, obj);
+ if (!fun)
+ return JS_TRUE;
+ if (argc && !js_ValueToECMAUint32(cx, argv[0], &indent))
+ return JS_FALSE;
+ str = JS_DecompileFunction(cx, fun, (uintN)indent);
+ if (!str)
+ return JS_FALSE;
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+
+static JSBool
+fun_toString(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ return js_fun_toString(cx, obj, 0, argc, argv, rval);
+}
+
+#if JS_HAS_TOSOURCE
+static JSBool
+fun_toSource(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ return js_fun_toString(cx, obj, JS_DONT_PRETTY_PRINT, argc, argv, rval);
+}
+#endif
+
+static const char call_str[] = "call";
+
+static JSBool
+fun_call(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsval fval, *sp, *oldsp;
+ JSString *str;
+ void *mark;
+ uintN i;
+ JSStackFrame *fp;
+ JSBool ok;
+
+ if (!OBJ_DEFAULT_VALUE(cx, obj, JSTYPE_FUNCTION, &argv[-1]))
+ return JS_FALSE;
+ fval = argv[-1];
+
+ if (!VALUE_IS_FUNCTION(cx, fval)) {
+ str = JS_ValueToString(cx, fval);
+ if (str) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_INCOMPATIBLE_PROTO,
+ js_Function_str, call_str,
+ JS_GetStringBytes(str));
+ }
+ return JS_FALSE;
+ }
+
+ if (argc == 0) {
+ /* Call fun with its global object as the 'this' param if no args. */
+ obj = NULL;
+ } else {
+ /* Otherwise convert the first arg to 'this' and skip over it. */
+ if (!js_ValueToObject(cx, argv[0], &obj))
+ return JS_FALSE;
+ argc--;
+ argv++;
+ }
+
+ /* Allocate stack space for fval, obj, and the args. */
+ sp = js_AllocStack(cx, 2 + argc, &mark);
+ if (!sp)
+ return JS_FALSE;
+
+ /* Push fval, obj, and the args. */
+ *sp++ = fval;
+ *sp++ = OBJECT_TO_JSVAL(obj);
+ for (i = 0; i < argc; i++)
+ *sp++ = argv[i];
+
+ /* Lift current frame to include the args and do the call. */
+ fp = cx->fp;
+ oldsp = fp->sp;
+ fp->sp = sp;
+ ok = js_Invoke(cx, argc, JSINVOKE_INTERNAL | JSINVOKE_SKIP_CALLER);
+
+ /* Store rval and pop stack back to our frame's sp. */
+ *rval = fp->sp[-1];
+ fp->sp = oldsp;
+ js_FreeStack(cx, mark);
+ return ok;
+}
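fun_call implements Function.prototype.call: the first argument becomes 'this' (converted with js_ValueToObject), and the remaining arguments are pushed through unchanged. For example:

    function who() { return this.tag; }
    who.call({ tag: "x" });     // "x"
    who.call();                 // 'this' defaults to the global object, so this.tag is likely undefined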
+
+static JSBool
+fun_apply(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsval fval, *sp, *oldsp;
+ JSString *str;
+ JSObject *aobj;
+ jsuint length;
+ JSBool arraylike, ok;
+ void *mark;
+ uintN i;
+ JSStackFrame *fp;
+
+ if (argc == 0) {
+ /* Will get globalObject as 'this' and no other arguments. */
+ return fun_call(cx, obj, argc, argv, rval);
+ }
+
+ if (!OBJ_DEFAULT_VALUE(cx, obj, JSTYPE_FUNCTION, &argv[-1]))
+ return JS_FALSE;
+ fval = argv[-1];
+
+ if (!VALUE_IS_FUNCTION(cx, fval)) {
+ str = JS_ValueToString(cx, fval);
+ if (str) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_INCOMPATIBLE_PROTO,
+ js_Function_str, "apply",
+ JS_GetStringBytes(str));
+ }
+ return JS_FALSE;
+ }
+
+ /* Quell GCC overwarnings. */
+ aobj = NULL;
+ length = 0;
+
+ if (argc >= 2) {
+ /* If the 2nd arg is null or void, call the function with 0 args. */
+ if (JSVAL_IS_NULL(argv[1]) || JSVAL_IS_VOID(argv[1])) {
+ argc = 0;
+ } else {
+ /* The second arg must be an array (or arguments object). */
+ arraylike = JS_FALSE;
+ if (!JSVAL_IS_PRIMITIVE(argv[1])) {
+ aobj = JSVAL_TO_OBJECT(argv[1]);
+ if (!js_IsArrayLike(cx, aobj, &arraylike, &length))
+ return JS_FALSE;
+ }
+ if (!arraylike) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_APPLY_ARGS, "apply");
+ return JS_FALSE;
+ }
+ }
+ }
+
+ /* Convert the first arg to 'this' and skip over it. */
+ if (!js_ValueToObject(cx, argv[0], &obj))
+ return JS_FALSE;
+
+ /* Allocate stack space for fval, obj, and the args. */
+ argc = (uintN)JS_MIN(length, ARRAY_INIT_LIMIT - 1);
+ sp = js_AllocStack(cx, 2 + argc, &mark);
+ if (!sp)
+ return JS_FALSE;
+
+ /* Push fval, obj, and aobj's elements as args. */
+ *sp++ = fval;
+ *sp++ = OBJECT_TO_JSVAL(obj);
+ for (i = 0; i < argc; i++) {
+ ok = JS_GetElement(cx, aobj, (jsint)i, sp);
+ if (!ok)
+ goto out;
+ sp++;
+ }
+
+ /* Lift current frame to include the args and do the call. */
+ fp = cx->fp;
+ oldsp = fp->sp;
+ fp->sp = sp;
+ ok = js_Invoke(cx, argc, JSINVOKE_INTERNAL | JSINVOKE_SKIP_CALLER);
+
+ /* Store rval and pop stack back to our frame's sp. */
+ *rval = fp->sp[-1];
+ fp->sp = oldsp;
+out:
+ js_FreeStack(cx, mark);
+ return ok;
+}
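fun_apply differs from call only in how the arguments arrive: the second argument must be an array or arguments object whose elements are pushed individually. A short sketch:

    function sum(a, b, c) { return a + b + c; }
    sum.apply(null, [1, 2, 3]);     // 6 -- elements become the actual arguments
    sum.apply(null, null);          // argc forced to 0 above, so all formals are undefined => NaN
    sum.apply(null, "abc");         // error: second argument is not array-like (JSMSG_BAD_APPLY_ARGS)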
+
+#ifdef NARCISSUS
+static JSBool
+fun_applyConstructor(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSObject *aobj;
+ uintN length, i;
+ void *mark;
+ jsval *sp, *newsp, *oldsp;
+ JSStackFrame *fp;
+ JSBool ok;
+
+ if (JSVAL_IS_PRIMITIVE(argv[0]) ||
+ (aobj = JSVAL_TO_OBJECT(argv[0]),
+ OBJ_GET_CLASS(cx, aobj) != &js_ArrayClass &&
+ OBJ_GET_CLASS(cx, aobj) != &js_ArgumentsClass)) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_APPLY_ARGS, "__applyConstruct__");
+ return JS_FALSE;
+ }
+
+ if (!js_GetLengthProperty(cx, aobj, &length))
+ return JS_FALSE;
+
+ if (length >= ARRAY_INIT_LIMIT)
+ length = ARRAY_INIT_LIMIT - 1;
+ newsp = sp = js_AllocStack(cx, 2 + length, &mark);
+ if (!sp)
+ return JS_FALSE;
+
+ fp = cx->fp;
+ oldsp = fp->sp;
+ *sp++ = OBJECT_TO_JSVAL(obj);
+ *sp++ = JSVAL_NULL; /* This is filled automagically. */
+ for (i = 0; i < length; i++) {
+ ok = JS_GetElement(cx, aobj, (jsint)i, sp);
+ if (!ok)
+ goto out;
+ sp++;
+ }
+
+ oldsp = fp->sp;
+ fp->sp = sp;
+ ok = js_InvokeConstructor(cx, newsp, length);
+
+ *rval = fp->sp[-1];
+ fp->sp = oldsp;
+out:
+ js_FreeStack(cx, mark);
+ return ok;
+}
+#endif
+
+static JSFunctionSpec function_methods[] = {
+#if JS_HAS_TOSOURCE
+ {js_toSource_str, fun_toSource, 0,0,0},
+#endif
+ {js_toString_str, fun_toString, 1,0,0},
+ {"apply", fun_apply, 2,0,0},
+ {call_str, fun_call, 1,0,0},
+#ifdef NARCISSUS
+ {"__applyConstructor__", fun_applyConstructor, 1,0,0},
+#endif
+ {0,0,0,0,0}
+};
+
+JSBool
+js_IsIdentifier(JSString *str)
+{
+ size_t length;
+ jschar c, *chars, *end, *s;
+
+ length = JSSTRING_LENGTH(str);
+ if (length == 0)
+ return JS_FALSE;
+ chars = JSSTRING_CHARS(str);
+ c = *chars;
+ if (!JS_ISIDSTART(c))
+ return JS_FALSE;
+ end = chars + length;
+ for (s = chars + 1; s != end; ++s) {
+ c = *s;
+ if (!JS_ISIDENT(c))
+ return JS_FALSE;
+ }
+ return !js_IsKeyword(chars, length);
+}
+
+static JSBool
+Function(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSStackFrame *fp, *caller;
+ JSFunction *fun;
+ JSObject *parent;
+ uintN i, n, lineno, dupflag;
+ JSAtom *atom;
+ const char *filename;
+ JSObject *obj2;
+ JSProperty *prop;
+ JSScopeProperty *sprop;
+ JSString *str, *arg;
+ void *mark;
+ JSTokenStream *ts;
+ JSPrincipals *principals;
+ jschar *collected_args, *cp;
+ size_t arg_length, args_length, old_args_length;
+ JSTokenType tt;
+ JSBool ok;
+
+ fp = cx->fp;
+ if (!(fp->flags & JSFRAME_CONSTRUCTING)) {
+ obj = js_NewObject(cx, &js_FunctionClass, NULL, NULL);
+ if (!obj)
+ return JS_FALSE;
+ *rval = OBJECT_TO_JSVAL(obj);
+ }
+ fun = (JSFunction *) JS_GetPrivate(cx, obj);
+ if (fun)
+ return JS_TRUE;
+
+ /*
+ * NB: (new Function) is not lexically closed by its caller, it's just an
+ * anonymous function in the top-level scope that its constructor inhabits.
+ * Thus 'var x = 42; f = new Function("return x"); print(f())' prints 42,
+ * and so would a call to f from another top-level's script or function.
+ *
+ * In older versions, before call objects, a new Function was adopted by
+ * its running context's globalObject, which might be different from the
+ * top-level reachable from scopeChain (in HTML frames, e.g.).
+ */
+ parent = OBJ_GET_PARENT(cx, JSVAL_TO_OBJECT(argv[-2]));
+
+ fun = js_NewFunction(cx, obj, NULL, 0, JSFUN_LAMBDA, parent,
+ cx->runtime->atomState.anonymousAtom);
+
+ if (!fun)
+ return JS_FALSE;
+
+ /*
+ * Function is static and not called directly by other functions in this
+ * file, therefore it is callable only as a native function by js_Invoke.
+ * Find the scripted caller, possibly skipping other native frames such as
+ * are built for Function.prototype.call or .apply activations that invoke
+ * Function indirectly from a script.
+ */
+ JS_ASSERT(!fp->script && fp->fun && fp->fun->u.n.native == Function);
+ caller = JS_GetScriptedCaller(cx, fp);
+ if (caller) {
+ filename = caller->script->filename;
+ lineno = js_PCToLineNumber(cx, caller->script, caller->pc);
+ principals = JS_EvalFramePrincipals(cx, fp, caller);
+ } else {
+ filename = NULL;
+ lineno = 0;
+ principals = NULL;
+ }
+
+ /* Belt-and-braces: check that the caller has access to parent. */
+ if (!js_CheckPrincipalsAccess(cx, parent, principals,
+ CLASS_ATOM(cx, Function))) {
+ return JS_FALSE;
+ }
+
+ n = argc ? argc - 1 : 0;
+ if (n > 0) {
+ /*
+ * Collect the function-argument arguments into one string, separated
+ * by commas, then make a tokenstream from that string, and scan it to
+ * get the arguments. We need to throw the full scanner at the
+ * problem, because the argument string can legitimately contain
+ * comments and linefeeds. XXX It might be better to concatenate
+ * everything up into a function definition and pass it to the
+ * compiler, but doing it this way is less of a delta from the old
+ * code. See ECMA 15.3.2.1.
+ */
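+        /*
+         * For example, a hypothetical call such as
+         *
+         *     new Function("a, // first\n b", "c", "return a + b + c")
+         *
+         * collects the argument string "a, // first\n b,c"; the scanner then
+         * yields only TOK_NAME and TOK_COMMA tokens for the formals a, b and
+         * c, consuming the comment and the linefeed as it would in any other
+         * source text.
+         */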
+ args_length = 0;
+ for (i = 0; i < n; i++) {
+ /* Collect the lengths for all the function-argument arguments. */
+ arg = js_ValueToString(cx, argv[i]);
+ if (!arg)
+ return JS_FALSE;
+ argv[i] = STRING_TO_JSVAL(arg);
+
+ /*
+ * Check for overflow. The < test works because the maximum
+ * JSString length fits in 2 fewer bits than size_t has.
+ */
+ old_args_length = args_length;
+ args_length = old_args_length + JSSTRING_LENGTH(arg);
+ if (args_length < old_args_length) {
+ JS_ReportOutOfMemory(cx);
+ return JS_FALSE;
+ }
+ }
+
+ /* Add 1 for each joining comma and check for overflow (two ways). */
+ old_args_length = args_length;
+ args_length = old_args_length + n - 1;
+ if (args_length < old_args_length ||
+ args_length >= ~(size_t)0 / sizeof(jschar)) {
+ JS_ReportOutOfMemory(cx);
+ return JS_FALSE;
+ }
+
+ /*
+ * Allocate a string to hold the concatenated arguments, including room
+ * for a terminating 0. Mark cx->tempPool for later release, to free
+ * collected_args and its tokenstream in one swoop.
+ */
+ mark = JS_ARENA_MARK(&cx->tempPool);
+ JS_ARENA_ALLOCATE_CAST(cp, jschar *, &cx->tempPool,
+ (args_length+1) * sizeof(jschar));
+ if (!cp) {
+ JS_ReportOutOfMemory(cx);
+ return JS_FALSE;
+ }
+ collected_args = cp;
+
+ /*
+ * Concatenate the arguments into the new string, separated by commas.
+ */
+ for (i = 0; i < n; i++) {
+ arg = JSVAL_TO_STRING(argv[i]);
+ arg_length = JSSTRING_LENGTH(arg);
+ (void) js_strncpy(cp, JSSTRING_CHARS(arg), arg_length);
+ cp += arg_length;
+
+ /* Add separating comma or terminating 0. */
+ *cp++ = (i + 1 < n) ? ',' : 0;
+ }
+
+ /*
+ * Make a tokenstream (allocated from cx->tempPool) that reads from
+ * the given string.
+ */
+ ts = js_NewTokenStream(cx, collected_args, args_length, filename,
+ lineno, principals);
+ if (!ts) {
+ JS_ARENA_RELEASE(&cx->tempPool, mark);
+ return JS_FALSE;
+ }
+
+ /* The argument string may be empty or contain no tokens. */
+ tt = js_GetToken(cx, ts);
+ if (tt != TOK_EOF) {
+ for (;;) {
+ /*
+ * Check that it's a name. This also implicitly guards against
+ * TOK_ERROR, which was already reported.
+ */
+ if (tt != TOK_NAME)
+ goto bad_formal;
+
+ /*
+ * Get the atom corresponding to the name from the tokenstream;
+ * we're assured at this point that it's a valid identifier.
+ */
+ atom = CURRENT_TOKEN(ts).t_atom;
+ if (!js_LookupHiddenProperty(cx, obj, ATOM_TO_JSID(atom),
+ &obj2, &prop)) {
+ goto bad_formal;
+ }
+ sprop = (JSScopeProperty *) prop;
+ dupflag = 0;
+ if (sprop) {
+ ok = JS_TRUE;
+ if (obj2 == obj) {
+ const char *name = js_AtomToPrintableString(cx, atom);
+
+ /*
+ * A duplicate parameter name. We force a duplicate
+ * node on the SCOPE_LAST_PROP(scope) list with the
+ * same id, distinguished by the SPROP_IS_DUPLICATE
+ * flag, and not mapped by an entry in scope.
+ */
+ JS_ASSERT(sprop->getter == js_GetArgument);
+ ok = name &&
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS |
+ JSREPORT_WARNING |
+ JSREPORT_STRICT,
+ JSMSG_DUPLICATE_FORMAL,
+ name);
+
+ dupflag = SPROP_IS_DUPLICATE;
+ }
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+ if (!ok)
+ goto bad_formal;
+ sprop = NULL;
+ }
+ if (!js_AddHiddenProperty(cx, fun->object, ATOM_TO_JSID(atom),
+ js_GetArgument, js_SetArgument,
+ SPROP_INVALID_SLOT,
+ JSPROP_PERMANENT | JSPROP_SHARED,
+ dupflag | SPROP_HAS_SHORTID,
+ fun->nargs)) {
+ goto bad_formal;
+ }
+ if (fun->nargs == JS_BITMASK(16)) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_TOO_MANY_FUN_ARGS);
+ goto bad;
+ }
+ fun->nargs++;
+
+ /*
+ * Get the next token. Stop on end of stream. Otherwise
+ * insist on a comma, get another name, and iterate.
+ */
+ tt = js_GetToken(cx, ts);
+ if (tt == TOK_EOF)
+ break;
+ if (tt != TOK_COMMA)
+ goto bad_formal;
+ tt = js_GetToken(cx, ts);
+ }
+ }
+
+ /* Clean up. */
+ ok = js_CloseTokenStream(cx, ts);
+ JS_ARENA_RELEASE(&cx->tempPool, mark);
+ if (!ok)
+ return JS_FALSE;
+ }
+
+ if (argc) {
+ str = js_ValueToString(cx, argv[argc-1]);
+ } else {
+ /* Can't use cx->runtime->emptyString because we're called too early. */
+ str = js_NewStringCopyZ(cx, js_empty_ucstr, 0);
+ }
+ if (!str)
+ return JS_FALSE;
+ if (argv) {
+ /* Use the last arg (or this if argc == 0) as a local GC root. */
+ argv[(intN)(argc-1)] = STRING_TO_JSVAL(str);
+ }
+
+ mark = JS_ARENA_MARK(&cx->tempPool);
+ ts = js_NewTokenStream(cx, JSSTRING_CHARS(str), JSSTRING_LENGTH(str),
+ filename, lineno, principals);
+ if (!ts) {
+ ok = JS_FALSE;
+ } else {
+ ok = js_CompileFunctionBody(cx, ts, fun) &&
+ js_CloseTokenStream(cx, ts);
+ }
+ JS_ARENA_RELEASE(&cx->tempPool, mark);
+ return ok;
+
+bad_formal:
+ /*
+ * Report "malformed formal parameter" iff no illegal char or similar
+ * scanner error was already reported.
+ */
+ if (!(ts->flags & TSF_ERROR))
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_BAD_FORMAL);
+
+bad:
+ /*
+ * Clean up the arguments string and tokenstream if we failed to parse
+ * the arguments.
+ */
+ (void)js_CloseTokenStream(cx, ts);
+ JS_ARENA_RELEASE(&cx->tempPool, mark);
+ return JS_FALSE;
+}
+
+JSObject *
+js_InitFunctionClass(JSContext *cx, JSObject *obj)
+{
+ JSObject *proto;
+ JSAtom *atom;
+ JSFunction *fun;
+
+ proto = JS_InitClass(cx, obj, NULL, &js_FunctionClass, Function, 1,
+ function_props, function_methods, NULL, NULL);
+ if (!proto)
+ return NULL;
+ atom = js_Atomize(cx, js_FunctionClass.name, strlen(js_FunctionClass.name),
+ 0);
+ if (!atom)
+ goto bad;
+ fun = js_NewFunction(cx, proto, NULL, 0, 0, obj, NULL);
+ if (!fun)
+ goto bad;
+ fun->u.i.script = js_NewScript(cx, 1, 0, 0);
+ if (!fun->u.i.script)
+ goto bad;
+ fun->u.i.script->code[0] = JSOP_STOP;
+ fun->flags |= JSFUN_INTERPRETED;
+ return proto;
+
+bad:
+ cx->weakRoots.newborn[GCX_OBJECT] = NULL;
+ return NULL;
+}
+
+JSObject *
+js_InitCallClass(JSContext *cx, JSObject *obj)
+{
+ JSObject *proto;
+
+ proto = JS_InitClass(cx, obj, NULL, &js_CallClass, NULL, 0,
+ call_props, NULL, NULL, NULL);
+ if (!proto)
+ return NULL;
+
+ /*
+ * Null Call.prototype's proto slot so that Object.prototype.* does not
+ * pollute the scope of heavyweight functions.
+ */
+ OBJ_SET_PROTO(cx, proto, NULL);
+ return proto;
+}
+
+JSFunction *
+js_NewFunction(JSContext *cx, JSObject *funobj, JSNative native, uintN nargs,
+ uintN flags, JSObject *parent, JSAtom *atom)
+{
+ JSFunction *fun;
+ JSTempValueRooter tvr;
+
+ /* If funobj is null, allocate an object for it. */
+ if (funobj) {
+ OBJ_SET_PARENT(cx, funobj, parent);
+ } else {
+ funobj = js_NewObject(cx, &js_FunctionClass, NULL, parent);
+ if (!funobj)
+ return NULL;
+ }
+
+ /* Protect fun from any potential GC callback. */
+ JS_PUSH_SINGLE_TEMP_ROOT(cx, OBJECT_TO_JSVAL(funobj), &tvr);
+
+ /*
+ * Allocate fun after allocating funobj so slot allocation in js_NewObject
+ * does not wipe out fun from newborn[GCX_PRIVATE].
+ */
+ fun = (JSFunction *) js_NewGCThing(cx, GCX_PRIVATE, sizeof(JSFunction));
+ if (!fun)
+ goto out;
+
+ /* Initialize all function members. */
+ fun->object = NULL;
+ fun->nargs = nargs;
+ fun->flags = flags & JSFUN_FLAGS_MASK;
+ fun->u.n.native = native;
+ fun->u.n.extra = 0;
+ fun->u.n.spare = 0;
+ fun->atom = atom;
+ fun->clasp = NULL;
+
+ /* Link fun to funobj and vice versa. */
+ if (!js_LinkFunctionObject(cx, fun, funobj)) {
+ cx->weakRoots.newborn[GCX_OBJECT] = NULL;
+ fun = NULL;
+ }
+
+out:
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ return fun;
+}
+
+JSObject *
+js_CloneFunctionObject(JSContext *cx, JSObject *funobj, JSObject *parent)
+{
+ JSObject *newfunobj;
+ JSFunction *fun;
+
+ JS_ASSERT(OBJ_GET_CLASS(cx, funobj) == &js_FunctionClass);
+ newfunobj = js_NewObject(cx, &js_FunctionClass, funobj, parent);
+ if (!newfunobj)
+ return NULL;
+ fun = (JSFunction *) JS_GetPrivate(cx, funobj);
+ if (!js_LinkFunctionObject(cx, fun, newfunobj)) {
+ cx->weakRoots.newborn[GCX_OBJECT] = NULL;
+ return NULL;
+ }
+ return newfunobj;
+}
+
+JSBool
+js_LinkFunctionObject(JSContext *cx, JSFunction *fun, JSObject *funobj)
+{
+ if (!fun->object)
+ fun->object = funobj;
+ return JS_SetPrivate(cx, funobj, fun);
+}
+
+JSFunction *
+js_DefineFunction(JSContext *cx, JSObject *obj, JSAtom *atom, JSNative native,
+ uintN nargs, uintN attrs)
+{
+ JSFunction *fun;
+
+ fun = js_NewFunction(cx, NULL, native, nargs, attrs, obj, atom);
+ if (!fun)
+ return NULL;
+ if (!OBJ_DEFINE_PROPERTY(cx, obj, ATOM_TO_JSID(atom),
+ OBJECT_TO_JSVAL(fun->object),
+ NULL, NULL,
+ attrs & ~JSFUN_FLAGS_MASK, NULL)) {
+ return NULL;
+ }
+ return fun;
+}
+
+#if (JSV2F_CONSTRUCT & JSV2F_SEARCH_STACK)
+# error "JSINVOKE_CONSTRUCT and JSV2F_SEARCH_STACK are not disjoint!"
+#endif
+
+JSFunction *
+js_ValueToFunction(JSContext *cx, jsval *vp, uintN flags)
+{
+ jsval v;
+ JSObject *obj;
+
+ v = *vp;
+ obj = NULL;
+ if (JSVAL_IS_OBJECT(v)) {
+ obj = JSVAL_TO_OBJECT(v);
+ if (obj && OBJ_GET_CLASS(cx, obj) != &js_FunctionClass) {
+ if (!OBJ_DEFAULT_VALUE(cx, obj, JSTYPE_FUNCTION, &v))
+ return NULL;
+ obj = VALUE_IS_FUNCTION(cx, v) ? JSVAL_TO_OBJECT(v) : NULL;
+ }
+ }
+ if (!obj) {
+ js_ReportIsNotFunction(cx, vp, flags);
+ return NULL;
+ }
+ return (JSFunction *) JS_GetPrivate(cx, obj);
+}
+
+JSObject *
+js_ValueToFunctionObject(JSContext *cx, jsval *vp, uintN flags)
+{
+ JSFunction *fun;
+ JSObject *funobj;
+ JSStackFrame *caller;
+ JSPrincipals *principals;
+
+ if (VALUE_IS_FUNCTION(cx, *vp))
+ return JSVAL_TO_OBJECT(*vp);
+
+ fun = js_ValueToFunction(cx, vp, flags);
+ if (!fun)
+ return NULL;
+ funobj = fun->object;
+ *vp = OBJECT_TO_JSVAL(funobj);
+
+ caller = JS_GetScriptedCaller(cx, cx->fp);
+ if (caller) {
+ principals = caller->script->principals;
+ } else {
+ /* No scripted caller, don't allow access. */
+ principals = NULL;
+ }
+
+ if (!js_CheckPrincipalsAccess(cx, funobj, principals,
+ fun->atom
+ ? fun->atom
+ : cx->runtime->atomState.anonymousAtom)) {
+ return NULL;
+ }
+ return funobj;
+}
+
+JSObject *
+js_ValueToCallableObject(JSContext *cx, jsval *vp, uintN flags)
+{
+ JSObject *callable;
+
+ callable = JSVAL_IS_PRIMITIVE(*vp) ? NULL : JSVAL_TO_OBJECT(*vp);
+ if (callable &&
+ ((callable->map->ops == &js_ObjectOps)
+ ? OBJ_GET_CLASS(cx, callable)->call
+ : callable->map->ops->call)) {
+ *vp = OBJECT_TO_JSVAL(callable);
+ } else {
+ callable = js_ValueToFunctionObject(cx, vp, flags);
+ }
+ return callable;
+}
+
+void
+js_ReportIsNotFunction(JSContext *cx, jsval *vp, uintN flags)
+{
+ JSStackFrame *fp;
+ JSString *str;
+ JSTempValueRooter tvr;
+ const char *bytes, *source;
+
+ for (fp = cx->fp; fp && !fp->spbase; fp = fp->down)
+ continue;
+ str = js_DecompileValueGenerator(cx,
+ (fp && fp->spbase <= vp && vp < fp->sp)
+ ? vp - fp->sp
+ : (flags & JSV2F_SEARCH_STACK)
+ ? JSDVG_SEARCH_STACK
+ : JSDVG_IGNORE_STACK,
+ *vp,
+ NULL);
+ if (str) {
+ JS_PUSH_TEMP_ROOT_STRING(cx, str, &tvr);
+ bytes = JS_GetStringBytes(str);
+ if (flags & JSV2F_ITERATOR) {
+ source = js_ValueToPrintableSource(cx, *vp);
+ if (source) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_ITERATOR,
+ bytes, js_iterator_str, source);
+ }
+ } else {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ (uintN)((flags & JSV2F_CONSTRUCT)
+ ? JSMSG_NOT_CONSTRUCTOR
+ : JSMSG_NOT_FUNCTION),
+ bytes);
+ }
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ }
+}
diff --git a/third_party/js-1.7/jsfun.h b/third_party/js-1.7/jsfun.h
new file mode 100644
index 0000000..8d5c185
--- /dev/null
+++ b/third_party/js-1.7/jsfun.h
@@ -0,0 +1,170 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsfun_h___
+#define jsfun_h___
+/*
+ * JS function definitions.
+ */
+#include "jsprvtd.h"
+#include "jspubtd.h"
+
+JS_BEGIN_EXTERN_C
+
+struct JSFunction {
+ JSObject *object; /* back-pointer to GC'ed object header */
+ uint16 nargs; /* minimum number of actual arguments */
+ uint16 flags; /* bound method and other flags, see jsapi.h */
+ union {
+ struct {
+ uint16 extra; /* number of arg slots for local GC roots */
+ uint16 spare; /* reserved for future use */
+ JSNative native; /* native method pointer or null */
+ } n;
+ struct {
+ uint16 nvars; /* number of local variables */
+        uint16       nregexps;     /* number of regular expression literals */
+ JSScript *script; /* interpreted bytecode descriptor or null */
+ } i;
+ } u;
+ JSAtom *atom; /* name for diagnostics and decompiling */
+ JSClass *clasp; /* if non-null, constructor for this class */
+};
+
+#define JSFUN_INTERPRETED 0x8000 /* use u.i if set, u.n if unset */
+
+#define FUN_INTERPRETED(fun) ((fun)->flags & JSFUN_INTERPRETED)
+#define FUN_NATIVE(fun) (FUN_INTERPRETED(fun) ? NULL : (fun)->u.n.native)
+#define FUN_SCRIPT(fun) (FUN_INTERPRETED(fun) ? (fun)->u.i.script : NULL)
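+
+/*
+ * For example, code that needs either the native pointer or the script (never
+ * both) might be sketched as:
+ *
+ *     if (FUN_INTERPRETED(fun))
+ *         script = FUN_SCRIPT(fun);   // u.i.script is meaningful here
+ *     else
+ *         native = FUN_NATIVE(fun);   // u.n.native is meaningful here
+ */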
+
+extern JSClass js_ArgumentsClass;
+extern JSClass js_CallClass;
+
+/* JS_FRIEND_DATA so that VALUE_IS_FUNCTION is callable from the shell. */
+extern JS_FRIEND_DATA(JSClass) js_FunctionClass;
+
+/*
+ * NB: jsapi.h and jsobj.h must be included before any call to this macro.
+ */
+#define VALUE_IS_FUNCTION(cx, v) \
+ (!JSVAL_IS_PRIMITIVE(v) && \
+ OBJ_GET_CLASS(cx, JSVAL_TO_OBJECT(v)) == &js_FunctionClass)
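+
+/*
+ * A usage sketch: callers typically test a value before reaching for the
+ * private JSFunction, e.g.
+ *
+ *     if (VALUE_IS_FUNCTION(cx, v))
+ *         fun = (JSFunction *) JS_GetPrivate(cx, JSVAL_TO_OBJECT(v));
+ *
+ * js_ValueToFunction and js_ValueToFunctionObject in jsfun.c follow this
+ * pattern.
+ */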
+
+extern JSBool
+js_fun_toString(JSContext *cx, JSObject *obj, uint32 indent,
+ uintN argc, jsval *argv, jsval *rval);
+
+extern JSBool
+js_IsIdentifier(JSString *str);
+
+extern JSObject *
+js_InitFunctionClass(JSContext *cx, JSObject *obj);
+
+extern JSObject *
+js_InitArgumentsClass(JSContext *cx, JSObject *obj);
+
+extern JSObject *
+js_InitCallClass(JSContext *cx, JSObject *obj);
+
+extern JSFunction *
+js_NewFunction(JSContext *cx, JSObject *funobj, JSNative native, uintN nargs,
+ uintN flags, JSObject *parent, JSAtom *atom);
+
+extern JSObject *
+js_CloneFunctionObject(JSContext *cx, JSObject *funobj, JSObject *parent);
+
+extern JSBool
+js_LinkFunctionObject(JSContext *cx, JSFunction *fun, JSObject *object);
+
+extern JSFunction *
+js_DefineFunction(JSContext *cx, JSObject *obj, JSAtom *atom, JSNative native,
+ uintN nargs, uintN flags);
+
+/*
+ * Flags for js_ValueToFunction and js_ReportIsNotFunction. We depend on the
+ * fact that JSINVOKE_CONSTRUCT (aka JSFRAME_CONSTRUCTING) is 1, and test that
+ * with #if/#error in jsfun.c.
+ */
+#define JSV2F_CONSTRUCT JSINVOKE_CONSTRUCT
+#define JSV2F_ITERATOR JSINVOKE_ITERATOR
+#define JSV2F_SEARCH_STACK 0x10000
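+
+/*
+ * For example, a caller reporting a failed construction attempt might pass
+ * JSV2F_CONSTRUCT (sketch only):
+ *
+ *     fun = js_ValueToFunction(cx, vp, JSV2F_CONSTRUCT);
+ *
+ * js_ReportIsNotFunction then selects JSMSG_NOT_CONSTRUCTOR rather than
+ * JSMSG_NOT_FUNCTION when the value is not callable.
+ */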
+
+extern JSFunction *
+js_ValueToFunction(JSContext *cx, jsval *vp, uintN flags);
+
+extern JSObject *
+js_ValueToFunctionObject(JSContext *cx, jsval *vp, uintN flags);
+
+extern JSObject *
+js_ValueToCallableObject(JSContext *cx, jsval *vp, uintN flags);
+
+extern void
+js_ReportIsNotFunction(JSContext *cx, jsval *vp, uintN flags);
+
+extern JSObject *
+js_GetCallObject(JSContext *cx, JSStackFrame *fp, JSObject *parent);
+
+extern JSBool
+js_PutCallObject(JSContext *cx, JSStackFrame *fp);
+
+extern JSBool
+js_GetCallVariable(JSContext *cx, JSObject *obj, jsval id, jsval *vp);
+
+extern JSBool
+js_SetCallVariable(JSContext *cx, JSObject *obj, jsval id, jsval *vp);
+
+extern JSBool
+js_GetArgsValue(JSContext *cx, JSStackFrame *fp, jsval *vp);
+
+extern JSBool
+js_GetArgsProperty(JSContext *cx, JSStackFrame *fp, jsid id,
+ JSObject **objp, jsval *vp);
+
+extern JSObject *
+js_GetArgsObject(JSContext *cx, JSStackFrame *fp);
+
+extern JSBool
+js_PutArgsObject(JSContext *cx, JSStackFrame *fp);
+
+extern JSBool
+js_XDRFunction(JSXDRState *xdr, JSObject **objp);
+
+JS_END_EXTERN_C
+
+#endif /* jsfun_h___ */
diff --git a/third_party/js-1.7/jsgc.c b/third_party/js-1.7/jsgc.c
new file mode 100644
index 0000000..7fae096
--- /dev/null
+++ b/third_party/js-1.7/jsgc.c
@@ -0,0 +1,3201 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sw=4 et tw=78:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * JS Mark-and-Sweep Garbage Collector.
+ *
+ * This GC allocates fixed-sized things with sizes up to GC_NBYTES_MAX (see
+ * jsgc.h). It allocates from a special GC arena pool with each arena allocated
+ * using malloc. It uses an ideally parallel array of flag bytes to hold the
+ * mark bit, finalizer type index, etc.
+ *
+ * XXX swizzle page to freelist for better locality of reference
+ */
+#include "jsstddef.h"
+#include <stdlib.h> /* for free */
+#include <string.h> /* for memset used when DEBUG */
+#include "jstypes.h"
+#include "jsutil.h" /* Added by JSIFY */
+#include "jshash.h" /* Added by JSIFY */
+#include "jsapi.h"
+#include "jsatom.h"
+#include "jsbit.h"
+#include "jsclist.h"
+#include "jscntxt.h"
+#include "jsconfig.h"
+#include "jsdbgapi.h"
+#include "jsexn.h"
+#include "jsfun.h"
+#include "jsgc.h"
+#include "jsinterp.h"
+#include "jsiter.h"
+#include "jslock.h"
+#include "jsnum.h"
+#include "jsobj.h"
+#include "jsscope.h"
+#include "jsscript.h"
+#include "jsstr.h"
+
+#if JS_HAS_XML_SUPPORT
+#include "jsxml.h"
+#endif
+
+/*
+ * GC arena sizing depends on amortizing arena overhead using a large number
+ * of things per arena, and on the thing/flags ratio of 8:1 on most platforms.
+ *
+ * On 64-bit platforms, we would have half as many things per arena because
+ * pointers are twice as big, so we double the bytes for things per arena.
+ * This preserves the 1024 byte flags sub-arena size, which relates to the
+ * GC_PAGE_SIZE (see below for why).
+ */
+#if JS_BYTES_PER_WORD == 8
+# define GC_THINGS_SHIFT 14 /* 16KB for things on Alpha, etc. */
+#else
+# define GC_THINGS_SHIFT 13 /* 8KB for things on most platforms */
+#endif
+#define GC_THINGS_SIZE JS_BIT(GC_THINGS_SHIFT)
+#define GC_FLAGS_SIZE (GC_THINGS_SIZE / sizeof(JSGCThing))
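+
+/*
+ * For example, on a 32-bit platform with sizeof(JSGCThing) == 8 this gives
+ * GC_THINGS_SIZE == 8192 and GC_FLAGS_SIZE == 8192 / 8 == 1024: 1024 things
+ * and 1024 flag bytes per arena.  On a 64-bit platform with
+ * sizeof(JSGCThing) == 16, GC_THINGS_SIZE == 16384 still yields 1024 things
+ * and 1024 flag bytes, preserving the flags sub-arena size described above.
+ */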
+
+/*
+ * A GC arena contains one flag byte for each thing in its heap, and supports
+ * O(1) lookup of a flag given its thing's address.
+ *
+ * To implement this, we take advantage of the thing/flags numerology: given
+ * the 8K bytes worth of GC-things, there are 1K flag bytes. Within each 9K
+ * allocation for things+flags there are always 8 consecutive 1K-pages each
+ * aligned on 1K boundary. We use these pages to allocate things and the
+ * remaining 1K of space before and after the aligned pages to store flags.
+ * If we are really lucky and things+flags starts on a 1K boundary, then
+ * flags would consist of a single 1K chunk that comes after 8K of things.
+ * Otherwise there are 2 chunks of flags, one before and one after things.
+ *
+ * To be able to find the flag byte for a particular thing, we put a
+ * JSGCPageInfo record at the beginning of each 1K-aligned page to hold that
+ * page's offset from the beginning of things+flags allocation and we allocate
+ * things after this record. Thus for each thing |thing_address & ~1023|
+ * gives the address of a JSGCPageInfo record from which we read page_offset.
+ * Due to page alignment
+ * (page_offset & ~1023) + (thing_address & 1023)
+ * gives thing_offset from the beginning of 8K paged things. We then divide
+ * thing_offset by sizeof(JSGCThing) to get thing_index.
+ *
+ * Now |page_address - page_offset| is things+flags arena_address and
+ * (page_offset & 1023) is the offset of the first page from the start of
+ * things+flags area. Thus if
+ * thing_index < (page_offset & 1023)
+ * then
+ * allocation_start_address + thing_index < address_of_the_first_page
+ * and we use
+ * allocation_start_address + thing_index
+ * as the address to store thing's flags. If
+ * thing_index >= (page_offset & 1023),
+ * then we use the chunk of flags that comes after the pages with things
+ * and calculate the address for the flag byte as
+ * address_of_the_first_page + 8K + (thing_index - (page_offset & 1023))
+ * which is just
+ * allocation_start_address + thing_index + 8K.
+ *
+ * When we allocate things with size equal to sizeof(JSGCThing), the overhead
+ * of this scheme for 32 bit platforms is (8+8*(8+1))/(8+9K) or 0.87%
+ * (assuming 4 bytes for each JSGCArena header, and 8 bytes for each
+ * JSGCThing and JSGCPageInfo). When thing_size > 8, the scheme wastes the
+ * flag byte for each extra 8 bytes beyond sizeof(JSGCThing) in thing_size
+ * and the overhead is close to 1/8 or 12.5%.
+ * FIXME: How can we avoid this overhead?
+ *
+ * Here's some ASCII art showing an arena:
+ *
+ * split or the first 1-K aligned address.
+ * |
+ * V
+ * +--+-------+-------+-------+-------+-------+-------+-------+-------+-----+
+ * |fB| tp0 | tp1 | tp2 | tp3 | tp4 | tp5 | tp6 | tp7 | fA |
+ * +--+-------+-------+-------+-------+-------+-------+-------+-------+-----+
+ * ^ ^
+ * tI ---------+ |
+ * tJ -------------------------------------------+
+ *
+ * - fB are the "before split" flags, fA are the "after split" flags
+ * - tp0-tp7 are the 8 thing pages
+ * - thing tI points into tp1, whose flags are below the split, in fB
+ * - thing tJ points into tp5, clearly above the split
+ *
+ * In general, one of the thing pages will have some of its things' flags on
+ * the low side of the split, and the rest of its things' flags on the high
+ * side. All the other pages have flags only below or only above.
+ *
+ * (If we need to implement card-marking for an incremental GC write barrier,
+ * we can replace the word-sized offsetInArena in JSGCPageInfo by a pair of
+ * uint8 card_mark and uint16 offsetInArena fields, as the offset cannot
+ * exceed GC_THINGS_SIZE. This would give an extremely efficient write barrier:
+ * when mutating an object obj, just store a 1 byte at
+ * (uint8 *) ((jsuword)obj & ~1023) on 32-bit platforms.)
+ */
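+/*
+ * A worked example, assuming 32-bit words and sizeof(JSGCThing) == 8 (so
+ * GC_PAGE_SIZE == 1024 and GC_THINGS_SIZE == 8192): suppose the things+flags
+ * area starts 768 bytes into a 1K page.  The first aligned page then begins
+ * 256 bytes in, so the "before" flags chunk fB holds 256 bytes.  For a thing
+ * 512 bytes into the third page, page_offset == 256 + 2*1024 == 2304 and
+ *     thing_index == ((2304 & ~1023) + 512) / 8 == 320.
+ * Since 320 >= (2304 & 1023) == 256, the flag byte lives in the "after"
+ * chunk at allocation_start_address + 320 + 8K.  js_GetGCThingFlags below
+ * implements exactly this computation.
+ */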
+#define GC_PAGE_SHIFT 10
+#define GC_PAGE_MASK ((jsuword) JS_BITMASK(GC_PAGE_SHIFT))
+#define GC_PAGE_SIZE JS_BIT(GC_PAGE_SHIFT)
+#define GC_PAGE_COUNT (1 << (GC_THINGS_SHIFT - GC_PAGE_SHIFT))
+
+typedef struct JSGCPageInfo {
+ jsuword offsetInArena; /* offset from the arena start */
+ jsuword unscannedBitmap; /* bitset for fast search of marked
+ but not yet scanned GC things */
+} JSGCPageInfo;
+
+struct JSGCArena {
+ JSGCArenaList *list; /* allocation list for the arena */
+ JSGCArena *prev; /* link field for allocation list */
+ JSGCArena *prevUnscanned; /* link field for the list of arenas
+ with marked but not yet scanned
+ things */
+ jsuword unscannedPages; /* bitset for fast search of pages
+ with marked but not yet scanned
+ things */
+ uint8 base[1]; /* things+flags allocation area */
+};
+
+#define GC_ARENA_SIZE \
+ (offsetof(JSGCArena, base) + GC_THINGS_SIZE + GC_FLAGS_SIZE)
+
+#define FIRST_THING_PAGE(a) \
+ (((jsuword)(a)->base + GC_FLAGS_SIZE - 1) & ~GC_PAGE_MASK)
+
+#define PAGE_TO_ARENA(pi) \
+ ((JSGCArena *)((jsuword)(pi) - (pi)->offsetInArena \
+ - offsetof(JSGCArena, base)))
+
+#define PAGE_INDEX(pi) \
+ ((size_t)((pi)->offsetInArena >> GC_PAGE_SHIFT))
+
+#define THING_TO_PAGE(thing) \
+ ((JSGCPageInfo *)((jsuword)(thing) & ~GC_PAGE_MASK))
+
+/*
+ * Given a thing size n, return the size of the gap from the page start before
+ * the first thing. We know that any n not a power of two packs from
+ * the end of the page leaving at least enough room for one JSGCPageInfo, but
+ * not for another thing, at the front of the page (JS_ASSERTs below insist
+ * on this).
+ *
+ * This works because all allocations are a multiple of sizeof(JSGCThing) ==
+ * sizeof(JSGCPageInfo) in size.
+ */
+#define PAGE_THING_GAP(n) (((n) & ((n) - 1)) ? (GC_PAGE_SIZE % (n)) : (n))
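+
+/*
+ * For instance, assuming sizeof(JSGCThing) == 8: PAGE_THING_GAP(16) == 16,
+ * i.e. a power-of-two size reserves exactly one thing-sized slot for the
+ * JSGCPageInfo, while PAGE_THING_GAP(24) == 1024 % 24 == 16 and
+ * PAGE_THING_GAP(40) == 24 leave gaps large enough for a JSGCPageInfo but
+ * too small for another thing.
+ */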
+
+#ifdef JS_THREADSAFE
+/*
+ * The maximum number of things to put on the local free list by taking
+ * several things from the global free list or from the tail of the last
+ * allocated arena to amortize the cost of rt->gcLock.
+ *
+ * We use number 8 based on benchmarks from bug 312238.
+ */
+#define MAX_THREAD_LOCAL_THINGS 8
+
+#endif
+
+JS_STATIC_ASSERT(sizeof(JSGCThing) == sizeof(JSGCPageInfo));
+JS_STATIC_ASSERT(sizeof(JSGCThing) >= sizeof(JSObject));
+JS_STATIC_ASSERT(sizeof(JSGCThing) >= sizeof(JSString));
+JS_STATIC_ASSERT(sizeof(JSGCThing) >= sizeof(jsdouble));
+JS_STATIC_ASSERT(GC_FLAGS_SIZE >= GC_PAGE_SIZE);
+JS_STATIC_ASSERT(sizeof(JSStackHeader) >= 2 * sizeof(jsval));
+
+/*
+ * JSPtrTable capacity growth descriptor. The table grows by powers of two
+ * starting from capacity JSPtrTableInfo.minCapacity, but switching to linear
+ * growth when capacity reaches JSPtrTableInfo.linearGrowthThreshold.
+ */
+typedef struct JSPtrTableInfo {
+ uint16 minCapacity;
+ uint16 linearGrowthThreshold;
+} JSPtrTableInfo;
+
+#define GC_ITERATOR_TABLE_MIN 4
+#define GC_ITERATOR_TABLE_LINEAR 1024
+
+static const JSPtrTableInfo iteratorTableInfo = {
+ GC_ITERATOR_TABLE_MIN,
+ GC_ITERATOR_TABLE_LINEAR
+};
+
+/* Calculate table capacity based on the current value of JSPtrTable.count. */
+static size_t
+PtrTableCapacity(size_t count, const JSPtrTableInfo *info)
+{
+ size_t linear, log, capacity;
+
+ linear = info->linearGrowthThreshold;
+ JS_ASSERT(info->minCapacity <= linear);
+
+ if (count == 0) {
+ capacity = 0;
+ } else if (count < linear) {
+ log = JS_CEILING_LOG2W(count);
+ JS_ASSERT(log != JS_BITS_PER_WORD);
+ capacity = (size_t)1 << log;
+ if (capacity < info->minCapacity)
+ capacity = info->minCapacity;
+ } else {
+ capacity = JS_ROUNDUP(count, linear);
+ }
+
+ JS_ASSERT(capacity >= count);
+ return capacity;
+}
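+
+/*
+ * For example, with the iterator table parameters above (minCapacity 4,
+ * linearGrowthThreshold 1024): a count of 1 gives capacity 4, a count of 5
+ * gives 8, a count of 600 gives 1024, and a count of 1500 rounds up to the
+ * next multiple of 1024, i.e. 2048.
+ */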
+
+static void
+FreePtrTable(JSPtrTable *table, const JSPtrTableInfo *info)
+{
+ if (table->array) {
+ JS_ASSERT(table->count > 0);
+ free(table->array);
+ table->array = NULL;
+ table->count = 0;
+ }
+ JS_ASSERT(table->count == 0);
+}
+
+static JSBool
+AddToPtrTable(JSContext *cx, JSPtrTable *table, const JSPtrTableInfo *info,
+ void *ptr)
+{
+ size_t count, capacity;
+ void **array;
+
+ count = table->count;
+ capacity = PtrTableCapacity(count, info);
+
+ if (count == capacity) {
+ if (capacity < info->minCapacity) {
+ JS_ASSERT(capacity == 0);
+ JS_ASSERT(!table->array);
+ capacity = info->minCapacity;
+ } else {
+ /*
+             * Simplify the overflow detection by assuming a pointer is bigger
+             * than a byte.
+ */
+ JS_STATIC_ASSERT(2 <= sizeof table->array[0]);
+ capacity = (capacity < info->linearGrowthThreshold)
+ ? 2 * capacity
+ : capacity + info->linearGrowthThreshold;
+ if (capacity > (size_t)-1 / sizeof table->array[0])
+ goto bad;
+ }
+ array = (void **) realloc(table->array,
+ capacity * sizeof table->array[0]);
+ if (!array)
+ goto bad;
+#ifdef DEBUG
+ memset(array + count, JS_FREE_PATTERN,
+ (capacity - count) * sizeof table->array[0]);
+#endif
+ table->array = array;
+ }
+
+ table->array[count] = ptr;
+ table->count = count + 1;
+
+ return JS_TRUE;
+
+ bad:
+ JS_ReportOutOfMemory(cx);
+ return JS_FALSE;
+}
+
+static void
+ShrinkPtrTable(JSPtrTable *table, const JSPtrTableInfo *info,
+ size_t newCount)
+{
+ size_t oldCapacity, capacity;
+ void **array;
+
+ JS_ASSERT(newCount <= table->count);
+ if (newCount == table->count)
+ return;
+
+ oldCapacity = PtrTableCapacity(table->count, info);
+ table->count = newCount;
+ capacity = PtrTableCapacity(newCount, info);
+
+ if (oldCapacity != capacity) {
+ array = table->array;
+ JS_ASSERT(array);
+ if (capacity == 0) {
+ free(array);
+ table->array = NULL;
+ return;
+ }
+ array = (void **) realloc(array, capacity * sizeof array[0]);
+ if (array)
+ table->array = array;
+ }
+#ifdef DEBUG
+ memset(table->array + newCount, JS_FREE_PATTERN,
+ (capacity - newCount) * sizeof table->array[0]);
+#endif
+}
+
+#ifdef JS_GCMETER
+# define METER(x) x
+#else
+# define METER(x) ((void) 0)
+#endif
+
+static JSBool
+NewGCArena(JSRuntime *rt, JSGCArenaList *arenaList)
+{
+ JSGCArena *a;
+ jsuword offset;
+ JSGCPageInfo *pi;
+ uint32 *bytesptr;
+
+ /* Check if we are allowed and can allocate a new arena. */
+ if (rt->gcBytes >= rt->gcMaxBytes)
+ return JS_FALSE;
+ a = (JSGCArena *)malloc(GC_ARENA_SIZE);
+ if (!a)
+ return JS_FALSE;
+
+ /* Initialize the JSGCPageInfo records at the start of every thing page. */
+ offset = (GC_PAGE_SIZE - ((jsuword)a->base & GC_PAGE_MASK)) & GC_PAGE_MASK;
+ JS_ASSERT((jsuword)a->base + offset == FIRST_THING_PAGE(a));
+ do {
+ pi = (JSGCPageInfo *) (a->base + offset);
+ pi->offsetInArena = offset;
+ pi->unscannedBitmap = 0;
+ offset += GC_PAGE_SIZE;
+ } while (offset < GC_THINGS_SIZE);
+
+ METER(++arenaList->stats.narenas);
+ METER(arenaList->stats.maxarenas
+ = JS_MAX(arenaList->stats.maxarenas, arenaList->stats.narenas));
+
+ a->list = arenaList;
+ a->prev = arenaList->last;
+ a->prevUnscanned = NULL;
+ a->unscannedPages = 0;
+ arenaList->last = a;
+ arenaList->lastLimit = 0;
+
+ bytesptr = (arenaList == &rt->gcArenaList[0])
+ ? &rt->gcBytes
+ : &rt->gcPrivateBytes;
+ *bytesptr += GC_ARENA_SIZE;
+
+ return JS_TRUE;
+}
+
+static void
+DestroyGCArena(JSRuntime *rt, JSGCArenaList *arenaList, JSGCArena **ap)
+{
+ JSGCArena *a;
+ uint32 *bytesptr;
+
+ a = *ap;
+ JS_ASSERT(a);
+ bytesptr = (arenaList == &rt->gcArenaList[0])
+ ? &rt->gcBytes
+ : &rt->gcPrivateBytes;
+ JS_ASSERT(*bytesptr >= GC_ARENA_SIZE);
+ *bytesptr -= GC_ARENA_SIZE;
+ METER(rt->gcStats.afree++);
+ METER(--arenaList->stats.narenas);
+ if (a == arenaList->last)
+ arenaList->lastLimit = (uint16)(a->prev ? GC_THINGS_SIZE : 0);
+ *ap = a->prev;
+
+#ifdef DEBUG
+ memset(a, JS_FREE_PATTERN, GC_ARENA_SIZE);
+#endif
+ free(a);
+}
+
+static void
+InitGCArenaLists(JSRuntime *rt)
+{
+ uintN i, thingSize;
+ JSGCArenaList *arenaList;
+
+ for (i = 0; i < GC_NUM_FREELISTS; i++) {
+ arenaList = &rt->gcArenaList[i];
+ thingSize = GC_FREELIST_NBYTES(i);
+ JS_ASSERT((size_t)(uint16)thingSize == thingSize);
+ arenaList->last = NULL;
+ arenaList->lastLimit = 0;
+ arenaList->thingSize = (uint16)thingSize;
+ arenaList->freeList = NULL;
+ METER(memset(&arenaList->stats, 0, sizeof arenaList->stats));
+ }
+}
+
+static void
+FinishGCArenaLists(JSRuntime *rt)
+{
+ uintN i;
+ JSGCArenaList *arenaList;
+
+ for (i = 0; i < GC_NUM_FREELISTS; i++) {
+ arenaList = &rt->gcArenaList[i];
+ while (arenaList->last)
+ DestroyGCArena(rt, arenaList, &arenaList->last);
+ arenaList->freeList = NULL;
+ }
+}
+
+uint8 *
+js_GetGCThingFlags(void *thing)
+{
+ JSGCPageInfo *pi;
+ jsuword offsetInArena, thingIndex;
+
+ pi = THING_TO_PAGE(thing);
+ offsetInArena = pi->offsetInArena;
+ JS_ASSERT(offsetInArena < GC_THINGS_SIZE);
+ thingIndex = ((offsetInArena & ~GC_PAGE_MASK) |
+ ((jsuword)thing & GC_PAGE_MASK)) / sizeof(JSGCThing);
+ JS_ASSERT(thingIndex < GC_PAGE_SIZE);
+ if (thingIndex >= (offsetInArena & GC_PAGE_MASK))
+ thingIndex += GC_THINGS_SIZE;
+ return (uint8 *)pi - offsetInArena + thingIndex;
+}
+
+JSRuntime*
+js_GetGCStringRuntime(JSString *str)
+{
+ JSGCPageInfo *pi;
+ JSGCArenaList *list;
+
+ pi = THING_TO_PAGE(str);
+ list = PAGE_TO_ARENA(pi)->list;
+
+ JS_ASSERT(list->thingSize == sizeof(JSGCThing));
+ JS_ASSERT(GC_FREELIST_INDEX(sizeof(JSGCThing)) == 0);
+
+ return (JSRuntime *)((uint8 *)list - offsetof(JSRuntime, gcArenaList));
+}
+
+JSBool
+js_IsAboutToBeFinalized(JSContext *cx, void *thing)
+{
+ uint8 flags = *js_GetGCThingFlags(thing);
+
+ return !(flags & (GCF_MARK | GCF_LOCK | GCF_FINAL));
+}
+
+typedef void (*GCFinalizeOp)(JSContext *cx, JSGCThing *thing);
+
+#ifndef DEBUG
+# define js_FinalizeDouble NULL
+#endif
+
+#if !JS_HAS_XML_SUPPORT
+# define js_FinalizeXMLNamespace NULL
+# define js_FinalizeXMLQName NULL
+# define js_FinalizeXML NULL
+#endif
+
+static GCFinalizeOp gc_finalizers[GCX_NTYPES] = {
+ (GCFinalizeOp) js_FinalizeObject, /* GCX_OBJECT */
+ (GCFinalizeOp) js_FinalizeString, /* GCX_STRING */
+ (GCFinalizeOp) js_FinalizeDouble, /* GCX_DOUBLE */
+ (GCFinalizeOp) js_FinalizeString, /* GCX_MUTABLE_STRING */
+ NULL, /* GCX_PRIVATE */
+ (GCFinalizeOp) js_FinalizeXMLNamespace, /* GCX_NAMESPACE */
+ (GCFinalizeOp) js_FinalizeXMLQName, /* GCX_QNAME */
+ (GCFinalizeOp) js_FinalizeXML, /* GCX_XML */
+ NULL, /* GCX_EXTERNAL_STRING */
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL
+};
+
+#ifdef GC_MARK_DEBUG
+static const char newborn_external_string[] = "newborn external string";
+
+static const char *gc_typenames[GCX_NTYPES] = {
+ "newborn object",
+ "newborn string",
+ "newborn double",
+ "newborn mutable string",
+ "newborn private",
+ "newborn Namespace",
+ "newborn QName",
+ "newborn XML",
+ newborn_external_string,
+ newborn_external_string,
+ newborn_external_string,
+ newborn_external_string,
+ newborn_external_string,
+ newborn_external_string,
+ newborn_external_string,
+ newborn_external_string
+};
+#endif
+
+intN
+js_ChangeExternalStringFinalizer(JSStringFinalizeOp oldop,
+ JSStringFinalizeOp newop)
+{
+ uintN i;
+
+ for (i = GCX_EXTERNAL_STRING; i < GCX_NTYPES; i++) {
+ if (gc_finalizers[i] == (GCFinalizeOp) oldop) {
+ gc_finalizers[i] = (GCFinalizeOp) newop;
+ return (intN) i;
+ }
+ }
+ return -1;
+}
+
+/* This is compatible with JSDHashEntryStub. */
+typedef struct JSGCRootHashEntry {
+ JSDHashEntryHdr hdr;
+ void *root;
+ const char *name;
+} JSGCRootHashEntry;
+
+/* Initial size of the gcRootsHash table (SWAG, small enough to amortize). */
+#define GC_ROOTS_SIZE 256
+#define GC_FINALIZE_LEN 1024
+
+JSBool
+js_InitGC(JSRuntime *rt, uint32 maxbytes)
+{
+ InitGCArenaLists(rt);
+ if (!JS_DHashTableInit(&rt->gcRootsHash, JS_DHashGetStubOps(), NULL,
+ sizeof(JSGCRootHashEntry), GC_ROOTS_SIZE)) {
+ rt->gcRootsHash.ops = NULL;
+ return JS_FALSE;
+ }
+ rt->gcLocksHash = NULL; /* create lazily */
+
+ /*
+ * Separate gcMaxMallocBytes from gcMaxBytes but initialize to maxbytes
+ * for default backward API compatibility.
+ */
+ rt->gcMaxBytes = rt->gcMaxMallocBytes = maxbytes;
+
+ return JS_TRUE;
+}
+
+#ifdef JS_GCMETER
+JS_FRIEND_API(void)
+js_DumpGCStats(JSRuntime *rt, FILE *fp)
+{
+ uintN i;
+ size_t totalThings, totalMaxThings, totalBytes;
+
+ fprintf(fp, "\nGC allocation statistics:\n");
+
+#define UL(x) ((unsigned long)(x))
+#define ULSTAT(x) UL(rt->gcStats.x)
+ totalThings = 0;
+ totalMaxThings = 0;
+ totalBytes = 0;
+ for (i = 0; i < GC_NUM_FREELISTS; i++) {
+ JSGCArenaList *list = &rt->gcArenaList[i];
+ JSGCArenaStats *stats = &list->stats;
+ if (stats->maxarenas == 0) {
+ fprintf(fp, "ARENA LIST %u (thing size %lu): NEVER USED\n",
+ i, UL(GC_FREELIST_NBYTES(i)));
+ continue;
+ }
+ fprintf(fp, "ARENA LIST %u (thing size %lu):\n",
+ i, UL(GC_FREELIST_NBYTES(i)));
+ fprintf(fp, " arenas: %lu\n", UL(stats->narenas));
+ fprintf(fp, " max arenas: %lu\n", UL(stats->maxarenas));
+ fprintf(fp, " things: %lu\n", UL(stats->nthings));
+ fprintf(fp, " max things: %lu\n", UL(stats->maxthings));
+ fprintf(fp, " free list: %lu\n", UL(stats->freelen));
+ fprintf(fp, " free list density: %.1f%%\n",
+ stats->narenas == 0
+ ? 0.0
+ : (100.0 * list->thingSize * (jsdouble)stats->freelen /
+ (GC_THINGS_SIZE * (jsdouble)stats->narenas)));
+ fprintf(fp, " average free list density: %.1f%%\n",
+ stats->totalarenas == 0
+ ? 0.0
+ : (100.0 * list->thingSize * (jsdouble)stats->totalfreelen /
+ (GC_THINGS_SIZE * (jsdouble)stats->totalarenas)));
+ fprintf(fp, " recycles: %lu\n", UL(stats->recycle));
+ fprintf(fp, " recycle/alloc ratio: %.2f\n",
+ (jsdouble)stats->recycle /
+ (jsdouble)(stats->totalnew - stats->recycle));
+ totalThings += stats->nthings;
+ totalMaxThings += stats->maxthings;
+ totalBytes += GC_FREELIST_NBYTES(i) * stats->nthings;
+ }
+ fprintf(fp, "TOTAL STATS:\n");
+ fprintf(fp, " public bytes allocated: %lu\n", UL(rt->gcBytes));
+ fprintf(fp, " private bytes allocated: %lu\n", UL(rt->gcPrivateBytes));
+ fprintf(fp, " alloc attempts: %lu\n", ULSTAT(alloc));
+#ifdef JS_THREADSAFE
+    fprintf(fp, "       alloc without locks:    %lu\n", ULSTAT(localalloc));
+#endif
+ fprintf(fp, " total GC things: %lu\n", UL(totalThings));
+ fprintf(fp, " max total GC things: %lu\n", UL(totalMaxThings));
+ fprintf(fp, " GC things size: %lu\n", UL(totalBytes));
+ fprintf(fp, "allocation retries after GC: %lu\n", ULSTAT(retry));
+ fprintf(fp, " allocation failures: %lu\n", ULSTAT(fail));
+ fprintf(fp, " things born locked: %lu\n", ULSTAT(lockborn));
+ fprintf(fp, " valid lock calls: %lu\n", ULSTAT(lock));
+ fprintf(fp, " valid unlock calls: %lu\n", ULSTAT(unlock));
+ fprintf(fp, " mark recursion depth: %lu\n", ULSTAT(depth));
+ fprintf(fp, " maximum mark recursion: %lu\n", ULSTAT(maxdepth));
+ fprintf(fp, " mark C recursion depth: %lu\n", ULSTAT(cdepth));
+ fprintf(fp, " maximum mark C recursion: %lu\n", ULSTAT(maxcdepth));
+ fprintf(fp, " delayed scan bag adds: %lu\n", ULSTAT(unscanned));
+#ifdef DEBUG
+ fprintf(fp, " max delayed scan bag size: %lu\n", ULSTAT(maxunscanned));
+#endif
+ fprintf(fp, " maximum GC nesting level: %lu\n", ULSTAT(maxlevel));
+ fprintf(fp, "potentially useful GC calls: %lu\n", ULSTAT(poke));
+ fprintf(fp, " useless GC calls: %lu\n", ULSTAT(nopoke));
+ fprintf(fp, " thing arenas freed so far: %lu\n", ULSTAT(afree));
+ fprintf(fp, " stack segments scanned: %lu\n", ULSTAT(stackseg));
+ fprintf(fp, "stack segment slots scanned: %lu\n", ULSTAT(segslots));
+ fprintf(fp, "reachable closeable objects: %lu\n", ULSTAT(nclose));
+ fprintf(fp, " max reachable closeable: %lu\n", ULSTAT(maxnclose));
+ fprintf(fp, " scheduled close hooks: %lu\n", ULSTAT(closelater));
+ fprintf(fp, " max scheduled close hooks: %lu\n", ULSTAT(maxcloselater));
+#undef UL
+#undef ULSTAT
+
+#ifdef JS_ARENAMETER
+ JS_DumpArenaStats(fp);
+#endif
+}
+#endif
+
+#ifdef DEBUG
+static void
+CheckLeakedRoots(JSRuntime *rt);
+#endif
+
+void
+js_FinishGC(JSRuntime *rt)
+{
+#ifdef JS_ARENAMETER
+ JS_DumpArenaStats(stdout);
+#endif
+#ifdef JS_GCMETER
+ js_DumpGCStats(rt, stdout);
+#endif
+
+ FreePtrTable(&rt->gcIteratorTable, &iteratorTableInfo);
+#if JS_HAS_GENERATORS
+ rt->gcCloseState.reachableList = NULL;
+ METER(rt->gcStats.nclose = 0);
+ rt->gcCloseState.todoQueue = NULL;
+#endif
+ FinishGCArenaLists(rt);
+
+ if (rt->gcRootsHash.ops) {
+#ifdef DEBUG
+ CheckLeakedRoots(rt);
+#endif
+ JS_DHashTableFinish(&rt->gcRootsHash);
+ rt->gcRootsHash.ops = NULL;
+ }
+ if (rt->gcLocksHash) {
+ JS_DHashTableDestroy(rt->gcLocksHash);
+ rt->gcLocksHash = NULL;
+ }
+}
+
+JSBool
+js_AddRoot(JSContext *cx, void *rp, const char *name)
+{
+ JSBool ok = js_AddRootRT(cx->runtime, rp, name);
+ if (!ok)
+ JS_ReportOutOfMemory(cx);
+ return ok;
+}
+
+JSBool
+js_AddRootRT(JSRuntime *rt, void *rp, const char *name)
+{
+ JSBool ok;
+ JSGCRootHashEntry *rhe;
+
+ /*
+ * Due to the long-standing, but now removed, use of rt->gcLock across the
+ * bulk of js_GC, API users have come to depend on JS_AddRoot etc. locking
+ * properly with a racing GC, without calling JS_AddRoot from a request.
+ * We have to preserve API compatibility here, now that we avoid holding
+ * rt->gcLock across the mark phase (including the root hashtable mark).
+ *
+ * If the GC is running and we're called on another thread, wait for this
+ * GC activation to finish. We can safely wait here (in the case where we
+ * are called within a request on another thread's context) without fear
+ * of deadlock because the GC doesn't set rt->gcRunning until after it has
+ * waited for all active requests to end.
+ */
+ JS_LOCK_GC(rt);
+#ifdef JS_THREADSAFE
+ JS_ASSERT(!rt->gcRunning || rt->gcLevel > 0);
+ if (rt->gcRunning && rt->gcThread->id != js_CurrentThreadId()) {
+ do {
+ JS_AWAIT_GC_DONE(rt);
+ } while (rt->gcLevel > 0);
+ }
+#endif
+ rhe = (JSGCRootHashEntry *) JS_DHashTableOperate(&rt->gcRootsHash, rp,
+ JS_DHASH_ADD);
+ if (rhe) {
+ rhe->root = rp;
+ rhe->name = name;
+ ok = JS_TRUE;
+ } else {
+ ok = JS_FALSE;
+ }
+ JS_UNLOCK_GC(rt);
+ return ok;
+}
+
+JSBool
+js_RemoveRoot(JSRuntime *rt, void *rp)
+{
+ /*
+ * Due to the JS_RemoveRootRT API, we may be called outside of a request.
+ * Same synchronization drill as above in js_AddRoot.
+ */
+ JS_LOCK_GC(rt);
+#ifdef JS_THREADSAFE
+ JS_ASSERT(!rt->gcRunning || rt->gcLevel > 0);
+ if (rt->gcRunning && rt->gcThread->id != js_CurrentThreadId()) {
+ do {
+ JS_AWAIT_GC_DONE(rt);
+ } while (rt->gcLevel > 0);
+ }
+#endif
+ (void) JS_DHashTableOperate(&rt->gcRootsHash, rp, JS_DHASH_REMOVE);
+ rt->gcPoke = JS_TRUE;
+ JS_UNLOCK_GC(rt);
+ return JS_TRUE;
+}
+
+#ifdef DEBUG
+
+JS_STATIC_DLL_CALLBACK(JSDHashOperator)
+js_root_printer(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 i, void *arg)
+{
+ uint32 *leakedroots = (uint32 *)arg;
+ JSGCRootHashEntry *rhe = (JSGCRootHashEntry *)hdr;
+
+ (*leakedroots)++;
+ fprintf(stderr,
+ "JS engine warning: leaking GC root \'%s\' at %p\n",
+ rhe->name ? (char *)rhe->name : "", rhe->root);
+
+ return JS_DHASH_NEXT;
+}
+
+static void
+CheckLeakedRoots(JSRuntime *rt)
+{
+ uint32 leakedroots = 0;
+
+    /* In debug builds, warn (but don't assert) about any remaining roots. */
+ JS_DHashTableEnumerate(&rt->gcRootsHash, js_root_printer,
+ &leakedroots);
+ if (leakedroots > 0) {
+ if (leakedroots == 1) {
+ fprintf(stderr,
+"JS engine warning: 1 GC root remains after destroying the JSRuntime.\n"
+" This root may point to freed memory. Objects reachable\n"
+" through it have not been finalized.\n");
+ } else {
+ fprintf(stderr,
+"JS engine warning: %lu GC roots remain after destroying the JSRuntime.\n"
+" These roots may point to freed memory. Objects reachable\n"
+" through them have not been finalized.\n",
+ (unsigned long) leakedroots);
+ }
+ }
+}
+
+typedef struct NamedRootDumpArgs {
+ void (*dump)(const char *name, void *rp, void *data);
+ void *data;
+} NamedRootDumpArgs;
+
+JS_STATIC_DLL_CALLBACK(JSDHashOperator)
+js_named_root_dumper(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 number,
+ void *arg)
+{
+ NamedRootDumpArgs *args = (NamedRootDumpArgs *) arg;
+ JSGCRootHashEntry *rhe = (JSGCRootHashEntry *)hdr;
+
+ if (rhe->name)
+ args->dump(rhe->name, rhe->root, args->data);
+ return JS_DHASH_NEXT;
+}
+
+void
+js_DumpNamedRoots(JSRuntime *rt,
+ void (*dump)(const char *name, void *rp, void *data),
+ void *data)
+{
+ NamedRootDumpArgs args;
+
+ args.dump = dump;
+ args.data = data;
+ JS_DHashTableEnumerate(&rt->gcRootsHash, js_named_root_dumper, &args);
+}
+
+#endif /* DEBUG */
+
+typedef struct GCRootMapArgs {
+ JSGCRootMapFun map;
+ void *data;
+} GCRootMapArgs;
+
+JS_STATIC_DLL_CALLBACK(JSDHashOperator)
+js_gcroot_mapper(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 number,
+ void *arg)
+{
+ GCRootMapArgs *args = (GCRootMapArgs *) arg;
+ JSGCRootHashEntry *rhe = (JSGCRootHashEntry *)hdr;
+ intN mapflags;
+ JSDHashOperator op;
+
+ mapflags = args->map(rhe->root, rhe->name, args->data);
+
+#if JS_MAP_GCROOT_NEXT == JS_DHASH_NEXT && \
+ JS_MAP_GCROOT_STOP == JS_DHASH_STOP && \
+ JS_MAP_GCROOT_REMOVE == JS_DHASH_REMOVE
+ op = (JSDHashOperator)mapflags;
+#else
+ op = JS_DHASH_NEXT;
+ if (mapflags & JS_MAP_GCROOT_STOP)
+ op |= JS_DHASH_STOP;
+ if (mapflags & JS_MAP_GCROOT_REMOVE)
+ op |= JS_DHASH_REMOVE;
+#endif
+
+ return op;
+}
+
+uint32
+js_MapGCRoots(JSRuntime *rt, JSGCRootMapFun map, void *data)
+{
+ GCRootMapArgs args;
+ uint32 rv;
+
+ args.map = map;
+ args.data = data;
+ JS_LOCK_GC(rt);
+ rv = JS_DHashTableEnumerate(&rt->gcRootsHash, js_gcroot_mapper, &args);
+ JS_UNLOCK_GC(rt);
+ return rv;
+}
+
+JSBool
+js_RegisterCloseableIterator(JSContext *cx, JSObject *obj)
+{
+ JSRuntime *rt;
+ JSBool ok;
+
+ rt = cx->runtime;
+ JS_ASSERT(!rt->gcRunning);
+
+ JS_LOCK_GC(rt);
+ ok = AddToPtrTable(cx, &rt->gcIteratorTable, &iteratorTableInfo, obj);
+ JS_UNLOCK_GC(rt);
+ return ok;
+}
+
+static void
+CloseIteratorStates(JSContext *cx)
+{
+ JSRuntime *rt;
+ size_t count, newCount, i;
+ void **array;
+ JSObject *obj;
+
+ rt = cx->runtime;
+ count = rt->gcIteratorTable.count;
+ array = rt->gcIteratorTable.array;
+
+ newCount = 0;
+ for (i = 0; i != count; ++i) {
+ obj = (JSObject *)array[i];
+ if (js_IsAboutToBeFinalized(cx, obj))
+ js_CloseIteratorState(cx, obj);
+ else
+ array[newCount++] = obj;
+ }
+ ShrinkPtrTable(&rt->gcIteratorTable, &iteratorTableInfo, newCount);
+}
+
+#if JS_HAS_GENERATORS
+
+void
+js_RegisterGenerator(JSContext *cx, JSGenerator *gen)
+{
+ JSRuntime *rt;
+
+ rt = cx->runtime;
+ JS_ASSERT(!rt->gcRunning);
+ JS_ASSERT(rt->state != JSRTS_LANDING);
+ JS_ASSERT(gen->state == JSGEN_NEWBORN);
+
+ JS_LOCK_GC(rt);
+ gen->next = rt->gcCloseState.reachableList;
+ rt->gcCloseState.reachableList = gen;
+ METER(rt->gcStats.nclose++);
+ METER(rt->gcStats.maxnclose = JS_MAX(rt->gcStats.maxnclose,
+ rt->gcStats.nclose));
+ JS_UNLOCK_GC(rt);
+}
+
+/*
+ * We do not run close hooks when the parent scope of the generator instance
+ * becomes unreachable to prevent denial-of-service and resource leakage from
+ * misbehaved generators.
+ *
+ * Called from the GC.
+ */
+static JSBool
+CanScheduleCloseHook(JSGenerator *gen)
+{
+ JSObject *parent;
+ JSBool canSchedule;
+
+ /* Avoid OBJ_GET_PARENT overhead as we are in GC. */
+ parent = JSVAL_TO_OBJECT(gen->obj->slots[JSSLOT_PARENT]);
+ canSchedule = *js_GetGCThingFlags(parent) & GCF_MARK;
+#ifdef DEBUG_igor
+ if (!canSchedule) {
+ fprintf(stderr, "GEN: Kill without schedule, gen=%p parent=%p\n",
+ (void *)gen, (void *)parent);
+ }
+#endif
+ return canSchedule;
+}
+
+/*
+ * Check if we should delay execution of the close hook.
+ *
+ * Called outside GC or any locks.
+ *
+ * XXX The current implementation is a hack that embeds the knowledge of the
+ * browser embedding pending the resolution of bug 352788. In the browser we
+ * must not close any generators that came from a page that is currently in
+ * the browser history. We detect that using the fact that, in the browser,
+ * a scope belongs to history if scope->outerObject->innerObject != scope.
+ */
+static JSBool
+ShouldDeferCloseHook(JSContext *cx, JSGenerator *gen, JSBool *defer)
+{
+ JSObject *parent, *obj;
+ JSClass *clasp;
+ JSExtendedClass *xclasp;
+
+ /*
+ * This is called outside any locks, so use thread-safe macros to access
+ * parent and classes.
+ */
+ *defer = JS_FALSE;
+ parent = OBJ_GET_PARENT(cx, gen->obj);
+ clasp = OBJ_GET_CLASS(cx, parent);
+ if (clasp->flags & JSCLASS_IS_EXTENDED) {
+ xclasp = (JSExtendedClass *)clasp;
+ if (xclasp->outerObject) {
+ obj = xclasp->outerObject(cx, parent);
+ if (!obj)
+ return JS_FALSE;
+ OBJ_TO_INNER_OBJECT(cx, obj);
+ if (!obj)
+ return JS_FALSE;
+ *defer = obj != parent;
+ }
+ }
+#ifdef DEBUG_igor
+ if (*defer) {
+ fprintf(stderr, "GEN: deferring, gen=%p parent=%p\n",
+ (void *)gen, (void *)parent);
+ }
+#endif
+ return JS_TRUE;
+}
+
+/*
+ * Find all unreachable generators and move them to the todo queue from
+ * rt->gcCloseState.reachableList to execute their close hooks after the GC
+ * cycle completes. To ensure liveness during the sweep phase we mark all
+ * generators we are going to close later.
+ */
+static void
+FindAndMarkObjectsToClose(JSContext *cx, JSGCInvocationKind gckind,
+ JSGenerator **todoQueueTail)
+{
+ JSRuntime *rt;
+ JSGenerator *todo, **genp, *gen;
+
+ rt = cx->runtime;
+ todo = NULL;
+ genp = &rt->gcCloseState.reachableList;
+ while ((gen = *genp) != NULL) {
+ if (*js_GetGCThingFlags(gen->obj) & GCF_MARK) {
+ genp = &gen->next;
+ } else {
+ /* Generator must not be executing when it becomes unreachable. */
+ JS_ASSERT(gen->state == JSGEN_NEWBORN ||
+ gen->state == JSGEN_OPEN ||
+ gen->state == JSGEN_CLOSED);
+
+ *genp = gen->next;
+ if (gen->state == JSGEN_OPEN &&
+ js_FindFinallyHandler(gen->frame.script, gen->frame.pc) &&
+ CanScheduleCloseHook(gen)) {
+ /*
+ * Generator yielded inside a try with a finally block.
+ * Schedule it for closing.
+ *
+ * We keep generators that yielded outside try-with-finally
+ * with gen->state == JSGEN_OPEN. The finalizer must deal with
+ * open generators as we may skip the close hooks, see below.
+ */
+ gen->next = NULL;
+ *todoQueueTail = gen;
+ todoQueueTail = &gen->next;
+ if (!todo)
+ todo = gen;
+ METER(JS_ASSERT(rt->gcStats.nclose));
+ METER(rt->gcStats.nclose--);
+ METER(rt->gcStats.closelater++);
+ METER(rt->gcStats.maxcloselater
+ = JS_MAX(rt->gcStats.maxcloselater,
+ rt->gcStats.closelater));
+ }
+ }
+ }
+
+ if (gckind == GC_LAST_CONTEXT) {
+ /*
+ * Remove scheduled hooks on shutdown as it is too late to run them:
+ * we do not allow execution of arbitrary scripts at this point.
+ */
+ rt->gcCloseState.todoQueue = NULL;
+ } else {
+ /*
+ * Mark just-found unreachable generators *after* we scan the global
+ * list to prevent a generator that refers to other unreachable
+ * generators from keeping them on gcCloseState.reachableList.
+ */
+ for (gen = todo; gen; gen = gen->next)
+ GC_MARK(cx, gen->obj, "newly scheduled generator");
+ }
+}
+
+/*
+ * Mark unreachable generators already scheduled to close and return the tail
+ * pointer to JSGCCloseState.todoQueue.
+ */
+static JSGenerator **
+MarkScheduledGenerators(JSContext *cx)
+{
+ JSRuntime *rt;
+ JSGenerator **genp, *gen;
+
+ rt = cx->runtime;
+ genp = &rt->gcCloseState.todoQueue;
+ while ((gen = *genp) != NULL) {
+ if (CanScheduleCloseHook(gen)) {
+ GC_MARK(cx, gen->obj, "scheduled generator");
+ genp = &gen->next;
+ } else {
+ /* Discard the generator from the list if its schedule is over. */
+ *genp = gen->next;
+ METER(JS_ASSERT(rt->gcStats.closelater > 0));
+ METER(rt->gcStats.closelater--);
+ }
+ }
+ return genp;
+}
+
+#ifdef JS_THREADSAFE
+# define GC_RUNNING_CLOSE_HOOKS_PTR(cx) \
+ (&(cx)->thread->gcRunningCloseHooks)
+#else
+# define GC_RUNNING_CLOSE_HOOKS_PTR(cx) \
+ (&(cx)->runtime->gcCloseState.runningCloseHook)
+#endif
+
+typedef struct JSTempCloseList {
+ JSTempValueRooter tvr;
+ JSGenerator *head;
+} JSTempCloseList;
+
+JS_STATIC_DLL_CALLBACK(void)
+mark_temp_close_list(JSContext *cx, JSTempValueRooter *tvr)
+{
+ JSTempCloseList *list = (JSTempCloseList *)tvr;
+ JSGenerator *gen;
+
+ for (gen = list->head; gen; gen = gen->next)
+ GC_MARK(cx, gen->obj, "temp list generator");
+}
+
+#define JS_PUSH_TEMP_CLOSE_LIST(cx, tempList) \
+ JS_PUSH_TEMP_ROOT_MARKER(cx, mark_temp_close_list, &(tempList)->tvr)
+
+#define JS_POP_TEMP_CLOSE_LIST(cx, tempList) \
+ JS_BEGIN_MACRO \
+ JS_ASSERT((tempList)->tvr.u.marker == mark_temp_close_list); \
+ JS_POP_TEMP_ROOT(cx, &(tempList)->tvr); \
+ JS_END_MACRO
+
+JSBool
+js_RunCloseHooks(JSContext *cx)
+{
+ JSRuntime *rt;
+ JSTempCloseList tempList;
+ JSStackFrame *fp;
+ JSGenerator **genp, *gen;
+ JSBool ok, defer;
+#if JS_GCMETER
+ uint32 deferCount;
+#endif
+
+ rt = cx->runtime;
+
+ /*
+ * It is OK to access todoQueue outside the lock here. When many threads
+ * update the todo list, accessing some older value of todoQueue in the
+     * worst case just delays the execution of close hooks.
+ */
+ if (!rt->gcCloseState.todoQueue)
+ return JS_TRUE;
+
+ /*
+     * To prevent an infinite loop when a close hook creates more objects with
+     * close hooks and then triggers GC, we ignore recursive invocations of
+     * js_RunCloseHooks and limit the number of hooks to execute to the initial
+     * size of the list.
+ */
+ if (*GC_RUNNING_CLOSE_HOOKS_PTR(cx))
+ return JS_TRUE;
+
+ *GC_RUNNING_CLOSE_HOOKS_PTR(cx) = JS_TRUE;
+
+ JS_LOCK_GC(rt);
+ tempList.head = rt->gcCloseState.todoQueue;
+ JS_PUSH_TEMP_CLOSE_LIST(cx, &tempList);
+ rt->gcCloseState.todoQueue = NULL;
+ METER(rt->gcStats.closelater = 0);
+ rt->gcPoke = JS_TRUE;
+ JS_UNLOCK_GC(rt);
+
+ /*
+ * Set aside cx->fp since we do not want a close hook using caller or
+ * other means to backtrace into whatever stack might be active when
+ * running the hook. We store the current frame on the dormant list to
+ * protect against GC that the hook can trigger.
+ */
+ fp = cx->fp;
+ if (fp) {
+ JS_ASSERT(!fp->dormantNext);
+ fp->dormantNext = cx->dormantFrameChain;
+ cx->dormantFrameChain = fp;
+ }
+ cx->fp = NULL;
+
+ genp = &tempList.head;
+ ok = JS_TRUE;
+ while ((gen = *genp) != NULL) {
+ ok = ShouldDeferCloseHook(cx, gen, &defer);
+ if (!ok) {
+ /* Quit ASAP discarding the hook. */
+ *genp = gen->next;
+ break;
+ }
+ if (defer) {
+ genp = &gen->next;
+ METER(deferCount++);
+ continue;
+ }
+ ok = js_CloseGeneratorObject(cx, gen);
+
+ /*
+ * Unlink the generator after closing it to make sure it always stays
+ * rooted through tempList.
+ */
+ *genp = gen->next;
+
+ if (cx->throwing) {
+ /*
+ * Report the exception thrown by the close hook and continue to
+ * execute the rest of the hooks.
+ */
+ if (!js_ReportUncaughtException(cx))
+ JS_ClearPendingException(cx);
+ ok = JS_TRUE;
+ } else if (!ok) {
+ /*
+ * Assume this is a stop signal from the branch callback or
+ * other quit ASAP condition. Break execution until the next
+ * invocation of js_RunCloseHooks.
+ */
+ break;
+ }
+ }
+
+ cx->fp = fp;
+ if (fp) {
+ JS_ASSERT(cx->dormantFrameChain == fp);
+ cx->dormantFrameChain = fp->dormantNext;
+ fp->dormantNext = NULL;
+ }
+
+ if (tempList.head) {
+ /*
+ * Some close hooks were not yet executed; put them back into the
+ * scheduled list.
+ */
+ while ((gen = *genp) != NULL) {
+ genp = &gen->next;
+ METER(deferCount++);
+ }
+
+ /* Now genp is a pointer to the tail of tempList. */
+ JS_LOCK_GC(rt);
+ *genp = rt->gcCloseState.todoQueue;
+ rt->gcCloseState.todoQueue = tempList.head;
+ METER(rt->gcStats.closelater += deferCount);
+ METER(rt->gcStats.maxcloselater
+ = JS_MAX(rt->gcStats.maxcloselater, rt->gcStats.closelater));
+ JS_UNLOCK_GC(rt);
+ }
+
+ JS_POP_TEMP_CLOSE_LIST(cx, &tempList);
+ *GC_RUNNING_CLOSE_HOOKS_PTR(cx) = JS_FALSE;
+
+ return ok;
+}
+
+#endif /* JS_HAS_GENERATORS */
+
+#if defined(DEBUG_brendan) || defined(DEBUG_timeless)
+#define DEBUG_gchist
+#endif
+
+#ifdef DEBUG_gchist
+#define NGCHIST 64
+
+static struct GCHist {
+ JSBool lastDitch;
+ JSGCThing *freeList;
+} gchist[NGCHIST];
+
+unsigned gchpos;
+#endif
+
+void *
+js_NewGCThing(JSContext *cx, uintN flags, size_t nbytes)
+{
+ JSRuntime *rt;
+ uintN flindex;
+ JSBool doGC;
+ JSGCThing *thing;
+ uint8 *flagp, *firstPage;
+ JSGCArenaList *arenaList;
+ jsuword offset;
+ JSGCArena *a;
+ JSLocalRootStack *lrs;
+#ifdef JS_THREADSAFE
+ JSBool gcLocked;
+ uintN localMallocBytes;
+ JSGCThing **flbase, **lastptr;
+ JSGCThing *tmpthing;
+ uint8 *tmpflagp;
+ uintN maxFreeThings; /* max to take from the global free list */
+ METER(size_t nfree);
+#endif
+
+ rt = cx->runtime;
+ METER(rt->gcStats.alloc++); /* this is not thread-safe */
+ nbytes = JS_ROUNDUP(nbytes, sizeof(JSGCThing));
+ flindex = GC_FREELIST_INDEX(nbytes);
+
+#ifdef JS_THREADSAFE
+ gcLocked = JS_FALSE;
+ JS_ASSERT(cx->thread);
+ flbase = cx->thread->gcFreeLists;
+ JS_ASSERT(flbase);
+ thing = flbase[flindex];
+ localMallocBytes = cx->thread->gcMallocBytes;
+ if (thing && rt->gcMaxMallocBytes - rt->gcMallocBytes > localMallocBytes) {
+ flagp = thing->flagp;
+ flbase[flindex] = thing->next;
+ METER(rt->gcStats.localalloc++); /* this is not thread-safe */
+ goto success;
+ }
+
+ JS_LOCK_GC(rt);
+ gcLocked = JS_TRUE;
+
+ /* Transfer thread-local counter to global one. */
+ if (localMallocBytes != 0) {
+ cx->thread->gcMallocBytes = 0;
+ if (rt->gcMaxMallocBytes - rt->gcMallocBytes < localMallocBytes)
+ rt->gcMallocBytes = rt->gcMaxMallocBytes;
+ else
+ rt->gcMallocBytes += localMallocBytes;
+ }
+#endif
+ JS_ASSERT(!rt->gcRunning);
+ if (rt->gcRunning) {
+ METER(rt->gcStats.finalfail++);
+ JS_UNLOCK_GC(rt);
+ return NULL;
+ }
+
+#ifdef TOO_MUCH_GC
+#ifdef WAY_TOO_MUCH_GC
+ rt->gcPoke = JS_TRUE;
+#endif
+ doGC = JS_TRUE;
+#else
+ doGC = (rt->gcMallocBytes >= rt->gcMaxMallocBytes);
+#endif
+
+ arenaList = &rt->gcArenaList[flindex];
+ for (;;) {
+ if (doGC) {
+ /*
+ * Keep rt->gcLock across the call into js_GC so we don't starve
+ * and lose to racing threads that deplete the heap just after
+ * js_GC has replenished it (or has synchronized with a racing
+ * GC that collected a bunch of garbage). This unfair scheduling
+ * can happen on certain operating systems. For the gory details,
+ * see bug 162779 at https://bugzilla.mozilla.org/.
+ */
+ js_GC(cx, GC_LAST_DITCH);
+ METER(rt->gcStats.retry++);
+ }
+
+ /* Try to get thing from the free list. */
+ thing = arenaList->freeList;
+ if (thing) {
+ arenaList->freeList = thing->next;
+ flagp = thing->flagp;
+ JS_ASSERT(*flagp & GCF_FINAL);
+ METER(arenaList->stats.freelen--);
+ METER(arenaList->stats.recycle++);
+
+#ifdef JS_THREADSAFE
+ /*
+ * Refill the local free list by taking several things from the
+ * global free list unless we are still at the rt->gcMaxMallocBytes
+ * barrier or the free list is already populated. The former
+ * happens when GC is canceled due to !gcCallback(cx, JSGC_BEGIN)
+ * or no gcPoke. The latter is caused by allocating new things
+ * in gcCallback(cx, JSGC_END).
+ */
+ if (rt->gcMallocBytes >= rt->gcMaxMallocBytes || flbase[flindex])
+ break;
+ tmpthing = arenaList->freeList;
+ if (tmpthing) {
+ maxFreeThings = MAX_THREAD_LOCAL_THINGS;
+ do {
+ if (!tmpthing->next)
+ break;
+ tmpthing = tmpthing->next;
+ } while (--maxFreeThings != 0);
+
+ flbase[flindex] = arenaList->freeList;
+ arenaList->freeList = tmpthing->next;
+ tmpthing->next = NULL;
+ }
+#endif
+ break;
+ }
+
+ /* Allocate from the tail of the last arena or from a new arena if we can. */
+ if ((arenaList->last && arenaList->lastLimit != GC_THINGS_SIZE) ||
+ NewGCArena(rt, arenaList)) {
+
+ offset = arenaList->lastLimit;
+ if ((offset & GC_PAGE_MASK) == 0) {
+ /*
+ * Skip JSGCPageInfo record located at GC_PAGE_SIZE boundary.
+ */
+ offset += PAGE_THING_GAP(nbytes);
+ }
+ JS_ASSERT(offset + nbytes <= GC_THINGS_SIZE);
+ arenaList->lastLimit = (uint16)(offset + nbytes);
+ a = arenaList->last;
+ firstPage = (uint8 *)FIRST_THING_PAGE(a);
+ thing = (JSGCThing *)(firstPage + offset);
+ flagp = a->base + offset / sizeof(JSGCThing);
+ if (flagp >= firstPage)
+ flagp += GC_THINGS_SIZE;
+ METER(++arenaList->stats.nthings);
+ METER(arenaList->stats.maxthings =
+ JS_MAX(arenaList->stats.nthings,
+ arenaList->stats.maxthings));
+
+#ifdef JS_THREADSAFE
+ /*
+ * Refill the local free list by taking free things from the last
+ * arena. Prefer to order free things by ascending address in the
+ * (unscientific) hope of better cache locality.
+ */
+ if (rt->gcMallocBytes >= rt->gcMaxMallocBytes || flbase[flindex])
+ break;
+ METER(nfree = 0);
+ lastptr = &flbase[flindex];
+ maxFreeThings = MAX_THREAD_LOCAL_THINGS;
+ for (offset = arenaList->lastLimit;
+ offset != GC_THINGS_SIZE && maxFreeThings-- != 0;
+ offset += nbytes) {
+ if ((offset & GC_PAGE_MASK) == 0)
+ offset += PAGE_THING_GAP(nbytes);
+ JS_ASSERT(offset + nbytes <= GC_THINGS_SIZE);
+ tmpflagp = a->base + offset / sizeof(JSGCThing);
+ if (tmpflagp >= firstPage)
+ tmpflagp += GC_THINGS_SIZE;
+
+ tmpthing = (JSGCThing *)(firstPage + offset);
+ tmpthing->flagp = tmpflagp;
+ *tmpflagp = GCF_FINAL; /* signifying that thing is free */
+
+ *lastptr = tmpthing;
+ lastptr = &tmpthing->next;
+ METER(++nfree);
+ }
+ arenaList->lastLimit = offset;
+ *lastptr = NULL;
+ METER(arenaList->stats.freelen += nfree);
+#endif
+ break;
+ }
+
+ /* Consider doing a "last ditch" GC unless already tried. */
+ if (doGC)
+ goto fail;
+ rt->gcPoke = JS_TRUE;
+ doGC = JS_TRUE;
+ }
+
+ /* We successfully allocated the thing. */
+#ifdef JS_THREADSAFE
+ success:
+#endif
+ lrs = cx->localRootStack;
+ if (lrs) {
+ /*
+ * If we're in a local root scope, don't set newborn[type] at all, to
+ * avoid entraining garbage from it for an unbounded amount of time
+ * on this context. A caller will leave the local root scope and pop
+ * this reference, allowing thing to be GC'd if it has no other refs.
+ * See JS_EnterLocalRootScope and related APIs.
+ */
+ if (js_PushLocalRoot(cx, lrs, (jsval) thing) < 0) {
+ /*
+ * When we fail for a thing allocated through the tail of the last
+ * arena, the thing's flag byte is not initialized. So to prevent the GC
+ * from accessing the uninitialized flags during finalization, we
+ * always mark the thing as final. See bug 337407.
+ */
+ *flagp = GCF_FINAL;
+ goto fail;
+ }
+ } else {
+ /*
+ * No local root scope, so we're stuck with the old, fragile model of
+ * depending on a pigeon-hole newborn per type per context.
+ */
+ cx->weakRoots.newborn[flags & GCF_TYPEMASK] = thing;
+ }
+
+ /* We can't fail now, so update flags and rt->gc{,Private}Bytes. */
+ *flagp = (uint8)flags;
+
+ /*
+ * Clear thing before unlocking in case a GC run is about to scan it,
+ * finding it via newborn[].
+ */
+ thing->next = NULL;
+ thing->flagp = NULL;
+#ifdef DEBUG_gchist
+ gchist[gchpos].lastDitch = doGC;
+ gchist[gchpos].freeList = rt->gcArenaList[flindex].freeList;
+ if (++gchpos == NGCHIST)
+ gchpos = 0;
+#endif
+ METER(if (flags & GCF_LOCK) rt->gcStats.lockborn++);
+ METER(++rt->gcArenaList[flindex].stats.totalnew);
+#ifdef JS_THREADSAFE
+ if (gcLocked)
+ JS_UNLOCK_GC(rt);
+#endif
+ return thing;
+
+fail:
+#ifdef JS_THREADSAFE
+ if (gcLocked)
+ JS_UNLOCK_GC(rt);
+#endif
+ METER(rt->gcStats.fail++);
+ JS_ReportOutOfMemory(cx);
+ return NULL;
+}
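+
+#if 0
+/*
+ * Illustrative sketch only (not part of the original source): a typical
+ * caller pattern for js_NewGCThing, here allocating a double-sized thing.
+ * The helper name is hypothetical; real callers live in other files.
+ * On failure js_NewGCThing has already reported out-of-memory, so the
+ * caller just propagates NULL.
+ */
+static jsdouble *
+ExampleNewDouble(JSContext *cx, jsdouble d)
+{
+    jsdouble *dp;
+
+    dp = (jsdouble *) js_NewGCThing(cx, GCX_DOUBLE, sizeof(jsdouble));
+    if (!dp)
+        return NULL;
+    *dp = d;    /* the newborn root set by js_NewGCThing keeps dp alive */
+    return dp;
+}
+#endif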
+
+JSBool
+js_LockGCThing(JSContext *cx, void *thing)
+{
+ JSBool ok = js_LockGCThingRT(cx->runtime, thing);
+ if (!ok)
+ JS_ReportOutOfMemory(cx);
+ return ok;
+}
+
+/*
+ * Deep GC-things can't be locked just by setting the GCF_LOCK bit, because
+ * their descendants must be marked by the GC. To find them during the mark
+ * phase, they are added to rt->gcLocksHash, which is created lazily.
+ *
+ * NB: we depend on the order of GC-thing type indexes here!
+ */
+#define GC_TYPE_IS_STRING(t) ((t) == GCX_STRING || \
+ (t) >= GCX_EXTERNAL_STRING)
+#define GC_TYPE_IS_XML(t) ((unsigned)((t) - GCX_NAMESPACE) <= \
+ (unsigned)(GCX_XML - GCX_NAMESPACE))
+#define GC_TYPE_IS_DEEP(t) ((t) == GCX_OBJECT || GC_TYPE_IS_XML(t))
+
+#define IS_DEEP_STRING(t,o) (GC_TYPE_IS_STRING(t) && \
+ JSSTRING_IS_DEPENDENT((JSString *)(o)))
+
+#define GC_THING_IS_DEEP(t,o) (GC_TYPE_IS_DEEP(t) || IS_DEEP_STRING(t, o))
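+
+/*
+ * Worked example (editorial note, not in the original source): with the type
+ * indexes from jsgc.h (GCX_NAMESPACE == 5, GCX_QNAME == 6, GCX_XML == 7),
+ * GC_TYPE_IS_XML relies on unsigned wrap-around: (t) - GCX_NAMESPACE yields
+ * 0, 1 or 2 for the three XML types, while GCX_OBJECT (0) wraps to a huge
+ * unsigned value and fails the <= (GCX_XML - GCX_NAMESPACE) test. Hence only
+ * objects, XML things and dependent strings count as "deep" and get a
+ * rt->gcLocksHash entry from the very first lock.
+ */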
+
+/* This is compatible with JSDHashEntryStub. */
+typedef struct JSGCLockHashEntry {
+ JSDHashEntryHdr hdr;
+ const JSGCThing *thing;
+ uint32 count;
+} JSGCLockHashEntry;
+
+JSBool
+js_LockGCThingRT(JSRuntime *rt, void *thing)
+{
+ JSBool ok, deep;
+ uint8 *flagp;
+ uintN flags, lock, type;
+ JSGCLockHashEntry *lhe;
+
+ ok = JS_TRUE;
+ if (!thing)
+ return ok;
+
+ flagp = js_GetGCThingFlags(thing);
+
+ JS_LOCK_GC(rt);
+ flags = *flagp;
+ lock = (flags & GCF_LOCK);
+ type = (flags & GCF_TYPEMASK);
+ deep = GC_THING_IS_DEEP(type, thing);
+
+ /*
+ * Avoid adding a rt->gcLocksHash entry for shallow things until someone
+ * nests a lock -- then start such an entry with a count of 2, not 1.
+ */
+ if (lock || deep) {
+ if (!rt->gcLocksHash) {
+ rt->gcLocksHash =
+ JS_NewDHashTable(JS_DHashGetStubOps(), NULL,
+ sizeof(JSGCLockHashEntry),
+ GC_ROOTS_SIZE);
+ if (!rt->gcLocksHash) {
+ ok = JS_FALSE;
+ goto done;
+ }
+ } else if (lock == 0) {
+#ifdef DEBUG
+ JSDHashEntryHdr *hdr =
+ JS_DHashTableOperate(rt->gcLocksHash, thing,
+ JS_DHASH_LOOKUP);
+ JS_ASSERT(JS_DHASH_ENTRY_IS_FREE(hdr));
+#endif
+ }
+
+ lhe = (JSGCLockHashEntry *)
+ JS_DHashTableOperate(rt->gcLocksHash, thing, JS_DHASH_ADD);
+ if (!lhe) {
+ ok = JS_FALSE;
+ goto done;
+ }
+ if (!lhe->thing) {
+ lhe->thing = thing;
+ lhe->count = deep ? 1 : 2;
+ } else {
+ JS_ASSERT(lhe->count >= 1);
+ lhe->count++;
+ }
+ }
+
+ *flagp = (uint8)(flags | GCF_LOCK);
+ METER(rt->gcStats.lock++);
+ ok = JS_TRUE;
+done:
+ JS_UNLOCK_GC(rt);
+ return ok;
+}
+
+JSBool
+js_UnlockGCThingRT(JSRuntime *rt, void *thing)
+{
+ uint8 *flagp, flags;
+ JSGCLockHashEntry *lhe;
+
+ if (!thing)
+ return JS_TRUE;
+
+ flagp = js_GetGCThingFlags(thing);
+ JS_LOCK_GC(rt);
+ flags = *flagp;
+
+ if (flags & GCF_LOCK) {
+ if (!rt->gcLocksHash ||
+ (lhe = (JSGCLockHashEntry *)
+ JS_DHashTableOperate(rt->gcLocksHash, thing,
+ JS_DHASH_LOOKUP),
+ JS_DHASH_ENTRY_IS_FREE(&lhe->hdr))) {
+ /* Shallow GC-thing with an implicit lock count of 1. */
+ JS_ASSERT(!GC_THING_IS_DEEP(flags & GCF_TYPEMASK, thing));
+ } else {
+ /* Basis or nested unlock of a deep thing, or nested unlock of a shallow one. */
+ if (--lhe->count != 0)
+ goto out;
+ JS_DHashTableOperate(rt->gcLocksHash, thing, JS_DHASH_REMOVE);
+ }
+ *flagp = (uint8)(flags & ~GCF_LOCK);
+ }
+
+ rt->gcPoke = JS_TRUE;
+out:
+ METER(rt->gcStats.unlock++);
+ JS_UNLOCK_GC(rt);
+ return JS_TRUE;
+}
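+
+#if 0
+/*
+ * Illustrative sketch only (not part of the original source): balanced use
+ * of the locking API. For a shallow thing such as a double, the first
+ * js_LockGCThingRT call just sets GCF_LOCK; a second, nested lock creates a
+ * rt->gcLocksHash entry with count 2, and each js_UnlockGCThingRT undoes one
+ * level. The helper name is hypothetical.
+ */
+static JSBool
+ExampleNestedLock(JSRuntime *rt, void *thing)
+{
+    if (!js_LockGCThingRT(rt, thing))       /* lock #1: sets GCF_LOCK only */
+        return JS_FALSE;
+    if (!js_LockGCThingRT(rt, thing)) {     /* lock #2: hash entry, count 2 */
+        js_UnlockGCThingRT(rt, thing);
+        return JS_FALSE;
+    }
+    /* ... use thing while it is guaranteed to survive GC ... */
+    js_UnlockGCThingRT(rt, thing);          /* count 2 -> 1 */
+    js_UnlockGCThingRT(rt, thing);          /* entry removed, GCF_LOCK cleared */
+    return JS_TRUE;
+}
+#endif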
+
+#ifdef GC_MARK_DEBUG
+
+#include <stdio.h>
+#include "jsprf.h"
+
+typedef struct GCMarkNode GCMarkNode;
+
+struct GCMarkNode {
+ void *thing;
+ const char *name;
+ GCMarkNode *next;
+ GCMarkNode *prev;
+};
+
+JS_FRIEND_DATA(FILE *) js_DumpGCHeap;
+JS_EXPORT_DATA(void *) js_LiveThingToFind;
+
+#ifdef HAVE_XPCONNECT
+#include "dump_xpc.h"
+#endif
+
+static void
+GetObjSlotName(JSScope *scope, JSObject *obj, uint32 slot, char *buf,
+ size_t bufsize)
+{
+ jsval nval;
+ JSScopeProperty *sprop;
+ JSClass *clasp;
+ uint32 key;
+ const char *slotname;
+
+ if (!scope) {
+ JS_snprintf(buf, bufsize, "**UNKNOWN OBJECT MAP ENTRY**");
+ return;
+ }
+
+ sprop = SCOPE_LAST_PROP(scope);
+ while (sprop && sprop->slot != slot)
+ sprop = sprop->parent;
+
+ if (!sprop) {
+ switch (slot) {
+ case JSSLOT_PROTO:
+ JS_snprintf(buf, bufsize, "__proto__");
+ break;
+ case JSSLOT_PARENT:
+ JS_snprintf(buf, bufsize, "__parent__");
+ break;
+ default:
+ slotname = NULL;
+ clasp = LOCKED_OBJ_GET_CLASS(obj);
+ if (clasp->flags & JSCLASS_IS_GLOBAL) {
+ key = slot - JSSLOT_START(clasp);
+#define JS_PROTO(name,code,init) \
+ if ((code) == key) { slotname = js_##name##_str; goto found; }
+#include "jsproto.tbl"
+#undef JS_PROTO
+ }
+ found:
+ if (slotname)
+ JS_snprintf(buf, bufsize, "CLASS_OBJECT(%s)", slotname);
+ else
+ JS_snprintf(buf, bufsize, "**UNKNOWN SLOT %ld**", (long)slot);
+ break;
+ }
+ } else {
+ nval = ID_TO_VALUE(sprop->id);
+ if (JSVAL_IS_INT(nval)) {
+ JS_snprintf(buf, bufsize, "%ld", (long)JSVAL_TO_INT(nval));
+ } else if (JSVAL_IS_STRING(nval)) {
+ JS_snprintf(buf, bufsize, "%s",
+ JS_GetStringBytes(JSVAL_TO_STRING(nval)));
+ } else {
+ JS_snprintf(buf, bufsize, "**FINALIZED ATOM KEY**");
+ }
+ }
+}
+
+static const char *
+gc_object_class_name(void* thing)
+{
+ uint8 *flagp = js_GetGCThingFlags(thing);
+ const char *className = "";
+ static char depbuf[32];
+
+ switch (*flagp & GCF_TYPEMASK) {
+ case GCX_OBJECT: {
+ JSObject *obj = (JSObject *)thing;
+ JSClass *clasp = JSVAL_TO_PRIVATE(obj->slots[JSSLOT_CLASS]);
+ className = clasp->name;
+#ifdef HAVE_XPCONNECT
+ if (clasp->flags & JSCLASS_PRIVATE_IS_NSISUPPORTS) {
+ jsval privateValue = obj->slots[JSSLOT_PRIVATE];
+
+ JS_ASSERT(clasp->flags & JSCLASS_HAS_PRIVATE);
+ if (!JSVAL_IS_VOID(privateValue)) {
+ void *privateThing = JSVAL_TO_PRIVATE(privateValue);
+ const char *xpcClassName = GetXPCObjectClassName(privateThing);
+
+ if (xpcClassName)
+ className = xpcClassName;
+ }
+ }
+#endif
+ break;
+ }
+
+ case GCX_STRING:
+ case GCX_MUTABLE_STRING: {
+ JSString *str = (JSString *)thing;
+ if (JSSTRING_IS_DEPENDENT(str)) {
+ JS_snprintf(depbuf, sizeof depbuf, "start:%u, length:%u",
+ JSSTRDEP_START(str), JSSTRDEP_LENGTH(str));
+ className = depbuf;
+ } else {
+ className = "string";
+ }
+ break;
+ }
+
+ case GCX_DOUBLE:
+ className = "double";
+ break;
+ }
+
+ return className;
+}
+
+static void
+gc_dump_thing(JSContext *cx, JSGCThing *thing, FILE *fp)
+{
+ GCMarkNode *prev = (GCMarkNode *)cx->gcCurrentMarkNode;
+ GCMarkNode *next = NULL;
+ char *path = NULL;
+
+ while (prev) {
+ next = prev;
+ prev = prev->prev;
+ }
+ while (next) {
+ uint8 nextFlags = *js_GetGCThingFlags(next->thing);
+ if ((nextFlags & GCF_TYPEMASK) == GCX_OBJECT) {
+ path = JS_sprintf_append(path, "%s(%s @ 0x%08p).",
+ next->name,
+ gc_object_class_name(next->thing),
+ (JSObject*)next->thing);
+ } else {
+ path = JS_sprintf_append(path, "%s(%s).",
+ next->name,
+ gc_object_class_name(next->thing));
+ }
+ next = next->next;
+ }
+ if (!path)
+ return;
+
+ fprintf(fp, "%08lx ", (long)thing);
+ switch (*js_GetGCThingFlags(thing) & GCF_TYPEMASK) {
+ case GCX_OBJECT:
+ {
+ JSObject *obj = (JSObject *)thing;
+ jsval privateValue = obj->slots[JSSLOT_PRIVATE];
+ void *privateThing = JSVAL_IS_VOID(privateValue)
+ ? NULL
+ : JSVAL_TO_PRIVATE(privateValue);
+ const char *className = gc_object_class_name(thing);
+ fprintf(fp, "object %8p %s", privateThing, className);
+ break;
+ }
+#if JS_HAS_XML_SUPPORT
+ case GCX_NAMESPACE:
+ {
+ JSXMLNamespace *ns = (JSXMLNamespace *)thing;
+ fprintf(fp, "namespace %s:%s",
+ JS_GetStringBytes(ns->prefix), JS_GetStringBytes(ns->uri));
+ break;
+ }
+ case GCX_QNAME:
+ {
+ JSXMLQName *qn = (JSXMLQName *)thing;
+ fprintf(fp, "qname %s(%s):%s",
+ JS_GetStringBytes(qn->prefix), JS_GetStringBytes(qn->uri),
+ JS_GetStringBytes(qn->localName));
+ break;
+ }
+ case GCX_XML:
+ {
+ extern const char *js_xml_class_str[];
+ JSXML *xml = (JSXML *)thing;
+ fprintf(fp, "xml %8p %s", xml, js_xml_class_str[xml->xml_class]);
+ break;
+ }
+#endif
+ case GCX_DOUBLE:
+ fprintf(fp, "double %g", *(jsdouble *)thing);
+ break;
+ case GCX_PRIVATE:
+ fprintf(fp, "private %8p", (void *)thing);
+ break;
+ default:
+ fprintf(fp, "string %s", JS_GetStringBytes((JSString *)thing));
+ break;
+ }
+ fprintf(fp, " via %s\n", path);
+ free(path);
+}
+
+void
+js_MarkNamedGCThing(JSContext *cx, void *thing, const char *name)
+{
+ GCMarkNode markNode;
+
+ if (!thing)
+ return;
+
+ markNode.thing = thing;
+ markNode.name = name;
+ markNode.next = NULL;
+ markNode.prev = (GCMarkNode *)cx->gcCurrentMarkNode;
+ if (markNode.prev)
+ markNode.prev->next = &markNode;
+ cx->gcCurrentMarkNode = &markNode;
+
+ if (thing == js_LiveThingToFind) {
+ /*
+ * Dump js_LiveThingToFind each time we reach it during the marking
+ * phase of GC to print all live references to the thing.
+ */
+ gc_dump_thing(cx, thing, stderr);
+ }
+
+ js_MarkGCThing(cx, thing);
+
+ if (markNode.prev)
+ markNode.prev->next = NULL;
+ cx->gcCurrentMarkNode = markNode.prev;
+}
+
+#endif /* GC_MARK_DEBUG */
+
+static void
+gc_mark_atom_key_thing(void *thing, void *arg)
+{
+ JSContext *cx = (JSContext *) arg;
+
+ GC_MARK(cx, thing, "atom");
+}
+
+void
+js_MarkAtom(JSContext *cx, JSAtom *atom)
+{
+ jsval key;
+
+ if (atom->flags & ATOM_MARK)
+ return;
+ atom->flags |= ATOM_MARK;
+ key = ATOM_KEY(atom);
+ if (JSVAL_IS_GCTHING(key)) {
+#ifdef GC_MARK_DEBUG
+ char name[32];
+
+ if (JSVAL_IS_STRING(key)) {
+ JS_snprintf(name, sizeof name, "'%s'",
+ JS_GetStringBytes(JSVAL_TO_STRING(key)));
+ } else {
+ JS_snprintf(name, sizeof name, "<%x>", key);
+ }
+#endif
+ GC_MARK(cx, JSVAL_TO_GCTHING(key), name);
+ }
+ if (atom->flags & ATOM_HIDDEN)
+ js_MarkAtom(cx, atom->entry.value);
+}
+
+static void
+AddThingToUnscannedBag(JSRuntime *rt, void *thing, uint8 *flagp);
+
+static void
+MarkGCThingChildren(JSContext *cx, void *thing, uint8 *flagp,
+ JSBool shouldCheckRecursion)
+{
+ JSRuntime *rt;
+ JSObject *obj;
+ jsval v, *vp, *end;
+ void *next_thing;
+ uint8 *next_flagp;
+ JSString *str;
+#ifdef JS_GCMETER
+ uint32 tailCallNesting;
+#endif
+#ifdef GC_MARK_DEBUG
+ JSScope *scope;
+ char name[32];
+#endif
+
+ /*
+ * With JS_GC_ASSUME_LOW_C_STACK defined, the mark phase of GC always
+ * uses the non-recursive code that would otherwise be used only under
+ * a low C stack condition.
+ */
+#ifdef JS_GC_ASSUME_LOW_C_STACK
+# define RECURSION_TOO_DEEP() shouldCheckRecursion
+#else
+ int stackDummy;
+# define RECURSION_TOO_DEEP() (shouldCheckRecursion && \
+ !JS_CHECK_STACK_SIZE(cx, stackDummy))
+#endif
+
+ rt = cx->runtime;
+ METER(tailCallNesting = 0);
+ METER(if (++rt->gcStats.cdepth > rt->gcStats.maxcdepth)
+ rt->gcStats.maxcdepth = rt->gcStats.cdepth);
+
+#ifndef GC_MARK_DEBUG
+ start:
+#endif
+ JS_ASSERT(flagp);
+ JS_ASSERT(*flagp & GCF_MARK); /* the caller must already mark the thing */
+ METER(if (++rt->gcStats.depth > rt->gcStats.maxdepth)
+ rt->gcStats.maxdepth = rt->gcStats.depth);
+#ifdef GC_MARK_DEBUG
+ if (js_DumpGCHeap)
+ gc_dump_thing(cx, thing, js_DumpGCHeap);
+#endif
+
+ switch (*flagp & GCF_TYPEMASK) {
+ case GCX_OBJECT:
+ if (RECURSION_TOO_DEEP())
+ goto add_to_unscanned_bag;
+ /* If obj->slots is null, obj must be a newborn. */
+ obj = (JSObject *) thing;
+ vp = obj->slots;
+ if (!vp)
+ break;
+
+ /* Mark slots if they are small enough to be GC-allocated. */
+ if ((vp[-1] + 1) * sizeof(jsval) <= GC_NBYTES_MAX)
+ GC_MARK(cx, vp - 1, "slots");
+
+ /* Set up local variables to loop over unmarked things. */
+ end = vp + ((obj->map->ops->mark)
+ ? obj->map->ops->mark(cx, obj, NULL)
+ : JS_MIN(obj->map->freeslot, obj->map->nslots));
+ thing = NULL;
+ flagp = NULL;
+#ifdef GC_MARK_DEBUG
+ scope = OBJ_IS_NATIVE(obj) ? OBJ_SCOPE(obj) : NULL;
+#endif
+ for (; vp != end; ++vp) {
+ v = *vp;
+ if (!JSVAL_IS_GCTHING(v) || v == JSVAL_NULL)
+ continue;
+ next_thing = JSVAL_TO_GCTHING(v);
+ if (next_thing == thing)
+ continue;
+ next_flagp = js_GetGCThingFlags(next_thing);
+ if (*next_flagp & GCF_MARK)
+ continue;
+ JS_ASSERT(*next_flagp != GCF_FINAL);
+ if (thing) {
+#ifdef GC_MARK_DEBUG
+ GC_MARK(cx, thing, name);
+#else
+ *flagp |= GCF_MARK;
+ MarkGCThingChildren(cx, thing, flagp, JS_TRUE);
+#endif
+ if (*next_flagp & GCF_MARK) {
+ /*
+ * This happens when the recursive MarkGCThingChildren call marks
+ * the thing whose flags *next_flagp refers to.
+ */
+ thing = NULL;
+ continue;
+ }
+ }
+#ifdef GC_MARK_DEBUG
+ GetObjSlotName(scope, obj, vp - obj->slots, name, sizeof name);
+#endif
+ thing = next_thing;
+ flagp = next_flagp;
+ }
+ if (thing) {
+ /*
+ * thing came from the last unmarked GC-thing slot and we
+ * can optimize tail recursion.
+ *
+ * Since we already know that there is enough C stack space,
+ * we clear shouldCheckRecursion to avoid extra checking in
+ * RECURSION_TOO_DEEP.
+ */
+ shouldCheckRecursion = JS_FALSE;
+ goto on_tail_recursion;
+ }
+ break;
+
+#ifdef DEBUG
+ case GCX_STRING:
+ str = (JSString *)thing;
+ JS_ASSERT(!JSSTRING_IS_DEPENDENT(str));
+ break;
+#endif
+
+ case GCX_MUTABLE_STRING:
+ str = (JSString *)thing;
+ if (!JSSTRING_IS_DEPENDENT(str))
+ break;
+ thing = JSSTRDEP_BASE(str);
+ flagp = js_GetGCThingFlags(thing);
+ if (*flagp & GCF_MARK)
+ break;
+#ifdef GC_MARK_DEBUG
+ strcpy(name, "base");
+#endif
+ /* Fallthrough to code to deal with the tail recursion. */
+
+ on_tail_recursion:
+#ifdef GC_MARK_DEBUG
+ /*
+ * Do not eliminate C recursion when debugging to allow
+ * js_MarkNamedGCThing to build a full dump of live GC
+ * things.
+ */
+ GC_MARK(cx, thing, name);
+ break;
+#else
+ /* Eliminate tail recursion for the last unmarked child. */
+ JS_ASSERT(*flagp != GCF_FINAL);
+ METER(++tailCallNesting);
+ *flagp |= GCF_MARK;
+ goto start;
+#endif
+
+#if JS_HAS_XML_SUPPORT
+ case GCX_NAMESPACE:
+ if (RECURSION_TOO_DEEP())
+ goto add_to_unscanned_bag;
+ js_MarkXMLNamespace(cx, (JSXMLNamespace *)thing);
+ break;
+
+ case GCX_QNAME:
+ if (RECURSION_TOO_DEEP())
+ goto add_to_unscanned_bag;
+ js_MarkXMLQName(cx, (JSXMLQName *)thing);
+ break;
+
+ case GCX_XML:
+ if (RECURSION_TOO_DEEP())
+ goto add_to_unscanned_bag;
+ js_MarkXML(cx, (JSXML *)thing);
+ break;
+#endif
+ add_to_unscanned_bag:
+ AddThingToUnscannedBag(cx->runtime, thing, flagp);
+ break;
+ }
+
+#undef RECURSION_TOO_DEEP
+
+ METER(rt->gcStats.depth -= 1 + tailCallNesting);
+ METER(rt->gcStats.cdepth--);
+}
+
+/*
+ * We avoid using PAGE_THING_GAP inside this macro so that the
+ * thingsPerUnscannedChunk calculation is optimized when thingSize is a
+ * power of two.
+ */
+#define GET_GAP_AND_CHUNK_SPAN(thingSize, thingsPerUnscannedChunk, pageGap) \
+ JS_BEGIN_MACRO \
+ if (0 == ((thingSize) & ((thingSize) - 1))) { \
+ pageGap = (thingSize); \
+ thingsPerUnscannedChunk = ((GC_PAGE_SIZE / (thingSize)) \
+ + JS_BITS_PER_WORD - 1) \
+ >> JS_BITS_PER_WORD_LOG2; \
+ } else { \
+ pageGap = GC_PAGE_SIZE % (thingSize); \
+ thingsPerUnscannedChunk = JS_HOWMANY(GC_PAGE_SIZE / (thingSize), \
+ JS_BITS_PER_WORD); \
+ } \
+ JS_END_MACRO
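+
+/*
+ * Worked example (editorial note, not in the original source; it assumes
+ * GC_PAGE_SIZE == 1024 and JS_BITS_PER_WORD == 32, i.e. a 32-bit build).
+ * For thingSize == 8, a power of two, the macro yields pageGap == 8 and
+ * thingsPerUnscannedChunk == (128 + 31) >> 5 == 4, so each bit of
+ * pi->unscannedBitmap covers four things. For thingSize == 24 it yields
+ * pageGap == 1024 % 24 == 16 and thingsPerUnscannedChunk ==
+ * JS_HOWMANY(42, 32) == 2.
+ */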
+
+static void
+AddThingToUnscannedBag(JSRuntime *rt, void *thing, uint8 *flagp)
+{
+ JSGCPageInfo *pi;
+ JSGCArena *arena;
+ size_t thingSize;
+ size_t thingsPerUnscannedChunk;
+ size_t pageGap;
+ size_t chunkIndex;
+ jsuword bit;
+
+ /* Things from the delayed scanning bag are marked as GCF_MARK | GCF_FINAL. */
+ JS_ASSERT((*flagp & (GCF_MARK | GCF_FINAL)) == GCF_MARK);
+ *flagp |= GCF_FINAL;
+
+ METER(rt->gcStats.unscanned++);
+#ifdef DEBUG
+ ++rt->gcUnscannedBagSize;
+ METER(if (rt->gcUnscannedBagSize > rt->gcStats.maxunscanned)
+ rt->gcStats.maxunscanned = rt->gcUnscannedBagSize);
+#endif
+
+ pi = THING_TO_PAGE(thing);
+ arena = PAGE_TO_ARENA(pi);
+ thingSize = arena->list->thingSize;
+ GET_GAP_AND_CHUNK_SPAN(thingSize, thingsPerUnscannedChunk, pageGap);
+ chunkIndex = (((jsuword)thing & GC_PAGE_MASK) - pageGap) /
+ (thingSize * thingsPerUnscannedChunk);
+ JS_ASSERT(chunkIndex < JS_BITS_PER_WORD);
+ bit = (jsuword)1 << chunkIndex;
+ if (pi->unscannedBitmap != 0) {
+ JS_ASSERT(rt->gcUnscannedArenaStackTop);
+ if (thingsPerUnscannedChunk != 1) {
+ if (pi->unscannedBitmap & bit) {
+ /* Chunk already contains things to scan later. */
+ return;
+ }
+ } else {
+ /*
+ * The chunk must not contain things to scan later if there is
+ * only one thing per chunk.
+ */
+ JS_ASSERT(!(pi->unscannedBitmap & bit));
+ }
+ pi->unscannedBitmap |= bit;
+ JS_ASSERT(arena->unscannedPages & ((size_t)1 << PAGE_INDEX(pi)));
+ } else {
+ /*
+ * The thing is the first unscanned thing in the page; set the bit
+ * corresponding to this page in arena->unscannedPages.
+ */
+ pi->unscannedBitmap = bit;
+ JS_ASSERT(PAGE_INDEX(pi) < JS_BITS_PER_WORD);
+ bit = (jsuword)1 << PAGE_INDEX(pi);
+ JS_ASSERT(!(arena->unscannedPages & bit));
+ if (arena->unscannedPages != 0) {
+ arena->unscannedPages |= bit;
+ JS_ASSERT(arena->prevUnscanned);
+ JS_ASSERT(rt->gcUnscannedArenaStackTop);
+ } else {
+ /*
+ * The thing is the first unscanned thing in the whole arena; push
+ * the arena on the stack of unscanned arenas unless the arena
+ * has already been pushed. We detect that through the prevUnscanned
+ * field, which is NULL only for arenas not yet pushed. To ensure
+ * that prevUnscanned != NULL even when the stack contains one
+ * element, we make the prevUnscanned of the arena at the bottom
+ * point to itself.
+ *
+ * See comments in ScanDelayedChildren.
+ */
+ arena->unscannedPages = bit;
+ if (!arena->prevUnscanned) {
+ if (!rt->gcUnscannedArenaStackTop) {
+ /* Stack was empty, mark the arena as bottom element. */
+ arena->prevUnscanned = arena;
+ } else {
+ JS_ASSERT(rt->gcUnscannedArenaStackTop->prevUnscanned);
+ arena->prevUnscanned = rt->gcUnscannedArenaStackTop;
+ }
+ rt->gcUnscannedArenaStackTop = arena;
+ }
+ }
+ }
+ JS_ASSERT(rt->gcUnscannedArenaStackTop);
+}
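+
+/*
+ * Editorial illustration (not in the original source) of the unscanned-arena
+ * stack invariant established above: after pushing arena A and then arena B,
+ *
+ *   rt->gcUnscannedArenaStackTop == B,
+ *   B->prevUnscanned == A,
+ *   A->prevUnscanned == A   (the bottom element points to itself),
+ *
+ * so prevUnscanned != NULL exactly for arenas currently on the stack, which
+ * is what ScanDelayedChildren below asserts and relies on when popping.
+ */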
+
+static void
+ScanDelayedChildren(JSContext *cx)
+{
+ JSRuntime *rt;
+ JSGCArena *arena;
+ size_t thingSize;
+ size_t thingsPerUnscannedChunk;
+ size_t pageGap;
+ size_t pageIndex;
+ JSGCPageInfo *pi;
+ size_t chunkIndex;
+ size_t thingOffset, thingLimit;
+ JSGCThing *thing;
+ uint8 *flagp;
+ JSGCArena *prevArena;
+
+ rt = cx->runtime;
+ arena = rt->gcUnscannedArenaStackTop;
+ if (!arena) {
+ JS_ASSERT(rt->gcUnscannedBagSize == 0);
+ return;
+ }
+
+ init_size:
+ thingSize = arena->list->thingSize;
+ GET_GAP_AND_CHUNK_SPAN(thingSize, thingsPerUnscannedChunk, pageGap);
+ for (;;) {
+ /*
+ * The following asserts verify that the current arena belongs to
+ * the unscanned stack, since AddThingToUnscannedBag ensures that even
+ * for the stack's bottom arena prevUnscanned != NULL but points to the
+ * arena itself.
+ */
+ JS_ASSERT(arena->prevUnscanned);
+ JS_ASSERT(rt->gcUnscannedArenaStackTop->prevUnscanned);
+ while (arena->unscannedPages != 0) {
+ pageIndex = JS_FLOOR_LOG2W(arena->unscannedPages);
+ JS_ASSERT(pageIndex < GC_PAGE_COUNT);
+ pi = (JSGCPageInfo *)(FIRST_THING_PAGE(arena) +
+ pageIndex * GC_PAGE_SIZE);
+ JS_ASSERT(pi->unscannedBitmap);
+ chunkIndex = JS_FLOOR_LOG2W(pi->unscannedBitmap);
+ pi->unscannedBitmap &= ~((jsuword)1 << chunkIndex);
+ if (pi->unscannedBitmap == 0)
+ arena->unscannedPages &= ~((jsuword)1 << pageIndex);
+ thingOffset = (pageGap
+ + chunkIndex * thingsPerUnscannedChunk * thingSize);
+ JS_ASSERT(thingOffset >= sizeof(JSGCPageInfo));
+ thingLimit = thingOffset + thingsPerUnscannedChunk * thingSize;
+ if (thingsPerUnscannedChunk != 1) {
+ /*
+ * thingLimit can go beyond the last allocated thing for the
+ * last chunk as the real limit can be inside the chunk.
+ */
+ if (arena->list->last == arena &&
+ arena->list->lastLimit < (pageIndex * GC_PAGE_SIZE +
+ thingLimit)) {
+ thingLimit = (arena->list->lastLimit -
+ pageIndex * GC_PAGE_SIZE);
+ } else if (thingLimit > GC_PAGE_SIZE) {
+ thingLimit = GC_PAGE_SIZE;
+ }
+ JS_ASSERT(thingLimit > thingOffset);
+ }
+ JS_ASSERT(arena->list->last != arena ||
+ arena->list->lastLimit >= (pageIndex * GC_PAGE_SIZE +
+ thingLimit));
+ JS_ASSERT(thingLimit <= GC_PAGE_SIZE);
+
+ for (; thingOffset != thingLimit; thingOffset += thingSize) {
+ /*
+ * XXX: inline js_GetGCThingFlags() to use already available
+ * pi.
+ */
+ thing = (void *)((jsuword)pi + thingOffset);
+ flagp = js_GetGCThingFlags(thing);
+ if (thingsPerUnscannedChunk != 1) {
+ /*
+ * Skip free or already scanned things that share the chunk
+ * with unscanned ones.
+ */
+ if ((*flagp & (GCF_MARK|GCF_FINAL)) != (GCF_MARK|GCF_FINAL))
+ continue;
+ }
+ JS_ASSERT((*flagp & (GCF_MARK|GCF_FINAL))
+ == (GCF_MARK|GCF_FINAL));
+ *flagp &= ~GCF_FINAL;
+#ifdef DEBUG
+ JS_ASSERT(rt->gcUnscannedBagSize != 0);
+ --rt->gcUnscannedBagSize;
+
+ /*
+ * Check that the GC thing type is consistent with the type of
+ * things that can be put into the unscanned bag.
+ */
+ switch (*flagp & GCF_TYPEMASK) {
+ case GCX_OBJECT:
+# if JS_HAS_XML_SUPPORT
+ case GCX_NAMESPACE:
+ case GCX_QNAME:
+ case GCX_XML:
+# endif
+ break;
+ default:
+ JS_ASSERT(0);
+ }
+#endif
+ MarkGCThingChildren(cx, thing, flagp, JS_FALSE);
+ }
+ }
+ /*
+ * We have finished scanning the arena, but we can only pop it from
+ * the stack if the arena is the stack's top.
+ *
+ * When MarkGCThingChildren called above invokes
+ * AddThingToUnscannedBag and the latter pushes new arenas onto the
+ * stack, we have to skip popping this arena until it becomes
+ * the top of the stack again.
+ */
+ if (arena == rt->gcUnscannedArenaStackTop) {
+ prevArena = arena->prevUnscanned;
+ arena->prevUnscanned = NULL;
+ if (arena == prevArena) {
+ /*
+ * prevUnscanned points to itself and we reached the bottom
+ * of the stack.
+ */
+ break;
+ }
+ rt->gcUnscannedArenaStackTop = arena = prevArena;
+ } else {
+ arena = rt->gcUnscannedArenaStackTop;
+ }
+ if (arena->list->thingSize != thingSize)
+ goto init_size;
+ }
+ JS_ASSERT(rt->gcUnscannedArenaStackTop);
+ JS_ASSERT(!rt->gcUnscannedArenaStackTop->prevUnscanned);
+ rt->gcUnscannedArenaStackTop = NULL;
+ JS_ASSERT(rt->gcUnscannedBagSize == 0);
+}
+
+void
+js_MarkGCThing(JSContext *cx, void *thing)
+{
+ uint8 *flagp;
+
+ if (!thing)
+ return;
+
+ flagp = js_GetGCThingFlags(thing);
+ JS_ASSERT(*flagp != GCF_FINAL);
+ if (*flagp & GCF_MARK)
+ return;
+ *flagp |= GCF_MARK;
+
+ if (!cx->insideGCMarkCallback) {
+ MarkGCThingChildren(cx, thing, flagp, JS_TRUE);
+ } else {
+ /*
+ * For API compatibility we allow the callback to assume that
+ * after it calls js_MarkGCThing for the last time, the callback
+ * can start to finalize its own objects that are only referenced
+ * by unmarked GC things.
+ *
+ * Since we do not know which call from inside the callback is the
+ * last, we ensure that the unscanned bag is always empty when we
+ * return to the callback and all marked things are scanned.
+ *
+ * As an optimization we do not check for the stack size here and
+ * pass JS_FALSE as the last argument to MarkGCThingChildren.
+ * Otherwise, with a low C stack, the thing would be pushed to the bag
+ * just to be fed to MarkGCThingChildren from inside
+ * ScanDelayedChildren.
+ */
+ cx->insideGCMarkCallback = JS_FALSE;
+ MarkGCThingChildren(cx, thing, flagp, JS_FALSE);
+ ScanDelayedChildren(cx);
+ cx->insideGCMarkCallback = JS_TRUE;
+ }
+}
+
+JS_STATIC_DLL_CALLBACK(JSDHashOperator)
+gc_root_marker(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 num, void *arg)
+{
+ JSGCRootHashEntry *rhe = (JSGCRootHashEntry *)hdr;
+ jsval *rp = (jsval *)rhe->root;
+ jsval v = *rp;
+
+ /* Ignore null object and scalar values. */
+ if (!JSVAL_IS_NULL(v) && JSVAL_IS_GCTHING(v)) {
+ JSContext *cx = (JSContext *)arg;
+#ifdef DEBUG
+ JSBool root_points_to_gcArenaList = JS_FALSE;
+ jsuword thing = (jsuword) JSVAL_TO_GCTHING(v);
+ uintN i;
+ JSGCArenaList *arenaList;
+ JSGCArena *a;
+ size_t limit;
+
+ for (i = 0; i < GC_NUM_FREELISTS; i++) {
+ arenaList = &cx->runtime->gcArenaList[i];
+ limit = arenaList->lastLimit;
+ for (a = arenaList->last; a; a = a->prev) {
+ if (thing - FIRST_THING_PAGE(a) < limit) {
+ root_points_to_gcArenaList = JS_TRUE;
+ break;
+ }
+ limit = GC_THINGS_SIZE;
+ }
+ }
+ if (!root_points_to_gcArenaList && rhe->name) {
+ fprintf(stderr,
+"JS API usage error: the address passed to JS_AddNamedRoot currently holds an\n"
+"invalid jsval. This is usually caused by a missing call to JS_RemoveRoot.\n"
+"The root's name is \"%s\".\n",
+ rhe->name);
+ }
+ JS_ASSERT(root_points_to_gcArenaList);
+#endif
+
+ GC_MARK(cx, JSVAL_TO_GCTHING(v), rhe->name ? rhe->name : "root");
+ }
+ return JS_DHASH_NEXT;
+}
+
+JS_STATIC_DLL_CALLBACK(JSDHashOperator)
+gc_lock_marker(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 num, void *arg)
+{
+ JSGCLockHashEntry *lhe = (JSGCLockHashEntry *)hdr;
+ void *thing = (void *)lhe->thing;
+ JSContext *cx = (JSContext *)arg;
+
+ GC_MARK(cx, thing, "locked object");
+ return JS_DHASH_NEXT;
+}
+
+#define GC_MARK_JSVALS(cx, len, vec, name) \
+ JS_BEGIN_MACRO \
+ jsval _v, *_vp, *_end; \
+ \
+ for (_vp = vec, _end = _vp + len; _vp < _end; _vp++) { \
+ _v = *_vp; \
+ if (JSVAL_IS_GCTHING(_v)) \
+ GC_MARK(cx, JSVAL_TO_GCTHING(_v), name); \
+ } \
+ JS_END_MACRO
+
+void
+js_MarkStackFrame(JSContext *cx, JSStackFrame *fp)
+{
+ uintN depth, nslots;
+
+ if (fp->callobj)
+ GC_MARK(cx, fp->callobj, "call object");
+ if (fp->argsobj)
+ GC_MARK(cx, fp->argsobj, "arguments object");
+ if (fp->varobj)
+ GC_MARK(cx, fp->varobj, "variables object");
+ if (fp->script) {
+ js_MarkScript(cx, fp->script);
+ if (fp->spbase) {
+ /*
+ * Don't mark what has not been pushed yet, or what has been
+ * popped already.
+ */
+ depth = fp->script->depth;
+ nslots = (JS_UPTRDIFF(fp->sp, fp->spbase)
+ < depth * sizeof(jsval))
+ ? (uintN)(fp->sp - fp->spbase)
+ : depth;
+ GC_MARK_JSVALS(cx, nslots, fp->spbase, "operand");
+ }
+ }
+
+ /* Allow for primitive this parameter due to JSFUN_THISP_* flags. */
+ JS_ASSERT(JSVAL_IS_OBJECT((jsval)fp->thisp) ||
+ (fp->fun && JSFUN_THISP_FLAGS(fp->fun->flags)));
+ if (JSVAL_IS_GCTHING((jsval)fp->thisp))
+ GC_MARK(cx, JSVAL_TO_GCTHING((jsval)fp->thisp), "this");
+
+ /*
+ * Mark fp->argv, even though in the common case it will be marked via our
+ * caller's frame, or via a JSStackHeader if fp was pushed by an external
+ * invocation.
+ *
+ * The hard case is when there is not enough contiguous space in the stack
+ * arena for actual, missing formal, and local root (JSFunctionSpec.extra)
+ * slots. In this case, fp->argv points to new space in a new arena, and
+ * marking the caller's operand stack, or an external caller's allocated
+ * stack tracked by a JSStackHeader, will not mark all the values stored
+ * and addressable via fp->argv.
+ *
+ * So in summary, solely for the hard case of moving argv due to missing
+ * formals and extra roots, we must mark actuals, missing formals, and any
+ * local roots arrayed at fp->argv here.
+ *
+ * It would be good to avoid redundant marking of the same reference, in
+ * the case where fp->argv does point into caller-allocated space tracked
+ * by fp->down->spbase or cx->stackHeaders. This would allow callbacks
+ * such as the forthcoming rt->gcThingCallback (bug 333078) to compute JS
+ * reference counts. So this comment deserves a FIXME bug to cite.
+ */
+ if (fp->argv) {
+ nslots = fp->argc;
+ if (fp->fun) {
+ if (fp->fun->nargs > nslots)
+ nslots = fp->fun->nargs;
+ if (!FUN_INTERPRETED(fp->fun))
+ nslots += fp->fun->u.n.extra;
+ }
+ GC_MARK_JSVALS(cx, nslots + 2, fp->argv - 2, "arg");
+ }
+ if (JSVAL_IS_GCTHING(fp->rval))
+ GC_MARK(cx, JSVAL_TO_GCTHING(fp->rval), "rval");
+ if (fp->vars)
+ GC_MARK_JSVALS(cx, fp->nvars, fp->vars, "var");
+ GC_MARK(cx, fp->scopeChain, "scope chain");
+ if (fp->sharpArray)
+ GC_MARK(cx, fp->sharpArray, "sharp array");
+
+ if (fp->xmlNamespace)
+ GC_MARK(cx, fp->xmlNamespace, "xmlNamespace");
+}
+
+static void
+MarkWeakRoots(JSContext *cx, JSWeakRoots *wr)
+{
+ uintN i;
+ void *thing;
+
+ for (i = 0; i < GCX_NTYPES; i++)
+ GC_MARK(cx, wr->newborn[i], gc_typenames[i]);
+ if (wr->lastAtom)
+ GC_MARK_ATOM(cx, wr->lastAtom);
+ if (JSVAL_IS_GCTHING(wr->lastInternalResult)) {
+ thing = JSVAL_TO_GCTHING(wr->lastInternalResult);
+ if (thing)
+ GC_MARK(cx, thing, "lastInternalResult");
+ }
+}
+
+/*
+ * When gckind is GC_LAST_DITCH, it indicates a call from js_NewGCThing with
+ * rt->gcLock already held; the lock must be kept held on return.
+ */
+void
+js_GC(JSContext *cx, JSGCInvocationKind gckind)
+{
+ JSRuntime *rt;
+ JSBool keepAtoms;
+ uintN i, type;
+ JSContext *iter, *acx;
+#if JS_HAS_GENERATORS
+ JSGenerator **genTodoTail;
+#endif
+ JSStackFrame *fp, *chain;
+ JSStackHeader *sh;
+ JSTempValueRooter *tvr;
+ size_t nbytes, limit, offset;
+ JSGCArena *a, **ap;
+ uint8 flags, *flagp, *firstPage;
+ JSGCThing *thing, *freeList;
+ JSGCArenaList *arenaList;
+ GCFinalizeOp finalizer;
+ JSBool allClear;
+#ifdef JS_THREADSAFE
+ uint32 requestDebit;
+#endif
+
+ rt = cx->runtime;
+#ifdef JS_THREADSAFE
+ /* Avoid deadlock. */
+ JS_ASSERT(!JS_IS_RUNTIME_LOCKED(rt));
+#endif
+
+ if (gckind == GC_LAST_DITCH) {
+ /* The last ditch GC preserves all atoms and weak roots. */
+ keepAtoms = JS_TRUE;
+ } else {
+ JS_CLEAR_WEAK_ROOTS(&cx->weakRoots);
+ rt->gcPoke = JS_TRUE;
+
+ /* Keep atoms when a suspended compile is running on another context. */
+ keepAtoms = (rt->gcKeepAtoms != 0);
+ }
+
+ /*
+ * Don't collect garbage if the runtime isn't up, and cx is not the last
+ * context in the runtime. The last context must force a GC, and nothing
+ * should suppress that final collection or there may be shutdown leaks,
+ * or runtime bloat until the next context is created.
+ */
+ if (rt->state != JSRTS_UP && gckind != GC_LAST_CONTEXT)
+ return;
+
+ restart_after_callback:
+ /*
+ * Let the API user decide to defer a GC if it wants to (unless this
+ * is the last context). Invoke the callback regardless.
+ */
+ if (rt->gcCallback &&
+ !rt->gcCallback(cx, JSGC_BEGIN) &&
+ gckind != GC_LAST_CONTEXT) {
+ return;
+ }
+
+ /* Lock out other GC allocator and collector invocations. */
+ if (gckind != GC_LAST_DITCH)
+ JS_LOCK_GC(rt);
+
+ /* Do nothing if no mutator has executed since the last GC. */
+ if (!rt->gcPoke) {
+ METER(rt->gcStats.nopoke++);
+ if (gckind != GC_LAST_DITCH)
+ JS_UNLOCK_GC(rt);
+ return;
+ }
+ METER(rt->gcStats.poke++);
+ rt->gcPoke = JS_FALSE;
+
+#ifdef JS_THREADSAFE
+ JS_ASSERT(cx->thread->id == js_CurrentThreadId());
+
+ /* Bump gcLevel and return rather than nest on this thread. */
+ if (rt->gcThread == cx->thread) {
+ JS_ASSERT(rt->gcLevel > 0);
+ rt->gcLevel++;
+ METER(if (rt->gcLevel > rt->gcStats.maxlevel)
+ rt->gcStats.maxlevel = rt->gcLevel);
+ if (gckind != GC_LAST_DITCH)
+ JS_UNLOCK_GC(rt);
+ return;
+ }
+
+ /*
+ * If we're in one or more requests (possibly on more than one context)
+ * running on the current thread, indicate, temporarily, that all these
+ * requests are inactive. If cx->thread is NULL, then cx is not using
+ * the request model, and does not contribute to rt->requestCount.
+ */
+ requestDebit = 0;
+ if (cx->thread) {
+ JSCList *head, *link;
+
+ /*
+ * Check all contexts on cx->thread->contextList for active requests,
+ * counting each such context against requestDebit.
+ */
+ head = &cx->thread->contextList;
+ for (link = head->next; link != head; link = link->next) {
+ acx = CX_FROM_THREAD_LINKS(link);
+ JS_ASSERT(acx->thread == cx->thread);
+ if (acx->requestDepth)
+ requestDebit++;
+ }
+ } else {
+ /*
+ * We assert, but check anyway, in case someone is misusing the API.
+ * Avoiding the loop over all of rt's contexts is a win in the event
+ * that the GC runs only on request-less contexts with null threads,
+ * in a special thread such as might be used by the UI/DOM/Layout
+ * "mozilla" or "main" thread in Mozilla-the-browser.
+ */
+ JS_ASSERT(cx->requestDepth == 0);
+ if (cx->requestDepth)
+ requestDebit = 1;
+ }
+ if (requestDebit) {
+ JS_ASSERT(requestDebit <= rt->requestCount);
+ rt->requestCount -= requestDebit;
+ if (rt->requestCount == 0)
+ JS_NOTIFY_REQUEST_DONE(rt);
+ }
+
+ /* If another thread is already in GC, don't attempt GC; wait instead. */
+ if (rt->gcLevel > 0) {
+ /* Bump gcLevel to restart the current GC, so it finds new garbage. */
+ rt->gcLevel++;
+ METER(if (rt->gcLevel > rt->gcStats.maxlevel)
+ rt->gcStats.maxlevel = rt->gcLevel);
+
+ /* Wait for the other thread to finish, then resume our request. */
+ while (rt->gcLevel > 0)
+ JS_AWAIT_GC_DONE(rt);
+ if (requestDebit)
+ rt->requestCount += requestDebit;
+ if (gckind != GC_LAST_DITCH)
+ JS_UNLOCK_GC(rt);
+ return;
+ }
+
+ /* No other thread is in GC, so indicate that we're now in GC. */
+ rt->gcLevel = 1;
+ rt->gcThread = cx->thread;
+
+ /* Wait for all other requests to finish. */
+ while (rt->requestCount > 0)
+ JS_AWAIT_REQUEST_DONE(rt);
+
+#else /* !JS_THREADSAFE */
+
+ /* Bump gcLevel and return rather than nest; the outer gc will restart. */
+ rt->gcLevel++;
+ METER(if (rt->gcLevel > rt->gcStats.maxlevel)
+ rt->gcStats.maxlevel = rt->gcLevel);
+ if (rt->gcLevel > 1)
+ return;
+
+#endif /* !JS_THREADSAFE */
+
+ /*
+ * Set rt->gcRunning here within the GC lock, and after waiting for any
+ * active requests to end, so that new requests that try to JS_AddRoot,
+ * JS_RemoveRoot, or JS_RemoveRootRT block in JS_BeginRequest waiting for
+ * rt->gcLevel to drop to zero, while request-less calls to the *Root*
+ * APIs block in js_AddRoot or js_RemoveRoot (see above in this file),
+ * waiting for GC to finish.
+ */
+ rt->gcRunning = JS_TRUE;
+ JS_UNLOCK_GC(rt);
+
+ /* Reset malloc counter. */
+ rt->gcMallocBytes = 0;
+
+ /* Drop atoms held by the property cache, and clear property weak links. */
+ js_DisablePropertyCache(cx);
+ js_FlushPropertyCache(cx);
+#ifdef DEBUG_scopemeters
+ { extern void js_DumpScopeMeters(JSRuntime *rt);
+ js_DumpScopeMeters(rt);
+ }
+#endif
+
+#ifdef JS_THREADSAFE
+ /*
+ * Set all thread local freelists to NULL. We may visit a thread's
+ * freelist more than once. To avoid redundant clearing we unroll the
+ * current thread's step.
+ *
+ * Also, in case a JSScript wrapped within an object was finalized, we
+ * null acx->thread->gsnCache.script and finish the cache's hashtable.
+ * Note that js_DestroyScript, called from script_finalize, will have
+ * already cleared cx->thread->gsnCache above during finalization, so we
+ * don't have to here.
+ */
+ memset(cx->thread->gcFreeLists, 0, sizeof cx->thread->gcFreeLists);
+ iter = NULL;
+ while ((acx = js_ContextIterator(rt, JS_FALSE, &iter)) != NULL) {
+ if (!acx->thread || acx->thread == cx->thread)
+ continue;
+ memset(acx->thread->gcFreeLists, 0, sizeof acx->thread->gcFreeLists);
+ GSN_CACHE_CLEAR(&acx->thread->gsnCache);
+ }
+#else
+ /* The thread-unsafe case just has to clear the runtime's GSN cache. */
+ GSN_CACHE_CLEAR(&rt->gsnCache);
+#endif
+
+restart:
+ rt->gcNumber++;
+ JS_ASSERT(!rt->gcUnscannedArenaStackTop);
+ JS_ASSERT(rt->gcUnscannedBagSize == 0);
+
+ /*
+ * Mark phase.
+ */
+ JS_DHashTableEnumerate(&rt->gcRootsHash, gc_root_marker, cx);
+ if (rt->gcLocksHash)
+ JS_DHashTableEnumerate(rt->gcLocksHash, gc_lock_marker, cx);
+ js_MarkAtomState(&rt->atomState, keepAtoms, gc_mark_atom_key_thing, cx);
+ js_MarkWatchPoints(cx);
+ js_MarkScriptFilenames(rt, keepAtoms);
+ js_MarkNativeIteratorStates(cx);
+
+#if JS_HAS_GENERATORS
+ genTodoTail = MarkScheduledGenerators(cx);
+ JS_ASSERT(!*genTodoTail);
+#endif
+
+ iter = NULL;
+ while ((acx = js_ContextIterator(rt, JS_TRUE, &iter)) != NULL) {
+ /*
+ * Iterate the frame chain and dormant chains. Temporarily tack the current
+ * frame onto the head of the dormant list to ease iteration.
+ *
+ * (NB: see comment on this whole "dormant" thing in js_Execute.)
+ */
+ chain = acx->fp;
+ if (chain) {
+ JS_ASSERT(!chain->dormantNext);
+ chain->dormantNext = acx->dormantFrameChain;
+ } else {
+ chain = acx->dormantFrameChain;
+ }
+
+ for (fp = chain; fp; fp = chain = chain->dormantNext) {
+ do {
+ js_MarkStackFrame(cx, fp);
+ } while ((fp = fp->down) != NULL);
+ }
+
+ /* Clean up temporary "dormant" linkage. */
+ if (acx->fp)
+ acx->fp->dormantNext = NULL;
+
+ /* Mark other roots-by-definition in acx. */
+ GC_MARK(cx, acx->globalObject, "global object");
+ MarkWeakRoots(cx, &acx->weakRoots);
+ if (acx->throwing) {
+ if (JSVAL_IS_GCTHING(acx->exception))
+ GC_MARK(cx, JSVAL_TO_GCTHING(acx->exception), "exception");
+ } else {
+ /* Avoid keeping GC-ed junk stored in JSContext.exception. */
+ acx->exception = JSVAL_NULL;
+ }
+#if JS_HAS_LVALUE_RETURN
+ if (acx->rval2set && JSVAL_IS_GCTHING(acx->rval2))
+ GC_MARK(cx, JSVAL_TO_GCTHING(acx->rval2), "rval2");
+#endif
+
+ for (sh = acx->stackHeaders; sh; sh = sh->down) {
+ METER(rt->gcStats.stackseg++);
+ METER(rt->gcStats.segslots += sh->nslots);
+ GC_MARK_JSVALS(cx, sh->nslots, JS_STACK_SEGMENT(sh), "stack");
+ }
+
+ if (acx->localRootStack)
+ js_MarkLocalRoots(cx, acx->localRootStack);
+
+ for (tvr = acx->tempValueRooters; tvr; tvr = tvr->down) {
+ switch (tvr->count) {
+ case JSTVU_SINGLE:
+ if (JSVAL_IS_GCTHING(tvr->u.value)) {
+ GC_MARK(cx, JSVAL_TO_GCTHING(tvr->u.value),
+ "tvr->u.value");
+ }
+ break;
+ case JSTVU_MARKER:
+ tvr->u.marker(cx, tvr);
+ break;
+ case JSTVU_SPROP:
+ MARK_SCOPE_PROPERTY(cx, tvr->u.sprop);
+ break;
+ case JSTVU_WEAK_ROOTS:
+ MarkWeakRoots(cx, tvr->u.weakRoots);
+ break;
+ default:
+ JS_ASSERT(tvr->count >= 0);
+ GC_MARK_JSVALS(cx, tvr->count, tvr->u.array, "tvr->u.array");
+ }
+ }
+
+ if (acx->sharpObjectMap.depth > 0)
+ js_GCMarkSharpMap(cx, &acx->sharpObjectMap);
+ }
+
+#ifdef DUMP_CALL_TABLE
+ js_DumpCallTable(cx);
+#endif
+
+ /*
+ * Mark children of things that caused too deep recursion during the above
+ * marking phase.
+ */
+ ScanDelayedChildren(cx);
+
+#if JS_HAS_GENERATORS
+ /*
+ * Close phase: search and mark part. See comments in
+ * FindAndMarkObjectsToClose for details.
+ */
+ FindAndMarkObjectsToClose(cx, gckind, genTodoTail);
+
+ /*
+ * Mark children of things that caused too deep recursion during the
+ * just-completed marking part of the close phase.
+ */
+ ScanDelayedChildren(cx);
+#endif
+
+ JS_ASSERT(!cx->insideGCMarkCallback);
+ if (rt->gcCallback) {
+ cx->insideGCMarkCallback = JS_TRUE;
+ (void) rt->gcCallback(cx, JSGC_MARK_END);
+ JS_ASSERT(cx->insideGCMarkCallback);
+ cx->insideGCMarkCallback = JS_FALSE;
+ }
+ JS_ASSERT(rt->gcUnscannedBagSize == 0);
+
+ /* Finalize iterator states before the objects they iterate over. */
+ CloseIteratorStates(cx);
+
+ /*
+ * Sweep phase.
+ *
+ * Finalize as we sweep, outside of rt->gcLock but with rt->gcRunning set
+ * so that any attempt to allocate a GC-thing from a finalizer will fail,
+ * rather than nest badly and leave the unmarked newborn to be swept.
+ *
+ * Finalize smaller objects before larger, to guarantee finalization of
+ * GC-allocated obj->slots after obj. See FreeSlots in jsobj.c.
+ */
+ for (i = 0; i < GC_NUM_FREELISTS; i++) {
+ arenaList = &rt->gcArenaList[i];
+ nbytes = GC_FREELIST_NBYTES(i);
+ limit = arenaList->lastLimit;
+ for (a = arenaList->last; a; a = a->prev) {
+ JS_ASSERT(!a->prevUnscanned);
+ JS_ASSERT(a->unscannedPages == 0);
+ firstPage = (uint8 *) FIRST_THING_PAGE(a);
+ for (offset = 0; offset != limit; offset += nbytes) {
+ if ((offset & GC_PAGE_MASK) == 0) {
+ JS_ASSERT(((JSGCPageInfo *)(firstPage + offset))->
+ unscannedBitmap == 0);
+ offset += PAGE_THING_GAP(nbytes);
+ }
+ JS_ASSERT(offset < limit);
+ flagp = a->base + offset / sizeof(JSGCThing);
+ if (flagp >= firstPage)
+ flagp += GC_THINGS_SIZE;
+ flags = *flagp;
+ if (flags & GCF_MARK) {
+ *flagp &= ~GCF_MARK;
+ } else if (!(flags & (GCF_LOCK | GCF_FINAL))) {
+ /* Call the finalizer with GCF_FINAL ORed into flags. */
+ type = flags & GCF_TYPEMASK;
+ finalizer = gc_finalizers[type];
+ if (finalizer) {
+ thing = (JSGCThing *)(firstPage + offset);
+ *flagp = (uint8)(flags | GCF_FINAL);
+ if (type >= GCX_EXTERNAL_STRING)
+ js_PurgeDeflatedStringCache(rt, (JSString *)thing);
+ finalizer(cx, thing);
+ }
+
+ /* Set flags to GCF_FINAL, signifying that thing is free. */
+ *flagp = GCF_FINAL;
+ }
+ }
+ limit = GC_THINGS_SIZE;
+ }
+ }
+
+ /*
+ * Sweep the runtime's property tree after finalizing objects, in case any
+ * had watchpoints referencing tree nodes. Then sweep atoms, which may be
+ * referenced from dead property ids.
+ */
+ js_SweepScopeProperties(rt);
+ js_SweepAtomState(&rt->atomState);
+
+ /*
+ * Sweep script filenames after sweeping functions in the generic loop
+ * above. In this way when a scripted function's finalizer destroys the
+ * script and calls rt->destroyScriptHook, the hook can still access the
+ * script's filename. See bug 323267.
+ */
+ js_SweepScriptFilenames(rt);
+
+ /*
+ * Free phase.
+ * Free any unused arenas and rebuild the JSGCThing freelist.
+ */
+ for (i = 0; i < GC_NUM_FREELISTS; i++) {
+ arenaList = &rt->gcArenaList[i];
+ ap = &arenaList->last;
+ a = *ap;
+ if (!a)
+ continue;
+
+ allClear = JS_TRUE;
+ arenaList->freeList = NULL;
+ freeList = NULL;
+ METER(arenaList->stats.nthings = 0);
+ METER(arenaList->stats.freelen = 0);
+
+ nbytes = GC_FREELIST_NBYTES(i);
+ limit = arenaList->lastLimit;
+ do {
+ METER(size_t nfree = 0);
+ firstPage = (uint8 *) FIRST_THING_PAGE(a);
+ for (offset = 0; offset != limit; offset += nbytes) {
+ if ((offset & GC_PAGE_MASK) == 0)
+ offset += PAGE_THING_GAP(nbytes);
+ JS_ASSERT(offset < limit);
+ flagp = a->base + offset / sizeof(JSGCThing);
+ if (flagp >= firstPage)
+ flagp += GC_THINGS_SIZE;
+
+ if (*flagp != GCF_FINAL) {
+ allClear = JS_FALSE;
+ METER(++arenaList->stats.nthings);
+ } else {
+ thing = (JSGCThing *)(firstPage + offset);
+ thing->flagp = flagp;
+ thing->next = freeList;
+ freeList = thing;
+ METER(++nfree);
+ }
+ }
+ if (allClear) {
+ /*
+ * Forget the just-assembled free list head for the arena
+ * and destroy the arena itself.
+ */
+ freeList = arenaList->freeList;
+ DestroyGCArena(rt, arenaList, ap);
+ } else {
+ allClear = JS_TRUE;
+ arenaList->freeList = freeList;
+ ap = &a->prev;
+ METER(arenaList->stats.freelen += nfree);
+ METER(arenaList->stats.totalfreelen += nfree);
+ METER(++arenaList->stats.totalarenas);
+ }
+ limit = GC_THINGS_SIZE;
+ } while ((a = *ap) != NULL);
+ }
+
+ if (rt->gcCallback)
+ (void) rt->gcCallback(cx, JSGC_FINALIZE_END);
+#ifdef DEBUG_srcnotesize
+ { extern void DumpSrcNoteSizeHist();
+ DumpSrcNoteSizeHist();
+ printf("GC HEAP SIZE %lu (%lu)\n",
+ (unsigned long)rt->gcBytes, (unsigned long)rt->gcPrivateBytes);
+ }
+#endif
+
+ JS_LOCK_GC(rt);
+
+ /*
+ * We want to restart GC if js_GC was called recursively or if any of the
+ * finalizers called js_RemoveRoot or js_UnlockGCThingRT.
+ */
+ if (rt->gcLevel > 1 || rt->gcPoke) {
+ rt->gcLevel = 1;
+ rt->gcPoke = JS_FALSE;
+ JS_UNLOCK_GC(rt);
+ goto restart;
+ }
+ js_EnablePropertyCache(cx);
+ rt->gcLevel = 0;
+ rt->gcLastBytes = rt->gcBytes;
+ rt->gcRunning = JS_FALSE;
+
+#ifdef JS_THREADSAFE
+ /* If we were invoked during a request, pay back the temporary debit. */
+ if (requestDebit)
+ rt->requestCount += requestDebit;
+ rt->gcThread = NULL;
+ JS_NOTIFY_GC_DONE(rt);
+
+ /*
+ * Unlock unless gckind is GC_LAST_DITCH, which requires the GC lock to be
+ * held on return.
+ */
+ if (gckind != GC_LAST_DITCH)
+ JS_UNLOCK_GC(rt);
+#endif
+
+ /* Execute JSGC_END callback outside the lock. */
+ if (rt->gcCallback) {
+ JSWeakRoots savedWeakRoots;
+ JSTempValueRooter tvr;
+
+ if (gckind == GC_LAST_DITCH) {
+ /*
+ * We allow the JSGC_END implementation to force a full GC or allocate
+ * new GC things. Thus we must protect the weak roots from GC or
+ * overwrites.
+ */
+ savedWeakRoots = cx->weakRoots;
+ JS_PUSH_TEMP_ROOT_WEAK_COPY(cx, &savedWeakRoots, &tvr);
+ JS_KEEP_ATOMS(rt);
+ JS_UNLOCK_GC(rt);
+ }
+
+ (void) rt->gcCallback(cx, JSGC_END);
+
+ if (gckind == GC_LAST_DITCH) {
+ JS_LOCK_GC(rt);
+ JS_UNKEEP_ATOMS(rt);
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ } else if (gckind == GC_LAST_CONTEXT && rt->gcPoke) {
+ /*
+ * On shutdown, iterate until the JSGC_END callback stops creating
+ * garbage.
+ */
+ goto restart_after_callback;
+ }
+ }
+}
+
+void
+js_UpdateMallocCounter(JSContext *cx, size_t nbytes)
+{
+ uint32 *pbytes, bytes;
+
+#ifdef JS_THREADSAFE
+ pbytes = &cx->thread->gcMallocBytes;
+#else
+ pbytes = &cx->runtime->gcMallocBytes;
+#endif
+ bytes = *pbytes;
+ *pbytes = ((uint32)-1 - bytes <= nbytes) ? (uint32)-1 : bytes + nbytes;
+}
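+
+/*
+ * Worked example (editorial note, not in the original source): the update is
+ * a saturating add. With bytes == 0xFFFFFFF0 and nbytes == 0x20, we have
+ * (uint32)-1 - bytes == 0xF <= nbytes, so *pbytes is clamped to 0xFFFFFFFF
+ * instead of wrapping around; otherwise *pbytes simply becomes bytes + nbytes.
+ */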
diff --git a/third_party/js-1.7/jsgc.h b/third_party/js-1.7/jsgc.h
new file mode 100644
index 0000000..ec623a1
--- /dev/null
+++ b/third_party/js-1.7/jsgc.h
@@ -0,0 +1,368 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsgc_h___
+#define jsgc_h___
+/*
+ * JS Garbage Collector.
+ */
+#include "jsprvtd.h"
+#include "jspubtd.h"
+#include "jsdhash.h"
+#include "jsutil.h"
+
+JS_BEGIN_EXTERN_C
+
+/* GC thing type indexes. */
+#define GCX_OBJECT 0 /* JSObject */
+#define GCX_STRING 1 /* JSString */
+#define GCX_DOUBLE 2 /* jsdouble */
+#define GCX_MUTABLE_STRING 3 /* JSString that's mutable --
+ single-threaded only! */
+#define GCX_PRIVATE 4 /* private (unscanned) data */
+#define GCX_NAMESPACE 5 /* JSXMLNamespace */
+#define GCX_QNAME 6 /* JSXMLQName */
+#define GCX_XML 7 /* JSXML */
+#define GCX_EXTERNAL_STRING 8 /* JSString w/ external chars */
+
+#define GCX_NTYPES_LOG2 4 /* type index bits */
+#define GCX_NTYPES JS_BIT(GCX_NTYPES_LOG2)
+
+/* GC flag definitions, must fit in 8 bits (type index goes in the low bits). */
+#define GCF_TYPEMASK JS_BITMASK(GCX_NTYPES_LOG2)
+#define GCF_MARK JS_BIT(GCX_NTYPES_LOG2)
+#define GCF_FINAL JS_BIT(GCX_NTYPES_LOG2 + 1)
+#define GCF_SYSTEM JS_BIT(GCX_NTYPES_LOG2 + 2)
+#define GCF_LOCKSHIFT (GCX_NTYPES_LOG2 + 3) /* lock bit shift */
+#define GCF_LOCK JS_BIT(GCF_LOCKSHIFT) /* lock request bit in API */
+
+/* Pseudo-flag that modifies GCX_STRING to make GCX_MUTABLE_STRING. */
+#define GCF_MUTABLE 2
+
+#if (GCX_STRING | GCF_MUTABLE) != GCX_MUTABLE_STRING
+# error "mutable string type index botch!"
+#endif
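+
+/*
+ * Worked example (editorial note, not in the original source; it assumes the
+ * usual JS_BIT/JS_BITMASK definitions from jsutil.h). With GCX_NTYPES_LOG2
+ * == 4 the flag byte decomposes as GCF_TYPEMASK == 0x0f, GCF_MARK == 0x10,
+ * GCF_FINAL == 0x20, GCF_SYSTEM == 0x40 and GCF_LOCK == 0x80, so a marked,
+ * locked object has flags == (GCX_OBJECT | GCF_MARK | GCF_LOCK) == 0x90.
+ */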
+
+extern uint8 *
+js_GetGCThingFlags(void *thing);
+
+/*
+ * The sole purpose of this function is to preserve public API compatibility
+ * in JS_GetStringBytes, which takes only a single JSString* argument.
+ */
+JSRuntime*
+js_GetGCStringRuntime(JSString *str);
+
+#if 1
+/*
+ * Since we're forcing a GC from JS_GC anyway, don't bother wasting cycles
+ * loading oldval. XXX remove implied force, fix jsinterp.c's "second arg
+ * ignored", etc.
+ */
+#define GC_POKE(cx, oldval) ((cx)->runtime->gcPoke = JS_TRUE)
+#else
+#define GC_POKE(cx, oldval) ((cx)->runtime->gcPoke = JSVAL_IS_GCTHING(oldval))
+#endif
+
+extern intN
+js_ChangeExternalStringFinalizer(JSStringFinalizeOp oldop,
+ JSStringFinalizeOp newop);
+
+extern JSBool
+js_InitGC(JSRuntime *rt, uint32 maxbytes);
+
+extern void
+js_FinishGC(JSRuntime *rt);
+
+extern JSBool
+js_AddRoot(JSContext *cx, void *rp, const char *name);
+
+extern JSBool
+js_AddRootRT(JSRuntime *rt, void *rp, const char *name);
+
+extern JSBool
+js_RemoveRoot(JSRuntime *rt, void *rp);
+
+#ifdef DEBUG
+extern void
+js_DumpNamedRoots(JSRuntime *rt,
+ void (*dump)(const char *name, void *rp, void *data),
+ void *data);
+#endif
+
+extern uint32
+js_MapGCRoots(JSRuntime *rt, JSGCRootMapFun map, void *data);
+
+/* Table of pointers; the array field has count valid members. */
+typedef struct JSPtrTable {
+ size_t count;
+ void **array;
+} JSPtrTable;
+
+extern JSBool
+js_RegisterCloseableIterator(JSContext *cx, JSObject *obj);
+
+#if JS_HAS_GENERATORS
+
+/*
+ * Runtime state to support generators' close hooks.
+ */
+typedef struct JSGCCloseState {
+ /*
+ * Singly linked list of generators that are reachable from GC roots or
+ * were created after the last GC.
+ */
+ JSGenerator *reachableList;
+
+ /*
+ * Head of the queue of generators that have already become unreachable but
+ * whose close hooks have not yet run.
+ */
+ JSGenerator *todoQueue;
+
+#ifndef JS_THREADSAFE
+ /*
+ * Flag indicating that the current thread is executing a close hook, for
+ * the single-threaded case.
+ */
+ JSBool runningCloseHook;
+#endif
+} JSGCCloseState;
+
+extern void
+js_RegisterGenerator(JSContext *cx, JSGenerator *gen);
+
+extern JSBool
+js_RunCloseHooks(JSContext *cx);
+
+#endif
+
+/*
+ * The private JSGCThing struct, which describes a gcFreeList element.
+ */
+struct JSGCThing {
+ JSGCThing *next;
+ uint8 *flagp;
+};
+
+#define GC_NBYTES_MAX (10 * sizeof(JSGCThing))
+#define GC_NUM_FREELISTS (GC_NBYTES_MAX / sizeof(JSGCThing))
+#define GC_FREELIST_NBYTES(i) (((i) + 1) * sizeof(JSGCThing))
+#define GC_FREELIST_INDEX(n) (((n) / sizeof(JSGCThing)) - 1)
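+
+/*
+ * [Editorial note, not part of upstream SpiderMonkey] Example of the size
+ * classes above, assuming sizeof(JSGCThing) == 8 (two pointers on a 32-bit
+ * build): GC_NBYTES_MAX == 80, GC_NUM_FREELISTS == 10, and a request for
+ * 24 bytes maps to GC_FREELIST_INDEX(24) == 2, whose slot size is
+ * GC_FREELIST_NBYTES(2) == 24.
+ */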
+
+extern void *
+js_NewGCThing(JSContext *cx, uintN flags, size_t nbytes);
+
+extern JSBool
+js_LockGCThing(JSContext *cx, void *thing);
+
+extern JSBool
+js_LockGCThingRT(JSRuntime *rt, void *thing);
+
+extern JSBool
+js_UnlockGCThingRT(JSRuntime *rt, void *thing);
+
+extern JSBool
+js_IsAboutToBeFinalized(JSContext *cx, void *thing);
+
+extern void
+js_MarkAtom(JSContext *cx, JSAtom *atom);
+
+/* We avoid a large number of unnecessary calls by doing the flag check first */
+#define GC_MARK_ATOM(cx, atom) \
+ JS_BEGIN_MACRO \
+ if (!((atom)->flags & ATOM_MARK)) \
+ js_MarkAtom(cx, atom); \
+ JS_END_MACRO
+
+/*
+ * Always use the GC_MARK macro and never call js_MarkGCThing directly, so
+ * that when GC_MARK_DEBUG is defined the dump of live GC things does not
+ * miss a thing.
+ */
+extern void
+js_MarkGCThing(JSContext *cx, void *thing);
+
+#ifdef GC_MARK_DEBUG
+
+# define GC_MARK(cx, thing, name) js_MarkNamedGCThing(cx, thing, name)
+
+extern void
+js_MarkNamedGCThing(JSContext *cx, void *thing, const char *name);
+
+extern JS_FRIEND_DATA(FILE *) js_DumpGCHeap;
+JS_EXTERN_DATA(void *) js_LiveThingToFind;
+
+#else
+
+# define GC_MARK(cx, thing, name) js_MarkGCThing(cx, thing)
+
+#endif
+
+extern void
+js_MarkStackFrame(JSContext *cx, JSStackFrame *fp);
+
+/*
+ * Kinds of js_GC invocation.
+ */
+typedef enum JSGCInvocationKind {
+ /* Normal invocation. */
+ GC_NORMAL,
+
+ /*
+ * Called from js_DestroyContext for the last JSContext in a JSRuntime, when
+ * it is imperative that rt->gcPoke gets cleared early in js_GC.
+ */
+ GC_LAST_CONTEXT,
+
+ /*
+ * Called from js_NewGCThing as a last-ditch GC attempt. See comments
+ * before js_GC definition for details.
+ */
+ GC_LAST_DITCH
+} JSGCInvocationKind;
+
+extern void
+js_GC(JSContext *cx, JSGCInvocationKind gckind);
+
+/* Call this after a successful malloc of memory for GC-related things. */
+extern void
+js_UpdateMallocCounter(JSContext *cx, size_t nbytes);
+
+#ifdef DEBUG_notme
+#define JS_GCMETER 1
+#endif
+
+#ifdef JS_GCMETER
+
+typedef struct JSGCStats {
+#ifdef JS_THREADSAFE
+ uint32 localalloc; /* number of successful allocations from local lists */
+#endif
+ uint32 alloc; /* number of allocation attempts */
+ uint32 retry; /* allocation attempt retries after running the GC */
+ uint32 retryhalt; /* allocation retries halted by the branch callback */
+ uint32 fail; /* allocation failures */
+ uint32 finalfail; /* allocator failures during finalizer calls */
+ uint32 lockborn; /* things born locked */
+ uint32 lock; /* valid lock calls */
+ uint32 unlock; /* valid unlock calls */
+ uint32 depth; /* mark tail recursion depth */
+ uint32 maxdepth; /* maximum mark tail recursion depth */
+ uint32 cdepth; /* mark recursion depth of C functions */
+ uint32 maxcdepth; /* maximum mark recursion depth of C functions */
+ uint32 unscanned; /* mark C stack overflows or number of times
+ GC things were put in unscanned bag */
+#ifdef DEBUG
+ uint32 maxunscanned; /* maximum size of unscanned bag */
+#endif
+ uint32 maxlevel; /* maximum GC nesting (indirect recursion) level */
+ uint32 poke; /* number of potentially useful GC calls */
+ uint32 nopoke; /* useless GC calls where js_PokeGC was not set */
+ uint32 afree; /* thing arenas freed so far */
+ uint32 stackseg; /* total extraordinary stack segments scanned */
+ uint32 segslots; /* total stack segment jsval slots scanned */
+ uint32 nclose; /* number of objects with close hooks */
+ uint32 maxnclose; /* max number of objects with close hooks */
+ uint32 closelater; /* number of close hooks scheduled to run */
+ uint32 maxcloselater; /* max number of close hooks scheduled to run */
+} JSGCStats;
+
+extern JS_FRIEND_API(void)
+js_DumpGCStats(JSRuntime *rt, FILE *fp);
+
+#endif /* JS_GCMETER */
+
+typedef struct JSGCArena JSGCArena;
+typedef struct JSGCArenaList JSGCArenaList;
+
+#ifdef JS_GCMETER
+typedef struct JSGCArenaStats JSGCArenaStats;
+
+struct JSGCArenaStats {
+ uint32 narenas; /* number of arenas in the list */
+ uint32 maxarenas; /* maximum number of allocated arenas */
+ uint32 nthings; /* number of allocated JSGCThings */
+ uint32 maxthings; /* maximum number of allocated JSGCThings */
+ uint32 totalnew; /* number of successful calls to js_NewGCThing */
+ uint32 freelen; /* freeList lengths */
+ uint32 recycle; /* number of things recycled through freeList */
+ uint32 totalarenas; /* total number of arenas with live things that
+ GC scanned so far */
+ uint32 totalfreelen; /* total number of things that GC put to free
+ list so far */
+};
+#endif
+
+struct JSGCArenaList {
+ JSGCArena *last; /* last allocated GC arena */
+ uint16 lastLimit; /* end offset of the things allocated so far in
+ the last arena */
+ uint16 thingSize; /* size of things to allocate on this list */
+ JSGCThing *freeList; /* list of free GC things */
+#ifdef JS_GCMETER
+ JSGCArenaStats stats;
+#endif
+};
+
+typedef struct JSWeakRoots {
+ /* Most recently created things by type, members of the GC's root set. */
+ JSGCThing *newborn[GCX_NTYPES];
+
+ /* Atom root for the last-looked-up atom on this context. */
+ JSAtom *lastAtom;
+
+ /* Root for the result of the most recent js_InternalInvoke call. */
+ jsval lastInternalResult;
+} JSWeakRoots;
+
+JS_STATIC_ASSERT(JSVAL_NULL == 0);
+#define JS_CLEAR_WEAK_ROOTS(wr) (memset((wr), 0, sizeof(JSWeakRoots)))
+
+#ifdef DEBUG_notme
+#define TOO_MUCH_GC 1
+#endif
+
+#ifdef WAY_TOO_MUCH_GC
+#define TOO_MUCH_GC 1
+#endif
+
+JS_END_EXTERN_C
+
+#endif /* jsgc_h___ */
diff --git a/third_party/js-1.7/jshash.c b/third_party/js-1.7/jshash.c
new file mode 100644
index 0000000..8e25517
--- /dev/null
+++ b/third_party/js-1.7/jshash.c
@@ -0,0 +1,483 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * PR hash table package.
+ */
+#include "jsstddef.h"
+#include <stdlib.h>
+#include <string.h>
+#include "jstypes.h"
+#include "jsbit.h"
+#include "jsutil.h" /* Added by JSIFY */
+#include "jshash.h" /* Added by JSIFY */
+
+/* Compute the number of buckets in ht */
+#define NBUCKETS(ht) JS_BIT(JS_HASH_BITS - (ht)->shift)
+
+/* The smallest table has 16 buckets */
+#define MINBUCKETSLOG2 4
+#define MINBUCKETS JS_BIT(MINBUCKETSLOG2)
+
+/* Compute the maximum entries given n buckets that we will tolerate, ~90% */
+#define OVERLOADED(n) ((n) - ((n) >> 3))
+
+/* Compute the number of entries below which we shrink the table by half */
+#define UNDERLOADED(n) (((n) > MINBUCKETS) ? ((n) >> 2) : 0)
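+
+/*
+ * [Editorial note, not part of upstream SpiderMonkey] With these macros,
+ * OVERLOADED(n) == n - n/8, so the table grows once it is 7/8 full; for
+ * example OVERLOADED(16) == 14. UNDERLOADED(32) == 8, so a 32-bucket table
+ * shrinks when fewer than 8 entries remain, while the minimum-size table
+ * never shrinks because UNDERLOADED(MINBUCKETS) == 0.
+ */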
+
+/*
+** Stubs for default hash allocator ops.
+*/
+static void *
+DefaultAllocTable(void *pool, size_t size)
+{
+ return malloc(size);
+}
+
+static void
+DefaultFreeTable(void *pool, void *item)
+{
+ free(item);
+}
+
+static JSHashEntry *
+DefaultAllocEntry(void *pool, const void *key)
+{
+ return (JSHashEntry*) malloc(sizeof(JSHashEntry));
+}
+
+static void
+DefaultFreeEntry(void *pool, JSHashEntry *he, uintN flag)
+{
+ if (flag == HT_FREE_ENTRY)
+ free(he);
+}
+
+static JSHashAllocOps defaultHashAllocOps = {
+ DefaultAllocTable, DefaultFreeTable,
+ DefaultAllocEntry, DefaultFreeEntry
+};
+
+JS_PUBLIC_API(JSHashTable *)
+JS_NewHashTable(uint32 n, JSHashFunction keyHash,
+ JSHashComparator keyCompare, JSHashComparator valueCompare,
+ JSHashAllocOps *allocOps, void *allocPriv)
+{
+ JSHashTable *ht;
+ size_t nb;
+
+ if (n <= MINBUCKETS) {
+ n = MINBUCKETSLOG2;
+ } else {
+ n = JS_CeilingLog2(n);
+ if ((int32)n < 0)
+ return NULL;
+ }
+
+ if (!allocOps) allocOps = &defaultHashAllocOps;
+
+ ht = (JSHashTable*) allocOps->allocTable(allocPriv, sizeof *ht);
+ if (!ht)
+ return NULL;
+ memset(ht, 0, sizeof *ht);
+ ht->shift = JS_HASH_BITS - n;
+ n = JS_BIT(n);
+ nb = n * sizeof(JSHashEntry *);
+ ht->buckets = (JSHashEntry**) allocOps->allocTable(allocPriv, nb);
+ if (!ht->buckets) {
+ allocOps->freeTable(allocPriv, ht);
+ return NULL;
+ }
+ memset(ht->buckets, 0, nb);
+
+ ht->keyHash = keyHash;
+ ht->keyCompare = keyCompare;
+ ht->valueCompare = valueCompare;
+ ht->allocOps = allocOps;
+ ht->allocPriv = allocPriv;
+ return ht;
+}
+
+JS_PUBLIC_API(void)
+JS_HashTableDestroy(JSHashTable *ht)
+{
+ uint32 i, n;
+ JSHashEntry *he, **hep;
+ JSHashAllocOps *allocOps = ht->allocOps;
+ void *allocPriv = ht->allocPriv;
+
+ n = NBUCKETS(ht);
+ for (i = 0; i < n; i++) {
+ hep = &ht->buckets[i];
+ while ((he = *hep) != NULL) {
+ *hep = he->next;
+ allocOps->freeEntry(allocPriv, he, HT_FREE_ENTRY);
+ }
+ }
+#ifdef DEBUG
+ memset(ht->buckets, 0xDB, n * sizeof ht->buckets[0]);
+#endif
+ allocOps->freeTable(allocPriv, ht->buckets);
+#ifdef DEBUG
+ memset(ht, 0xDB, sizeof *ht);
+#endif
+ allocOps->freeTable(allocPriv, ht);
+}
+
+/*
+ * Multiplicative hash, from Knuth 6.4.
+ */
+#define BUCKET_HEAD(ht, keyHash) \
+ (&(ht)->buckets[((keyHash) * JS_GOLDEN_RATIO) >> (ht)->shift])
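+
+/*
+ * [Editorial note, not part of upstream SpiderMonkey] The golden-ratio
+ * multiply scrambles keyHash, and the right shift by ht->shift keeps the
+ * top JS_HASH_BITS - shift bits, which is exactly log2(NBUCKETS(ht)). For
+ * example, with the minimum 16-bucket table, shift == 28 and the top 4 bits
+ * of keyHash * JS_GOLDEN_RATIO (in 32-bit unsigned arithmetic) select the
+ * bucket.
+ */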
+
+JS_PUBLIC_API(JSHashEntry **)
+JS_HashTableRawLookup(JSHashTable *ht, JSHashNumber keyHash, const void *key)
+{
+ JSHashEntry *he, **hep, **hep0;
+
+#ifdef HASHMETER
+ ht->nlookups++;
+#endif
+ hep = hep0 = BUCKET_HEAD(ht, keyHash);
+ while ((he = *hep) != NULL) {
+ if (he->keyHash == keyHash && ht->keyCompare(key, he->key)) {
+ /* Move to front of chain if not already there */
+ if (hep != hep0) {
+ *hep = he->next;
+ he->next = *hep0;
+ *hep0 = he;
+ }
+ return hep0;
+ }
+ hep = &he->next;
+#ifdef HASHMETER
+ ht->nsteps++;
+#endif
+ }
+ return hep;
+}
+
+static JSBool
+Resize(JSHashTable *ht, uint32 newshift)
+{
+ size_t nb, nentries, i;
+ JSHashEntry **oldbuckets, *he, *next, **hep;
+#ifdef DEBUG
+ size_t nold = NBUCKETS(ht);
+#endif
+
+ JS_ASSERT(newshift < JS_HASH_BITS);
+
+ nb = (size_t)1 << (JS_HASH_BITS - newshift);
+
+ /* Integer overflow protection. */
+ if (nb > (size_t)-1 / sizeof(JSHashEntry*))
+ return JS_FALSE;
+ nb *= sizeof(JSHashEntry*);
+
+ oldbuckets = ht->buckets;
+ ht->buckets = (JSHashEntry**)ht->allocOps->allocTable(ht->allocPriv, nb);
+ if (!ht->buckets) {
+ ht->buckets = oldbuckets;
+ return JS_FALSE;
+ }
+ memset(ht->buckets, 0, nb);
+
+ ht->shift = newshift;
+ nentries = ht->nentries;
+
+ for (i = 0; nentries != 0; i++) {
+ for (he = oldbuckets[i]; he; he = next) {
+ JS_ASSERT(nentries != 0);
+ --nentries;
+ next = he->next;
+ hep = BUCKET_HEAD(ht, he->keyHash);
+
+ /*
+ * Since he comes from the old table, it must be unique and we
+ * simply add it to the head of the bucket chain without a chain lookup.
+ */
+ he->next = *hep;
+ *hep = he;
+ }
+ }
+#ifdef DEBUG
+ memset(oldbuckets, 0xDB, nold * sizeof oldbuckets[0]);
+#endif
+ ht->allocOps->freeTable(ht->allocPriv, oldbuckets);
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(JSHashEntry *)
+JS_HashTableRawAdd(JSHashTable *ht, JSHashEntry **hep,
+ JSHashNumber keyHash, const void *key, void *value)
+{
+ uint32 n;
+ JSHashEntry *he;
+
+ /* Grow the table if it is overloaded */
+ n = NBUCKETS(ht);
+ if (ht->nentries >= OVERLOADED(n)) {
+ if (!Resize(ht, ht->shift - 1))
+ return NULL;
+#ifdef HASHMETER
+ ht->ngrows++;
+#endif
+ hep = JS_HashTableRawLookup(ht, keyHash, key);
+ }
+
+ /* Make a new key value entry */
+ he = ht->allocOps->allocEntry(ht->allocPriv, key);
+ if (!he)
+ return NULL;
+ he->keyHash = keyHash;
+ he->key = key;
+ he->value = value;
+ he->next = *hep;
+ *hep = he;
+ ht->nentries++;
+ return he;
+}
+
+JS_PUBLIC_API(JSHashEntry *)
+JS_HashTableAdd(JSHashTable *ht, const void *key, void *value)
+{
+ JSHashNumber keyHash;
+ JSHashEntry *he, **hep;
+
+ keyHash = ht->keyHash(key);
+ hep = JS_HashTableRawLookup(ht, keyHash, key);
+ if ((he = *hep) != NULL) {
+ /* Hit; see if values match */
+ if (ht->valueCompare(he->value, value)) {
+ /* key,value pair is already present in table */
+ return he;
+ }
+ if (he->value)
+ ht->allocOps->freeEntry(ht->allocPriv, he, HT_FREE_VALUE);
+ he->value = value;
+ return he;
+ }
+ return JS_HashTableRawAdd(ht, hep, keyHash, key, value);
+}
+
+JS_PUBLIC_API(void)
+JS_HashTableRawRemove(JSHashTable *ht, JSHashEntry **hep, JSHashEntry *he)
+{
+ uint32 n;
+
+ *hep = he->next;
+ ht->allocOps->freeEntry(ht->allocPriv, he, HT_FREE_ENTRY);
+
+ /* Shrink table if it's underloaded */
+ n = NBUCKETS(ht);
+ if (--ht->nentries < UNDERLOADED(n)) {
+ Resize(ht, ht->shift + 1);
+#ifdef HASHMETER
+ ht->nshrinks++;
+#endif
+ }
+}
+
+JS_PUBLIC_API(JSBool)
+JS_HashTableRemove(JSHashTable *ht, const void *key)
+{
+ JSHashNumber keyHash;
+ JSHashEntry *he, **hep;
+
+ keyHash = ht->keyHash(key);
+ hep = JS_HashTableRawLookup(ht, keyHash, key);
+ if ((he = *hep) == NULL)
+ return JS_FALSE;
+
+ /* Hit; remove element */
+ JS_HashTableRawRemove(ht, hep, he);
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(void *)
+JS_HashTableLookup(JSHashTable *ht, const void *key)
+{
+ JSHashNumber keyHash;
+ JSHashEntry *he, **hep;
+
+ keyHash = ht->keyHash(key);
+ hep = JS_HashTableRawLookup(ht, keyHash, key);
+ if ((he = *hep) != NULL) {
+ return he->value;
+ }
+ return NULL;
+}
+
+/*
+** Iterate over the entries in the hash table, calling f for each
+** entry found. Stop if f says to (return value & HT_ENUMERATE_STOP).
+** Return a count of the number of entries scanned.
+*/
+JS_PUBLIC_API(int)
+JS_HashTableEnumerateEntries(JSHashTable *ht, JSHashEnumerator f, void *arg)
+{
+ JSHashEntry *he, **hep, **bucket;
+ uint32 nlimit, n, nbuckets, newlog2;
+ int rv;
+
+ nlimit = ht->nentries;
+ n = 0;
+ for (bucket = ht->buckets; n != nlimit; ++bucket) {
+ hep = bucket;
+ while ((he = *hep) != NULL) {
+ JS_ASSERT(n < nlimit);
+ rv = f(he, n, arg);
+ n++;
+ if (rv & HT_ENUMERATE_REMOVE) {
+ *hep = he->next;
+ ht->allocOps->freeEntry(ht->allocPriv, he, HT_FREE_ENTRY);
+ --ht->nentries;
+ } else {
+ hep = &he->next;
+ }
+ if (rv & HT_ENUMERATE_STOP) {
+ goto out;
+ }
+ }
+ }
+
+out:
+ /* Shrink table if removal of entries made it underloaded */
+ if (ht->nentries != nlimit) {
+ JS_ASSERT(ht->nentries < nlimit);
+ nbuckets = NBUCKETS(ht);
+ if (MINBUCKETS < nbuckets && ht->nentries < UNDERLOADED(nbuckets)) {
+ newlog2 = JS_CeilingLog2(ht->nentries);
+ if (newlog2 < MINBUCKETSLOG2)
+ newlog2 = MINBUCKETSLOG2;
+
+ /* Check that we really shrink the table. */
+ JS_ASSERT(JS_HASH_BITS - ht->shift > newlog2);
+ Resize(ht, JS_HASH_BITS - newlog2);
+ }
+ }
+ return (int)n;
+}
+
+#ifdef HASHMETER
+#include <math.h>
+#include <stdio.h>
+
+JS_PUBLIC_API(void)
+JS_HashTableDumpMeter(JSHashTable *ht, JSHashEnumerator dump, FILE *fp)
+{
+ double sqsum, mean, variance, sigma;
+ uint32 nchains, nbuckets, nentries;
+ uint32 i, n, maxChain, maxChainLen;
+ JSHashEntry *he;
+
+ sqsum = 0;
+ nchains = 0;
+ maxChainLen = 0;
+ nbuckets = NBUCKETS(ht);
+ for (i = 0; i < nbuckets; i++) {
+ he = ht->buckets[i];
+ if (!he)
+ continue;
+ nchains++;
+ for (n = 0; he; he = he->next)
+ n++;
+ sqsum += n * n;
+ if (n > maxChainLen) {
+ maxChainLen = n;
+ maxChain = i;
+ }
+ }
+ nentries = ht->nentries;
+ mean = (double)nentries / nchains;
+ variance = nchains * sqsum - nentries * nentries;
+ if (variance < 0 || nchains == 1)
+ variance = 0;
+ else
+ variance /= nchains * (nchains - 1);
+ sigma = sqrt(variance);
+
+ fprintf(fp, "\nHash table statistics:\n");
+ fprintf(fp, " number of lookups: %u\n", ht->nlookups);
+ fprintf(fp, " number of entries: %u\n", ht->nentries);
+ fprintf(fp, " number of grows: %u\n", ht->ngrows);
+ fprintf(fp, " number of shrinks: %u\n", ht->nshrinks);
+ fprintf(fp, " mean steps per hash: %g\n", (double)ht->nsteps
+ / ht->nlookups);
+ fprintf(fp, "mean hash chain length: %g\n", mean);
+ fprintf(fp, " standard deviation: %g\n", sigma);
+ fprintf(fp, " max hash chain length: %u\n", maxChainLen);
+ fprintf(fp, " max hash chain: [%u]\n", maxChain);
+
+ for (he = ht->buckets[maxChain], i = 0; he; he = he->next, i++)
+ if (dump(he, i, fp) != HT_ENUMERATE_NEXT)
+ break;
+}
+#endif /* HASHMETER */
+
+JS_PUBLIC_API(int)
+JS_HashTableDump(JSHashTable *ht, JSHashEnumerator dump, FILE *fp)
+{
+ int count;
+
+ count = JS_HashTableEnumerateEntries(ht, dump, fp);
+#ifdef HASHMETER
+ JS_HashTableDumpMeter(ht, dump, fp);
+#endif
+ return count;
+}
+
+JS_PUBLIC_API(JSHashNumber)
+JS_HashString(const void *key)
+{
+ JSHashNumber h;
+ const unsigned char *s;
+
+ h = 0;
+ for (s = (const unsigned char *)key; *s; s++)
+ h = (h >> (JS_HASH_BITS - 4)) ^ (h << 4) ^ *s;
+ return h;
+}
+
+JS_PUBLIC_API(int)
+JS_CompareValues(const void *v1, const void *v2)
+{
+ return v1 == v2;
+}
diff --git a/third_party/js-1.7/jshash.h b/third_party/js-1.7/jshash.h
new file mode 100644
index 0000000..2a125e1
--- /dev/null
+++ b/third_party/js-1.7/jshash.h
@@ -0,0 +1,151 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jshash_h___
+#define jshash_h___
+/*
+ * API to portable hash table code.
+ */
+#include <stddef.h>
+#include <stdio.h>
+#include "jstypes.h"
+#include "jscompat.h"
+
+JS_BEGIN_EXTERN_C
+
+typedef uint32 JSHashNumber;
+typedef struct JSHashEntry JSHashEntry;
+typedef struct JSHashTable JSHashTable;
+
+#define JS_HASH_BITS 32
+#define JS_GOLDEN_RATIO 0x9E3779B9U
+
+typedef JSHashNumber (* JS_DLL_CALLBACK JSHashFunction)(const void *key);
+typedef intN (* JS_DLL_CALLBACK JSHashComparator)(const void *v1, const void *v2);
+typedef intN (* JS_DLL_CALLBACK JSHashEnumerator)(JSHashEntry *he, intN i, void *arg);
+
+/* Flag bits in JSHashEnumerator's return value */
+#define HT_ENUMERATE_NEXT 0 /* continue enumerating entries */
+#define HT_ENUMERATE_STOP 1 /* stop enumerating entries */
+#define HT_ENUMERATE_REMOVE 2 /* remove and free the current entry */
+
+typedef struct JSHashAllocOps {
+ void * (*allocTable)(void *pool, size_t size);
+ void (*freeTable)(void *pool, void *item);
+ JSHashEntry * (*allocEntry)(void *pool, const void *key);
+ void (*freeEntry)(void *pool, JSHashEntry *he, uintN flag);
+} JSHashAllocOps;
+
+#define HT_FREE_VALUE 0 /* just free the entry's value */
+#define HT_FREE_ENTRY 1 /* free value and entire entry */
+
+struct JSHashEntry {
+ JSHashEntry *next; /* hash chain linkage */
+ JSHashNumber keyHash; /* key hash function result */
+ const void *key; /* ptr to opaque key */
+ void *value; /* ptr to opaque value */
+};
+
+struct JSHashTable {
+ JSHashEntry **buckets; /* vector of hash buckets */
+ uint32 nentries; /* number of entries in table */
+ uint32 shift; /* multiplicative hash shift */
+ JSHashFunction keyHash; /* key hash function */
+ JSHashComparator keyCompare; /* key comparison function */
+ JSHashComparator valueCompare; /* value comparison function */
+ JSHashAllocOps *allocOps; /* allocation operations */
+ void *allocPriv; /* allocation private data */
+#ifdef HASHMETER
+ uint32 nlookups; /* total number of lookups */
+ uint32 nsteps; /* number of hash chains traversed */
+ uint32 ngrows; /* number of table expansions */
+ uint32 nshrinks; /* number of table contractions */
+#endif
+};
+
+/*
+ * Create a new hash table.
+ * If allocOps is null, use default allocator ops built on top of malloc().
+ */
+extern JS_PUBLIC_API(JSHashTable *)
+JS_NewHashTable(uint32 n, JSHashFunction keyHash,
+ JSHashComparator keyCompare, JSHashComparator valueCompare,
+ JSHashAllocOps *allocOps, void *allocPriv);
+
+extern JS_PUBLIC_API(void)
+JS_HashTableDestroy(JSHashTable *ht);
+
+/* Low level access methods */
+extern JS_PUBLIC_API(JSHashEntry **)
+JS_HashTableRawLookup(JSHashTable *ht, JSHashNumber keyHash, const void *key);
+
+extern JS_PUBLIC_API(JSHashEntry *)
+JS_HashTableRawAdd(JSHashTable *ht, JSHashEntry **hep, JSHashNumber keyHash,
+ const void *key, void *value);
+
+extern JS_PUBLIC_API(void)
+JS_HashTableRawRemove(JSHashTable *ht, JSHashEntry **hep, JSHashEntry *he);
+
+/* Higher level access methods */
+extern JS_PUBLIC_API(JSHashEntry *)
+JS_HashTableAdd(JSHashTable *ht, const void *key, void *value);
+
+extern JS_PUBLIC_API(JSBool)
+JS_HashTableRemove(JSHashTable *ht, const void *key);
+
+extern JS_PUBLIC_API(intN)
+JS_HashTableEnumerateEntries(JSHashTable *ht, JSHashEnumerator f, void *arg);
+
+extern JS_PUBLIC_API(void *)
+JS_HashTableLookup(JSHashTable *ht, const void *key);
+
+extern JS_PUBLIC_API(intN)
+JS_HashTableDump(JSHashTable *ht, JSHashEnumerator dump, FILE *fp);
+
+/* General-purpose C string hash function. */
+extern JS_PUBLIC_API(JSHashNumber)
+JS_HashString(const void *key);
+
+/* Stub function just returns v1 == v2 */
+extern JS_PUBLIC_API(intN)
+JS_CompareValues(const void *v1, const void *v2);
+
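+/*
+ * [Editorial usage sketch, not part of upstream SpiderMonkey] A minimal
+ * example of the API above, keyed by C strings; MatchKey is a name local
+ * to this example, not a library function:
+ *
+ *   static intN MatchKey(const void *k1, const void *k2) {
+ *       return strcmp((const char *)k1, (const char *)k2) == 0;
+ *   }
+ *
+ *   JSHashTable *ht = JS_NewHashTable(16, JS_HashString, MatchKey,
+ *                                     JS_CompareValues, NULL, NULL);
+ *   if (ht) {
+ *       void *v;
+ *       JS_HashTableAdd(ht, "answer", (void *)42);
+ *       v = JS_HashTableLookup(ht, "answer");    (yields (void *)42)
+ *       JS_HashTableRemove(ht, "answer");
+ *       JS_HashTableDestroy(ht);
+ *   }
+ */
+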
+JS_END_EXTERN_C
+
+#endif /* jshash_h___ */
diff --git a/third_party/js-1.7/jsify.pl b/third_party/js-1.7/jsify.pl
new file mode 100644
index 0000000..fa7f4f8
--- /dev/null
+++ b/third_party/js-1.7/jsify.pl
@@ -0,0 +1,485 @@
+#!/usr/local/bin/perl
+
+# This script modifies C code to use the hijacked NSPR routines that are
+# now baked into the JavaScript engine rather than using the NSPR
+# routines that they were based on, i.e. types like PRArenaPool are changed
+# to JSArenaPool.
+#
+# This script was used in 9/98 to facilitate the incorporation of some NSPR
+# code into the JS engine so as to minimize dependency on NSPR.
+#
+
+# Command-line: jsify.pl [options] [filename]*
+#
+# Options:
+# -r Reverse direction of transformation, i.e. JS ==> NSPR2
+# -outdir Directory in which to place output files
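+#
+# Example (illustrative, not from the original script's documentation):
+#
+#   jsify.pl -outdir out prclist.h prclist.c
+#
+# would write out/jsclist.h and out/jsclist.c with the PR_* / pr* names
+# listed below rewritten to their JS_* / js* equivalents.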
+
+
+# NSPR2 symbols that will be modified to JS symbols, e.g.
+# PRArena <==> JSArena
+
+@NSPR_symbols = (
+"PRArena",
+"PRArenaPool",
+"PRArenaStats",
+"PR_ARENAMETER",
+"PR_ARENA_",
+"PR_ARENA_ALIGN",
+"PR_ARENA_ALLOCATE",
+"PR_ARENA_CONST_ALIGN_MASK",
+"PR_ARENA_DEFAULT_ALIGN",
+"PR_ARENA_DESTROY",
+"PR_ARENA_GROW",
+"PR_ARENA_MARK",
+"PR_ARENA_RELEASE",
+
+"PR_smprintf",
+"PR_smprintf_free",
+"PR_snprintf",
+"PR_sprintf_append",
+"PR_sscanf",
+"PR_sxprintf",
+"PR_vsmprintf",
+"PR_vsnprintf",
+"PR_vsprintf_append",
+"PR_vsxprintf",
+
+"PRCList",
+"PRCListStr",
+"PRCLists",
+
+"PRDestroyEventProc",
+"PREvent",
+"PREventFunProc",
+"PREventQueue",
+"PRHandleEventProc",
+"PR_PostEvent",
+"PR_PostSynchronousEvent",
+"PR_ProcessPendingEvents",
+"PR_CreateEventQueue",
+"PR_DequeueEvent",
+"PR_DestroyEvent",
+"PR_DestroyEventQueue",
+"PR_EventAvailable",
+"PR_EventLoop",
+"PR_GetEvent",
+"PR_GetEventOwner",
+"PR_GetEventQueueMonitor",
+"PR_GetEventQueueSelectFD",
+"PR_GetMainEventQueue",
+"PR_HandleEvent",
+"PR_InitEvent",
+"PR_ENTER_EVENT_QUEUE_MONITOR",
+"PR_EXIT_EVENT_QUEUE_MONITOR",
+"PR_MapEvents",
+"PR_RevokeEvents",
+
+"PR_cnvtf",
+"PR_dtoa",
+"PR_strtod",
+
+"PRFileDesc",
+
+"PR_HASH_BITS",
+"PR_GOLDEN_RATIO",
+"PRHashAllocOps",
+"PRHashComparator",
+"PRHashEntry",
+"PRHashEnumerator",
+"PRHashFunction",
+"PRHashNumber",
+"PRHashTable",
+"PR_HashString",
+"PR_HashTableAdd",
+"PR_HashTableDestroy",
+"PR_HashTableDump",
+"PR_HashTableEnumerateEntries",
+"PR_HashTableLookup",
+"PR_HashTableRawAdd",
+"PR_HashTableRawLookup",
+"PR_HashTableRawRemove",
+"PR_HashTableRemove",
+
+"PRBool",
+"PRFloat64",
+"PRInt16",
+"PRInt32",
+"PRInt64",
+"PRInt8",
+"PRIntn",
+"PRUint16",
+"PRUint32",
+"PRUint64",
+"PRUint8",
+"PRUintn",
+"PRPtrDiff",
+"PRPtrdiff",
+"PRUptrdiff",
+"PRUword",
+"PRWord",
+"PRPackedBool",
+"PRSize",
+"PRStatus",
+"pruword",
+"prword",
+"prword_t",
+
+"PR_ALIGN_OF_DOUBLE",
+"PR_ALIGN_OF_FLOAT",
+"PR_ALIGN_OF_INT",
+"PR_ALIGN_OF_INT64",
+"PR_ALIGN_OF_LONG",
+"PR_ALIGN_OF_POINTER",
+"PR_ALIGN_OF_SHORT",
+"PR_ALIGN_OF_WORD",
+"PR_BITS_PER_BYTE",
+"PR_BITS_PER_BYTE_LOG2",
+"PR_BITS_PER_DOUBLE",
+"PR_BITS_PER_DOUBLE_LOG2",
+"PR_BITS_PER_FLOAT",
+"PR_BITS_PER_FLOAT_LOG2",
+"PR_BITS_PER_INT",
+"PR_BITS_PER_INT64",
+"PR_BITS_PER_INT64_LOG2",
+"PR_BITS_PER_INT_LOG2",
+"PR_BITS_PER_LONG",
+"PR_BITS_PER_LONG_LOG2",
+"PR_BITS_PER_SHORT",
+"PR_BITS_PER_SHORT_LOG2",
+"PR_BITS_PER_WORD",
+"PR_BITS_PER_WORD_LOG2",
+"PR_BYTES_PER_BYTE",
+"PR_BYTES_PER_DOUBLE",
+"PR_BYTES_PER_DWORD",
+"PR_BYTES_PER_DWORD_LOG2",
+"PR_BYTES_PER_FLOAT",
+"PR_BYTES_PER_INT",
+"PR_BYTES_PER_INT64",
+"PR_BYTES_PER_LONG",
+"PR_BYTES_PER_SHORT",
+"PR_BYTES_PER_WORD",
+"PR_BYTES_PER_WORD_LOG2",
+
+"PRSegment",
+"PRSegmentAccess",
+"PRStuffFunc",
+"PRThread",
+
+"PR_APPEND_LINK",
+
+"PR_ASSERT",
+
+"PR_ATOMIC_DWORD_LOAD",
+"PR_ATOMIC_DWORD_STORE",
+
+"PR_Abort",
+
+"PR_ArenaAllocate",
+"PR_ArenaCountAllocation",
+"PR_ArenaCountGrowth",
+"PR_ArenaCountInplaceGrowth",
+"PR_ArenaCountRelease",
+"PR_ArenaCountRetract",
+"PR_ArenaFinish",
+"PR_ArenaGrow",
+"PR_ArenaRelease",
+"PR_CompactArenaPool",
+"PR_DumpArenaStats",
+"PR_FinishArenaPool",
+"PR_FreeArenaPool",
+"PR_InitArenaPool",
+
+"PR_Assert",
+
+"PR_AttachThread",
+
+"PR_BEGIN_EXTERN_C",
+"PR_BEGIN_MACRO",
+
+"PR_BIT",
+"PR_BITMASK",
+
+"PR_BUFFER_OVERFLOW_ERROR",
+
+"PR_CALLBACK",
+"PR_CALLBACK_DECL",
+"PR_CALLOC",
+"PR_CEILING_LOG2",
+"PR_CLEAR_ARENA",
+"PR_CLEAR_BIT",
+"PR_CLEAR_UNUSED",
+"PR_CLIST_IS_EMPTY",
+"PR_COUNT_ARENA",
+"PR_CURRENT_THREAD",
+
+"PR_GetSegmentAccess",
+"PR_GetSegmentSize",
+"PR_GetSegmentVaddr",
+"PR_GrowSegment",
+"PR_DestroySegment",
+"PR_MapSegment",
+"PR_NewSegment",
+"PR_Segment",
+"PR_Seg",
+"PR_SEGMENT_NONE",
+"PR_SEGMENT_RDONLY",
+"PR_SEGMENT_RDWR",
+
+"PR_Calloc",
+"PR_CeilingLog2",
+"PR_CompareStrings",
+"PR_CompareValues",
+"PR_DELETE",
+"PR_END_EXTERN_C",
+"PR_END_MACRO",
+"PR_ENUMERATE_STOP",
+"PR_FAILURE",
+"PR_FALSE",
+"PR_FLOOR_LOG2",
+"PR_FREEIF",
+"PR_FREE_PATTERN",
+"PR_FloorLog2",
+"PR_FormatTime",
+"PR_Free",
+
+"PR_GetEnv",
+"PR_GetError",
+"PR_INIT_ARENA_POOL",
+"PR_INIT_CLIST",
+"PR_INIT_STATIC_CLIST",
+"PR_INLINE",
+"PR_INSERT_AFTER",
+"PR_INSERT_BEFORE",
+"PR_INSERT_LINK",
+"PR_INT32",
+"PR_INTERVAL_NO_TIMEOUT",
+"PR_INTERVAL_NO_WAIT",
+"PR_Init",
+"PR_LIST_HEAD",
+"PR_LIST_TAIL",
+"PR_LOG",
+"PR_LOGGING",
+"PR_LOG_ALWAYS",
+"PR_LOG_BEGIN",
+"PR_LOG_DEBUG",
+"PR_LOG_DEFINE",
+"PR_LOG_END",
+"PR_LOG_ERROR",
+"PR_LOG_MAX",
+"PR_LOG_MIN",
+"PR_LOG_NONE",
+"PR_LOG_NOTICE",
+"PR_LOG_TEST",
+"PR_LOG_WARN",
+"PR_LOG_WARNING",
+"PR_LogFlush",
+"PR_LogPrint",
+"PR_MALLOC",
+"PR_MAX",
+"PR_MD_calloc",
+"PR_MD_free",
+"PR_MD_malloc",
+"PR_MD_realloc",
+"PR_MIN",
+"PR_Malloc",
+"PR_NEW",
+"PR_NEWZAP",
+"PR_NEXT_LINK",
+"PR_NOT_REACHED",
+"PR_NewCondVar",
+"PR_NewHashTable",
+"PR_NewLogModule",
+"PR_PREV_LINK",
+"PR_PUBLIC_API",
+"PR_PUBLIC_DATA",
+"PR_RANGE_ERROR",
+"PR_REALLOC",
+"PR_REMOVE_AND_INIT_LINK",
+"PR_REMOVE_LINK",
+"PR_ROUNDUP",
+"PR_Realloc",
+
+"PR_SET_BIT",
+"PR_STATIC_CALLBACK",
+"PR_SUCCESS",
+"PR_SetError",
+"PR_SetLogBuffering",
+"PR_SetLogFile",
+
+"PR_TEST_BIT",
+"PR_TRUE",
+"PR_UINT32",
+"PR_UPTRDIFF",
+
+"prarena_h___",
+"prbit_h___",
+"prclist_h___",
+"prdtoa_h___",
+"prlog_h___",
+"prlong_h___",
+"prmacos_h___",
+"prmem_h___",
+"prprf_h___",
+"prtypes_h___",
+
+"prarena",
+"prbit",
+"prbitmap_t",
+"prclist",
+"prcpucfg",
+"prdtoa",
+"prhash",
+"plhash",
+"prlong",
+"prmacos",
+"prmem",
+"prosdep",
+"protypes",
+"prprf",
+"prtypes"
+);
+
+while ($ARGV[0] =~ /^-/) {
+ if ($ARGV[0] eq "-r") {
+ shift;
+ $reverse_conversion = 1;
+ } elsif ($ARGV[0] eq "-outdir") {
+ shift;
+ $outdir = shift;
+ }
+}
+
+# Given an NSPR symbol, compute the JS equivalent or
+# vice-versa.
+sub subst {
+ local ($replacement);
+ local ($sym) = @_;
+
+ $replacement = substr($sym,0,2) eq "pr" ? "js" : "JS";
+ $replacement .= substr($sym, 2);
+ return $replacement;
+}
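+
+# For example (illustrative): subst("PRArenaPool") yields "JSArenaPool" and
+# subst("prlong") yields "jslong".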
+
+# Build the regular expression that will convert between the NSPR
+# types and the JS types
+if ($reverse_conversion) {
+ die "Not implemented yet";
+} else {
+ foreach $sym (@NSPR_symbols) {
+ $regexp .= $sym . "|"
+ }
+ # Get rid of the last "|"
+ chop $regexp;
+
+ # Replace PR* with JS* and replace pr* with js*
+ $regexp = 's/(^|\\W)(' . $regexp . ')/$1 . &subst($2)/eg';
+# print $regexp;
+}
+
+# Pre-compile a little subroutine to perform the regexp substitution
+# between NSPR types and JS types
+eval('sub convert_from_NSPR {($line) = @_; $line =~ ' . $regexp . ';}');
+
+sub convert_mallocs {
+ ($line) = @_;
+ $line =~ s/PR_MALLOC/malloc/g;
+ $line =~ s/PR_REALLOC/realloc/g;
+ $line =~ s/PR_FREE/free/g;
+ return $line;
+}
+
+sub convert_includes {
+ ($line) = @_;
+ if ($line !~ /include/) {
+ return $line;
+ }
+
+ if ($line =~ /prlog\.h/) {
+ $line = '#include "jsutil.h"'. " /* Added by JSIFY */\n";
+ } elsif ($line =~ /plhash\.h/) {
+ $line = '#include "jshash.h"'. " /* Added by JSIFY */\n";
+ } elsif ($line =~ /plarena\.h/) {
+ $line = '#include "jsarena.h"'. " /* Added by JSIFY */\n";
+ } elsif ($line =~ /prmem\.h/) {
+ $line = "";
+ } elsif ($line =~ /jsmsg\.def/) {
+ $line = '#include "js.msg"' . "\n";
+ } elsif ($line =~ /shellmsg\.def/) {
+ $line = '#include "jsshell.msg"' . "\n";
+ } elsif ($line =~ /jsopcode\.def/) {
+ $line = '#include "jsopcode.tbl"' . "\n";
+ }
+ return $line;
+}
+
+sub convert_declarations {
+ ($line) = @_;
+ $line =~ s/PR_EXTERN/JS_EXTERN_API/g;
+ $line =~ s/PR_IMPLEMENT_DATA/JS_EXPORT_DATA/g;
+ $line =~ s/PR_IMPLEMENT/JS_EXPORT_API/g;
+ $line =~ s/PR_CALLBACK/JS_DLL_CALLBACK/g;
+ $line =~ s/PR_STATIC_CALLBACK/JS_STATIC_DLL_CALLBACK/g;
+ $line =~ s/PR_IMPORT/JS_IMPORT/g;
+ $line =~ s/PR_PUBLIC_API/JS_EXPORT_API/g;
+ $line =~ s/PR_PUBLIC_DATA/JS_EXPORT_DATA/g;
+ return $line;
+}
+
+sub convert_long_long_macros {
+ ($line) = @_;
+ $line =~ s/\b(LL_)/JSLL_/g;
+ return $line;
+}
+
+sub convert_asserts {
+ ($line) = @_;
+ $line =~ s/\bPR_ASSERT/JS_ASSERT/g;
+ return $line;
+}
+
+while ($#ARGV >= 0) {
+ $infile = shift;
+
+ # Change filename, e.g. prtypes.h to jstypes.h, except for legacy
+ # files that start with 'prmj', like prmjtime.h, which keep their names.
+ $outfile = $infile;
+ if ($infile !~ /^prmj/) {
+ $outfile =~ s/^pr/js/;
+ $outfile =~ s/^pl/js/;
+ }
+
+ if ($outdir) {
+ $outfile = $outdir . '/' . $outfile;
+ }
+
+ if ($infile eq $outfile) {
+ die "Error: refuse to overwrite $outfile, use -outdir option."
+ }
+ die "Can't open $infile" if !open(INFILE, "<$infile");
+ die "Can't open $outfile for writing" if !open(OUTFILE, ">$outfile");
+
+ while (<INFILE>) {
+ $line = $_;
+
+ # Convert NSPR #include lines (prlog.h, plhash.h, etc.) to their JS equivalents
+ &convert_includes($line);
+
+ # Rename PR_EXTERN, PR_IMPORT, etc.
+ &convert_declarations($line);
+
+ # Convert from PR_MALLOC to malloc, etc.
+ &convert_mallocs($line);
+
+ # Convert from PR_ASSERT to JS_ASSERT
+# &convert_asserts($line);
+
+ # Convert from, e.g., PRArena to JSArena
+ &convert_from_NSPR($line);
+
+ # Change LL_* macros to JSLL_*
+ &convert_long_long_macros($line);
+
+ print OUTFILE $line;
+ }
+}
diff --git a/third_party/js-1.7/jsinterp.c b/third_party/js-1.7/jsinterp.c
new file mode 100644
index 0000000..c8c1204
--- /dev/null
+++ b/third_party/js-1.7/jsinterp.c
@@ -0,0 +1,6216 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sw=4 et tw=78:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * JavaScript bytecode interpreter.
+ */
+#include "jsstddef.h"
+#include <stdio.h>
+#include <string.h>
+#include <math.h>
+#include "jstypes.h"
+#include "jsarena.h" /* Added by JSIFY */
+#include "jsutil.h" /* Added by JSIFY */
+#include "jsprf.h"
+#include "jsapi.h"
+#include "jsarray.h"
+#include "jsatom.h"
+#include "jsbool.h"
+#include "jscntxt.h"
+#include "jsconfig.h"
+#include "jsdbgapi.h"
+#include "jsfun.h"
+#include "jsgc.h"
+#include "jsinterp.h"
+#include "jsiter.h"
+#include "jslock.h"
+#include "jsnum.h"
+#include "jsobj.h"
+#include "jsopcode.h"
+#include "jsscan.h"
+#include "jsscope.h"
+#include "jsscript.h"
+#include "jsstr.h"
+
+#if JS_HAS_XML_SUPPORT
+#include "jsxml.h"
+#endif
+
+#ifdef DEBUG
+#define ASSERT_CACHE_IS_EMPTY(cache) \
+ JS_BEGIN_MACRO \
+ JSPropertyCacheEntry *end_, *pce_, entry_; \
+ JSPropertyCache *cache_ = (cache); \
+ JS_ASSERT(cache_->empty); \
+ end_ = &cache_->table[PROPERTY_CACHE_SIZE]; \
+ for (pce_ = &cache_->table[0]; pce_ < end_; pce_++) { \
+ PCE_LOAD(cache_, pce_, entry_); \
+ JS_ASSERT(!PCE_OBJECT(entry_)); \
+ JS_ASSERT(!PCE_PROPERTY(entry_)); \
+ } \
+ JS_END_MACRO
+#else
+#define ASSERT_CACHE_IS_EMPTY(cache) ((void)0)
+#endif
+
+void
+js_FlushPropertyCache(JSContext *cx)
+{
+ JSPropertyCache *cache;
+
+ cache = &cx->runtime->propertyCache;
+ if (cache->empty) {
+ ASSERT_CACHE_IS_EMPTY(cache);
+ return;
+ }
+ memset(cache->table, 0, sizeof cache->table);
+ cache->empty = JS_TRUE;
+#ifdef JS_PROPERTY_CACHE_METERING
+ cache->flushes++;
+#endif
+}
+
+void
+js_DisablePropertyCache(JSContext *cx)
+{
+ JS_ASSERT(!cx->runtime->propertyCache.disabled);
+ cx->runtime->propertyCache.disabled = JS_TRUE;
+}
+
+void
+js_EnablePropertyCache(JSContext *cx)
+{
+ JS_ASSERT(cx->runtime->propertyCache.disabled);
+ ASSERT_CACHE_IS_EMPTY(&cx->runtime->propertyCache);
+ cx->runtime->propertyCache.disabled = JS_FALSE;
+}
+
+/*
+ * Stack macros and functions. These all use a local variable, jsval *sp, to
+ * point to the next free stack slot. SAVE_SP must be called before any call
+ * to a function that may invoke the interpreter. RESTORE_SP must be called
+ * only after return from js_Invoke, because only js_Invoke changes fp->sp.
+ */
+#define PUSH(v) (*sp++ = (v))
+#define POP() (*--sp)
+#ifdef DEBUG
+#define SAVE_SP(fp) \
+ (JS_ASSERT((fp)->script || !(fp)->spbase || (sp) == (fp)->spbase), \
+ (fp)->sp = sp)
+#else
+#define SAVE_SP(fp) ((fp)->sp = sp)
+#endif
+#define RESTORE_SP(fp) (sp = (fp)->sp)
+
+/*
+ * SAVE_SP_AND_PC commits deferred stores of interpreter registers to their
+ * homes in fp, when calling out of the interpreter loop or threaded code.
+ * RESTORE_SP_AND_PC copies the other way, to update registers after a call
+ * to a subroutine that interprets a piece of the current script.
+ */
+#define SAVE_SP_AND_PC(fp) (SAVE_SP(fp), (fp)->pc = pc)
+#define RESTORE_SP_AND_PC(fp) (RESTORE_SP(fp), pc = (fp)->pc)
+
+/*
+ * Push the generating bytecode's pc onto the parallel pc stack that runs
+ * depth slots below the operands.
+ *
+ * NB: PUSH_OPND uses sp, depth, and pc from its lexical environment. See
+ * js_Interpret for these local variables' declarations and uses.
+ */
+#define PUSH_OPND(v) (sp[-depth] = (jsval)pc, PUSH(v))
+#define STORE_OPND(n,v) (sp[(n)-depth] = (jsval)pc, sp[n] = (v))
+#define POP_OPND() POP()
+#define FETCH_OPND(n) (sp[n])
+
+/*
+ * Push the jsdouble d using sp, depth, and pc from the lexical environment.
+ * Try to convert d to a jsint that fits in a jsval, otherwise GC-alloc space
+ * for it and push a reference.
+ */
+#define STORE_NUMBER(cx, n, d) \
+ JS_BEGIN_MACRO \
+ jsint i_; \
+ jsval v_; \
+ \
+ if (JSDOUBLE_IS_INT(d, i_) && INT_FITS_IN_JSVAL(i_)) { \
+ v_ = INT_TO_JSVAL(i_); \
+ } else { \
+ ok = js_NewDoubleValue(cx, d, &v_); \
+ if (!ok) \
+ goto out; \
+ } \
+ STORE_OPND(n, v_); \
+ JS_END_MACRO
+
+#define STORE_INT(cx, n, i) \
+ JS_BEGIN_MACRO \
+ jsval v_; \
+ \
+ if (INT_FITS_IN_JSVAL(i)) { \
+ v_ = INT_TO_JSVAL(i); \
+ } else { \
+ ok = js_NewDoubleValue(cx, (jsdouble)(i), &v_); \
+ if (!ok) \
+ goto out; \
+ } \
+ STORE_OPND(n, v_); \
+ JS_END_MACRO
+
+#define STORE_UINT(cx, n, u) \
+ JS_BEGIN_MACRO \
+ jsval v_; \
+ \
+ if ((u) <= JSVAL_INT_MAX) { \
+ v_ = INT_TO_JSVAL(u); \
+ } else { \
+ ok = js_NewDoubleValue(cx, (jsdouble)(u), &v_); \
+ if (!ok) \
+ goto out; \
+ } \
+ STORE_OPND(n, v_); \
+ JS_END_MACRO
+
+#define FETCH_NUMBER(cx, n, d) \
+ JS_BEGIN_MACRO \
+ jsval v_; \
+ \
+ v_ = FETCH_OPND(n); \
+ VALUE_TO_NUMBER(cx, v_, d); \
+ JS_END_MACRO
+
+#define FETCH_INT(cx, n, i) \
+ JS_BEGIN_MACRO \
+ jsval v_ = FETCH_OPND(n); \
+ if (JSVAL_IS_INT(v_)) { \
+ i = JSVAL_TO_INT(v_); \
+ } else { \
+ SAVE_SP_AND_PC(fp); \
+ ok = js_ValueToECMAInt32(cx, v_, &i); \
+ if (!ok) \
+ goto out; \
+ } \
+ JS_END_MACRO
+
+#define FETCH_UINT(cx, n, ui) \
+ JS_BEGIN_MACRO \
+ jsval v_ = FETCH_OPND(n); \
+ jsint i_; \
+ if (JSVAL_IS_INT(v_) && (i_ = JSVAL_TO_INT(v_)) >= 0) { \
+ ui = (uint32) i_; \
+ } else { \
+ SAVE_SP_AND_PC(fp); \
+ ok = js_ValueToECMAUint32(cx, v_, &ui); \
+ if (!ok) \
+ goto out; \
+ } \
+ JS_END_MACRO
+
+/*
+ * Optimized conversion macros that test for the desired type in v before
+ * homing sp and calling a conversion function.
+ */
+#define VALUE_TO_NUMBER(cx, v, d) \
+ JS_BEGIN_MACRO \
+ if (JSVAL_IS_INT(v)) { \
+ d = (jsdouble)JSVAL_TO_INT(v); \
+ } else if (JSVAL_IS_DOUBLE(v)) { \
+ d = *JSVAL_TO_DOUBLE(v); \
+ } else { \
+ SAVE_SP_AND_PC(fp); \
+ ok = js_ValueToNumber(cx, v, &d); \
+ if (!ok) \
+ goto out; \
+ } \
+ JS_END_MACRO
+
+#define POP_BOOLEAN(cx, v, b) \
+ JS_BEGIN_MACRO \
+ v = FETCH_OPND(-1); \
+ if (v == JSVAL_NULL) { \
+ b = JS_FALSE; \
+ } else if (JSVAL_IS_BOOLEAN(v)) { \
+ b = JSVAL_TO_BOOLEAN(v); \
+ } else { \
+ SAVE_SP_AND_PC(fp); \
+ ok = js_ValueToBoolean(cx, v, &b); \
+ if (!ok) \
+ goto out; \
+ } \
+ sp--; \
+ JS_END_MACRO
+
+/*
+ * Convert a primitive string, number or boolean to a corresponding object.
+ * v must not be an object, null or undefined when using this macro.
+ */
+#define PRIMITIVE_TO_OBJECT(cx, v, obj) \
+ JS_BEGIN_MACRO \
+ SAVE_SP(fp); \
+ if (JSVAL_IS_STRING(v)) { \
+ obj = js_StringToObject(cx, JSVAL_TO_STRING(v)); \
+ } else if (JSVAL_IS_INT(v)) { \
+ obj = js_NumberToObject(cx, (jsdouble)JSVAL_TO_INT(v)); \
+ } else if (JSVAL_IS_DOUBLE(v)) { \
+ obj = js_NumberToObject(cx, *JSVAL_TO_DOUBLE(v)); \
+ } else { \
+ JS_ASSERT(JSVAL_IS_BOOLEAN(v)); \
+ obj = js_BooleanToObject(cx, JSVAL_TO_BOOLEAN(v)); \
+ } \
+ JS_END_MACRO
+
+#define VALUE_TO_OBJECT(cx, v, obj) \
+ JS_BEGIN_MACRO \
+ if (!JSVAL_IS_PRIMITIVE(v)) { \
+ obj = JSVAL_TO_OBJECT(v); \
+ } else { \
+ SAVE_SP_AND_PC(fp); \
+ obj = js_ValueToNonNullObject(cx, v); \
+ if (!obj) { \
+ ok = JS_FALSE; \
+ goto out; \
+ } \
+ } \
+ JS_END_MACRO
+
+#define FETCH_OBJECT(cx, n, v, obj) \
+ JS_BEGIN_MACRO \
+ v = FETCH_OPND(n); \
+ VALUE_TO_OBJECT(cx, v, obj); \
+ STORE_OPND(n, OBJECT_TO_JSVAL(obj)); \
+ JS_END_MACRO
+
+#define VALUE_TO_PRIMITIVE(cx, v, hint, vp) \
+ JS_BEGIN_MACRO \
+ if (JSVAL_IS_PRIMITIVE(v)) { \
+ *vp = v; \
+ } else { \
+ SAVE_SP_AND_PC(fp); \
+ ok = OBJ_DEFAULT_VALUE(cx, JSVAL_TO_OBJECT(v), hint, vp); \
+ if (!ok) \
+ goto out; \
+ } \
+ JS_END_MACRO
+
+JS_FRIEND_API(jsval *)
+js_AllocRawStack(JSContext *cx, uintN nslots, void **markp)
+{
+ jsval *sp;
+
+ if (markp)
+ *markp = JS_ARENA_MARK(&cx->stackPool);
+ JS_ARENA_ALLOCATE_CAST(sp, jsval *, &cx->stackPool, nslots * sizeof(jsval));
+ if (!sp) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_STACK_OVERFLOW,
+ (cx->fp && cx->fp->fun)
+ ? JS_GetFunctionName(cx->fp->fun)
+ : "script");
+ }
+ return sp;
+}
+
+JS_FRIEND_API(void)
+js_FreeRawStack(JSContext *cx, void *mark)
+{
+ JS_ARENA_RELEASE(&cx->stackPool, mark);
+}
+
+JS_FRIEND_API(jsval *)
+js_AllocStack(JSContext *cx, uintN nslots, void **markp)
+{
+ jsval *sp, *vp, *end;
+ JSArena *a;
+ JSStackHeader *sh;
+ JSStackFrame *fp;
+
+ /* Callers don't check for zero nslots: we do to avoid empty segments. */
+ if (nslots == 0) {
+ *markp = NULL;
+ return JS_ARENA_MARK(&cx->stackPool);
+ }
+
+ /* Allocate 2 extra slots for the stack segment header we'll likely need. */
+ sp = js_AllocRawStack(cx, 2 + nslots, markp);
+ if (!sp)
+ return NULL;
+
+ /* Try to avoid another header if we can piggyback on the last segment. */
+ a = cx->stackPool.current;
+ sh = cx->stackHeaders;
+ if (sh && JS_STACK_SEGMENT(sh) + sh->nslots == sp) {
+ /* Extend the last stack segment, give back the 2 header slots. */
+ sh->nslots += nslots;
+ a->avail -= 2 * sizeof(jsval);
+ } else {
+ /*
+ * Need a new stack segment, so we must initialize unused slots in the
+ * current frame. See js_GC, just before marking the "operand" jsvals,
+ * where we scan from fp->spbase to fp->sp or through fp->script->depth
+ * (whichever covers fewer slots).
+ */
+ fp = cx->fp;
+ if (fp && fp->script && fp->spbase) {
+#ifdef DEBUG
+ jsuword depthdiff = fp->script->depth * sizeof(jsval);
+ JS_ASSERT(JS_UPTRDIFF(fp->sp, fp->spbase) <= depthdiff);
+ JS_ASSERT(JS_UPTRDIFF(*markp, fp->spbase) >= depthdiff);
+#endif
+ end = fp->spbase + fp->script->depth;
+ for (vp = fp->sp; vp < end; vp++)
+ *vp = JSVAL_VOID;
+ }
+
+ /* Allocate and push a stack segment header from the 2 extra slots. */
+ sh = (JSStackHeader *)sp;
+ sh->nslots = nslots;
+ sh->down = cx->stackHeaders;
+ cx->stackHeaders = sh;
+ sp += 2;
+ }
+
+ /*
+ * Store JSVAL_NULL using memset, to let compilers optimize as they see
+ * fit, in case a caller allocates and pushes GC-things one by one, which
+ * could nest a last-ditch GC that will scan this segment.
+ */
+ memset(sp, 0, nslots * sizeof(jsval));
+ return sp;
+}
+
+JS_FRIEND_API(void)
+js_FreeStack(JSContext *cx, void *mark)
+{
+ JSStackHeader *sh;
+ jsuword slotdiff;
+
+ /* Check for zero nslots allocation special case. */
+ if (!mark)
+ return;
+
+ /* We can assert because js_FreeStack always balances js_AllocStack. */
+ sh = cx->stackHeaders;
+ JS_ASSERT(sh);
+
+ /* If mark is in the current segment, reduce sh->nslots, else pop sh. */
+ slotdiff = JS_UPTRDIFF(mark, JS_STACK_SEGMENT(sh)) / sizeof(jsval);
+ if (slotdiff < (jsuword)sh->nslots)
+ sh->nslots = slotdiff;
+ else
+ cx->stackHeaders = sh->down;
+
+ /* Release the stackPool space allocated since mark was set. */
+ JS_ARENA_RELEASE(&cx->stackPool, mark);
+}
+
+JSBool
+js_GetArgument(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ return JS_TRUE;
+}
+
+JSBool
+js_SetArgument(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ return JS_TRUE;
+}
+
+JSBool
+js_GetLocalVariable(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ return JS_TRUE;
+}
+
+JSBool
+js_SetLocalVariable(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ return JS_TRUE;
+}
+
+JSObject *
+js_GetScopeChain(JSContext *cx, JSStackFrame *fp)
+{
+ JSObject *obj, *cursor, *clonedChild, *parent;
+ JSTempValueRooter tvr;
+
+ obj = fp->blockChain;
+ if (!obj) {
+ /*
+ * Don't force a call object for a lightweight function call, but do
+ * insist that there is a call object for a heavyweight function call.
+ */
+ JS_ASSERT(!fp->fun ||
+ !(fp->fun->flags & JSFUN_HEAVYWEIGHT) ||
+ fp->callobj);
+ JS_ASSERT(fp->scopeChain);
+ return fp->scopeChain;
+ }
+
+ /*
+ * We have one or more lexical scopes to reflect into fp->scopeChain, so
+ * make sure there's a call object at the current head of the scope chain,
+ * if this frame is a call frame.
+ */
+ if (fp->fun && !fp->callobj) {
+ JS_ASSERT(OBJ_GET_CLASS(cx, fp->scopeChain) != &js_BlockClass ||
+ JS_GetPrivate(cx, fp->scopeChain) != fp);
+ if (!js_GetCallObject(cx, fp, fp->scopeChain))
+ return NULL;
+ }
+
+ /*
+ * Clone the block chain. To avoid recursive cloning we set the parent of
+ * the cloned child after we clone the parent. In the following loop, a
+ * null clonedChild indicates the first iteration, when no special GC
+ * rooting is necessary. On the second and subsequent iterations we have
+ * to protect the chain cloned so far against the GC while cloning the
+ * cursor object.
+ */
+ cursor = obj;
+ clonedChild = NULL;
+ for (;;) {
+ parent = OBJ_GET_PARENT(cx, cursor);
+
+ /*
+ * We pass fp->scopeChain and not null even if we override the parent
+ * slot later, because a null parent triggers useless calculations of the
+ * slot's value in js_NewObject, which js_CloneBlockObject calls.
+ */
+ cursor = js_CloneBlockObject(cx, cursor, fp->scopeChain, fp);
+ if (!cursor) {
+ if (clonedChild)
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ return NULL;
+ }
+ if (!clonedChild) {
+ /*
+ * The first iteration. Check whether more iterations follow and, if
+ * so, root obj to protect the whole cloned chain against GC.
+ */
+ obj = cursor;
+ if (!parent)
+ break;
+ JS_PUSH_TEMP_ROOT_OBJECT(cx, obj, &tvr);
+ } else {
+ /*
+ * Avoid OBJ_SET_PARENT overhead as clonedChild cannot escape to
+ * other threads.
+ */
+ clonedChild->slots[JSSLOT_PARENT] = OBJECT_TO_JSVAL(cursor);
+ if (!parent) {
+ JS_ASSERT(tvr.u.value == OBJECT_TO_JSVAL(obj));
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ break;
+ }
+ }
+ clonedChild = cursor;
+ cursor = parent;
+ }
+ fp->flags |= JSFRAME_POP_BLOCKS;
+ fp->scopeChain = obj;
+ fp->blockChain = NULL;
+ return obj;
+}
+
+/*
+ * Walk the scope chain looking for block scopes whose locals need to be
+ * copied from stack slots into object slots before fp goes away.
+ */
+static JSBool
+PutBlockObjects(JSContext *cx, JSStackFrame *fp)
+{
+ JSBool ok;
+ JSObject *obj;
+
+ ok = JS_TRUE;
+ for (obj = fp->scopeChain; obj; obj = OBJ_GET_PARENT(cx, obj)) {
+ if (OBJ_GET_CLASS(cx, obj) == &js_BlockClass) {
+ if (JS_GetPrivate(cx, obj) != fp)
+ break;
+ ok &= js_PutBlockObject(cx, obj);
+ }
+ }
+ return ok;
+}
+
+JSObject *
+js_ComputeThis(JSContext *cx, JSObject *thisp, jsval *argv)
+{
+ if (thisp && OBJ_GET_CLASS(cx, thisp) != &js_CallClass) {
+ /* Some objects (e.g., With) delegate 'this' to another object. */
+ thisp = OBJ_THIS_OBJECT(cx, thisp);
+ if (!thisp)
+ return NULL;
+ } else {
+ /*
+ * ECMA requires "the global object", but in the presence of multiple
+ * top-level objects (windows, frames, or certain layers in the client
+ * object model), we prefer fun's parent. An example that causes this
+ * code to run:
+ *
+ * // in window w1
+ * function f() { return this }
+ * function g() { return f }
+ *
+ * // in window w2
+ * var h = w1.g()
+ * alert(h() == w1)
+ *
+ * The alert should display "true".
+ */
+ if (JSVAL_IS_PRIMITIVE(argv[-2]) ||
+ !OBJ_GET_PARENT(cx, JSVAL_TO_OBJECT(argv[-2]))) {
+ thisp = cx->globalObject;
+ } else {
+ jsid id;
+ jsval v;
+ uintN attrs;
+
+ /* Walk up the parent chain. */
+ thisp = JSVAL_TO_OBJECT(argv[-2]);
+ id = ATOM_TO_JSID(cx->runtime->atomState.parentAtom);
+ for (;;) {
+ if (!OBJ_CHECK_ACCESS(cx, thisp, id, JSACC_PARENT, &v, &attrs))
+ return NULL;
+ if (JSVAL_IS_VOID(v))
+ v = OBJ_GET_SLOT(cx, thisp, JSSLOT_PARENT);
+ if (JSVAL_IS_NULL(v))
+ break;
+ thisp = JSVAL_TO_OBJECT(v);
+ }
+ }
+ }
+ argv[-1] = OBJECT_TO_JSVAL(thisp);
+ return thisp;
+}
+
+#if JS_HAS_NO_SUCH_METHOD
+
+static JSBool
+NoSuchMethod(JSContext *cx, JSStackFrame *fp, jsval *vp, uint32 flags,
+ uintN argc)
+{
+ JSObject *thisp, *argsobj;
+ jsval *sp, roots[3];
+ JSTempValueRooter tvr;
+ jsid id;
+ JSBool ok;
+ jsbytecode *pc;
+ jsatomid atomIndex;
+
+ /*
+ * We must call js_ComputeThis here to censor Call objects. A performance
+ * hit, since we'll call it again in the normal sequence of invoke events,
+ * but at least it's idempotent.
+ *
+ * Normally, we call ComputeThis after all frame members have been set,
+ * and in particular, after any revision of the callee value at *vp due
+ * to clasp->convert (see below). This matters because ComputeThis may
+ * access *vp via fp->argv[-2], to follow the parent chain to a global
+ * object to use as the 'this' parameter.
+ *
+ * Obviously, here in the JSVAL_IS_PRIMITIVE(v) case, there can't be any
+ * such defaulting of 'this' to callee (v, *vp) ancestor.
+ */
+ JS_ASSERT(JSVAL_IS_PRIMITIVE(vp[0]));
+ RESTORE_SP(fp);
+ if (JSVAL_IS_OBJECT(vp[1])) {
+ thisp = JSVAL_TO_OBJECT(vp[1]);
+ } else {
+ PRIMITIVE_TO_OBJECT(cx, vp[1], thisp);
+ if (!thisp)
+ return JS_FALSE;
+ vp[1] = OBJECT_TO_JSVAL(thisp);
+ }
+ thisp = js_ComputeThis(cx, thisp, vp + 2);
+ if (!thisp)
+ return JS_FALSE;
+ vp[1] = OBJECT_TO_JSVAL(thisp);
+
+ /* From here on, control must flow through label out: to return. */
+ memset(roots, 0, sizeof roots);
+ JS_PUSH_TEMP_ROOT(cx, JS_ARRAY_LENGTH(roots), roots, &tvr);
+
+ id = ATOM_TO_JSID(cx->runtime->atomState.noSuchMethodAtom);
+#if JS_HAS_XML_SUPPORT
+ if (OBJECT_IS_XML(cx, thisp)) {
+ JSXMLObjectOps *ops;
+
+ ops = (JSXMLObjectOps *) thisp->map->ops;
+ thisp = ops->getMethod(cx, thisp, id, &roots[2]);
+ if (!thisp) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ vp[1] = OBJECT_TO_JSVAL(thisp);
+ } else
+#endif
+ {
+ ok = OBJ_GET_PROPERTY(cx, thisp, id, &roots[2]);
+ if (!ok)
+ goto out;
+ }
+ if (JSVAL_IS_PRIMITIVE(roots[2]))
+ goto not_function;
+
+ pc = (jsbytecode *) vp[-(intN)fp->script->depth];
+ switch ((JSOp) *pc) {
+ case JSOP_NAME:
+ case JSOP_GETPROP:
+#if JS_HAS_XML_SUPPORT
+ case JSOP_GETMETHOD:
+#endif
+ atomIndex = GET_ATOM_INDEX(pc);
+ roots[0] = ATOM_KEY(js_GetAtom(cx, &fp->script->atomMap, atomIndex));
+ argsobj = js_NewArrayObject(cx, argc, vp + 2);
+ if (!argsobj) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ roots[1] = OBJECT_TO_JSVAL(argsobj);
+ ok = js_InternalInvoke(cx, thisp, roots[2], flags | JSINVOKE_INTERNAL,
+ 2, roots, &vp[0]);
+ break;
+
+ default:
+ goto not_function;
+ }
+
+ out:
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ return ok;
+
+ not_function:
+ js_ReportIsNotFunction(cx, vp, flags & JSINVOKE_FUNFLAGS);
+ ok = JS_FALSE;
+ goto out;
+}
+
+#endif /* JS_HAS_NO_SUCH_METHOD */
+
+#ifdef DUMP_CALL_TABLE
+
+#include "jsclist.h"
+#include "jshash.h"
+#include "jsdtoa.h"
+
+typedef struct CallKey {
+ jsval callee; /* callee value */
+ const char *filename; /* function filename or null */
+ uintN lineno; /* function lineno or 0 */
+} CallKey;
+
+/* Compensate for typeof null == "object" brain damage. */
+#define JSTYPE_NULL JSTYPE_LIMIT
+#define TYPEOF(cx,v) (JSVAL_IS_NULL(v) ? JSTYPE_NULL : JS_TypeOfValue(cx,v))
+#define TYPENAME(t) (((t) == JSTYPE_NULL) ? js_null_str : js_type_str[t])
+#define NTYPEHIST (JSTYPE_LIMIT + 1)
+
+typedef struct CallValue {
+ uint32 total; /* total call count */
+ uint32 recycled; /* LRU-recycled calls lost */
+ uint16 minargc; /* minimum argument count */
+ uint16 maxargc; /* maximum argument count */
+ struct ArgInfo {
+ uint32 typeHist[NTYPEHIST]; /* histogram by type */
+ JSCList lruList; /* top 10 values LRU list */
+ struct ArgValCount {
+ JSCList lruLink; /* LRU list linkage */
+ jsval value; /* recently passed value */
+ uint32 count; /* number of times passed */
+ char strbuf[112]; /* string conversion buffer */
+ } topValCounts[10]; /* top 10 value storage */
+ } argInfo[8];
+} CallValue;
+
+typedef struct CallEntry {
+ JSHashEntry entry;
+ CallKey key;
+ CallValue value;
+ char name[32]; /* function name copy */
+} CallEntry;
+
+static void *
+AllocCallTable(void *pool, size_t size)
+{
+ return malloc(size);
+}
+
+static void
+FreeCallTable(void *pool, void *item)
+{
+ free(item);
+}
+
+static JSHashEntry *
+AllocCallEntry(void *pool, const void *key)
+{
+ return (JSHashEntry*) calloc(1, sizeof(CallEntry));
+}
+
+static void
+FreeCallEntry(void *pool, JSHashEntry *he, uintN flag)
+{
+ JS_ASSERT(flag == HT_FREE_ENTRY);
+ free(he);
+}
+
+static JSHashAllocOps callTableAllocOps = {
+ AllocCallTable, FreeCallTable,
+ AllocCallEntry, FreeCallEntry
+};
+
+JS_STATIC_DLL_CALLBACK(JSHashNumber)
+js_hash_call_key(const void *key)
+{
+ CallKey *ck = (CallKey *) key;
+ JSHashNumber hash = (jsuword)ck->callee >> 3;
+
+ if (ck->filename) {
+ hash = (hash << 4) ^ JS_HashString(ck->filename);
+ hash = (hash << 4) ^ ck->lineno;
+ }
+ return hash;
+}
+
+JS_STATIC_DLL_CALLBACK(intN)
+js_compare_call_keys(const void *k1, const void *k2)
+{
+ CallKey *ck1 = (CallKey *)k1, *ck2 = (CallKey *)k2;
+
+ return ck1->callee == ck2->callee &&
+ ((ck1->filename && ck2->filename)
+ ? strcmp(ck1->filename, ck2->filename) == 0
+ : ck1->filename == ck2->filename) &&
+ ck1->lineno == ck2->lineno;
+}
+
+JSHashTable *js_CallTable;
+size_t js_LogCallToSourceLimit;
+
+JS_STATIC_DLL_CALLBACK(intN)
+CallTableDumper(JSHashEntry *he, intN k, void *arg)
+{
+ CallEntry *ce = (CallEntry *)he;
+ FILE *fp = (FILE *)arg;
+ uintN argc, i, n;
+ struct ArgInfo *ai;
+ JSType save, type;
+ JSCList *cl;
+ struct ArgValCount *avc;
+ jsval argval;
+
+ if (ce->key.filename) {
+ /* We're called at the end of the mark phase, so mark our filenames. */
+ js_MarkScriptFilename(ce->key.filename);
+ fprintf(fp, "%s:%u ", ce->key.filename, ce->key.lineno);
+ } else {
+ fprintf(fp, "@%p ", (void *) ce->key.callee);
+ }
+
+ if (ce->name[0])
+ fprintf(fp, "name %s ", ce->name);
+ fprintf(fp, "calls %lu (%lu) argc %u/%u\n",
+ (unsigned long) ce->value.total,
+ (unsigned long) ce->value.recycled,
+ ce->value.minargc, ce->value.maxargc);
+
+ argc = JS_MIN(ce->value.maxargc, 8);
+ for (i = 0; i < argc; i++) {
+ ai = &ce->value.argInfo[i];
+
+ n = 0;
+ save = -1;
+ for (type = JSTYPE_VOID; type <= JSTYPE_LIMIT; type++) {
+ if (ai->typeHist[type]) {
+ save = type;
+ ++n;
+ }
+ }
+ if (n == 1) {
+ fprintf(fp, " arg %u type %s: %lu\n",
+ i, TYPENAME(save), (unsigned long) ai->typeHist[save]);
+ } else {
+ fprintf(fp, " arg %u type histogram:\n", i);
+ for (type = JSTYPE_VOID; type <= JSTYPE_LIMIT; type++) {
+ fprintf(fp, " %9s: %8lu ",
+ TYPENAME(type), (unsigned long) ai->typeHist[type]);
+ for (n = (uintN) JS_HOWMANY(ai->typeHist[type], 10); n > 0; --n)
+ fputc('*', fp);
+ fputc('\n', fp);
+ }
+ }
+
+ fprintf(fp, " arg %u top 10 values:\n", i);
+ n = 1;
+ for (cl = ai->lruList.prev; cl != &ai->lruList; cl = cl->prev) {
+ avc = (struct ArgValCount *)cl;
+ if (!avc->count)
+ break;
+ argval = avc->value;
+ fprintf(fp, " %9u: %8lu %.*s (%#lx)\n",
+ n, (unsigned long) avc->count,
+ sizeof avc->strbuf, avc->strbuf, argval);
+ ++n;
+ }
+ }
+
+ return HT_ENUMERATE_NEXT;
+}
+
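+/*
+ * Dump the accumulated call table to /tmp/calltable.dump.N, where N cycles
+ * through 0..7 so only the eight most recent dumps are kept around.
+ */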
+void
+js_DumpCallTable(JSContext *cx)
+{
+ char name[24];
+ FILE *fp;
+ static uintN dumpCount;
+
+ if (!js_CallTable)
+ return;
+
+ JS_snprintf(name, sizeof name, "/tmp/calltable.dump.%u", dumpCount & 7);
+ dumpCount++;
+ fp = fopen(name, "w");
+ if (!fp)
+ return;
+
+ JS_HashTableEnumerateEntries(js_CallTable, CallTableDumper, fp);
+ fclose(fp);
+}
+
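+/*
+ * Record one call in the DUMP_CALL_TABLE instrumentation: bump the per-callee
+ * totals, track the argc range, histogram each of the first 8 arguments by
+ * type, and keep a top-10 LRU list of recently passed values per argument,
+ * stringified into strbuf for js_DumpCallTable.
+ */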
+static void
+LogCall(JSContext *cx, jsval callee, uintN argc, jsval *argv)
+{
+ CallKey key;
+ const char *name, *cstr;
+ JSFunction *fun;
+ JSHashNumber keyHash;
+ JSHashEntry **hep, *he;
+ CallEntry *ce;
+ uintN i, j;
+ jsval argval;
+ JSType type;
+ struct ArgInfo *ai;
+ struct ArgValCount *avc;
+ JSString *str;
+
+ if (!js_CallTable) {
+ js_CallTable = JS_NewHashTable(1024, js_hash_call_key,
+ js_compare_call_keys, NULL,
+ &callTableAllocOps, NULL);
+ if (!js_CallTable)
+ return;
+ }
+
+ key.callee = callee;
+ key.filename = NULL;
+ key.lineno = 0;
+ name = "";
+ if (VALUE_IS_FUNCTION(cx, callee)) {
+ fun = (JSFunction *) JS_GetPrivate(cx, JSVAL_TO_OBJECT(callee));
+ if (fun->atom)
+ name = js_AtomToPrintableString(cx, fun->atom);
+ if (FUN_INTERPRETED(fun)) {
+ key.filename = fun->u.i.script->filename;
+ key.lineno = fun->u.i.script->lineno;
+ }
+ }
+ keyHash = js_hash_call_key(&key);
+
+ hep = JS_HashTableRawLookup(js_CallTable, keyHash, &key);
+ he = *hep;
+ if (he) {
+ ce = (CallEntry *) he;
+ JS_ASSERT(strncmp(ce->name, name, sizeof ce->name) == 0);
+ } else {
+ he = JS_HashTableRawAdd(js_CallTable, hep, keyHash, &key, NULL);
+ if (!he)
+ return;
+ ce = (CallEntry *) he;
+ ce->entry.key = &ce->key;
+ ce->entry.value = &ce->value;
+ ce->key = key;
+ for (i = 0; i < 8; i++) {
+ ai = &ce->value.argInfo[i];
+ JS_INIT_CLIST(&ai->lruList);
+ for (j = 0; j < 10; j++)
+ JS_APPEND_LINK(&ai->topValCounts[j].lruLink, &ai->lruList);
+ }
+ strncpy(ce->name, name, sizeof ce->name);
+ }
+
+ ++ce->value.total;
+ /* Track the minimum argc seen; seed it on the first recorded call. */
+ if (ce->value.total == 1 || ce->value.minargc > argc)
+ ce->value.minargc = argc;
+ if (ce->value.maxargc < argc)
+ ce->value.maxargc = argc;
+ if (argc > 8)
+ argc = 8;
+ for (i = 0; i < argc; i++) {
+ ai = &ce->value.argInfo[i];
+ argval = argv[i];
+ type = TYPEOF(cx, argval);
+ ++ai->typeHist[type];
+
+ for (j = 0; ; j++) {
+ if (j == 10) {
+ avc = (struct ArgValCount *) ai->lruList.next;
+ ce->value.recycled += avc->count;
+ avc->value = argval;
+ avc->count = 1;
+ break;
+ }
+ avc = &ai->topValCounts[j];
+ if (avc->value == argval) {
+ ++avc->count;
+ break;
+ }
+ }
+
+ /* Move avc to the back of the LRU list. */
+ JS_REMOVE_LINK(&avc->lruLink);
+ JS_APPEND_LINK(&avc->lruLink, &ai->lruList);
+
+ str = NULL;
+ cstr = "";
+ switch (TYPEOF(cx, argval)) {
+ case JSTYPE_VOID:
+ cstr = js_type_str[JSTYPE_VOID];
+ break;
+ case JSTYPE_NULL:
+ cstr = js_null_str;
+ break;
+ case JSTYPE_BOOLEAN:
+ cstr = js_boolean_str[JSVAL_TO_BOOLEAN(argval)];
+ break;
+ case JSTYPE_NUMBER:
+ if (JSVAL_IS_INT(argval)) {
+ JS_snprintf(avc->strbuf, sizeof avc->strbuf, "%ld",
+ JSVAL_TO_INT(argval));
+ } else {
+ JS_dtostr(avc->strbuf, sizeof avc->strbuf, DTOSTR_STANDARD, 0,
+ *JSVAL_TO_DOUBLE(argval));
+ }
+ continue;
+ case JSTYPE_STRING:
+ str = js_QuoteString(cx, JSVAL_TO_STRING(argval), (jschar)'"');
+ break;
+ case JSTYPE_FUNCTION:
+ if (VALUE_IS_FUNCTION(cx, argval)) {
+ fun = (JSFunction *)JS_GetPrivate(cx, JSVAL_TO_OBJECT(argval));
+ if (fun && fun->atom) {
+ str = ATOM_TO_STRING(fun->atom);
+ break;
+ }
+ }
+ /* FALL THROUGH */
+ case JSTYPE_OBJECT:
+ js_LogCallToSourceLimit = sizeof avc->strbuf;
+ cx->options |= JSOPTION_LOGCALL_TOSOURCE;
+ str = js_ValueToSource(cx, argval);
+ cx->options &= ~JSOPTION_LOGCALL_TOSOURCE;
+ break;
+ }
+ if (str)
+ cstr = JS_GetStringBytes(str);
+ strncpy(avc->strbuf, cstr, sizeof avc->strbuf);
+ }
+}
+
+#endif /* DUMP_CALL_TABLE */
+
+/*
+ * Conditional assert to detect failure to clear a pending exception that is
+ * suppressed (or unintentional suppression of a wanted exception).
+ */
+#if defined DEBUG_brendan || defined DEBUG_mrbkap || defined DEBUG_shaver
+# define DEBUG_NOT_THROWING 1
+#endif
+
+#ifdef DEBUG_NOT_THROWING
+# define ASSERT_NOT_THROWING(cx) JS_ASSERT(!(cx)->throwing)
+#else
+# define ASSERT_NOT_THROWING(cx) /* nothing */
+#endif
+
+/*
+ * Find a function reference and its 'this' object implicit first parameter
+ * under argc arguments on cx's stack, and call the function. Push missing
+ * required arguments, allocate declared local variables, and pop everything
+ * when done. Then push the return value.
+ */
+JS_FRIEND_API(JSBool)
+js_Invoke(JSContext *cx, uintN argc, uintN flags)
+{
+ void *mark;
+ JSStackFrame *fp, frame;
+ jsval *sp, *newsp, *limit;
+ jsval *vp, v, thisv;
+ JSObject *funobj, *parent, *thisp;
+ JSBool ok;
+ JSClass *clasp;
+ JSObjectOps *ops;
+ JSNative native;
+ JSFunction *fun;
+ JSScript *script;
+ uintN nslots, nvars, nalloc, surplus;
+ JSInterpreterHook hook;
+ void *hookData;
+
+ /* Mark the top of stack and load frequently-used registers. */
+ mark = JS_ARENA_MARK(&cx->stackPool);
+ fp = cx->fp;
+ sp = fp->sp;
+
+ /*
+ * Set vp to the callee value's stack slot (it's where rval goes).
+ * Once vp is set, control should flow through label out2: to return.
+ * Set frame.rval early so native class and object ops can throw and
+ * return false, causing a goto out2 with ok set to false.
+ */
+ vp = sp - (2 + argc);
+ v = *vp;
+ frame.rval = JSVAL_VOID;
+
+ /*
+ * A callee must be an object reference, unless its 'this' parameter
+ * implements the __noSuchMethod__ method, in which case that method will
+ * be called like so:
+ *
+ * thisp.__noSuchMethod__(id, args)
+ *
+ * where id is the name of the method that this invocation attempted to
+ * call by name, and args is an Array containing this invocation's actual
+ * parameters.
+ */
+ if (JSVAL_IS_PRIMITIVE(v)) {
+#if JS_HAS_NO_SUCH_METHOD
+ if (fp->script && !(flags & JSINVOKE_INTERNAL)) {
+ ok = NoSuchMethod(cx, fp, vp, flags, argc);
+ if (ok)
+ frame.rval = *vp;
+ goto out2;
+ }
+#endif
+ goto bad;
+ }
+
+ /* Load thisv after potentially calling NoSuchMethod, which may set it. */
+ thisv = vp[1];
+
+ funobj = JSVAL_TO_OBJECT(v);
+ parent = OBJ_GET_PARENT(cx, funobj);
+ clasp = OBJ_GET_CLASS(cx, funobj);
+ if (clasp != &js_FunctionClass) {
+ /* Function is inlined, all other classes use object ops. */
+ ops = funobj->map->ops;
+
+ /*
+ * XXX this makes no sense -- why convert to function if clasp->call?
+ * XXX better to call that hook without converting
+ * XXX the only thing that needs fixing is liveconnect
+ *
+ * Try converting to function, for closure and API compatibility.
+ * We attempt the conversion under all circumstances for 1.2; otherwise,
+ * only if there is a call op defined.
+ */
+ if ((ops == &js_ObjectOps) ? clasp->call : ops->call) {
+ ok = clasp->convert(cx, funobj, JSTYPE_FUNCTION, &v);
+ if (!ok)
+ goto out2;
+
+ if (VALUE_IS_FUNCTION(cx, v)) {
+ /* Make vp refer to funobj to keep it available as argv[-2]. */
+ *vp = v;
+ funobj = JSVAL_TO_OBJECT(v);
+ parent = OBJ_GET_PARENT(cx, funobj);
+ goto have_fun;
+ }
+ }
+ fun = NULL;
+ script = NULL;
+ nslots = nvars = 0;
+
+ /* Try a call or construct native object op. */
+ native = (flags & JSINVOKE_CONSTRUCT) ? ops->construct : ops->call;
+ if (!native)
+ goto bad;
+
+ if (JSVAL_IS_OBJECT(thisv)) {
+ thisp = JSVAL_TO_OBJECT(thisv);
+ } else {
+ PRIMITIVE_TO_OBJECT(cx, thisv, thisp);
+ if (!thisp)
+ goto out2;
+ vp[1] = thisv = OBJECT_TO_JSVAL(thisp);
+ }
+ } else {
+have_fun:
+ /* Get private data and set derived locals from it. */
+ fun = (JSFunction *) JS_GetPrivate(cx, funobj);
+ nslots = (fun->nargs > argc) ? fun->nargs - argc : 0;
+ if (FUN_INTERPRETED(fun)) {
+ native = NULL;
+ script = fun->u.i.script;
+ nvars = fun->u.i.nvars;
+ } else {
+ native = fun->u.n.native;
+ script = NULL;
+ nvars = 0;
+ nslots += fun->u.n.extra;
+ }
+
+ if (JSFUN_BOUND_METHOD_TEST(fun->flags)) {
+ /* Handle bound method special case. */
+ thisp = parent;
+ } else if (JSVAL_IS_OBJECT(thisv)) {
+ thisp = JSVAL_TO_OBJECT(thisv);
+ } else {
+ uintN thispflags = JSFUN_THISP_FLAGS(fun->flags);
+
+ JS_ASSERT(!(flags & JSINVOKE_CONSTRUCT));
+ if (JSVAL_IS_STRING(thisv)) {
+ if (JSFUN_THISP_TEST(thispflags, JSFUN_THISP_STRING)) {
+ thisp = (JSObject *) thisv;
+ goto init_frame;
+ }
+ thisp = js_StringToObject(cx, JSVAL_TO_STRING(thisv));
+ } else if (JSVAL_IS_INT(thisv)) {
+ if (JSFUN_THISP_TEST(thispflags, JSFUN_THISP_NUMBER)) {
+ thisp = (JSObject *) thisv;
+ goto init_frame;
+ }
+ thisp = js_NumberToObject(cx, (jsdouble)JSVAL_TO_INT(thisv));
+ } else if (JSVAL_IS_DOUBLE(thisv)) {
+ if (JSFUN_THISP_TEST(thispflags, JSFUN_THISP_NUMBER)) {
+ thisp = (JSObject *) thisv;
+ goto init_frame;
+ }
+ thisp = js_NumberToObject(cx, *JSVAL_TO_DOUBLE(thisv));
+ } else {
+ JS_ASSERT(JSVAL_IS_BOOLEAN(thisv));
+ if (JSFUN_THISP_TEST(thispflags, JSFUN_THISP_BOOLEAN)) {
+ thisp = (JSObject *) thisv;
+ goto init_frame;
+ }
+ thisp = js_BooleanToObject(cx, JSVAL_TO_BOOLEAN(thisv));
+ }
+ if (!thisp) {
+ ok = JS_FALSE;
+ goto out2;
+ }
+ goto init_frame;
+ }
+ }
+
+ if (flags & JSINVOKE_CONSTRUCT) {
+ /* Default return value for a constructor is the new object. */
+ frame.rval = OBJECT_TO_JSVAL(thisp);
+ } else {
+ thisp = js_ComputeThis(cx, thisp, vp + 2);
+ if (!thisp) {
+ ok = JS_FALSE;
+ goto out2;
+ }
+ }
+
+ init_frame:
+ /* Initialize the rest of frame, except for sp (set by SAVE_SP later). */
+ frame.thisp = thisp;
+ frame.varobj = NULL;
+ frame.callobj = frame.argsobj = NULL;
+ frame.script = script;
+ frame.fun = fun;
+ frame.argc = argc;
+ frame.argv = sp - argc;
+ frame.nvars = nvars;
+ frame.vars = sp;
+ frame.down = fp;
+ frame.annotation = NULL;
+ frame.scopeChain = NULL; /* set below for real, after cx->fp is set */
+ frame.pc = NULL;
+ frame.spbase = NULL;
+ frame.sharpDepth = 0;
+ frame.sharpArray = NULL;
+ frame.flags = flags;
+ frame.dormantNext = NULL;
+ frame.xmlNamespace = NULL;
+ frame.blockChain = NULL;
+
+ /* From here on, control must flow through label out: to return. */
+ cx->fp = &frame;
+
+ /* Init these now in case we goto out before first hook call. */
+ hook = cx->runtime->callHook;
+ hookData = NULL;
+
+ /* Check for argument slots required by the function. */
+ if (nslots) {
+ /* All arguments must be contiguous, so we may have to copy actuals. */
+ nalloc = nslots;
+ limit = (jsval *) cx->stackPool.current->limit;
+ JS_ASSERT((jsval *) cx->stackPool.current->base <= sp && sp <= limit);
+ if (sp + nslots > limit) {
+ /* Hit end of arena: we have to copy argv[-2..(argc+nslots-1)]. */
+ nalloc += 2 + argc;
+ } else {
+ /* Take advantage of surplus slots in the caller's frame depth. */
+ JS_ASSERT((jsval *)mark >= sp);
+ surplus = (jsval *)mark - sp;
+ nalloc -= surplus;
+ }
+
+ /* Check whether we have enough space in the caller's frame. */
+ if ((intN)nalloc > 0) {
+ /* Need space for actuals plus missing formals minus surplus. */
+ newsp = js_AllocRawStack(cx, nalloc, NULL);
+ if (!newsp) {
+ ok = JS_FALSE;
+ goto out;
+ }
+
+ /* If we couldn't allocate contiguous args, copy actuals now. */
+ if (newsp != mark) {
+ JS_ASSERT(sp + nslots > limit);
+ JS_ASSERT(2 + argc + nslots == nalloc);
+ *newsp++ = vp[0];
+ *newsp++ = vp[1];
+ if (argc)
+ memcpy(newsp, frame.argv, argc * sizeof(jsval));
+ frame.argv = newsp;
+ sp = frame.vars = newsp + argc;
+ }
+ }
+
+ /* Advance frame.vars to make room for the missing args. */
+ frame.vars += nslots;
+
+ /* Push void to initialize missing args. */
+ do {
+ PUSH(JSVAL_VOID);
+ } while (--nslots != 0);
+ }
+ JS_ASSERT(nslots == 0);
+
+ /* Now allocate stack space for local variables. */
+ if (nvars) {
+ JS_ASSERT((jsval *)cx->stackPool.current->avail >= frame.vars);
+ surplus = (jsval *)cx->stackPool.current->avail - frame.vars;
+ if (surplus < nvars) {
+ newsp = js_AllocRawStack(cx, nvars, NULL);
+ if (!newsp) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ if (newsp != sp) {
+ /* NB: Discontinuity between argv and vars. */
+ sp = frame.vars = newsp;
+ }
+ }
+
+ /* Push void to initialize local variables. */
+ do {
+ PUSH(JSVAL_VOID);
+ } while (--nvars != 0);
+ }
+ JS_ASSERT(nvars == 0);
+
+ /* Store the current sp in frame before calling fun. */
+ SAVE_SP(&frame);
+
+ /* Call the hook if present. */
+ if (hook && (native || script))
+ hookData = hook(cx, &frame, JS_TRUE, 0, cx->runtime->callHookData);
+
+ /* Call the function, either a native method or an interpreted script. */
+ if (native) {
+#ifdef DEBUG_NOT_THROWING
+ JSBool alreadyThrowing = cx->throwing;
+#endif
+
+#if JS_HAS_LVALUE_RETURN
+ /* Set by JS_SetCallReturnValue2, used to return reference types. */
+ cx->rval2set = JS_FALSE;
+#endif
+
+ /* If native, use caller varobj and scopeChain for eval. */
+ frame.varobj = fp->varobj;
+ frame.scopeChain = fp->scopeChain;
+ ok = native(cx, frame.thisp, argc, frame.argv, &frame.rval);
+ JS_RUNTIME_METER(cx->runtime, nativeCalls);
+#ifdef DEBUG_NOT_THROWING
+ if (ok && !alreadyThrowing)
+ ASSERT_NOT_THROWING(cx);
+#endif
+ } else if (script) {
+#ifdef DUMP_CALL_TABLE
+ LogCall(cx, *vp, argc, frame.argv);
+#endif
+ /* Use parent scope so js_GetCallObject can find the right "Call". */
+ frame.scopeChain = parent;
+ if (JSFUN_HEAVYWEIGHT_TEST(fun->flags)) {
+ /* Scope with a call object parented by the callee's parent. */
+ if (!js_GetCallObject(cx, &frame, parent)) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ }
+ ok = js_Interpret(cx, script->code, &v);
+ } else {
+ /* fun might be onerror trying to report a syntax error in itself. */
+ frame.scopeChain = NULL;
+ ok = JS_TRUE;
+ }
+
+out:
+ if (hookData) {
+ hook = cx->runtime->callHook;
+ if (hook)
+ hook(cx, &frame, JS_FALSE, &ok, hookData);
+ }
+
+ /* If frame has a call object, sync values and clear back-pointer. */
+ if (frame.callobj)
+ ok &= js_PutCallObject(cx, &frame);
+
+ /* If frame has an arguments object, sync values and clear back-pointer. */
+ if (frame.argsobj)
+ ok &= js_PutArgsObject(cx, &frame);
+
+ /* Restore cx->fp now that we're done releasing frame objects. */
+ cx->fp = fp;
+
+out2:
+ /* Pop everything we may have allocated off the stack. */
+ JS_ARENA_RELEASE(&cx->stackPool, mark);
+
+ /* Store the return value and restore sp just above it. */
+ *vp = frame.rval;
+ fp->sp = vp + 1;
+
+ /*
+ * Store the location of the JSOP_CALL or JSOP_EVAL that generated the
+ * return value, but only if this is an external (compiled from script
+ * source) call that has stack budget for the generating pc.
+ */
+ if (fp->script && !(flags & JSINVOKE_INTERNAL))
+ vp[-(intN)fp->script->depth] = (jsval)fp->pc;
+ return ok;
+
+bad:
+ js_ReportIsNotFunction(cx, vp, flags & JSINVOKE_FUNFLAGS);
+ ok = JS_FALSE;
+ goto out2;
+}
+
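+/*
+ * Call fval with obj as 'this' on behalf of native code: push fval, obj, and
+ * the actuals onto cx's stack, invoke with JSINVOKE_INTERNAL set, and root a
+ * GC-thing result so callers need not manage a root for it themselves.
+ */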
+JSBool
+js_InternalInvoke(JSContext *cx, JSObject *obj, jsval fval, uintN flags,
+ uintN argc, jsval *argv, jsval *rval)
+{
+ JSStackFrame *fp, *oldfp, frame;
+ jsval *oldsp, *sp;
+ void *mark;
+ uintN i;
+ JSBool ok;
+
+ fp = oldfp = cx->fp;
+ if (!fp) {
+ memset(&frame, 0, sizeof frame);
+ cx->fp = fp = &frame;
+ }
+ oldsp = fp->sp;
+ sp = js_AllocStack(cx, 2 + argc, &mark);
+ if (!sp) {
+ ok = JS_FALSE;
+ goto out;
+ }
+
+ PUSH(fval);
+ PUSH(OBJECT_TO_JSVAL(obj));
+ for (i = 0; i < argc; i++)
+ PUSH(argv[i]);
+ SAVE_SP(fp);
+ ok = js_Invoke(cx, argc, flags | JSINVOKE_INTERNAL);
+ if (ok) {
+ RESTORE_SP(fp);
+
+ /*
+ * Store *rval in a scoped local root if a scope is open, else in
+ * the lastInternalResult pigeon-hole GC root, solely so users of
+ * js_InternalInvoke and its direct and indirect (js_ValueToString for
+ * example) callers do not need to manage roots for local, temporary
+ * references to such results.
+ */
+ *rval = POP_OPND();
+ if (JSVAL_IS_GCTHING(*rval)) {
+ if (cx->localRootStack) {
+ if (js_PushLocalRoot(cx, cx->localRootStack, *rval) < 0)
+ ok = JS_FALSE;
+ } else {
+ cx->weakRoots.lastInternalResult = *rval;
+ }
+ }
+ }
+
+ js_FreeStack(cx, mark);
+out:
+ fp->sp = oldsp;
+ if (oldfp != fp)
+ cx->fp = oldfp;
+
+ return ok;
+}
+
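+/*
+ * Invoke a scripted getter or setter (fval) for obj[id], guarding against
+ * deep recursion and consulting the embedding's checkObjectAccess hook
+ * before dispatching through js_InternalCall.
+ */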
+JSBool
+js_InternalGetOrSet(JSContext *cx, JSObject *obj, jsid id, jsval fval,
+ JSAccessMode mode, uintN argc, jsval *argv, jsval *rval)
+{
+ int stackDummy;
+
+ /*
+ * js_InternalInvoke could result in another try to get or set the same id
+ * again, see bug 355497.
+ */
+ if (!JS_CHECK_STACK_SIZE(cx, stackDummy)) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_OVER_RECURSED);
+ return JS_FALSE;
+ }
+ /*
+ * Check general (not object-ops/class-specific) access from the running
+ * script to obj.id only if id has a scripted getter or setter that we're
+ * about to invoke. If we don't check this case, nothing else will -- no
+ * other native code has the chance to check.
+ *
+ * Contrast this non-native (scripted) case with native getter and setter
+ * accesses, where the native itself must do an access check, if security
+ * policies require it. We make a checkAccess or checkObjectAccess call
+ * back to the embedding program only in those cases where we're not going
+ * to call an embedding-defined native function, getter, setter, or class
+ * hook anyway. Where we do call such a native, there's no need for the
+ * engine to impose a separate access check callback on all embeddings --
+ * many embeddings have no security policy at all.
+ */
+ JS_ASSERT(mode == JSACC_READ || mode == JSACC_WRITE);
+ if (cx->runtime->checkObjectAccess &&
+ VALUE_IS_FUNCTION(cx, fval) &&
+ FUN_INTERPRETED((JSFunction *)
+ JS_GetPrivate(cx, JSVAL_TO_OBJECT(fval))) &&
+ !cx->runtime->checkObjectAccess(cx, obj, ID_TO_VALUE(id), mode,
+ &fval)) {
+ return JS_FALSE;
+ }
+
+ return js_InternalCall(cx, obj, fval, argc, argv, rval);
+}
+
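+/*
+ * Execute a compiled script against the given scope chain. When down is
+ * non-null (eval and the debugger API), the new frame shares the caller's
+ * argument and variable state; otherwise a fresh frame is set up with slots
+ * for the script's global variables.
+ */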
+JSBool
+js_Execute(JSContext *cx, JSObject *chain, JSScript *script,
+ JSStackFrame *down, uintN flags, jsval *result)
+{
+ JSInterpreterHook hook;
+ void *hookData, *mark;
+ JSStackFrame *oldfp, frame;
+ JSObject *obj, *tmp;
+ JSBool ok;
+
+ hook = cx->runtime->executeHook;
+ hookData = mark = NULL;
+ oldfp = cx->fp;
+ frame.script = script;
+ if (down) {
+ /* Propagate arg/var state for eval and the debugger API. */
+ frame.callobj = down->callobj;
+ frame.argsobj = down->argsobj;
+ frame.varobj = down->varobj;
+ frame.fun = down->fun;
+ frame.thisp = down->thisp;
+ frame.argc = down->argc;
+ frame.argv = down->argv;
+ frame.nvars = down->nvars;
+ frame.vars = down->vars;
+ frame.annotation = down->annotation;
+ frame.sharpArray = down->sharpArray;
+ } else {
+ frame.callobj = frame.argsobj = NULL;
+ obj = chain;
+ if (cx->options & JSOPTION_VAROBJFIX) {
+ while ((tmp = OBJ_GET_PARENT(cx, obj)) != NULL)
+ obj = tmp;
+ }
+ frame.varobj = obj;
+ frame.fun = NULL;
+ frame.thisp = chain;
+ frame.argc = 0;
+ frame.argv = NULL;
+ frame.nvars = script->numGlobalVars;
+ if (frame.nvars) {
+ frame.vars = js_AllocRawStack(cx, frame.nvars, &mark);
+ if (!frame.vars)
+ return JS_FALSE;
+ memset(frame.vars, 0, frame.nvars * sizeof(jsval));
+ } else {
+ frame.vars = NULL;
+ }
+ frame.annotation = NULL;
+ frame.sharpArray = NULL;
+ }
+ frame.rval = JSVAL_VOID;
+ frame.down = down;
+ frame.scopeChain = chain;
+ frame.pc = NULL;
+ frame.sp = oldfp ? oldfp->sp : NULL;
+ frame.spbase = NULL;
+ frame.sharpDepth = 0;
+ frame.flags = flags;
+ frame.dormantNext = NULL;
+ frame.xmlNamespace = NULL;
+ frame.blockChain = NULL;
+
+ /*
+ * Here we wrap the call to js_Interpret with code to (conditionally)
+ * save and restore the old stack frame chain into a chain of 'dormant'
+ * frame chains. Since we are replacing cx->fp, we were running into
+ * the problem that if GC was called under this frame, some of the GC
+ * things associated with the old frame chain (available here only in
+ * the C variable 'oldfp') were not rooted and were being collected.
+ *
+ * So, now we preserve the links to these 'dormant' frame chains in cx
+ * before calling js_Interpret and cleanup afterwards. The GC walks
+ * these dormant chains and marks objects in the same way that it marks
+ * objects in the primary cx->fp chain.
+ */
+ if (oldfp && oldfp != down) {
+ JS_ASSERT(!oldfp->dormantNext);
+ oldfp->dormantNext = cx->dormantFrameChain;
+ cx->dormantFrameChain = oldfp;
+ }
+
+ cx->fp = &frame;
+ if (hook)
+ hookData = hook(cx, &frame, JS_TRUE, 0, cx->runtime->executeHookData);
+
+ /*
+ * Use frame.rval, not result, so the last result stays rooted across any
+ * GC activations nested within this js_Interpret.
+ */
+ ok = js_Interpret(cx, script->code, &frame.rval);
+ *result = frame.rval;
+
+ if (hookData) {
+ hook = cx->runtime->executeHook;
+ if (hook)
+ hook(cx, &frame, JS_FALSE, &ok, hookData);
+ }
+ if (mark)
+ js_FreeRawStack(cx, mark);
+ cx->fp = oldfp;
+
+ if (oldfp && oldfp != down) {
+ JS_ASSERT(cx->dormantFrameChain == oldfp);
+ cx->dormantFrameChain = oldfp->dormantNext;
+ oldfp->dormantNext = NULL;
+ }
+
+ return ok;
+}
+
+#if JS_HAS_EXPORT_IMPORT
+/*
+ * If id is JSVAL_VOID, import all exported properties from obj.
+ */
+static JSBool
+ImportProperty(JSContext *cx, JSObject *obj, jsid id)
+{
+ JSBool ok;
+ JSIdArray *ida;
+ JSProperty *prop;
+ JSObject *obj2, *target, *funobj, *closure;
+ JSString *str;
+ uintN attrs;
+ jsint i;
+ jsval value;
+
+ if (JSVAL_IS_VOID(id)) {
+ ida = JS_Enumerate(cx, obj);
+ if (!ida)
+ return JS_FALSE;
+ ok = JS_TRUE;
+ if (ida->length == 0)
+ goto out;
+ } else {
+ ida = NULL;
+ if (!OBJ_LOOKUP_PROPERTY(cx, obj, id, &obj2, &prop))
+ return JS_FALSE;
+ if (!prop) {
+ str = js_DecompileValueGenerator(cx, JSDVG_IGNORE_STACK,
+ ID_TO_VALUE(id), NULL);
+ if (str)
+ js_ReportIsNotDefined(cx, JS_GetStringBytes(str));
+ return JS_FALSE;
+ }
+ ok = OBJ_GET_ATTRIBUTES(cx, obj, id, prop, &attrs);
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+ if (!ok)
+ return JS_FALSE;
+ if (!(attrs & JSPROP_EXPORTED)) {
+ str = js_DecompileValueGenerator(cx, JSDVG_IGNORE_STACK,
+ ID_TO_VALUE(id), NULL);
+ if (str) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_NOT_EXPORTED,
+ JS_GetStringBytes(str));
+ }
+ return JS_FALSE;
+ }
+ }
+
+ target = cx->fp->varobj;
+ i = 0;
+ do {
+ if (ida) {
+ id = ida->vector[i];
+ ok = OBJ_GET_ATTRIBUTES(cx, obj, id, NULL, &attrs);
+ if (!ok)
+ goto out;
+ if (!(attrs & JSPROP_EXPORTED))
+ continue;
+ }
+ ok = OBJ_CHECK_ACCESS(cx, obj, id, JSACC_IMPORT, &value, &attrs);
+ if (!ok)
+ goto out;
+ if (VALUE_IS_FUNCTION(cx, value)) {
+ funobj = JSVAL_TO_OBJECT(value);
+ closure = js_CloneFunctionObject(cx, funobj, obj);
+ if (!closure) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ value = OBJECT_TO_JSVAL(closure);
+ }
+
+ /*
+ * Handle the case of importing a property that refers to a local
+ * variable or formal parameter of a function activation. These
+ * properties are accessed by opcodes using stack slot numbers
+ * generated by the compiler rather than runtime name-lookup. These
+ * local references, therefore, bypass the normal scope chain lookup.
+ * So, instead of defining a new property in the activation object,
+ * modify the existing value in the stack slot.
+ */
+ if (OBJ_GET_CLASS(cx, target) == &js_CallClass) {
+ ok = OBJ_LOOKUP_PROPERTY(cx, target, id, &obj2, &prop);
+ if (!ok)
+ goto out;
+ } else {
+ prop = NULL;
+ }
+ if (prop && target == obj2) {
+ ok = OBJ_SET_PROPERTY(cx, target, id, &value);
+ } else {
+ ok = OBJ_DEFINE_PROPERTY(cx, target, id, value, NULL, NULL,
+ attrs & ~(JSPROP_EXPORTED |
+ JSPROP_GETTER |
+ JSPROP_SETTER),
+ NULL);
+ }
+ if (prop)
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+ if (!ok)
+ goto out;
+ } while (ida && ++i < ida->length);
+
+out:
+ if (ida)
+ JS_DestroyIdArray(cx, ida);
+ return ok;
+}
+#endif /* JS_HAS_EXPORT_IMPORT */
+
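+/*
+ * Decide whether redeclaring obj[id] with the given attrs is allowed, merits
+ * a strict warning, or is an error (e.g. when either the old or new property
+ * is read-only, or a getter/setter would clobber a permanent property).
+ */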
+JSBool
+js_CheckRedeclaration(JSContext *cx, JSObject *obj, jsid id, uintN attrs,
+ JSObject **objp, JSProperty **propp)
+{
+ JSObject *obj2;
+ JSProperty *prop;
+ uintN oldAttrs, report;
+ JSBool isFunction;
+ jsval value;
+ const char *type, *name;
+
+ if (!OBJ_LOOKUP_PROPERTY(cx, obj, id, &obj2, &prop))
+ return JS_FALSE;
+ if (propp) {
+ *objp = obj2;
+ *propp = prop;
+ }
+ if (!prop)
+ return JS_TRUE;
+
+ /*
+ * Use prop as a speedup hint to OBJ_GET_ATTRIBUTES, but drop it on error.
+ * An assertion at label bad: will insist that it is null.
+ */
+ if (!OBJ_GET_ATTRIBUTES(cx, obj2, id, prop, &oldAttrs)) {
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+#ifdef DEBUG
+ prop = NULL;
+#endif
+ goto bad;
+ }
+
+ /*
+ * From here, return true, or else goto bad on failure to null out params.
+ * If our caller doesn't want prop, drop it (we don't need it any longer).
+ */
+ if (!propp) {
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+ prop = NULL;
+ }
+
+ /* If either property is readonly, we have an error. */
+ report = ((oldAttrs | attrs) & JSPROP_READONLY)
+ ? JSREPORT_ERROR
+ : JSREPORT_WARNING | JSREPORT_STRICT;
+
+ if (report != JSREPORT_ERROR) {
+ /*
+ * Allow redeclaration of variables and functions, but insist that the
+ * new value is not a getter if the old value was, ditto for setters --
+ * unless prop is impermanent (in which case anyone could delete it and
+ * redefine it, willy-nilly).
+ */
+ if (!(attrs & (JSPROP_GETTER | JSPROP_SETTER)))
+ return JS_TRUE;
+ if ((~(oldAttrs ^ attrs) & (JSPROP_GETTER | JSPROP_SETTER)) == 0)
+ return JS_TRUE;
+ if (!(oldAttrs & JSPROP_PERMANENT))
+ return JS_TRUE;
+ report = JSREPORT_ERROR;
+ }
+
+ isFunction = (oldAttrs & (JSPROP_GETTER | JSPROP_SETTER)) != 0;
+ if (!isFunction) {
+ if (!OBJ_GET_PROPERTY(cx, obj, id, &value))
+ goto bad;
+ isFunction = VALUE_IS_FUNCTION(cx, value);
+ }
+ type = (oldAttrs & attrs & JSPROP_GETTER)
+ ? js_getter_str
+ : (oldAttrs & attrs & JSPROP_SETTER)
+ ? js_setter_str
+ : (oldAttrs & JSPROP_READONLY)
+ ? js_const_str
+ : isFunction
+ ? js_function_str
+ : js_var_str;
+ name = js_AtomToPrintableString(cx, JSID_TO_ATOM(id));
+ if (!name)
+ goto bad;
+ return JS_ReportErrorFlagsAndNumber(cx, report,
+ js_GetErrorMessage, NULL,
+ JSMSG_REDECLARED_VAR,
+ type, name);
+
+bad:
+ if (propp) {
+ *objp = NULL;
+ *propp = NULL;
+ }
+ JS_ASSERT(!prop);
+ return JS_FALSE;
+}
+
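+/*
+ * ECMA strict equality (===): no type conversion; strings compare by
+ * contents, doubles and ints compare numerically, and everything else
+ * compares by tag and value bits.
+ */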
+JSBool
+js_StrictlyEqual(jsval lval, jsval rval)
+{
+ jsval ltag = JSVAL_TAG(lval), rtag = JSVAL_TAG(rval);
+ jsdouble ld, rd;
+
+ if (ltag == rtag) {
+ if (ltag == JSVAL_STRING) {
+ JSString *lstr = JSVAL_TO_STRING(lval),
+ *rstr = JSVAL_TO_STRING(rval);
+ return js_EqualStrings(lstr, rstr);
+ }
+ if (ltag == JSVAL_DOUBLE) {
+ ld = *JSVAL_TO_DOUBLE(lval);
+ rd = *JSVAL_TO_DOUBLE(rval);
+ return JSDOUBLE_COMPARE(ld, ==, rd, JS_FALSE);
+ }
+ return lval == rval;
+ }
+ if (ltag == JSVAL_DOUBLE && JSVAL_IS_INT(rval)) {
+ ld = *JSVAL_TO_DOUBLE(lval);
+ rd = JSVAL_TO_INT(rval);
+ return JSDOUBLE_COMPARE(ld, ==, rd, JS_FALSE);
+ }
+ if (JSVAL_IS_INT(lval) && rtag == JSVAL_DOUBLE) {
+ ld = JSVAL_TO_INT(lval);
+ rd = *JSVAL_TO_DOUBLE(rval);
+ return JSDOUBLE_COMPARE(ld, ==, rd, JS_FALSE);
+ }
+ return lval == rval;
+}
+
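+/*
+ * Implement 'new': fetch the callee's .prototype (rooted via vp[1]), create
+ * the new object, call the callee with JSINVOKE_CONSTRUCT, and keep the new
+ * object as the result unless the constructor returned an object itself.
+ */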
+JSBool
+js_InvokeConstructor(JSContext *cx, jsval *vp, uintN argc)
+{
+ JSFunction *fun;
+ JSObject *obj, *obj2, *proto, *parent;
+ jsval lval, rval;
+ JSClass *clasp, *funclasp;
+
+ fun = NULL;
+ obj2 = NULL;
+ lval = *vp;
+ if (!JSVAL_IS_OBJECT(lval) ||
+ (obj2 = JSVAL_TO_OBJECT(lval)) == NULL ||
+ /* XXX clean up to avoid special cases above ObjectOps layer */
+ OBJ_GET_CLASS(cx, obj2) == &js_FunctionClass ||
+ !obj2->map->ops->construct)
+ {
+ fun = js_ValueToFunction(cx, vp, JSV2F_CONSTRUCT);
+ if (!fun)
+ return JS_FALSE;
+ }
+
+ clasp = &js_ObjectClass;
+ if (!obj2) {
+ proto = parent = NULL;
+ fun = NULL;
+ } else {
+ /*
+ * Get the constructor prototype object for this function.
+ * Use the nominal 'this' parameter slot, vp[1], as a local
+ * root to protect this prototype, in case it has no other
+ * strong refs.
+ */
+ if (!OBJ_GET_PROPERTY(cx, obj2,
+ ATOM_TO_JSID(cx->runtime->atomState
+ .classPrototypeAtom),
+ &vp[1])) {
+ return JS_FALSE;
+ }
+ rval = vp[1];
+ proto = JSVAL_IS_OBJECT(rval) ? JSVAL_TO_OBJECT(rval) : NULL;
+ parent = OBJ_GET_PARENT(cx, obj2);
+
+ if (OBJ_GET_CLASS(cx, obj2) == &js_FunctionClass) {
+ funclasp = ((JSFunction *)JS_GetPrivate(cx, obj2))->clasp;
+ if (funclasp)
+ clasp = funclasp;
+ }
+ }
+ obj = js_NewObject(cx, clasp, proto, parent);
+ if (!obj)
+ return JS_FALSE;
+
+ /* Now we have an object with a constructor method; call it. */
+ vp[1] = OBJECT_TO_JSVAL(obj);
+ if (!js_Invoke(cx, argc, JSINVOKE_CONSTRUCT)) {
+ cx->weakRoots.newborn[GCX_OBJECT] = NULL;
+ return JS_FALSE;
+ }
+
+ /* Check the return value and if it's primitive, force it to be obj. */
+ rval = *vp;
+ if (JSVAL_IS_PRIMITIVE(rval)) {
+ if (!fun) {
+ /* native [[Construct]] returning primitive is error */
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_NEW_RESULT,
+ js_ValueToPrintableString(cx, rval));
+ return JS_FALSE;
+ }
+ *vp = OBJECT_TO_JSVAL(obj);
+ }
+
+ JS_RUNTIME_METER(cx->runtime, constructs);
+ return JS_TRUE;
+}
+
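+/*
+ * Helpers (this and InternNonIntElementId below) that turn a non-integer
+ * element index value into a jsid: object indexes (XML QName/AnyName) become
+ * object ids, anything else is atomized to a string id.
+ */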
+static JSBool
+InternStringElementId(JSContext *cx, jsval idval, jsid *idp)
+{
+ JSAtom *atom;
+
+ atom = js_ValueToStringAtom(cx, idval);
+ if (!atom)
+ return JS_FALSE;
+ *idp = ATOM_TO_JSID(atom);
+ return JS_TRUE;
+}
+
+static JSBool
+InternNonIntElementId(JSContext *cx, jsval idval, jsid *idp)
+{
+ JS_ASSERT(!JSVAL_IS_INT(idval));
+
+#if JS_HAS_XML_SUPPORT
+ if (JSVAL_IS_OBJECT(idval)) {
+ *idp = OBJECT_JSVAL_TO_JSID(idval);
+ return JS_TRUE;
+ }
+#endif
+
+ return InternStringElementId(cx, idval, idp);
+}
+
+#if JS_HAS_XML_SUPPORT
+#define CHECK_ELEMENT_ID(obj, id) \
+ JS_BEGIN_MACRO \
+ if (JSID_IS_OBJECT(id) && !OBJECT_IS_XML(cx, obj)) { \
+ SAVE_SP_AND_PC(fp); \
+ ok = InternStringElementId(cx, OBJECT_JSID_TO_JSVAL(id), &id); \
+ if (!ok) \
+ goto out; \
+ } \
+ JS_END_MACRO
+
+#else
+#define CHECK_ELEMENT_ID(obj, id) JS_ASSERT(!JSID_IS_OBJECT(id))
+#endif
+
+#ifndef MAX_INTERP_LEVEL
+#if defined(XP_OS2)
+#define MAX_INTERP_LEVEL 250
+#else
+#define MAX_INTERP_LEVEL 1000
+#endif
+#endif
+
+#define MAX_INLINE_CALL_COUNT 1000
+
+/*
+ * Threaded interpretation via computed goto appears to be well-supported by
+ * GCC 3 and higher. IBM's C compiler when run with the right options (e.g.,
+ * -qlanglvl=extended) also supports threading. Ditto the SunPro C compiler.
+ * Currently it's broken for JS_VERSION < 160, though this isn't worth fixing.
+ * Add your compiler support macros here.
+ */
+#if JS_VERSION >= 160 && ( \
+ __GNUC__ >= 3 || \
+ (__IBMC__ >= 700 && defined __IBM_COMPUTED_GOTO) || \
+ __SUNPRO_C >= 0x570)
+# define JS_THREADED_INTERP 1
+#else
+# undef JS_THREADED_INTERP
+#endif
+
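+/*
+ * The bytecode interpreter proper. Dispatch is via a switch, or via computed
+ * goto when JS_THREADED_INTERP is defined; the script's completion value
+ * (stored by JSOP_POPV) is returned in *result.
+ */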
+JSBool
+js_Interpret(JSContext *cx, jsbytecode *pc, jsval *result)
+{
+ JSRuntime *rt;
+ JSStackFrame *fp;
+ JSScript *script;
+ uintN inlineCallCount;
+ JSObject *obj, *obj2, *parent;
+ JSVersion currentVersion, originalVersion;
+ JSBranchCallback onbranch;
+ JSBool ok, cond;
+ JSTrapHandler interruptHandler;
+ jsint depth, len;
+ jsval *sp, *newsp;
+ void *mark;
+ jsbytecode *endpc, *pc2;
+ JSOp op, op2;
+ jsatomid atomIndex;
+ JSAtom *atom;
+ uintN argc, attrs, flags, slot;
+ jsval *vp, lval, rval, ltmp, rtmp;
+ jsid id;
+ JSObject *withobj, *iterobj;
+ JSProperty *prop;
+ JSScopeProperty *sprop;
+ JSString *str, *str2;
+ jsint i, j;
+ jsdouble d, d2;
+ JSClass *clasp;
+ JSFunction *fun;
+ JSType type;
+#if !defined JS_THREADED_INTERP && defined DEBUG
+ FILE *tracefp = NULL;
+#endif
+#if JS_HAS_EXPORT_IMPORT
+ JSIdArray *ida;
+#endif
+ jsint low, high, off, npairs;
+ JSBool match;
+#if JS_HAS_GETTER_SETTER
+ JSPropertyOp getter, setter;
+#endif
+ int stackDummy;
+
+#ifdef __GNUC__
+# define JS_EXTENSION __extension__
+# define JS_EXTENSION_(s) __extension__ ({ s; })
+#else
+# define JS_EXTENSION
+# define JS_EXTENSION_(s) s
+#endif
+
+#ifdef JS_THREADED_INTERP
+ static void *normalJumpTable[] = {
+# define OPDEF(op,val,name,token,length,nuses,ndefs,prec,format) \
+ JS_EXTENSION &&L_##op,
+# include "jsopcode.tbl"
+# undef OPDEF
+ };
+
+ static void *interruptJumpTable[] = {
+# define OPDEF(op,val,name,token,length,nuses,ndefs,prec,format) \
+ ((op != JSOP_PUSHOBJ) \
+ ? JS_EXTENSION &&interrupt \
+ : JS_EXTENSION &&L_JSOP_PUSHOBJ),
+# include "jsopcode.tbl"
+# undef OPDEF
+ };
+
+ register void **jumpTable = normalJumpTable;
+
+# define DO_OP() JS_EXTENSION_(goto *jumpTable[op])
+# define DO_NEXT_OP(n) do { op = *(pc += (n)); DO_OP(); } while (0)
+# define BEGIN_CASE(OP) L_##OP:
+# define END_CASE(OP) DO_NEXT_OP(OP##_LENGTH);
+# define END_VARLEN_CASE DO_NEXT_OP(len);
+# define EMPTY_CASE(OP) BEGIN_CASE(OP) op = *++pc; DO_OP();
+#else
+# define DO_OP() goto do_op
+# define DO_NEXT_OP(n) goto advance_pc
+# define BEGIN_CASE(OP) case OP:
+# define END_CASE(OP) break;
+# define END_VARLEN_CASE break;
+# define EMPTY_CASE(OP) BEGIN_CASE(OP) END_CASE(OP)
+#endif
+
+ *result = JSVAL_VOID;
+ rt = cx->runtime;
+
+ /* Set registerized frame pointer and derived script pointer. */
+ fp = cx->fp;
+ script = fp->script;
+ JS_ASSERT(script->length != 0);
+
+ /* Count of JS function calls that nest in this C js_Interpret frame. */
+ inlineCallCount = 0;
+
+ /*
+ * Optimized Get and SetVersion for proper script language versioning.
+ *
+ * If any native method or JSClass/JSObjectOps hook calls js_SetVersion
+ * and changes cx->version, the effect will "stick" and we will stop
+ * maintaining currentVersion. This is relied upon by testsuites, for
+ * the most part -- web browsers select version before compiling and not
+ * at run-time.
+ */
+ currentVersion = script->version;
+ originalVersion = cx->version;
+ if (currentVersion != originalVersion)
+ js_SetVersion(cx, currentVersion);
+
+#ifdef __GNUC__
+ flags = 0; /* suppress gcc warnings */
+ id = 0;
+#endif
+
+ /*
+ * Prepare to call a user-supplied branch handler, and abort the script
+ * if it returns false. We reload onbranch after calling out to native
+ * functions (but not to getters, setters, or other native hooks).
+ */
+#define LOAD_BRANCH_CALLBACK(cx) (onbranch = (cx)->branchCallback)
+
+ LOAD_BRANCH_CALLBACK(cx);
+#define CHECK_BRANCH(len) \
+ JS_BEGIN_MACRO \
+ if (len <= 0 && onbranch) { \
+ SAVE_SP_AND_PC(fp); \
+ if (!(ok = (*onbranch)(cx, script))) \
+ goto out; \
+ } \
+ JS_END_MACRO
+
+ /*
+ * Load the debugger's interrupt hook here and after calling out to native
+ * functions (but not to getters, setters, or other native hooks), so we do
+ * not have to reload it each time through the interpreter loop -- we hope
+ * the compiler can keep it in a register when it is non-null.
+ */
+#ifdef JS_THREADED_INTERP
+# define LOAD_JUMP_TABLE() \
+ (jumpTable = interruptHandler ? interruptJumpTable : normalJumpTable)
+#else
+# define LOAD_JUMP_TABLE() /* nothing */
+#endif
+
+#define LOAD_INTERRUPT_HANDLER(rt) \
+ JS_BEGIN_MACRO \
+ interruptHandler = (rt)->interruptHandler; \
+ LOAD_JUMP_TABLE(); \
+ JS_END_MACRO
+
+ LOAD_INTERRUPT_HANDLER(rt);
+
+ /* Check for too much js_Interpret nesting, or too deep a C stack. */
+ if (++cx->interpLevel == MAX_INTERP_LEVEL ||
+ !JS_CHECK_STACK_SIZE(cx, stackDummy)) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_OVER_RECURSED);
+ ok = JS_FALSE;
+ goto out2;
+ }
+
+ /*
+ * Allocate operand and pc stack slots for the script's worst-case depth,
+ * unless we're called to interpret a part of an already active script, a
+ * filtering predicate expression for example.
+ */
+ depth = (jsint) script->depth;
+ if (JS_LIKELY(!fp->spbase)) {
+ newsp = js_AllocRawStack(cx, (uintN)(2 * depth), &mark);
+ if (!newsp) {
+ ok = JS_FALSE;
+ goto out2;
+ }
+ sp = newsp + depth;
+ fp->spbase = sp;
+ SAVE_SP(fp);
+ } else {
+ sp = fp->sp;
+ JS_ASSERT(JS_UPTRDIFF(sp, fp->spbase) <= depth * sizeof(jsval));
+ newsp = fp->spbase - depth;
+ mark = NULL;
+ }
+
+ /*
+ * To support generator_throw and to catch ignored exceptions, fail right
+ * away if cx->throwing is set. If no exception is pending, null obj in
+ * case a callable object is being sent into a yield expression, and the
+ * yield's result is invoked.
+ */
+ ok = !cx->throwing;
+ if (!ok) {
+#ifdef DEBUG_NOT_THROWING
+ printf("JS INTERPRETER CALLED WITH PENDING EXCEPTION %lx\n",
+ (unsigned long) cx->exception);
+#endif
+ goto out;
+ }
+ obj = NULL;
+
+#ifdef JS_THREADED_INTERP
+
+ /*
+ * This is a loop, but it does not look like a loop. The loop-closing
+ * jump is distributed throughout interruptJumpTable, and comes back to
+ * the interrupt label. The dispatch on op is through normalJumpTable.
+ * The trick is LOAD_INTERRUPT_HANDLER setting jumpTable appropriately.
+ *
+ * It is important that "op" be initialized before the interrupt label
+ * because it is possible for "op" to be specially assigned during the
+ * normal processing of an opcode while looping (in particular, this
+ * happens in JSOP_TRAP while debugging). We rely on DO_NEXT_OP to
+ * correctly manage "op" in all other cases.
+ */
+ op = (JSOp) *pc;
+ if (interruptHandler) {
+interrupt:
+ SAVE_SP_AND_PC(fp);
+ switch (interruptHandler(cx, script, pc, &rval,
+ rt->interruptHandlerData)) {
+ case JSTRAP_ERROR:
+ ok = JS_FALSE;
+ goto out;
+ case JSTRAP_CONTINUE:
+ break;
+ case JSTRAP_RETURN:
+ fp->rval = rval;
+ goto out;
+ case JSTRAP_THROW:
+ cx->throwing = JS_TRUE;
+ cx->exception = rval;
+ ok = JS_FALSE;
+ goto out;
+ default:;
+ }
+ LOAD_INTERRUPT_HANDLER(rt);
+ }
+
+ JS_ASSERT((uintN)op < (uintN)JSOP_LIMIT);
+ JS_EXTENSION_(goto *normalJumpTable[op]);
+
+#else /* !JS_THREADED_INTERP */
+
+ for (;;) {
+ op = (JSOp) *pc;
+ do_op:
+ len = js_CodeSpec[op].length;
+
+#ifdef DEBUG
+ tracefp = (FILE *) cx->tracefp;
+ if (tracefp) {
+ intN nuses, n;
+
+ fprintf(tracefp, "%4u: ", js_PCToLineNumber(cx, script, pc));
+ js_Disassemble1(cx, script, pc,
+ PTRDIFF(pc, script->code, jsbytecode), JS_FALSE,
+ tracefp);
+ nuses = js_CodeSpec[op].nuses;
+ if (nuses) {
+ SAVE_SP_AND_PC(fp);
+ for (n = -nuses; n < 0; n++) {
+ str = js_DecompileValueGenerator(cx, n, sp[n], NULL);
+ if (str) {
+ fprintf(tracefp, "%s %s",
+ (n == -nuses) ? " inputs:" : ",",
+ JS_GetStringBytes(str));
+ }
+ }
+ fprintf(tracefp, " @ %d\n", sp - fp->spbase);
+ }
+ }
+#endif /* DEBUG */
+
+ if (interruptHandler && op != JSOP_PUSHOBJ) {
+ SAVE_SP_AND_PC(fp);
+ switch (interruptHandler(cx, script, pc, &rval,
+ rt->interruptHandlerData)) {
+ case JSTRAP_ERROR:
+ ok = JS_FALSE;
+ goto out;
+ case JSTRAP_CONTINUE:
+ break;
+ case JSTRAP_RETURN:
+ fp->rval = rval;
+ goto out;
+ case JSTRAP_THROW:
+ cx->throwing = JS_TRUE;
+ cx->exception = rval;
+ ok = JS_FALSE;
+ goto out;
+ default:;
+ }
+ LOAD_INTERRUPT_HANDLER(rt);
+ }
+
+ switch (op) {
+
+#endif /* !JS_THREADED_INTERP */
+
+ BEGIN_CASE(JSOP_STOP)
+ goto out;
+
+ EMPTY_CASE(JSOP_NOP)
+
+ BEGIN_CASE(JSOP_GROUP)
+ obj = NULL;
+ END_CASE(JSOP_GROUP)
+
+ BEGIN_CASE(JSOP_PUSH)
+ PUSH_OPND(JSVAL_VOID);
+ END_CASE(JSOP_PUSH)
+
+ BEGIN_CASE(JSOP_POP)
+ sp--;
+ END_CASE(JSOP_POP)
+
+ BEGIN_CASE(JSOP_POP2)
+ sp -= 2;
+ END_CASE(JSOP_POP2)
+
+ BEGIN_CASE(JSOP_SWAP)
+ vp = sp - depth; /* swap generating pc's for the decompiler */
+ ltmp = vp[-1];
+ vp[-1] = vp[-2];
+ vp[-2] = ltmp;
+ rtmp = sp[-1];
+ sp[-1] = sp[-2];
+ sp[-2] = rtmp;
+ END_CASE(JSOP_SWAP)
+
+ BEGIN_CASE(JSOP_POPV)
+ *result = POP_OPND();
+ END_CASE(JSOP_POPV)
+
+ BEGIN_CASE(JSOP_ENTERWITH)
+ FETCH_OBJECT(cx, -1, rval, obj);
+ SAVE_SP_AND_PC(fp);
+ OBJ_TO_INNER_OBJECT(cx, obj);
+ if (!obj || !(obj2 = js_GetScopeChain(cx, fp))) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ withobj = js_NewWithObject(cx, obj, obj2, sp - fp->spbase - 1);
+ if (!withobj) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ fp->scopeChain = withobj;
+ STORE_OPND(-1, OBJECT_TO_JSVAL(withobj));
+ END_CASE(JSOP_ENTERWITH)
+
+ BEGIN_CASE(JSOP_LEAVEWITH)
+ rval = POP_OPND();
+ JS_ASSERT(JSVAL_IS_OBJECT(rval));
+ withobj = JSVAL_TO_OBJECT(rval);
+ JS_ASSERT(OBJ_GET_CLASS(cx, withobj) == &js_WithClass);
+ fp->scopeChain = OBJ_GET_PARENT(cx, withobj);
+ JS_SetPrivate(cx, withobj, NULL);
+ END_CASE(JSOP_LEAVEWITH)
+
+ BEGIN_CASE(JSOP_SETRVAL)
+ ASSERT_NOT_THROWING(cx);
+ fp->rval = POP_OPND();
+ END_CASE(JSOP_SETRVAL)
+
+ BEGIN_CASE(JSOP_RETURN)
+ CHECK_BRANCH(-1);
+ fp->rval = POP_OPND();
+ /* FALL THROUGH */
+
+ BEGIN_CASE(JSOP_RETRVAL) /* fp->rval already set */
+ ASSERT_NOT_THROWING(cx);
+ if (inlineCallCount)
+ inline_return:
+ {
+ JSInlineFrame *ifp = (JSInlineFrame *) fp;
+ void *hookData = ifp->hookData;
+
+ /*
+ * If fp has blocks on its scope chain, home their locals now,
+ * before calling any debugger hook, and before freeing stack.
+ * This matches the order of block putting and hook calling in
+ * the "out-of-line" return code at the bottom of js_Interpret
+ * and in js_Invoke.
+ */
+ if (fp->flags & JSFRAME_POP_BLOCKS) {
+ SAVE_SP_AND_PC(fp);
+ ok &= PutBlockObjects(cx, fp);
+ }
+
+ if (hookData) {
+ JSInterpreterHook hook = rt->callHook;
+ if (hook) {
+ SAVE_SP_AND_PC(fp);
+ hook(cx, fp, JS_FALSE, &ok, hookData);
+ LOAD_INTERRUPT_HANDLER(rt);
+ }
+ }
+
+ /*
+ * If fp has a call object, sync values and clear the back-
+ * pointer. This can happen for a lightweight function if it
+ * calls eval unexpectedly (in a way that is hidden from the
+ * compiler). See bug 325540.
+ */
+ if (fp->callobj) {
+ SAVE_SP_AND_PC(fp);
+ ok &= js_PutCallObject(cx, fp);
+ }
+
+ if (fp->argsobj) {
+ SAVE_SP_AND_PC(fp);
+ ok &= js_PutArgsObject(cx, fp);
+ }
+
+ /* Restore context version only if callee hasn't set version. */
+ if (JS_LIKELY(cx->version == currentVersion)) {
+ currentVersion = ifp->callerVersion;
+ if (currentVersion != cx->version)
+ js_SetVersion(cx, currentVersion);
+ }
+
+ /* Store the return value in the caller's operand frame. */
+ vp = ifp->rvp;
+ *vp = fp->rval;
+
+ /* Restore cx->fp and release the inline frame's space. */
+ cx->fp = fp = fp->down;
+ JS_ARENA_RELEASE(&cx->stackPool, ifp->mark);
+
+ /* Restore sp to point just above the return value. */
+ fp->sp = vp + 1;
+ RESTORE_SP(fp);
+
+ /* Restore the calling script's interpreter registers. */
+ obj = NULL;
+ script = fp->script;
+ depth = (jsint) script->depth;
+ pc = fp->pc;
+#ifndef JS_THREADED_INTERP
+ endpc = script->code + script->length;
+#endif
+
+ /* Store the generating pc for the return value. */
+ vp[-depth] = (jsval)pc;
+
+ /* Resume execution in the calling frame. */
+ inlineCallCount--;
+ if (JS_LIKELY(ok)) {
+ JS_ASSERT(js_CodeSpec[*pc].length == JSOP_CALL_LENGTH);
+ len = JSOP_CALL_LENGTH;
+ DO_NEXT_OP(len);
+ }
+ }
+ goto out;
+
+ BEGIN_CASE(JSOP_DEFAULT)
+ (void) POP();
+ /* FALL THROUGH */
+ BEGIN_CASE(JSOP_GOTO)
+ len = GET_JUMP_OFFSET(pc);
+ CHECK_BRANCH(len);
+ END_VARLEN_CASE
+
+ BEGIN_CASE(JSOP_IFEQ)
+ POP_BOOLEAN(cx, rval, cond);
+ if (cond == JS_FALSE) {
+ len = GET_JUMP_OFFSET(pc);
+ CHECK_BRANCH(len);
+ DO_NEXT_OP(len);
+ }
+ END_CASE(JSOP_IFEQ)
+
+ BEGIN_CASE(JSOP_IFNE)
+ POP_BOOLEAN(cx, rval, cond);
+ if (cond != JS_FALSE) {
+ len = GET_JUMP_OFFSET(pc);
+ CHECK_BRANCH(len);
+ DO_NEXT_OP(len);
+ }
+ END_CASE(JSOP_IFNE)
+
+ BEGIN_CASE(JSOP_OR)
+ POP_BOOLEAN(cx, rval, cond);
+ if (cond == JS_TRUE) {
+ len = GET_JUMP_OFFSET(pc);
+ PUSH_OPND(rval);
+ DO_NEXT_OP(len);
+ }
+ END_CASE(JSOP_OR)
+
+ BEGIN_CASE(JSOP_AND)
+ POP_BOOLEAN(cx, rval, cond);
+ if (cond == JS_FALSE) {
+ len = GET_JUMP_OFFSET(pc);
+ PUSH_OPND(rval);
+ DO_NEXT_OP(len);
+ }
+ END_CASE(JSOP_AND)
+
+ BEGIN_CASE(JSOP_DEFAULTX)
+ (void) POP();
+ /* FALL THROUGH */
+ BEGIN_CASE(JSOP_GOTOX)
+ len = GET_JUMPX_OFFSET(pc);
+ CHECK_BRANCH(len);
+ END_VARLEN_CASE
+
+ BEGIN_CASE(JSOP_IFEQX)
+ POP_BOOLEAN(cx, rval, cond);
+ if (cond == JS_FALSE) {
+ len = GET_JUMPX_OFFSET(pc);
+ CHECK_BRANCH(len);
+ DO_NEXT_OP(len);
+ }
+ END_CASE(JSOP_IFEQX)
+
+ BEGIN_CASE(JSOP_IFNEX)
+ POP_BOOLEAN(cx, rval, cond);
+ if (cond != JS_FALSE) {
+ len = GET_JUMPX_OFFSET(pc);
+ CHECK_BRANCH(len);
+ DO_NEXT_OP(len);
+ }
+ END_CASE(JSOP_IFNEX)
+
+ BEGIN_CASE(JSOP_ORX)
+ POP_BOOLEAN(cx, rval, cond);
+ if (cond == JS_TRUE) {
+ len = GET_JUMPX_OFFSET(pc);
+ PUSH_OPND(rval);
+ DO_NEXT_OP(len);
+ }
+ END_CASE(JSOP_ORX)
+
+ BEGIN_CASE(JSOP_ANDX)
+ POP_BOOLEAN(cx, rval, cond);
+ if (cond == JS_FALSE) {
+ len = GET_JUMPX_OFFSET(pc);
+ PUSH_OPND(rval);
+ DO_NEXT_OP(len);
+ }
+ END_CASE(JSOP_ANDX)
+
+/*
+ * If the index value at sp[n] is not an int that fits in a jsval, it could
+ * be an object (an XML QName, AttributeName, or AnyName), but only if we are
+ * compiling with JS_HAS_XML_SUPPORT. Otherwise convert the index value to a
+ * string atom id.
+ */
+#define FETCH_ELEMENT_ID(n, id) \
+ JS_BEGIN_MACRO \
+ jsval idval_ = FETCH_OPND(n); \
+ if (JSVAL_IS_INT(idval_)) { \
+ id = INT_JSVAL_TO_JSID(idval_); \
+ } else { \
+ SAVE_SP_AND_PC(fp); \
+ ok = InternNonIntElementId(cx, idval_, &id); \
+ if (!ok) \
+ goto out; \
+ } \
+ JS_END_MACRO
+
+ BEGIN_CASE(JSOP_IN)
+ SAVE_SP_AND_PC(fp);
+ rval = FETCH_OPND(-1);
+ if (JSVAL_IS_PRIMITIVE(rval)) {
+ str = js_DecompileValueGenerator(cx, -1, rval, NULL);
+ if (str) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_IN_NOT_OBJECT,
+ JS_GetStringBytes(str));
+ }
+ ok = JS_FALSE;
+ goto out;
+ }
+ obj = JSVAL_TO_OBJECT(rval);
+ FETCH_ELEMENT_ID(-2, id);
+ CHECK_ELEMENT_ID(obj, id);
+ ok = OBJ_LOOKUP_PROPERTY(cx, obj, id, &obj2, &prop);
+ if (!ok)
+ goto out;
+ sp--;
+ STORE_OPND(-1, BOOLEAN_TO_JSVAL(prop != NULL));
+ if (prop)
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+ END_CASE(JSOP_IN)
+
+ BEGIN_CASE(JSOP_FOREACH)
+ flags = JSITER_ENUMERATE | JSITER_FOREACH;
+ goto value_to_iter;
+
+#if JS_HAS_DESTRUCTURING
+ BEGIN_CASE(JSOP_FOREACHKEYVAL)
+ flags = JSITER_ENUMERATE | JSITER_FOREACH | JSITER_KEYVALUE;
+ goto value_to_iter;
+#endif
+
+ BEGIN_CASE(JSOP_FORIN)
+ /*
+ * Set JSITER_ENUMERATE to indicate that the for-in loop should use
+ * the enumeration protocol's iterator for compatibility if an
+ * explicit iterator is not given via the optional __iterator__
+ * method.
+ */
+ flags = JSITER_ENUMERATE;
+
+ value_to_iter:
+ JS_ASSERT(sp > fp->spbase);
+ SAVE_SP_AND_PC(fp);
+ ok = js_ValueToIterator(cx, flags, &sp[-1]);
+ if (!ok)
+ goto out;
+ JS_ASSERT(!JSVAL_IS_PRIMITIVE(sp[-1]));
+ JS_ASSERT(JSOP_FORIN_LENGTH == js_CodeSpec[op].length);
+ END_CASE(JSOP_FORIN)
+
+ BEGIN_CASE(JSOP_FORPROP)
+ /*
+ * Handle JSOP_FORPROP first, so the cost of the goto do_forinloop
+ * is not paid for the more common cases.
+ */
+ lval = FETCH_OPND(-1);
+ atom = GET_ATOM(cx, script, pc);
+ id = ATOM_TO_JSID(atom);
+ i = -2;
+ goto do_forinloop;
+
+ BEGIN_CASE(JSOP_FORNAME)
+ atom = GET_ATOM(cx, script, pc);
+ id = ATOM_TO_JSID(atom);
+
+ /*
+ * ECMA 12.6.3 says to eval the LHS after looking for properties
+ * to enumerate, and bail without LHS eval if there are no props.
+ * We do Find here to share the most code at label do_forinloop.
+ * If looking for enumerable properties could have side effects,
+ * then we'd have to move this into the common code and condition
+ * it on op == JSOP_FORNAME.
+ */
+ SAVE_SP_AND_PC(fp);
+ ok = js_FindProperty(cx, id, &obj, &obj2, &prop);
+ if (!ok)
+ goto out;
+ if (prop)
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+ lval = OBJECT_TO_JSVAL(obj);
+ /* FALL THROUGH */
+
+ BEGIN_CASE(JSOP_FORARG)
+ BEGIN_CASE(JSOP_FORVAR)
+ BEGIN_CASE(JSOP_FORLOCAL)
+ /*
+ * JSOP_FORARG and JSOP_FORVAR don't require any lval computation
+ * here, because they address slots on the stack (in fp->argv and
+ * fp->vars, respectively). Same applies to JSOP_FORLOCAL, which
+ * addresses fp->spbase.
+ */
+ /* FALL THROUGH */
+
+ BEGIN_CASE(JSOP_FORELEM)
+ /*
+ * JSOP_FORELEM simply initializes or updates the iteration state
+ * and leaves the index expression evaluation and assignment to the
+ * enumerator until after the next property has been acquired, via
+ * a JSOP_ENUMELEM bytecode.
+ */
+ i = -1;
+
+ do_forinloop:
+ /*
+ * Reach under the top of stack to find our property iterator, a
+ * JSObject that contains the iteration state.
+ */
+ JS_ASSERT(!JSVAL_IS_PRIMITIVE(sp[i]));
+ iterobj = JSVAL_TO_OBJECT(sp[i]);
+
+ SAVE_SP_AND_PC(fp);
+ ok = js_CallIteratorNext(cx, iterobj, &rval);
+ if (!ok)
+ goto out;
+ if (rval == JSVAL_HOLE) {
+ rval = JSVAL_FALSE;
+ goto end_forinloop;
+ }
+
+ switch (op) {
+ case JSOP_FORARG:
+ slot = GET_ARGNO(pc);
+ JS_ASSERT(slot < fp->fun->nargs);
+ fp->argv[slot] = rval;
+ break;
+
+ case JSOP_FORVAR:
+ slot = GET_VARNO(pc);
+ JS_ASSERT(slot < fp->fun->u.i.nvars);
+ fp->vars[slot] = rval;
+ break;
+
+ case JSOP_FORLOCAL:
+ slot = GET_UINT16(pc);
+ JS_ASSERT(slot < (uintN)depth);
+ vp = &fp->spbase[slot];
+ GC_POKE(cx, *vp);
+ *vp = rval;
+ break;
+
+ case JSOP_FORELEM:
+ /* FORELEM is not a SET operation, it's more like BINDNAME. */
+ PUSH_OPND(rval);
+ break;
+
+ default:
+ JS_ASSERT(op == JSOP_FORPROP || op == JSOP_FORNAME);
+
+ /* Convert lval to a non-null object containing id. */
+ VALUE_TO_OBJECT(cx, lval, obj);
+ if (op == JSOP_FORPROP)
+ STORE_OPND(-1, OBJECT_TO_JSVAL(obj));
+
+ /* Set the variable obj[id] to refer to rval. */
+ fp->flags |= JSFRAME_ASSIGNING;
+ ok = OBJ_SET_PROPERTY(cx, obj, id, &rval);
+ fp->flags &= ~JSFRAME_ASSIGNING;
+ if (!ok)
+ goto out;
+ break;
+ }
+
+ /* Push true to keep looping through properties. */
+ rval = JSVAL_TRUE;
+
+ end_forinloop:
+ sp += i + 1;
+ PUSH_OPND(rval);
+ len = js_CodeSpec[op].length;
+ DO_NEXT_OP(len);
+
+ BEGIN_CASE(JSOP_DUP)
+ JS_ASSERT(sp > fp->spbase);
+ vp = sp - 1; /* address top of stack */
+ rval = *vp;
+ vp -= depth; /* address generating pc */
+ vp[1] = *vp;
+ PUSH(rval);
+ END_CASE(JSOP_DUP)
+
+ BEGIN_CASE(JSOP_DUP2)
+ JS_ASSERT(sp - 2 >= fp->spbase);
+ vp = sp - 1; /* address top of stack */
+ lval = vp[-1];
+ rval = *vp;
+ vp -= depth; /* address generating pc */
+ vp[1] = vp[2] = *vp;
+ PUSH(lval);
+ PUSH(rval);
+ END_CASE(JSOP_DUP2)
+
+#define PROPERTY_OP(n, call) \
+ JS_BEGIN_MACRO \
+ /* Fetch the left part and resolve it to a non-null object. */ \
+ FETCH_OBJECT(cx, n, lval, obj); \
+ \
+ /* Get or set the property, set ok false if error, true if success. */\
+ SAVE_SP_AND_PC(fp); \
+ call; \
+ if (!ok) \
+ goto out; \
+ JS_END_MACRO
+
+#define ELEMENT_OP(n, call) \
+ JS_BEGIN_MACRO \
+ /* Fetch the right part and resolve it to an internal id. */ \
+ FETCH_ELEMENT_ID(n, id); \
+ \
+ /* Fetch the left part and resolve it to a non-null object. */ \
+ FETCH_OBJECT(cx, n - 1, lval, obj); \
+ \
+ /* Ensure that id has a type suitable for use with obj. */ \
+ CHECK_ELEMENT_ID(obj, id); \
+ \
+ /* Get or set the element, set ok false if error, true if success. */ \
+ SAVE_SP_AND_PC(fp); \
+ call; \
+ if (!ok) \
+ goto out; \
+ JS_END_MACRO
+
+#define NATIVE_GET(cx,obj,pobj,sprop,vp) \
+ JS_BEGIN_MACRO \
+ if (SPROP_HAS_STUB_GETTER(sprop)) { \
+ /* Fast path for Object instance properties. */ \
+ JS_ASSERT((sprop)->slot != SPROP_INVALID_SLOT || \
+ !SPROP_HAS_STUB_SETTER(sprop)); \
+ *vp = ((sprop)->slot != SPROP_INVALID_SLOT) \
+ ? LOCKED_OBJ_GET_SLOT(pobj, (sprop)->slot) \
+ : JSVAL_VOID; \
+ } else { \
+ SAVE_SP_AND_PC(fp); \
+ ok = js_NativeGet(cx, obj, pobj, sprop, vp); \
+ if (!ok) \
+ goto out; \
+ } \
+ JS_END_MACRO
+
+#define NATIVE_SET(cx,obj,sprop,vp) \
+ JS_BEGIN_MACRO \
+ if (SPROP_HAS_STUB_SETTER(sprop) && \
+ (sprop)->slot != SPROP_INVALID_SLOT) { \
+ /* Fast path for Object instance properties. */ \
+ LOCKED_OBJ_SET_SLOT(obj, (sprop)->slot, *vp); \
+ } else { \
+ SAVE_SP_AND_PC(fp); \
+ ok = js_NativeSet(cx, obj, sprop, vp); \
+ if (!ok) \
+ goto out; \
+ } \
+ JS_END_MACRO
+
+/*
+ * CACHED_GET and CACHED_SET use cx, obj, id, and rval from their callers'
+ * environments.
+ */
+#define CACHED_GET(call) \
+ JS_BEGIN_MACRO \
+ if (!OBJ_IS_NATIVE(obj)) { \
+ ok = call; \
+ } else { \
+ JS_LOCK_OBJ(cx, obj); \
+ PROPERTY_CACHE_TEST(&rt->propertyCache, obj, id, sprop); \
+ if (sprop) { \
+ NATIVE_GET(cx, obj, obj, sprop, &rval); \
+ JS_UNLOCK_OBJ(cx, obj); \
+ } else { \
+ JS_UNLOCK_OBJ(cx, obj); \
+ ok = call; \
+ /* No fill here: js_GetProperty fills the cache. */ \
+ } \
+ } \
+ JS_END_MACRO
+
+#define CACHED_SET(call) \
+ JS_BEGIN_MACRO \
+ if (!OBJ_IS_NATIVE(obj)) { \
+ ok = call; \
+ } else { \
+ JSScope *scope_; \
+ JS_LOCK_OBJ(cx, obj); \
+ PROPERTY_CACHE_TEST(&rt->propertyCache, obj, id, sprop); \
+ if (sprop && \
+ !(sprop->attrs & JSPROP_READONLY) && \
+ (scope_ = OBJ_SCOPE(obj), !SCOPE_IS_SEALED(scope_))) { \
+ NATIVE_SET(cx, obj, sprop, &rval); \
+ JS_UNLOCK_SCOPE(cx, scope_); \
+ } else { \
+ JS_UNLOCK_OBJ(cx, obj); \
+ ok = call; \
+ /* No fill here: js_SetProperty writes through the cache. */ \
+ } \
+ } \
+ JS_END_MACRO
+
+#define BEGIN_LITOPX_CASE(OP,PCOFF) \
+ BEGIN_CASE(OP) \
+ pc2 = pc; \
+ atomIndex = GET_ATOM_INDEX(pc + PCOFF); \
+ do_##OP: \
+ atom = js_GetAtom(cx, &script->atomMap, atomIndex);
+
+#define END_LITOPX_CASE(OP) \
+ END_CASE(OP)
+
+ BEGIN_LITOPX_CASE(JSOP_SETCONST, 0)
+ obj = fp->varobj;
+ rval = FETCH_OPND(-1);
+ SAVE_SP_AND_PC(fp);
+ ok = OBJ_DEFINE_PROPERTY(cx, obj, ATOM_TO_JSID(atom), rval,
+ NULL, NULL,
+ JSPROP_ENUMERATE | JSPROP_PERMANENT |
+ JSPROP_READONLY,
+ NULL);
+ if (!ok)
+ goto out;
+ STORE_OPND(-1, rval);
+ END_LITOPX_CASE(JSOP_SETCONST)
+
+#if JS_HAS_DESTRUCTURING
+ BEGIN_CASE(JSOP_ENUMCONSTELEM)
+ FETCH_ELEMENT_ID(-1, id);
+ FETCH_OBJECT(cx, -2, lval, obj);
+ CHECK_ELEMENT_ID(obj, id);
+ rval = FETCH_OPND(-3);
+ SAVE_SP_AND_PC(fp);
+ ok = OBJ_DEFINE_PROPERTY(cx, obj, id, rval, NULL, NULL,
+ JSPROP_ENUMERATE | JSPROP_PERMANENT |
+ JSPROP_READONLY,
+ NULL);
+ if (!ok)
+ goto out;
+ sp -= 3;
+ END_CASE(JSOP_ENUMCONSTELEM)
+#endif
+
+ BEGIN_LITOPX_CASE(JSOP_BINDNAME, 0)
+ SAVE_SP_AND_PC(fp);
+ obj = js_FindIdentifierBase(cx, ATOM_TO_JSID(atom));
+ if (!obj) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ PUSH_OPND(OBJECT_TO_JSVAL(obj));
+ END_LITOPX_CASE(JSOP_BINDNAME)
+
+ BEGIN_CASE(JSOP_SETNAME)
+ atom = GET_ATOM(cx, script, pc);
+ id = ATOM_TO_JSID(atom);
+ rval = FETCH_OPND(-1);
+ lval = FETCH_OPND(-2);
+ JS_ASSERT(!JSVAL_IS_PRIMITIVE(lval));
+ obj = JSVAL_TO_OBJECT(lval);
+ SAVE_SP_AND_PC(fp);
+ CACHED_SET(OBJ_SET_PROPERTY(cx, obj, id, &rval));
+ if (!ok)
+ goto out;
+ sp--;
+ STORE_OPND(-1, rval);
+ obj = NULL;
+ END_CASE(JSOP_SETNAME)
+
+#define INTEGER_OP(OP, EXTRA_CODE) \
+ JS_BEGIN_MACRO \
+ FETCH_INT(cx, -1, j); \
+ FETCH_INT(cx, -2, i); \
+ EXTRA_CODE \
+ i = i OP j; \
+ sp--; \
+ STORE_INT(cx, -1, i); \
+ JS_END_MACRO
+
+#define BITWISE_OP(OP) INTEGER_OP(OP, (void) 0;)
+#define SIGNED_SHIFT_OP(OP) INTEGER_OP(OP, j &= 31;)
+
+ BEGIN_CASE(JSOP_BITOR)
+ BITWISE_OP(|);
+ END_CASE(JSOP_BITOR)
+
+ BEGIN_CASE(JSOP_BITXOR)
+ BITWISE_OP(^);
+ END_CASE(JSOP_BITXOR)
+
+ BEGIN_CASE(JSOP_BITAND)
+ BITWISE_OP(&);
+ END_CASE(JSOP_BITAND)
+
+#define RELATIONAL_OP(OP) \
+ JS_BEGIN_MACRO \
+ rval = FETCH_OPND(-1); \
+ lval = FETCH_OPND(-2); \
+ /* Optimize for two int-tagged operands (typical loop control). */ \
+ if ((lval & rval) & JSVAL_INT) { \
+ ltmp = lval ^ JSVAL_VOID; \
+ rtmp = rval ^ JSVAL_VOID; \
+ if (ltmp && rtmp) { \
+ cond = JSVAL_TO_INT(lval) OP JSVAL_TO_INT(rval); \
+ } else { \
+ d = ltmp ? JSVAL_TO_INT(lval) : *rt->jsNaN; \
+ d2 = rtmp ? JSVAL_TO_INT(rval) : *rt->jsNaN; \
+ cond = JSDOUBLE_COMPARE(d, OP, d2, JS_FALSE); \
+ } \
+ } else { \
+ VALUE_TO_PRIMITIVE(cx, lval, JSTYPE_NUMBER, &lval); \
+ sp[-2] = lval; \
+ VALUE_TO_PRIMITIVE(cx, rval, JSTYPE_NUMBER, &rval); \
+ if (JSVAL_IS_STRING(lval) && JSVAL_IS_STRING(rval)) { \
+ str = JSVAL_TO_STRING(lval); \
+ str2 = JSVAL_TO_STRING(rval); \
+ cond = js_CompareStrings(str, str2) OP 0; \
+ } else { \
+ VALUE_TO_NUMBER(cx, lval, d); \
+ VALUE_TO_NUMBER(cx, rval, d2); \
+ cond = JSDOUBLE_COMPARE(d, OP, d2, JS_FALSE); \
+ } \
+ } \
+ sp--; \
+ STORE_OPND(-1, BOOLEAN_TO_JSVAL(cond)); \
+ JS_END_MACRO
+
+/*
+ * NB: These macros can't use JS_BEGIN_MACRO/JS_END_MACRO around their bodies
+ * because they begin if/else chains, so callers must not put semicolons after
+ * the call expressions!
+ */
+#if JS_HAS_XML_SUPPORT
+#define XML_EQUALITY_OP(OP) \
+ if ((ltmp == JSVAL_OBJECT && \
+ (obj2 = JSVAL_TO_OBJECT(lval)) && \
+ OBJECT_IS_XML(cx, obj2)) || \
+ (rtmp == JSVAL_OBJECT && \
+ (obj2 = JSVAL_TO_OBJECT(rval)) && \
+ OBJECT_IS_XML(cx, obj2))) { \
+ JSXMLObjectOps *ops; \
+ \
+ ops = (JSXMLObjectOps *) obj2->map->ops; \
+ if (obj2 == JSVAL_TO_OBJECT(rval)) \
+ rval = lval; \
+ SAVE_SP_AND_PC(fp); \
+ ok = ops->equality(cx, obj2, rval, &cond); \
+ if (!ok) \
+ goto out; \
+ cond = cond OP JS_TRUE; \
+ } else
+
+#define EXTENDED_EQUALITY_OP(OP) \
+ if (ltmp == JSVAL_OBJECT && \
+ (obj2 = JSVAL_TO_OBJECT(lval)) && \
+ ((clasp = OBJ_GET_CLASS(cx, obj2))->flags & JSCLASS_IS_EXTENDED)) { \
+ JSExtendedClass *xclasp; \
+ \
+ xclasp = (JSExtendedClass *) clasp; \
+ SAVE_SP_AND_PC(fp); \
+ ok = xclasp->equality(cx, obj2, rval, &cond); \
+ if (!ok) \
+ goto out; \
+ cond = cond OP JS_TRUE; \
+ } else
+#else
+#define XML_EQUALITY_OP(OP) /* nothing */
+#define EXTENDED_EQUALITY_OP(OP) /* nothing */
+#endif
+
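+/*
+ * EQUALITY_OP implements the loose == and != operators: same-tag fast paths
+ * for strings, doubles and other tagged values, the XML and extended-class
+ * equality hooks above, mutual equality of null and undefined, and finally
+ * object-to-primitive conversion followed by string or numeric comparison
+ * (so, e.g., '1' == 1 compares numerically).  IFNAN is the result when
+ * either operand is NaN: false for ==, true for !=.
+ */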
+#define EQUALITY_OP(OP, IFNAN) \
+ JS_BEGIN_MACRO \
+ rval = FETCH_OPND(-1); \
+ lval = FETCH_OPND(-2); \
+ ltmp = JSVAL_TAG(lval); \
+ rtmp = JSVAL_TAG(rval); \
+ XML_EQUALITY_OP(OP) \
+ if (ltmp == rtmp) { \
+ if (ltmp == JSVAL_STRING) { \
+ str = JSVAL_TO_STRING(lval); \
+ str2 = JSVAL_TO_STRING(rval); \
+ cond = js_EqualStrings(str, str2) OP JS_TRUE; \
+ } else if (ltmp == JSVAL_DOUBLE) { \
+ d = *JSVAL_TO_DOUBLE(lval); \
+ d2 = *JSVAL_TO_DOUBLE(rval); \
+ cond = JSDOUBLE_COMPARE(d, OP, d2, IFNAN); \
+ } else { \
+ EXTENDED_EQUALITY_OP(OP) \
+ /* Handle all undefined (=>NaN) and int combinations. */ \
+ cond = lval OP rval; \
+ } \
+ } else { \
+ if (JSVAL_IS_NULL(lval) || JSVAL_IS_VOID(lval)) { \
+ cond = (JSVAL_IS_NULL(rval) || JSVAL_IS_VOID(rval)) OP 1; \
+ } else if (JSVAL_IS_NULL(rval) || JSVAL_IS_VOID(rval)) { \
+ cond = 1 OP 0; \
+ } else { \
+ if (ltmp == JSVAL_OBJECT) { \
+ VALUE_TO_PRIMITIVE(cx, lval, JSTYPE_VOID, &sp[-2]); \
+ lval = sp[-2]; \
+ ltmp = JSVAL_TAG(lval); \
+ } else if (rtmp == JSVAL_OBJECT) { \
+ VALUE_TO_PRIMITIVE(cx, rval, JSTYPE_VOID, &sp[-1]); \
+ rval = sp[-1]; \
+ rtmp = JSVAL_TAG(rval); \
+ } \
+ if (ltmp == JSVAL_STRING && rtmp == JSVAL_STRING) { \
+ str = JSVAL_TO_STRING(lval); \
+ str2 = JSVAL_TO_STRING(rval); \
+ cond = js_EqualStrings(str, str2) OP JS_TRUE; \
+ } else { \
+ VALUE_TO_NUMBER(cx, lval, d); \
+ VALUE_TO_NUMBER(cx, rval, d2); \
+ cond = JSDOUBLE_COMPARE(d, OP, d2, IFNAN); \
+ } \
+ } \
+ } \
+ sp--; \
+ STORE_OPND(-1, BOOLEAN_TO_JSVAL(cond)); \
+ JS_END_MACRO
+
+ BEGIN_CASE(JSOP_EQ)
+ EQUALITY_OP(==, JS_FALSE);
+ END_CASE(JSOP_EQ)
+
+ BEGIN_CASE(JSOP_NE)
+ EQUALITY_OP(!=, JS_TRUE);
+ END_CASE(JSOP_NE)
+
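+/*
+ * NEW_EQUALITY_OP implements strict equality via js_StrictlyEqual, with no
+ * type conversion; it also backs JSOP_CASE/JSOP_CASEX dispatch below.
+ */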
+#define NEW_EQUALITY_OP(OP) \
+ JS_BEGIN_MACRO \
+ rval = FETCH_OPND(-1); \
+ lval = FETCH_OPND(-2); \
+ cond = js_StrictlyEqual(lval, rval) OP JS_TRUE; \
+ sp--; \
+ STORE_OPND(-1, BOOLEAN_TO_JSVAL(cond)); \
+ JS_END_MACRO
+
+ BEGIN_CASE(JSOP_NEW_EQ)
+ NEW_EQUALITY_OP(==);
+ END_CASE(JSOP_NEW_EQ)
+
+ BEGIN_CASE(JSOP_NEW_NE)
+ NEW_EQUALITY_OP(!=);
+ END_CASE(JSOP_NEW_NE)
+
+ BEGIN_CASE(JSOP_CASE)
+ pc2 = (jsbytecode *) sp[-2-depth];
+ NEW_EQUALITY_OP(==);
+ (void) POP();
+ if (cond) {
+ len = GET_JUMP_OFFSET(pc);
+ CHECK_BRANCH(len);
+ DO_NEXT_OP(len);
+ }
+ sp[-depth] = (jsval)pc2;
+ PUSH(lval);
+ END_CASE(JSOP_CASE)
+
+ BEGIN_CASE(JSOP_CASEX)
+ pc2 = (jsbytecode *) sp[-2-depth];
+ NEW_EQUALITY_OP(==);
+ (void) POP();
+ if (cond) {
+ len = GET_JUMPX_OFFSET(pc);
+ CHECK_BRANCH(len);
+ DO_NEXT_OP(len);
+ }
+ sp[-depth] = (jsval)pc2;
+ PUSH(lval);
+ END_CASE(JSOP_CASEX)
+
+ BEGIN_CASE(JSOP_LT)
+ RELATIONAL_OP(<);
+ END_CASE(JSOP_LT)
+
+ BEGIN_CASE(JSOP_LE)
+ RELATIONAL_OP(<=);
+ END_CASE(JSOP_LE)
+
+ BEGIN_CASE(JSOP_GT)
+ RELATIONAL_OP(>);
+ END_CASE(JSOP_GT)
+
+ BEGIN_CASE(JSOP_GE)
+ RELATIONAL_OP(>=);
+ END_CASE(JSOP_GE)
+
+#undef EQUALITY_OP
+#undef RELATIONAL_OP
+
+ BEGIN_CASE(JSOP_LSH)
+ SIGNED_SHIFT_OP(<<);
+ END_CASE(JSOP_LSH)
+
+ BEGIN_CASE(JSOP_RSH)
+ SIGNED_SHIFT_OP(>>);
+ END_CASE(JSOP_RSH)
+
+ BEGIN_CASE(JSOP_URSH)
+ {
+ uint32 u;
+
+ FETCH_INT(cx, -1, j);
+ FETCH_UINT(cx, -2, u);
+ u >>= j & 31;
+ sp--;
+ STORE_UINT(cx, -1, u);
+ }
+ END_CASE(JSOP_URSH)
+
+#undef INTEGER_OP
+#undef BITWISE_OP
+#undef SIGNED_SHIFT_OP
+
+ BEGIN_CASE(JSOP_ADD)
+ rval = FETCH_OPND(-1);
+ lval = FETCH_OPND(-2);
+#if JS_HAS_XML_SUPPORT
+ if (!JSVAL_IS_PRIMITIVE(lval) &&
+ (obj2 = JSVAL_TO_OBJECT(lval), OBJECT_IS_XML(cx, obj2)) &&
+ VALUE_IS_XML(cx, rval)) {
+ JSXMLObjectOps *ops;
+
+ ops = (JSXMLObjectOps *) obj2->map->ops;
+ SAVE_SP_AND_PC(fp);
+ ok = ops->concatenate(cx, obj2, rval, &rval);
+ if (!ok)
+ goto out;
+ sp--;
+ STORE_OPND(-1, rval);
+ } else
+#endif
+ {
+ VALUE_TO_PRIMITIVE(cx, lval, JSTYPE_VOID, &sp[-2]);
+ lval = sp[-2];
+ VALUE_TO_PRIMITIVE(cx, rval, JSTYPE_VOID, &sp[-1]);
+ rval = sp[-1];
+ if ((cond = JSVAL_IS_STRING(lval)) || JSVAL_IS_STRING(rval)) {
+ SAVE_SP_AND_PC(fp);
+ if (cond) {
+ str = JSVAL_TO_STRING(lval);
+ ok = (str2 = js_ValueToString(cx, rval)) != NULL;
+ if (!ok)
+ goto out;
+ sp[-1] = STRING_TO_JSVAL(str2);
+ } else {
+ str2 = JSVAL_TO_STRING(rval);
+ ok = (str = js_ValueToString(cx, lval)) != NULL;
+ if (!ok)
+ goto out;
+ sp[-2] = STRING_TO_JSVAL(str);
+ }
+ str = js_ConcatStrings(cx, str, str2);
+ if (!str) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ sp--;
+ STORE_OPND(-1, STRING_TO_JSVAL(str));
+ } else {
+ VALUE_TO_NUMBER(cx, lval, d);
+ VALUE_TO_NUMBER(cx, rval, d2);
+ d += d2;
+ sp--;
+ STORE_NUMBER(cx, -1, d);
+ }
+ }
+ END_CASE(JSOP_ADD)
+
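+/*
+ * BINARY_OP converts both operands to numbers and applies OP; it serves
+ * JSOP_SUB and JSOP_MUL.  JSOP_DIV and JSOP_MOD are open-coded below to
+ * handle division by zero and platform-specific fmod quirks.
+ */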
+#define BINARY_OP(OP) \
+ JS_BEGIN_MACRO \
+ FETCH_NUMBER(cx, -1, d2); \
+ FETCH_NUMBER(cx, -2, d); \
+ d = d OP d2; \
+ sp--; \
+ STORE_NUMBER(cx, -1, d); \
+ JS_END_MACRO
+
+ BEGIN_CASE(JSOP_SUB)
+ BINARY_OP(-);
+ END_CASE(JSOP_SUB)
+
+ BEGIN_CASE(JSOP_MUL)
+ BINARY_OP(*);
+ END_CASE(JSOP_MUL)
+
+ BEGIN_CASE(JSOP_DIV)
+ FETCH_NUMBER(cx, -1, d2);
+ FETCH_NUMBER(cx, -2, d);
+ sp--;
+ if (d2 == 0) {
+#ifdef XP_WIN
+ /* XXX MSVC miscompiles such that (NaN == 0) */
+ if (JSDOUBLE_IS_NaN(d2))
+ rval = DOUBLE_TO_JSVAL(rt->jsNaN);
+ else
+#endif
+ if (d == 0 || JSDOUBLE_IS_NaN(d))
+ rval = DOUBLE_TO_JSVAL(rt->jsNaN);
+ else if ((JSDOUBLE_HI32(d) ^ JSDOUBLE_HI32(d2)) >> 31)
+ rval = DOUBLE_TO_JSVAL(rt->jsNegativeInfinity);
+ else
+ rval = DOUBLE_TO_JSVAL(rt->jsPositiveInfinity);
+ STORE_OPND(-1, rval);
+ } else {
+ d /= d2;
+ STORE_NUMBER(cx, -1, d);
+ }
+ END_CASE(JSOP_DIV)
+
+ BEGIN_CASE(JSOP_MOD)
+ FETCH_NUMBER(cx, -1, d2);
+ FETCH_NUMBER(cx, -2, d);
+ sp--;
+ if (d2 == 0) {
+ STORE_OPND(-1, DOUBLE_TO_JSVAL(rt->jsNaN));
+ } else {
+#ifdef XP_WIN
+ /* Workaround MS fmod bug where 42 % (1/0) => NaN, not 42. */
+ if (!(JSDOUBLE_IS_FINITE(d) && JSDOUBLE_IS_INFINITE(d2)))
+#endif
+ d = fmod(d, d2);
+ STORE_NUMBER(cx, -1, d);
+ }
+ END_CASE(JSOP_MOD)
+
+ BEGIN_CASE(JSOP_NOT)
+ POP_BOOLEAN(cx, rval, cond);
+ PUSH_OPND(BOOLEAN_TO_JSVAL(!cond));
+ END_CASE(JSOP_NOT)
+
+ BEGIN_CASE(JSOP_BITNOT)
+ FETCH_INT(cx, -1, i);
+ i = ~i;
+ STORE_INT(cx, -1, i);
+ END_CASE(JSOP_BITNOT)
+
+ BEGIN_CASE(JSOP_NEG)
+            /*
+             * Optimize the case of an int-tagged operand by noting that
+             * INT_FITS_IN_JSVAL(i) => INT_FITS_IN_JSVAL(-i) unless i is 0,
+             * in which case -i is negative zero and must be represented as
+             * a jsdouble.
+             */
+ rval = FETCH_OPND(-1);
+ if (JSVAL_IS_INT(rval) && (i = JSVAL_TO_INT(rval)) != 0) {
+ i = -i;
+ JS_ASSERT(INT_FITS_IN_JSVAL(i));
+ rval = INT_TO_JSVAL(i);
+ } else {
+ if (JSVAL_IS_DOUBLE(rval)) {
+ d = *JSVAL_TO_DOUBLE(rval);
+ } else {
+ SAVE_SP_AND_PC(fp);
+ ok = js_ValueToNumber(cx, rval, &d);
+ if (!ok)
+ goto out;
+ }
+#ifdef HPUX
+ /*
+ * Negation of a zero doesn't produce a negative
+ * zero on HPUX. Perform the operation by bit
+ * twiddling.
+ */
+ JSDOUBLE_HI32(d) ^= JSDOUBLE_HI32_SIGNBIT;
+#else
+ d = -d;
+#endif
+ ok = js_NewNumberValue(cx, d, &rval);
+ if (!ok)
+ goto out;
+ }
+ STORE_OPND(-1, rval);
+ END_CASE(JSOP_NEG)
+
+ BEGIN_CASE(JSOP_POS)
+ rval = FETCH_OPND(-1);
+ if (!JSVAL_IS_NUMBER(rval)) {
+ SAVE_SP_AND_PC(fp);
+ ok = js_ValueToNumber(cx, rval, &d);
+ if (!ok)
+ goto out;
+ ok = js_NewNumberValue(cx, d, &rval);
+ if (!ok)
+ goto out;
+ sp[-1] = rval;
+ }
+ sp[-1-depth] = (jsval)pc;
+ END_CASE(JSOP_POS)
+
+ BEGIN_CASE(JSOP_NEW)
+ /* Get immediate argc and find the constructor function. */
+ argc = GET_ARGC(pc);
+
+ do_new:
+ SAVE_SP_AND_PC(fp);
+ vp = sp - (2 + argc);
+ JS_ASSERT(vp >= fp->spbase);
+
+ ok = js_InvokeConstructor(cx, vp, argc);
+ if (!ok)
+ goto out;
+ RESTORE_SP(fp);
+ LOAD_BRANCH_CALLBACK(cx);
+ LOAD_INTERRUPT_HANDLER(rt);
+ obj = JSVAL_TO_OBJECT(*vp);
+ len = js_CodeSpec[op].length;
+ DO_NEXT_OP(len);
+
+ BEGIN_CASE(JSOP_DELNAME)
+ atom = GET_ATOM(cx, script, pc);
+ id = ATOM_TO_JSID(atom);
+
+ SAVE_SP_AND_PC(fp);
+ ok = js_FindProperty(cx, id, &obj, &obj2, &prop);
+ if (!ok)
+ goto out;
+
+ /* ECMA says to return true if name is undefined or inherited. */
+ rval = JSVAL_TRUE;
+ if (prop) {
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+ ok = OBJ_DELETE_PROPERTY(cx, obj, id, &rval);
+ if (!ok)
+ goto out;
+ }
+ PUSH_OPND(rval);
+ END_CASE(JSOP_DELNAME)
+
+ BEGIN_CASE(JSOP_DELPROP)
+ atom = GET_ATOM(cx, script, pc);
+ id = ATOM_TO_JSID(atom);
+ PROPERTY_OP(-1, ok = OBJ_DELETE_PROPERTY(cx, obj, id, &rval));
+ STORE_OPND(-1, rval);
+ END_CASE(JSOP_DELPROP)
+
+ BEGIN_CASE(JSOP_DELELEM)
+ ELEMENT_OP(-1, ok = OBJ_DELETE_PROPERTY(cx, obj, id, &rval));
+ sp--;
+ STORE_OPND(-1, rval);
+ END_CASE(JSOP_DELELEM)
+
+ BEGIN_CASE(JSOP_TYPEOFEXPR)
+ BEGIN_CASE(JSOP_TYPEOF)
+ rval = FETCH_OPND(-1);
+ SAVE_SP_AND_PC(fp);
+ type = JS_TypeOfValue(cx, rval);
+ atom = rt->atomState.typeAtoms[type];
+ STORE_OPND(-1, ATOM_KEY(atom));
+ END_CASE(JSOP_TYPEOF)
+
+ BEGIN_CASE(JSOP_VOID)
+ (void) POP_OPND();
+ PUSH_OPND(JSVAL_VOID);
+ END_CASE(JSOP_VOID)
+
+ BEGIN_CASE(JSOP_INCNAME)
+ BEGIN_CASE(JSOP_DECNAME)
+ BEGIN_CASE(JSOP_NAMEINC)
+ BEGIN_CASE(JSOP_NAMEDEC)
+ atom = GET_ATOM(cx, script, pc);
+ id = ATOM_TO_JSID(atom);
+
+ SAVE_SP_AND_PC(fp);
+ ok = js_FindProperty(cx, id, &obj, &obj2, &prop);
+ if (!ok)
+ goto out;
+ if (!prop)
+ goto atom_not_defined;
+
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+ lval = OBJECT_TO_JSVAL(obj);
+ i = 0;
+ goto do_incop;
+
+ BEGIN_CASE(JSOP_INCPROP)
+ BEGIN_CASE(JSOP_DECPROP)
+ BEGIN_CASE(JSOP_PROPINC)
+ BEGIN_CASE(JSOP_PROPDEC)
+ atom = GET_ATOM(cx, script, pc);
+ id = ATOM_TO_JSID(atom);
+ lval = FETCH_OPND(-1);
+ i = -1;
+ goto do_incop;
+
+ BEGIN_CASE(JSOP_INCELEM)
+ BEGIN_CASE(JSOP_DECELEM)
+ BEGIN_CASE(JSOP_ELEMINC)
+ BEGIN_CASE(JSOP_ELEMDEC)
+ FETCH_ELEMENT_ID(-1, id);
+ lval = FETCH_OPND(-2);
+ i = -2;
+
+ do_incop:
+ {
+ const JSCodeSpec *cs;
+
+ VALUE_TO_OBJECT(cx, lval, obj);
+ if (i < 0)
+ STORE_OPND(i, OBJECT_TO_JSVAL(obj));
+ CHECK_ELEMENT_ID(obj, id);
+
+ /* The operand must contain a number. */
+ SAVE_SP_AND_PC(fp);
+ CACHED_GET(OBJ_GET_PROPERTY(cx, obj, id, &rval));
+ if (!ok)
+ goto out;
+
+ /* Preload for use in the if/else immediately below. */
+ cs = &js_CodeSpec[op];
+
+ /* The expression result goes in rtmp, the updated value in rval. */
+ if (JSVAL_IS_INT(rval) &&
+ rval != INT_TO_JSVAL(JSVAL_INT_MIN) &&
+ rval != INT_TO_JSVAL(JSVAL_INT_MAX)) {
+ if (cs->format & JOF_POST) {
+ rtmp = rval;
+ (cs->format & JOF_INC) ? (rval += 2) : (rval -= 2);
+ } else {
+ (cs->format & JOF_INC) ? (rval += 2) : (rval -= 2);
+ rtmp = rval;
+ }
+ } else {
+
+/*
+ * Initially, rval contains the value to increment or decrement, which is not
+ * yet converted. As above, the expression result goes in rtmp, the updated
+ * value goes in rval. Our caller must set vp to point at a GC-rooted jsval
+ * in which we home rtmp, to protect it from GC in case the unconverted rval
+ * is not a number.
+ */
+#define NONINT_INCREMENT_OP_MIDDLE() \
+ JS_BEGIN_MACRO \
+ VALUE_TO_NUMBER(cx, rval, d); \
+ if (cs->format & JOF_POST) { \
+ rtmp = rval; \
+ if (!JSVAL_IS_NUMBER(rtmp)) { \
+ ok = js_NewNumberValue(cx, d, &rtmp); \
+ if (!ok) \
+ goto out; \
+ } \
+ *vp = rtmp; \
+ (cs->format & JOF_INC) ? d++ : d--; \
+ ok = js_NewNumberValue(cx, d, &rval); \
+ } else { \
+ (cs->format & JOF_INC) ? ++d : --d; \
+ ok = js_NewNumberValue(cx, d, &rval); \
+ rtmp = rval; \
+ } \
+ if (!ok) \
+ goto out; \
+ JS_END_MACRO
+
+ if (cs->format & JOF_POST) {
+ /*
+                     * We must push early to protect the postfix increment
+                     * or decrement result from GC nesting in the setter, in
+                     * case that result was converted to a jsdouble from a
+                     * non-number value.
+ */
+ vp = sp;
+ PUSH(JSVAL_VOID);
+ SAVE_SP(fp);
+ --i;
+ }
+#ifdef __GNUC__
+ else vp = NULL; /* suppress bogus gcc warnings */
+#endif
+
+ NONINT_INCREMENT_OP_MIDDLE();
+ }
+
+ fp->flags |= JSFRAME_ASSIGNING;
+ CACHED_SET(OBJ_SET_PROPERTY(cx, obj, id, &rval));
+ fp->flags &= ~JSFRAME_ASSIGNING;
+ if (!ok)
+ goto out;
+ sp += i;
+ PUSH_OPND(rtmp);
+ len = js_CodeSpec[op].length;
+ DO_NEXT_OP(len);
+ }
+
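+/*
+ * Fast increment/decrement for argument and local-variable slots: when the
+ * slot holds an int-tagged jsval that cannot overflow, adjust the jsval in
+ * place by 2 (which changes the underlying integer by 1, given the low tag
+ * bit) and push either the old or the new value, as selected by PRE.
+ * Non-int or overflowing values take the do_nonint_fast_incop path.
+ */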
+/* NB: This macro doesn't use JS_BEGIN_MACRO/JS_END_MACRO around its body. */
+#define FAST_INCREMENT_OP(SLOT,COUNT,BASE,PRE,OPEQ,MINMAX) \
+ slot = SLOT; \
+ JS_ASSERT(slot < fp->fun->COUNT); \
+ vp = fp->BASE + slot; \
+ rval = *vp; \
+ if (!JSVAL_IS_INT(rval) || rval == INT_TO_JSVAL(JSVAL_INT_##MINMAX)) \
+ goto do_nonint_fast_incop; \
+ PRE = rval; \
+ rval OPEQ 2; \
+ *vp = rval; \
+ PUSH_OPND(PRE); \
+ goto end_nonint_fast_incop
+
+ BEGIN_CASE(JSOP_INCARG)
+ FAST_INCREMENT_OP(GET_ARGNO(pc), nargs, argv, rval, +=, MAX);
+ BEGIN_CASE(JSOP_DECARG)
+ FAST_INCREMENT_OP(GET_ARGNO(pc), nargs, argv, rval, -=, MIN);
+ BEGIN_CASE(JSOP_ARGINC)
+ FAST_INCREMENT_OP(GET_ARGNO(pc), nargs, argv, rtmp, +=, MAX);
+ BEGIN_CASE(JSOP_ARGDEC)
+ FAST_INCREMENT_OP(GET_ARGNO(pc), nargs, argv, rtmp, -=, MIN);
+
+ BEGIN_CASE(JSOP_INCVAR)
+ FAST_INCREMENT_OP(GET_VARNO(pc), u.i.nvars, vars, rval, +=, MAX);
+ BEGIN_CASE(JSOP_DECVAR)
+ FAST_INCREMENT_OP(GET_VARNO(pc), u.i.nvars, vars, rval, -=, MIN);
+ BEGIN_CASE(JSOP_VARINC)
+ FAST_INCREMENT_OP(GET_VARNO(pc), u.i.nvars, vars, rtmp, +=, MAX);
+ BEGIN_CASE(JSOP_VARDEC)
+ FAST_INCREMENT_OP(GET_VARNO(pc), u.i.nvars, vars, rtmp, -=, MIN);
+
+ end_nonint_fast_incop:
+ len = JSOP_INCARG_LENGTH; /* all fast incops are same length */
+ DO_NEXT_OP(len);
+
+#undef FAST_INCREMENT_OP
+
+ do_nonint_fast_incop:
+ {
+ const JSCodeSpec *cs = &js_CodeSpec[op];
+
+ NONINT_INCREMENT_OP_MIDDLE();
+ *vp = rval;
+ PUSH_OPND(rtmp);
+ len = cs->length;
+ DO_NEXT_OP(len);
+ }
+
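+/*
+ * The same fast path for optimized globals: fp->vars[slot] holds the varobj
+ * slot number as an int-tagged jsval, or null if the name could not be
+ * optimized, in which case we redispatch as the generic JSOP_*NAME opcode.
+ */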
+/* NB: This macro doesn't use JS_BEGIN_MACRO/JS_END_MACRO around its body. */
+#define FAST_GLOBAL_INCREMENT_OP(SLOWOP,PRE,OPEQ,MINMAX) \
+ slot = GET_VARNO(pc); \
+ JS_ASSERT(slot < fp->nvars); \
+ lval = fp->vars[slot]; \
+ if (JSVAL_IS_NULL(lval)) { \
+ op = SLOWOP; \
+ DO_OP(); \
+ } \
+ slot = JSVAL_TO_INT(lval); \
+ obj = fp->varobj; \
+ rval = OBJ_GET_SLOT(cx, obj, slot); \
+ if (!JSVAL_IS_INT(rval) || rval == INT_TO_JSVAL(JSVAL_INT_##MINMAX)) \
+ goto do_nonint_fast_global_incop; \
+ PRE = rval; \
+ rval OPEQ 2; \
+ OBJ_SET_SLOT(cx, obj, slot, rval); \
+ PUSH_OPND(PRE); \
+ goto end_nonint_fast_global_incop
+
+ BEGIN_CASE(JSOP_INCGVAR)
+ FAST_GLOBAL_INCREMENT_OP(JSOP_INCNAME, rval, +=, MAX);
+ BEGIN_CASE(JSOP_DECGVAR)
+ FAST_GLOBAL_INCREMENT_OP(JSOP_DECNAME, rval, -=, MIN);
+ BEGIN_CASE(JSOP_GVARINC)
+ FAST_GLOBAL_INCREMENT_OP(JSOP_NAMEINC, rtmp, +=, MAX);
+ BEGIN_CASE(JSOP_GVARDEC)
+ FAST_GLOBAL_INCREMENT_OP(JSOP_NAMEDEC, rtmp, -=, MIN);
+
+ end_nonint_fast_global_incop:
+ len = JSOP_INCGVAR_LENGTH; /* all gvar incops are same length */
+ JS_ASSERT(len == js_CodeSpec[op].length);
+ DO_NEXT_OP(len);
+
+#undef FAST_GLOBAL_INCREMENT_OP
+
+ do_nonint_fast_global_incop:
+ {
+ const JSCodeSpec *cs = &js_CodeSpec[op];
+
+ vp = sp++;
+ SAVE_SP(fp);
+ NONINT_INCREMENT_OP_MIDDLE();
+ OBJ_SET_SLOT(cx, obj, slot, rval);
+ STORE_OPND(-1, rtmp);
+ len = cs->length;
+ DO_NEXT_OP(len);
+ }
+
+ BEGIN_CASE(JSOP_GETPROP)
+ BEGIN_CASE(JSOP_GETXPROP)
+ /* Get an immediate atom naming the property. */
+ atom = GET_ATOM(cx, script, pc);
+ lval = FETCH_OPND(-1);
+ if (JSVAL_IS_STRING(lval) &&
+ atom == cx->runtime->atomState.lengthAtom) {
+ rval = INT_TO_JSVAL(JSSTRING_LENGTH(JSVAL_TO_STRING(lval)));
+ obj = NULL;
+ } else {
+ id = ATOM_TO_JSID(atom);
+ VALUE_TO_OBJECT(cx, lval, obj);
+ STORE_OPND(-1, OBJECT_TO_JSVAL(obj));
+ SAVE_SP_AND_PC(fp);
+ CACHED_GET(OBJ_GET_PROPERTY(cx, obj, id, &rval));
+ if (!ok)
+ goto out;
+ }
+ STORE_OPND(-1, rval);
+ END_CASE(JSOP_GETPROP)
+
+ BEGIN_CASE(JSOP_SETPROP)
+ /* Pop the right-hand side into rval for OBJ_SET_PROPERTY. */
+ rval = FETCH_OPND(-1);
+
+ /* Get an immediate atom naming the property. */
+ atom = GET_ATOM(cx, script, pc);
+ id = ATOM_TO_JSID(atom);
+ PROPERTY_OP(-2, CACHED_SET(OBJ_SET_PROPERTY(cx, obj, id, &rval)));
+ sp--;
+ STORE_OPND(-1, rval);
+ obj = NULL;
+ END_CASE(JSOP_SETPROP)
+
+ BEGIN_CASE(JSOP_GETELEM)
+ BEGIN_CASE(JSOP_GETXELEM)
+ ELEMENT_OP(-1, CACHED_GET(OBJ_GET_PROPERTY(cx, obj, id, &rval)));
+ sp--;
+ STORE_OPND(-1, rval);
+ END_CASE(JSOP_GETELEM)
+
+ BEGIN_CASE(JSOP_SETELEM)
+ rval = FETCH_OPND(-1);
+ ELEMENT_OP(-2, CACHED_SET(OBJ_SET_PROPERTY(cx, obj, id, &rval)));
+ sp -= 2;
+ STORE_OPND(-1, rval);
+ obj = NULL;
+ END_CASE(JSOP_SETELEM)
+
+ BEGIN_CASE(JSOP_ENUMELEM)
+ /* Funky: the value to set is under the [obj, id] pair. */
+ FETCH_ELEMENT_ID(-1, id);
+ FETCH_OBJECT(cx, -2, lval, obj);
+ CHECK_ELEMENT_ID(obj, id);
+ rval = FETCH_OPND(-3);
+ SAVE_SP_AND_PC(fp);
+ ok = OBJ_SET_PROPERTY(cx, obj, id, &rval);
+ if (!ok)
+ goto out;
+ sp -= 3;
+ END_CASE(JSOP_ENUMELEM)
+
+/*
+ * LAZY_ARGS_THISP allows the JSOP_ARGSUB bytecode to defer creation of the
+ * arguments object until it is truly needed. JSOP_ARGSUB optimizes away
+ * arguments objects when the only uses of the 'arguments' parameter are to
+ * fetch individual actual parameters. But if such a use were then invoked,
+ * e.g., arguments[i](), the 'this' parameter would and must bind to the
+ * caller's arguments object. So JSOP_ARGSUB sets obj to LAZY_ARGS_THISP.
+ */
+#define LAZY_ARGS_THISP ((JSObject *) JSVAL_VOID)
+
+ BEGIN_CASE(JSOP_PUSHOBJ)
+ if (obj == LAZY_ARGS_THISP && !(obj = js_GetArgsObject(cx, fp))) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ PUSH_OPND(OBJECT_TO_JSVAL(obj));
+ END_CASE(JSOP_PUSHOBJ)
+
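+/*
+ * JSOP_CALL/JSOP_EVAL: when the callee is an interpreted function, build a
+ * JSInlineFrame directly on the arena stack and keep interpreting in this
+ * loop rather than recursing through C; all other callees go out through
+ * js_Invoke.
+ */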
+ BEGIN_CASE(JSOP_CALL)
+ BEGIN_CASE(JSOP_EVAL)
+ argc = GET_ARGC(pc);
+ vp = sp - (argc + 2);
+ lval = *vp;
+ SAVE_SP_AND_PC(fp);
+ if (VALUE_IS_FUNCTION(cx, lval) &&
+ (obj = JSVAL_TO_OBJECT(lval),
+ fun = (JSFunction *) JS_GetPrivate(cx, obj),
+ FUN_INTERPRETED(fun)))
+ /* inline_call: */
+ {
+ uintN nframeslots, nvars, nslots, missing;
+ JSArena *a;
+ jsuword avail, nbytes;
+ JSBool overflow;
+ void *newmark;
+ jsval *rvp;
+ JSInlineFrame *newifp;
+ JSInterpreterHook hook;
+
+ /* Restrict recursion of lightweight functions. */
+ if (inlineCallCount == MAX_INLINE_CALL_COUNT) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_OVER_RECURSED);
+ ok = JS_FALSE;
+ goto out;
+ }
+
+ /* Compute the total number of stack slots needed for fun. */
+ nframeslots = JS_HOWMANY(sizeof(JSInlineFrame), sizeof(jsval));
+ nvars = fun->u.i.nvars;
+ script = fun->u.i.script;
+ depth = (jsint) script->depth;
+ nslots = nframeslots + nvars + 2 * depth;
+
+ /* Allocate missing expected args adjacent to actual args. */
+ missing = (fun->nargs > argc) ? fun->nargs - argc : 0;
+ a = cx->stackPool.current;
+ avail = a->avail;
+ newmark = (void *) avail;
+ if (missing) {
+ newsp = sp + missing;
+ overflow = (jsuword) newsp > a->limit;
+ if (overflow)
+ nslots += 2 + argc + missing;
+ else if ((jsuword) newsp > avail)
+ avail = a->avail = (jsuword) newsp;
+ }
+#ifdef __GNUC__
+ else overflow = JS_FALSE; /* suppress bogus gcc warnings */
+#endif
+
+ /* Allocate the inline frame with its vars and operand slots. */
+ newsp = (jsval *) avail;
+ nbytes = nslots * sizeof(jsval);
+ avail += nbytes;
+ if (avail <= a->limit) {
+ a->avail = avail;
+ } else {
+ JS_ARENA_ALLOCATE_CAST(newsp, jsval *, &cx->stackPool,
+ nbytes);
+ if (!newsp) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_STACK_OVERFLOW,
+ (fp && fp->fun)
+ ? JS_GetFunctionName(fp->fun)
+ : "script");
+ goto bad_inline_call;
+ }
+ }
+
+            /*
+             * Move the actual args if the missing args would overflow arena
+             * a, then push void for each missing arg.
+             */
+ rvp = vp;
+ if (missing) {
+ if (overflow) {
+ memcpy(newsp, vp, (2 + argc) * sizeof(jsval));
+ vp = newsp;
+ sp = vp + 2 + argc;
+ newsp = sp + missing;
+ }
+ do {
+ PUSH(JSVAL_VOID);
+ } while (--missing != 0);
+ }
+
+ /* Claim space for the stack frame and initialize it. */
+ newifp = (JSInlineFrame *) newsp;
+ newsp += nframeslots;
+ newifp->frame.callobj = NULL;
+ newifp->frame.argsobj = NULL;
+ newifp->frame.varobj = NULL;
+ newifp->frame.script = script;
+ newifp->frame.fun = fun;
+ newifp->frame.argc = argc;
+ newifp->frame.argv = vp + 2;
+ newifp->frame.rval = JSVAL_VOID;
+ newifp->frame.nvars = nvars;
+ newifp->frame.vars = newsp;
+ newifp->frame.down = fp;
+ newifp->frame.annotation = NULL;
+ newifp->frame.scopeChain = parent = OBJ_GET_PARENT(cx, obj);
+ newifp->frame.sharpDepth = 0;
+ newifp->frame.sharpArray = NULL;
+ newifp->frame.flags = 0;
+ newifp->frame.dormantNext = NULL;
+ newifp->frame.xmlNamespace = NULL;
+ newifp->frame.blockChain = NULL;
+ newifp->rvp = rvp;
+ newifp->mark = newmark;
+
+ /* Compute the 'this' parameter now that argv is set. */
+ if (!JSVAL_IS_OBJECT(vp[1])) {
+ PRIMITIVE_TO_OBJECT(cx, vp[1], obj2);
+ if (!obj2)
+ goto bad_inline_call;
+ vp[1] = OBJECT_TO_JSVAL(obj2);
+ }
+ newifp->frame.thisp =
+ js_ComputeThis(cx,
+ JSFUN_BOUND_METHOD_TEST(fun->flags)
+ ? parent
+ : JSVAL_TO_OBJECT(vp[1]),
+ newifp->frame.argv);
+ if (!newifp->frame.thisp)
+ goto bad_inline_call;
+#ifdef DUMP_CALL_TABLE
+ LogCall(cx, *vp, argc, vp + 2);
+#endif
+
+ /* Push void to initialize local variables. */
+ sp = newsp;
+ while (nvars--)
+ PUSH(JSVAL_VOID);
+ sp += depth;
+ newifp->frame.spbase = sp;
+ SAVE_SP(&newifp->frame);
+
+ /* Call the debugger hook if present. */
+ hook = rt->callHook;
+ if (hook) {
+ newifp->frame.pc = NULL;
+ newifp->hookData = hook(cx, &newifp->frame, JS_TRUE, 0,
+ rt->callHookData);
+ LOAD_INTERRUPT_HANDLER(rt);
+ } else {
+ newifp->hookData = NULL;
+ }
+
+ /* Scope with a call object parented by the callee's parent. */
+ if (JSFUN_HEAVYWEIGHT_TEST(fun->flags) &&
+ !js_GetCallObject(cx, &newifp->frame, parent)) {
+ goto bad_inline_call;
+ }
+
+ /* Switch to new version if currentVersion wasn't overridden. */
+ newifp->callerVersion = cx->version;
+ if (JS_LIKELY(cx->version == currentVersion)) {
+ currentVersion = script->version;
+ if (currentVersion != cx->version)
+ js_SetVersion(cx, currentVersion);
+ }
+
+ /* Push the frame and set interpreter registers. */
+ cx->fp = fp = &newifp->frame;
+ pc = script->code;
+#ifndef JS_THREADED_INTERP
+ endpc = pc + script->length;
+#endif
+ obj = NULL;
+ inlineCallCount++;
+ JS_RUNTIME_METER(rt, inlineCalls);
+
+ /* Load first opcode and dispatch it (safe since JSOP_STOP). */
+ op = *pc;
+ DO_OP();
+
+ bad_inline_call:
+ RESTORE_SP(fp);
+ JS_ASSERT(fp->pc == pc);
+ script = fp->script;
+ depth = (jsint) script->depth;
+ js_FreeRawStack(cx, newmark);
+ ok = JS_FALSE;
+ goto out;
+ }
+
+ ok = js_Invoke(cx, argc, 0);
+ RESTORE_SP(fp);
+ LOAD_BRANCH_CALLBACK(cx);
+ LOAD_INTERRUPT_HANDLER(rt);
+ if (!ok)
+ goto out;
+ JS_RUNTIME_METER(rt, nonInlineCalls);
+#if JS_HAS_LVALUE_RETURN
+ if (cx->rval2set) {
+ /*
+ * Use the stack depth we didn't claim in our budget, but that
+ * we know is there on account of [fun, this] already having
+ * been pushed, at a minimum (if no args). Those two slots
+ * have been popped and [rval] has been pushed, which leaves
+ * one more slot for rval2 before we might overflow.
+ *
+ * NB: rval2 must be the property identifier, and rval the
+ * object from which to get the property. The pair form an
+ * ECMA "reference type", which can be used on the right- or
+ * left-hand side of assignment ops. Note well: only native
+ * methods can return reference types. See JSOP_SETCALL just
+ * below for the left-hand-side case.
+ */
+ PUSH_OPND(cx->rval2);
+ ELEMENT_OP(-1, ok = OBJ_GET_PROPERTY(cx, obj, id, &rval));
+
+ sp--;
+ STORE_OPND(-1, rval);
+ cx->rval2set = JS_FALSE;
+ }
+#endif /* JS_HAS_LVALUE_RETURN */
+ obj = NULL;
+ END_CASE(JSOP_CALL)
+
+#if JS_HAS_LVALUE_RETURN
+ BEGIN_CASE(JSOP_SETCALL)
+ argc = GET_ARGC(pc);
+ SAVE_SP_AND_PC(fp);
+ ok = js_Invoke(cx, argc, 0);
+ RESTORE_SP(fp);
+ LOAD_BRANCH_CALLBACK(cx);
+ LOAD_INTERRUPT_HANDLER(rt);
+ if (!ok)
+ goto out;
+ if (!cx->rval2set) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_LEFTSIDE_OF_ASS);
+ ok = JS_FALSE;
+ goto out;
+ }
+ PUSH_OPND(cx->rval2);
+ cx->rval2set = JS_FALSE;
+ obj = NULL;
+ END_CASE(JSOP_SETCALL)
+#endif
+
+ BEGIN_CASE(JSOP_NAME)
+ atom = GET_ATOM(cx, script, pc);
+ id = ATOM_TO_JSID(atom);
+
+ SAVE_SP_AND_PC(fp);
+ ok = js_FindProperty(cx, id, &obj, &obj2, &prop);
+ if (!ok)
+ goto out;
+ if (!prop) {
+ /* Kludge to allow (typeof foo == "undefined") tests. */
+ len = JSOP_NAME_LENGTH;
+ endpc = script->code + script->length;
+ for (pc2 = pc + len; pc2 < endpc; pc2++) {
+ op2 = (JSOp)*pc2;
+ if (op2 == JSOP_TYPEOF) {
+ PUSH_OPND(JSVAL_VOID);
+ DO_NEXT_OP(len);
+ }
+ if (op2 != JSOP_GROUP)
+ break;
+ }
+ goto atom_not_defined;
+ }
+
+ /* Take the slow path if prop was not found in a native object. */
+ if (!OBJ_IS_NATIVE(obj) || !OBJ_IS_NATIVE(obj2)) {
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+ ok = OBJ_GET_PROPERTY(cx, obj, id, &rval);
+ if (!ok)
+ goto out;
+ } else {
+ sprop = (JSScopeProperty *)prop;
+ NATIVE_GET(cx, obj, obj2, sprop, &rval);
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+ }
+ PUSH_OPND(rval);
+ END_CASE(JSOP_NAME)
+
+ BEGIN_CASE(JSOP_UINT16)
+ i = (jsint) GET_ATOM_INDEX(pc);
+ rval = INT_TO_JSVAL(i);
+ PUSH_OPND(rval);
+ obj = NULL;
+ END_CASE(JSOP_UINT16)
+
+ BEGIN_CASE(JSOP_UINT24)
+ i = (jsint) GET_LITERAL_INDEX(pc);
+ rval = INT_TO_JSVAL(i);
+ PUSH_OPND(rval);
+ END_CASE(JSOP_UINT24)
+
+ BEGIN_CASE(JSOP_LITERAL)
+ atomIndex = GET_LITERAL_INDEX(pc);
+ atom = js_GetAtom(cx, &script->atomMap, atomIndex);
+ PUSH_OPND(ATOM_KEY(atom));
+ obj = NULL;
+ END_CASE(JSOP_LITERAL)
+
+ BEGIN_CASE(JSOP_FINDNAME)
+ atomIndex = GET_LITERAL_INDEX(pc);
+ atom = js_GetAtom(cx, &script->atomMap, atomIndex);
+ SAVE_SP_AND_PC(fp);
+ obj = js_FindIdentifierBase(cx, ATOM_TO_JSID(atom));
+ if (!obj) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ PUSH_OPND(OBJECT_TO_JSVAL(obj));
+ PUSH_OPND(ATOM_KEY(atom));
+ END_CASE(JSOP_FINDNAME)
+
+ BEGIN_CASE(JSOP_LITOPX)
+ /*
+ * Load atomIndex, which is used by code at each do_JSOP_* label.
+ *
+ * Also set pc2 to point at the bytecode extended by this prefix
+ * to have a leading 24 bit atomIndex, instead of the unextended
+ * 16-bit atomIndex that normally comes after op. This enables
+ * JOF_INDEXCONST format ops (which have multiple immediates) to
+ * collect their other immediate via GET_VARNO(pc2) or similar.
+ *
+ * Finally, load op and, if threading, adjust pc so that it will
+ * be advanced properly at the end of op's case by DO_NEXT_OP.
+ */
+ atomIndex = GET_LITERAL_INDEX(pc);
+ pc2 = pc + 1 + LITERAL_INDEX_LEN;
+ op = *pc2;
+ pc += JSOP_LITOPX_LENGTH - (1 + ATOM_INDEX_LEN);
+#ifndef JS_THREADED_INTERP
+ len = js_CodeSpec[op].length;
+#endif
+ switch (op) {
+ case JSOP_ANONFUNOBJ: goto do_JSOP_ANONFUNOBJ;
+ case JSOP_BINDNAME: goto do_JSOP_BINDNAME;
+ case JSOP_CLOSURE: goto do_JSOP_CLOSURE;
+ case JSOP_DEFCONST: goto do_JSOP_DEFCONST;
+ case JSOP_DEFFUN: goto do_JSOP_DEFFUN;
+ case JSOP_DEFLOCALFUN: goto do_JSOP_DEFLOCALFUN;
+ case JSOP_DEFVAR: goto do_JSOP_DEFVAR;
+#if JS_HAS_EXPORT_IMPORT
+ case JSOP_EXPORTNAME: goto do_JSOP_EXPORTNAME;
+#endif
+#if JS_HAS_XML_SUPPORT
+ case JSOP_GETMETHOD: goto do_JSOP_GETMETHOD;
+ case JSOP_SETMETHOD: goto do_JSOP_SETMETHOD;
+#endif
+ case JSOP_NAMEDFUNOBJ: goto do_JSOP_NAMEDFUNOBJ;
+ case JSOP_NUMBER: goto do_JSOP_NUMBER;
+ case JSOP_OBJECT: goto do_JSOP_OBJECT;
+#if JS_HAS_XML_SUPPORT
+ case JSOP_QNAMECONST: goto do_JSOP_QNAMECONST;
+ case JSOP_QNAMEPART: goto do_JSOP_QNAMEPART;
+#endif
+ case JSOP_REGEXP: goto do_JSOP_REGEXP;
+ case JSOP_SETCONST: goto do_JSOP_SETCONST;
+ case JSOP_STRING: goto do_JSOP_STRING;
+#if JS_HAS_XML_SUPPORT
+ case JSOP_XMLCDATA: goto do_JSOP_XMLCDATA;
+ case JSOP_XMLCOMMENT: goto do_JSOP_XMLCOMMENT;
+ case JSOP_XMLOBJECT: goto do_JSOP_XMLOBJECT;
+ case JSOP_XMLPI: goto do_JSOP_XMLPI;
+#endif
+ case JSOP_ENTERBLOCK: goto do_JSOP_ENTERBLOCK;
+ default: JS_ASSERT(0);
+ }
+ /* NOTREACHED */
+
+ BEGIN_CASE(JSOP_NUMBER)
+ BEGIN_CASE(JSOP_STRING)
+ BEGIN_CASE(JSOP_OBJECT)
+ atomIndex = GET_ATOM_INDEX(pc);
+
+ do_JSOP_NUMBER:
+ do_JSOP_STRING:
+ do_JSOP_OBJECT:
+ atom = js_GetAtom(cx, &script->atomMap, atomIndex);
+ PUSH_OPND(ATOM_KEY(atom));
+ obj = NULL;
+ END_CASE(JSOP_NUMBER)
+
+ BEGIN_LITOPX_CASE(JSOP_REGEXP, 0)
+ {
+ JSRegExp *re;
+ JSObject *funobj;
+
+ /*
+ * Push a regexp object for the atom mapped by the bytecode at pc,
+ * cloning the literal's regexp object if necessary, to simulate in
+ * the pre-compile/execute-later case what ECMA specifies for the
+ * compile-and-go case: that scanning each regexp literal creates
+ * a single corresponding RegExp object.
+ *
+ * To support pre-compilation transparently, we must handle the
+ * case where a regexp object literal is used in a different global
+ * at execution time from the global with which it was scanned at
+ * compile time. We do this by re-wrapping the JSRegExp private
+ * data struct with a cloned object having the right prototype and
+ * parent, and having its own lastIndex property value storage.
+ *
+ * Unlike JSOP_DEFFUN and other prolog bytecodes that may clone
+ * literal objects, we don't want to pay a script prolog execution
+ * price for all regexp literals in a script (many may not be used
+ * by a particular execution of that script, depending on control
+ * flow), so we initialize lazily here.
+ *
+ * XXX This code is specific to regular expression objects. If we
+ * need a similar op for other kinds of object literals, we should
+ * push cloning down under JSObjectOps and reuse code here.
+ */
+ JS_ASSERT(ATOM_IS_OBJECT(atom));
+ obj = ATOM_TO_OBJECT(atom);
+ JS_ASSERT(OBJ_GET_CLASS(cx, obj) == &js_RegExpClass);
+
+ re = (JSRegExp *) JS_GetPrivate(cx, obj);
+ slot = re->cloneIndex;
+ if (fp->fun) {
+ /*
+ * We're in function code, not global or eval code (in eval
+ * code, JSOP_REGEXP is never emitted). The code generator
+ * recorded in fp->fun->nregexps the number of re->cloneIndex
+ * slots that it reserved in the cloned funobj.
+ */
+ funobj = JSVAL_TO_OBJECT(fp->argv[-2]);
+ slot += JSCLASS_RESERVED_SLOTS(&js_FunctionClass);
+ if (!JS_GetReservedSlot(cx, funobj, slot, &rval))
+ return JS_FALSE;
+ if (JSVAL_IS_VOID(rval))
+ rval = JSVAL_NULL;
+ } else {
+ /*
+ * We're in global code. The code generator already arranged
+ * via script->numGlobalVars to reserve a global variable slot
+ * at cloneIndex. All global variable slots are initialized
+ * to null, not void, for faster testing in JSOP_*GVAR cases.
+ */
+ rval = fp->vars[slot];
+#ifdef __GNUC__
+ funobj = NULL; /* suppress bogus gcc warnings */
+#endif
+ }
+
+ if (JSVAL_IS_NULL(rval)) {
+ /* Compute the current global object in obj2. */
+ obj2 = fp->scopeChain;
+ while ((parent = OBJ_GET_PARENT(cx, obj2)) != NULL)
+ obj2 = parent;
+
+ /*
+ * We must home sp here, because either js_CloneRegExpObject
+ * or JS_SetReservedSlot could nest a last-ditch GC. We home
+                 * pc as well, in case js_CloneRegExpObject has to look up the
+ * "RegExp" class in the global object, which could entail a
+ * JSNewResolveOp call.
+ */
+ SAVE_SP_AND_PC(fp);
+
+ /*
+ * If obj's parent is not obj2, we must clone obj so that it
+ * has the right parent, and therefore, the right prototype.
+ *
+ * Yes, this means we assume that the correct RegExp.prototype
+ * to which regexp instances (including literals) delegate can
+ * be distinguished solely by the instance's parent, which was
+ * set to the parent of the RegExp constructor function object
+ * when the instance was created. In other words,
+ *
+ * (/x/.__parent__ == RegExp.__parent__) implies
+ * (/x/.__proto__ == RegExp.prototype)
+ *
+ * (unless you assign a different object to RegExp.prototype
+ * at runtime, in which case, ECMA doesn't specify operation,
+ * and you get what you deserve).
+ *
+ * This same coupling between instance parent and constructor
+ * parent turns up everywhere (see jsobj.c's FindClassObject,
+ * js_ConstructObject, and js_NewObject). It's fundamental to
+ * the design of the language when you consider multiple global
+ * objects and separate compilation and execution, even though
+ * it is not specified fully in ECMA.
+ */
+ if (OBJ_GET_PARENT(cx, obj) != obj2) {
+ obj = js_CloneRegExpObject(cx, obj, obj2);
+ if (!obj) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ }
+ rval = OBJECT_TO_JSVAL(obj);
+
+ /* Store the regexp object value in its cloneIndex slot. */
+ if (fp->fun) {
+ if (!JS_SetReservedSlot(cx, funobj, slot, rval))
+ return JS_FALSE;
+ } else {
+ fp->vars[slot] = rval;
+ }
+ }
+
+ PUSH_OPND(rval);
+ obj = NULL;
+ }
+ END_LITOPX_CASE(JSOP_REGEXP)
+
+ BEGIN_CASE(JSOP_ZERO)
+ PUSH_OPND(JSVAL_ZERO);
+ obj = NULL;
+ END_CASE(JSOP_ZERO)
+
+ BEGIN_CASE(JSOP_ONE)
+ PUSH_OPND(JSVAL_ONE);
+ obj = NULL;
+ END_CASE(JSOP_ONE)
+
+ BEGIN_CASE(JSOP_NULL)
+ PUSH_OPND(JSVAL_NULL);
+ obj = NULL;
+ END_CASE(JSOP_NULL)
+
+ BEGIN_CASE(JSOP_THIS)
+ obj = fp->thisp;
+ clasp = OBJ_GET_CLASS(cx, obj);
+ if (clasp->flags & JSCLASS_IS_EXTENDED) {
+ JSExtendedClass *xclasp;
+
+ xclasp = (JSExtendedClass *) clasp;
+ if (xclasp->outerObject) {
+ obj = xclasp->outerObject(cx, obj);
+ if (!obj) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ }
+ }
+
+ PUSH_OPND(OBJECT_TO_JSVAL(obj));
+ obj = NULL;
+ END_CASE(JSOP_THIS)
+
+ BEGIN_CASE(JSOP_FALSE)
+ PUSH_OPND(JSVAL_FALSE);
+ obj = NULL;
+ END_CASE(JSOP_FALSE)
+
+ BEGIN_CASE(JSOP_TRUE)
+ PUSH_OPND(JSVAL_TRUE);
+ obj = NULL;
+ END_CASE(JSOP_TRUE)
+
+ BEGIN_CASE(JSOP_TABLESWITCH)
+ pc2 = pc;
+ len = GET_JUMP_OFFSET(pc2);
+
+ /*
+ * ECMAv2+ forbids conversion of discriminant, so we will skip to
+ * the default case if the discriminant isn't already an int jsval.
+ * (This opcode is emitted only for dense jsint-domain switches.)
+ */
+ rval = POP_OPND();
+ if (!JSVAL_IS_INT(rval))
+ DO_NEXT_OP(len);
+ i = JSVAL_TO_INT(rval);
+
+ pc2 += JUMP_OFFSET_LEN;
+ low = GET_JUMP_OFFSET(pc2);
+ pc2 += JUMP_OFFSET_LEN;
+ high = GET_JUMP_OFFSET(pc2);
+
+ i -= low;
+ if ((jsuint)i < (jsuint)(high - low + 1)) {
+ pc2 += JUMP_OFFSET_LEN + JUMP_OFFSET_LEN * i;
+ off = (jsint) GET_JUMP_OFFSET(pc2);
+ if (off)
+ len = off;
+ }
+ END_VARLEN_CASE
+
+ BEGIN_CASE(JSOP_LOOKUPSWITCH)
+ lval = POP_OPND();
+ pc2 = pc;
+ len = GET_JUMP_OFFSET(pc2);
+
+ if (!JSVAL_IS_NUMBER(lval) &&
+ !JSVAL_IS_STRING(lval) &&
+ !JSVAL_IS_BOOLEAN(lval)) {
+ DO_NEXT_OP(len);
+ }
+
+ pc2 += JUMP_OFFSET_LEN;
+ npairs = (jsint) GET_ATOM_INDEX(pc2);
+ pc2 += ATOM_INDEX_LEN;
+
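+/*
+ * SEARCH_PAIRS scans the (atom, jump offset) pairs that follow a
+ * JSOP_LOOKUPSWITCH, jumping to the first matching case; if none matches,
+ * len still holds the default offset loaded above.
+ */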
+#define SEARCH_PAIRS(MATCH_CODE) \
+ while (npairs) { \
+ atom = GET_ATOM(cx, script, pc2); \
+ rval = ATOM_KEY(atom); \
+ MATCH_CODE \
+ if (match) { \
+ pc2 += ATOM_INDEX_LEN; \
+ len = GET_JUMP_OFFSET(pc2); \
+ DO_NEXT_OP(len); \
+ } \
+ pc2 += ATOM_INDEX_LEN + JUMP_OFFSET_LEN; \
+ npairs--; \
+ }
+ if (JSVAL_IS_STRING(lval)) {
+ str = JSVAL_TO_STRING(lval);
+ SEARCH_PAIRS(
+ match = (JSVAL_IS_STRING(rval) &&
+ ((str2 = JSVAL_TO_STRING(rval)) == str ||
+ js_EqualStrings(str2, str)));
+ )
+ } else if (JSVAL_IS_DOUBLE(lval)) {
+ d = *JSVAL_TO_DOUBLE(lval);
+ SEARCH_PAIRS(
+ match = (JSVAL_IS_DOUBLE(rval) &&
+ *JSVAL_TO_DOUBLE(rval) == d);
+ )
+ } else {
+ SEARCH_PAIRS(
+ match = (lval == rval);
+ )
+ }
+#undef SEARCH_PAIRS
+ END_VARLEN_CASE
+
+ BEGIN_CASE(JSOP_TABLESWITCHX)
+ pc2 = pc;
+ len = GET_JUMPX_OFFSET(pc2);
+
+ /*
+ * ECMAv2+ forbids conversion of discriminant, so we will skip to
+ * the default case if the discriminant isn't already an int jsval.
+ * (This opcode is emitted only for dense jsint-domain switches.)
+ */
+ rval = POP_OPND();
+ if (!JSVAL_IS_INT(rval))
+ DO_NEXT_OP(len);
+ i = JSVAL_TO_INT(rval);
+
+ pc2 += JUMPX_OFFSET_LEN;
+ low = GET_JUMP_OFFSET(pc2);
+ pc2 += JUMP_OFFSET_LEN;
+ high = GET_JUMP_OFFSET(pc2);
+
+ i -= low;
+ if ((jsuint)i < (jsuint)(high - low + 1)) {
+ pc2 += JUMP_OFFSET_LEN + JUMPX_OFFSET_LEN * i;
+ off = (jsint) GET_JUMPX_OFFSET(pc2);
+ if (off)
+ len = off;
+ }
+ END_VARLEN_CASE
+
+ BEGIN_CASE(JSOP_LOOKUPSWITCHX)
+ lval = POP_OPND();
+ pc2 = pc;
+ len = GET_JUMPX_OFFSET(pc2);
+
+ if (!JSVAL_IS_NUMBER(lval) &&
+ !JSVAL_IS_STRING(lval) &&
+ !JSVAL_IS_BOOLEAN(lval)) {
+ DO_NEXT_OP(len);
+ }
+
+ pc2 += JUMPX_OFFSET_LEN;
+ npairs = (jsint) GET_ATOM_INDEX(pc2);
+ pc2 += ATOM_INDEX_LEN;
+
+#define SEARCH_EXTENDED_PAIRS(MATCH_CODE) \
+ while (npairs) { \
+ atom = GET_ATOM(cx, script, pc2); \
+ rval = ATOM_KEY(atom); \
+ MATCH_CODE \
+ if (match) { \
+ pc2 += ATOM_INDEX_LEN; \
+ len = GET_JUMPX_OFFSET(pc2); \
+ DO_NEXT_OP(len); \
+ } \
+ pc2 += ATOM_INDEX_LEN + JUMPX_OFFSET_LEN; \
+ npairs--; \
+ }
+ if (JSVAL_IS_STRING(lval)) {
+ str = JSVAL_TO_STRING(lval);
+ SEARCH_EXTENDED_PAIRS(
+ match = (JSVAL_IS_STRING(rval) &&
+ ((str2 = JSVAL_TO_STRING(rval)) == str ||
+ js_EqualStrings(str2, str)));
+ )
+ } else if (JSVAL_IS_DOUBLE(lval)) {
+ d = *JSVAL_TO_DOUBLE(lval);
+ SEARCH_EXTENDED_PAIRS(
+ match = (JSVAL_IS_DOUBLE(rval) &&
+ *JSVAL_TO_DOUBLE(rval) == d);
+ )
+ } else {
+ SEARCH_EXTENDED_PAIRS(
+ match = (lval == rval);
+ )
+ }
+#undef SEARCH_EXTENDED_PAIRS
+ END_VARLEN_CASE
+
+ EMPTY_CASE(JSOP_CONDSWITCH)
+
+#if JS_HAS_EXPORT_IMPORT
+ BEGIN_CASE(JSOP_EXPORTALL)
+ obj = fp->varobj;
+ SAVE_SP_AND_PC(fp);
+ ida = JS_Enumerate(cx, obj);
+ if (!ida) {
+ ok = JS_FALSE;
+ } else {
+ for (i = 0, j = ida->length; i < j; i++) {
+ id = ida->vector[i];
+ ok = OBJ_LOOKUP_PROPERTY(cx, obj, id, &obj2, &prop);
+ if (!ok)
+ break;
+ if (!prop)
+ continue;
+ ok = OBJ_GET_ATTRIBUTES(cx, obj, id, prop, &attrs);
+ if (ok) {
+ attrs |= JSPROP_EXPORTED;
+ ok = OBJ_SET_ATTRIBUTES(cx, obj, id, prop, &attrs);
+ }
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+ if (!ok)
+ break;
+ }
+ JS_DestroyIdArray(cx, ida);
+ }
+ END_CASE(JSOP_EXPORTALL)
+
+ BEGIN_LITOPX_CASE(JSOP_EXPORTNAME, 0)
+ id = ATOM_TO_JSID(atom);
+ obj = fp->varobj;
+ SAVE_SP_AND_PC(fp);
+ ok = OBJ_LOOKUP_PROPERTY(cx, obj, id, &obj2, &prop);
+ if (!ok)
+ goto out;
+ if (!prop) {
+ ok = OBJ_DEFINE_PROPERTY(cx, obj, id, JSVAL_VOID, NULL, NULL,
+ JSPROP_EXPORTED, NULL);
+ } else {
+ ok = OBJ_GET_ATTRIBUTES(cx, obj, id, prop, &attrs);
+ if (ok) {
+ attrs |= JSPROP_EXPORTED;
+ ok = OBJ_SET_ATTRIBUTES(cx, obj, id, prop, &attrs);
+ }
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+ }
+ if (!ok)
+ goto out;
+ END_LITOPX_CASE(JSOP_EXPORTNAME)
+
+ BEGIN_CASE(JSOP_IMPORTALL)
+ id = (jsid) JSVAL_VOID;
+ PROPERTY_OP(-1, ok = ImportProperty(cx, obj, id));
+ sp--;
+ END_CASE(JSOP_IMPORTALL)
+
+ BEGIN_CASE(JSOP_IMPORTPROP)
+ /* Get an immediate atom naming the property. */
+ atom = GET_ATOM(cx, script, pc);
+ id = ATOM_TO_JSID(atom);
+ PROPERTY_OP(-1, ok = ImportProperty(cx, obj, id));
+ sp--;
+ END_CASE(JSOP_IMPORTPROP)
+
+ BEGIN_CASE(JSOP_IMPORTELEM)
+ ELEMENT_OP(-1, ok = ImportProperty(cx, obj, id));
+ sp -= 2;
+ END_CASE(JSOP_IMPORTELEM)
+#endif /* JS_HAS_EXPORT_IMPORT */
+
+ BEGIN_CASE(JSOP_TRAP)
+ SAVE_SP_AND_PC(fp);
+ switch (JS_HandleTrap(cx, script, pc, &rval)) {
+ case JSTRAP_ERROR:
+ ok = JS_FALSE;
+ goto out;
+ case JSTRAP_CONTINUE:
+ JS_ASSERT(JSVAL_IS_INT(rval));
+ op = (JSOp) JSVAL_TO_INT(rval);
+ JS_ASSERT((uintN)op < (uintN)JSOP_LIMIT);
+ LOAD_INTERRUPT_HANDLER(rt);
+ DO_OP();
+ case JSTRAP_RETURN:
+ fp->rval = rval;
+ goto out;
+ case JSTRAP_THROW:
+ cx->throwing = JS_TRUE;
+ cx->exception = rval;
+ ok = JS_FALSE;
+ goto out;
+ default:;
+ }
+ LOAD_INTERRUPT_HANDLER(rt);
+ END_CASE(JSOP_TRAP)
+
+ BEGIN_CASE(JSOP_ARGUMENTS)
+ SAVE_SP_AND_PC(fp);
+ ok = js_GetArgsValue(cx, fp, &rval);
+ if (!ok)
+ goto out;
+ PUSH_OPND(rval);
+ obj = NULL;
+ END_CASE(JSOP_ARGUMENTS)
+
+ BEGIN_CASE(JSOP_ARGSUB)
+ id = INT_TO_JSID(GET_ARGNO(pc));
+ SAVE_SP_AND_PC(fp);
+ ok = js_GetArgsProperty(cx, fp, id, &obj, &rval);
+ if (!ok)
+ goto out;
+ if (!obj) {
+ /*
+ * If arguments was not overridden by eval('arguments = ...'),
+ * set obj to the magic cookie respected by JSOP_PUSHOBJ, just
+ * in case this bytecode is part of an 'arguments[i](j, k)' or
+                 * similar invocation sequence, where the function that
+ * is invoked expects its 'this' parameter to be the caller's
+ * arguments object.
+ */
+ obj = LAZY_ARGS_THISP;
+ }
+ PUSH_OPND(rval);
+ END_CASE(JSOP_ARGSUB)
+
+#undef LAZY_ARGS_THISP
+
+ BEGIN_CASE(JSOP_ARGCNT)
+ id = ATOM_TO_JSID(rt->atomState.lengthAtom);
+ SAVE_SP_AND_PC(fp);
+ ok = js_GetArgsProperty(cx, fp, id, &obj, &rval);
+ if (!ok)
+ goto out;
+ PUSH_OPND(rval);
+ END_CASE(JSOP_ARGCNT)
+
+ BEGIN_CASE(JSOP_GETARG)
+ slot = GET_ARGNO(pc);
+ JS_ASSERT(slot < fp->fun->nargs);
+ PUSH_OPND(fp->argv[slot]);
+ obj = NULL;
+ END_CASE(JSOP_GETARG)
+
+ BEGIN_CASE(JSOP_SETARG)
+ slot = GET_ARGNO(pc);
+ JS_ASSERT(slot < fp->fun->nargs);
+ vp = &fp->argv[slot];
+ GC_POKE(cx, *vp);
+ *vp = FETCH_OPND(-1);
+ obj = NULL;
+ END_CASE(JSOP_SETARG)
+
+ BEGIN_CASE(JSOP_GETVAR)
+ slot = GET_VARNO(pc);
+ JS_ASSERT(slot < fp->fun->u.i.nvars);
+ PUSH_OPND(fp->vars[slot]);
+ obj = NULL;
+ END_CASE(JSOP_GETVAR)
+
+ BEGIN_CASE(JSOP_SETVAR)
+ slot = GET_VARNO(pc);
+ JS_ASSERT(slot < fp->fun->u.i.nvars);
+ vp = &fp->vars[slot];
+ GC_POKE(cx, *vp);
+ *vp = FETCH_OPND(-1);
+ obj = NULL;
+ END_CASE(JSOP_SETVAR)
+
+ BEGIN_CASE(JSOP_GETGVAR)
+ slot = GET_VARNO(pc);
+ JS_ASSERT(slot < fp->nvars);
+ lval = fp->vars[slot];
+ if (JSVAL_IS_NULL(lval)) {
+ op = JSOP_NAME;
+ DO_OP();
+ }
+ slot = JSVAL_TO_INT(lval);
+ obj = fp->varobj;
+ rval = OBJ_GET_SLOT(cx, obj, slot);
+ PUSH_OPND(rval);
+ END_CASE(JSOP_GETGVAR)
+
+ BEGIN_CASE(JSOP_SETGVAR)
+ slot = GET_VARNO(pc);
+ JS_ASSERT(slot < fp->nvars);
+ rval = FETCH_OPND(-1);
+ lval = fp->vars[slot];
+ obj = fp->varobj;
+ if (JSVAL_IS_NULL(lval)) {
+ /*
+ * Inline-clone and specialize JSOP_SETNAME code here because
+ * JSOP_SETGVAR has arity 1: [rval], not arity 2: [obj, rval]
+ * as JSOP_SETNAME does, where [obj] is due to JSOP_BINDNAME.
+ */
+ atom = GET_ATOM(cx, script, pc);
+ id = ATOM_TO_JSID(atom);
+ SAVE_SP_AND_PC(fp);
+ CACHED_SET(OBJ_SET_PROPERTY(cx, obj, id, &rval));
+ if (!ok)
+ goto out;
+ STORE_OPND(-1, rval);
+ } else {
+ slot = JSVAL_TO_INT(lval);
+ GC_POKE(cx, obj->slots[slot]);
+ OBJ_SET_SLOT(cx, obj, slot, rval);
+ }
+ obj = NULL;
+ END_CASE(JSOP_SETGVAR)
+
+ BEGIN_CASE(JSOP_DEFCONST)
+ BEGIN_CASE(JSOP_DEFVAR)
+ atomIndex = GET_ATOM_INDEX(pc);
+
+ do_JSOP_DEFCONST:
+ do_JSOP_DEFVAR:
+ atom = js_GetAtom(cx, &script->atomMap, atomIndex);
+ obj = fp->varobj;
+ attrs = JSPROP_ENUMERATE;
+ if (!(fp->flags & JSFRAME_EVAL))
+ attrs |= JSPROP_PERMANENT;
+ if (op == JSOP_DEFCONST)
+ attrs |= JSPROP_READONLY;
+
+ /* Lookup id in order to check for redeclaration problems. */
+ id = ATOM_TO_JSID(atom);
+ SAVE_SP_AND_PC(fp);
+ ok = js_CheckRedeclaration(cx, obj, id, attrs, &obj2, &prop);
+ if (!ok)
+ goto out;
+
+ /* Bind a variable only if it's not yet defined. */
+ if (!prop) {
+ ok = OBJ_DEFINE_PROPERTY(cx, obj, id, JSVAL_VOID, NULL, NULL,
+ attrs, &prop);
+ if (!ok)
+ goto out;
+ JS_ASSERT(prop);
+ obj2 = obj;
+ }
+
+ /*
+ * Try to optimize a property we either just created, or found
+ * directly in the global object, that is permanent, has a slot,
+ * and has stub getter and setter, into a "fast global" accessed
+ * by the JSOP_*GVAR opcodes.
+ */
+ if (atomIndex < script->numGlobalVars &&
+ (attrs & JSPROP_PERMANENT) &&
+ obj2 == obj &&
+ OBJ_IS_NATIVE(obj)) {
+ sprop = (JSScopeProperty *) prop;
+ if (SPROP_HAS_VALID_SLOT(sprop, OBJ_SCOPE(obj)) &&
+ SPROP_HAS_STUB_GETTER(sprop) &&
+ SPROP_HAS_STUB_SETTER(sprop)) {
+ /*
+ * Fast globals use fp->vars to map the global name's
+ * atomIndex to the permanent fp->varobj slot number,
+ * tagged as a jsval. The atomIndex for the global's
+ * name literal is identical to its fp->vars index.
+ */
+ fp->vars[atomIndex] = INT_TO_JSVAL(sprop->slot);
+ }
+ }
+
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+ END_CASE(JSOP_DEFVAR)
+
+ BEGIN_LITOPX_CASE(JSOP_DEFFUN, 0)
+ obj = ATOM_TO_OBJECT(atom);
+ fun = (JSFunction *) JS_GetPrivate(cx, obj);
+ id = ATOM_TO_JSID(fun->atom);
+
+ /*
+ * We must be at top-level (either outermost block that forms a
+ * function's body, or a global) scope, not inside an expression
+ * (JSOP_{ANON,NAMED}FUNOBJ) or compound statement (JSOP_CLOSURE)
+ * in the same compilation unit (ECMA Program).
+ *
+ * However, we could be in a Program being eval'd from inside a
+ * with statement, so we need to distinguish scope chain head from
+ * variables object. Hence the obj2 vs. parent distinction below.
+ * First we make sure the function object we're defining has the
+ * right scope chain. Then we define its name in fp->varobj.
+ *
+ * If static link is not current scope, clone fun's object to link
+ * to the current scope via parent. This clause exists to enable
+ * sharing of compiled functions among multiple equivalent scopes,
+ * splitting the cost of compilation evenly among the scopes and
+ * amortizing it over a number of executions. Examples include XUL
+ * scripts and event handlers shared among Mozilla chrome windows,
+ * and server-side JS user-defined functions shared among requests.
+ *
+ * NB: The Script object exposes compile and exec in the language,
+ * such that this clause introduces an incompatible change from old
+ * JS versions that supported Script. Such a JS version supported
+ * executing a script that defined and called functions scoped by
+ * the compile-time static link, not by the exec-time scope chain.
+ *
+ * We sacrifice compatibility, breaking such scripts, in order to
+ * promote compile-cost sharing and amortizing, and because Script
+ * is not and will not be standardized.
+ */
+ JS_ASSERT(!fp->blockChain);
+ obj2 = fp->scopeChain;
+ if (OBJ_GET_PARENT(cx, obj) != obj2) {
+ obj = js_CloneFunctionObject(cx, obj, obj2);
+ if (!obj) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ }
+
+ /*
+ * Protect obj from any GC hiding below OBJ_DEFINE_PROPERTY. All
+ * paths from here must flow through the "Restore fp->scopeChain"
+ * code below the OBJ_DEFINE_PROPERTY call.
+ */
+ fp->scopeChain = obj;
+ rval = OBJECT_TO_JSVAL(obj);
+
+ /*
+ * ECMA requires functions defined when entering Global code to be
+ * permanent, and functions defined when entering Eval code to be
+ * impermanent.
+ */
+ attrs = JSPROP_ENUMERATE;
+ if (!(fp->flags & JSFRAME_EVAL))
+ attrs |= JSPROP_PERMANENT;
+
+ /*
+ * Load function flags that are also property attributes. Getters
+             * and setters do not need a slot; their value is stored elsewhere
+ * in the property itself, not in obj->slots.
+ */
+ flags = JSFUN_GSFLAG2ATTR(fun->flags);
+ if (flags) {
+ attrs |= flags | JSPROP_SHARED;
+ rval = JSVAL_VOID;
+ }
+
+ /*
+ * Check for a const property of the same name -- or any kind
+ * of property if executing with the strict option. We check
+ * here at runtime as well as at compile-time, to handle eval
+ * as well as multiple HTML script tags.
+ */
+ parent = fp->varobj;
+ SAVE_SP_AND_PC(fp);
+ ok = js_CheckRedeclaration(cx, parent, id, attrs, NULL, NULL);
+ if (ok) {
+ ok = OBJ_DEFINE_PROPERTY(cx, parent, id, rval,
+ (flags & JSPROP_GETTER)
+ ? JS_EXTENSION (JSPropertyOp) obj
+ : NULL,
+ (flags & JSPROP_SETTER)
+ ? JS_EXTENSION (JSPropertyOp) obj
+ : NULL,
+ attrs,
+ &prop);
+ }
+
+ /* Restore fp->scopeChain now that obj is defined in fp->varobj. */
+ fp->scopeChain = obj2;
+ if (!ok)
+ goto out;
+
+#if 0
+ if (attrs == (JSPROP_ENUMERATE | JSPROP_PERMANENT) &&
+ script->numGlobalVars) {
+ /*
+ * As with JSOP_DEFVAR and JSOP_DEFCONST (above), fast globals
+ * use fp->vars to map the global function name's atomIndex to
+ * its permanent fp->varobj slot number, tagged as a jsval.
+ */
+ sprop = (JSScopeProperty *) prop;
+ fp->vars[atomIndex] = INT_TO_JSVAL(sprop->slot);
+ }
+#endif
+ OBJ_DROP_PROPERTY(cx, parent, prop);
+ END_LITOPX_CASE(JSOP_DEFFUN)
+
+ BEGIN_LITOPX_CASE(JSOP_DEFLOCALFUN, VARNO_LEN)
+ /*
+ * Define a local function (i.e., one nested at the top level of
+ * another function), parented by the current scope chain, and
+ * stored in a local variable slot that the compiler allocated.
+ * This is an optimization over JSOP_DEFFUN that avoids requiring
+ * a call object for the outer function's activation.
+ */
+ slot = GET_VARNO(pc2);
+ obj = ATOM_TO_OBJECT(atom);
+
+ JS_ASSERT(!fp->blockChain);
+ if (!(fp->flags & JSFRAME_POP_BLOCKS)) {
+ /*
+ * If the compiler-created function object (obj) is scoped by a
+ * let-induced body block, temporarily update fp->blockChain so
+ * that js_GetScopeChain will clone the block into the runtime
+ * scope needed to parent the function object's clone.
+ */
+ parent = OBJ_GET_PARENT(cx, obj);
+ if (OBJ_GET_CLASS(cx, parent) == &js_BlockClass)
+ fp->blockChain = parent;
+ parent = js_GetScopeChain(cx, fp);
+ } else {
+ /*
+ * We have already emulated JSOP_ENTERBLOCK for the enclosing
+ * body block, for a prior JSOP_DEFLOCALFUN in the prolog, so
+ * we just load fp->scopeChain into parent.
+ *
+             * In typical execution scenarios, the prolog bytecodes that
+             * include this JSOP_DEFLOCALFUN run first, followed by the main
+             * bytecodes, including JSOP_ENTERBLOCK for the outermost (body)
+             * block.
+ * JSOP_ENTERBLOCK will detect that it need not do anything if
+ * the body block was entered above due to a local function.
+ * Finally the matching JSOP_LEAVEBLOCK runs.
+ *
+ * If the matching JSOP_LEAVEBLOCK for the body block does not
+ * run for some reason, the body block will be properly "put"
+ * (via js_PutBlockObject) by the PutBlockObjects call at the
+ * bottom of js_Interpret.
+ */
+ parent = fp->scopeChain;
+ JS_ASSERT(OBJ_GET_CLASS(cx, parent) == &js_BlockClass);
+ JS_ASSERT(OBJ_GET_PROTO(cx, parent) == OBJ_GET_PARENT(cx, obj));
+ JS_ASSERT(OBJ_GET_CLASS(cx, OBJ_GET_PARENT(cx, parent))
+ == &js_CallClass);
+ }
+
+ /* If re-parenting, store a clone of the function object. */
+ if (OBJ_GET_PARENT(cx, obj) != parent) {
+ SAVE_SP_AND_PC(fp);
+ obj = js_CloneFunctionObject(cx, obj, parent);
+ if (!obj) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ }
+ fp->vars[slot] = OBJECT_TO_JSVAL(obj);
+ END_LITOPX_CASE(JSOP_DEFLOCALFUN)
+
+ BEGIN_LITOPX_CASE(JSOP_ANONFUNOBJ, 0)
+ /* Push the specified function object literal. */
+ obj = ATOM_TO_OBJECT(atom);
+
+ /* If re-parenting, push a clone of the function object. */
+ SAVE_SP_AND_PC(fp);
+ parent = js_GetScopeChain(cx, fp);
+ if (!parent) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ if (OBJ_GET_PARENT(cx, obj) != parent) {
+ obj = js_CloneFunctionObject(cx, obj, parent);
+ if (!obj) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ }
+ PUSH_OPND(OBJECT_TO_JSVAL(obj));
+ obj = NULL;
+ END_LITOPX_CASE(JSOP_ANONFUNOBJ)
+
+ BEGIN_LITOPX_CASE(JSOP_NAMEDFUNOBJ, 0)
+ /* ECMA ed. 3 FunctionExpression: function Identifier [etc.]. */
+ rval = ATOM_KEY(atom);
+ JS_ASSERT(VALUE_IS_FUNCTION(cx, rval));
+
+ /*
+ * 1. Create a new object as if by the expression new Object().
+ * 2. Add Result(1) to the front of the scope chain.
+ *
+ * Step 2 is achieved by making the new object's parent be the
+ * current scope chain, and then making the new object the parent
+ * of the Function object clone.
+ */
+ SAVE_SP_AND_PC(fp);
+ obj2 = js_GetScopeChain(cx, fp);
+ if (!obj2) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ parent = js_NewObject(cx, &js_ObjectClass, NULL, obj2);
+ if (!parent) {
+ ok = JS_FALSE;
+ goto out;
+ }
+
+ /*
+ * 3. Create a new Function object as specified in section 13.2
+ * with [parameters and body specified by the function expression
+ * that was parsed by the compiler into a Function object, and
+ * saved in the script's atom map].
+ *
+ * Protect parent from GC after js_CloneFunctionObject calls into
+ * js_NewObject, which displaces the newborn object root in cx by
+ * allocating the clone, then runs a last-ditch GC while trying
+ * to allocate the clone's slots vector. Another, multi-threaded
+ * path: js_CloneFunctionObject => js_NewObject => OBJ_GET_CLASS
+ * which may suspend the current request in ClaimScope, with the
+ * newborn displaced as in the first scenario.
+ */
+ fp->scopeChain = parent;
+ obj = js_CloneFunctionObject(cx, JSVAL_TO_OBJECT(rval), parent);
+ if (!obj) {
+ ok = JS_FALSE;
+ goto out;
+ }
+
+ /*
+ * Protect obj from any GC hiding below OBJ_DEFINE_PROPERTY. All
+ * paths from here must flow through the "Restore fp->scopeChain"
+ * code below the OBJ_DEFINE_PROPERTY call.
+ */
+ fp->scopeChain = obj;
+ rval = OBJECT_TO_JSVAL(obj);
+
+ /*
+ * 4. Create a property in the object Result(1). The property's
+ * name is [fun->atom, the identifier parsed by the compiler],
+ * value is Result(3), and attributes are { DontDelete, ReadOnly }.
+ */
+ fun = (JSFunction *) JS_GetPrivate(cx, obj);
+ attrs = JSFUN_GSFLAG2ATTR(fun->flags);
+ if (attrs) {
+ attrs |= JSPROP_SHARED;
+ rval = JSVAL_VOID;
+ }
+ ok = OBJ_DEFINE_PROPERTY(cx, parent, ATOM_TO_JSID(fun->atom), rval,
+ (attrs & JSPROP_GETTER)
+ ? JS_EXTENSION (JSPropertyOp) obj
+ : NULL,
+ (attrs & JSPROP_SETTER)
+ ? JS_EXTENSION (JSPropertyOp) obj
+ : NULL,
+ attrs |
+ JSPROP_ENUMERATE | JSPROP_PERMANENT |
+ JSPROP_READONLY,
+ NULL);
+
+ /* Restore fp->scopeChain now that obj is defined in parent. */
+ fp->scopeChain = obj2;
+ if (!ok) {
+ cx->weakRoots.newborn[GCX_OBJECT] = NULL;
+ goto out;
+ }
+
+ /*
+ * 5. Remove Result(1) from the front of the scope chain [no-op].
+ * 6. Return Result(3).
+ */
+ PUSH_OPND(OBJECT_TO_JSVAL(obj));
+ obj = NULL;
+ END_LITOPX_CASE(JSOP_NAMEDFUNOBJ)
+
+ BEGIN_LITOPX_CASE(JSOP_CLOSURE, 0)
+ /*
+ * ECMA ed. 3 extension: a named function expression in a compound
+ * statement (not at the top statement level of global code, or at
+ * the top level of a function body).
+ *
+ * Get immediate operand atom, which is a function object literal.
+ * From it, get the function to close.
+ */
+ JS_ASSERT(VALUE_IS_FUNCTION(cx, ATOM_KEY(atom)));
+ obj = ATOM_TO_OBJECT(atom);
+
+ /*
+ * Clone the function object with the current scope chain as the
+ * clone's parent. The original function object is the prototype
+ * of the clone. Do this only if re-parenting; the compiler may
+ * have seen the right parent already and created a sufficiently
+ * well-scoped function object.
+ */
+ SAVE_SP_AND_PC(fp);
+ obj2 = js_GetScopeChain(cx, fp);
+ if (!obj2) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ if (OBJ_GET_PARENT(cx, obj) != obj2) {
+ obj = js_CloneFunctionObject(cx, obj, obj2);
+ if (!obj) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ }
+
+ /*
+ * Protect obj from any GC hiding below OBJ_DEFINE_PROPERTY. All
+ * paths from here must flow through the "Restore fp->scopeChain"
+ * code below the OBJ_DEFINE_PROPERTY call.
+ */
+ fp->scopeChain = obj;
+ rval = OBJECT_TO_JSVAL(obj);
+
+ /*
+ * Make a property in fp->varobj with id fun->atom and value obj,
+ * unless fun is a getter or setter (in which case, obj is cast to
+ * a JSPropertyOp and passed accordingly).
+ */
+ fun = (JSFunction *) JS_GetPrivate(cx, obj);
+ attrs = JSFUN_GSFLAG2ATTR(fun->flags);
+ if (attrs) {
+ attrs |= JSPROP_SHARED;
+ rval = JSVAL_VOID;
+ }
+ parent = fp->varobj;
+ ok = OBJ_DEFINE_PROPERTY(cx, parent, ATOM_TO_JSID(fun->atom), rval,
+ (attrs & JSPROP_GETTER)
+ ? JS_EXTENSION (JSPropertyOp) obj
+ : NULL,
+ (attrs & JSPROP_SETTER)
+ ? JS_EXTENSION (JSPropertyOp) obj
+ : NULL,
+ attrs | JSPROP_ENUMERATE
+ | JSPROP_PERMANENT,
+ &prop);
+
+ /* Restore fp->scopeChain now that obj is defined in fp->varobj. */
+ fp->scopeChain = obj2;
+ if (!ok) {
+ cx->weakRoots.newborn[GCX_OBJECT] = NULL;
+ goto out;
+ }
+
+#if 0
+ if (attrs == 0 && script->numGlobalVars) {
+ /*
+ * As with JSOP_DEFVAR and JSOP_DEFCONST (above), fast globals
+ * use fp->vars to map the global function name's atomIndex to
+ * its permanent fp->varobj slot number, tagged as a jsval.
+ */
+ sprop = (JSScopeProperty *) prop;
+ fp->vars[atomIndex] = INT_TO_JSVAL(sprop->slot);
+ }
+#endif
+ OBJ_DROP_PROPERTY(cx, parent, prop);
+ END_LITOPX_CASE(JSOP_CLOSURE)
+
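+/*
+ * JSOP_GETTER and JSOP_SETTER prefix a following SETNAME/SETPROP/SETELEM or
+ * INITPROP/INITELEM bytecode (op2) and define the pending function value as
+ * a getter or setter property rather than as an ordinary data property.
+ */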
+#if JS_HAS_GETTER_SETTER
+ BEGIN_CASE(JSOP_GETTER)
+ BEGIN_CASE(JSOP_SETTER)
+ op2 = (JSOp) *++pc;
+ switch (op2) {
+ case JSOP_SETNAME:
+ case JSOP_SETPROP:
+ atom = GET_ATOM(cx, script, pc);
+ id = ATOM_TO_JSID(atom);
+ rval = FETCH_OPND(-1);
+ i = -1;
+ goto gs_pop_lval;
+
+ case JSOP_SETELEM:
+ rval = FETCH_OPND(-1);
+ FETCH_ELEMENT_ID(-2, id);
+ i = -2;
+ gs_pop_lval:
+ FETCH_OBJECT(cx, i - 1, lval, obj);
+ break;
+
+ case JSOP_INITPROP:
+ JS_ASSERT(sp - fp->spbase >= 2);
+ rval = FETCH_OPND(-1);
+ i = -1;
+ atom = GET_ATOM(cx, script, pc);
+ id = ATOM_TO_JSID(atom);
+ goto gs_get_lval;
+
+ case JSOP_INITELEM:
+ JS_ASSERT(sp - fp->spbase >= 3);
+ rval = FETCH_OPND(-1);
+ FETCH_ELEMENT_ID(-2, id);
+ i = -2;
+ gs_get_lval:
+ lval = FETCH_OPND(i-1);
+ JS_ASSERT(JSVAL_IS_OBJECT(lval));
+ obj = JSVAL_TO_OBJECT(lval);
+ break;
+
+ default:
+ JS_ASSERT(0);
+ }
+
+ /* Ensure that id has a type suitable for use with obj. */
+ CHECK_ELEMENT_ID(obj, id);
+
+ SAVE_SP_AND_PC(fp);
+ if (JS_TypeOfValue(cx, rval) != JSTYPE_FUNCTION) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_GETTER_OR_SETTER,
+ (op == JSOP_GETTER)
+ ? js_getter_str
+ : js_setter_str);
+ ok = JS_FALSE;
+ goto out;
+ }
+
+ /*
+ * Getters and setters are just like watchpoints from an access
+ * control point of view.
+ */
+ ok = OBJ_CHECK_ACCESS(cx, obj, id, JSACC_WATCH, &rtmp, &attrs);
+ if (!ok)
+ goto out;
+
+ if (op == JSOP_GETTER) {
+ getter = JS_EXTENSION (JSPropertyOp) JSVAL_TO_OBJECT(rval);
+ setter = NULL;
+ attrs = JSPROP_GETTER;
+ } else {
+ getter = NULL;
+ setter = JS_EXTENSION (JSPropertyOp) JSVAL_TO_OBJECT(rval);
+ attrs = JSPROP_SETTER;
+ }
+ attrs |= JSPROP_ENUMERATE | JSPROP_SHARED;
+
+ /* Check for a readonly or permanent property of the same name. */
+ ok = js_CheckRedeclaration(cx, obj, id, attrs, NULL, NULL);
+ if (!ok)
+ goto out;
+
+ ok = OBJ_DEFINE_PROPERTY(cx, obj, id, JSVAL_VOID, getter, setter,
+ attrs, NULL);
+ if (!ok)
+ goto out;
+
+ obj = NULL;
+ sp += i;
+ if (js_CodeSpec[op2].ndefs)
+ STORE_OPND(-1, rval);
+ len = js_CodeSpec[op2].length;
+ DO_NEXT_OP(len);
+#endif /* JS_HAS_GETTER_SETTER */
+
+ BEGIN_CASE(JSOP_NEWINIT)
+ argc = 0;
+ fp->sharpDepth++;
+ goto do_new;
+
+ BEGIN_CASE(JSOP_ENDINIT)
+ if (--fp->sharpDepth == 0)
+ fp->sharpArray = NULL;
+
+ /* Re-set the newborn root to the top of this object tree. */
+ JS_ASSERT(sp - fp->spbase >= 1);
+ lval = FETCH_OPND(-1);
+ JS_ASSERT(JSVAL_IS_OBJECT(lval));
+ cx->weakRoots.newborn[GCX_OBJECT] = JSVAL_TO_GCTHING(lval);
+ END_CASE(JSOP_ENDINIT)
+
+ BEGIN_CASE(JSOP_INITPROP)
+ /* Pop the property's value into rval. */
+ JS_ASSERT(sp - fp->spbase >= 2);
+ rval = FETCH_OPND(-1);
+
+ /* Get the immediate property name into id. */
+ atom = GET_ATOM(cx, script, pc);
+ id = ATOM_TO_JSID(atom);
+ i = -1;
+ goto do_init;
+
+ BEGIN_CASE(JSOP_INITELEM)
+ /* Pop the element's value into rval. */
+ JS_ASSERT(sp - fp->spbase >= 3);
+ rval = FETCH_OPND(-1);
+
+ /* Pop and conditionally atomize the element id. */
+ FETCH_ELEMENT_ID(-2, id);
+ i = -2;
+
+ do_init:
+ /* Find the object being initialized at top of stack. */
+ lval = FETCH_OPND(i-1);
+ JS_ASSERT(JSVAL_IS_OBJECT(lval));
+ obj = JSVAL_TO_OBJECT(lval);
+
+ /* Ensure that id has a type suitable for use with obj. */
+ CHECK_ELEMENT_ID(obj, id);
+
+ /* Set the property named by obj[id] to rval. */
+ SAVE_SP_AND_PC(fp);
+ ok = OBJ_SET_PROPERTY(cx, obj, id, &rval);
+ if (!ok)
+ goto out;
+ sp += i;
+ len = js_CodeSpec[op].length;
+ DO_NEXT_OP(len);
+
+#if JS_HAS_SHARP_VARS
+ BEGIN_CASE(JSOP_DEFSHARP)
+ SAVE_SP_AND_PC(fp);
+ obj = fp->sharpArray;
+ if (!obj) {
+ obj = js_NewArrayObject(cx, 0, NULL);
+ if (!obj) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ fp->sharpArray = obj;
+ }
+ i = (jsint) GET_ATOM_INDEX(pc);
+ id = INT_TO_JSID(i);
+ rval = FETCH_OPND(-1);
+ if (JSVAL_IS_PRIMITIVE(rval)) {
+ char numBuf[12];
+ JS_snprintf(numBuf, sizeof numBuf, "%u", (unsigned) i);
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_SHARP_DEF, numBuf);
+ ok = JS_FALSE;
+ goto out;
+ }
+ ok = OBJ_SET_PROPERTY(cx, obj, id, &rval);
+ if (!ok)
+ goto out;
+ END_CASE(JSOP_DEFSHARP)
+
+ BEGIN_CASE(JSOP_USESHARP)
+ i = (jsint) GET_ATOM_INDEX(pc);
+ id = INT_TO_JSID(i);
+ obj = fp->sharpArray;
+ if (!obj) {
+ rval = JSVAL_VOID;
+ } else {
+ SAVE_SP_AND_PC(fp);
+ ok = OBJ_GET_PROPERTY(cx, obj, id, &rval);
+ if (!ok)
+ goto out;
+ }
+ if (!JSVAL_IS_OBJECT(rval)) {
+ char numBuf[12];
+ JS_snprintf(numBuf, sizeof numBuf, "%u", (unsigned) i);
+
+ SAVE_SP_AND_PC(fp);
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_SHARP_USE, numBuf);
+ ok = JS_FALSE;
+ goto out;
+ }
+ PUSH_OPND(rval);
+ END_CASE(JSOP_USESHARP)
+#endif /* JS_HAS_SHARP_VARS */
+
+ /* No-ops for ease of decompilation and jit'ing. */
+ EMPTY_CASE(JSOP_TRY)
+ EMPTY_CASE(JSOP_FINALLY)
+
+ /* Reset the stack to the given depth. */
+ BEGIN_CASE(JSOP_SETSP)
+ i = (jsint) GET_ATOM_INDEX(pc);
+ JS_ASSERT(i >= 0);
+
+ for (obj = fp->blockChain; obj; obj = OBJ_GET_PARENT(cx, obj)) {
+ JS_ASSERT(OBJ_GET_CLASS(cx, obj) == &js_BlockClass);
+ if (OBJ_BLOCK_DEPTH(cx, obj) + (jsint)OBJ_BLOCK_COUNT(cx, obj) <= i) {
+ JS_ASSERT(OBJ_BLOCK_DEPTH(cx, obj) < i || OBJ_BLOCK_COUNT(cx, obj) == 0);
+ break;
+ }
+ }
+ fp->blockChain = obj;
+
+ JS_ASSERT(ok);
+ for (obj = fp->scopeChain;
+ (clasp = OBJ_GET_CLASS(cx, obj)) == &js_WithClass ||
+ clasp == &js_BlockClass;
+ obj = OBJ_GET_PARENT(cx, obj)) {
+ if (JS_GetPrivate(cx, obj) != fp ||
+ OBJ_BLOCK_DEPTH(cx, obj) < i) {
+ break;
+ }
+ if (clasp == &js_BlockClass)
+ ok &= js_PutBlockObject(cx, obj);
+ else
+ JS_SetPrivate(cx, obj, NULL);
+ }
+
+ fp->scopeChain = obj;
+
+ /* Set sp after js_PutBlockObject to avoid potential GC hazards. */
+ sp = fp->spbase + i;
+
+ /* Don't fail until after we've updated all stacks. */
+ if (!ok)
+ goto out;
+ END_CASE(JSOP_SETSP)
+
+ BEGIN_CASE(JSOP_GOSUB)
+ JS_ASSERT(cx->exception != JSVAL_HOLE);
+ if (!cx->throwing) {
+ lval = JSVAL_HOLE;
+ } else {
+ lval = cx->exception;
+ cx->throwing = JS_FALSE;
+ }
+ PUSH(lval);
+ i = PTRDIFF(pc, script->main, jsbytecode) + JSOP_GOSUB_LENGTH;
+ len = GET_JUMP_OFFSET(pc);
+ PUSH(INT_TO_JSVAL(i));
+ END_VARLEN_CASE
+
+ BEGIN_CASE(JSOP_GOSUBX)
+ JS_ASSERT(cx->exception != JSVAL_HOLE);
+ if (!cx->throwing) {
+ lval = JSVAL_HOLE;
+ } else {
+ lval = cx->exception;
+ cx->throwing = JS_FALSE;
+ }
+ PUSH(lval);
+ i = PTRDIFF(pc, script->main, jsbytecode) + JSOP_GOSUBX_LENGTH;
+ len = GET_JUMPX_OFFSET(pc);
+ PUSH(INT_TO_JSVAL(i));
+ END_VARLEN_CASE
+
+ BEGIN_CASE(JSOP_RETSUB)
+ rval = POP();
+ JS_ASSERT(JSVAL_IS_INT(rval));
+ lval = POP();
+ if (lval != JSVAL_HOLE) {
+ /*
+ * Exception was pending during finally, throw it *before* we
+ * adjust pc, because pc indexes into script->trynotes. This
+ * turns out not to be necessary, but it seems clearer. And
+ * it points out a FIXME: 350509, due to Igor Bukanov.
+ */
+ cx->throwing = JS_TRUE;
+ cx->exception = lval;
+ ok = JS_FALSE;
+ goto out;
+ }
+ len = JSVAL_TO_INT(rval);
+ pc = script->main;
+ END_VARLEN_CASE
+
+ BEGIN_CASE(JSOP_EXCEPTION)
+ JS_ASSERT(cx->throwing);
+ PUSH(cx->exception);
+ cx->throwing = JS_FALSE;
+ END_CASE(JSOP_EXCEPTION)
+
+ BEGIN_CASE(JSOP_THROWING)
+ JS_ASSERT(!cx->throwing);
+ cx->throwing = JS_TRUE;
+ cx->exception = POP_OPND();
+ END_CASE(JSOP_THROWING)
+
+ BEGIN_CASE(JSOP_THROW)
+ JS_ASSERT(!cx->throwing);
+ cx->throwing = JS_TRUE;
+ cx->exception = POP_OPND();
+ ok = JS_FALSE;
+ /* let the code at out try to catch the exception. */
+ goto out;
+
+ BEGIN_CASE(JSOP_SETLOCALPOP)
+ /*
+ * The stack must have a block with at least one local slot below
+ * the exception object.
+ */
+ JS_ASSERT(sp - fp->spbase >= 2);
+ slot = GET_UINT16(pc);
+ JS_ASSERT(slot + 1 < (uintN)depth);
+ fp->spbase[slot] = POP_OPND();
+ END_CASE(JSOP_SETLOCALPOP)
+
+ BEGIN_CASE(JSOP_INSTANCEOF)
+ SAVE_SP_AND_PC(fp);
+ rval = FETCH_OPND(-1);
+ if (JSVAL_IS_PRIMITIVE(rval) ||
+ !(obj = JSVAL_TO_OBJECT(rval))->map->ops->hasInstance) {
+ str = js_DecompileValueGenerator(cx, -1, rval, NULL);
+ if (str) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_INSTANCEOF_RHS,
+ JS_GetStringBytes(str));
+ }
+ ok = JS_FALSE;
+ goto out;
+ }
+ lval = FETCH_OPND(-2);
+ cond = JS_FALSE;
+ ok = obj->map->ops->hasInstance(cx, obj, lval, &cond);
+ if (!ok)
+ goto out;
+ sp--;
+ STORE_OPND(-1, BOOLEAN_TO_JSVAL(cond));
+ END_CASE(JSOP_INSTANCEOF)
+
+#if JS_HAS_DEBUGGER_KEYWORD
+ BEGIN_CASE(JSOP_DEBUGGER)
+ {
+ JSTrapHandler handler = rt->debuggerHandler;
+ if (handler) {
+ SAVE_SP_AND_PC(fp);
+ switch (handler(cx, script, pc, &rval,
+ rt->debuggerHandlerData)) {
+ case JSTRAP_ERROR:
+ ok = JS_FALSE;
+ goto out;
+ case JSTRAP_CONTINUE:
+ break;
+ case JSTRAP_RETURN:
+ fp->rval = rval;
+ goto out;
+ case JSTRAP_THROW:
+ cx->throwing = JS_TRUE;
+ cx->exception = rval;
+ ok = JS_FALSE;
+ goto out;
+ default:;
+ }
+ LOAD_INTERRUPT_HANDLER(rt);
+ }
+ }
+ END_CASE(JSOP_DEBUGGER)
+#endif /* JS_HAS_DEBUGGER_KEYWORD */
+
+#if JS_HAS_XML_SUPPORT
+ BEGIN_CASE(JSOP_DEFXMLNS)
+ rval = POP();
+ SAVE_SP_AND_PC(fp);
+ ok = js_SetDefaultXMLNamespace(cx, rval);
+ if (!ok)
+ goto out;
+ END_CASE(JSOP_DEFXMLNS)
+
+ BEGIN_CASE(JSOP_ANYNAME)
+ SAVE_SP_AND_PC(fp);
+ ok = js_GetAnyName(cx, &rval);
+ if (!ok)
+ goto out;
+ PUSH_OPND(rval);
+ END_CASE(JSOP_ANYNAME)
+
+ BEGIN_LITOPX_CASE(JSOP_QNAMEPART, 0)
+ PUSH_OPND(ATOM_KEY(atom));
+ END_LITOPX_CASE(JSOP_QNAMEPART)
+
+ BEGIN_LITOPX_CASE(JSOP_QNAMECONST, 0)
+ rval = ATOM_KEY(atom);
+ lval = FETCH_OPND(-1);
+ SAVE_SP_AND_PC(fp);
+ obj = js_ConstructXMLQNameObject(cx, lval, rval);
+ if (!obj) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ STORE_OPND(-1, OBJECT_TO_JSVAL(obj));
+ END_LITOPX_CASE(JSOP_QNAMECONST)
+
+ BEGIN_CASE(JSOP_QNAME)
+ rval = FETCH_OPND(-1);
+ lval = FETCH_OPND(-2);
+ SAVE_SP_AND_PC(fp);
+ obj = js_ConstructXMLQNameObject(cx, lval, rval);
+ if (!obj) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ sp--;
+ STORE_OPND(-1, OBJECT_TO_JSVAL(obj));
+ END_CASE(JSOP_QNAME)
+
+ BEGIN_CASE(JSOP_TOATTRNAME)
+ rval = FETCH_OPND(-1);
+ SAVE_SP_AND_PC(fp);
+ ok = js_ToAttributeName(cx, &rval);
+ if (!ok)
+ goto out;
+ STORE_OPND(-1, rval);
+ END_CASE(JSOP_TOATTRNAME)
+
+ BEGIN_CASE(JSOP_TOATTRVAL)
+ rval = FETCH_OPND(-1);
+ JS_ASSERT(JSVAL_IS_STRING(rval));
+ SAVE_SP_AND_PC(fp);
+ str = js_EscapeAttributeValue(cx, JSVAL_TO_STRING(rval));
+ if (!str) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ STORE_OPND(-1, STRING_TO_JSVAL(str));
+ END_CASE(JSOP_TOATTRVAL)
+
+ BEGIN_CASE(JSOP_ADDATTRNAME)
+ BEGIN_CASE(JSOP_ADDATTRVAL)
+ rval = FETCH_OPND(-1);
+ lval = FETCH_OPND(-2);
+ str = JSVAL_TO_STRING(lval);
+ str2 = JSVAL_TO_STRING(rval);
+ SAVE_SP_AND_PC(fp);
+ str = js_AddAttributePart(cx, op == JSOP_ADDATTRNAME, str, str2);
+ if (!str) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ sp--;
+ STORE_OPND(-1, STRING_TO_JSVAL(str));
+ END_CASE(JSOP_ADDATTRNAME)
+
+ BEGIN_CASE(JSOP_BINDXMLNAME)
+ lval = FETCH_OPND(-1);
+ SAVE_SP_AND_PC(fp);
+ ok = js_FindXMLProperty(cx, lval, &obj, &rval);
+ if (!ok)
+ goto out;
+ STORE_OPND(-1, OBJECT_TO_JSVAL(obj));
+ PUSH_OPND(rval);
+ END_CASE(JSOP_BINDXMLNAME)
+
+ BEGIN_CASE(JSOP_SETXMLNAME)
+ obj = JSVAL_TO_OBJECT(FETCH_OPND(-3));
+ lval = FETCH_OPND(-2);
+ rval = FETCH_OPND(-1);
+ SAVE_SP_AND_PC(fp);
+ ok = js_SetXMLProperty(cx, obj, lval, &rval);
+ if (!ok)
+ goto out;
+ sp -= 2;
+ STORE_OPND(-1, rval);
+ obj = NULL;
+ END_CASE(JSOP_SETXMLNAME)
+
+ BEGIN_CASE(JSOP_XMLNAME)
+ lval = FETCH_OPND(-1);
+ SAVE_SP_AND_PC(fp);
+ ok = js_FindXMLProperty(cx, lval, &obj, &rval);
+ if (!ok)
+ goto out;
+ ok = js_GetXMLProperty(cx, obj, rval, &rval);
+ if (!ok)
+ goto out;
+ STORE_OPND(-1, rval);
+ END_CASE(JSOP_XMLNAME)
+
+ BEGIN_CASE(JSOP_DESCENDANTS)
+ BEGIN_CASE(JSOP_DELDESC)
+ FETCH_OBJECT(cx, -2, lval, obj);
+ rval = FETCH_OPND(-1);
+ SAVE_SP_AND_PC(fp);
+ ok = js_GetXMLDescendants(cx, obj, rval, &rval);
+ if (!ok)
+ goto out;
+
+ if (op == JSOP_DELDESC) {
+ sp[-1] = rval; /* set local root */
+ ok = js_DeleteXMLListElements(cx, JSVAL_TO_OBJECT(rval));
+ if (!ok)
+ goto out;
+ rval = JSVAL_TRUE; /* always succeed */
+ }
+
+ sp--;
+ STORE_OPND(-1, rval);
+ END_CASE(JSOP_DESCENDANTS)
+
+ BEGIN_CASE(JSOP_FILTER)
+ FETCH_OBJECT(cx, -1, lval, obj);
+ len = GET_JUMP_OFFSET(pc);
+ SAVE_SP_AND_PC(fp);
+ ok = js_FilterXMLList(cx, obj, pc + js_CodeSpec[op].length, &rval);
+ if (!ok)
+ goto out;
+ JS_ASSERT(fp->sp == sp);
+ STORE_OPND(-1, rval);
+ END_VARLEN_CASE
+
+ BEGIN_CASE(JSOP_ENDFILTER)
+ *result = POP_OPND();
+ goto out;
+
+ EMPTY_CASE(JSOP_STARTXML)
+ EMPTY_CASE(JSOP_STARTXMLEXPR)
+
+ BEGIN_CASE(JSOP_TOXML)
+ rval = FETCH_OPND(-1);
+ SAVE_SP_AND_PC(fp);
+ obj = js_ValueToXMLObject(cx, rval);
+ if (!obj) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ STORE_OPND(-1, OBJECT_TO_JSVAL(obj));
+ END_CASE(JSOP_TOXML)
+
+ BEGIN_CASE(JSOP_TOXMLLIST)
+ rval = FETCH_OPND(-1);
+ SAVE_SP_AND_PC(fp);
+ obj = js_ValueToXMLListObject(cx, rval);
+ if (!obj) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ STORE_OPND(-1, OBJECT_TO_JSVAL(obj));
+ END_CASE(JSOP_TOXMLLIST)
+
+ BEGIN_CASE(JSOP_XMLTAGEXPR)
+ rval = FETCH_OPND(-1);
+ SAVE_SP_AND_PC(fp);
+ str = js_ValueToString(cx, rval);
+ if (!str) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ STORE_OPND(-1, STRING_TO_JSVAL(str));
+ END_CASE(JSOP_XMLTAGEXPR)
+
+ BEGIN_CASE(JSOP_XMLELTEXPR)
+ rval = FETCH_OPND(-1);
+ SAVE_SP_AND_PC(fp);
+ if (VALUE_IS_XML(cx, rval)) {
+ str = js_ValueToXMLString(cx, rval);
+ } else {
+ str = js_ValueToString(cx, rval);
+ if (str)
+ str = js_EscapeElementValue(cx, str);
+ }
+ if (!str) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ STORE_OPND(-1, STRING_TO_JSVAL(str));
+ END_CASE(JSOP_XMLELTEXPR)
+
+ BEGIN_LITOPX_CASE(JSOP_XMLOBJECT, 0)
+ SAVE_SP_AND_PC(fp);
+ obj = js_CloneXMLObject(cx, ATOM_TO_OBJECT(atom));
+ if (!obj) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ PUSH_OPND(OBJECT_TO_JSVAL(obj));
+ obj = NULL;
+ END_LITOPX_CASE(JSOP_XMLOBJECT)
+
+ BEGIN_LITOPX_CASE(JSOP_XMLCDATA, 0)
+ str = ATOM_TO_STRING(atom);
+ obj = js_NewXMLSpecialObject(cx, JSXML_CLASS_TEXT, NULL, str);
+ if (!obj) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ PUSH_OPND(OBJECT_TO_JSVAL(obj));
+ END_LITOPX_CASE(JSOP_XMLCDATA)
+
+ BEGIN_LITOPX_CASE(JSOP_XMLCOMMENT, 0)
+ str = ATOM_TO_STRING(atom);
+ obj = js_NewXMLSpecialObject(cx, JSXML_CLASS_COMMENT, NULL, str);
+ if (!obj) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ PUSH_OPND(OBJECT_TO_JSVAL(obj));
+ END_LITOPX_CASE(JSOP_XMLCOMMENT)
+
+ BEGIN_LITOPX_CASE(JSOP_XMLPI, 0)
+ str = ATOM_TO_STRING(atom);
+ rval = FETCH_OPND(-1);
+ str2 = JSVAL_TO_STRING(rval);
+ SAVE_SP_AND_PC(fp);
+ obj = js_NewXMLSpecialObject(cx,
+ JSXML_CLASS_PROCESSING_INSTRUCTION,
+ str, str2);
+ if (!obj) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ STORE_OPND(-1, OBJECT_TO_JSVAL(obj));
+ END_LITOPX_CASE(JSOP_XMLPI)
+
+ BEGIN_LITOPX_CASE(JSOP_GETMETHOD, 0)
+ /* Get an immediate atom naming the property. */
+ id = ATOM_TO_JSID(atom);
+ lval = FETCH_OPND(-1);
+ SAVE_SP_AND_PC(fp);
+ if (!JSVAL_IS_PRIMITIVE(lval)) {
+ STORE_OPND(-1, lval);
+ obj = JSVAL_TO_OBJECT(lval);
+
+ /* Special-case XML object method lookup, per ECMA-357. */
+ if (OBJECT_IS_XML(cx, obj)) {
+ JSXMLObjectOps *ops;
+
+ ops = (JSXMLObjectOps *) obj->map->ops;
+ obj = ops->getMethod(cx, obj, id, &rval);
+ if (!obj)
+ ok = JS_FALSE;
+ } else {
+ CACHED_GET(OBJ_GET_PROPERTY(cx, obj, id, &rval));
+ }
+ } else {
+ if (JSVAL_IS_STRING(lval)) {
+ i = JSProto_String;
+ } else if (JSVAL_IS_NUMBER(lval)) {
+ i = JSProto_Number;
+ } else if (JSVAL_IS_BOOLEAN(lval)) {
+ i = JSProto_Boolean;
+ } else {
+ JS_ASSERT(JSVAL_IS_NULL(lval) || JSVAL_IS_VOID(lval));
+ str = js_DecompileValueGenerator(cx, JSDVG_SEARCH_STACK,
+ lval, NULL);
+ if (str) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_NO_PROPERTIES,
+ JS_GetStringBytes(str));
+ }
+ ok = JS_FALSE;
+ goto out;
+ }
+ ok = js_GetClassPrototype(cx, NULL, INT_TO_JSID(i), &obj);
+ if (!ok)
+ goto out;
+ JS_ASSERT(obj);
+ STORE_OPND(-1, OBJECT_TO_JSVAL(obj));
+ CACHED_GET(OBJ_GET_PROPERTY(cx, obj, id, &rval));
+ obj = (JSObject *) lval; /* keep tagged as non-object */
+ }
+ if (!ok)
+ goto out;
+ STORE_OPND(-1, rval);
+ END_LITOPX_CASE(JSOP_GETMETHOD)
+
+ BEGIN_LITOPX_CASE(JSOP_SETMETHOD, 0)
+ /* Get an immediate atom naming the property. */
+ id = ATOM_TO_JSID(atom);
+ rval = FETCH_OPND(-1);
+ FETCH_OBJECT(cx, -2, lval, obj);
+ SAVE_SP_AND_PC(fp);
+
+ /* Special-case XML object method lookup, per ECMA-357. */
+ if (OBJECT_IS_XML(cx, obj)) {
+ JSXMLObjectOps *ops;
+
+ ops = (JSXMLObjectOps *) obj->map->ops;
+ ok = ops->setMethod(cx, obj, id, &rval);
+ } else {
+ CACHED_SET(OBJ_SET_PROPERTY(cx, obj, id, &rval));
+ }
+ if (!ok)
+ goto out;
+ --sp;
+ STORE_OPND(-1, rval);
+ obj = NULL;
+ END_LITOPX_CASE(JSOP_SETMETHOD)
+
+ BEGIN_CASE(JSOP_GETFUNNS)
+ SAVE_SP_AND_PC(fp);
+ ok = js_GetFunctionNamespace(cx, &rval);
+ if (!ok)
+ goto out;
+ PUSH_OPND(rval);
+ END_CASE(JSOP_GETFUNNS)
+#endif /* JS_HAS_XML_SUPPORT */
+
+ BEGIN_LITOPX_CASE(JSOP_ENTERBLOCK, 0)
+ obj = ATOM_TO_OBJECT(atom);
+ JS_ASSERT(fp->spbase + OBJ_BLOCK_DEPTH(cx, obj) == sp);
+ vp = sp + OBJ_BLOCK_COUNT(cx, obj);
+ JS_ASSERT(vp <= fp->spbase + depth);
+ while (sp < vp) {
+ STORE_OPND(0, JSVAL_VOID);
+ sp++;
+ }
+
+ /*
+ * If this frame had to reflect the compile-time block chain into
+ * the runtime scope chain, we can't optimize block scopes out of
+ * runtime any longer, because an outer block that parents obj has
+ * been cloned onto the scope chain. To avoid re-cloning such a
+ * parent and accumulating redundant clones via js_GetScopeChain,
+ * we must clone each block eagerly on entry, and push it on the
+ * scope chain, until this frame pops.
+ */
+ if (fp->flags & JSFRAME_POP_BLOCKS) {
+ JS_ASSERT(!fp->blockChain);
+
+ /*
+ * Check whether JSOP_DEFLOCALFUN emulated JSOP_ENTERBLOCK for
+ * the body block in order to correctly scope the local cloned
+ * function object it creates.
+ */
+ parent = fp->scopeChain;
+ if (OBJ_GET_PROTO(cx, parent) == obj) {
+ JS_ASSERT(OBJ_GET_CLASS(cx, parent) == &js_BlockClass);
+ } else {
+ obj = js_CloneBlockObject(cx, obj, parent, fp);
+ if (!obj) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ fp->scopeChain = obj;
+ }
+ } else {
+ JS_ASSERT(!fp->blockChain ||
+ OBJ_GET_PARENT(cx, obj) == fp->blockChain);
+ fp->blockChain = obj;
+ }
+ END_LITOPX_CASE(JSOP_ENTERBLOCK)
+
+ BEGIN_CASE(JSOP_LEAVEBLOCKEXPR)
+ BEGIN_CASE(JSOP_LEAVEBLOCK)
+ {
+ JSObject **chainp;
+
+ /* Grab the result of the expression. */
+ if (op == JSOP_LEAVEBLOCKEXPR)
+ rval = FETCH_OPND(-1);
+
+ chainp = &fp->blockChain;
+ obj = *chainp;
+ if (!obj) {
+ chainp = &fp->scopeChain;
+ obj = *chainp;
+
+ /*
+ * This block was cloned, so clear its private data and sync
+ * its locals to their property slots.
+ */
+ SAVE_SP_AND_PC(fp);
+ ok = js_PutBlockObject(cx, obj);
+ if (!ok)
+ goto out;
+ }
+
+ sp -= GET_UINT16(pc);
+ JS_ASSERT(fp->spbase <= sp && sp <= fp->spbase + depth);
+
+ /* Store the result into the topmost stack slot. */
+ if (op == JSOP_LEAVEBLOCKEXPR)
+ STORE_OPND(-1, rval);
+
+ JS_ASSERT(OBJ_GET_CLASS(cx, obj) == &js_BlockClass);
+ JS_ASSERT(op == JSOP_LEAVEBLOCKEXPR
+ ? fp->spbase + OBJ_BLOCK_DEPTH(cx, obj) == sp - 1
+ : fp->spbase + OBJ_BLOCK_DEPTH(cx, obj) == sp);
+
+ *chainp = OBJ_GET_PARENT(cx, obj);
+ JS_ASSERT(chainp != &fp->blockChain ||
+ !*chainp ||
+ OBJ_GET_CLASS(cx, *chainp) == &js_BlockClass);
+ }
+ END_CASE(JSOP_LEAVEBLOCK)
+
+ BEGIN_CASE(JSOP_GETLOCAL)
+ slot = GET_UINT16(pc);
+ JS_ASSERT(slot < (uintN)depth);
+ PUSH_OPND(fp->spbase[slot]);
+ obj = NULL;
+ END_CASE(JSOP_GETLOCAL)
+
+ BEGIN_CASE(JSOP_SETLOCAL)
+ slot = GET_UINT16(pc);
+ JS_ASSERT(slot < (uintN)depth);
+ vp = &fp->spbase[slot];
+ GC_POKE(cx, *vp);
+ *vp = FETCH_OPND(-1);
+ obj = NULL;
+ END_CASE(JSOP_SETLOCAL)
+
+/* NB: This macro doesn't use JS_BEGIN_MACRO/JS_END_MACRO around its body. */
+#define FAST_LOCAL_INCREMENT_OP(PRE,OPEQ,MINMAX) \
+ slot = GET_UINT16(pc); \
+ JS_ASSERT(slot < (uintN)depth); \
+ vp = fp->spbase + slot; \
+ rval = *vp; \
+ if (!JSVAL_IS_INT(rval) || rval == INT_TO_JSVAL(JSVAL_INT_##MINMAX)) \
+ goto do_nonint_fast_incop; \
+ PRE = rval; \
+ rval OPEQ 2; \
+ *vp = rval; \
+ PUSH_OPND(PRE)
+
+ BEGIN_CASE(JSOP_INCLOCAL)
+ FAST_LOCAL_INCREMENT_OP(rval, +=, MAX);
+ END_CASE(JSOP_INCLOCAL)
+
+ BEGIN_CASE(JSOP_DECLOCAL)
+ FAST_LOCAL_INCREMENT_OP(rval, -=, MIN);
+ END_CASE(JSOP_DECLOCAL)
+
+ BEGIN_CASE(JSOP_LOCALINC)
+ FAST_LOCAL_INCREMENT_OP(rtmp, +=, MAX);
+ END_CASE(JSOP_LOCALINC)
+
+ BEGIN_CASE(JSOP_LOCALDEC)
+ FAST_LOCAL_INCREMENT_OP(rtmp, -=, MIN);
+ END_CASE(JSOP_LOCALDEC)
+
+#undef FAST_LOCAL_INCREMENT_OP
+
+ EMPTY_CASE(JSOP_STARTITER)
+
+ BEGIN_CASE(JSOP_ENDITER)
+ JS_ASSERT(!JSVAL_IS_PRIMITIVE(sp[-1]));
+ iterobj = JSVAL_TO_OBJECT(sp[-1]);
+
+ /*
+ * js_CloseNativeIterator checks whether the iterator is not
+ * native, and also detects the case of a native iterator that
+ * has already escaped, even though a for-in loop caused it to
+ * be created. See jsiter.c.
+ */
+ SAVE_SP_AND_PC(fp);
+ js_CloseNativeIterator(cx, iterobj);
+ *--sp = JSVAL_NULL;
+ END_CASE(JSOP_ENDITER)
+
+#if JS_HAS_GENERATORS
+ BEGIN_CASE(JSOP_GENERATOR)
+ pc += JSOP_GENERATOR_LENGTH;
+ SAVE_SP_AND_PC(fp);
+ obj = js_NewGenerator(cx, fp);
+ if (!obj) {
+ ok = JS_FALSE;
+ } else {
+ JS_ASSERT(!fp->callobj && !fp->argsobj);
+ fp->rval = OBJECT_TO_JSVAL(obj);
+ }
+ goto out;
+
+ BEGIN_CASE(JSOP_YIELD)
+ ASSERT_NOT_THROWING(cx);
+ if (fp->flags & JSFRAME_FILTERING) {
+ /* FIXME: bug 309894 -- fix to eliminate this error. */
+ JS_ReportErrorNumberUC(cx, js_GetErrorMessage, NULL,
+ JSMSG_YIELD_FROM_FILTER);
+ ok = JS_FALSE;
+ goto out;
+ }
+ if (FRAME_TO_GENERATOR(fp)->state == JSGEN_CLOSING) {
+ str = js_DecompileValueGenerator(cx, JSDVG_SEARCH_STACK,
+ fp->argv[-2], NULL);
+ if (str) {
+ JS_ReportErrorNumberUC(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_GENERATOR_YIELD,
+ JSSTRING_CHARS(str));
+ }
+ ok = JS_FALSE;
+ goto out;
+ }
+ fp->rval = FETCH_OPND(-1);
+ fp->flags |= JSFRAME_YIELDING;
+ pc += JSOP_YIELD_LENGTH;
+ SAVE_SP_AND_PC(fp);
+ goto out;
+
+ BEGIN_CASE(JSOP_ARRAYPUSH)
+ slot = GET_UINT16(pc);
+ JS_ASSERT(slot < (uintN)depth);
+ lval = fp->spbase[slot];
+ obj = JSVAL_TO_OBJECT(lval);
+ JS_ASSERT(OBJ_GET_CLASS(cx, obj) == &js_ArrayClass);
+ rval = FETCH_OPND(-1);
+
+ /* We know that the array is created with only a 'length' slot. */
+ i = obj->map->freeslot - (JSSLOT_FREE(&js_ArrayClass) + 1);
+ id = INT_TO_JSID(i);
+
+ SAVE_SP_AND_PC(fp);
+ ok = OBJ_SET_PROPERTY(cx, obj, id, &rval);
+ if (!ok)
+ goto out;
+ --sp;
+ END_CASE(JSOP_ARRAYPUSH)
+#endif /* JS_HAS_GENERATORS */
+
+#if !JS_HAS_GENERATORS
+ L_JSOP_GENERATOR:
+ L_JSOP_YIELD:
+ L_JSOP_ARRAYPUSH:
+#endif
+
+#if !JS_HAS_DESTRUCTURING
+ L_JSOP_FOREACHKEYVAL:
+ L_JSOP_ENUMCONSTELEM:
+#endif
+
+#ifdef JS_THREADED_INTERP
+ L_JSOP_BACKPATCH:
+ L_JSOP_BACKPATCH_POP:
+#else
+ default:
+#endif
+ {
+ char numBuf[12];
+ JS_snprintf(numBuf, sizeof numBuf, "%d", op);
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_BYTECODE, numBuf);
+ ok = JS_FALSE;
+ goto out;
+ }
+
+#ifndef JS_THREADED_INTERP
+
+ } /* switch (op) */
+
+ advance_pc:
+ pc += len;
+
+#ifdef DEBUG
+ if (tracefp) {
+ intN ndefs, n;
+ jsval *siter;
+
+ ndefs = js_CodeSpec[op].ndefs;
+ if (ndefs) {
+ SAVE_SP_AND_PC(fp);
+ if (op == JSOP_FORELEM && sp[-1] == JSVAL_FALSE)
+ --ndefs;
+ for (n = -ndefs; n < 0; n++) {
+ str = js_DecompileValueGenerator(cx, n, sp[n], NULL);
+ if (str) {
+ fprintf(tracefp, "%s %s",
+ (n == -ndefs) ? " output:" : ",",
+ JS_GetStringBytes(str));
+ }
+ }
+ fprintf(tracefp, " @ %d\n", sp - fp->spbase);
+ }
+ fprintf(tracefp, " stack: ");
+ for (siter = fp->spbase; siter < sp; siter++) {
+ str = js_ValueToSource(cx, *siter);
+ fprintf(tracefp, "%s ",
+ str ? JS_GetStringBytes(str) : "<null>");
+ }
+ fputc('\n', tracefp);
+ }
+#endif /* DEBUG */
+ }
+#endif /* !JS_THREADED_INTERP */
+
+out:
+ if (!ok) {
+ /*
+ * Has an exception been raised? Also insist that we are not in an
+ * XML filtering predicate expression, to avoid catching exceptions
+ * within the filtering predicate, such as this example taken from
+ * tests/e4x/Regress/regress-301596.js:
+ *
+ * try {
+ * <xml/>.(@a == 1);
+ * throw 5;
+ * } catch (e) {
+ * }
+ *
+ * The inner interpreter activation executing the predicate bytecode
+ * will throw "reference to undefined XML name @a" (or 5, in older
+ * versions that followed the first edition of ECMA-357 and evaluated
+ * unbound identifiers to undefined), and the exception must not be
+ * caught until control unwinds to the outer interpreter activation.
+ *
+ * Otherwise, the wrong stack depth will be restored by JSOP_SETSP,
+ * and the catch will move into the filtering predicate expression,
+ * leading to double catch execution if it rethrows.
+ *
+ * FIXME: https://bugzilla.mozilla.org/show_bug.cgi?id=309894
+ */
+ if (cx->throwing && !(fp->flags & JSFRAME_FILTERING)) {
+ /*
+ * Call debugger throw hook if set (XXX thread safety?).
+ */
+ JSTrapHandler handler = rt->throwHook;
+ if (handler) {
+ SAVE_SP_AND_PC(fp);
+ switch (handler(cx, script, pc, &rval, rt->throwHookData)) {
+ case JSTRAP_ERROR:
+ cx->throwing = JS_FALSE;
+ goto no_catch;
+ case JSTRAP_RETURN:
+ ok = JS_TRUE;
+ cx->throwing = JS_FALSE;
+ fp->rval = rval;
+ goto no_catch;
+ case JSTRAP_THROW:
+ cx->exception = rval;
+ case JSTRAP_CONTINUE:
+ default:;
+ }
+ LOAD_INTERRUPT_HANDLER(rt);
+ }
+
+ /*
+ * Look for a try block in script that can catch this exception.
+ */
+#if JS_HAS_GENERATORS
+ if (JS_LIKELY(cx->exception != JSVAL_ARETURN)) {
+ SCRIPT_FIND_CATCH_START(script, pc, pc);
+ if (!pc)
+ goto no_catch;
+ } else {
+ pc = js_FindFinallyHandler(script, pc);
+ if (!pc) {
+ cx->throwing = JS_FALSE;
+ ok = JS_TRUE;
+ fp->rval = JSVAL_VOID;
+ goto no_catch;
+ }
+ }
+#else
+ SCRIPT_FIND_CATCH_START(script, pc, pc);
+ if (!pc)
+ goto no_catch;
+#endif
+
+ /* Don't clear cx->throwing to save cx->exception from GC. */
+ len = 0;
+ ok = JS_TRUE;
+ DO_NEXT_OP(len);
+ }
+no_catch:;
+ }
+
+ /*
+ * Check whether control fell off the end of a lightweight function, or an
+ * exception thrown under such a function was not caught by it. If so, go
+ * to the inline code under JSOP_RETURN.
+ */
+ if (inlineCallCount)
+ goto inline_return;
+
+ /*
+ * Reset sp before freeing stack slots, because our caller may GC soon.
+ * Clear spbase to indicate that we've popped the 2 * depth operand slots.
+ * Restore the previous frame's execution state.
+ */
+ if (JS_LIKELY(mark != NULL)) {
+ /* If fp has blocks on its scope chain, home their locals now. */
+ if (fp->flags & JSFRAME_POP_BLOCKS) {
+ SAVE_SP_AND_PC(fp);
+ ok &= PutBlockObjects(cx, fp);
+ }
+
+ fp->sp = fp->spbase;
+ fp->spbase = NULL;
+ js_FreeRawStack(cx, mark);
+ } else {
+ SAVE_SP(fp);
+ }
+
+out2:
+ if (cx->version == currentVersion && currentVersion != originalVersion)
+ js_SetVersion(cx, originalVersion);
+ cx->interpLevel--;
+ return ok;
+
+atom_not_defined:
+ {
+ const char *printable = js_AtomToPrintableString(cx, atom);
+ if (printable)
+ js_ReportIsNotDefined(cx, printable);
+ ok = JS_FALSE;
+ goto out;
+ }
+}
diff --git a/third_party/js-1.7/jsinterp.h b/third_party/js-1.7/jsinterp.h
new file mode 100644
index 0000000..ab60b3a
--- /dev/null
+++ b/third_party/js-1.7/jsinterp.h
@@ -0,0 +1,361 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsinterp_h___
+#define jsinterp_h___
+/*
+ * JS interpreter interface.
+ */
+#include "jsprvtd.h"
+#include "jspubtd.h"
+
+JS_BEGIN_EXTERN_C
+
+/*
+ * JS stack frame, may be allocated on the C stack by native callers. Always
+ * allocated on cx->stackPool for calls from the interpreter to an interpreted
+ * function.
+ *
+ * NB: This struct is manually initialized in jsinterp.c and jsiter.c. If you
+ * add new members, update both files. But first, try to remove members. The
+ * sharp* and xml* members should be moved onto the stack as local variables
+ * with well-known slots, if possible.
+ */
+struct JSStackFrame {
+ JSObject *callobj; /* lazily created Call object */
+ JSObject *argsobj; /* lazily created arguments object */
+ JSObject *varobj; /* variables object, where vars go */
+ JSScript *script; /* script being interpreted */
+ JSFunction *fun; /* function being called or null */
+ JSObject *thisp; /* "this" pointer if in method */
+ uintN argc; /* actual argument count */
+ jsval *argv; /* base of argument stack slots */
+ jsval rval; /* function return value */
+ uintN nvars; /* local variable count */
+ jsval *vars; /* base of variable stack slots */
+ JSStackFrame *down; /* previous frame */
+ void *annotation; /* used by Java security */
+ JSObject *scopeChain; /* scope chain */
+ jsbytecode *pc; /* program counter */
+ jsval *sp; /* stack pointer */
+ jsval *spbase; /* operand stack base */
+ uintN sharpDepth; /* array/object initializer depth */
+ JSObject *sharpArray; /* scope for #n= initializer vars */
+ uint32 flags; /* frame flags -- see below */
+ JSStackFrame *dormantNext; /* next dormant frame chain */
+ JSObject *xmlNamespace; /* null or default xml namespace in E4X */
+ JSObject *blockChain; /* active compile-time block scopes */
+};
+
+typedef struct JSInlineFrame {
+ JSStackFrame frame; /* base struct */
+ jsval *rvp; /* ptr to caller's return value slot */
+ void *mark; /* mark before inline frame */
+ void *hookData; /* debugger call hook data */
+ JSVersion callerVersion; /* dynamic version of calling script */
+} JSInlineFrame;
+
+/* JS stack frame flags. */
+#define JSFRAME_CONSTRUCTING 0x01 /* frame is for a constructor invocation */
+#define JSFRAME_INTERNAL 0x02 /* internal call, not invoked by a script */
+#define JSFRAME_SKIP_CALLER 0x04 /* skip one link when evaluating f.caller
+ for this invocation of f */
+#define JSFRAME_ASSIGNING 0x08 /* a complex (not simplex JOF_ASSIGNING) op
+ is currently assigning to a property */
+#define JSFRAME_DEBUGGER 0x10 /* frame for JS_EvaluateInStackFrame */
+#define JSFRAME_EVAL 0x20 /* frame for obj_eval */
+#define JSFRAME_SPECIAL 0x30 /* special evaluation frame flags */
+#define JSFRAME_COMPILING 0x40 /* frame is being used by compiler */
+#define JSFRAME_COMPILE_N_GO 0x80 /* compile-and-go mode, can optimize name
+ references based on scope chain */
+#define JSFRAME_SCRIPT_OBJECT 0x100 /* compiling source for a Script object */
+#define JSFRAME_YIELDING 0x200 /* js_Interpret dispatched JSOP_YIELD */
+#define JSFRAME_FILTERING 0x400 /* XML filtering predicate expression */
+#define JSFRAME_ITERATOR 0x800 /* trying to get an iterator for for-in */
+#define JSFRAME_POP_BLOCKS 0x1000 /* scope chain contains blocks to pop */
+#define JSFRAME_GENERATOR 0x2000 /* frame belongs to generator-iterator */
+
+#define JSFRAME_OVERRIDE_SHIFT 24 /* override bit-set params; see jsfun.c */
+#define JSFRAME_OVERRIDE_BITS 8
+
+/*
+ * Property cache for quickened get/set property opcodes.
+ */
+#define PROPERTY_CACHE_LOG2 10
+#define PROPERTY_CACHE_SIZE JS_BIT(PROPERTY_CACHE_LOG2)
+#define PROPERTY_CACHE_MASK JS_BITMASK(PROPERTY_CACHE_LOG2)
+
+#define PROPERTY_CACHE_HASH(obj, id) \
+ ((((jsuword)(obj) >> JSVAL_TAGBITS) ^ (jsuword)(id)) & PROPERTY_CACHE_MASK)
+
+#ifdef JS_THREADSAFE
+
+#if HAVE_ATOMIC_DWORD_ACCESS
+
+#define PCE_LOAD(cache, pce, entry) JS_ATOMIC_DWORD_LOAD(pce, entry)
+#define PCE_STORE(cache, pce, entry) JS_ATOMIC_DWORD_STORE(pce, entry)
+
+#else /* !HAVE_ATOMIC_DWORD_ACCESS */
+
+#define JS_PROPERTY_CACHE_METERING 1
+
+#define PCE_LOAD(cache, pce, entry) \
+ JS_BEGIN_MACRO \
+ uint32 prefills_; \
+ uint32 fills_ = (cache)->fills; \
+ do { \
+ /* Load until cache->fills is stable (see FILL macro below). */ \
+ prefills_ = fills_; \
+ (entry) = *(pce); \
+ } while ((fills_ = (cache)->fills) != prefills_); \
+ JS_END_MACRO
+
+#define PCE_STORE(cache, pce, entry) \
+ JS_BEGIN_MACRO \
+ do { \
+ /* Store until no racing collider stores half or all of pce. */ \
+ *(pce) = (entry); \
+ } while (PCE_OBJECT(*pce) != PCE_OBJECT(entry) || \
+ PCE_PROPERTY(*pce) != PCE_PROPERTY(entry)); \
+ JS_END_MACRO
+
+#endif /* !HAVE_ATOMIC_DWORD_ACCESS */
+
+#else /* !JS_THREADSAFE */
+
+#define PCE_LOAD(cache, pce, entry) ((entry) = *(pce))
+#define PCE_STORE(cache, pce, entry) (*(pce) = (entry))
+
+#endif /* !JS_THREADSAFE */
+
+typedef union JSPropertyCacheEntry {
+ struct {
+ JSObject *object; /* weak link to object */
+ JSScopeProperty *property; /* weak link to property */
+ } s;
+#ifdef HAVE_ATOMIC_DWORD_ACCESS
+ prdword align;
+#endif
+} JSPropertyCacheEntry;
+
+/* These may be called in lvalue or rvalue position. */
+#define PCE_OBJECT(entry) ((entry).s.object)
+#define PCE_PROPERTY(entry) ((entry).s.property)
+
+typedef struct JSPropertyCache {
+ JSPropertyCacheEntry table[PROPERTY_CACHE_SIZE];
+ JSBool empty;
+ JSBool disabled;
+#ifdef JS_PROPERTY_CACHE_METERING
+ uint32 fills;
+ uint32 recycles;
+ uint32 tests;
+ uint32 misses;
+ uint32 flushes;
+# define PCMETER(x) x
+#else
+# define PCMETER(x) /* nothing */
+#endif
+} JSPropertyCache;
+
+#define PROPERTY_CACHE_FILL(cache, obj, id, sprop) \
+ JS_BEGIN_MACRO \
+ JSPropertyCache *cache_ = (cache); \
+ if (!cache_->disabled) { \
+ uintN hashIndex_ = (uintN) PROPERTY_CACHE_HASH(obj, id); \
+ JSPropertyCacheEntry *pce_ = &cache_->table[hashIndex_]; \
+ JSPropertyCacheEntry entry_; \
+ JSScopeProperty *pce_sprop_; \
+ PCE_LOAD(cache_, pce_, entry_); \
+ pce_sprop_ = PCE_PROPERTY(entry_); \
+ PCMETER(if (pce_sprop_ && pce_sprop_ != sprop) \
+ cache_->recycles++); \
+ PCE_OBJECT(entry_) = obj; \
+ PCE_PROPERTY(entry_) = sprop; \
+ cache_->empty = JS_FALSE; \
+ PCMETER(cache_->fills++); \
+ PCE_STORE(cache_, pce_, entry_); \
+ } \
+ JS_END_MACRO
+
+#define PROPERTY_CACHE_TEST(cache, obj, id, sprop) \
+ JS_BEGIN_MACRO \
+ uintN hashIndex_ = (uintN) PROPERTY_CACHE_HASH(obj, id); \
+ JSPropertyCache *cache_ = (cache); \
+ JSPropertyCacheEntry *pce_ = &cache_->table[hashIndex_]; \
+ JSPropertyCacheEntry entry_; \
+ JSScopeProperty *pce_sprop_; \
+ PCE_LOAD(cache_, pce_, entry_); \
+ pce_sprop_ = PCE_PROPERTY(entry_); \
+ PCMETER(cache_->tests++); \
+ if (pce_sprop_ && \
+ PCE_OBJECT(entry_) == obj && \
+ pce_sprop_->id == id) { \
+ sprop = pce_sprop_; \
+ } else { \
+ PCMETER(cache_->misses++); \
+ sprop = NULL; \
+ } \
+ JS_END_MACRO
+
+extern void
+js_FlushPropertyCache(JSContext *cx);
+
+extern void
+js_DisablePropertyCache(JSContext *cx);
+
+extern void
+js_EnablePropertyCache(JSContext *cx);
+
+extern JS_FRIEND_API(jsval *)
+js_AllocStack(JSContext *cx, uintN nslots, void **markp);
+
+extern JS_FRIEND_API(void)
+js_FreeStack(JSContext *cx, void *mark);
+
+extern JSBool
+js_GetArgument(JSContext *cx, JSObject *obj, jsval id, jsval *vp);
+
+extern JSBool
+js_SetArgument(JSContext *cx, JSObject *obj, jsval id, jsval *vp);
+
+extern JSBool
+js_GetLocalVariable(JSContext *cx, JSObject *obj, jsval id, jsval *vp);
+
+extern JSBool
+js_SetLocalVariable(JSContext *cx, JSObject *obj, jsval id, jsval *vp);
+
+#ifdef DUMP_CALL_TABLE
+# define JSOPTION_LOGCALL_TOSOURCE JS_BIT(15)
+
+extern JSHashTable *js_CallTable;
+extern size_t js_LogCallToSourceLimit;
+
+extern void js_DumpCallTable(JSContext *cx);
+#endif
+
+/*
+ * Refresh and return fp->scopeChain. It may be stale if block scopes are
+ * active but not yet reflected by objects in the scope chain. If a block
+ * scope contains a with, eval, XML filtering predicate, or similar such
+ * dynamically scoped construct, then the compile-time block scope at
+ * fp->blockChain must be reflected at runtime.
+ */
+extern JSObject *
+js_GetScopeChain(JSContext *cx, JSStackFrame *fp);
+
+/*
+ * Compute the 'this' parameter for a call with nominal 'this' given by thisp
+ * and arguments including argv[-1] (nominal 'this') and argv[-2] (callee).
+ * Activation objects ("Call" objects not created with "new Call()", i.e.,
+ * "Call" objects that have private data) may not be referred to by 'this',
+ * per ECMA-262, so js_ComputeThis censors them.
+ */
+extern JSObject *
+js_ComputeThis(JSContext *cx, JSObject *thisp, jsval *argv);
+
+/*
+ * NB: js_Invoke requires that cx is currently running JS (i.e., that cx->fp
+ * is non-null), and that the callee, |this| parameter, and actual arguments
+ * are already pushed on the stack under cx->fp->sp.
+ */
+extern JS_FRIEND_API(JSBool)
+js_Invoke(JSContext *cx, uintN argc, uintN flags);
+
+/*
+ * Consolidated js_Invoke flags simply rename certain JSFRAME_* flags, so that
+ * we can share bits stored in JSStackFrame.flags and passed to:
+ *
+ * js_Invoke
+ * js_InternalInvoke
+ * js_ValueToFunction
+ * js_ValueToFunctionObject
+ * js_ValueToCallableObject
+ * js_ReportIsNotFunction
+ *
+ * See jsfun.h for the latter four and flag renaming macros.
+ */
+#define JSINVOKE_CONSTRUCT JSFRAME_CONSTRUCTING
+#define JSINVOKE_INTERNAL JSFRAME_INTERNAL
+#define JSINVOKE_SKIP_CALLER JSFRAME_SKIP_CALLER
+#define JSINVOKE_ITERATOR JSFRAME_ITERATOR
+
+/*
+ * Mask to isolate construct and iterator flags for use with jsfun.h functions.
+ */
+#define JSINVOKE_FUNFLAGS (JSINVOKE_CONSTRUCT | JSINVOKE_ITERATOR)
+
+/*
+ * "Internal" calls may come from C or C++ code using a JSContext on which no
+ * JS is running (!cx->fp), so they may need to push a dummy JSStackFrame.
+ */
+#define js_InternalCall(cx,obj,fval,argc,argv,rval) \
+ js_InternalInvoke(cx, obj, fval, 0, argc, argv, rval)
+
+#define js_InternalConstruct(cx,obj,fval,argc,argv,rval) \
+ js_InternalInvoke(cx, obj, fval, JSINVOKE_CONSTRUCT, argc, argv, rval)
+
+extern JSBool
+js_InternalInvoke(JSContext *cx, JSObject *obj, jsval fval, uintN flags,
+ uintN argc, jsval *argv, jsval *rval);
+
+extern JSBool
+js_InternalGetOrSet(JSContext *cx, JSObject *obj, jsid id, jsval fval,
+ JSAccessMode mode, uintN argc, jsval *argv, jsval *rval);
+
+extern JSBool
+js_Execute(JSContext *cx, JSObject *chain, JSScript *script,
+ JSStackFrame *down, uintN flags, jsval *result);
+
+extern JSBool
+js_CheckRedeclaration(JSContext *cx, JSObject *obj, jsid id, uintN attrs,
+ JSObject **objp, JSProperty **propp);
+
+extern JSBool
+js_StrictlyEqual(jsval lval, jsval rval);
+
+extern JSBool
+js_InvokeConstructor(JSContext *cx, jsval *vp, uintN argc);
+
+extern JSBool
+js_Interpret(JSContext *cx, jsbytecode *pc, jsval *result);
+
+JS_END_EXTERN_C
+
+#endif /* jsinterp_h___ */
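
A minimal usage sketch (assumed embedder code, not part of the patch above): calling a JavaScript function value through the js_InternalCall convenience macro declared in this header. The names cx, thisobj, and fnval are assumptions supplied by the caller, which is also responsible for rooting them; js_InternalInvoke pushes a dummy frame when no script is running on cx.

    #include "jsapi.h"
    #include "jsinterp.h"

    static JSBool
    CallFromC(JSContext *cx, JSObject *thisobj, jsval fnval)
    {
        jsval argv[1];
        jsval rval;

        /* Pass a single integer argument; rooting of rval is omitted here. */
        argv[0] = INT_TO_JSVAL(42);
        if (!js_InternalCall(cx, thisobj, fnval, 1, argv, &rval))
            return JS_FALSE;    /* error or uncaught exception pending on cx */
        return JS_TRUE;         /* rval now holds the call's result */
    }
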
diff --git a/third_party/js-1.7/jsiter.c b/third_party/js-1.7/jsiter.c
new file mode 100644
index 0000000..0a4de54
--- /dev/null
+++ b/third_party/js-1.7/jsiter.c
@@ -0,0 +1,1080 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sw=4 et tw=78:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * JavaScript iterators.
+ */
+#include "jsstddef.h"
+#include <string.h> /* for memcpy */
+#include "jstypes.h"
+#include "jsutil.h"
+#include "jsarena.h"
+#include "jsapi.h"
+#include "jsarray.h"
+#include "jsatom.h"
+#include "jsbool.h"
+#include "jscntxt.h"
+#include "jsconfig.h"
+#include "jsexn.h"
+#include "jsfun.h"
+#include "jsgc.h"
+#include "jsinterp.h"
+#include "jsiter.h"
+#include "jslock.h"
+#include "jsnum.h"
+#include "jsobj.h"
+#include "jsopcode.h"
+#include "jsscope.h"
+#include "jsscript.h"
+
+#if JS_HAS_XML_SUPPORT
+#include "jsxml.h"
+#endif
+
+extern const char js_throw_str[]; /* from jsscan.h */
+
+#define JSSLOT_ITER_STATE (JSSLOT_PRIVATE)
+#define JSSLOT_ITER_FLAGS (JSSLOT_PRIVATE + 1)
+
+#if JSSLOT_ITER_FLAGS >= JS_INITIAL_NSLOTS
+#error JS_INITIAL_NSLOTS must be greater than JSSLOT_ITER_FLAGS.
+#endif
+
+/*
+ * Shared code to close iterator's state either through an explicit call or
+ * when GC detects that the iterator is no longer reachable.
+ */
+void
+js_CloseIteratorState(JSContext *cx, JSObject *iterobj)
+{
+ jsval *slots;
+ jsval state, parent;
+ JSObject *iterable;
+
+ JS_ASSERT(JS_InstanceOf(cx, iterobj, &js_IteratorClass, NULL));
+ slots = iterobj->slots;
+
+ /* Avoid double work if js_CloseNativeIterator was called on obj. */
+ state = slots[JSSLOT_ITER_STATE];
+ if (JSVAL_IS_NULL(state))
+ return;
+
+ /* Protect against failure to fully initialize obj. */
+ parent = slots[JSSLOT_PARENT];
+ if (!JSVAL_IS_PRIMITIVE(parent)) {
+ iterable = JSVAL_TO_OBJECT(parent);
+#if JS_HAS_XML_SUPPORT
+ if ((JSVAL_TO_INT(slots[JSSLOT_ITER_FLAGS]) & JSITER_FOREACH) &&
+ OBJECT_IS_XML(cx, iterable)) {
+ ((JSXMLObjectOps *) iterable->map->ops)->
+ enumerateValues(cx, iterable, JSENUMERATE_DESTROY, &state,
+ NULL, NULL);
+ } else
+#endif
+ OBJ_ENUMERATE(cx, iterable, JSENUMERATE_DESTROY, &state, NULL);
+ }
+ slots[JSSLOT_ITER_STATE] = JSVAL_NULL;
+}
+
+JSClass js_IteratorClass = {
+ "Iterator",
+ JSCLASS_HAS_RESERVED_SLOTS(2) | /* slots for state and flags */
+ JSCLASS_HAS_CACHED_PROTO(JSProto_Iterator),
+ JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
+ JS_EnumerateStub, JS_ResolveStub, JS_ConvertStub, JS_FinalizeStub,
+ JSCLASS_NO_OPTIONAL_MEMBERS
+};
+
+static JSBool
+InitNativeIterator(JSContext *cx, JSObject *iterobj, JSObject *obj, uintN flags)
+{
+ jsval state;
+ JSBool ok;
+
+ JS_ASSERT(JSVAL_TO_PRIVATE(iterobj->slots[JSSLOT_CLASS]) ==
+ &js_IteratorClass);
+
+ /* Initialize iterobj in case of enumerate hook failure. */
+ iterobj->slots[JSSLOT_PARENT] = OBJECT_TO_JSVAL(obj);
+ iterobj->slots[JSSLOT_ITER_STATE] = JSVAL_NULL;
+ iterobj->slots[JSSLOT_ITER_FLAGS] = INT_TO_JSVAL(flags);
+ if (!js_RegisterCloseableIterator(cx, iterobj))
+ return JS_FALSE;
+ if (!obj)
+ return JS_TRUE;
+
+ ok =
+#if JS_HAS_XML_SUPPORT
+ ((flags & JSITER_FOREACH) && OBJECT_IS_XML(cx, obj))
+ ? ((JSXMLObjectOps *) obj->map->ops)->
+ enumerateValues(cx, obj, JSENUMERATE_INIT, &state, NULL, NULL)
+ :
+#endif
+ OBJ_ENUMERATE(cx, obj, JSENUMERATE_INIT, &state, NULL);
+ if (!ok)
+ return JS_FALSE;
+
+ iterobj->slots[JSSLOT_ITER_STATE] = state;
+ if (flags & JSITER_ENUMERATE) {
+ /*
+ * The enumerating iterator needs the original object to suppress
+ * enumeration of deleted or shadowed prototype properties. Since the
+ * enumerator never escapes to scripts, we use the prototype slot to
+ * store the original object.
+ */
+ JS_ASSERT(obj != iterobj);
+ iterobj->slots[JSSLOT_PROTO] = OBJECT_TO_JSVAL(obj);
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+Iterator(JSContext *cx, JSObject *iterobj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSBool keyonly;
+ uintN flags;
+ JSObject *obj;
+
+ keyonly = JS_FALSE;
+ if (!js_ValueToBoolean(cx, argv[1], &keyonly))
+ return JS_FALSE;
+ flags = keyonly ? 0 : JSITER_FOREACH;
+
+ if (cx->fp->flags & JSFRAME_CONSTRUCTING) {
+ /* XXX work around old valueOf call hidden beneath js_ValueToObject */
+ if (!JSVAL_IS_PRIMITIVE(argv[0])) {
+ obj = JSVAL_TO_OBJECT(argv[0]);
+ } else {
+ obj = js_ValueToNonNullObject(cx, argv[0]);
+ if (!obj)
+ return JS_FALSE;
+ argv[0] = OBJECT_TO_JSVAL(obj);
+ }
+ return InitNativeIterator(cx, iterobj, obj, flags);
+ }
+
+ *rval = argv[0];
+ return js_ValueToIterator(cx, flags, rval);
+}
+
+static JSBool
+NewKeyValuePair(JSContext *cx, jsid key, jsval val, jsval *rval)
+{
+ jsval vec[2];
+ JSTempValueRooter tvr;
+ JSObject *aobj;
+
+ vec[0] = ID_TO_VALUE(key);
+ vec[1] = val;
+
+ JS_PUSH_TEMP_ROOT(cx, 2, vec, &tvr);
+ aobj = js_NewArrayObject(cx, 2, vec);
+ *rval = OBJECT_TO_JSVAL(aobj);
+ JS_POP_TEMP_ROOT(cx, &tvr);
+
+ return aobj != NULL;
+}
+
+static JSBool
+IteratorNextImpl(JSContext *cx, JSObject *obj, jsval *rval)
+{
+ JSObject *iterable;
+ jsval state;
+ uintN flags;
+ JSBool foreach, ok;
+ jsid id;
+
+ JS_ASSERT(OBJ_GET_CLASS(cx, obj) == &js_IteratorClass);
+
+ iterable = OBJ_GET_PARENT(cx, obj);
+ JS_ASSERT(iterable);
+ state = OBJ_GET_SLOT(cx, obj, JSSLOT_ITER_STATE);
+ if (JSVAL_IS_NULL(state))
+ goto stop;
+
+ flags = JSVAL_TO_INT(OBJ_GET_SLOT(cx, obj, JSSLOT_ITER_FLAGS));
+ JS_ASSERT(!(flags & JSITER_ENUMERATE));
+ foreach = (flags & JSITER_FOREACH) != 0;
+ ok =
+#if JS_HAS_XML_SUPPORT
+ (foreach && OBJECT_IS_XML(cx, iterable))
+ ? ((JSXMLObjectOps *) iterable->map->ops)->
+ enumerateValues(cx, iterable, JSENUMERATE_NEXT, &state,
+ &id, rval)
+ :
+#endif
+ OBJ_ENUMERATE(cx, iterable, JSENUMERATE_NEXT, &state, &id);
+ if (!ok)
+ return JS_FALSE;
+
+ OBJ_SET_SLOT(cx, obj, JSSLOT_ITER_STATE, state);
+ if (JSVAL_IS_NULL(state))
+ goto stop;
+
+ if (foreach) {
+#if JS_HAS_XML_SUPPORT
+ if (!OBJECT_IS_XML(cx, iterable) &&
+ !OBJ_GET_PROPERTY(cx, iterable, id, rval)) {
+ return JS_FALSE;
+ }
+#endif
+ if (!NewKeyValuePair(cx, id, *rval, rval))
+ return JS_FALSE;
+ } else {
+ *rval = ID_TO_VALUE(id);
+ }
+ return JS_TRUE;
+
+ stop:
+ JS_ASSERT(OBJ_GET_SLOT(cx, obj, JSSLOT_ITER_STATE) == JSVAL_NULL);
+ *rval = JSVAL_HOLE;
+ return JS_TRUE;
+}
+
+static JSBool
+js_ThrowStopIteration(JSContext *cx, JSObject *obj)
+{
+ jsval v;
+
+ JS_ASSERT(!JS_IsExceptionPending(cx));
+ if (js_FindClassObject(cx, NULL, INT_TO_JSID(JSProto_StopIteration), &v))
+ JS_SetPendingException(cx, v);
+ return JS_FALSE;
+}
+
+static JSBool
+iterator_next(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ if (!JS_InstanceOf(cx, obj, &js_IteratorClass, argv))
+ return JS_FALSE;
+
+ if (!IteratorNextImpl(cx, obj, rval))
+ return JS_FALSE;
+
+ if (*rval == JSVAL_HOLE) {
+ *rval = JSVAL_NULL;
+ js_ThrowStopIteration(cx, obj);
+ return JS_FALSE;
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+iterator_self(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ *rval = OBJECT_TO_JSVAL(obj);
+ return JS_TRUE;
+}
+
+static JSFunctionSpec iterator_methods[] = {
+ {js_iterator_str, iterator_self, 0,JSPROP_READONLY|JSPROP_PERMANENT,0},
+ {js_next_str, iterator_next, 0,JSPROP_READONLY|JSPROP_PERMANENT,0},
+ {0,0,0,0,0}
+};
+
+uintN
+js_GetNativeIteratorFlags(JSContext *cx, JSObject *iterobj)
+{
+ if (OBJ_GET_CLASS(cx, iterobj) != &js_IteratorClass)
+ return 0;
+ return JSVAL_TO_INT(OBJ_GET_SLOT(cx, iterobj, JSSLOT_ITER_FLAGS));
+}
+
+void
+js_CloseNativeIterator(JSContext *cx, JSObject *iterobj)
+{
+ uintN flags;
+
+ /*
+ * If this iterator is not an instance of the native default iterator
+ * class, leave it to be GC'ed.
+ */
+ if (!JS_InstanceOf(cx, iterobj, &js_IteratorClass, NULL))
+ return;
+
+ /*
+ * If this iterator was not created by js_ValueToIterator called from the
+ * for-in loop code in js_Interpret, leave it to be GC'ed.
+ */
+ flags = JSVAL_TO_INT(OBJ_GET_SLOT(cx, iterobj, JSSLOT_ITER_FLAGS));
+ if (!(flags & JSITER_ENUMERATE))
+ return;
+
+ js_CloseIteratorState(cx, iterobj);
+}
+
+/*
+ * Call ToObject(v).__iterator__(keyonly) if ToObject(v).__iterator__ exists.
+ * Otherwise construct the default iterator.
+ */
+JSBool
+js_ValueToIterator(JSContext *cx, uintN flags, jsval *vp)
+{
+ JSObject *obj;
+ JSTempValueRooter tvr;
+ const JSAtom *atom;
+ JSBool ok;
+ JSObject *iterobj;
+ jsval arg;
+ JSString *str;
+
+ JS_ASSERT(!(flags & ~(JSITER_ENUMERATE |
+ JSITER_FOREACH |
+ JSITER_KEYVALUE)));
+
+ /* JSITER_KEYVALUE must always come with JSITER_FOREACH */
+ JS_ASSERT(!(flags & JSITER_KEYVALUE) || (flags & JSITER_FOREACH));
+
+ /* XXX work around old valueOf call hidden beneath js_ValueToObject */
+ if (!JSVAL_IS_PRIMITIVE(*vp)) {
+ obj = JSVAL_TO_OBJECT(*vp);
+ } else {
+ /*
+ * Enumerating over null and undefined gives an empty enumerator.
+ * This is contrary to ECMA-262 9.9 ToObject, invoked from step 3 of
+ * the first production in 12.6.4 and step 4 of the second production,
+ * but it's "web JS" compatible.
+ */
+ if ((flags & JSITER_ENUMERATE)) {
+ if (!js_ValueToObject(cx, *vp, &obj))
+ return JS_FALSE;
+ if (!obj)
+ goto default_iter;
+ } else {
+ obj = js_ValueToNonNullObject(cx, *vp);
+ if (!obj)
+ return JS_FALSE;
+ }
+ }
+
+ JS_ASSERT(obj);
+ JS_PUSH_TEMP_ROOT_OBJECT(cx, obj, &tvr);
+
+ atom = cx->runtime->atomState.iteratorAtom;
+#if JS_HAS_XML_SUPPORT
+ if (OBJECT_IS_XML(cx, obj)) {
+ if (!js_GetXMLFunction(cx, obj, ATOM_TO_JSID(atom), vp))
+ goto bad;
+ } else
+#endif
+ {
+ if (!OBJ_GET_PROPERTY(cx, obj, ATOM_TO_JSID(atom), vp))
+ goto bad;
+ }
+
+ if (JSVAL_IS_VOID(*vp)) {
+ default_iter:
+ /*
+ * Fail over to the default enumerating native iterator.
+ *
+ * Create iterobj with a NULL parent to ensure that we use the correct
+         * scope chain to look up the iterator's constructor. Since we use the
+ * parent slot to keep track of the iterable, we must fix it up after.
+ */
+ iterobj = js_NewObject(cx, &js_IteratorClass, NULL, NULL);
+ if (!iterobj)
+ goto bad;
+
+ /* Store iterobj in *vp to protect it from GC (callers must root vp). */
+ *vp = OBJECT_TO_JSVAL(iterobj);
+
+ if (!InitNativeIterator(cx, iterobj, obj, flags))
+ goto bad;
+ } else {
+ arg = BOOLEAN_TO_JSVAL((flags & JSITER_FOREACH) == 0);
+ if (!js_InternalInvoke(cx, obj, *vp, JSINVOKE_ITERATOR, 1, &arg, vp))
+ goto bad;
+ if (JSVAL_IS_PRIMITIVE(*vp)) {
+ str = js_DecompileValueGenerator(cx, JSDVG_SEARCH_STACK, *vp, NULL);
+ if (str) {
+ JS_ReportErrorNumberUC(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_ITERATOR_RETURN,
+ JSSTRING_CHARS(str),
+ JSSTRING_CHARS(ATOM_TO_STRING(atom)));
+ }
+ goto bad;
+ }
+ }
+
+ ok = JS_TRUE;
+ out:
+ if (obj)
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ return ok;
+ bad:
+ ok = JS_FALSE;
+ goto out;
+}
+
+static JSBool
+CallEnumeratorNext(JSContext *cx, JSObject *iterobj, uintN flags, jsval *rval)
+{
+ JSObject *obj, *origobj;
+ jsval state;
+ JSBool foreach;
+ jsid id;
+ JSObject *obj2;
+ JSBool cond;
+ JSClass *clasp;
+ JSExtendedClass *xclasp;
+ JSProperty *prop;
+ JSString *str;
+
+ JS_ASSERT(flags & JSITER_ENUMERATE);
+ JS_ASSERT(JSVAL_TO_PRIVATE(iterobj->slots[JSSLOT_CLASS]) ==
+ &js_IteratorClass);
+
+ obj = JSVAL_TO_OBJECT(iterobj->slots[JSSLOT_PARENT]);
+ origobj = JSVAL_TO_OBJECT(iterobj->slots[JSSLOT_PROTO]);
+ state = iterobj->slots[JSSLOT_ITER_STATE];
+ if (JSVAL_IS_NULL(state))
+ goto stop;
+
+ foreach = (flags & JSITER_FOREACH) != 0;
+#if JS_HAS_XML_SUPPORT
+ /*
+ * Treat an XML object specially only when it starts the prototype chain.
+ * Otherwise we need to do the usual deleted and shadowed property checks.
+ */
+ if (obj == origobj && OBJECT_IS_XML(cx, obj)) {
+ if (foreach) {
+ JSXMLObjectOps *xmlops = (JSXMLObjectOps *) obj->map->ops;
+
+ if (!xmlops->enumerateValues(cx, obj, JSENUMERATE_NEXT, &state,
+ &id, rval)) {
+ return JS_FALSE;
+ }
+ } else {
+ if (!OBJ_ENUMERATE(cx, obj, JSENUMERATE_NEXT, &state, &id))
+ return JS_FALSE;
+ }
+ iterobj->slots[JSSLOT_ITER_STATE] = state;
+ if (JSVAL_IS_NULL(state))
+ goto stop;
+ } else
+#endif
+ {
+ restart:
+ if (!OBJ_ENUMERATE(cx, obj, JSENUMERATE_NEXT, &state, &id))
+ return JS_TRUE;
+
+ iterobj->slots[JSSLOT_ITER_STATE] = state;
+ if (JSVAL_IS_NULL(state)) {
+#if JS_HAS_XML_SUPPORT
+ if (OBJECT_IS_XML(cx, obj)) {
+ /*
+ * We just finished enumerating an XML obj that is present on
+ * the prototype chain of a non-XML origobj. Stop further
+ * prototype chain searches because XML objects don't
+ * enumerate prototypes.
+ */
+ JS_ASSERT(origobj != obj);
+ JS_ASSERT(!OBJECT_IS_XML(cx, origobj));
+ } else
+#endif
+ {
+ obj = OBJ_GET_PROTO(cx, obj);
+ if (obj) {
+ iterobj->slots[JSSLOT_PARENT] = OBJECT_TO_JSVAL(obj);
+ if (!OBJ_ENUMERATE(cx, obj, JSENUMERATE_INIT, &state, NULL))
+ return JS_FALSE;
+ iterobj->slots[JSSLOT_ITER_STATE] = state;
+ if (!JSVAL_IS_NULL(state))
+ goto restart;
+ }
+ }
+ goto stop;
+ }
+
+ /* Skip properties not in obj when looking from origobj. */
+ if (!OBJ_LOOKUP_PROPERTY(cx, origobj, id, &obj2, &prop))
+ return JS_FALSE;
+ if (!prop)
+ goto restart;
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+
+ /*
+ * If the id was found in a prototype object or an unrelated object
+ * (specifically, not in an inner object for obj), skip it. This step
+ * means that all OBJ_LOOKUP_PROPERTY implementations must return an
+ * object further along on the prototype chain, or else possibly an
+ * object returned by the JSExtendedClass.outerObject optional hook.
+ */
+ if (obj != obj2) {
+ cond = JS_FALSE;
+ clasp = OBJ_GET_CLASS(cx, obj2);
+ if (clasp->flags & JSCLASS_IS_EXTENDED) {
+ xclasp = (JSExtendedClass *) clasp;
+ cond = xclasp->outerObject &&
+ xclasp->outerObject(cx, obj2) == obj;
+ }
+ if (!cond)
+ goto restart;
+ }
+
+ if (foreach) {
+ /* Get property querying the original object. */
+ if (!OBJ_GET_PROPERTY(cx, origobj, id, rval))
+ return JS_FALSE;
+ }
+ }
+
+ if (foreach) {
+ if (flags & JSITER_KEYVALUE) {
+ if (!NewKeyValuePair(cx, id, *rval, rval))
+ return JS_FALSE;
+ }
+ } else {
+ /* Make rval a string for uniformity and compatibility. */
+ if (JSID_IS_ATOM(id)) {
+ *rval = ATOM_KEY(JSID_TO_ATOM(id));
+ }
+#if JS_HAS_XML_SUPPORT
+ else if (JSID_IS_OBJECT(id)) {
+ str = js_ValueToString(cx, OBJECT_JSID_TO_JSVAL(id));
+ if (!str)
+ return JS_FALSE;
+ *rval = STRING_TO_JSVAL(str);
+ }
+#endif
+ else {
+ str = js_NumberToString(cx, (jsdouble)JSID_TO_INT(id));
+ if (!str)
+ return JS_FALSE;
+ *rval = STRING_TO_JSVAL(str);
+ }
+ }
+ return JS_TRUE;
+
+ stop:
+ JS_ASSERT(iterobj->slots[JSSLOT_ITER_STATE] == JSVAL_NULL);
+ *rval = JSVAL_HOLE;
+ return JS_TRUE;
+}
+
+JSBool
+js_CallIteratorNext(JSContext *cx, JSObject *iterobj, jsval *rval)
+{
+ uintN flags;
+
+ /* Fast path for native iterators */
+ if (OBJ_GET_CLASS(cx, iterobj) == &js_IteratorClass) {
+ flags = JSVAL_TO_INT(OBJ_GET_SLOT(cx, iterobj, JSSLOT_ITER_FLAGS));
+ if (flags & JSITER_ENUMERATE)
+ return CallEnumeratorNext(cx, iterobj, flags, rval);
+
+ /*
+ * Call next directly as all the methods of the native iterator are
+ * read-only and permanent.
+ */
+ if (!IteratorNextImpl(cx, iterobj, rval))
+ return JS_FALSE;
+ } else {
+ jsid id = ATOM_TO_JSID(cx->runtime->atomState.nextAtom);
+
+ if (!JS_GetMethodById(cx, iterobj, id, &iterobj, rval))
+ return JS_FALSE;
+ if (!js_InternalCall(cx, iterobj, *rval, 0, NULL, rval)) {
+ /* Check for StopIteration. */
+ if (!cx->throwing ||
+ JSVAL_IS_PRIMITIVE(cx->exception) ||
+ OBJ_GET_CLASS(cx, JSVAL_TO_OBJECT(cx->exception))
+ != &js_StopIterationClass) {
+ return JS_FALSE;
+ }
+
+ /* Inline JS_ClearPendingException(cx). */
+ cx->throwing = JS_FALSE;
+ cx->exception = JSVAL_VOID;
+ *rval = JSVAL_HOLE;
+ return JS_TRUE;
+ }
+ }
+
+ return JS_TRUE;
+}
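
A minimal sketch (assumed embedder code, not part of the patch): driving the enumerating iterator protocol from C with js_ValueToIterator and js_CallIteratorNext as defined above. End of iteration is reported by storing JSVAL_HOLE into *rval; rooting of val is assumed to be handled by the caller.

    #include "jsapi.h"
    #include "jsiter.h"

    static JSBool
    EnumerateFromC(JSContext *cx, jsval val)
    {
        JSObject *iterobj;
        jsval idval;

        /* JSITER_ENUMERATE requests the for-in style enumerating iterator. */
        if (!js_ValueToIterator(cx, JSITER_ENUMERATE, &val))
            return JS_FALSE;
        iterobj = JSVAL_TO_OBJECT(val);

        for (;;) {
            if (!js_CallIteratorNext(cx, iterobj, &idval)) {
                js_CloseNativeIterator(cx, iterobj);
                return JS_FALSE;
            }
            if (idval == JSVAL_HOLE)
                break;          /* enumeration is exhausted */
            /* ... use idval (a property name value) here ... */
        }

        js_CloseNativeIterator(cx, iterobj);
        return JS_TRUE;
    }
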
+
+static JSBool
+stopiter_hasInstance(JSContext *cx, JSObject *obj, jsval v, JSBool *bp)
+{
+ *bp = !JSVAL_IS_PRIMITIVE(v) &&
+ OBJ_GET_CLASS(cx, JSVAL_TO_OBJECT(v)) == &js_StopIterationClass;
+ return JS_TRUE;
+}
+
+JSClass js_StopIterationClass = {
+ js_StopIteration_str,
+ JSCLASS_HAS_CACHED_PROTO(JSProto_StopIteration),
+ JS_PropertyStub, JS_PropertyStub,
+ JS_PropertyStub, JS_PropertyStub,
+ JS_EnumerateStub, JS_ResolveStub,
+ JS_ConvertStub, JS_FinalizeStub,
+ NULL, NULL,
+ NULL, NULL,
+ NULL, stopiter_hasInstance,
+ NULL, NULL
+};
+
+#if JS_HAS_GENERATORS
+
+static void
+generator_finalize(JSContext *cx, JSObject *obj)
+{
+ JSGenerator *gen;
+
+ gen = (JSGenerator *) JS_GetPrivate(cx, obj);
+ if (gen) {
+ /*
+ * gen can be open on shutdown when close hooks are ignored or when
+ * the embedding cancels scheduled close hooks.
+ */
+ JS_ASSERT(gen->state == JSGEN_NEWBORN || gen->state == JSGEN_CLOSED ||
+ gen->state == JSGEN_OPEN);
+ JS_free(cx, gen);
+ }
+}
+
+static uint32
+generator_mark(JSContext *cx, JSObject *obj, void *arg)
+{
+ JSGenerator *gen;
+
+ gen = (JSGenerator *) JS_GetPrivate(cx, obj);
+ if (gen) {
+ /*
+ * We must mark argv[-2], as js_MarkStackFrame will not. Note that
+ * js_MarkStackFrame will mark thisp (argv[-1]) and actual arguments,
+ * plus any missing formals and local GC roots.
+ */
+ JS_ASSERT(!JSVAL_IS_PRIMITIVE(gen->frame.argv[-2]));
+ GC_MARK(cx, JSVAL_TO_GCTHING(gen->frame.argv[-2]), "generator");
+ js_MarkStackFrame(cx, &gen->frame);
+ }
+ return 0;
+}
+
+JSClass js_GeneratorClass = {
+ js_Generator_str,
+ JSCLASS_HAS_PRIVATE | JSCLASS_IS_ANONYMOUS |
+ JSCLASS_HAS_CACHED_PROTO(JSProto_Generator),
+ JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
+ JS_EnumerateStub, JS_ResolveStub, JS_ConvertStub, generator_finalize,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, generator_mark, NULL
+};
+
+/*
+ * Called from the JSOP_GENERATOR case in the interpreter, with fp referring
+ * to the frame by which the generator function was activated. Create a new
+ * JSGenerator object, which contains its own JSStackFrame that we populate
+ * from *fp. We know that upon return, the JSOP_GENERATOR opcode will return
+ * from the activation in fp, so we can steal away fp->callobj and fp->argsobj
+ * if they are non-null.
+ */
+JSObject *
+js_NewGenerator(JSContext *cx, JSStackFrame *fp)
+{
+ JSObject *obj;
+ uintN argc, nargs, nvars, depth, nslots;
+ JSGenerator *gen;
+ jsval *newsp;
+
+ /* After the following return, failing control flow must goto bad. */
+ obj = js_NewObject(cx, &js_GeneratorClass, NULL, NULL);
+ if (!obj)
+ return NULL;
+
+ /* Load and compute stack slot counts. */
+ argc = fp->argc;
+ nargs = JS_MAX(argc, fp->fun->nargs);
+ nvars = fp->nvars;
+ depth = fp->script->depth;
+ nslots = 2 + nargs + nvars + 2 * depth;
+
+ /* Allocate obj's private data struct. */
+ gen = (JSGenerator *)
+ JS_malloc(cx, sizeof(JSGenerator) + (nslots - 1) * sizeof(jsval));
+ if (!gen)
+ goto bad;
+
+ gen->obj = obj;
+
+ /* Steal away objects reflecting fp and point them at gen->frame. */
+ gen->frame.callobj = fp->callobj;
+ if (fp->callobj) {
+ JS_SetPrivate(cx, fp->callobj, &gen->frame);
+ fp->callobj = NULL;
+ }
+ gen->frame.argsobj = fp->argsobj;
+ if (fp->argsobj) {
+ JS_SetPrivate(cx, fp->argsobj, &gen->frame);
+ fp->argsobj = NULL;
+ }
+
+ /* These two references can be shared with fp until it goes away. */
+ gen->frame.varobj = fp->varobj;
+ gen->frame.thisp = fp->thisp;
+
+ /* Copy call-invariant script and function references. */
+ gen->frame.script = fp->script;
+ gen->frame.fun = fp->fun;
+
+ /* Use newsp to carve space out of gen->stack. */
+ newsp = gen->stack;
+ gen->arena.next = NULL;
+ gen->arena.base = (jsuword) newsp;
+ gen->arena.limit = gen->arena.avail = (jsuword) (newsp + nslots);
+
+#define COPY_STACK_ARRAY(vec,cnt,num) \
+ JS_BEGIN_MACRO \
+ gen->frame.cnt = cnt; \
+ gen->frame.vec = newsp; \
+ newsp += (num); \
+ memcpy(gen->frame.vec, fp->vec, (num) * sizeof(jsval)); \
+ JS_END_MACRO
+
+ /* Copy argv, rval, and vars. */
+ *newsp++ = fp->argv[-2];
+ *newsp++ = fp->argv[-1];
+ COPY_STACK_ARRAY(argv, argc, nargs);
+ gen->frame.rval = fp->rval;
+ COPY_STACK_ARRAY(vars, nvars, nvars);
+
+#undef COPY_STACK_ARRAY
+
+ /* Initialize or copy virtual machine state. */
+ gen->frame.down = NULL;
+ gen->frame.annotation = NULL;
+ gen->frame.scopeChain = fp->scopeChain;
+ gen->frame.pc = fp->pc;
+
+ /* Allocate generating pc and operand stack space. */
+ gen->frame.spbase = gen->frame.sp = newsp + depth;
+
+ /* Copy remaining state (XXX sharp* and xml* should be local vars). */
+ gen->frame.sharpDepth = 0;
+ gen->frame.sharpArray = NULL;
+ gen->frame.flags = fp->flags | JSFRAME_GENERATOR;
+ gen->frame.dormantNext = NULL;
+ gen->frame.xmlNamespace = NULL;
+ gen->frame.blockChain = NULL;
+
+ /* Note that gen is newborn. */
+ gen->state = JSGEN_NEWBORN;
+
+ if (!JS_SetPrivate(cx, obj, gen)) {
+ JS_free(cx, gen);
+ goto bad;
+ }
+
+ /*
+ * Register with GC to ensure that suspended finally blocks will be
+ * executed.
+ */
+ js_RegisterGenerator(cx, gen);
+ return obj;
+
+ bad:
+ cx->weakRoots.newborn[GCX_OBJECT] = NULL;
+ return NULL;
+}
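A worked example of the slot arithmetic above, with hypothetical numbers: a generator function declared with 2 formals, called with 3 actuals, having 1 local variable and a script depth of 4 gives nargs = JS_MAX(3, 2) = 3 and nslots = 2 + 3 + 1 + 2*4 = 14. Those 14 jsvals are carved out of gen->stack in order:

    gen->stack[0..1]    argv[-2], argv[-1]   (callee and thisp)
    gen->stack[2..4]    argv[0..2]           (nargs slots)
    gen->stack[5]       vars[0]              (nvars slots)
    gen->stack[6..9]    generating-pc space  (depth slots below spbase)
    gen->stack[10..13]  operand stack        (depth slots from spbase up)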
+
+typedef enum JSGeneratorOp {
+ JSGENOP_NEXT,
+ JSGENOP_SEND,
+ JSGENOP_THROW,
+ JSGENOP_CLOSE
+} JSGeneratorOp;
+
+/*
+ * Start newborn or restart yielding generator and perform the requested
+ * operation inside its frame.
+ */
+static JSBool
+SendToGenerator(JSContext *cx, JSGeneratorOp op, JSObject *obj,
+ JSGenerator *gen, jsval arg, jsval *rval)
+{
+ JSStackFrame *fp;
+ jsval junk;
+ JSArena *arena;
+ JSBool ok;
+
+ JS_ASSERT(gen->state == JSGEN_NEWBORN || gen->state == JSGEN_OPEN);
+ switch (op) {
+ case JSGENOP_NEXT:
+ case JSGENOP_SEND:
+ if (gen->state == JSGEN_OPEN) {
+ /*
+ * Store the argument to send as the result of the yield
+ * expression.
+ */
+ gen->frame.sp[-1] = arg;
+ }
+ gen->state = JSGEN_RUNNING;
+ break;
+
+ case JSGENOP_THROW:
+ JS_SetPendingException(cx, arg);
+ gen->state = JSGEN_RUNNING;
+ break;
+
+ default:
+ JS_ASSERT(op == JSGENOP_CLOSE);
+ JS_SetPendingException(cx, JSVAL_ARETURN);
+ gen->state = JSGEN_CLOSING;
+ break;
+ }
+
+ /* Extend the current stack pool with gen->arena. */
+ arena = cx->stackPool.current;
+ JS_ASSERT(!arena->next);
+ JS_ASSERT(!gen->arena.next);
+ JS_ASSERT(cx->stackPool.current != &gen->arena);
+ cx->stackPool.current = arena->next = &gen->arena;
+
+ /* Push gen->frame around the interpreter activation. */
+ fp = cx->fp;
+ cx->fp = &gen->frame;
+ gen->frame.down = fp;
+ ok = js_Interpret(cx, gen->frame.pc, &junk);
+ cx->fp = fp;
+ gen->frame.down = NULL;
+
+ /* Retract the stack pool and sanitize gen->arena. */
+ JS_ASSERT(!gen->arena.next);
+ JS_ASSERT(arena->next == &gen->arena);
+ JS_ASSERT(cx->stackPool.current == &gen->arena);
+ cx->stackPool.current = arena;
+ arena->next = NULL;
+
+ if (gen->frame.flags & JSFRAME_YIELDING) {
+ /* Yield cannot fail, throw or be called on closing. */
+ JS_ASSERT(ok);
+ JS_ASSERT(!cx->throwing);
+ JS_ASSERT(gen->state == JSGEN_RUNNING);
+ JS_ASSERT(op != JSGENOP_CLOSE);
+ gen->frame.flags &= ~JSFRAME_YIELDING;
+ gen->state = JSGEN_OPEN;
+ *rval = gen->frame.rval;
+ return JS_TRUE;
+ }
+
+ gen->state = JSGEN_CLOSED;
+
+ if (ok) {
+ /* Returned, explicitly or by falling off the end. */
+ if (op == JSGENOP_CLOSE)
+ return JS_TRUE;
+ return js_ThrowStopIteration(cx, obj);
+ }
+
+ /*
+ * An error, silent termination by branch callback or an exception.
+ * Propagate the condition to the caller.
+ */
+ return JS_FALSE;
+}
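In summary, the state transitions driven here and in generator_op below are (per the JSGeneratorState enum added in jsiter.h):

    NEWBORN or OPEN --next/send/throw--> RUNNING
    RUNNING         --yield-----------> OPEN     (frame.rval becomes *rval)
    RUNNING         --return or throw-> CLOSED   (next/send then raise StopIteration)
    OPEN            --close-----------> CLOSING  (pending JSVAL_ARETURN), then CLOSED
    NEWBORN         --close-----------> CLOSED   (short-circuited in generator_op)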
+
+/*
+ * Execute gen's close hook after the GC detects that the object has become
+ * unreachable.
+ */
+JSBool
+js_CloseGeneratorObject(JSContext *cx, JSGenerator *gen)
+{
+ /* We pass null as rval since SendToGenerator never uses it with CLOSE. */
+ return SendToGenerator(cx, JSGENOP_CLOSE, gen->obj, gen, JSVAL_VOID, NULL);
+}
+
+/*
+ * Common subroutine of generator_(next|send|throw|close) methods.
+ */
+static JSBool
+generator_op(JSContext *cx, JSGeneratorOp op,
+ JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSGenerator *gen;
+ JSString *str;
+ jsval arg;
+
+ if (!JS_InstanceOf(cx, obj, &js_GeneratorClass, argv))
+ return JS_FALSE;
+
+ gen = (JSGenerator *) JS_GetPrivate(cx, obj);
+ if (gen == NULL) {
+ /* This happens when obj is the generator prototype. See bug 352885. */
+ goto closed_generator;
+ }
+
+ switch (gen->state) {
+ case JSGEN_NEWBORN:
+ switch (op) {
+ case JSGENOP_NEXT:
+ case JSGENOP_THROW:
+ break;
+
+ case JSGENOP_SEND:
+ if (!JSVAL_IS_VOID(argv[0])) {
+ str = js_DecompileValueGenerator(cx, JSDVG_SEARCH_STACK,
+ argv[0], NULL);
+ if (str) {
+ JS_ReportErrorNumberUC(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_GENERATOR_SEND,
+ JSSTRING_CHARS(str));
+ }
+ return JS_FALSE;
+ }
+ break;
+
+ default:
+ JS_ASSERT(op == JSGENOP_CLOSE);
+ gen->state = JSGEN_CLOSED;
+ return JS_TRUE;
+ }
+ break;
+
+ case JSGEN_OPEN:
+ break;
+
+ case JSGEN_RUNNING:
+ case JSGEN_CLOSING:
+ str = js_DecompileValueGenerator(cx, JSDVG_SEARCH_STACK, argv[-1],
+ JS_GetFunctionId(gen->frame.fun));
+ if (str) {
+ JS_ReportErrorNumberUC(cx, js_GetErrorMessage, NULL,
+ JSMSG_NESTING_GENERATOR,
+ JSSTRING_CHARS(str));
+ }
+ return JS_FALSE;
+
+ default:
+ JS_ASSERT(gen->state == JSGEN_CLOSED);
+
+ closed_generator:
+ switch (op) {
+ case JSGENOP_NEXT:
+ case JSGENOP_SEND:
+ return js_ThrowStopIteration(cx, obj);
+ case JSGENOP_THROW:
+ JS_SetPendingException(cx, argv[0]);
+ return JS_FALSE;
+ default:
+ JS_ASSERT(op == JSGENOP_CLOSE);
+ return JS_TRUE;
+ }
+ }
+
+ arg = (op == JSGENOP_SEND || op == JSGENOP_THROW)
+ ? argv[0]
+ : JSVAL_VOID;
+ if (!SendToGenerator(cx, op, obj, gen, arg, rval))
+ return JS_FALSE;
+ return JS_TRUE;
+}
+
+static JSBool
+generator_send(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ return generator_op(cx, JSGENOP_SEND, obj, argc, argv, rval);
+}
+
+static JSBool
+generator_next(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ return generator_op(cx, JSGENOP_NEXT, obj, argc, argv, rval);
+}
+
+static JSBool
+generator_throw(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ return generator_op(cx, JSGENOP_THROW, obj, argc, argv, rval);
+}
+
+static JSBool
+generator_close(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ return generator_op(cx, JSGENOP_CLOSE, obj, argc, argv, rval);
+}
+
+static JSFunctionSpec generator_methods[] = {
+ {js_iterator_str, iterator_self, 0,JSPROP_READONLY|JSPROP_PERMANENT,0},
+ {js_next_str, generator_next, 0,JSPROP_READONLY|JSPROP_PERMANENT,0},
+ {js_send_str, generator_send, 1,JSPROP_READONLY|JSPROP_PERMANENT,0},
+ {js_throw_str, generator_throw, 1,JSPROP_READONLY|JSPROP_PERMANENT,0},
+ {js_close_str, generator_close, 0,JSPROP_READONLY|JSPROP_PERMANENT,0},
+ {0,0,0,0,0}
+};
+
+#endif /* JS_HAS_GENERATORS */
+
+JSObject *
+js_InitIteratorClasses(JSContext *cx, JSObject *obj)
+{
+ JSObject *proto, *stop;
+
+ /* Idempotency required: we initialize several things, possibly lazily. */
+ if (!js_GetClassObject(cx, obj, JSProto_StopIteration, &stop))
+ return NULL;
+ if (stop)
+ return stop;
+
+ proto = JS_InitClass(cx, obj, NULL, &js_IteratorClass, Iterator, 2,
+ NULL, iterator_methods, NULL, NULL);
+ if (!proto)
+ return NULL;
+ proto->slots[JSSLOT_ITER_STATE] = JSVAL_NULL;
+
+#if JS_HAS_GENERATORS
+ /* Initialize the generator internals if configured. */
+ if (!JS_InitClass(cx, obj, NULL, &js_GeneratorClass, NULL, 0,
+ NULL, generator_methods, NULL, NULL)) {
+ return NULL;
+ }
+#endif
+
+ return JS_InitClass(cx, obj, NULL, &js_StopIterationClass, NULL, 0,
+ NULL, NULL, NULL, NULL);
+}
diff --git a/third_party/js-1.7/jsiter.h b/third_party/js-1.7/jsiter.h
new file mode 100644
index 0000000..1a99b6b
--- /dev/null
+++ b/third_party/js-1.7/jsiter.h
@@ -0,0 +1,114 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sw=4 et tw=78:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsiter_h___
+#define jsiter_h___
+/*
+ * JavaScript iterators.
+ */
+#include "jsprvtd.h"
+#include "jspubtd.h"
+
+#define JSITER_ENUMERATE 0x1 /* for-in compatible hidden default iterator */
+#define JSITER_FOREACH 0x2 /* return [key, value] pair rather than key */
+#define JSITER_KEYVALUE 0x4 /* destructuring for-in wants [key, value] */
+
+extern void
+js_CloseNativeIterator(JSContext *cx, JSObject *iterobj);
+
+extern void
+js_CloseIteratorState(JSContext *cx, JSObject *iterobj);
+
+/*
+ * Convert the value stored in *vp to its iteration object. The flags should
+ * contain JSITER_ENUMERATE if js_ValueToIterator is called when enumerating
+ * for-in semantics are required, and when the caller can guarantee that the
+ * iterator will never be exposed to scripts.
+ */
+extern JSBool
+js_ValueToIterator(JSContext *cx, uintN flags, jsval *vp);
+
+/*
+ * Given iterobj, call iterobj.next(). If the iterator stopped, set *rval to
+ * JSVAL_HOLE. Otherwise set it to the result of the next call.
+ */
+extern JSBool
+js_CallIteratorNext(JSContext *cx, JSObject *iterobj, jsval *rval);
+
+#if JS_HAS_GENERATORS
+
+/*
+ * Generator state codes.
+ */
+typedef enum JSGeneratorState {
+ JSGEN_NEWBORN, /* not yet started */
+ JSGEN_OPEN, /* started by a .next() or .send(undefined) call */
+ JSGEN_RUNNING, /* currently executing via .next(), etc., call */
+ JSGEN_CLOSING, /* close method is doing asynchronous return */
+ JSGEN_CLOSED /* closed, cannot be started or closed again */
+} JSGeneratorState;
+
+struct JSGenerator {
+ JSGenerator *next;
+ JSObject *obj;
+ JSGeneratorState state;
+ JSStackFrame frame;
+ JSArena arena;
+ jsval stack[1];
+};
+
+#define FRAME_TO_GENERATOR(fp) \
+ ((JSGenerator *) ((uint8 *)(fp) - offsetof(JSGenerator, frame)))
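This is the usual container-of idiom: from a pointer to the embedded frame member it recovers the enclosing JSGenerator. A sketch of the intended use (the real call sites live in the interpreter, outside this hunk):

    JSGenerator *gen;

    if (fp->flags & JSFRAME_GENERATOR) {
        gen = FRAME_TO_GENERATOR(fp);
        /* gen->obj is the generator object whose private data holds fp. */
    }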
+
+extern JSObject *
+js_NewGenerator(JSContext *cx, JSStackFrame *fp);
+
+extern JSBool
+js_CloseGeneratorObject(JSContext *cx, JSGenerator *gen);
+
+#endif
+
+extern JSClass js_GeneratorClass;
+extern JSClass js_IteratorClass;
+extern JSClass js_StopIterationClass;
+
+extern JSObject *
+js_InitIteratorClasses(JSContext *cx, JSObject *obj);
+
+#endif /* jsiter_h___ */
diff --git a/third_party/js-1.7/jskeyword.tbl b/third_party/js-1.7/jskeyword.tbl
new file mode 100644
index 0000000..49b9c6c
--- /dev/null
+++ b/third_party/js-1.7/jskeyword.tbl
@@ -0,0 +1,124 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set sw=4 ts=8 et tw=80:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+JS_KEYWORD(break, TOK_BREAK, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(case, TOK_CASE, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(continue, TOK_CONTINUE, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(default, TOK_DEFAULT, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(delete, TOK_DELETE, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(do, TOK_DO, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(else, TOK_ELSE, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(export, TOK_EXPORT, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(false, TOK_PRIMARY, JSOP_FALSE, JSVERSION_DEFAULT)
+JS_KEYWORD(for, TOK_FOR, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(function, TOK_FUNCTION, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(if, TOK_IF, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(in, TOK_IN, JSOP_IN, JSVERSION_DEFAULT)
+JS_KEYWORD(new, TOK_NEW, JSOP_NEW, JSVERSION_DEFAULT)
+JS_KEYWORD(null, TOK_PRIMARY, JSOP_NULL, JSVERSION_DEFAULT)
+JS_KEYWORD(return, TOK_RETURN, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(switch, TOK_SWITCH, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(this, TOK_PRIMARY, JSOP_THIS, JSVERSION_DEFAULT)
+JS_KEYWORD(true, TOK_PRIMARY, JSOP_TRUE, JSVERSION_DEFAULT)
+JS_KEYWORD(typeof, TOK_UNARYOP, JSOP_TYPEOF, JSVERSION_DEFAULT)
+JS_KEYWORD(var, TOK_VAR, JSOP_DEFVAR, JSVERSION_DEFAULT)
+JS_KEYWORD(void, TOK_UNARYOP, JSOP_VOID, JSVERSION_DEFAULT)
+JS_KEYWORD(while, TOK_WHILE, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(with, TOK_WITH, JSOP_NOP, JSVERSION_DEFAULT)
+#if JS_HAS_CONST
+JS_KEYWORD(const, TOK_VAR, JSOP_DEFCONST, JSVERSION_DEFAULT)
+#else
+JS_KEYWORD(const, TOK_RESERVED, JSOP_NOP, JSVERSION_DEFAULT)
+#endif
+
+JS_KEYWORD(try, TOK_TRY, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(catch, TOK_CATCH, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(finally, TOK_FINALLY, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(throw, TOK_THROW, JSOP_NOP, JSVERSION_DEFAULT)
+
+JS_KEYWORD(instanceof, TOK_INSTANCEOF, JSOP_INSTANCEOF,JSVERSION_DEFAULT)
+
+#if JS_HAS_RESERVED_JAVA_KEYWORDS
+JS_KEYWORD(abstract, TOK_RESERVED, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(boolean, TOK_RESERVED, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(byte, TOK_RESERVED, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(char, TOK_RESERVED, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(class, TOK_RESERVED, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(double, TOK_RESERVED, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(extends, TOK_RESERVED, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(final, TOK_RESERVED, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(float, TOK_RESERVED, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(goto, TOK_RESERVED, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(implements, TOK_RESERVED, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(import, TOK_IMPORT, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(int, TOK_RESERVED, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(interface, TOK_RESERVED, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(long, TOK_RESERVED, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(native, TOK_RESERVED, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(package, TOK_RESERVED, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(private, TOK_RESERVED, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(protected, TOK_RESERVED, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(public, TOK_RESERVED, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(short, TOK_RESERVED, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(static, TOK_RESERVED, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(super, TOK_RESERVED, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(synchronized,TOK_RESERVED, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(throws, TOK_RESERVED, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(transient, TOK_RESERVED, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(volatile, TOK_RESERVED, JSOP_NOP, JSVERSION_DEFAULT)
+#endif
+
+#if JS_HAS_RESERVED_ECMA_KEYWORDS
+JS_KEYWORD(enum, TOK_RESERVED, JSOP_NOP, JSVERSION_DEFAULT)
+#endif
+
+#if JS_HAS_DEBUGGER_KEYWORD
+JS_KEYWORD(debugger, TOK_DEBUGGER, JSOP_NOP, JSVERSION_DEFAULT)
+#elif JS_HAS_RESERVED_ECMA_KEYWORDS
+JS_KEYWORD(debugger, TOK_RESERVED, JSOP_NOP, JSVERSION_DEFAULT)
+#endif
+
+#if JS_HAS_GENERATORS
+JS_KEYWORD(yield, TOK_YIELD, JSOP_NOP, JSVERSION_1_7)
+#endif
+
+#if JS_HAS_BLOCK_SCOPE
+JS_KEYWORD(let, TOK_LET, JSOP_NOP, JSVERSION_1_7)
+#endif
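The table is an X-macro: each consumer defines JS_KEYWORD to pick out the columns it needs and then includes the file, so the keyword data lives in exactly one place. jskwgen.c below extracts just the spellings; a sketch of the same pattern for another column (illustrative only, assuming the JSTokenType enum from jsscan.h, and not a consumer that exists in this patch):

    /* Build an array of the keywords' token types, parallel to the table. */
    #define JS_KEYWORD(keyword, type, op, version) type,
    static const JSTokenType keyword_types[] = {
    #include "jskeyword.tbl"
    };
    #undef JS_KEYWORD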
diff --git a/third_party/js-1.7/jskwgen.c b/third_party/js-1.7/jskwgen.c
new file mode 100644
index 0000000..5ae39bd
--- /dev/null
+++ b/third_party/js-1.7/jskwgen.c
@@ -0,0 +1,460 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set sw=4 ts=8 et tw=80:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is String Switch Generator for JavaScript Keywords,
+ * released 2005-12-09.
+ *
+ * The Initial Developer of the Original Code is
+ * Igor Bukanov.
+ * Portions created by the Initial Developer are Copyright (C) 2005-2006
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#include "jsstddef.h"
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdarg.h>
+#include <ctype.h>
+
+#include "jsconfig.h"
+
+const char * const keyword_list[] = {
+#define JS_KEYWORD(keyword, type, op, version) #keyword,
+#include "jskeyword.tbl"
+#undef JS_KEYWORD
+};
+
+struct gen_opt {
+ FILE *output; /* output file for generated source */
+ unsigned use_if_threshold; /* max number of choices to generate
+ "if" selector instead of "switch" */
+ unsigned char_tail_test_threshold; /* max number of unprocessed columns
+ to use inlined char compare
+ for remaining chars and not generic
+ string compare code */
+ unsigned indent_level; /* current source indentation level */
+};
+
+static unsigned column_to_compare;
+
+static int
+length_comparator(const void *a, const void *b)
+{
+ const char *str1 = keyword_list[*(unsigned *)a];
+ const char *str2 = keyword_list[*(unsigned *)b];
+ return (int)strlen(str1) - (int)strlen(str2);
+}
+
+static int
+column_comparator(const void *a, const void *b)
+{
+ const char *str1 = keyword_list[*(unsigned *)a];
+ const char *str2 = keyword_list[*(unsigned *)b];
+ return (int)str1[column_to_compare] - (int)str2[column_to_compare];
+}
+
+static unsigned
+count_different_lengths(unsigned indexes[], unsigned nelem)
+{
+ unsigned nlength, current_length, i, l;
+
+ current_length = 0;
+ nlength = 0;
+ for (i = 0; i != nelem; ++i) {
+ l = (unsigned)strlen(keyword_list[indexes[i]]);
+ assert(l != 0);
+ if (current_length != l) {
+ ++nlength;
+ current_length = l;
+ }
+ }
+ return nlength;
+}
+
+static void
+find_char_span_and_count(unsigned indexes[], unsigned nelem, unsigned column,
+ unsigned *span_result, unsigned *count_result)
+{
+ unsigned i, count;
+ unsigned char c, prev, minc, maxc;
+
+ assert(nelem != 0);
+ minc = maxc = prev = (unsigned char)keyword_list[indexes[0]][column];
+ count = 1;
+ for (i = 1; i != nelem; ++i) {
+ c = (unsigned char)keyword_list[indexes[i]][column];
+ if (prev != c) {
+ prev = c;
+ ++count;
+ if (minc > c) {
+ minc = c;
+ } else if (maxc < c) {
+ maxc = c;
+ }
+ }
+ }
+
+ *span_result = maxc - minc + 1;
+ *count_result = count;
+}
+
+static unsigned
+find_optimal_switch_column(struct gen_opt *opt,
+ unsigned indexes[], unsigned nelem,
+ unsigned columns[], unsigned unprocessed_columns,
+ int *use_if_result)
+{
+ unsigned i;
+ unsigned span, min_span, min_span_index;
+ unsigned nchar, min_nchar, min_nchar_index;
+
+ assert(unprocessed_columns != 0);
+ i = 0;
+ min_nchar = min_span = (unsigned)-1;
+ min_nchar_index = min_span_index = 0;
+ do {
+ column_to_compare = columns[i];
+ qsort(indexes, nelem, sizeof(indexes[0]), column_comparator);
+ find_char_span_and_count(indexes, nelem, column_to_compare,
+ &span, &nchar);
+ assert(span != 0);
+ if (span == 1) {
+ assert(nchar == 1);
+ *use_if_result = 1;
+ return 1;
+ }
+ assert(nchar != 1);
+ if (min_span > span) {
+ min_span = span;
+ min_span_index = i;
+ }
+ if (min_nchar > nchar) {
+ min_nchar = nchar;
+ min_nchar_index = i;
+ }
+ } while (++i != unprocessed_columns);
+
+ if (min_nchar <= opt->use_if_threshold) {
+ *use_if_result = 1;
+ i = min_nchar_index;
+ } else {
+ *use_if_result = 0;
+ i = min_span_index;
+ }
+
+ /*
+ * Restore order corresponding to i if it was destroyed by
+ * subsequent sort.
+ */
+ if (i != unprocessed_columns - 1) {
+ column_to_compare = columns[i];
+ qsort(indexes, nelem, sizeof(indexes[0]), column_comparator);
+ }
+
+ return i;
+}
+
+
+static void
+p(struct gen_opt *opt, const char *format, ...)
+{
+ va_list ap;
+
+ va_start(ap, format);
+ vfprintf(opt->output, format, ap);
+ va_end(ap);
+}
+
+/* Size for '\xxx' where xxx is octal escape */
+#define MIN_QUOTED_CHAR_BUFFER 7
+
+static char *
+qchar(char c, char *quoted_buffer)
+{
+ char *s;
+
+ s = quoted_buffer;
+ *s++ = '\'';
+ switch (c) {
+ case '\n': c = 'n'; goto one_char_escape;
+ case '\r': c = 'r'; goto one_char_escape;
+ case '\t': c = 't'; goto one_char_escape;
+ case '\f': c = 'f'; goto one_char_escape;
+ case '\0': c = '0'; goto one_char_escape;
+ case '\'': goto one_char_escape;
+ one_char_escape:
+ *s++ = '\\';
+ break;
+ default:
+ if (!isprint(c)) {
+ *s++ = '\\';
+ *s++ = (char)('0' + (0x3 & (((unsigned char)c) >> 6)));
+ *s++ = (char)('0' + (0x7 & (((unsigned char)c) >> 3)));
+ c = (char)('0' + (0x7 & ((unsigned char)c)));
+ }
+ }
+ *s++ = c;
+ *s++ = '\'';
+ *s = '\0';
+ assert(s + 1 <= quoted_buffer + MIN_QUOTED_CHAR_BUFFER);
+ return quoted_buffer;
+}
+
+static void
+nl(struct gen_opt *opt)
+{
+ putc('\n', opt->output);
+}
+
+static void
+indent(struct gen_opt *opt)
+{
+ unsigned n = opt->indent_level;
+ while (n != 0) {
+ --n;
+ fputs(" ", opt->output);
+ }
+}
+
+static void
+line(struct gen_opt *opt, const char *format, ...)
+{
+ va_list ap;
+
+ indent(opt);
+ va_start(ap, format);
+ vfprintf(opt->output, format, ap);
+ va_end(ap);
+ nl(opt);
+}
+
+static void
+generate_letter_switch_r(struct gen_opt *opt,
+ unsigned indexes[], unsigned nelem,
+ unsigned columns[], unsigned unprocessed_columns)
+{
+ char qbuf[MIN_QUOTED_CHAR_BUFFER];
+
+ assert(nelem != 0);
+ if (nelem == 1) {
+ unsigned kw_index = indexes[0];
+ const char *keyword = keyword_list[kw_index];
+
+ if (unprocessed_columns == 0) {
+ line(opt, "JSKW_GOT_MATCH(%u) /* %s */", kw_index, keyword);
+ } else if (unprocessed_columns > opt->char_tail_test_threshold) {
+ line(opt, "JSKW_TEST_GUESS(%u) /* %s */", kw_index, keyword);
+ } else {
+ unsigned i, column;
+
+ indent(opt); p(opt, "if (");
+ for (i = 0; i != unprocessed_columns; ++i) {
+ column = columns[i];
+ qchar(keyword[column], qbuf);
+ p(opt, "%sJSKW_AT(%u)==%s", (i == 0) ? "" : " && ",
+ column, qbuf);
+ }
+ p(opt, ") {"); nl(opt);
+ ++opt->indent_level;
+ line(opt, "JSKW_GOT_MATCH(%u) /* %s */", kw_index, keyword);
+ --opt->indent_level;
+ line(opt, "}");
+ line(opt, "JSKW_NO_MATCH()");
+ }
+ } else {
+ unsigned optimal_column_index, optimal_column;
+ unsigned i;
+ int use_if;
+ char current;
+
+ assert(unprocessed_columns != 0);
+ optimal_column_index = find_optimal_switch_column(opt, indexes, nelem,
+ columns,
+ unprocessed_columns,
+ &use_if);
+ optimal_column = columns[optimal_column_index];
+ columns[optimal_column_index] = columns[unprocessed_columns - 1];
+
+ if (!use_if)
+ line(opt, "switch (JSKW_AT(%u)) {", optimal_column);
+
+ current = keyword_list[indexes[0]][optimal_column];
+ for (i = 0; i != nelem;) {
+ unsigned same_char_begin = i;
+ char next = current;
+
+ for (++i; i != nelem; ++i) {
+ next = keyword_list[indexes[i]][optimal_column];
+ if (next != current)
+ break;
+ }
+ qchar(current, qbuf);
+ if (use_if) {
+ line(opt, "if (JSKW_AT(%u) == %s) {", optimal_column, qbuf);
+ } else {
+ line(opt, " case %s:", qbuf);
+ }
+ ++opt->indent_level;
+ generate_letter_switch_r(opt, indexes + same_char_begin,
+ i - same_char_begin,
+ columns, unprocessed_columns - 1);
+ --opt->indent_level;
+ if (use_if) {
+ line(opt, "}");
+ }
+ current = next;
+ }
+
+ if (!use_if) {
+ line(opt, "}");
+ }
+
+ columns[optimal_column_index] = optimal_column;
+
+ line(opt, "JSKW_NO_MATCH()");
+ }
+}
+
+static void
+generate_letter_switch(struct gen_opt *opt,
+ unsigned indexes[], unsigned nelem,
+ unsigned current_length)
+{
+ unsigned *columns;
+ unsigned i;
+
+ columns = malloc(sizeof(columns[0]) * current_length);
+ if (!columns) {
+ perror("malloc");
+ exit(EXIT_FAILURE);
+ }
+ for (i = 0; i != current_length; ++i) {
+ columns[i] = i;
+ }
+ generate_letter_switch_r(opt, indexes, nelem, columns, current_length);
+ free(columns);
+}
+
+
+static void
+generate_switch(struct gen_opt *opt)
+{
+ unsigned *indexes;
+ unsigned nlength;
+ unsigned i, current;
+ int use_if;
+ unsigned nelem;
+
+ nelem = sizeof(keyword_list)/sizeof(keyword_list[0]);
+
+ line(opt, "/*");
+ line(opt, " * Generating switch for the list of %u entries:", nelem);
+ for (i = 0; i != nelem; ++i) {
+ line(opt, " * %s", keyword_list[i]);
+ }
+ line(opt, " */");
+
+ indexes = malloc(sizeof(indexes[0]) * nelem);
+ if (!indexes) {
+ perror("malloc");
+ exit(EXIT_FAILURE);
+ }
+ for (i = 0; i != nelem; ++i)
+ indexes[i] = i;
+ qsort(indexes, nelem, sizeof(indexes[i]), length_comparator);
+ nlength = count_different_lengths(indexes, nelem);
+
+ use_if = (nlength <= opt->use_if_threshold);
+
+ if (!use_if)
+ line(opt, "switch (JSKW_LENGTH()) {");
+
+ current = (unsigned)strlen(keyword_list[indexes[0]]);
+ for (i = 0; i != nelem;) {
+ unsigned same_length_begin = i;
+ unsigned next = current;
+
+ for (++i; i != nelem; ++i) {
+ next = (unsigned)strlen(keyword_list[indexes[i]]);
+ if (next != current)
+ break;
+ }
+ if (use_if) {
+ line(opt, "if (JSKW_LENGTH() == %u) {", current);
+ } else {
+ line(opt, " case %u:", current);
+ }
+ ++opt->indent_level;
+ generate_letter_switch(opt, indexes + same_length_begin,
+ i - same_length_begin,
+ current);
+ --opt->indent_level;
+ if (use_if) {
+ line(opt, "}");
+ }
+ current = next;
+ }
+ if (!use_if)
+ line(opt, "}");
+ line(opt, "JSKW_NO_MATCH()");
+ free(indexes);
+}
+
+int main(int argc, char **argv)
+{
+ struct gen_opt opt;
+
+ if (argc < 2) {
+ opt.output = stdout;
+ } else {
+ opt.output = fopen(argv[1], "w");
+ if (!opt.output) {
+ perror("fopen");
+ exit(EXIT_FAILURE);
+ }
+ }
+ opt.indent_level = 1;
+ opt.use_if_threshold = 3;
+ opt.char_tail_test_threshold = 4;
+
+ generate_switch(&opt);
+
+ if (opt.output != stdout) {
+ if (fclose(opt.output)) {
+ perror("fclose");
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ return EXIT_SUCCESS;
+}
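For orientation, the generated output is a nest of length and character tests written in terms of JSKW_LENGTH, JSKW_AT, JSKW_GOT_MATCH, JSKW_TEST_GUESS and JSKW_NO_MATCH, which the including scanner is expected to define. The exact shape depends on the thresholds set in main(); a rough, illustrative fragment (indices invented) looks like:

    switch (JSKW_LENGTH()) {
      case 2:
        if (JSKW_AT(0) == 'd') {
            if (JSKW_AT(1)=='o') {
                JSKW_GOT_MATCH(5) /* do */
            }
            JSKW_NO_MATCH()
        }
        /* ... more first-letter tests ... */
        JSKW_NO_MATCH()
      /* ... more lengths ... */
    }
    JSKW_NO_MATCH()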
diff --git a/third_party/js-1.7/jslibmath.h b/third_party/js-1.7/jslibmath.h
new file mode 100644
index 0000000..3f75f30
--- /dev/null
+++ b/third_party/js-1.7/jslibmath.h
@@ -0,0 +1,266 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ * IBM Corp.
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * All math calls go through the fd_* macros defined below. Unless
+ * JS_USE_FDLIBM_MATH is set, they resolve to the native math library;
+ * otherwise they are remapped to fdlibm.
+ */
+
+#ifndef _LIBMATH_H
+#define _LIBMATH_H
+
+#include <math.h>
+#include "jsconfig.h"
+
+/*
+ * Define on which platforms to use fdlibm. Not used by default under
+ * assumption that native math library works unless proved guilty.
+ * Plus there can be problems with endian-ness and such in fdlibm itself.
+ *
+ * fdlibm compatibility notes:
+ * - fdlibm broken on OSF1/alpha
+ */
+
+#ifndef JS_USE_FDLIBM_MATH
+#define JS_USE_FDLIBM_MATH 0
+#endif
+
+#if !JS_USE_FDLIBM_MATH
+
+/*
+ * Use system provided math routines.
+ */
+
+#define fd_acos acos
+#define fd_asin asin
+#define fd_atan atan
+#define fd_atan2 atan2
+#define fd_ceil ceil
+
+/* The right copysign function is not always named the same thing. */
+#if __GNUC__ >= 4 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)
+#define fd_copysign __builtin_copysign
+#elif defined WINCE
+#define fd_copysign _copysign
+#elif defined _WIN32
+#if _MSC_VER < 1400
+/* Try to work around apparent _copysign bustage in VC6 and VC7. */
+#define fd_copysign js_copysign
+extern double js_copysign(double, double);
+#else
+#define fd_copysign _copysign
+#endif
+#else
+#define fd_copysign copysign
+#endif
+
+#define fd_cos cos
+#define fd_exp exp
+#define fd_fabs fabs
+#define fd_floor floor
+#define fd_fmod fmod
+#define fd_log log
+#define fd_pow pow
+#define fd_sin sin
+#define fd_sqrt sqrt
+#define fd_tan tan
+
+#else
+
+/*
+ * Use math routines in fdlibm.
+ */
+
+#undef __P
+#ifdef __STDC__
+#define __P(p) p
+#else
+#define __P(p) ()
+#endif
+
+#if (defined _WIN32 && !defined WINCE) || defined SUNOS4
+
+#define fd_acos acos
+#define fd_asin asin
+#define fd_atan atan
+#define fd_cos cos
+#define fd_sin sin
+#define fd_tan tan
+#define fd_exp exp
+#define fd_log log
+#define fd_sqrt sqrt
+#define fd_ceil ceil
+#define fd_fabs fabs
+#define fd_floor floor
+#define fd_fmod fmod
+
+extern double fd_atan2 __P((double, double));
+extern double fd_copysign __P((double, double));
+extern double fd_pow __P((double, double));
+
+#elif defined IRIX
+
+#define fd_acos acos
+#define fd_asin asin
+#define fd_atan atan
+#define fd_exp exp
+#define fd_log log
+#define fd_log10 log10
+#define fd_sqrt sqrt
+#define fd_fabs fabs
+#define fd_floor floor
+#define fd_fmod fmod
+
+extern double fd_cos __P((double));
+extern double fd_sin __P((double));
+extern double fd_tan __P((double));
+extern double fd_atan2 __P((double, double));
+extern double fd_pow __P((double, double));
+extern double fd_ceil __P((double));
+extern double fd_copysign __P((double, double));
+
+#elif defined SOLARIS
+
+#define fd_atan atan
+#define fd_cos cos
+#define fd_sin sin
+#define fd_tan tan
+#define fd_exp exp
+#define fd_sqrt sqrt
+#define fd_ceil ceil
+#define fd_fabs fabs
+#define fd_floor floor
+#define fd_fmod fmod
+
+extern double fd_acos __P((double));
+extern double fd_asin __P((double));
+extern double fd_log __P((double));
+extern double fd_atan2 __P((double, double));
+extern double fd_pow __P((double, double));
+extern double fd_copysign __P((double, double));
+
+#elif defined HPUX
+
+#define fd_cos cos
+#define fd_sin sin
+#define fd_exp exp
+#define fd_sqrt sqrt
+#define fd_fabs fabs
+#define fd_floor floor
+#define fd_fmod fmod
+
+extern double fd_ceil __P((double));
+extern double fd_acos __P((double));
+extern double fd_log __P((double));
+extern double fd_atan2 __P((double, double));
+extern double fd_tan __P((double));
+extern double fd_pow __P((double, double));
+extern double fd_asin __P((double));
+extern double fd_atan __P((double));
+extern double fd_copysign __P((double, double));
+
+#elif defined(OSF1)
+
+#define fd_acos acos
+#define fd_asin asin
+#define fd_atan atan
+#define fd_copysign copysign
+#define fd_cos cos
+#define fd_exp exp
+#define fd_fabs fabs
+#define fd_fmod fmod
+#define fd_sin sin
+#define fd_sqrt sqrt
+#define fd_tan tan
+
+extern double fd_atan2 __P((double, double));
+extern double fd_ceil __P((double));
+extern double fd_floor __P((double));
+extern double fd_log __P((double));
+extern double fd_pow __P((double, double));
+
+#elif defined(AIX)
+
+#define fd_acos acos
+#define fd_asin asin
+#define fd_atan2 atan2
+#define fd_copysign copysign
+#define fd_cos cos
+#define fd_exp exp
+#define fd_fabs fabs
+#define fd_floor floor
+#define fd_fmod fmod
+#define fd_log log
+#define fd_sin sin
+#define fd_sqrt sqrt
+
+extern double fd_atan __P((double));
+extern double fd_ceil __P((double));
+extern double fd_pow __P((double,double));
+extern double fd_tan __P((double));
+
+#else /* other platform.. generic paranoid slow fdlibm */
+
+extern double fd_acos __P((double));
+extern double fd_asin __P((double));
+extern double fd_atan __P((double));
+extern double fd_cos __P((double));
+extern double fd_sin __P((double));
+extern double fd_tan __P((double));
+
+extern double fd_exp __P((double));
+extern double fd_log __P((double));
+extern double fd_sqrt __P((double));
+
+extern double fd_ceil __P((double));
+extern double fd_fabs __P((double));
+extern double fd_floor __P((double));
+extern double fd_fmod __P((double, double));
+
+extern double fd_atan2 __P((double, double));
+extern double fd_pow __P((double, double));
+extern double fd_copysign __P((double, double));
+
+#endif
+
+#endif /* JS_USE_FDLIBM_MATH */
+
+#endif /* _LIBMATH_H */
+
diff --git a/third_party/js-1.7/jslock.c b/third_party/js-1.7/jslock.c
new file mode 100644
index 0000000..4855009
--- /dev/null
+++ b/third_party/js-1.7/jslock.c
@@ -0,0 +1,1303 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifdef JS_THREADSAFE
+
+/*
+ * JS locking stubs.
+ */
+#include "jsstddef.h"
+#include <stdlib.h>
+#include "jspubtd.h"
+#include "jsutil.h" /* Added by JSIFY */
+#include "jstypes.h"
+#include "jsbit.h"
+#include "jscntxt.h"
+#include "jsdtoa.h"
+#include "jsgc.h"
+#include "jslock.h"
+#include "jsscope.h"
+#include "jsstr.h"
+
+#define ReadWord(W) (W)
+
+#ifndef NSPR_LOCK
+
+#include <memory.h>
+
+static PRLock **global_locks;
+static uint32 global_lock_count = 1;
+static uint32 global_locks_log2 = 0;
+static uint32 global_locks_mask = 0;
+
+#define GLOBAL_LOCK_INDEX(id) (((uint32)(id) >> 2) & global_locks_mask)
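The index is a simple pointer hash: shifting the id right by 2 drops the low bits that are always zero for word-aligned addresses, and the mask (set up elsewhere, not in this hunk, as a power-of-two lock count minus 1) selects one of the global locks. For example, with 4 locks and a mask of 0x3, an id of 0x1008 maps to (0x1008 >> 2) & 0x3 = 0x402 & 0x3 = 2.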
+
+static void
+js_LockGlobal(void *id)
+{
+ uint32 i = GLOBAL_LOCK_INDEX(id);
+ PR_Lock(global_locks[i]);
+}
+
+static void
+js_UnlockGlobal(void *id)
+{
+ uint32 i = GLOBAL_LOCK_INDEX(id);
+ PR_Unlock(global_locks[i]);
+}
+
+/* Exclude Alpha NT. */
+#if defined(_WIN32) && defined(_M_IX86)
+#pragma warning( disable : 4035 )
+
+static JS_INLINE int
+js_CompareAndSwap(jsword *w, jsword ov, jsword nv)
+{
+ __asm {
+ mov eax, ov
+ mov ecx, nv
+ mov ebx, w
+ lock cmpxchg [ebx], ecx
+ sete al
+ and eax, 1h
+ }
+}
+
+#elif defined(__GNUC__) && defined(__i386__)
+
+/* Note: This fails on 386 cpus, cmpxchgl is a >= 486 instruction */
+static JS_INLINE int
+js_CompareAndSwap(jsword *w, jsword ov, jsword nv)
+{
+ unsigned int res;
+
+ __asm__ __volatile__ (
+ "lock\n"
+ "cmpxchgl %2, (%1)\n"
+ "sete %%al\n"
+ "andl $1, %%eax\n"
+ : "=a" (res)
+ : "r" (w), "r" (nv), "a" (ov)
+ : "cc", "memory");
+ return (int)res;
+}
+
+#elif (defined(__USLC__) || defined(_SCO_DS)) && defined(i386)
+
+/* Note: This fails on 386 cpus, cmpxchgl is a >= 486 instruction */
+
+asm int
+js_CompareAndSwap(jsword *w, jsword ov, jsword nv)
+{
+%ureg w, nv;
+ movl ov,%eax
+ lock
+ cmpxchgl nv,(w)
+ sete %al
+ andl $1,%eax
+%ureg w; mem ov, nv;
+ movl ov,%eax
+ movl nv,%ecx
+ lock
+ cmpxchgl %ecx,(w)
+ sete %al
+ andl $1,%eax
+%ureg nv;
+ movl ov,%eax
+ movl w,%edx
+ lock
+ cmpxchgl nv,(%edx)
+ sete %al
+ andl $1,%eax
+%mem w, ov, nv;
+ movl ov,%eax
+ movl nv,%ecx
+ movl w,%edx
+ lock
+ cmpxchgl %ecx,(%edx)
+ sete %al
+ andl $1,%eax
+}
+#pragma asm full_optimization js_CompareAndSwap
+
+#elif defined(SOLARIS) && defined(sparc) && defined(ULTRA_SPARC)
+
+static JS_INLINE int
+js_CompareAndSwap(jsword *w, jsword ov, jsword nv)
+{
+#if defined(__GNUC__)
+ unsigned int res;
+ JS_ASSERT(ov != nv);
+ asm volatile ("\
+stbar\n\
+cas [%1],%2,%3\n\
+cmp %2,%3\n\
+be,a 1f\n\
+mov 1,%0\n\
+mov 0,%0\n\
+1:"
+ : "=r" (res)
+ : "r" (w), "r" (ov), "r" (nv));
+ return (int)res;
+#else /* !__GNUC__ */
+ extern int compare_and_swap(jsword*, jsword, jsword);
+ JS_ASSERT(ov != nv);
+ return compare_and_swap(w, ov, nv);
+#endif
+}
+
+#elif defined(AIX)
+
+#include <sys/atomic_op.h>
+
+static JS_INLINE int
+js_CompareAndSwap(jsword *w, jsword ov, jsword nv)
+{
+ return !_check_lock((atomic_p)w, ov, nv);
+}
+
+#else
+
+#error "Define NSPR_LOCK if your platform lacks a compare-and-swap instruction."
+
+#endif /* arch-tests */
+
+#endif /* !NSPR_LOCK */
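Each platform variant above implements the same contract the thin-lock code below depends on: atomically store nv into *w if *w still equals ov, and return non-zero exactly when the swap happened. The fast path later in this file (see js_GetSlotThreadSafe) uses it in the classic acquire/release pattern; a condensed sketch:

    /* Acquire: an uncontended thin lock has owner == 0. */
    if (js_CompareAndSwap(&tl->owner, 0, me)) {
        /* ... critical section ... */

        /* Release: if another thread queued itself meanwhile, the swap
           back to 0 fails and the lock must be handed off. */
        if (!js_CompareAndSwap(&tl->owner, me, 0))
            js_Dequeue(tl);
    }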
+
+void
+js_InitLock(JSThinLock *tl)
+{
+#ifdef NSPR_LOCK
+ tl->owner = 0;
+ tl->fat = (JSFatLock*)JS_NEW_LOCK();
+#else
+ memset(tl, 0, sizeof(JSThinLock));
+#endif
+}
+
+void
+js_FinishLock(JSThinLock *tl)
+{
+#ifdef NSPR_LOCK
+ tl->owner = 0xdeadbeef;
+ if (tl->fat)
+ JS_DESTROY_LOCK(((JSLock*)tl->fat));
+#else
+ JS_ASSERT(tl->owner == 0);
+ JS_ASSERT(tl->fat == NULL);
+#endif
+}
+
+static void js_Dequeue(JSThinLock *);
+
+#ifdef DEBUG_SCOPE_COUNT
+
+#include <stdio.h>
+#include "jsdhash.h"
+
+static FILE *logfp;
+static JSDHashTable logtbl;
+
+typedef struct logentry {
+ JSDHashEntryStub stub;
+ char op;
+ const char *file;
+ int line;
+} logentry;
+
+static void
+logit(JSScope *scope, char op, const char *file, int line)
+{
+ logentry *entry;
+
+ if (!logfp) {
+ logfp = fopen("/tmp/scope.log", "w");
+ if (!logfp)
+ return;
+ setvbuf(logfp, NULL, _IONBF, 0);
+ }
+ fprintf(logfp, "%p %c %s %d\n", scope, op, file, line);
+
+ if (!logtbl.entryStore &&
+ !JS_DHashTableInit(&logtbl, JS_DHashGetStubOps(), NULL,
+ sizeof(logentry), 100)) {
+ return;
+ }
+ entry = (logentry *) JS_DHashTableOperate(&logtbl, scope, JS_DHASH_ADD);
+ if (!entry)
+ return;
+ entry->stub.key = scope;
+ entry->op = op;
+ entry->file = file;
+ entry->line = line;
+}
+
+void
+js_unlog_scope(JSScope *scope)
+{
+ if (!logtbl.entryStore)
+ return;
+ (void) JS_DHashTableOperate(&logtbl, scope, JS_DHASH_REMOVE);
+}
+
+# define LOGIT(scope,op) logit(scope, op, __FILE__, __LINE__)
+
+#else
+
+# define LOGIT(scope,op) /* nothing */
+
+#endif /* DEBUG_SCOPE_COUNT */
+
+/*
+ * Return true if scope's ownercx, or the ownercx of a single-threaded scope
+ * for which ownercx is waiting to become multi-threaded and shared, is cx.
+ * That condition implies deadlock in ClaimScope if cx's thread were to wait
+ * to share scope.
+ *
+ * (i) rt->gcLock held
+ */
+static JSBool
+WillDeadlock(JSScope *scope, JSContext *cx)
+{
+ JSContext *ownercx;
+
+ do {
+ ownercx = scope->ownercx;
+ if (ownercx == cx) {
+ JS_RUNTIME_METER(cx->runtime, deadlocksAvoided);
+ return JS_TRUE;
+ }
+ } while (ownercx && (scope = ownercx->scopeToShare) != NULL);
+ return JS_FALSE;
+}
+
+/*
+ * Make scope multi-threaded, i.e. share its ownership among contexts in rt
+ * using a "thin" or (if necessary due to contention) "fat" lock. Called only
+ * from ClaimScope, immediately below, when we detect deadlock were we to wait
+ * for scope's lock, because its ownercx is waiting on a scope owned by the
+ * calling cx.
+ *
+ * (i) rt->gcLock held
+ */
+static void
+ShareScope(JSRuntime *rt, JSScope *scope)
+{
+ JSScope **todop;
+
+ if (scope->u.link) {
+ for (todop = &rt->scopeSharingTodo; *todop != scope;
+ todop = &(*todop)->u.link) {
+ JS_ASSERT(*todop != NO_SCOPE_SHARING_TODO);
+ }
+ *todop = scope->u.link;
+ scope->u.link = NULL; /* null u.link for sanity ASAP */
+ JS_NOTIFY_ALL_CONDVAR(rt->scopeSharingDone);
+ }
+ js_InitLock(&scope->lock);
+ if (scope == rt->setSlotScope) {
+ /*
+ * Nesting locks on another thread that's using scope->ownercx: give
+ * the held lock a reentrancy count of 1 and set its lock.owner field
+ * directly (no compare-and-swap needed while scope->ownercx is still
+ * non-null). See below in ClaimScope, before the ShareScope call,
+ * for more on why this is necessary.
+ *
+ * If NSPR_LOCK is defined, we cannot deadlock holding rt->gcLock and
+ * acquiring scope->lock.fat here, against another thread holding that
+ * fat lock and trying to grab rt->gcLock. This is because no other
+ * thread can attempt to acquire scope->lock.fat until scope->ownercx
+ * is null *and* our thread has released rt->gcLock, which interlocks
+ * scope->ownercx's transition to null against tests of that member
+ * in ClaimScope.
+ */
+ scope->lock.owner = CX_THINLOCK_ID(scope->ownercx);
+#ifdef NSPR_LOCK
+ JS_ACQUIRE_LOCK((JSLock*)scope->lock.fat);
+#endif
+ scope->u.count = 1;
+ } else {
+ scope->u.count = 0;
+ }
+ js_FinishSharingScope(rt, scope);
+}
+
+/*
+ * js_FinishSharingScope is the tail part of ShareScope, split out to become a
+ * subroutine of JS_EndRequest too. The bulk of the work here involves making
+ * mutable strings in the scope's object's slots be immutable. We have to do
+ * this because such strings will soon be available to multiple threads, so
+ * their buffers can't be realloc'd any longer in js_ConcatStrings, and their
+ * members can't be modified by js_ConcatStrings, js_MinimizeDependentStrings,
+ * or js_UndependString.
+ *
+ * The last bit of work done by js_FinishSharingScope nulls scope->ownercx and
+ * updates rt->sharedScopes.
+ */
+#define MAKE_STRING_IMMUTABLE(rt, v, vp) \
+ JS_BEGIN_MACRO \
+ JSString *str_ = JSVAL_TO_STRING(v); \
+ uint8 *flagp_ = js_GetGCThingFlags(str_); \
+ if (*flagp_ & GCF_MUTABLE) { \
+ if (JSSTRING_IS_DEPENDENT(str_) && \
+ !js_UndependString(NULL, str_)) { \
+ JS_RUNTIME_METER(rt, badUndependStrings); \
+ *vp = JSVAL_VOID; \
+ } else { \
+ *flagp_ &= ~GCF_MUTABLE; \
+ } \
+ } \
+ JS_END_MACRO
+
+void
+js_FinishSharingScope(JSRuntime *rt, JSScope *scope)
+{
+ JSObject *obj;
+ uint32 nslots;
+ jsval v, *vp, *end;
+
+ obj = scope->object;
+ nslots = JS_MIN(obj->map->freeslot, obj->map->nslots);
+ for (vp = obj->slots, end = vp + nslots; vp < end; vp++) {
+ v = *vp;
+ if (JSVAL_IS_STRING(v))
+ MAKE_STRING_IMMUTABLE(rt, v, vp);
+ }
+
+ scope->ownercx = NULL; /* NB: set last, after lock init */
+ JS_RUNTIME_METER(rt, sharedScopes);
+}
+
+/*
+ * Given a scope with apparently non-null ownercx different from cx, try to
+ * set ownercx to cx, claiming exclusive (single-threaded) ownership of scope.
+ * If we claim ownership, return true. Otherwise, we wait for ownercx to be
+ * set to null (indicating that scope is multi-threaded); or if waiting would
+ * deadlock, we set ownercx to null ourselves via ShareScope. In any case,
+ * once ownercx is null we return false.
+ */
+static JSBool
+ClaimScope(JSScope *scope, JSContext *cx)
+{
+ JSRuntime *rt;
+ JSContext *ownercx;
+ jsrefcount saveDepth;
+ PRStatus stat;
+
+ rt = cx->runtime;
+ JS_RUNTIME_METER(rt, claimAttempts);
+ JS_LOCK_GC(rt);
+
+ /* Reload in case ownercx went away while we blocked on the lock. */
+ while ((ownercx = scope->ownercx) != NULL) {
+ /*
+ * Avoid selflock if ownercx is dead, or is not running a request, or
+ * has the same thread as cx. Set scope->ownercx to cx so that the
+ * matching JS_UNLOCK_SCOPE or JS_UNLOCK_OBJ macro call will take the
+ * fast path around the corresponding js_UnlockScope or js_UnlockObj
+ * function call.
+ *
+ * If scope->u.link is non-null, scope has already been inserted on
+ * the rt->scopeSharingTodo list, because another thread's context
+ * already wanted to lock scope while ownercx was running a request.
+ * We can't claim any scope whose u.link is non-null at this point,
+ * even if ownercx->requestDepth is 0 (see below where we suspend our
+ * request before waiting on rt->scopeSharingDone).
+ */
+ if (!scope->u.link &&
+ (!js_ValidContextPointer(rt, ownercx) ||
+ !ownercx->requestDepth ||
+ ownercx->thread == cx->thread)) {
+ JS_ASSERT(scope->u.count == 0);
+ scope->ownercx = cx;
+ JS_UNLOCK_GC(rt);
+ JS_RUNTIME_METER(rt, claimedScopes);
+ return JS_TRUE;
+ }
+
+ /*
+ * Avoid deadlock if scope's owner context is waiting on a scope that
+ * we own, by revoking scope's ownership. This approach to deadlock
+ * avoidance works because the engine never nests scope locks, except
+ * for the notable case of js_SetProtoOrParent (see jsobj.c).
+ *
+ * If cx could hold locks on ownercx->scopeToShare, or if ownercx
+ * could hold locks on scope, we would need to keep reentrancy counts
+ * for all such "flyweight" (ownercx != NULL) locks, so that control
+ * would unwind properly once these locks became "thin" or "fat".
+ * Apart from the js_SetProtoOrParent exception, the engine promotes
+ * a scope from exclusive to shared access only when locking, never
+ * when holding or unlocking.
+ *
+ * If ownercx's thread is calling js_SetProtoOrParent, trying to lock
+ * the inner scope (the scope of the object being set as the prototype
+ * of the outer object), ShareScope will find the outer object's scope
+ * at rt->setSlotScope. If it's the same as scope, we give it a lock
+ * held by ownercx's thread with reentrancy count of 1, then we return
+ * here and break. After that we unwind to js_[GS]etSlotThreadSafe or
+ * js_LockScope (our caller), where we wait on the newly-fattened lock
+ * until ownercx's thread unwinds from js_SetProtoOrParent.
+ *
+ * Avoid deadlock before any of this scope/context cycle detection if
+ * cx is on the active GC's thread, because in that case, no requests
+ * will run until the GC completes. Any scope wanted by the GC (from
+ * a finalizer) that can't be claimed must be slated for sharing.
+ */
+ if (rt->gcThread == cx->thread ||
+ (ownercx->scopeToShare &&
+ WillDeadlock(ownercx->scopeToShare, cx))) {
+ ShareScope(rt, scope);
+ break;
+ }
+
+ /*
+ * Thanks to the non-zero NO_SCOPE_SHARING_TODO link terminator, we
+ * can decide whether scope is on rt->scopeSharingTodo with a single
+ * non-null test, and avoid double-insertion bugs.
+ */
+ if (!scope->u.link) {
+ scope->u.link = rt->scopeSharingTodo;
+ rt->scopeSharingTodo = scope;
+ js_HoldObjectMap(cx, &scope->map);
+ }
+
+ /*
+ * Inline JS_SuspendRequest before we wait on rt->scopeSharingDone,
+ * saving and clearing cx->requestDepth so we don't deadlock if the
+ * GC needs to run on ownercx.
+ *
+ * Unlike JS_SuspendRequest and JS_EndRequest, we must take care not
+ * to decrement rt->requestCount if cx is active on the GC's thread,
+ * because the GC has already reduced rt->requestCount to exclude all
+ * such contexts.
+ */
+ saveDepth = cx->requestDepth;
+ if (saveDepth) {
+ cx->requestDepth = 0;
+ if (rt->gcThread != cx->thread) {
+ JS_ASSERT(rt->requestCount > 0);
+ rt->requestCount--;
+ if (rt->requestCount == 0)
+ JS_NOTIFY_REQUEST_DONE(rt);
+ }
+ }
+
+ /*
+ * We know that some other thread's context owns scope, which is now
+ * linked onto rt->scopeSharingTodo, awaiting the end of that other
+ * thread's request. So it is safe to wait on rt->scopeSharingDone.
+ */
+ cx->scopeToShare = scope;
+ stat = PR_WaitCondVar(rt->scopeSharingDone, PR_INTERVAL_NO_TIMEOUT);
+ JS_ASSERT(stat != PR_FAILURE);
+
+ /*
+ * Inline JS_ResumeRequest after waiting on rt->scopeSharingDone,
+ * restoring cx->requestDepth. Same note as above for the inlined,
+ * specialized JS_SuspendRequest code: beware rt->gcThread.
+ */
+ if (saveDepth) {
+ if (rt->gcThread != cx->thread) {
+ while (rt->gcLevel > 0)
+ JS_AWAIT_GC_DONE(rt);
+ rt->requestCount++;
+ }
+ cx->requestDepth = saveDepth;
+ }
+
+ /*
+ * Don't clear cx->scopeToShare until after we're through waiting on
+ * all condition variables protected by rt->gcLock -- that includes
+ * rt->scopeSharingDone *and* rt->gcDone (hidden in JS_AWAIT_GC_DONE,
+ * in the inlined JS_ResumeRequest code immediately above).
+ *
+ * Otherwise, the GC could easily deadlock with another thread that
+ * owns a scope wanted by a finalizer. By keeping cx->scopeToShare
+ * set till here, we ensure that such deadlocks are detected, which
+ * results in the finalized object's scope being shared (it must, of
+ * course, have other, live objects sharing it).
+ */
+ cx->scopeToShare = NULL;
+ }
+
+ JS_UNLOCK_GC(rt);
+ return JS_FALSE;
+}
+
+/* Exported to js.c, which calls it via OBJ_GET_* and JSVAL_IS_* macros. */
+JS_FRIEND_API(jsval)
+js_GetSlotThreadSafe(JSContext *cx, JSObject *obj, uint32 slot)
+{
+ jsval v;
+ JSScope *scope;
+#ifndef NSPR_LOCK
+ JSThinLock *tl;
+ jsword me;
+#endif
+
+ /*
+ * We handle non-native objects via JSObjectOps.getRequiredSlot, treating
+ * all slots starting from 0 as required slots. A property definition or
+ * some prior arrangement must have allocated slot.
+ *
+ * Note once again (see jspubtd.h, before JSGetRequiredSlotOp's typedef)
+ * the crucial distinction between a |required slot number| that's passed
+ * to the get/setRequiredSlot JSObjectOps, and a |reserved slot index|
+ * passed to the JS_Get/SetReservedSlot APIs.
+ */
+ if (!OBJ_IS_NATIVE(obj))
+ return OBJ_GET_REQUIRED_SLOT(cx, obj, slot);
+
+ /*
+ * Native object locking is inlined here to optimize the single-threaded
+ * and contention-free multi-threaded cases.
+ */
+ scope = OBJ_SCOPE(obj);
+ JS_ASSERT(scope->ownercx != cx);
+ JS_ASSERT(obj->slots && slot < obj->map->freeslot);
+
+ /*
+ * Avoid locking if called from the GC (see GC_AWARE_GET_SLOT in jsobj.h).
+ * Also avoid locking an object owning a sealed scope. If neither of those
+ * special cases applies, try to claim scope's flyweight lock from whatever
+ * context may have had it in an earlier request.
+ */
+ if (CX_THREAD_IS_RUNNING_GC(cx) ||
+ (SCOPE_IS_SEALED(scope) && scope->object == obj) ||
+ (scope->ownercx && ClaimScope(scope, cx))) {
+ return obj->slots[slot];
+ }
+
+#ifndef NSPR_LOCK
+ tl = &scope->lock;
+ me = CX_THINLOCK_ID(cx);
+ JS_ASSERT(CURRENT_THREAD_IS_ME(me));
+ if (js_CompareAndSwap(&tl->owner, 0, me)) {
+ /*
+ * Got the lock with one compare-and-swap. Even so, someone else may
+ * have mutated obj so it now has its own scope and lock, which would
+ * require either a restart from the top of this routine, or a thin
+ * lock release followed by fat lock acquisition.
+ */
+ if (scope == OBJ_SCOPE(obj)) {
+ v = obj->slots[slot];
+ if (!js_CompareAndSwap(&tl->owner, me, 0)) {
+ /* Assert that scope locks never revert to flyweight. */
+ JS_ASSERT(scope->ownercx != cx);
+ LOGIT(scope, '1');
+ scope->u.count = 1;
+ js_UnlockObj(cx, obj);
+ }
+ return v;
+ }
+ if (!js_CompareAndSwap(&tl->owner, me, 0))
+ js_Dequeue(tl);
+ }
+ else if (Thin_RemoveWait(ReadWord(tl->owner)) == me) {
+ return obj->slots[slot];
+ }
+#endif
+
+ js_LockObj(cx, obj);
+ v = obj->slots[slot];
+
+ /*
+ * Test whether cx took ownership of obj's scope during js_LockObj.
+ *
+ * This does not mean that a given scope reverted to flyweight from "thin"
+ * or "fat" -- it does mean that obj's map pointer changed due to another
+ * thread setting a property, requiring obj to cease sharing a prototype
+ * object's scope (whose lock was not flyweight, else we wouldn't be here
+ * in the first place!).
+ */
+ scope = OBJ_SCOPE(obj);
+ if (scope->ownercx != cx)
+ js_UnlockScope(cx, scope);
+ return v;
+}
+
+void
+js_SetSlotThreadSafe(JSContext *cx, JSObject *obj, uint32 slot, jsval v)
+{
+ JSScope *scope;
+#ifndef NSPR_LOCK
+ JSThinLock *tl;
+ jsword me;
+#endif
+
+ /* Any string stored in a thread-safe object must be immutable. */
+ if (JSVAL_IS_STRING(v))
+ MAKE_STRING_IMMUTABLE(cx->runtime, v, &v);
+
+ /*
+ * We handle non-native objects via JSObjectOps.setRequiredSlot, as above
+ * for the Get case.
+ */
+ if (!OBJ_IS_NATIVE(obj)) {
+ OBJ_SET_REQUIRED_SLOT(cx, obj, slot, v);
+ return;
+ }
+
+ /*
+ * Native object locking is inlined here to optimize the single-threaded
+ * and contention-free multi-threaded cases.
+ */
+ scope = OBJ_SCOPE(obj);
+ JS_ASSERT(scope->ownercx != cx);
+ JS_ASSERT(obj->slots && slot < obj->map->freeslot);
+
+ /*
+ * Avoid locking if called from the GC (see GC_AWARE_GET_SLOT in jsobj.h).
+ * Also avoid locking an object owning a sealed scope. If neither of those
+ * special cases applies, try to claim scope's flyweight lock from whatever
+ * context may have had it in an earlier request.
+ */
+ if (CX_THREAD_IS_RUNNING_GC(cx) ||
+ (SCOPE_IS_SEALED(scope) && scope->object == obj) ||
+ (scope->ownercx && ClaimScope(scope, cx))) {
+ obj->slots[slot] = v;
+ return;
+ }
+
+#ifndef NSPR_LOCK
+ tl = &scope->lock;
+ me = CX_THINLOCK_ID(cx);
+ JS_ASSERT(CURRENT_THREAD_IS_ME(me));
+ if (js_CompareAndSwap(&tl->owner, 0, me)) {
+ if (scope == OBJ_SCOPE(obj)) {
+ obj->slots[slot] = v;
+ if (!js_CompareAndSwap(&tl->owner, me, 0)) {
+ /* Assert that scope locks never revert to flyweight. */
+ JS_ASSERT(scope->ownercx != cx);
+ LOGIT(scope, '1');
+ scope->u.count = 1;
+ js_UnlockObj(cx, obj);
+ }
+ return;
+ }
+ if (!js_CompareAndSwap(&tl->owner, me, 0))
+ js_Dequeue(tl);
+ }
+ else if (Thin_RemoveWait(ReadWord(tl->owner)) == me) {
+ obj->slots[slot] = v;
+ return;
+ }
+#endif
+
+ js_LockObj(cx, obj);
+ obj->slots[slot] = v;
+
+ /*
+ * Same drill as above, in js_GetSlotThreadSafe. Note that we cannot
+ * assume obj has its own mutable scope (where scope->object == obj) yet,
+ * because OBJ_SET_SLOT is called for the "universal", common slots such
+ * as JSSLOT_PROTO and JSSLOT_PARENT, without a prior js_GetMutableScope.
+ * See also the JSPROP_SHARED attribute and its usage.
+ */
+ scope = OBJ_SCOPE(obj);
+ if (scope->ownercx != cx)
+ js_UnlockScope(cx, scope);
+}
+
+#ifndef NSPR_LOCK
+
+static JSFatLock *
+NewFatlock()
+{
+ JSFatLock *fl = (JSFatLock *)malloc(sizeof(JSFatLock)); /* for now */
+ if (!fl) return NULL;
+ fl->susp = 0;
+ fl->next = NULL;
+ fl->prevp = NULL;
+ fl->slock = PR_NewLock();
+ fl->svar = PR_NewCondVar(fl->slock);
+ return fl;
+}
+
+static void
+DestroyFatlock(JSFatLock *fl)
+{
+ PR_DestroyLock(fl->slock);
+ PR_DestroyCondVar(fl->svar);
+ free(fl);
+}
+
+static JSFatLock *
+ListOfFatlocks(int listc)
+{
+ JSFatLock *m;
+ JSFatLock *m0;
+ int i;
+
+ JS_ASSERT(listc>0);
+ m0 = m = NewFatlock();
+ for (i=1; i<listc; i++) {
+ m->next = NewFatlock();
+ m = m->next;
+ }
+ return m0;
+}
+
+static void
+DeleteListOfFatlocks(JSFatLock *m)
+{
+ JSFatLock *m0;
+ for (; m; m=m0) {
+ m0 = m->next;
+ DestroyFatlock(m);
+ }
+}
+
+static JSFatLockTable *fl_list_table = NULL;
+static uint32 fl_list_table_len = 0;
+static uint32 fl_list_chunk_len = 0;
+
+static JSFatLock *
+GetFatlock(void *id)
+{
+ JSFatLock *m;
+
+ uint32 i = GLOBAL_LOCK_INDEX(id);
+ if (fl_list_table[i].free == NULL) {
+#ifdef DEBUG
+ if (fl_list_table[i].taken)
+ printf("Ran out of fat locks!\n");
+#endif
+ fl_list_table[i].free = ListOfFatlocks(fl_list_chunk_len);
+ }
+ m = fl_list_table[i].free;
+ fl_list_table[i].free = m->next;
+ m->susp = 0;
+ m->next = fl_list_table[i].taken;
+ m->prevp = &fl_list_table[i].taken;
+ if (fl_list_table[i].taken)
+ fl_list_table[i].taken->prevp = &m->next;
+ fl_list_table[i].taken = m;
+ return m;
+}
+
+static void
+PutFatlock(JSFatLock *m, void *id)
+{
+ uint32 i;
+ if (m == NULL)
+ return;
+
+ /* Unlink m from fl_list_table[i].taken. */
+ *m->prevp = m->next;
+ if (m->next)
+ m->next->prevp = m->prevp;
+
+ /* Insert m in fl_list_table[i].free. */
+ i = GLOBAL_LOCK_INDEX(id);
+ m->next = fl_list_table[i].free;
+ fl_list_table[i].free = m;
+}
+
+#endif /* !NSPR_LOCK */
+
+JSBool
+js_SetupLocks(int listc, int globc)
+{
+#ifndef NSPR_LOCK
+ uint32 i;
+
+ if (global_locks)
+ return JS_TRUE;
+#ifdef DEBUG
+ if (listc > 10000 || listc < 0) /* listc == fat lock list chunk length */
+ printf("Bad number %d in js_SetupLocks()!\n", listc);
+ if (globc > 100 || globc < 0) /* globc == number of global locks */
+ printf("Bad number %d in js_SetupLocks()!\n", listc);
+#endif
+ global_locks_log2 = JS_CeilingLog2(globc);
+ global_locks_mask = JS_BITMASK(global_locks_log2);
+ global_lock_count = JS_BIT(global_locks_log2);
+ global_locks = (PRLock **) malloc(global_lock_count * sizeof(PRLock*));
+ if (!global_locks)
+ return JS_FALSE;
+ for (i = 0; i < global_lock_count; i++) {
+ global_locks[i] = PR_NewLock();
+ if (!global_locks[i]) {
+ global_lock_count = i;
+ js_CleanupLocks();
+ return JS_FALSE;
+ }
+ }
+ fl_list_table = (JSFatLockTable *) malloc(i * sizeof(JSFatLockTable));
+ if (!fl_list_table) {
+ js_CleanupLocks();
+ return JS_FALSE;
+ }
+ fl_list_table_len = global_lock_count;
+ for (i = 0; i < global_lock_count; i++)
+ fl_list_table[i].free = fl_list_table[i].taken = NULL;
+ fl_list_chunk_len = listc;
+#endif /* !NSPR_LOCK */
+ return JS_TRUE;
+}
+
+void
+js_CleanupLocks()
+{
+#ifndef NSPR_LOCK
+ uint32 i;
+
+ if (global_locks) {
+ for (i = 0; i < global_lock_count; i++)
+ PR_DestroyLock(global_locks[i]);
+ free(global_locks);
+ global_locks = NULL;
+ global_lock_count = 1;
+ global_locks_log2 = 0;
+ global_locks_mask = 0;
+ }
+ if (fl_list_table) {
+ for (i = 0; i < fl_list_table_len; i++) {
+ DeleteListOfFatlocks(fl_list_table[i].free);
+ fl_list_table[i].free = NULL;
+ DeleteListOfFatlocks(fl_list_table[i].taken);
+ fl_list_table[i].taken = NULL;
+ }
+ free(fl_list_table);
+ fl_list_table = NULL;
+ fl_list_table_len = 0;
+ }
+#endif /* !NSPR_LOCK */
+}
+
+#ifndef NSPR_LOCK
+
+/*
+ * Fast locking and unlocking is implemented by delaying the allocation of a
+ * system lock (fat lock) until contention. As long as a locking thread A
+ * runs uncontended, the lock is represented solely by storing A's identity in
+ * the object being locked.
+ *
+ * If another thread B tries to lock the object currently locked by A, B is
+ * enqueued into a fat lock structure (which might have to be allocated and
+ * pointed to by the object), and suspended using NSPR conditional variables
+ * (wait). A wait bit (Bacon bit) is set in the lock word of the object,
+ * signalling to A that when releasing the lock, B must be dequeued and
+ * notified.
+ *
+ * The basic operation of the locking primitives (js_Lock, js_Unlock,
+ * js_Enqueue, and js_Dequeue) is compare-and-swap. Hence, when locking into
+ * the word pointed at by p, compare-and-swap(p, 0, A) success implies that p
+ * is unlocked. Similarly, when unlocking p, if compare-and-swap(p, A, 0)
+ * succeeds this implies that p is uncontended (no one is waiting because the
+ * wait bit is not set).
+ *
+ * When dequeueing, the lock is released, and one of the threads suspended on
+ * the lock is notified. If other threads still are waiting, the wait bit is
+ * kept (in js_Enqueue), and if not, the fat lock is deallocated.
+ *
+ * The functions js_Enqueue, js_Dequeue, js_SuspendThread, and js_ResumeThread
+ * are serialized using a global lock. For scalability, a hashtable of global
+ * locks is used, which is indexed modulo the thin lock pointer.
+ */
+
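+/*
+ * Illustration only (not part of the original file): a minimal sketch of the
+ * uncontended thin-lock protocol described above, using GCC's
+ * __sync_bool_compare_and_swap as a stand-in for js_CompareAndSwap. The
+ * contended path (setting the wait bit and parking on a fat lock) is
+ * omitted; see js_Enqueue and js_Dequeue below for the real thing.
+ */
+#if 0
+#include <stdint.h>
+
+typedef struct ThinLockSketch {
+    intptr_t owner;             /* 0 when unlocked, holder's id otherwise */
+} ThinLockSketch;
+
+static int
+ThinTryLock(ThinLockSketch *tl, intptr_t me)
+{
+    /* Succeeds only if the lock word is 0, i.e. the lock is free. */
+    return __sync_bool_compare_and_swap(&tl->owner, (intptr_t)0, me);
+}
+
+static int
+ThinTryUnlock(ThinLockSketch *tl, intptr_t me)
+{
+    /*
+     * Succeeds only if no waiter has set the low (Bacon) bit; on failure a
+     * real implementation must dequeue and notify a suspended waiter.
+     */
+    return __sync_bool_compare_and_swap(&tl->owner, me, (intptr_t)0);
+}
+#endif
+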
+/*
+ * Invariants:
+ * (i) global lock is held
+ * (ii) fl->susp >= 0
+ */
+static int
+js_SuspendThread(JSThinLock *tl)
+{
+ JSFatLock *fl;
+ PRStatus stat;
+
+ if (tl->fat == NULL)
+ fl = tl->fat = GetFatlock(tl);
+ else
+ fl = tl->fat;
+ JS_ASSERT(fl->susp >= 0);
+ fl->susp++;
+ PR_Lock(fl->slock);
+ js_UnlockGlobal(tl);
+ stat = PR_WaitCondVar(fl->svar, PR_INTERVAL_NO_TIMEOUT);
+ JS_ASSERT(stat != PR_FAILURE);
+ PR_Unlock(fl->slock);
+ js_LockGlobal(tl);
+ fl->susp--;
+ if (fl->susp == 0) {
+ PutFatlock(fl, tl);
+ tl->fat = NULL;
+ }
+ return tl->fat == NULL;
+}
+
+/*
+ * (i) global lock is held
+ * (ii) fl->susp > 0
+ */
+static void
+js_ResumeThread(JSThinLock *tl)
+{
+ JSFatLock *fl = tl->fat;
+ PRStatus stat;
+
+ JS_ASSERT(fl != NULL);
+ JS_ASSERT(fl->susp > 0);
+ PR_Lock(fl->slock);
+ js_UnlockGlobal(tl);
+ stat = PR_NotifyCondVar(fl->svar);
+ JS_ASSERT(stat != PR_FAILURE);
+ PR_Unlock(fl->slock);
+}
+
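+/*
+ * Slow path behind js_Lock: loop either marking the current owner word with
+ * the wait (Bacon) bit and suspending on the fat lock, or -- if the owner
+ * released the lock in the meantime -- taking ownership with a
+ * compare-and-swap against 0. js_SuspendThread reports whether the fat lock
+ * was recycled, which tells us whether to keep the wait bit set in our own
+ * id once we finally own tl (other waiters may remain).
+ */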
+static void
+js_Enqueue(JSThinLock *tl, jsword me)
+{
+ jsword o, n;
+
+ js_LockGlobal(tl);
+ for (;;) {
+ o = ReadWord(tl->owner);
+ n = Thin_SetWait(o);
+ if (o != 0 && js_CompareAndSwap(&tl->owner, o, n)) {
+ if (js_SuspendThread(tl))
+ me = Thin_RemoveWait(me);
+ else
+ me = Thin_SetWait(me);
+ }
+ else if (js_CompareAndSwap(&tl->owner, 0, me)) {
+ js_UnlockGlobal(tl);
+ return;
+ }
+ }
+}
+
+static void
+js_Dequeue(JSThinLock *tl)
+{
+ jsword o;
+
+ js_LockGlobal(tl);
+ o = ReadWord(tl->owner);
+ JS_ASSERT(Thin_GetWait(o) != 0);
+ JS_ASSERT(tl->fat != NULL);
+ if (!js_CompareAndSwap(&tl->owner, o, 0)) /* release it */
+ JS_ASSERT(0);
+ js_ResumeThread(tl);
+}
+
+JS_INLINE void
+js_Lock(JSThinLock *tl, jsword me)
+{
+ JS_ASSERT(CURRENT_THREAD_IS_ME(me));
+ if (js_CompareAndSwap(&tl->owner, 0, me))
+ return;
+ if (Thin_RemoveWait(ReadWord(tl->owner)) != me)
+ js_Enqueue(tl, me);
+#ifdef DEBUG
+ else
+ JS_ASSERT(0);
+#endif
+}
+
+JS_INLINE void
+js_Unlock(JSThinLock *tl, jsword me)
+{
+ JS_ASSERT(CURRENT_THREAD_IS_ME(me));
+
+ /*
+ * Only me can hold the lock; there is no need to use an atomic
+ * compare-and-swap for this common case.
+ */
+ if (tl->owner == me) {
+ tl->owner = 0;
+ return;
+ }
+ JS_ASSERT(Thin_GetWait(tl->owner));
+ if (Thin_RemoveWait(ReadWord(tl->owner)) == me)
+ js_Dequeue(tl);
+#ifdef DEBUG
+ else
+ JS_ASSERT(0); /* unbalanced unlock */
+#endif
+}
+
+#endif /* !NSPR_LOCK */
+
+void
+js_LockRuntime(JSRuntime *rt)
+{
+ PR_Lock(rt->rtLock);
+#ifdef DEBUG
+ rt->rtLockOwner = js_CurrentThreadId();
+#endif
+}
+
+void
+js_UnlockRuntime(JSRuntime *rt)
+{
+#ifdef DEBUG
+ rt->rtLockOwner = 0;
+#endif
+ PR_Unlock(rt->rtLock);
+}
+
+void
+js_LockScope(JSContext *cx, JSScope *scope)
+{
+ jsword me = CX_THINLOCK_ID(cx);
+
+ JS_ASSERT(CURRENT_THREAD_IS_ME(me));
+ JS_ASSERT(scope->ownercx != cx);
+ if (CX_THREAD_IS_RUNNING_GC(cx))
+ return;
+ if (scope->ownercx && ClaimScope(scope, cx))
+ return;
+
+ if (Thin_RemoveWait(ReadWord(scope->lock.owner)) == me) {
+ JS_ASSERT(scope->u.count > 0);
+ LOGIT(scope, '+');
+ scope->u.count++;
+ } else {
+ JSThinLock *tl = &scope->lock;
+ JS_LOCK0(tl, me);
+ JS_ASSERT(scope->u.count == 0);
+ LOGIT(scope, '1');
+ scope->u.count = 1;
+ }
+}
+
+void
+js_UnlockScope(JSContext *cx, JSScope *scope)
+{
+ jsword me = CX_THINLOCK_ID(cx);
+
+ /* We hope compilers use me instead of reloading cx->thread in the macro. */
+ if (CX_THREAD_IS_RUNNING_GC(cx))
+ return;
+ if (cx->lockedSealedScope == scope) {
+ cx->lockedSealedScope = NULL;
+ return;
+ }
+
+ /*
+ * If scope->ownercx is not null, it's likely that two contexts that are not
+ * using requests nested locks for scope. The first context, cx here, claimed
+ * scope; the second, scope->ownercx here, re-claimed it because the first
+ * was not in a request, or was on the same thread. We don't want to keep
+ * track of such nesting, because it penalizes the common non-nested case.
+ * Instead of asserting here and silently coping, we simply re-claim scope
+ * for cx and return.
+ *
+ * See http://bugzilla.mozilla.org/show_bug.cgi?id=229200 for a real world
+ * case where an asymmetric thread model (Mozilla's main thread is known
+ * to be the only thread that runs the GC) combined with multiple contexts
+ * per thread has led to such request-less nesting.
+ */
+ if (scope->ownercx) {
+ JS_ASSERT(scope->u.count == 0);
+ JS_ASSERT(scope->lock.owner == 0);
+ scope->ownercx = cx;
+ return;
+ }
+
+ JS_ASSERT(scope->u.count > 0);
+ if (Thin_RemoveWait(ReadWord(scope->lock.owner)) != me) {
+ JS_ASSERT(0); /* unbalanced unlock */
+ return;
+ }
+ LOGIT(scope, '-');
+ if (--scope->u.count == 0) {
+ JSThinLock *tl = &scope->lock;
+ JS_UNLOCK0(tl, me);
+ }
+}
+
+/*
+ * NB: oldscope may be null if our caller is js_GetMutableScope and it just
+ * dropped the last reference to oldscope.
+ */
+void
+js_TransferScopeLock(JSContext *cx, JSScope *oldscope, JSScope *newscope)
+{
+ jsword me;
+ JSThinLock *tl;
+
+ JS_ASSERT(JS_IS_SCOPE_LOCKED(cx, newscope));
+
+ /*
+ * If the last reference to oldscope went away, newscope needs no lock
+ * state update.
+ */
+ if (!oldscope)
+ return;
+ JS_ASSERT(JS_IS_SCOPE_LOCKED(cx, oldscope));
+
+ /*
+ * Special case in js_LockScope and js_UnlockScope for the GC calling
+ * code that locks, unlocks, or mutates. Nothing to do in these cases,
+ * because oldscope and newscope were "locked" by the GC thread, so neither
+ * was actually locked.
+ */
+ if (CX_THREAD_IS_RUNNING_GC(cx))
+ return;
+
+ /*
+ * Special case in js_LockObj and js_UnlockScope for locking the sealed
+ * scope of an object that owns that scope (the prototype or mutated obj
+ * for which OBJ_SCOPE(obj)->object == obj), and unlocking it.
+ */
+ JS_ASSERT(cx->lockedSealedScope != newscope);
+ if (cx->lockedSealedScope == oldscope) {
+ JS_ASSERT(newscope->ownercx == cx ||
+ (!newscope->ownercx && newscope->u.count == 1));
+ cx->lockedSealedScope = NULL;
+ return;
+ }
+
+ /*
+ * If oldscope is single-threaded, there's nothing to do.
+ */
+ if (oldscope->ownercx) {
+ JS_ASSERT(oldscope->ownercx == cx);
+ JS_ASSERT(newscope->ownercx == cx ||
+ (!newscope->ownercx && newscope->u.count == 1));
+ return;
+ }
+
+ /*
+ * We transfer oldscope->u.count only if newscope is not single-threaded.
+ * Flow unwinds from here through some number of JS_UNLOCK_SCOPE and/or
+ * JS_UNLOCK_OBJ macro calls, which will decrement newscope->u.count only
+ * if they find newscope->ownercx != cx.
+ */
+ if (newscope->ownercx != cx) {
+ JS_ASSERT(!newscope->ownercx);
+ newscope->u.count = oldscope->u.count;
+ }
+
+ /*
+ * Reset oldscope's lock state so that it is completely unlocked.
+ */
+ LOGIT(oldscope, '0');
+ oldscope->u.count = 0;
+ tl = &oldscope->lock;
+ me = CX_THINLOCK_ID(cx);
+ JS_UNLOCK0(tl, me);
+}
+
+void
+js_LockObj(JSContext *cx, JSObject *obj)
+{
+ JSScope *scope;
+
+ JS_ASSERT(OBJ_IS_NATIVE(obj));
+
+ /*
+ * We must test whether the GC is calling and return without mutating any
+ * state, especially cx->lockedSealedScope. Note asymmetry with respect to
+ * js_UnlockObj, which is a thin layer on top of js_UnlockScope.
+ */
+ if (CX_THREAD_IS_RUNNING_GC(cx))
+ return;
+
+ for (;;) {
+ scope = OBJ_SCOPE(obj);
+ if (SCOPE_IS_SEALED(scope) && scope->object == obj &&
+ !cx->lockedSealedScope) {
+ cx->lockedSealedScope = scope;
+ return;
+ }
+
+ js_LockScope(cx, scope);
+
+ /* If obj still has this scope, we're done. */
+ if (scope == OBJ_SCOPE(obj))
+ return;
+
+ /* Lost a race with a mutator; retry with obj's new scope. */
+ js_UnlockScope(cx, scope);
+ }
+}
+
+void
+js_UnlockObj(JSContext *cx, JSObject *obj)
+{
+ JS_ASSERT(OBJ_IS_NATIVE(obj));
+ js_UnlockScope(cx, OBJ_SCOPE(obj));
+}
+
+#ifdef DEBUG
+
+JSBool
+js_IsRuntimeLocked(JSRuntime *rt)
+{
+ return js_CurrentThreadId() == rt->rtLockOwner;
+}
+
+JSBool
+js_IsObjLocked(JSContext *cx, JSObject *obj)
+{
+ JSScope *scope = OBJ_SCOPE(obj);
+
+ return MAP_IS_NATIVE(&scope->map) && js_IsScopeLocked(cx, scope);
+}
+
+JSBool
+js_IsScopeLocked(JSContext *cx, JSScope *scope)
+{
+ /* Special case: the GC locking any object's scope, see js_LockScope. */
+ if (CX_THREAD_IS_RUNNING_GC(cx))
+ return JS_TRUE;
+
+ /* Special case: locked object owning a sealed scope, see js_LockObj. */
+ if (cx->lockedSealedScope == scope)
+ return JS_TRUE;
+
+ /*
+ * General case: the scope is either exclusively owned (by cx), or it has
+ * a thin or fat lock to cope with shared (concurrent) ownership.
+ */
+ if (scope->ownercx) {
+ JS_ASSERT(scope->ownercx == cx || scope->ownercx->thread == cx->thread);
+ return JS_TRUE;
+ }
+ return js_CurrentThreadId() ==
+ ((JSThread *)Thin_RemoveWait(ReadWord(scope->lock.owner)))->id;
+}
+
+#endif /* DEBUG */
+#endif /* JS_THREADSAFE */
diff --git a/third_party/js-1.7/jslock.h b/third_party/js-1.7/jslock.h
new file mode 100644
index 0000000..f9ed03d
--- /dev/null
+++ b/third_party/js-1.7/jslock.h
@@ -0,0 +1,266 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+#ifndef jslock_h___
+#define jslock_h___
+
+#ifdef JS_THREADSAFE
+
+#include "jstypes.h"
+#include "pratom.h"
+#include "prlock.h"
+#include "prcvar.h"
+#include "prthread.h"
+
+#include "jsprvtd.h" /* for JSScope, etc. */
+#include "jspubtd.h" /* for JSRuntime, etc. */
+
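+/*
+ * The low bit of a thin-lock owner word is the wait ("Bacon") bit; the
+ * remaining bits hold the owning thread's id (a JSThread pointer per
+ * CX_THINLOCK_ID below, whose alignment leaves bit 0 clear). These helpers
+ * test, set, and strip that bit.
+ */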
+#define Thin_GetWait(W) ((jsword)(W) & 0x1)
+#define Thin_SetWait(W) ((jsword)(W) | 0x1)
+#define Thin_RemoveWait(W) ((jsword)(W) & ~0x1)
+
+typedef struct JSFatLock JSFatLock;
+
+struct JSFatLock {
+ int susp;
+ PRLock *slock;
+ PRCondVar *svar;
+ JSFatLock *next;
+ JSFatLock **prevp;
+};
+
+typedef struct JSThinLock {
+ jsword owner;
+ JSFatLock *fat;
+} JSThinLock;
+
+#define CX_THINLOCK_ID(cx) ((jsword)(cx)->thread)
+#define CURRENT_THREAD_IS_ME(me) (((JSThread *)me)->id == js_CurrentThreadId())
+
+typedef PRLock JSLock;
+
+typedef struct JSFatLockTable {
+ JSFatLock *free;
+ JSFatLock *taken;
+} JSFatLockTable;
+
+/*
+ * Atomic increment and decrement for a reference counter, given jsrefcount *p.
+ * NB: jsrefcount is int32, aka PRInt32, so that pratom.h functions work.
+ */
+#define JS_ATOMIC_INCREMENT(p) PR_AtomicIncrement((PRInt32 *)(p))
+#define JS_ATOMIC_DECREMENT(p) PR_AtomicDecrement((PRInt32 *)(p))
+#define JS_ATOMIC_ADD(p,v) PR_AtomicAdd((PRInt32 *)(p), (PRInt32)(v))
+
+#define js_CurrentThreadId() (jsword)PR_GetCurrentThread()
+#define JS_NEW_LOCK() PR_NewLock()
+#define JS_DESTROY_LOCK(l) PR_DestroyLock(l)
+#define JS_ACQUIRE_LOCK(l) PR_Lock(l)
+#define JS_RELEASE_LOCK(l) PR_Unlock(l)
+#define JS_LOCK0(P,M) js_Lock(P,M)
+#define JS_UNLOCK0(P,M) js_Unlock(P,M)
+
+#define JS_NEW_CONDVAR(l) PR_NewCondVar(l)
+#define JS_DESTROY_CONDVAR(cv) PR_DestroyCondVar(cv)
+#define JS_WAIT_CONDVAR(cv,to) PR_WaitCondVar(cv,to)
+#define JS_NO_TIMEOUT PR_INTERVAL_NO_TIMEOUT
+#define JS_NOTIFY_CONDVAR(cv) PR_NotifyCondVar(cv)
+#define JS_NOTIFY_ALL_CONDVAR(cv) PR_NotifyAllCondVar(cv)
+
+/*
+ * Include jsscope.h so JS_LOCK_OBJ macro callers don't have to include it.
+ * Since there is a JSThinLock member in JSScope, we can't nest this include
+ * much earlier (see JSThinLock's typedef, above). Yes, that means there is
+ * an #include cycle between jslock.h and jsscope.h: moderate-sized XXX here,
+ * to be fixed by moving JS_LOCK_SCOPE to jsscope.h, JS_LOCK_OBJ to jsobj.h,
+ * and so on.
+ */
+#include "jsscope.h"
+
+#define JS_LOCK_RUNTIME(rt) js_LockRuntime(rt)
+#define JS_UNLOCK_RUNTIME(rt) js_UnlockRuntime(rt)
+
+/*
+ * NB: The JS_LOCK_OBJ and JS_UNLOCK_OBJ macros work *only* on native objects
+ * (objects for which OBJ_IS_NATIVE returns true). All uses of these macros in
+ * the engine are predicated on OBJ_IS_NATIVE or equivalent checks. These uses
+ * are for optimizations above the JSObjectOps layer, under which object locks
+ * normally hide.
+ */
+#define JS_LOCK_OBJ(cx,obj) ((OBJ_SCOPE(obj)->ownercx == (cx)) \
+ ? (void)0 \
+ : (js_LockObj(cx, obj)))
+#define JS_UNLOCK_OBJ(cx,obj) ((OBJ_SCOPE(obj)->ownercx == (cx)) \
+ ? (void)0 : js_UnlockObj(cx, obj))
+
+#define JS_LOCK_SCOPE(cx,scope) ((scope)->ownercx == (cx) ? (void)0 \
+ : js_LockScope(cx, scope))
+#define JS_UNLOCK_SCOPE(cx,scope) ((scope)->ownercx == (cx) ? (void)0 \
+ : js_UnlockScope(cx, scope))
+#define JS_TRANSFER_SCOPE_LOCK(cx, scope, newscope) \
+ js_TransferScopeLock(cx, scope, newscope)
+
+extern void js_LockRuntime(JSRuntime *rt);
+extern void js_UnlockRuntime(JSRuntime *rt);
+extern void js_LockObj(JSContext *cx, JSObject *obj);
+extern void js_UnlockObj(JSContext *cx, JSObject *obj);
+extern void js_LockScope(JSContext *cx, JSScope *scope);
+extern void js_UnlockScope(JSContext *cx, JSScope *scope);
+extern int js_SetupLocks(int,int);
+extern void js_CleanupLocks();
+extern void js_TransferScopeLock(JSContext *, JSScope *, JSScope *);
+extern JS_FRIEND_API(jsval)
+js_GetSlotThreadSafe(JSContext *, JSObject *, uint32);
+extern void js_SetSlotThreadSafe(JSContext *, JSObject *, uint32, jsval);
+extern void js_InitLock(JSThinLock *);
+extern void js_FinishLock(JSThinLock *);
+extern void js_FinishSharingScope(JSRuntime *rt, JSScope *scope);
+
+#ifdef DEBUG
+
+#define JS_IS_RUNTIME_LOCKED(rt) js_IsRuntimeLocked(rt)
+#define JS_IS_OBJ_LOCKED(cx,obj) js_IsObjLocked(cx,obj)
+#define JS_IS_SCOPE_LOCKED(cx,scope) js_IsScopeLocked(cx,scope)
+
+extern JSBool js_IsRuntimeLocked(JSRuntime *rt);
+extern JSBool js_IsObjLocked(JSContext *cx, JSObject *obj);
+extern JSBool js_IsScopeLocked(JSContext *cx, JSScope *scope);
+
+#else
+
+#define JS_IS_RUNTIME_LOCKED(rt) 0
+#define JS_IS_OBJ_LOCKED(cx,obj) 1
+#define JS_IS_SCOPE_LOCKED(cx,scope) 1
+
+#endif /* DEBUG */
+
+#define JS_LOCK_OBJ_VOID(cx, obj, e) \
+ JS_BEGIN_MACRO \
+ JS_LOCK_OBJ(cx, obj); \
+ e; \
+ JS_UNLOCK_OBJ(cx, obj); \
+ JS_END_MACRO
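+/*
+ * Usage sketch (illustration only): evaluate an expression while holding
+ * obj's lock, e.g.
+ *
+ *     JS_LOCK_OBJ_VOID(cx, obj, v = obj->slots[slot]);
+ */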
+
+#define JS_LOCK_VOID(cx, e) \
+ JS_BEGIN_MACRO \
+ JSRuntime *_rt = (cx)->runtime; \
+ JS_LOCK_RUNTIME_VOID(_rt, e); \
+ JS_END_MACRO
+
+/* FIXME: bug 353962 hackaround */
+#define JS_USE_ONLY_NSPR_LOCKS 1
+
+#if defined(JS_USE_ONLY_NSPR_LOCKS) || \
+ !( (defined(_WIN32) && defined(_M_IX86)) || \
+ (defined(__GNUC__) && defined(__i386__)) || \
+ ((defined(__USLC__) || defined(_SCO_DS)) && defined(i386)) || \
+ (defined(SOLARIS) && defined(sparc) && defined(ULTRA_SPARC)) || \
+ defined(AIX) )
+
+#define NSPR_LOCK 1
+
+#undef JS_LOCK0
+#undef JS_UNLOCK0
+#define JS_LOCK0(P,M) (JS_ACQUIRE_LOCK(((JSLock*)(P)->fat)), (P)->owner = (M))
+#define JS_UNLOCK0(P,M) ((P)->owner = 0, JS_RELEASE_LOCK(((JSLock*)(P)->fat)))
+
+#else /* arch-tests */
+
+#undef NSPR_LOCK
+
+extern JS_INLINE void js_Lock(JSThinLock *tl, jsword me);
+extern JS_INLINE void js_Unlock(JSThinLock *tl, jsword me);
+
+#endif /* arch-tests */
+
+#else /* !JS_THREADSAFE */
+
+#define JS_ATOMIC_INCREMENT(p) (++*(p))
+#define JS_ATOMIC_DECREMENT(p) (--*(p))
+#define JS_ATOMIC_ADD(p,v) (*(p) += (v))
+
+#define JS_CurrentThreadId() 0
+#define JS_NEW_LOCK() NULL
+#define JS_DESTROY_LOCK(l) ((void)0)
+#define JS_ACQUIRE_LOCK(l) ((void)0)
+#define JS_RELEASE_LOCK(l) ((void)0)
+#define JS_LOCK0(P,M) ((void)0)
+#define JS_UNLOCK0(P,M) ((void)0)
+
+#define JS_NEW_CONDVAR(l) NULL
+#define JS_DESTROY_CONDVAR(cv) ((void)0)
+#define JS_WAIT_CONDVAR(cv,to) ((void)0)
+#define JS_NOTIFY_CONDVAR(cv) ((void)0)
+#define JS_NOTIFY_ALL_CONDVAR(cv) ((void)0)
+
+#define JS_LOCK_RUNTIME(rt) ((void)0)
+#define JS_UNLOCK_RUNTIME(rt) ((void)0)
+#define JS_LOCK_OBJ(cx,obj) ((void)0)
+#define JS_UNLOCK_OBJ(cx,obj) ((void)0)
+#define JS_LOCK_OBJ_VOID(cx,obj,e) (e)
+#define JS_LOCK_SCOPE(cx,scope) ((void)0)
+#define JS_UNLOCK_SCOPE(cx,scope) ((void)0)
+#define JS_TRANSFER_SCOPE_LOCK(c,o,n) ((void)0)
+
+#define JS_IS_RUNTIME_LOCKED(rt) 1
+#define JS_IS_OBJ_LOCKED(cx,obj) 1
+#define JS_IS_SCOPE_LOCKED(cx,scope) 1
+#define JS_LOCK_VOID(cx, e) JS_LOCK_RUNTIME_VOID((cx)->runtime, e)
+
+#endif /* !JS_THREADSAFE */
+
+#define JS_LOCK_RUNTIME_VOID(rt,e) \
+ JS_BEGIN_MACRO \
+ JS_LOCK_RUNTIME(rt); \
+ e; \
+ JS_UNLOCK_RUNTIME(rt); \
+ JS_END_MACRO
+
+#define JS_LOCK_GC(rt) JS_ACQUIRE_LOCK((rt)->gcLock)
+#define JS_UNLOCK_GC(rt) JS_RELEASE_LOCK((rt)->gcLock)
+#define JS_LOCK_GC_VOID(rt,e) (JS_LOCK_GC(rt), (e), JS_UNLOCK_GC(rt))
+#define JS_AWAIT_GC_DONE(rt) JS_WAIT_CONDVAR((rt)->gcDone, JS_NO_TIMEOUT)
+#define JS_NOTIFY_GC_DONE(rt) JS_NOTIFY_ALL_CONDVAR((rt)->gcDone)
+#define JS_AWAIT_REQUEST_DONE(rt) JS_WAIT_CONDVAR((rt)->requestDone, \
+ JS_NO_TIMEOUT)
+#define JS_NOTIFY_REQUEST_DONE(rt) JS_NOTIFY_CONDVAR((rt)->requestDone)
+
+#define JS_LOCK(P,CX) JS_LOCK0(P, CX_THINLOCK_ID(CX))
+#define JS_UNLOCK(P,CX) JS_UNLOCK0(P, CX_THINLOCK_ID(CX))
+
+#endif /* jslock_h___ */
diff --git a/third_party/js-1.7/jslocko.asm b/third_party/js-1.7/jslocko.asm
new file mode 100644
index 0000000..95353ba
--- /dev/null
+++ b/third_party/js-1.7/jslocko.asm
@@ -0,0 +1,60 @@
+; -*- Mode: asm; tab-width: 8; c-basic-offset: 4 -*-
+
+; ***** BEGIN LICENSE BLOCK *****
+; Version: MPL 1.1/GPL 2.0/LGPL 2.1
+;
+; The contents of this file are subject to the Mozilla Public License Version
+; 1.1 (the "License"); you may not use this file except in compliance with
+; the License. You may obtain a copy of the License at
+; http://www.mozilla.org/MPL/
+;
+; Software distributed under the License is distributed on an "AS IS" basis,
+; WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+; for the specific language governing rights and limitations under the
+; License.
+;
+; The Original Code is an OS/2 implementation of js_CompareAndSwap in assembly.
+;
+; The Initial Developer of the Original Code is
+; IBM Corporation.
+; Portions created by the Initial Developer are Copyright (C) 2001
+; the Initial Developer. All Rights Reserved.
+;
+; Contributor(s):
+;
+; Alternatively, the contents of this file may be used under the terms of
+; either the GNU General Public License Version 2 or later (the "GPL"), or
+; the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+; in which case the provisions of the GPL or the LGPL are applicable instead
+; of those above. If you wish to allow use of your version of this file only
+; under the terms of either the GPL or the LGPL, and not to allow others to
+; use your version of this file under the terms of the MPL, indicate your
+; decision by deleting the provisions above and replace them with the notice
+; and other provisions required by the GPL or the LGPL. If you do not delete
+; the provisions above, a recipient may use your version of this file under
+; the terms of any one of the MPL, the GPL or the LGPL.
+;
+; ***** END LICENSE BLOCK *****
+
+ .486P
+ .MODEL FLAT, OPTLINK
+ .STACK
+
+ .CODE
+
+;;;---------------------------------------------------------------------
+;;; int _Optlink js_CompareAndSwap(jsword *w, jsword ov, jsword nv)
+;;;---------------------------------------------------------------------
+js_CompareAndSwap PROC OPTLINK EXPORT
+ push ebx
+ mov ebx, eax
+ mov eax, edx
+ mov edx, ebx
+ lock cmpxchg [ebx], ecx
+ sete al
+ and eax, 1h
+ pop ebx
+ ret
+js_CompareAndSwap endp
+
+ END
diff --git a/third_party/js-1.7/jslog2.c b/third_party/js-1.7/jslog2.c
new file mode 100644
index 0000000..876e528
--- /dev/null
+++ b/third_party/js-1.7/jslog2.c
@@ -0,0 +1,94 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#include "jsstddef.h"
+#include "jsbit.h"
+#include "jsutil.h"
+
+/*
+** Compute the log of the least power of 2 greater than or equal to n
+*/
+JS_PUBLIC_API(JSIntn) JS_CeilingLog2(JSUint32 n)
+{
+ JSIntn log2;
+
+ JS_CEILING_LOG2(log2, n);
+ return log2;
+}
+
+/*
+** Compute the log of the greatest power of 2 less than or equal to n.
+** This really just finds the highest set bit in the word.
+*/
+JS_PUBLIC_API(JSIntn) JS_FloorLog2(JSUint32 n)
+{
+ JSIntn log2;
+
+ JS_FLOOR_LOG2(log2, n);
+ return log2;
+}
+
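+/*
+ * Worked example (not part of the original file): for n == 5,
+ * JS_CeilingLog2(5) == 3, since the least power of 2 >= 5 is 8 == 2^3, while
+ * JS_FloorLog2(5) == 2, since the greatest power of 2 <= 5 is 4 == 2^2.
+ * js_SetupLocks in jslock.c uses JS_CeilingLog2 this way to round the
+ * requested number of global locks up to a power of 2.
+ */
+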
+/*
+ * js_FloorLog2wImpl has to be defined only for the 64-bit non-GCC case.
+ */
+#if !defined(JS_HAS_GCC_BUILTIN_CLZ) && JS_BYTES_PER_WORD == 8
+
+JSUword
+js_FloorLog2wImpl(JSUword n)
+{
+ JSUword log2, m;
+
+ JS_ASSERT(n != 0);
+
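+    /*
+     * Binary search for the highest set bit: test the top half of the
+     * remaining bits at each step and accumulate the bit's index in log2.
+     */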
+ log2 = 0;
+ m = n >> 32;
+ if (m != 0) { n = m; log2 = 32; }
+ m = n >> 16;
+ if (m != 0) { n = m; log2 |= 16; }
+ m = n >> 8;
+ if (m != 0) { n = m; log2 |= 8; }
+ m = n >> 4;
+ if (m != 0) { n = m; log2 |= 4; }
+ m = n >> 2;
+ if (m != 0) { n = m; log2 |= 2; }
+ log2 |= (n >> 1);
+
+ return log2;
+}
+
+#endif
diff --git a/third_party/js-1.7/jslong.c b/third_party/js-1.7/jslong.c
new file mode 100644
index 0000000..9a4a5b4
--- /dev/null
+++ b/third_party/js-1.7/jslong.c
@@ -0,0 +1,281 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#include "jsstddef.h"
+#include "jstypes.h"
+#include "jslong.h"
+
+static JSInt64 ll_zero = JSLL_INIT( 0x00000000,0x00000000 );
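+/*
+ * Each global-lock bucket keeps a free list and a doubly-linked "taken" list
+ * of fat locks. GetFatlock moves a lock from free to taken, allocating a
+ * fresh chunk of fl_list_chunk_len locks when the free list is empty;
+ * PutFatlock moves it back once its last waiter has been resumed.
+ */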
+static JSInt64 ll_maxint = JSLL_INIT( 0x7fffffff, 0xffffffff );
+static JSInt64 ll_minint = JSLL_INIT( 0x80000000, 0x00000000 );
+
+#ifdef HAVE_WATCOM_BUG_2
+JSInt64 __pascal __loadds __export
+ JSLL_Zero(void) { return ll_zero; }
+JSInt64 __pascal __loadds __export
+ JSLL_MaxInt(void) { return ll_maxint; }
+JSInt64 __pascal __loadds __export
+ JSLL_MinInt(void) { return ll_minint; }
+#else
+JS_PUBLIC_API(JSInt64) JSLL_Zero(void) { return ll_zero; }
+JS_PUBLIC_API(JSInt64) JSLL_MaxInt(void) { return ll_maxint; }
+JS_PUBLIC_API(JSInt64) JSLL_MinInt(void) { return ll_minint; }
+#endif
+
+#ifndef JS_HAVE_LONG_LONG
+/*
+** Divide 64-bit a by 32-bit b, which must be normalized so its high bit is 1.
+*/
+static void norm_udivmod32(JSUint32 *qp, JSUint32 *rp, JSUint64 a, JSUint32 b)
+{
+ JSUint32 d1, d0, q1, q0;
+ JSUint32 r1, r0, m;
+
+ d1 = jshi16(b);
+ d0 = jslo16(b);
+ r1 = a.hi % d1;
+ q1 = a.hi / d1;
+ m = q1 * d0;
+ r1 = (r1 << 16) | jshi16(a.lo);
+ if (r1 < m) {
+ q1--, r1 += b;
+ if (r1 >= b /* i.e., we didn't get a carry when adding to r1 */
+ && r1 < m) {
+ q1--, r1 += b;
+ }
+ }
+ r1 -= m;
+ r0 = r1 % d1;
+ q0 = r1 / d1;
+ m = q0 * d0;
+ r0 = (r0 << 16) | jslo16(a.lo);
+ if (r0 < m) {
+ q0--, r0 += b;
+ if (r0 >= b
+ && r0 < m) {
+ q0--, r0 += b;
+ }
+ }
+ *qp = (q1 << 16) | q0;
+ *rp = r0 - m;
+}
+
+static JSUint32 CountLeadingZeros(JSUint32 a)
+{
+ JSUint32 t;
+ JSUint32 r = 32;
+
+ if ((t = a >> 16) != 0)
+ r -= 16, a = t;
+ if ((t = a >> 8) != 0)
+ r -= 8, a = t;
+ if ((t = a >> 4) != 0)
+ r -= 4, a = t;
+ if ((t = a >> 2) != 0)
+ r -= 2, a = t;
+ if ((t = a >> 1) != 0)
+ r -= 1, a = t;
+ if (a & 1)
+ r--;
+ return r;
+}
+
+JS_PUBLIC_API(void) jsll_udivmod(JSUint64 *qp, JSUint64 *rp, JSUint64 a, JSUint64 b)
+{
+ JSUint32 n0, n1, n2;
+ JSUint32 q0, q1;
+ JSUint32 rsh, lsh;
+
+ n0 = a.lo;
+ n1 = a.hi;
+
+ if (b.hi == 0) {
+ if (b.lo > n1) {
+ /* (0 q0) = (n1 n0) / (0 D0) */
+
+ lsh = CountLeadingZeros(b.lo);
+
+ if (lsh) {
+ /*
+ * Normalize, i.e. make the most significant bit of the
+ * denominator be set.
+ */
+ b.lo = b.lo << lsh;
+ n1 = (n1 << lsh) | (n0 >> (32 - lsh));
+ n0 = n0 << lsh;
+ }
+
+ a.lo = n0, a.hi = n1;
+ norm_udivmod32(&q0, &n0, a, b.lo);
+ q1 = 0;
+
+ /* remainder is in n0 >> lsh */
+ } else {
+ /* (q1 q0) = (n1 n0) / (0 d0) */
+
+ if (b.lo == 0) /* user wants to divide by zero! */
+ b.lo = 1 / b.lo; /* so go ahead and crash */
+
+ lsh = CountLeadingZeros(b.lo);
+
+ if (lsh == 0) {
+ /*
+ * From (n1 >= b.lo)
+ * && (the most significant bit of b.lo is set),
+ * conclude that
+ * (the most significant bit of n1 is set)
+ * && (the leading quotient digit q1 = 1).
+ *
+ * This special case is necessary, not an optimization
+ * (Shift counts of 32 are undefined).
+ */
+ n1 -= b.lo;
+ q1 = 1;
+ } else {
+ /*
+ * Normalize.
+ */
+ rsh = 32 - lsh;
+
+ b.lo = b.lo << lsh;
+ n2 = n1 >> rsh;
+ n1 = (n1 << lsh) | (n0 >> rsh);
+ n0 = n0 << lsh;
+
+ a.lo = n1, a.hi = n2;
+ norm_udivmod32(&q1, &n1, a, b.lo);
+ }
+
+ /* n1 != b.lo... */
+
+ a.lo = n0, a.hi = n1;
+ norm_udivmod32(&q0, &n0, a, b.lo);
+
+ /* remainder in n0 >> lsh */
+ }
+
+ if (rp) {
+ rp->lo = n0 >> lsh;
+ rp->hi = 0;
+ }
+ } else {
+ if (b.hi > n1) {
+ /* (0 0) = (n1 n0) / (D1 d0) */
+
+ q0 = 0;
+ q1 = 0;
+
+ /* remainder in (n1 n0) */
+ if (rp) {
+ rp->lo = n0;
+ rp->hi = n1;
+ }
+ } else {
+ /* (0 q0) = (n1 n0) / (d1 d0) */
+
+ lsh = CountLeadingZeros(b.hi);
+ if (lsh == 0) {
+ /*
+ * From (n1 >= b.hi)
+ * && (the most significant bit of b.hi is set),
+ * conclude that
+ * (the most significant bit of n1 is set)
+ * && (the quotient digit q0 = 0 or 1).
+ *
+ * This special case is necessary, not an optimization.
+ */
+
+ /*
+ * The condition on the next line takes advantage of the fact that
+ * n1 >= b.hi (true due to control flow).
+ */
+ if (n1 > b.hi || n0 >= b.lo) {
+ q0 = 1;
+ a.lo = n0, a.hi = n1;
+ JSLL_SUB(a, a, b);
+ } else {
+ q0 = 0;
+ }
+ q1 = 0;
+
+ if (rp) {
+ rp->lo = n0;
+ rp->hi = n1;
+ }
+ } else {
+ JSInt64 m;
+
+ /*
+ * Normalize.
+ */
+ rsh = 32 - lsh;
+
+ b.hi = (b.hi << lsh) | (b.lo >> rsh);
+ b.lo = b.lo << lsh;
+ n2 = n1 >> rsh;
+ n1 = (n1 << lsh) | (n0 >> rsh);
+ n0 = n0 << lsh;
+
+ a.lo = n1, a.hi = n2;
+ norm_udivmod32(&q0, &n1, a, b.hi);
+ JSLL_MUL32(m, q0, b.lo);
+
+ if ((m.hi > n1) || ((m.hi == n1) && (m.lo > n0))) {
+ q0--;
+ JSLL_SUB(m, m, b);
+ }
+
+ q1 = 0;
+
+ /* Remainder is ((n1 n0) - (m1 m0)) >> lsh */
+ if (rp) {
+ a.lo = n0, a.hi = n1;
+ JSLL_SUB(a, a, m);
+ rp->lo = (a.hi << rsh) | (a.lo >> lsh);
+ rp->hi = a.hi >> lsh;
+ }
+ }
+ }
+ }
+
+ if (qp) {
+ qp->lo = q0;
+ qp->hi = q1;
+ }
+}
+#endif /* !JS_HAVE_LONG_LONG */
diff --git a/third_party/js-1.7/jslong.h b/third_party/js-1.7/jslong.h
new file mode 100644
index 0000000..059cf00
--- /dev/null
+++ b/third_party/js-1.7/jslong.h
@@ -0,0 +1,437 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+** File: jslong.h
+** Description: Portable access to 64 bit numerics
+**
+** Long-long (64-bit signed integer type) support. Some C compilers
+** don't support 64 bit integers yet, so we use these macros to
+** support both machines that do and those that don't.
+**/
+#ifndef jslong_h___
+#define jslong_h___
+
+#include "jstypes.h"
+
+JS_BEGIN_EXTERN_C
+
+/***********************************************************************
+** DEFINES: JSLL_MaxInt
+** JSLL_MinInt
+** JSLL_Zero
+** DESCRIPTION:
+** Various interesting constants and static variable
+** initializer
+***********************************************************************/
+#ifdef HAVE_WATCOM_BUG_2
+JSInt64 __pascal __loadds __export
+ JSLL_MaxInt(void);
+JSInt64 __pascal __loadds __export
+ JSLL_MinInt(void);
+JSInt64 __pascal __loadds __export
+ JSLL_Zero(void);
+#else
+extern JS_PUBLIC_API(JSInt64) JSLL_MaxInt(void);
+extern JS_PUBLIC_API(JSInt64) JSLL_MinInt(void);
+extern JS_PUBLIC_API(JSInt64) JSLL_Zero(void);
+#endif
+
+#define JSLL_MAXINT JSLL_MaxInt()
+#define JSLL_MININT JSLL_MinInt()
+#define JSLL_ZERO JSLL_Zero()
+
+#ifdef JS_HAVE_LONG_LONG
+
+#if JS_BYTES_PER_LONG == 8
+#define JSLL_INIT(hi, lo) ((hi ## L << 32) + lo ## L)
+#elif (defined(WIN32) || defined(WIN16)) && !defined(__GNUC__)
+#define JSLL_INIT(hi, lo) ((hi ## i64 << 32) + lo ## i64)
+#else
+#define JSLL_INIT(hi, lo) ((hi ## LL << 32) + lo ## LL)
+#endif
+
+/***********************************************************************
+** MACROS: JSLL_*
+** DESCRIPTION:
+** The following macros define portable access to the 64 bit
+** math facilities.
+**
+***********************************************************************/
+
+/***********************************************************************
+** MACROS: JSLL_<relational operators>
+**
+** JSLL_IS_ZERO Test for zero
+** JSLL_EQ Test for equality
+** JSLL_NE Test for inequality
+** JSLL_GE_ZERO Test for zero or positive
+** JSLL_CMP Compare two values
+***********************************************************************/
+#define JSLL_IS_ZERO(a) ((a) == 0)
+#define JSLL_EQ(a, b) ((a) == (b))
+#define JSLL_NE(a, b) ((a) != (b))
+#define JSLL_GE_ZERO(a) ((a) >= 0)
+#define JSLL_CMP(a, op, b) ((JSInt64)(a) op (JSInt64)(b))
+#define JSLL_UCMP(a, op, b) ((JSUint64)(a) op (JSUint64)(b))
+
+/***********************************************************************
+** MACROS: JSLL_<logical operators>
+**
+** JSLL_AND Logical and
+** JSLL_OR Logical or
+** JSLL_XOR Logical exclusion
+** JSLL_OR2 A disgusting deviation
+** JSLL_NOT Negation (one's complement)
+***********************************************************************/
+#define JSLL_AND(r, a, b) ((r) = (a) & (b))
+#define JSLL_OR(r, a, b) ((r) = (a) | (b))
+#define JSLL_XOR(r, a, b) ((r) = (a) ^ (b))
+#define JSLL_OR2(r, a) ((r) = (r) | (a))
+#define JSLL_NOT(r, a) ((r) = ~(a))
+
+/***********************************************************************
+** MACROS: JSLL_<mathematical operators>
+**
+** JSLL_NEG Negation (two's complement)
+** JSLL_ADD Summation (two's complement)
+** JSLL_SUB Difference (two's complement)
+***********************************************************************/
+#define JSLL_NEG(r, a) ((r) = -(a))
+#define JSLL_ADD(r, a, b) ((r) = (a) + (b))
+#define JSLL_SUB(r, a, b) ((r) = (a) - (b))
+
+/***********************************************************************
+** MACROS: JSLL_<mathematical operators>
+**
+** JSLL_MUL Product (two's complement)
+** JSLL_DIV Quotient (two's complement)
+** JSLL_MOD Modulus (two's complement)
+***********************************************************************/
+#define JSLL_MUL(r, a, b) ((r) = (a) * (b))
+#define JSLL_DIV(r, a, b) ((r) = (a) / (b))
+#define JSLL_MOD(r, a, b) ((r) = (a) % (b))
+
+/***********************************************************************
+** MACROS: JSLL_<shifting operators>
+**
+** JSLL_SHL Shift left [0..64] bits
+** JSLL_SHR Shift right [0..64] bits with sign extension
+** JSLL_USHR Unsigned shift right [0..64] bits
+** JSLL_ISHL Signed shift left [0..64] bits
+***********************************************************************/
+#define JSLL_SHL(r, a, b) ((r) = (JSInt64)(a) << (b))
+#define JSLL_SHR(r, a, b) ((r) = (JSInt64)(a) >> (b))
+#define JSLL_USHR(r, a, b) ((r) = (JSUint64)(a) >> (b))
+#define JSLL_ISHL(r, a, b) ((r) = (JSInt64)(a) << (b))
+
+/***********************************************************************
+** MACROS: JSLL_<conversion operators>
+**
+** JSLL_L2I Convert to signed 32 bit
+** JSLL_L2UI Convert to unsigned 32 bit
+** JSLL_L2F Convert to floating point
+** JSLL_L2D Convert to floating point
+** JSLL_I2L Convert signed to 64 bit
+** JSLL_UI2L Convert unsigned to 64 bit
+** JSLL_F2L Convert float to 64 bit
+** JSLL_D2L Convert float to 64 bit
+***********************************************************************/
+#define JSLL_L2I(i, l) ((i) = (JSInt32)(l))
+#define JSLL_L2UI(ui, l) ((ui) = (JSUint32)(l))
+#define JSLL_L2F(f, l) ((f) = (JSFloat64)(l))
+#define JSLL_L2D(d, l) ((d) = (JSFloat64)(l))
+
+#define JSLL_I2L(l, i) ((l) = (JSInt64)(i))
+#define JSLL_UI2L(l, ui) ((l) = (JSInt64)(ui))
+#define JSLL_F2L(l, f) ((l) = (JSInt64)(f))
+#define JSLL_D2L(l, d) ((l) = (JSInt64)(d))
+
+/***********************************************************************
+** MACROS: JSLL_UDIVMOD
+** DESCRIPTION:
+** Produce both a quotient and a remainder, given an unsigned dividend
+** and divisor.
+** INPUTS: JSUint64 a: The dividend of the operation
+** JSUint64 b: The divisor of the operation
+** OUTPUTS: JSUint64 *qp: pointer to quotient
+** JSUint64 *rp: pointer to remainder
+***********************************************************************/
+#define JSLL_UDIVMOD(qp, rp, a, b) \
+ (*(qp) = ((JSUint64)(a) / (b)), \
+ *(rp) = ((JSUint64)(a) % (b)))
+
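+/*
+ * Usage sketch (illustration only, assuming JS_HAVE_LONG_LONG so JSUint64 is
+ * a native 64-bit integer type):
+ *
+ *     JSUint64 q, r;
+ *     JSLL_UDIVMOD(&q, &r, (JSUint64)1000003, (JSUint64)10);
+ *
+ * afterwards q == 100000 and r == 3.
+ */
+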
+#else /* !JS_HAVE_LONG_LONG */
+
+#ifdef IS_LITTLE_ENDIAN
+#define JSLL_INIT(hi, lo) {JS_INT32(lo), JS_INT32(hi)}
+#else
+#define JSLL_INIT(hi, lo) {JS_INT32(hi), JS_INT32(lo)}
+#endif
+
+#define JSLL_IS_ZERO(a) (((a).hi == 0) && ((a).lo == 0))
+#define JSLL_EQ(a, b) (((a).hi == (b).hi) && ((a).lo == (b).lo))
+#define JSLL_NE(a, b) (((a).hi != (b).hi) || ((a).lo != (b).lo))
+#define JSLL_GE_ZERO(a) (((a).hi >> 31) == 0)
+
+#ifdef DEBUG
+#define JSLL_CMP(a, op, b) (JS_ASSERT((#op)[1] != '='), JSLL_REAL_CMP(a, op, b))
+#define JSLL_UCMP(a, op, b) (JS_ASSERT((#op)[1] != '='), JSLL_REAL_UCMP(a, op, b))
+#else
+#define JSLL_CMP(a, op, b) JSLL_REAL_CMP(a, op, b)
+#define JSLL_UCMP(a, op, b) JSLL_REAL_UCMP(a, op, b)
+#endif
+
+#define JSLL_REAL_CMP(a,op,b) (((JSInt32)(a).hi op (JSInt32)(b).hi) || \
+ (((a).hi == (b).hi) && ((a).lo op (b).lo)))
+#define JSLL_REAL_UCMP(a,op,b) (((a).hi op (b).hi) || \
+ (((a).hi == (b).hi) && ((a).lo op (b).lo)))
+
+#define JSLL_AND(r, a, b) ((r).lo = (a).lo & (b).lo, \
+ (r).hi = (a).hi & (b).hi)
+#define JSLL_OR(r, a, b) ((r).lo = (a).lo | (b).lo, \
+ (r).hi = (a).hi | (b).hi)
+#define JSLL_XOR(r, a, b) ((r).lo = (a).lo ^ (b).lo, \
+ (r).hi = (a).hi ^ (b).hi)
+#define JSLL_OR2(r, a) ((r).lo = (r).lo | (a).lo, \
+ (r).hi = (r).hi | (a).hi)
+#define JSLL_NOT(r, a) ((r).lo = ~(a).lo, \
+ (r).hi = ~(a).hi)
+
+#define JSLL_NEG(r, a) ((r).lo = -(JSInt32)(a).lo, \
+ (r).hi = -(JSInt32)(a).hi - ((r).lo != 0))
+#define JSLL_ADD(r, a, b) { \
+ JSInt64 _a, _b; \
+ _a = a; _b = b; \
+ (r).lo = _a.lo + _b.lo; \
+ (r).hi = _a.hi + _b.hi + ((r).lo < _b.lo); \
+}
+
+#define JSLL_SUB(r, a, b) { \
+ JSInt64 _a, _b; \
+ _a = a; _b = b; \
+ (r).lo = _a.lo - _b.lo; \
+ (r).hi = _a.hi - _b.hi - (_a.lo < _b.lo); \
+}
+
+#define JSLL_MUL(r, a, b) { \
+ JSInt64 _a, _b; \
+ _a = a; _b = b; \
+ JSLL_MUL32(r, _a.lo, _b.lo); \
+ (r).hi += _a.hi * _b.lo + _a.lo * _b.hi; \
+}
+
+#define jslo16(a) ((a) & JS_BITMASK(16))
+#define jshi16(a) ((a) >> 16)
+
+#define JSLL_MUL32(r, a, b) { \
+ JSUint32 _a1, _a0, _b1, _b0, _y0, _y1, _y2, _y3; \
+ _a1 = jshi16(a), _a0 = jslo16(a); \
+ _b1 = jshi16(b), _b0 = jslo16(b); \
+ _y0 = _a0 * _b0; \
+ _y1 = _a0 * _b1; \
+ _y2 = _a1 * _b0; \
+ _y3 = _a1 * _b1; \
+ _y1 += jshi16(_y0); /* can't carry */ \
+ _y1 += _y2; /* might carry */ \
+ if (_y1 < _y2) \
+ _y3 += (JSUint32)(JS_BIT(16)); /* propagate */ \
+ (r).lo = (jslo16(_y1) << 16) + jslo16(_y0); \
+ (r).hi = _y3 + jshi16(_y1); \
+}
+
+#define JSLL_UDIVMOD(qp, rp, a, b) jsll_udivmod(qp, rp, a, b)
+
+extern JS_PUBLIC_API(void) jsll_udivmod(JSUint64 *qp, JSUint64 *rp, JSUint64 a, JSUint64 b);
+
+#define JSLL_DIV(r, a, b) { \
+ JSInt64 _a, _b; \
+ JSUint32 _negative = (JSInt32)(a).hi < 0; \
+ if (_negative) { \
+ JSLL_NEG(_a, a); \
+ } else { \
+ _a = a; \
+ } \
+ if ((JSInt32)(b).hi < 0) { \
+ _negative ^= 1; \
+ JSLL_NEG(_b, b); \
+ } else { \
+ _b = b; \
+ } \
+ JSLL_UDIVMOD(&(r), 0, _a, _b); \
+ if (_negative) \
+ JSLL_NEG(r, r); \
+}
+
+#define JSLL_MOD(r, a, b) { \
+ JSInt64 _a, _b; \
+ JSUint32 _negative = (JSInt32)(a).hi < 0; \
+ if (_negative) { \
+ JSLL_NEG(_a, a); \
+ } else { \
+ _a = a; \
+ } \
+ if ((JSInt32)(b).hi < 0) { \
+ JSLL_NEG(_b, b); \
+ } else { \
+ _b = b; \
+ } \
+ JSLL_UDIVMOD(0, &(r), _a, _b); \
+ if (_negative) \
+ JSLL_NEG(r, r); \
+}
+
+#define JSLL_SHL(r, a, b) { \
+ if (b) { \
+ JSInt64 _a; \
+ _a = a; \
+ if ((b) < 32) { \
+ (r).lo = _a.lo << ((b) & 31); \
+ (r).hi = (_a.hi << ((b) & 31)) | (_a.lo >> (32 - (b))); \
+ } else { \
+ (r).lo = 0; \
+ (r).hi = _a.lo << ((b) & 31); \
+ } \
+ } else { \
+ (r) = (a); \
+ } \
+}
+
+/* a is a JSInt32, b is a JSInt32, r is a JSInt64 */
+#define JSLL_ISHL(r, a, b) { \
+ if (b) { \
+ JSInt64 _a; \
+ _a.lo = (a); \
+ _a.hi = 0; \
+ if ((b) < 32) { \
+ (r).lo = (a) << ((b) & 31); \
+ (r).hi = ((a) >> (32 - (b))); \
+ } else { \
+ (r).lo = 0; \
+ (r).hi = (a) << ((b) & 31); \
+ } \
+ } else { \
+ (r).lo = (a); \
+ (r).hi = 0; \
+ } \
+}
+
+#define JSLL_SHR(r, a, b) { \
+ if (b) { \
+ JSInt64 _a; \
+ _a = a; \
+ if ((b) < 32) { \
+ (r).lo = (_a.hi << (32 - (b))) | (_a.lo >> ((b) & 31)); \
+ (r).hi = (JSInt32)_a.hi >> ((b) & 31); \
+ } else { \
+ (r).lo = (JSInt32)_a.hi >> ((b) & 31); \
+ (r).hi = (JSInt32)_a.hi >> 31; \
+ } \
+ } else { \
+ (r) = (a); \
+ } \
+}
+
+#define JSLL_USHR(r, a, b) { \
+ if (b) { \
+ JSInt64 _a; \
+ _a = a; \
+ if ((b) < 32) { \
+ (r).lo = (_a.hi << (32 - (b))) | (_a.lo >> ((b) & 31)); \
+ (r).hi = _a.hi >> ((b) & 31); \
+ } else { \
+ (r).lo = _a.hi >> ((b) & 31); \
+ (r).hi = 0; \
+ } \
+ } else { \
+ (r) = (a); \
+ } \
+}
+
+#define JSLL_L2I(i, l) ((i) = (l).lo)
+#define JSLL_L2UI(ui, l) ((ui) = (l).lo)
+#define JSLL_L2F(f, l) { double _d; JSLL_L2D(_d, l); (f) = (JSFloat64)_d; }
+
+#define JSLL_L2D(d, l) { \
+ int _negative; \
+ JSInt64 _absval; \
+ \
+ _negative = (l).hi >> 31; \
+ if (_negative) { \
+ JSLL_NEG(_absval, l); \
+ } else { \
+ _absval = l; \
+ } \
+ (d) = (double)_absval.hi * 4.294967296e9 + _absval.lo; \
+ if (_negative) \
+ (d) = -(d); \
+}
+
+#define JSLL_I2L(l, i) { JSInt32 _i = (i) >> 31; (l).lo = (i); (l).hi = _i; }
+#define JSLL_UI2L(l, ui) ((l).lo = (ui), (l).hi = 0)
+#define JSLL_F2L(l, f) { double _d = (double)f; JSLL_D2L(l, _d); }
+
+#define JSLL_D2L(l, d) { \
+ int _negative; \
+ double _absval, _d_hi; \
+ JSInt64 _lo_d; \
+ \
+ _negative = ((d) < 0); \
+ _absval = _negative ? -(d) : (d); \
+ \
+ (l).hi = _absval / 4.294967296e9; \
+ (l).lo = 0; \
+ JSLL_L2D(_d_hi, l); \
+ _absval -= _d_hi; \
+ _lo_d.hi = 0; \
+ if (_absval < 0) { \
+ _lo_d.lo = -_absval; \
+ JSLL_SUB(l, l, _lo_d); \
+ } else { \
+ _lo_d.lo = _absval; \
+ JSLL_ADD(l, l, _lo_d); \
+ } \
+ \
+ if (_negative) \
+ JSLL_NEG(l, l); \
+}
+
+#endif /* !JS_HAVE_LONG_LONG */
+
+JS_END_EXTERN_C
+
+#endif /* jslong_h___ */
diff --git a/third_party/js-1.7/jsmath.c b/third_party/js-1.7/jsmath.c
new file mode 100644
index 0000000..2062916
--- /dev/null
+++ b/third_party/js-1.7/jsmath.c
@@ -0,0 +1,514 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * JS math package.
+ */
+#include "jsstddef.h"
+#include "jslibmath.h"
+#include <stdlib.h>
+#include "jstypes.h"
+#include "jslong.h"
+#include "prmjtime.h"
+#include "jsapi.h"
+#include "jsatom.h"
+#include "jscntxt.h"
+#include "jsconfig.h"
+#include "jslock.h"
+#include "jsmath.h"
+#include "jsnum.h"
+#include "jsobj.h"
+
+#ifndef M_E
+#define M_E 2.7182818284590452354
+#endif
+#ifndef M_LOG2E
+#define M_LOG2E 1.4426950408889634074
+#endif
+#ifndef M_LOG10E
+#define M_LOG10E 0.43429448190325182765
+#endif
+#ifndef M_LN2
+#define M_LN2 0.69314718055994530942
+#endif
+#ifndef M_LN10
+#define M_LN10 2.30258509299404568402
+#endif
+#ifndef M_PI
+#define M_PI 3.14159265358979323846
+#endif
+#ifndef M_SQRT2
+#define M_SQRT2 1.41421356237309504880
+#endif
+#ifndef M_SQRT1_2
+#define M_SQRT1_2 0.70710678118654752440
+#endif
+
+static JSConstDoubleSpec math_constants[] = {
+ {M_E, "E", 0, {0,0,0}},
+ {M_LOG2E, "LOG2E", 0, {0,0,0}},
+ {M_LOG10E, "LOG10E", 0, {0,0,0}},
+ {M_LN2, "LN2", 0, {0,0,0}},
+ {M_LN10, "LN10", 0, {0,0,0}},
+ {M_PI, "PI", 0, {0,0,0}},
+ {M_SQRT2, "SQRT2", 0, {0,0,0}},
+ {M_SQRT1_2, "SQRT1_2", 0, {0,0,0}},
+ {0,0,0,{0,0,0}}
+};
+
+JSClass js_MathClass = {
+ js_Math_str,
+ JSCLASS_HAS_CACHED_PROTO(JSProto_Math),
+ JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
+ JS_EnumerateStub, JS_ResolveStub, JS_ConvertStub, JS_FinalizeStub,
+ JSCLASS_NO_OPTIONAL_MEMBERS
+};
+
+static JSBool
+math_abs(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsdouble x, z;
+
+ if (!js_ValueToNumber(cx, argv[0], &x))
+ return JS_FALSE;
+ z = fd_fabs(x);
+ return js_NewNumberValue(cx, z, rval);
+}
+
+static JSBool
+math_acos(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsdouble x, z;
+
+ if (!js_ValueToNumber(cx, argv[0], &x))
+ return JS_FALSE;
+ z = fd_acos(x);
+ return js_NewNumberValue(cx, z, rval);
+}
+
+static JSBool
+math_asin(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsdouble x, z;
+
+ if (!js_ValueToNumber(cx, argv[0], &x))
+ return JS_FALSE;
+ z = fd_asin(x);
+ return js_NewNumberValue(cx, z, rval);
+}
+
+static JSBool
+math_atan(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsdouble x, z;
+
+ if (!js_ValueToNumber(cx, argv[0], &x))
+ return JS_FALSE;
+ z = fd_atan(x);
+ return js_NewNumberValue(cx, z, rval);
+}
+
+static JSBool
+math_atan2(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsdouble x, y, z;
+
+ if (!js_ValueToNumber(cx, argv[0], &x))
+ return JS_FALSE;
+ if (!js_ValueToNumber(cx, argv[1], &y))
+ return JS_FALSE;
+#if !JS_USE_FDLIBM_MATH && defined(_MSC_VER)
+ /*
+ * MSVC's atan2 does not yield the result demanded by ECMA when both x
+ * and y are infinite.
+ * - The result is a multiple of pi/4.
+ * - The sign of x determines the sign of the result.
+ * - The sign of y determines the multiplier, 1 or 3.
+ */
+ if (JSDOUBLE_IS_INFINITE(x) && JSDOUBLE_IS_INFINITE(y)) {
+ z = fd_copysign(M_PI / 4, x);
+ if (y < 0)
+ z *= 3;
+ return js_NewDoubleValue(cx, z, rval);
+ }
+#endif
+ z = fd_atan2(x, y);
+ return js_NewNumberValue(cx, z, rval);
+}
+
+static JSBool
+math_ceil(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsdouble x, z;
+
+ if (!js_ValueToNumber(cx, argv[0], &x))
+ return JS_FALSE;
+ z = fd_ceil(x);
+ return js_NewNumberValue(cx, z, rval);
+}
+
+static JSBool
+math_cos(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsdouble x, z;
+
+ if (!js_ValueToNumber(cx, argv[0], &x))
+ return JS_FALSE;
+ z = fd_cos(x);
+ return js_NewNumberValue(cx, z, rval);
+}
+
+static JSBool
+math_exp(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsdouble x, z;
+
+ if (!js_ValueToNumber(cx, argv[0], &x))
+ return JS_FALSE;
+#ifdef _WIN32
+ if (!JSDOUBLE_IS_NaN(x)) {
+ if (x == *cx->runtime->jsPositiveInfinity) {
+ *rval = DOUBLE_TO_JSVAL(cx->runtime->jsPositiveInfinity);
+ return JS_TRUE;
+ }
+ if (x == *cx->runtime->jsNegativeInfinity) {
+ *rval = JSVAL_ZERO;
+ return JS_TRUE;
+ }
+ }
+#endif
+ z = fd_exp(x);
+ return js_NewNumberValue(cx, z, rval);
+}
+
+static JSBool
+math_floor(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsdouble x, z;
+
+ if (!js_ValueToNumber(cx, argv[0], &x))
+ return JS_FALSE;
+ z = fd_floor(x);
+ return js_NewNumberValue(cx, z, rval);
+}
+
+static JSBool
+math_log(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsdouble x, z;
+
+ if (!js_ValueToNumber(cx, argv[0], &x))
+ return JS_FALSE;
+ z = fd_log(x);
+ return js_NewNumberValue(cx, z, rval);
+}
+
+static JSBool
+math_max(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsdouble x, z = *cx->runtime->jsNegativeInfinity;
+ uintN i;
+
+ if (argc == 0) {
+ *rval = DOUBLE_TO_JSVAL(cx->runtime->jsNegativeInfinity);
+ return JS_TRUE;
+ }
+ for (i = 0; i < argc; i++) {
+ if (!js_ValueToNumber(cx, argv[i], &x))
+ return JS_FALSE;
+ if (JSDOUBLE_IS_NaN(x)) {
+ *rval = DOUBLE_TO_JSVAL(cx->runtime->jsNaN);
+ return JS_TRUE;
+ }
+ if (x == 0 && x == z && fd_copysign(1.0, z) == -1)
+ z = x;
+ else
+ z = (x > z) ? x : z;
+ }
+ return js_NewNumberValue(cx, z, rval);
+}
+
+static JSBool
+math_min(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsdouble x, z = *cx->runtime->jsPositiveInfinity;
+ uintN i;
+
+ if (argc == 0) {
+ *rval = DOUBLE_TO_JSVAL(cx->runtime->jsPositiveInfinity);
+ return JS_TRUE;
+ }
+ for (i = 0; i < argc; i++) {
+ if (!js_ValueToNumber(cx, argv[i], &x))
+ return JS_FALSE;
+ if (JSDOUBLE_IS_NaN(x)) {
+ *rval = DOUBLE_TO_JSVAL(cx->runtime->jsNaN);
+ return JS_TRUE;
+ }
+ if (x == 0 && x == z && fd_copysign(1.0,x) == -1)
+ z = x;
+ else
+ z = (x < z) ? x : z;
+ }
+ return js_NewNumberValue(cx, z, rval);
+}
+
+static JSBool
+math_pow(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsdouble x, y, z;
+
+ if (!js_ValueToNumber(cx, argv[0], &x))
+ return JS_FALSE;
+ if (!js_ValueToNumber(cx, argv[1], &y))
+ return JS_FALSE;
+#if !JS_USE_FDLIBM_MATH
+ /*
+ * Because C99 and ECMA specify different behavior for pow(),
+ * we need to wrap the libm call to make it ECMA compliant.
+ */
+ if (!JSDOUBLE_IS_FINITE(y) && (x == 1.0 || x == -1.0)) {
+ *rval = DOUBLE_TO_JSVAL(cx->runtime->jsNaN);
+ return JS_TRUE;
+ }
+ /* pow(x, +-0) is always 1, even for x = NaN. */
+ if (y == 0) {
+ *rval = JSVAL_ONE;
+ return JS_TRUE;
+ }
+#endif
+ z = fd_pow(x, y);
+ return js_NewNumberValue(cx, z, rval);
+}
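+
+/*
+ * Editor's sketch, not part of the upstream file: the two ECMA deviations
+ * handled above, written out as a plain C99 helper.  ecma_pow is a
+ * hypothetical name used only for this illustration and relies on the C99
+ * isfinite() and NAN from <math.h>.
+ */
+#include <math.h>
+
+static double ecma_pow(double x, double y)
+{
+    if (!isfinite(y) && (x == 1.0 || x == -1.0))   /* ECMA: (+-1) ** (+-Infinity or NaN) is NaN */
+        return NAN;
+    if (y == 0)                                    /* ECMA: x ** +-0 is 1, even when x is NaN */
+        return 1.0;
+    return pow(x, y);                              /* everything else agrees with libm pow() */
+}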
+
+/*
+ * Math.random() support, lifted from java.util.Random.java.
+ */
+static void
+random_setSeed(JSRuntime *rt, int64 seed)
+{
+ int64 tmp;
+
+ JSLL_I2L(tmp, 1000);
+ JSLL_DIV(seed, seed, tmp);
+ JSLL_XOR(tmp, seed, rt->rngMultiplier);
+ JSLL_AND(rt->rngSeed, tmp, rt->rngMask);
+}
+
+static void
+random_init(JSRuntime *rt)
+{
+ int64 tmp, tmp2;
+
+ /* Do at most once. */
+ if (rt->rngInitialized)
+ return;
+ rt->rngInitialized = JS_TRUE;
+
+ /* rt->rngMultiplier = 0x5DEECE66DL */
+ JSLL_ISHL(tmp, 0x5, 32);
+ JSLL_UI2L(tmp2, 0xDEECE66DL);
+ JSLL_OR(rt->rngMultiplier, tmp, tmp2);
+
+ /* rt->rngAddend = 0xBL */
+ JSLL_I2L(rt->rngAddend, 0xBL);
+
+ /* rt->rngMask = (1L << 48) - 1 */
+ JSLL_I2L(tmp, 1);
+ JSLL_SHL(tmp2, tmp, 48);
+ JSLL_SUB(rt->rngMask, tmp2, tmp);
+
+ /* rt->rngDscale = (jsdouble)(1L << 53) */
+ JSLL_SHL(tmp2, tmp, 53);
+ JSLL_L2D(rt->rngDscale, tmp2);
+
+ /* Finally, set the seed from current time. */
+ random_setSeed(rt, PRMJ_Now());
+}
+
+static uint32
+random_next(JSRuntime *rt, int bits)
+{
+ int64 nextseed, tmp;
+ uint32 retval;
+
+ JSLL_MUL(nextseed, rt->rngSeed, rt->rngMultiplier);
+ JSLL_ADD(nextseed, nextseed, rt->rngAddend);
+ JSLL_AND(nextseed, nextseed, rt->rngMask);
+ rt->rngSeed = nextseed;
+ JSLL_USHR(tmp, nextseed, 48 - bits);
+ JSLL_L2I(retval, tmp);
+ return retval;
+}
+
+static jsdouble
+random_nextDouble(JSRuntime *rt)
+{
+ int64 tmp, tmp2;
+ jsdouble d;
+
+ JSLL_ISHL(tmp, random_next(rt, 26), 27);
+ JSLL_UI2L(tmp2, random_next(rt, 27));
+ JSLL_ADD(tmp, tmp, tmp2);
+ JSLL_L2D(d, tmp);
+ return d / rt->rngDscale;
+}
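+
+/*
+ * Editor's sketch, not part of the upstream file: with a native 64-bit type
+ * the JSLL_* macro sequences above reduce to java.util.Random's 48-bit linear
+ * congruential generator.  The lcg_* names below are hypothetical, exist only
+ * for this illustration, and assume a C99 <stdint.h>.
+ */
+#include <stdint.h>
+
+static uint64_t lcg_seed;
+
+static void lcg_set_seed(uint64_t now)          /* mirrors random_setSeed() */
+{
+    uint64_t seed = now / 1000;                 /* the engine divides its PRMJ_Now() seed by 1000 */
+    lcg_seed = (seed ^ 0x5DEECE66DULL) & ((1ULL << 48) - 1);
+}
+
+static uint32_t lcg_next(int bits)              /* mirrors random_next() */
+{
+    lcg_seed = (lcg_seed * 0x5DEECE66DULL + 0xBULL) & ((1ULL << 48) - 1);
+    return (uint32_t)(lcg_seed >> (48 - bits));
+}
+
+static double lcg_next_double(void)             /* mirrors random_nextDouble() */
+{
+    /* 26 high bits plus 27 more bits give 53 random bits in [0, 2^53),
+     * scaled into [0, 1) by the same 2^53 that rngDscale holds. */
+    uint64_t hi = (uint64_t)lcg_next(26) << 27; /* draw the two halves in order */
+    uint64_t lo = lcg_next(27);
+    return (double)(hi + lo) / (double)(1ULL << 53);
+}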
+
+static JSBool
+math_random(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSRuntime *rt;
+ jsdouble z;
+
+ rt = cx->runtime;
+ JS_LOCK_RUNTIME(rt);
+ random_init(rt);
+ z = random_nextDouble(rt);
+ JS_UNLOCK_RUNTIME(rt);
+ return js_NewNumberValue(cx, z, rval);
+}
+
+#if defined _WIN32 && !defined WINCE && _MSC_VER < 1400
+/* Try to work around apparent _copysign bustage in VC6 and VC7. */
+double
+js_copysign(double x, double y)
+{
+ jsdpun xu, yu;
+
+ xu.d = x;
+ yu.d = y;
+ xu.s.hi &= ~JSDOUBLE_HI32_SIGNBIT;
+ xu.s.hi |= yu.s.hi & JSDOUBLE_HI32_SIGNBIT;
+ return xu.d;
+}
+#endif
+
+static JSBool
+math_round(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsdouble x, z;
+
+ if (!js_ValueToNumber(cx, argv[0], &x))
+ return JS_FALSE;
+ z = fd_copysign(fd_floor(x + 0.5), x);
+ return js_NewNumberValue(cx, z, rval);
+}
+
+static JSBool
+math_sin(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsdouble x, z;
+
+ if (!js_ValueToNumber(cx, argv[0], &x))
+ return JS_FALSE;
+ z = fd_sin(x);
+ return js_NewNumberValue(cx, z, rval);
+}
+
+static JSBool
+math_sqrt(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsdouble x, z;
+
+ if (!js_ValueToNumber(cx, argv[0], &x))
+ return JS_FALSE;
+ z = fd_sqrt(x);
+ return js_NewNumberValue(cx, z, rval);
+}
+
+static JSBool
+math_tan(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsdouble x, z;
+
+ if (!js_ValueToNumber(cx, argv[0], &x))
+ return JS_FALSE;
+ z = fd_tan(x);
+ return js_NewNumberValue(cx, z, rval);
+}
+
+#if JS_HAS_TOSOURCE
+static JSBool
+math_toSource(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ *rval = ATOM_KEY(CLASS_ATOM(cx, Math));
+ return JS_TRUE;
+}
+#endif
+
+static JSFunctionSpec math_static_methods[] = {
+#if JS_HAS_TOSOURCE
+ {js_toSource_str, math_toSource, 0, 0, 0},
+#endif
+ {"abs", math_abs, 1, 0, 0},
+ {"acos", math_acos, 1, 0, 0},
+ {"asin", math_asin, 1, 0, 0},
+ {"atan", math_atan, 1, 0, 0},
+ {"atan2", math_atan2, 2, 0, 0},
+ {"ceil", math_ceil, 1, 0, 0},
+ {"cos", math_cos, 1, 0, 0},
+ {"exp", math_exp, 1, 0, 0},
+ {"floor", math_floor, 1, 0, 0},
+ {"log", math_log, 1, 0, 0},
+ {"max", math_max, 2, 0, 0},
+ {"min", math_min, 2, 0, 0},
+ {"pow", math_pow, 2, 0, 0},
+ {"random", math_random, 0, 0, 0},
+ {"round", math_round, 1, 0, 0},
+ {"sin", math_sin, 1, 0, 0},
+ {"sqrt", math_sqrt, 1, 0, 0},
+ {"tan", math_tan, 1, 0, 0},
+ {0,0,0,0,0}
+};
+
+JSObject *
+js_InitMathClass(JSContext *cx, JSObject *obj)
+{
+ JSObject *Math;
+
+ Math = JS_DefineObject(cx, obj, js_Math_str, &js_MathClass, NULL, 0);
+ if (!Math)
+ return NULL;
+ if (!JS_DefineFunctions(cx, Math, math_static_methods))
+ return NULL;
+ if (!JS_DefineConstDoubles(cx, Math, math_constants))
+ return NULL;
+ return Math;
+}
diff --git a/third_party/js-1.7/jsmath.h b/third_party/js-1.7/jsmath.h
new file mode 100644
index 0000000..1f60630
--- /dev/null
+++ b/third_party/js-1.7/jsmath.h
@@ -0,0 +1,57 @@
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998-1999
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* -*- Mode: C; tab-width: 8 -*-
+ * Copyright (C) 1998-1999 Netscape Communications Corporation, All Rights Reserved.
+ */
+
+#ifndef jsmath_h___
+#define jsmath_h___
+/*
+ * JS math functions.
+ */
+
+JS_BEGIN_EXTERN_C
+
+extern JSClass js_MathClass;
+
+extern JSObject *
+js_InitMathClass(JSContext *cx, JSObject *obj);
+
+JS_END_EXTERN_C
+
+#endif /* jsmath_h___ */
diff --git a/third_party/js-1.7/jsnum.c b/third_party/js-1.7/jsnum.c
new file mode 100644
index 0000000..987619d
--- /dev/null
+++ b/third_party/js-1.7/jsnum.c
@@ -0,0 +1,1147 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ * IBM Corp.
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * JS number type and wrapper class.
+ */
+#include "jsstddef.h"
+#if defined(XP_WIN) || defined(XP_OS2)
+#include <float.h>
+#endif
+#include <locale.h>
+#include <limits.h>
+#include <math.h>
+#include <stdlib.h>
+#include <string.h>
+#include "jstypes.h"
+#include "jsutil.h" /* Added by JSIFY */
+#include "jsapi.h"
+#include "jsatom.h"
+#include "jscntxt.h"
+#include "jsconfig.h"
+#include "jsdtoa.h"
+#include "jsgc.h"
+#include "jsinterp.h"
+#include "jsnum.h"
+#include "jsobj.h"
+#include "jsopcode.h"
+#include "jsprf.h"
+#include "jsstr.h"
+
+static JSBool
+num_isNaN(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsdouble x;
+
+ if (!js_ValueToNumber(cx, argv[0], &x))
+ return JS_FALSE;
+ *rval = BOOLEAN_TO_JSVAL(JSDOUBLE_IS_NaN(x));
+ return JS_TRUE;
+}
+
+static JSBool
+num_isFinite(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsdouble x;
+
+ if (!js_ValueToNumber(cx, argv[0], &x))
+ return JS_FALSE;
+ *rval = BOOLEAN_TO_JSVAL(JSDOUBLE_IS_FINITE(x));
+ return JS_TRUE;
+}
+
+static JSBool
+num_parseFloat(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSString *str;
+ jsdouble d;
+ const jschar *bp, *ep;
+
+ str = js_ValueToString(cx, argv[0]);
+ if (!str)
+ return JS_FALSE;
+ /* XXXbe js_strtod shouldn't require NUL termination */
+ bp = js_UndependString(cx, str);
+ if (!bp)
+ return JS_FALSE;
+ if (!js_strtod(cx, bp, &ep, &d))
+ return JS_FALSE;
+ if (ep == bp) {
+ *rval = DOUBLE_TO_JSVAL(cx->runtime->jsNaN);
+ return JS_TRUE;
+ }
+ return js_NewNumberValue(cx, d, rval);
+}
+
+/* See ECMA 15.1.2.2. */
+static JSBool
+num_parseInt(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsint radix;
+ JSString *str;
+ jsdouble d;
+ const jschar *bp, *ep;
+
+ if (argc > 1) {
+ if (!js_ValueToECMAInt32(cx, argv[1], &radix))
+ return JS_FALSE;
+ } else {
+ radix = 0;
+ }
+ if (radix != 0 && (radix < 2 || radix > 36)) {
+ *rval = DOUBLE_TO_JSVAL(cx->runtime->jsNaN);
+ return JS_TRUE;
+ }
+
+ str = js_ValueToString(cx, argv[0]);
+ if (!str)
+ return JS_FALSE;
+ /* XXXbe js_strtointeger shouldn't require NUL termination */
+ bp = js_UndependString(cx, str);
+ if (!bp)
+ return JS_FALSE;
+ if (!js_strtointeger(cx, bp, &ep, radix, &d))
+ return JS_FALSE;
+ if (ep == bp) {
+ *rval = DOUBLE_TO_JSVAL(cx->runtime->jsNaN);
+ return JS_TRUE;
+ }
+ return js_NewNumberValue(cx, d, rval);
+}
+
+const char js_Infinity_str[] = "Infinity";
+const char js_NaN_str[] = "NaN";
+const char js_isNaN_str[] = "isNaN";
+const char js_isFinite_str[] = "isFinite";
+const char js_parseFloat_str[] = "parseFloat";
+const char js_parseInt_str[] = "parseInt";
+
+static JSFunctionSpec number_functions[] = {
+ {js_isNaN_str, num_isNaN, 1,0,0},
+ {js_isFinite_str, num_isFinite, 1,0,0},
+ {js_parseFloat_str, num_parseFloat, 1,0,0},
+ {js_parseInt_str, num_parseInt, 2,0,0},
+ {0,0,0,0,0}
+};
+
+JSClass js_NumberClass = {
+ js_Number_str,
+ JSCLASS_HAS_PRIVATE | JSCLASS_HAS_CACHED_PROTO(JSProto_Number),
+ JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
+ JS_EnumerateStub, JS_ResolveStub, JS_ConvertStub, JS_FinalizeStub,
+ JSCLASS_NO_OPTIONAL_MEMBERS
+};
+
+static JSBool
+Number(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsdouble d;
+ jsval v;
+
+ if (argc != 0) {
+ if (!js_ValueToNumber(cx, argv[0], &d))
+ return JS_FALSE;
+ } else {
+ d = 0.0;
+ }
+ if (!js_NewNumberValue(cx, d, &v))
+ return JS_FALSE;
+ if (!(cx->fp->flags & JSFRAME_CONSTRUCTING)) {
+ *rval = v;
+ return JS_TRUE;
+ }
+ OBJ_SET_SLOT(cx, obj, JSSLOT_PRIVATE, v);
+ return JS_TRUE;
+}
+
+#if JS_HAS_TOSOURCE
+static JSBool
+num_toSource(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsval v;
+ jsdouble d;
+ char numBuf[DTOSTR_STANDARD_BUFFER_SIZE], *numStr;
+ char buf[64];
+ JSString *str;
+
+ if (JSVAL_IS_NUMBER((jsval)obj)) {
+ v = (jsval)obj;
+ } else {
+ if (!JS_InstanceOf(cx, obj, &js_NumberClass, argv))
+ return JS_FALSE;
+ v = OBJ_GET_SLOT(cx, obj, JSSLOT_PRIVATE);
+ JS_ASSERT(JSVAL_IS_NUMBER(v));
+ }
+ d = JSVAL_IS_INT(v) ? (jsdouble)JSVAL_TO_INT(v) : *JSVAL_TO_DOUBLE(v);
+ numStr = JS_dtostr(numBuf, sizeof numBuf, DTOSTR_STANDARD, 0, d);
+ if (!numStr) {
+ JS_ReportOutOfMemory(cx);
+ return JS_FALSE;
+ }
+ JS_snprintf(buf, sizeof buf, "(new %s(%s))", js_NumberClass.name, numStr);
+ str = JS_NewStringCopyZ(cx, buf);
+ if (!str)
+ return JS_FALSE;
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+#endif
+
+/* The buf must be big enough for MIN_INT to fit including '-' and '\0'. */
+static char *
+IntToString(jsint i, char *buf, size_t bufSize)
+{
+ char *cp;
+ jsuint u;
+
+ u = (i < 0) ? -i : i;
+
+ cp = buf + bufSize; /* one past last buffer cell */
+ *--cp = '\0'; /* null terminate the string to be */
+
+ /*
+ * Build the string from behind. We use multiply and subtraction
+ * instead of modulus because that's much faster.
+ */
+ do {
+ jsuint newu = u / 10;
+ *--cp = (char)(u - newu * 10) + '0';
+ u = newu;
+ } while (u != 0);
+
+ if (i < 0)
+ *--cp = '-';
+
+ return cp;
+}
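+
+/*
+ * Editor's note, not part of the upstream file: IntToString writes digits
+ * backwards from the end of the caller's buffer and returns a pointer into
+ * that buffer, so a typical call site looks like:
+ *
+ *     char buf[12];                     // enough for "-2147483648" plus '\0'
+ *     char *s = IntToString(base, buf, sizeof buf);
+ *
+ * exactly as num_toString() does below for its bad-radix error message.
+ */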
+
+static JSBool
+num_toString(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsval v;
+ jsdouble d;
+ jsint base;
+ JSString *str;
+
+ if (JSVAL_IS_NUMBER((jsval)obj)) {
+ v = (jsval)obj;
+ } else {
+ if (!JS_InstanceOf(cx, obj, &js_NumberClass, argv))
+ return JS_FALSE;
+ v = OBJ_GET_SLOT(cx, obj, JSSLOT_PRIVATE);
+ JS_ASSERT(JSVAL_IS_NUMBER(v));
+ }
+ d = JSVAL_IS_INT(v) ? (jsdouble)JSVAL_TO_INT(v) : *JSVAL_TO_DOUBLE(v);
+ base = 10;
+ if (argc != 0) {
+ if (!js_ValueToECMAInt32(cx, argv[0], &base))
+ return JS_FALSE;
+ if (base < 2 || base > 36) {
+ char numBuf[12];
+ char *numStr = IntToString(base, numBuf, sizeof numBuf);
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_BAD_RADIX,
+ numStr);
+ return JS_FALSE;
+ }
+ }
+ if (base == 10) {
+ str = js_NumberToString(cx, d);
+ } else {
+ char *dStr = JS_dtobasestr(base, d);
+ if (!dStr) {
+ JS_ReportOutOfMemory(cx);
+ return JS_FALSE;
+ }
+ str = JS_NewStringCopyZ(cx, dStr);
+ free(dStr);
+ }
+ if (!str)
+ return JS_FALSE;
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+
+static JSBool
+num_toLocaleString(JSContext *cx, JSObject *obj, uintN argc,
+ jsval *argv, jsval *rval)
+{
+ char thousandsLength, decimalLength;
+ const char *numGrouping, *tmpGroup;
+ JSRuntime *rt;
+ JSString *numStr, *str;
+ char *num, *buf, *dec, *end, *tmpSrc, *tmpDest;
+ int digits, size, remainder, nrepeat;
+
+ /*
+ * Create the string, move back to bytes to make string twiddling
+ * a bit easier and so we can insert platform charset separators.
+ */
+ if (!num_toString(cx, obj, 0, argv, rval))
+ return JS_FALSE;
+ JS_ASSERT(JSVAL_IS_STRING(*rval));
+ numStr = JSVAL_TO_STRING(*rval);
+ num = js_GetStringBytes(cx->runtime, numStr);
+
+ /* Find bit before the decimal. */
+ dec = strchr(num, '.');
+ digits = dec ? dec - num : (int)strlen(num);
+ end = num + digits;
+
+ rt = cx->runtime;
+ thousandsLength = strlen(rt->thousandsSeparator);
+ decimalLength = strlen(rt->decimalSeparator);
+
+ /* Figure out how long resulting string will be. */
+ size = digits + (dec ? decimalLength + strlen(dec + 1) : 0);
+
+ numGrouping = tmpGroup = rt->numGrouping;
+ remainder = digits;
+ if (*num == '-')
+ remainder--;
+
+ while (*tmpGroup != CHAR_MAX && *tmpGroup != '\0') {
+ if (*tmpGroup >= remainder)
+ break;
+ size += thousandsLength;
+ remainder -= *tmpGroup;
+ tmpGroup++;
+ }
+ if (*tmpGroup == '\0' && *numGrouping != '\0') {
+ nrepeat = (remainder - 1) / tmpGroup[-1];
+ size += thousandsLength * nrepeat;
+ remainder -= nrepeat * tmpGroup[-1];
+ } else {
+ nrepeat = 0;
+ }
+ tmpGroup--;
+
+ buf = (char *)JS_malloc(cx, size + 1);
+ if (!buf)
+ return JS_FALSE;
+
+ tmpDest = buf;
+ tmpSrc = num;
+
+ while (*tmpSrc == '-' || remainder--)
+ *tmpDest++ = *tmpSrc++;
+ while (tmpSrc < end) {
+ strcpy(tmpDest, rt->thousandsSeparator);
+ tmpDest += thousandsLength;
+ memcpy(tmpDest, tmpSrc, *tmpGroup);
+ tmpDest += *tmpGroup;
+ tmpSrc += *tmpGroup;
+ if (--nrepeat < 0)
+ tmpGroup--;
+ }
+
+ if (dec) {
+ strcpy(tmpDest, rt->decimalSeparator);
+ tmpDest += decimalLength;
+ strcpy(tmpDest, dec + 1);
+ } else {
+ *tmpDest++ = '\0';
+ }
+
+ if (cx->localeCallbacks && cx->localeCallbacks->localeToUnicode)
+ return cx->localeCallbacks->localeToUnicode(cx, buf, rval);
+
+ str = JS_NewString(cx, buf, size);
+ if (!str) {
+ JS_free(cx, buf);
+ return JS_FALSE;
+ }
+
+ *rval = STRING_TO_JSVAL(str);
+
+ return JS_TRUE;
+}
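+
+/*
+ * Editor's note, not part of the upstream file: a worked example of the
+ * grouping loop above.  With a grouping of "\3\0" (the fallback installed by
+ * js_InitRuntimeNumberState below) and a "," thousands separator, "1234567"
+ * becomes "1,234,567": the first pass copies the leading "1", and each
+ * further pass copies a separator followed by a three-digit group.
+ */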
+
+static JSBool
+num_valueOf(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ if (JSVAL_IS_NUMBER((jsval)obj)) {
+ *rval = (jsval)obj;
+ return JS_TRUE;
+ }
+ if (!JS_InstanceOf(cx, obj, &js_NumberClass, argv))
+ return JS_FALSE;
+ *rval = OBJ_GET_SLOT(cx, obj, JSSLOT_PRIVATE);
+ return JS_TRUE;
+}
+
+
+#define MAX_PRECISION 100
+
+static JSBool
+num_to(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval, JSDToStrMode zeroArgMode,
+ JSDToStrMode oneArgMode, jsint precisionMin, jsint precisionMax, jsint precisionOffset)
+{
+ jsval v;
+ jsdouble d, precision;
+ JSString *str;
+ char buf[DTOSTR_VARIABLE_BUFFER_SIZE(MAX_PRECISION+1)], *numStr; /* Use MAX_PRECISION+1 because precisionOffset can be 1 */
+
+ if (JSVAL_IS_NUMBER((jsval)obj)) {
+ v = (jsval)obj;
+ } else {
+ if (!JS_InstanceOf(cx, obj, &js_NumberClass, argv))
+ return JS_FALSE;
+ v = OBJ_GET_SLOT(cx, obj, JSSLOT_PRIVATE);
+ JS_ASSERT(JSVAL_IS_NUMBER(v));
+ }
+ d = JSVAL_IS_INT(v) ? (jsdouble)JSVAL_TO_INT(v) : *JSVAL_TO_DOUBLE(v);
+
+ if (JSVAL_IS_VOID(argv[0])) {
+ precision = 0.0;
+ oneArgMode = zeroArgMode;
+ } else {
+ if (!js_ValueToNumber(cx, argv[0], &precision))
+ return JS_FALSE;
+ precision = js_DoubleToInteger(precision);
+ if (precision < precisionMin || precision > precisionMax) {
+ numStr = JS_dtostr(buf, sizeof buf, DTOSTR_STANDARD, 0, precision);
+ if (!numStr)
+ JS_ReportOutOfMemory(cx);
+ else
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_PRECISION_RANGE, numStr);
+ return JS_FALSE;
+ }
+ }
+
+ numStr = JS_dtostr(buf, sizeof buf, oneArgMode, (jsint)precision + precisionOffset, d);
+ if (!numStr) {
+ JS_ReportOutOfMemory(cx);
+ return JS_FALSE;
+ }
+ str = JS_NewStringCopyZ(cx, numStr);
+ if (!str)
+ return JS_FALSE;
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+
+static JSBool
+num_toFixed(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ /* We allow a larger range of precision than ECMA requires; this is permitted by ECMA. */
+ return num_to(cx, obj, argc, argv, rval, DTOSTR_FIXED, DTOSTR_FIXED, -20, MAX_PRECISION, 0);
+}
+
+static JSBool
+num_toExponential(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ /* We allow a larger range of precision than ECMA requires; this is permitted by ECMA. */
+ return num_to(cx, obj, argc, argv, rval, DTOSTR_STANDARD_EXPONENTIAL, DTOSTR_EXPONENTIAL, 0, MAX_PRECISION, 1);
+}
+
+static JSBool
+num_toPrecision(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ /* We allow a larger range of precision than ECMA requires; this is permitted by ECMA. */
+ return num_to(cx, obj, argc, argv, rval, DTOSTR_STANDARD, DTOSTR_PRECISION, 1, MAX_PRECISION, 0);
+}
+
+static JSFunctionSpec number_methods[] = {
+#if JS_HAS_TOSOURCE
+ {js_toSource_str, num_toSource, 0,JSFUN_THISP_NUMBER,0},
+#endif
+ {js_toString_str, num_toString, 0,JSFUN_THISP_NUMBER,0},
+ {js_toLocaleString_str, num_toLocaleString, 0,JSFUN_THISP_NUMBER,0},
+ {js_valueOf_str, num_valueOf, 0,JSFUN_THISP_NUMBER,0},
+ {"toFixed", num_toFixed, 1,JSFUN_THISP_NUMBER,0},
+ {"toExponential", num_toExponential, 1,JSFUN_THISP_NUMBER,0},
+ {"toPrecision", num_toPrecision, 1,JSFUN_THISP_NUMBER,0},
+ {0,0,0,0,0}
+};
+
+/* NB: Keep this in synch with number_constants[]. */
+enum nc_slot {
+ NC_NaN,
+ NC_POSITIVE_INFINITY,
+ NC_NEGATIVE_INFINITY,
+ NC_MAX_VALUE,
+ NC_MIN_VALUE,
+ NC_LIMIT
+};
+
+/*
+ * Many C compilers cannot spell these values at compile time, or reject the
+ * attempt, so all but MAX_VALUE are set up by js_InitRuntimeNumberState
+ * using union jsdpun.
+ */
+static JSConstDoubleSpec number_constants[] = {
+ {0, js_NaN_str, 0,{0,0,0}},
+ {0, "POSITIVE_INFINITY", 0,{0,0,0}},
+ {0, "NEGATIVE_INFINITY", 0,{0,0,0}},
+ {1.7976931348623157E+308, "MAX_VALUE", 0,{0,0,0}},
+ {0, "MIN_VALUE", 0,{0,0,0}},
+ {0,0,0,{0,0,0}}
+};
+
+static jsdouble NaN;
+
+#if (defined XP_WIN || defined XP_OS2) && \
+ !defined WINCE && \
+ !defined __MWERKS__ && \
+ (defined _M_IX86 || \
+ (defined __GNUC__ && !defined __MINGW32__))
+
+/*
+ * Set the exception mask to mask all exceptions and set the FPU precision
+ * to 53 bit mantissa.
+ * On the Alpha platform this is handled via a compiler option.
+ */
+#define FIX_FPU() _control87(MCW_EM | PC_53, MCW_EM | MCW_PC)
+
+#else
+
+#define FIX_FPU() ((void)0)
+
+#endif
+
+JSBool
+js_InitRuntimeNumberState(JSContext *cx)
+{
+ JSRuntime *rt;
+ jsdpun u;
+ struct lconv *locale;
+
+ rt = cx->runtime;
+ JS_ASSERT(!rt->jsNaN);
+
+ FIX_FPU();
+
+ u.s.hi = JSDOUBLE_HI32_EXPMASK | JSDOUBLE_HI32_MANTMASK;
+ u.s.lo = 0xffffffff;
+ number_constants[NC_NaN].dval = NaN = u.d;
+ rt->jsNaN = js_NewDouble(cx, NaN, GCF_LOCK);
+ if (!rt->jsNaN)
+ return JS_FALSE;
+
+ u.s.hi = JSDOUBLE_HI32_EXPMASK;
+ u.s.lo = 0x00000000;
+ number_constants[NC_POSITIVE_INFINITY].dval = u.d;
+ rt->jsPositiveInfinity = js_NewDouble(cx, u.d, GCF_LOCK);
+ if (!rt->jsPositiveInfinity)
+ return JS_FALSE;
+
+ u.s.hi = JSDOUBLE_HI32_SIGNBIT | JSDOUBLE_HI32_EXPMASK;
+ u.s.lo = 0x00000000;
+ number_constants[NC_NEGATIVE_INFINITY].dval = u.d;
+ rt->jsNegativeInfinity = js_NewDouble(cx, u.d, GCF_LOCK);
+ if (!rt->jsNegativeInfinity)
+ return JS_FALSE;
+
+ u.s.hi = 0;
+ u.s.lo = 1;
+ number_constants[NC_MIN_VALUE].dval = u.d;
+
+ locale = localeconv();
+ rt->thousandsSeparator =
+ JS_strdup(cx, locale->thousands_sep ? locale->thousands_sep : "'");
+ rt->decimalSeparator =
+ JS_strdup(cx, locale->decimal_point ? locale->decimal_point : ".");
+ rt->numGrouping =
+ JS_strdup(cx, locale->grouping ? locale->grouping : "\3\0");
+
+ return rt->thousandsSeparator && rt->decimalSeparator && rt->numGrouping;
+}
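+
+/*
+ * Editor's sketch, not part of the upstream file: the jsdpun stores above
+ * assemble these whole 64-bit IEEE-754 patterns -- NaN 0x7fffffffffffffff,
+ * +Infinity 0x7ff0000000000000, -Infinity 0xfff0000000000000, and MIN_VALUE
+ * 0x0000000000000001 (the smallest subnormal, about 5e-324).  The helper
+ * below rebuilds +Infinity the same way and compares it with <math.h>'s
+ * HUGE_VAL; the function name is hypothetical.
+ */
+static JSBool
+check_positive_infinity_bits(void)
+{
+    jsdpun u;
+
+    u.s.hi = JSDOUBLE_HI32_EXPMASK;     /* exponent all ones */
+    u.s.lo = 0x00000000;                /* mantissa all zeros */
+    return u.d == HUGE_VAL;             /* HUGE_VAL is +Infinity on IEEE-754 hosts */
+}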
+
+void
+js_FinishRuntimeNumberState(JSContext *cx)
+{
+ JSRuntime *rt = cx->runtime;
+
+ js_UnlockGCThingRT(rt, rt->jsNaN);
+ js_UnlockGCThingRT(rt, rt->jsNegativeInfinity);
+ js_UnlockGCThingRT(rt, rt->jsPositiveInfinity);
+
+ rt->jsNaN = NULL;
+ rt->jsNegativeInfinity = NULL;
+ rt->jsPositiveInfinity = NULL;
+
+ JS_free(cx, (void *)rt->thousandsSeparator);
+ JS_free(cx, (void *)rt->decimalSeparator);
+ JS_free(cx, (void *)rt->numGrouping);
+ rt->thousandsSeparator = rt->decimalSeparator = rt->numGrouping = NULL;
+}
+
+JSObject *
+js_InitNumberClass(JSContext *cx, JSObject *obj)
+{
+ JSObject *proto, *ctor;
+ JSRuntime *rt;
+
+ /* XXX must do at least once per new thread, so do it per JSContext... */
+ FIX_FPU();
+
+ if (!JS_DefineFunctions(cx, obj, number_functions))
+ return NULL;
+
+ proto = JS_InitClass(cx, obj, NULL, &js_NumberClass, Number, 1,
+ NULL, number_methods, NULL, NULL);
+ if (!proto || !(ctor = JS_GetConstructor(cx, proto)))
+ return NULL;
+ OBJ_SET_SLOT(cx, proto, JSSLOT_PRIVATE, JSVAL_ZERO);
+ if (!JS_DefineConstDoubles(cx, ctor, number_constants))
+ return NULL;
+
+ /* ECMA 15.1.1.1 */
+ rt = cx->runtime;
+ if (!JS_DefineProperty(cx, obj, js_NaN_str, DOUBLE_TO_JSVAL(rt->jsNaN),
+ NULL, NULL, JSPROP_PERMANENT)) {
+ return NULL;
+ }
+
+ /* ECMA 15.1.1.2 */
+ if (!JS_DefineProperty(cx, obj, js_Infinity_str,
+ DOUBLE_TO_JSVAL(rt->jsPositiveInfinity),
+ NULL, NULL, JSPROP_PERMANENT)) {
+ return NULL;
+ }
+ return proto;
+}
+
+jsdouble *
+js_NewDouble(JSContext *cx, jsdouble d, uintN gcflag)
+{
+ jsdouble *dp;
+
+ dp = (jsdouble *) js_NewGCThing(cx, gcflag | GCX_DOUBLE, sizeof(jsdouble));
+ if (!dp)
+ return NULL;
+ *dp = d;
+ return dp;
+}
+
+void
+js_FinalizeDouble(JSContext *cx, jsdouble *dp)
+{
+ *dp = NaN;
+}
+
+JSBool
+js_NewDoubleValue(JSContext *cx, jsdouble d, jsval *rval)
+{
+ jsdouble *dp;
+
+ dp = js_NewDouble(cx, d, 0);
+ if (!dp)
+ return JS_FALSE;
+ *rval = DOUBLE_TO_JSVAL(dp);
+ return JS_TRUE;
+}
+
+JSBool
+js_NewNumberValue(JSContext *cx, jsdouble d, jsval *rval)
+{
+ jsint i;
+
+ if (JSDOUBLE_IS_INT(d, i) && INT_FITS_IN_JSVAL(i)) {
+ *rval = INT_TO_JSVAL(i);
+ } else {
+ if (!js_NewDoubleValue(cx, d, rval))
+ return JS_FALSE;
+ }
+ return JS_TRUE;
+}
+
+JSObject *
+js_NumberToObject(JSContext *cx, jsdouble d)
+{
+ JSObject *obj;
+ jsval v;
+
+ obj = js_NewObject(cx, &js_NumberClass, NULL, NULL);
+ if (!obj)
+ return NULL;
+ if (!js_NewNumberValue(cx, d, &v)) {
+ cx->weakRoots.newborn[GCX_OBJECT] = NULL;
+ return NULL;
+ }
+ OBJ_SET_SLOT(cx, obj, JSSLOT_PRIVATE, v);
+ return obj;
+}
+
+JSString *
+js_NumberToString(JSContext *cx, jsdouble d)
+{
+ jsint i;
+ char buf[DTOSTR_STANDARD_BUFFER_SIZE];
+ char *numStr;
+
+ if (JSDOUBLE_IS_INT(d, i)) {
+ numStr = IntToString(i, buf, sizeof buf);
+ } else {
+ numStr = JS_dtostr(buf, sizeof buf, DTOSTR_STANDARD, 0, d);
+ if (!numStr) {
+ JS_ReportOutOfMemory(cx);
+ return NULL;
+ }
+ }
+ return JS_NewStringCopyZ(cx, numStr);
+}
+
+JSBool
+js_ValueToNumber(JSContext *cx, jsval v, jsdouble *dp)
+{
+ JSObject *obj;
+ JSString *str;
+ const jschar *bp, *ep;
+
+ if (JSVAL_IS_OBJECT(v)) {
+ obj = JSVAL_TO_OBJECT(v);
+ if (!obj) {
+ *dp = 0;
+ return JS_TRUE;
+ }
+ if (!OBJ_DEFAULT_VALUE(cx, obj, JSTYPE_NUMBER, &v))
+ return JS_FALSE;
+ }
+ if (JSVAL_IS_INT(v)) {
+ *dp = (jsdouble)JSVAL_TO_INT(v);
+ } else if (JSVAL_IS_DOUBLE(v)) {
+ *dp = *JSVAL_TO_DOUBLE(v);
+ } else if (JSVAL_IS_STRING(v)) {
+ str = JSVAL_TO_STRING(v);
+ /*
+ * Note that ECMA doesn't treat a string beginning with a '0' as an
+ * octal number here. This works because all such numbers will be
+ * interpreted as decimal by js_strtod and will never get passed to
+ * js_strtointeger (which would interpret them as octal).
+ */
+ /* XXXbe js_strtod shouldn't require NUL termination */
+ bp = js_UndependString(cx, str);
+ if (!bp)
+ return JS_FALSE;
+ if ((!js_strtod(cx, bp, &ep, dp) ||
+ js_SkipWhiteSpace(ep) != bp + str->length) &&
+ (!js_strtointeger(cx, bp, &ep, 0, dp) ||
+ js_SkipWhiteSpace(ep) != bp + str->length)) {
+ goto badstr;
+ }
+ } else if (JSVAL_IS_BOOLEAN(v)) {
+ *dp = JSVAL_TO_BOOLEAN(v) ? 1 : 0;
+ } else {
+badstr:
+ *dp = *cx->runtime->jsNaN;
+ }
+ return JS_TRUE;
+}
+
+JSBool
+js_ValueToECMAInt32(JSContext *cx, jsval v, int32 *ip)
+{
+ jsdouble d;
+
+ if (!js_ValueToNumber(cx, v, &d))
+ return JS_FALSE;
+ return js_DoubleToECMAInt32(cx, d, ip);
+}
+
+JSBool
+js_DoubleToECMAInt32(JSContext *cx, jsdouble d, int32 *ip)
+{
+ jsdouble two32 = 4294967296.0;
+ jsdouble two31 = 2147483648.0;
+
+ if (!JSDOUBLE_IS_FINITE(d) || d == 0) {
+ *ip = 0;
+ return JS_TRUE;
+ }
+ d = fmod(d, two32);
+ d = (d >= 0) ? floor(d) : ceil(d) + two32;
+ if (d >= two31)
+ *ip = (int32)(d - two32);
+ else
+ *ip = (int32)d;
+ return JS_TRUE;
+}
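+
+/*
+ * Editor's sketch, not part of the upstream file: a few worked ToInt32 values
+ * for the modular reduction above, written as asserts.  The helper name is
+ * hypothetical; it uses only functions defined in this file.
+ */
+static void
+check_ecma_int32_examples(JSContext *cx)
+{
+    int32 i;
+
+    js_DoubleToECMAInt32(cx, 4294967296.0, &i);    /* 2^32 wraps to 0 */
+    JS_ASSERT(i == 0);
+    js_DoubleToECMAInt32(cx, 2147483648.0, &i);    /* 2^31 maps to -2^31 */
+    JS_ASSERT(i == -2147483647 - 1);
+    js_DoubleToECMAInt32(cx, -1.5, &i);            /* truncated toward zero, then reduced */
+    JS_ASSERT(i == -1);
+    js_DoubleToECMAInt32(cx, 4294967295.0, &i);    /* 2^32 - 1 maps to -1 */
+    JS_ASSERT(i == -1);
+}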
+
+JSBool
+js_ValueToECMAUint32(JSContext *cx, jsval v, uint32 *ip)
+{
+ jsdouble d;
+
+ if (!js_ValueToNumber(cx, v, &d))
+ return JS_FALSE;
+ return js_DoubleToECMAUint32(cx, d, ip);
+}
+
+JSBool
+js_DoubleToECMAUint32(JSContext *cx, jsdouble d, uint32 *ip)
+{
+ JSBool neg;
+ jsdouble two32 = 4294967296.0;
+
+ if (!JSDOUBLE_IS_FINITE(d) || d == 0) {
+ *ip = 0;
+ return JS_TRUE;
+ }
+
+ neg = (d < 0);
+ d = floor(neg ? -d : d);
+ d = neg ? -d : d;
+
+ d = fmod(d, two32);
+
+ d = (d >= 0) ? d : d + two32;
+ *ip = (uint32)d;
+ return JS_TRUE;
+}
+
+JSBool
+js_ValueToInt32(JSContext *cx, jsval v, int32 *ip)
+{
+ jsdouble d;
+ JSString *str;
+
+ if (JSVAL_IS_INT(v)) {
+ *ip = JSVAL_TO_INT(v);
+ return JS_TRUE;
+ }
+ if (!js_ValueToNumber(cx, v, &d))
+ return JS_FALSE;
+ if (JSDOUBLE_IS_NaN(d) || d <= -2147483649.0 || 2147483648.0 <= d) {
+ str = js_DecompileValueGenerator(cx, JSDVG_SEARCH_STACK, v, NULL);
+ if (str) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_CANT_CONVERT, JS_GetStringBytes(str));
+
+ }
+ return JS_FALSE;
+ }
+ *ip = (int32)floor(d + 0.5); /* Round to nearest */
+ return JS_TRUE;
+}
+
+JSBool
+js_ValueToUint16(JSContext *cx, jsval v, uint16 *ip)
+{
+ jsdouble d;
+ jsuint i, m;
+ JSBool neg;
+
+ if (!js_ValueToNumber(cx, v, &d))
+ return JS_FALSE;
+ if (d == 0 || !JSDOUBLE_IS_FINITE(d)) {
+ *ip = 0;
+ return JS_TRUE;
+ }
+ i = (jsuint)d;
+ if ((jsdouble)i == d) {
+ *ip = (uint16)i;
+ return JS_TRUE;
+ }
+ neg = (d < 0);
+ d = floor(neg ? -d : d);
+ d = neg ? -d : d;
+ m = JS_BIT(16);
+ d = fmod(d, (double)m);
+ if (d < 0)
+ d += m;
+ *ip = (uint16) d;
+ return JS_TRUE;
+}
+
+jsdouble
+js_DoubleToInteger(jsdouble d)
+{
+ JSBool neg;
+
+ if (d == 0)
+ return d;
+ if (!JSDOUBLE_IS_FINITE(d)) {
+ if (JSDOUBLE_IS_NaN(d))
+ return 0;
+ return d;
+ }
+ neg = (d < 0);
+ d = floor(neg ? -d : d);
+ return neg ? -d : d;
+}
+
+
+JSBool
+js_strtod(JSContext *cx, const jschar *s, const jschar **ep, jsdouble *dp)
+{
+ char cbuf[32];
+ size_t i;
+ char *cstr, *istr, *estr;
+ JSBool negative;
+ jsdouble d;
+ const jschar *s1 = js_SkipWhiteSpace(s);
+ size_t length = js_strlen(s1);
+
+ /* Use cbuf to avoid malloc */
+ if (length >= sizeof cbuf) {
+ cstr = (char *) JS_malloc(cx, length + 1);
+ if (!cstr)
+ return JS_FALSE;
+ } else {
+ cstr = cbuf;
+ }
+
+ for (i = 0; i <= length; i++) {
+ if (s1[i] >> 8) {
+ cstr[i] = 0;
+ break;
+ }
+ cstr[i] = (char)s1[i];
+ }
+
+ istr = cstr;
+ if ((negative = (*istr == '-')) != 0 || *istr == '+')
+ istr++;
+ if (!strncmp(istr, js_Infinity_str, sizeof js_Infinity_str - 1)) {
+ d = *(negative ? cx->runtime->jsNegativeInfinity : cx->runtime->jsPositiveInfinity);
+ estr = istr + 8;
+ } else {
+ int err;
+ d = JS_strtod(cstr, &estr, &err);
+ if (err == JS_DTOA_ENOMEM) {
+ JS_ReportOutOfMemory(cx);
+ if (cstr != cbuf)
+ JS_free(cx, cstr);
+ return JS_FALSE;
+ }
+ if (err == JS_DTOA_ERANGE) {
+ if (d == HUGE_VAL)
+ d = *cx->runtime->jsPositiveInfinity;
+ else if (d == -HUGE_VAL)
+ d = *cx->runtime->jsNegativeInfinity;
+ }
+#ifdef HPUX
+ if (d == 0.0 && negative) {
+ /*
+ * "-0", "-1e-2000" come out as positive zero
+ * here on HPUX. Force a negative zero instead.
+ */
+ JSDOUBLE_HI32(d) = JSDOUBLE_HI32_SIGNBIT;
+ JSDOUBLE_LO32(d) = 0;
+ }
+#endif
+ }
+
+ i = estr - cstr;
+ if (cstr != cbuf)
+ JS_free(cx, cstr);
+ *ep = i ? s1 + i : s;
+ *dp = d;
+ return JS_TRUE;
+}
+
+struct BinaryDigitReader
+{
+ uintN base; /* Base of number; must be a power of 2 */
+ uintN digit; /* Current digit value in radix given by base */
+ uintN digitMask; /* Mask to extract the next bit from digit */
+ const jschar *digits; /* Pointer to the remaining digits */
+ const jschar *end; /* Pointer to first non-digit */
+};
+
+/* Return the next binary digit from the number or -1 if done */
+static intN GetNextBinaryDigit(struct BinaryDigitReader *bdr)
+{
+ intN bit;
+
+ if (bdr->digitMask == 0) {
+ uintN c;
+
+ if (bdr->digits == bdr->end)
+ return -1;
+
+ c = *bdr->digits++;
+ if ('0' <= c && c <= '9')
+ bdr->digit = c - '0';
+ else if ('a' <= c && c <= 'z')
+ bdr->digit = c - 'a' + 10;
+ else bdr->digit = c - 'A' + 10;
+ bdr->digitMask = bdr->base >> 1;
+ }
+ bit = (bdr->digit & bdr->digitMask) != 0;
+ bdr->digitMask >>= 1;
+ return bit;
+}
+
+JSBool
+js_strtointeger(JSContext *cx, const jschar *s, const jschar **ep, jsint base, jsdouble *dp)
+{
+ JSBool negative;
+ jsdouble value;
+ const jschar *start;
+ const jschar *s1 = js_SkipWhiteSpace(s);
+
+ if ((negative = (*s1 == '-')) != 0 || *s1 == '+')
+ s1++;
+
+ if (base == 0) {
+ /* No base supplied, or some base that evaluated to 0. */
+ if (*s1 == '0') {
+ /* It's either hex or octal; only increment char if str isn't '0' */
+ if (s1[1] == 'X' || s1[1] == 'x') { /* Hex */
+ s1 += 2;
+ base = 16;
+ } else { /* Octal */
+ base = 8;
+ }
+ } else {
+ base = 10; /* Default to decimal. */
+ }
+ } else if (base == 16 && *s1 == '0' && (s1[1] == 'X' || s1[1] == 'x')) {
+ /* If base is 16, ignore hex prefix. */
+ s1 += 2;
+ }
+
+ /*
+ * Done with the preliminaries; find some prefix of the string that's
+ * a number in the given base.
+ */
+ start = s1; /* Mark - if string is empty, we return NaN. */
+ value = 0.0;
+ for (;;) {
+ uintN digit;
+ jschar c = *s1;
+ if ('0' <= c && c <= '9')
+ digit = c - '0';
+ else if ('a' <= c && c <= 'z')
+ digit = c - 'a' + 10;
+ else if ('A' <= c && c <= 'Z')
+ digit = c - 'A' + 10;
+ else
+ break;
+ if (digit >= (uintN)base)
+ break;
+ value = value * base + digit;
+ s1++;
+ }
+
+ if (value >= 9007199254740992.0) {
+ if (base == 10) {
+ /*
+ * If we're accumulating a decimal number and the number is >=
+ * 2^53, then the result from the repeated multiply-add above may
+ * be inaccurate. Call JS_strtod to get the correct answer.
+ */
+ size_t i;
+ size_t length = s1 - start;
+ char *cstr = (char *) JS_malloc(cx, length + 1);
+ char *estr;
+ int err=0;
+
+ if (!cstr)
+ return JS_FALSE;
+ for (i = 0; i != length; i++)
+ cstr[i] = (char)start[i];
+ cstr[length] = 0;
+
+ value = JS_strtod(cstr, &estr, &err);
+ if (err == JS_DTOA_ENOMEM) {
+ JS_ReportOutOfMemory(cx);
+ JS_free(cx, cstr);
+ return JS_FALSE;
+ }
+ if (err == JS_DTOA_ERANGE && value == HUGE_VAL)
+ value = *cx->runtime->jsPositiveInfinity;
+ JS_free(cx, cstr);
+ } else if ((base & (base - 1)) == 0) {
+ /*
+ * The number may also be inaccurate for power-of-two bases. This
+ * happens if the addition in value * base + digit causes a round-
+ * down to an even least significant mantissa bit when the first
+ * dropped bit is a one. If any of the following digits in the
+ * number (which haven't been added in yet) are nonzero, then the
+ * correct action would have been to round up instead of down. An
+ * example occurs when reading the number 0x1000000000000081, which
+ * rounds to 0x1000000000000000 instead of 0x1000000000000100.
+ */
+ struct BinaryDigitReader bdr;
+ intN bit, bit2;
+ intN j;
+
+ bdr.base = base;
+ bdr.digitMask = 0;
+ bdr.digits = start;
+ bdr.end = s1;
+ value = 0.0;
+
+ /* Skip leading zeros. */
+ do {
+ bit = GetNextBinaryDigit(&bdr);
+ } while (bit == 0);
+
+ if (bit == 1) {
+ /* Gather the 53 significant bits (including the leading 1) */
+ value = 1.0;
+ for (j = 52; j; j--) {
+ bit = GetNextBinaryDigit(&bdr);
+ if (bit < 0)
+ goto done;
+ value = value*2 + bit;
+ }
+ /* bit2 is the 54th bit (the first dropped from the mantissa) */
+ bit2 = GetNextBinaryDigit(&bdr);
+ if (bit2 >= 0) {
+ jsdouble factor = 2.0;
+ intN sticky = 0; /* sticky is 1 if any bit beyond the 54th is 1 */
+ intN bit3;
+
+ while ((bit3 = GetNextBinaryDigit(&bdr)) >= 0) {
+ sticky |= bit3;
+ factor *= 2;
+ }
+ value += bit2 & (bit | sticky);
+ value *= factor;
+ }
+ done:;
+ }
+ }
+ }
+ /* We don't worry about inaccurate numbers for any other base. */
+
+ if (s1 == start) {
+ *dp = 0.0;
+ *ep = s;
+ } else {
+ *dp = negative ? -value : value;
+ *ep = s1;
+ }
+ return JS_TRUE;
+}
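+
+/*
+ * Editor's sketch, not part of the upstream file: the rounding problem that
+ * the power-of-two branch above corrects, reproduced with native arithmetic.
+ * The helper name is hypothetical and the 0x...ULL literal assumes a 64-bit
+ * unsigned type.
+ */
+static void
+check_pow2_radix_rounding(void)
+{
+    const char *digits = "1000000000000081";       /* the example from the comment above */
+    const char *p;
+    double naive = 0.0;
+
+    for (p = digits; *p; p++)                      /* the uncorrected multiply-add loop */
+        naive = naive * 16 + (*p <= '9' ? *p - '0' : *p - 'a' + 10);
+
+    JS_ASSERT(naive == 1152921504606846976.0);     /* 0x1000000000000000: rounded down */
+    JS_ASSERT((double)0x1000000000000081ULL ==
+              1152921504606847232.0);              /* 0x1000000000000100: correctly rounded */
+}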
diff --git a/third_party/js-1.7/jsnum.h b/third_party/js-1.7/jsnum.h
new file mode 100644
index 0000000..cd99501
--- /dev/null
+++ b/third_party/js-1.7/jsnum.h
@@ -0,0 +1,268 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsnum_h___
+#define jsnum_h___
+/*
+ * JS number (IEEE double) interface.
+ *
+ * JS numbers are optimistically stored in the top 31 bits of 32-bit integers,
+ * but floating point literals, results that overflow 31 bits, and division and
+ * modulus operands and results require a 64-bit IEEE double. These are GC'ed
+ * and pointed to by 32-bit jsvals on the stack and in object properties.
+ *
+ * When a JS number is treated as an object (followed by . or []), the runtime
+ * wraps it with a JSObject whose valueOf method returns the unwrapped number.
+ */
+
+JS_BEGIN_EXTERN_C
+
+/*
+ * Stefan Hanske <sh990154@mail.uni-greifswald.de> reports:
+ * ARM is a little-endian architecture, but 64-bit doubles are stored
+ * differently: each 32-bit word is in little-endian byte order, while the
+ * two words themselves are stored in big-endian order.
+ */
+
+#if defined(__arm) || defined(__arm32__) || defined(__arm26__) || defined(__arm__)
+#define CPU_IS_ARM
+#endif
+
+typedef union jsdpun {
+ struct {
+#if defined(IS_LITTLE_ENDIAN) && !defined(CPU_IS_ARM)
+ uint32 lo, hi;
+#else
+ uint32 hi, lo;
+#endif
+ } s;
+ jsdouble d;
+} jsdpun;
+
+#if (__GNUC__ == 2 && __GNUC_MINOR__ > 95) || __GNUC__ > 2
+/*
+ * This version of the macros is safe for the alias optimizations that gcc
+ * does, but uses gcc-specific extensions.
+ */
+
+#define JSDOUBLE_HI32(x) (__extension__ ({ jsdpun u; u.d = (x); u.s.hi; }))
+#define JSDOUBLE_LO32(x) (__extension__ ({ jsdpun u; u.d = (x); u.s.lo; }))
+#define JSDOUBLE_SET_HI32(x, y) \
+ (__extension__ ({ jsdpun u; u.d = (x); u.s.hi = (y); (x) = u.d; }))
+#define JSDOUBLE_SET_LO32(x, y) \
+ (__extension__ ({ jsdpun u; u.d = (x); u.s.lo = (y); (x) = u.d; }))
+
+#else /* not GNUC, or old GNUC */
+
+/*
+ * We don't know of any non-gcc compilers that perform alias optimization,
+ * so this code should work.
+ */
+
+#if defined(IS_LITTLE_ENDIAN) && !defined(CPU_IS_ARM)
+#define JSDOUBLE_HI32(x) (((uint32 *)&(x))[1])
+#define JSDOUBLE_LO32(x) (((uint32 *)&(x))[0])
+#else
+#define JSDOUBLE_HI32(x) (((uint32 *)&(x))[0])
+#define JSDOUBLE_LO32(x) (((uint32 *)&(x))[1])
+#endif
+
+#define JSDOUBLE_SET_HI32(x, y) (JSDOUBLE_HI32(x)=(y))
+#define JSDOUBLE_SET_LO32(x, y) (JSDOUBLE_LO32(x)=(y))
+
+#endif /* not GNUC, or old GNUC */
+
+#define JSDOUBLE_HI32_SIGNBIT 0x80000000
+#define JSDOUBLE_HI32_EXPMASK 0x7ff00000
+#define JSDOUBLE_HI32_MANTMASK 0x000fffff
+
+#define JSDOUBLE_IS_NaN(x) \
+ ((JSDOUBLE_HI32(x) & JSDOUBLE_HI32_EXPMASK) == JSDOUBLE_HI32_EXPMASK && \
+ (JSDOUBLE_LO32(x) || (JSDOUBLE_HI32(x) & JSDOUBLE_HI32_MANTMASK)))
+
+#define JSDOUBLE_IS_INFINITE(x) \
+ ((JSDOUBLE_HI32(x) & ~JSDOUBLE_HI32_SIGNBIT) == JSDOUBLE_HI32_EXPMASK && \
+ !JSDOUBLE_LO32(x))
+
+#define JSDOUBLE_IS_FINITE(x) \
+ ((JSDOUBLE_HI32(x) & JSDOUBLE_HI32_EXPMASK) != JSDOUBLE_HI32_EXPMASK)
+
+#define JSDOUBLE_IS_NEGZERO(d) (JSDOUBLE_HI32(d) == JSDOUBLE_HI32_SIGNBIT && \
+ JSDOUBLE_LO32(d) == 0)
+
+/*
+ * JSDOUBLE_IS_INT first checks that d is neither NaN nor infinite, to avoid
+ * raising SIGFPE on platforms such as Alpha Linux, then (only if the cast is
+ * safe) leaves i as (jsint)d. This also avoids anomalous NaN floating point
+ * comparisons under MSVC.
+ */
+#define JSDOUBLE_IS_INT(d, i) (JSDOUBLE_IS_FINITE(d) \
+ && !JSDOUBLE_IS_NEGZERO(d) \
+ && ((d) == (i = (jsint)(d))))
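+
+/*
+ * Editor's note, not part of the upstream file: JSDOUBLE_IS_INT both tests d
+ * and, on success, leaves the integer value in i, so callers use it as
+ * js_NewNumberValue() does in jsnum.c:
+ *
+ *     jsint i;
+ *     if (JSDOUBLE_IS_INT(d, i) && INT_FITS_IN_JSVAL(i))
+ *         *rval = INT_TO_JSVAL(i);
+ */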
+
+#if defined(XP_WIN)
+#define JSDOUBLE_COMPARE(LVAL, OP, RVAL, IFNAN) \
+ ((JSDOUBLE_IS_NaN(LVAL) || JSDOUBLE_IS_NaN(RVAL)) \
+ ? (IFNAN) \
+ : (LVAL) OP (RVAL))
+#else
+#define JSDOUBLE_COMPARE(LVAL, OP, RVAL, IFNAN) ((LVAL) OP (RVAL))
+#endif
+
+/* Initialize number constants and runtime state for the first context. */
+extern JSBool
+js_InitRuntimeNumberState(JSContext *cx);
+
+extern void
+js_FinishRuntimeNumberState(JSContext *cx);
+
+/* Initialize the Number class, returning its prototype object. */
+extern JSClass js_NumberClass;
+
+extern JSObject *
+js_InitNumberClass(JSContext *cx, JSObject *obj);
+
+/*
+ * String constants for global function names, used in jsapi.c and jsnum.c.
+ */
+extern const char js_Infinity_str[];
+extern const char js_NaN_str[];
+extern const char js_isNaN_str[];
+extern const char js_isFinite_str[];
+extern const char js_parseFloat_str[];
+extern const char js_parseInt_str[];
+
+/* GC-allocate a new JS number. */
+extern jsdouble *
+js_NewDouble(JSContext *cx, jsdouble d, uintN gcflag);
+
+extern void
+js_FinalizeDouble(JSContext *cx, jsdouble *dp);
+
+extern JSBool
+js_NewDoubleValue(JSContext *cx, jsdouble d, jsval *rval);
+
+extern JSBool
+js_NewNumberValue(JSContext *cx, jsdouble d, jsval *rval);
+
+/* Construct a Number instance that wraps around d. */
+extern JSObject *
+js_NumberToObject(JSContext *cx, jsdouble d);
+
+/* Convert a number to a GC'ed string. */
+extern JSString *
+js_NumberToString(JSContext *cx, jsdouble d);
+
+/*
+ * Convert a value to a number, returning false after reporting any error,
+ * otherwise returning true with *dp set.
+ */
+extern JSBool
+js_ValueToNumber(JSContext *cx, jsval v, jsdouble *dp);
+
+/*
+ * Convert a value or a double to an int32, according to the ECMA rules
+ * for ToInt32.
+ */
+extern JSBool
+js_ValueToECMAInt32(JSContext *cx, jsval v, int32 *ip);
+
+extern JSBool
+js_DoubleToECMAInt32(JSContext *cx, jsdouble d, int32 *ip);
+
+/*
+ * Convert a value or a double to a uint32, according to the ECMA rules
+ * for ToUint32.
+ */
+extern JSBool
+js_ValueToECMAUint32(JSContext *cx, jsval v, uint32 *ip);
+
+extern JSBool
+js_DoubleToECMAUint32(JSContext *cx, jsdouble d, uint32 *ip);
+
+/*
+ * Convert a value to a number, then to an int32 if it fits by rounding to
+ * nearest; but failing with an error report if the double is out of range
+ * or unordered.
+ */
+extern JSBool
+js_ValueToInt32(JSContext *cx, jsval v, int32 *ip);
+
+/*
+ * Convert a value to a number, then to a uint16 according to the ECMA rules
+ * for ToUint16.
+ */
+extern JSBool
+js_ValueToUint16(JSContext *cx, jsval v, uint16 *ip);
+
+/*
+ * Convert a jsdouble to an integral number, stored in a jsdouble.
+ * If d is NaN, return 0. If d is an infinity, return it without conversion.
+ */
+extern jsdouble
+js_DoubleToInteger(jsdouble d);
+
+/*
+ * Similar to strtod except that it replaces overflows with infinities of the
+ * correct sign, and underflows with zeros of the correct sign. Guaranteed to
+ * return the closest double number to the given input in dp.
+ *
+ * Also allows inputs of the form [+|-]Infinity, which produce an infinity of
+ * the appropriate sign. The case of the "Infinity" string must match exactly.
+ * If the string does not contain a number, set *ep to s and return 0.0 in dp.
+ * Return false if out of memory.
+ */
+extern JSBool
+js_strtod(JSContext *cx, const jschar *s, const jschar **ep, jsdouble *dp);
+
+/*
+ * Similar to strtol except that it handles integers of arbitrary size.
+ * Guaranteed to return the closest double number to the given input when radix
+ * is 10 or a power of 2. Callers may see round-off errors for very large
+ * numbers of a different radix than 10 or a power of 2.
+ *
+ * If the string does not contain a number, set *ep to s and return 0.0 in dp.
+ * Return false if out of memory.
+ */
+extern JSBool
+js_strtointeger(JSContext *cx, const jschar *s, const jschar **ep, jsint radix, jsdouble *dp);
+
+JS_END_EXTERN_C
+
+#endif /* jsnum_h___ */
diff --git a/third_party/js-1.7/jsobj.c b/third_party/js-1.7/jsobj.c
new file mode 100644
index 0000000..b552aca
--- /dev/null
+++ b/third_party/js-1.7/jsobj.c
@@ -0,0 +1,5035 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sw=4 et tw=78:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * JS object implementation.
+ */
+#include "jsstddef.h"
+#include <stdlib.h>
+#include <string.h>
+#include "jstypes.h"
+#include "jsarena.h" /* Added by JSIFY */
+#include "jsbit.h"
+#include "jsutil.h" /* Added by JSIFY */
+#include "jshash.h" /* Added by JSIFY */
+#include "jsdhash.h"
+#include "jsprf.h"
+#include "jsapi.h"
+#include "jsarray.h"
+#include "jsatom.h"
+#include "jsbool.h"
+#include "jscntxt.h"
+#include "jsconfig.h"
+#include "jsfun.h"
+#include "jsgc.h"
+#include "jsinterp.h"
+#include "jslock.h"
+#include "jsnum.h"
+#include "jsobj.h"
+#include "jsscan.h"
+#include "jsscope.h"
+#include "jsscript.h"
+#include "jsstr.h"
+#include "jsopcode.h"
+
+#include "jsdbgapi.h" /* whether or not JS_HAS_OBJ_WATCHPOINT */
+
+#if JS_HAS_GENERATORS
+#include "jsiter.h"
+#endif
+
+#if JS_HAS_XML_SUPPORT
+#include "jsxml.h"
+#endif
+
+#if JS_HAS_XDR
+#include "jsxdrapi.h"
+#endif
+
+#ifdef JS_THREADSAFE
+#define NATIVE_DROP_PROPERTY js_DropProperty
+
+extern void
+js_DropProperty(JSContext *cx, JSObject *obj, JSProperty *prop);
+#else
+#define NATIVE_DROP_PROPERTY NULL
+#endif
+
+JS_FRIEND_DATA(JSObjectOps) js_ObjectOps = {
+ js_NewObjectMap, js_DestroyObjectMap,
+ js_LookupProperty, js_DefineProperty,
+ js_GetProperty, js_SetProperty,
+ js_GetAttributes, js_SetAttributes,
+ js_DeleteProperty, js_DefaultValue,
+ js_Enumerate, js_CheckAccess,
+ NULL, NATIVE_DROP_PROPERTY,
+ js_Call, js_Construct,
+ NULL, js_HasInstance,
+ js_SetProtoOrParent, js_SetProtoOrParent,
+ js_Mark, js_Clear,
+ js_GetRequiredSlot, js_SetRequiredSlot
+};
+
+JSClass js_ObjectClass = {
+ js_Object_str,
+ JSCLASS_HAS_CACHED_PROTO(JSProto_Object),
+ JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
+ JS_EnumerateStub, JS_ResolveStub, JS_ConvertStub, JS_FinalizeStub,
+ JSCLASS_NO_OPTIONAL_MEMBERS
+};
+
+#if JS_HAS_OBJ_PROTO_PROP
+
+static JSBool
+obj_getSlot(JSContext *cx, JSObject *obj, jsval id, jsval *vp);
+
+static JSBool
+obj_setSlot(JSContext *cx, JSObject *obj, jsval id, jsval *vp);
+
+static JSBool
+obj_getCount(JSContext *cx, JSObject *obj, jsval id, jsval *vp);
+
+static JSPropertySpec object_props[] = {
+ /* These two must come first; see object_props[slot].name usage below. */
+ {js_proto_str, JSSLOT_PROTO, JSPROP_PERMANENT|JSPROP_SHARED,
+ obj_getSlot, obj_setSlot},
+ {js_parent_str,JSSLOT_PARENT,JSPROP_READONLY|JSPROP_PERMANENT|JSPROP_SHARED,
+ obj_getSlot, obj_setSlot},
+ {js_count_str, 0, JSPROP_PERMANENT,obj_getCount, obj_getCount},
+ {0,0,0,0,0}
+};
+
+/* NB: JSSLOT_PROTO and JSSLOT_PARENT are already indexes into object_props. */
+#define JSSLOT_COUNT 2
+
+static JSBool
+ReportStrictSlot(JSContext *cx, uint32 slot)
+{
+ if (slot == JSSLOT_PROTO)
+ return JS_TRUE;
+ return JS_ReportErrorFlagsAndNumber(cx,
+ JSREPORT_WARNING | JSREPORT_STRICT,
+ js_GetErrorMessage, NULL,
+ JSMSG_DEPRECATED_USAGE,
+ object_props[slot].name);
+}
+
+static JSBool
+obj_getSlot(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ uint32 slot;
+ jsid propid;
+ JSAccessMode mode;
+ uintN attrs;
+ JSObject *pobj;
+ JSClass *clasp;
+ JSExtendedClass *xclasp;
+
+ slot = (uint32) JSVAL_TO_INT(id);
+ if (id == INT_TO_JSVAL(JSSLOT_PROTO)) {
+ propid = ATOM_TO_JSID(cx->runtime->atomState.protoAtom);
+ mode = JSACC_PROTO;
+ } else {
+ propid = ATOM_TO_JSID(cx->runtime->atomState.parentAtom);
+ mode = JSACC_PARENT;
+ }
+
+ /* Let OBJ_CHECK_ACCESS get the slot's value, based on the access mode. */
+ if (!OBJ_CHECK_ACCESS(cx, obj, propid, mode, vp, &attrs))
+ return JS_FALSE;
+
+ pobj = JSVAL_TO_OBJECT(*vp);
+ if (pobj) {
+ clasp = OBJ_GET_CLASS(cx, pobj);
+ if (clasp == &js_CallClass || clasp == &js_BlockClass) {
+ /* Censor activations and lexical scopes per ECMA-262. */
+ *vp = JSVAL_NULL;
+ } else if (clasp->flags & JSCLASS_IS_EXTENDED) {
+ xclasp = (JSExtendedClass *) clasp;
+ if (xclasp->outerObject) {
+ pobj = xclasp->outerObject(cx, pobj);
+ if (!pobj)
+ return JS_FALSE;
+ *vp = OBJECT_TO_JSVAL(pobj);
+ }
+ }
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+obj_setSlot(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ JSObject *pobj;
+ uint32 slot;
+ jsid propid;
+ uintN attrs;
+
+ if (!JSVAL_IS_OBJECT(*vp))
+ return JS_TRUE;
+ pobj = JSVAL_TO_OBJECT(*vp);
+
+ if (pobj) {
+ /*
+ * Innerize pobj here to avoid sticking unwanted properties on the
+ * outer object. This ensures that any with statements only grant
+ * access to the inner object.
+ */
+ OBJ_TO_INNER_OBJECT(cx, pobj);
+ if (!pobj)
+ return JS_FALSE;
+ }
+ slot = (uint32) JSVAL_TO_INT(id);
+ if (JS_HAS_STRICT_OPTION(cx) && !ReportStrictSlot(cx, slot))
+ return JS_FALSE;
+
+ /* __parent__ is readonly and permanent, only __proto__ may be set. */
+ propid = ATOM_TO_JSID(cx->runtime->atomState.protoAtom);
+ if (!OBJ_CHECK_ACCESS(cx, obj, propid, JSACC_PROTO|JSACC_WRITE, vp, &attrs))
+ return JS_FALSE;
+
+ return js_SetProtoOrParent(cx, obj, slot, pobj);
+}
+
+static JSBool
+obj_getCount(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ jsval iter_state;
+ jsid num_properties;
+ JSBool ok;
+
+ if (JS_HAS_STRICT_OPTION(cx) && !ReportStrictSlot(cx, JSSLOT_COUNT))
+ return JS_FALSE;
+
+ /* Get the number of properties to enumerate. */
+ iter_state = JSVAL_NULL;
+ ok = OBJ_ENUMERATE(cx, obj, JSENUMERATE_INIT, &iter_state, &num_properties);
+ if (!ok)
+ goto out;
+
+ if (!JSVAL_IS_INT(num_properties)) {
+ JS_ASSERT(0);
+ *vp = JSVAL_ZERO;
+ goto out;
+ }
+ *vp = num_properties;
+
+out:
+ if (iter_state != JSVAL_NULL)
+ ok = OBJ_ENUMERATE(cx, obj, JSENUMERATE_DESTROY, &iter_state, 0);
+ return ok;
+}
+
+#else /* !JS_HAS_OBJ_PROTO_PROP */
+
+#define object_props NULL
+
+#endif /* !JS_HAS_OBJ_PROTO_PROP */
+
+JSBool
+js_SetProtoOrParent(JSContext *cx, JSObject *obj, uint32 slot, JSObject *pobj)
+{
+ JSRuntime *rt;
+ JSObject *obj2, *oldproto;
+ JSScope *scope, *newscope;
+
+ /*
+ * Serialize all proto and parent setting in order to detect cycles.
+ * We nest locks in this function, and only here, in the following orders:
+ *
+ * (1) rt->setSlotLock < pobj's scope lock;
+ * rt->setSlotLock < pobj's proto-or-parent's scope lock;
+ * rt->setSlotLock < pobj's grand-proto-or-parent's scope lock;
+ * etc...
+ * (2) rt->setSlotLock < obj's scope lock < pobj's scope lock.
+ *
+ * We avoid AB-BA deadlock by restricting obj from being on pobj's parent
+ * or proto chain (pobj may already be on obj's parent or proto chain; it
+ * could be moving up or down). We finally order obj with respect to pobj
+ * at the bottom of this routine (just before releasing rt->setSlotLock),
+ * by making pobj be obj's prototype or parent.
+ *
+ * After we have set the slot and released rt->setSlotLock, another call
+ * to js_SetProtoOrParent could nest locks according to the first order
+ * list above, but it cannot deadlock with any other thread. For there
+ * to be a deadlock, other parts of the engine would have to nest scope
+ * locks in the opposite order. XXXbe ensure they don't!
+ */
+ rt = cx->runtime;
+#ifdef JS_THREADSAFE
+
+ JS_ACQUIRE_LOCK(rt->setSlotLock);
+ while (rt->setSlotBusy) {
+ jsrefcount saveDepth;
+
+ /* Take pains to avoid nesting rt->gcLock inside rt->setSlotLock! */
+ JS_RELEASE_LOCK(rt->setSlotLock);
+ saveDepth = JS_SuspendRequest(cx);
+ JS_ACQUIRE_LOCK(rt->setSlotLock);
+ if (rt->setSlotBusy)
+ JS_WAIT_CONDVAR(rt->setSlotDone, JS_NO_TIMEOUT);
+ JS_RELEASE_LOCK(rt->setSlotLock);
+ JS_ResumeRequest(cx, saveDepth);
+ JS_ACQUIRE_LOCK(rt->setSlotLock);
+ }
+ rt->setSlotBusy = JS_TRUE;
+ JS_RELEASE_LOCK(rt->setSlotLock);
+
+#define SET_SLOT_DONE(rt) \
+ JS_BEGIN_MACRO \
+ JS_ACQUIRE_LOCK((rt)->setSlotLock); \
+ (rt)->setSlotBusy = JS_FALSE; \
+ JS_NOTIFY_ALL_CONDVAR((rt)->setSlotDone); \
+ JS_RELEASE_LOCK((rt)->setSlotLock); \
+ JS_END_MACRO
+
+#else
+
+#define SET_SLOT_DONE(rt) /* nothing */
+
+#endif
+
+ obj2 = pobj;
+ while (obj2) {
+ if (obj2 == obj) {
+ SET_SLOT_DONE(rt);
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_CYCLIC_VALUE,
+#if JS_HAS_OBJ_PROTO_PROP
+ object_props[slot].name
+#else
+ (slot == JSSLOT_PROTO) ? js_proto_str
+ : js_parent_str
+#endif
+ );
+ return JS_FALSE;
+ }
+ obj2 = JSVAL_TO_OBJECT(OBJ_GET_SLOT(cx, obj2, slot));
+ }
+
+ if (slot == JSSLOT_PROTO && OBJ_IS_NATIVE(obj)) {
+ /* Check to see whether obj shares its prototype's scope. */
+ JS_LOCK_OBJ(cx, obj);
+ scope = OBJ_SCOPE(obj);
+ oldproto = JSVAL_TO_OBJECT(LOCKED_OBJ_GET_SLOT(obj, JSSLOT_PROTO));
+ if (oldproto && OBJ_SCOPE(oldproto) == scope) {
+ /* Either obj needs a new empty scope, or it should share pobj's. */
+ if (!pobj ||
+ !OBJ_IS_NATIVE(pobj) ||
+ OBJ_GET_CLASS(cx, pobj) != LOCKED_OBJ_GET_CLASS(oldproto)) {
+ /*
+ * With no proto and no scope of its own, obj is truly empty.
+ *
+ * If pobj is not native, obj needs its own empty scope -- it
+ * should not continue to share oldproto's scope once oldproto
+ * is not on obj's prototype chain. That would put properties
+ * from oldproto's scope ahead of properties defined by pobj,
+ * in lookup order.
+ *
+ * If pobj's class differs from oldproto's, we may need a new
+ * scope to handle differences in private and reserved slots,
+ * so we suboptimally but safely make one.
+ */
+ scope = js_GetMutableScope(cx, obj);
+ if (!scope) {
+ JS_UNLOCK_OBJ(cx, obj);
+ SET_SLOT_DONE(rt);
+ return JS_FALSE;
+ }
+ } else if (OBJ_SCOPE(pobj) != scope) {
+#ifdef JS_THREADSAFE
+ /*
+ * We are about to nest scope locks. Help jslock.c:ShareScope
+ * keep scope->u.count balanced for the JS_UNLOCK_SCOPE, while
+ * avoiding deadlock, by recording scope in rt->setSlotScope.
+ */
+ if (scope->ownercx) {
+ JS_ASSERT(scope->ownercx == cx);
+ rt->setSlotScope = scope;
+ }
+#endif
+
+ /* We can't deadlock because we checked for cycles above (2). */
+ JS_LOCK_OBJ(cx, pobj);
+ newscope = (JSScope *) js_HoldObjectMap(cx, pobj->map);
+ obj->map = &newscope->map;
+ js_DropObjectMap(cx, &scope->map, obj);
+ JS_TRANSFER_SCOPE_LOCK(cx, scope, newscope);
+ scope = newscope;
+#ifdef JS_THREADSAFE
+ rt->setSlotScope = NULL;
+#endif
+ }
+ }
+ LOCKED_OBJ_SET_SLOT(obj, JSSLOT_PROTO, OBJECT_TO_JSVAL(pobj));
+ JS_UNLOCK_SCOPE(cx, scope);
+ } else {
+ OBJ_SET_SLOT(cx, obj, slot, OBJECT_TO_JSVAL(pobj));
+ }
+
+ SET_SLOT_DONE(rt);
+ return JS_TRUE;
+
+#undef SET_SLOT_DONE
+}
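+
+/*
+ * Behavior sketch (illustrative): the cycle walk above rejects any attempt to
+ * make an object its own proto or parent, directly or transitively, e.g.
+ *
+ *   var o = {};
+ *   o.__proto__ = o;        // fails with a JSMSG_CYCLIC_VALUE error
+ *
+ * while pointing the slot at an unrelated object simply stores the new value
+ * via OBJ_SET_SLOT (or re-shapes the scope when JSSLOT_PROTO changes on a
+ * native object, as handled in the branch above).
+ */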
+
+JS_STATIC_DLL_CALLBACK(JSHashNumber)
+js_hash_object(const void *key)
+{
+ return (JSHashNumber)JS_PTR_TO_UINT32(key) >> JSVAL_TAGBITS;
+}
+
+static JSHashEntry *
+MarkSharpObjects(JSContext *cx, JSObject *obj, JSIdArray **idap)
+{
+ JSSharpObjectMap *map;
+ JSHashTable *table;
+ JSHashNumber hash;
+ JSHashEntry **hep, *he;
+ jsatomid sharpid;
+ JSIdArray *ida;
+ JSBool ok;
+ jsint i, length;
+ jsid id;
+#if JS_HAS_GETTER_SETTER
+ JSObject *obj2;
+ JSProperty *prop;
+ uintN attrs;
+#endif
+ jsval val;
+ int stackDummy;
+
+ if (!JS_CHECK_STACK_SIZE(cx, stackDummy)) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_OVER_RECURSED);
+ return NULL;
+ }
+
+ map = &cx->sharpObjectMap;
+ table = map->table;
+ hash = js_hash_object(obj);
+ hep = JS_HashTableRawLookup(table, hash, obj);
+ he = *hep;
+ if (!he) {
+ sharpid = 0;
+ he = JS_HashTableRawAdd(table, hep, hash, obj,
+ JS_UINT32_TO_PTR(sharpid));
+ if (!he) {
+ JS_ReportOutOfMemory(cx);
+ return NULL;
+ }
+
+ /*
+ * Increment map->depth to protect js_EnterSharpObject from reentering
+ * itself badly. Without this fix, if we reenter the basis case where
+ * map->depth == 0, when unwinding the inner call we will destroy the
+ * newly-created hash table and crash.
+ */
+ ++map->depth;
+ ida = JS_Enumerate(cx, obj);
+ --map->depth;
+ if (!ida)
+ return NULL;
+
+ ok = JS_TRUE;
+ for (i = 0, length = ida->length; i < length; i++) {
+ id = ida->vector[i];
+#if JS_HAS_GETTER_SETTER
+ ok = OBJ_LOOKUP_PROPERTY(cx, obj, id, &obj2, &prop);
+ if (!ok)
+ break;
+ if (!prop)
+ continue;
+ ok = OBJ_GET_ATTRIBUTES(cx, obj2, id, prop, &attrs);
+ if (ok) {
+ if (OBJ_IS_NATIVE(obj2) &&
+ (attrs & (JSPROP_GETTER | JSPROP_SETTER))) {
+ val = JSVAL_NULL;
+ if (attrs & JSPROP_GETTER)
+ val = (jsval) ((JSScopeProperty*)prop)->getter;
+ if (attrs & JSPROP_SETTER) {
+ if (val != JSVAL_NULL) {
+ /* Mark the getter, then set val to setter. */
+ ok = (MarkSharpObjects(cx, JSVAL_TO_OBJECT(val),
+ NULL)
+ != NULL);
+ }
+ val = (jsval) ((JSScopeProperty*)prop)->setter;
+ }
+ } else {
+ ok = OBJ_GET_PROPERTY(cx, obj, id, &val);
+ }
+ }
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+#else
+ ok = OBJ_GET_PROPERTY(cx, obj, id, &val);
+#endif
+ if (!ok)
+ break;
+ if (!JSVAL_IS_PRIMITIVE(val) &&
+ !MarkSharpObjects(cx, JSVAL_TO_OBJECT(val), NULL)) {
+ ok = JS_FALSE;
+ break;
+ }
+ }
+ if (!ok || !idap)
+ JS_DestroyIdArray(cx, ida);
+ if (!ok)
+ return NULL;
+ } else {
+ sharpid = JS_PTR_TO_UINT32(he->value);
+ if (sharpid == 0) {
+ sharpid = ++map->sharpgen << SHARP_ID_SHIFT;
+ he->value = JS_UINT32_TO_PTR(sharpid);
+ }
+ ida = NULL;
+ }
+ if (idap)
+ *idap = ida;
+ return he;
+}
+
+JSHashEntry *
+js_EnterSharpObject(JSContext *cx, JSObject *obj, JSIdArray **idap,
+ jschar **sp)
+{
+ JSSharpObjectMap *map;
+ JSHashTable *table;
+ JSIdArray *ida;
+ JSHashNumber hash;
+ JSHashEntry *he, **hep;
+ jsatomid sharpid;
+ char buf[20];
+ size_t len;
+
+ if (JS_HAS_NATIVE_BRANCH_CALLBACK_OPTION(cx) &&
+ cx->branchCallback &&
+ !cx->branchCallback(cx, NULL)) {
+ return NULL;
+ }
+
+ /* Set to null in case we return an early error. */
+ *sp = NULL;
+ map = &cx->sharpObjectMap;
+ table = map->table;
+ if (!table) {
+ table = JS_NewHashTable(8, js_hash_object, JS_CompareValues,
+ JS_CompareValues, NULL, NULL);
+ if (!table) {
+ JS_ReportOutOfMemory(cx);
+ return NULL;
+ }
+ map->table = table;
+ JS_KEEP_ATOMS(cx->runtime);
+ }
+
+ /* From this point on, control must flow through either out: or bad:. */
+ ida = NULL;
+ if (map->depth == 0) {
+ he = MarkSharpObjects(cx, obj, &ida);
+ if (!he)
+ goto bad;
+ JS_ASSERT((JS_PTR_TO_UINT32(he->value) & SHARP_BIT) == 0);
+ if (!idap) {
+ JS_DestroyIdArray(cx, ida);
+ ida = NULL;
+ }
+ } else {
+ hash = js_hash_object(obj);
+ hep = JS_HashTableRawLookup(table, hash, obj);
+ he = *hep;
+
+ /*
+ * It's possible that the value of a property has changed from the
+ * first time the object's properties are traversed (when the property
+ * ids are entered into the hash table) to the second (when they are
+ * converted to strings), i.e., the OBJ_GET_PROPERTY() call is not
+ * idempotent.
+ */
+ if (!he) {
+ he = JS_HashTableRawAdd(table, hep, hash, obj, NULL);
+ if (!he) {
+ JS_ReportOutOfMemory(cx);
+ goto bad;
+ }
+ sharpid = 0;
+ goto out;
+ }
+ }
+
+ sharpid = JS_PTR_TO_UINT32(he->value);
+ if (sharpid != 0) {
+ len = JS_snprintf(buf, sizeof buf, "#%u%c",
+ sharpid >> SHARP_ID_SHIFT,
+ (sharpid & SHARP_BIT) ? '#' : '=');
+ *sp = js_InflateString(cx, buf, &len);
+ if (!*sp) {
+ if (ida)
+ JS_DestroyIdArray(cx, ida);
+ goto bad;
+ }
+ }
+
+out:
+ JS_ASSERT(he);
+ if ((sharpid & SHARP_BIT) == 0) {
+ if (idap && !ida) {
+ ida = JS_Enumerate(cx, obj);
+ if (!ida) {
+ if (*sp) {
+ JS_free(cx, *sp);
+ *sp = NULL;
+ }
+ goto bad;
+ }
+ }
+ map->depth++;
+ }
+
+ if (idap)
+ *idap = ida;
+ return he;
+
+bad:
+ /* Clean up the sharpObjectMap table on outermost error. */
+ if (map->depth == 0) {
+ JS_UNKEEP_ATOMS(cx->runtime);
+ map->sharpgen = 0;
+ JS_HashTableDestroy(map->table);
+ map->table = NULL;
+ }
+ return NULL;
+}
+
+void
+js_LeaveSharpObject(JSContext *cx, JSIdArray **idap)
+{
+ JSSharpObjectMap *map;
+ JSIdArray *ida;
+
+ map = &cx->sharpObjectMap;
+ JS_ASSERT(map->depth > 0);
+ if (--map->depth == 0) {
+ JS_UNKEEP_ATOMS(cx->runtime);
+ map->sharpgen = 0;
+ JS_HashTableDestroy(map->table);
+ map->table = NULL;
+ }
+ if (idap) {
+ ida = *idap;
+ if (ida) {
+ JS_DestroyIdArray(cx, ida);
+ *idap = NULL;
+ }
+ }
+}
+
+JS_STATIC_DLL_CALLBACK(intN)
+gc_sharp_table_entry_marker(JSHashEntry *he, intN i, void *arg)
+{
+ GC_MARK((JSContext *)arg, (JSObject *)he->key, "sharp table entry");
+ return JS_DHASH_NEXT;
+}
+
+void
+js_GCMarkSharpMap(JSContext *cx, JSSharpObjectMap *map)
+{
+ JS_ASSERT(map->depth > 0);
+ JS_ASSERT(map->table);
+
+ /*
+ * During recursive calls to MarkSharpObjects, a non-native object or an
+ * object with a custom getProperty method can potentially return an
+ * unrooted value, or even cut an argument of one of MarkSharpObjects'
+ * recursive invocations out of the object graph. So we must protect
+ * map->table entries against GC.
+ *
+ * We cannot simply use JSTempValueRooter to mark the obj argument of
+ * MarkSharpObjects during recursion, as we have to protect *all* entries
+ * in JSSharpObjectMap, including those that contain otherwise unreachable
+ * objects just allocated through a custom getProperty. Otherwise newer
+ * allocations could reuse the address of an object stored in the
+ * hashtable, confusing js_EnterSharpObject. So to address the problem we
+ * simply mark all objects from map->table.
+ *
+ * An alternative "proper" solution would be to use JSTempValueRooter in
+ * MarkSharpObjects, with code to remove entries holding otherwise
+ * unreachable objects during finalization. But that is far too complex
+ * to justify the effort.
+ */
+ JS_HashTableEnumerateEntries(map->table, gc_sharp_table_entry_marker, cx);
+}
+
+#define OBJ_TOSTRING_EXTRA 4 /* for 4 local GC roots */
+
+#if JS_HAS_TOSOURCE
+JSBool
+js_obj_toSource(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSBool ok, outermost;
+ JSHashEntry *he;
+ JSIdArray *ida;
+ jschar *chars, *ochars, *vsharp;
+ const jschar *idstrchars, *vchars;
+ size_t nchars, idstrlength, gsoplength, vlength, vsharplength, curlen;
+ char *comma;
+ jsint i, j, length, valcnt;
+ jsid id;
+#if JS_HAS_GETTER_SETTER
+ JSObject *obj2;
+ JSProperty *prop;
+ uintN attrs;
+#endif
+ jsval *val;
+ JSString *gsopold[2];
+ JSString *gsop[2];
+ JSAtom *atom;
+ JSString *idstr, *valstr, *str;
+ int stackDummy;
+
+ if (!JS_CHECK_STACK_SIZE(cx, stackDummy)) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_OVER_RECURSED);
+ return JS_FALSE;
+ }
+
+ /* If outermost, we need parentheses to be an expression, not a block. */
+ outermost = (cx->sharpObjectMap.depth == 0);
+ he = js_EnterSharpObject(cx, obj, &ida, &chars);
+ if (!he)
+ return JS_FALSE;
+ if (IS_SHARP(he)) {
+ /*
+ * We didn't enter -- obj is already "sharp", meaning we've visited it
+ * already in our depth first search, and therefore chars contains a
+ * string of the form "#n#".
+ */
+ JS_ASSERT(!ida);
+#if JS_HAS_SHARP_VARS
+ nchars = js_strlen(chars);
+#else
+ chars[0] = '{';
+ chars[1] = '}';
+ chars[2] = 0;
+ nchars = 2;
+#endif
+ goto make_string;
+ }
+ JS_ASSERT(ida);
+ ok = JS_TRUE;
+
+ if (!chars) {
+ /* If outermost, allocate 4 + 1 for "({})" and the terminator. */
+ chars = (jschar *) malloc(((outermost ? 4 : 2) + 1) * sizeof(jschar));
+ nchars = 0;
+ if (!chars)
+ goto error;
+ if (outermost)
+ chars[nchars++] = '(';
+ } else {
+ /* js_EnterSharpObject returned a string of the form "#n=" in chars. */
+ MAKE_SHARP(he);
+ nchars = js_strlen(chars);
+ chars = (jschar *)
+ realloc((ochars = chars), (nchars + 2 + 1) * sizeof(jschar));
+ if (!chars) {
+ free(ochars);
+ goto error;
+ }
+ if (outermost) {
+ /*
+ * No need for parentheses around the whole shebang, because #n=
+ * unambiguously begins an object initializer, and never a block
+ * statement.
+ */
+ outermost = JS_FALSE;
+ }
+ }
+
+#ifdef DUMP_CALL_TABLE
+ if (cx->options & JSOPTION_LOGCALL_TOSOURCE) {
+ const char *classname = OBJ_GET_CLASS(cx, obj)->name;
+ size_t classnchars = strlen(classname);
+ static const char classpropid[] = "C";
+ const char *cp;
+ size_t onchars = nchars;
+
+ /* 2 for ': ', 2 quotes around classname, 2 for ', ' after. */
+ classnchars += sizeof classpropid - 1 + 2 + 2;
+ if (ida->length)
+ classnchars += 2;
+
+ /* 2 for the braces, 1 for the terminator */
+ chars = (jschar *)
+ realloc((ochars = chars),
+ (nchars + classnchars + 2 + 1) * sizeof(jschar));
+ if (!chars) {
+ free(ochars);
+ goto error;
+ }
+
+ chars[nchars++] = '{'; /* 1 from the 2 braces */
+ for (cp = classpropid; *cp; cp++)
+ chars[nchars++] = (jschar) *cp;
+ chars[nchars++] = ':';
+ chars[nchars++] = ' '; /* 2 for ': ' */
+ chars[nchars++] = '"';
+ for (cp = classname; *cp; cp++)
+ chars[nchars++] = (jschar) *cp;
+ chars[nchars++] = '"'; /* 2 quotes */
+ if (ida->length) {
+ chars[nchars++] = ',';
+ chars[nchars++] = ' '; /* 2 for ', ' */
+ }
+
+ JS_ASSERT(nchars - onchars == 1 + classnchars);
+ } else
+#endif
+ chars[nchars++] = '{';
+
+ comma = NULL;
+
+ /*
+ * We have four local roots for cooked and raw value GC safety. Hoist the
+ * "argv + 2" out of the loop using the val local, which refers to the raw
+ * (unconverted, "uncooked") values.
+ */
+ val = argv + 2;
+
+ for (i = 0, length = ida->length; i < length; i++) {
+ JSBool idIsLexicalIdentifier, needOldStyleGetterSetter;
+
+ /* Get strings for id and value and GC-root them via argv. */
+ id = ida->vector[i];
+
+#if JS_HAS_GETTER_SETTER
+ ok = OBJ_LOOKUP_PROPERTY(cx, obj, id, &obj2, &prop);
+ if (!ok)
+ goto error;
+#endif
+
+ /*
+ * Convert id to a jsval and then to a string. Decide early whether we
+ * prefer get/set or old getter/setter syntax.
+ */
+ atom = JSID_IS_ATOM(id) ? JSID_TO_ATOM(id) : NULL;
+ idstr = js_ValueToString(cx, ID_TO_VALUE(id));
+ if (!idstr) {
+ ok = JS_FALSE;
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+ goto error;
+ }
+ *rval = STRING_TO_JSVAL(idstr); /* local root */
+ idIsLexicalIdentifier = js_IsIdentifier(idstr);
+ needOldStyleGetterSetter =
+ !idIsLexicalIdentifier ||
+ js_CheckKeyword(JSSTRING_CHARS(idstr),
+ JSSTRING_LENGTH(idstr)) != TOK_EOF;
+
+#if JS_HAS_GETTER_SETTER
+
+ valcnt = 0;
+ if (prop) {
+ ok = OBJ_GET_ATTRIBUTES(cx, obj2, id, prop, &attrs);
+ if (!ok) {
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+ goto error;
+ }
+ if (OBJ_IS_NATIVE(obj2) &&
+ (attrs & (JSPROP_GETTER | JSPROP_SETTER))) {
+ if (attrs & JSPROP_GETTER) {
+ val[valcnt] = (jsval) ((JSScopeProperty *)prop)->getter;
+ gsopold[valcnt] =
+ ATOM_TO_STRING(cx->runtime->atomState.getterAtom);
+ gsop[valcnt] =
+ ATOM_TO_STRING(cx->runtime->atomState.getAtom);
+ valcnt++;
+ }
+ if (attrs & JSPROP_SETTER) {
+ val[valcnt] = (jsval) ((JSScopeProperty *)prop)->setter;
+ gsopold[valcnt] =
+ ATOM_TO_STRING(cx->runtime->atomState.setterAtom);
+ gsop[valcnt] =
+ ATOM_TO_STRING(cx->runtime->atomState.setAtom);
+ valcnt++;
+ }
+ } else {
+ valcnt = 1;
+ gsop[0] = NULL;
+ gsopold[0] = NULL;
+ ok = OBJ_GET_PROPERTY(cx, obj, id, &val[0]);
+ }
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+ }
+
+#else /* !JS_HAS_GETTER_SETTER */
+
+ /*
+ * We simplify the source code at the price of minor dead code bloat in
+ * the ECMA version (for testing only, see jsconfig.h). The null
+ * default values in gsop[j] suffice to disable non-ECMA getter and
+ * setter code.
+ */
+ valcnt = 1;
+ gsop[0] = NULL;
+ gsopold[0] = NULL;
+ ok = OBJ_GET_PROPERTY(cx, obj, id, &val[0]);
+
+#endif /* !JS_HAS_GETTER_SETTER */
+
+ if (!ok)
+ goto error;
+
+ /*
+ * If id is a string that's not an identifier, then it needs to be
+ * quoted. Also, negative integer ids must be quoted.
+ */
+ if (atom
+ ? !idIsLexicalIdentifier
+ : (JSID_IS_OBJECT(id) || JSID_TO_INT(id) < 0)) {
+ idstr = js_QuoteString(cx, idstr, (jschar)'\'');
+ if (!idstr) {
+ ok = JS_FALSE;
+ goto error;
+ }
+ *rval = STRING_TO_JSVAL(idstr); /* local root */
+ }
+ idstrchars = JSSTRING_CHARS(idstr);
+ idstrlength = JSSTRING_LENGTH(idstr);
+
+ for (j = 0; j < valcnt; j++) {
+ /* Convert val[j] to its canonical source form. */
+ valstr = js_ValueToSource(cx, val[j]);
+ if (!valstr) {
+ ok = JS_FALSE;
+ goto error;
+ }
+ argv[j] = STRING_TO_JSVAL(valstr); /* local root */
+ vchars = JSSTRING_CHARS(valstr);
+ vlength = JSSTRING_LENGTH(valstr);
+
+ if (vchars[0] == '#')
+ needOldStyleGetterSetter = JS_TRUE;
+
+ if (needOldStyleGetterSetter)
+ gsop[j] = gsopold[j];
+
+#ifndef OLD_GETTER_SETTER
+ /*
+ * Remove '(function ' from the beginning of valstr and ')' from the
+ * end so that we can put "get" in front of the function definition.
+ */
+ if (gsop[j] && VALUE_IS_FUNCTION(cx, val[j]) &&
+ !needOldStyleGetterSetter) {
+ const jschar *start = vchars;
+ if (vchars[0] == '(')
+ vchars++;
+ vchars = js_strchr_limit(vchars, '(', vchars + vlength);
+ if (vchars) {
+ vlength -= vchars - start + 1;
+ } else {
+ gsop[j] = NULL;
+ vchars = start;
+ }
+ }
+#else
+ needOldStyleGetterSetter = JS_TRUE;
+ gsop[j] = gsopold[j];
+#endif
+
+ /* If val[j] is a non-sharp object, consider sharpening it. */
+ vsharp = NULL;
+ vsharplength = 0;
+#if JS_HAS_SHARP_VARS
+ if (!JSVAL_IS_PRIMITIVE(val[j]) && vchars[0] != '#') {
+ he = js_EnterSharpObject(cx, JSVAL_TO_OBJECT(val[j]), NULL,
+ &vsharp);
+ if (!he) {
+ ok = JS_FALSE;
+ goto error;
+ }
+ if (IS_SHARP(he)) {
+ vchars = vsharp;
+ vlength = js_strlen(vchars);
+ needOldStyleGetterSetter = JS_TRUE;
+ gsop[j] = gsopold[j];
+ } else {
+ if (vsharp) {
+ vsharplength = js_strlen(vsharp);
+ MAKE_SHARP(he);
+ needOldStyleGetterSetter = JS_TRUE;
+ gsop[j] = gsopold[j];
+ }
+ js_LeaveSharpObject(cx, NULL);
+ }
+ }
+#endif
+
+#define SAFE_ADD(n) \
+ JS_BEGIN_MACRO \
+ size_t n_ = (n); \
+ curlen += n_; \
+ if (curlen < n_) \
+ goto overflow; \
+ JS_END_MACRO
+
+ curlen = nchars;
+ if (comma)
+ SAFE_ADD(2);
+ SAFE_ADD(idstrlength + 1);
+ if (gsop[j])
+ SAFE_ADD(JSSTRING_LENGTH(gsop[j]) + 1);
+ SAFE_ADD(vsharplength);
+ SAFE_ADD(vlength);
+ /* Account for the trailing null. */
+ SAFE_ADD((outermost ? 2 : 1) + 1);
+#undef SAFE_ADD
+
+ if (curlen > (size_t)-1 / sizeof(jschar))
+ goto overflow;
+
+ /* Allocate 1 + 1 at end for closing brace and terminating 0. */
+ chars = (jschar *)
+ realloc((ochars = chars), curlen * sizeof(jschar));
+ if (!chars) {
+ /* Save code space on error: let JS_free ignore null vsharp. */
+ JS_free(cx, vsharp);
+ free(ochars);
+ goto error;
+ }
+
+ if (comma) {
+ chars[nchars++] = comma[0];
+ chars[nchars++] = comma[1];
+ }
+ comma = ", ";
+
+ if (needOldStyleGetterSetter) {
+ js_strncpy(&chars[nchars], idstrchars, idstrlength);
+ nchars += idstrlength;
+ if (gsop[j]) {
+ chars[nchars++] = ' ';
+ gsoplength = JSSTRING_LENGTH(gsop[j]);
+ js_strncpy(&chars[nchars], JSSTRING_CHARS(gsop[j]),
+ gsoplength);
+ nchars += gsoplength;
+ }
+ chars[nchars++] = ':';
+ } else { /* New style "decompilation" */
+ if (gsop[j]) {
+ gsoplength = JSSTRING_LENGTH(gsop[j]);
+ js_strncpy(&chars[nchars], JSSTRING_CHARS(gsop[j]),
+ gsoplength);
+ nchars += gsoplength;
+ chars[nchars++] = ' ';
+ }
+ js_strncpy(&chars[nchars], idstrchars, idstrlength);
+ nchars += idstrlength;
+ /* Extraneous space after id here will be extracted later */
+ chars[nchars++] = gsop[j] ? ' ' : ':';
+ }
+
+ if (vsharplength) {
+ js_strncpy(&chars[nchars], vsharp, vsharplength);
+ nchars += vsharplength;
+ }
+ js_strncpy(&chars[nchars], vchars, vlength);
+ nchars += vlength;
+
+ if (vsharp)
+ JS_free(cx, vsharp);
+#ifdef DUMP_CALL_TABLE
+ if (outermost && nchars >= js_LogCallToSourceLimit)
+ break;
+#endif
+ }
+ }
+
+ chars[nchars++] = '}';
+ if (outermost)
+ chars[nchars++] = ')';
+ chars[nchars] = 0;
+
+ error:
+ js_LeaveSharpObject(cx, &ida);
+
+ if (!ok) {
+ if (chars)
+ free(chars);
+ return ok;
+ }
+
+ if (!chars) {
+ JS_ReportOutOfMemory(cx);
+ return JS_FALSE;
+ }
+ make_string:
+ str = js_NewString(cx, chars, nchars, 0);
+ if (!str) {
+ free(chars);
+ return JS_FALSE;
+ }
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+
+ overflow:
+ JS_free(cx, vsharp);
+ free(chars);
+ chars = NULL;
+ goto error;
+}
+#endif /* JS_HAS_TOSOURCE */
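+
+/*
+ * Sharp-variable sketch (illustrative, assuming JS_HAS_SHARP_VARS): for a
+ * cyclic object, js_obj_toSource leans on js_EnterSharpObject so the cycle is
+ * rendered with #n= / #n# notation instead of recursing forever, e.g.
+ *
+ *   var o = {};
+ *   o.me = o;
+ *   o.toSource();   // something like "#1={me:#1#}"
+ */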
+
+JSBool
+js_obj_toString(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ jschar *chars;
+ size_t nchars;
+ const char *clazz, *prefix;
+ JSString *str;
+
+ clazz = OBJ_GET_CLASS(cx, obj)->name;
+ nchars = 9 + strlen(clazz); /* 9 for "[object ]" */
+ chars = (jschar *) JS_malloc(cx, (nchars + 1) * sizeof(jschar));
+ if (!chars)
+ return JS_FALSE;
+
+ prefix = "[object ";
+ nchars = 0;
+ while ((chars[nchars] = (jschar)*prefix) != 0)
+ nchars++, prefix++;
+ while ((chars[nchars] = (jschar)*clazz) != 0)
+ nchars++, clazz++;
+ chars[nchars++] = ']';
+ chars[nchars] = 0;
+
+ str = js_NewString(cx, chars, nchars, 0);
+ if (!str) {
+ JS_free(cx, chars);
+ return JS_FALSE;
+ }
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
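+
+/*
+ * Example (follows directly from the code above): the result is always
+ * "[object " + class name + "]", so
+ *
+ *   ({}).toString()    // "[object Object]"
+ *   Math.toString()    // "[object Math]"
+ */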
+
+static JSBool
+js_obj_toLocaleString(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSString *str;
+
+ str = js_ValueToString(cx, argv[-1]);
+ if (!str)
+ return JS_FALSE;
+
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+
+static JSBool
+obj_valueOf(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ *rval = OBJECT_TO_JSVAL(obj);
+ return JS_TRUE;
+}
+
+/*
+ * Check whether principals subsumes scopeobj's principals, and return true
+ * if so (or if scopeobj has no principals, for backward compatibility with
+ * the JS API, which does not require principals), and false otherwise.
+ */
+JSBool
+js_CheckPrincipalsAccess(JSContext *cx, JSObject *scopeobj,
+ JSPrincipals *principals, JSAtom *caller)
+{
+ JSRuntime *rt;
+ JSPrincipals *scopePrincipals;
+ const char *callerstr;
+
+ rt = cx->runtime;
+ if (rt->findObjectPrincipals) {
+ scopePrincipals = rt->findObjectPrincipals(cx, scopeobj);
+ if (!principals || !scopePrincipals ||
+ !principals->subsume(principals, scopePrincipals)) {
+ callerstr = js_AtomToPrintableString(cx, caller);
+ if (!callerstr)
+ return JS_FALSE;
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_INDIRECT_CALL, callerstr);
+ return JS_FALSE;
+ }
+ }
+ return JS_TRUE;
+}
+
+JSObject *
+js_CheckScopeChainValidity(JSContext *cx, JSObject *scopeobj, const char *caller)
+{
+ JSClass *clasp;
+ JSExtendedClass *xclasp;
+ JSObject *inner;
+
+ if (!scopeobj)
+ goto bad;
+
+ OBJ_TO_INNER_OBJECT(cx, scopeobj);
+ if (!scopeobj)
+ return NULL;
+
+ inner = scopeobj;
+
+ /* XXX This is an awful gross hack. */
+ while (scopeobj) {
+ clasp = OBJ_GET_CLASS(cx, scopeobj);
+ if (clasp->flags & JSCLASS_IS_EXTENDED) {
+ xclasp = (JSExtendedClass*)clasp;
+ if (xclasp->innerObject &&
+ xclasp->innerObject(cx, scopeobj) != scopeobj) {
+ goto bad;
+ }
+ }
+
+ scopeobj = OBJ_GET_PARENT(cx, scopeobj);
+ }
+
+ return inner;
+
+bad:
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_INDIRECT_CALL, caller);
+ return NULL;
+}
+
+static JSBool
+obj_eval(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSStackFrame *fp, *caller;
+ JSBool indirectCall;
+ JSObject *scopeobj;
+ JSString *str;
+ const char *file;
+ uintN line;
+ JSPrincipals *principals;
+ JSScript *script;
+ JSBool ok;
+#if JS_HAS_EVAL_THIS_SCOPE
+ JSObject *callerScopeChain = NULL, *callerVarObj = NULL;
+ JSObject *setCallerScopeChain = NULL;
+ JSBool setCallerVarObj = JS_FALSE;
+#endif
+
+ fp = cx->fp;
+ caller = JS_GetScriptedCaller(cx, fp);
+ JS_ASSERT(!caller || caller->pc);
+ indirectCall = (caller && *caller->pc != JSOP_EVAL);
+
+ if (indirectCall &&
+ !JS_ReportErrorFlagsAndNumber(cx,
+ JSREPORT_WARNING | JSREPORT_STRICT,
+ js_GetErrorMessage, NULL,
+ JSMSG_BAD_INDIRECT_CALL,
+ js_eval_str)) {
+ return JS_FALSE;
+ }
+
+ if (!JSVAL_IS_STRING(argv[0])) {
+ *rval = argv[0];
+ return JS_TRUE;
+ }
+
+ /*
+ * If the caller is a lightweight function and doesn't have a variables
+ * object, then we need to provide one for the compiler to stick any
+ * declared (var) variables into.
+ */
+ if (caller && !caller->varobj && !js_GetCallObject(cx, caller, NULL))
+ return JS_FALSE;
+
+#if JS_HAS_SCRIPT_OBJECT
+ /*
+ * Script.prototype.compile/exec and Object.prototype.eval all take an
+ * optional trailing argument that overrides the scope object.
+ */
+ scopeobj = NULL;
+ if (argc >= 2) {
+ if (!js_ValueToObject(cx, argv[1], &scopeobj))
+ return JS_FALSE;
+ argv[1] = OBJECT_TO_JSVAL(scopeobj);
+ }
+ if (!scopeobj)
+#endif
+ {
+#if JS_HAS_EVAL_THIS_SCOPE
+ /* If obj.eval(str), emulate 'with (obj) eval(str)' in the caller. */
+ if (indirectCall) {
+ callerScopeChain = js_GetScopeChain(cx, caller);
+ if (!callerScopeChain)
+ return JS_FALSE;
+ OBJ_TO_INNER_OBJECT(cx, obj);
+ if (!obj)
+ return JS_FALSE;
+ if (obj != callerScopeChain) {
+ if (!js_CheckPrincipalsAccess(cx, obj,
+ caller->script->principals,
+ cx->runtime->atomState.evalAtom))
+ {
+ return JS_FALSE;
+ }
+
+ scopeobj = js_NewWithObject(cx, obj, callerScopeChain, -1);
+ if (!scopeobj)
+ return JS_FALSE;
+
+ /* Set fp->scopeChain too, for the compiler. */
+ caller->scopeChain = fp->scopeChain = scopeobj;
+
+ /* Remember scopeobj so we can null its private when done. */
+ setCallerScopeChain = scopeobj;
+ }
+
+ callerVarObj = caller->varobj;
+ if (obj != callerVarObj) {
+ /* Set fp->varobj too, for the compiler. */
+ caller->varobj = fp->varobj = obj;
+ setCallerVarObj = JS_TRUE;
+ }
+ }
+ /* From here on, control must exit through label out with ok set. */
+#endif
+
+ /* Compile using caller's current scope object. */
+ if (caller) {
+ scopeobj = js_GetScopeChain(cx, caller);
+ if (!scopeobj) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ }
+ }
+
+ /* Ensure we compile this eval with the right object in the scope chain. */
+ scopeobj = js_CheckScopeChainValidity(cx, scopeobj, js_eval_str);
+ if (!scopeobj)
+ return JS_FALSE;
+
+ str = JSVAL_TO_STRING(argv[0]);
+ if (caller) {
+ principals = JS_EvalFramePrincipals(cx, fp, caller);
+ if (principals == caller->script->principals) {
+ file = caller->script->filename;
+ line = js_PCToLineNumber(cx, caller->script, caller->pc);
+ } else {
+ file = principals->codebase;
+ line = 0;
+ }
+ } else {
+ file = NULL;
+ line = 0;
+ principals = NULL;
+ }
+
+ /*
+ * Set JSFRAME_EVAL on fp and any frames (e.g., fun_call if eval.call was
+ * invoked) between fp and its scripted caller, to help the compiler easily
+ * find the same caller whose scope and var obj we've set.
+ *
+ * XXX this nonsense could, and perhaps should, go away with a better way
+ * to pass params to the compiler than via the top-most frame.
+ */
+ do {
+ fp->flags |= JSFRAME_EVAL;
+ } while ((fp = fp->down) != caller);
+
+ script = JS_CompileUCScriptForPrincipals(cx, scopeobj, principals,
+ JSSTRING_CHARS(str),
+ JSSTRING_LENGTH(str),
+ file, line);
+ if (!script) {
+ ok = JS_FALSE;
+ goto out;
+ }
+
+#if JS_HAS_SCRIPT_OBJECT
+ if (argc < 2)
+#endif
+ {
+ /* Execute using caller's new scope object (might be a Call object). */
+ if (caller)
+ scopeobj = caller->scopeChain;
+ }
+
+ /*
+ * Belt-and-braces: check that the lesser of eval's principals and the
+ * caller's principals has access to scopeobj.
+ */
+ ok = js_CheckPrincipalsAccess(cx, scopeobj, principals,
+ cx->runtime->atomState.evalAtom);
+ if (ok)
+ ok = js_Execute(cx, scopeobj, script, caller, JSFRAME_EVAL, rval);
+
+ JS_DestroyScript(cx, script);
+
+out:
+#if JS_HAS_EVAL_THIS_SCOPE
+ /* Restore OBJ_GET_PARENT(scopeobj) not callerScopeChain in case of Call. */
+ if (setCallerScopeChain) {
+ caller->scopeChain = callerScopeChain;
+ JS_ASSERT(OBJ_GET_CLASS(cx, setCallerScopeChain) == &js_WithClass);
+ JS_SetPrivate(cx, setCallerScopeChain, NULL);
+ }
+ if (setCallerVarObj)
+ caller->varobj = callerVarObj;
+#endif
+ return ok;
+}
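+
+/*
+ * Usage sketch (illustrative): besides direct eval, this code still supports
+ * the old Object.prototype.eval form, emulated above as 'with (obj) eval(str)':
+ *
+ *   var o = {x: 3};
+ *   o.eval("x")                // 3; such indirect calls draw a JSREPORT_STRICT
+ *                              // warning when the strict option is enabled
+ *   eval("x", someScopeObject) // optional scope override (JS_HAS_SCRIPT_OBJECT);
+ *                              // someScopeObject is a hypothetical name here
+ */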
+
+#if JS_HAS_OBJ_WATCHPOINT
+
+static JSBool
+obj_watch_handler(JSContext *cx, JSObject *obj, jsval id, jsval old, jsval *nvp,
+ void *closure)
+{
+ JSObject *callable;
+ JSRuntime *rt;
+ JSStackFrame *caller;
+ JSPrincipals *subject, *watcher;
+ JSResolvingKey key;
+ JSResolvingEntry *entry;
+ uint32 generation;
+ jsval argv[3];
+ JSBool ok;
+
+ callable = (JSObject *) closure;
+
+ rt = cx->runtime;
+ if (rt->findObjectPrincipals) {
+ /* Skip over any obj_watch_* frames between us and the real subject. */
+ caller = JS_GetScriptedCaller(cx, cx->fp);
+ if (caller) {
+ /*
+ * Only call the watch handler if the watcher is allowed to watch
+ * the currently executing script.
+ */
+ watcher = rt->findObjectPrincipals(cx, callable);
+ subject = JS_StackFramePrincipals(cx, caller);
+
+ if (watcher && subject && !watcher->subsume(watcher, subject)) {
+ /* Silently don't call the watch handler. */
+ return JS_TRUE;
+ }
+ }
+ }
+
+ /* Avoid recursion on (obj, id) already being watched on cx. */
+ key.obj = obj;
+ key.id = id;
+ if (!js_StartResolving(cx, &key, JSRESFLAG_WATCH, &entry))
+ return JS_FALSE;
+ if (!entry)
+ return JS_TRUE;
+ generation = cx->resolvingTable->generation;
+
+ argv[0] = id;
+ argv[1] = old;
+ argv[2] = *nvp;
+ ok = js_InternalCall(cx, obj, OBJECT_TO_JSVAL(callable), 3, argv, nvp);
+ js_StopResolving(cx, &key, JSRESFLAG_WATCH, entry, generation);
+ return ok;
+}
+
+static JSBool
+obj_watch(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSObject *callable;
+ jsval userid, value;
+ jsid propid;
+ uintN attrs;
+
+ callable = js_ValueToCallableObject(cx, &argv[1], 0);
+ if (!callable)
+ return JS_FALSE;
+
+ /* Compute the unique int/atom symbol id needed by js_LookupProperty. */
+ userid = argv[0];
+ if (!JS_ValueToId(cx, userid, &propid))
+ return JS_FALSE;
+
+ if (!OBJ_CHECK_ACCESS(cx, obj, propid, JSACC_WATCH, &value, &attrs))
+ return JS_FALSE;
+ if (attrs & JSPROP_READONLY)
+ return JS_TRUE;
+ return JS_SetWatchPoint(cx, obj, userid, obj_watch_handler, callable);
+}
+
+static JSBool
+obj_unwatch(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ return JS_ClearWatchPoint(cx, obj, argv[0], NULL, NULL);
+}
+
+#endif /* JS_HAS_OBJ_WATCHPOINT */
+
+/*
+ * Prototype and property query methods, to complement the 'in' and
+ * 'instanceof' operators.
+ */
+
+/* Proposed ECMA 15.2.4.5. */
+static JSBool
+obj_hasOwnProperty(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ return js_HasOwnPropertyHelper(cx, obj, obj->map->ops->lookupProperty,
+ argc, argv, rval);
+}
+
+JSBool
+js_HasOwnPropertyHelper(JSContext *cx, JSObject *obj, JSLookupPropOp lookup,
+ uintN argc, jsval *argv, jsval *rval)
+{
+ jsid id;
+ JSObject *obj2;
+ JSProperty *prop;
+ JSScopeProperty *sprop;
+
+ if (!JS_ValueToId(cx, argv[0], &id))
+ return JS_FALSE;
+ if (!lookup(cx, obj, id, &obj2, &prop))
+ return JS_FALSE;
+ if (!prop) {
+ *rval = JSVAL_FALSE;
+ } else if (obj2 == obj) {
+ *rval = JSVAL_TRUE;
+ } else {
+ JSClass *clasp;
+ JSExtendedClass *xclasp;
+
+ clasp = OBJ_GET_CLASS(cx, obj);
+ xclasp = (clasp->flags & JSCLASS_IS_EXTENDED)
+ ? (JSExtendedClass *)clasp
+ : NULL;
+ if (xclasp && xclasp->outerObject &&
+ xclasp->outerObject(cx, obj2) == obj) {
+ *rval = JSVAL_TRUE;
+ } else if (OBJ_IS_NATIVE(obj2) && OBJ_GET_CLASS(cx, obj2) == clasp) {
+ /*
+ * The combination of JSPROP_SHARED and JSPROP_PERMANENT in a
+ * delegated property makes that property appear to be direct in
+ * all delegating instances of the same native class. This hack
+ * avoids bloating every function instance with its own 'length'
+ * (AKA 'arity') property. But it must not extend across class
+ * boundaries, to avoid making hasOwnProperty lie (bug 320854).
+ *
+ * It's not really a hack, of course: a permanent property can't
+ * be deleted, and JSPROP_SHARED means "don't allocate a slot in
+ * any instance, prototype or delegating object". Without a slot, and
+ * without the ability to remove and recreate (with differences)
+ * the property, there is no way to tell whether it is directly
+ * owned, or indirectly delegated.
+ */
+ sprop = (JSScopeProperty *)prop;
+ *rval = BOOLEAN_TO_JSVAL(SPROP_IS_SHARED_PERMANENT(sprop));
+ } else {
+ *rval = JSVAL_FALSE;
+ }
+ }
+ if (prop)
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+ return JS_TRUE;
+}
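+
+/*
+ * Example (illustrative of the shared-permanent case described above):
+ *
+ *   function f() {}
+ *   f.hasOwnProperty("length")   // true, even though 'length' is a shared
+ *                                // permanent property delegated from the
+ *                                // prototype rather than stored in f's slots
+ */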
+
+/* Proposed ECMA 15.2.4.6. */
+static JSBool
+obj_isPrototypeOf(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSBool b;
+
+ if (!js_IsDelegate(cx, obj, *argv, &b))
+ return JS_FALSE;
+ *rval = BOOLEAN_TO_JSVAL(b);
+ return JS_TRUE;
+}
+
+/* Proposed ECMA 15.2.4.7. */
+static JSBool
+obj_propertyIsEnumerable(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ jsid id;
+ uintN attrs;
+ JSObject *obj2;
+ JSProperty *prop;
+ JSBool ok;
+
+ if (!JS_ValueToId(cx, argv[0], &id))
+ return JS_FALSE;
+
+ if (!OBJ_LOOKUP_PROPERTY(cx, obj, id, &obj2, &prop))
+ return JS_FALSE;
+
+ if (!prop) {
+ *rval = JSVAL_FALSE;
+ return JS_TRUE;
+ }
+
+ /*
+ * XXX ECMA spec error compatible: return false unless hasOwnProperty.
+ * The ECMA spec really should be fixed so propertyIsEnumerable and the
+ * for..in loop agree on whether prototype properties are enumerable,
+ * obviously by fixing this method (not by breaking the for..in loop!).
+ *
+ * We check here for shared permanent prototype properties, which should
+ * be treated as if they are local to obj. They are an implementation
+ * technique used to satisfy ECMA requirements; users should not be able
+ * to distinguish a shared permanent proto-property from a local one.
+ */
+ if (obj2 != obj &&
+ !(OBJ_IS_NATIVE(obj2) &&
+ SPROP_IS_SHARED_PERMANENT((JSScopeProperty *)prop))) {
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+ *rval = JSVAL_FALSE;
+ return JS_TRUE;
+ }
+
+ ok = OBJ_GET_ATTRIBUTES(cx, obj2, id, prop, &attrs);
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+ if (ok)
+ *rval = BOOLEAN_TO_JSVAL((attrs & JSPROP_ENUMERATE) != 0);
+ return ok;
+}
+
+#if JS_HAS_GETTER_SETTER
+static JSBool
+obj_defineGetter(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ jsval fval, junk;
+ jsid id;
+ uintN attrs;
+
+ fval = argv[1];
+ if (JS_TypeOfValue(cx, fval) != JSTYPE_FUNCTION) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_GETTER_OR_SETTER,
+ js_getter_str);
+ return JS_FALSE;
+ }
+
+ if (!JS_ValueToId(cx, argv[0], &id))
+ return JS_FALSE;
+ if (!js_CheckRedeclaration(cx, obj, id, JSPROP_GETTER, NULL, NULL))
+ return JS_FALSE;
+ /*
+ * Getters and setters are just like watchpoints from an access
+ * control point of view.
+ */
+ if (!OBJ_CHECK_ACCESS(cx, obj, id, JSACC_WATCH, &junk, &attrs))
+ return JS_FALSE;
+ return OBJ_DEFINE_PROPERTY(cx, obj, id, JSVAL_VOID,
+ (JSPropertyOp) JSVAL_TO_OBJECT(fval), NULL,
+ JSPROP_ENUMERATE | JSPROP_GETTER | JSPROP_SHARED,
+ NULL);
+}
+
+static JSBool
+obj_defineSetter(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ jsval fval, junk;
+ jsid id;
+ uintN attrs;
+
+ fval = argv[1];
+ if (JS_TypeOfValue(cx, fval) != JSTYPE_FUNCTION) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_GETTER_OR_SETTER,
+ js_setter_str);
+ return JS_FALSE;
+ }
+
+ if (!JS_ValueToId(cx, argv[0], &id))
+ return JS_FALSE;
+ if (!js_CheckRedeclaration(cx, obj, id, JSPROP_SETTER, NULL, NULL))
+ return JS_FALSE;
+ /*
+ * Getters and setters are just like watchpoints from an access
+ * control point of view.
+ */
+ if (!OBJ_CHECK_ACCESS(cx, obj, id, JSACC_WATCH, &junk, &attrs))
+ return JS_FALSE;
+ return OBJ_DEFINE_PROPERTY(cx, obj, id, JSVAL_VOID,
+ NULL, (JSPropertyOp) JSVAL_TO_OBJECT(fval),
+ JSPROP_ENUMERATE | JSPROP_SETTER | JSPROP_SHARED,
+ NULL);
+}
+
+static JSBool
+obj_lookupGetter(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ jsid id;
+ JSObject *pobj;
+ JSProperty *prop;
+ JSScopeProperty *sprop;
+
+ if (!JS_ValueToId(cx, argv[0], &id))
+ return JS_FALSE;
+ if (!OBJ_LOOKUP_PROPERTY(cx, obj, id, &pobj, &prop))
+ return JS_FALSE;
+ if (prop) {
+ if (OBJ_IS_NATIVE(pobj)) {
+ sprop = (JSScopeProperty *) prop;
+ if (sprop->attrs & JSPROP_GETTER)
+ *rval = OBJECT_TO_JSVAL(sprop->getter);
+ }
+ OBJ_DROP_PROPERTY(cx, pobj, prop);
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+obj_lookupSetter(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ jsid id;
+ JSObject *pobj;
+ JSProperty *prop;
+ JSScopeProperty *sprop;
+
+ if (!JS_ValueToId(cx, argv[0], &id))
+ return JS_FALSE;
+ if (!OBJ_LOOKUP_PROPERTY(cx, obj, id, &pobj, &prop))
+ return JS_FALSE;
+ if (prop) {
+ if (OBJ_IS_NATIVE(pobj)) {
+ sprop = (JSScopeProperty *) prop;
+ if (sprop->attrs & JSPROP_SETTER)
+ *rval = OBJECT_TO_JSVAL(sprop->setter);
+ }
+ OBJ_DROP_PROPERTY(cx, pobj, prop);
+ }
+ return JS_TRUE;
+}
+#endif /* JS_HAS_GETTER_SETTER */
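+
+/*
+ * Usage sketch (illustrative) for the four methods above:
+ *
+ *   var o = {};
+ *   o.__defineGetter__("x", function () { return 42; });
+ *   o.x                      // 42
+ *   o.__lookupGetter__("x")  // the getter function installed above
+ *   o.__defineSetter__("y", function (v) { this.stored = v; });
+ *   o.y = 7;                 // calls the setter; o.stored becomes 7
+ */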
+
+#if JS_HAS_OBJ_WATCHPOINT
+const char js_watch_str[] = "watch";
+const char js_unwatch_str[] = "unwatch";
+#endif
+const char js_hasOwnProperty_str[] = "hasOwnProperty";
+const char js_isPrototypeOf_str[] = "isPrototypeOf";
+const char js_propertyIsEnumerable_str[] = "propertyIsEnumerable";
+#if JS_HAS_GETTER_SETTER
+const char js_defineGetter_str[] = "__defineGetter__";
+const char js_defineSetter_str[] = "__defineSetter__";
+const char js_lookupGetter_str[] = "__lookupGetter__";
+const char js_lookupSetter_str[] = "__lookupSetter__";
+#endif
+
+static JSFunctionSpec object_methods[] = {
+#if JS_HAS_TOSOURCE
+ {js_toSource_str, js_obj_toSource, 0, 0, OBJ_TOSTRING_EXTRA},
+#endif
+ {js_toString_str, js_obj_toString, 0, 0, OBJ_TOSTRING_EXTRA},
+ {js_toLocaleString_str, js_obj_toLocaleString, 0, 0, OBJ_TOSTRING_EXTRA},
+ {js_valueOf_str, obj_valueOf, 0,0,0},
+ {js_eval_str, obj_eval, 1,0,0},
+#if JS_HAS_OBJ_WATCHPOINT
+ {js_watch_str, obj_watch, 2,0,0},
+ {js_unwatch_str, obj_unwatch, 1,0,0},
+#endif
+ {js_hasOwnProperty_str, obj_hasOwnProperty, 1,0,0},
+ {js_isPrototypeOf_str, obj_isPrototypeOf, 1,0,0},
+ {js_propertyIsEnumerable_str, obj_propertyIsEnumerable, 1,0,0},
+#if JS_HAS_GETTER_SETTER
+ {js_defineGetter_str, obj_defineGetter, 2,0,0},
+ {js_defineSetter_str, obj_defineSetter, 2,0,0},
+ {js_lookupGetter_str, obj_lookupGetter, 1,0,0},
+ {js_lookupSetter_str, obj_lookupSetter, 1,0,0},
+#endif
+ {0,0,0,0,0}
+};
+
+static JSBool
+Object(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ if (argc == 0) {
+ /* Trigger logic below to construct a blank object. */
+ obj = NULL;
+ } else {
+ /* If argv[0] is null or undefined, obj comes back null. */
+ if (!js_ValueToObject(cx, argv[0], &obj))
+ return JS_FALSE;
+ }
+ if (!obj) {
+ JS_ASSERT(!argc || JSVAL_IS_NULL(argv[0]) || JSVAL_IS_VOID(argv[0]));
+ if (cx->fp->flags & JSFRAME_CONSTRUCTING)
+ return JS_TRUE;
+ obj = js_NewObject(cx, &js_ObjectClass, NULL, NULL);
+ if (!obj)
+ return JS_FALSE;
+ }
+ *rval = OBJECT_TO_JSVAL(obj);
+ return JS_TRUE;
+}
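+
+/*
+ * Behavior sketch (follows from the branches above):
+ *
+ *   Object()       // fresh blank object (argc == 0 path)
+ *   Object(null)   // also a fresh blank object (js_ValueToObject yields null)
+ *   Object(3)      // a Number wrapper object for 3
+ *   new Object()   // keeps the already-constructed 'this' (JSFRAME_CONSTRUCTING)
+ */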
+
+/*
+ * ObjectOps and Class for with-statement stack objects.
+ */
+static JSBool
+with_LookupProperty(JSContext *cx, JSObject *obj, jsid id, JSObject **objp,
+ JSProperty **propp)
+{
+ JSObject *proto = OBJ_GET_PROTO(cx, obj);
+ if (!proto)
+ return js_LookupProperty(cx, obj, id, objp, propp);
+ return OBJ_LOOKUP_PROPERTY(cx, proto, id, objp, propp);
+}
+
+static JSBool
+with_GetProperty(JSContext *cx, JSObject *obj, jsid id, jsval *vp)
+{
+ JSObject *proto = OBJ_GET_PROTO(cx, obj);
+ if (!proto)
+ return js_GetProperty(cx, obj, id, vp);
+ return OBJ_GET_PROPERTY(cx, proto, id, vp);
+}
+
+static JSBool
+with_SetProperty(JSContext *cx, JSObject *obj, jsid id, jsval *vp)
+{
+ JSObject *proto = OBJ_GET_PROTO(cx, obj);
+ if (!proto)
+ return js_SetProperty(cx, obj, id, vp);
+ return OBJ_SET_PROPERTY(cx, proto, id, vp);
+}
+
+static JSBool
+with_GetAttributes(JSContext *cx, JSObject *obj, jsid id, JSProperty *prop,
+ uintN *attrsp)
+{
+ JSObject *proto = OBJ_GET_PROTO(cx, obj);
+ if (!proto)
+ return js_GetAttributes(cx, obj, id, prop, attrsp);
+ return OBJ_GET_ATTRIBUTES(cx, proto, id, prop, attrsp);
+}
+
+static JSBool
+with_SetAttributes(JSContext *cx, JSObject *obj, jsid id, JSProperty *prop,
+ uintN *attrsp)
+{
+ JSObject *proto = OBJ_GET_PROTO(cx, obj);
+ if (!proto)
+ return js_SetAttributes(cx, obj, id, prop, attrsp);
+ return OBJ_SET_ATTRIBUTES(cx, proto, id, prop, attrsp);
+}
+
+static JSBool
+with_DeleteProperty(JSContext *cx, JSObject *obj, jsid id, jsval *rval)
+{
+ JSObject *proto = OBJ_GET_PROTO(cx, obj);
+ if (!proto)
+ return js_DeleteProperty(cx, obj, id, rval);
+ return OBJ_DELETE_PROPERTY(cx, proto, id, rval);
+}
+
+static JSBool
+with_DefaultValue(JSContext *cx, JSObject *obj, JSType hint, jsval *vp)
+{
+ JSObject *proto = OBJ_GET_PROTO(cx, obj);
+ if (!proto)
+ return js_DefaultValue(cx, obj, hint, vp);
+ return OBJ_DEFAULT_VALUE(cx, proto, hint, vp);
+}
+
+static JSBool
+with_Enumerate(JSContext *cx, JSObject *obj, JSIterateOp enum_op,
+ jsval *statep, jsid *idp)
+{
+ JSObject *proto = OBJ_GET_PROTO(cx, obj);
+ if (!proto)
+ return js_Enumerate(cx, obj, enum_op, statep, idp);
+ return OBJ_ENUMERATE(cx, proto, enum_op, statep, idp);
+}
+
+static JSBool
+with_CheckAccess(JSContext *cx, JSObject *obj, jsid id, JSAccessMode mode,
+ jsval *vp, uintN *attrsp)
+{
+ JSObject *proto = OBJ_GET_PROTO(cx, obj);
+ if (!proto)
+ return js_CheckAccess(cx, obj, id, mode, vp, attrsp);
+ return OBJ_CHECK_ACCESS(cx, proto, id, mode, vp, attrsp);
+}
+
+static JSObject *
+with_ThisObject(JSContext *cx, JSObject *obj)
+{
+ JSObject *proto = OBJ_GET_PROTO(cx, obj);
+ if (!proto)
+ return obj;
+ return OBJ_THIS_OBJECT(cx, proto);
+}
+
+JS_FRIEND_DATA(JSObjectOps) js_WithObjectOps = {
+ js_NewObjectMap, js_DestroyObjectMap,
+ with_LookupProperty, js_DefineProperty,
+ with_GetProperty, with_SetProperty,
+ with_GetAttributes, with_SetAttributes,
+ with_DeleteProperty, with_DefaultValue,
+ with_Enumerate, with_CheckAccess,
+ with_ThisObject, NATIVE_DROP_PROPERTY,
+ NULL, NULL,
+ NULL, NULL,
+ js_SetProtoOrParent, js_SetProtoOrParent,
+ js_Mark, js_Clear,
+ NULL, NULL
+};
+
+static JSObjectOps *
+with_getObjectOps(JSContext *cx, JSClass *clasp)
+{
+ return &js_WithObjectOps;
+}
+
+JSClass js_WithClass = {
+ "With",
+ JSCLASS_HAS_PRIVATE | JSCLASS_HAS_RESERVED_SLOTS(1) | JSCLASS_IS_ANONYMOUS,
+ JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
+ JS_EnumerateStub, JS_ResolveStub, JS_ConvertStub, JS_FinalizeStub,
+ with_getObjectOps,
+ 0,0,0,0,0,0,0
+};
+
+JSObject *
+js_NewWithObject(JSContext *cx, JSObject *proto, JSObject *parent, jsint depth)
+{
+ JSObject *obj;
+
+ obj = js_NewObject(cx, &js_WithClass, proto, parent);
+ if (!obj)
+ return NULL;
+ obj->slots[JSSLOT_PRIVATE] = PRIVATE_TO_JSVAL(cx->fp);
+ OBJ_SET_BLOCK_DEPTH(cx, obj, depth);
+ return obj;
+}
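+
+/*
+ * Context sketch (illustrative): the interpreter (and obj_eval above) builds
+ * one of these for each 'with' scope, e.g. for
+ *
+ *   with (o) { x = 1; }
+ *
+ * the With object gets o as its proto and the enclosing scope object as its
+ * parent, so every with_* op above simply forwards to the proto while scope
+ * chain walks keep working through the parent link.
+ */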
+
+JSObject *
+js_NewBlockObject(JSContext *cx)
+{
+ JSObject *obj;
+
+ /*
+ * Null obj's proto slot so that Object.prototype.* does not pollute block
+ * scopes. Make sure obj has its own scope too, since clearing proto does
+ * not affect OBJ_SCOPE(obj).
+ */
+ obj = js_NewObject(cx, &js_BlockClass, NULL, NULL);
+ if (!obj || !js_GetMutableScope(cx, obj))
+ return NULL;
+ OBJ_SET_PROTO(cx, obj, NULL);
+ return obj;
+}
+
+JSObject *
+js_CloneBlockObject(JSContext *cx, JSObject *proto, JSObject *parent,
+ JSStackFrame *fp)
+{
+ JSObject *clone;
+
+ clone = js_NewObject(cx, &js_BlockClass, proto, parent);
+ if (!clone)
+ return NULL;
+ clone->slots[JSSLOT_PRIVATE] = PRIVATE_TO_JSVAL(fp);
+ clone->slots[JSSLOT_BLOCK_DEPTH] =
+ OBJ_GET_SLOT(cx, proto, JSSLOT_BLOCK_DEPTH);
+ return clone;
+}
+
+/*
+ * XXXblock this reverses a path in the property tree -- try to share
+ * the prototype's scope harder!
+ */
+JSBool
+js_PutBlockObject(JSContext *cx, JSObject *obj)
+{
+ JSStackFrame *fp;
+ uintN depth, slot;
+ JSScopeProperty *sprop;
+
+ fp = (JSStackFrame *) JS_GetPrivate(cx, obj);
+ JS_ASSERT(fp);
+ depth = OBJ_BLOCK_DEPTH(cx, obj);
+ for (sprop = OBJ_SCOPE(obj)->lastProp; sprop; sprop = sprop->parent) {
+ if (sprop->getter != js_BlockClass.getProperty)
+ continue;
+ if (!(sprop->flags & SPROP_HAS_SHORTID))
+ continue;
+ slot = depth + (uintN)sprop->shortid;
+ JS_ASSERT(slot < fp->script->depth);
+ if (!js_DefineNativeProperty(cx, obj, sprop->id,
+ fp->spbase[slot], NULL, NULL,
+ JSPROP_ENUMERATE | JSPROP_PERMANENT,
+ SPROP_HAS_SHORTID, sprop->shortid,
+ NULL)) {
+ JS_SetPrivate(cx, obj, NULL);
+ return JS_FALSE;
+ }
+ }
+
+ return JS_SetPrivate(cx, obj, NULL);
+}
+
+static JSBool
+block_getProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ JSStackFrame *fp;
+ jsint slot;
+
+ JS_ASSERT(JS_InstanceOf(cx, obj, &js_BlockClass, NULL));
+ if (!JSVAL_IS_INT(id))
+ return JS_TRUE;
+
+ fp = (JSStackFrame *) JS_GetPrivate(cx, obj);
+ if (!fp)
+ return JS_TRUE;
+
+ slot = OBJ_BLOCK_DEPTH(cx, obj) + (uint16) JSVAL_TO_INT(id);
+ JS_ASSERT((uintN)slot < fp->script->depth);
+ *vp = fp->spbase[slot];
+ return JS_TRUE;
+}
+
+static JSBool
+block_setProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ JSStackFrame *fp;
+ jsint slot;
+
+ JS_ASSERT(JS_InstanceOf(cx, obj, &js_BlockClass, NULL));
+ if (!JSVAL_IS_INT(id))
+ return JS_TRUE;
+
+ fp = (JSStackFrame *) JS_GetPrivate(cx, obj);
+ if (!fp)
+ return JS_TRUE;
+
+ slot = OBJ_BLOCK_DEPTH(cx, obj) + (uint16) JSVAL_TO_INT(id);
+ JS_ASSERT((uintN)slot < fp->script->depth);
+ fp->spbase[slot] = *vp;
+ return JS_TRUE;
+}
+
+#if JS_HAS_XDR
+
+#define NO_PARENT_INDEX (jsatomid)-1
+
+jsatomid
+FindObjectAtomIndex(JSAtomMap *map, JSObject *obj)
+{
+ size_t i;
+ JSAtom *atom;
+
+ for (i = 0; i < map->length; i++) {
+ atom = map->vector[i];
+ if (ATOM_KEY(atom) == OBJECT_TO_JSVAL(obj))
+ return i;
+ }
+
+ return NO_PARENT_INDEX;
+}
+
+static JSBool
+block_xdrObject(JSXDRState *xdr, JSObject **objp)
+{
+ JSContext *cx;
+ jsatomid parentId;
+ JSAtomMap *atomMap;
+ JSObject *obj, *parent;
+ uint16 depth, count, i;
+ uint32 tmp;
+ JSTempValueRooter tvr;
+ JSScopeProperty *sprop;
+ jsid propid;
+ JSAtom *atom;
+ int16 shortid;
+ JSBool ok;
+
+ cx = xdr->cx;
+#ifdef __GNUC__
+ obj = NULL; /* quell GCC overwarning */
+#endif
+
+ atomMap = &xdr->script->atomMap;
+ if (xdr->mode == JSXDR_ENCODE) {
+ obj = *objp;
+ parent = OBJ_GET_PARENT(cx, obj);
+ parentId = FindObjectAtomIndex(atomMap, parent);
+ depth = OBJ_BLOCK_DEPTH(cx, obj);
+ count = OBJ_BLOCK_COUNT(cx, obj);
+ tmp = (uint32)(depth << 16) | count;
+ }
+#ifdef __GNUC__ /* suppress bogus gcc warnings */
+ else count = 0;
+#endif
+
+ /* First, XDR the parent atomid. */
+ if (!JS_XDRUint32(xdr, &parentId))
+ return JS_FALSE;
+
+ if (xdr->mode == JSXDR_DECODE) {
+ obj = js_NewBlockObject(cx);
+ if (!obj)
+ return JS_FALSE;
+ *objp = obj;
+
+ /*
+ * If there's a parent id, then get the parent out of our script's
+ * atomMap. We know that we XDR block objects in outer-to-inner order,
+ * which means that getting the parent now will work.
+ */
+ if (parentId == NO_PARENT_INDEX) {
+ parent = NULL;
+ } else {
+ atom = js_GetAtom(cx, atomMap, parentId);
+ JS_ASSERT(ATOM_IS_OBJECT(atom));
+ parent = ATOM_TO_OBJECT(atom);
+ }
+ obj->slots[JSSLOT_PARENT] = OBJECT_TO_JSVAL(parent);
+ }
+
+ JS_PUSH_SINGLE_TEMP_ROOT(cx, OBJECT_TO_JSVAL(obj), &tvr);
+
+ if (!JS_XDRUint32(xdr, &tmp)) {
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ return JS_FALSE;
+ }
+
+ if (xdr->mode == JSXDR_DECODE) {
+ depth = (uint16)(tmp >> 16);
+ count = (uint16)tmp;
+ obj->slots[JSSLOT_BLOCK_DEPTH] = INT_TO_JSVAL(depth);
+ }
+
+ /*
+ * XDR the block object's properties. We know that there are 'count'
+ * properties to XDR, stored as id/shortid pairs. We do not XDR any
+ * non-native properties, only those that the compiler created.
+ */
+ sprop = NULL;
+ ok = JS_TRUE;
+ for (i = 0; i < count; i++) {
+ if (xdr->mode == JSXDR_ENCODE) {
+ /* Find a property to XDR. */
+ do {
+ /* If sprop is NULL, this is the first property. */
+ sprop = sprop ? sprop->parent : OBJ_SCOPE(obj)->lastProp;
+ } while (!(sprop->flags & SPROP_HAS_SHORTID));
+
+ JS_ASSERT(sprop->getter == js_BlockClass.getProperty);
+ propid = sprop->id;
+ JS_ASSERT(JSID_IS_ATOM(propid));
+ atom = JSID_TO_ATOM(propid);
+ shortid = sprop->shortid;
+ JS_ASSERT(shortid >= 0);
+ }
+
+ /* XDR the real id, then the shortid. */
+ if (!js_XDRStringAtom(xdr, &atom) ||
+ !JS_XDRUint16(xdr, (uint16 *)&shortid)) {
+ ok = JS_FALSE;
+ break;
+ }
+
+ if (xdr->mode == JSXDR_DECODE) {
+ if (!js_DefineNativeProperty(cx, obj, ATOM_TO_JSID(atom),
+ JSVAL_VOID, NULL, NULL,
+ JSPROP_ENUMERATE | JSPROP_PERMANENT,
+ SPROP_HAS_SHORTID, shortid, NULL)) {
+ ok = JS_FALSE;
+ break;
+ }
+ }
+ }
+
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ return ok;
+}
+
+#else
+# define block_xdrObject NULL
+#endif
+
+JSClass js_BlockClass = {
+ "Block",
+ JSCLASS_HAS_PRIVATE | JSCLASS_HAS_RESERVED_SLOTS(1) |
+ JSCLASS_IS_ANONYMOUS | JSCLASS_HAS_CACHED_PROTO(JSProto_Block),
+ JS_PropertyStub, JS_PropertyStub, block_getProperty, block_setProperty,
+ JS_EnumerateStub, JS_ResolveStub, JS_ConvertStub, JS_FinalizeStub,
+ NULL, NULL, NULL, NULL, block_xdrObject, NULL, NULL, NULL
+};
+
+JSObject*
+js_InitBlockClass(JSContext *cx, JSObject* obj)
+{
+ JSObject *proto;
+
+ proto = JS_InitClass(cx, obj, NULL, &js_BlockClass, NULL, 0, NULL,
+ NULL, NULL, NULL);
+ if (!proto)
+ return NULL;
+
+ OBJ_SET_PROTO(cx, proto, NULL);
+ return proto;
+}
+
+JSObject *
+js_InitObjectClass(JSContext *cx, JSObject *obj)
+{
+ JSObject *proto;
+ jsval eval;
+
+ proto = JS_InitClass(cx, obj, NULL, &js_ObjectClass, Object, 1,
+ object_props, object_methods, NULL, NULL);
+ if (!proto)
+ return NULL;
+
+ /* ECMA (15.1.2.1) says 'eval' is also a property of the global object. */
+ if (!OBJ_GET_PROPERTY(cx, proto,
+ ATOM_TO_JSID(cx->runtime->atomState.evalAtom),
+ &eval)) {
+ return NULL;
+ }
+ if (!OBJ_DEFINE_PROPERTY(cx, obj,
+ ATOM_TO_JSID(cx->runtime->atomState.evalAtom),
+ eval, NULL, NULL, 0, NULL)) {
+ return NULL;
+ }
+
+ return proto;
+}
+
+void
+js_InitObjectMap(JSObjectMap *map, jsrefcount nrefs, JSObjectOps *ops,
+ JSClass *clasp)
+{
+ map->nrefs = nrefs;
+ map->ops = ops;
+ map->nslots = JS_INITIAL_NSLOTS;
+ map->freeslot = JSSLOT_FREE(clasp);
+}
+
+JSObjectMap *
+js_NewObjectMap(JSContext *cx, jsrefcount nrefs, JSObjectOps *ops,
+ JSClass *clasp, JSObject *obj)
+{
+ return (JSObjectMap *) js_NewScope(cx, nrefs, ops, clasp, obj);
+}
+
+void
+js_DestroyObjectMap(JSContext *cx, JSObjectMap *map)
+{
+ js_DestroyScope(cx, (JSScope *)map);
+}
+
+JSObjectMap *
+js_HoldObjectMap(JSContext *cx, JSObjectMap *map)
+{
+ JS_ASSERT(map->nrefs >= 0);
+ JS_ATOMIC_INCREMENT(&map->nrefs);
+ return map;
+}
+
+JSObjectMap *
+js_DropObjectMap(JSContext *cx, JSObjectMap *map, JSObject *obj)
+{
+ JS_ASSERT(map->nrefs > 0);
+ JS_ATOMIC_DECREMENT(&map->nrefs);
+ if (map->nrefs == 0) {
+ map->ops->destroyObjectMap(cx, map);
+ return NULL;
+ }
+ if (MAP_IS_NATIVE(map) && ((JSScope *)map)->object == obj)
+ ((JSScope *)map)->object = NULL;
+ return map;
+}
+
+static jsval *
+AllocSlots(JSContext *cx, jsval *slots, uint32 nslots)
+{
+ size_t nbytes, obytes, minbytes;
+ uint32 i, oslots;
+ jsval *newslots;
+
+ nbytes = (nslots + 1) * sizeof(jsval);
+ if (slots) {
+ oslots = slots[-1];
+ obytes = (oslots + 1) * sizeof(jsval);
+ } else {
+ oslots = 0;
+ obytes = 0;
+ }
+
+ if (nbytes <= GC_NBYTES_MAX) {
+ newslots = (jsval *) js_NewGCThing(cx, GCX_PRIVATE, nbytes);
+ } else {
+ newslots = (jsval *)
+ JS_realloc(cx,
+ (obytes <= GC_NBYTES_MAX) ? NULL : slots - 1,
+ nbytes);
+ }
+ if (!newslots)
+ return NULL;
+
+ if (obytes != 0) {
+ /* If either nbytes or obytes fits in a GC-thing, we must copy. */
+ minbytes = JS_MIN(nbytes, obytes);
+ if (minbytes <= GC_NBYTES_MAX)
+ memcpy(newslots + 1, slots, minbytes - sizeof(jsval));
+
+ /* If nbytes fits in a GC-thing but obytes didn't, free the old slots. */
+ if (nbytes <= GC_NBYTES_MAX && obytes > GC_NBYTES_MAX)
+ JS_free(cx, slots - 1);
+
+ /* If we're extending an allocation, initialize free slots. */
+ if (nslots > oslots) {
+ for (i = 1 + oslots; i <= nslots; i++)
+ newslots[i] = JSVAL_VOID;
+ }
+ }
+
+ newslots[0] = nslots;
+ return ++newslots;
+}
+
+static void
+FreeSlots(JSContext *cx, jsval *slots)
+{
+ size_t nbytes;
+
+ /*
+ * NB: We count on smaller GC-things being finalized before larger things
+ * that become garbage during the same GC. Without this assumption, we
+ * couldn't load slots[-1] here without possibly loading a gcFreeList link
+ * (see struct JSGCThing in jsgc.h).
+ */
+ nbytes = (slots[-1] + 1) * sizeof(jsval);
+ if (nbytes > GC_NBYTES_MAX)
+ JS_free(cx, slots - 1);
+}
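+
+/*
+ * Layout sketch (illustrative) for the slot vectors managed above: the word
+ * at index -1 of the returned pointer holds the slot count, so for example
+ *
+ *   jsval *slots = AllocSlots(cx, NULL, JS_INITIAL_NSLOTS);
+ *   // slots[-1] == JS_INITIAL_NSLOTS; slots[0..JS_INITIAL_NSLOTS-1] usable
+ *
+ * Small vectors (nbytes <= GC_NBYTES_MAX) live in GCX_PRIVATE GC-things and
+ * are reclaimed by the GC; larger ones come from JS_realloc and are freed
+ * explicitly by FreeSlots.
+ */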
+
+extern JSBool
+js_GetClassId(JSContext *cx, JSClass *clasp, jsid *idp)
+{
+ JSProtoKey key;
+ JSAtom *atom;
+
+ key = JSCLASS_CACHED_PROTO_KEY(clasp);
+ if (key != JSProto_Null) {
+ *idp = INT_TO_JSID(key);
+ } else if (clasp->flags & JSCLASS_IS_ANONYMOUS) {
+ *idp = INT_TO_JSID(JSProto_Object);
+ } else {
+ atom = js_Atomize(cx, clasp->name, strlen(clasp->name), 0);
+ if (!atom)
+ return JS_FALSE;
+ *idp = ATOM_TO_JSID(atom);
+ }
+ return JS_TRUE;
+}
+
+JSObject *
+js_NewObject(JSContext *cx, JSClass *clasp, JSObject *proto, JSObject *parent)
+{
+ jsid id;
+ JSObject *obj;
+ JSObjectOps *ops;
+ JSObjectMap *map;
+ JSClass *protoclasp;
+ uint32 nslots, i;
+ jsval *newslots;
+ JSTempValueRooter tvr;
+
+ /* Bootstrap the ur-object, and make it the default prototype object. */
+ if (!proto) {
+ if (!js_GetClassId(cx, clasp, &id))
+ return NULL;
+ if (!js_GetClassPrototype(cx, parent, id, &proto))
+ return NULL;
+ if (!proto &&
+ !js_GetClassPrototype(cx, parent, INT_TO_JSID(JSProto_Object),
+ &proto)) {
+ return NULL;
+ }
+ }
+
+ /* Always call the class's getObjectOps hook if it has one. */
+ ops = clasp->getObjectOps
+ ? clasp->getObjectOps(cx, clasp)
+ : &js_ObjectOps;
+
+ /*
+ * Allocate a zeroed object from the GC heap. Do this *after* any other
+ * GC-thing allocations under js_GetClassPrototype or clasp->getObjectOps,
+ * to avoid displacing the newborn root for obj.
+ */
+ obj = (JSObject *) js_NewGCThing(cx, GCX_OBJECT, sizeof(JSObject));
+ if (!obj)
+ return NULL;
+
+ /*
+ * Root obj to prevent it from being collected out from under this call
+ * to js_NewObject. AllocSlots can trigger a finalizer from a last-ditch
+ * GC calling JS_ClearNewbornRoots. There's also the possibility of things
+ * happening under the objectHook call-out further below.
+ */
+ JS_PUSH_TEMP_ROOT_OBJECT(cx, obj, &tvr);
+
+ /*
+ * Share proto's map only if it has the same JSObjectOps, and only if
+ * proto's class has the same private and reserved slots as obj's map
+ * and class have. We assume that if prototype and object are of the
+ * same class, they always have the same number of computed reserved
+ * slots (returned via clasp->reserveSlots); otherwise, prototype and
+ * object classes must have the same (null or not) reserveSlots hook.
+ */
+ if (proto &&
+ (map = proto->map)->ops == ops &&
+ ((protoclasp = OBJ_GET_CLASS(cx, proto)) == clasp ||
+ (!((protoclasp->flags ^ clasp->flags) &
+ (JSCLASS_HAS_PRIVATE |
+ (JSCLASS_RESERVED_SLOTS_MASK << JSCLASS_RESERVED_SLOTS_SHIFT))) &&
+ protoclasp->reserveSlots == clasp->reserveSlots)))
+ {
+ /*
+ * Default parent to the parent of the prototype, which was set from
+ * the parent of the prototype's constructor.
+ */
+ if (!parent)
+ parent = OBJ_GET_PARENT(cx, proto);
+
+ /* Share the given prototype's map. */
+ obj->map = js_HoldObjectMap(cx, map);
+
+ /* Ensure that obj starts with the minimum slots for clasp. */
+ nslots = JS_INITIAL_NSLOTS;
+ } else {
+ /* Leave parent alone. Allocate a new map for obj. */
+ map = ops->newObjectMap(cx, 1, ops, clasp, obj);
+ if (!map)
+ goto bad;
+ obj->map = map;
+
+ /* Let ops->newObjectMap set nslots so as to reserve slots. */
+ nslots = map->nslots;
+ }
+
+ /* Allocate a slots vector, with a -1'st element telling its length. */
+ newslots = AllocSlots(cx, NULL, nslots);
+ if (!newslots) {
+ js_DropObjectMap(cx, obj->map, obj);
+ obj->map = NULL;
+ goto bad;
+ }
+
+ /* Set the proto, parent, and class properties. */
+ newslots[JSSLOT_PROTO] = OBJECT_TO_JSVAL(proto);
+ newslots[JSSLOT_PARENT] = OBJECT_TO_JSVAL(parent);
+ newslots[JSSLOT_CLASS] = PRIVATE_TO_JSVAL(clasp);
+
+ /* Clear slots above JSSLOT_CLASS so the GC doesn't load uninitialized memory. */
+ for (i = JSSLOT_CLASS + 1; i < nslots; i++)
+ newslots[i] = JSVAL_VOID;
+
+ /* Store newslots after initializing all of 'em, just in case. */
+ obj->slots = newslots;
+
+ if (cx->runtime->objectHook) {
+ JS_KEEP_ATOMS(cx->runtime);
+ cx->runtime->objectHook(cx, obj, JS_TRUE, cx->runtime->objectHookData);
+ JS_UNKEEP_ATOMS(cx->runtime);
+ }
+
+out:
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ cx->weakRoots.newborn[GCX_OBJECT] = (JSGCThing *) obj;
+ return obj;
+
+bad:
+ obj = NULL;
+ goto out;
+}
+
+JS_STATIC_DLL_CALLBACK(JSObject *)
+js_InitNullClass(JSContext *cx, JSObject *obj)
+{
+ JS_ASSERT(0);
+ return NULL;
+}
+
+#define JS_PROTO(name,code,init) extern JSObject *init(JSContext *, JSObject *);
+#include "jsproto.tbl"
+#undef JS_PROTO
+
+static JSObjectOp lazy_prototype_init[JSProto_LIMIT] = {
+#define JS_PROTO(name,code,init) init,
+#include "jsproto.tbl"
+#undef JS_PROTO
+};
+
+JSBool
+js_GetClassObject(JSContext *cx, JSObject *obj, JSProtoKey key,
+ JSObject **objp)
+{
+ JSBool ok;
+ JSObject *tmp, *cobj;
+ JSResolvingKey rkey;
+ JSResolvingEntry *rentry;
+ uint32 generation;
+ JSObjectOp init;
+ jsval v;
+
+ while ((tmp = OBJ_GET_PARENT(cx, obj)) != NULL)
+ obj = tmp;
+ if (!(OBJ_GET_CLASS(cx, obj)->flags & JSCLASS_IS_GLOBAL)) {
+ *objp = NULL;
+ return JS_TRUE;
+ }
+
+ ok = JS_GetReservedSlot(cx, obj, key, &v);
+ if (!ok)
+ return JS_FALSE;
+ if (!JSVAL_IS_PRIMITIVE(v)) {
+ *objp = JSVAL_TO_OBJECT(v);
+ return JS_TRUE;
+ }
+
+ rkey.obj = obj;
+ rkey.id = ATOM_TO_JSID(cx->runtime->atomState.classAtoms[key]);
+ if (!js_StartResolving(cx, &rkey, JSRESFLAG_LOOKUP, &rentry))
+ return JS_FALSE;
+ if (!rentry) {
+ /* Already caching key in obj -- suppress recursion. */
+ *objp = NULL;
+ return JS_TRUE;
+ }
+ generation = cx->resolvingTable->generation;
+
+ cobj = NULL;
+ init = lazy_prototype_init[key];
+ if (init) {
+ if (!init(cx, obj)) {
+ ok = JS_FALSE;
+ } else {
+ ok = JS_GetReservedSlot(cx, obj, key, &v);
+ if (ok && !JSVAL_IS_PRIMITIVE(v))
+ cobj = JSVAL_TO_OBJECT(v);
+ }
+ }
+
+ js_StopResolving(cx, &rkey, JSRESFLAG_LOOKUP, rentry, generation);
+ *objp = cobj;
+ return ok;
+}
+
+JSBool
+js_SetClassObject(JSContext *cx, JSObject *obj, JSProtoKey key, JSObject *cobj)
+{
+ JS_ASSERT(!OBJ_GET_PARENT(cx, obj));
+ if (!(OBJ_GET_CLASS(cx, obj)->flags & JSCLASS_IS_GLOBAL))
+ return JS_TRUE;
+
+ return JS_SetReservedSlot(cx, obj, key, OBJECT_TO_JSVAL(cobj));
+}
+
+JSBool
+js_FindClassObject(JSContext *cx, JSObject *start, jsid id, jsval *vp)
+{
+ JSObject *obj, *cobj, *pobj;
+ JSProtoKey key;
+ JSProperty *prop;
+ JSScopeProperty *sprop;
+
+ if (start || (cx->fp && (start = cx->fp->scopeChain) != NULL)) {
+ /* Find the topmost object in the scope chain. */
+ do {
+ obj = start;
+ start = OBJ_GET_PARENT(cx, obj);
+ } while (start);
+ } else {
+ obj = cx->globalObject;
+ if (!obj) {
+ *vp = JSVAL_VOID;
+ return JS_TRUE;
+ }
+ }
+
+ OBJ_TO_INNER_OBJECT(cx, obj);
+ if (!obj)
+ return JS_FALSE;
+
+ if (JSID_IS_INT(id)) {
+ key = JSID_TO_INT(id);
+ JS_ASSERT(key != JSProto_Null);
+ if (!js_GetClassObject(cx, obj, key, &cobj))
+ return JS_FALSE;
+ if (cobj) {
+ *vp = OBJECT_TO_JSVAL(cobj);
+ return JS_TRUE;
+ }
+ id = ATOM_TO_JSID(cx->runtime->atomState.classAtoms[key]);
+ }
+
+ JS_ASSERT(OBJ_IS_NATIVE(obj));
+ if (!js_LookupPropertyWithFlags(cx, obj, id, JSRESOLVE_CLASSNAME,
+ &pobj, &prop)) {
+ return JS_FALSE;
+ }
+ if (!prop) {
+ *vp = JSVAL_VOID;
+ return JS_TRUE;
+ }
+
+ JS_ASSERT(OBJ_IS_NATIVE(pobj));
+ sprop = (JSScopeProperty *) prop;
+ JS_ASSERT(SPROP_HAS_VALID_SLOT(sprop, OBJ_SCOPE(pobj)));
+ *vp = OBJ_GET_SLOT(cx, pobj, sprop->slot);
+ OBJ_DROP_PROPERTY(cx, pobj, prop);
+ return JS_TRUE;
+}
+
+JSObject *
+js_ConstructObject(JSContext *cx, JSClass *clasp, JSObject *proto,
+ JSObject *parent, uintN argc, jsval *argv)
+{
+ jsid id;
+ jsval cval, rval;
+ JSTempValueRooter argtvr, tvr;
+ JSObject *obj, *ctor;
+
+ JS_PUSH_TEMP_ROOT(cx, argc, argv, &argtvr);
+
+ if (!js_GetClassId(cx, clasp, &id) ||
+ !js_FindClassObject(cx, parent, id, &cval)) {
+ JS_POP_TEMP_ROOT(cx, &argtvr);
+ return NULL;
+ }
+
+ if (JSVAL_IS_PRIMITIVE(cval)) {
+ js_ReportIsNotFunction(cx, &cval, JSV2F_CONSTRUCT | JSV2F_SEARCH_STACK);
+ JS_POP_TEMP_ROOT(cx, &argtvr);
+ return NULL;
+ }
+
+ /*
+ * Protect cval in case a crazy getter for .prototype uproots it. After
+ * this point, all control flow must exit through label out with obj set.
+ */
+ JS_PUSH_SINGLE_TEMP_ROOT(cx, cval, &tvr);
+
+ /*
+ * If proto or parent are NULL, set them to Constructor.prototype and/or
+ * Constructor.__parent__, just like JSOP_NEW does.
+ */
+ ctor = JSVAL_TO_OBJECT(cval);
+ if (!parent)
+ parent = OBJ_GET_PARENT(cx, ctor);
+ if (!proto) {
+ if (!OBJ_GET_PROPERTY(cx, ctor,
+ ATOM_TO_JSID(cx->runtime->atomState
+ .classPrototypeAtom),
+ &rval)) {
+ obj = NULL;
+ goto out;
+ }
+ if (JSVAL_IS_OBJECT(rval))
+ proto = JSVAL_TO_OBJECT(rval);
+ }
+
+ obj = js_NewObject(cx, clasp, proto, parent);
+ if (!obj)
+ goto out;
+
+ if (!js_InternalConstruct(cx, obj, cval, argc, argv, &rval))
+ goto bad;
+
+ if (JSVAL_IS_PRIMITIVE(rval))
+ goto out;
+ obj = JSVAL_TO_OBJECT(rval);
+
+ /*
+ * If the instance's class differs from what was requested, throw a type
+ * error. If the given class has both the JSCLASS_HAS_PRIVATE and the
+ * JSCLASS_CONSTRUCT_PROTOTYPE flags, and the instance does not have its
+ * private data set at this point, then the constructor was replaced and
+ * we should throw a type error.
+ */
+ if (OBJ_GET_CLASS(cx, obj) != clasp ||
+ (!(~clasp->flags & (JSCLASS_HAS_PRIVATE |
+ JSCLASS_CONSTRUCT_PROTOTYPE)) &&
+ !JS_GetPrivate(cx, obj))) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_WRONG_CONSTRUCTOR, clasp->name);
+ goto bad;
+ }
+
+out:
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ JS_POP_TEMP_ROOT(cx, &argtvr);
+ return obj;
+
+bad:
+ cx->weakRoots.newborn[GCX_OBJECT] = NULL;
+ obj = NULL;
+ goto out;
+}
+
+void
+js_FinalizeObject(JSContext *cx, JSObject *obj)
+{
+ JSObjectMap *map;
+
+ /* Cope with stillborn objects that have no map. */
+ map = obj->map;
+ if (!map)
+ return;
+ JS_ASSERT(obj->slots);
+
+ if (cx->runtime->objectHook)
+ cx->runtime->objectHook(cx, obj, JS_FALSE, cx->runtime->objectHookData);
+
+ /* Remove all watchpoints with weak links to obj. */
+ JS_ClearWatchPointsForObject(cx, obj);
+
+ /*
+ * Finalize obj first, in case it needs map and slots. Optimized to use
+ * LOCKED_OBJ_GET_CLASS instead of OBJ_GET_CLASS, so we avoid "promoting"
+ * obj's scope from lock-free to lock-full (see jslock.c:ClaimScope) when
+ * we're called from the GC. Only the GC should call js_FinalizeObject,
+ * and no other threads run JS (possibly racing to update obj->slots)
+ * while the GC is running.
+ */
+ LOCKED_OBJ_GET_CLASS(obj)->finalize(cx, obj);
+
+ /* Drop map and free slots. */
+ js_DropObjectMap(cx, map, obj);
+ obj->map = NULL;
+ FreeSlots(cx, obj->slots);
+ obj->slots = NULL;
+}
+
+/* XXXbe if one adds props, deletes earlier props, adds more, the last added
+ won't recycle the deleted props' slots. */
+JSBool
+js_AllocSlot(JSContext *cx, JSObject *obj, uint32 *slotp)
+{
+ JSObjectMap *map;
+ JSClass *clasp;
+ uint32 nslots;
+ jsval *newslots;
+
+ map = obj->map;
+ JS_ASSERT(!MAP_IS_NATIVE(map) || ((JSScope *)map)->object == obj);
+ clasp = LOCKED_OBJ_GET_CLASS(obj);
+ if (map->freeslot == JSSLOT_FREE(clasp)) {
+ /* Adjust map->freeslot to include computed reserved slots, if any. */
+ if (clasp->reserveSlots)
+ map->freeslot += clasp->reserveSlots(cx, obj);
+ }
+ nslots = map->nslots;
+ if (map->freeslot >= nslots) {
+ nslots = map->freeslot;
+ JS_ASSERT(nslots >= JS_INITIAL_NSLOTS);
+ nslots += (nslots + 1) / 2;
+
+ newslots = AllocSlots(cx, obj->slots, nslots);
+ if (!newslots)
+ return JS_FALSE;
+ map->nslots = nslots;
+ obj->slots = newslots;
+ }
+
+#ifdef TOO_MUCH_GC
+ obj->slots[map->freeslot] = JSVAL_VOID;
+#endif
+ *slotp = map->freeslot++;
+ return JS_TRUE;
+}
+
+void
+js_FreeSlot(JSContext *cx, JSObject *obj, uint32 slot)
+{
+ JSObjectMap *map;
+ uint32 nslots;
+ jsval *newslots;
+
+ OBJ_CHECK_SLOT(obj, slot);
+ obj->slots[slot] = JSVAL_VOID;
+ map = obj->map;
+ JS_ASSERT(!MAP_IS_NATIVE(map) || ((JSScope *)map)->object == obj);
+ if (map->freeslot == slot + 1)
+ map->freeslot = slot;
+ nslots = map->nslots;
+ if (nslots > JS_INITIAL_NSLOTS && map->freeslot < nslots / 2) {
+ nslots = map->freeslot;
+ nslots += nslots / 2;
+ if (nslots < JS_INITIAL_NSLOTS)
+ nslots = JS_INITIAL_NSLOTS;
+
+ newslots = AllocSlots(cx, obj->slots, nslots);
+ if (!newslots)
+ return;
+ map->nslots = nslots;
+ obj->slots = newslots;
+ }
+}
+
+/* JSVAL_INT_MAX as a string */
+#define JSVAL_INT_MAX_STRING "1073741823"
+
+#define CHECK_FOR_STRING_INDEX(id) \
+ JS_BEGIN_MACRO \
+ if (JSID_IS_ATOM(id)) { \
+ JSAtom *atom_ = JSID_TO_ATOM(id); \
+ JSString *str_ = ATOM_TO_STRING(atom_); \
+ const jschar *cp_ = str_->chars; \
+ JSBool negative_ = (*cp_ == '-'); \
+ if (negative_) cp_++; \
+ if (JS7_ISDEC(*cp_)) { \
+ size_t n_ = str_->length - negative_; \
+ if (n_ <= sizeof(JSVAL_INT_MAX_STRING) - 1) \
+ id = CheckForStringIndex(id, cp_, cp_ + n_, negative_); \
+ } \
+ } \
+ JS_END_MACRO
+
+static jsid
+CheckForStringIndex(jsid id, const jschar *cp, const jschar *end,
+ JSBool negative)
+{
+ jsuint index = JS7_UNDEC(*cp++);
+ jsuint oldIndex = 0;
+ jsuint c = 0;
+
+ if (index != 0) {
+ while (JS7_ISDEC(*cp)) {
+ oldIndex = index;
+ c = JS7_UNDEC(*cp);
+ index = 10 * index + c;
+ cp++;
+ }
+ }
+ if (cp == end &&
+ (oldIndex < (JSVAL_INT_MAX / 10) ||
+ (oldIndex == (JSVAL_INT_MAX / 10) &&
+ c <= (JSVAL_INT_MAX % 10)))) {
+ if (negative)
+ index = 0 - index;
+ id = INT_TO_JSID((jsint)index);
+ }
+ return id;
+}
+
+static JSBool
+HidePropertyName(JSContext *cx, jsid *idp)
+{
+ jsid id;
+ JSAtom *atom, *hidden;
+
+ id = *idp;
+ JS_ASSERT(JSID_IS_ATOM(id));
+
+ atom = JSID_TO_ATOM(id);
+ JS_ASSERT(!(atom->flags & ATOM_HIDDEN));
+ JS_ASSERT(ATOM_IS_STRING(atom));
+
+ hidden = js_AtomizeString(cx, ATOM_TO_STRING(atom), ATOM_HIDDEN);
+ if (!hidden)
+ return JS_FALSE;
+
+ /*
+ * Link hidden to unhidden atom to optimize call_enumerate -- this means
+ * the GC must mark a hidden atom's unhidden counterpart (see js_MarkAtom
+ * in jsgc.c). It uses the atom's entry.value member for this linkage.
+ */
+ hidden->entry.value = atom;
+ *idp = ATOM_TO_JSID(hidden);
+ return JS_TRUE;
+}
+
+JSScopeProperty *
+js_AddHiddenProperty(JSContext *cx, JSObject *obj, jsid id,
+ JSPropertyOp getter, JSPropertyOp setter, uint32 slot,
+ uintN attrs, uintN flags, intN shortid)
+{
+ if (!HidePropertyName(cx, &id))
+ return NULL;
+
+ flags |= SPROP_IS_HIDDEN;
+ return js_AddNativeProperty(cx, obj, id, getter, setter, slot, attrs,
+ flags, shortid);
+}
+
+JSBool
+js_LookupHiddenProperty(JSContext *cx, JSObject *obj, jsid id, JSObject **objp,
+ JSProperty **propp)
+{
+ return HidePropertyName(cx, &id) &&
+ js_LookupPropertyWithFlags(cx, obj, id, JSRESOLVE_HIDDEN,
+ objp, propp);
+}
+
+JSScopeProperty *
+js_AddNativeProperty(JSContext *cx, JSObject *obj, jsid id,
+ JSPropertyOp getter, JSPropertyOp setter, uint32 slot,
+ uintN attrs, uintN flags, intN shortid)
+{
+ JSScope *scope;
+ JSScopeProperty *sprop;
+
+ JS_LOCK_OBJ(cx, obj);
+ scope = js_GetMutableScope(cx, obj);
+ if (!scope) {
+ sprop = NULL;
+ } else {
+ /*
+ * Handle old bug that took empty string as zero index. Also convert
+ * string indices to integers if appropriate.
+ */
+ CHECK_FOR_STRING_INDEX(id);
+ sprop = js_AddScopeProperty(cx, scope, id, getter, setter, slot, attrs,
+ flags, shortid);
+ }
+ JS_UNLOCK_OBJ(cx, obj);
+ return sprop;
+}
+
+JSScopeProperty *
+js_ChangeNativePropertyAttrs(JSContext *cx, JSObject *obj,
+ JSScopeProperty *sprop, uintN attrs, uintN mask,
+ JSPropertyOp getter, JSPropertyOp setter)
+{
+ JSScope *scope;
+
+ JS_LOCK_OBJ(cx, obj);
+ scope = js_GetMutableScope(cx, obj);
+ if (!scope) {
+ sprop = NULL;
+ } else {
+ sprop = js_ChangeScopePropertyAttrs(cx, scope, sprop, attrs, mask,
+ getter, setter);
+ if (sprop) {
+ PROPERTY_CACHE_FILL(&cx->runtime->propertyCache, obj, sprop->id,
+ sprop);
+ }
+ }
+ JS_UNLOCK_OBJ(cx, obj);
+ return sprop;
+}
+
+JSBool
+js_DefineProperty(JSContext *cx, JSObject *obj, jsid id, jsval value,
+ JSPropertyOp getter, JSPropertyOp setter, uintN attrs,
+ JSProperty **propp)
+{
+ return js_DefineNativeProperty(cx, obj, id, value, getter, setter, attrs,
+ 0, 0, propp);
+}
+
+/*
+ * Backward compatibility requires allowing addProperty hooks to mutate the
+ * nominal initial value of a slot-full property, while GC safety wants that
+ * value to be stored before the call-out through the hook. Optimize to do
+ * both while saving cycles for classes that stub their addProperty hook.
+ */
+#define ADD_PROPERTY_HELPER(cx,clasp,obj,scope,sprop,vp,cleanup) \
+ JS_BEGIN_MACRO \
+ if ((clasp)->addProperty != JS_PropertyStub) { \
+ jsval nominal_ = *(vp); \
+ if (!(clasp)->addProperty(cx, obj, SPROP_USERID(sprop), vp)) { \
+ cleanup; \
+ } \
+ if (*(vp) != nominal_) { \
+ if (SPROP_HAS_VALID_SLOT(sprop, scope)) \
+ LOCKED_OBJ_SET_SLOT(obj, (sprop)->slot, *(vp)); \
+ } \
+ } \
+ JS_END_MACRO
+
+JSBool
+js_DefineNativeProperty(JSContext *cx, JSObject *obj, jsid id, jsval value,
+ JSPropertyOp getter, JSPropertyOp setter, uintN attrs,
+ uintN flags, intN shortid, JSProperty **propp)
+{
+ JSClass *clasp;
+ JSScope *scope;
+ JSScopeProperty *sprop;
+
+ /*
+ * Handle old bug that took empty string as zero index. Also convert
+ * string indices to integers if appropriate.
+ */
+ CHECK_FOR_STRING_INDEX(id);
+
+#if JS_HAS_GETTER_SETTER
+ /*
+ * If defining a getter or setter, we must check for its counterpart and
+ * update the attributes and property ops. A getter or setter is really
+ * only half of a property.
+ */
+ if (attrs & (JSPROP_GETTER | JSPROP_SETTER)) {
+ JSObject *pobj;
+ JSProperty *prop;
+
+ /*
+ * If JS_THREADSAFE and id is found, js_LookupProperty returns with
+ * sprop non-null and pobj locked. If pobj == obj, the property is
+ * already in obj and obj has its own (mutable) scope. So if we are
+ * defining a getter whose setter was already defined, or vice versa,
+ * finish the job via js_ChangeScopePropertyAttrs, and refresh
+ * the property cache line for (obj, id) to map sprop.
+ */
+ if (!js_LookupProperty(cx, obj, id, &pobj, &prop))
+ return JS_FALSE;
+ sprop = (JSScopeProperty *) prop;
+ if (sprop &&
+ pobj == obj &&
+ (sprop->attrs & (JSPROP_GETTER | JSPROP_SETTER))) {
+ sprop = js_ChangeScopePropertyAttrs(cx, OBJ_SCOPE(obj), sprop,
+ attrs, sprop->attrs,
+ (attrs & JSPROP_GETTER)
+ ? getter
+ : sprop->getter,
+ (attrs & JSPROP_SETTER)
+ ? setter
+ : sprop->setter);
+
+ /* NB: obj == pobj, so we can share unlock code at the bottom. */
+ if (!sprop)
+ goto bad;
+ goto out;
+ }
+
+ if (prop) {
+ /* NB: call OBJ_DROP_PROPERTY, as pobj might not be native. */
+ OBJ_DROP_PROPERTY(cx, pobj, prop);
+ prop = NULL;
+ }
+ }
+#endif /* JS_HAS_GETTER_SETTER */
+
+ /* Lock if object locking is required by this implementation. */
+ JS_LOCK_OBJ(cx, obj);
+
+ /* Use the object's class getter and setter by default. */
+ clasp = LOCKED_OBJ_GET_CLASS(obj);
+ if (!getter)
+ getter = clasp->getProperty;
+ if (!setter)
+ setter = clasp->setProperty;
+
+ /* Get obj's own scope if it has one, or create a new one for obj. */
+ scope = js_GetMutableScope(cx, obj);
+ if (!scope)
+ goto bad;
+
+ /* Add the property to scope, or replace an existing one of the same id. */
+ if (clasp->flags & JSCLASS_SHARE_ALL_PROPERTIES)
+ attrs |= JSPROP_SHARED;
+ sprop = js_AddScopeProperty(cx, scope, id, getter, setter,
+ SPROP_INVALID_SLOT, attrs, flags, shortid);
+ if (!sprop)
+ goto bad;
+
+ /* Store value before calling addProperty, in case the latter GC's. */
+ if (SPROP_HAS_VALID_SLOT(sprop, scope))
+ LOCKED_OBJ_SET_SLOT(obj, sprop->slot, value);
+
+ /* XXXbe called with lock held */
+ ADD_PROPERTY_HELPER(cx, clasp, obj, scope, sprop, &value,
+ js_RemoveScopeProperty(cx, scope, id);
+ goto bad);
+
+#if JS_HAS_GETTER_SETTER
+out:
+#endif
+ PROPERTY_CACHE_FILL(&cx->runtime->propertyCache, obj, id, sprop);
+ if (propp)
+ *propp = (JSProperty *) sprop;
+ else
+ JS_UNLOCK_OBJ(cx, obj);
+ return JS_TRUE;
+
+bad:
+ JS_UNLOCK_OBJ(cx, obj);
+ return JS_FALSE;
+}
+
+/*
+ * Given pc pointing after a property accessing bytecode, return true if the
+ * access is "object-detecting" in the sense used by web scripts, e.g., when
+ * checking whether document.all is defined.
+ */
+static JSBool
+Detecting(JSContext *cx, jsbytecode *pc)
+{
+ JSScript *script;
+ jsbytecode *endpc;
+ JSOp op;
+ JSAtom *atom;
+
+ if (!cx->fp)
+ return JS_FALSE;
+ script = cx->fp->script;
+ for (endpc = script->code + script->length; pc < endpc; pc++) {
+ /* General case: a branch or equality op follows the access. */
+ op = (JSOp) *pc;
+ if (js_CodeSpec[op].format & JOF_DETECTING)
+ return JS_TRUE;
+
+ /*
+ * Special case #1: handle (document.all == null). Don't worry about
+ * JS1.2's revision of the equality operators here.
+ */
+ if (op == JSOP_NULL) {
+ if (++pc < endpc)
+ return *pc == JSOP_EQ || *pc == JSOP_NE;
+ break;
+ }
+
+ /*
+ * Special case #2: handle (document.all == undefined). Don't worry
+ * about someone redefining undefined, which was added by Edition 3,
+ * so is read/write for backward compatibility.
+ */
+ if (op == JSOP_NAME) {
+ atom = GET_ATOM(cx, script, pc);
+ if (atom == cx->runtime->atomState.typeAtoms[JSTYPE_VOID] &&
+ (pc += js_CodeSpec[op].length) < endpc) {
+ op = (JSOp) *pc;
+ return op == JSOP_EQ || op == JSOP_NE ||
+ op == JSOP_NEW_EQ || op == JSOP_NEW_NE;
+ }
+ break;
+ }
+
+ /* At this point, anything but grouping means we're not detecting. */
+ if (op != JSOP_GROUP)
+ break;
+ }
+ return JS_FALSE;
+}
+
+JS_FRIEND_API(JSBool)
+js_LookupProperty(JSContext *cx, JSObject *obj, jsid id, JSObject **objp,
+ JSProperty **propp)
+{
+ return js_LookupPropertyWithFlags(cx, obj, id, 0, objp, propp);
+}
+
+JSBool
+js_LookupPropertyWithFlags(JSContext *cx, JSObject *obj, jsid id, uintN flags,
+ JSObject **objp, JSProperty **propp)
+{
+ JSObject *start, *obj2, *proto;
+ JSScope *scope;
+ JSScopeProperty *sprop;
+ JSClass *clasp;
+ JSResolveOp resolve;
+ JSResolvingKey key;
+ JSResolvingEntry *entry;
+ uint32 generation;
+ JSNewResolveOp newresolve;
+ jsbytecode *pc;
+ const JSCodeSpec *cs;
+ uint32 format;
+ JSBool ok;
+
+ /*
+ * Handle old bug that took empty string as zero index. Also convert
+ * string indices to integers if appropriate.
+ */
+ CHECK_FOR_STRING_INDEX(id);
+
+ /* Search scopes starting with obj and following the prototype link. */
+ start = obj;
+ for (;;) {
+ JS_LOCK_OBJ(cx, obj);
+ scope = OBJ_SCOPE(obj);
+ if (scope->object == obj) {
+ sprop = SCOPE_GET_PROPERTY(scope, id);
+ } else {
+ /* Shared prototype scope: try resolve before lookup. */
+ sprop = NULL;
+ }
+
+ /* Try obj's class resolve hook if id was not found in obj's scope. */
+ if (!sprop) {
+ clasp = LOCKED_OBJ_GET_CLASS(obj);
+ resolve = clasp->resolve;
+ if (resolve != JS_ResolveStub) {
+ /* Avoid recursion on (obj, id) already being resolved on cx. */
+ key.obj = obj;
+ key.id = id;
+
+ /*
+ * Once we have successfully added an entry for (obj, key) to
+ * cx->resolvingTable, control must go through cleanup: before
+ * returning. But note that JS_DHASH_ADD may find an existing
+ * entry, in which case we bail to suppress runaway recursion.
+ */
+ if (!js_StartResolving(cx, &key, JSRESFLAG_LOOKUP, &entry)) {
+ JS_UNLOCK_OBJ(cx, obj);
+ return JS_FALSE;
+ }
+ if (!entry) {
+ /* Already resolving id in obj -- suppress recursion. */
+ JS_UNLOCK_OBJ(cx, obj);
+ goto out;
+ }
+ generation = cx->resolvingTable->generation;
+
+ /* Null *propp here so we can test it at cleanup: safely. */
+ *propp = NULL;
+
+ if (clasp->flags & JSCLASS_NEW_RESOLVE) {
+ newresolve = (JSNewResolveOp)resolve;
+ if (!(flags & JSRESOLVE_CLASSNAME) &&
+ cx->fp &&
+ (pc = cx->fp->pc)) {
+ cs = &js_CodeSpec[*pc];
+ format = cs->format;
+ if ((format & JOF_MODEMASK) != JOF_NAME)
+ flags |= JSRESOLVE_QUALIFIED;
+ if ((format & JOF_ASSIGNING) ||
+ (cx->fp->flags & JSFRAME_ASSIGNING)) {
+ flags |= JSRESOLVE_ASSIGNING;
+ } else {
+ pc += cs->length;
+ if (Detecting(cx, pc))
+ flags |= JSRESOLVE_DETECTING;
+ }
+ if (format & JOF_DECLARING)
+ flags |= JSRESOLVE_DECLARING;
+ }
+ obj2 = (clasp->flags & JSCLASS_NEW_RESOLVE_GETS_START)
+ ? start
+ : NULL;
+ JS_UNLOCK_OBJ(cx, obj);
+
+ /* Protect id and all atoms from a GC nested in resolve. */
+ JS_KEEP_ATOMS(cx->runtime);
+ ok = newresolve(cx, obj, ID_TO_VALUE(id), flags, &obj2);
+ JS_UNKEEP_ATOMS(cx->runtime);
+ if (!ok)
+ goto cleanup;
+
+ JS_LOCK_OBJ(cx, obj);
+ if (obj2) {
+ /* Resolved: juggle locks and lookup id again. */
+ if (obj2 != obj) {
+ JS_UNLOCK_OBJ(cx, obj);
+ JS_LOCK_OBJ(cx, obj2);
+ }
+ scope = OBJ_SCOPE(obj2);
+ if (!MAP_IS_NATIVE(&scope->map)) {
+ /* Whoops, newresolve handed back a foreign obj2. */
+ JS_ASSERT(obj2 != obj);
+ JS_UNLOCK_OBJ(cx, obj2);
+ ok = OBJ_LOOKUP_PROPERTY(cx, obj2, id, objp, propp);
+ if (!ok || *propp)
+ goto cleanup;
+ JS_LOCK_OBJ(cx, obj2);
+ } else {
+ /*
+ * Require that obj2 have its own scope now, as we
+ * do for old-style resolve. If it doesn't, then
+ * id was not truly resolved, and we'll find it in
+ * the proto chain, or miss it if obj2's proto is
+ * not on obj's proto chain. That last case is a
+ * "too bad!" case.
+ */
+ if (scope->object == obj2)
+ sprop = SCOPE_GET_PROPERTY(scope, id);
+ }
+ if (sprop) {
+ JS_ASSERT(obj2 == scope->object);
+ obj = obj2;
+ } else if (obj2 != obj) {
+ JS_UNLOCK_OBJ(cx, obj2);
+ JS_LOCK_OBJ(cx, obj);
+ }
+ }
+ } else {
+ /*
+ * Old resolve always requires id re-lookup if obj owns
+ * its scope after resolve returns.
+ */
+ JS_UNLOCK_OBJ(cx, obj);
+ ok = resolve(cx, obj, ID_TO_VALUE(id));
+ if (!ok)
+ goto cleanup;
+ JS_LOCK_OBJ(cx, obj);
+ scope = OBJ_SCOPE(obj);
+ JS_ASSERT(MAP_IS_NATIVE(&scope->map));
+ if (scope->object == obj)
+ sprop = SCOPE_GET_PROPERTY(scope, id);
+ }
+
+ cleanup:
+ js_StopResolving(cx, &key, JSRESFLAG_LOOKUP, entry, generation);
+ if (!ok || *propp)
+ return ok;
+ }
+ }
+
+ if (sprop) {
+ JS_ASSERT(OBJ_SCOPE(obj) == scope);
+ *objp = scope->object; /* XXXbe hide in jsscope.[ch] */
+
+ *propp = (JSProperty *) sprop;
+ return JS_TRUE;
+ }
+
+ proto = LOCKED_OBJ_GET_PROTO(obj);
+ JS_UNLOCK_OBJ(cx, obj);
+ if (!proto)
+ break;
+ if (!OBJ_IS_NATIVE(proto))
+ return OBJ_LOOKUP_PROPERTY(cx, proto, id, objp, propp);
+ obj = proto;
+ }
+
+out:
+ *objp = NULL;
+ *propp = NULL;
+ return JS_TRUE;
+}
+
+JS_FRIEND_API(JSBool)
+js_FindProperty(JSContext *cx, jsid id, JSObject **objp, JSObject **pobjp,
+ JSProperty **propp)
+{
+ JSRuntime *rt;
+ JSObject *obj, *pobj, *lastobj;
+ JSScopeProperty *sprop;
+ JSProperty *prop;
+
+ rt = cx->runtime;
+ obj = cx->fp->scopeChain;
+ do {
+ /* Try the property cache and return immediately on cache hit. */
+ if (OBJ_IS_NATIVE(obj)) {
+ JS_LOCK_OBJ(cx, obj);
+ PROPERTY_CACHE_TEST(&rt->propertyCache, obj, id, sprop);
+ if (sprop) {
+ JS_ASSERT(OBJ_IS_NATIVE(obj));
+ *objp = obj;
+ *pobjp = obj;
+ *propp = (JSProperty *) sprop;
+ return JS_TRUE;
+ }
+ JS_UNLOCK_OBJ(cx, obj);
+ }
+
+ /* On a cache miss, take the slow path. */
+ if (!OBJ_LOOKUP_PROPERTY(cx, obj, id, &pobj, &prop))
+ return JS_FALSE;
+ if (prop) {
+ if (OBJ_IS_NATIVE(pobj)) {
+ sprop = (JSScopeProperty *) prop;
+ PROPERTY_CACHE_FILL(&rt->propertyCache, pobj, id, sprop);
+ }
+ *objp = obj;
+ *pobjp = pobj;
+ *propp = prop;
+ return JS_TRUE;
+ }
+ lastobj = obj;
+ } while ((obj = OBJ_GET_PARENT(cx, obj)) != NULL);
+
+ *objp = lastobj;
+ *pobjp = NULL;
+ *propp = NULL;
+ return JS_TRUE;
+}
+
+JSObject *
+js_FindIdentifierBase(JSContext *cx, jsid id)
+{
+ JSObject *obj, *pobj;
+ JSProperty *prop;
+
+ /*
+ * Look for id's property along the "with" statement chain and the
+ * statically-linked scope chain.
+ */
+ if (!js_FindProperty(cx, id, &obj, &pobj, &prop))
+ return NULL;
+ if (prop) {
+ OBJ_DROP_PROPERTY(cx, pobj, prop);
+ return obj;
+ }
+
+ /*
+ * Use the top-level scope from the scope chain, which won't end in the
+ * same scope as cx->globalObject for cross-context function calls.
+ */
+ JS_ASSERT(obj);
+
+ /*
+ * Property not found. Give a strict warning if binding an undeclared
+ * top-level variable.
+ */
+ if (JS_HAS_STRICT_OPTION(cx)) {
+ JSString *str = JSVAL_TO_STRING(ID_TO_VALUE(id));
+ if (!JS_ReportErrorFlagsAndNumber(cx,
+ JSREPORT_WARNING | JSREPORT_STRICT,
+ js_GetErrorMessage, NULL,
+ JSMSG_UNDECLARED_VAR,
+ JS_GetStringBytes(str))) {
+ return NULL;
+ }
+ }
+ return obj;
+}
+
+JSBool
+js_NativeGet(JSContext *cx, JSObject *obj, JSObject *pobj,
+ JSScopeProperty *sprop, jsval *vp)
+{
+ JSScope *scope;
+ uint32 slot;
+ int32 sample;
+ JSTempValueRooter tvr;
+ JSBool ok;
+
+ JS_ASSERT(OBJ_IS_NATIVE(pobj));
+ JS_ASSERT(JS_IS_OBJ_LOCKED(cx, pobj));
+ scope = OBJ_SCOPE(pobj);
+ JS_ASSERT(scope->object == pobj);
+
+ slot = sprop->slot;
+ *vp = (slot != SPROP_INVALID_SLOT)
+ ? LOCKED_OBJ_GET_SLOT(pobj, slot)
+ : JSVAL_VOID;
+ if (SPROP_HAS_STUB_GETTER(sprop))
+ return JS_TRUE;
+
+ sample = cx->runtime->propertyRemovals;
+ JS_UNLOCK_SCOPE(cx, scope);
+ JS_PUSH_TEMP_ROOT_SPROP(cx, sprop, &tvr);
+ ok = SPROP_GET(cx, sprop, obj, pobj, vp);
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ if (!ok)
+ return JS_FALSE;
+
+ JS_LOCK_SCOPE(cx, scope);
+ JS_ASSERT(scope->object == pobj);
+ if (SLOT_IN_SCOPE(slot, scope) &&
+ (JS_LIKELY(cx->runtime->propertyRemovals == sample) ||
+ SCOPE_GET_PROPERTY(scope, sprop->id) == sprop)) {
+ LOCKED_OBJ_SET_SLOT(pobj, slot, *vp);
+ }
+
+ return JS_TRUE;
+}
+
+JSBool
+js_NativeSet(JSContext *cx, JSObject *obj, JSScopeProperty *sprop, jsval *vp)
+{
+ JSScope *scope;
+ uint32 slot;
+ jsval pval;
+ int32 sample;
+ JSTempValueRooter tvr;
+ JSBool ok;
+
+ JS_ASSERT(OBJ_IS_NATIVE(obj));
+ JS_ASSERT(JS_IS_OBJ_LOCKED(cx, obj));
+ scope = OBJ_SCOPE(obj);
+ JS_ASSERT(scope->object == obj);
+
+ slot = sprop->slot;
+ if (slot != SPROP_INVALID_SLOT) {
+ pval = LOCKED_OBJ_GET_SLOT(obj, slot);
+
+ /* If sprop has a stub setter, keep scope locked and just store *vp. */
+ if (SPROP_HAS_STUB_SETTER(sprop))
+ goto set_slot;
+ } else {
+ /*
+ * Allow API consumers to create shared properties with stub setters.
+ * Such properties lack value storage, so setting them is like writing
+ * to /dev/null.
+ */
+ if (SPROP_HAS_STUB_SETTER(sprop))
+ return JS_TRUE;
+ pval = JSVAL_VOID;
+ }
+
+ sample = cx->runtime->propertyRemovals;
+ JS_UNLOCK_SCOPE(cx, scope);
+ JS_PUSH_TEMP_ROOT_SPROP(cx, sprop, &tvr);
+ ok = SPROP_SET(cx, sprop, obj, obj, vp);
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ if (!ok)
+ return JS_FALSE;
+
+ JS_LOCK_SCOPE(cx, scope);
+ JS_ASSERT(scope->object == obj);
+ if (SLOT_IN_SCOPE(slot, scope) &&
+ (JS_LIKELY(cx->runtime->propertyRemovals == sample) ||
+ SCOPE_GET_PROPERTY(scope, sprop->id) == sprop)) {
+ set_slot:
+ GC_POKE(cx, pval);
+ LOCKED_OBJ_SET_SLOT(obj, slot, *vp);
+ }
+
+ return JS_TRUE;
+}
+
+JSBool
+js_GetProperty(JSContext *cx, JSObject *obj, jsid id, jsval *vp)
+{
+ JSObject *obj2;
+ JSProperty *prop;
+ JSScopeProperty *sprop;
+
+ /*
+ * Handle old bug that took empty string as zero index. Also convert
+ * string indices to integers if appropriate.
+ */
+ CHECK_FOR_STRING_INDEX(id);
+
+ if (!js_LookupProperty(cx, obj, id, &obj2, &prop))
+ return JS_FALSE;
+ if (!prop) {
+ jsbytecode *pc;
+
+ *vp = JSVAL_VOID;
+
+ if (!OBJ_GET_CLASS(cx, obj)->getProperty(cx, obj, ID_TO_VALUE(id), vp))
+ return JS_FALSE;
+
+ /*
+ * Give a strict warning if foo.bar is evaluated by a script for an
+ * object foo with no property named 'bar'.
+ */
+ if (JSVAL_IS_VOID(*vp) && cx->fp && (pc = cx->fp->pc)) {
+ JSOp op;
+ uintN flags;
+ JSString *str;
+
+ op = *pc;
+ if (op == JSOP_GETXPROP || op == JSOP_GETXELEM) {
+ flags = JSREPORT_ERROR;
+ } else {
+ if (!JS_HAS_STRICT_OPTION(cx) ||
+ (op != JSOP_GETPROP && op != JSOP_GETELEM)) {
+ return JS_TRUE;
+ }
+
+ /*
+ * XXX do not warn about missing __iterator__ as the function
+ * may be called from JS_GetMethodById. See bug 355145.
+ */
+ if (id == ATOM_TO_JSID(cx->runtime->atomState.iteratorAtom))
+ return JS_TRUE;
+
+ /* Kludge to allow (typeof foo == "undefined") tests. */
+ JS_ASSERT(cx->fp->script);
+ pc += js_CodeSpec[op].length;
+ if (Detecting(cx, pc))
+ return JS_TRUE;
+
+ flags = JSREPORT_WARNING | JSREPORT_STRICT;
+ }
+
+ /* Ok, bad undefined property reference: whine about it. */
+ str = js_DecompileValueGenerator(cx, JSDVG_IGNORE_STACK,
+ ID_TO_VALUE(id), NULL);
+ if (!str ||
+ !JS_ReportErrorFlagsAndNumber(cx, flags,
+ js_GetErrorMessage, NULL,
+ JSMSG_UNDEFINED_PROP,
+ JS_GetStringBytes(str))) {
+ return JS_FALSE;
+ }
+ }
+ return JS_TRUE;
+ }
+
+ if (!OBJ_IS_NATIVE(obj2)) {
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+ return OBJ_GET_PROPERTY(cx, obj2, id, vp);
+ }
+
+ sprop = (JSScopeProperty *) prop;
+ if (!js_NativeGet(cx, obj, obj2, sprop, vp))
+ return JS_FALSE;
+
+ PROPERTY_CACHE_FILL(&cx->runtime->propertyCache, obj2, id, sprop);
+ JS_UNLOCK_OBJ(cx, obj2);
+ return JS_TRUE;
+}
+
+JSBool
+js_SetProperty(JSContext *cx, JSObject *obj, jsid id, jsval *vp)
+{
+ JSObject *pobj;
+ JSProperty *prop;
+ JSScopeProperty *sprop;
+ JSScope *scope;
+ uintN attrs, flags;
+ intN shortid;
+ JSClass *clasp;
+ JSPropertyOp getter, setter;
+
+ /*
+ * Handle old bug that took empty string as zero index. Also convert
+ * string indices to integers if appropriate.
+ */
+ CHECK_FOR_STRING_INDEX(id);
+
+ if (!js_LookupProperty(cx, obj, id, &pobj, &prop))
+ return JS_FALSE;
+
+ if (prop && !OBJ_IS_NATIVE(pobj)) {
+ OBJ_DROP_PROPERTY(cx, pobj, prop);
+ prop = NULL;
+ }
+ sprop = (JSScopeProperty *) prop;
+
+ /*
+ * Now either sprop is null, meaning id was not found in obj or one of its
+ * prototypes; or sprop is non-null, meaning id was found in pobj's scope.
+ * If JS_THREADSAFE and sprop is non-null, then scope is locked, and sprop
+ * is held: we must OBJ_DROP_PROPERTY or JS_UNLOCK_SCOPE before we return
+ * (the two are equivalent for native objects, but we use JS_UNLOCK_SCOPE
+ * because it is cheaper).
+ */
+ attrs = JSPROP_ENUMERATE;
+ flags = 0;
+ shortid = 0;
+ clasp = OBJ_GET_CLASS(cx, obj);
+ getter = clasp->getProperty;
+ setter = clasp->setProperty;
+
+ if (sprop) {
+ /*
+ * Set scope for use below. It was locked by js_LookupProperty, and
+ * we know pobj owns it (i.e., scope->object == pobj). Therefore we
+ * optimize JS_UNLOCK_OBJ(cx, pobj) into JS_UNLOCK_SCOPE(cx, scope).
+ */
+ scope = OBJ_SCOPE(pobj);
+
+ attrs = sprop->attrs;
+ if ((attrs & JSPROP_READONLY) ||
+ (SCOPE_IS_SEALED(scope) && pobj == obj)) {
+ JS_UNLOCK_SCOPE(cx, scope);
+
+ /*
+ * Here, we'll either return true or goto read_only_error, which
+ * reports a strict warning or throws an error. So we redefine
+ * the |flags| local variable to be JSREPORT_* flags to pass to
+ * JS_ReportErrorFlagsAndNumberUC at label read_only_error. We
+ * must likewise re-task flags further below for the other 'goto
+ * read_only_error;' case.
+ */
+ flags = JSREPORT_ERROR;
+ if ((attrs & JSPROP_READONLY) && JS_VERSION_IS_ECMA(cx)) {
+ if (!JS_HAS_STRICT_OPTION(cx)) {
+ /* Just return true per ECMA if not in strict mode. */
+ return JS_TRUE;
+ }
+
+ /* Strict mode: report a read-only strict warning. */
+ flags = JSREPORT_STRICT | JSREPORT_WARNING;
+ }
+ goto read_only_error;
+ }
+
+ if (pobj != obj) {
+ /*
+ * We found id in a prototype object: prepare to share or shadow.
+ * NB: Thanks to the immutable, garbage-collected property tree
+ * maintained by jsscope.c in cx->runtime, we needn't worry about
+ * sprop going away behind our back after we've unlocked scope.
+ */
+ JS_UNLOCK_SCOPE(cx, scope);
+
+ /* Don't clone a shared prototype property. */
+ if (attrs & JSPROP_SHARED) {
+ if (SPROP_HAS_STUB_SETTER(sprop) &&
+ !(sprop->attrs & JSPROP_GETTER)) {
+ return JS_TRUE;
+ }
+ return SPROP_SET(cx, sprop, obj, pobj, vp);
+ }
+
+ /* Restore attrs to the ECMA default for new properties. */
+ attrs = JSPROP_ENUMERATE;
+
+ /*
+ * Preserve the shortid, getter, and setter when shadowing any
+ * property that has a shortid. An old API convention requires
+ * that the property's getter and setter functions receive the
+ * shortid, not id, when they are called on the shadow we are
+ * about to create in obj's scope.
+ */
+ if (sprop->flags & SPROP_HAS_SHORTID) {
+ flags = SPROP_HAS_SHORTID;
+ shortid = sprop->shortid;
+ getter = sprop->getter;
+ setter = sprop->setter;
+ }
+
+ /*
+ * Forget we found the proto-property now that we've copied any
+ * needed member values.
+ */
+ sprop = NULL;
+ }
+#ifdef __GNUC__ /* suppress bogus gcc warnings */
+ } else {
+ scope = NULL;
+#endif
+ }
+
+ if (!sprop) {
+ if (SCOPE_IS_SEALED(OBJ_SCOPE(obj)) && OBJ_SCOPE(obj)->object == obj) {
+ flags = JSREPORT_ERROR;
+ goto read_only_error;
+ }
+
+ /* Find or make a property descriptor with the right heritage. */
+ JS_LOCK_OBJ(cx, obj);
+ scope = js_GetMutableScope(cx, obj);
+ if (!scope) {
+ JS_UNLOCK_OBJ(cx, obj);
+ return JS_FALSE;
+ }
+ if (clasp->flags & JSCLASS_SHARE_ALL_PROPERTIES)
+ attrs |= JSPROP_SHARED;
+ sprop = js_AddScopeProperty(cx, scope, id, getter, setter,
+ SPROP_INVALID_SLOT, attrs, flags, shortid);
+ if (!sprop) {
+ JS_UNLOCK_SCOPE(cx, scope);
+ return JS_FALSE;
+ }
+
+ /*
+ * Initialize the new property value (passed to setter) to undefined.
+ * Note that we store before calling addProperty, to match the order
+ * in js_DefineNativeProperty.
+ */
+ if (SPROP_HAS_VALID_SLOT(sprop, scope))
+ LOCKED_OBJ_SET_SLOT(obj, sprop->slot, JSVAL_VOID);
+
+ /* XXXbe called with obj locked */
+ ADD_PROPERTY_HELPER(cx, clasp, obj, scope, sprop, vp,
+ js_RemoveScopeProperty(cx, scope, id);
+ JS_UNLOCK_SCOPE(cx, scope);
+ return JS_FALSE);
+
+ PROPERTY_CACHE_FILL(&cx->runtime->propertyCache, obj, id, sprop);
+ }
+
+ if (!js_NativeSet(cx, obj, sprop, vp))
+ return JS_FALSE;
+ JS_UNLOCK_SCOPE(cx, scope);
+ return JS_TRUE;
+
+ read_only_error: {
+ JSString *str = js_DecompileValueGenerator(cx,
+ JSDVG_IGNORE_STACK,
+ ID_TO_VALUE(id),
+ NULL);
+ if (!str)
+ return JS_FALSE;
+ return JS_ReportErrorFlagsAndNumberUC(cx, flags, js_GetErrorMessage,
+ NULL, JSMSG_READ_ONLY,
+ JS_GetStringChars(str));
+ }
+}
+
+JSBool
+js_GetAttributes(JSContext *cx, JSObject *obj, jsid id, JSProperty *prop,
+ uintN *attrsp)
+{
+ JSBool noprop, ok;
+ JSScopeProperty *sprop;
+
+ noprop = !prop;
+ if (noprop) {
+ if (!js_LookupProperty(cx, obj, id, &obj, &prop))
+ return JS_FALSE;
+ if (!prop) {
+ *attrsp = 0;
+ return JS_TRUE;
+ }
+ if (!OBJ_IS_NATIVE(obj)) {
+ ok = OBJ_GET_ATTRIBUTES(cx, obj, id, prop, attrsp);
+ OBJ_DROP_PROPERTY(cx, obj, prop);
+ return ok;
+ }
+ }
+ sprop = (JSScopeProperty *)prop;
+ *attrsp = sprop->attrs;
+ if (noprop)
+ OBJ_DROP_PROPERTY(cx, obj, prop);
+ return JS_TRUE;
+}
+
+JSBool
+js_SetAttributes(JSContext *cx, JSObject *obj, jsid id, JSProperty *prop,
+ uintN *attrsp)
+{
+ JSBool noprop, ok;
+ JSScopeProperty *sprop;
+
+ noprop = !prop;
+ if (noprop) {
+ if (!js_LookupProperty(cx, obj, id, &obj, &prop))
+ return JS_FALSE;
+ if (!prop)
+ return JS_TRUE;
+ if (!OBJ_IS_NATIVE(obj)) {
+ ok = OBJ_SET_ATTRIBUTES(cx, obj, id, prop, attrsp);
+ OBJ_DROP_PROPERTY(cx, obj, prop);
+ return ok;
+ }
+ }
+ sprop = (JSScopeProperty *)prop;
+ sprop = js_ChangeNativePropertyAttrs(cx, obj, sprop, *attrsp, 0,
+ sprop->getter, sprop->setter);
+ if (noprop)
+ OBJ_DROP_PROPERTY(cx, obj, prop);
+ return (sprop != NULL);
+}
+
+JSBool
+js_DeleteProperty(JSContext *cx, JSObject *obj, jsid id, jsval *rval)
+{
+ JSObject *proto;
+ JSProperty *prop;
+ JSScopeProperty *sprop;
+ JSString *str;
+ JSScope *scope;
+ JSBool ok;
+
+ *rval = JSVAL_TRUE;
+
+ /*
+ * Handle old bug that took empty string as zero index. Also convert
+ * string indices to integers if appropriate.
+ */
+ CHECK_FOR_STRING_INDEX(id);
+
+ if (!js_LookupProperty(cx, obj, id, &proto, &prop))
+ return JS_FALSE;
+ if (!prop || proto != obj) {
+ /*
+ * If the property was found in a native prototype, check whether it's
+ * shared and permanent. Such a property stands for direct properties
+ * in all delegating objects, matching ECMA semantics without bloating
+ * each delegating object.
+ */
+ if (prop) {
+ if (OBJ_IS_NATIVE(proto)) {
+ sprop = (JSScopeProperty *)prop;
+ if (SPROP_IS_SHARED_PERMANENT(sprop))
+ *rval = JSVAL_FALSE;
+ }
+ OBJ_DROP_PROPERTY(cx, proto, prop);
+ if (*rval == JSVAL_FALSE)
+ return JS_TRUE;
+ }
+
+ /*
+ * If no property was found, or the property comes from a prototype and is
+ * unshared or impermanent, call the class's delProperty hook, passing rval
+ * as the result parameter.
+ */
+ return OBJ_GET_CLASS(cx, obj)->delProperty(cx, obj, ID_TO_VALUE(id),
+ rval);
+ }
+
+ sprop = (JSScopeProperty *)prop;
+ if (sprop->attrs & JSPROP_PERMANENT) {
+ OBJ_DROP_PROPERTY(cx, obj, prop);
+ if (JS_VERSION_IS_ECMA(cx)) {
+ *rval = JSVAL_FALSE;
+ return JS_TRUE;
+ }
+ str = js_DecompileValueGenerator(cx, JSDVG_IGNORE_STACK,
+ ID_TO_VALUE(id), NULL);
+ if (str) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_PERMANENT, JS_GetStringBytes(str));
+ }
+ return JS_FALSE;
+ }
+
+ /* XXXbe called with obj locked */
+ if (!LOCKED_OBJ_GET_CLASS(obj)->delProperty(cx, obj, SPROP_USERID(sprop),
+ rval)) {
+ OBJ_DROP_PROPERTY(cx, obj, prop);
+ return JS_FALSE;
+ }
+
+ scope = OBJ_SCOPE(obj);
+ if (SPROP_HAS_VALID_SLOT(sprop, scope))
+ GC_POKE(cx, LOCKED_OBJ_GET_SLOT(obj, sprop->slot));
+
+ PROPERTY_CACHE_FILL(&cx->runtime->propertyCache, obj, id, NULL);
+ ok = js_RemoveScopeProperty(cx, scope, id);
+ OBJ_DROP_PROPERTY(cx, obj, prop);
+ return ok;
+}
+
+JSBool
+js_DefaultValue(JSContext *cx, JSObject *obj, JSType hint, jsval *vp)
+{
+ jsval v, save;
+ JSString *str;
+
+ v = save = OBJECT_TO_JSVAL(obj);
+ switch (hint) {
+ case JSTYPE_STRING:
+ /*
+ * Propagate the exception if js_TryMethod finds an appropriate
+ * method, and calling that method returns failure.
+ */
+ if (!js_TryMethod(cx, obj, cx->runtime->atomState.toStringAtom, 0, NULL,
+ &v)) {
+ return JS_FALSE;
+ }
+
+ if (!JSVAL_IS_PRIMITIVE(v)) {
+ if (!OBJ_GET_CLASS(cx, obj)->convert(cx, obj, hint, &v))
+ return JS_FALSE;
+ }
+ break;
+
+ default:
+ if (!OBJ_GET_CLASS(cx, obj)->convert(cx, obj, hint, &v))
+ return JS_FALSE;
+ if (!JSVAL_IS_PRIMITIVE(v)) {
+ JSType type = JS_TypeOfValue(cx, v);
+ if (type == hint ||
+ (type == JSTYPE_FUNCTION && hint == JSTYPE_OBJECT)) {
+ goto out;
+ }
+ if (!js_TryMethod(cx, obj, cx->runtime->atomState.toStringAtom, 0,
+ NULL, &v)) {
+ return JS_FALSE;
+ }
+ }
+ break;
+ }
+ if (!JSVAL_IS_PRIMITIVE(v)) {
+ /* Avoid recursive death through js_DecompileValueGenerator. */
+ if (hint == JSTYPE_STRING) {
+ str = JS_InternString(cx, OBJ_GET_CLASS(cx, obj)->name);
+ if (!str)
+ return JS_FALSE;
+ } else {
+ str = NULL;
+ }
+ *vp = OBJECT_TO_JSVAL(obj);
+ str = js_DecompileValueGenerator(cx, JSDVG_SEARCH_STACK, save, str);
+ if (str) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_CANT_CONVERT_TO,
+ JS_GetStringBytes(str),
+ (hint == JSTYPE_VOID)
+ ? "primitive type"
+ : js_type_strs[hint]);
+ }
+ return JS_FALSE;
+ }
+out:
+ *vp = v;
+ return JS_TRUE;
+}
+
+JSIdArray *
+js_NewIdArray(JSContext *cx, jsint length)
+{
+ JSIdArray *ida;
+
+ ida = (JSIdArray *)
+ JS_malloc(cx, sizeof(JSIdArray) + (length-1) * sizeof(jsval));
+ if (ida)
+ ida->length = length;
+ return ida;
+}
+
+JSIdArray *
+js_SetIdArrayLength(JSContext *cx, JSIdArray *ida, jsint length)
+{
+ JSIdArray *rida;
+
+ rida = (JSIdArray *)
+ JS_realloc(cx, ida, sizeof(JSIdArray) + (length-1) * sizeof(jsval));
+ if (!rida)
+ JS_DestroyIdArray(cx, ida);
+ else
+ rida->length = length;
+ return rida;
+}
+
+/* Private type used to iterate over all properties of a native JS object */
+struct JSNativeIteratorState {
+ jsint next_index; /* index into jsid array */
+ JSIdArray *ida; /* all property ids in enumeration */
+ JSNativeIteratorState *next; /* doubly-linked list support */
+ JSNativeIteratorState **prevp;
+};
+
+/*
+ * This function is used to enumerate the properties of native JSObjects
+ * and those host objects that do not define a JSNewEnumerateOp-style iterator
+ * function.
+ */
+JSBool
+js_Enumerate(JSContext *cx, JSObject *obj, JSIterateOp enum_op,
+ jsval *statep, jsid *idp)
+{
+ JSRuntime *rt;
+ JSObject *proto;
+ JSClass *clasp;
+ JSEnumerateOp enumerate;
+ JSScopeProperty *sprop, *lastProp;
+ jsint i, length;
+ JSScope *scope;
+ JSIdArray *ida;
+ JSNativeIteratorState *state;
+
+ rt = cx->runtime;
+ clasp = OBJ_GET_CLASS(cx, obj);
+ enumerate = clasp->enumerate;
+ if (clasp->flags & JSCLASS_NEW_ENUMERATE)
+ return ((JSNewEnumerateOp) enumerate)(cx, obj, enum_op, statep, idp);
+
+ switch (enum_op) {
+ case JSENUMERATE_INIT:
+ if (!enumerate(cx, obj))
+ return JS_FALSE;
+ length = 0;
+
+ /*
+ * The set of all property ids is pre-computed when the iterator
+ * is initialized so as to avoid problems with properties being
+ * deleted during the iteration.
+ */
+ JS_LOCK_OBJ(cx, obj);
+ scope = OBJ_SCOPE(obj);
+
+ /*
+ * If this object shares a scope with its prototype, don't enumerate
+ * its properties. Otherwise they will be enumerated a second time
+ * when the prototype object is enumerated.
+ */
+ proto = OBJ_GET_PROTO(cx, obj);
+ if (proto && scope == OBJ_SCOPE(proto)) {
+ ida = js_NewIdArray(cx, 0);
+ if (!ida) {
+ JS_UNLOCK_OBJ(cx, obj);
+ return JS_FALSE;
+ }
+ } else {
+ /* Object has a private scope; enumerate all props in scope. */
+ for (sprop = lastProp = SCOPE_LAST_PROP(scope); sprop;
+ sprop = sprop->parent) {
+ if ((
+#ifdef DUMP_CALL_TABLE
+ (cx->options & JSOPTION_LOGCALL_TOSOURCE) ||
+#endif
+ (sprop->attrs & JSPROP_ENUMERATE)) &&
+ !(sprop->flags & SPROP_IS_ALIAS) &&
+ (!SCOPE_HAD_MIDDLE_DELETE(scope) ||
+ SCOPE_HAS_PROPERTY(scope, sprop))) {
+ length++;
+ }
+ }
+ ida = js_NewIdArray(cx, length);
+ if (!ida) {
+ JS_UNLOCK_OBJ(cx, obj);
+ return JS_FALSE;
+ }
+ i = length;
+ for (sprop = lastProp; sprop; sprop = sprop->parent) {
+ if ((
+#ifdef DUMP_CALL_TABLE
+ (cx->options & JSOPTION_LOGCALL_TOSOURCE) ||
+#endif
+ (sprop->attrs & JSPROP_ENUMERATE)) &&
+ !(sprop->flags & SPROP_IS_ALIAS) &&
+ (!SCOPE_HAD_MIDDLE_DELETE(scope) ||
+ SCOPE_HAS_PROPERTY(scope, sprop))) {
+ JS_ASSERT(i > 0);
+ ida->vector[--i] = sprop->id;
+ }
+ }
+ }
+ JS_UNLOCK_OBJ(cx, obj);
+
+ state = (JSNativeIteratorState *)
+ JS_malloc(cx, sizeof(JSNativeIteratorState));
+ if (!state) {
+ JS_DestroyIdArray(cx, ida);
+ return JS_FALSE;
+ }
+ state->ida = ida;
+ state->next_index = 0;
+
+ JS_LOCK_RUNTIME(rt);
+ state->next = rt->nativeIteratorStates;
+ if (state->next)
+ state->next->prevp = &state->next;
+ state->prevp = &rt->nativeIteratorStates;
+ *state->prevp = state;
+ JS_UNLOCK_RUNTIME(rt);
+
+ *statep = PRIVATE_TO_JSVAL(state);
+ if (idp)
+ *idp = INT_TO_JSVAL(length);
+ break;
+
+ case JSENUMERATE_NEXT:
+ state = (JSNativeIteratorState *) JSVAL_TO_PRIVATE(*statep);
+ ida = state->ida;
+ length = ida->length;
+ if (state->next_index != length) {
+ *idp = ida->vector[state->next_index++];
+ break;
+ }
+ /* FALL THROUGH */
+
+ case JSENUMERATE_DESTROY:
+ state = (JSNativeIteratorState *) JSVAL_TO_PRIVATE(*statep);
+
+ JS_LOCK_RUNTIME(rt);
+ JS_ASSERT(rt->nativeIteratorStates);
+ JS_ASSERT(*state->prevp == state);
+ if (state->next) {
+ JS_ASSERT(state->next->prevp == &state->next);
+ state->next->prevp = state->prevp;
+ }
+ *state->prevp = state->next;
+ JS_UNLOCK_RUNTIME(rt);
+
+ JS_DestroyIdArray(cx, state->ida);
+ JS_free(cx, state);
+ *statep = JSVAL_NULL;
+ break;
+ }
+ return JS_TRUE;
+}
+
+void
+js_MarkNativeIteratorStates(JSContext *cx)
+{
+ JSNativeIteratorState *state;
+ jsid *cursor, *end, id;
+
+ state = cx->runtime->nativeIteratorStates;
+ if (!state)
+ return;
+
+ do {
+ JS_ASSERT(*state->prevp == state);
+ cursor = state->ida->vector;
+ end = cursor + state->ida->length;
+ for (; cursor != end; ++cursor) {
+ id = *cursor;
+ MARK_ID(cx, id);
+ }
+ } while ((state = state->next) != NULL);
+}
+
+JSBool
+js_CheckAccess(JSContext *cx, JSObject *obj, jsid id, JSAccessMode mode,
+ jsval *vp, uintN *attrsp)
+{
+ JSBool writing;
+ JSObject *pobj;
+ JSProperty *prop;
+ JSClass *clasp;
+ JSScopeProperty *sprop;
+ JSCheckAccessOp check;
+
+ writing = (mode & JSACC_WRITE) != 0;
+ switch (mode & JSACC_TYPEMASK) {
+ case JSACC_PROTO:
+ pobj = obj;
+ if (!writing)
+ *vp = OBJ_GET_SLOT(cx, obj, JSSLOT_PROTO);
+ *attrsp = JSPROP_PERMANENT;
+ break;
+
+ case JSACC_PARENT:
+ JS_ASSERT(!writing);
+ pobj = obj;
+ *vp = OBJ_GET_SLOT(cx, obj, JSSLOT_PARENT);
+ *attrsp = JSPROP_READONLY | JSPROP_PERMANENT;
+ break;
+
+ default:
+ if (!js_LookupProperty(cx, obj, id, &pobj, &prop))
+ return JS_FALSE;
+ if (!prop) {
+ if (!writing)
+ *vp = JSVAL_VOID;
+ *attrsp = 0;
+ clasp = OBJ_GET_CLASS(cx, obj);
+ return !clasp->checkAccess ||
+ clasp->checkAccess(cx, obj, ID_TO_VALUE(id), mode, vp);
+ }
+ if (!OBJ_IS_NATIVE(pobj)) {
+ OBJ_DROP_PROPERTY(cx, pobj, prop);
+ return OBJ_CHECK_ACCESS(cx, pobj, id, mode, vp, attrsp);
+ }
+
+ sprop = (JSScopeProperty *)prop;
+ *attrsp = sprop->attrs;
+ if (!writing) {
+ *vp = (SPROP_HAS_VALID_SLOT(sprop, OBJ_SCOPE(pobj)))
+ ? LOCKED_OBJ_GET_SLOT(pobj, sprop->slot)
+ : JSVAL_VOID;
+ }
+ OBJ_DROP_PROPERTY(cx, pobj, prop);
+ }
+
+ /*
+ * If obj's class has a stub (null) checkAccess hook, use the per-runtime
+ * checkObjectAccess callback, if configured.
+ *
+ * We don't want to require all classes to supply a checkAccess hook; we
+ * need that hook only for certain classes used when precompiling scripts
+ * and functions ("brutal sharing"). But for general safety of built-in
+ * magic properties such as __proto__ and __parent__, we route all access
+ * checks, even for classes that stub out checkAccess, through the global
+ * checkObjectAccess hook. This covers precompilation-based sharing and
+ * (possibly unintended) runtime sharing across trust boundaries.
+ */
+ clasp = OBJ_GET_CLASS(cx, pobj);
+ check = clasp->checkAccess;
+ if (!check)
+ check = cx->runtime->checkObjectAccess;
+ return !check || check(cx, pobj, ID_TO_VALUE(id), mode, vp);
+}
+
+#ifdef JS_THREADSAFE
+void
+js_DropProperty(JSContext *cx, JSObject *obj, JSProperty *prop)
+{
+ JS_UNLOCK_OBJ(cx, obj);
+}
+#endif
+
+static void
+ReportIsNotFunction(JSContext *cx, jsval *vp, uintN flags)
+{
+ /*
+ * The decompiler may need to access the args of the function in
+ * progress rather than the one we had hoped to call.
+ * So we switch the cx->fp to the frame below us. We stick the
+ * current frame in the dormantFrameChain to protect it from gc.
+ */
+
+ JSStackFrame *fp = cx->fp;
+ if (fp->down) {
+ JS_ASSERT(!fp->dormantNext);
+ fp->dormantNext = cx->dormantFrameChain;
+ cx->dormantFrameChain = fp;
+ cx->fp = fp->down;
+ }
+
+ js_ReportIsNotFunction(cx, vp, flags);
+
+ if (fp->down) {
+ JS_ASSERT(cx->dormantFrameChain == fp);
+ cx->dormantFrameChain = fp->dormantNext;
+ fp->dormantNext = NULL;
+ cx->fp = fp;
+ }
+}
+
+#ifdef NARCISSUS
+static JSBool
+GetCurrentExecutionContext(JSContext *cx, JSObject *obj, jsval *rval)
+{
+ JSObject *tmp;
+ jsval xcval;
+
+ while ((tmp = OBJ_GET_PARENT(cx, obj)) != NULL)
+ obj = tmp;
+ if (!OBJ_GET_PROPERTY(cx, obj,
+ ATOM_TO_JSID(cx->runtime->atomState
+ .ExecutionContextAtom),
+ &xcval)) {
+ return JS_FALSE;
+ }
+ if (JSVAL_IS_PRIMITIVE(xcval)) {
+ JS_ReportError(cx, "invalid ExecutionContext in global object");
+ return JS_FALSE;
+ }
+ if (!OBJ_GET_PROPERTY(cx, JSVAL_TO_OBJECT(xcval),
+ ATOM_TO_JSID(cx->runtime->atomState.currentAtom),
+ rval)) {
+ return JS_FALSE;
+ }
+ return JS_TRUE;
+}
+#endif
+
+JSBool
+js_Call(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSClass *clasp;
+
+ clasp = OBJ_GET_CLASS(cx, JSVAL_TO_OBJECT(argv[-2]));
+ if (!clasp->call) {
+#ifdef NARCISSUS
+ JSObject *callee, *args;
+ jsval fval, nargv[3];
+ JSBool ok;
+
+ callee = JSVAL_TO_OBJECT(argv[-2]);
+ if (!OBJ_GET_PROPERTY(cx, callee,
+ ATOM_TO_JSID(cx->runtime->atomState.callAtom),
+ &fval)) {
+ return JS_FALSE;
+ }
+ if (VALUE_IS_FUNCTION(cx, fval)) {
+ if (!GetCurrentExecutionContext(cx, obj, &nargv[2]))
+ return JS_FALSE;
+ args = js_GetArgsObject(cx, cx->fp);
+ if (!args)
+ return JS_FALSE;
+ nargv[0] = OBJECT_TO_JSVAL(obj);
+ nargv[1] = OBJECT_TO_JSVAL(args);
+ return js_InternalCall(cx, callee, fval, 3, nargv, rval);
+ }
+ if (JSVAL_IS_OBJECT(fval) && JSVAL_TO_OBJECT(fval) != callee) {
+ argv[-2] = fval;
+ ok = js_Call(cx, obj, argc, argv, rval);
+ argv[-2] = OBJECT_TO_JSVAL(callee);
+ return ok;
+ }
+#endif
+ ReportIsNotFunction(cx, &argv[-2], cx->fp->flags & JSFRAME_ITERATOR);
+ return JS_FALSE;
+ }
+ return clasp->call(cx, obj, argc, argv, rval);
+}
+
+JSBool
+js_Construct(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSClass *clasp;
+
+ clasp = OBJ_GET_CLASS(cx, JSVAL_TO_OBJECT(argv[-2]));
+ if (!clasp->construct) {
+#ifdef NARCISSUS
+ JSObject *callee, *args;
+ jsval cval, nargv[2];
+ JSBool ok;
+
+ callee = JSVAL_TO_OBJECT(argv[-2]);
+ if (!OBJ_GET_PROPERTY(cx, callee,
+ ATOM_TO_JSID(cx->runtime->atomState
+ .constructAtom),
+ &cval)) {
+ return JS_FALSE;
+ }
+ if (VALUE_IS_FUNCTION(cx, cval)) {
+ if (!GetCurrentExecutionContext(cx, obj, &nargv[1]))
+ return JS_FALSE;
+ args = js_GetArgsObject(cx, cx->fp);
+ if (!args)
+ return JS_FALSE;
+ nargv[0] = OBJECT_TO_JSVAL(args);
+ return js_InternalCall(cx, callee, cval, 2, nargv, rval);
+ }
+ if (JSVAL_IS_OBJECT(cval) && JSVAL_TO_OBJECT(cval) != callee) {
+ argv[-2] = cval;
+ ok = js_Call(cx, obj, argc, argv, rval);
+ argv[-2] = OBJECT_TO_JSVAL(callee);
+ return ok;
+ }
+#endif
+ ReportIsNotFunction(cx, &argv[-2], JSV2F_CONSTRUCT);
+ return JS_FALSE;
+ }
+ return clasp->construct(cx, obj, argc, argv, rval);
+}
+
+JSBool
+js_HasInstance(JSContext *cx, JSObject *obj, jsval v, JSBool *bp)
+{
+ JSClass *clasp;
+ JSString *str;
+
+ clasp = OBJ_GET_CLASS(cx, obj);
+ if (clasp->hasInstance)
+ return clasp->hasInstance(cx, obj, v, bp);
+#ifdef NARCISSUS
+ {
+ jsval fval, rval;
+
+ if (!OBJ_GET_PROPERTY(cx, obj,
+ ATOM_TO_JSID(cx->runtime->atomState
+ .hasInstanceAtom),
+ &fval)) {
+ return JS_FALSE;
+ }
+ if (VALUE_IS_FUNCTION(cx, fval)) {
+ return js_InternalCall(cx, obj, fval, 1, &v, &rval) &&
+ js_ValueToBoolean(cx, rval, bp);
+ }
+ }
+#endif
+ str = js_DecompileValueGenerator(cx, JSDVG_SEARCH_STACK,
+ OBJECT_TO_JSVAL(obj), NULL);
+ if (str) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_INSTANCEOF_RHS,
+ JS_GetStringBytes(str));
+ }
+ return JS_FALSE;
+}
+
+JSBool
+js_IsDelegate(JSContext *cx, JSObject *obj, jsval v, JSBool *bp)
+{
+ JSObject *obj2;
+
+ *bp = JS_FALSE;
+ if (JSVAL_IS_PRIMITIVE(v))
+ return JS_TRUE;
+ obj2 = JSVAL_TO_OBJECT(v);
+ while ((obj2 = OBJ_GET_PROTO(cx, obj2)) != NULL) {
+ if (obj2 == obj) {
+ *bp = JS_TRUE;
+ break;
+ }
+ }
+ return JS_TRUE;
+}
+
+JSBool
+js_GetClassPrototype(JSContext *cx, JSObject *scope, jsid id,
+ JSObject **protop)
+{
+ jsval v;
+ JSObject *ctor;
+
+ if (!js_FindClassObject(cx, scope, id, &v))
+ return JS_FALSE;
+ if (VALUE_IS_FUNCTION(cx, v)) {
+ ctor = JSVAL_TO_OBJECT(v);
+ if (!OBJ_GET_PROPERTY(cx, ctor,
+ ATOM_TO_JSID(cx->runtime->atomState
+ .classPrototypeAtom),
+ &v)) {
+ return JS_FALSE;
+ }
+ if (!JSVAL_IS_PRIMITIVE(v)) {
+ /*
+ * Set the newborn root in case v is otherwise unreferenced.
+ * It's ok to overwrite newborn roots here, since the getter
+ * called just above could have. Unlike the common GC rooting
+ * model, our callers do not have to protect protop thanks to
+ * this newborn root, since they all immediately create a new
+ * instance that delegates to this object, or just query the
+ * prototype for its class.
+ */
+ cx->weakRoots.newborn[GCX_OBJECT] = JSVAL_TO_GCTHING(v);
+ }
+ }
+ *protop = JSVAL_IS_OBJECT(v) ? JSVAL_TO_OBJECT(v) : NULL;
+ return JS_TRUE;
+}
+
+/*
+ * For shared precompilation of function objects, we support cloning on entry
+ * to an execution context in which the function declaration or expression
+ * should be processed as if it were not precompiled, where the precompiled
+ * function's scope chain does not match the execution context's. The cloned
+ * function object carries its execution-context scope in its parent slot; it
+ * links to the precompiled function (the "clone-parent") via its proto slot.
+ *
+ * Note that this prototype-based delegation leaves an unchecked access path
+ * from the clone to the clone-parent's 'constructor' property. If the clone
+ * lives in a less privileged or shared scope than the clone-parent, this is
+ * a security hole, a sharing hazard, or both. Therefore we check all such
+ * accesses with the following getter/setter pair, which we use when defining
+ * 'constructor' in f.prototype for all function objects f.
+ */
+static JSBool
+CheckCtorGetAccess(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ JSAtom *atom;
+ uintN attrs;
+
+ atom = cx->runtime->atomState.constructorAtom;
+ JS_ASSERT(id == ATOM_KEY(atom));
+ return OBJ_CHECK_ACCESS(cx, obj, ATOM_TO_JSID(atom), JSACC_READ,
+ vp, &attrs);
+}
+
+static JSBool
+CheckCtorSetAccess(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ JSAtom *atom;
+ uintN attrs;
+
+ atom = cx->runtime->atomState.constructorAtom;
+ JS_ASSERT(id == ATOM_KEY(atom));
+ return OBJ_CHECK_ACCESS(cx, obj, ATOM_TO_JSID(atom), JSACC_WRITE,
+ vp, &attrs);
+}
+
+JSBool
+js_SetClassPrototype(JSContext *cx, JSObject *ctor, JSObject *proto,
+ uintN attrs)
+{
+ /*
+ * Use the given attributes for the prototype property of the constructor,
+ * as user-defined constructors have a DontDelete prototype (which may be
+ * reset), while native or "system" constructors have DontEnum | ReadOnly |
+ * DontDelete.
+ */
+ if (!OBJ_DEFINE_PROPERTY(cx, ctor,
+ ATOM_TO_JSID(cx->runtime->atomState
+ .classPrototypeAtom),
+ OBJECT_TO_JSVAL(proto),
+ JS_PropertyStub, JS_PropertyStub,
+ attrs, NULL)) {
+ return JS_FALSE;
+ }
+
+ /*
+ * ECMA says that Object.prototype.constructor, or f.prototype.constructor
+ * for a user-defined function f, is DontEnum.
+ */
+ return OBJ_DEFINE_PROPERTY(cx, proto,
+ ATOM_TO_JSID(cx->runtime->atomState
+ .constructorAtom),
+ OBJECT_TO_JSVAL(ctor),
+ CheckCtorGetAccess, CheckCtorSetAccess,
+ 0, NULL);
+}
+
+JSBool
+js_ValueToObject(JSContext *cx, jsval v, JSObject **objp)
+{
+ JSObject *obj;
+
+ if (JSVAL_IS_NULL(v) || JSVAL_IS_VOID(v)) {
+ obj = NULL;
+ } else if (JSVAL_IS_OBJECT(v)) {
+ obj = JSVAL_TO_OBJECT(v);
+ if (!OBJ_DEFAULT_VALUE(cx, obj, JSTYPE_OBJECT, &v))
+ return JS_FALSE;
+ if (JSVAL_IS_OBJECT(v))
+ obj = JSVAL_TO_OBJECT(v);
+ } else {
+ if (JSVAL_IS_STRING(v)) {
+ obj = js_StringToObject(cx, JSVAL_TO_STRING(v));
+ } else if (JSVAL_IS_INT(v)) {
+ obj = js_NumberToObject(cx, (jsdouble)JSVAL_TO_INT(v));
+ } else if (JSVAL_IS_DOUBLE(v)) {
+ obj = js_NumberToObject(cx, *JSVAL_TO_DOUBLE(v));
+ } else {
+ JS_ASSERT(JSVAL_IS_BOOLEAN(v));
+ obj = js_BooleanToObject(cx, JSVAL_TO_BOOLEAN(v));
+ }
+ if (!obj)
+ return JS_FALSE;
+ }
+ *objp = obj;
+ return JS_TRUE;
+}
+
+JSObject *
+js_ValueToNonNullObject(JSContext *cx, jsval v)
+{
+ JSObject *obj;
+ JSString *str;
+
+ if (!js_ValueToObject(cx, v, &obj))
+ return NULL;
+ if (!obj) {
+ str = js_DecompileValueGenerator(cx, JSDVG_SEARCH_STACK, v, NULL);
+ if (str) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_NO_PROPERTIES, JS_GetStringBytes(str));
+ }
+ }
+ return obj;
+}
+
+JSBool
+js_TryValueOf(JSContext *cx, JSObject *obj, JSType type, jsval *rval)
+{
+ jsval argv[1];
+
+ argv[0] = ATOM_KEY(cx->runtime->atomState.typeAtoms[type]);
+ return js_TryMethod(cx, obj, cx->runtime->atomState.valueOfAtom, 1, argv,
+ rval);
+}
+
+JSBool
+js_TryMethod(JSContext *cx, JSObject *obj, JSAtom *atom,
+ uintN argc, jsval *argv, jsval *rval)
+{
+ JSErrorReporter older;
+ jsid id;
+ jsval fval;
+ JSBool ok;
+ int stackDummy;
+
+ if (!JS_CHECK_STACK_SIZE(cx, stackDummy)) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_OVER_RECURSED);
+ return JS_FALSE;
+ }
+
+ /*
+ * Report failure only if an appropriate method was found, and calling it
+ * returned failure. We propagate failure in this case to make exceptions
+ * behave properly.
+ */
+ older = JS_SetErrorReporter(cx, NULL);
+ id = ATOM_TO_JSID(atom);
+ fval = JSVAL_VOID;
+#if JS_HAS_XML_SUPPORT
+ if (OBJECT_IS_XML(cx, obj)) {
+ JSXMLObjectOps *ops;
+
+ ops = (JSXMLObjectOps *) obj->map->ops;
+ obj = ops->getMethod(cx, obj, id, &fval);
+ ok = (obj != NULL);
+ } else
+#endif
+ {
+ ok = OBJ_GET_PROPERTY(cx, obj, id, &fval);
+ }
+ if (!ok)
+ JS_ClearPendingException(cx);
+ JS_SetErrorReporter(cx, older);
+
+ return JSVAL_IS_PRIMITIVE(fval) ||
+ js_InternalCall(cx, obj, fval, argc, argv, rval);
+}
+
+#if JS_HAS_XDR
+
+JSBool
+js_XDRObject(JSXDRState *xdr, JSObject **objp)
+{
+ JSContext *cx;
+ JSAtom *atom;
+ JSClass *clasp;
+ uint32 classId, classDef;
+ JSProtoKey protoKey;
+ jsid classKey;
+ JSObject *proto;
+
+ cx = xdr->cx;
+ atom = NULL;
+ if (xdr->mode == JSXDR_ENCODE) {
+ clasp = OBJ_GET_CLASS(cx, *objp);
+ classId = JS_XDRFindClassIdByName(xdr, clasp->name);
+ classDef = !classId;
+ if (classDef) {
+ if (!JS_XDRRegisterClass(xdr, clasp, &classId))
+ return JS_FALSE;
+ protoKey = JSCLASS_CACHED_PROTO_KEY(clasp);
+ if (protoKey != JSProto_Null) {
+ classDef |= (protoKey << 1);
+ } else {
+ atom = js_Atomize(cx, clasp->name, strlen(clasp->name), 0);
+ if (!atom)
+ return JS_FALSE;
+ }
+ }
+ } else {
+ clasp = NULL; /* quell GCC overwarning */
+ classDef = 0;
+ }
+
+ /*
+ * XDR a flag word, which could be 0 for a class use, in which case no
+ * name follows, only the id in xdr's class registry; 1 for a class def,
+ * in which case the flag word is followed by the class name transferred
+ * from or to atom; or a value greater than 1, an odd number that when
+ * divided by two yields the JSProtoKey for the class. In the last case, as
+ * in the 0 classDef case, no name is transferred via atom.
+ */
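+ /*
+ * Worked encodings (from the scheme above): a plain class use XDRs
+ * classDef == 0 and relies on classId alone; a class def whose class has
+ * no cached proto key XDRs classDef == 1 followed by the class name atom;
+ * and a class def with cached proto key k XDRs classDef == (k << 1) | 1,
+ * an odd value > 1 from which the decoder below recovers k as
+ * classDef >> 1.
+ */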
+ if (!JS_XDRUint32(xdr, &classDef))
+ return JS_FALSE;
+ if (classDef == 1 && !js_XDRCStringAtom(xdr, &atom))
+ return JS_FALSE;
+
+ if (!JS_XDRUint32(xdr, &classId))
+ return JS_FALSE;
+
+ if (xdr->mode == JSXDR_DECODE) {
+ if (classDef) {
+ /* NB: we know that JSProto_Null is 0 here, for backward compat. */
+ protoKey = classDef >> 1;
+ classKey = (protoKey != JSProto_Null)
+ ? INT_TO_JSID(protoKey)
+ : ATOM_TO_JSID(atom);
+ if (!js_GetClassPrototype(cx, NULL, classKey, &proto))
+ return JS_FALSE;
+ clasp = OBJ_GET_CLASS(cx, proto);
+ if (!JS_XDRRegisterClass(xdr, clasp, &classId))
+ return JS_FALSE;
+ } else {
+ clasp = JS_XDRFindClassById(xdr, classId);
+ if (!clasp) {
+ char numBuf[12];
+ JS_snprintf(numBuf, sizeof numBuf, "%ld", (long)classId);
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_CANT_FIND_CLASS, numBuf);
+ return JS_FALSE;
+ }
+ }
+ }
+
+ if (!clasp->xdrObject) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_CANT_XDR_CLASS, clasp->name);
+ return JS_FALSE;
+ }
+ return clasp->xdrObject(xdr, objp);
+}
+
+#endif /* JS_HAS_XDR */
+
+#ifdef DEBUG_brendan
+
+#include <stdio.h>
+#include <math.h>
+
+uint32 js_entry_count_max;
+uint32 js_entry_count_sum;
+double js_entry_count_sqsum;
+uint32 js_entry_count_hist[11];
+
+static void
+MeterEntryCount(uintN count)
+{
+ if (count) {
+ js_entry_count_sum += count;
+ js_entry_count_sqsum += (double)count * count;
+ if (count > js_entry_count_max)
+ js_entry_count_max = count;
+ }
+ js_entry_count_hist[JS_MIN(count, 10)]++;
+}
+
+#define DEBUG_scopemeters
+#endif /* DEBUG_brendan */
+
+#ifdef DEBUG_scopemeters
+void
+js_DumpScopeMeters(JSRuntime *rt)
+{
+ static FILE *logfp;
+ if (!logfp)
+ logfp = fopen("/tmp/scope.stats", "a");
+
+ {
+ double mean = 0., var = 0., sigma = 0.;
+ double nscopes = rt->liveScopes;
+ double nentrys = js_entry_count_sum;
+ if (nscopes > 0 && nentrys >= 0) {
+ mean = nentrys / nscopes;
+ var = nscopes * js_entry_count_sqsum - nentrys * nentrys;
+ if (var < 0.0 || nscopes <= 1)
+ var = 0.0;
+ else
+ var /= nscopes * (nscopes - 1);
+
+ /* Windows says sqrt(0.0) is "-1.#J" (?!) so we must test. */
+ sigma = (var != 0.) ? sqrt(var) : 0.;
+ }
+
+ fprintf(logfp,
+ "scopes %g entries %g mean %g sigma %g max %u",
+ nscopes, nentrys, mean, sigma, js_entry_count_max);
+ }
+
+ fprintf(logfp, " histogram %u %u %u %u %u %u %u %u %u %u %u\n",
+ js_entry_count_hist[0], js_entry_count_hist[1],
+ js_entry_count_hist[2], js_entry_count_hist[3],
+ js_entry_count_hist[4], js_entry_count_hist[5],
+ js_entry_count_hist[6], js_entry_count_hist[7],
+ js_entry_count_hist[8], js_entry_count_hist[9],
+ js_entry_count_hist[10]);
+ js_entry_count_sum = js_entry_count_max = 0;
+ js_entry_count_sqsum = 0;
+ memset(js_entry_count_hist, 0, sizeof js_entry_count_hist);
+ fflush(logfp);
+}
+#endif
+
+uint32
+js_Mark(JSContext *cx, JSObject *obj, void *arg)
+{
+ JSScope *scope;
+ JSScopeProperty *sprop;
+ JSClass *clasp;
+
+ JS_ASSERT(OBJ_IS_NATIVE(obj));
+ scope = OBJ_SCOPE(obj);
+#ifdef DEBUG_brendan
+ if (scope->object == obj)
+ MeterEntryCount(scope->entryCount);
+#endif
+
+ JS_ASSERT(!SCOPE_LAST_PROP(scope) ||
+ SCOPE_HAS_PROPERTY(scope, SCOPE_LAST_PROP(scope)));
+
+ for (sprop = SCOPE_LAST_PROP(scope); sprop; sprop = sprop->parent) {
+ if (SCOPE_HAD_MIDDLE_DELETE(scope) && !SCOPE_HAS_PROPERTY(scope, sprop))
+ continue;
+ MARK_SCOPE_PROPERTY(cx, sprop);
+ }
+
+ /* No one runs while the GC is running, so we can use LOCKED_... here. */
+ clasp = LOCKED_OBJ_GET_CLASS(obj);
+ if (clasp->mark)
+ (void) clasp->mark(cx, obj, NULL);
+
+ if (scope->object != obj) {
+ /*
+ * An unmutated object that shares a prototype's scope. We can't tell
+ * how many slots are allocated and in use at obj->slots by looking at
+ * scope, so we get obj->slots' length from its -1'st element.
+ */
+ return (uint32) obj->slots[-1];
+ }
+ return JS_MIN(scope->map.freeslot, scope->map.nslots);
+}
+
+void
+js_Clear(JSContext *cx, JSObject *obj)
+{
+ JSScope *scope;
+ JSRuntime *rt;
+ JSScopeProperty *sprop;
+ uint32 i, n;
+
+ /*
+ * Clear our scope and the property cache of all obj's properties only if
+ * obj owns the scope (i.e., not if obj is unmutated and therefore sharing
+ * its prototype's scope). NB: we do not clear any reserved slots lying
+ * below JSSLOT_FREE(clasp).
+ */
+ JS_LOCK_OBJ(cx, obj);
+ scope = OBJ_SCOPE(obj);
+ if (scope->object == obj) {
+ /* Clear the property cache before we clear the scope. */
+ rt = cx->runtime;
+ for (sprop = SCOPE_LAST_PROP(scope); sprop; sprop = sprop->parent) {
+ if (!SCOPE_HAD_MIDDLE_DELETE(scope) ||
+ SCOPE_HAS_PROPERTY(scope, sprop)) {
+ PROPERTY_CACHE_FILL(&rt->propertyCache, obj, sprop->id, NULL);
+ }
+ }
+
+ /* Now that we're done using scope->lastProp/table, clear scope. */
+ js_ClearScope(cx, scope);
+
+ /* Clear slot values and reset freeslot so we're consistent. */
+ i = scope->map.nslots;
+ n = JSSLOT_FREE(LOCKED_OBJ_GET_CLASS(obj));
+ while (--i >= n)
+ obj->slots[i] = JSVAL_VOID;
+ scope->map.freeslot = n;
+ }
+ JS_UNLOCK_OBJ(cx, obj);
+}
+
+jsval
+js_GetRequiredSlot(JSContext *cx, JSObject *obj, uint32 slot)
+{
+ jsval v;
+
+ JS_LOCK_OBJ(cx, obj);
+ v = (slot < (uint32) obj->slots[-1]) ? obj->slots[slot] : JSVAL_VOID;
+ JS_UNLOCK_OBJ(cx, obj);
+ return v;
+}
+
+JSBool
+js_SetRequiredSlot(JSContext *cx, JSObject *obj, uint32 slot, jsval v)
+{
+ JSScope *scope;
+ uint32 nslots;
+ JSClass *clasp;
+ jsval *newslots;
+
+ JS_LOCK_OBJ(cx, obj);
+ scope = OBJ_SCOPE(obj);
+ nslots = (uint32) obj->slots[-1];
+ if (slot >= nslots) {
+ /*
+ * At this point, obj may or may not own scope. If some path calls
+ * js_GetMutableScope but does not add a slot-owning property, then
+ * scope->object == obj but nslots will be nominal. If obj shares a
+ * prototype's scope, then we cannot update scope->map here, but we
+ * must update obj->slots[-1] when we grow obj->slots.
+ *
+ * See js_Mark, before the last return, where we make a special case
+ * for unmutated (scope->object != obj) objects.
+ */
+ JS_ASSERT(nslots == JS_INITIAL_NSLOTS);
+ clasp = LOCKED_OBJ_GET_CLASS(obj);
+ nslots = JSSLOT_FREE(clasp);
+ if (clasp->reserveSlots)
+ nslots += clasp->reserveSlots(cx, obj);
+ JS_ASSERT(slot < nslots);
+
+ newslots = AllocSlots(cx, obj->slots, nslots);
+ if (!newslots) {
+ JS_UNLOCK_SCOPE(cx, scope);
+ return JS_FALSE;
+ }
+ if (scope->object == obj)
+ scope->map.nslots = nslots;
+ obj->slots = newslots;
+ }
+
+ /* Whether or not we grew nslots, we may need to advance freeslot. */
+ if (scope->object == obj && slot >= scope->map.freeslot)
+ scope->map.freeslot = slot + 1;
+
+ obj->slots[slot] = v;
+ JS_UNLOCK_SCOPE(cx, scope);
+ return JS_TRUE;
+}
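+
+/*
+ * Usage sketch (illustrative; ExampleClass and obj are hypothetical, and the
+ * call path is summarized rather than traced): a class that declares
+ * reserved slots reaches js_GetRequiredSlot/js_SetRequiredSlot through the
+ * JS_GetReservedSlot/JS_SetReservedSlot API and the OBJ_*_REQUIRED_SLOT
+ * macros in jsobj.h.
+ *
+ *     static JSClass ExampleClass = {
+ *         "Example", JSCLASS_HAS_RESERVED_SLOTS(2),
+ *         JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
+ *         JS_EnumerateStub, JS_ResolveStub, JS_ConvertStub, JS_FinalizeStub
+ *     };   (remaining optional hooks omitted)
+ *
+ *     jsval v;
+ *     if (!JS_SetReservedSlot(cx, obj, 0, INT_TO_JSVAL(42)) ||
+ *         !JS_GetReservedSlot(cx, obj, 0, &v)) {
+ *         return JS_FALSE;
+ *     }
+ */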
+
+#ifdef DEBUG
+
+/* Routines to print out values during debugging. */
+
+void printChar(jschar *cp) {
+ fprintf(stderr, "jschar* (0x%p) \"", (void *)cp);
+ while (*cp)
+ fputc(*cp++, stderr);
+ fputc('"', stderr);
+ fputc('\n', stderr);
+}
+
+void printString(JSString *str) {
+ size_t i, n;
+ jschar *s;
+ fprintf(stderr, "string (0x%p) \"", (void *)str);
+ s = JSSTRING_CHARS(str);
+ for (i=0, n=JSSTRING_LENGTH(str); i < n; i++)
+ fputc(s[i], stderr);
+ fputc('"', stderr);
+ fputc('\n', stderr);
+}
+
+void printVal(JSContext *cx, jsval val);
+
+void printObj(JSContext *cx, JSObject *jsobj) {
+ jsuint i;
+ jsval val;
+ JSClass *clasp;
+
+ fprintf(stderr, "object 0x%p\n", (void *)jsobj);
+ clasp = OBJ_GET_CLASS(cx, jsobj);
+ fprintf(stderr, "class 0x%p %s\n", (void *)clasp, clasp->name);
+ for (i=0; i < jsobj->map->nslots; i++) {
+ fprintf(stderr, "slot %3d ", i);
+ val = jsobj->slots[i];
+ if (JSVAL_IS_OBJECT(val))
+ fprintf(stderr, "object 0x%p\n", (void *)JSVAL_TO_OBJECT(val));
+ else
+ printVal(cx, val);
+ }
+}
+
+void printVal(JSContext *cx, jsval val) {
+ fprintf(stderr, "val %d (0x%p) = ", (int)val, (void *)val);
+ if (JSVAL_IS_NULL(val)) {
+ fprintf(stderr, "null\n");
+ } else if (JSVAL_IS_VOID(val)) {
+ fprintf(stderr, "undefined\n");
+ } else if (JSVAL_IS_OBJECT(val)) {
+ printObj(cx, JSVAL_TO_OBJECT(val));
+ } else if (JSVAL_IS_INT(val)) {
+ fprintf(stderr, "(int) %d\n", JSVAL_TO_INT(val));
+ } else if (JSVAL_IS_STRING(val)) {
+ printString(JSVAL_TO_STRING(val));
+ } else if (JSVAL_IS_DOUBLE(val)) {
+ fprintf(stderr, "(double) %g\n", *JSVAL_TO_DOUBLE(val));
+ } else {
+ JS_ASSERT(JSVAL_IS_BOOLEAN(val));
+ fprintf(stderr, "(boolean) %s\n",
+ JSVAL_TO_BOOLEAN(val) ? "true" : "false");
+ }
+ fflush(stderr);
+}
+
+void printId(JSContext *cx, jsid id) {
+ fprintf(stderr, "id %d (0x%p) is ", (int)id, (void *)id);
+ printVal(cx, ID_TO_VALUE(id));
+}
+
+void printAtom(JSAtom *atom) {
+ printString(ATOM_TO_STRING(atom));
+}
+
+#endif
diff --git a/third_party/js-1.7/jsobj.h b/third_party/js-1.7/jsobj.h
new file mode 100644
index 0000000..eb3aedb
--- /dev/null
+++ b/third_party/js-1.7/jsobj.h
@@ -0,0 +1,596 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sw=4 et tw=80:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsobj_h___
+#define jsobj_h___
+/*
+ * JS object definitions.
+ *
+ * A JS object consists of a possibly-shared object descriptor containing
+ * ordered property names, called the map; and a dense vector of property
+ * values, called slots. The map/slot pointer pair is GC'ed, while the map
+ * is reference counted and the slot vector is malloc'ed.
+ */
+#include "jshash.h" /* Added by JSIFY */
+#include "jsprvtd.h"
+#include "jspubtd.h"
+
+JS_BEGIN_EXTERN_C
+
+struct JSObjectMap {
+ jsrefcount nrefs; /* count of all referencing objects */
+ JSObjectOps *ops; /* high level object operation vtable */
+ uint32 nslots; /* length of obj->slots vector */
+ uint32 freeslot; /* index of next free obj->slots element */
+};
+
+/* Shorthand macros for frequently-made calls. */
+#define OBJ_LOOKUP_PROPERTY(cx,obj,id,objp,propp) \
+ (obj)->map->ops->lookupProperty(cx,obj,id,objp,propp)
+#define OBJ_DEFINE_PROPERTY(cx,obj,id,value,getter,setter,attrs,propp) \
+ (obj)->map->ops->defineProperty(cx,obj,id,value,getter,setter,attrs,propp)
+#define OBJ_GET_PROPERTY(cx,obj,id,vp) \
+ (obj)->map->ops->getProperty(cx,obj,id,vp)
+#define OBJ_SET_PROPERTY(cx,obj,id,vp) \
+ (obj)->map->ops->setProperty(cx,obj,id,vp)
+#define OBJ_GET_ATTRIBUTES(cx,obj,id,prop,attrsp) \
+ (obj)->map->ops->getAttributes(cx,obj,id,prop,attrsp)
+#define OBJ_SET_ATTRIBUTES(cx,obj,id,prop,attrsp) \
+ (obj)->map->ops->setAttributes(cx,obj,id,prop,attrsp)
+#define OBJ_DELETE_PROPERTY(cx,obj,id,rval) \
+ (obj)->map->ops->deleteProperty(cx,obj,id,rval)
+#define OBJ_DEFAULT_VALUE(cx,obj,hint,vp) \
+ (obj)->map->ops->defaultValue(cx,obj,hint,vp)
+#define OBJ_ENUMERATE(cx,obj,enum_op,statep,idp) \
+ (obj)->map->ops->enumerate(cx,obj,enum_op,statep,idp)
+#define OBJ_CHECK_ACCESS(cx,obj,id,mode,vp,attrsp) \
+ (obj)->map->ops->checkAccess(cx,obj,id,mode,vp,attrsp)
+
+/* These four are time-optimized to avoid stub calls. */
+#define OBJ_THIS_OBJECT(cx,obj) \
+ ((obj)->map->ops->thisObject \
+ ? (obj)->map->ops->thisObject(cx,obj) \
+ : (obj))
+#define OBJ_DROP_PROPERTY(cx,obj,prop) \
+ ((obj)->map->ops->dropProperty \
+ ? (obj)->map->ops->dropProperty(cx,obj,prop) \
+ : (void)0)
+#define OBJ_GET_REQUIRED_SLOT(cx,obj,slot) \
+ ((obj)->map->ops->getRequiredSlot \
+ ? (obj)->map->ops->getRequiredSlot(cx, obj, slot) \
+ : JSVAL_VOID)
+#define OBJ_SET_REQUIRED_SLOT(cx,obj,slot,v) \
+ ((obj)->map->ops->setRequiredSlot \
+ ? (obj)->map->ops->setRequiredSlot(cx, obj, slot, v) \
+ : JS_TRUE)
+
+#define OBJ_TO_INNER_OBJECT(cx,obj) \
+ JS_BEGIN_MACRO \
+ JSClass *clasp_ = OBJ_GET_CLASS(cx, obj); \
+ if (clasp_->flags & JSCLASS_IS_EXTENDED) { \
+ JSExtendedClass *xclasp_ = (JSExtendedClass*)clasp_; \
+ if (xclasp_->innerObject) \
+ obj = xclasp_->innerObject(cx, obj); \
+ } \
+ JS_END_MACRO
+
+/*
+ * In the original JS engine design, obj->slots pointed to a vector of length
+ * JS_INITIAL_NSLOTS words if obj->map was shared with a prototype object,
+ * else of length obj->map->nslots. With the advent of JS_GetReservedSlot,
+ * JS_SetReservedSlot, and JSCLASS_HAS_RESERVED_SLOTS (see jsapi.h), the size
+ * of the minimum length slots vector in the case where map is shared cannot
+ * be constant. This length starts at JS_INITIAL_NSLOTS, but may advance to
+ * include all the reserved slots.
+ *
+ * Therefore slots must be self-describing. Rather than tag its low order bit
+ * (a bit is all we need) to distinguish initial length from reserved length,
+ * we do "the BSTR thing": over-allocate slots by one jsval, and store the
+ * *net* length (counting usable slots, which have non-negative obj->slots[]
+ * indices) in obj->slots[-1]. All code that sets obj->slots must be aware of
+ * this hack -- you have been warned, and jsobj.c has been updated!
+ */
+struct JSObject {
+ JSObjectMap *map;
+ jsval *slots;
+};
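+
+/*
+ * Layout sketch of the over-allocated slots vector described above
+ * (ExampleAllocSlots only illustrates the [-1] length word; the engine's
+ * own allocator lives in jsobj.c):
+ *
+ *     static jsval *
+ *     ExampleAllocSlots(JSContext *cx, uint32 nslots)
+ *     {
+ *         jsval *v = (jsval *) JS_malloc(cx, (nslots + 1) * sizeof(jsval));
+ *
+ *         if (!v)
+ *             return NULL;
+ *         v[0] = (jsval) nslots;
+ *         return v + 1;
+ *     }
+ *
+ * The returned pointer is what obj->slots holds, so obj->slots[-1] is the
+ * net length that js_Mark and js_SetRequiredSlot consult.
+ */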
+
+#define JSSLOT_PROTO 0
+#define JSSLOT_PARENT 1
+#define JSSLOT_CLASS 2
+#define JSSLOT_PRIVATE 3
+#define JSSLOT_START(clasp) (((clasp)->flags & JSCLASS_HAS_PRIVATE) \
+ ? JSSLOT_PRIVATE + 1 \
+ : JSSLOT_CLASS + 1)
+
+#define JSSLOT_FREE(clasp) (JSSLOT_START(clasp) \
+ + JSCLASS_RESERVED_SLOTS(clasp))
+
+#define JS_INITIAL_NSLOTS 5
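+
+/*
+ * Worked example of the slot layout above: for a class with
+ * JSCLASS_HAS_PRIVATE and JSCLASS_HAS_RESERVED_SLOTS(2),
+ * JSSLOT_START(clasp) == JSSLOT_PRIVATE + 1 == 4 and
+ * JSSLOT_FREE(clasp) == 4 + 2 == 6, so dynamically allocated property slots
+ * begin at index 6.  Without JSCLASS_HAS_PRIVATE, JSSLOT_START(clasp) ==
+ * JSSLOT_CLASS + 1 == 3.
+ */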
+
+#ifdef DEBUG
+#define MAP_CHECK_SLOT(map,slot) \
+ JS_ASSERT((uint32)slot < JS_MIN((map)->freeslot, (map)->nslots))
+#define OBJ_CHECK_SLOT(obj,slot) \
+ MAP_CHECK_SLOT((obj)->map, slot)
+#else
+#define OBJ_CHECK_SLOT(obj,slot) ((void)0)
+#endif
+
+/* Fast macros for accessing obj->slots while obj is locked (if thread-safe). */
+#define LOCKED_OBJ_GET_SLOT(obj,slot) \
+ (OBJ_CHECK_SLOT(obj, slot), (obj)->slots[slot])
+#define LOCKED_OBJ_SET_SLOT(obj,slot,value) \
+ (OBJ_CHECK_SLOT(obj, slot), (obj)->slots[slot] = (value))
+#define LOCKED_OBJ_GET_PROTO(obj) \
+ JSVAL_TO_OBJECT(LOCKED_OBJ_GET_SLOT(obj, JSSLOT_PROTO))
+#define LOCKED_OBJ_GET_CLASS(obj) \
+ ((JSClass *)JSVAL_TO_PRIVATE(LOCKED_OBJ_GET_SLOT(obj, JSSLOT_CLASS)))
+
+#ifdef JS_THREADSAFE
+
+/* Thread-safe functions and wrapper macros for accessing obj->slots. */
+#define OBJ_GET_SLOT(cx,obj,slot) \
+ (OBJ_CHECK_SLOT(obj, slot), \
+ (OBJ_IS_NATIVE(obj) && OBJ_SCOPE(obj)->ownercx == cx) \
+ ? LOCKED_OBJ_GET_SLOT(obj, slot) \
+ : js_GetSlotThreadSafe(cx, obj, slot))
+
+#define OBJ_SET_SLOT(cx,obj,slot,value) \
+ (OBJ_CHECK_SLOT(obj, slot), \
+ (OBJ_IS_NATIVE(obj) && OBJ_SCOPE(obj)->ownercx == cx) \
+ ? (void) LOCKED_OBJ_SET_SLOT(obj, slot, value) \
+ : js_SetSlotThreadSafe(cx, obj, slot, value))
+
+/*
+ * If thread-safe, define an OBJ_GET_SLOT wrapper that bypasses, for a native
+ * object, the lock-free "fast path" test of (OBJ_SCOPE(obj)->ownercx == cx),
+ * to avoid needlessly switching from lock-free to lock-full scope when doing
+ * GC on a different context from the last one to own the scope. The caller
+ * in this case is probably a JSClass.mark function, e.g., fun_mark, or maybe
+ * a finalizer.
+ *
+ * The GC runs only when all threads except the one on which the GC is active
+ * are suspended at GC-safe points, so there is no hazard in directly accessing
+ * obj->slots[slot] from the GC's thread, once rt->gcRunning has been set. See
+ * jsgc.c for details.
+ */
+#define THREAD_IS_RUNNING_GC(rt, thread) \
+ ((rt)->gcRunning && (rt)->gcThread == (thread))
+
+#define CX_THREAD_IS_RUNNING_GC(cx) \
+ THREAD_IS_RUNNING_GC((cx)->runtime, (cx)->thread)
+
+#define GC_AWARE_GET_SLOT(cx, obj, slot) \
+ ((OBJ_IS_NATIVE(obj) && CX_THREAD_IS_RUNNING_GC(cx)) \
+ ? (obj)->slots[slot] \
+ : OBJ_GET_SLOT(cx, obj, slot))
+
+#else /* !JS_THREADSAFE */
+
+#define OBJ_GET_SLOT(cx,obj,slot) LOCKED_OBJ_GET_SLOT(obj,slot)
+#define OBJ_SET_SLOT(cx,obj,slot,value) LOCKED_OBJ_SET_SLOT(obj,slot,value)
+#define GC_AWARE_GET_SLOT(cx,obj,slot) LOCKED_OBJ_GET_SLOT(obj,slot)
+
+#endif /* !JS_THREADSAFE */
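+
+/*
+ * Usage sketch for GC_AWARE_GET_SLOT (ExampleMark and EXAMPLE_SLOT are
+ * hypothetical; JS_MarkGCThing is the public marking entry point declared
+ * in jsapi.h):
+ *
+ *     static uint32
+ *     ExampleMark(JSContext *cx, JSObject *obj, void *arg)
+ *     {
+ *         jsval v = GC_AWARE_GET_SLOT(cx, obj, EXAMPLE_SLOT);
+ *
+ *         if (JSVAL_IS_GCTHING(v))
+ *             JS_MarkGCThing(cx, JSVAL_TO_GCTHING(v), "exampleSlot", arg);
+ *         return 0;
+ *     }
+ */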
+
+/* Thread-safe proto, parent, and class access macros. */
+#define OBJ_GET_PROTO(cx,obj) \
+ JSVAL_TO_OBJECT(OBJ_GET_SLOT(cx, obj, JSSLOT_PROTO))
+#define OBJ_SET_PROTO(cx,obj,proto) \
+ OBJ_SET_SLOT(cx, obj, JSSLOT_PROTO, OBJECT_TO_JSVAL(proto))
+
+#define OBJ_GET_PARENT(cx,obj) \
+ JSVAL_TO_OBJECT(OBJ_GET_SLOT(cx, obj, JSSLOT_PARENT))
+#define OBJ_SET_PARENT(cx,obj,parent) \
+ OBJ_SET_SLOT(cx, obj, JSSLOT_PARENT, OBJECT_TO_JSVAL(parent))
+
+#define OBJ_GET_CLASS(cx,obj) \
+ ((JSClass *)JSVAL_TO_PRIVATE(OBJ_GET_SLOT(cx, obj, JSSLOT_CLASS)))
+
+/* Test whether a map or object is native. */
+#define MAP_IS_NATIVE(map) \
+ ((map)->ops == &js_ObjectOps || \
+ ((map)->ops && (map)->ops->newObjectMap == js_ObjectOps.newObjectMap))
+
+#define OBJ_IS_NATIVE(obj) MAP_IS_NATIVE((obj)->map)
+
+extern JS_FRIEND_DATA(JSObjectOps) js_ObjectOps;
+extern JS_FRIEND_DATA(JSObjectOps) js_WithObjectOps;
+extern JSClass js_ObjectClass;
+extern JSClass js_WithClass;
+extern JSClass js_BlockClass;
+
+/*
+ * Block scope object macros. The slots reserved by js_BlockClass are:
+ *
+ * JSSLOT_PRIVATE JSStackFrame * active frame pointer or null
+ * JSSLOT_BLOCK_DEPTH int depth of block slots in frame
+ *
+ * After JSSLOT_BLOCK_DEPTH come one or more slots for the block locals.
+ * OBJ_BLOCK_COUNT depends on this arrangement.
+ *
+ * A With object is like a Block object, in that both have one reserved slot
+ * telling the stack depth of the relevant slots (the slot whose value is the
+ * object named in the with statement, the slots containing the block's local
+ * variables); and both have a private slot referring to the JSStackFrame in
+ * whose activation they were created (or null if the with or block object
+ * outlives the frame).
+ */
+#define JSSLOT_BLOCK_DEPTH (JSSLOT_PRIVATE + 1)
+
+#define OBJ_BLOCK_COUNT(cx,obj) \
+ ((obj)->map->freeslot - (JSSLOT_BLOCK_DEPTH + 1))
+#define OBJ_BLOCK_DEPTH(cx,obj) \
+ JSVAL_TO_INT(OBJ_GET_SLOT(cx, obj, JSSLOT_BLOCK_DEPTH))
+#define OBJ_SET_BLOCK_DEPTH(cx,obj,depth) \
+ OBJ_SET_SLOT(cx, obj, JSSLOT_BLOCK_DEPTH, INT_TO_JSVAL(depth))
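+
+/*
+ * Worked example: a block object reifying two let-bound locals uses
+ * JSSLOT_PRIVATE (the frame pointer), JSSLOT_BLOCK_DEPTH, and two local
+ * slots, so map->freeslot == JSSLOT_BLOCK_DEPTH + 1 + 2 and
+ * OBJ_BLOCK_COUNT(cx, obj) == 2.
+ */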
+
+/*
+ * To make sure this slot is well-defined, always call js_NewWithObject to
+ * create a With object, don't call js_NewObject directly. When creating a
+ * With object that does not correspond to a stack slot, pass -1 for depth.
+ *
+ * When popping the stack across this object's "with" statement, client code
+ * must call JS_SetPrivate(cx, withobj, NULL).
+ */
+extern JSObject *
+js_NewWithObject(JSContext *cx, JSObject *proto, JSObject *parent, jsint depth);
+
+/*
+ * Create a new block scope object not linked to any proto or parent object.
+ * Blocks are created by the compiler to reify let blocks and comprehensions.
+ * Only when dynamic scope is captured do they need to be cloned and spliced
+ * into an active scope chain.
+ */
+extern JSObject *
+js_NewBlockObject(JSContext *cx);
+
+extern JSObject *
+js_CloneBlockObject(JSContext *cx, JSObject *proto, JSObject *parent,
+ JSStackFrame *fp);
+
+extern JSBool
+js_PutBlockObject(JSContext *cx, JSObject *obj);
+
+struct JSSharpObjectMap {
+ jsrefcount depth;
+ jsatomid sharpgen;
+ JSHashTable *table;
+};
+
+#define SHARP_BIT ((jsatomid) 1)
+#define BUSY_BIT ((jsatomid) 2)
+#define SHARP_ID_SHIFT 2
+#define IS_SHARP(he) (JS_PTR_TO_UINT32((he)->value) & SHARP_BIT)
+#define MAKE_SHARP(he) ((he)->value = JS_UINT32_TO_PTR(JS_PTR_TO_UINT32((he)->value)|SHARP_BIT))
+#define IS_BUSY(he) (JS_PTR_TO_UINT32((he)->value) & BUSY_BIT)
+#define MAKE_BUSY(he) ((he)->value = JS_UINT32_TO_PTR(JS_PTR_TO_UINT32((he)->value)|BUSY_BIT))
+#define CLEAR_BUSY(he) ((he)->value = JS_UINT32_TO_PTR(JS_PTR_TO_UINT32((he)->value)&~BUSY_BIT))
+
+extern JSHashEntry *
+js_EnterSharpObject(JSContext *cx, JSObject *obj, JSIdArray **idap,
+ jschar **sp);
+
+extern void
+js_LeaveSharpObject(JSContext *cx, JSIdArray **idap);
+
+/*
+ * Mark objects stored in map if GC happens between js_EnterSharpObject
+ * and js_LeaveSharpObject. GC calls this when map->depth > 0.
+ */
+extern void
+js_GCMarkSharpMap(JSContext *cx, JSSharpObjectMap *map);
+
+extern JSBool
+js_obj_toSource(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval);
+
+extern JSBool
+js_obj_toString(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval);
+
+extern JSBool
+js_HasOwnPropertyHelper(JSContext *cx, JSObject *obj, JSLookupPropOp lookup,
+ uintN argc, jsval *argv, jsval *rval);
+
+extern JSObject*
+js_InitBlockClass(JSContext *cx, JSObject* obj);
+
+extern JSObject *
+js_InitObjectClass(JSContext *cx, JSObject *obj);
+
+/* Select Object.prototype method names shared between jsapi.c and jsobj.c. */
+extern const char js_watch_str[];
+extern const char js_unwatch_str[];
+extern const char js_hasOwnProperty_str[];
+extern const char js_isPrototypeOf_str[];
+extern const char js_propertyIsEnumerable_str[];
+extern const char js_defineGetter_str[];
+extern const char js_defineSetter_str[];
+extern const char js_lookupGetter_str[];
+extern const char js_lookupSetter_str[];
+
+extern void
+js_InitObjectMap(JSObjectMap *map, jsrefcount nrefs, JSObjectOps *ops,
+ JSClass *clasp);
+
+extern JSObjectMap *
+js_NewObjectMap(JSContext *cx, jsrefcount nrefs, JSObjectOps *ops,
+ JSClass *clasp, JSObject *obj);
+
+extern void
+js_DestroyObjectMap(JSContext *cx, JSObjectMap *map);
+
+extern JSObjectMap *
+js_HoldObjectMap(JSContext *cx, JSObjectMap *map);
+
+extern JSObjectMap *
+js_DropObjectMap(JSContext *cx, JSObjectMap *map, JSObject *obj);
+
+extern JSBool
+js_GetClassId(JSContext *cx, JSClass *clasp, jsid *idp);
+
+extern JSObject *
+js_NewObject(JSContext *cx, JSClass *clasp, JSObject *proto, JSObject *parent);
+
+/*
+ * Fast access to immutable standard objects (constructors and prototypes).
+ */
+extern JSBool
+js_GetClassObject(JSContext *cx, JSObject *obj, JSProtoKey key,
+ JSObject **objp);
+
+extern JSBool
+js_SetClassObject(JSContext *cx, JSObject *obj, JSProtoKey key, JSObject *cobj);
+
+extern JSBool
+js_FindClassObject(JSContext *cx, JSObject *start, jsid id, jsval *vp);
+
+extern JSObject *
+js_ConstructObject(JSContext *cx, JSClass *clasp, JSObject *proto,
+ JSObject *parent, uintN argc, jsval *argv);
+
+extern void
+js_FinalizeObject(JSContext *cx, JSObject *obj);
+
+extern JSBool
+js_AllocSlot(JSContext *cx, JSObject *obj, uint32 *slotp);
+
+extern void
+js_FreeSlot(JSContext *cx, JSObject *obj, uint32 slot);
+
+/*
+ * Native property add and lookup variants that hide id in the hidden atom
+ * subspace, so as to avoid collisions between internal properties such as
+ * formal arguments and local variables in function objects, and externally
+ * set properties with the same ids.
+ */
+extern JSScopeProperty *
+js_AddHiddenProperty(JSContext *cx, JSObject *obj, jsid id,
+ JSPropertyOp getter, JSPropertyOp setter, uint32 slot,
+ uintN attrs, uintN flags, intN shortid);
+
+extern JSBool
+js_LookupHiddenProperty(JSContext *cx, JSObject *obj, jsid id, JSObject **objp,
+ JSProperty **propp);
+
+/*
+ * Find or create a property named by id in obj's scope, with the given getter
+ * and setter, slot, attributes, and other members.
+ */
+extern JSScopeProperty *
+js_AddNativeProperty(JSContext *cx, JSObject *obj, jsid id,
+ JSPropertyOp getter, JSPropertyOp setter, uint32 slot,
+ uintN attrs, uintN flags, intN shortid);
+
+/*
+ * Change sprop to have the given attrs, getter, and setter in scope, morphing
+ * it into a potentially new JSScopeProperty. Return a pointer to the changed
+ * or identical property.
+ */
+extern JSScopeProperty *
+js_ChangeNativePropertyAttrs(JSContext *cx, JSObject *obj,
+ JSScopeProperty *sprop, uintN attrs, uintN mask,
+ JSPropertyOp getter, JSPropertyOp setter);
+
+/*
+ * On error, return false. On success, if propp is non-null, return true with
+ * obj locked and with a held property in *propp; if propp is null, return true
+ * but release obj's lock first. Therefore all callers who pass non-null propp
+ * result parameters must later call OBJ_DROP_PROPERTY(cx, obj, *propp) both to
+ * drop the held property, and to release the lock on obj.
+ */
+extern JSBool
+js_DefineProperty(JSContext *cx, JSObject *obj, jsid id, jsval value,
+ JSPropertyOp getter, JSPropertyOp setter, uintN attrs,
+ JSProperty **propp);
+
+extern JSBool
+js_DefineNativeProperty(JSContext *cx, JSObject *obj, jsid id, jsval value,
+ JSPropertyOp getter, JSPropertyOp setter, uintN attrs,
+ uintN flags, intN shortid, JSProperty **propp);
+
+/*
+ * Unlike js_DefineProperty, propp must be non-null. On success, and if id was
+ * found, return true with *objp non-null and locked, and with a held property
+ * stored in *propp. If successful but id was not found, return true with both
+ * *objp and *propp null. Therefore all callers who receive a non-null *propp
+ * must later call OBJ_DROP_PROPERTY(cx, *objp, *propp).
+ */
+extern JS_FRIEND_API(JSBool)
+js_LookupProperty(JSContext *cx, JSObject *obj, jsid id, JSObject **objp,
+ JSProperty **propp);
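+
+/*
+ * Protocol sketch for the result conventions above (illustrative only):
+ *
+ *     JSObject *pobj;
+ *     JSProperty *prop;
+ *
+ *     if (!js_LookupProperty(cx, obj, id, &pobj, &prop))
+ *         return JS_FALSE;
+ *     if (prop) {
+ *         ... use prop while pobj is still locked ...
+ *         OBJ_DROP_PROPERTY(cx, pobj, prop);
+ *     }
+ */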
+
+/*
+ * Specialized subroutine that allows caller to preset JSRESOLVE_* flags.
+ * JSRESOLVE_HIDDEN flags hidden function param/local name lookups, just for
+ * internal use by fun_resolve and similar built-ins.
+ */
+extern JSBool
+js_LookupPropertyWithFlags(JSContext *cx, JSObject *obj, jsid id, uintN flags,
+ JSObject **objp, JSProperty **propp);
+
+#define JSRESOLVE_HIDDEN 0x8000
+
+extern JS_FRIEND_API(JSBool)
+js_FindProperty(JSContext *cx, jsid id, JSObject **objp, JSObject **pobjp,
+ JSProperty **propp);
+
+extern JSObject *
+js_FindIdentifierBase(JSContext *cx, jsid id);
+
+extern JSObject *
+js_FindVariableScope(JSContext *cx, JSFunction **funp);
+
+/*
+ * NB: js_NativeGet and js_NativeSet are called with the scope containing sprop
+ * (pobj's scope for Get, obj's for Set) locked, and on successful return, that
+ * scope is again locked. But on failure, both functions return false with the
+ * scope containing sprop unlocked.
+ */
+extern JSBool
+js_NativeGet(JSContext *cx, JSObject *obj, JSObject *pobj,
+ JSScopeProperty *sprop, jsval *vp);
+
+extern JSBool
+js_NativeSet(JSContext *cx, JSObject *obj, JSScopeProperty *sprop, jsval *vp);
+
+extern JSBool
+js_GetProperty(JSContext *cx, JSObject *obj, jsid id, jsval *vp);
+
+extern JSBool
+js_SetProperty(JSContext *cx, JSObject *obj, jsid id, jsval *vp);
+
+extern JSBool
+js_GetAttributes(JSContext *cx, JSObject *obj, jsid id, JSProperty *prop,
+ uintN *attrsp);
+
+extern JSBool
+js_SetAttributes(JSContext *cx, JSObject *obj, jsid id, JSProperty *prop,
+ uintN *attrsp);
+
+extern JSBool
+js_DeleteProperty(JSContext *cx, JSObject *obj, jsid id, jsval *rval);
+
+extern JSBool
+js_DefaultValue(JSContext *cx, JSObject *obj, JSType hint, jsval *vp);
+
+extern JSIdArray *
+js_NewIdArray(JSContext *cx, jsint length);
+
+/*
+ * Unlike realloc(3), this function frees ida on failure.
+ */
+extern JSIdArray *
+js_SetIdArrayLength(JSContext *cx, JSIdArray *ida, jsint length);
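+
+/*
+ * Error-handling sketch (illustrative):
+ *
+ *     ida = js_SetIdArrayLength(cx, ida, newLength);
+ *     if (!ida)
+ *         return JS_FALSE;
+ *
+ * On the failure path the old array has already been freed, so the caller
+ * must not call JS_DestroyIdArray on it again.
+ */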
+
+extern JSBool
+js_Enumerate(JSContext *cx, JSObject *obj, JSIterateOp enum_op,
+ jsval *statep, jsid *idp);
+
+extern void
+js_MarkNativeIteratorStates(JSContext *cx);
+
+extern JSBool
+js_CheckAccess(JSContext *cx, JSObject *obj, jsid id, JSAccessMode mode,
+ jsval *vp, uintN *attrsp);
+
+extern JSBool
+js_Call(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval);
+
+extern JSBool
+js_Construct(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval);
+
+extern JSBool
+js_HasInstance(JSContext *cx, JSObject *obj, jsval v, JSBool *bp);
+
+extern JSBool
+js_SetProtoOrParent(JSContext *cx, JSObject *obj, uint32 slot, JSObject *pobj);
+
+extern JSBool
+js_IsDelegate(JSContext *cx, JSObject *obj, jsval v, JSBool *bp);
+
+extern JSBool
+js_GetClassPrototype(JSContext *cx, JSObject *scope, jsid id,
+ JSObject **protop);
+
+extern JSBool
+js_SetClassPrototype(JSContext *cx, JSObject *ctor, JSObject *proto,
+ uintN attrs);
+
+extern JSBool
+js_ValueToObject(JSContext *cx, jsval v, JSObject **objp);
+
+extern JSObject *
+js_ValueToNonNullObject(JSContext *cx, jsval v);
+
+extern JSBool
+js_TryValueOf(JSContext *cx, JSObject *obj, JSType type, jsval *rval);
+
+extern JSBool
+js_TryMethod(JSContext *cx, JSObject *obj, JSAtom *atom,
+ uintN argc, jsval *argv, jsval *rval);
+
+extern JSBool
+js_XDRObject(JSXDRState *xdr, JSObject **objp);
+
+extern uint32
+js_Mark(JSContext *cx, JSObject *obj, void *arg);
+
+extern void
+js_Clear(JSContext *cx, JSObject *obj);
+
+extern jsval
+js_GetRequiredSlot(JSContext *cx, JSObject *obj, uint32 slot);
+
+extern JSBool
+js_SetRequiredSlot(JSContext *cx, JSObject *obj, uint32 slot, jsval v);
+
+extern JSObject *
+js_CheckScopeChainValidity(JSContext *cx, JSObject *scopeobj, const char *caller);
+
+extern JSBool
+js_CheckPrincipalsAccess(JSContext *cx, JSObject *scopeobj,
+ JSPrincipals *principals, JSAtom *caller);
+JS_END_EXTERN_C
+
+#endif /* jsobj_h___ */
diff --git a/third_party/js-1.7/jsopcode.c b/third_party/js-1.7/jsopcode.c
new file mode 100644
index 0000000..3dec776
--- /dev/null
+++ b/third_party/js-1.7/jsopcode.c
@@ -0,0 +1,4794 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set sw=4 ts=8 et tw=78:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * JS bytecode descriptors, disassemblers, and decompilers.
+ */
+#include "jsstddef.h"
+#ifdef HAVE_MEMORY_H
+#include <memory.h>
+#endif
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "jstypes.h"
+#include "jsarena.h" /* Added by JSIFY */
+#include "jsutil.h" /* Added by JSIFY */
+#include "jsdtoa.h"
+#include "jsprf.h"
+#include "jsapi.h"
+#include "jsarray.h"
+#include "jsatom.h"
+#include "jscntxt.h"
+#include "jsconfig.h"
+#include "jsdbgapi.h"
+#include "jsemit.h"
+#include "jsfun.h"
+#include "jslock.h"
+#include "jsobj.h"
+#include "jsopcode.h"
+#include "jsregexp.h"
+#include "jsscan.h"
+#include "jsscope.h"
+#include "jsscript.h"
+#include "jsstr.h"
+
+#if JS_HAS_DESTRUCTURING
+# include "jsnum.h"
+#endif
+
+static const char js_incop_strs[][3] = {"++", "--"};
+
+/* Pollute the namespace locally for MSVC Win16, but not for WatCom. */
+#ifdef __WINDOWS_386__
+ #ifdef FAR
+ #undef FAR
+ #endif
+#else /* !__WINDOWS_386__ */
+#ifndef FAR
+#define FAR
+#endif
+#endif /* !__WINDOWS_386__ */
+
+const JSCodeSpec FAR js_CodeSpec[] = {
+#define OPDEF(op,val,name,token,length,nuses,ndefs,prec,format) \
+ {name,token,length,nuses,ndefs,prec,format},
+#include "jsopcode.tbl"
+#undef OPDEF
+};
+
+uintN js_NumCodeSpecs = sizeof (js_CodeSpec) / sizeof js_CodeSpec[0];
+
+/************************************************************************/
+
+static ptrdiff_t
+GetJumpOffset(jsbytecode *pc, jsbytecode *pc2)
+{
+ uint32 type;
+
+ type = (js_CodeSpec[*pc].format & JOF_TYPEMASK);
+ if (JOF_TYPE_IS_EXTENDED_JUMP(type))
+ return GET_JUMPX_OFFSET(pc2);
+ return GET_JUMP_OFFSET(pc2);
+}
+
+#ifdef DEBUG
+
+JS_FRIEND_API(JSBool)
+js_Disassemble(JSContext *cx, JSScript *script, JSBool lines, FILE *fp)
+{
+ jsbytecode *pc, *end;
+ uintN len;
+
+ pc = script->code;
+ end = pc + script->length;
+ while (pc < end) {
+ if (pc == script->main)
+ fputs("main:\n", fp);
+ len = js_Disassemble1(cx, script, pc,
+ PTRDIFF(pc, script->code, jsbytecode),
+ lines, fp);
+ if (!len)
+ return JS_FALSE;
+ pc += len;
+ }
+ return JS_TRUE;
+}
+
+const char *
+ToDisassemblySource(JSContext *cx, jsval v)
+{
+ JSObject *obj;
+ JSScopeProperty *sprop;
+ char *source;
+ const char *bytes;
+ JSString *str;
+
+ if (!JSVAL_IS_PRIMITIVE(v)) {
+ obj = JSVAL_TO_OBJECT(v);
+ if (OBJ_GET_CLASS(cx, obj) == &js_BlockClass) {
+ source = JS_sprintf_append(NULL, "depth %d {",
+ OBJ_BLOCK_DEPTH(cx, obj));
+ for (sprop = OBJ_SCOPE(obj)->lastProp; sprop;
+ sprop = sprop->parent) {
+ bytes = js_AtomToPrintableString(cx, JSID_TO_ATOM(sprop->id));
+ if (!bytes)
+ return NULL;
+ source = JS_sprintf_append(source, "%s: %d%s",
+ bytes, sprop->shortid,
+ sprop->parent ? ", " : "");
+ }
+ source = JS_sprintf_append(source, "}");
+ if (!source)
+ return NULL;
+ str = JS_NewString(cx, source, strlen(source));
+ if (!str)
+ return NULL;
+ return JS_GetStringBytes(str);
+ }
+ }
+ return js_ValueToPrintableSource(cx, v);
+}
+
+JS_FRIEND_API(uintN)
+js_Disassemble1(JSContext *cx, JSScript *script, jsbytecode *pc, uintN loc,
+ JSBool lines, FILE *fp)
+{
+ JSOp op;
+ const JSCodeSpec *cs;
+ ptrdiff_t len, off, jmplen;
+ uint32 type;
+ JSAtom *atom;
+ const char *bytes;
+
+ op = (JSOp)*pc;
+ if (op >= JSOP_LIMIT) {
+ char numBuf1[12], numBuf2[12];
+ JS_snprintf(numBuf1, sizeof numBuf1, "%d", op);
+ JS_snprintf(numBuf2, sizeof numBuf2, "%d", JSOP_LIMIT);
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BYTECODE_TOO_BIG, numBuf1, numBuf2);
+ return 0;
+ }
+ cs = &js_CodeSpec[op];
+ len = (ptrdiff_t) cs->length;
+ fprintf(fp, "%05u:", loc);
+ if (lines)
+ fprintf(fp, "%4u", JS_PCToLineNumber(cx, script, pc));
+ fprintf(fp, " %s", cs->name);
+ type = cs->format & JOF_TYPEMASK;
+ switch (type) {
+ case JOF_BYTE:
+ if (op == JSOP_TRAP) {
+ op = JS_GetTrapOpcode(cx, script, pc);
+ if (op == JSOP_LIMIT)
+ return 0;
+ len = (ptrdiff_t) js_CodeSpec[op].length;
+ }
+ break;
+
+ case JOF_JUMP:
+ case JOF_JUMPX:
+ off = GetJumpOffset(pc, pc);
+ fprintf(fp, " %u (%d)", loc + off, off);
+ break;
+
+ case JOF_CONST:
+ atom = GET_ATOM(cx, script, pc);
+ bytes = ToDisassemblySource(cx, ATOM_KEY(atom));
+ if (!bytes)
+ return 0;
+ fprintf(fp, " %s", bytes);
+ break;
+
+ case JOF_UINT16:
+ case JOF_LOCAL:
+ fprintf(fp, " %u", GET_UINT16(pc));
+ break;
+
+ case JOF_TABLESWITCH:
+ case JOF_TABLESWITCHX:
+ {
+ jsbytecode *pc2;
+ jsint i, low, high;
+
+ jmplen = (type == JOF_TABLESWITCH) ? JUMP_OFFSET_LEN
+ : JUMPX_OFFSET_LEN;
+ pc2 = pc;
+ off = GetJumpOffset(pc, pc2);
+ pc2 += jmplen;
+ low = GET_JUMP_OFFSET(pc2);
+ pc2 += JUMP_OFFSET_LEN;
+ high = GET_JUMP_OFFSET(pc2);
+ pc2 += JUMP_OFFSET_LEN;
+ fprintf(fp, " defaultOffset %d low %d high %d", off, low, high);
+ for (i = low; i <= high; i++) {
+ off = GetJumpOffset(pc, pc2);
+ fprintf(fp, "\n\t%d: %d", i, off);
+ pc2 += jmplen;
+ }
+ len = 1 + pc2 - pc;
+ break;
+ }
+
+ case JOF_LOOKUPSWITCH:
+ case JOF_LOOKUPSWITCHX:
+ {
+ jsbytecode *pc2;
+ jsatomid npairs;
+
+ jmplen = (type == JOF_LOOKUPSWITCH) ? JUMP_OFFSET_LEN
+ : JUMPX_OFFSET_LEN;
+ pc2 = pc;
+ off = GetJumpOffset(pc, pc2);
+ pc2 += jmplen;
+ npairs = GET_ATOM_INDEX(pc2);
+ pc2 += ATOM_INDEX_LEN;
+ fprintf(fp, " offset %d npairs %u", off, (uintN) npairs);
+ while (npairs) {
+ atom = GET_ATOM(cx, script, pc2);
+ pc2 += ATOM_INDEX_LEN;
+ off = GetJumpOffset(pc, pc2);
+ pc2 += jmplen;
+
+ bytes = ToDisassemblySource(cx, ATOM_KEY(atom));
+ if (!bytes)
+ return 0;
+ fprintf(fp, "\n\t%s: %d", bytes, off);
+ npairs--;
+ }
+ len = 1 + pc2 - pc;
+ break;
+ }
+
+ case JOF_QARG:
+ fprintf(fp, " %u", GET_ARGNO(pc));
+ break;
+
+ case JOF_QVAR:
+ fprintf(fp, " %u", GET_VARNO(pc));
+ break;
+
+ case JOF_INDEXCONST:
+ fprintf(fp, " %u", GET_VARNO(pc));
+ pc += VARNO_LEN;
+ atom = GET_ATOM(cx, script, pc);
+ bytes = ToDisassemblySource(cx, ATOM_KEY(atom));
+ if (!bytes)
+ return 0;
+ fprintf(fp, " %s", bytes);
+ break;
+
+ case JOF_UINT24:
+ if (op == JSOP_FINDNAME) {
+ /* Special case to avoid a JOF_FINDNAME just for this op. */
+ atom = js_GetAtom(cx, &script->atomMap, GET_UINT24(pc));
+ bytes = ToDisassemblySource(cx, ATOM_KEY(atom));
+ if (!bytes)
+ return 0;
+ fprintf(fp, " %s", bytes);
+ break;
+ }
+
+ JS_ASSERT(op == JSOP_UINT24 || op == JSOP_LITERAL);
+ fprintf(fp, " %u", GET_UINT24(pc));
+ break;
+
+ case JOF_LITOPX:
+ atom = js_GetAtom(cx, &script->atomMap, GET_LITERAL_INDEX(pc));
+ bytes = ToDisassemblySource(cx, ATOM_KEY(atom));
+ if (!bytes)
+ return 0;
+
+ /*
+ * Bytecode: JSOP_LITOPX <uint24> op [<varno> if JSOP_DEFLOCALFUN].
+ * Advance pc to point at op.
+ */
+ pc += 1 + LITERAL_INDEX_LEN;
+ op = *pc;
+ cs = &js_CodeSpec[op];
+ fprintf(fp, " %s op %s", bytes, cs->name);
+ if ((cs->format & JOF_TYPEMASK) == JOF_INDEXCONST)
+ fprintf(fp, " %u", GET_VARNO(pc));
+
+ /*
+ * Set len to advance pc to skip op and any other immediates (namely,
+ * <varno> if JSOP_DEFLOCALFUN).
+ */
+ JS_ASSERT(cs->length > ATOM_INDEX_LEN);
+ len = cs->length - ATOM_INDEX_LEN;
+ break;
+
+ default: {
+ char numBuf[12];
+ JS_snprintf(numBuf, sizeof numBuf, "%lx", (unsigned long) cs->format);
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_UNKNOWN_FORMAT, numBuf);
+ return 0;
+ }
+ }
+ fputs("\n", fp);
+ return len;
+}
+
+#endif /* DEBUG */
+
+/************************************************************************/
+
+/*
+ * Sprintf, but with unlimited and automatically allocated buffering.
+ */
+typedef struct Sprinter {
+ JSContext *context; /* context executing the decompiler */
+ JSArenaPool *pool; /* string allocation pool */
+ char *base; /* base address of buffer in pool */
+ size_t size; /* size of buffer allocated at base */
+ ptrdiff_t offset; /* offset of next free char in buffer */
+} Sprinter;
+
+#define INIT_SPRINTER(cx, sp, ap, off) \
+ ((sp)->context = cx, (sp)->pool = ap, (sp)->base = NULL, (sp)->size = 0, \
+ (sp)->offset = off)
+
+#define OFF2STR(sp,off) ((sp)->base + (off))
+#define STR2OFF(sp,str) ((str) - (sp)->base)
+#define RETRACT(sp,str) ((sp)->offset = STR2OFF(sp, str))
+
+static JSBool
+SprintAlloc(Sprinter *sp, size_t nb)
+{
+ char *base;
+
+ base = sp->base;
+ if (!base) {
+ JS_ARENA_ALLOCATE_CAST(base, char *, sp->pool, nb);
+ } else {
+ JS_ARENA_GROW_CAST(base, char *, sp->pool, sp->size, nb);
+ }
+ if (!base) {
+ JS_ReportOutOfMemory(sp->context);
+ return JS_FALSE;
+ }
+ sp->base = base;
+ sp->size += nb;
+ return JS_TRUE;
+}
+
+static ptrdiff_t
+SprintPut(Sprinter *sp, const char *s, size_t len)
+{
+ ptrdiff_t nb, offset;
+ char *bp;
+
+ /* Allocate space for s, including the '\0' at the end. */
+ nb = (sp->offset + len + 1) - sp->size;
+ if (nb > 0 && !SprintAlloc(sp, nb))
+ return -1;
+
+ /* Advance offset and copy s into sp's buffer. */
+ offset = sp->offset;
+ sp->offset += len;
+ bp = sp->base + offset;
+ memmove(bp, s, len);
+ bp[len] = 0;
+ return offset;
+}
+
+static ptrdiff_t
+SprintCString(Sprinter *sp, const char *s)
+{
+ return SprintPut(sp, s, strlen(s));
+}
+
+static ptrdiff_t
+Sprint(Sprinter *sp, const char *format, ...)
+{
+ va_list ap;
+ char *bp;
+ ptrdiff_t offset;
+
+ va_start(ap, format);
+ bp = JS_vsmprintf(format, ap); /* XXX vsaprintf */
+ va_end(ap);
+ if (!bp) {
+ JS_ReportOutOfMemory(sp->context);
+ return -1;
+ }
+ offset = SprintCString(sp, bp);
+ free(bp);
+ return offset;
+}
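+
+/*
+ * Typical Sprinter usage (illustrative; name and line are placeholders; see
+ * js_QuoteString below for a real caller): carve the buffer out of
+ * cx->tempPool and release it when done.
+ *
+ *     void *mark = JS_ARENA_MARK(&cx->tempPool);
+ *     Sprinter sp;
+ *     ptrdiff_t off;
+ *
+ *     INIT_SPRINTER(cx, &sp, &cx->tempPool, 0);
+ *     off = Sprint(&sp, "%s:%u", name, line);
+ *     if (off >= 0)
+ *         ... use OFF2STR(&sp, off) while the arena mark is live ...
+ *     JS_ARENA_RELEASE(&cx->tempPool, mark);
+ */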
+
+const jschar js_EscapeMap[] = {
+ '\b', 'b',
+ '\f', 'f',
+ '\n', 'n',
+ '\r', 'r',
+ '\t', 't',
+ '\v', 'v',
+ '"', '"',
+ '\'', '\'',
+ '\\', '\\',
+ 0
+};
+
+#define DONT_ESCAPE 0x10000
+
+static char *
+QuoteString(Sprinter *sp, JSString *str, uint32 quote)
+{
+ JSBool dontEscape, ok;
+ jschar qc, c;
+ ptrdiff_t off, len, nb;
+ const jschar *s, *t, *u, *z;
+ char *bp;
+
+ /* Sample off first for later return value pointer computation. */
+ dontEscape = (quote & DONT_ESCAPE) != 0;
+ qc = (jschar) quote;
+ off = sp->offset;
+ if (qc && Sprint(sp, "%c", (char)qc) < 0)
+ return NULL;
+
+ /* Loop control variables: z points at end of string sentinel. */
+ s = JSSTRING_CHARS(str);
+ z = s + JSSTRING_LENGTH(str);
+ for (t = s; t < z; s = ++t) {
+ /* Move t forward from s past un-quote-worthy characters. */
+ c = *t;
+ while (JS_ISPRINT(c) && c != qc && c != '\\' && !(c >> 8)) {
+ c = *++t;
+ if (t == z)
+ break;
+ }
+ len = PTRDIFF(t, s, jschar);
+
+ /* Allocate space for s, including the '\0' at the end. */
+ nb = (sp->offset + len + 1) - sp->size;
+ if (nb > 0 && !SprintAlloc(sp, nb))
+ return NULL;
+
+ /* Advance sp->offset and copy s into sp's buffer. */
+ bp = sp->base + sp->offset;
+ sp->offset += len;
+ while (--len >= 0)
+ *bp++ = (char) *s++;
+ *bp = '\0';
+
+ if (t == z)
+ break;
+
+ /* Use js_EscapeMap, \u, or \x only if necessary. */
+ if ((u = js_strchr(js_EscapeMap, c)) != NULL) {
+ ok = dontEscape
+ ? Sprint(sp, "%c", (char)c) >= 0
+ : Sprint(sp, "\\%c", (char)u[1]) >= 0;
+ } else {
+#ifdef JS_C_STRINGS_ARE_UTF8
+ /* If this is a surrogate pair, make sure to print the pair. */
+ if (c >= 0xD800 && c <= 0xDBFF) {
+ jschar buffer[3];
+ buffer[0] = c;
+ buffer[1] = *++t;
+ buffer[2] = 0;
+ if (t == z) {
+ char numbuf[10];
+ JS_snprintf(numbuf, sizeof numbuf, "0x%x", c);
+ JS_ReportErrorFlagsAndNumber(sp->context, JSREPORT_ERROR,
+ js_GetErrorMessage, NULL,
+ JSMSG_BAD_SURROGATE_CHAR,
+ numbuf);
+ ok = JS_FALSE;
+ break;
+ }
+ ok = Sprint(sp, "%hs", buffer) >= 0;
+ } else {
+ /* Print as UTF-8 string. */
+ ok = Sprint(sp, "%hc", c) >= 0;
+ }
+#else
+ /* Use \uXXXX or \xXX if the string can't be displayed as UTF-8. */
+ ok = Sprint(sp, (c >> 8) ? "\\u%04X" : "\\x%02X", c) >= 0;
+#endif
+ }
+ if (!ok)
+ return NULL;
+ }
+
+ /* Sprint the closing quote and return the quoted string. */
+ if (qc && Sprint(sp, "%c", (char)qc) < 0)
+ return NULL;
+
+ /*
+ * If we haven't Sprint'd anything yet, Sprint an empty string so that
+ * the OFF2STR below gives a valid result.
+ */
+ if (off == sp->offset && Sprint(sp, "") < 0)
+ return NULL;
+ return OFF2STR(sp, off);
+}
+
+JSString *
+js_QuoteString(JSContext *cx, JSString *str, jschar quote)
+{
+ void *mark;
+ Sprinter sprinter;
+ char *bytes;
+ JSString *escstr;
+
+ mark = JS_ARENA_MARK(&cx->tempPool);
+ INIT_SPRINTER(cx, &sprinter, &cx->tempPool, 0);
+ bytes = QuoteString(&sprinter, str, quote);
+ escstr = bytes ? JS_NewStringCopyZ(cx, bytes) : NULL;
+ JS_ARENA_RELEASE(&cx->tempPool, mark);
+ return escstr;
+}
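+
+/*
+ * Usage note (illustrative): js_QuoteString(cx, str, (jschar) '"') returns a
+ * new JSString wrapped in double quotes, with characters escaped through
+ * js_EscapeMap or as \uXXXX/\xXX (or UTF-8, depending on the build), while a
+ * zero quote character escapes the contents without adding surrounding
+ * quotes, as the decompiler's case-label handling in DecompileSwitch below
+ * relies on.
+ */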
+
+/************************************************************************/
+
+#if JS_HAS_BLOCK_SCOPE
+typedef enum JSBraceState {
+ ALWAYS_BRACE,
+ MAYBE_BRACE,
+ DONT_BRACE
+} JSBraceState;
+#endif
+
+struct JSPrinter {
+ Sprinter sprinter; /* base class state */
+ JSArenaPool pool; /* string allocation pool */
+ uintN indent; /* indentation in spaces */
+ JSPackedBool pretty; /* pretty-print: indent, use newlines */
+ JSPackedBool grouped; /* in parenthesized expression context */
+ JSScript *script; /* script being printed */
+ jsbytecode *dvgfence; /* js_DecompileValueGenerator fencepost */
+ JSScope *scope; /* script function scope */
+#if JS_HAS_BLOCK_SCOPE
+ JSBraceState braceState; /* remove braces around let declaration */
+ ptrdiff_t spaceOffset; /* -1 or offset of space before maybe-{ */
+#endif
+};
+
+/*
+ * Hack another flag, a la JS_DONT_PRETTY_PRINT, into uintN indent parameters
+ * to functions such as js_DecompileFunction and js_NewPrinter. This time, as
+ * opposed to JS_DONT_PRETTY_PRINT back in the dark ages, we can assume that a
+ * uintN is at least 32 bits.
+ */
+#define JS_IN_GROUP_CONTEXT 0x10000
+
+JSPrinter *
+js_NewPrinter(JSContext *cx, const char *name, uintN indent, JSBool pretty)
+{
+ JSPrinter *jp;
+
+ jp = (JSPrinter *) JS_malloc(cx, sizeof(JSPrinter));
+ if (!jp)
+ return NULL;
+ INIT_SPRINTER(cx, &jp->sprinter, &jp->pool, 0);
+ JS_InitArenaPool(&jp->pool, name, 256, 1);
+ jp->indent = indent & ~JS_IN_GROUP_CONTEXT;
+ jp->pretty = pretty;
+ jp->grouped = (indent & JS_IN_GROUP_CONTEXT) != 0;
+ jp->script = NULL;
+ jp->dvgfence = NULL;
+ jp->scope = NULL;
+#if JS_HAS_BLOCK_SCOPE
+ jp->braceState = ALWAYS_BRACE;
+ jp->spaceOffset = -1;
+#endif
+ return jp;
+}
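+
+/*
+ * Usage sketch (illustrative): a caller decompiling a parenthesized
+ * expression folds the flag into the indent argument.
+ *
+ *     jp = js_NewPrinter(cx, "example", indent | JS_IN_GROUP_CONTEXT,
+ *                        JS_TRUE);
+ *     if (!jp)
+ *         return NULL;
+ *     ...
+ *     js_DestroyPrinter(jp);
+ *
+ * js_NewPrinter strips the flag back out of jp->indent and records it in
+ * jp->grouped.
+ */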
+
+void
+js_DestroyPrinter(JSPrinter *jp)
+{
+ JS_FinishArenaPool(&jp->pool);
+ JS_free(jp->sprinter.context, jp);
+}
+
+JSString *
+js_GetPrinterOutput(JSPrinter *jp)
+{
+ JSContext *cx;
+ JSString *str;
+
+ cx = jp->sprinter.context;
+ if (!jp->sprinter.base)
+ return cx->runtime->emptyString;
+ str = JS_NewStringCopyZ(cx, jp->sprinter.base);
+ if (!str)
+ return NULL;
+ JS_FreeArenaPool(&jp->pool);
+ INIT_SPRINTER(cx, &jp->sprinter, &jp->pool, 0);
+ return str;
+}
+
+#if !JS_HAS_BLOCK_SCOPE
+# define SET_MAYBE_BRACE(jp) jp
+# define CLEAR_MAYBE_BRACE(jp) jp
+#else
+# define SET_MAYBE_BRACE(jp) ((jp)->braceState = MAYBE_BRACE, (jp))
+# define CLEAR_MAYBE_BRACE(jp) ((jp)->braceState = ALWAYS_BRACE, (jp))
+
+static void
+SetDontBrace(JSPrinter *jp)
+{
+ ptrdiff_t offset;
+ const char *bp;
+
+ /* When not pretty-printing, newline after brace is chopped. */
+ JS_ASSERT(jp->spaceOffset < 0);
+ offset = jp->sprinter.offset - (jp->pretty ? 3 : 2);
+
+ /* The shortest case is "if (x) {". */
+ JS_ASSERT(offset >= 6);
+ bp = jp->sprinter.base;
+ if (bp[offset+0] == ' ' && bp[offset+1] == '{') {
+ JS_ASSERT(!jp->pretty || bp[offset+2] == '\n');
+ jp->spaceOffset = offset;
+ jp->braceState = DONT_BRACE;
+ }
+}
+#endif
+
+int
+js_printf(JSPrinter *jp, const char *format, ...)
+{
+ va_list ap;
+ char *bp, *fp;
+ int cc;
+
+ if (*format == '\0')
+ return 0;
+
+ va_start(ap, format);
+
+ /* If pretty-printing, expand magic tab into a run of jp->indent spaces. */
+ if (*format == '\t') {
+ format++;
+
+#if JS_HAS_BLOCK_SCOPE
+ if (*format == '}' && jp->braceState != ALWAYS_BRACE) {
+ JSBraceState braceState;
+
+ braceState = jp->braceState;
+ jp->braceState = ALWAYS_BRACE;
+ if (braceState == DONT_BRACE) {
+ ptrdiff_t offset, delta, from;
+
+ JS_ASSERT(format[1] == '\n' || format[1] == ' ');
+ offset = jp->spaceOffset;
+ JS_ASSERT(offset >= 6);
+
+ /* Replace " {\n" at the end of jp->sprinter with "\n". */
+ bp = jp->sprinter.base;
+ if (bp[offset+0] == ' ' && bp[offset+1] == '{') {
+ delta = 2;
+ if (jp->pretty) {
+ /* If pretty, we don't have to worry about 'else'. */
+ JS_ASSERT(bp[offset+2] == '\n');
+ } else if (bp[offset-1] != ')') {
+ /* Must keep ' ' to avoid 'dolet' or 'elselet'. */
+ ++offset;
+ delta = 1;
+ }
+
+ from = offset + delta;
+ memmove(bp + offset, bp + from, jp->sprinter.offset - from);
+ jp->sprinter.offset -= delta;
+ jp->spaceOffset = -1;
+
+ format += 2;
+ if (*format == '\0')
+ return 0;
+ }
+ }
+ }
+#endif
+
+ if (jp->pretty && Sprint(&jp->sprinter, "%*s", jp->indent, "") < 0)
+ return -1;
+ }
+
+ /* Suppress newlines (must be once per format, at the end) if not pretty. */
+ fp = NULL;
+ if (!jp->pretty && format[cc = strlen(format) - 1] == '\n') {
+ fp = JS_strdup(jp->sprinter.context, format);
+ if (!fp)
+ return -1;
+ fp[cc] = '\0';
+ format = fp;
+ }
+
+ /* Allocate temp space, convert format, and put. */
+ bp = JS_vsmprintf(format, ap); /* XXX vsaprintf */
+ if (fp) {
+ JS_free(jp->sprinter.context, fp);
+ format = NULL;
+ }
+ if (!bp) {
+ JS_ReportOutOfMemory(jp->sprinter.context);
+ return -1;
+ }
+
+ cc = strlen(bp);
+ if (SprintPut(&jp->sprinter, bp, (size_t)cc) < 0)
+ cc = -1;
+ free(bp);
+
+ va_end(ap);
+ return cc;
+}
+
+JSBool
+js_puts(JSPrinter *jp, const char *s)
+{
+ return SprintCString(&jp->sprinter, s) >= 0;
+}
+
+/************************************************************************/
+
+typedef struct SprintStack {
+ Sprinter sprinter; /* sprinter for postfix to infix buffering */
+ ptrdiff_t *offsets; /* stack of postfix string offsets */
+ jsbytecode *opcodes; /* parallel stack of JS opcodes */
+ uintN top; /* top of stack index */
+ uintN inArrayInit; /* array initialiser/comprehension level */
+ JSPrinter *printer; /* permanent output goes here */
+} SprintStack;
+
+/*
+ * Get a stacked offset from ss->sprinter.base, or if the stacked value |off|
+ * is negative, lazily fetch the generating pc at |spindex = 1 + off| and try
+ * to decompile the code that generated the missing value. This is used when
+ * reporting errors, where the model stack will lack |pcdepth| non-negative
+ * offsets (see js_DecompileValueGenerator and js_DecompileCode).
+ *
+ * If the stacked offset is -1, return 0 to index the NUL padding at the start
+ * of ss->sprinter.base. If this happens, it means there is a decompiler bug
+ * to fix, but it won't violate memory safety.
+ */
+static ptrdiff_t
+GetOff(SprintStack *ss, uintN i)
+{
+ ptrdiff_t off;
+ JSString *str;
+
+ off = ss->offsets[i];
+ if (off < 0) {
+#if defined DEBUG_brendan || defined DEBUG_mrbkap || defined DEBUG_crowder
+ JS_ASSERT(off < -1);
+#endif
+ if (++off == 0) {
+ if (!ss->sprinter.base && SprintPut(&ss->sprinter, "", 0) >= 0)
+ memset(ss->sprinter.base, 0, ss->sprinter.offset);
+ return 0;
+ }
+
+ str = js_DecompileValueGenerator(ss->sprinter.context, off,
+ JSVAL_NULL, NULL);
+ if (!str)
+ return 0;
+ off = SprintCString(&ss->sprinter, JS_GetStringBytes(str));
+ if (off < 0)
+ off = 0;
+ ss->offsets[i] = off;
+ }
+ return off;
+}
+
+static const char *
+GetStr(SprintStack *ss, uintN i)
+{
+ ptrdiff_t off;
+
+ /*
+ * Must call GetOff before using ss->sprinter.base, since it may be null
+ * until bootstrapped by GetOff.
+ */
+ off = GetOff(ss, i);
+ return OFF2STR(&ss->sprinter, off);
+}
+
+/* Gap between stacked strings to allow for insertion of parens and commas. */
+#define PAREN_SLOP (2 + 1)
+
+/*
+ * These pseudo-ops help js_DecompileValueGenerator decompile JSOP_SETNAME,
+ * JSOP_SETPROP, and JSOP_SETELEM, respectively. They are never stored in
+ * bytecode, so they don't preempt valid opcodes.
+ */
+#define JSOP_GETPROP2 256
+#define JSOP_GETELEM2 257
+
+static JSBool
+PushOff(SprintStack *ss, ptrdiff_t off, JSOp op)
+{
+ uintN top;
+
+ if (!SprintAlloc(&ss->sprinter, PAREN_SLOP))
+ return JS_FALSE;
+
+ /* ss->top points to the next free slot; be paranoid about overflow. */
+ top = ss->top;
+ JS_ASSERT(top < ss->printer->script->depth);
+ if (top >= ss->printer->script->depth) {
+ JS_ReportOutOfMemory(ss->sprinter.context);
+ return JS_FALSE;
+ }
+
+ /* The opcodes stack must contain real bytecodes that index js_CodeSpec. */
+ ss->offsets[top] = off;
+ ss->opcodes[top] = (op == JSOP_GETPROP2) ? JSOP_GETPROP
+ : (op == JSOP_GETELEM2) ? JSOP_GETELEM
+ : (jsbytecode) op;
+ ss->top = ++top;
+ memset(OFF2STR(&ss->sprinter, ss->sprinter.offset), 0, PAREN_SLOP);
+ ss->sprinter.offset += PAREN_SLOP;
+ return JS_TRUE;
+}
+
+static ptrdiff_t
+PopOff(SprintStack *ss, JSOp op)
+{
+ uintN top;
+ const JSCodeSpec *cs, *topcs;
+ ptrdiff_t off;
+
+ /* ss->top points to the next free slot; be paranoid about underflow. */
+ top = ss->top;
+ JS_ASSERT(top != 0);
+ if (top == 0)
+ return 0;
+
+ ss->top = --top;
+ off = GetOff(ss, top);
+ topcs = &js_CodeSpec[ss->opcodes[top]];
+ cs = &js_CodeSpec[op];
+ if (topcs->prec != 0 && topcs->prec < cs->prec) {
+ ss->sprinter.offset = ss->offsets[top] = off - 2;
+ off = Sprint(&ss->sprinter, "(%s)", OFF2STR(&ss->sprinter, off));
+ } else {
+ ss->sprinter.offset = off;
+ }
+ return off;
+}
+
+static const char *
+PopStr(SprintStack *ss, JSOp op)
+{
+ ptrdiff_t off;
+
+ off = PopOff(ss, op);
+ return OFF2STR(&ss->sprinter, off);
+}
+
+typedef struct TableEntry {
+ jsval key;
+ ptrdiff_t offset;
+ JSAtom *label;
+ jsint order; /* source order for stable tableswitch sort */
+} TableEntry;
+
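+/*
+ * Descriptive note (not upstream): sort comparator for TableEntry records,
+ * ordering by bytecode offset and breaking ties with the recorded source
+ * order so the tableswitch sort is stable.
+ */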
+static JSBool
+CompareOffsets(void *arg, const void *v1, const void *v2, int *result)
+{
+ ptrdiff_t offset_diff;
+ const TableEntry *te1 = (const TableEntry *) v1,
+ *te2 = (const TableEntry *) v2;
+
+ offset_diff = te1->offset - te2->offset;
+ *result = (offset_diff == 0 ? te1->order - te2->order
+ : offset_diff < 0 ? -1
+ : 1);
+ return JS_TRUE;
+}
+
+static jsbytecode *
+Decompile(SprintStack *ss, jsbytecode *pc, intN nb);
+
+static JSBool
+DecompileSwitch(SprintStack *ss, TableEntry *table, uintN tableLength,
+ jsbytecode *pc, ptrdiff_t switchLength,
+ ptrdiff_t defaultOffset, JSBool isCondSwitch)
+{
+ JSContext *cx;
+ JSPrinter *jp;
+ ptrdiff_t off, off2, diff, caseExprOff;
+ char *lval, *rval;
+ uintN i;
+ jsval key;
+ JSString *str;
+
+ cx = ss->sprinter.context;
+ jp = ss->printer;
+
+ /* JSOP_CONDSWITCH doesn't pop, unlike JSOP_{LOOKUP,TABLE}SWITCH. */
+ off = isCondSwitch ? GetOff(ss, ss->top-1) : PopOff(ss, JSOP_NOP);
+ lval = OFF2STR(&ss->sprinter, off);
+
+ js_printf(CLEAR_MAYBE_BRACE(jp), "\tswitch (%s) {\n", lval);
+
+ if (tableLength) {
+ diff = table[0].offset - defaultOffset;
+ if (diff > 0) {
+ jp->indent += 2;
+ js_printf(jp, "\t%s:\n", js_default_str);
+ jp->indent += 2;
+ if (!Decompile(ss, pc + defaultOffset, diff))
+ return JS_FALSE;
+ jp->indent -= 4;
+ }
+
+ caseExprOff = isCondSwitch ? JSOP_CONDSWITCH_LENGTH : 0;
+
+ for (i = 0; i < tableLength; i++) {
+ off = table[i].offset;
+ off2 = (i + 1 < tableLength) ? table[i + 1].offset : switchLength;
+
+ key = table[i].key;
+ if (isCondSwitch) {
+ ptrdiff_t nextCaseExprOff;
+
+ /*
+ * key encodes the JSOP_CASE bytecode's offset from switchtop.
+ * The next case expression follows immediately, unless we are
+ * at the last case.
+ */
+ nextCaseExprOff = (ptrdiff_t)JSVAL_TO_INT(key);
+ nextCaseExprOff += js_CodeSpec[pc[nextCaseExprOff]].length;
+ jp->indent += 2;
+ if (!Decompile(ss, pc + caseExprOff,
+ nextCaseExprOff - caseExprOff)) {
+ return JS_FALSE;
+ }
+ caseExprOff = nextCaseExprOff;
+
+ /* Balance the stack as if this JSOP_CASE matched. */
+ --ss->top;
+ } else {
+ /*
+ * key comes from an atom, not the decompiler, so we need to
+ * quote it if it's a string literal. But if table[i].label
+ * is non-null, key was constant-propagated and label is the
+ * name of the const we should show as the case label. We set
+ * key to undefined so that QuoteString escapes the identifier
+ * if non-ASCII characters require it, but does not quote it.
+ */
+ if (table[i].label) {
+ str = ATOM_TO_STRING(table[i].label);
+ key = JSVAL_VOID;
+ } else {
+ str = js_ValueToString(cx, key);
+ if (!str)
+ return JS_FALSE;
+ }
+ rval = QuoteString(&ss->sprinter, str,
+ (jschar)(JSVAL_IS_STRING(key) ? '"' : 0));
+ if (!rval)
+ return JS_FALSE;
+ RETRACT(&ss->sprinter, rval);
+ jp->indent += 2;
+ js_printf(jp, "\tcase %s:\n", rval);
+ }
+
+ jp->indent += 2;
+ if (off <= defaultOffset && defaultOffset < off2) {
+ diff = defaultOffset - off;
+ if (diff != 0) {
+ if (!Decompile(ss, pc + off, diff))
+ return JS_FALSE;
+ off = defaultOffset;
+ }
+ jp->indent -= 2;
+ js_printf(jp, "\t%s:\n", js_default_str);
+ jp->indent += 2;
+ }
+ if (!Decompile(ss, pc + off, off2 - off))
+ return JS_FALSE;
+ jp->indent -= 4;
+
+ /* Re-balance as if last JSOP_CASE or JSOP_DEFAULT mismatched. */
+ if (isCondSwitch)
+ ++ss->top;
+ }
+ }
+
+ if (defaultOffset == switchLength) {
+ jp->indent += 2;
+ js_printf(jp, "\t%s:;\n", js_default_str);
+ jp->indent -= 2;
+ }
+ js_printf(jp, "\t}\n");
+
+ /* By the end of a JSOP_CONDSWITCH, the discriminant has been popped. */
+ if (isCondSwitch)
+ --ss->top;
+ return JS_TRUE;
+}
+
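+/*
+ * Descriptive note (not upstream): recover the name of an argument or local
+ * variable from its slot number by walking jp->scope (and the scopes of its
+ * objects' prototypes) for a property whose getter matches and whose shortid
+ * equals slot.
+ */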
+static JSAtom *
+GetSlotAtom(JSPrinter *jp, JSPropertyOp getter, uintN slot)
+{
+ JSScope *scope;
+ JSScopeProperty *sprop;
+ JSObject *obj, *proto;
+
+ scope = jp->scope;
+ while (scope) {
+ for (sprop = SCOPE_LAST_PROP(scope); sprop; sprop = sprop->parent) {
+ if (sprop->getter != getter)
+ continue;
+ JS_ASSERT(sprop->flags & SPROP_HAS_SHORTID);
+ JS_ASSERT(JSID_IS_ATOM(sprop->id));
+ if ((uintN) sprop->shortid == slot)
+ return JSID_TO_ATOM(sprop->id);
+ }
+ obj = scope->object;
+ if (!obj)
+ break;
+ proto = OBJ_GET_PROTO(jp->sprinter.context, obj);
+ if (!proto)
+ break;
+ scope = OBJ_SCOPE(proto);
+ }
+ return NULL;
+}
+
+/*
+ * NB: Indexed by SRC_DECL_* defines from jsemit.h.
+ */
+static const char * const var_prefix[] = {"var ", "const ", "let "};
+
+static const char *
+VarPrefix(jssrcnote *sn)
+{
+ if (sn && (SN_TYPE(sn) == SRC_DECL || SN_TYPE(sn) == SRC_GROUPASSIGN)) {
+ ptrdiff_t type = js_GetSrcNoteOffset(sn, 0);
+ if ((uintN)type <= SRC_DECL_LET)
+ return var_prefix[type];
+ }
+ return "";
+}
+#define LOCAL_ASSERT_RV(expr, rv) \
+ JS_BEGIN_MACRO \
+ JS_ASSERT(expr); \
+ if (!(expr)) return (rv); \
+ JS_END_MACRO
+
+const char *
+GetLocal(SprintStack *ss, jsint i)
+{
+ ptrdiff_t off;
+ JSContext *cx;
+ JSScript *script;
+ jsatomid j, n;
+ JSAtom *atom;
+ JSObject *obj;
+ jsint depth, count;
+ JSScopeProperty *sprop;
+ const char *rval;
+
+#define LOCAL_ASSERT(expr) LOCAL_ASSERT_RV(expr, "")
+
+ off = ss->offsets[i];
+ if (off >= 0)
+ return OFF2STR(&ss->sprinter, off);
+
+ /*
+ * We must be called from js_DecompileValueGenerator (via Decompile) when
+ * dereferencing a local that's undefined or null. Search script->atomMap
+ * for the block containing this local by its stack index, i.
+ */
+ cx = ss->sprinter.context;
+ script = ss->printer->script;
+ for (j = 0, n = script->atomMap.length; j < n; j++) {
+ atom = script->atomMap.vector[j];
+ if (ATOM_IS_OBJECT(atom)) {
+ obj = ATOM_TO_OBJECT(atom);
+ if (OBJ_GET_CLASS(cx, obj) == &js_BlockClass) {
+ depth = OBJ_BLOCK_DEPTH(cx, obj);
+ count = OBJ_BLOCK_COUNT(cx, obj);
+ if ((jsuint)(i - depth) < (jsuint)count)
+ break;
+ }
+ }
+ }
+
+ LOCAL_ASSERT(j < n);
+ i -= depth;
+ for (sprop = OBJ_SCOPE(obj)->lastProp; sprop; sprop = sprop->parent) {
+ if (sprop->shortid == i)
+ break;
+ }
+
+ LOCAL_ASSERT(sprop && JSID_IS_ATOM(sprop->id));
+ atom = JSID_TO_ATOM(sprop->id);
+ rval = QuoteString(&ss->sprinter, ATOM_TO_STRING(atom), 0);
+ if (!rval)
+ return NULL;
+ RETRACT(&ss->sprinter, rval);
+ return rval;
+
+#undef LOCAL_ASSERT
+}
+
+#if JS_HAS_DESTRUCTURING
+
+#define LOCAL_ASSERT(expr) LOCAL_ASSERT_RV(expr, NULL)
+#define LOAD_OP_DATA(pc) (oplen = (cs = &js_CodeSpec[op = *pc])->length)
+
+static jsbytecode *
+DecompileDestructuring(SprintStack *ss, jsbytecode *pc, jsbytecode *endpc);
+
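+/*
+ * Descriptive note (not upstream): decompile a single left-hand-side element
+ * of a destructuring pattern. Sets *hole to JS_TRUE when the element is an
+ * elision (a bare JSOP_POP).
+ */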
+static jsbytecode *
+DecompileDestructuringLHS(SprintStack *ss, jsbytecode *pc, jsbytecode *endpc,
+ JSBool *hole)
+{
+ JSContext *cx;
+ JSPrinter *jp;
+ JSOp op;
+ const JSCodeSpec *cs;
+ uintN oplen, i;
+ const char *lval, *xval;
+ ptrdiff_t todo;
+ JSAtom *atom;
+
+ *hole = JS_FALSE;
+ cx = ss->sprinter.context;
+ jp = ss->printer;
+ LOAD_OP_DATA(pc);
+
+ switch (op) {
+ case JSOP_POP:
+ *hole = JS_TRUE;
+ todo = SprintPut(&ss->sprinter, ", ", 2);
+ break;
+
+ case JSOP_DUP:
+ pc = DecompileDestructuring(ss, pc, endpc);
+ if (!pc)
+ return NULL;
+ if (pc == endpc)
+ return pc;
+ LOAD_OP_DATA(pc);
+ lval = PopStr(ss, JSOP_NOP);
+ todo = SprintCString(&ss->sprinter, lval);
+ if (op == JSOP_SETSP)
+ return pc;
+ LOCAL_ASSERT(*pc == JSOP_POP);
+ break;
+
+ case JSOP_SETARG:
+ case JSOP_SETVAR:
+ case JSOP_SETGVAR:
+ case JSOP_SETLOCAL:
+ LOCAL_ASSERT(pc[oplen] == JSOP_POP || pc[oplen] == JSOP_SETSP);
+ /* FALL THROUGH */
+
+ case JSOP_SETLOCALPOP:
+ i = GET_UINT16(pc);
+ atom = NULL;
+ lval = NULL;
+ if (op == JSOP_SETARG)
+ atom = GetSlotAtom(jp, js_GetArgument, i);
+ else if (op == JSOP_SETVAR)
+ atom = GetSlotAtom(jp, js_GetLocalVariable, i);
+ else if (op == JSOP_SETGVAR)
+ atom = GET_ATOM(cx, jp->script, pc);
+ else
+ lval = GetLocal(ss, i);
+ if (atom)
+ lval = js_AtomToPrintableString(cx, atom);
+ LOCAL_ASSERT(lval);
+ todo = SprintCString(&ss->sprinter, lval);
+ if (op != JSOP_SETLOCALPOP) {
+ pc += oplen;
+ if (pc == endpc)
+ return pc;
+ LOAD_OP_DATA(pc);
+ if (op == JSOP_SETSP)
+ return pc;
+ LOCAL_ASSERT(op == JSOP_POP);
+ }
+ break;
+
+ default:
+ /*
+ * We may need to auto-parenthesize the left-most value decompiled
+ * here, so add back PAREN_SLOP temporarily. Then decompile until the
+ * opcode that would reduce the stack depth to (ss->top-1), which we
+ * pass to Decompile encoded as -(ss->top-1) - 1 or just -ss->top for
+ * the nb parameter.
+ */
+ todo = ss->sprinter.offset;
+ ss->sprinter.offset = todo + PAREN_SLOP;
+ pc = Decompile(ss, pc, -ss->top);
+ if (!pc)
+ return NULL;
+ if (pc == endpc)
+ return pc;
+ LOAD_OP_DATA(pc);
+ LOCAL_ASSERT(op == JSOP_ENUMELEM || op == JSOP_ENUMCONSTELEM);
+ xval = PopStr(ss, JSOP_NOP);
+ lval = PopStr(ss, JSOP_GETPROP);
+ ss->sprinter.offset = todo;
+ if (*lval == '\0') {
+ /* lval is from JSOP_BINDNAME, so just print xval. */
+ todo = SprintCString(&ss->sprinter, xval);
+ } else if (*xval == '\0') {
+ /* xval is from JSOP_SETCALL or JSOP_BINDXMLNAME, print lval. */
+ todo = SprintCString(&ss->sprinter, lval);
+ } else {
+ todo = Sprint(&ss->sprinter,
+ (js_CodeSpec[ss->opcodes[ss->top+1]].format
+ & JOF_XMLNAME)
+ ? "%s.%s"
+ : "%s[%s]",
+ lval, xval);
+ }
+ break;
+ }
+
+ if (todo < 0)
+ return NULL;
+
+ LOCAL_ASSERT(pc < endpc);
+ pc += oplen;
+ return pc;
+}
+
+/*
+ * Starting with a SRC_DESTRUCT-annotated JSOP_DUP, decompile a destructuring
+ * left-hand side object or array initialiser, including nested destructuring
+ * initialisers. On successful return, the decompilation will be pushed on ss
+ * and the return value will point to the POP or GROUP bytecode following the
+ * destructuring expression.
+ *
+ * At any point, if pc is equal to endpc and would otherwise advance, we stop
+ * immediately and return endpc.
+ */
+static jsbytecode *
+DecompileDestructuring(SprintStack *ss, jsbytecode *pc, jsbytecode *endpc)
+{
+ ptrdiff_t head, todo;
+ JSContext *cx;
+ JSPrinter *jp;
+ JSOp op, saveop;
+ const JSCodeSpec *cs;
+ uintN oplen;
+ jsint i, lasti;
+ jsdouble d;
+ const char *lval;
+ jsbytecode *pc2;
+ jsatomid atomIndex;
+ JSAtom *atom;
+ jssrcnote *sn;
+ JSString *str;
+ JSBool hole;
+
+ LOCAL_ASSERT(*pc == JSOP_DUP);
+ pc += JSOP_DUP_LENGTH;
+
+ /*
+ * Set head so we can rewrite '[' to '{' as needed. Back up PAREN_SLOP
+ * chars so the destructuring decompilation accumulates contiguously in
+ * ss->sprinter starting with "[".
+ */
+ head = SprintPut(&ss->sprinter, "[", 1);
+ if (head < 0 || !PushOff(ss, head, JSOP_NOP))
+ return NULL;
+ ss->sprinter.offset -= PAREN_SLOP;
+ LOCAL_ASSERT(head == ss->sprinter.offset - 1);
+ LOCAL_ASSERT(*OFF2STR(&ss->sprinter, head) == '[');
+
+ cx = ss->sprinter.context;
+ jp = ss->printer;
+ lasti = -1;
+
+ while (pc < endpc) {
+ LOAD_OP_DATA(pc);
+ saveop = op;
+
+ switch (op) {
+ case JSOP_POP:
+ pc += oplen;
+ goto out;
+
+ /* Handle the optimized number-pushing opcodes. */
+ case JSOP_ZERO: d = i = 0; goto do_getelem;
+ case JSOP_ONE: d = i = 1; goto do_getelem;
+ case JSOP_UINT16: d = i = GET_UINT16(pc); goto do_getelem;
+ case JSOP_UINT24: d = i = GET_UINT24(pc); goto do_getelem;
+
+ /* Handle the extended literal form of JSOP_NUMBER. */
+ case JSOP_LITOPX:
+ atomIndex = GET_LITERAL_INDEX(pc);
+ pc2 = pc + 1 + LITERAL_INDEX_LEN;
+ op = *pc2;
+ LOCAL_ASSERT(op == JSOP_NUMBER);
+ goto do_getatom;
+
+ case JSOP_NUMBER:
+ atomIndex = GET_ATOM_INDEX(pc);
+
+ do_getatom:
+ atom = js_GetAtom(cx, &jp->script->atomMap, atomIndex);
+ d = *ATOM_TO_DOUBLE(atom);
+ LOCAL_ASSERT(JSDOUBLE_IS_FINITE(d) && !JSDOUBLE_IS_NEGZERO(d));
+ i = (jsint)d;
+
+ do_getelem:
+ sn = js_GetSrcNote(jp->script, pc);
+ pc += oplen;
+ if (pc == endpc)
+ return pc;
+ LOAD_OP_DATA(pc);
+ LOCAL_ASSERT(op == JSOP_GETELEM);
+
+ /* Distinguish object from array by opcode or source note. */
+ if (saveop == JSOP_LITERAL ||
+ (sn && SN_TYPE(sn) == SRC_INITPROP)) {
+ *OFF2STR(&ss->sprinter, head) = '{';
+ if (Sprint(&ss->sprinter, "%g: ", d) < 0)
+ return NULL;
+ } else {
+ /* Sanity check for the gnarly control flow above. */
+ LOCAL_ASSERT(i == d);
+
+ /* Fill in any holes (holes at the end don't matter). */
+ while (++lasti < i) {
+ if (SprintPut(&ss->sprinter, ", ", 2) < 0)
+ return NULL;
+ }
+ }
+ break;
+
+ case JSOP_LITERAL:
+ atomIndex = GET_LITERAL_INDEX(pc);
+ goto do_getatom;
+
+ case JSOP_GETPROP:
+ *OFF2STR(&ss->sprinter, head) = '{';
+ atom = GET_ATOM(cx, jp->script, pc);
+ str = ATOM_TO_STRING(atom);
+ if (!QuoteString(&ss->sprinter, str,
+ js_IsIdentifier(str) ? 0 : (jschar)'\'')) {
+ return NULL;
+ }
+ if (SprintPut(&ss->sprinter, ": ", 2) < 0)
+ return NULL;
+ break;
+
+ default:
+ LOCAL_ASSERT(0);
+ }
+
+ pc += oplen;
+ if (pc == endpc)
+ return pc;
+
+ /*
+ * Decompile the left-hand side expression whose bytecode starts at pc
+ * and continues for a bounded number of bytecodes or stack operations
+ * (and which in any event stops before endpc).
+ */
+ pc = DecompileDestructuringLHS(ss, pc, endpc, &hole);
+ if (!pc)
+ return NULL;
+ if (pc == endpc || *pc != JSOP_DUP)
+ break;
+
+ /*
+ * Check for SRC_DESTRUCT on this JSOP_DUP, which would mean another
+ * destructuring initialiser abuts this one, and we should stop. This
+ * happens with source of the form '[a] = [b] = c'.
+ */
+ sn = js_GetSrcNote(jp->script, pc);
+ if (sn && SN_TYPE(sn) == SRC_DESTRUCT)
+ break;
+
+ if (!hole && SprintPut(&ss->sprinter, ", ", 2) < 0)
+ return NULL;
+
+ pc += JSOP_DUP_LENGTH;
+ }
+
+out:
+ lval = OFF2STR(&ss->sprinter, head);
+ todo = SprintPut(&ss->sprinter, (*lval == '[') ? "]" : "}", 1);
+ if (todo < 0)
+ return NULL;
+ return pc;
+}
+
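+/*
+ * Descriptive note (not upstream): decompile a group assignment of the form
+ * [a, b] = [x, y], starting at the JSOP_PUSH or JSOP_GETLOCAL annotated with
+ * SRC_GROUPASSIGN. On success, *todop receives the offset of the decompiled
+ * assignment text.
+ */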
+static jsbytecode *
+DecompileGroupAssignment(SprintStack *ss, jsbytecode *pc, jsbytecode *endpc,
+ jssrcnote *sn, ptrdiff_t *todop)
+{
+ JSOp op;
+ const JSCodeSpec *cs;
+ uintN oplen, start, end, i;
+ ptrdiff_t todo;
+ JSBool hole;
+ const char *rval;
+
+ LOAD_OP_DATA(pc);
+ LOCAL_ASSERT(op == JSOP_PUSH || op == JSOP_GETLOCAL);
+
+ todo = Sprint(&ss->sprinter, "%s[", VarPrefix(sn));
+ if (todo < 0 || !PushOff(ss, todo, JSOP_NOP))
+ return NULL;
+ ss->sprinter.offset -= PAREN_SLOP;
+
+ for (;;) {
+ pc += oplen;
+ if (pc == endpc)
+ return pc;
+ pc = DecompileDestructuringLHS(ss, pc, endpc, &hole);
+ if (!pc)
+ return NULL;
+ if (pc == endpc)
+ return pc;
+ LOAD_OP_DATA(pc);
+ if (op != JSOP_PUSH && op != JSOP_GETLOCAL)
+ break;
+ if (!hole && SprintPut(&ss->sprinter, ", ", 2) < 0)
+ return NULL;
+ }
+
+ LOCAL_ASSERT(op == JSOP_SETSP);
+ if (SprintPut(&ss->sprinter, "] = [", 5) < 0)
+ return NULL;
+
+ start = GET_UINT16(pc);
+ end = ss->top - 1;
+ for (i = start; i < end; i++) {
+ rval = GetStr(ss, i);
+ if (Sprint(&ss->sprinter, "%s%s",
+ (i == start) ? "" : ", ",
+ (i == end - 1 && *rval == '\0') ? ", " : rval) < 0) {
+ return NULL;
+ }
+ }
+
+ if (SprintPut(&ss->sprinter, "]", 1) < 0)
+ return NULL;
+ ss->sprinter.offset = ss->offsets[i];
+ ss->top = start;
+ *todop = todo;
+ return pc;
+}
+
+#undef LOCAL_ASSERT
+#undef LOAD_OP_DATA
+
+#endif /* JS_HAS_DESTRUCTURING */
+
+/*
+ * If nb is non-negative, decompile nb bytecodes starting at pc. Otherwise
+ * the decompiler starts at pc and continues until it reaches an opcode for
+ * which decompiling would result in the stack depth equaling -(nb + 1).
+ */
+static jsbytecode *
+Decompile(SprintStack *ss, jsbytecode *pc, intN nb)
+{
+ JSContext *cx;
+ JSPrinter *jp, *jp2;
+ jsbytecode *startpc, *endpc, *pc2, *done, *forelem_tail, *forelem_done;
+ ptrdiff_t tail, todo, len, oplen, cond, next;
+ JSOp op, lastop, saveop;
+ const JSCodeSpec *cs;
+ jssrcnote *sn, *sn2;
+ const char *lval, *rval, *xval, *fmt;
+ jsint i, argc;
+ char **argv;
+ jsatomid atomIndex;
+ JSAtom *atom;
+ JSObject *obj;
+ JSFunction *fun;
+ JSString *str;
+ JSBool ok;
+#if JS_HAS_XML_SUPPORT
+ JSBool foreach, inXML, quoteAttr;
+#else
+#define inXML JS_FALSE
+#endif
+ jsval val;
+ int stackDummy;
+
+ static const char exception_cookie[] = "/*EXCEPTION*/";
+ static const char retsub_pc_cookie[] = "/*RETSUB_PC*/";
+ static const char forelem_cookie[] = "/*FORELEM*/";
+ static const char with_cookie[] = "/*WITH*/";
+ static const char dot_format[] = "%s.%s";
+ static const char index_format[] = "%s[%s]";
+ static const char predot_format[] = "%s%s.%s";
+ static const char postdot_format[] = "%s.%s%s";
+ static const char preindex_format[] = "%s%s[%s]";
+ static const char postindex_format[] = "%s[%s]%s";
+ static const char ss_format[] = "%s%s";
+
+/*
+ * Local macros
+ */
+#define DECOMPILE_CODE(pc,nb) if (!Decompile(ss, pc, nb)) return NULL
+#define POP_STR() PopStr(ss, op)
+#define LOCAL_ASSERT(expr) LOCAL_ASSERT_RV(expr, JS_FALSE)
+
+/*
+ * Callers know that ATOM_IS_STRING(atom), and we leave it to the optimizer to
+ * common ATOM_TO_STRING(atom) here and near the call sites.
+ */
+#define ATOM_IS_IDENTIFIER(atom) js_IsIdentifier(ATOM_TO_STRING(atom))
+#define ATOM_IS_KEYWORD(atom) \
+ (js_CheckKeyword(JSSTRING_CHARS(ATOM_TO_STRING(atom)), \
+ JSSTRING_LENGTH(ATOM_TO_STRING(atom))) != TOK_EOF)
+
+/*
+ * Given an atom already fetched from jp->script's atom map, quote/escape its
+ * string appropriately into rval, and select fmt from the quoted and unquoted
+ * alternatives.
+ */
+#define GET_QUOTE_AND_FMT(qfmt, ufmt, rval) \
+ JS_BEGIN_MACRO \
+ jschar quote_; \
+ if (!ATOM_IS_IDENTIFIER(atom)) { \
+ quote_ = '\''; \
+ fmt = qfmt; \
+ } else { \
+ quote_ = 0; \
+ fmt = ufmt; \
+ } \
+ rval = QuoteString(&ss->sprinter, ATOM_TO_STRING(atom), quote_); \
+ if (!rval) \
+ return NULL; \
+ JS_END_MACRO
+
+/*
+ * Get atom from jp->script's atom map, quote/escape its string appropriately
+ * into rval, and select fmt from the quoted and unquoted alternatives.
+ */
+#define GET_ATOM_QUOTE_AND_FMT(qfmt, ufmt, rval) \
+ JS_BEGIN_MACRO \
+ atom = GET_ATOM(cx, jp->script, pc); \
+ GET_QUOTE_AND_FMT(qfmt, ufmt, rval); \
+ JS_END_MACRO
+
+ cx = ss->sprinter.context;
+ if (!JS_CHECK_STACK_SIZE(cx, stackDummy)) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_OVER_RECURSED);
+ return NULL;
+ }
+
+ jp = ss->printer;
+ startpc = pc;
+ endpc = (nb < 0) ? jp->script->code + jp->script->length : pc + nb;
+ forelem_tail = forelem_done = NULL;
+ tail = -1;
+ todo = -2; /* NB: different from Sprint() error return. */
+ saveop = JSOP_NOP;
+ sn = NULL;
+ rval = NULL;
+#if JS_HAS_XML_SUPPORT
+ foreach = inXML = quoteAttr = JS_FALSE;
+#endif
+
+ while (nb < 0 || pc < endpc) {
+ /*
+ * Move saveop to lastop so prefixed bytecodes can take special action
+ * while sharing maximal code. Set op and saveop to the new bytecode,
+ * use op in POP_STR to trigger automatic parenthesization, but push
+ * saveop at the bottom of the loop if this op pushes. Thus op may be
+ * set to nop or otherwise mutated to suppress auto-parens.
+ */
+ lastop = saveop;
+ op = saveop = (JSOp) *pc;
+ cs = &js_CodeSpec[saveop];
+ len = oplen = cs->length;
+
+ if (nb < 0 && -(nb + 1) == (intN)ss->top - cs->nuses + cs->ndefs)
+ return pc;
+
+ if (pc + oplen == jp->dvgfence) {
+ JSStackFrame *fp;
+ uint32 format, mode, type;
+
+ /*
+ * Rewrite non-get ops to their "get" format if the error is in
+ * the bytecode at pc, so we don't decompile more than the error
+ * expression.
+ */
+ for (fp = cx->fp; fp && !fp->script; fp = fp->down)
+ continue;
+ format = cs->format;
+ if (((fp && pc == fp->pc) ||
+ (pc == startpc && cs->nuses != 0)) &&
+ format & (JOF_SET|JOF_DEL|JOF_INCDEC|JOF_IMPORT|JOF_FOR)) {
+ mode = (format & JOF_MODEMASK);
+ if (mode == JOF_NAME) {
+ /*
+ * JOF_NAME does not imply JOF_CONST, so we must check for
+ * the QARG and QVAR format types, and translate those to
+ * JSOP_GETARG or JSOP_GETVAR appropriately, instead of to
+ * JSOP_NAME.
+ */
+ type = format & JOF_TYPEMASK;
+ op = (type == JOF_QARG)
+ ? JSOP_GETARG
+ : (type == JOF_QVAR)
+ ? JSOP_GETVAR
+ : (type == JOF_LOCAL)
+ ? JSOP_GETLOCAL
+ : JSOP_NAME;
+
+ i = cs->nuses - js_CodeSpec[op].nuses;
+ while (--i >= 0)
+ PopOff(ss, JSOP_NOP);
+ } else {
+ /*
+ * We must replace the faulting pc's bytecode with a
+ * corresponding JSOP_GET* code. For JSOP_SET{PROP,ELEM},
+ * we must use the "2nd" form of JSOP_GET{PROP,ELEM}, to
+ * throw away the assignment op's right-hand operand and
+ * decompile it as if it were a GET of its left-hand
+ * operand.
+ */
+ if (mode == JOF_PROP) {
+ op = (format & JOF_SET) ? JSOP_GETPROP2 : JSOP_GETPROP;
+ } else if (mode == JOF_ELEM) {
+ op = (format & JOF_SET) ? JSOP_GETELEM2 : JSOP_GETELEM;
+ } else {
+ /*
+ * Zero mode means precisely that op is uncategorized
+ * for our purposes, so we must write per-op special
+ * case code here.
+ */
+ switch (op) {
+ case JSOP_ENUMELEM:
+ case JSOP_ENUMCONSTELEM:
+ op = JSOP_GETELEM;
+ break;
+#if JS_HAS_LVALUE_RETURN
+ case JSOP_SETCALL:
+ op = JSOP_CALL;
+ break;
+#endif
+ default:
+ LOCAL_ASSERT(0);
+ }
+ }
+ }
+ }
+
+ saveop = op;
+ if (op >= JSOP_LIMIT) {
+ switch (op) {
+ case JSOP_GETPROP2:
+ saveop = JSOP_GETPROP;
+ break;
+ case JSOP_GETELEM2:
+ saveop = JSOP_GETELEM;
+ break;
+ default:;
+ }
+ }
+ LOCAL_ASSERT(js_CodeSpec[saveop].length == oplen);
+
+ jp->dvgfence = NULL;
+ }
+
+ if (cs->token) {
+ switch (cs->nuses) {
+ case 2:
+ sn = js_GetSrcNote(jp->script, pc);
+ if (sn && SN_TYPE(sn) == SRC_ASSIGNOP) {
+ /*
+ * Avoid over-parenthesizing y in x op= y based on its
+ * expansion: x = x op y (replace y by z = w to see the
+ * problem).
+ */
+ op = pc[oplen];
+ LOCAL_ASSERT(op != saveop);
+ }
+ rval = POP_STR();
+ lval = POP_STR();
+ if (op != saveop) {
+ /* Print only the right operand of the assignment-op. */
+ todo = SprintCString(&ss->sprinter, rval);
+ op = saveop;
+ } else if (!inXML) {
+ todo = Sprint(&ss->sprinter, "%s %s %s",
+ lval, cs->token, rval);
+ } else {
+ /* In XML, just concatenate the two operands. */
+ LOCAL_ASSERT(op == JSOP_ADD);
+ todo = Sprint(&ss->sprinter, ss_format, lval, rval);
+ }
+ break;
+
+ case 1:
+ rval = POP_STR();
+ todo = Sprint(&ss->sprinter, ss_format, cs->token, rval);
+ break;
+
+ case 0:
+ todo = SprintCString(&ss->sprinter, cs->token);
+ break;
+
+ default:
+ todo = -2;
+ break;
+ }
+ } else {
+ switch (op) {
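+ /*
+ * Descriptive note (not upstream): BEGIN_LITOPX_CASE opens a case that
+ * takes an atom immediate, loading atomIndex from the bytecode and
+ * falling into a do_<OP> label so the extended-index JSOP_LITOPX path
+ * can jump in with atomIndex already set. END_LITOPX_CASE closes such
+ * a case with a break.
+ */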
+#define BEGIN_LITOPX_CASE(OP) \
+ case OP: \
+ atomIndex = GET_ATOM_INDEX(pc); \
+ do_##OP: \
+ atom = js_GetAtom(cx, &jp->script->atomMap, atomIndex);
+
+#define END_LITOPX_CASE \
+ break;
+
+ case JSOP_NOP:
+ /*
+ * Check for a do-while loop, a for-loop with an empty
+ * initializer part, a labeled statement, a function
+ * definition, or try/finally.
+ */
+ sn = js_GetSrcNote(jp->script, pc);
+ todo = -2;
+ switch (sn ? SN_TYPE(sn) : SRC_NULL) {
+ case SRC_WHILE:
+ js_printf(SET_MAYBE_BRACE(jp), "\tdo {\n");
+ jp->indent += 4;
+ break;
+
+ case SRC_FOR:
+ rval = "";
+
+ do_forloop:
+ /* Skip the JSOP_NOP or JSOP_POP bytecode. */
+ pc++;
+
+ /* Get the cond, next, and loop-closing tail offsets. */
+ cond = js_GetSrcNoteOffset(sn, 0);
+ next = js_GetSrcNoteOffset(sn, 1);
+ tail = js_GetSrcNoteOffset(sn, 2);
+ LOCAL_ASSERT(tail + GetJumpOffset(pc+tail, pc+tail) == 0);
+
+ /* Print the keyword and the possibly empty init-part. */
+ js_printf(jp, "\tfor (%s;", rval);
+
+ if (pc[cond] == JSOP_IFEQ || pc[cond] == JSOP_IFEQX) {
+ /* Decompile the loop condition. */
+ DECOMPILE_CODE(pc, cond);
+ js_printf(jp, " %s", POP_STR());
+ }
+
+ /* Need a semicolon whether or not there was a cond. */
+ js_puts(jp, ";");
+
+ if (pc[next] != JSOP_GOTO && pc[next] != JSOP_GOTOX) {
+ /* Decompile the loop updater. */
+ DECOMPILE_CODE(pc + next, tail - next - 1);
+ js_printf(jp, " %s", POP_STR());
+ }
+
+ /* Do the loop body. */
+ js_printf(SET_MAYBE_BRACE(jp), ") {\n");
+ jp->indent += 4;
+ oplen = (cond) ? js_CodeSpec[pc[cond]].length : 0;
+ DECOMPILE_CODE(pc + cond + oplen, next - cond - oplen);
+ jp->indent -= 4;
+ js_printf(jp, "\t}\n");
+
+ /* Set len so pc skips over the entire loop. */
+ len = tail + js_CodeSpec[pc[tail]].length;
+ break;
+
+ case SRC_LABEL:
+ atom = js_GetAtom(cx, &jp->script->atomMap,
+ (jsatomid) js_GetSrcNoteOffset(sn, 0));
+ jp->indent -= 4;
+ rval = QuoteString(&ss->sprinter, ATOM_TO_STRING(atom), 0);
+ if (!rval)
+ return NULL;
+ RETRACT(&ss->sprinter, rval);
+ js_printf(CLEAR_MAYBE_BRACE(jp), "\t%s:\n", rval);
+ jp->indent += 4;
+ break;
+
+ case SRC_LABELBRACE:
+ atom = js_GetAtom(cx, &jp->script->atomMap,
+ (jsatomid) js_GetSrcNoteOffset(sn, 0));
+ rval = QuoteString(&ss->sprinter, ATOM_TO_STRING(atom), 0);
+ if (!rval)
+ return NULL;
+ RETRACT(&ss->sprinter, rval);
+ js_printf(CLEAR_MAYBE_BRACE(jp), "\t%s: {\n", rval);
+ jp->indent += 4;
+ break;
+
+ case SRC_ENDBRACE:
+ jp->indent -= 4;
+ js_printf(jp, "\t}\n");
+ break;
+
+ case SRC_FUNCDEF:
+ atom = js_GetAtom(cx, &jp->script->atomMap,
+ (jsatomid) js_GetSrcNoteOffset(sn, 0));
+ LOCAL_ASSERT(ATOM_IS_OBJECT(atom));
+ do_function:
+ obj = ATOM_TO_OBJECT(atom);
+ fun = (JSFunction *) JS_GetPrivate(cx, obj);
+ jp2 = js_NewPrinter(cx, JS_GetFunctionName(fun),
+ jp->indent, jp->pretty);
+ if (!jp2)
+ return NULL;
+ jp2->scope = jp->scope;
+ js_puts(jp2, "\n");
+ ok = js_DecompileFunction(jp2, fun);
+ if (ok) {
+ js_puts(jp2, "\n");
+ str = js_GetPrinterOutput(jp2);
+ if (str)
+ js_printf(jp, "%s\n", JS_GetStringBytes(str));
+ else
+ ok = JS_FALSE;
+ }
+ js_DestroyPrinter(jp2);
+ if (!ok)
+ return NULL;
+
+ break;
+
+ case SRC_BRACE:
+ js_printf(CLEAR_MAYBE_BRACE(jp), "\t{\n");
+ jp->indent += 4;
+ len = js_GetSrcNoteOffset(sn, 0);
+ DECOMPILE_CODE(pc + oplen, len - oplen);
+ jp->indent -= 4;
+ js_printf(jp, "\t}\n");
+ break;
+
+ default:;
+ }
+ break;
+
+ case JSOP_GROUP:
+ cs = &js_CodeSpec[lastop];
+ if ((cs->prec != 0 &&
+ cs->prec == js_CodeSpec[pc[JSOP_GROUP_LENGTH]].prec) ||
+ pc[JSOP_GROUP_LENGTH] == JSOP_PUSHOBJ ||
+ pc[JSOP_GROUP_LENGTH] == JSOP_DUP) {
+ /*
+ * Force parens if this JSOP_GROUP forced re-association
+ * against precedence, or if this is a call or constructor
+ * expression, or if it is destructured (JSOP_DUP).
+ *
+ * This is necessary to handle the operator new grammar,
+ * by which new x(y).z means (new x(y)).z. For example
+ * new (x(y).z) must decompile with the constructor
+ * parenthesized, but normal precedence has JSOP_GETPROP
+ * (for the final .z) higher than JSOP_NEW. In general,
+ * if the call or constructor expression is parenthesized,
+ * we preserve parens.
+ */
+ op = JSOP_NAME;
+ rval = POP_STR();
+ todo = SprintCString(&ss->sprinter, rval);
+ } else {
+ /*
+ * Don't explicitly parenthesize -- just fix the top
+ * opcode so that the auto-parens magic in PopOff can do
+ * its thing.
+ */
+ LOCAL_ASSERT(ss->top != 0);
+ ss->opcodes[ss->top-1] = saveop = lastop;
+ todo = -2;
+ }
+ break;
+
+ case JSOP_STARTITER:
+ todo = -2;
+ break;
+
+ case JSOP_PUSH:
+#if JS_HAS_DESTRUCTURING
+ sn = js_GetSrcNote(jp->script, pc);
+ if (sn && SN_TYPE(sn) == SRC_GROUPASSIGN) {
+ pc = DecompileGroupAssignment(ss, pc, endpc, sn, &todo);
+ if (!pc)
+ return NULL;
+ LOCAL_ASSERT(*pc == JSOP_SETSP);
+ len = oplen = JSOP_SETSP_LENGTH;
+ goto end_groupassignment;
+ }
+#endif
+ /* FALL THROUGH */
+
+ case JSOP_PUSHOBJ:
+ case JSOP_BINDNAME:
+ do_JSOP_BINDNAME:
+ todo = Sprint(&ss->sprinter, "");
+ break;
+
+ case JSOP_TRY:
+ js_printf(CLEAR_MAYBE_BRACE(jp), "\ttry {\n");
+ jp->indent += 4;
+ todo = -2;
+ break;
+
+ case JSOP_FINALLY:
+ jp->indent -= 4;
+ js_printf(CLEAR_MAYBE_BRACE(jp), "\t} finally {\n");
+ jp->indent += 4;
+
+ /*
+ * We must push an empty string placeholder for gosub's return
+ * address, popped by JSOP_RETSUB and counted by script->depth
+ * but not by ss->top (see JSOP_SETSP, below).
+ */
+ todo = Sprint(&ss->sprinter, exception_cookie);
+ if (todo < 0 || !PushOff(ss, todo, op))
+ return NULL;
+ todo = Sprint(&ss->sprinter, retsub_pc_cookie);
+ break;
+
+ case JSOP_RETSUB:
+ rval = POP_STR();
+ LOCAL_ASSERT(strcmp(rval, retsub_pc_cookie) == 0);
+ lval = POP_STR();
+ LOCAL_ASSERT(strcmp(lval, exception_cookie) == 0);
+ todo = -2;
+ break;
+
+ case JSOP_SWAP:
+ /*
+ * We don't generate this opcode currently, and previously we
+ * did not need to decompile it. If old serialized bytecode
+ * still uses it, we fall through and set todo = -2.
+ */
+ /* FALL THROUGH */
+
+ case JSOP_GOSUB:
+ case JSOP_GOSUBX:
+ /*
+ * JSOP_GOSUB and GOSUBX have no effect on the decompiler's
+ * string stack because the next op in bytecode order finds
+ * the stack balanced by a JSOP_RETSUB executed elsewhere.
+ */
+ todo = -2;
+ break;
+
+ case JSOP_SETSP:
+ {
+ uintN newtop, oldtop, i;
+
+ /*
+ * The compiler models operand stack depth and fixes the stack
+ * pointer on entry to a catch clause based on its depth model.
+ * The decompiler must match the code generator's model, which
+ * is why JSOP_FINALLY pushes a cookie that JSOP_RETSUB pops.
+ */
+ newtop = (uintN) GET_UINT16(pc);
+ oldtop = ss->top;
+ LOCAL_ASSERT(newtop <= oldtop);
+ todo = -2;
+
+#if JS_HAS_DESTRUCTURING
+ sn = js_GetSrcNote(jp->script, pc);
+ if (sn && SN_TYPE(sn) == SRC_GROUPASSIGN) {
+ todo = Sprint(&ss->sprinter, "%s[] = [",
+ VarPrefix(sn));
+ if (todo < 0)
+ return NULL;
+ for (i = newtop; i < oldtop; i++) {
+ rval = OFF2STR(&ss->sprinter, ss->offsets[i]);
+ if (Sprint(&ss->sprinter, ss_format,
+ (i == newtop) ? "" : ", ",
+ (i == oldtop - 1 && *rval == '\0')
+ ? ", " : rval) < 0) {
+ return NULL;
+ }
+ }
+ if (SprintPut(&ss->sprinter, "]", 1) < 0)
+ return NULL;
+
+ /*
+ * Kill newtop before the end_groupassignment: label by
+ * retracting/popping early. Control will either jump to
+ * do_forloop: or do_letheadbody: or else break from our
+ * case JSOP_SETSP: after the switch (*pc2) below.
+ */
+ if (newtop < oldtop) {
+ ss->sprinter.offset = GetOff(ss, newtop);
+ ss->top = newtop;
+ }
+
+ end_groupassignment:
+ /*
+ * Thread directly to the next opcode if we can, to handle
+ * the special cases of a group assignment in the first or
+ * last part of a for(;;) loop head, or in a let block or
+ * expression head.
+ *
+ * NB: todo at this point indexes space in ss->sprinter
+ * that is liable to be overwritten. The code below knows
+ * exactly how long rval lives, or else copies it down via
+ * SprintCString.
+ */
+ rval = OFF2STR(&ss->sprinter, todo);
+ todo = -2;
+ pc2 = pc + oplen;
+ switch (*pc2) {
+ case JSOP_NOP:
+ /* First part of for(;;) or let block/expr head. */
+ sn = js_GetSrcNote(jp->script, pc2);
+ if (sn) {
+ if (SN_TYPE(sn) == SRC_FOR) {
+ pc = pc2;
+ goto do_forloop;
+ }
+ if (SN_TYPE(sn) == SRC_DECL) {
+ if (ss->top == jp->script->depth) {
+ /*
+ * This must be an empty destructuring
+ * in the head of a let whose body block
+ * is also empty.
+ */
+ pc = pc2 + 1;
+ len = js_GetSrcNoteOffset(sn, 0);
+ LOCAL_ASSERT(pc[len] == JSOP_LEAVEBLOCK);
+ js_printf(jp, "\tlet (%s) {\n", rval);
+ js_printf(jp, "\t}\n");
+ goto end_setsp;
+ }
+ todo = SprintCString(&ss->sprinter, rval);
+ if (todo < 0 || !PushOff(ss, todo, JSOP_NOP))
+ return NULL;
+ op = JSOP_POP;
+ pc = pc2 + 1;
+ goto do_letheadbody;
+ }
+ }
+ break;
+
+ case JSOP_GOTO:
+ case JSOP_GOTOX:
+ /* Third part of for(;;) loop head. */
+ cond = GetJumpOffset(pc2, pc2);
+ sn = js_GetSrcNote(jp->script, pc2 + cond - 1);
+ if (sn && SN_TYPE(sn) == SRC_FOR) {
+ todo = SprintCString(&ss->sprinter, rval);
+ saveop = JSOP_NOP;
+ }
+ break;
+ }
+
+ /*
+ * If control flow reaches this point with todo still -2,
+ * just print rval as an expression statement.
+ */
+ if (todo == -2)
+ js_printf(jp, "\t%s;\n", rval);
+ end_setsp:
+ break;
+ }
+#endif
+ if (newtop < oldtop) {
+ ss->sprinter.offset = GetOff(ss, newtop);
+ ss->top = newtop;
+ }
+ break;
+ }
+
+ case JSOP_EXCEPTION:
+ /* The catch decompiler handles this op itself. */
+ LOCAL_ASSERT(JS_FALSE);
+ break;
+
+ case JSOP_POP:
+ /*
+ * By default, do not automatically parenthesize when popping
+ * a stacked expression decompilation. We auto-parenthesize
+ * only when JSOP_POP is annotated with SRC_PCDELTA, meaning
+ * comma operator.
+ */
+ op = JSOP_POPV;
+ /* FALL THROUGH */
+
+ case JSOP_POPV:
+ sn = js_GetSrcNote(jp->script, pc);
+ switch (sn ? SN_TYPE(sn) : SRC_NULL) {
+ case SRC_FOR:
+ /* Force parens around 'in' expression at 'for' front. */
+ if (ss->opcodes[ss->top-1] == JSOP_IN)
+ op = JSOP_LSH;
+ rval = POP_STR();
+ todo = -2;
+ goto do_forloop;
+
+ case SRC_PCDELTA:
+ /* Comma operator: use JSOP_POP for correct precedence. */
+ op = JSOP_POP;
+
+ /* Pop and save to avoid blowing stack depth budget. */
+ lval = JS_strdup(cx, POP_STR());
+ if (!lval)
+ return NULL;
+
+ /*
+ * The offset tells distance to the end of the right-hand
+ * operand of the comma operator.
+ */
+ done = pc + len;
+ pc += js_GetSrcNoteOffset(sn, 0);
+ len = 0;
+
+ if (!Decompile(ss, done, pc - done)) {
+ JS_free(cx, (char *)lval);
+ return NULL;
+ }
+
+ /* Pop Decompile result and print comma expression. */
+ rval = POP_STR();
+ todo = Sprint(&ss->sprinter, "%s, %s", lval, rval);
+ JS_free(cx, (char *)lval);
+ break;
+
+ case SRC_HIDDEN:
+ /* Hide this pop, it's from a goto in a with or for/in. */
+ todo = -2;
+ break;
+
+ case SRC_DECL:
+ /* This pop is at the end of the let block/expr head. */
+ pc += JSOP_POP_LENGTH;
+#if JS_HAS_DESTRUCTURING
+ do_letheadbody:
+#endif
+ len = js_GetSrcNoteOffset(sn, 0);
+ if (pc[len] == JSOP_LEAVEBLOCK) {
+ js_printf(CLEAR_MAYBE_BRACE(jp), "\tlet (%s) {\n",
+ POP_STR());
+ jp->indent += 4;
+ DECOMPILE_CODE(pc, len);
+ jp->indent -= 4;
+ js_printf(jp, "\t}\n");
+ todo = -2;
+ } else {
+ LOCAL_ASSERT(pc[len] == JSOP_LEAVEBLOCKEXPR);
+
+ lval = JS_strdup(cx, POP_STR());
+ if (!lval)
+ return NULL;
+
+ if (!Decompile(ss, pc, len)) {
+ JS_free(cx, (char *)lval);
+ return NULL;
+ }
+ rval = POP_STR();
+ todo = Sprint(&ss->sprinter,
+ (*rval == '{')
+ ? "let (%s) (%s)"
+ : "let (%s) %s",
+ lval, rval);
+ JS_free(cx, (char *)lval);
+ }
+ break;
+
+ default:
+ /* Turn off parens around a yield statement. */
+ if (ss->opcodes[ss->top-1] == JSOP_YIELD)
+ op = JSOP_NOP;
+
+ rval = POP_STR();
+ if (*rval != '\0') {
+#if JS_HAS_BLOCK_SCOPE
+ /*
+ * If a let declaration is the only child of a control
+ * structure that does not require braces, it must not
+ * be braced. If it were braced explicitly, it would
+ * be bracketed by JSOP_ENTERBLOCK/JSOP_LEAVEBLOCK.
+ */
+ if (jp->braceState == MAYBE_BRACE &&
+ pc + JSOP_POP_LENGTH == endpc &&
+ !strncmp(rval, var_prefix[SRC_DECL_LET], 4) &&
+ rval[4] != '(') {
+ SetDontBrace(jp);
+ }
+#endif
+ js_printf(jp,
+ (*rval == '{' ||
+ (strncmp(rval, js_function_str, 8) == 0 &&
+ rval[8] == ' '))
+ ? "\t(%s);\n"
+ : "\t%s;\n",
+ rval);
+ }
+ todo = -2;
+ break;
+ }
+ break;
+
+ case JSOP_POP2:
+ case JSOP_ENDITER:
+ sn = js_GetSrcNote(jp->script, pc);
+ todo = -2;
+ if (sn && SN_TYPE(sn) == SRC_HIDDEN)
+ break;
+ (void) PopOff(ss, op);
+ if (op == JSOP_POP2)
+ (void) PopOff(ss, op);
+ break;
+
+ case JSOP_ENTERWITH:
+ LOCAL_ASSERT(!js_GetSrcNote(jp->script, pc));
+ rval = POP_STR();
+ js_printf(SET_MAYBE_BRACE(jp), "\twith (%s) {\n", rval);
+ jp->indent += 4;
+ todo = Sprint(&ss->sprinter, with_cookie);
+ break;
+
+ case JSOP_LEAVEWITH:
+ sn = js_GetSrcNote(jp->script, pc);
+ todo = -2;
+ if (sn && SN_TYPE(sn) == SRC_HIDDEN)
+ break;
+ rval = POP_STR();
+ LOCAL_ASSERT(strcmp(rval, with_cookie) == 0);
+ jp->indent -= 4;
+ js_printf(jp, "\t}\n");
+ break;
+
+ BEGIN_LITOPX_CASE(JSOP_ENTERBLOCK)
+ {
+ JSAtom **atomv, *smallv[5];
+ JSScopeProperty *sprop;
+
+ obj = ATOM_TO_OBJECT(atom);
+ argc = OBJ_BLOCK_COUNT(cx, obj);
+ if ((size_t)argc <= sizeof smallv / sizeof smallv[0]) {
+ atomv = smallv;
+ } else {
+ atomv = (JSAtom **) JS_malloc(cx, argc * sizeof(JSAtom *));
+ if (!atomv)
+ return NULL;
+ }
+
+ /* From here on, control must flow through enterblock_out. */
+ for (sprop = OBJ_SCOPE(obj)->lastProp; sprop;
+ sprop = sprop->parent) {
+ if (!(sprop->flags & SPROP_HAS_SHORTID))
+ continue;
+ LOCAL_ASSERT(sprop->shortid < argc);
+ atomv[sprop->shortid] = JSID_TO_ATOM(sprop->id);
+ }
+ ok = JS_TRUE;
+ for (i = 0; i < argc; i++) {
+ atom = atomv[i];
+ rval = QuoteString(&ss->sprinter, ATOM_TO_STRING(atom), 0);
+ if (!rval ||
+ !PushOff(ss, STR2OFF(&ss->sprinter, rval), op)) {
+ ok = JS_FALSE;
+ goto enterblock_out;
+ }
+ }
+
+ sn = js_GetSrcNote(jp->script, pc);
+ switch (sn ? SN_TYPE(sn) : SRC_NULL) {
+#if JS_HAS_BLOCK_SCOPE
+ case SRC_BRACE:
+ js_printf(CLEAR_MAYBE_BRACE(jp), "\t{\n");
+ jp->indent += 4;
+ len = js_GetSrcNoteOffset(sn, 0);
+ ok = Decompile(ss, pc + oplen, len - oplen) != NULL;
+ if (!ok)
+ goto enterblock_out;
+ jp->indent -= 4;
+ js_printf(jp, "\t}\n");
+ break;
+#endif
+
+ case SRC_CATCH:
+ jp->indent -= 4;
+ js_printf(CLEAR_MAYBE_BRACE(jp), "\t} catch (");
+
+ pc2 = pc;
+ pc += oplen;
+ LOCAL_ASSERT(*pc == JSOP_EXCEPTION);
+ pc += JSOP_EXCEPTION_LENGTH;
+ if (*pc == JSOP_DUP) {
+ sn2 = js_GetSrcNote(jp->script, pc);
+ if (sn2 && SN_TYPE(sn2) == SRC_HIDDEN) {
+ /*
+ * This is a hidden dup to save the exception for
+ * later. It must exist only when the catch has
+ * an exception guard.
+ */
+ LOCAL_ASSERT(js_GetSrcNoteOffset(sn, 0) != 0);
+ pc += JSOP_DUP_LENGTH;
+ }
+ }
+#if JS_HAS_DESTRUCTURING
+ if (*pc == JSOP_DUP) {
+ pc = DecompileDestructuring(ss, pc, endpc);
+ if (!pc) {
+ ok = JS_FALSE;
+ goto enterblock_out;
+ }
+ LOCAL_ASSERT(*pc == JSOP_POP);
+ pc += JSOP_POP_LENGTH;
+ lval = PopStr(ss, JSOP_NOP);
+ js_puts(jp, lval);
+ } else {
+#endif
+ LOCAL_ASSERT(*pc == JSOP_SETLOCALPOP);
+ i = GET_UINT16(pc);
+ pc += JSOP_SETLOCALPOP_LENGTH;
+ atom = atomv[i - OBJ_BLOCK_DEPTH(cx, obj)];
+ str = ATOM_TO_STRING(atom);
+ if (!QuoteString(&jp->sprinter, str, 0)) {
+ ok = JS_FALSE;
+ goto enterblock_out;
+ }
+#if JS_HAS_DESTRUCTURING
+ }
+#endif
+
+ len = js_GetSrcNoteOffset(sn, 0);
+ if (len) {
+ len -= PTRDIFF(pc, pc2, jsbytecode);
+ LOCAL_ASSERT(len > 0);
+ js_printf(jp, " if ");
+ ok = Decompile(ss, pc, len) != NULL;
+ if (!ok)
+ goto enterblock_out;
+ js_printf(jp, "%s", POP_STR());
+ pc += len;
+ LOCAL_ASSERT(*pc == JSOP_IFEQ || *pc == JSOP_IFEQX);
+ pc += js_CodeSpec[*pc].length;
+ }
+
+ js_printf(jp, ") {\n");
+ jp->indent += 4;
+ len = 0;
+ break;
+ }
+
+ todo = -2;
+
+ enterblock_out:
+ if (atomv != smallv)
+ JS_free(cx, atomv);
+ if (!ok)
+ return NULL;
+ }
+ END_LITOPX_CASE
+
+ case JSOP_LEAVEBLOCK:
+ case JSOP_LEAVEBLOCKEXPR:
+ {
+ uintN top, depth;
+
+ sn = js_GetSrcNote(jp->script, pc);
+ todo = -2;
+ if (op == JSOP_LEAVEBLOCKEXPR) {
+ LOCAL_ASSERT(SN_TYPE(sn) == SRC_PCBASE);
+ rval = POP_STR();
+ } else if (sn) {
+ LOCAL_ASSERT(op == JSOP_LEAVEBLOCK);
+ if (SN_TYPE(sn) == SRC_HIDDEN)
+ break;
+ LOCAL_ASSERT(SN_TYPE(sn) == SRC_CATCH);
+ LOCAL_ASSERT((uintN)js_GetSrcNoteOffset(sn, 0) == ss->top);
+ }
+ top = ss->top;
+ depth = GET_UINT16(pc);
+ LOCAL_ASSERT(top >= depth);
+ top -= depth;
+ ss->top = top;
+ ss->sprinter.offset = GetOff(ss, top);
+ if (op == JSOP_LEAVEBLOCKEXPR)
+ todo = SprintCString(&ss->sprinter, rval);
+ break;
+ }
+
+ case JSOP_GETLOCAL:
+ i = GET_UINT16(pc);
+ sn = js_GetSrcNote(jp->script, pc);
+ LOCAL_ASSERT((uintN)i < ss->top);
+ rval = GetLocal(ss, i);
+
+#if JS_HAS_DESTRUCTURING
+ if (sn && SN_TYPE(sn) == SRC_GROUPASSIGN) {
+ pc = DecompileGroupAssignment(ss, pc, endpc, sn, &todo);
+ if (!pc)
+ return NULL;
+ LOCAL_ASSERT(*pc == JSOP_SETSP);
+ len = oplen = JSOP_SETSP_LENGTH;
+ goto end_groupassignment;
+ }
+#endif
+
+ todo = Sprint(&ss->sprinter, ss_format, VarPrefix(sn), rval);
+ break;
+
+ case JSOP_SETLOCAL:
+ case JSOP_SETLOCALPOP:
+ i = GET_UINT16(pc);
+ lval = GetStr(ss, i);
+ rval = POP_STR();
+ goto do_setlval;
+
+ case JSOP_INCLOCAL:
+ case JSOP_DECLOCAL:
+ i = GET_UINT16(pc);
+ lval = GetLocal(ss, i);
+ goto do_inclval;
+
+ case JSOP_LOCALINC:
+ case JSOP_LOCALDEC:
+ i = GET_UINT16(pc);
+ lval = GetLocal(ss, i);
+ goto do_lvalinc;
+
+ case JSOP_FORLOCAL:
+ i = GET_UINT16(pc);
+ lval = GetStr(ss, i);
+ atom = NULL;
+ goto do_forlvalinloop;
+
+ case JSOP_RETRVAL:
+ todo = -2;
+ break;
+
+ case JSOP_SETRVAL:
+ case JSOP_RETURN:
+ rval = POP_STR();
+ if (*rval != '\0')
+ js_printf(jp, "\t%s %s;\n", js_return_str, rval);
+ else
+ js_printf(jp, "\t%s;\n", js_return_str);
+ todo = -2;
+ break;
+
+#if JS_HAS_GENERATORS
+ case JSOP_YIELD:
+ op = JSOP_SETNAME; /* turn off most parens */
+ rval = POP_STR();
+ todo = (*rval != '\0')
+ ? Sprint(&ss->sprinter,
+ (strncmp(rval, js_yield_str, 5) == 0 &&
+ (rval[5] == ' ' || rval[5] == '\0'))
+ ? "%s (%s)"
+ : "%s %s",
+ js_yield_str, rval)
+ : SprintCString(&ss->sprinter, js_yield_str);
+ break;
+
+ case JSOP_ARRAYPUSH:
+ {
+ uintN pos, blockpos, startpos;
+ ptrdiff_t start;
+
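+ /*
+ * Descriptive note (not upstream): array comprehension. Find the
+ * enclosing initialiser (its offset is recorded under the JSOP_NEWINIT
+ * slot on the model stack) and splice the pushed expression in between
+ * the opening '[' (or '#') and the decompiled for/if heads.
+ */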
+ rval = POP_STR();
+ pos = ss->top;
+ while ((op = ss->opcodes[--pos]) != JSOP_ENTERBLOCK &&
+ op != JSOP_NEWINIT) {
+ LOCAL_ASSERT(pos != 0);
+ }
+ blockpos = pos;
+ while (ss->opcodes[pos] == JSOP_ENTERBLOCK) {
+ if (pos == 0)
+ break;
+ --pos;
+ }
+ LOCAL_ASSERT(ss->opcodes[pos] == JSOP_NEWINIT);
+ startpos = pos;
+ start = ss->offsets[pos];
+ LOCAL_ASSERT(ss->sprinter.base[start] == '[' ||
+ ss->sprinter.base[start] == '#');
+ pos = blockpos;
+ while (ss->opcodes[++pos] == JSOP_STARTITER)
+ LOCAL_ASSERT(pos < ss->top);
+ LOCAL_ASSERT(pos < ss->top);
+ xval = OFF2STR(&ss->sprinter, ss->offsets[pos]);
+ lval = OFF2STR(&ss->sprinter, start);
+ RETRACT(&ss->sprinter, lval);
+ todo = Sprint(&ss->sprinter, "%s%s%.*s",
+ lval, rval, rval - xval, xval);
+ if (todo < 0)
+ return NULL;
+ ss->offsets[startpos] = todo;
+ todo = -2;
+ break;
+ }
+#endif
+
+ case JSOP_THROWING:
+ todo = -2;
+ break;
+
+ case JSOP_THROW:
+ sn = js_GetSrcNote(jp->script, pc);
+ todo = -2;
+ if (sn && SN_TYPE(sn) == SRC_HIDDEN)
+ break;
+ rval = POP_STR();
+ js_printf(jp, "\t%s %s;\n", cs->name, rval);
+ break;
+
+ case JSOP_GOTO:
+ case JSOP_GOTOX:
+ sn = js_GetSrcNote(jp->script, pc);
+ switch (sn ? SN_TYPE(sn) : SRC_NULL) {
+ case SRC_CONT2LABEL:
+ atom = js_GetAtom(cx, &jp->script->atomMap,
+ (jsatomid) js_GetSrcNoteOffset(sn, 0));
+ rval = QuoteString(&ss->sprinter, ATOM_TO_STRING(atom), 0);
+ if (!rval)
+ return NULL;
+ RETRACT(&ss->sprinter, rval);
+ js_printf(jp, "\tcontinue %s;\n", rval);
+ break;
+ case SRC_CONTINUE:
+ js_printf(jp, "\tcontinue;\n");
+ break;
+ case SRC_BREAK2LABEL:
+ atom = js_GetAtom(cx, &jp->script->atomMap,
+ (jsatomid) js_GetSrcNoteOffset(sn, 0));
+ rval = QuoteString(&ss->sprinter, ATOM_TO_STRING(atom), 0);
+ if (!rval)
+ return NULL;
+ RETRACT(&ss->sprinter, rval);
+ js_printf(jp, "\tbreak %s;\n", rval);
+ break;
+ case SRC_HIDDEN:
+ break;
+ default:
+ js_printf(jp, "\tbreak;\n");
+ break;
+ }
+ todo = -2;
+ break;
+
+ case JSOP_IFEQ:
+ case JSOP_IFEQX:
+ {
+ JSBool elseif = JS_FALSE;
+
+ if_again:
+ len = GetJumpOffset(pc, pc);
+ sn = js_GetSrcNote(jp->script, pc);
+
+ switch (sn ? SN_TYPE(sn) : SRC_NULL) {
+ case SRC_IF:
+ case SRC_IF_ELSE:
+ op = JSOP_NOP; /* turn off parens */
+ rval = POP_STR();
+ if (ss->inArrayInit) {
+ LOCAL_ASSERT(SN_TYPE(sn) == SRC_IF);
+ if (Sprint(&ss->sprinter, " if (%s)", rval) < 0)
+ return NULL;
+ } else {
+ js_printf(SET_MAYBE_BRACE(jp),
+ elseif ? " if (%s) {\n" : "\tif (%s) {\n",
+ rval);
+ jp->indent += 4;
+ }
+
+ if (SN_TYPE(sn) == SRC_IF) {
+ DECOMPILE_CODE(pc + oplen, len - oplen);
+ } else {
+ LOCAL_ASSERT(!ss->inArrayInit);
+ tail = js_GetSrcNoteOffset(sn, 0);
+ DECOMPILE_CODE(pc + oplen, tail - oplen);
+ jp->indent -= 4;
+ pc += tail;
+ LOCAL_ASSERT(*pc == JSOP_GOTO || *pc == JSOP_GOTOX);
+ oplen = js_CodeSpec[*pc].length;
+ len = GetJumpOffset(pc, pc);
+ js_printf(jp, "\t} else");
+
+ /*
+ * If the second offset for sn is non-zero, it tells
+ * the distance from the goto around the else, to the
+ * ifeq for the if inside the else that forms an "if
+ * else if" chain. Thus cond spans the condition of
+ * the second if, so we simply decompile it and start
+ * over at label if_again.
+ */
+ cond = js_GetSrcNoteOffset(sn, 1);
+ if (cond != 0) {
+ DECOMPILE_CODE(pc + oplen, cond - oplen);
+ pc += cond;
+ elseif = JS_TRUE;
+ goto if_again;
+ }
+
+ js_printf(SET_MAYBE_BRACE(jp), " {\n");
+ jp->indent += 4;
+ DECOMPILE_CODE(pc + oplen, len - oplen);
+ }
+
+ if (!ss->inArrayInit) {
+ jp->indent -= 4;
+ js_printf(jp, "\t}\n");
+ }
+ todo = -2;
+ break;
+
+ case SRC_WHILE:
+ rval = POP_STR();
+ js_printf(SET_MAYBE_BRACE(jp), "\twhile (%s) {\n", rval);
+ jp->indent += 4;
+ tail = js_GetSrcNoteOffset(sn, 0);
+ DECOMPILE_CODE(pc + oplen, tail - oplen);
+ jp->indent -= 4;
+ js_printf(jp, "\t}\n");
+ todo = -2;
+ break;
+
+ case SRC_COND:
+ xval = JS_strdup(cx, POP_STR());
+ if (!xval)
+ return NULL;
+ len = js_GetSrcNoteOffset(sn, 0);
+ DECOMPILE_CODE(pc + oplen, len - oplen);
+ lval = JS_strdup(cx, POP_STR());
+ if (!lval) {
+ JS_free(cx, (void *)xval);
+ return NULL;
+ }
+ pc += len;
+ LOCAL_ASSERT(*pc == JSOP_GOTO || *pc == JSOP_GOTOX);
+ oplen = js_CodeSpec[*pc].length;
+ len = GetJumpOffset(pc, pc);
+ DECOMPILE_CODE(pc + oplen, len - oplen);
+ rval = POP_STR();
+ todo = Sprint(&ss->sprinter, "%s ? %s : %s",
+ xval, lval, rval);
+ JS_free(cx, (void *)xval);
+ JS_free(cx, (void *)lval);
+ break;
+
+ default:
+ break;
+ }
+ break;
+ }
+
+ case JSOP_IFNE:
+ case JSOP_IFNEX:
+ /* Currently, this must be a do-while loop's upward branch. */
+ jp->indent -= 4;
+ js_printf(jp, "\t} while (%s);\n", POP_STR());
+ todo = -2;
+ break;
+
+ case JSOP_OR:
+ case JSOP_ORX:
+ xval = "||";
+
+ do_logical_connective:
+ /* Top of stack is the first operand of the logical connective (|| or &&). */
+ lval = JS_strdup(cx, POP_STR());
+ if (!lval)
+ return NULL;
+ done = pc + GetJumpOffset(pc, pc);
+ pc += len;
+ len = PTRDIFF(done, pc, jsbytecode);
+ DECOMPILE_CODE(pc, len);
+ rval = POP_STR();
+ if (jp->pretty &&
+ jp->indent + 4 + strlen(lval) + 4 + strlen(rval) > 75) {
+ rval = JS_strdup(cx, rval);
+ if (!rval) {
+ tail = -1;
+ } else {
+ todo = Sprint(&ss->sprinter, "%s %s\n", lval, xval);
+ tail = Sprint(&ss->sprinter, "%*s%s",
+ jp->indent + 4, "", rval);
+ JS_free(cx, (char *)rval);
+ }
+ if (tail < 0)
+ todo = -1;
+ } else {
+ todo = Sprint(&ss->sprinter, "%s %s %s", lval, xval, rval);
+ }
+ JS_free(cx, (char *)lval);
+ break;
+
+ case JSOP_AND:
+ case JSOP_ANDX:
+ xval = "&&";
+ goto do_logical_connective;
+
+ case JSOP_FORARG:
+ atom = GetSlotAtom(jp, js_GetArgument, GET_ARGNO(pc));
+ LOCAL_ASSERT(atom);
+ goto do_fornameinloop;
+
+ case JSOP_FORVAR:
+ atom = GetSlotAtom(jp, js_GetLocalVariable, GET_VARNO(pc));
+ LOCAL_ASSERT(atom);
+ goto do_fornameinloop;
+
+ case JSOP_FORNAME:
+ atom = GET_ATOM(cx, jp->script, pc);
+
+ do_fornameinloop:
+ lval = "";
+ do_forlvalinloop:
+ sn = js_GetSrcNote(jp->script, pc);
+ xval = NULL;
+ goto do_forinloop;
+
+ case JSOP_FORPROP:
+ xval = NULL;
+ atom = GET_ATOM(cx, jp->script, pc);
+ if (!ATOM_IS_IDENTIFIER(atom)) {
+ xval = QuoteString(&ss->sprinter, ATOM_TO_STRING(atom),
+ (jschar)'\'');
+ if (!xval)
+ return NULL;
+ atom = NULL;
+ }
+ lval = POP_STR();
+ sn = NULL;
+
+ do_forinloop:
+ pc += oplen;
+ LOCAL_ASSERT(*pc == JSOP_IFEQ || *pc == JSOP_IFEQX);
+ oplen = js_CodeSpec[*pc].length;
+ len = GetJumpOffset(pc, pc);
+ sn2 = js_GetSrcNote(jp->script, pc);
+ tail = js_GetSrcNoteOffset(sn2, 0);
+
+ do_forinhead:
+ if (!atom && xval) {
+ /*
+ * If xval is not a dummy empty string, we have to strdup
+ * it to save it from being clobbered by the first Sprint
+ * below. Standard dumb decompiler operating procedure!
+ */
+ if (*xval == '\0') {
+ xval = NULL;
+ } else {
+ xval = JS_strdup(cx, xval);
+ if (!xval)
+ return NULL;
+ }
+ }
+
+#if JS_HAS_XML_SUPPORT
+ if (foreach) {
+ foreach = JS_FALSE;
+ todo = Sprint(&ss->sprinter, "for %s (%s%s",
+ js_each_str, VarPrefix(sn), lval);
+ } else
+#endif
+ {
+ todo = Sprint(&ss->sprinter, "for (%s%s",
+ VarPrefix(sn), lval);
+ }
+ if (atom) {
+ if (*lval && SprintPut(&ss->sprinter, ".", 1) < 0)
+ return NULL;
+ xval = QuoteString(&ss->sprinter, ATOM_TO_STRING(atom), 0);
+ if (!xval)
+ return NULL;
+ } else if (xval) {
+ LOCAL_ASSERT(*xval != '\0');
+ ok = (Sprint(&ss->sprinter,
+ (js_CodeSpec[lastop].format & JOF_XMLNAME)
+ ? ".%s"
+ : "[%s]",
+ xval)
+ >= 0);
+ JS_free(cx, (char *)xval);
+ if (!ok)
+ return NULL;
+ }
+ if (todo < 0)
+ return NULL;
+
+ lval = OFF2STR(&ss->sprinter, todo);
+ rval = GetStr(ss, ss->top-1);
+ RETRACT(&ss->sprinter, rval);
+ if (ss->inArrayInit) {
+ todo = Sprint(&ss->sprinter, " %s in %s)", lval, rval);
+ if (todo < 0)
+ return NULL;
+ ss->offsets[ss->top-1] = todo;
+ ss->sprinter.offset += PAREN_SLOP;
+ DECOMPILE_CODE(pc + oplen, tail - oplen);
+ } else {
+ js_printf(SET_MAYBE_BRACE(jp), "\t%s in %s) {\n",
+ lval, rval);
+ jp->indent += 4;
+ DECOMPILE_CODE(pc + oplen, tail - oplen);
+ jp->indent -= 4;
+ js_printf(jp, "\t}\n");
+ }
+ todo = -2;
+ break;
+
+ case JSOP_FORELEM:
+ pc++;
+ LOCAL_ASSERT(*pc == JSOP_IFEQ || *pc == JSOP_IFEQX);
+ len = js_CodeSpec[*pc].length;
+
+ /*
+ * Arrange for the JSOP_ENUMELEM case to set tail for use by
+ * do_forinhead: code that relies on it to find the loop-closing
+ * jump (whatever its format, normal or extended), in order to
+ * bound the recursively decompiled loop body.
+ */
+ sn = js_GetSrcNote(jp->script, pc);
+ LOCAL_ASSERT(!forelem_tail);
+ forelem_tail = pc + js_GetSrcNoteOffset(sn, 0);
+
+ /*
+ * This gets a little wacky. Only the length of the for loop
+ * body PLUS the element-indexing expression is known here, so
+ * we pass the after-loop pc to the JSOP_ENUMELEM case, which
+ * is immediately below, to decompile that helper bytecode via
+ * the 'forelem_done' local.
+ *
+ * Since a for..in loop can't nest in the head of another for
+ * loop, we can use forelem_{tail,done} singletons to remember
+ * state from JSOP_FORELEM to JSOP_ENUMELEM, thence (via goto)
+ * to label do_forinhead.
+ */
+ LOCAL_ASSERT(!forelem_done);
+ forelem_done = pc + GetJumpOffset(pc, pc);
+
+ /* Our net stack balance after forelem;ifeq is +1. */
+ todo = SprintCString(&ss->sprinter, forelem_cookie);
+ break;
+
+ case JSOP_ENUMELEM:
+ case JSOP_ENUMCONSTELEM:
+ /*
+ * The stack has the object under the (top) index expression.
+ * The "rval" property id is underneath those two on the stack.
+ * The for loop body net and gross lengths can now be adjusted
+ * to account for the length of the indexing expression that
+ * came after JSOP_FORELEM and before JSOP_ENUMELEM.
+ */
+ atom = NULL;
+ xval = POP_STR();
+ op = JSOP_GETELEM; /* lval must have high precedence */
+ lval = POP_STR();
+ op = saveop;
+ rval = POP_STR();
+ LOCAL_ASSERT(strcmp(rval, forelem_cookie) == 0);
+ LOCAL_ASSERT(forelem_tail > pc);
+ tail = forelem_tail - pc;
+ forelem_tail = NULL;
+ LOCAL_ASSERT(forelem_done > pc);
+ len = forelem_done - pc;
+ forelem_done = NULL;
+ goto do_forinhead;
+
+#if JS_HAS_GETTER_SETTER
+ case JSOP_GETTER:
+ case JSOP_SETTER:
+ todo = -2;
+ break;
+#endif
+
+ case JSOP_DUP2:
+ rval = GetStr(ss, ss->top-2);
+ todo = SprintCString(&ss->sprinter, rval);
+ if (todo < 0 || !PushOff(ss, todo, ss->opcodes[ss->top-2]))
+ return NULL;
+ /* FALL THROUGH */
+
+ case JSOP_DUP:
+#if JS_HAS_DESTRUCTURING
+ sn = js_GetSrcNote(jp->script, pc);
+ if (sn) {
+ LOCAL_ASSERT(SN_TYPE(sn) == SRC_DESTRUCT);
+ pc = DecompileDestructuring(ss, pc, endpc);
+ if (!pc)
+ return NULL;
+ len = 0;
+ lval = POP_STR();
+ op = saveop = JSOP_ENUMELEM;
+ rval = POP_STR();
+
+ if (strcmp(rval, forelem_cookie) == 0) {
+ LOCAL_ASSERT(forelem_tail > pc);
+ tail = forelem_tail - pc;
+ forelem_tail = NULL;
+ LOCAL_ASSERT(forelem_done > pc);
+ len = forelem_done - pc;
+ forelem_done = NULL;
+ xval = NULL;
+ atom = NULL;
+
+ /*
+ * Null sn if this is a 'for (var [k, v] = i in o)'
+ * loop, because 'var [k, v] = i;' has already been
+ * hoisted.
+ */
+ if (js_GetSrcNoteOffset(sn, 0) == SRC_DECL_VAR)
+ sn = NULL;
+ goto do_forinhead;
+ }
+
+ todo = Sprint(&ss->sprinter, "%s%s = %s",
+ VarPrefix(sn), lval, rval);
+ break;
+ }
+#endif
+
+ rval = GetStr(ss, ss->top-1);
+ saveop = ss->opcodes[ss->top-1];
+ todo = SprintCString(&ss->sprinter, rval);
+ break;
+
+ case JSOP_SETARG:
+ atom = GetSlotAtom(jp, js_GetArgument, GET_ARGNO(pc));
+ LOCAL_ASSERT(atom);
+ goto do_setname;
+
+ case JSOP_SETVAR:
+ atom = GetSlotAtom(jp, js_GetLocalVariable, GET_VARNO(pc));
+ LOCAL_ASSERT(atom);
+ goto do_setname;
+
+ case JSOP_SETCONST:
+ case JSOP_SETNAME:
+ case JSOP_SETGVAR:
+ atomIndex = GET_ATOM_INDEX(pc);
+
+ do_JSOP_SETCONST:
+ atom = js_GetAtom(cx, &jp->script->atomMap, atomIndex);
+
+ do_setname:
+ lval = QuoteString(&ss->sprinter, ATOM_TO_STRING(atom), 0);
+ if (!lval)
+ return NULL;
+ rval = POP_STR();
+ if (op == JSOP_SETNAME)
+ (void) PopOff(ss, op);
+
+ do_setlval:
+ sn = js_GetSrcNote(jp->script, pc - 1);
+ if (sn && SN_TYPE(sn) == SRC_ASSIGNOP) {
+ todo = Sprint(&ss->sprinter, "%s %s= %s",
+ lval,
+ (lastop == JSOP_GETTER)
+ ? js_getter_str
+ : (lastop == JSOP_SETTER)
+ ? js_setter_str
+ : js_CodeSpec[lastop].token,
+ rval);
+ } else {
+ sn = js_GetSrcNote(jp->script, pc);
+ todo = Sprint(&ss->sprinter, "%s%s = %s",
+ VarPrefix(sn), lval, rval);
+ }
+ if (op == JSOP_SETLOCALPOP) {
+ if (!PushOff(ss, todo, saveop))
+ return NULL;
+ rval = POP_STR();
+ LOCAL_ASSERT(*rval != '\0');
+ js_printf(jp, "\t%s;\n", rval);
+ todo = -2;
+ }
+ break;
+
+ case JSOP_NEW:
+ case JSOP_CALL:
+ case JSOP_EVAL:
+#if JS_HAS_LVALUE_RETURN
+ case JSOP_SETCALL:
+#endif
+ op = JSOP_SETNAME; /* turn off most parens */
+ argc = GET_ARGC(pc);
+ argv = (char **)
+ JS_malloc(cx, (size_t)(argc + 1) * sizeof *argv);
+ if (!argv)
+ return NULL;
+
+ /*
+ * Zero the vector so that the cleanup loop below frees only the
+ * slots actually filled if a JS_strdup call fails partway through.
+ */
+ memset(argv, 0, (size_t)(argc + 1) * sizeof *argv);
+
+ ok = JS_TRUE;
+ for (i = argc; i > 0; i--) {
+ argv[i] = JS_strdup(cx, POP_STR());
+ if (!argv[i]) {
+ ok = JS_FALSE;
+ break;
+ }
+ }
+
+ /* Skip the JSOP_PUSHOBJ-created empty string. */
+ LOCAL_ASSERT(ss->top >= 2);
+ (void) PopOff(ss, op);
+
+ op = saveop;
+ argv[0] = JS_strdup(cx, POP_STR());
+ if (!argv[0])
+ ok = JS_FALSE;
+
+ lval = "(", rval = ")";
+ if (op == JSOP_NEW) {
+ if (argc == 0)
+ lval = rval = "";
+ todo = Sprint(&ss->sprinter, "%s %s%s",
+ js_new_str, argv[0], lval);
+ } else {
+ todo = Sprint(&ss->sprinter, ss_format,
+ argv[0], lval);
+ }
+ if (todo < 0)
+ ok = JS_FALSE;
+
+ for (i = 1; i <= argc; i++) {
+ if (!argv[i] ||
+ Sprint(&ss->sprinter, ss_format,
+ argv[i], (i < argc) ? ", " : "") < 0) {
+ ok = JS_FALSE;
+ break;
+ }
+ }
+ if (Sprint(&ss->sprinter, rval) < 0)
+ ok = JS_FALSE;
+
+ for (i = 0; i <= argc; i++) {
+ if (argv[i])
+ JS_free(cx, argv[i]);
+ }
+ JS_free(cx, argv);
+ if (!ok)
+ return NULL;
+#if JS_HAS_LVALUE_RETURN
+ if (op == JSOP_SETCALL) {
+ if (!PushOff(ss, todo, op))
+ return NULL;
+ todo = Sprint(&ss->sprinter, "");
+ }
+#endif
+ break;
+
+ case JSOP_DELNAME:
+ atom = GET_ATOM(cx, jp->script, pc);
+ lval = QuoteString(&ss->sprinter, ATOM_TO_STRING(atom), 0);
+ if (!lval)
+ return NULL;
+ RETRACT(&ss->sprinter, lval);
+ do_delete_lval:
+ todo = Sprint(&ss->sprinter, "%s %s", js_delete_str, lval);
+ break;
+
+ case JSOP_DELPROP:
+ GET_ATOM_QUOTE_AND_FMT("%s %s[%s]", "%s %s.%s", rval);
+ lval = POP_STR();
+ todo = Sprint(&ss->sprinter, fmt, js_delete_str, lval, rval);
+ break;
+
+ case JSOP_DELELEM:
+ op = JSOP_NOP; /* turn off parens */
+ xval = POP_STR();
+ op = saveop;
+ lval = POP_STR();
+ if (*xval == '\0')
+ goto do_delete_lval;
+ todo = Sprint(&ss->sprinter,
+ (js_CodeSpec[lastop].format & JOF_XMLNAME)
+ ? "%s %s.%s"
+ : "%s %s[%s]",
+ js_delete_str, lval, xval);
+ break;
+
+#if JS_HAS_XML_SUPPORT
+ case JSOP_DELDESC:
+ xval = POP_STR();
+ lval = POP_STR();
+ todo = Sprint(&ss->sprinter, "%s %s..%s",
+ js_delete_str, lval, xval);
+ break;
+#endif
+
+ case JSOP_TYPEOFEXPR:
+ case JSOP_TYPEOF:
+ case JSOP_VOID:
+ rval = POP_STR();
+ todo = Sprint(&ss->sprinter, "%s %s", cs->name, rval);
+ break;
+
+ case JSOP_INCARG:
+ case JSOP_DECARG:
+ atom = GetSlotAtom(jp, js_GetArgument, GET_ARGNO(pc));
+ LOCAL_ASSERT(atom);
+ goto do_incatom;
+
+ case JSOP_INCVAR:
+ case JSOP_DECVAR:
+ atom = GetSlotAtom(jp, js_GetLocalVariable, GET_VARNO(pc));
+ LOCAL_ASSERT(atom);
+ goto do_incatom;
+
+ case JSOP_INCNAME:
+ case JSOP_DECNAME:
+ case JSOP_INCGVAR:
+ case JSOP_DECGVAR:
+ atom = GET_ATOM(cx, jp->script, pc);
+ do_incatom:
+ lval = QuoteString(&ss->sprinter, ATOM_TO_STRING(atom), 0);
+ if (!lval)
+ return NULL;
+ RETRACT(&ss->sprinter, lval);
+ do_inclval:
+ todo = Sprint(&ss->sprinter, ss_format,
+ js_incop_strs[!(cs->format & JOF_INC)], lval);
+ break;
+
+ case JSOP_INCPROP:
+ case JSOP_DECPROP:
+ GET_ATOM_QUOTE_AND_FMT(preindex_format, predot_format, rval);
+
+ /*
+ * Force precedence below the numeric literal opcodes, so that
+ * 42..foo or 10000..toString(16), e.g., decompile with parens
+ * around the left-hand side of dot.
+ */
+ op = JSOP_GETPROP;
+ lval = POP_STR();
+ todo = Sprint(&ss->sprinter, fmt,
+ js_incop_strs[!(cs->format & JOF_INC)],
+ lval, rval);
+ break;
+
+ case JSOP_INCELEM:
+ case JSOP_DECELEM:
+ op = JSOP_NOP; /* turn off parens */
+ xval = POP_STR();
+ op = JSOP_GETELEM;
+ lval = POP_STR();
+ if (*xval != '\0') {
+ todo = Sprint(&ss->sprinter,
+ (js_CodeSpec[lastop].format & JOF_XMLNAME)
+ ? predot_format
+ : preindex_format,
+ js_incop_strs[!(cs->format & JOF_INC)],
+ lval, xval);
+ } else {
+ todo = Sprint(&ss->sprinter, ss_format,
+ js_incop_strs[!(cs->format & JOF_INC)], lval);
+ }
+ break;
+
+ case JSOP_ARGINC:
+ case JSOP_ARGDEC:
+ atom = GetSlotAtom(jp, js_GetArgument, GET_ARGNO(pc));
+ LOCAL_ASSERT(atom);
+ goto do_atominc;
+
+ case JSOP_VARINC:
+ case JSOP_VARDEC:
+ atom = GetSlotAtom(jp, js_GetLocalVariable, GET_VARNO(pc));
+ LOCAL_ASSERT(atom);
+ goto do_atominc;
+
+ case JSOP_NAMEINC:
+ case JSOP_NAMEDEC:
+ case JSOP_GVARINC:
+ case JSOP_GVARDEC:
+ atom = GET_ATOM(cx, jp->script, pc);
+ do_atominc:
+ lval = QuoteString(&ss->sprinter, ATOM_TO_STRING(atom), 0);
+ if (!lval)
+ return NULL;
+ RETRACT(&ss->sprinter, lval);
+ do_lvalinc:
+ todo = Sprint(&ss->sprinter, ss_format,
+ lval, js_incop_strs[!(cs->format & JOF_INC)]);
+ break;
+
+ case JSOP_PROPINC:
+ case JSOP_PROPDEC:
+ GET_ATOM_QUOTE_AND_FMT(postindex_format, postdot_format, rval);
+
+ /*
+ * Force precedence below the numeric literal opcodes, so that
+ * 42..foo or 10000..toString(16), e.g., decompile with parens
+ * around the left-hand side of dot.
+ */
+ op = JSOP_GETPROP;
+ lval = POP_STR();
+ todo = Sprint(&ss->sprinter, fmt, lval, rval,
+ js_incop_strs[!(cs->format & JOF_INC)]);
+ break;
+
+ case JSOP_ELEMINC:
+ case JSOP_ELEMDEC:
+ op = JSOP_NOP; /* turn off parens */
+ xval = POP_STR();
+ op = JSOP_GETELEM;
+ lval = POP_STR();
+ if (*xval != '\0') {
+ todo = Sprint(&ss->sprinter,
+ (js_CodeSpec[lastop].format & JOF_XMLNAME)
+ ? postdot_format
+ : postindex_format,
+ lval, xval,
+ js_incop_strs[!(cs->format & JOF_INC)]);
+ } else {
+ todo = Sprint(&ss->sprinter, ss_format,
+ lval, js_incop_strs[!(cs->format & JOF_INC)]);
+ }
+ break;
+
+ case JSOP_GETPROP2:
+ op = JSOP_GETPROP;
+ (void) PopOff(ss, lastop);
+ /* FALL THROUGH */
+
+ case JSOP_GETPROP:
+ case JSOP_GETXPROP:
+ atom = GET_ATOM(cx, jp->script, pc);
+
+ do_getprop:
+ GET_QUOTE_AND_FMT(index_format, dot_format, rval);
+
+ do_getprop_lval:
+ lval = POP_STR();
+ todo = Sprint(&ss->sprinter, fmt, lval, rval);
+ break;
+
+#if JS_HAS_XML_SUPPORT
+ BEGIN_LITOPX_CASE(JSOP_GETMETHOD)
+ sn = js_GetSrcNote(jp->script, pc);
+ if (sn && SN_TYPE(sn) == SRC_PCBASE)
+ goto do_getprop;
+ GET_QUOTE_AND_FMT("%s.function::[%s]", "%s.function::%s", rval);
+ goto do_getprop_lval;
+
+ BEGIN_LITOPX_CASE(JSOP_SETMETHOD)
+ sn = js_GetSrcNote(jp->script, pc);
+ if (sn && SN_TYPE(sn) == SRC_PCBASE)
+ goto do_setprop;
+ GET_QUOTE_AND_FMT("%s.function::[%s] %s= %s",
+ "%s.function::%s %s= %s",
+ xval);
+ goto do_setprop_rval;
+#endif
+
+ case JSOP_SETPROP:
+ atom = GET_ATOM(cx, jp->script, pc);
+
+ do_setprop:
+ GET_QUOTE_AND_FMT("%s[%s] %s= %s", "%s.%s %s= %s", xval);
+
+ do_setprop_rval:
+ rval = POP_STR();
+
+ /*
+ * Force precedence below the numeric literal opcodes, so that
+ * 42..foo or 10000..toString(16), e.g., decompile with parens
+ * around the left-hand side of dot.
+ */
+ op = JSOP_GETPROP;
+ lval = POP_STR();
+ sn = js_GetSrcNote(jp->script, pc - 1);
+ todo = Sprint(&ss->sprinter, fmt, lval, xval,
+ (sn && SN_TYPE(sn) == SRC_ASSIGNOP)
+ ? (lastop == JSOP_GETTER)
+ ? js_getter_str
+ : (lastop == JSOP_SETTER)
+ ? js_setter_str
+ : js_CodeSpec[lastop].token
+ : "",
+ rval);
+ break;
+
+ case JSOP_GETELEM2:
+ op = JSOP_GETELEM;
+ (void) PopOff(ss, lastop);
+ /* FALL THROUGH */
+
+ case JSOP_GETELEM:
+ case JSOP_GETXELEM:
+ op = JSOP_NOP; /* turn off parens */
+ xval = POP_STR();
+ op = saveop;
+ lval = POP_STR();
+ if (*xval == '\0') {
+ todo = Sprint(&ss->sprinter, "%s", lval);
+ } else {
+ todo = Sprint(&ss->sprinter,
+ (js_CodeSpec[lastop].format & JOF_XMLNAME)
+ ? dot_format
+ : index_format,
+ lval, xval);
+ }
+ break;
+
+ case JSOP_SETELEM:
+ rval = POP_STR();
+ op = JSOP_NOP; /* turn off parens */
+ xval = POP_STR();
+ cs = &js_CodeSpec[ss->opcodes[ss->top]];
+ op = JSOP_GETELEM; /* lval must have high precedence */
+ lval = POP_STR();
+ op = saveop;
+ if (*xval == '\0')
+ goto do_setlval;
+ sn = js_GetSrcNote(jp->script, pc - 1);
+ todo = Sprint(&ss->sprinter,
+ (cs->format & JOF_XMLNAME)
+ ? "%s.%s %s= %s"
+ : "%s[%s] %s= %s",
+ lval, xval,
+ (sn && SN_TYPE(sn) == SRC_ASSIGNOP)
+ ? (lastop == JSOP_GETTER)
+ ? js_getter_str
+ : (lastop == JSOP_SETTER)
+ ? js_setter_str
+ : js_CodeSpec[lastop].token
+ : "",
+ rval);
+ break;
+
+ case JSOP_ARGSUB:
+ i = (jsint) GET_ATOM_INDEX(pc);
+ todo = Sprint(&ss->sprinter, "%s[%d]",
+ js_arguments_str, (int) i);
+ break;
+
+ case JSOP_ARGCNT:
+ todo = Sprint(&ss->sprinter, dot_format,
+ js_arguments_str, js_length_str);
+ break;
+
+ case JSOP_GETARG:
+ i = GET_ARGNO(pc);
+ atom = GetSlotAtom(jp, js_GetArgument, i);
+#if JS_HAS_DESTRUCTURING
+ if (!atom) {
+ todo = Sprint(&ss->sprinter, "%s[%d]", js_arguments_str, i);
+ break;
+ }
+#else
+ LOCAL_ASSERT(atom);
+#endif
+ goto do_name;
+
+ case JSOP_GETVAR:
+ atom = GetSlotAtom(jp, js_GetLocalVariable, GET_VARNO(pc));
+ LOCAL_ASSERT(atom);
+ goto do_name;
+
+ case JSOP_NAME:
+ case JSOP_GETGVAR:
+ atom = GET_ATOM(cx, jp->script, pc);
+ do_name:
+ lval = "";
+ do_qname:
+ sn = js_GetSrcNote(jp->script, pc);
+ rval = QuoteString(&ss->sprinter, ATOM_TO_STRING(atom), 0);
+ if (!rval)
+ return NULL;
+ RETRACT(&ss->sprinter, rval);
+ todo = Sprint(&ss->sprinter, "%s%s%s",
+ VarPrefix(sn), lval, rval);
+ break;
+
+ case JSOP_UINT16:
+ i = (jsint) GET_ATOM_INDEX(pc);
+ goto do_sprint_int;
+
+ case JSOP_UINT24:
+ i = (jsint) GET_UINT24(pc);
+ do_sprint_int:
+ todo = Sprint(&ss->sprinter, "%u", (unsigned) i);
+ break;
+
+ case JSOP_LITERAL:
+ atomIndex = GET_LITERAL_INDEX(pc);
+ goto do_JSOP_STRING;
+
+ case JSOP_FINDNAME:
+ atomIndex = GET_LITERAL_INDEX(pc);
+ todo = Sprint(&ss->sprinter, "");
+ if (todo < 0 || !PushOff(ss, todo, op))
+ return NULL;
+ atom = js_GetAtom(cx, &jp->script->atomMap, atomIndex);
+ goto do_name;
+
+ case JSOP_LITOPX:
+ atomIndex = GET_LITERAL_INDEX(pc);
+ pc2 = pc + 1 + LITERAL_INDEX_LEN;
+ op = saveop = *pc2;
+ pc += len - (1 + ATOM_INDEX_LEN);
+ cs = &js_CodeSpec[op];
+ len = cs->length;
+ switch (op) {
+ case JSOP_ANONFUNOBJ: goto do_JSOP_ANONFUNOBJ;
+ case JSOP_BINDNAME: goto do_JSOP_BINDNAME;
+ case JSOP_CLOSURE: goto do_JSOP_CLOSURE;
+#if JS_HAS_EXPORT_IMPORT
+ case JSOP_EXPORTNAME: goto do_JSOP_EXPORTNAME;
+#endif
+#if JS_HAS_XML_SUPPORT
+ case JSOP_GETMETHOD: goto do_JSOP_GETMETHOD;
+ case JSOP_SETMETHOD: goto do_JSOP_SETMETHOD;
+#endif
+ case JSOP_NAMEDFUNOBJ: goto do_JSOP_NAMEDFUNOBJ;
+ case JSOP_NUMBER: goto do_JSOP_NUMBER;
+ case JSOP_OBJECT: goto do_JSOP_OBJECT;
+#if JS_HAS_XML_SUPPORT
+ case JSOP_QNAMECONST: goto do_JSOP_QNAMECONST;
+ case JSOP_QNAMEPART: goto do_JSOP_QNAMEPART;
+#endif
+ case JSOP_REGEXP: goto do_JSOP_REGEXP;
+ case JSOP_SETCONST: goto do_JSOP_SETCONST;
+ case JSOP_STRING: goto do_JSOP_STRING;
+#if JS_HAS_XML_SUPPORT
+ case JSOP_XMLCDATA: goto do_JSOP_XMLCDATA;
+ case JSOP_XMLCOMMENT: goto do_JSOP_XMLCOMMENT;
+ case JSOP_XMLOBJECT: goto do_JSOP_XMLOBJECT;
+ case JSOP_XMLPI: goto do_JSOP_XMLPI;
+#endif
+ case JSOP_ENTERBLOCK: goto do_JSOP_ENTERBLOCK;
+ default: LOCAL_ASSERT(0);
+ }
+ /* NOTREACHED */
+ break;
+
+ BEGIN_LITOPX_CASE(JSOP_NUMBER)
+ val = ATOM_KEY(atom);
+ if (JSVAL_IS_INT(val)) {
+ long ival = (long)JSVAL_TO_INT(val);
+ todo = Sprint(&ss->sprinter, "%ld", ival);
+ } else {
+ char buf[DTOSTR_STANDARD_BUFFER_SIZE];
+ char *numStr = JS_dtostr(buf, sizeof buf, DTOSTR_STANDARD,
+ 0, *JSVAL_TO_DOUBLE(val));
+ if (!numStr) {
+ JS_ReportOutOfMemory(cx);
+ return NULL;
+ }
+                todo = Sprint(&ss->sprinter, "%s", numStr);
+ }
+ END_LITOPX_CASE
+
+ BEGIN_LITOPX_CASE(JSOP_STRING)
+ rval = QuoteString(&ss->sprinter, ATOM_TO_STRING(atom),
+ inXML ? DONT_ESCAPE : '"');
+ if (!rval)
+ return NULL;
+ todo = STR2OFF(&ss->sprinter, rval);
+ END_LITOPX_CASE
+
+ case JSOP_OBJECT:
+ case JSOP_REGEXP:
+ case JSOP_ANONFUNOBJ:
+ case JSOP_NAMEDFUNOBJ:
+ atomIndex = GET_ATOM_INDEX(pc);
+
+ do_JSOP_OBJECT:
+ do_JSOP_REGEXP:
+ do_JSOP_ANONFUNOBJ:
+ do_JSOP_NAMEDFUNOBJ:
+ atom = js_GetAtom(cx, &jp->script->atomMap, atomIndex);
+ if (op == JSOP_OBJECT || op == JSOP_REGEXP) {
+ if (!js_regexp_toString(cx, ATOM_TO_OBJECT(atom), 0, NULL,
+ &val)) {
+ return NULL;
+ }
+ } else {
+ if (!js_fun_toString(cx, ATOM_TO_OBJECT(atom),
+ JS_IN_GROUP_CONTEXT |
+ JS_DONT_PRETTY_PRINT,
+ 0, NULL, &val)) {
+ return NULL;
+ }
+ }
+ str = JSVAL_TO_STRING(val);
+ todo = SprintPut(&ss->sprinter, JS_GetStringBytes(str),
+ JSSTRING_LENGTH(str));
+ break;
+
+ case JSOP_TABLESWITCH:
+ case JSOP_TABLESWITCHX:
+ {
+ ptrdiff_t jmplen, off, off2;
+ jsint j, n, low, high;
+ TableEntry *table, pivot;
+
+ sn = js_GetSrcNote(jp->script, pc);
+ LOCAL_ASSERT(sn && SN_TYPE(sn) == SRC_SWITCH);
+ len = js_GetSrcNoteOffset(sn, 0);
+ jmplen = (op == JSOP_TABLESWITCH) ? JUMP_OFFSET_LEN
+ : JUMPX_OFFSET_LEN;
+ pc2 = pc;
+ off = GetJumpOffset(pc, pc2);
+ pc2 += jmplen;
+ low = GET_JUMP_OFFSET(pc2);
+ pc2 += JUMP_OFFSET_LEN;
+ high = GET_JUMP_OFFSET(pc2);
+ pc2 += JUMP_OFFSET_LEN;
+
+ n = high - low + 1;
+ if (n == 0) {
+ table = NULL;
+ j = 0;
+ } else {
+ table = (TableEntry *)
+ JS_malloc(cx, (size_t)n * sizeof *table);
+ if (!table)
+ return NULL;
+ for (i = j = 0; i < n; i++) {
+ table[j].label = NULL;
+ off2 = GetJumpOffset(pc, pc2);
+ if (off2) {
+ sn = js_GetSrcNote(jp->script, pc2);
+ if (sn) {
+ LOCAL_ASSERT(SN_TYPE(sn) == SRC_LABEL);
+ table[j].label =
+ js_GetAtom(cx, &jp->script->atomMap,
+ (jsatomid)
+ js_GetSrcNoteOffset(sn, 0));
+ }
+ table[j].key = INT_TO_JSVAL(low + i);
+ table[j].offset = off2;
+ table[j].order = j;
+ j++;
+ }
+ pc2 += jmplen;
+ }
+ js_HeapSort(table, (size_t) j, &pivot, sizeof(TableEntry),
+ CompareOffsets, NULL);
+ }
+
+ ok = DecompileSwitch(ss, table, (uintN)j, pc, len, off,
+ JS_FALSE);
+ JS_free(cx, table);
+ if (!ok)
+ return NULL;
+ todo = -2;
+ break;
+ }
+
+ case JSOP_LOOKUPSWITCH:
+ case JSOP_LOOKUPSWITCHX:
+ {
+ ptrdiff_t jmplen, off, off2;
+ jsatomid npairs, k;
+ TableEntry *table;
+
+ sn = js_GetSrcNote(jp->script, pc);
+ LOCAL_ASSERT(sn && SN_TYPE(sn) == SRC_SWITCH);
+ len = js_GetSrcNoteOffset(sn, 0);
+ jmplen = (op == JSOP_LOOKUPSWITCH) ? JUMP_OFFSET_LEN
+ : JUMPX_OFFSET_LEN;
+ pc2 = pc;
+ off = GetJumpOffset(pc, pc2);
+ pc2 += jmplen;
+ npairs = GET_ATOM_INDEX(pc2);
+ pc2 += ATOM_INDEX_LEN;
+
+ table = (TableEntry *)
+ JS_malloc(cx, (size_t)npairs * sizeof *table);
+ if (!table)
+ return NULL;
+ for (k = 0; k < npairs; k++) {
+ sn = js_GetSrcNote(jp->script, pc2);
+ if (sn) {
+ LOCAL_ASSERT(SN_TYPE(sn) == SRC_LABEL);
+ table[k].label =
+ js_GetAtom(cx, &jp->script->atomMap, (jsatomid)
+ js_GetSrcNoteOffset(sn, 0));
+ } else {
+ table[k].label = NULL;
+ }
+ atom = GET_ATOM(cx, jp->script, pc2);
+ pc2 += ATOM_INDEX_LEN;
+ off2 = GetJumpOffset(pc, pc2);
+ pc2 += jmplen;
+ table[k].key = ATOM_KEY(atom);
+ table[k].offset = off2;
+ }
+
+ ok = DecompileSwitch(ss, table, (uintN)npairs, pc, len, off,
+ JS_FALSE);
+ JS_free(cx, table);
+ if (!ok)
+ return NULL;
+ todo = -2;
+ break;
+ }
+
+ case JSOP_CONDSWITCH:
+ {
+ ptrdiff_t off, off2, caseOff;
+ jsint ncases;
+ TableEntry *table;
+
+ sn = js_GetSrcNote(jp->script, pc);
+ LOCAL_ASSERT(sn && SN_TYPE(sn) == SRC_SWITCH);
+ len = js_GetSrcNoteOffset(sn, 0);
+ off = js_GetSrcNoteOffset(sn, 1);
+
+ /*
+ * Count the cases using offsets from switch to first case,
+ * and case to case, stored in srcnote immediates.
+ */
+ pc2 = pc;
+ off2 = off;
+ for (ncases = 0; off2 != 0; ncases++) {
+ pc2 += off2;
+ LOCAL_ASSERT(*pc2 == JSOP_CASE || *pc2 == JSOP_DEFAULT ||
+ *pc2 == JSOP_CASEX || *pc2 == JSOP_DEFAULTX);
+ if (*pc2 == JSOP_DEFAULT || *pc2 == JSOP_DEFAULTX) {
+ /* End of cases, but count default as a case. */
+ off2 = 0;
+ } else {
+ sn = js_GetSrcNote(jp->script, pc2);
+ LOCAL_ASSERT(sn && SN_TYPE(sn) == SRC_PCDELTA);
+ off2 = js_GetSrcNoteOffset(sn, 0);
+ }
+ }
+
+ /*
+ * Allocate table and rescan the cases using their srcnotes,
+ * stashing each case's delta from switch top in table[i].key,
+ * and the distance to its statements in table[i].offset.
+ */
+ table = (TableEntry *)
+ JS_malloc(cx, (size_t)ncases * sizeof *table);
+ if (!table)
+ return NULL;
+ pc2 = pc;
+ off2 = off;
+ for (i = 0; i < ncases; i++) {
+ pc2 += off2;
+ LOCAL_ASSERT(*pc2 == JSOP_CASE || *pc2 == JSOP_DEFAULT ||
+ *pc2 == JSOP_CASEX || *pc2 == JSOP_DEFAULTX);
+ caseOff = pc2 - pc;
+ table[i].key = INT_TO_JSVAL((jsint) caseOff);
+ table[i].offset = caseOff + GetJumpOffset(pc2, pc2);
+ if (*pc2 == JSOP_CASE || *pc2 == JSOP_CASEX) {
+ sn = js_GetSrcNote(jp->script, pc2);
+ LOCAL_ASSERT(sn && SN_TYPE(sn) == SRC_PCDELTA);
+ off2 = js_GetSrcNoteOffset(sn, 0);
+ }
+ }
+
+ /*
+ * Find offset of default code by fetching the default offset
+ * from the end of table. JSOP_CONDSWITCH always has a default
+ * case at the end.
+ */
+ off = JSVAL_TO_INT(table[ncases-1].key);
+ pc2 = pc + off;
+ off += GetJumpOffset(pc2, pc2);
+
+ ok = DecompileSwitch(ss, table, (uintN)ncases, pc, len, off,
+ JS_TRUE);
+ JS_free(cx, table);
+ if (!ok)
+ return NULL;
+ todo = -2;
+ break;
+ }
+
+ case JSOP_CASE:
+ case JSOP_CASEX:
+ {
+ lval = POP_STR();
+ if (!lval)
+ return NULL;
+ js_printf(jp, "\tcase %s:\n", lval);
+ todo = -2;
+ break;
+ }
+
+ case JSOP_NEW_EQ:
+ case JSOP_NEW_NE:
+ rval = POP_STR();
+ lval = POP_STR();
+ todo = Sprint(&ss->sprinter, "%s %c== %s",
+ lval, (op == JSOP_NEW_EQ) ? '=' : '!', rval);
+ break;
+
+ BEGIN_LITOPX_CASE(JSOP_CLOSURE)
+ LOCAL_ASSERT(ATOM_IS_OBJECT(atom));
+ todo = -2;
+ goto do_function;
+ END_LITOPX_CASE
+
+#if JS_HAS_EXPORT_IMPORT
+ case JSOP_EXPORTALL:
+ js_printf(jp, "\texport *;\n");
+ todo = -2;
+ break;
+
+ BEGIN_LITOPX_CASE(JSOP_EXPORTNAME)
+ rval = QuoteString(&ss->sprinter, ATOM_TO_STRING(atom), 0);
+ if (!rval)
+ return NULL;
+ RETRACT(&ss->sprinter, rval);
+ js_printf(jp, "\texport %s;\n", rval);
+ todo = -2;
+ END_LITOPX_CASE
+
+ case JSOP_IMPORTALL:
+ lval = POP_STR();
+ js_printf(jp, "\timport %s.*;\n", lval);
+ todo = -2;
+ break;
+
+ case JSOP_IMPORTPROP:
+ do_importprop:
+ GET_ATOM_QUOTE_AND_FMT("\timport %s[%s];\n",
+ "\timport %s.%s;\n",
+ rval);
+ lval = POP_STR();
+ js_printf(jp, fmt, lval, rval);
+ todo = -2;
+ break;
+
+ case JSOP_IMPORTELEM:
+ xval = POP_STR();
+ op = JSOP_GETELEM;
+ if (js_CodeSpec[lastop].format & JOF_XMLNAME)
+ goto do_importprop;
+ lval = POP_STR();
+ js_printf(jp, "\timport %s[%s];\n", lval, xval);
+ todo = -2;
+ break;
+#endif /* JS_HAS_EXPORT_IMPORT */
+
+ case JSOP_TRAP:
+ op = JS_GetTrapOpcode(cx, jp->script, pc);
+ if (op == JSOP_LIMIT)
+ return NULL;
+ saveop = op;
+ *pc = op;
+ cs = &js_CodeSpec[op];
+ len = cs->length;
+ DECOMPILE_CODE(pc, len);
+ *pc = JSOP_TRAP;
+ todo = -2;
+ break;
+
+ case JSOP_NEWINIT:
+ {
+ JSBool isArray;
+
+ LOCAL_ASSERT(ss->top >= 2);
+ (void) PopOff(ss, op);
+ lval = POP_STR();
+ isArray = (*lval == 'A');
+ todo = ss->sprinter.offset;
+#if JS_HAS_SHARP_VARS
+ op = (JSOp)pc[len];
+ if (op == JSOP_DEFSHARP) {
+ pc += len;
+ cs = &js_CodeSpec[op];
+ len = cs->length;
+ i = (jsint) GET_ATOM_INDEX(pc);
+ if (Sprint(&ss->sprinter, "#%u=", (unsigned) i) < 0)
+ return NULL;
+ }
+#endif /* JS_HAS_SHARP_VARS */
+ if (isArray) {
+ ++ss->inArrayInit;
+ if (SprintCString(&ss->sprinter, "[") < 0)
+ return NULL;
+ } else {
+ if (SprintCString(&ss->sprinter, "{") < 0)
+ return NULL;
+ }
+ break;
+ }
+
+ case JSOP_ENDINIT:
+ op = JSOP_NOP; /* turn off parens */
+ rval = POP_STR();
+ sn = js_GetSrcNote(jp->script, pc);
+
+ /* Skip any #n= prefix to find the opening bracket. */
+ for (xval = rval; *xval != '[' && *xval != '{'; xval++)
+ continue;
+ if (*xval == '[')
+ --ss->inArrayInit;
+ todo = Sprint(&ss->sprinter, "%s%s%c",
+ rval,
+ (sn && SN_TYPE(sn) == SRC_CONTINUE) ? ", " : "",
+ (*xval == '[') ? ']' : '}');
+ break;
+
+ case JSOP_INITPROP:
+ atom = GET_ATOM(cx, jp->script, pc);
+ xval = QuoteString(&ss->sprinter, ATOM_TO_STRING(atom),
+ (jschar)
+ (ATOM_IS_IDENTIFIER(atom) ? 0 : '\''));
+ if (!xval)
+ return NULL;
+ rval = POP_STR();
+ lval = POP_STR();
+ do_initprop:
+#ifdef OLD_GETTER_SETTER
+ todo = Sprint(&ss->sprinter, "%s%s%s%s%s:%s",
+ lval,
+ (lval[1] != '\0') ? ", " : "",
+ xval,
+ (lastop == JSOP_GETTER || lastop == JSOP_SETTER)
+ ? " " : "",
+ (lastop == JSOP_GETTER) ? js_getter_str :
+ (lastop == JSOP_SETTER) ? js_setter_str :
+ "",
+ rval);
+#else
+ if (lastop == JSOP_GETTER || lastop == JSOP_SETTER) {
+ if (!atom || !ATOM_IS_STRING(atom) ||
+ !ATOM_IS_IDENTIFIER(atom) ||
+ ATOM_IS_KEYWORD(atom) ||
+ ((ss->opcodes[ss->top+1] != JSOP_ANONFUNOBJ ||
+ strncmp(rval, js_function_str, 8) != 0) &&
+ ss->opcodes[ss->top+1] != JSOP_NAMEDFUNOBJ)) {
+ todo = Sprint(&ss->sprinter, "%s%s%s%s%s:%s", lval,
+ (lval[1] != '\0') ? ", " : "", xval,
+ (lastop == JSOP_GETTER ||
+ lastop == JSOP_SETTER)
+ ? " " : "",
+ (lastop == JSOP_GETTER) ? js_getter_str :
+ (lastop == JSOP_SETTER) ? js_setter_str :
+ "",
+ rval);
+ } else {
+ rval += 8 + 1;
+ LOCAL_ASSERT(rval[strlen(rval)-1] == '}');
+ todo = Sprint(&ss->sprinter, "%s%s%s %s%s",
+ lval,
+ (lval[1] != '\0') ? ", " : "",
+ (lastop == JSOP_GETTER)
+ ? js_get_str : js_set_str,
+ xval,
+ rval);
+ }
+ } else {
+ todo = Sprint(&ss->sprinter, "%s%s%s:%s",
+ lval,
+ (lval[1] != '\0') ? ", " : "",
+ xval,
+ rval);
+ }
+#endif
+ break;
+
+ case JSOP_INITELEM:
+ rval = POP_STR();
+ xval = POP_STR();
+ lval = POP_STR();
+ sn = js_GetSrcNote(jp->script, pc);
+ if (sn && SN_TYPE(sn) == SRC_INITPROP) {
+ atom = NULL;
+ goto do_initprop;
+ }
+ todo = Sprint(&ss->sprinter, "%s%s%s",
+ lval,
+ (lval[1] != '\0' || *xval != '0') ? ", " : "",
+ rval);
+ break;
+
+#if JS_HAS_SHARP_VARS
+ case JSOP_DEFSHARP:
+ i = (jsint) GET_ATOM_INDEX(pc);
+ rval = POP_STR();
+ todo = Sprint(&ss->sprinter, "#%u=%s", (unsigned) i, rval);
+ break;
+
+ case JSOP_USESHARP:
+ i = (jsint) GET_ATOM_INDEX(pc);
+ todo = Sprint(&ss->sprinter, "#%u#", (unsigned) i);
+ break;
+#endif /* JS_HAS_SHARP_VARS */
+
+#if JS_HAS_DEBUGGER_KEYWORD
+ case JSOP_DEBUGGER:
+ js_printf(jp, "\tdebugger;\n");
+ todo = -2;
+ break;
+#endif /* JS_HAS_DEBUGGER_KEYWORD */
+
+#if JS_HAS_XML_SUPPORT
+ case JSOP_STARTXML:
+ case JSOP_STARTXMLEXPR:
+ inXML = op == JSOP_STARTXML;
+ todo = -2;
+ break;
+
+ case JSOP_DEFXMLNS:
+ rval = POP_STR();
+ js_printf(jp, "\t%s %s %s = %s;\n",
+ js_default_str, js_xml_str, js_namespace_str, rval);
+ todo = -2;
+ break;
+
+ case JSOP_ANYNAME:
+ if (pc[JSOP_ANYNAME_LENGTH] == JSOP_TOATTRNAME) {
+ len += JSOP_TOATTRNAME_LENGTH;
+ todo = SprintPut(&ss->sprinter, "@*", 2);
+ } else {
+ todo = SprintPut(&ss->sprinter, "*", 1);
+ }
+ break;
+
+ BEGIN_LITOPX_CASE(JSOP_QNAMEPART)
+ if (pc[JSOP_QNAMEPART_LENGTH] == JSOP_TOATTRNAME) {
+ saveop = JSOP_TOATTRNAME;
+ len += JSOP_TOATTRNAME_LENGTH;
+ lval = "@";
+ goto do_qname;
+ }
+ goto do_name;
+ END_LITOPX_CASE
+
+ BEGIN_LITOPX_CASE(JSOP_QNAMECONST)
+ rval = QuoteString(&ss->sprinter, ATOM_TO_STRING(atom), 0);
+ if (!rval)
+ return NULL;
+ RETRACT(&ss->sprinter, rval);
+ lval = POP_STR();
+ todo = Sprint(&ss->sprinter, "%s::%s", lval, rval);
+ END_LITOPX_CASE
+
+ case JSOP_QNAME:
+ rval = POP_STR();
+ lval = POP_STR();
+ todo = Sprint(&ss->sprinter, "%s::[%s]", lval, rval);
+ break;
+
+ case JSOP_TOATTRNAME:
+ op = JSOP_NOP; /* turn off parens */
+ rval = POP_STR();
+ todo = Sprint(&ss->sprinter, "@[%s]", rval);
+ break;
+
+ case JSOP_TOATTRVAL:
+ todo = -2;
+ break;
+
+ case JSOP_ADDATTRNAME:
+ rval = POP_STR();
+ lval = POP_STR();
+ todo = Sprint(&ss->sprinter, "%s %s", lval, rval);
+ /* This gets reset by all XML tag expressions. */
+ quoteAttr = JS_TRUE;
+ break;
+
+ case JSOP_ADDATTRVAL:
+ rval = POP_STR();
+ lval = POP_STR();
+ if (quoteAttr)
+ todo = Sprint(&ss->sprinter, "%s=\"%s\"", lval, rval);
+ else
+ todo = Sprint(&ss->sprinter, "%s=%s", lval, rval);
+ break;
+
+ case JSOP_BINDXMLNAME:
+ /* Leave the name stacked and push a dummy string. */
+ todo = Sprint(&ss->sprinter, "");
+ break;
+
+ case JSOP_SETXMLNAME:
+ /* Pop the r.h.s., the dummy string, and the name. */
+ rval = POP_STR();
+ (void) PopOff(ss, op);
+ lval = POP_STR();
+ goto do_setlval;
+
+ case JSOP_XMLELTEXPR:
+ case JSOP_XMLTAGEXPR:
+ todo = Sprint(&ss->sprinter, "{%s}", POP_STR());
+ inXML = JS_TRUE;
+ /* If we're an attribute value, we shouldn't quote this. */
+ quoteAttr = JS_FALSE;
+ break;
+
+ case JSOP_TOXMLLIST:
+ op = JSOP_NOP; /* turn off parens */
+ todo = Sprint(&ss->sprinter, "<>%s</>", POP_STR());
+ inXML = JS_FALSE;
+ break;
+
+ case JSOP_FOREACH:
+ foreach = JS_TRUE;
+ todo = -2;
+ break;
+
+ case JSOP_TOXML:
+ inXML = JS_FALSE;
+ /* FALL THROUGH */
+
+ case JSOP_XMLNAME:
+ case JSOP_FILTER:
+ /* Conversion and prefix ops do nothing in the decompiler. */
+ todo = -2;
+ break;
+
+ case JSOP_ENDFILTER:
+ rval = POP_STR();
+ lval = POP_STR();
+ todo = Sprint(&ss->sprinter, "%s.(%s)", lval, rval);
+ break;
+
+ case JSOP_DESCENDANTS:
+ rval = POP_STR();
+ lval = POP_STR();
+ todo = Sprint(&ss->sprinter, "%s..%s", lval, rval);
+ break;
+
+ BEGIN_LITOPX_CASE(JSOP_XMLOBJECT)
+ todo = Sprint(&ss->sprinter, "<xml address='%p'>",
+ ATOM_TO_OBJECT(atom));
+ END_LITOPX_CASE
+
+ BEGIN_LITOPX_CASE(JSOP_XMLCDATA)
+ todo = SprintPut(&ss->sprinter, "<![CDATA[", 9);
+ if (!QuoteString(&ss->sprinter, ATOM_TO_STRING(atom), 0))
+ return NULL;
+ SprintPut(&ss->sprinter, "]]>", 3);
+ END_LITOPX_CASE
+
+ BEGIN_LITOPX_CASE(JSOP_XMLCOMMENT)
+ todo = SprintPut(&ss->sprinter, "<!--", 4);
+ if (!QuoteString(&ss->sprinter, ATOM_TO_STRING(atom), 0))
+ return NULL;
+ SprintPut(&ss->sprinter, "-->", 3);
+ END_LITOPX_CASE
+
+ BEGIN_LITOPX_CASE(JSOP_XMLPI)
+ rval = JS_strdup(cx, POP_STR());
+ if (!rval)
+ return NULL;
+ todo = SprintPut(&ss->sprinter, "<?", 2);
+ ok = QuoteString(&ss->sprinter, ATOM_TO_STRING(atom), 0) &&
+ (*rval == '\0' ||
+ (SprintPut(&ss->sprinter, " ", 1) >= 0 &&
+ SprintCString(&ss->sprinter, rval)));
+ JS_free(cx, (char *)rval);
+ if (!ok)
+ return NULL;
+ SprintPut(&ss->sprinter, "?>", 2);
+ END_LITOPX_CASE
+
+ case JSOP_GETFUNNS:
+ todo = SprintPut(&ss->sprinter, js_function_str, 8);
+ break;
+#endif /* JS_HAS_XML_SUPPORT */
+
+ default:
+ todo = -2;
+ break;
+
+#undef BEGIN_LITOPX_CASE
+#undef END_LITOPX_CASE
+ }
+ }
+
+ if (todo < 0) {
+ /* -2 means "don't push", -1 means reported error. */
+ if (todo == -1)
+ return NULL;
+ } else {
+ if (!PushOff(ss, todo, saveop))
+ return NULL;
+ }
+ pc += len;
+ }
+
+/*
+ * Undefine local macros.
+ */
+#undef inXML
+#undef DECOMPILE_CODE
+#undef POP_STR
+#undef LOCAL_ASSERT
+#undef ATOM_IS_IDENTIFIER
+#undef GET_QUOTE_AND_FMT
+#undef GET_ATOM_QUOTE_AND_FMT
+
+ return pc;
+}
+
+static JSBool
+InitSprintStack(JSContext *cx, SprintStack *ss, JSPrinter *jp, uintN depth)
+{
+ size_t offsetsz, opcodesz;
+ void *space;
+
+ INIT_SPRINTER(cx, &ss->sprinter, &cx->tempPool, PAREN_SLOP);
+
+ /* Allocate the parallel (to avoid padding) offset and opcode stacks. */
+ offsetsz = depth * sizeof(ptrdiff_t);
+ opcodesz = depth * sizeof(jsbytecode);
+ JS_ARENA_ALLOCATE(space, &cx->tempPool, offsetsz + opcodesz);
+ if (!space)
+ return JS_FALSE;
+ ss->offsets = (ptrdiff_t *) space;
+ ss->opcodes = (jsbytecode *) ((char *)space + offsetsz);
+
+ ss->top = ss->inArrayInit = 0;
+ ss->printer = jp;
+ return JS_TRUE;
+}
+
+JSBool
+js_DecompileCode(JSPrinter *jp, JSScript *script, jsbytecode *pc, uintN len,
+ uintN pcdepth)
+{
+ uintN depth, i;
+ SprintStack ss;
+ JSContext *cx;
+ void *mark;
+ JSBool ok;
+ JSScript *oldscript;
+ char *last;
+
+ depth = script->depth;
+ JS_ASSERT(pcdepth <= depth);
+
+ /* Initialize a sprinter for use with the offset stack. */
+ cx = jp->sprinter.context;
+ mark = JS_ARENA_MARK(&cx->tempPool);
+ ok = InitSprintStack(cx, &ss, jp, depth);
+ if (!ok)
+ goto out;
+
+ /*
+ * If we are called from js_DecompileValueGenerator with a portion of
+ * script's bytecode that starts with a non-zero model stack depth given
+ * by pcdepth, attempt to initialize the missing string offsets in ss to
+ * |spindex| negative indexes from fp->sp for the activation fp in which
+ * the error arose.
+ *
+ * See js_DecompileValueGenerator for how its |spindex| parameter is used,
+ * and see also GetOff, which makes use of the ss.offsets[i] < -1 that are
+ * potentially stored below.
+ */
+ ss.top = pcdepth;
+ if (pcdepth != 0) {
+ JSStackFrame *fp;
+ ptrdiff_t top;
+
+ for (fp = cx->fp; fp && !fp->script; fp = fp->down)
+ continue;
+ top = fp ? fp->sp - fp->spbase : 0;
+ for (i = 0; i < pcdepth; i++) {
+ ss.offsets[i] = -1;
+ ss.opcodes[i] = JSOP_NOP;
+ }
+ if (fp && fp->pc == pc && (uintN)top == pcdepth) {
+ for (i = 0; i < pcdepth; i++) {
+ ptrdiff_t off;
+ jsbytecode *genpc;
+
+ off = (intN)i - (intN)depth;
+ genpc = (jsbytecode *) fp->spbase[off];
+ if (JS_UPTRDIFF(genpc, script->code) < script->length) {
+ ss.offsets[i] += (ptrdiff_t)i - top;
+ ss.opcodes[i] = *genpc;
+ }
+ }
+ }
+ }
+
+ /* Call recursive subroutine to do the hard work. */
+ oldscript = jp->script;
+ jp->script = script;
+ ok = Decompile(&ss, pc, len) != NULL;
+ jp->script = oldscript;
+
+ /* If the given code didn't empty the stack, do it now. */
+ if (ss.top) {
+ do {
+ last = OFF2STR(&ss.sprinter, PopOff(&ss, JSOP_POP));
+ } while (ss.top > pcdepth);
+ js_printf(jp, "%s", last);
+ }
+
+out:
+ /* Free all temporary stuff allocated under this call. */
+ JS_ARENA_RELEASE(&cx->tempPool, mark);
+ return ok;
+}
+
+JSBool
+js_DecompileScript(JSPrinter *jp, JSScript *script)
+{
+ return js_DecompileCode(jp, script, script->code, (uintN)script->length, 0);
+}
+
+static const char native_code_str[] = "\t[native code]\n";
+
+JSBool
+js_DecompileFunctionBody(JSPrinter *jp, JSFunction *fun)
+{
+ JSScript *script;
+ JSScope *scope, *save;
+ JSBool ok;
+
+ if (!FUN_INTERPRETED(fun)) {
+ js_printf(jp, native_code_str);
+ return JS_TRUE;
+ }
+ script = fun->u.i.script;
+ scope = fun->object ? OBJ_SCOPE(fun->object) : NULL;
+ save = jp->scope;
+ jp->scope = scope;
+ ok = js_DecompileCode(jp, script, script->code, (uintN)script->length, 0);
+ jp->scope = save;
+ return ok;
+}
+
+JSBool
+js_DecompileFunction(JSPrinter *jp, JSFunction *fun)
+{
+ JSContext *cx;
+ uintN i, nargs, indent;
+ void *mark;
+ JSAtom **params;
+ JSScope *scope, *oldscope;
+ JSScopeProperty *sprop;
+ jsbytecode *pc, *endpc;
+ ptrdiff_t len;
+ JSBool ok;
+
+ /*
+ * If pretty, conform to ECMA-262 Edition 3, 15.3.4.2, by decompiling a
+ * FunctionDeclaration. Otherwise, check the JSFUN_LAMBDA flag and force
+ * an expression by parenthesizing.
+ */
+ if (jp->pretty) {
+ js_printf(jp, "\t");
+ } else {
+ if (!jp->grouped && (fun->flags & JSFUN_LAMBDA))
+ js_puts(jp, "(");
+ }
+ if (JSFUN_GETTER_TEST(fun->flags))
+ js_printf(jp, "%s ", js_getter_str);
+ else if (JSFUN_SETTER_TEST(fun->flags))
+ js_printf(jp, "%s ", js_setter_str);
+
+ js_printf(jp, "%s ", js_function_str);
+ if (fun->atom && !QuoteString(&jp->sprinter, ATOM_TO_STRING(fun->atom), 0))
+ return JS_FALSE;
+ js_puts(jp, "(");
+
+ if (FUN_INTERPRETED(fun) && fun->object) {
+ size_t paramsize;
+#ifdef JS_HAS_DESTRUCTURING
+ SprintStack ss;
+ JSScript *oldscript;
+#endif
+
+ /*
+ * Print the parameters.
+ *
+ * This code is complicated by the need to handle duplicate parameter
+ * names, as required by ECMA (bah!). A duplicate parameter is stored
+ * as another node with the same id (the parameter name) but different
+ * shortid (the argument index) along the property tree ancestor line
+ * starting at SCOPE_LAST_PROP(scope). Only the last duplicate param
+ * is mapped by the scope's hash table.
+ */
+ cx = jp->sprinter.context;
+ nargs = fun->nargs;
+ mark = JS_ARENA_MARK(&cx->tempPool);
+ paramsize = nargs * sizeof(JSAtom *);
+ JS_ARENA_ALLOCATE_CAST(params, JSAtom **, &cx->tempPool, paramsize);
+ if (!params) {
+ JS_ReportOutOfMemory(cx);
+ return JS_FALSE;
+ }
+
+ memset(params, 0, paramsize);
+ scope = OBJ_SCOPE(fun->object);
+ for (sprop = SCOPE_LAST_PROP(scope); sprop; sprop = sprop->parent) {
+ if (sprop->getter != js_GetArgument)
+ continue;
+ JS_ASSERT(sprop->flags & SPROP_HAS_SHORTID);
+ JS_ASSERT((uint16) sprop->shortid < nargs);
+ JS_ASSERT(JSID_IS_ATOM(sprop->id));
+ params[(uint16) sprop->shortid] = JSID_TO_ATOM(sprop->id);
+ }
+
+ pc = fun->u.i.script->main;
+ endpc = pc + fun->u.i.script->length;
+ ok = JS_TRUE;
+
+#ifdef JS_HAS_DESTRUCTURING
+ /* Skip JSOP_GENERATOR in case of destructuring parameters. */
+ if (*pc == JSOP_GENERATOR)
+ pc += JSOP_GENERATOR_LENGTH;
+
+ ss.printer = NULL;
+ oldscript = jp->script;
+ jp->script = fun->u.i.script;
+ oldscope = jp->scope;
+ jp->scope = scope;
+#endif
+
+ for (i = 0; i < nargs; i++) {
+ if (i > 0)
+ js_puts(jp, ", ");
+
+#if JS_HAS_DESTRUCTURING
+#define LOCAL_ASSERT(expr) LOCAL_ASSERT_RV(expr, JS_FALSE)
+
+ if (!params[i]) {
+ ptrdiff_t todo;
+ const char *lval;
+
+ LOCAL_ASSERT(*pc == JSOP_GETARG);
+ pc += JSOP_GETARG_LENGTH;
+ LOCAL_ASSERT(*pc == JSOP_DUP);
+ if (!ss.printer) {
+ ok = InitSprintStack(cx, &ss, jp, fun->u.i.script->depth);
+ if (!ok)
+ break;
+ }
+ pc = DecompileDestructuring(&ss, pc, endpc);
+ if (!pc) {
+ ok = JS_FALSE;
+ break;
+ }
+ LOCAL_ASSERT(*pc == JSOP_POP);
+ pc += JSOP_POP_LENGTH;
+ lval = PopStr(&ss, JSOP_NOP);
+ todo = SprintCString(&jp->sprinter, lval);
+ if (todo < 0) {
+ ok = JS_FALSE;
+ break;
+ }
+ continue;
+ }
+
+#undef LOCAL_ASSERT
+#endif
+
+ if (!QuoteString(&jp->sprinter, ATOM_TO_STRING(params[i]), 0)) {
+ ok = JS_FALSE;
+ break;
+ }
+ }
+
+#ifdef JS_HAS_DESTRUCTURING
+ jp->script = oldscript;
+ jp->scope = oldscope;
+#endif
+ JS_ARENA_RELEASE(&cx->tempPool, mark);
+ if (!ok)
+ return JS_FALSE;
+#ifdef __GNUC__
+ } else {
+ scope = NULL;
+ pc = NULL;
+#endif
+ }
+
+ js_printf(jp, ") {\n");
+ indent = jp->indent;
+ jp->indent += 4;
+ if (FUN_INTERPRETED(fun) && fun->object) {
+ oldscope = jp->scope;
+ jp->scope = scope;
+ len = fun->u.i.script->code + fun->u.i.script->length - pc;
+ ok = js_DecompileCode(jp, fun->u.i.script, pc, (uintN)len, 0);
+ jp->scope = oldscope;
+ if (!ok) {
+ jp->indent = indent;
+ return JS_FALSE;
+ }
+ } else {
+ js_printf(jp, native_code_str);
+ }
+ jp->indent -= 4;
+ js_printf(jp, "\t}");
+
+ if (!jp->pretty) {
+ if (!jp->grouped && (fun->flags & JSFUN_LAMBDA))
+ js_puts(jp, ")");
+ }
+ return JS_TRUE;
+}
+
+#undef LOCAL_ASSERT_RV
+
+JSString *
+js_DecompileValueGenerator(JSContext *cx, intN spindex, jsval v,
+ JSString *fallback)
+{
+ JSStackFrame *fp, *down;
+ jsbytecode *pc, *begin, *end;
+ jsval *sp, *spbase, *base, *limit;
+ intN depth, pcdepth;
+ JSScript *script;
+ JSOp op;
+ const JSCodeSpec *cs;
+ jssrcnote *sn;
+ ptrdiff_t len, oplen;
+ JSPrinter *jp;
+ JSString *name;
+
+ for (fp = cx->fp; fp && !fp->script; fp = fp->down)
+ continue;
+ if (!fp)
+ goto do_fallback;
+
+ /* Try to find sp's generating pc depth slots under it on the stack. */
+ pc = fp->pc;
+ sp = fp->sp;
+ spbase = fp->spbase;
+ if ((uintN)(sp - spbase) > fp->script->depth) {
+ /*
+ * Preparing to make an internal invocation, using an argv stack
+ * segment pushed just above fp's operand stack space. Such an argv
+ * stack has no generating pc "basement", so we must fall back.
+ */
+ goto do_fallback;
+ }
+
+ if (spindex == JSDVG_SEARCH_STACK) {
+ if (!pc) {
+ /*
+ * Current frame is native: look under it for a scripted call
+ * in which a decompilable bytecode string that generated the
+ * value as an actual argument might exist.
+ */
+ JS_ASSERT(!fp->script && !(fp->fun && FUN_INTERPRETED(fp->fun)));
+ down = fp->down;
+ if (!down)
+ goto do_fallback;
+ script = down->script;
+ spbase = down->spbase;
+ base = fp->argv;
+ limit = base + fp->argc;
+ } else {
+ /*
+ * This should be a script activation, either a top-level
+ * script or a scripted function. But be paranoid about calls
+ * to js_DecompileValueGenerator from code that hasn't fully
+ * initialized a (default-all-zeroes) frame.
+ */
+ script = fp->script;
+ spbase = base = fp->spbase;
+ limit = fp->sp;
+ }
+
+ /*
+ * Pure paranoia about default-zeroed frames being active while
+ * js_DecompileValueGenerator is called. It can't hurt much now;
+ * error reporting performance is not an issue.
+ */
+ if (!script || !base || !limit)
+ goto do_fallback;
+
+ /*
+ * Try to find operand-generating pc depth slots below sp.
+ *
+ * In the native case, we know the arguments have generating pc's
+ * under them, on account of fp->down->script being non-null: all
+ * compiled scripts get depth slots for generating pc's allocated
+ * upon activation, at the top of js_Interpret.
+ *
+ * In the script or scripted function case, the same reasoning
+ * applies to fp rather than to fp->down.
+ *
+         * We search from limit to base to find the most recently calculated
+         * value matching v, on the assumption that it is the one that caused
+         * the exception; see bug 328664.
+ */
+ for (sp = limit;;) {
+ if (sp <= base)
+ goto do_fallback;
+ --sp;
+ if (*sp == v) {
+ depth = (intN)script->depth;
+ sp -= depth;
+ pc = (jsbytecode *) *sp;
+ break;
+ }
+ }
+ } else {
+ /*
+ * At this point, pc may or may not be null, i.e., we could be in
+ * a script activation, or we could be in a native frame that was
+ * called by another native function. Check pc and script.
+ */
+ if (!pc)
+ goto do_fallback;
+ script = fp->script;
+ if (!script)
+ goto do_fallback;
+
+ if (spindex != JSDVG_IGNORE_STACK) {
+ JS_ASSERT(spindex < 0);
+ depth = (intN)script->depth;
+#if !JS_HAS_NO_SUCH_METHOD
+ JS_ASSERT(-depth <= spindex);
+#endif
+ spindex -= depth;
+
+ base = (jsval *) cx->stackPool.current->base;
+ limit = (jsval *) cx->stackPool.current->avail;
+ sp = fp->sp + spindex;
+ if (JS_UPTRDIFF(sp, base) < JS_UPTRDIFF(limit, base))
+ pc = (jsbytecode *) *sp;
+ }
+ }
+
+ /*
+ * Again, be paranoid, this time about possibly loading an invalid pc
+ * from fp->sp[-(1+depth)].
+ */
+ if (JS_UPTRDIFF(pc, script->code) >= (jsuword)script->length) {
+ pc = fp->pc;
+ if (!pc)
+ goto do_fallback;
+ }
+ op = (JSOp) *pc;
+ if (op == JSOP_TRAP)
+ op = JS_GetTrapOpcode(cx, script, pc);
+
+ /* None of these stack-writing ops generates novel values. */
+ JS_ASSERT(op != JSOP_CASE && op != JSOP_CASEX &&
+ op != JSOP_DUP && op != JSOP_DUP2 &&
+ op != JSOP_SWAP);
+
+ /*
+ * |this| could convert to a very long object initialiser, so cite it by
+ * its keyword name instead.
+ */
+ if (op == JSOP_THIS)
+ return JS_NewStringCopyZ(cx, js_this_str);
+
+ /*
+ * JSOP_BINDNAME is special: it generates a value, the base object of a
+ * reference. But if it is the generating op for a diagnostic produced by
+ * js_DecompileValueGenerator, the name being bound is irrelevant. Just
+ * fall back to the base object.
+ */
+ if (op == JSOP_BINDNAME)
+ goto do_fallback;
+
+ /* NAME ops are self-contained, others require left or right context. */
+ cs = &js_CodeSpec[op];
+ begin = pc;
+ end = pc + cs->length;
+ if ((cs->format & JOF_MODEMASK) != JOF_NAME) {
+ JSSrcNoteType noteType;
+
+ sn = js_GetSrcNote(script, pc);
+ if (!sn)
+ goto do_fallback;
+ noteType = SN_TYPE(sn);
+ if (noteType == SRC_PCBASE) {
+ begin -= js_GetSrcNoteOffset(sn, 0);
+ } else if (noteType == SRC_PCDELTA) {
+ end = begin + js_GetSrcNoteOffset(sn, 0);
+ begin += cs->length;
+ } else {
+ goto do_fallback;
+ }
+ }
+ len = PTRDIFF(end, begin, jsbytecode);
+ if (len <= 0)
+ goto do_fallback;
+
+ /*
+ * Walk forward from script->main and compute starting stack depth.
+ * FIXME: Code to compute oplen copied from js_Disassemble1 and reduced.
+ * FIXME: Optimize to use last empty-stack sequence point.
+ */
+ pcdepth = 0;
+ for (pc = script->main; pc < begin; pc += oplen) {
+ jsbytecode *pc2;
+ uint32 type;
+ intN nuses, ndefs;
+
+ /* Let pc2 be non-null only for JSOP_LITOPX. */
+ pc2 = NULL;
+ op = (JSOp) *pc;
+ if (op == JSOP_TRAP)
+ op = JS_GetTrapOpcode(cx, script, pc);
+ cs = &js_CodeSpec[op];
+ oplen = cs->length;
+
+ if (op == JSOP_SETSP) {
+ pcdepth = GET_UINT16(pc);
+ continue;
+ }
+
+ /*
+ * A (C ? T : E) expression requires skipping either T (if begin is in
+ * E) or both T and E (if begin is after the whole expression) before
+ * adjusting pcdepth based on the JSOP_IFEQ or JSOP_IFEQX at pc that
+ * tests condition C. We know that the stack depth can't change from
+ * what it was with C on top of stack.
+ */
+ sn = js_GetSrcNote(script, pc);
+ if (sn && SN_TYPE(sn) == SRC_COND) {
+ ptrdiff_t jmpoff, jmplen;
+
+ jmpoff = js_GetSrcNoteOffset(sn, 0);
+ if (pc + jmpoff < begin) {
+ pc += jmpoff;
+ op = *pc;
+ JS_ASSERT(op == JSOP_GOTO || op == JSOP_GOTOX);
+ cs = &js_CodeSpec[op];
+ oplen = cs->length;
+ jmplen = GetJumpOffset(pc, pc);
+ if (pc + jmplen < begin) {
+ oplen = (uintN) jmplen;
+ continue;
+ }
+
+ /*
+ * Ok, begin lies in E. Manually pop C off the model stack,
+ * since we have moved beyond the IFEQ now.
+ */
+ --pcdepth;
+ }
+ }
+
+ type = cs->format & JOF_TYPEMASK;
+ switch (type) {
+ case JOF_TABLESWITCH:
+ case JOF_TABLESWITCHX:
+ {
+ jsint jmplen, i, low, high;
+
+ jmplen = (type == JOF_TABLESWITCH) ? JUMP_OFFSET_LEN
+ : JUMPX_OFFSET_LEN;
+ pc2 = pc;
+ pc2 += jmplen;
+ low = GET_JUMP_OFFSET(pc2);
+ pc2 += JUMP_OFFSET_LEN;
+ high = GET_JUMP_OFFSET(pc2);
+ pc2 += JUMP_OFFSET_LEN;
+ for (i = low; i <= high; i++)
+ pc2 += jmplen;
+ oplen = 1 + pc2 - pc;
+ break;
+ }
+
+ case JOF_LOOKUPSWITCH:
+ case JOF_LOOKUPSWITCHX:
+ {
+ jsint jmplen;
+ jsbytecode *pc2;
+ jsatomid npairs;
+
+ jmplen = (type == JOF_LOOKUPSWITCH) ? JUMP_OFFSET_LEN
+ : JUMPX_OFFSET_LEN;
+ pc2 = pc;
+ pc2 += jmplen;
+ npairs = GET_ATOM_INDEX(pc2);
+ pc2 += ATOM_INDEX_LEN;
+ while (npairs) {
+ pc2 += ATOM_INDEX_LEN;
+ pc2 += jmplen;
+ npairs--;
+ }
+ oplen = 1 + pc2 - pc;
+ break;
+ }
+
+ case JOF_LITOPX:
+ pc2 = pc + 1 + LITERAL_INDEX_LEN;
+ op = *pc2;
+ cs = &js_CodeSpec[op];
+ JS_ASSERT(cs->length > ATOM_INDEX_LEN);
+ oplen += cs->length - (1 + ATOM_INDEX_LEN);
+ break;
+
+ default:;
+ }
+
+ if (sn && SN_TYPE(sn) == SRC_HIDDEN)
+ continue;
+
+ nuses = cs->nuses;
+ if (nuses < 0) {
+ /* Call opcode pushes [callee, this, argv...]. */
+ nuses = 2 + GET_ARGC(pc);
+ } else if (op == JSOP_RETSUB) {
+ /* Pop [exception or hole, retsub pc-index]. */
+ JS_ASSERT(nuses == 0);
+ nuses = 2;
+ } else if (op == JSOP_LEAVEBLOCK || op == JSOP_LEAVEBLOCKEXPR) {
+ JS_ASSERT(nuses == 0);
+ nuses = GET_UINT16(pc);
+ }
+ pcdepth -= nuses;
+ JS_ASSERT(pcdepth >= 0);
+
+ ndefs = cs->ndefs;
+ if (op == JSOP_FINALLY) {
+ /* Push [exception or hole, retsub pc-index]. */
+ JS_ASSERT(ndefs == 0);
+ ndefs = 2;
+ } else if (op == JSOP_ENTERBLOCK) {
+ jsatomid atomIndex;
+ JSAtom *atom;
+ JSObject *obj;
+
+ JS_ASSERT(ndefs == 0);
+ atomIndex = pc2 ? GET_LITERAL_INDEX(pc) : GET_ATOM_INDEX(pc);
+ atom = js_GetAtom(cx, &script->atomMap, atomIndex);
+ obj = ATOM_TO_OBJECT(atom);
+ JS_ASSERT(OBJ_BLOCK_DEPTH(cx, obj) == pcdepth);
+ ndefs = OBJ_BLOCK_COUNT(cx, obj);
+ }
+ pcdepth += ndefs;
+ }
+
+ name = NULL;
+ jp = js_NewPrinter(cx, "js_DecompileValueGenerator", 0, JS_FALSE);
+ if (jp) {
+ if (fp->fun && fp->fun->object) {
+ JS_ASSERT(OBJ_IS_NATIVE(fp->fun->object));
+ jp->scope = OBJ_SCOPE(fp->fun->object);
+ }
+ jp->dvgfence = end;
+ if (js_DecompileCode(jp, script, begin, (uintN)len, (uintN)pcdepth))
+ name = js_GetPrinterOutput(jp);
+ js_DestroyPrinter(jp);
+ }
+ return name;
+
+ do_fallback:
+ return fallback ? fallback : js_ValueToSource(cx, v);
+}
diff --git a/third_party/js-1.7/jsopcode.h b/third_party/js-1.7/jsopcode.h
new file mode 100644
index 0000000..3f7e1de
--- /dev/null
+++ b/third_party/js-1.7/jsopcode.h
@@ -0,0 +1,318 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsopcode_h___
+#define jsopcode_h___
+/*
+ * JS bytecode definitions.
+ */
+#include <stddef.h>
+#include "jsprvtd.h"
+#include "jspubtd.h"
+#include "jsutil.h"
+
+JS_BEGIN_EXTERN_C
+
+/*
+ * JS operation bytecodes.
+ */
+typedef enum JSOp {
+#define OPDEF(op,val,name,token,length,nuses,ndefs,prec,format) \
+ op = val,
+#include "jsopcode.tbl"
+#undef OPDEF
+ JSOP_LIMIT
+} JSOp;
+
+typedef enum JSOpLength {
+#define OPDEF(op,val,name,token,length,nuses,ndefs,prec,format) \
+ op##_LENGTH = length,
+#include "jsopcode.tbl"
+#undef OPDEF
+ JSOP_LIMIT_LENGTH
+} JSOpLength;
+
+/*
+ * JS bytecode formats.
+ */
+#define JOF_BYTE 0 /* single bytecode, no immediates */
+#define JOF_JUMP 1 /* signed 16-bit jump offset immediate */
+#define JOF_CONST 2 /* unsigned 16-bit constant pool index */
+#define JOF_UINT16 3 /* unsigned 16-bit immediate operand */
+#define JOF_TABLESWITCH 4 /* table switch */
+#define JOF_LOOKUPSWITCH 5 /* lookup switch */
+#define JOF_QARG 6 /* quickened get/set function argument ops */
+#define JOF_QVAR 7 /* quickened get/set local variable ops */
+#define JOF_INDEXCONST 8 /* uint16 slot index + constant pool index */
+#define JOF_JUMPX 9 /* signed 32-bit jump offset immediate */
+#define JOF_TABLESWITCHX 10 /* extended (32-bit offset) table switch */
+#define JOF_LOOKUPSWITCHX 11 /* extended (32-bit offset) lookup switch */
+#define JOF_UINT24 12 /* extended unsigned 24-bit literal (index) */
+#define JOF_LITOPX       13     /* JOF_UINT24 followed by the op being
+                                   extended; if that op is JOF_CONST, it
+                                   has no unsigned 16-bit immediate operand */
+#define JOF_LOCAL 14 /* block-local operand stack variable */
+#define JOF_TYPEMASK 0x000f /* mask for above immediate types */
+#define JOF_NAME 0x0010 /* name operation */
+#define JOF_PROP 0x0020 /* obj.prop operation */
+#define JOF_ELEM 0x0030 /* obj[index] operation */
+#define JOF_MODEMASK 0x0030 /* mask for above addressing modes */
+#define JOF_SET 0x0040 /* set (i.e., assignment) operation */
+#define JOF_DEL 0x0080 /* delete operation */
+#define JOF_DEC 0x0100 /* decrement (--, not ++) opcode */
+#define JOF_INC 0x0200 /* increment (++, not --) opcode */
+#define JOF_INCDEC 0x0300 /* increment or decrement opcode */
+#define JOF_POST 0x0400 /* postorder increment or decrement */
+#define JOF_IMPORT 0x0800 /* import property op */
+#define JOF_FOR 0x1000 /* for-in property op */
+#define JOF_ASSIGNING JOF_SET /* hint for JSClass.resolve, used for ops
+ that do simplex assignment */
+#define JOF_DETECTING 0x2000 /* object detection flag for JSNewResolveOp */
+#define JOF_BACKPATCH 0x4000 /* backpatch placeholder during codegen */
+#define JOF_LEFTASSOC 0x8000 /* left-associative operator */
+#define JOF_DECLARING 0x10000 /* var, const, or function declaration op */
+#define JOF_XMLNAME 0x20000 /* XML name: *, a::b, @a, @a::b, etc. */
+
+#define JOF_TYPE_IS_EXTENDED_JUMP(t) \
+ ((unsigned)((t) - JOF_JUMPX) <= (unsigned)(JOF_LOOKUPSWITCHX - JOF_JUMPX))
+
+/*
+ * Immediate operand getters, setters, and bounds.
+ */
+
+/* Short (2-byte signed offset) relative jump macros. */
+#define JUMP_OFFSET_LEN 2
+#define JUMP_OFFSET_HI(off) ((jsbytecode)((off) >> 8))
+#define JUMP_OFFSET_LO(off) ((jsbytecode)(off))
+#define GET_JUMP_OFFSET(pc) ((int16)(((pc)[1] << 8) | (pc)[2]))
+#define SET_JUMP_OFFSET(pc,off) ((pc)[1] = JUMP_OFFSET_HI(off), \
+ (pc)[2] = JUMP_OFFSET_LO(off))
+#define JUMP_OFFSET_MIN ((int16)0x8000)
+#define JUMP_OFFSET_MAX ((int16)0x7fff)
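+
+/*
+ * Example (illustrative only; the buffer name and offset value are
+ * arbitrary): encoding and decoding a short jump offset with the macros
+ * above.
+ *
+ *     jsbytecode buf[3];
+ *     buf[0] = JSOP_GOTO;
+ *     SET_JUMP_OFFSET(buf, 100);
+ *     JS_ASSERT(GET_JUMP_OFFSET(buf) == 100);
+ *
+ * SET_JUMP_OFFSET stores the high byte in buf[1] and the low byte in buf[2];
+ * GET_JUMP_OFFSET reassembles them as a signed 16-bit value.
+ */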
+
+/*
+ * When a short jump won't hold a relative offset, its 2-byte immediate offset
+ * operand is an unsigned index of a span-dependency record, maintained until
+ * code generation finishes -- after which some (but we hope not nearly all)
+ * span-dependent jumps must be extended (see OptimizeSpanDeps in jsemit.c).
+ *
+ * If the span-dependency record index overflows SPANDEP_INDEX_MAX, the jump
+ * offset will contain SPANDEP_INDEX_HUGE, indicating that the record must be
+ * found (via binary search) by its "before span-dependency optimization" pc
+ * offset (from script main entry point).
+ */
+#define GET_SPANDEP_INDEX(pc) ((uint16)(((pc)[1] << 8) | (pc)[2]))
+#define SET_SPANDEP_INDEX(pc,i) ((pc)[1] = JUMP_OFFSET_HI(i), \
+ (pc)[2] = JUMP_OFFSET_LO(i))
+#define SPANDEP_INDEX_MAX ((uint16)0xfffe)
+#define SPANDEP_INDEX_HUGE ((uint16)0xffff)
+
+/* Ultimately, if short jumps won't do, emit long (4-byte signed) offsets. */
+#define JUMPX_OFFSET_LEN 4
+#define JUMPX_OFFSET_B3(off) ((jsbytecode)((off) >> 24))
+#define JUMPX_OFFSET_B2(off) ((jsbytecode)((off) >> 16))
+#define JUMPX_OFFSET_B1(off) ((jsbytecode)((off) >> 8))
+#define JUMPX_OFFSET_B0(off) ((jsbytecode)(off))
+#define GET_JUMPX_OFFSET(pc) ((int32)(((pc)[1] << 24) | ((pc)[2] << 16) \
+ | ((pc)[3] << 8) | (pc)[4]))
+#define SET_JUMPX_OFFSET(pc,off)((pc)[1] = JUMPX_OFFSET_B3(off), \
+ (pc)[2] = JUMPX_OFFSET_B2(off), \
+ (pc)[3] = JUMPX_OFFSET_B1(off), \
+ (pc)[4] = JUMPX_OFFSET_B0(off))
+#define JUMPX_OFFSET_MIN ((int32)0x80000000)
+#define JUMPX_OFFSET_MAX ((int32)0x7fffffff)
+
+/*
+ * A literal is indexed by a per-script atom map. Most scripts have relatively
+ * few literals, so the standard JOF_CONST format specifies a fixed 16 bits of
+ * immediate operand index. A script with more than 64K literals must push all
+ * high-indexed literals on the stack using JSOP_LITERAL, then use JOF_ELEM ops
+ * instead of JOF_PROP, etc.
+ */
+#define ATOM_INDEX_LEN 2
+#define ATOM_INDEX_HI(i) ((jsbytecode)((i) >> 8))
+#define ATOM_INDEX_LO(i) ((jsbytecode)(i))
+#define GET_ATOM_INDEX(pc) ((jsatomid)(((pc)[1] << 8) | (pc)[2]))
+#define SET_ATOM_INDEX(pc,i) ((pc)[1] = ATOM_INDEX_HI(i), \
+ (pc)[2] = ATOM_INDEX_LO(i))
+#define GET_ATOM(cx,script,pc) js_GetAtom((cx), &(script)->atomMap, \
+ GET_ATOM_INDEX(pc))
+
+/* A full atom index for JSOP_UINT24 uses 24 bits of immediate operand. */
+#define UINT24_HI(i) ((jsbytecode)((i) >> 16))
+#define UINT24_MID(i) ((jsbytecode)((i) >> 8))
+#define UINT24_LO(i) ((jsbytecode)(i))
+#define GET_UINT24(pc) ((jsatomid)(((pc)[1] << 16) | \
+ ((pc)[2] << 8) | \
+ (pc)[3]))
+#define SET_UINT24(pc,i) ((pc)[1] = UINT24_HI(i), \
+ (pc)[2] = UINT24_MID(i), \
+ (pc)[3] = UINT24_LO(i))
+
+/* Same format for JSOP_LITERAL, etc., but future-proof with different names. */
+#define LITERAL_INDEX_LEN 3
+#define LITERAL_INDEX_HI(i) UINT24_HI(i)
+#define LITERAL_INDEX_MID(i) UINT24_MID(i)
+#define LITERAL_INDEX_LO(i) UINT24_LO(i)
+#define GET_LITERAL_INDEX(pc) GET_UINT24(pc)
+#define SET_LITERAL_INDEX(pc,i) SET_UINT24(pc,i)
+
+/* Atom index limit is determined by SN_3BYTE_OFFSET_FLAG, see jsemit.h. */
+#define ATOM_INDEX_LIMIT_LOG2 23
+#define ATOM_INDEX_LIMIT ((uint32)1 << ATOM_INDEX_LIMIT_LOG2)
+
+JS_STATIC_ASSERT(sizeof(jsatomid) * JS_BITS_PER_BYTE >=
+ ATOM_INDEX_LIMIT_LOG2 + 1);
+
+/* Common uint16 immediate format helpers. */
+#define UINT16_HI(i) ((jsbytecode)((i) >> 8))
+#define UINT16_LO(i) ((jsbytecode)(i))
+#define GET_UINT16(pc) ((uintN)(((pc)[1] << 8) | (pc)[2]))
+#define SET_UINT16(pc,i) ((pc)[1] = UINT16_HI(i), (pc)[2] = UINT16_LO(i))
+#define UINT16_LIMIT ((uintN)1 << 16)
+
+/* Actual argument count operand format helpers. */
+#define ARGC_HI(argc) UINT16_HI(argc)
+#define ARGC_LO(argc) UINT16_LO(argc)
+#define GET_ARGC(pc) GET_UINT16(pc)
+#define ARGC_LIMIT UINT16_LIMIT
+
+/* Synonyms for quick JOF_QARG and JOF_QVAR bytecodes. */
+#define GET_ARGNO(pc) GET_UINT16(pc)
+#define SET_ARGNO(pc,argno) SET_UINT16(pc,argno)
+#define ARGNO_LEN 2
+#define ARGNO_LIMIT UINT16_LIMIT
+
+#define GET_VARNO(pc) GET_UINT16(pc)
+#define SET_VARNO(pc,varno) SET_UINT16(pc,varno)
+#define VARNO_LEN 2
+#define VARNO_LIMIT UINT16_LIMIT
+
+struct JSCodeSpec {
+ const char *name; /* JS bytecode name */
+ const char *token; /* JS source literal or null */
+ int8 length; /* length including opcode byte */
+ int8 nuses; /* arity, -1 if variadic */
+ int8 ndefs; /* number of stack results */
+ uint8 prec; /* operator precedence */
+ uint32 format; /* immediate operand format */
+};
+
+extern const JSCodeSpec js_CodeSpec[];
+extern uintN js_NumCodeSpecs;
+extern const jschar js_EscapeMap[];
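+
+/*
+ * Example (illustrative only): the decompiler looks up an opcode's source
+ * token, precedence, and length through js_CodeSpec, e.g.
+ *
+ *     const JSCodeSpec *cs = &js_CodeSpec[JSOP_ADD];
+ *
+ * after which cs->token is "+", cs->prec is 13, and cs->length is 1,
+ * matching the JSOP_ADD entry in jsopcode.tbl.
+ */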
+
+/*
+ * Return a GC'ed string containing the chars in str, with any non-printing
+ * chars or quotes (' or " as specified by the quote argument) escaped, and
+ * with the quote character at the beginning and end of the result string.
+ */
+extern JSString *
+js_QuoteString(JSContext *cx, JSString *str, jschar quote);
+
+/*
+ * JSPrinter operations, for printf style message formatting. The return
+ * value from js_GetPrinterOutput() is the printer's cumulative output, in
+ * a GC'ed string.
+ */
+extern JSPrinter *
+js_NewPrinter(JSContext *cx, const char *name, uintN indent, JSBool pretty);
+
+extern void
+js_DestroyPrinter(JSPrinter *jp);
+
+extern JSString *
+js_GetPrinterOutput(JSPrinter *jp);
+
+extern int
+js_printf(JSPrinter *jp, const char *format, ...);
+
+extern JSBool
+js_puts(JSPrinter *jp, const char *s);
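+
+/*
+ * Example (illustrative only, assuming cx is a live JSContext): the usual
+ * create/write/harvest/destroy printer cycle, mirroring the pattern used by
+ * js_DecompileValueGenerator in jsopcode.c.
+ *
+ *     JSPrinter *jp = js_NewPrinter(cx, "example", 0, JS_FALSE);
+ *     JSString *str = NULL;
+ *     if (jp) {
+ *         js_printf(jp, "%s", "hello");
+ *         str = js_GetPrinterOutput(jp);
+ *         js_DestroyPrinter(jp);
+ *     }
+ */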
+
+#ifdef DEBUG
+/*
+ * Disassemblers, for debugging only.
+ */
+#include <stdio.h>
+
+extern JS_FRIEND_API(JSBool)
+js_Disassemble(JSContext *cx, JSScript *script, JSBool lines, FILE *fp);
+
+extern JS_FRIEND_API(uintN)
+js_Disassemble1(JSContext *cx, JSScript *script, jsbytecode *pc, uintN loc,
+ JSBool lines, FILE *fp);
+#endif /* DEBUG */
+
+/*
+ * Decompilers, for script, function, and expression pretty-printing.
+ */
+extern JSBool
+js_DecompileCode(JSPrinter *jp, JSScript *script, jsbytecode *pc, uintN len,
+ uintN pcdepth);
+
+extern JSBool
+js_DecompileScript(JSPrinter *jp, JSScript *script);
+
+extern JSBool
+js_DecompileFunctionBody(JSPrinter *jp, JSFunction *fun);
+
+extern JSBool
+js_DecompileFunction(JSPrinter *jp, JSFunction *fun);
+
+/*
+ * Find the source expression that resulted in v, and return a new string
+ * containing it. Fall back on v's string conversion (fallback) if we can't
+ * find the bytecode that generated and pushed v on the operand stack.
+ *
+ * Search the current stack frame if spindex is JSDVG_SEARCH_STACK. Don't
+ * look for v on the stack if spindex is JSDVG_IGNORE_STACK. Otherwise,
+ * spindex is the negative index of v, measured from cx->fp->sp, or from a
+ * lower frame's sp if cx->fp is native.
+ */
+extern JSString *
+js_DecompileValueGenerator(JSContext *cx, intN spindex, jsval v,
+ JSString *fallback);
+
+#define JSDVG_IGNORE_STACK 0
+#define JSDVG_SEARCH_STACK 1
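+
+/*
+ * Example (illustrative only, assuming cx and a jsval v are in scope):
+ *
+ *     JSString *expr = js_DecompileValueGenerator(cx, JSDVG_SEARCH_STACK,
+ *                                                 v, NULL);
+ *
+ * searches the current frame's operand stack for v; with a null fallback
+ * argument it falls back to js_ValueToSource(cx, v) when no generating
+ * bytecode can be found.
+ */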
+
+JS_END_EXTERN_C
+
+#endif /* jsopcode_h___ */
diff --git a/third_party/js-1.7/jsopcode.tbl b/third_party/js-1.7/jsopcode.tbl
new file mode 100644
index 0000000..4a4ca89
--- /dev/null
+++ b/third_party/js-1.7/jsopcode.tbl
@@ -0,0 +1,478 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sw=4 et tw=0 ft=C:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * JavaScript operation bytecodes. If you need to allocate a bytecode, look
+ * for a name of the form JSOP_UNUSED* and claim it. Otherwise, always add at
+ * the end of the table.
+ *
+ * Includers must define an OPDEF macro of the following form:
+ *
+ * #define OPDEF(op,val,name,image,length,nuses,ndefs,prec,format) ...
+ *
+ * Selected arguments can be expanded in initializers.  For example, the op
+ * argument is expanded, followed by a comma, in the JSOp enum (jsopcode.h).
+ * The value field must be dense for now, because jsopcode.c uses an OPDEF()
+ * expansion inside the js_CodeSpec[] initializer.
+ *
+ * Field Description
+ * op Bytecode name, which is the JSOp enumerator name
+ * value Bytecode value, which is the JSOp enumerator value
+ * name C string containing name for disassembler
+ * image C string containing "image" for pretty-printer, null if ugly
+ * length Number of bytes including any immediate operands
+ * nuses Number of stack slots consumed by bytecode, -1 if variadic
+ * ndefs Number of stack slots produced by bytecode
+ * prec Operator precedence, zero if not an operator
+ * format Bytecode plus immediate operand encoding format
+ *
+ * Precedence Operators Opcodes
+ * 1 let (x = y) z, w JSOP_LEAVEBLOCKEXPR
+ * 2 , JSOP_POP with SRC_PCDELTA note
+ * 3 =, +=, etc. JSOP_SETNAME, etc. (all JOF_ASSIGNING)
+ * 4 ?: JSOP_IFEQ, JSOP_IFEQX
+ * 5 || JSOP_OR, JSOP_ORX
+ * 6 && JSOP_AND, JSOP_ANDX
+ * 7 | JSOP_BITOR
+ * 8 ^ JSOP_BITXOR
+ * 9 & JSOP_BITAND
+ * 10 ==, !=, etc. JSOP_EQ, JSOP_NE, etc.
+ * 11 <, in, etc. JSOP_LT, JSOP_IN, etc.
+ * 12 <<, >>, >>> JSOP_LSH, JSOP_RSH, JSOP_URSH
+ * 13 +, -, etc. JSOP_ADD, JSOP_SUB, etc.
+ * 14 *, /, % JSOP_MUL, JSOP_DIV, JSOP_MOD
+ * 15 !, ~, etc. JSOP_NOT, JSOP_BITNOT, etc.
+ * 16 0, function(){} etc. JSOP_ZERO, JSOP_ANONFUNOBJ, etc.
+ * 17 delete, new JSOP_DEL*, JSOP_NEW
+ * 18 x.y, f(), etc. JSOP_GETPROP, JSOP_CALL, etc.
+ * 19 x, null, etc. JSOP_NAME, JSOP_NULL, etc.
+ *
+ * The push-numeric-constant operators, JSOP_ZERO, JSOP_NUMBER, etc., have
+ * lower precedence than the member operators emitted for the . operator, to
+ * cause the decompiler to parenthesize the . left operand, e.g. (0).foo.
+ * Otherwise the . could be taken as a decimal point. We use the same level
+ * 16 for function expressions too, to force parenthesization.
+ *
+ * This file is best viewed with 128 columns:
+12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678
+ */
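+
+/*
+ * Example (illustrative only; opNames is an arbitrary name): an includer
+ * that wants a table of opcode names can expand this file as follows,
+ * mirroring how jsopcode.h builds the JSOp enum.
+ *
+ *     #define OPDEF(op,val,name,image,length,nuses,ndefs,prec,format) name,
+ *     static const char *opNames[] = {
+ *     #include "jsopcode.tbl"
+ *     };
+ *     #undef OPDEF
+ */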
+
+/* legend: op val name image len use def prec format */
+
+/* Longstanding JavaScript bytecodes. */
+OPDEF(JSOP_NOP, 0, "nop", NULL, 1, 0, 0, 0, JOF_BYTE)
+OPDEF(JSOP_PUSH, 1, "push", NULL, 1, 0, 1, 0, JOF_BYTE)
+OPDEF(JSOP_POPV, 2, "popv", NULL, 1, 1, 0, 2, JOF_BYTE)
+OPDEF(JSOP_ENTERWITH, 3, "enterwith", NULL, 1, 1, 1, 0, JOF_BYTE)
+OPDEF(JSOP_LEAVEWITH, 4, "leavewith", NULL, 1, 1, 0, 0, JOF_BYTE)
+OPDEF(JSOP_RETURN, 5, "return", NULL, 1, 1, 0, 0, JOF_BYTE)
+OPDEF(JSOP_GOTO, 6, "goto", NULL, 3, 0, 0, 0, JOF_JUMP)
+OPDEF(JSOP_IFEQ, 7, "ifeq", NULL, 3, 1, 0, 4, JOF_JUMP|JOF_DETECTING)
+OPDEF(JSOP_IFNE, 8, "ifne", NULL, 3, 1, 0, 0, JOF_JUMP)
+
+/* Get the arguments object for the current, lightweight function activation. */
+OPDEF(JSOP_ARGUMENTS, 9, js_arguments_str, js_arguments_str, 1, 0, 1, 18, JOF_BYTE)
+
+/* ECMA-compliant for-in loop with argument or local variable loop control. */
+OPDEF(JSOP_FORARG, 10, "forarg", NULL, 3, 0, 1, 19, JOF_QARG|JOF_NAME|JOF_FOR)
+OPDEF(JSOP_FORVAR, 11, "forvar", NULL, 3, 0, 1, 19, JOF_QVAR|JOF_NAME|JOF_FOR)
+
+/* More longstanding bytecodes. */
+OPDEF(JSOP_DUP, 12, "dup", NULL, 1, 1, 2, 0, JOF_BYTE)
+OPDEF(JSOP_DUP2, 13, "dup2", NULL, 1, 2, 4, 0, JOF_BYTE)
+OPDEF(JSOP_SETCONST, 14, "setconst", NULL, 3, 1, 1, 3, JOF_CONST|JOF_NAME|JOF_SET|JOF_ASSIGNING)
+OPDEF(JSOP_BITOR, 15, "bitor", "|", 1, 2, 1, 7, JOF_BYTE|JOF_LEFTASSOC)
+OPDEF(JSOP_BITXOR, 16, "bitxor", "^", 1, 2, 1, 8, JOF_BYTE|JOF_LEFTASSOC)
+OPDEF(JSOP_BITAND, 17, "bitand", "&", 1, 2, 1, 9, JOF_BYTE|JOF_LEFTASSOC)
+OPDEF(JSOP_EQ, 18, "eq", "==", 1, 2, 1, 10, JOF_BYTE|JOF_LEFTASSOC|JOF_DETECTING)
+OPDEF(JSOP_NE, 19, "ne", "!=", 1, 2, 1, 10, JOF_BYTE|JOF_LEFTASSOC|JOF_DETECTING)
+OPDEF(JSOP_LT, 20, "lt", "<", 1, 2, 1, 11, JOF_BYTE|JOF_LEFTASSOC)
+OPDEF(JSOP_LE, 21, "le", "<=", 1, 2, 1, 11, JOF_BYTE|JOF_LEFTASSOC)
+OPDEF(JSOP_GT, 22, "gt", ">", 1, 2, 1, 11, JOF_BYTE|JOF_LEFTASSOC)
+OPDEF(JSOP_GE, 23, "ge", ">=", 1, 2, 1, 11, JOF_BYTE|JOF_LEFTASSOC)
+OPDEF(JSOP_LSH, 24, "lsh", "<<", 1, 2, 1, 12, JOF_BYTE|JOF_LEFTASSOC)
+OPDEF(JSOP_RSH, 25, "rsh", ">>", 1, 2, 1, 12, JOF_BYTE|JOF_LEFTASSOC)
+OPDEF(JSOP_URSH, 26, "ursh", ">>>", 1, 2, 1, 12, JOF_BYTE|JOF_LEFTASSOC)
+OPDEF(JSOP_ADD, 27, "add", "+", 1, 2, 1, 13, JOF_BYTE|JOF_LEFTASSOC)
+OPDEF(JSOP_SUB, 28, "sub", "-", 1, 2, 1, 13, JOF_BYTE|JOF_LEFTASSOC)
+OPDEF(JSOP_MUL, 29, "mul", "*", 1, 2, 1, 14, JOF_BYTE|JOF_LEFTASSOC)
+OPDEF(JSOP_DIV, 30, "div", "/", 1, 2, 1, 14, JOF_BYTE|JOF_LEFTASSOC)
+OPDEF(JSOP_MOD, 31, "mod", "%", 1, 2, 1, 14, JOF_BYTE|JOF_LEFTASSOC)
+OPDEF(JSOP_NOT, 32, "not", "!", 1, 1, 1, 15, JOF_BYTE|JOF_DETECTING)
+OPDEF(JSOP_BITNOT, 33, "bitnot", "~", 1, 1, 1, 15, JOF_BYTE)
+OPDEF(JSOP_NEG, 34, "neg", "- ", 1, 1, 1, 15, JOF_BYTE)
+OPDEF(JSOP_NEW, 35, js_new_str, NULL, 3, -1, 1, 17, JOF_UINT16)
+OPDEF(JSOP_DELNAME, 36, "delname", NULL, 3, 0, 1, 17, JOF_CONST|JOF_NAME|JOF_DEL)
+OPDEF(JSOP_DELPROP, 37, "delprop", NULL, 3, 1, 1, 17, JOF_CONST|JOF_PROP|JOF_DEL)
+OPDEF(JSOP_DELELEM, 38, "delelem", NULL, 1, 2, 1, 17, JOF_BYTE |JOF_ELEM|JOF_DEL)
+OPDEF(JSOP_TYPEOF, 39, js_typeof_str,NULL, 1, 1, 1, 15, JOF_BYTE|JOF_DETECTING)
+OPDEF(JSOP_VOID, 40, js_void_str, NULL, 1, 1, 1, 15, JOF_BYTE)
+OPDEF(JSOP_INCNAME, 41, "incname", NULL, 3, 0, 1, 15, JOF_CONST|JOF_NAME|JOF_INC)
+OPDEF(JSOP_INCPROP, 42, "incprop", NULL, 3, 1, 1, 15, JOF_CONST|JOF_PROP|JOF_INC)
+OPDEF(JSOP_INCELEM, 43, "incelem", NULL, 1, 2, 1, 15, JOF_BYTE |JOF_ELEM|JOF_INC)
+OPDEF(JSOP_DECNAME, 44, "decname", NULL, 3, 0, 1, 15, JOF_CONST|JOF_NAME|JOF_DEC)
+OPDEF(JSOP_DECPROP, 45, "decprop", NULL, 3, 1, 1, 15, JOF_CONST|JOF_PROP|JOF_DEC)
+OPDEF(JSOP_DECELEM, 46, "decelem", NULL, 1, 2, 1, 15, JOF_BYTE |JOF_ELEM|JOF_DEC)
+OPDEF(JSOP_NAMEINC, 47, "nameinc", NULL, 3, 0, 1, 15, JOF_CONST|JOF_NAME|JOF_INC|JOF_POST)
+OPDEF(JSOP_PROPINC, 48, "propinc", NULL, 3, 1, 1, 15, JOF_CONST|JOF_PROP|JOF_INC|JOF_POST)
+OPDEF(JSOP_ELEMINC, 49, "eleminc", NULL, 1, 2, 1, 15, JOF_BYTE |JOF_ELEM|JOF_INC|JOF_POST)
+OPDEF(JSOP_NAMEDEC, 50, "namedec", NULL, 3, 0, 1, 15, JOF_CONST|JOF_NAME|JOF_DEC|JOF_POST)
+OPDEF(JSOP_PROPDEC, 51, "propdec", NULL, 3, 1, 1, 15, JOF_CONST|JOF_PROP|JOF_DEC|JOF_POST)
+OPDEF(JSOP_ELEMDEC, 52, "elemdec", NULL, 1, 2, 1, 15, JOF_BYTE |JOF_ELEM|JOF_DEC|JOF_POST)
+OPDEF(JSOP_GETPROP, 53, "getprop", NULL, 3, 1, 1, 18, JOF_CONST|JOF_PROP)
+OPDEF(JSOP_SETPROP, 54, "setprop", NULL, 3, 2, 1, 3, JOF_CONST|JOF_PROP|JOF_SET|JOF_ASSIGNING|JOF_DETECTING)
+OPDEF(JSOP_GETELEM, 55, "getelem", NULL, 1, 2, 1, 18, JOF_BYTE |JOF_ELEM|JOF_LEFTASSOC)
+OPDEF(JSOP_SETELEM, 56, "setelem", NULL, 1, 3, 1, 3, JOF_BYTE |JOF_ELEM|JOF_SET|JOF_ASSIGNING|JOF_DETECTING)
+OPDEF(JSOP_PUSHOBJ, 57, "pushobj", NULL, 1, 0, 1, 0, JOF_BYTE)
+OPDEF(JSOP_CALL, 58, "call", NULL, 3, -1, 1, 18, JOF_UINT16)
+OPDEF(JSOP_NAME, 59, "name", NULL, 3, 0, 1, 19, JOF_CONST|JOF_NAME)
+OPDEF(JSOP_NUMBER, 60, "number", NULL, 3, 0, 1, 16, JOF_CONST)
+OPDEF(JSOP_STRING, 61, "string", NULL, 3, 0, 1, 19, JOF_CONST)
+OPDEF(JSOP_ZERO, 62, "zero", "0", 1, 0, 1, 16, JOF_BYTE)
+OPDEF(JSOP_ONE, 63, "one", "1", 1, 0, 1, 16, JOF_BYTE)
+OPDEF(JSOP_NULL, 64, js_null_str, js_null_str, 1, 0, 1, 19, JOF_BYTE)
+OPDEF(JSOP_THIS, 65, js_this_str, js_this_str, 1, 0, 1, 19, JOF_BYTE)
+OPDEF(JSOP_FALSE, 66, js_false_str, js_false_str, 1, 0, 1, 19, JOF_BYTE)
+OPDEF(JSOP_TRUE, 67, js_true_str, js_true_str, 1, 0, 1, 19, JOF_BYTE)
+OPDEF(JSOP_OR, 68, "or", NULL, 3, 1, 0, 5, JOF_JUMP|JOF_DETECTING)
+OPDEF(JSOP_AND, 69, "and", NULL, 3, 1, 0, 6, JOF_JUMP|JOF_DETECTING)
+
+/* The switch bytecodes have variable length. */
+OPDEF(JSOP_TABLESWITCH, 70, "tableswitch", NULL, -1, 1, 0, 0, JOF_TABLESWITCH|JOF_DETECTING)
+OPDEF(JSOP_LOOKUPSWITCH, 71, "lookupswitch", NULL, -1, 1, 0, 0, JOF_LOOKUPSWITCH|JOF_DETECTING)
+
+/* New, infallible/transitive identity ops. */
+OPDEF(JSOP_NEW_EQ, 72, "eq", NULL, 1, 2, 1, 10, JOF_BYTE|JOF_DETECTING)
+OPDEF(JSOP_NEW_NE, 73, "ne", NULL, 1, 2, 1, 10, JOF_BYTE|JOF_DETECTING)
+
+/* Lexical closure constructor. */
+OPDEF(JSOP_CLOSURE, 74, "closure", NULL, 3, 0, 0, 0, JOF_CONST)
+
+/* Export and import ops. */
+OPDEF(JSOP_EXPORTALL, 75, "exportall", NULL, 1, 0, 0, 0, JOF_BYTE)
+OPDEF(JSOP_EXPORTNAME,76, "exportname", NULL, 3, 0, 0, 0, JOF_CONST|JOF_NAME)
+OPDEF(JSOP_IMPORTALL, 77, "importall", NULL, 1, 1, 0, 0, JOF_BYTE)
+OPDEF(JSOP_IMPORTPROP,78, "importprop", NULL, 3, 1, 0, 0, JOF_CONST|JOF_PROP|JOF_IMPORT)
+OPDEF(JSOP_IMPORTELEM,79, "importelem", NULL, 1, 2, 0, 0, JOF_BYTE |JOF_ELEM|JOF_IMPORT)
+
+/* Push object literal. */
+OPDEF(JSOP_OBJECT, 80, "object", NULL, 3, 0, 1, 19, JOF_CONST)
+
+/* Pop value and discard it. */
+OPDEF(JSOP_POP, 81, "pop", NULL, 1, 1, 0, 2, JOF_BYTE)
+
+/* Convert value to number, for unary +. */
+OPDEF(JSOP_POS, 82, "pos", "+ ", 1, 1, 1, 15, JOF_BYTE)
+
+/* Trap into debugger for breakpoint, etc. */
+OPDEF(JSOP_TRAP, 83, "trap", NULL, 1, 0, 0, 0, JOF_BYTE)
+
+/* Fast get/set ops for function arguments and local variables. */
+OPDEF(JSOP_GETARG, 84, "getarg", NULL, 3, 0, 1, 19, JOF_QARG |JOF_NAME)
+OPDEF(JSOP_SETARG, 85, "setarg", NULL, 3, 1, 1, 3, JOF_QARG |JOF_NAME|JOF_SET|JOF_ASSIGNING)
+OPDEF(JSOP_GETVAR, 86, "getvar", NULL, 3, 0, 1, 19, JOF_QVAR |JOF_NAME)
+OPDEF(JSOP_SETVAR, 87, "setvar", NULL, 3, 1, 1, 3, JOF_QVAR |JOF_NAME|JOF_SET|JOF_ASSIGNING|JOF_DETECTING)
+
+/* Push unsigned 16-bit int constant. */
+OPDEF(JSOP_UINT16, 88, "uint16", NULL, 3, 0, 1, 16, JOF_UINT16)
+
+/* Object and array literal support. */
+OPDEF(JSOP_NEWINIT, 89, "newinit", NULL, 1, 2, 1, 0, JOF_BYTE)
+OPDEF(JSOP_ENDINIT, 90, "endinit", NULL, 1, 0, 0, 19, JOF_BYTE)
+OPDEF(JSOP_INITPROP, 91, "initprop", NULL, 3, 1, 0, 3, JOF_CONST|JOF_PROP|JOF_DETECTING)
+OPDEF(JSOP_INITELEM, 92, "initelem", NULL, 1, 2, 0, 3, JOF_BYTE |JOF_ELEM|JOF_DETECTING)
+OPDEF(JSOP_DEFSHARP, 93, "defsharp", NULL, 3, 0, 0, 0, JOF_UINT16)
+OPDEF(JSOP_USESHARP, 94, "usesharp", NULL, 3, 0, 1, 0, JOF_UINT16)
+
+/* Fast inc/dec ops for args and local vars. */
+OPDEF(JSOP_INCARG, 95, "incarg", NULL, 3, 0, 1, 15, JOF_QARG |JOF_NAME|JOF_INC)
+OPDEF(JSOP_INCVAR, 96, "incvar", NULL, 3, 0, 1, 15, JOF_QVAR |JOF_NAME|JOF_INC)
+OPDEF(JSOP_DECARG, 97, "decarg", NULL, 3, 0, 1, 15, JOF_QARG |JOF_NAME|JOF_DEC)
+OPDEF(JSOP_DECVAR, 98, "decvar", NULL, 3, 0, 1, 15, JOF_QVAR |JOF_NAME|JOF_DEC)
+OPDEF(JSOP_ARGINC, 99, "arginc", NULL, 3, 0, 1, 15, JOF_QARG |JOF_NAME|JOF_INC|JOF_POST)
+OPDEF(JSOP_VARINC, 100,"varinc", NULL, 3, 0, 1, 15, JOF_QVAR |JOF_NAME|JOF_INC|JOF_POST)
+OPDEF(JSOP_ARGDEC, 101,"argdec", NULL, 3, 0, 1, 15, JOF_QARG |JOF_NAME|JOF_DEC|JOF_POST)
+OPDEF(JSOP_VARDEC, 102,"vardec", NULL, 3, 0, 1, 15, JOF_QVAR |JOF_NAME|JOF_DEC|JOF_POST)
+
+/*
+ * Initialize for-in iterator. See also JSOP_FOREACH and JSOP_FOREACHKEYVAL.
+ */
+OPDEF(JSOP_FORIN, 103,"forin", NULL, 1, 1, 1, 0, JOF_BYTE)
+
+/* ECMA-compliant for/in ops. */
+OPDEF(JSOP_FORNAME, 104,"forname", NULL, 3, 0, 1, 19, JOF_CONST|JOF_NAME|JOF_FOR)
+OPDEF(JSOP_FORPROP, 105,"forprop", NULL, 3, 1, 1, 18, JOF_CONST|JOF_PROP|JOF_FOR)
+OPDEF(JSOP_FORELEM, 106,"forelem", NULL, 1, 1, 3, 18, JOF_BYTE |JOF_ELEM|JOF_FOR)
+OPDEF(JSOP_POP2, 107,"pop2", NULL, 1, 2, 0, 0, JOF_BYTE)
+
+/* ECMA-compliant assignment ops. */
+OPDEF(JSOP_BINDNAME, 108,"bindname", NULL, 3, 0, 1, 0, JOF_CONST|JOF_NAME|JOF_SET|JOF_ASSIGNING)
+OPDEF(JSOP_SETNAME, 109,"setname", NULL, 3, 2, 1, 3, JOF_CONST|JOF_NAME|JOF_SET|JOF_ASSIGNING|JOF_DETECTING)
+
+/* Exception handling ops. */
+OPDEF(JSOP_THROW, 110,"throw", NULL, 1, 1, 0, 0, JOF_BYTE)
+
+/* 'in' and 'instanceof' ops. */
+OPDEF(JSOP_IN, 111,js_in_str, js_in_str, 1, 2, 1, 11, JOF_BYTE|JOF_LEFTASSOC)
+OPDEF(JSOP_INSTANCEOF,112,js_instanceof_str,js_instanceof_str,1,2,1,11,JOF_BYTE|JOF_LEFTASSOC)
+
+/* debugger op */
+OPDEF(JSOP_DEBUGGER, 113,"debugger", NULL, 1, 0, 0, 0, JOF_BYTE)
+
+/* gosub/retsub for finally handling */
+OPDEF(JSOP_GOSUB, 114,"gosub", NULL, 3, 0, 0, 0, JOF_JUMP)
+OPDEF(JSOP_RETSUB, 115,"retsub", NULL, 1, 0, 0, 0, JOF_BYTE)
+
+/* More exception handling ops. */
+OPDEF(JSOP_EXCEPTION, 116,"exception", NULL, 1, 0, 1, 0, JOF_BYTE)
+OPDEF(JSOP_SETSP, 117,"setsp", NULL, 3, 0, 0, 0, JOF_UINT16)
+
+/*
+ * ECMA-compliant switch statement ops.
+ * CONDSWITCH is a decompilable NOP; CASE is ===, POP, jump if true, re-push
+ * lval if false; and DEFAULT is POP lval and GOTO.
+ */
+OPDEF(JSOP_CONDSWITCH,118,"condswitch", NULL, 1, 0, 0, 0, JOF_BYTE)
+OPDEF(JSOP_CASE, 119,"case", NULL, 3, 1, 0, 0, JOF_JUMP)
+OPDEF(JSOP_DEFAULT, 120,"default", NULL, 3, 1, 0, 0, JOF_JUMP)
+
+/*
+ * ECMA-compliant call to eval op
+ */
+OPDEF(JSOP_EVAL, 121,"eval", NULL, 3, -1, 1, 18, JOF_UINT16)
+
+/*
+ * ECMA-compliant helper for 'for (x[i] in o)' loops.
+ */
+OPDEF(JSOP_ENUMELEM, 122,"enumelem", NULL, 1, 3, 0, 3, JOF_BYTE |JOF_SET|JOF_ASSIGNING)
+
+/*
+ * Getter and setter prefix bytecodes. These modify the next bytecode, either
+ * an assignment or a property initializer code, which then defines a property
+ * getter or setter.
+ */
+OPDEF(JSOP_GETTER, 123,js_getter_str,NULL, 1, 0, 0, 0, JOF_BYTE)
+OPDEF(JSOP_SETTER, 124,js_setter_str,NULL, 1, 0, 0, 0, JOF_BYTE)
+
+/*
+ * Prolog bytecodes for defining function, var, and const names.
+ */
+OPDEF(JSOP_DEFFUN, 125,"deffun", NULL, 3, 0, 0, 0, JOF_CONST|JOF_DECLARING)
+OPDEF(JSOP_DEFCONST, 126,"defconst", NULL, 3, 0, 0, 0, JOF_CONST|JOF_DECLARING)
+OPDEF(JSOP_DEFVAR, 127,"defvar", NULL, 3, 0, 0, 0, JOF_CONST|JOF_DECLARING)
+
+/* Auto-clone (if needed due to re-parenting) and push an anonymous function. */
+OPDEF(JSOP_ANONFUNOBJ, 128, "anonfunobj", NULL, 3, 0, 1, 16, JOF_CONST)
+
+/* ECMA ed. 3 named function expression. */
+OPDEF(JSOP_NAMEDFUNOBJ, 129, "namedfunobj", NULL, 3, 0, 1, 16, JOF_CONST)
+
+/*
+ * Like JSOP_SETLOCAL, but specialized to avoid requiring JSOP_POP immediately
+ * after to throw away the exception value.
+ */
+OPDEF(JSOP_SETLOCALPOP, 130, "setlocalpop", NULL, 3, 1, 0, 3, JOF_LOCAL|JOF_NAME|JOF_SET)
+
+/* ECMA-mandated parenthesization opcode, which nulls the reference base register, obj; see jsinterp.c. */
+OPDEF(JSOP_GROUP, 131, "group", NULL, 1, 0, 0, 0, JOF_BYTE)
+
+/* Host object extension: given 'o.item(i) = j', the left-hand side compiles JSOP_SETCALL, rather than JSOP_CALL. */
+OPDEF(JSOP_SETCALL, 132, "setcall", NULL, 3, -1, 2, 18, JOF_UINT16|JOF_SET|JOF_ASSIGNING)
+
+/*
+ * Exception handling no-ops, for more economical byte-coding than SRC_TRYFIN
+ * srcnote-annotated JSOP_NOPs.
+ */
+OPDEF(JSOP_TRY, 133,"try", NULL, 1, 0, 0, 0, JOF_BYTE)
+OPDEF(JSOP_FINALLY, 134,"finally", NULL, 1, 0, 0, 0, JOF_BYTE)
+
+/*
+ * Swap the top two stack elements.
+ */
+OPDEF(JSOP_SWAP, 135,"swap", NULL, 1, 2, 2, 0, JOF_BYTE)
+
+/*
+ * Bytecodes that avoid making an arguments object in most cases:
+ * JSOP_ARGSUB gets arguments[i] from fp->argv, iff i is in [0, fp->argc-1].
+ * JSOP_ARGCNT returns fp->argc.
+ */
+OPDEF(JSOP_ARGSUB, 136,"argsub", NULL, 3, 0, 1, 18, JOF_QARG |JOF_NAME)
+OPDEF(JSOP_ARGCNT, 137,"argcnt", NULL, 1, 0, 1, 18, JOF_BYTE)
+
+/*
+ * Define a local function object as a local variable.
+ * The local variable's slot number is the first immediate two-byte operand.
+ * The function object's atom index is the second immediate operand.
+ */
+OPDEF(JSOP_DEFLOCALFUN, 138,"deflocalfun",NULL, 5, 0, 0, 0, JOF_INDEXCONST|JOF_DECLARING)
+
+/* Extended jumps. */
+OPDEF(JSOP_GOTOX, 139,"gotox", NULL, 5, 0, 0, 0, JOF_JUMPX)
+OPDEF(JSOP_IFEQX, 140,"ifeqx", NULL, 5, 1, 0, 3, JOF_JUMPX|JOF_DETECTING)
+OPDEF(JSOP_IFNEX, 141,"ifnex", NULL, 5, 1, 0, 0, JOF_JUMPX)
+OPDEF(JSOP_ORX, 142,"orx", NULL, 5, 1, 0, 5, JOF_JUMPX|JOF_DETECTING)
+OPDEF(JSOP_ANDX, 143,"andx", NULL, 5, 1, 0, 6, JOF_JUMPX|JOF_DETECTING)
+OPDEF(JSOP_GOSUBX, 144,"gosubx", NULL, 5, 0, 0, 0, JOF_JUMPX)
+OPDEF(JSOP_CASEX, 145,"casex", NULL, 5, 1, 0, 0, JOF_JUMPX)
+OPDEF(JSOP_DEFAULTX, 146,"defaultx", NULL, 5, 1, 0, 0, JOF_JUMPX)
+OPDEF(JSOP_TABLESWITCHX, 147,"tableswitchx",NULL, -1, 1, 0, 0, JOF_TABLESWITCHX|JOF_DETECTING)
+OPDEF(JSOP_LOOKUPSWITCHX, 148,"lookupswitchx",NULL, -1, 1, 0, 0, JOF_LOOKUPSWITCHX|JOF_DETECTING)
+
+/* Placeholders for a real jump opcode set during backpatch chain fixup. */
+OPDEF(JSOP_BACKPATCH, 149,"backpatch",NULL, 3, 0, 0, 0, JOF_JUMP|JOF_BACKPATCH)
+OPDEF(JSOP_BACKPATCH_POP, 150,"backpatch_pop",NULL, 3, 1, 0, 0, JOF_JUMP|JOF_BACKPATCH)
+
+/* Set pending exception from the stack, to trigger rethrow. */
+OPDEF(JSOP_THROWING, 151,"throwing", NULL, 1, 1, 0, 0, JOF_BYTE)
+
+/* Set and get return value pseudo-register in stack frame. */
+OPDEF(JSOP_SETRVAL, 152,"setrval", NULL, 1, 1, 0, 0, JOF_BYTE)
+OPDEF(JSOP_RETRVAL, 153,"retrval", NULL, 1, 0, 0, 0, JOF_BYTE)
+
+/* Optimized global variable ops (we don't bother doing a JSOP_FORGVAR op). */
+OPDEF(JSOP_GETGVAR, 154,"getgvar", NULL, 3, 0, 1, 19, JOF_CONST|JOF_NAME)
+OPDEF(JSOP_SETGVAR, 155,"setgvar", NULL, 3, 1, 1, 3, JOF_CONST|JOF_NAME|JOF_SET|JOF_ASSIGNING|JOF_DETECTING)
+OPDEF(JSOP_INCGVAR, 156,"incgvar", NULL, 3, 0, 1, 15, JOF_CONST|JOF_NAME|JOF_INC)
+OPDEF(JSOP_DECGVAR, 157,"decgvar", NULL, 3, 0, 1, 15, JOF_CONST|JOF_NAME|JOF_DEC)
+OPDEF(JSOP_GVARINC, 158,"gvarinc", NULL, 3, 0, 1, 15, JOF_CONST|JOF_NAME|JOF_INC|JOF_POST)
+OPDEF(JSOP_GVARDEC, 159,"gvardec", NULL, 3, 0, 1, 15, JOF_CONST|JOF_NAME|JOF_DEC|JOF_POST)
+
+/* Regular expression literal requiring special "fork on exec" handling. */
+OPDEF(JSOP_REGEXP, 160,"regexp", NULL, 3, 0, 1, 19, JOF_CONST)
+
+/* XML (ECMA-357, a.k.a. "E4X") support. */
+OPDEF(JSOP_DEFXMLNS, 161,"defxmlns", NULL, 1, 1, 0, 0, JOF_BYTE)
+OPDEF(JSOP_ANYNAME, 162,"anyname", NULL, 1, 0, 1, 19, JOF_BYTE|JOF_XMLNAME)
+OPDEF(JSOP_QNAMEPART, 163,"qnamepart", NULL, 3, 0, 1, 19, JOF_CONST|JOF_XMLNAME)
+OPDEF(JSOP_QNAMECONST, 164,"qnameconst", NULL, 3, 1, 1, 19, JOF_CONST|JOF_XMLNAME)
+OPDEF(JSOP_QNAME, 165,"qname", NULL, 1, 2, 1, 0, JOF_BYTE|JOF_XMLNAME)
+OPDEF(JSOP_TOATTRNAME, 166,"toattrname", NULL, 1, 1, 1, 19, JOF_BYTE|JOF_XMLNAME)
+OPDEF(JSOP_TOATTRVAL, 167,"toattrval", NULL, 1, 1, 1, 19, JOF_BYTE)
+OPDEF(JSOP_ADDATTRNAME, 168,"addattrname",NULL, 1, 2, 1, 13, JOF_BYTE)
+OPDEF(JSOP_ADDATTRVAL, 169,"addattrval", NULL, 1, 2, 1, 13, JOF_BYTE)
+OPDEF(JSOP_BINDXMLNAME, 170,"bindxmlname",NULL, 1, 1, 2, 3, JOF_BYTE|JOF_SET|JOF_ASSIGNING)
+OPDEF(JSOP_SETXMLNAME, 171,"setxmlname", NULL, 1, 3, 1, 3, JOF_BYTE|JOF_SET|JOF_ASSIGNING|JOF_DETECTING)
+OPDEF(JSOP_XMLNAME, 172,"xmlname", NULL, 1, 1, 1, 19, JOF_BYTE)
+OPDEF(JSOP_DESCENDANTS, 173,"descendants",NULL, 1, 2, 1, 18, JOF_BYTE)
+OPDEF(JSOP_FILTER, 174,"filter", NULL, 3, 1, 1, 0, JOF_JUMP)
+OPDEF(JSOP_ENDFILTER, 175,"endfilter", NULL, 1, 1, 0, 18, JOF_BYTE)
+OPDEF(JSOP_TOXML, 176,"toxml", NULL, 1, 1, 1, 19, JOF_BYTE)
+OPDEF(JSOP_TOXMLLIST, 177,"toxmllist", NULL, 1, 1, 1, 19, JOF_BYTE)
+OPDEF(JSOP_XMLTAGEXPR, 178,"xmltagexpr", NULL, 1, 1, 1, 0, JOF_BYTE)
+OPDEF(JSOP_XMLELTEXPR, 179,"xmleltexpr", NULL, 1, 1, 1, 0, JOF_BYTE)
+OPDEF(JSOP_XMLOBJECT, 180,"xmlobject", NULL, 3, 0, 1, 19, JOF_CONST)
+OPDEF(JSOP_XMLCDATA, 181,"xmlcdata", NULL, 3, 0, 1, 19, JOF_CONST)
+OPDEF(JSOP_XMLCOMMENT, 182,"xmlcomment", NULL, 3, 0, 1, 19, JOF_CONST)
+OPDEF(JSOP_XMLPI, 183,"xmlpi", NULL, 3, 1, 1, 19, JOF_CONST)
+OPDEF(JSOP_GETMETHOD, 184,"getmethod", NULL, 3, 1, 1, 18, JOF_CONST|JOF_PROP)
+OPDEF(JSOP_GETFUNNS, 185,"getfunns", NULL, 1, 0, 1, 19, JOF_BYTE)
+OPDEF(JSOP_FOREACH, 186,"foreach", NULL, 1, 1, 1, 0, JOF_BYTE)
+OPDEF(JSOP_DELDESC, 187,"deldesc", NULL, 1, 2, 1, 17, JOF_BYTE |JOF_ELEM|JOF_DEL)
+
+/*
+ * Opcodes for extended literal addressing, using unsigned 24-bit immediate
+ * operands to hold integer operands (JSOP_UINT24), extended atom indexes in
+ * script->atomMap (JSOP_LITERAL, JSOP_FINDNAME), and ops prefixed by such
+ * atom index immediates (JSOP_LITOPX). See jsemit.c, EmitAtomIndexOp.
+ */
+OPDEF(JSOP_UINT24, 188,"uint24", NULL, 4, 0, 1, 16, JOF_UINT24)
+OPDEF(JSOP_LITERAL, 189,"literal", NULL, 4, 0, 1, 19, JOF_UINT24)
+OPDEF(JSOP_FINDNAME, 190,"findname", NULL, 4, 0, 2, 0, JOF_UINT24)
+OPDEF(JSOP_LITOPX, 191,"litopx", NULL, 5, 0, 0, 0, JOF_LITOPX)
+
+/*
+ * Opcodes to help the decompiler deal with XML.
+ */
+OPDEF(JSOP_STARTXML, 192,"startxml", NULL, 1, 0, 0, 0, JOF_BYTE)
+OPDEF(JSOP_STARTXMLEXPR, 193,"startxmlexpr",NULL, 1, 0, 0, 0, JOF_BYTE)
+OPDEF(JSOP_SETMETHOD, 194,"setmethod", NULL, 3, 2, 1, 3, JOF_CONST|JOF_PROP|JOF_SET|JOF_ASSIGNING|JOF_DETECTING)
+
+/*
+ * Stop interpretation, emitted at end of script to save the threaded bytecode
+ * interpreter an extra branch test on every DO_NEXT_OP (see jsinterp.c).
+ */
+OPDEF(JSOP_STOP, 195,"stop", NULL, 1, 0, 0, 0, JOF_BYTE)
+
+/*
+ * Get an extant property or element value, throwing ReferenceError if the
+ * identified property does not exist.
+ */
+OPDEF(JSOP_GETXPROP, 196,"getxprop", NULL, 3, 1, 1, 18, JOF_CONST|JOF_PROP)
+OPDEF(JSOP_GETXELEM, 197,"getxelem", NULL, 1, 2, 1, 18, JOF_BYTE |JOF_ELEM|JOF_LEFTASSOC)
+
+/*
+ * Specialized JSOP_TYPEOF to avoid reporting undefined for typeof(0, undef).
+ */
+OPDEF(JSOP_TYPEOFEXPR, 198,js_typeof_str, NULL, 1, 1, 1, 15, JOF_BYTE|JOF_DETECTING)
+
+/*
+ * Block-local scope support.
+ */
+OPDEF(JSOP_ENTERBLOCK, 199,"enterblock", NULL, 3, 0, 0, 0, JOF_CONST)
+OPDEF(JSOP_LEAVEBLOCK, 200,"leaveblock", NULL, 3, 0, 0, 0, JOF_UINT16)
+OPDEF(JSOP_GETLOCAL, 201,"getlocal", NULL, 3, 0, 1, 19, JOF_LOCAL|JOF_NAME)
+OPDEF(JSOP_SETLOCAL, 202,"setlocal", NULL, 3, 1, 1, 3, JOF_LOCAL|JOF_NAME|JOF_SET)
+OPDEF(JSOP_INCLOCAL, 203,"inclocal", NULL, 3, 0, 1, 15, JOF_LOCAL|JOF_NAME|JOF_INC)
+OPDEF(JSOP_DECLOCAL, 204,"declocal", NULL, 3, 0, 1, 15, JOF_LOCAL|JOF_NAME|JOF_DEC)
+OPDEF(JSOP_LOCALINC, 205,"localinc", NULL, 3, 0, 1, 15, JOF_LOCAL|JOF_NAME|JOF_INC|JOF_POST)
+OPDEF(JSOP_LOCALDEC, 206,"localdec", NULL, 3, 0, 1, 15, JOF_LOCAL|JOF_NAME|JOF_DEC|JOF_POST)
+OPDEF(JSOP_FORLOCAL, 207,"forlocal", NULL, 3, 0, 1, 19, JOF_LOCAL|JOF_NAME|JOF_FOR)
+
+/*
+ * Iterator, generator, and array comprehension support.
+ */
+OPDEF(JSOP_STARTITER, 208,"startiter", NULL, 1, 0, 0, 0, JOF_BYTE)
+OPDEF(JSOP_ENDITER, 209,"enditer", NULL, 1, 1, 0, 0, JOF_BYTE)
+OPDEF(JSOP_GENERATOR, 210,"generator", NULL, 1, 0, 0, 0, JOF_BYTE)
+OPDEF(JSOP_YIELD, 211,"yield", NULL, 1, 1, 1, 1, JOF_BYTE)
+OPDEF(JSOP_ARRAYPUSH, 212,"arraypush", NULL, 3, 1, 0, 3, JOF_LOCAL)
+
+OPDEF(JSOP_FOREACHKEYVAL, 213,"foreachkeyval",NULL, 1, 1, 1, 0, JOF_BYTE)
+
+/*
+ * Variant of JSOP_ENUMELEM for destructuring const (const [a, b] = ...).
+ */
+OPDEF(JSOP_ENUMCONSTELEM, 214,"enumconstelem",NULL, 1, 3, 0, 3, JOF_BYTE|JOF_SET|JOF_ASSIGNING)
+
+/*
+ * A variant of JSOP_LEAVEBLOCK that has a result on the stack above the locals,
+ * which must be moved down when the block pops.
+ */
+OPDEF(JSOP_LEAVEBLOCKEXPR,215,"leaveblockexpr",NULL, 3, 0, 0, 1, JOF_UINT16)
diff --git a/third_party/js-1.7/jsosdep.h b/third_party/js-1.7/jsosdep.h
new file mode 100644
index 0000000..a266144
--- /dev/null
+++ b/third_party/js-1.7/jsosdep.h
@@ -0,0 +1,115 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsosdep_h___
+#define jsosdep_h___
+/*
+ * OS (and machine, and compiler XXX) dependent information.
+ */
+
+#if defined(XP_WIN) || defined(XP_OS2)
+
+#if defined(_WIN32) || defined (XP_OS2)
+#define JS_HAVE_LONG_LONG
+#else
+#undef JS_HAVE_LONG_LONG
+#endif
+#endif /* XP_WIN || XP_OS2 */
+
+#ifdef XP_BEOS
+#define JS_HAVE_LONG_LONG
+#endif
+
+
+#ifdef XP_UNIX
+
+/*
+ * Get OS specific header information.
+ */
+#if defined(XP_MACOSX) || defined(DARWIN)
+#define JS_HAVE_LONG_LONG
+
+#elif defined(AIXV3) || defined(AIX)
+#define JS_HAVE_LONG_LONG
+
+#elif defined(BSDI)
+#define JS_HAVE_LONG_LONG
+
+#elif defined(HPUX)
+#define JS_HAVE_LONG_LONG
+
+#elif defined(IRIX)
+#define JS_HAVE_LONG_LONG
+
+#elif defined(linux)
+#define JS_HAVE_LONG_LONG
+
+#elif defined(OSF1)
+#define JS_HAVE_LONG_LONG
+
+#elif defined(_SCO_DS)
+#undef JS_HAVE_LONG_LONG
+
+#elif defined(SOLARIS)
+#define JS_HAVE_LONG_LONG
+
+#elif defined(FREEBSD)
+#define JS_HAVE_LONG_LONG
+
+#elif defined(SUNOS4)
+#undef JS_HAVE_LONG_LONG
+
+/*
+ * Missing function prototypes.
+ */
+
+extern void *sbrk(int);
+
+#elif defined(UNIXWARE)
+#undef JS_HAVE_LONG_LONG
+
+#elif defined(VMS) && defined(__ALPHA)
+#define JS_HAVE_LONG_LONG
+
+#endif
+
+#endif /* XP_UNIX */
+
+#endif /* jsosdep_h___ */
+
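A minimal sketch of how a consumer might branch on JS_HAVE_LONG_LONG (the type name below is an illustrative assumption, not something defined by this header): platforms that define the macro can use a native 64-bit integer, while the rest fall back to an emulated high/low pair.

    #include "jsosdep.h"

    #ifdef JS_HAVE_LONG_LONG
    typedef long long SketchInt64;      /* native 64-bit arithmetic available */
    #else
    typedef struct SketchInt64 {
        unsigned long lo, hi;           /* emulated with two 32-bit halves */
    } SketchInt64;
    #endif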
diff --git a/third_party/js-1.7/jsotypes.h b/third_party/js-1.7/jsotypes.h
new file mode 100644
index 0000000..38d7286
--- /dev/null
+++ b/third_party/js-1.7/jsotypes.h
@@ -0,0 +1,202 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * This section typedefs the old 'native' types to the new PR<type>s.
+ * These definitions are scheduled to be eliminated at the earliest
+ * possible time. The NSPR API is implemented and documented using
+ * the new definitions.
+ */
+
+/*
+ * Note that we test for PROTYPES_H, not JSOTYPES_H. This is to avoid
+ * double-definitions of scalar types such as uint32, if NSPR's
+ * protypes.h is also included.
+ */
+#ifndef PROTYPES_H
+#define PROTYPES_H
+
+#ifdef XP_BEOS
+/* BeOS defines most int types in SupportDefs.h (int8, uint8, int16,
+ * uint16, int32, uint32, int64, uint64), so in the interest of
+ * not conflicting with other definitions elsewhere we have to skip the
+ * #ifdef jungle below, duplicate some definitions, and do our stuff.
+ */
+#include <SupportDefs.h>
+
+typedef JSUintn uintn;
+#ifndef _XP_Core_
+typedef JSIntn intn;
+#endif
+
+#else
+
+/* SVR4 typedef of uint is commonly found on UNIX machines. */
+#if defined(XP_UNIX) && !defined(__QNXNTO__)
+#include <sys/types.h>
+#else
+typedef JSUintn uint;
+#endif
+
+typedef JSUintn uintn;
+typedef JSUint64 uint64;
+#if !defined(_WIN32) && !defined(XP_OS2)
+typedef JSUint32 uint32;
+#else
+typedef unsigned long uint32;
+#endif
+typedef JSUint16 uint16;
+typedef JSUint8 uint8;
+
+#ifndef _XP_Core_
+typedef JSIntn intn;
+#endif
+
+/*
+ * On AIX 4.3, sys/inttypes.h (which is included by sys/types.h, a very
+ * common header file) defines the types int8, int16, int32, and int64.
+ * So we don't define these four types here to avoid conflicts in case
+ * the code also includes sys/types.h.
+ */
+#if defined(AIX) && defined(HAVE_SYS_INTTYPES_H)
+#include <sys/inttypes.h>
+#else
+typedef JSInt64 int64;
+
+/* /usr/include/model.h on HP-UX defines int8, int16, and int32 */
+#ifdef HPUX
+#include <model.h>
+#else
+#if !defined(_WIN32) && !defined(XP_OS2)
+typedef JSInt32 int32;
+#else
+typedef long int32;
+#endif
+typedef JSInt16 int16;
+typedef JSInt8 int8;
+#endif /* HPUX */
+#endif /* AIX && HAVE_SYS_INTTYPES_H */
+
+#endif /* XP_BEOS */
+
+typedef JSFloat64 float64;
+
+/* Re: jsbit.h */
+#define TEST_BIT JS_TEST_BIT
+#define SET_BIT JS_SET_BIT
+#define CLEAR_BIT JS_CLEAR_BIT
+
+/* Re: prarena.h->plarena.h */
+#define PRArena PLArena
+#define PRArenaPool PLArenaPool
+#define PRArenaStats PLArenaStats
+#define PR_ARENA_ALIGN PL_ARENA_ALIGN
+#define PR_INIT_ARENA_POOL PL_INIT_ARENA_POOL
+#define PR_ARENA_ALLOCATE PL_ARENA_ALLOCATE
+#define PR_ARENA_GROW PL_ARENA_GROW
+#define PR_ARENA_MARK PL_ARENA_MARK
+#define PR_CLEAR_UNUSED PL_CLEAR_UNUSED
+#define PR_CLEAR_ARENA PL_CLEAR_ARENA
+#define PR_ARENA_RELEASE PL_ARENA_RELEASE
+#define PR_COUNT_ARENA PL_COUNT_ARENA
+#define PR_ARENA_DESTROY PL_ARENA_DESTROY
+#define PR_InitArenaPool PL_InitArenaPool
+#define PR_FreeArenaPool PL_FreeArenaPool
+#define PR_FinishArenaPool PL_FinishArenaPool
+#define PR_CompactArenaPool PL_CompactArenaPool
+#define PR_ArenaFinish PL_ArenaFinish
+#define PR_ArenaAllocate PL_ArenaAllocate
+#define PR_ArenaGrow PL_ArenaGrow
+#define PR_ArenaRelease PL_ArenaRelease
+#define PR_ArenaCountAllocation PL_ArenaCountAllocation
+#define PR_ArenaCountInplaceGrowth PL_ArenaCountInplaceGrowth
+#define PR_ArenaCountGrowth PL_ArenaCountGrowth
+#define PR_ArenaCountRelease PL_ArenaCountRelease
+#define PR_ArenaCountRetract PL_ArenaCountRetract
+
+/* Re: prevent.h->plevent.h */
+#define PREvent PLEvent
+#define PREventQueue PLEventQueue
+#define PR_CreateEventQueue PL_CreateEventQueue
+#define PR_DestroyEventQueue PL_DestroyEventQueue
+#define PR_GetEventQueueMonitor PL_GetEventQueueMonitor
+#define PR_ENTER_EVENT_QUEUE_MONITOR PL_ENTER_EVENT_QUEUE_MONITOR
+#define PR_EXIT_EVENT_QUEUE_MONITOR PL_EXIT_EVENT_QUEUE_MONITOR
+#define PR_PostEvent PL_PostEvent
+#define PR_PostSynchronousEvent PL_PostSynchronousEvent
+#define PR_GetEvent PL_GetEvent
+#define PR_EventAvailable PL_EventAvailable
+#define PREventFunProc PLEventFunProc
+#define PR_MapEvents PL_MapEvents
+#define PR_RevokeEvents PL_RevokeEvents
+#define PR_ProcessPendingEvents PL_ProcessPendingEvents
+#define PR_WaitForEvent PL_WaitForEvent
+#define PR_EventLoop PL_EventLoop
+#define PR_GetEventQueueSelectFD PL_GetEventQueueSelectFD
+#define PRHandleEventProc PLHandleEventProc
+#define PRDestroyEventProc PLDestroyEventProc
+#define PR_InitEvent PL_InitEvent
+#define PR_GetEventOwner PL_GetEventOwner
+#define PR_HandleEvent PL_HandleEvent
+#define PR_DestroyEvent PL_DestroyEvent
+#define PR_DequeueEvent PL_DequeueEvent
+#define PR_GetMainEventQueue PL_GetMainEventQueue
+
+/* Re: prhash.h->plhash.h */
+#define PRHashEntry PLHashEntry
+#define PRHashTable PLHashTable
+#define PRHashNumber PLHashNumber
+#define PRHashFunction PLHashFunction
+#define PRHashComparator PLHashComparator
+#define PRHashEnumerator PLHashEnumerator
+#define PRHashAllocOps PLHashAllocOps
+#define PR_NewHashTable PL_NewHashTable
+#define PR_HashTableDestroy PL_HashTableDestroy
+#define PR_HashTableRawLookup PL_HashTableRawLookup
+#define PR_HashTableRawAdd PL_HashTableRawAdd
+#define PR_HashTableRawRemove PL_HashTableRawRemove
+#define PR_HashTableAdd PL_HashTableAdd
+#define PR_HashTableRemove PL_HashTableRemove
+#define PR_HashTableEnumerateEntries PL_HashTableEnumerateEntries
+#define PR_HashTableLookup PL_HashTableLookup
+#define PR_HashTableDump PL_HashTableDump
+#define PR_HashString PL_HashString
+#define PR_CompareStrings PL_CompareStrings
+#define PR_CompareValues PL_CompareValues
+
+#endif /* !defined(PROTYPES_H) */
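A brief illustration of what the PR_*/PL_* aliases above buy (the call below is hypothetical, not code from this patch): legacy callers written against the old NSPR-style names keep compiling, because the preprocessor rewrites each use to the corresponding PL entry point.

    /* After preprocessing this is exactly a call to PL_NewHashTable with
       PL_HashString, PL_CompareStrings and PL_CompareValues. */
    PRHashTable *table = PR_NewHashTable(16, PR_HashString,
                                         PR_CompareStrings, PR_CompareValues,
                                         NULL, NULL);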
diff --git a/third_party/js-1.7/jsparse.c b/third_party/js-1.7/jsparse.c
new file mode 100644
index 0000000..132e2ad
--- /dev/null
+++ b/third_party/js-1.7/jsparse.c
@@ -0,0 +1,6547 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sw=4 et tw=78:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * JS parser.
+ *
+ * This is a recursive-descent parser for the JavaScript language specified by
+ * "The JavaScript 1.5 Language Specification". It uses lexical and semantic
+ * feedback to disambiguate non-LL(1) structures. It generates trees of nodes
+ * induced by the recursive parsing (not precise syntax trees, see jsparse.h).
+ * After tree construction, it rewrites trees to fold constants and evaluate
+ * compile-time expressions. Finally, it calls js_EmitTree (see jsemit.h) to
+ * generate bytecode.
+ *
+ * This parser attempts no error recovery.
+ */
+#include "jsstddef.h"
+#include <stdlib.h>
+#include <string.h>
+#include <math.h>
+#include "jstypes.h"
+#include "jsarena.h" /* Added by JSIFY */
+#include "jsutil.h" /* Added by JSIFY */
+#include "jsapi.h"
+#include "jsarray.h"
+#include "jsatom.h"
+#include "jscntxt.h"
+#include "jsconfig.h"
+#include "jsemit.h"
+#include "jsfun.h"
+#include "jsinterp.h"
+#include "jslock.h"
+#include "jsnum.h"
+#include "jsobj.h"
+#include "jsopcode.h"
+#include "jsparse.h"
+#include "jsscan.h"
+#include "jsscope.h"
+#include "jsscript.h"
+#include "jsstr.h"
+
+#if JS_HAS_XML_SUPPORT
+#include "jsxml.h"
+#endif
+
+#if JS_HAS_DESTRUCTURING
+#include "jsdhash.h"
+#endif
+
+/*
+ * JS parsers, from lowest to highest precedence.
+ *
+ * Each parser takes a context, a token stream, and a tree context struct.
+ * Each returns a parse node tree or null on error.
+ */
+
+typedef JSParseNode *
+JSParser(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc);
+
+typedef JSParseNode *
+JSMemberParser(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc,
+ JSBool allowCallSyntax);
+
+typedef JSParseNode *
+JSPrimaryParser(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc,
+ JSTokenType tt, JSBool afterDot);
+
+static JSParser FunctionStmt;
+static JSParser FunctionExpr;
+static JSParser Statements;
+static JSParser Statement;
+static JSParser Variables;
+static JSParser Expr;
+static JSParser AssignExpr;
+static JSParser CondExpr;
+static JSParser OrExpr;
+static JSParser AndExpr;
+static JSParser BitOrExpr;
+static JSParser BitXorExpr;
+static JSParser BitAndExpr;
+static JSParser EqExpr;
+static JSParser RelExpr;
+static JSParser ShiftExpr;
+static JSParser AddExpr;
+static JSParser MulExpr;
+static JSParser UnaryExpr;
+static JSMemberParser MemberExpr;
+static JSPrimaryParser PrimaryExpr;
+
+/*
+ * Insist that the next token be of type tt, or report errno and return null.
+ * NB: this macro uses cx and ts from its lexical environment.
+ */
+#define MUST_MATCH_TOKEN(tt, errno) \
+ JS_BEGIN_MACRO \
+ if (js_GetToken(cx, ts) != tt) { \
+ js_ReportCompileErrorNumber(cx, ts, JSREPORT_TS | JSREPORT_ERROR, \
+ errno); \
+ return NULL; \
+ } \
+ JS_END_MACRO
+
+#define CHECK_RECURSION() \
+ JS_BEGIN_MACRO \
+ int stackDummy; \
+ if (!JS_CHECK_STACK_SIZE(cx, stackDummy)) { \
+ js_ReportCompileErrorNumber(cx, ts, JSREPORT_TS | JSREPORT_ERROR, \
+ JSMSG_OVER_RECURSED); \
+ return NULL; \
+ } \
+ JS_END_MACRO
+
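A minimal usage sketch for the two macros above (the function, token, and message names are illustrative assumptions rather than code from this file); as the comment on MUST_MATCH_TOKEN notes, both macros expect cx and ts to be in scope.

    static JSParseNode *
    SketchParenExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc)
    {
        JSParseNode *pn;

        CHECK_RECURSION();                      /* bail out on deep nesting */
        MUST_MATCH_TOKEN(TOK_LP, JSMSG_PAREN_BEFORE_COND);  /* require '(' */
        pn = Expr(cx, ts, tc);                  /* parse the inner expression */
        if (!pn)
            return NULL;
        MUST_MATCH_TOKEN(TOK_RP, JSMSG_PAREN_AFTER_COND);   /* require ')' */
        return pn;
    }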
+#ifdef METER_PARSENODES
+static uint32 parsenodes = 0;
+static uint32 maxparsenodes = 0;
+static uint32 recyclednodes = 0;
+#endif
+
+static JSParseNode *
+RecycleTree(JSParseNode *pn, JSTreeContext *tc)
+{
+ JSParseNode *next;
+
+ if (!pn)
+ return NULL;
+ JS_ASSERT(pn != tc->nodeList); /* catch back-to-back dup recycles */
+ next = pn->pn_next;
+ pn->pn_next = tc->nodeList;
+ tc->nodeList = pn;
+#ifdef METER_PARSENODES
+ recyclednodes++;
+#endif
+ return next;
+}
+
+static JSParseNode *
+NewOrRecycledNode(JSContext *cx, JSTreeContext *tc)
+{
+ JSParseNode *pn;
+
+ pn = tc->nodeList;
+ if (!pn) {
+ JS_ARENA_ALLOCATE_TYPE(pn, JSParseNode, &cx->tempPool);
+ if (!pn)
+ JS_ReportOutOfMemory(cx);
+ } else {
+ tc->nodeList = pn->pn_next;
+
+ /* Recycle immediate descendants only, to save work and working set. */
+ switch (pn->pn_arity) {
+ case PN_FUNC:
+ RecycleTree(pn->pn_body, tc);
+ break;
+ case PN_LIST:
+ if (pn->pn_head) {
+ /* XXX check for dup recycles in the list */
+ *pn->pn_tail = tc->nodeList;
+ tc->nodeList = pn->pn_head;
+#ifdef METER_PARSENODES
+ recyclednodes += pn->pn_count;
+#endif
+ }
+ break;
+ case PN_TERNARY:
+ RecycleTree(pn->pn_kid1, tc);
+ RecycleTree(pn->pn_kid2, tc);
+ RecycleTree(pn->pn_kid3, tc);
+ break;
+ case PN_BINARY:
+ RecycleTree(pn->pn_left, tc);
+ RecycleTree(pn->pn_right, tc);
+ break;
+ case PN_UNARY:
+ RecycleTree(pn->pn_kid, tc);
+ break;
+ case PN_NAME:
+ RecycleTree(pn->pn_expr, tc);
+ break;
+ case PN_NULLARY:
+ break;
+ }
+ }
+#ifdef METER_PARSENODES
+ if (pn) {
+ parsenodes++;
+ if (parsenodes - recyclednodes > maxparsenodes)
+ maxparsenodes = parsenodes - recyclednodes;
+ }
+#endif
+ return pn;
+}
+
+/*
+ * Allocate a JSParseNode from cx's temporary arena.
+ */
+static JSParseNode *
+NewParseNode(JSContext *cx, JSTokenStream *ts, JSParseNodeArity arity,
+ JSTreeContext *tc)
+{
+ JSParseNode *pn;
+ JSToken *tp;
+
+ pn = NewOrRecycledNode(cx, tc);
+ if (!pn)
+ return NULL;
+ tp = &CURRENT_TOKEN(ts);
+ pn->pn_type = tp->type;
+ pn->pn_pos = tp->pos;
+ pn->pn_op = JSOP_NOP;
+ pn->pn_arity = arity;
+ pn->pn_next = NULL;
+ pn->pn_ts = ts;
+ pn->pn_source = NULL;
+ return pn;
+}
+
+static JSParseNode *
+NewBinary(JSContext *cx, JSTokenType tt,
+ JSOp op, JSParseNode *left, JSParseNode *right,
+ JSTreeContext *tc)
+{
+ JSParseNode *pn, *pn1, *pn2;
+
+ if (!left || !right)
+ return NULL;
+
+ /*
+ * Flatten a left-associative (left-heavy) tree of a given operator into
+ * a list, to reduce js_FoldConstants and js_EmitTree recursion.
+ */
+ if (left->pn_type == tt &&
+ left->pn_op == op &&
+ (js_CodeSpec[op].format & JOF_LEFTASSOC)) {
+ if (left->pn_arity != PN_LIST) {
+ pn1 = left->pn_left, pn2 = left->pn_right;
+ left->pn_arity = PN_LIST;
+ PN_INIT_LIST_1(left, pn1);
+ PN_APPEND(left, pn2);
+ if (tt == TOK_PLUS) {
+ if (pn1->pn_type == TOK_STRING)
+ left->pn_extra |= PNX_STRCAT;
+ else if (pn1->pn_type != TOK_NUMBER)
+ left->pn_extra |= PNX_CANTFOLD;
+ if (pn2->pn_type == TOK_STRING)
+ left->pn_extra |= PNX_STRCAT;
+ else if (pn2->pn_type != TOK_NUMBER)
+ left->pn_extra |= PNX_CANTFOLD;
+ }
+ }
+ PN_APPEND(left, right);
+ left->pn_pos.end = right->pn_pos.end;
+ if (tt == TOK_PLUS) {
+ if (right->pn_type == TOK_STRING)
+ left->pn_extra |= PNX_STRCAT;
+ else if (right->pn_type != TOK_NUMBER)
+ left->pn_extra |= PNX_CANTFOLD;
+ }
+ return left;
+ }
+
+ /*
+ * Fold constant addition immediately, to conserve node space and, what's
+ * more, so js_FoldConstants never sees mixed addition and concatenation
+ * operations with more than one leading non-string operand in a PN_LIST
+ * generated for expressions such as 1 + 2 + "pt" (which should evaluate
+ * to "3pt", not "12pt").
+ */
+ if (tt == TOK_PLUS &&
+ left->pn_type == TOK_NUMBER &&
+ right->pn_type == TOK_NUMBER) {
+ left->pn_dval += right->pn_dval;
+ left->pn_pos.end = right->pn_pos.end;
+ RecycleTree(right, tc);
+ return left;
+ }
+
+ pn = NewOrRecycledNode(cx, tc);
+ if (!pn)
+ return NULL;
+ pn->pn_type = tt;
+ pn->pn_pos.begin = left->pn_pos.begin;
+ pn->pn_pos.end = right->pn_pos.end;
+ pn->pn_op = op;
+ pn->pn_arity = PN_BINARY;
+ pn->pn_left = left;
+ pn->pn_right = right;
+ pn->pn_next = NULL;
+ pn->pn_ts = NULL;
+ pn->pn_source = NULL;
+ return pn;
+}
+
+#if JS_HAS_GETTER_SETTER
+static JSTokenType
+CheckGetterOrSetter(JSContext *cx, JSTokenStream *ts, JSTokenType tt)
+{
+ JSAtom *atom;
+ JSRuntime *rt;
+ JSOp op;
+ const char *name;
+
+ JS_ASSERT(CURRENT_TOKEN(ts).type == TOK_NAME);
+ atom = CURRENT_TOKEN(ts).t_atom;
+ rt = cx->runtime;
+ if (atom == rt->atomState.getterAtom)
+ op = JSOP_GETTER;
+ else if (atom == rt->atomState.setterAtom)
+ op = JSOP_SETTER;
+ else
+ return TOK_NAME;
+ if (js_PeekTokenSameLine(cx, ts) != tt)
+ return TOK_NAME;
+ (void) js_GetToken(cx, ts);
+ if (CURRENT_TOKEN(ts).t_op != JSOP_NOP) {
+ js_ReportCompileErrorNumber(cx, ts, JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_BAD_GETTER_OR_SETTER,
+ (op == JSOP_GETTER)
+ ? js_getter_str
+ : js_setter_str);
+ return TOK_ERROR;
+ }
+ CURRENT_TOKEN(ts).t_op = op;
+ if (JS_HAS_STRICT_OPTION(cx)) {
+ name = js_AtomToPrintableString(cx, atom);
+ if (!name ||
+ !js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS |
+ JSREPORT_WARNING |
+ JSREPORT_STRICT,
+ JSMSG_DEPRECATED_USAGE,
+ name)) {
+ return TOK_ERROR;
+ }
+ }
+ return tt;
+}
+#endif
+
+static void
+MaybeSetupFrame(JSContext *cx, JSObject *chain, JSStackFrame *oldfp,
+ JSStackFrame *newfp)
+{
+ /*
+ * Always push a new frame if the current frame is special, so that
+ * Variables gets the correct variables object: the one from the special
+ * frame's caller.
+ */
+ if (oldfp &&
+ oldfp->varobj &&
+ oldfp->scopeChain == chain &&
+ !(oldfp->flags & JSFRAME_SPECIAL)) {
+ return;
+ }
+
+ memset(newfp, 0, sizeof *newfp);
+
+ /* Default to sharing the same variables object and scope chain. */
+ newfp->varobj = newfp->scopeChain = chain;
+ if (cx->options & JSOPTION_VAROBJFIX) {
+ while ((chain = JS_GetParent(cx, chain)) != NULL)
+ newfp->varobj = chain;
+ }
+ newfp->down = oldfp;
+ if (oldfp) {
+ /*
+ * In the case of eval and debugger frames, we need to dig down and find
+ * the real variables objects and function that our new stack frame is
+ * going to use.
+ */
+ newfp->flags = oldfp->flags & (JSFRAME_SPECIAL | JSFRAME_COMPILE_N_GO |
+ JSFRAME_SCRIPT_OBJECT);
+ while (oldfp->flags & JSFRAME_SPECIAL) {
+ oldfp = oldfp->down;
+ if (!oldfp)
+ break;
+ }
+ if (oldfp && (newfp->flags & JSFRAME_SPECIAL)) {
+ newfp->varobj = oldfp->varobj;
+ newfp->vars = oldfp->vars;
+ newfp->fun = oldfp->fun;
+ }
+ }
+ cx->fp = newfp;
+}
+
+/*
+ * Parse a top-level JS script.
+ */
+JS_FRIEND_API(JSParseNode *)
+js_ParseTokenStream(JSContext *cx, JSObject *chain, JSTokenStream *ts)
+{
+ JSStackFrame *fp, frame;
+ JSTreeContext tc;
+ JSParseNode *pn;
+
+ /*
+ * Push a compiler frame if we have no frames, or if the top frame is a
+ * lightweight function activation, or if its scope chain doesn't match
+ * the one passed to us.
+ */
+ fp = cx->fp;
+ MaybeSetupFrame(cx, chain, fp, &frame);
+
+ /*
+ * Protect atoms from being collected by a GC activation, which might
+ * - nest on this thread due to out of memory (the so-called "last ditch"
+ * GC attempted within js_NewGCThing), or
+ * - run for any reason on another thread if this thread is suspended on
+ * an object lock before it finishes generating bytecode into a script
+ * protected from the GC by a root or a stack frame reference.
+ */
+ JS_KEEP_ATOMS(cx->runtime);
+ TREE_CONTEXT_INIT(&tc);
+ pn = Statements(cx, ts, &tc);
+ if (pn) {
+ if (!js_MatchToken(cx, ts, TOK_EOF)) {
+ js_ReportCompileErrorNumber(cx, ts, JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_SYNTAX_ERROR);
+ pn = NULL;
+ } else {
+ pn->pn_type = TOK_LC;
+ if (!js_FoldConstants(cx, pn, &tc))
+ pn = NULL;
+ }
+ }
+
+ TREE_CONTEXT_FINISH(&tc);
+ JS_UNKEEP_ATOMS(cx->runtime);
+ cx->fp = fp;
+ return pn;
+}
+
+/*
+ * Compile a top-level script.
+ */
+JS_FRIEND_API(JSBool)
+js_CompileTokenStream(JSContext *cx, JSObject *chain, JSTokenStream *ts,
+ JSCodeGenerator *cg)
+{
+ JSStackFrame *fp, frame;
+ uint32 flags;
+ JSParseNode *pn;
+ JSBool ok;
+#ifdef METER_PARSENODES
+ void *sbrk(ptrdiff_t), *before = sbrk(0);
+#endif
+
+ /*
+ * Push a compiler frame if we have no frames, or if the top frame is a
+ * lightweight function activation, or if its scope chain doesn't match
+ * the one passed to us.
+ */
+ fp = cx->fp;
+ MaybeSetupFrame(cx, chain, fp, &frame);
+ flags = cx->fp->flags;
+ cx->fp->flags = flags |
+ (JS_HAS_COMPILE_N_GO_OPTION(cx)
+ ? JSFRAME_COMPILING | JSFRAME_COMPILE_N_GO
+ : JSFRAME_COMPILING);
+
+ /* Prevent GC activation while compiling. */
+ JS_KEEP_ATOMS(cx->runtime);
+
+ pn = Statements(cx, ts, &cg->treeContext);
+ if (!pn) {
+ ok = JS_FALSE;
+ } else if (!js_MatchToken(cx, ts, TOK_EOF)) {
+ js_ReportCompileErrorNumber(cx, ts, JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_SYNTAX_ERROR);
+ ok = JS_FALSE;
+ } else {
+#ifdef METER_PARSENODES
+ printf("Parser growth: %d (%u nodes, %u max, %u unrecycled)\n",
+ (char *)sbrk(0) - (char *)before,
+ parsenodes,
+ maxparsenodes,
+ parsenodes - recyclednodes);
+ before = sbrk(0);
+#endif
+
+ /*
+ * No need to emit bytecode here -- Statements already has, for each
+ * statement in turn. Search for TCF_COMPILING in Statements, below.
+ * That flag is set for every tc == &cg->treeContext, and it implies
+ * that the tc can be downcast to a cg and used to emit code during
+ * parsing, rather than at the end of the parse phase.
+ *
+ * Nowadays the threaded interpreter needs a stop instruction, so we
+ * do have to emit that here.
+ */
+ JS_ASSERT(cg->treeContext.flags & TCF_COMPILING);
+ ok = js_Emit1(cx, cg, JSOP_STOP) >= 0;
+ }
+
+#ifdef METER_PARSENODES
+ printf("Code-gen growth: %d (%u bytecodes, %u srcnotes)\n",
+ (char *)sbrk(0) - (char *)before, CG_OFFSET(cg), cg->noteCount);
+#endif
+#ifdef JS_ARENAMETER
+ JS_DumpArenaStats(stdout);
+#endif
+ JS_UNKEEP_ATOMS(cx->runtime);
+ cx->fp->flags = flags;
+ cx->fp = fp;
+ return ok;
+}
+
+/*
+ * Insist on a final return before control flows out of pn. Try to be a bit
+ * smart about loops: do {...; return e2;} while(0) at the end of a function
+ * that contains an early return e1 will get a strict warning. Similarly for
+ * iloops: while (true){...} is treated as though ... returns.
+ */
+#define ENDS_IN_OTHER 0
+#define ENDS_IN_RETURN 1
+#define ENDS_IN_BREAK 2
+
+static int
+HasFinalReturn(JSParseNode *pn)
+{
+ JSParseNode *pn2, *pn3;
+ uintN rv, rv2, hasDefault;
+
+ switch (pn->pn_type) {
+ case TOK_LC:
+ if (!pn->pn_head)
+ return ENDS_IN_OTHER;
+ return HasFinalReturn(PN_LAST(pn));
+
+ case TOK_IF:
+ if (!pn->pn_kid3)
+ return ENDS_IN_OTHER;
+ return HasFinalReturn(pn->pn_kid2) & HasFinalReturn(pn->pn_kid3);
+
+ case TOK_WHILE:
+ pn2 = pn->pn_left;
+ if (pn2->pn_type == TOK_PRIMARY && pn2->pn_op == JSOP_TRUE)
+ return ENDS_IN_RETURN;
+ if (pn2->pn_type == TOK_NUMBER && pn2->pn_dval)
+ return ENDS_IN_RETURN;
+ return ENDS_IN_OTHER;
+
+ case TOK_DO:
+ pn2 = pn->pn_right;
+ if (pn2->pn_type == TOK_PRIMARY) {
+ if (pn2->pn_op == JSOP_FALSE)
+ return HasFinalReturn(pn->pn_left);
+ if (pn2->pn_op == JSOP_TRUE)
+ return ENDS_IN_RETURN;
+ }
+ if (pn2->pn_type == TOK_NUMBER) {
+ if (pn2->pn_dval == 0)
+ return HasFinalReturn(pn->pn_left);
+ return ENDS_IN_RETURN;
+ }
+ return ENDS_IN_OTHER;
+
+ case TOK_FOR:
+ pn2 = pn->pn_left;
+ if (pn2->pn_arity == PN_TERNARY && !pn2->pn_kid2)
+ return ENDS_IN_RETURN;
+ return ENDS_IN_OTHER;
+
+ case TOK_SWITCH:
+ rv = ENDS_IN_RETURN;
+ hasDefault = ENDS_IN_OTHER;
+ pn2 = pn->pn_right;
+ if (pn2->pn_type == TOK_LEXICALSCOPE)
+ pn2 = pn2->pn_expr;
+ for (pn2 = pn2->pn_head; rv && pn2; pn2 = pn2->pn_next) {
+ if (pn2->pn_type == TOK_DEFAULT)
+ hasDefault = ENDS_IN_RETURN;
+ pn3 = pn2->pn_right;
+ JS_ASSERT(pn3->pn_type == TOK_LC);
+ if (pn3->pn_head) {
+ rv2 = HasFinalReturn(PN_LAST(pn3));
+ if (rv2 == ENDS_IN_OTHER && pn2->pn_next)
+ /* Falling through to next case or default. */;
+ else
+ rv &= rv2;
+ }
+ }
+ /* If a final switch has no default case, we judge it harshly. */
+ rv &= hasDefault;
+ return rv;
+
+ case TOK_BREAK:
+ return ENDS_IN_BREAK;
+
+ case TOK_WITH:
+ return HasFinalReturn(pn->pn_right);
+
+ case TOK_RETURN:
+ return ENDS_IN_RETURN;
+
+ case TOK_COLON:
+ case TOK_LEXICALSCOPE:
+ return HasFinalReturn(pn->pn_expr);
+
+ case TOK_THROW:
+ return ENDS_IN_RETURN;
+
+ case TOK_TRY:
+ /* If we have a finally block that returns, we are done. */
+ if (pn->pn_kid3) {
+ rv = HasFinalReturn(pn->pn_kid3);
+ if (rv == ENDS_IN_RETURN)
+ return rv;
+ }
+
+ /* Else check the try block and any and all catch statements. */
+ rv = HasFinalReturn(pn->pn_kid1);
+ if (pn->pn_kid2) {
+ JS_ASSERT(pn->pn_kid2->pn_arity == PN_LIST);
+ for (pn2 = pn->pn_kid2->pn_head; pn2; pn2 = pn2->pn_next)
+ rv &= HasFinalReturn(pn2);
+ }
+ return rv;
+
+ case TOK_CATCH:
+ /* Check this catch block's body. */
+ return HasFinalReturn(pn->pn_kid3);
+
+ case TOK_LET:
+ /* Non-binary let statements are let declarations. */
+ if (pn->pn_arity != PN_BINARY)
+ return ENDS_IN_OTHER;
+ return HasFinalReturn(pn->pn_right);
+
+ default:
+ return ENDS_IN_OTHER;
+ }
+}
+
+static JSBool
+ReportBadReturn(JSContext *cx, JSTokenStream *ts, uintN flags, uintN errnum,
+ uintN anonerrnum)
+{
+ JSFunction *fun;
+ const char *name;
+
+ fun = cx->fp->fun;
+ if (fun->atom) {
+ name = js_AtomToPrintableString(cx, fun->atom);
+ } else {
+ errnum = anonerrnum;
+ name = NULL;
+ }
+ return js_ReportCompileErrorNumber(cx, ts, JSREPORT_TS | flags, errnum,
+ name);
+}
+
+static JSBool
+CheckFinalReturn(JSContext *cx, JSTokenStream *ts, JSParseNode *pn)
+{
+ return HasFinalReturn(pn) == ENDS_IN_RETURN ||
+ ReportBadReturn(cx, ts, JSREPORT_WARNING | JSREPORT_STRICT,
+ JSMSG_NO_RETURN_VALUE, JSMSG_ANON_NO_RETURN_VALUE);
+}
+
+static JSParseNode *
+FunctionBody(JSContext *cx, JSTokenStream *ts, JSFunction *fun,
+ JSTreeContext *tc)
+{
+ JSStackFrame *fp, frame;
+ JSObject *funobj;
+ JSStmtInfo stmtInfo;
+ uintN oldflags, firstLine;
+ JSParseNode *pn;
+
+ fp = cx->fp;
+ funobj = fun->object;
+ if (!fp || fp->fun != fun || fp->varobj != funobj ||
+ fp->scopeChain != funobj) {
+ memset(&frame, 0, sizeof frame);
+ frame.fun = fun;
+ frame.varobj = frame.scopeChain = funobj;
+ frame.down = fp;
+ if (fp)
+ frame.flags = fp->flags & JSFRAME_COMPILE_N_GO;
+ cx->fp = &frame;
+ }
+
+ /*
+ * Set interpreted early so js_EmitTree can test it to decide whether to
+ * eliminate useless expressions.
+ */
+ fun->flags |= JSFUN_INTERPRETED;
+
+ js_PushStatement(tc, &stmtInfo, STMT_BLOCK, -1);
+ stmtInfo.flags = SIF_BODY_BLOCK;
+
+ oldflags = tc->flags;
+ tc->flags &= ~(TCF_RETURN_EXPR | TCF_RETURN_VOID);
+ tc->flags |= TCF_IN_FUNCTION;
+
+ /*
+ * Save the body's first line, and store it in pn->pn_pos.begin.lineno
+ * later, because we may not have peeked in ts yet, so Statements won't
+ * acquire a valid pn->pn_pos.begin from the current token.
+ */
+ firstLine = ts->lineno;
+ pn = Statements(cx, ts, tc);
+
+ js_PopStatement(tc);
+
+ /* Check for falling off the end of a function that returns a value. */
+ if (pn && JS_HAS_STRICT_OPTION(cx) && (tc->flags & TCF_RETURN_EXPR)) {
+ if (!CheckFinalReturn(cx, ts, pn))
+ pn = NULL;
+ }
+
+ /*
+ * If we have a parse tree in pn and a code generator in tc, emit this
+ * function's code. We must do this here, not in js_CompileFunctionBody,
+ * in order to detect TCF_IN_FUNCTION among tc->flags.
+ */
+ if (pn) {
+ pn->pn_pos.begin.lineno = firstLine;
+ if ((tc->flags & TCF_COMPILING)) {
+ JSCodeGenerator *cg = (JSCodeGenerator *) tc;
+
+ if (!js_FoldConstants(cx, pn, tc) ||
+ !js_EmitFunctionBytecode(cx, cg, pn)) {
+ pn = NULL;
+ }
+ }
+ }
+
+ cx->fp = fp;
+ tc->flags = oldflags | (tc->flags & (TCF_FUN_FLAGS | TCF_HAS_DEFXMLNS));
+ return pn;
+}
+
+/*
+ * Compile a JS function body, which might appear as the value of an event
+ * handler attribute in an HTML <INPUT> tag.
+ */
+JSBool
+js_CompileFunctionBody(JSContext *cx, JSTokenStream *ts, JSFunction *fun)
+{
+ JSArenaPool codePool, notePool;
+ JSCodeGenerator funcg;
+ JSStackFrame *fp, frame;
+ JSObject *funobj;
+ JSParseNode *pn;
+
+ JS_InitArenaPool(&codePool, "code", 1024, sizeof(jsbytecode));
+ JS_InitArenaPool(&notePool, "note", 1024, sizeof(jssrcnote));
+ if (!js_InitCodeGenerator(cx, &funcg, &codePool, &notePool,
+ ts->filename, ts->lineno,
+ ts->principals)) {
+ return JS_FALSE;
+ }
+
+ /* Prevent GC activation while compiling. */
+ JS_KEEP_ATOMS(cx->runtime);
+
+ /* Push a JSStackFrame for use by FunctionBody. */
+ fp = cx->fp;
+ funobj = fun->object;
+ JS_ASSERT(!fp || (fp->fun != fun && fp->varobj != funobj &&
+ fp->scopeChain != funobj));
+ memset(&frame, 0, sizeof frame);
+ frame.fun = fun;
+ frame.varobj = frame.scopeChain = funobj;
+ frame.down = fp;
+ frame.flags = JS_HAS_COMPILE_N_GO_OPTION(cx)
+ ? JSFRAME_COMPILING | JSFRAME_COMPILE_N_GO
+ : JSFRAME_COMPILING;
+ cx->fp = &frame;
+
+ /*
+ * Farble the body so that it looks like a block statement to js_EmitTree,
+ * which is called beneath FunctionBody; see Statements, further below in
+ * this file. FunctionBody pushes a STMT_BLOCK record around its call to
+ * Statements, so Statements will not compile each statement as it loops
+ * to save JSParseNode space -- it will not compile at all, only build a
+ * JSParseNode tree.
+ *
+ * Therefore we must fold constants, allocate try notes, and generate code
+ * for this function, including a stop opcode at the end.
+ */
+ CURRENT_TOKEN(ts).type = TOK_LC;
+ pn = FunctionBody(cx, ts, fun, &funcg.treeContext);
+ if (pn && !js_NewScriptFromCG(cx, &funcg, fun))
+ pn = NULL;
+
+ /* Restore saved state and release code generation arenas. */
+ cx->fp = fp;
+ JS_UNKEEP_ATOMS(cx->runtime);
+ js_FinishCodeGenerator(cx, &funcg);
+ JS_FinishArenaPool(&codePool);
+ JS_FinishArenaPool(&notePool);
+ return pn != NULL;
+}
+
+/*
+ * Parameter block types for the several Binder functions. We use a common
+ * helper function signature in order to share code among destructuring and
+ * simple variable declaration parsers. In the destructuring case, the binder
+ * function is called indirectly from the variable declaration parser by way
+ * of CheckDestructuring and its friends.
+ */
+typedef struct BindData BindData;
+
+typedef JSBool
+(*Binder)(JSContext *cx, BindData *data, JSAtom *atom, JSTreeContext *tc);
+
+struct BindData {
+ JSParseNode *pn; /* error source coordinate */
+ JSTokenStream *ts; /* fallback if pn is null */
+ JSObject *obj; /* the variable object */
+ JSOp op; /* prolog bytecode or nop */
+ Binder binder; /* binder, discriminates u */
+ union {
+ struct {
+ JSFunction *fun; /* must come first! see next */
+ } arg;
+ struct {
+ JSFunction *fun; /* this overlays u.arg.fun */
+ JSClass *clasp;
+ JSPropertyOp getter;
+ JSPropertyOp setter;
+ uintN attrs;
+ } var;
+ struct {
+ jsuint index;
+ uintN overflow;
+ } let;
+ } u;
+};
+
+/*
+ * Given BindData *data and JSREPORT_* flags, expand to the second and third
+ * actual parameters to js_ReportCompileErrorNumber. Prefer reporting via pn
+ * to reporting via ts, for better destructuring error pointers.
+ */
+#define BIND_DATA_REPORT_ARGS(data, flags) \
+ (data)->pn ? (void *)(data)->pn : (void *)(data)->ts, \
+ ((data)->pn ? JSREPORT_PN : JSREPORT_TS) | (flags)
+
+static JSBool
+BumpFormalCount(JSContext *cx, JSFunction *fun)
+{
+ if (fun->nargs == JS_BITMASK(16)) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_TOO_MANY_FUN_ARGS);
+ return JS_FALSE;
+ }
+ fun->nargs++;
+ return JS_TRUE;
+}
+
+static JSBool
+BindArg(JSContext *cx, BindData *data, JSAtom *atom, JSTreeContext *tc)
+{
+ JSObject *obj, *pobj;
+ JSProperty *prop;
+ JSBool ok;
+ uintN dupflag;
+ JSFunction *fun;
+ const char *name;
+
+ obj = data->obj;
+ ok = js_LookupHiddenProperty(cx, obj, ATOM_TO_JSID(atom), &pobj, &prop);
+ if (!ok)
+ return JS_FALSE;
+
+ dupflag = 0;
+ if (prop) {
+ JS_ASSERT(pobj == obj);
+ name = js_AtomToPrintableString(cx, atom);
+
+ /*
+ * A duplicate parameter name, a "feature" required by ECMA-262.
+ * We force a duplicate node on the SCOPE_LAST_PROP(scope) list
+ * with the same id, distinguished by the SPROP_IS_DUPLICATE flag,
+ * and not mapped by an entry in scope.
+ */
+ ok = name &&
+ js_ReportCompileErrorNumber(cx,
+ BIND_DATA_REPORT_ARGS(data,
+ JSREPORT_WARNING |
+ JSREPORT_STRICT),
+ JSMSG_DUPLICATE_FORMAL,
+ name);
+
+ OBJ_DROP_PROPERTY(cx, pobj, prop);
+ if (!ok)
+ return JS_FALSE;
+
+ dupflag = SPROP_IS_DUPLICATE;
+ }
+
+ fun = data->u.arg.fun;
+ if (!js_AddHiddenProperty(cx, data->obj, ATOM_TO_JSID(atom),
+ js_GetArgument, js_SetArgument,
+ SPROP_INVALID_SLOT,
+ JSPROP_PERMANENT | JSPROP_SHARED,
+ dupflag | SPROP_HAS_SHORTID,
+ fun->nargs)) {
+ return JS_FALSE;
+ }
+
+ return BumpFormalCount(cx, fun);
+}
+
+static JSBool
+BindLocalVariable(JSContext *cx, BindData *data, JSAtom *atom)
+{
+ JSFunction *fun;
+
+ /*
+ * Can't increase fun->nvars in an active frame, so insist that getter is
+ * js_GetLocalVariable, not js_GetCallVariable or anything else.
+ */
+ if (data->u.var.getter != js_GetLocalVariable)
+ return JS_TRUE;
+
+ /*
+ * Don't bind a variable with the hidden name 'arguments', per ECMA-262.
+ * Instead 'var arguments' always restates the predefined property of the
+ * activation objects with unhidden name 'arguments'. Assignment to such
+ * a variable must be handled specially.
+ */
+ if (atom == cx->runtime->atomState.argumentsAtom)
+ return JS_TRUE;
+
+ fun = data->u.var.fun;
+ if (!js_AddHiddenProperty(cx, data->obj, ATOM_TO_JSID(atom),
+ data->u.var.getter, data->u.var.setter,
+ SPROP_INVALID_SLOT,
+ data->u.var.attrs | JSPROP_SHARED,
+ SPROP_HAS_SHORTID, fun->u.i.nvars)) {
+ return JS_FALSE;
+ }
+ if (fun->u.i.nvars == JS_BITMASK(16)) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_TOO_MANY_FUN_VARS);
+ return JS_FALSE;
+ }
+ fun->u.i.nvars++;
+ return JS_TRUE;
+}
+
+#if JS_HAS_DESTRUCTURING
+/*
+ * Forward declaration to maintain top-down presentation.
+ */
+static JSParseNode *
+DestructuringExpr(JSContext *cx, BindData *data, JSTreeContext *tc,
+ JSTokenType tt);
+
+static JSBool
+BindDestructuringArg(JSContext *cx, BindData *data, JSAtom *atom,
+ JSTreeContext *tc)
+{
+ JSAtomListElement *ale;
+ JSFunction *fun;
+ JSObject *obj, *pobj;
+ JSProperty *prop;
+ const char *name;
+
+ ATOM_LIST_SEARCH(ale, &tc->decls, atom);
+ if (!ale) {
+ ale = js_IndexAtom(cx, atom, &tc->decls);
+ if (!ale)
+ return JS_FALSE;
+ ALE_SET_JSOP(ale, data->op);
+ }
+
+ fun = data->u.var.fun;
+ obj = data->obj;
+ if (!js_LookupHiddenProperty(cx, obj, ATOM_TO_JSID(atom), &pobj, &prop))
+ return JS_FALSE;
+
+ if (prop) {
+ JS_ASSERT(pobj == obj && OBJ_IS_NATIVE(pobj));
+ name = js_AtomToPrintableString(cx, atom);
+ if (!name ||
+ !js_ReportCompileErrorNumber(cx,
+ BIND_DATA_REPORT_ARGS(data,
+ JSREPORT_WARNING |
+ JSREPORT_STRICT),
+ JSMSG_DUPLICATE_FORMAL,
+ name)) {
+ return JS_FALSE;
+ }
+ OBJ_DROP_PROPERTY(cx, pobj, prop);
+ } else {
+ if (!BindLocalVariable(cx, data, atom))
+ return JS_FALSE;
+ }
+ return JS_TRUE;
+}
+#endif /* JS_HAS_DESTRUCTURING */
+
+static JSParseNode *
+FunctionDef(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc,
+ JSBool lambda)
+{
+ JSOp op, prevop;
+ JSParseNode *pn, *body, *result;
+ JSTokenType tt;
+ JSAtom *funAtom, *objAtom;
+ JSStackFrame *fp;
+ JSObject *varobj, *pobj;
+ JSAtomListElement *ale;
+ JSProperty *prop;
+ JSFunction *fun;
+ JSTreeContext funtc;
+#if JS_HAS_DESTRUCTURING
+ JSParseNode *item, *list = NULL;
+#endif
+
+ /* Make a TOK_FUNCTION node. */
+#if JS_HAS_GETTER_SETTER
+ op = CURRENT_TOKEN(ts).t_op;
+#endif
+ pn = NewParseNode(cx, ts, PN_FUNC, tc);
+ if (!pn)
+ return NULL;
+
+ /* Scan the optional function name into funAtom. */
+ ts->flags |= TSF_KEYWORD_IS_NAME;
+ tt = js_GetToken(cx, ts);
+ ts->flags &= ~TSF_KEYWORD_IS_NAME;
+ if (tt == TOK_NAME) {
+ funAtom = CURRENT_TOKEN(ts).t_atom;
+ } else {
+ funAtom = NULL;
+ js_UngetToken(ts);
+ }
+
+ /* Find the nearest variable-declaring scope and use it as our parent. */
+ fp = cx->fp;
+ varobj = fp->varobj;
+
+ /*
+ * Record names for function statements in tc->decls so we know when to
+ * avoid optimizing variable references that might name a function.
+ */
+ if (!lambda && funAtom) {
+ ATOM_LIST_SEARCH(ale, &tc->decls, funAtom);
+ if (ale) {
+ prevop = ALE_JSOP(ale);
+ if (JS_HAS_STRICT_OPTION(cx) || prevop == JSOP_DEFCONST) {
+ const char *name = js_AtomToPrintableString(cx, funAtom);
+ if (!name ||
+ !js_ReportCompileErrorNumber(cx, ts,
+ (prevop != JSOP_DEFCONST)
+ ? JSREPORT_TS |
+ JSREPORT_WARNING |
+ JSREPORT_STRICT
+ : JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_REDECLARED_VAR,
+ (prevop == JSOP_DEFFUN ||
+ prevop == JSOP_CLOSURE)
+ ? js_function_str
+ : (prevop == JSOP_DEFCONST)
+ ? js_const_str
+ : js_var_str,
+ name)) {
+ return NULL;
+ }
+ }
+ if (!AT_TOP_LEVEL(tc) && prevop == JSOP_DEFVAR)
+ tc->flags |= TCF_FUN_CLOSURE_VS_VAR;
+ } else {
+ ale = js_IndexAtom(cx, funAtom, &tc->decls);
+ if (!ale)
+ return NULL;
+ }
+ ALE_SET_JSOP(ale, AT_TOP_LEVEL(tc) ? JSOP_DEFFUN : JSOP_CLOSURE);
+
+ /*
+ * A function nested at top level inside another's body needs only a
+ * local variable to bind its name to its value, and not an activation
+ * object property (it might also need the activation property, if the
+ * outer function contains with statements, e.g., but the stack slot
+ * wins when jsemit.c's BindNameToSlot can optimize a JSOP_NAME into a
+ * JSOP_GETVAR bytecode).
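+         *
+         * E.g. (an illustrative sketch; the names are arbitrary):
+         *
+         *   function outer() { function inner() { } ... }
+         *
+         * gives 'inner' a local-variable binding in 'outer', so references
+         * to the name can compile to JSOP_GETVAR instead of JSOP_NAME.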
+ */
+ if (AT_TOP_LEVEL(tc) && (tc->flags & TCF_IN_FUNCTION)) {
+ JSScopeProperty *sprop;
+
+ /*
+ * Define a property on the outer function so that BindNameToSlot
+ * can properly optimize accesses.
+ */
+ JS_ASSERT(OBJ_GET_CLASS(cx, varobj) == &js_FunctionClass);
+ JS_ASSERT(fp->fun == (JSFunction *) JS_GetPrivate(cx, varobj));
+ if (!js_LookupHiddenProperty(cx, varobj, ATOM_TO_JSID(funAtom),
+ &pobj, &prop)) {
+ return NULL;
+ }
+ if (prop)
+ OBJ_DROP_PROPERTY(cx, pobj, prop);
+ sprop = NULL;
+ if (!prop ||
+ pobj != varobj ||
+ (sprop = (JSScopeProperty *)prop,
+ sprop->getter != js_GetLocalVariable)) {
+ uintN sflags;
+
+ /*
+ * Use SPROP_IS_DUPLICATE if there is a formal argument of the
+ * same name, so the decompiler can find the parameter name.
+ */
+ sflags = (sprop && sprop->getter == js_GetArgument)
+ ? SPROP_IS_DUPLICATE | SPROP_HAS_SHORTID
+ : SPROP_HAS_SHORTID;
+ if (!js_AddHiddenProperty(cx, varobj, ATOM_TO_JSID(funAtom),
+ js_GetLocalVariable,
+ js_SetLocalVariable,
+ SPROP_INVALID_SLOT,
+ JSPROP_PERMANENT | JSPROP_SHARED,
+ sflags, fp->fun->u.i.nvars)) {
+ return NULL;
+ }
+ if (fp->fun->u.i.nvars == JS_BITMASK(16)) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_TOO_MANY_FUN_VARS);
+ return NULL;
+ }
+ fp->fun->u.i.nvars++;
+ }
+ }
+ }
+
+ fun = js_NewFunction(cx, NULL, NULL, 0, lambda ? JSFUN_LAMBDA : 0, varobj,
+ funAtom);
+ if (!fun)
+ return NULL;
+#if JS_HAS_GETTER_SETTER
+ if (op != JSOP_NOP)
+ fun->flags |= (op == JSOP_GETTER) ? JSPROP_GETTER : JSPROP_SETTER;
+#endif
+
+ /*
+ * Atomize fun->object early to protect against a last-ditch GC under
+ * js_LookupHiddenProperty.
+ *
+ * Absent use of the new scoped local GC roots API around compiler calls,
+ * we need to atomize here to protect against a GC activation. Atoms are
+ * protected from GC during compilation by the JS_FRIEND_API entry points
+ * in this file. There doesn't seem to be any gain in switching from the
+ * atom-keeping method to the bulkier, slower scoped local roots method.
+ */
+ objAtom = js_AtomizeObject(cx, fun->object, 0);
+ if (!objAtom)
+ return NULL;
+
+ /* Initialize early for possible flags mutation via DestructuringExpr. */
+ TREE_CONTEXT_INIT(&funtc);
+
+ /* Now parse formal argument list and compute fun->nargs. */
+ MUST_MATCH_TOKEN(TOK_LP, JSMSG_PAREN_BEFORE_FORMAL);
+ if (!js_MatchToken(cx, ts, TOK_RP)) {
+ BindData data;
+
+ data.pn = NULL;
+ data.ts = ts;
+ data.obj = fun->object;
+ data.op = JSOP_NOP;
+ data.binder = BindArg;
+ data.u.arg.fun = fun;
+
+ do {
+ tt = js_GetToken(cx, ts);
+ switch (tt) {
+#if JS_HAS_DESTRUCTURING
+ case TOK_LB:
+ case TOK_LC:
+ {
+ JSParseNode *lhs, *rhs;
+ jsint slot;
+
+ /*
+ * A destructuring formal parameter turns into one or more
+ * local variables initialized from properties of a single
+ * anonymous positional parameter, so here we must tweak our
+ * binder and its data.
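+                 *
+                 * E.g. (an illustrative sketch), a formal list such as
+                 *
+                 *   function f([a, b], c) { ... }
+                 *
+                 * reserves one anonymous positional parameter for [a, b]
+                 * and binds a and b as locals initialized from it, while
+                 * c remains an ordinary formal.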
+ */
+ data.op = JSOP_DEFVAR;
+ data.binder = BindDestructuringArg;
+ data.u.var.clasp = &js_FunctionClass;
+ data.u.var.getter = js_GetLocalVariable;
+ data.u.var.setter = js_SetLocalVariable;
+ data.u.var.attrs = JSPROP_PERMANENT;
+
+ /*
+                 * Temporarily transfer the ownership of the recycle list to
+ * funtc. See bug 313967.
+ */
+ funtc.nodeList = tc->nodeList;
+ tc->nodeList = NULL;
+ lhs = DestructuringExpr(cx, &data, &funtc, tt);
+ tc->nodeList = funtc.nodeList;
+ funtc.nodeList = NULL;
+ if (!lhs)
+ return NULL;
+
+ /*
+ * Restore the formal parameter binder in case there are more
+ * non-destructuring formals in the parameter list.
+ */
+ data.binder = BindArg;
+
+ /*
+ * Adjust fun->nargs to count the single anonymous positional
+ * parameter that is to be destructured.
+ */
+ slot = fun->nargs;
+ if (!BumpFormalCount(cx, fun))
+ return NULL;
+
+ /*
+ * Synthesize a destructuring assignment from the single
+ * anonymous positional parameter into the destructuring
+ * left-hand-side expression and accumulate it in list.
+ */
+ rhs = NewParseNode(cx, ts, PN_NAME, tc);
+ if (!rhs)
+ return NULL;
+ rhs->pn_type = TOK_NAME;
+ rhs->pn_op = JSOP_GETARG;
+ rhs->pn_atom = cx->runtime->atomState.emptyAtom;
+ rhs->pn_expr = NULL;
+ rhs->pn_slot = slot;
+ rhs->pn_attrs = 0;
+
+ item = NewBinary(cx, TOK_ASSIGN, JSOP_NOP, lhs, rhs, tc);
+ if (!item)
+ return NULL;
+ if (!list) {
+ list = NewParseNode(cx, ts, PN_LIST, tc);
+ if (!list)
+ return NULL;
+ list->pn_type = TOK_COMMA;
+ PN_INIT_LIST(list);
+ }
+ PN_APPEND(list, item);
+ break;
+ }
+#endif /* JS_HAS_DESTRUCTURING */
+
+ case TOK_NAME:
+ if (!data.binder(cx, &data, CURRENT_TOKEN(ts).t_atom, tc))
+ return NULL;
+ break;
+
+ default:
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_MISSING_FORMAL);
+ return NULL;
+ }
+ } while (js_MatchToken(cx, ts, TOK_COMMA));
+
+ MUST_MATCH_TOKEN(TOK_RP, JSMSG_PAREN_AFTER_FORMAL);
+ }
+
+ MUST_MATCH_TOKEN(TOK_LC, JSMSG_CURLY_BEFORE_BODY);
+ pn->pn_pos.begin = CURRENT_TOKEN(ts).pos.begin;
+
+ /*
+     * Temporarily transfer the ownership of the recycle list to funtc.
+ * See bug 313967.
+ */
+ funtc.nodeList = tc->nodeList;
+ tc->nodeList = NULL;
+ body = FunctionBody(cx, ts, fun, &funtc);
+ tc->nodeList = funtc.nodeList;
+ funtc.nodeList = NULL;
+
+ if (!body)
+ return NULL;
+
+ MUST_MATCH_TOKEN(TOK_RC, JSMSG_CURLY_AFTER_BODY);
+ pn->pn_pos.end = CURRENT_TOKEN(ts).pos.end;
+
+#if JS_HAS_DESTRUCTURING
+ /*
+ * If there were destructuring formal parameters, prepend the initializing
+ * comma expression that we synthesized to body. If the body is a lexical
+ * scope node, we must make a special TOK_BODY node, to prepend the formal
+ * parameter destructuring code without bracing the decompilation of the
+ * function body's lexical scope.
+ */
+ if (list) {
+ if (body->pn_arity != PN_LIST) {
+ JSParseNode *block;
+
+ JS_ASSERT(body->pn_type == TOK_LEXICALSCOPE);
+ JS_ASSERT(body->pn_arity == PN_NAME);
+
+ block = NewParseNode(cx, ts, PN_LIST, tc);
+ if (!block)
+ return NULL;
+ block->pn_type = TOK_BODY;
+ block->pn_pos = body->pn_pos;
+ PN_INIT_LIST_1(block, body);
+
+ body = block;
+ }
+
+ item = NewParseNode(cx, ts, PN_UNARY, tc);
+ if (!item)
+ return NULL;
+
+ item->pn_type = TOK_SEMI;
+ item->pn_pos.begin = item->pn_pos.end = body->pn_pos.begin;
+ item->pn_kid = list;
+ item->pn_next = body->pn_head;
+ body->pn_head = item;
+ if (body->pn_tail == &body->pn_head)
+ body->pn_tail = &item->pn_next;
+ ++body->pn_count;
+ }
+#endif
+
+ /*
+ * If we collected flags that indicate nested heavyweight functions, or
+ * this function contains heavyweight-making statements (references to
+ * __parent__ or __proto__; use of with, eval, import, or export; and
+ * assignment to arguments), flag the function as heavyweight (requiring
+ * a call object per invocation).
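+     *
+     * E.g. (illustrative): 'function f(o) { with (o) return x; }' takes
+     * this path, since resolving x at runtime needs the full scope chain,
+     * including a call object for f.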
+ */
+ if (funtc.flags & TCF_FUN_HEAVYWEIGHT) {
+ fun->flags |= JSFUN_HEAVYWEIGHT;
+ tc->flags |= TCF_FUN_HEAVYWEIGHT;
+ } else {
+ /*
+ * If this function is a named statement function not at top-level
+         * (i.e. a JSOP_CLOSURE, not a function definition or expression), then
+ * our enclosing function, if any, must be heavyweight.
+ *
+ * The TCF_FUN_USES_NONLOCALS flag is set only by the code generator,
+ * so it won't be set here. Assert that it's not. We have to check
+ * it later, in js_EmitTree, after js_EmitFunctionBody has traversed
+         * the function's body.
+ */
+ JS_ASSERT(!(funtc.flags & TCF_FUN_USES_NONLOCALS));
+ if (!lambda && funAtom && !AT_TOP_LEVEL(tc))
+ tc->flags |= TCF_FUN_HEAVYWEIGHT;
+ }
+
+ result = pn;
+ if (lambda) {
+ /*
+ * ECMA ed. 3 standard: function expression, possibly anonymous.
+ */
+ op = funAtom ? JSOP_NAMEDFUNOBJ : JSOP_ANONFUNOBJ;
+ } else if (!funAtom) {
+ /*
+ * If this anonymous function definition is *not* embedded within a
+ * larger expression, we treat it as an expression statement, not as
+ * a function declaration -- and not as a syntax error (as ECMA-262
+ * Edition 3 would have it). Backward compatibility trumps all.
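+         *
+         * E.g. (illustrative): a bare 'function () { return 1; }' at
+         * statement level parses as an expression statement whose value
+         * is simply discarded.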
+ */
+ result = NewParseNode(cx, ts, PN_UNARY, tc);
+ if (!result)
+ return NULL;
+ result->pn_type = TOK_SEMI;
+ result->pn_pos = pn->pn_pos;
+ result->pn_kid = pn;
+ op = JSOP_ANONFUNOBJ;
+ } else if (!AT_TOP_LEVEL(tc)) {
+ /*
+ * ECMA ed. 3 extension: a function expression statement not at the
+ * top level, e.g., in a compound statement such as the "then" part
+ * of an "if" statement, binds a closure only if control reaches that
+ * sub-statement.
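+         *
+         * E.g. (illustrative): 'if (cond) function g() { }' binds g only
+         * when cond is true and control actually reaches the declaration.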
+ */
+ op = JSOP_CLOSURE;
+ } else {
+ op = JSOP_NOP;
+ }
+
+ pn->pn_funAtom = objAtom;
+ pn->pn_op = op;
+ pn->pn_body = body;
+ pn->pn_flags = funtc.flags & (TCF_FUN_FLAGS | TCF_HAS_DEFXMLNS);
+ pn->pn_tryCount = funtc.tryCount;
+ TREE_CONTEXT_FINISH(&funtc);
+ return result;
+}
+
+static JSParseNode *
+FunctionStmt(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc)
+{
+ return FunctionDef(cx, ts, tc, JS_FALSE);
+}
+
+static JSParseNode *
+FunctionExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc)
+{
+ return FunctionDef(cx, ts, tc, JS_TRUE);
+}
+
+/*
+ * Parse the statements in a block, creating a TOK_LC node that lists the
+ * statements' trees. If called from block-parsing code, the caller must
+ * match { before and } after.
+ */
+static JSParseNode *
+Statements(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc)
+{
+ JSParseNode *pn, *pn2, *saveBlock;
+ JSTokenType tt;
+
+ CHECK_RECURSION();
+
+ pn = NewParseNode(cx, ts, PN_LIST, tc);
+ if (!pn)
+ return NULL;
+ saveBlock = tc->blockNode;
+ tc->blockNode = pn;
+ PN_INIT_LIST(pn);
+
+ ts->flags |= TSF_OPERAND;
+ while ((tt = js_PeekToken(cx, ts)) > TOK_EOF && tt != TOK_RC) {
+ ts->flags &= ~TSF_OPERAND;
+ pn2 = Statement(cx, ts, tc);
+ if (!pn2) {
+ if (ts->flags & TSF_EOF)
+ ts->flags |= TSF_UNEXPECTED_EOF;
+ return NULL;
+ }
+ ts->flags |= TSF_OPERAND;
+
+ /* Detect a function statement for the TOK_LC case in Statement. */
+ if (pn2->pn_type == TOK_FUNCTION && !AT_TOP_LEVEL(tc))
+ tc->flags |= TCF_HAS_FUNCTION_STMT;
+
+ /* If compiling top-level statements, emit as we go to save space. */
+ if (!tc->topStmt && (tc->flags & TCF_COMPILING)) {
+ if (cx->fp->fun &&
+ JS_HAS_STRICT_OPTION(cx) &&
+ (tc->flags & TCF_RETURN_EXPR)) {
+ /*
+ * Check pn2 for lack of a final return statement if it is the
+ * last statement in the block.
+ */
+ tt = js_PeekToken(cx, ts);
+ if ((tt == TOK_EOF || tt == TOK_RC) &&
+ !CheckFinalReturn(cx, ts, pn2)) {
+ tt = TOK_ERROR;
+ break;
+ }
+
+ /*
+ * Clear TCF_RETURN_EXPR so FunctionBody doesn't try to
+ * CheckFinalReturn again.
+ */
+ tc->flags &= ~TCF_RETURN_EXPR;
+ }
+ if (!js_FoldConstants(cx, pn2, tc) ||
+ !js_AllocTryNotes(cx, (JSCodeGenerator *)tc) ||
+ !js_EmitTree(cx, (JSCodeGenerator *)tc, pn2)) {
+ tt = TOK_ERROR;
+ break;
+ }
+ RecycleTree(pn2, tc);
+ } else {
+ PN_APPEND(pn, pn2);
+ }
+ }
+
+ /*
+ * Handle the case where there was a let declaration under this block. If
+ * it replaced tc->blockNode with a new block node then we must refresh pn
+ * and then restore tc->blockNode.
+ */
+ if (tc->blockNode != pn)
+ pn = tc->blockNode;
+ tc->blockNode = saveBlock;
+
+ ts->flags &= ~TSF_OPERAND;
+ if (tt == TOK_ERROR)
+ return NULL;
+
+ pn->pn_pos.end = CURRENT_TOKEN(ts).pos.end;
+ return pn;
+}
+
+static JSParseNode *
+Condition(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc)
+{
+ JSParseNode *pn, *pn2;
+
+ MUST_MATCH_TOKEN(TOK_LP, JSMSG_PAREN_BEFORE_COND);
+ pn = Expr(cx, ts, tc);
+ if (!pn)
+ return NULL;
+ MUST_MATCH_TOKEN(TOK_RP, JSMSG_PAREN_AFTER_COND);
+
+ /*
+ * Check for (a = b) and "correct" it to (a == b) iff b's operator has
+ * greater precedence than ==.
+ * XXX not ECMA, but documented in several books -- now a strict warning.
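+     *
+     * E.g. (illustrative): 'if (a = b + 1)' draws the strict warning, and
+     * in the pre-ECMA versions that permit rewriting it is parsed as if it
+     * read 'if (a == b + 1)'.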
+ */
+ if (pn->pn_type == TOK_ASSIGN &&
+ pn->pn_op == JSOP_NOP &&
+ pn->pn_right->pn_type > TOK_EQOP)
+ {
+ JSBool rewrite = !JS_VERSION_IS_ECMA(cx);
+ if (!js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS |
+ JSREPORT_WARNING |
+ JSREPORT_STRICT,
+ JSMSG_EQUAL_AS_ASSIGN,
+ rewrite
+ ? "\nAssuming equality test"
+ : "")) {
+ return NULL;
+ }
+ if (rewrite) {
+ pn->pn_type = TOK_EQOP;
+ pn->pn_op = (JSOp)cx->jsop_eq;
+ pn2 = pn->pn_left;
+ switch (pn2->pn_op) {
+ case JSOP_SETNAME:
+ pn2->pn_op = JSOP_NAME;
+ break;
+ case JSOP_SETPROP:
+ pn2->pn_op = JSOP_GETPROP;
+ break;
+ case JSOP_SETELEM:
+ pn2->pn_op = JSOP_GETELEM;
+ break;
+ default:
+ JS_ASSERT(0);
+ }
+ }
+ }
+ return pn;
+}
+
+static JSBool
+MatchLabel(JSContext *cx, JSTokenStream *ts, JSParseNode *pn)
+{
+ JSAtom *label;
+ JSTokenType tt;
+
+ tt = js_PeekTokenSameLine(cx, ts);
+ if (tt == TOK_ERROR)
+ return JS_FALSE;
+ if (tt == TOK_NAME) {
+ (void) js_GetToken(cx, ts);
+ label = CURRENT_TOKEN(ts).t_atom;
+ } else {
+ label = NULL;
+ }
+ pn->pn_atom = label;
+ return JS_TRUE;
+}
+
+#if JS_HAS_EXPORT_IMPORT
+static JSParseNode *
+ImportExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc)
+{
+ JSParseNode *pn, *pn2;
+ JSTokenType tt;
+
+ MUST_MATCH_TOKEN(TOK_NAME, JSMSG_NO_IMPORT_NAME);
+ pn = NewParseNode(cx, ts, PN_NAME, tc);
+ if (!pn)
+ return NULL;
+ pn->pn_op = JSOP_NAME;
+ pn->pn_atom = CURRENT_TOKEN(ts).t_atom;
+ pn->pn_expr = NULL;
+ pn->pn_slot = -1;
+ pn->pn_attrs = 0;
+
+ ts->flags |= TSF_OPERAND;
+ while ((tt = js_GetToken(cx, ts)) == TOK_DOT || tt == TOK_LB) {
+ ts->flags &= ~TSF_OPERAND;
+ if (pn->pn_op == JSOP_IMPORTALL)
+ goto bad_import;
+
+ if (tt == TOK_DOT) {
+ pn2 = NewParseNode(cx, ts, PN_NAME, tc);
+ if (!pn2)
+ return NULL;
+ ts->flags |= TSF_KEYWORD_IS_NAME;
+ if (js_MatchToken(cx, ts, TOK_STAR)) {
+ pn2->pn_op = JSOP_IMPORTALL;
+ pn2->pn_atom = NULL;
+ pn2->pn_slot = -1;
+ pn2->pn_attrs = 0;
+ } else {
+ MUST_MATCH_TOKEN(TOK_NAME, JSMSG_NAME_AFTER_DOT);
+ pn2->pn_op = JSOP_GETPROP;
+ pn2->pn_atom = CURRENT_TOKEN(ts).t_atom;
+ pn2->pn_slot = -1;
+ pn2->pn_attrs = 0;
+ }
+ ts->flags &= ~TSF_KEYWORD_IS_NAME;
+ pn2->pn_expr = pn;
+ pn2->pn_pos.begin = pn->pn_pos.begin;
+ pn2->pn_pos.end = CURRENT_TOKEN(ts).pos.end;
+ } else {
+ /* Make a TOK_LB binary node. */
+ pn2 = NewBinary(cx, tt, JSOP_GETELEM, pn, Expr(cx, ts, tc), tc);
+ if (!pn2)
+ return NULL;
+
+ MUST_MATCH_TOKEN(TOK_RB, JSMSG_BRACKET_IN_INDEX);
+ }
+
+ pn = pn2;
+ ts->flags |= TSF_OPERAND;
+ }
+ ts->flags &= ~TSF_OPERAND;
+ if (tt == TOK_ERROR)
+ return NULL;
+ js_UngetToken(ts);
+
+ switch (pn->pn_op) {
+ case JSOP_GETPROP:
+ pn->pn_op = JSOP_IMPORTPROP;
+ break;
+ case JSOP_GETELEM:
+ pn->pn_op = JSOP_IMPORTELEM;
+ break;
+ case JSOP_IMPORTALL:
+ break;
+ default:
+ goto bad_import;
+ }
+ return pn;
+
+ bad_import:
+ js_ReportCompileErrorNumber(cx, ts, JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_BAD_IMPORT);
+ return NULL;
+}
+#endif /* JS_HAS_EXPORT_IMPORT */
+
+static JSBool
+BindLet(JSContext *cx, BindData *data, JSAtom *atom, JSTreeContext *tc)
+{
+ JSObject *blockObj;
+ JSScopeProperty *sprop;
+ JSAtomListElement *ale;
+
+ blockObj = data->obj;
+ sprop = SCOPE_GET_PROPERTY(OBJ_SCOPE(blockObj), ATOM_TO_JSID(atom));
+ ATOM_LIST_SEARCH(ale, &tc->decls, atom);
+ if (sprop || (ale && ALE_JSOP(ale) == JSOP_DEFCONST)) {
+ const char *name;
+
+ if (sprop) {
+ JS_ASSERT(sprop->flags & SPROP_HAS_SHORTID);
+ JS_ASSERT((uint16)sprop->shortid < data->u.let.index);
+ }
+
+ name = js_AtomToPrintableString(cx, atom);
+ if (name) {
+ js_ReportCompileErrorNumber(cx,
+ BIND_DATA_REPORT_ARGS(data,
+ JSREPORT_ERROR),
+ JSMSG_REDECLARED_VAR,
+ (ale && ALE_JSOP(ale) == JSOP_DEFCONST)
+ ? js_const_str
+ : "variable",
+ name);
+ }
+ return JS_FALSE;
+ }
+
+ if (data->u.let.index == JS_BIT(16)) {
+ js_ReportCompileErrorNumber(cx,
+ BIND_DATA_REPORT_ARGS(data, JSREPORT_ERROR),
+ data->u.let.overflow);
+ return JS_FALSE;
+ }
+
+ /* Use JSPROP_ENUMERATE to aid the disassembler. */
+ return js_DefineNativeProperty(cx, blockObj, ATOM_TO_JSID(atom),
+ JSVAL_VOID, NULL, NULL,
+ JSPROP_ENUMERATE | JSPROP_PERMANENT,
+ SPROP_HAS_SHORTID,
+ (intN)data->u.let.index++,
+ NULL);
+}
+
+static JSBool
+BindVarOrConst(JSContext *cx, BindData *data, JSAtom *atom, JSTreeContext *tc)
+{
+ JSStmtInfo *stmt;
+ JSAtomListElement *ale;
+ JSOp op, prevop;
+ const char *name;
+ JSFunction *fun;
+ JSObject *obj, *pobj;
+ JSProperty *prop;
+ JSBool ok;
+ JSPropertyOp getter, setter;
+ JSScopeProperty *sprop;
+
+ stmt = js_LexicalLookup(tc, atom, NULL, JS_FALSE);
+ ATOM_LIST_SEARCH(ale, &tc->decls, atom);
+ op = data->op;
+ if ((stmt && stmt->type != STMT_WITH) || ale) {
+ prevop = ale ? ALE_JSOP(ale) : JSOP_DEFVAR;
+ if (JS_HAS_STRICT_OPTION(cx)
+ ? op != JSOP_DEFVAR || prevop != JSOP_DEFVAR
+ : op == JSOP_DEFCONST || prevop == JSOP_DEFCONST) {
+ name = js_AtomToPrintableString(cx, atom);
+ if (!name ||
+ !js_ReportCompileErrorNumber(cx,
+ BIND_DATA_REPORT_ARGS(data,
+ (op != JSOP_DEFCONST &&
+ prevop != JSOP_DEFCONST)
+ ? JSREPORT_WARNING |
+ JSREPORT_STRICT
+ : JSREPORT_ERROR),
+ JSMSG_REDECLARED_VAR,
+ (prevop == JSOP_DEFFUN ||
+ prevop == JSOP_CLOSURE)
+ ? js_function_str
+ : (prevop == JSOP_DEFCONST)
+ ? js_const_str
+ : js_var_str,
+ name)) {
+ return JS_FALSE;
+ }
+ }
+ if (op == JSOP_DEFVAR && prevop == JSOP_CLOSURE)
+ tc->flags |= TCF_FUN_CLOSURE_VS_VAR;
+ }
+ if (!ale) {
+ ale = js_IndexAtom(cx, atom, &tc->decls);
+ if (!ale)
+ return JS_FALSE;
+ }
+ ALE_SET_JSOP(ale, op);
+
+ fun = data->u.var.fun;
+ obj = data->obj;
+ if (!fun) {
+ /* Don't lookup global variables at compile time. */
+ prop = NULL;
+ } else {
+ JS_ASSERT(OBJ_IS_NATIVE(obj));
+ if (!js_LookupHiddenProperty(cx, obj, ATOM_TO_JSID(atom),
+ &pobj, &prop)) {
+ return JS_FALSE;
+ }
+ }
+
+ ok = JS_TRUE;
+ getter = data->u.var.getter;
+ setter = data->u.var.setter;
+
+ if (prop && pobj == obj && OBJ_IS_NATIVE(pobj)) {
+ sprop = (JSScopeProperty *)prop;
+ if (sprop->getter == js_GetArgument) {
+ name = js_AtomToPrintableString(cx, atom);
+ if (!name) {
+ ok = JS_FALSE;
+ } else if (op == JSOP_DEFCONST) {
+ js_ReportCompileErrorNumber(cx,
+ BIND_DATA_REPORT_ARGS(data,
+ JSREPORT_ERROR),
+ JSMSG_REDECLARED_PARAM,
+ name);
+ ok = JS_FALSE;
+ } else {
+ getter = js_GetArgument;
+ setter = js_SetArgument;
+ ok = js_ReportCompileErrorNumber(cx,
+ BIND_DATA_REPORT_ARGS(data,
+ JSREPORT_WARNING |
+ JSREPORT_STRICT),
+ JSMSG_VAR_HIDES_ARG,
+ name);
+ }
+ } else {
+ JS_ASSERT(getter == js_GetLocalVariable);
+
+ if (fun) {
+ /* Not an argument, must be a redeclared local var. */
+ if (data->u.var.clasp == &js_FunctionClass) {
+ JS_ASSERT(sprop->getter == js_GetLocalVariable);
+ JS_ASSERT((sprop->flags & SPROP_HAS_SHORTID) &&
+ (uint16) sprop->shortid < fun->u.i.nvars);
+ } else if (data->u.var.clasp == &js_CallClass) {
+ if (sprop->getter == js_GetCallVariable) {
+ /*
+ * Referencing a name introduced by a var statement in
+ * the enclosing function. Check that the slot number
+ * we have is in range.
+ */
+ JS_ASSERT((sprop->flags & SPROP_HAS_SHORTID) &&
+ (uint16) sprop->shortid < fun->u.i.nvars);
+ } else {
+ /*
+ * A variable introduced through another eval: don't
+ * use the special getters and setters since we can't
+ * allocate a slot in the frame.
+ */
+ getter = sprop->getter;
+ setter = sprop->setter;
+ }
+ }
+
+ /* Override the old getter and setter, to handle eval. */
+ sprop = js_ChangeNativePropertyAttrs(cx, obj, sprop,
+ 0, sprop->attrs,
+ getter, setter);
+ if (!sprop)
+ ok = JS_FALSE;
+ }
+ }
+ if (prop)
+ OBJ_DROP_PROPERTY(cx, pobj, prop);
+ } else {
+ /*
+ * Property not found in current variable scope: we have not seen this
+ * variable before. Define a new local variable by adding a property
+ * to the function's scope, allocating one slot in the function's vars
+ * frame. Global variables and any locals declared in with statement
+ * bodies are handled at runtime, by script prolog JSOP_DEFVAR opcodes
+ * generated for slot-less vars.
+ */
+ sprop = NULL;
+ if (prop) {
+ OBJ_DROP_PROPERTY(cx, pobj, prop);
+ prop = NULL;
+ }
+
+ if (cx->fp->scopeChain == obj &&
+ !js_InWithStatement(tc) &&
+ !BindLocalVariable(cx, data, atom)) {
+ return JS_FALSE;
+ }
+ }
+ return ok;
+}
+
+#if JS_HAS_DESTRUCTURING
+
+static JSBool
+BindDestructuringVar(JSContext *cx, BindData *data, JSParseNode *pn,
+ JSTreeContext *tc)
+{
+ JSAtom *atom;
+
+ /*
+ * Destructuring is a form of assignment, so just as for an initialized
+ * simple variable, we must check for assignment to 'arguments' and flag
+ * the enclosing function (if any) as heavyweight.
+ */
+ JS_ASSERT(pn->pn_type == TOK_NAME);
+ atom = pn->pn_atom;
+ if (atom == cx->runtime->atomState.argumentsAtom)
+ tc->flags |= TCF_FUN_HEAVYWEIGHT;
+
+ data->pn = pn;
+ if (!data->binder(cx, data, atom, tc))
+ return JS_FALSE;
+ data->pn = NULL;
+
+ /*
+ * Select the appropriate name-setting opcode, which may be specialized
+ * further for local variable and argument slot optimizations. At this
+ * point, we can't select the optimal final opcode, yet we must preserve
+ * the CONST bit and convey "set", not "get".
+ */
+ pn->pn_op = (data->op == JSOP_DEFCONST)
+ ? JSOP_SETCONST
+ : JSOP_SETNAME;
+ pn->pn_attrs = data->u.var.attrs;
+ return JS_TRUE;
+}
+
+/*
+ * Here, we are destructuring {... P: Q, ...} = R, where P is any id, Q is any
+ * LHS expression except a destructuring initialiser, and R is on the stack.
+ * Because R is already evaluated, the usual LHS-specialized bytecodes won't
+ * work. After pushing R[P] we need to evaluate Q's "reference base" QB and
+ * then push its property name QN. At this point the stack looks like
+ *
+ * [... R, R[P], QB, QN]
+ *
+ * We need to set QB[QN] = R[P]. This is a job for JSOP_ENUMELEM, which takes
+ * its operands with left-hand side above right-hand side:
+ *
+ * [rval, lval, xval]
+ *
+ * and pops all three values, setting lval[xval] = rval. But we cannot select
+ * JSOP_ENUMELEM yet, because the LHS may turn out to be an arg or local var,
+ * which can be optimized further. So we select JSOP_SETNAME.
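+ *
+ * E.g. (an illustrative sketch; the names are arbitrary):
+ *
+ *   ({p: obj.x, q: arr[i]} = r);
+ *
+ * stores r.p into obj.x and r.q into arr[i]; obj.x and arr[i] are the Q
+ * positions whose opcodes are specialized below.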
+ */
+static JSBool
+BindDestructuringLHS(JSContext *cx, JSParseNode *pn, JSTreeContext *tc)
+{
+ while (pn->pn_type == TOK_RP)
+ pn = pn->pn_kid;
+
+ switch (pn->pn_type) {
+ case TOK_NAME:
+ if (pn->pn_atom == cx->runtime->atomState.argumentsAtom)
+ tc->flags |= TCF_FUN_HEAVYWEIGHT;
+ /* FALL THROUGH */
+ case TOK_DOT:
+ case TOK_LB:
+ pn->pn_op = JSOP_SETNAME;
+ break;
+
+#if JS_HAS_LVALUE_RETURN
+ case TOK_LP:
+ JS_ASSERT(pn->pn_op == JSOP_CALL || pn->pn_op == JSOP_EVAL);
+ pn->pn_op = JSOP_SETCALL;
+ break;
+#endif
+
+#if JS_HAS_XML_SUPPORT
+ case TOK_UNARYOP:
+ if (pn->pn_op == JSOP_XMLNAME) {
+ pn->pn_op = JSOP_BINDXMLNAME;
+ break;
+ }
+ /* FALL THROUGH */
+#endif
+
+ default:
+ js_ReportCompileErrorNumber(cx, pn, JSREPORT_PN | JSREPORT_ERROR,
+ JSMSG_BAD_LEFTSIDE_OF_ASS);
+ return JS_FALSE;
+ }
+
+ return JS_TRUE;
+}
+
+typedef struct FindPropValData {
+ uint32 numvars; /* # of destructuring vars in left side */
+ uint32 maxstep; /* max # of steps searching right side */
+ JSDHashTable table; /* hash table for O(1) right side search */
+} FindPropValData;
+
+typedef struct FindPropValEntry {
+ JSDHashEntryHdr hdr;
+ JSParseNode *pnkey;
+ JSParseNode *pnval;
+} FindPropValEntry;
+
+#define ASSERT_VALID_PROPERTY_KEY(pnkey) \
+ JS_ASSERT((pnkey)->pn_arity == PN_NULLARY && \
+ ((pnkey)->pn_type == TOK_NUMBER || \
+ (pnkey)->pn_type == TOK_STRING || \
+ (pnkey)->pn_type == TOK_NAME))
+
+JS_STATIC_DLL_CALLBACK(JSDHashNumber)
+HashFindPropValKey(JSDHashTable *table, const void *key)
+{
+ const JSParseNode *pnkey = (const JSParseNode *)key;
+
+ ASSERT_VALID_PROPERTY_KEY(pnkey);
+ return (pnkey->pn_type == TOK_NUMBER)
+ ? (JSDHashNumber) (JSDOUBLE_HI32(pnkey->pn_dval) ^
+ JSDOUBLE_LO32(pnkey->pn_dval))
+ : (JSDHashNumber) pnkey->pn_atom->number;
+}
+
+JS_STATIC_DLL_CALLBACK(JSBool)
+MatchFindPropValEntry(JSDHashTable *table,
+ const JSDHashEntryHdr *entry,
+ const void *key)
+{
+ const FindPropValEntry *fpve = (const FindPropValEntry *)entry;
+ const JSParseNode *pnkey = (const JSParseNode *)key;
+
+ ASSERT_VALID_PROPERTY_KEY(pnkey);
+ return pnkey->pn_type == fpve->pnkey->pn_type &&
+ ((pnkey->pn_type == TOK_NUMBER)
+ ? pnkey->pn_dval == fpve->pnkey->pn_dval
+ : pnkey->pn_atom == fpve->pnkey->pn_atom);
+}
+
+static const JSDHashTableOps FindPropValOps = {
+ JS_DHashAllocTable,
+ JS_DHashFreeTable,
+ JS_DHashGetKeyStub,
+ HashFindPropValKey,
+ MatchFindPropValEntry,
+ JS_DHashMoveEntryStub,
+ JS_DHashClearEntryStub,
+ JS_DHashFinalizeStub,
+ NULL
+};
+
+#define STEP_HASH_THRESHOLD 10
+#define BIG_DESTRUCTURING 5
+#define BIG_OBJECT_INIT 20
+
+static JSParseNode *
+FindPropertyValue(JSParseNode *pn, JSParseNode *pnid, FindPropValData *data)
+{
+ FindPropValEntry *entry;
+ JSParseNode *pnhit, *pnprop, *pnkey;
+ uint32 step;
+
+ /* If we have a hash table, use it as the sole source of truth. */
+ if (data->table.ops) {
+ entry = (FindPropValEntry *)
+ JS_DHashTableOperate(&data->table, pnid, JS_DHASH_LOOKUP);
+ return JS_DHASH_ENTRY_IS_BUSY(&entry->hdr) ? entry->pnval : NULL;
+ }
+
+ /* If pn is not an object initialiser node, we can't do anything here. */
+ if (pn->pn_type != TOK_RC)
+ return NULL;
+
+ /*
+ * We must search all the way through pn's list, to handle the case of an
+ * id duplicated for two or more property initialisers.
+ */
+ pnhit = NULL;
+ step = 0;
+ ASSERT_VALID_PROPERTY_KEY(pnid);
+ if (pnid->pn_type == TOK_NUMBER) {
+ for (pnprop = pn->pn_head; pnprop; pnprop = pnprop->pn_next) {
+ JS_ASSERT(pnprop->pn_type == TOK_COLON);
+ if (pnprop->pn_op == JSOP_NOP) {
+ pnkey = pnprop->pn_left;
+ ASSERT_VALID_PROPERTY_KEY(pnkey);
+ if (pnkey->pn_type == TOK_NUMBER &&
+ pnkey->pn_dval == pnid->pn_dval) {
+ pnhit = pnprop;
+ }
+ ++step;
+ }
+ }
+ } else {
+ for (pnprop = pn->pn_head; pnprop; pnprop = pnprop->pn_next) {
+ JS_ASSERT(pnprop->pn_type == TOK_COLON);
+ if (pnprop->pn_op == JSOP_NOP) {
+ pnkey = pnprop->pn_left;
+ ASSERT_VALID_PROPERTY_KEY(pnkey);
+ if (pnkey->pn_type == pnid->pn_type &&
+ pnkey->pn_atom == pnid->pn_atom) {
+ pnhit = pnprop;
+ }
+ ++step;
+ }
+ }
+ }
+ if (!pnhit)
+ return NULL;
+
+ /* Hit via full search -- see whether it's time to create the hash table. */
+ JS_ASSERT(!data->table.ops);
+ if (step > data->maxstep) {
+ data->maxstep = step;
+ if (step >= STEP_HASH_THRESHOLD &&
+ data->numvars >= BIG_DESTRUCTURING &&
+ pn->pn_count >= BIG_OBJECT_INIT &&
+ JS_DHashTableInit(&data->table, &FindPropValOps, pn,
+ sizeof(FindPropValEntry), pn->pn_count)) {
+
+ for (pn = pn->pn_head; pn; pn = pn->pn_next) {
+ ASSERT_VALID_PROPERTY_KEY(pn->pn_left);
+ entry = (FindPropValEntry *)
+ JS_DHashTableOperate(&data->table, pn->pn_left,
+ JS_DHASH_ADD);
+ entry->pnval = pn->pn_right;
+ }
+ }
+ }
+ return pnhit->pn_right;
+}
+
+/*
+ * If data is null, the caller is AssignExpr and instead of binding variables,
+ * we specialize lvalues in the property value positions of the left-hand side.
+ * If right is null, just check for well-formed lvalues.
+ */
+static JSBool
+CheckDestructuring(JSContext *cx, BindData *data,
+ JSParseNode *left, JSParseNode *right,
+ JSTreeContext *tc)
+{
+ JSBool ok;
+ FindPropValData fpvd;
+ JSParseNode *lhs, *rhs, *pn, *pn2;
+
+ if (left->pn_type == TOK_ARRAYCOMP) {
+ js_ReportCompileErrorNumber(cx, left, JSREPORT_PN | JSREPORT_ERROR,
+ JSMSG_ARRAY_COMP_LEFTSIDE);
+ return JS_FALSE;
+ }
+
+ ok = JS_TRUE;
+ fpvd.table.ops = NULL;
+ lhs = left->pn_head;
+ if (lhs && lhs->pn_type == TOK_DEFSHARP) {
+ pn = lhs;
+ goto no_var_name;
+ }
+
+ if (left->pn_type == TOK_RB) {
+ rhs = (right && right->pn_type == left->pn_type)
+ ? right->pn_head
+ : NULL;
+
+ while (lhs) {
+ pn = lhs, pn2 = rhs;
+ if (!data) {
+ /* Skip parenthesization if not in a variable declaration. */
+ while (pn->pn_type == TOK_RP)
+ pn = pn->pn_kid;
+ if (pn2) {
+ while (pn2->pn_type == TOK_RP)
+ pn2 = pn2->pn_kid;
+ }
+ }
+
+            /* Nullary comma is an elision; binary comma is an expression. */
+ if (pn->pn_type != TOK_COMMA || pn->pn_arity != PN_NULLARY) {
+ if (pn->pn_type == TOK_RB || pn->pn_type == TOK_RC) {
+ ok = CheckDestructuring(cx, data, pn, pn2, tc);
+ } else {
+ if (data) {
+ if (pn->pn_type != TOK_NAME)
+ goto no_var_name;
+
+ ok = BindDestructuringVar(cx, data, pn, tc);
+ } else {
+ ok = BindDestructuringLHS(cx, pn, tc);
+ }
+ }
+ if (!ok)
+ goto out;
+ }
+
+ lhs = lhs->pn_next;
+ if (rhs)
+ rhs = rhs->pn_next;
+ }
+ } else {
+ JS_ASSERT(left->pn_type == TOK_RC);
+ fpvd.numvars = left->pn_count;
+ fpvd.maxstep = 0;
+ rhs = NULL;
+
+ while (lhs) {
+ JS_ASSERT(lhs->pn_type == TOK_COLON);
+ pn = lhs->pn_right;
+ if (!data) {
+ /* Skip parenthesization if not in a variable declaration. */
+ while (pn->pn_type == TOK_RP)
+ pn = pn->pn_kid;
+ }
+
+ if (pn->pn_type == TOK_RB || pn->pn_type == TOK_RC) {
+ if (right) {
+ rhs = FindPropertyValue(right, lhs->pn_left, &fpvd);
+ if (rhs && !data) {
+ while (rhs->pn_type == TOK_RP)
+ rhs = rhs->pn_kid;
+ }
+ }
+
+ ok = CheckDestructuring(cx, data, pn, rhs, tc);
+ } else if (data) {
+ if (pn->pn_type != TOK_NAME)
+ goto no_var_name;
+
+ ok = BindDestructuringVar(cx, data, pn, tc);
+ } else {
+ ok = BindDestructuringLHS(cx, pn, tc);
+ }
+ if (!ok)
+ goto out;
+
+ lhs = lhs->pn_next;
+ }
+ }
+
+out:
+ if (fpvd.table.ops)
+ JS_DHashTableFinish(&fpvd.table);
+ return ok;
+
+no_var_name:
+ js_ReportCompileErrorNumber(cx, pn, JSREPORT_PN | JSREPORT_ERROR,
+ JSMSG_NO_VARIABLE_NAME);
+ ok = JS_FALSE;
+ goto out;
+}
+
+static JSParseNode *
+DestructuringExpr(JSContext *cx, BindData *data, JSTreeContext *tc,
+ JSTokenType tt)
+{
+ JSParseNode *pn;
+
+ pn = PrimaryExpr(cx, data->ts, tc, tt, JS_FALSE);
+ if (!pn)
+ return NULL;
+ if (!CheckDestructuring(cx, data, pn, NULL, tc))
+ return NULL;
+ return pn;
+}
+
+#endif /* JS_HAS_DESTRUCTURING */
+
+extern const char js_with_statement_str[];
+
+static JSParseNode *
+ContainsStmt(JSParseNode *pn, JSTokenType tt)
+{
+ JSParseNode *pn2, *pnt;
+
+ if (!pn)
+ return NULL;
+ if (pn->pn_type == tt)
+ return pn;
+ switch (pn->pn_arity) {
+ case PN_LIST:
+ for (pn2 = pn->pn_head; pn2; pn2 = pn2->pn_next) {
+ pnt = ContainsStmt(pn2, tt);
+ if (pnt)
+ return pnt;
+ }
+ break;
+ case PN_TERNARY:
+ pnt = ContainsStmt(pn->pn_kid1, tt);
+ if (pnt)
+ return pnt;
+ pnt = ContainsStmt(pn->pn_kid2, tt);
+ if (pnt)
+ return pnt;
+ return ContainsStmt(pn->pn_kid3, tt);
+ case PN_BINARY:
+ /*
+ * Limit recursion if pn is a binary expression, which can't contain a
+ * var statement.
+ */
+ if (pn->pn_op != JSOP_NOP)
+ return NULL;
+ pnt = ContainsStmt(pn->pn_left, tt);
+ if (pnt)
+ return pnt;
+ return ContainsStmt(pn->pn_right, tt);
+ case PN_UNARY:
+ if (pn->pn_op != JSOP_NOP)
+ return NULL;
+ return ContainsStmt(pn->pn_kid, tt);
+ case PN_NAME:
+ return ContainsStmt(pn->pn_expr, tt);
+ default:;
+ }
+ return NULL;
+}
+
+static JSParseNode *
+ReturnOrYield(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc,
+ JSParser operandParser)
+{
+ JSTokenType tt, tt2;
+ JSParseNode *pn, *pn2;
+
+ tt = CURRENT_TOKEN(ts).type;
+ if (!(tc->flags & TCF_IN_FUNCTION)) {
+ js_ReportCompileErrorNumber(cx, ts, JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_BAD_RETURN_OR_YIELD,
+#if JS_HAS_GENERATORS
+ (tt == TOK_YIELD) ? js_yield_str :
+#endif
+ js_return_str);
+ return NULL;
+ }
+
+ pn = NewParseNode(cx, ts, PN_UNARY, tc);
+ if (!pn)
+ return NULL;
+
+#if JS_HAS_GENERATORS
+ if (tt == TOK_YIELD)
+ tc->flags |= TCF_FUN_IS_GENERATOR;
+#endif
+
+ /* This is ugly, but we don't want to require a semicolon. */
+ ts->flags |= TSF_OPERAND;
+ tt2 = js_PeekTokenSameLine(cx, ts);
+ ts->flags &= ~TSF_OPERAND;
+ if (tt2 == TOK_ERROR)
+ return NULL;
+
+ if (tt2 != TOK_EOF && tt2 != TOK_EOL && tt2 != TOK_SEMI && tt2 != TOK_RC
+#if JS_HAS_GENERATORS
+ && (tt != TOK_YIELD || (tt2 != tt && tt2 != TOK_RB && tt2 != TOK_RP))
+#endif
+ ) {
+ pn2 = operandParser(cx, ts, tc);
+ if (!pn2)
+ return NULL;
+#if JS_HAS_GENERATORS
+ if (tt == TOK_RETURN)
+#endif
+ tc->flags |= TCF_RETURN_EXPR;
+ pn->pn_pos.end = pn2->pn_pos.end;
+ pn->pn_kid = pn2;
+ } else {
+#if JS_HAS_GENERATORS
+ if (tt == TOK_RETURN)
+#endif
+ tc->flags |= TCF_RETURN_VOID;
+ pn->pn_kid = NULL;
+ }
+
+ if ((~tc->flags & (TCF_RETURN_EXPR | TCF_FUN_IS_GENERATOR)) == 0) {
+ /* As in Python (see PEP-255), disallow return v; in generators. */
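+        /*
+         * E.g. (illustrative): 'function g() { yield 1; return 2; }' is
+         * rejected here, while a bare 'return;' inside a generator is fine.
+         */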
+ ReportBadReturn(cx, ts, JSREPORT_ERROR,
+ JSMSG_BAD_GENERATOR_RETURN,
+ JSMSG_BAD_ANON_GENERATOR_RETURN);
+ return NULL;
+ }
+
+ if (JS_HAS_STRICT_OPTION(cx) &&
+ (~tc->flags & (TCF_RETURN_EXPR | TCF_RETURN_VOID)) == 0 &&
+ !ReportBadReturn(cx, ts, JSREPORT_WARNING | JSREPORT_STRICT,
+ JSMSG_NO_RETURN_VALUE,
+ JSMSG_ANON_NO_RETURN_VALUE)) {
+ return NULL;
+ }
+
+ return pn;
+}
+
+static JSParseNode *
+PushLexicalScope(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc,
+ JSStmtInfo *stmtInfo)
+{
+ JSParseNode *pn;
+ JSObject *obj;
+ JSAtom *atom;
+
+ pn = NewParseNode(cx, ts, PN_NAME, tc);
+ if (!pn)
+ return NULL;
+
+ obj = js_NewBlockObject(cx);
+ if (!obj)
+ return NULL;
+
+ atom = js_AtomizeObject(cx, obj, 0);
+ if (!atom)
+ return NULL;
+
+ js_PushBlockScope(tc, stmtInfo, atom, -1);
+ pn->pn_type = TOK_LEXICALSCOPE;
+ pn->pn_op = JSOP_LEAVEBLOCK;
+ pn->pn_atom = atom;
+ pn->pn_expr = NULL;
+ pn->pn_slot = -1;
+ pn->pn_attrs = 0;
+ return pn;
+}
+
+#if JS_HAS_BLOCK_SCOPE
+
+static JSParseNode *
+LetBlock(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, JSBool statement)
+{
+ JSParseNode *pn, *pnblock, *pnlet;
+ JSStmtInfo stmtInfo;
+
+ JS_ASSERT(CURRENT_TOKEN(ts).type == TOK_LET);
+
+ /* Create the let binary node. */
+ pnlet = NewParseNode(cx, ts, PN_BINARY, tc);
+ if (!pnlet)
+ return NULL;
+
+ MUST_MATCH_TOKEN(TOK_LP, JSMSG_PAREN_BEFORE_LET);
+
+ /* This is a let block or expression of the form: let (a, b, c) .... */
+ pnblock = PushLexicalScope(cx, ts, tc, &stmtInfo);
+ if (!pnblock)
+ return NULL;
+ pn = pnblock;
+ pn->pn_expr = pnlet;
+
+ pnlet->pn_left = Variables(cx, ts, tc);
+ if (!pnlet->pn_left)
+ return NULL;
+ pnlet->pn_left->pn_extra = PNX_POPVAR;
+
+ MUST_MATCH_TOKEN(TOK_RP, JSMSG_PAREN_AFTER_LET);
+
+ ts->flags |= TSF_OPERAND;
+ if (statement && !js_MatchToken(cx, ts, TOK_LC)) {
+ /*
+ * If this is really an expression in let statement guise, then we
+ * need to wrap the TOK_LET node in a TOK_SEMI node so that we pop
+ * the return value of the expression.
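+         *
+         * E.g. (illustrative): 'let (x = 2) x * x;' at statement level is
+         * such an expression, and its value must be popped.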
+ */
+ pn = NewParseNode(cx, ts, PN_UNARY, tc);
+ if (!pn)
+ return NULL;
+ pn->pn_type = TOK_SEMI;
+ pn->pn_num = -1;
+ pn->pn_kid = pnblock;
+
+ statement = JS_FALSE;
+ }
+ ts->flags &= ~TSF_OPERAND;
+
+ if (statement) {
+ pnlet->pn_right = Statements(cx, ts, tc);
+ if (!pnlet->pn_right)
+ return NULL;
+ MUST_MATCH_TOKEN(TOK_RC, JSMSG_CURLY_AFTER_LET);
+ } else {
+ /*
+ * Change pnblock's opcode to the variant that propagates the last
+ * result down after popping the block, and clear statement.
+ */
+ pnblock->pn_op = JSOP_LEAVEBLOCKEXPR;
+ pnlet->pn_right = Expr(cx, ts, tc);
+ if (!pnlet->pn_right)
+ return NULL;
+ }
+
+ js_PopStatement(tc);
+ return pn;
+}
+
+#endif /* JS_HAS_BLOCK_SCOPE */
+
+static JSParseNode *
+Statement(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc)
+{
+ JSTokenType tt;
+ JSParseNode *pn, *pn1, *pn2, *pn3, *pn4;
+ JSStmtInfo stmtInfo, *stmt, *stmt2;
+ JSAtom *label;
+
+ CHECK_RECURSION();
+
+ ts->flags |= TSF_OPERAND;
+ tt = js_GetToken(cx, ts);
+ ts->flags &= ~TSF_OPERAND;
+
+#if JS_HAS_GETTER_SETTER
+ if (tt == TOK_NAME) {
+ tt = CheckGetterOrSetter(cx, ts, TOK_FUNCTION);
+ if (tt == TOK_ERROR)
+ return NULL;
+ }
+#endif
+
+ switch (tt) {
+#if JS_HAS_EXPORT_IMPORT
+ case TOK_EXPORT:
+ pn = NewParseNode(cx, ts, PN_LIST, tc);
+ if (!pn)
+ return NULL;
+ PN_INIT_LIST(pn);
+ if (js_MatchToken(cx, ts, TOK_STAR)) {
+ pn2 = NewParseNode(cx, ts, PN_NULLARY, tc);
+ if (!pn2)
+ return NULL;
+ PN_APPEND(pn, pn2);
+ } else {
+ do {
+ MUST_MATCH_TOKEN(TOK_NAME, JSMSG_NO_EXPORT_NAME);
+ pn2 = NewParseNode(cx, ts, PN_NAME, tc);
+ if (!pn2)
+ return NULL;
+ pn2->pn_op = JSOP_NAME;
+ pn2->pn_atom = CURRENT_TOKEN(ts).t_atom;
+ pn2->pn_expr = NULL;
+ pn2->pn_slot = -1;
+ pn2->pn_attrs = 0;
+ PN_APPEND(pn, pn2);
+ } while (js_MatchToken(cx, ts, TOK_COMMA));
+ }
+ pn->pn_pos.end = PN_LAST(pn)->pn_pos.end;
+ tc->flags |= TCF_FUN_HEAVYWEIGHT;
+ break;
+
+ case TOK_IMPORT:
+ pn = NewParseNode(cx, ts, PN_LIST, tc);
+ if (!pn)
+ return NULL;
+ PN_INIT_LIST(pn);
+ do {
+ pn2 = ImportExpr(cx, ts, tc);
+ if (!pn2)
+ return NULL;
+ PN_APPEND(pn, pn2);
+ } while (js_MatchToken(cx, ts, TOK_COMMA));
+ pn->pn_pos.end = PN_LAST(pn)->pn_pos.end;
+ tc->flags |= TCF_FUN_HEAVYWEIGHT;
+ break;
+#endif /* JS_HAS_EXPORT_IMPORT */
+
+ case TOK_FUNCTION:
+#if JS_HAS_XML_SUPPORT
+ ts->flags |= TSF_KEYWORD_IS_NAME;
+ tt = js_PeekToken(cx, ts);
+ ts->flags &= ~TSF_KEYWORD_IS_NAME;
+ if (tt == TOK_DBLCOLON)
+ goto expression;
+#endif
+ return FunctionStmt(cx, ts, tc);
+
+ case TOK_IF:
+ /* An IF node has three kids: condition, then, and optional else. */
+ pn = NewParseNode(cx, ts, PN_TERNARY, tc);
+ if (!pn)
+ return NULL;
+ pn1 = Condition(cx, ts, tc);
+ if (!pn1)
+ return NULL;
+ js_PushStatement(tc, &stmtInfo, STMT_IF, -1);
+ pn2 = Statement(cx, ts, tc);
+ if (!pn2)
+ return NULL;
+ ts->flags |= TSF_OPERAND;
+ if (js_MatchToken(cx, ts, TOK_ELSE)) {
+ ts->flags &= ~TSF_OPERAND;
+ stmtInfo.type = STMT_ELSE;
+ pn3 = Statement(cx, ts, tc);
+ if (!pn3)
+ return NULL;
+ pn->pn_pos.end = pn3->pn_pos.end;
+ } else {
+ ts->flags &= ~TSF_OPERAND;
+ pn3 = NULL;
+ pn->pn_pos.end = pn2->pn_pos.end;
+ }
+ js_PopStatement(tc);
+ pn->pn_kid1 = pn1;
+ pn->pn_kid2 = pn2;
+ pn->pn_kid3 = pn3;
+ return pn;
+
+ case TOK_SWITCH:
+ {
+ JSParseNode *pn5, *saveBlock;
+ JSBool seenDefault = JS_FALSE;
+
+ pn = NewParseNode(cx, ts, PN_BINARY, tc);
+ if (!pn)
+ return NULL;
+ MUST_MATCH_TOKEN(TOK_LP, JSMSG_PAREN_BEFORE_SWITCH);
+
+ /* pn1 points to the switch's discriminant. */
+ pn1 = Expr(cx, ts, tc);
+ if (!pn1)
+ return NULL;
+
+ MUST_MATCH_TOKEN(TOK_RP, JSMSG_PAREN_AFTER_SWITCH);
+ MUST_MATCH_TOKEN(TOK_LC, JSMSG_CURLY_BEFORE_SWITCH);
+
+ /* pn2 is a list of case nodes. The default case has pn_left == NULL */
+ pn2 = NewParseNode(cx, ts, PN_LIST, tc);
+ if (!pn2)
+ return NULL;
+ saveBlock = tc->blockNode;
+ tc->blockNode = pn2;
+ PN_INIT_LIST(pn2);
+
+ js_PushStatement(tc, &stmtInfo, STMT_SWITCH, -1);
+
+ while ((tt = js_GetToken(cx, ts)) != TOK_RC) {
+ switch (tt) {
+ case TOK_DEFAULT:
+ if (seenDefault) {
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_TOO_MANY_DEFAULTS);
+ return NULL;
+ }
+ seenDefault = JS_TRUE;
+ /* fall through */
+
+ case TOK_CASE:
+ pn3 = NewParseNode(cx, ts, PN_BINARY, tc);
+ if (!pn3)
+ return NULL;
+ if (tt == TOK_DEFAULT) {
+ pn3->pn_left = NULL;
+ } else {
+ pn3->pn_left = Expr(cx, ts, tc);
+ if (!pn3->pn_left)
+ return NULL;
+ }
+ PN_APPEND(pn2, pn3);
+ if (pn2->pn_count == JS_BIT(16)) {
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_TOO_MANY_CASES);
+ return NULL;
+ }
+ break;
+
+ case TOK_ERROR:
+ return NULL;
+
+ default:
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_BAD_SWITCH);
+ return NULL;
+ }
+ MUST_MATCH_TOKEN(TOK_COLON, JSMSG_COLON_AFTER_CASE);
+
+ pn4 = NewParseNode(cx, ts, PN_LIST, tc);
+ if (!pn4)
+ return NULL;
+ pn4->pn_type = TOK_LC;
+ PN_INIT_LIST(pn4);
+ ts->flags |= TSF_OPERAND;
+ while ((tt = js_PeekToken(cx, ts)) != TOK_RC &&
+ tt != TOK_CASE && tt != TOK_DEFAULT) {
+ ts->flags &= ~TSF_OPERAND;
+ if (tt == TOK_ERROR)
+ return NULL;
+ pn5 = Statement(cx, ts, tc);
+ if (!pn5)
+ return NULL;
+ pn4->pn_pos.end = pn5->pn_pos.end;
+ PN_APPEND(pn4, pn5);
+ ts->flags |= TSF_OPERAND;
+ }
+ ts->flags &= ~TSF_OPERAND;
+
+ /* Fix the PN_LIST so it doesn't begin at the TOK_COLON. */
+ if (pn4->pn_head)
+ pn4->pn_pos.begin = pn4->pn_head->pn_pos.begin;
+ pn3->pn_pos.end = pn4->pn_pos.end;
+ pn3->pn_right = pn4;
+ }
+
+ /*
+ * Handle the case where there was a let declaration in any case in
+ * the switch body, but not within an inner block. If it replaced
+ * tc->blockNode with a new block node then we must refresh pn2 and
+ * then restore tc->blockNode.
+ */
+ if (tc->blockNode != pn2)
+ pn2 = tc->blockNode;
+ tc->blockNode = saveBlock;
+ js_PopStatement(tc);
+
+ pn->pn_pos.end = pn2->pn_pos.end = CURRENT_TOKEN(ts).pos.end;
+ pn->pn_left = pn1;
+ pn->pn_right = pn2;
+ return pn;
+ }
+
+ case TOK_WHILE:
+ pn = NewParseNode(cx, ts, PN_BINARY, tc);
+ if (!pn)
+ return NULL;
+ js_PushStatement(tc, &stmtInfo, STMT_WHILE_LOOP, -1);
+ pn2 = Condition(cx, ts, tc);
+ if (!pn2)
+ return NULL;
+ pn->pn_left = pn2;
+ pn2 = Statement(cx, ts, tc);
+ if (!pn2)
+ return NULL;
+ js_PopStatement(tc);
+ pn->pn_pos.end = pn2->pn_pos.end;
+ pn->pn_right = pn2;
+ return pn;
+
+ case TOK_DO:
+ pn = NewParseNode(cx, ts, PN_BINARY, tc);
+ if (!pn)
+ return NULL;
+ js_PushStatement(tc, &stmtInfo, STMT_DO_LOOP, -1);
+ pn2 = Statement(cx, ts, tc);
+ if (!pn2)
+ return NULL;
+ pn->pn_left = pn2;
+ MUST_MATCH_TOKEN(TOK_WHILE, JSMSG_WHILE_AFTER_DO);
+ pn2 = Condition(cx, ts, tc);
+ if (!pn2)
+ return NULL;
+ js_PopStatement(tc);
+ pn->pn_pos.end = pn2->pn_pos.end;
+ pn->pn_right = pn2;
+ if ((cx->version & JSVERSION_MASK) != JSVERSION_ECMA_3) {
+ /*
+ * All legacy and extended versions must do automatic semicolon
+ * insertion after do-while. See the testcase and discussion in
+ * http://bugzilla.mozilla.org/show_bug.cgi?id=238945.
+ */
+ (void) js_MatchToken(cx, ts, TOK_SEMI);
+ return pn;
+ }
+ break;
+
+ case TOK_FOR:
+ {
+#if JS_HAS_BLOCK_SCOPE
+ JSParseNode *pnlet;
+ JSStmtInfo blockInfo;
+
+ pnlet = NULL;
+#endif
+
+ /* A FOR node is binary, left is loop control and right is the body. */
+ pn = NewParseNode(cx, ts, PN_BINARY, tc);
+ if (!pn)
+ return NULL;
+ js_PushStatement(tc, &stmtInfo, STMT_FOR_LOOP, -1);
+
+ pn->pn_op = JSOP_FORIN;
+ if (js_MatchToken(cx, ts, TOK_NAME)) {
+ if (CURRENT_TOKEN(ts).t_atom == cx->runtime->atomState.eachAtom)
+ pn->pn_op = JSOP_FOREACH;
+ else
+ js_UngetToken(ts);
+ }
+
+ MUST_MATCH_TOKEN(TOK_LP, JSMSG_PAREN_AFTER_FOR);
+ ts->flags |= TSF_OPERAND;
+ tt = js_PeekToken(cx, ts);
+ ts->flags &= ~TSF_OPERAND;
+ if (tt == TOK_SEMI) {
+ if (pn->pn_op == JSOP_FOREACH)
+ goto bad_for_each;
+
+ /* No initializer -- set first kid of left sub-node to null. */
+ pn1 = NULL;
+ } else {
+ /*
+ * Set pn1 to a var list or an initializing expression.
+ *
+ * Set the TCF_IN_FOR_INIT flag during parsing of the first clause
+ * of the for statement. This flag will be used by the RelExpr
+ * production; if it is set, then the 'in' keyword will not be
+ * recognized as an operator, leaving it available to be parsed as
+ * part of a for/in loop.
+ *
+ * A side effect of this restriction is that (unparenthesized)
+ * expressions involving an 'in' operator are illegal in the init
+ * clause of an ordinary for loop.
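+             *
+             * E.g. (illustrative): 'for (n = "p" in o; n; ) ...' is
+             * rejected as a bad for/in left-hand side, while the
+             * parenthesized 'for (n = ("p" in o); n; ) ...' parses as an
+             * ordinary three-clause loop.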
+ */
+ tc->flags |= TCF_IN_FOR_INIT;
+ if (tt == TOK_VAR) {
+ (void) js_GetToken(cx, ts);
+ pn1 = Variables(cx, ts, tc);
+#if JS_HAS_BLOCK_SCOPE
+ } else if (tt == TOK_LET) {
+ (void) js_GetToken(cx, ts);
+ if (js_PeekToken(cx, ts) == TOK_LP) {
+ pn1 = LetBlock(cx, ts, tc, JS_FALSE);
+ tt = TOK_LEXICALSCOPE;
+ } else {
+ pnlet = PushLexicalScope(cx, ts, tc, &blockInfo);
+ if (!pnlet)
+ return NULL;
+ pn1 = Variables(cx, ts, tc);
+ }
+#endif
+ } else {
+ pn1 = Expr(cx, ts, tc);
+ if (pn1) {
+ while (pn1->pn_type == TOK_RP)
+ pn1 = pn1->pn_kid;
+ }
+ }
+ tc->flags &= ~TCF_IN_FOR_INIT;
+ if (!pn1)
+ return NULL;
+ }
+
+ /*
+ * We can be sure that it's a for/in loop if there's still an 'in'
+ * keyword here, even if JavaScript recognizes 'in' as an operator,
+ * as we've excluded 'in' from being parsed in RelExpr by setting
+ * the TCF_IN_FOR_INIT flag in our JSTreeContext.
+ */
+ if (pn1 && js_MatchToken(cx, ts, TOK_IN)) {
+ stmtInfo.type = STMT_FOR_IN_LOOP;
+
+ /* Check that the left side of the 'in' is valid. */
+ JS_ASSERT(!TOKEN_TYPE_IS_DECL(tt) || pn1->pn_type == tt);
+ if (TOKEN_TYPE_IS_DECL(tt)
+ ? (pn1->pn_count > 1 || pn1->pn_op == JSOP_DEFCONST
+#if JS_HAS_DESTRUCTURING
+ || (pn->pn_op == JSOP_FORIN &&
+ (pn1->pn_head->pn_type == TOK_RC ||
+ (pn1->pn_head->pn_type == TOK_RB &&
+ pn1->pn_head->pn_count != 2) ||
+ (pn1->pn_head->pn_type == TOK_ASSIGN &&
+ (pn1->pn_head->pn_left->pn_type != TOK_RB ||
+ pn1->pn_head->pn_left->pn_count != 2))))
+#endif
+ )
+ : (pn1->pn_type != TOK_NAME &&
+ pn1->pn_type != TOK_DOT &&
+#if JS_HAS_DESTRUCTURING
+ ((pn->pn_op == JSOP_FORIN)
+ ? (pn1->pn_type != TOK_RB || pn1->pn_count != 2)
+ : (pn1->pn_type != TOK_RB && pn1->pn_type != TOK_RC)) &&
+#endif
+#if JS_HAS_LVALUE_RETURN
+ pn1->pn_type != TOK_LP &&
+#endif
+#if JS_HAS_XML_SUPPORT
+ (pn1->pn_type != TOK_UNARYOP ||
+ pn1->pn_op != JSOP_XMLNAME) &&
+#endif
+ pn1->pn_type != TOK_LB)) {
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_BAD_FOR_LEFTSIDE);
+ return NULL;
+ }
+
+ if (TOKEN_TYPE_IS_DECL(tt)) {
+ /* Tell js_EmitTree(TOK_VAR) that pn1 is part of a for/in. */
+ pn1->pn_extra |= PNX_FORINVAR;
+
+ /*
+ * Generate a final POP only if the variable is a simple name
+ * (which means it is not a destructuring left-hand side) and
+ * it has an initializer.
+ */
+ pn2 = pn1->pn_head;
+ if (pn2->pn_type == TOK_NAME && pn2->pn_expr)
+ pn1->pn_extra |= PNX_POPVAR;
+ } else {
+ pn2 = pn1;
+#if JS_HAS_LVALUE_RETURN
+ if (pn2->pn_type == TOK_LP)
+ pn2->pn_op = JSOP_SETCALL;
+#endif
+#if JS_HAS_XML_SUPPORT
+ if (pn2->pn_type == TOK_UNARYOP)
+ pn2->pn_op = JSOP_BINDXMLNAME;
+#endif
+ }
+
+ switch (pn2->pn_type) {
+ case TOK_NAME:
+ /* Beware 'for (arguments in ...)' with or without a 'var'. */
+ if (pn2->pn_atom == cx->runtime->atomState.argumentsAtom)
+ tc->flags |= TCF_FUN_HEAVYWEIGHT;
+ break;
+
+#if JS_HAS_DESTRUCTURING
+ case TOK_ASSIGN:
+ pn2 = pn2->pn_left;
+ JS_ASSERT(pn2->pn_type == TOK_RB || pn2->pn_type == TOK_RC);
+ /* FALL THROUGH */
+ case TOK_RB:
+ case TOK_RC:
+ /* Check for valid lvalues in var-less destructuring for-in. */
+ if (pn1 == pn2 && !CheckDestructuring(cx, NULL, pn2, NULL, tc))
+ return NULL;
+
+ /* Destructuring for-in requires [key, value] enumeration. */
+ if (pn->pn_op != JSOP_FOREACH)
+ pn->pn_op = JSOP_FOREACHKEYVAL;
+ break;
+#endif
+
+ default:;
+ }
+
+ /* Parse the object expression as the right operand of 'in'. */
+ pn2 = NewBinary(cx, TOK_IN, JSOP_NOP, pn1, Expr(cx, ts, tc), tc);
+ if (!pn2)
+ return NULL;
+ pn->pn_left = pn2;
+ } else {
+ if (pn->pn_op == JSOP_FOREACH)
+ goto bad_for_each;
+ pn->pn_op = JSOP_NOP;
+
+ /* Parse the loop condition or null into pn2. */
+ MUST_MATCH_TOKEN(TOK_SEMI, JSMSG_SEMI_AFTER_FOR_INIT);
+ ts->flags |= TSF_OPERAND;
+ tt = js_PeekToken(cx, ts);
+ ts->flags &= ~TSF_OPERAND;
+ if (tt == TOK_SEMI) {
+ pn2 = NULL;
+ } else {
+ pn2 = Expr(cx, ts, tc);
+ if (!pn2)
+ return NULL;
+ }
+
+ /* Parse the update expression or null into pn3. */
+ MUST_MATCH_TOKEN(TOK_SEMI, JSMSG_SEMI_AFTER_FOR_COND);
+ ts->flags |= TSF_OPERAND;
+ tt = js_PeekToken(cx, ts);
+ ts->flags &= ~TSF_OPERAND;
+ if (tt == TOK_RP) {
+ pn3 = NULL;
+ } else {
+ pn3 = Expr(cx, ts, tc);
+ if (!pn3)
+ return NULL;
+ }
+
+ /* Build the RESERVED node to use as the left kid of pn. */
+ pn4 = NewParseNode(cx, ts, PN_TERNARY, tc);
+ if (!pn4)
+ return NULL;
+ pn4->pn_type = TOK_RESERVED;
+ pn4->pn_op = JSOP_NOP;
+ pn4->pn_kid1 = pn1;
+ pn4->pn_kid2 = pn2;
+ pn4->pn_kid3 = pn3;
+ pn->pn_left = pn4;
+ }
+
+ MUST_MATCH_TOKEN(TOK_RP, JSMSG_PAREN_AFTER_FOR_CTRL);
+
+ /* Parse the loop body into pn->pn_right. */
+ pn2 = Statement(cx, ts, tc);
+ if (!pn2)
+ return NULL;
+ pn->pn_right = pn2;
+
+ /* Record the absolute line number for source note emission. */
+ pn->pn_pos.end = pn2->pn_pos.end;
+
+#if JS_HAS_BLOCK_SCOPE
+ if (pnlet) {
+ js_PopStatement(tc);
+ pnlet->pn_expr = pn;
+ pn = pnlet;
+ }
+#endif
+ js_PopStatement(tc);
+ return pn;
+
+ bad_for_each:
+ js_ReportCompileErrorNumber(cx, pn,
+ JSREPORT_PN | JSREPORT_ERROR,
+ JSMSG_BAD_FOR_EACH_LOOP);
+ return NULL;
+ }
+
+ case TOK_TRY: {
+ JSParseNode *catchList, *lastCatch;
+
+ /*
+ * try nodes are ternary.
+ * kid1 is the try Statement
+ * kid2 is the catch node list or null
+ * kid3 is the finally Statement
+ *
+ * catch nodes are ternary.
+ * kid1 is the lvalue (TOK_NAME, TOK_LB, or TOK_LC)
+ * kid2 is the catch guard or null if no guard
+ * kid3 is the catch block
+ *
+ * catch lvalue nodes are either:
+ * TOK_NAME for a single identifier
+ * TOK_RB or TOK_RC for a destructuring left-hand side
+ *
+ * finally nodes are TOK_LC Statement lists.
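+         *
+         * E.g. (an illustrative sketch):
+         *
+         *   try { f(); } catch (e if e == null) { g(); } catch (e) { h(); }
+         *
+         * produces two catch clauses, the first carrying its guard in kid2
+         * and the second a null kid2; an unguarded catch must come last.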
+ */
+ pn = NewParseNode(cx, ts, PN_TERNARY, tc);
+ if (!pn)
+ return NULL;
+ pn->pn_op = JSOP_NOP;
+
+ MUST_MATCH_TOKEN(TOK_LC, JSMSG_CURLY_BEFORE_TRY);
+ js_PushStatement(tc, &stmtInfo, STMT_TRY, -1);
+ pn->pn_kid1 = Statements(cx, ts, tc);
+ if (!pn->pn_kid1)
+ return NULL;
+ MUST_MATCH_TOKEN(TOK_RC, JSMSG_CURLY_AFTER_TRY);
+ js_PopStatement(tc);
+
+ catchList = NULL;
+ tt = js_GetToken(cx, ts);
+ if (tt == TOK_CATCH) {
+ catchList = NewParseNode(cx, ts, PN_LIST, tc);
+ if (!catchList)
+ return NULL;
+ catchList->pn_type = TOK_RESERVED;
+ PN_INIT_LIST(catchList);
+ lastCatch = NULL;
+
+ do {
+ JSParseNode *pnblock;
+ BindData data;
+
+ /* Check for another catch after unconditional catch. */
+ if (lastCatch && !lastCatch->pn_kid2) {
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_CATCH_AFTER_GENERAL);
+ return NULL;
+ }
+
+ /*
+ * Create a lexical scope node around the whole catch clause,
+ * including the head.
+ */
+ pnblock = PushLexicalScope(cx, ts, tc, &stmtInfo);
+ if (!pnblock)
+ return NULL;
+ stmtInfo.type = STMT_CATCH;
+
+ /*
+ * Legal catch forms are:
+ * catch (lhs)
+ * catch (lhs if <boolean_expression>)
+ * where lhs is a name or a destructuring left-hand side.
+ * (the latter is legal only #ifdef JS_HAS_CATCH_GUARD)
+ */
+ pn2 = NewParseNode(cx, ts, PN_TERNARY, tc);
+ if (!pn2)
+ return NULL;
+ pnblock->pn_expr = pn2;
+ MUST_MATCH_TOKEN(TOK_LP, JSMSG_PAREN_BEFORE_CATCH);
+
+ /*
+ * Contrary to ECMA Ed. 3, the catch variable is lexically
+ * scoped, not a property of a new Object instance. This is
+ * an intentional change that anticipates ECMA Ed. 4.
+ */
+ data.pn = NULL;
+ data.ts = ts;
+ data.obj = tc->blockChain;
+ data.op = JSOP_NOP;
+ data.binder = BindLet;
+ data.u.let.index = 0;
+ data.u.let.overflow = JSMSG_TOO_MANY_CATCH_VARS;
+
+ tt = js_GetToken(cx, ts);
+ switch (tt) {
+#if JS_HAS_DESTRUCTURING
+ case TOK_LB:
+ case TOK_LC:
+ pn3 = DestructuringExpr(cx, &data, tc, tt);
+ if (!pn3)
+ return NULL;
+ break;
+#endif
+
+ case TOK_NAME:
+ label = CURRENT_TOKEN(ts).t_atom;
+ if (!data.binder(cx, &data, label, tc))
+ return NULL;
+
+ pn3 = NewParseNode(cx, ts, PN_NAME, tc);
+ if (!pn3)
+ return NULL;
+ pn3->pn_atom = label;
+ pn3->pn_expr = NULL;
+ pn3->pn_slot = 0;
+ pn3->pn_attrs = 0;
+ break;
+
+ default:
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_CATCH_IDENTIFIER);
+ return NULL;
+ }
+
+ pn2->pn_kid1 = pn3;
+ pn2->pn_kid2 = NULL;
+#if JS_HAS_CATCH_GUARD
+ /*
+ * We use 'catch (x if x === 5)' (not 'catch (x : x === 5)')
+ * to avoid conflicting with the JS2/ECMAv4 type annotation
+ * catchguard syntax.
+ */
+ if (js_MatchToken(cx, ts, TOK_IF)) {
+ pn2->pn_kid2 = Expr(cx, ts, tc);
+ if (!pn2->pn_kid2)
+ return NULL;
+ }
+#endif
+ MUST_MATCH_TOKEN(TOK_RP, JSMSG_PAREN_AFTER_CATCH);
+
+ MUST_MATCH_TOKEN(TOK_LC, JSMSG_CURLY_BEFORE_CATCH);
+ pn2->pn_kid3 = Statements(cx, ts, tc);
+ if (!pn2->pn_kid3)
+ return NULL;
+ MUST_MATCH_TOKEN(TOK_RC, JSMSG_CURLY_AFTER_CATCH);
+ js_PopStatement(tc);
+
+ PN_APPEND(catchList, pnblock);
+ lastCatch = pn2;
+ ts->flags |= TSF_OPERAND;
+ tt = js_GetToken(cx, ts);
+ ts->flags &= ~TSF_OPERAND;
+ } while (tt == TOK_CATCH);
+ }
+ pn->pn_kid2 = catchList;
+
+ if (tt == TOK_FINALLY) {
+ tc->tryCount++;
+ MUST_MATCH_TOKEN(TOK_LC, JSMSG_CURLY_BEFORE_FINALLY);
+ js_PushStatement(tc, &stmtInfo, STMT_FINALLY, -1);
+ pn->pn_kid3 = Statements(cx, ts, tc);
+ if (!pn->pn_kid3)
+ return NULL;
+ MUST_MATCH_TOKEN(TOK_RC, JSMSG_CURLY_AFTER_FINALLY);
+ js_PopStatement(tc);
+ } else {
+ js_UngetToken(ts);
+ pn->pn_kid3 = NULL;
+ }
+ if (!catchList && !pn->pn_kid3) {
+ js_ReportCompileErrorNumber(cx, ts, JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_CATCH_OR_FINALLY);
+ return NULL;
+ }
+ tc->tryCount++;
+ return pn;
+ }
+
+ case TOK_THROW:
+ pn = NewParseNode(cx, ts, PN_UNARY, tc);
+ if (!pn)
+ return NULL;
+
+ /* ECMA-262 Edition 3 says 'throw [no LineTerminator here] Expr'. */
+ ts->flags |= TSF_OPERAND;
+ tt = js_PeekTokenSameLine(cx, ts);
+ ts->flags &= ~TSF_OPERAND;
+ if (tt == TOK_ERROR)
+ return NULL;
+ if (tt == TOK_EOF || tt == TOK_EOL || tt == TOK_SEMI || tt == TOK_RC) {
+ js_ReportCompileErrorNumber(cx, ts, JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_SYNTAX_ERROR);
+ return NULL;
+ }
+
+ pn2 = Expr(cx, ts, tc);
+ if (!pn2)
+ return NULL;
+ pn->pn_pos.end = pn2->pn_pos.end;
+ pn->pn_op = JSOP_THROW;
+ pn->pn_kid = pn2;
+ break;
+
+ /* TOK_CATCH and TOK_FINALLY are both handled in the TOK_TRY case */
+ case TOK_CATCH:
+ js_ReportCompileErrorNumber(cx, ts, JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_CATCH_WITHOUT_TRY);
+ return NULL;
+
+ case TOK_FINALLY:
+ js_ReportCompileErrorNumber(cx, ts, JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_FINALLY_WITHOUT_TRY);
+ return NULL;
+
+ case TOK_BREAK:
+ pn = NewParseNode(cx, ts, PN_NULLARY, tc);
+ if (!pn)
+ return NULL;
+ if (!MatchLabel(cx, ts, pn))
+ return NULL;
+ stmt = tc->topStmt;
+ label = pn->pn_atom;
+ if (label) {
+ for (; ; stmt = stmt->down) {
+ if (!stmt) {
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_LABEL_NOT_FOUND);
+ return NULL;
+ }
+ if (stmt->type == STMT_LABEL && stmt->atom == label)
+ break;
+ }
+ } else {
+ for (; ; stmt = stmt->down) {
+ if (!stmt) {
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_TOUGH_BREAK);
+ return NULL;
+ }
+ if (STMT_IS_LOOP(stmt) || stmt->type == STMT_SWITCH)
+ break;
+ }
+ }
+ if (label)
+ pn->pn_pos.end = CURRENT_TOKEN(ts).pos.end;
+ break;
+
+ case TOK_CONTINUE:
+ pn = NewParseNode(cx, ts, PN_NULLARY, tc);
+ if (!pn)
+ return NULL;
+ if (!MatchLabel(cx, ts, pn))
+ return NULL;
+ stmt = tc->topStmt;
+ label = pn->pn_atom;
+ if (label) {
+ for (stmt2 = NULL; ; stmt = stmt->down) {
+ if (!stmt) {
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_LABEL_NOT_FOUND);
+ return NULL;
+ }
+ if (stmt->type == STMT_LABEL) {
+ if (stmt->atom == label) {
+ if (!stmt2 || !STMT_IS_LOOP(stmt2)) {
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS |
+ JSREPORT_ERROR,
+ JSMSG_BAD_CONTINUE);
+ return NULL;
+ }
+ break;
+ }
+ } else {
+ stmt2 = stmt;
+ }
+ }
+ } else {
+ for (; ; stmt = stmt->down) {
+ if (!stmt) {
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_BAD_CONTINUE);
+ return NULL;
+ }
+ if (STMT_IS_LOOP(stmt))
+ break;
+ }
+ }
+ if (label)
+ pn->pn_pos.end = CURRENT_TOKEN(ts).pos.end;
+ break;
+
+ case TOK_WITH:
+ pn = NewParseNode(cx, ts, PN_BINARY, tc);
+ if (!pn)
+ return NULL;
+ MUST_MATCH_TOKEN(TOK_LP, JSMSG_PAREN_BEFORE_WITH);
+ pn2 = Expr(cx, ts, tc);
+ if (!pn2)
+ return NULL;
+ MUST_MATCH_TOKEN(TOK_RP, JSMSG_PAREN_AFTER_WITH);
+ pn->pn_left = pn2;
+
+ js_PushStatement(tc, &stmtInfo, STMT_WITH, -1);
+ pn2 = Statement(cx, ts, tc);
+ if (!pn2)
+ return NULL;
+ js_PopStatement(tc);
+
+ pn->pn_pos.end = pn2->pn_pos.end;
+ pn->pn_right = pn2;
+ tc->flags |= TCF_FUN_HEAVYWEIGHT;
+ return pn;
+
+ case TOK_VAR:
+ pn = Variables(cx, ts, tc);
+ if (!pn)
+ return NULL;
+
+ /* Tell js_EmitTree to generate a final POP. */
+ pn->pn_extra |= PNX_POPVAR;
+ break;
+
+#if JS_HAS_BLOCK_SCOPE
+ case TOK_LET:
+ {
+ JSStmtInfo **sip;
+ JSObject *obj;
+ JSAtom *atom;
+
+ /* Check for a let statement or let expression. */
+ if (js_PeekToken(cx, ts) == TOK_LP) {
+ pn = LetBlock(cx, ts, tc, JS_TRUE);
+ if (!pn || pn->pn_op == JSOP_LEAVEBLOCK)
+ return pn;
+
+ /* Let expressions require automatic semicolon insertion. */
+ JS_ASSERT(pn->pn_type == TOK_SEMI ||
+ pn->pn_op == JSOP_LEAVEBLOCKEXPR);
+ break;
+ }
+
+ /*
+ * This is a let declaration. We must convert the nearest JSStmtInfo
+ * that is a block or a switch body to be our scope statement. Further
+ * let declarations in this block will find this scope statement and
+ * use the same block object. If we are the first let declaration in
+ * this block (i.e., when the nearest maybe-scope JSStmtInfo isn't a
+ * scope statement) then we also need to set tc->blockNode to be our
+ * TOK_LEXICALSCOPE.
+ */
+ sip = &tc->topScopeStmt;
+ for (stmt = tc->topStmt; stmt; stmt = stmt->down) {
+ if (STMT_MAYBE_SCOPE(stmt))
+ break;
+ if (stmt == *sip)
+ sip = &stmt->downScope;
+ }
+
+ if (stmt && (stmt->flags & SIF_SCOPE)) {
+ JS_ASSERT(tc->blockChain == ATOM_TO_OBJECT(stmt->atom));
+ obj = tc->blockChain;
+ } else {
+ if (!stmt) {
+ /*
+ * FIXME: https://bugzilla.mozilla.org/show_bug.cgi?id=346749
+ *
+ * This is a hard case that requires more work. In particular,
+ * in many cases, we're trying to emit code as we go. However,
+ * this means that we haven't necessarily finished processing
+ * all let declarations in the implicit top-level block when
+ * we emit a reference to one of them. For now, punt on this
+ * and pretend this is a var declaration.
+ */
+ CURRENT_TOKEN(ts).type = TOK_VAR;
+ CURRENT_TOKEN(ts).t_op = JSOP_DEFVAR;
+
+ pn = Variables(cx, ts, tc);
+ if (!pn)
+ return NULL;
+ pn->pn_extra |= PNX_POPVAR;
+ break;
+ }
+
+ /* Convert the block statement into a scope statement. */
+ obj = js_NewBlockObject(cx);
+ if (!obj)
+ return NULL;
+ atom = js_AtomizeObject(cx, obj, 0);
+ if (!atom)
+ return NULL;
+
+ /*
+ * Insert stmt on the tc->topScopeStmt/stmtInfo.downScope linked
+ * list stack, if it isn't already there. If it is there, but it
+ * lacks the SIF_SCOPE flag, it must be a try, catch, or finally
+ * block.
+ */
+ JS_ASSERT(!(stmt->flags & SIF_SCOPE));
+ stmt->flags |= SIF_SCOPE;
+ if (stmt != *sip) {
+ JS_ASSERT(!stmt->downScope);
+ JS_ASSERT(stmt->type == STMT_BLOCK ||
+ stmt->type == STMT_SWITCH ||
+ stmt->type == STMT_TRY ||
+ stmt->type == STMT_FINALLY);
+ stmt->downScope = *sip;
+ *sip = stmt;
+ } else {
+ JS_ASSERT(stmt->type == STMT_CATCH);
+ JS_ASSERT(stmt->downScope);
+ }
+
+ obj->slots[JSSLOT_PARENT] = OBJECT_TO_JSVAL(tc->blockChain);
+ tc->blockChain = obj;
+ stmt->atom = atom;
+
+#ifdef DEBUG
+ pn1 = tc->blockNode;
+ JS_ASSERT(!pn1 || pn1->pn_type != TOK_LEXICALSCOPE);
+#endif
+
+ /* Create a new lexical scope node for these statements. */
+ pn1 = NewParseNode(cx, ts, PN_NAME, tc);
+ if (!pn1)
+ return NULL;
+
+ pn1->pn_type = TOK_LEXICALSCOPE;
+ pn1->pn_op = JSOP_LEAVEBLOCK;
+ pn1->pn_pos = tc->blockNode->pn_pos;
+ pn1->pn_atom = atom;
+ pn1->pn_expr = tc->blockNode;
+ pn1->pn_slot = -1;
+ pn1->pn_attrs = 0;
+ tc->blockNode = pn1;
+ }
+
+ pn = Variables(cx, ts, tc);
+ if (!pn)
+ return NULL;
+ pn->pn_extra = PNX_POPVAR;
+ break;
+ }
+#endif /* JS_HAS_BLOCK_SCOPE */
+
+ case TOK_RETURN:
+ pn = ReturnOrYield(cx, ts, tc, Expr);
+ if (!pn)
+ return NULL;
+ break;
+
+ case TOK_LC:
+ {
+ uintN oldflags;
+
+ oldflags = tc->flags;
+ tc->flags = oldflags & ~TCF_HAS_FUNCTION_STMT;
+ js_PushStatement(tc, &stmtInfo, STMT_BLOCK, -1);
+ pn = Statements(cx, ts, tc);
+ if (!pn)
+ return NULL;
+
+ MUST_MATCH_TOKEN(TOK_RC, JSMSG_CURLY_IN_COMPOUND);
+ js_PopStatement(tc);
+
+ /*
+ * If we contain a function statement and our container is top-level
+ * or another block, flag pn to preserve braces when decompiling.
+ */
+ if ((tc->flags & TCF_HAS_FUNCTION_STMT) &&
+ (!tc->topStmt || tc->topStmt->type == STMT_BLOCK)) {
+ pn->pn_extra |= PNX_NEEDBRACES;
+ }
+ tc->flags = oldflags | (tc->flags & (TCF_FUN_FLAGS | TCF_RETURN_FLAGS));
+ return pn;
+ }
+
+ case TOK_EOL:
+ case TOK_SEMI:
+ pn = NewParseNode(cx, ts, PN_UNARY, tc);
+ if (!pn)
+ return NULL;
+ pn->pn_type = TOK_SEMI;
+ pn->pn_kid = NULL;
+ return pn;
+
+#if JS_HAS_DEBUGGER_KEYWORD
+ case TOK_DEBUGGER:
+ pn = NewParseNode(cx, ts, PN_NULLARY, tc);
+ if (!pn)
+ return NULL;
+ pn->pn_type = TOK_DEBUGGER;
+ tc->flags |= TCF_FUN_HEAVYWEIGHT;
+ break;
+#endif /* JS_HAS_DEBUGGER_KEYWORD */
+
+#if JS_HAS_XML_SUPPORT
+ case TOK_DEFAULT:
+ pn = NewParseNode(cx, ts, PN_UNARY, tc);
+ if (!pn)
+ return NULL;
+ if (!js_MatchToken(cx, ts, TOK_NAME) ||
+ CURRENT_TOKEN(ts).t_atom != cx->runtime->atomState.xmlAtom ||
+ !js_MatchToken(cx, ts, TOK_NAME) ||
+ CURRENT_TOKEN(ts).t_atom != cx->runtime->atomState.namespaceAtom ||
+ !js_MatchToken(cx, ts, TOK_ASSIGN) ||
+ CURRENT_TOKEN(ts).t_op != JSOP_NOP) {
+ js_ReportCompileErrorNumber(cx, ts, JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_BAD_DEFAULT_XML_NAMESPACE);
+ return NULL;
+ }
+ pn2 = Expr(cx, ts, tc);
+ if (!pn2)
+ return NULL;
+ pn->pn_op = JSOP_DEFXMLNS;
+ pn->pn_pos.end = pn2->pn_pos.end;
+ pn->pn_kid = pn2;
+ tc->flags |= TCF_HAS_DEFXMLNS;
+ break;
+#endif
+
+ case TOK_ERROR:
+ return NULL;
+
+ default:
+#if JS_HAS_XML_SUPPORT
+ expression:
+#endif
+ js_UngetToken(ts);
+ pn2 = Expr(cx, ts, tc);
+ if (!pn2)
+ return NULL;
+
+ if (js_PeekToken(cx, ts) == TOK_COLON) {
+ if (pn2->pn_type != TOK_NAME) {
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_BAD_LABEL);
+ return NULL;
+ }
+ label = pn2->pn_atom;
+ for (stmt = tc->topStmt; stmt; stmt = stmt->down) {
+ if (stmt->type == STMT_LABEL && stmt->atom == label) {
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_DUPLICATE_LABEL);
+ return NULL;
+ }
+ }
+ (void) js_GetToken(cx, ts);
+
+ /* Push a label struct and parse the statement. */
+ js_PushStatement(tc, &stmtInfo, STMT_LABEL, -1);
+ stmtInfo.atom = label;
+ pn = Statement(cx, ts, tc);
+ if (!pn)
+ return NULL;
+
+ /* Normalize empty statement to empty block for the decompiler. */
+ if (pn->pn_type == TOK_SEMI && !pn->pn_kid) {
+ pn->pn_type = TOK_LC;
+ pn->pn_arity = PN_LIST;
+ PN_INIT_LIST(pn);
+ }
+
+ /* Pop the label, set pn_expr, and return early. */
+ js_PopStatement(tc);
+ pn2->pn_type = TOK_COLON;
+ pn2->pn_pos.end = pn->pn_pos.end;
+ pn2->pn_expr = pn;
+ return pn2;
+ }
+
+ pn = NewParseNode(cx, ts, PN_UNARY, tc);
+ if (!pn)
+ return NULL;
+ pn->pn_type = TOK_SEMI;
+ pn->pn_pos = pn2->pn_pos;
+ pn->pn_kid = pn2;
+ break;
+ }
+
+ /* Check termination of this primitive statement. */
+ if (ON_CURRENT_LINE(ts, pn->pn_pos)) {
+ ts->flags |= TSF_OPERAND;
+ tt = js_PeekTokenSameLine(cx, ts);
+ ts->flags &= ~TSF_OPERAND;
+ if (tt == TOK_ERROR)
+ return NULL;
+ if (tt != TOK_EOF && tt != TOK_EOL && tt != TOK_SEMI && tt != TOK_RC) {
+ js_ReportCompileErrorNumber(cx, ts, JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_SEMI_BEFORE_STMNT);
+ return NULL;
+ }
+ }
+
+ (void) js_MatchToken(cx, ts, TOK_SEMI);
+ return pn;
+}
+
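+/*
+ * Parse a comma-separated declaration list for var, const, or let, e.g.
+ * "var x = 1, y;" or, with JS_HAS_DESTRUCTURING, "let [a, b] = pair;".
+ * Each declarator is bound through data.binder (BindLet or BindVarOrConst)
+ * and appended as a PN_NAME node to the PN_LIST result.
+ */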
+static JSParseNode *
+Variables(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc)
+{
+ JSTokenType tt;
+ JSBool let;
+ JSStmtInfo *scopeStmt;
+ BindData data;
+ JSParseNode *pn, *pn2;
+ JSStackFrame *fp;
+ JSAtom *atom;
+
+ /*
+ * The three options here are:
+ * - TOK_LET: We are parsing a let declaration.
+ * - TOK_LP: We are parsing the head of a let block.
+ * - Otherwise, we're parsing var declarations.
+ */
+ tt = CURRENT_TOKEN(ts).type;
+ let = (tt == TOK_LET || tt == TOK_LP);
+ JS_ASSERT(let || tt == TOK_VAR);
+
+ /* Make sure that Statement set the tree context up correctly. */
+ scopeStmt = tc->topScopeStmt;
+ if (let) {
+ while (scopeStmt && !(scopeStmt->flags & SIF_SCOPE)) {
+ JS_ASSERT(!STMT_MAYBE_SCOPE(scopeStmt));
+ scopeStmt = scopeStmt->downScope;
+ }
+ JS_ASSERT(scopeStmt);
+ }
+
+ data.pn = NULL;
+ data.ts = ts;
+ data.op = let ? JSOP_NOP : CURRENT_TOKEN(ts).t_op;
+ data.binder = let ? BindLet : BindVarOrConst;
+ pn = NewParseNode(cx, ts, PN_LIST, tc);
+ if (!pn)
+ return NULL;
+ pn->pn_op = data.op;
+ PN_INIT_LIST(pn);
+
+ /*
+ * The tricky part of this code is to create special parsenode opcodes for
+ * getting and setting variables (which will be stored as special slots in
+ * the frame). The most complicated case is an eval() inside a function.
+ * If the evaluated string references variables in the enclosing function,
+ * then we need to generate the special variable opcodes. We determine
+ * this by looking up the variable's id in the current variable object.
+ * Fortunately, we can avoid doing this for let declared variables.
+ */
+ fp = cx->fp;
+ if (let) {
+ JS_ASSERT(tc->blockChain == ATOM_TO_OBJECT(scopeStmt->atom));
+ data.obj = tc->blockChain;
+ data.u.let.index = OBJ_BLOCK_COUNT(cx, data.obj);
+ data.u.let.overflow = JSMSG_TOO_MANY_FUN_VARS;
+ } else {
+ data.obj = fp->varobj;
+ data.u.var.fun = fp->fun;
+ data.u.var.clasp = OBJ_GET_CLASS(cx, data.obj);
+ if (data.u.var.fun && data.u.var.clasp == &js_FunctionClass) {
+ /* We are compiling code inside a function */
+ data.u.var.getter = js_GetLocalVariable;
+ data.u.var.setter = js_SetLocalVariable;
+ } else if (data.u.var.fun && data.u.var.clasp == &js_CallClass) {
+ /* We are compiling code from an eval inside a function */
+ data.u.var.getter = js_GetCallVariable;
+ data.u.var.setter = js_SetCallVariable;
+ } else {
+ data.u.var.getter = data.u.var.clasp->getProperty;
+ data.u.var.setter = data.u.var.clasp->setProperty;
+ }
+
+ data.u.var.attrs = (data.op == JSOP_DEFCONST)
+ ? JSPROP_PERMANENT | JSPROP_READONLY
+ : JSPROP_PERMANENT;
+ }
+
+ do {
+ tt = js_GetToken(cx, ts);
+#if JS_HAS_DESTRUCTURING
+ if (tt == TOK_LB || tt == TOK_LC) {
+ pn2 = PrimaryExpr(cx, ts, tc, tt, JS_FALSE);
+ if (!pn2)
+ return NULL;
+
+ if ((tc->flags & TCF_IN_FOR_INIT) &&
+ js_PeekToken(cx, ts) == TOK_IN) {
+ if (!CheckDestructuring(cx, &data, pn2, NULL, tc))
+ return NULL;
+ PN_APPEND(pn, pn2);
+ continue;
+ }
+
+ MUST_MATCH_TOKEN(TOK_ASSIGN, JSMSG_BAD_DESTRUCT_DECL);
+ if (CURRENT_TOKEN(ts).t_op != JSOP_NOP)
+ goto bad_var_init;
+
+ pn2 = NewBinary(cx, TOK_ASSIGN, JSOP_NOP,
+ pn2, AssignExpr(cx, ts, tc),
+ tc);
+ if (!pn2 ||
+ !CheckDestructuring(cx, &data,
+ pn2->pn_left, pn2->pn_right,
+ tc)) {
+ return NULL;
+ }
+ PN_APPEND(pn, pn2);
+ continue;
+ }
+#endif
+
+ if (tt != TOK_NAME) {
+ js_ReportCompileErrorNumber(cx, ts, JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_NO_VARIABLE_NAME);
+ return NULL;
+ }
+ atom = CURRENT_TOKEN(ts).t_atom;
+ if (!data.binder(cx, &data, atom, tc))
+ return NULL;
+
+ pn2 = NewParseNode(cx, ts, PN_NAME, tc);
+ if (!pn2)
+ return NULL;
+ pn2->pn_op = JSOP_NAME;
+ pn2->pn_atom = atom;
+ pn2->pn_expr = NULL;
+ pn2->pn_slot = -1;
+ pn2->pn_attrs = let ? 0 : data.u.var.attrs;
+ PN_APPEND(pn, pn2);
+
+ if (js_MatchToken(cx, ts, TOK_ASSIGN)) {
+ if (CURRENT_TOKEN(ts).t_op != JSOP_NOP)
+ goto bad_var_init;
+
+ pn2->pn_expr = AssignExpr(cx, ts, tc);
+ if (!pn2->pn_expr)
+ return NULL;
+ pn2->pn_op = (!let && data.op == JSOP_DEFCONST)
+ ? JSOP_SETCONST
+ : JSOP_SETNAME;
+ if (!let && atom == cx->runtime->atomState.argumentsAtom)
+ tc->flags |= TCF_FUN_HEAVYWEIGHT;
+ }
+ } while (js_MatchToken(cx, ts, TOK_COMMA));
+
+ pn->pn_pos.end = PN_LAST(pn)->pn_pos.end;
+ return pn;
+
+bad_var_init:
+ js_ReportCompileErrorNumber(cx, ts, JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_BAD_VAR_INIT);
+ return NULL;
+}
+
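+/*
+ * Parse a comma expression: a single AssignExpr, or a PN_LIST collecting
+ * several, e.g. "a = f(), b++". Under JS_HAS_GENERATORS a naked yield is
+ * rejected as a comma operand.
+ */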
+static JSParseNode *
+Expr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc)
+{
+ JSParseNode *pn, *pn2;
+
+ pn = AssignExpr(cx, ts, tc);
+ if (pn && js_MatchToken(cx, ts, TOK_COMMA)) {
+ pn2 = NewParseNode(cx, ts, PN_LIST, tc);
+ if (!pn2)
+ return NULL;
+ pn2->pn_pos.begin = pn->pn_pos.begin;
+ PN_INIT_LIST_1(pn2, pn);
+ pn = pn2;
+ do {
+#if JS_HAS_GENERATORS
+ pn2 = PN_LAST(pn);
+ if (pn2->pn_type == TOK_YIELD) {
+ js_ReportCompileErrorNumber(cx, pn2,
+ JSREPORT_PN | JSREPORT_ERROR,
+ JSMSG_BAD_YIELD_SYNTAX);
+ return NULL;
+ }
+#endif
+ pn2 = AssignExpr(cx, ts, tc);
+ if (!pn2)
+ return NULL;
+ PN_APPEND(pn, pn2);
+ } while (js_MatchToken(cx, ts, TOK_COMMA));
+ pn->pn_pos.end = PN_LAST(pn)->pn_pos.end;
+ }
+ return pn;
+}
+
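+/*
+ * Parse an assignment expression. When a TOK_ASSIGN follows the CondExpr,
+ * the left operand is converted to a settable reference: names become
+ * JSOP_SETNAME, property accesses JSOP_SETPROP or JSOP_SETMETHOD, element
+ * accesses JSOP_SETELEM, and (under JS_HAS_DESTRUCTURING) array or object
+ * patterns such as "[a, b] = pair" are validated by CheckDestructuring.
+ * Anything else is reported as a bad left-hand side of assignment.
+ */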
+static JSParseNode *
+AssignExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc)
+{
+ JSParseNode *pn, *pn2;
+ JSTokenType tt;
+ JSOp op;
+
+ CHECK_RECURSION();
+
+#if JS_HAS_GENERATORS
+ ts->flags |= TSF_OPERAND;
+ if (js_MatchToken(cx, ts, TOK_YIELD)) {
+ ts->flags &= ~TSF_OPERAND;
+ return ReturnOrYield(cx, ts, tc, AssignExpr);
+ }
+ ts->flags &= ~TSF_OPERAND;
+#endif
+
+ pn = CondExpr(cx, ts, tc);
+ if (!pn)
+ return NULL;
+
+ tt = js_GetToken(cx, ts);
+#if JS_HAS_GETTER_SETTER
+ if (tt == TOK_NAME) {
+ tt = CheckGetterOrSetter(cx, ts, TOK_ASSIGN);
+ if (tt == TOK_ERROR)
+ return NULL;
+ }
+#endif
+ if (tt != TOK_ASSIGN) {
+ js_UngetToken(ts);
+ return pn;
+ }
+
+ op = CURRENT_TOKEN(ts).t_op;
+ for (pn2 = pn; pn2->pn_type == TOK_RP; pn2 = pn2->pn_kid)
+ continue;
+ switch (pn2->pn_type) {
+ case TOK_NAME:
+ pn2->pn_op = JSOP_SETNAME;
+ if (pn2->pn_atom == cx->runtime->atomState.argumentsAtom)
+ tc->flags |= TCF_FUN_HEAVYWEIGHT;
+ break;
+ case TOK_DOT:
+ pn2->pn_op = (pn2->pn_op == JSOP_GETMETHOD)
+ ? JSOP_SETMETHOD
+ : JSOP_SETPROP;
+ break;
+ case TOK_LB:
+ pn2->pn_op = JSOP_SETELEM;
+ break;
+#if JS_HAS_DESTRUCTURING
+ case TOK_RB:
+ case TOK_RC:
+ if (op != JSOP_NOP) {
+ js_ReportCompileErrorNumber(cx, ts, JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_BAD_DESTRUCT_ASS);
+ return NULL;
+ }
+ pn = AssignExpr(cx, ts, tc);
+ if (!pn || !CheckDestructuring(cx, NULL, pn2, pn, tc))
+ return NULL;
+ return NewBinary(cx, TOK_ASSIGN, op, pn2, pn, tc);
+#endif
+#if JS_HAS_LVALUE_RETURN
+ case TOK_LP:
+ JS_ASSERT(pn2->pn_op == JSOP_CALL || pn2->pn_op == JSOP_EVAL);
+ pn2->pn_op = JSOP_SETCALL;
+ break;
+#endif
+#if JS_HAS_XML_SUPPORT
+ case TOK_UNARYOP:
+ if (pn2->pn_op == JSOP_XMLNAME) {
+ pn2->pn_op = JSOP_SETXMLNAME;
+ break;
+ }
+ /* FALL THROUGH */
+#endif
+ default:
+ js_ReportCompileErrorNumber(cx, ts, JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_BAD_LEFTSIDE_OF_ASS);
+ return NULL;
+ }
+
+ return NewBinary(cx, TOK_ASSIGN, op, pn2, AssignExpr(cx, ts, tc), tc);
+}
+
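+/*
+ * Parse a conditional expression, e.g. "p ? q : r", building a PN_TERNARY
+ * node whose kids are the condition, the then-expression, and the
+ * else-expression.
+ */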
+static JSParseNode *
+CondExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc)
+{
+ JSParseNode *pn, *pn1, *pn2, *pn3;
+ uintN oldflags;
+
+ pn = OrExpr(cx, ts, tc);
+ if (pn && js_MatchToken(cx, ts, TOK_HOOK)) {
+ pn1 = pn;
+ pn = NewParseNode(cx, ts, PN_TERNARY, tc);
+ if (!pn)
+ return NULL;
+ /*
+ * Always accept the 'in' operator in the middle clause of a ternary,
+ * where it's unambiguous, even if we might be parsing the init of a
+ * for statement.
+ */
+ oldflags = tc->flags;
+ tc->flags &= ~TCF_IN_FOR_INIT;
+ pn2 = AssignExpr(cx, ts, tc);
+ tc->flags = oldflags | (tc->flags & TCF_FUN_FLAGS);
+
+ if (!pn2)
+ return NULL;
+ MUST_MATCH_TOKEN(TOK_COLON, JSMSG_COLON_IN_COND);
+ pn3 = AssignExpr(cx, ts, tc);
+ if (!pn3)
+ return NULL;
+ pn->pn_pos.begin = pn1->pn_pos.begin;
+ pn->pn_pos.end = pn3->pn_pos.end;
+ pn->pn_kid1 = pn1;
+ pn->pn_kid2 = pn2;
+ pn->pn_kid3 = pn3;
+ }
+ return pn;
+}
+
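+/*
+ * OrExpr through MulExpr form the usual recursive-descent precedence
+ * ladder: each level parses operands at the next tighter level, so
+ * "a || b && c" parses as "a || (b && c)" and "a + b * c" as "a + (b * c)".
+ */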
+static JSParseNode *
+OrExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc)
+{
+ JSParseNode *pn;
+
+ pn = AndExpr(cx, ts, tc);
+ if (pn && js_MatchToken(cx, ts, TOK_OR))
+ pn = NewBinary(cx, TOK_OR, JSOP_OR, pn, OrExpr(cx, ts, tc), tc);
+ return pn;
+}
+
+static JSParseNode *
+AndExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc)
+{
+ JSParseNode *pn;
+
+ pn = BitOrExpr(cx, ts, tc);
+ if (pn && js_MatchToken(cx, ts, TOK_AND))
+ pn = NewBinary(cx, TOK_AND, JSOP_AND, pn, AndExpr(cx, ts, tc), tc);
+ return pn;
+}
+
+static JSParseNode *
+BitOrExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc)
+{
+ JSParseNode *pn;
+
+ pn = BitXorExpr(cx, ts, tc);
+ while (pn && js_MatchToken(cx, ts, TOK_BITOR)) {
+ pn = NewBinary(cx, TOK_BITOR, JSOP_BITOR, pn, BitXorExpr(cx, ts, tc),
+ tc);
+ }
+ return pn;
+}
+
+static JSParseNode *
+BitXorExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc)
+{
+ JSParseNode *pn;
+
+ pn = BitAndExpr(cx, ts, tc);
+ while (pn && js_MatchToken(cx, ts, TOK_BITXOR)) {
+ pn = NewBinary(cx, TOK_BITXOR, JSOP_BITXOR, pn, BitAndExpr(cx, ts, tc),
+ tc);
+ }
+ return pn;
+}
+
+static JSParseNode *
+BitAndExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc)
+{
+ JSParseNode *pn;
+
+ pn = EqExpr(cx, ts, tc);
+ while (pn && js_MatchToken(cx, ts, TOK_BITAND))
+ pn = NewBinary(cx, TOK_BITAND, JSOP_BITAND, pn, EqExpr(cx, ts, tc), tc);
+ return pn;
+}
+
+static JSParseNode *
+EqExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc)
+{
+ JSParseNode *pn;
+ JSOp op;
+
+ pn = RelExpr(cx, ts, tc);
+ while (pn && js_MatchToken(cx, ts, TOK_EQOP)) {
+ op = CURRENT_TOKEN(ts).t_op;
+ pn = NewBinary(cx, TOK_EQOP, op, pn, RelExpr(cx, ts, tc), tc);
+ }
+ return pn;
+}
+
+static JSParseNode *
+RelExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc)
+{
+ JSParseNode *pn;
+ JSTokenType tt;
+ JSOp op;
+ uintN inForInitFlag = tc->flags & TCF_IN_FOR_INIT;
+
+ /*
+ * Uses of the in operator in ShiftExprs are always unambiguous,
+ * so unset the flag that prohibits recognizing it.
+ */
+ tc->flags &= ~TCF_IN_FOR_INIT;
+
+ pn = ShiftExpr(cx, ts, tc);
+ while (pn &&
+ (js_MatchToken(cx, ts, TOK_RELOP) ||
+ /*
+ * Recognize the 'in' token as an operator only if we're not
+ * currently in the init expr of a for loop.
+ */
+ (inForInitFlag == 0 && js_MatchToken(cx, ts, TOK_IN)) ||
+ js_MatchToken(cx, ts, TOK_INSTANCEOF))) {
+ tt = CURRENT_TOKEN(ts).type;
+ op = CURRENT_TOKEN(ts).t_op;
+ pn = NewBinary(cx, tt, op, pn, ShiftExpr(cx, ts, tc), tc);
+ }
+ /* Restore previous state of inForInit flag. */
+ tc->flags |= inForInitFlag;
+
+ return pn;
+}
+
+static JSParseNode *
+ShiftExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc)
+{
+ JSParseNode *pn;
+ JSOp op;
+
+ pn = AddExpr(cx, ts, tc);
+ while (pn && js_MatchToken(cx, ts, TOK_SHOP)) {
+ op = CURRENT_TOKEN(ts).t_op;
+ pn = NewBinary(cx, TOK_SHOP, op, pn, AddExpr(cx, ts, tc), tc);
+ }
+ return pn;
+}
+
+static JSParseNode *
+AddExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc)
+{
+ JSParseNode *pn;
+ JSTokenType tt;
+ JSOp op;
+
+ pn = MulExpr(cx, ts, tc);
+ while (pn &&
+ (js_MatchToken(cx, ts, TOK_PLUS) ||
+ js_MatchToken(cx, ts, TOK_MINUS))) {
+ tt = CURRENT_TOKEN(ts).type;
+ op = (tt == TOK_PLUS) ? JSOP_ADD : JSOP_SUB;
+ pn = NewBinary(cx, tt, op, pn, MulExpr(cx, ts, tc), tc);
+ }
+ return pn;
+}
+
+static JSParseNode *
+MulExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc)
+{
+ JSParseNode *pn;
+ JSTokenType tt;
+ JSOp op;
+
+ pn = UnaryExpr(cx, ts, tc);
+ while (pn &&
+ (js_MatchToken(cx, ts, TOK_STAR) ||
+ js_MatchToken(cx, ts, TOK_DIVOP))) {
+ tt = CURRENT_TOKEN(ts).type;
+ op = CURRENT_TOKEN(ts).t_op;
+ pn = NewBinary(cx, tt, op, pn, UnaryExpr(cx, ts, tc), tc);
+ }
+ return pn;
+}
+
+static JSParseNode *
+SetLvalKid(JSContext *cx, JSTokenStream *ts, JSParseNode *pn, JSParseNode *kid,
+ const char *name)
+{
+ while (kid->pn_type == TOK_RP)
+ kid = kid->pn_kid;
+ if (kid->pn_type != TOK_NAME &&
+ kid->pn_type != TOK_DOT &&
+#if JS_HAS_LVALUE_RETURN
+ (kid->pn_type != TOK_LP || kid->pn_op != JSOP_CALL) &&
+#endif
+#if JS_HAS_XML_SUPPORT
+ (kid->pn_type != TOK_UNARYOP || kid->pn_op != JSOP_XMLNAME) &&
+#endif
+ kid->pn_type != TOK_LB) {
+ js_ReportCompileErrorNumber(cx, ts, JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_BAD_OPERAND, name);
+ return NULL;
+ }
+ pn->pn_kid = kid;
+ return kid;
+}
+
+static const char incop_name_str[][10] = {"increment", "decrement"};
+
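+/*
+ * Choose the opcode for an increment or decrement applied to kid, based on
+ * the operand kind and whether the operator is prefix (preorder) or
+ * postfix: "++x" selects JSOP_INCNAME while "x++" selects JSOP_NAMEINC,
+ * with analogous PROP and ELEM forms for dotted and bracketed operands.
+ */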
+static JSBool
+SetIncOpKid(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc,
+ JSParseNode *pn, JSParseNode *kid,
+ JSTokenType tt, JSBool preorder)
+{
+ JSOp op;
+
+ kid = SetLvalKid(cx, ts, pn, kid, incop_name_str[tt == TOK_DEC]);
+ if (!kid)
+ return JS_FALSE;
+ switch (kid->pn_type) {
+ case TOK_NAME:
+ op = (tt == TOK_INC)
+ ? (preorder ? JSOP_INCNAME : JSOP_NAMEINC)
+ : (preorder ? JSOP_DECNAME : JSOP_NAMEDEC);
+ if (kid->pn_atom == cx->runtime->atomState.argumentsAtom)
+ tc->flags |= TCF_FUN_HEAVYWEIGHT;
+ break;
+
+ case TOK_DOT:
+ op = (tt == TOK_INC)
+ ? (preorder ? JSOP_INCPROP : JSOP_PROPINC)
+ : (preorder ? JSOP_DECPROP : JSOP_PROPDEC);
+ break;
+
+#if JS_HAS_LVALUE_RETURN
+ case TOK_LP:
+ JS_ASSERT(kid->pn_op == JSOP_CALL);
+ kid->pn_op = JSOP_SETCALL;
+ /* FALL THROUGH */
+#endif
+#if JS_HAS_XML_SUPPORT
+ case TOK_UNARYOP:
+ if (kid->pn_op == JSOP_XMLNAME)
+ kid->pn_op = JSOP_SETXMLNAME;
+ /* FALL THROUGH */
+#endif
+ case TOK_LB:
+ op = (tt == TOK_INC)
+ ? (preorder ? JSOP_INCELEM : JSOP_ELEMINC)
+ : (preorder ? JSOP_DECELEM : JSOP_ELEMDEC);
+ break;
+
+ default:
+ JS_ASSERT(0);
+ op = JSOP_NOP;
+ }
+ pn->pn_op = op;
+ return JS_TRUE;
+}
+
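+/*
+ * Parse a unary expression: prefix forms such as "!x", "-x", "++x", and
+ * "delete o.p", or a MemberExpr optionally followed by a postfix incop.
+ * The postfix ++ or -- is recognized only when it appears on the same
+ * line as its operand.
+ */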
+static JSParseNode *
+UnaryExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc)
+{
+ JSTokenType tt;
+ JSParseNode *pn, *pn2;
+
+ CHECK_RECURSION();
+
+ ts->flags |= TSF_OPERAND;
+ tt = js_GetToken(cx, ts);
+ ts->flags &= ~TSF_OPERAND;
+
+ switch (tt) {
+ case TOK_UNARYOP:
+ case TOK_PLUS:
+ case TOK_MINUS:
+ pn = NewParseNode(cx, ts, PN_UNARY, tc);
+ if (!pn)
+ return NULL;
+ pn->pn_type = TOK_UNARYOP; /* PLUS and MINUS are binary */
+ pn->pn_op = CURRENT_TOKEN(ts).t_op;
+ pn2 = UnaryExpr(cx, ts, tc);
+ if (!pn2)
+ return NULL;
+ pn->pn_pos.end = pn2->pn_pos.end;
+ pn->pn_kid = pn2;
+ break;
+
+ case TOK_INC:
+ case TOK_DEC:
+ pn = NewParseNode(cx, ts, PN_UNARY, tc);
+ if (!pn)
+ return NULL;
+ pn2 = MemberExpr(cx, ts, tc, JS_TRUE);
+ if (!pn2)
+ return NULL;
+ if (!SetIncOpKid(cx, ts, tc, pn, pn2, tt, JS_TRUE))
+ return NULL;
+ pn->pn_pos.end = pn2->pn_pos.end;
+ break;
+
+ case TOK_DELETE:
+ pn = NewParseNode(cx, ts, PN_UNARY, tc);
+ if (!pn)
+ return NULL;
+ pn2 = UnaryExpr(cx, ts, tc);
+ if (!pn2)
+ return NULL;
+ pn->pn_pos.end = pn2->pn_pos.end;
+
+ /*
+ * Under ECMA3, deleting any unary expression is valid -- it simply
+ * returns true. Here we strip off any parentheses.
+ */
+ while (pn2->pn_type == TOK_RP)
+ pn2 = pn2->pn_kid;
+ pn->pn_kid = pn2;
+ break;
+
+ case TOK_ERROR:
+ return NULL;
+
+ default:
+ js_UngetToken(ts);
+ pn = MemberExpr(cx, ts, tc, JS_TRUE);
+ if (!pn)
+ return NULL;
+
+ /* Don't look across a newline boundary for a postfix incop. */
+ if (ON_CURRENT_LINE(ts, pn->pn_pos)) {
+ ts->flags |= TSF_OPERAND;
+ tt = js_PeekTokenSameLine(cx, ts);
+ ts->flags &= ~TSF_OPERAND;
+ if (tt == TOK_INC || tt == TOK_DEC) {
+ (void) js_GetToken(cx, ts);
+ pn2 = NewParseNode(cx, ts, PN_UNARY, tc);
+ if (!pn2)
+ return NULL;
+ if (!SetIncOpKid(cx, ts, tc, pn2, pn, tt, JS_FALSE))
+ return NULL;
+ pn2->pn_pos.begin = pn->pn_pos.begin;
+ pn = pn2;
+ }
+ }
+ break;
+ }
+ return pn;
+}
+
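+/*
+ * Parse the actual arguments of a call or new expression, e.g.
+ * "(a, b + 1)", appending each AssignExpr to listNode until the closing
+ * TOK_RP. Under JS_HAS_GENERATORS a naked yield is rejected as an
+ * argument.
+ */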
+static JSBool
+ArgumentList(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc,
+ JSParseNode *listNode)
+{
+ JSBool matched;
+
+ ts->flags |= TSF_OPERAND;
+ matched = js_MatchToken(cx, ts, TOK_RP);
+ ts->flags &= ~TSF_OPERAND;
+ if (!matched) {
+ do {
+ JSParseNode *argNode = AssignExpr(cx, ts, tc);
+ if (!argNode)
+ return JS_FALSE;
+#if JS_HAS_GENERATORS
+ if (argNode->pn_type == TOK_YIELD) {
+ js_ReportCompileErrorNumber(cx, argNode,
+ JSREPORT_PN | JSREPORT_ERROR,
+ JSMSG_BAD_YIELD_SYNTAX);
+ return JS_FALSE;
+ }
+#endif
+ PN_APPEND(listNode, argNode);
+ } while (js_MatchToken(cx, ts, TOK_COMMA));
+
+ if (js_GetToken(cx, ts) != TOK_RP) {
+ js_ReportCompileErrorNumber(cx, ts, JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_PAREN_AFTER_ARGS);
+ return JS_FALSE;
+ }
+ }
+ return JS_TRUE;
+}
+
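+/*
+ * Parse a member expression: a new expression or PrimaryExpr followed by
+ * any mix of ".name", "[expr]", and (when allowCallSyntax) "(args)"
+ * suffixes, e.g. "new F(x).p['q'](y)". String indices are rewritten from
+ * o['p'] to o.p, and a call whose callee is the name eval is compiled as
+ * JSOP_EVAL with the enclosing function flagged heavyweight.
+ */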
+static JSParseNode *
+MemberExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc,
+ JSBool allowCallSyntax)
+{
+ JSParseNode *pn, *pn2, *pn3;
+ JSTokenType tt;
+
+ CHECK_RECURSION();
+
+ /* Check for new expression first. */
+ ts->flags |= TSF_OPERAND;
+ tt = js_GetToken(cx, ts);
+ ts->flags &= ~TSF_OPERAND;
+ if (tt == TOK_NEW) {
+ pn = NewParseNode(cx, ts, PN_LIST, tc);
+ if (!pn)
+ return NULL;
+ pn2 = MemberExpr(cx, ts, tc, JS_FALSE);
+ if (!pn2)
+ return NULL;
+ pn->pn_op = JSOP_NEW;
+ PN_INIT_LIST_1(pn, pn2);
+ pn->pn_pos.begin = pn2->pn_pos.begin;
+
+ if (js_MatchToken(cx, ts, TOK_LP) && !ArgumentList(cx, ts, tc, pn))
+ return NULL;
+ if (pn->pn_count > ARGC_LIMIT) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_TOO_MANY_CON_ARGS);
+ return NULL;
+ }
+ pn->pn_pos.end = PN_LAST(pn)->pn_pos.end;
+ } else {
+ pn = PrimaryExpr(cx, ts, tc, tt, JS_FALSE);
+ if (!pn)
+ return NULL;
+
+ if (pn->pn_type == TOK_ANYNAME ||
+ pn->pn_type == TOK_AT ||
+ pn->pn_type == TOK_DBLCOLON) {
+ pn2 = NewOrRecycledNode(cx, tc);
+ if (!pn2)
+ return NULL;
+ pn2->pn_type = TOK_UNARYOP;
+ pn2->pn_pos = pn->pn_pos;
+ pn2->pn_op = JSOP_XMLNAME;
+ pn2->pn_arity = PN_UNARY;
+ pn2->pn_kid = pn;
+ pn2->pn_next = NULL;
+ pn2->pn_ts = ts;
+ pn2->pn_source = NULL;
+ pn = pn2;
+ }
+ }
+
+ while ((tt = js_GetToken(cx, ts)) > TOK_EOF) {
+ if (tt == TOK_DOT) {
+ pn2 = NewParseNode(cx, ts, PN_NAME, tc);
+ if (!pn2)
+ return NULL;
+ pn2->pn_slot = -1;
+ pn2->pn_attrs = 0;
+#if JS_HAS_XML_SUPPORT
+ ts->flags |= TSF_OPERAND | TSF_KEYWORD_IS_NAME;
+ tt = js_GetToken(cx, ts);
+ ts->flags &= ~(TSF_OPERAND | TSF_KEYWORD_IS_NAME);
+ pn3 = PrimaryExpr(cx, ts, tc, tt, JS_TRUE);
+ if (!pn3)
+ return NULL;
+ tt = pn3->pn_type;
+ if (tt == TOK_NAME ||
+ (tt == TOK_DBLCOLON &&
+ pn3->pn_arity == PN_NAME &&
+ pn3->pn_expr->pn_type == TOK_FUNCTION)) {
+ pn2->pn_op = (tt == TOK_NAME) ? JSOP_GETPROP : JSOP_GETMETHOD;
+ pn2->pn_expr = pn;
+ pn2->pn_atom = pn3->pn_atom;
+ RecycleTree(pn3, tc);
+ } else {
+ if (TOKEN_TYPE_IS_XML(tt)) {
+ pn2->pn_type = TOK_LB;
+ pn2->pn_op = JSOP_GETELEM;
+ } else if (tt == TOK_RP) {
+ JSParseNode *group = pn3;
+
+ /* Recycle the useless TOK_RP/JSOP_GROUP node. */
+ pn3 = group->pn_kid;
+ group->pn_kid = NULL;
+ RecycleTree(group, tc);
+ pn2->pn_type = TOK_FILTER;
+ pn2->pn_op = JSOP_FILTER;
+
+ /* A filtering predicate is like a with statement. */
+ tc->flags |= TCF_FUN_HEAVYWEIGHT;
+ } else {
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_NAME_AFTER_DOT);
+ return NULL;
+ }
+ pn2->pn_arity = PN_BINARY;
+ pn2->pn_left = pn;
+ pn2->pn_right = pn3;
+ }
+#else
+ ts->flags |= TSF_KEYWORD_IS_NAME;
+ MUST_MATCH_TOKEN(TOK_NAME, JSMSG_NAME_AFTER_DOT);
+ ts->flags &= ~TSF_KEYWORD_IS_NAME;
+ pn2->pn_op = JSOP_GETPROP;
+ pn2->pn_expr = pn;
+ pn2->pn_atom = CURRENT_TOKEN(ts).t_atom;
+#endif
+ pn2->pn_pos.begin = pn->pn_pos.begin;
+ pn2->pn_pos.end = CURRENT_TOKEN(ts).pos.end;
+#if JS_HAS_XML_SUPPORT
+ } else if (tt == TOK_DBLDOT) {
+ pn2 = NewParseNode(cx, ts, PN_BINARY, tc);
+ if (!pn2)
+ return NULL;
+ ts->flags |= TSF_OPERAND | TSF_KEYWORD_IS_NAME;
+ tt = js_GetToken(cx, ts);
+ ts->flags &= ~(TSF_OPERAND | TSF_KEYWORD_IS_NAME);
+ pn3 = PrimaryExpr(cx, ts, tc, tt, JS_TRUE);
+ if (!pn3)
+ return NULL;
+ tt = pn3->pn_type;
+ if (tt == TOK_NAME) {
+ pn3->pn_type = TOK_STRING;
+ pn3->pn_arity = PN_NULLARY;
+ pn3->pn_op = JSOP_QNAMEPART;
+ } else if (!TOKEN_TYPE_IS_XML(tt)) {
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_NAME_AFTER_DOT);
+ return NULL;
+ }
+ pn2->pn_op = JSOP_DESCENDANTS;
+ pn2->pn_left = pn;
+ pn2->pn_right = pn3;
+ pn2->pn_pos.begin = pn->pn_pos.begin;
+ pn2->pn_pos.end = CURRENT_TOKEN(ts).pos.end;
+#endif
+ } else if (tt == TOK_LB) {
+ pn2 = NewParseNode(cx, ts, PN_BINARY, tc);
+ if (!pn2)
+ return NULL;
+ pn3 = Expr(cx, ts, tc);
+ if (!pn3)
+ return NULL;
+
+ MUST_MATCH_TOKEN(TOK_RB, JSMSG_BRACKET_IN_INDEX);
+ pn2->pn_pos.begin = pn->pn_pos.begin;
+ pn2->pn_pos.end = CURRENT_TOKEN(ts).pos.end;
+
+ /* Optimize o['p'] to o.p by rewriting pn2. */
+ if (pn3->pn_type == TOK_STRING) {
+ pn2->pn_type = TOK_DOT;
+ pn2->pn_op = JSOP_GETPROP;
+ pn2->pn_arity = PN_NAME;
+ pn2->pn_expr = pn;
+ pn2->pn_atom = pn3->pn_atom;
+ } else {
+ pn2->pn_op = JSOP_GETELEM;
+ pn2->pn_left = pn;
+ pn2->pn_right = pn3;
+ }
+ } else if (allowCallSyntax && tt == TOK_LP) {
+ pn2 = NewParseNode(cx, ts, PN_LIST, tc);
+ if (!pn2)
+ return NULL;
+
+ /* Pick JSOP_EVAL and flag tc as heavyweight if eval(...). */
+ pn2->pn_op = JSOP_CALL;
+ if (pn->pn_op == JSOP_NAME &&
+ pn->pn_atom == cx->runtime->atomState.evalAtom) {
+ pn2->pn_op = JSOP_EVAL;
+ tc->flags |= TCF_FUN_HEAVYWEIGHT;
+ }
+
+ PN_INIT_LIST_1(pn2, pn);
+ pn2->pn_pos.begin = pn->pn_pos.begin;
+
+ if (!ArgumentList(cx, ts, tc, pn2))
+ return NULL;
+ if (pn2->pn_count > ARGC_LIMIT) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_TOO_MANY_FUN_ARGS);
+ return NULL;
+ }
+ pn2->pn_pos.end = CURRENT_TOKEN(ts).pos.end;
+ } else {
+ js_UngetToken(ts);
+ return pn;
+ }
+
+ pn = pn2;
+ }
+ if (tt == TOK_ERROR)
+ return NULL;
+ return pn;
+}
+
+static JSParseNode *
+BracketedExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc)
+{
+ uintN oldflags;
+ JSParseNode *pn;
+
+ /*
+ * Always accept the 'in' operator in a parenthesized expression,
+ * where it's unambiguous, even if we might be parsing the init of a
+ * for statement.
+ */
+ oldflags = tc->flags;
+ tc->flags &= ~TCF_IN_FOR_INIT;
+ pn = Expr(cx, ts, tc);
+ tc->flags = oldflags | (tc->flags & TCF_FUN_FLAGS);
+ return pn;
+}
+
+#if JS_HAS_XML_SUPPORT
+
+static JSParseNode *
+EndBracketedExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc)
+{
+ JSParseNode *pn;
+
+ pn = BracketedExpr(cx, ts, tc);
+ if (!pn)
+ return NULL;
+
+ MUST_MATCH_TOKEN(TOK_RB, JSMSG_BRACKET_AFTER_ATTR_EXPR);
+ return pn;
+}
+
+/*
+ * From the ECMA-357 grammar in 11.1.1 and 11.1.2:
+ *
+ * AttributeIdentifier:
+ * @ PropertySelector
+ * @ QualifiedIdentifier
+ * @ [ Expression ]
+ *
+ * PropertySelector:
+ * Identifier
+ * *
+ *
+ * QualifiedIdentifier:
+ * PropertySelector :: PropertySelector
+ * PropertySelector :: [ Expression ]
+ *
+ * We adapt AttributeIdentifier and QualifiedIdentifier to be LL(1), like so:
+ *
+ * AttributeIdentifier:
+ * @ QualifiedIdentifier
+ * @ [ Expression ]
+ *
+ * PropertySelector:
+ * Identifier
+ * *
+ *
+ * QualifiedIdentifier:
+ * PropertySelector :: PropertySelector
+ * PropertySelector :: [ Expression ]
+ * PropertySelector
+ *
+ * As PrimaryExpression: Identifier is in ECMA-262 and we want the semantics
+ * for that rule to result in a name node, but ECMA-357 extends the grammar
+ * to include PrimaryExpression: QualifiedIdentifier, we must factor further:
+ *
+ * QualifiedIdentifier:
+ * PropertySelector QualifiedSuffix
+ *
+ * QualifiedSuffix:
+ * :: PropertySelector
+ * :: [ Expression ]
+ * /nothing/
+ *
+ * And use this production instead of PrimaryExpression: QualifiedIdentifier:
+ *
+ * PrimaryExpression:
+ * Identifier QualifiedSuffix
+ *
+ * We hoist the :: match into callers of QualifiedSuffix, in order to tweak
+ * PropertySelector vs. Identifier pn_arity, pn_op, and other members.
+ */
+static JSParseNode *
+PropertySelector(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc)
+{
+ JSParseNode *pn;
+
+ pn = NewParseNode(cx, ts, PN_NULLARY, tc);
+ if (!pn)
+ return NULL;
+ if (pn->pn_type == TOK_STAR) {
+ pn->pn_type = TOK_ANYNAME;
+ pn->pn_op = JSOP_ANYNAME;
+ pn->pn_atom = cx->runtime->atomState.starAtom;
+ } else {
+ JS_ASSERT(pn->pn_type == TOK_NAME);
+ pn->pn_op = JSOP_QNAMEPART;
+ pn->pn_arity = PN_NAME;
+ pn->pn_atom = CURRENT_TOKEN(ts).t_atom;
+ pn->pn_expr = NULL;
+ pn->pn_slot = -1;
+ pn->pn_attrs = 0;
+ }
+ return pn;
+}
+
+static JSParseNode *
+QualifiedSuffix(JSContext *cx, JSTokenStream *ts, JSParseNode *pn,
+ JSTreeContext *tc)
+{
+ JSParseNode *pn2, *pn3;
+ JSTokenType tt;
+
+ JS_ASSERT(CURRENT_TOKEN(ts).type == TOK_DBLCOLON);
+ pn2 = NewParseNode(cx, ts, PN_NAME, tc);
+ if (!pn2)
+ return NULL;
+
+ /* Left operand of :: must be evaluated if it is an identifier. */
+ if (pn->pn_op == JSOP_QNAMEPART)
+ pn->pn_op = JSOP_NAME;
+
+ ts->flags |= TSF_KEYWORD_IS_NAME;
+ tt = js_GetToken(cx, ts);
+ ts->flags &= ~TSF_KEYWORD_IS_NAME;
+ if (tt == TOK_STAR || tt == TOK_NAME) {
+ /* Inline and specialize PropertySelector for JSOP_QNAMECONST. */
+ pn2->pn_op = JSOP_QNAMECONST;
+ pn2->pn_atom = (tt == TOK_STAR)
+ ? cx->runtime->atomState.starAtom
+ : CURRENT_TOKEN(ts).t_atom;
+ pn2->pn_expr = pn;
+ pn2->pn_slot = -1;
+ pn2->pn_attrs = 0;
+ return pn2;
+ }
+
+ if (tt != TOK_LB) {
+ js_ReportCompileErrorNumber(cx, ts, JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_SYNTAX_ERROR);
+ return NULL;
+ }
+ pn3 = EndBracketedExpr(cx, ts, tc);
+ if (!pn3)
+ return NULL;
+
+ pn2->pn_op = JSOP_QNAME;
+ pn2->pn_arity = PN_BINARY;
+ pn2->pn_left = pn;
+ pn2->pn_right = pn3;
+ return pn2;
+}
+
+static JSParseNode *
+QualifiedIdentifier(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc)
+{
+ JSParseNode *pn;
+
+ pn = PropertySelector(cx, ts, tc);
+ if (!pn)
+ return NULL;
+ if (js_MatchToken(cx, ts, TOK_DBLCOLON))
+ pn = QualifiedSuffix(cx, ts, pn, tc);
+ return pn;
+}
+
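+/*
+ * Parse an E4X attribute identifier after the @ token, e.g. "@id",
+ * "@ns::id", "@*", or "@[expr]", producing a JSOP_TOATTRNAME unary node
+ * whose kid is the qualified name or bracketed expression.
+ */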
+static JSParseNode *
+AttributeIdentifier(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc)
+{
+ JSParseNode *pn, *pn2;
+ JSTokenType tt;
+
+ JS_ASSERT(CURRENT_TOKEN(ts).type == TOK_AT);
+ pn = NewParseNode(cx, ts, PN_UNARY, tc);
+ if (!pn)
+ return NULL;
+ pn->pn_op = JSOP_TOATTRNAME;
+ ts->flags |= TSF_KEYWORD_IS_NAME;
+ tt = js_GetToken(cx, ts);
+ ts->flags &= ~TSF_KEYWORD_IS_NAME;
+ if (tt == TOK_STAR || tt == TOK_NAME) {
+ pn2 = QualifiedIdentifier(cx, ts, tc);
+ } else if (tt == TOK_LB) {
+ pn2 = EndBracketedExpr(cx, ts, tc);
+ } else {
+ js_ReportCompileErrorNumber(cx, ts, JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_SYNTAX_ERROR);
+ return NULL;
+ }
+ if (!pn2)
+ return NULL;
+ pn->pn_kid = pn2;
+ return pn;
+}
+
+/*
+ * Make a TOK_LC unary node whose pn_kid is an expression.
+ */
+static JSParseNode *
+XMLExpr(JSContext *cx, JSTokenStream *ts, JSBool inTag, JSTreeContext *tc)
+{
+ JSParseNode *pn, *pn2;
+ uintN oldflags;
+
+ JS_ASSERT(CURRENT_TOKEN(ts).type == TOK_LC);
+ pn = NewParseNode(cx, ts, PN_UNARY, tc);
+ if (!pn)
+ return NULL;
+
+ /*
+ * Turn off XML tag mode, but don't restore it after parsing this braced
+ * expression. Instead, simply restore ts's old flags. This is required
+ * because XMLExpr is called both from within a tag, and from within text
+ * contained in an element, but outside of any start, end, or point tag.
+ */
+ oldflags = ts->flags;
+ ts->flags = oldflags & ~TSF_XMLTAGMODE;
+ pn2 = Expr(cx, ts, tc);
+ if (!pn2)
+ return NULL;
+
+ MUST_MATCH_TOKEN(TOK_RC, JSMSG_CURLY_IN_XML_EXPR);
+ ts->flags = oldflags;
+ pn->pn_kid = pn2;
+ pn->pn_op = inTag ? JSOP_XMLTAGEXPR : JSOP_XMLELTEXPR;
+ return pn;
+}
+
+/*
+ * Make a terminal node for one of TOK_XMLNAME, TOK_XMLATTR, TOK_XMLSPACE,
+ * TOK_XMLTEXT, TOK_XMLCDATA, TOK_XMLCOMMENT, or TOK_XMLPI. When converting
+ * parse tree to XML, we preserve a TOK_XMLSPACE node only if it's the sole
+ * child of a container tag.
+ */
+static JSParseNode *
+XMLAtomNode(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc)
+{
+ JSParseNode *pn;
+ JSToken *tp;
+
+ pn = NewParseNode(cx, ts, PN_NULLARY, tc);
+ if (!pn)
+ return NULL;
+ tp = &CURRENT_TOKEN(ts);
+ pn->pn_op = tp->t_op;
+ pn->pn_atom = tp->t_atom;
+ if (tp->type == TOK_XMLPI)
+ pn->pn_atom2 = tp->t_atom2;
+ return pn;
+}
+
+/*
+ * Parse the productions:
+ *
+ * XMLNameExpr:
+ * XMLName XMLNameExpr?
+ * { Expr } XMLNameExpr?
+ *
+ * Return a PN_LIST, PN_UNARY, or PN_NULLARY according to whether XMLNameExpr
+ * produces a list of names and/or expressions, a single expression, or a
+ * single name.
+ * If PN_LIST or PN_NULLARY, pn_type will be TOK_XMLNAME; if PN_UNARY, pn_type
+ * will be TOK_LC.
+ */
+static JSParseNode *
+XMLNameExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc)
+{
+ JSParseNode *pn, *pn2, *list;
+ JSTokenType tt;
+
+ pn = list = NULL;
+ do {
+ tt = CURRENT_TOKEN(ts).type;
+ if (tt == TOK_LC) {
+ pn2 = XMLExpr(cx, ts, JS_TRUE, tc);
+ if (!pn2)
+ return NULL;
+ } else {
+ JS_ASSERT(tt == TOK_XMLNAME);
+ pn2 = XMLAtomNode(cx, ts, tc);
+ if (!pn2)
+ return NULL;
+ }
+
+ if (!pn) {
+ pn = pn2;
+ } else {
+ if (!list) {
+ list = NewParseNode(cx, ts, PN_LIST, tc);
+ if (!list)
+ return NULL;
+ list->pn_type = TOK_XMLNAME;
+ list->pn_pos.begin = pn->pn_pos.begin;
+ PN_INIT_LIST_1(list, pn);
+ list->pn_extra = PNX_CANTFOLD;
+ pn = list;
+ }
+ pn->pn_pos.end = pn2->pn_pos.end;
+ PN_APPEND(pn, pn2);
+ }
+ } while ((tt = js_GetToken(cx, ts)) == TOK_XMLNAME || tt == TOK_LC);
+
+ js_UngetToken(ts);
+ return pn;
+}
+
+/*
+ * Macro to test whether an XMLNameExpr or XMLTagContent node can be folded
+ * at compile time into a JSXML tree.
+ */
+#define XML_FOLDABLE(pn) ((pn)->pn_arity == PN_LIST \
+ ? ((pn)->pn_extra & PNX_CANTFOLD) == 0 \
+ : (pn)->pn_type != TOK_LC)
+
+/*
+ * Parse the productions:
+ *
+ * XMLTagContent:
+ * XMLNameExpr
+ * XMLTagContent S XMLNameExpr S? = S? XMLAttr
+ * XMLTagContent S XMLNameExpr S? = S? { Expr }
+ *
+ * Return a PN_LIST, PN_UNARY, or PN_NULLARY according to how XMLTagContent
+ * produces a list of name and attribute values and/or braced expressions, a
+ * single expression, or a single name.
+ *
+ * If PN_LIST or PN_NULLARY, pn_type will be TOK_XMLNAME for the case where
+ * XMLTagContent: XMLNameExpr. If pn_type is not TOK_XMLNAME but pn_arity is
+ * PN_LIST, pn_type will be tagtype. If PN_UNARY, pn_type will be TOK_LC and
+ * we parsed exactly one expression.
+ */
+static JSParseNode *
+XMLTagContent(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc,
+ JSTokenType tagtype, JSAtom **namep)
+{
+ JSParseNode *pn, *pn2, *list;
+ JSTokenType tt;
+
+ pn = XMLNameExpr(cx, ts, tc);
+ if (!pn)
+ return NULL;
+ *namep = (pn->pn_arity == PN_NULLARY) ? pn->pn_atom : NULL;
+ list = NULL;
+
+ while (js_MatchToken(cx, ts, TOK_XMLSPACE)) {
+ tt = js_GetToken(cx, ts);
+ if (tt != TOK_XMLNAME && tt != TOK_LC) {
+ js_UngetToken(ts);
+ break;
+ }
+
+ pn2 = XMLNameExpr(cx, ts, tc);
+ if (!pn2)
+ return NULL;
+ if (!list) {
+ list = NewParseNode(cx, ts, PN_LIST, tc);
+ if (!list)
+ return NULL;
+ list->pn_type = tagtype;
+ list->pn_pos.begin = pn->pn_pos.begin;
+ PN_INIT_LIST_1(list, pn);
+ pn = list;
+ }
+ PN_APPEND(pn, pn2);
+ if (!XML_FOLDABLE(pn2))
+ pn->pn_extra |= PNX_CANTFOLD;
+
+ js_MatchToken(cx, ts, TOK_XMLSPACE);
+ MUST_MATCH_TOKEN(TOK_ASSIGN, JSMSG_NO_ASSIGN_IN_XML_ATTR);
+ js_MatchToken(cx, ts, TOK_XMLSPACE);
+
+ tt = js_GetToken(cx, ts);
+ if (tt == TOK_XMLATTR) {
+ pn2 = XMLAtomNode(cx, ts, tc);
+ } else if (tt == TOK_LC) {
+ pn2 = XMLExpr(cx, ts, JS_TRUE, tc);
+ pn->pn_extra |= PNX_CANTFOLD;
+ } else {
+ js_ReportCompileErrorNumber(cx, ts, JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_BAD_XML_ATTR_VALUE);
+ return NULL;
+ }
+ if (!pn2)
+ return NULL;
+ pn->pn_pos.end = pn2->pn_pos.end;
+ PN_APPEND(pn, pn2);
+ }
+
+ return pn;
+}
+
+#define XML_CHECK_FOR_ERROR_AND_EOF(tt,result) \
+ JS_BEGIN_MACRO \
+ if ((tt) <= TOK_EOF) { \
+ if ((tt) == TOK_EOF) { \
+ js_ReportCompileErrorNumber(cx, ts, \
+ JSREPORT_TS | JSREPORT_ERROR, \
+ JSMSG_END_OF_XML_SOURCE); \
+ } \
+ return result; \
+ } \
+ JS_END_MACRO
+
+static JSParseNode *
+XMLElementOrList(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc,
+ JSBool allowList);
+
+/*
+ * Consume XML element tag content, including the TOK_XMLETAGO (</) sequence
+ * that opens the end tag for the container.
+ */
+static JSBool
+XMLElementContent(JSContext *cx, JSTokenStream *ts, JSParseNode *pn,
+ JSTreeContext *tc)
+{
+ JSTokenType tt;
+ JSParseNode *pn2;
+ JSAtom *textAtom;
+
+ ts->flags &= ~TSF_XMLTAGMODE;
+ for (;;) {
+ ts->flags |= TSF_XMLTEXTMODE;
+ tt = js_GetToken(cx, ts);
+ ts->flags &= ~TSF_XMLTEXTMODE;
+ XML_CHECK_FOR_ERROR_AND_EOF(tt, JS_FALSE);
+
+ JS_ASSERT(tt == TOK_XMLSPACE || tt == TOK_XMLTEXT);
+ textAtom = CURRENT_TOKEN(ts).t_atom;
+ if (textAtom) {
+ /* Non-zero-length XML text scanned. */
+ pn2 = XMLAtomNode(cx, ts, tc);
+ if (!pn2)
+ return JS_FALSE;
+ pn->pn_pos.end = pn2->pn_pos.end;
+ PN_APPEND(pn, pn2);
+ }
+
+ ts->flags |= TSF_OPERAND;
+ tt = js_GetToken(cx, ts);
+ ts->flags &= ~TSF_OPERAND;
+ XML_CHECK_FOR_ERROR_AND_EOF(tt, JS_FALSE);
+ if (tt == TOK_XMLETAGO)
+ break;
+
+ if (tt == TOK_LC) {
+ pn2 = XMLExpr(cx, ts, JS_FALSE, tc);
+ pn->pn_extra |= PNX_CANTFOLD;
+ } else if (tt == TOK_XMLSTAGO) {
+ pn2 = XMLElementOrList(cx, ts, tc, JS_FALSE);
+ if (pn2) {
+ pn2->pn_extra &= ~PNX_XMLROOT;
+ pn->pn_extra |= pn2->pn_extra;
+ }
+ } else {
+ JS_ASSERT(tt == TOK_XMLCDATA || tt == TOK_XMLCOMMENT ||
+ tt == TOK_XMLPI);
+ pn2 = XMLAtomNode(cx, ts, tc);
+ }
+ if (!pn2)
+ return JS_FALSE;
+ pn->pn_pos.end = pn2->pn_pos.end;
+ PN_APPEND(pn, pn2);
+ }
+
+ JS_ASSERT(CURRENT_TOKEN(ts).type == TOK_XMLETAGO);
+ ts->flags |= TSF_XMLTAGMODE;
+ return JS_TRUE;
+}
+
+/*
+ * Return a PN_LIST node containing an XML or XMLList Initialiser.
+ */
+static JSParseNode *
+XMLElementOrList(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc,
+ JSBool allowList)
+{
+ JSParseNode *pn, *pn2, *list;
+ JSBool hadSpace;
+ JSTokenType tt;
+ JSAtom *startAtom, *endAtom;
+
+ CHECK_RECURSION();
+
+ JS_ASSERT(CURRENT_TOKEN(ts).type == TOK_XMLSTAGO);
+ pn = NewParseNode(cx, ts, PN_LIST, tc);
+ if (!pn)
+ return NULL;
+
+ ts->flags |= TSF_XMLTAGMODE;
+ hadSpace = js_MatchToken(cx, ts, TOK_XMLSPACE);
+ tt = js_GetToken(cx, ts);
+ if (tt == TOK_ERROR)
+ return NULL;
+
+ if (tt == TOK_XMLNAME || tt == TOK_LC) {
+ /*
+ * XMLElement. Append the tag and its contents, if any, to pn.
+ */
+ pn2 = XMLTagContent(cx, ts, tc, TOK_XMLSTAGO, &startAtom);
+ if (!pn2)
+ return NULL;
+ js_MatchToken(cx, ts, TOK_XMLSPACE);
+
+ tt = js_GetToken(cx, ts);
+ if (tt == TOK_XMLPTAGC) {
+ /* Point tag (/>): recycle pn if pn2 is a list of tag contents. */
+ if (pn2->pn_type == TOK_XMLSTAGO) {
+ PN_INIT_LIST(pn);
+ RecycleTree(pn, tc);
+ pn = pn2;
+ } else {
+ JS_ASSERT(pn2->pn_type == TOK_XMLNAME ||
+ pn2->pn_type == TOK_LC);
+ PN_INIT_LIST_1(pn, pn2);
+ if (!XML_FOLDABLE(pn2))
+ pn->pn_extra |= PNX_CANTFOLD;
+ }
+ pn->pn_type = TOK_XMLPTAGC;
+ pn->pn_extra |= PNX_XMLROOT;
+ } else {
+ /* We had better have a tag-close (>) at this point. */
+ if (tt != TOK_XMLTAGC) {
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_BAD_XML_TAG_SYNTAX);
+ return NULL;
+ }
+ pn2->pn_pos.end = CURRENT_TOKEN(ts).pos.end;
+
+ /* Make sure pn2 is a TOK_XMLSTAGO list containing tag contents. */
+ if (pn2->pn_type != TOK_XMLSTAGO) {
+ PN_INIT_LIST_1(pn, pn2);
+ if (!XML_FOLDABLE(pn2))
+ pn->pn_extra |= PNX_CANTFOLD;
+ pn2 = pn;
+ pn = NewParseNode(cx, ts, PN_LIST, tc);
+ if (!pn)
+ return NULL;
+ }
+
+ /* Now make pn a nominal-root TOK_XMLELEM list containing pn2. */
+ pn->pn_type = TOK_XMLELEM;
+ PN_INIT_LIST_1(pn, pn2);
+ if (!XML_FOLDABLE(pn2))
+ pn->pn_extra |= PNX_CANTFOLD;
+ pn->pn_extra |= PNX_XMLROOT;
+
+ /* Get element contents and delimiting end-tag-open sequence. */
+ if (!XMLElementContent(cx, ts, pn, tc))
+ return NULL;
+
+ js_MatchToken(cx, ts, TOK_XMLSPACE);
+ tt = js_GetToken(cx, ts);
+ XML_CHECK_FOR_ERROR_AND_EOF(tt, NULL);
+ if (tt != TOK_XMLNAME && tt != TOK_LC) {
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_BAD_XML_TAG_SYNTAX);
+ return NULL;
+ }
+
+ /* Parse end tag; check mismatch at compile-time if we can. */
+ pn2 = XMLTagContent(cx, ts, tc, TOK_XMLETAGO, &endAtom);
+ if (!pn2)
+ return NULL;
+ if (pn2->pn_type == TOK_XMLETAGO) {
+ /* Oops, end tag has attributes! */
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_BAD_XML_TAG_SYNTAX);
+ return NULL;
+ }
+ if (endAtom && startAtom && endAtom != startAtom) {
+ JSString *str = ATOM_TO_STRING(startAtom);
+
+ /* End vs. start tag name mismatch: point to the tag name. */
+ js_ReportCompileErrorNumberUC(cx, pn2,
+ JSREPORT_PN | JSREPORT_ERROR,
+ JSMSG_XML_TAG_NAME_MISMATCH,
+ JSSTRING_CHARS(str));
+ return NULL;
+ }
+
+ /* Make a TOK_XMLETAGO list with pn2 as its single child. */
+ JS_ASSERT(pn2->pn_type == TOK_XMLNAME || pn2->pn_type == TOK_LC);
+ list = NewParseNode(cx, ts, PN_LIST, tc);
+ if (!list)
+ return NULL;
+ list->pn_type = TOK_XMLETAGO;
+ PN_INIT_LIST_1(list, pn2);
+ PN_APPEND(pn, list);
+ if (!XML_FOLDABLE(pn2)) {
+ list->pn_extra |= PNX_CANTFOLD;
+ pn->pn_extra |= PNX_CANTFOLD;
+ }
+
+ js_MatchToken(cx, ts, TOK_XMLSPACE);
+ MUST_MATCH_TOKEN(TOK_XMLTAGC, JSMSG_BAD_XML_TAG_SYNTAX);
+ }
+
+ /* Set pn_op now that pn has been updated to its final value. */
+ pn->pn_op = JSOP_TOXML;
+ } else if (!hadSpace && allowList && tt == TOK_XMLTAGC) {
+ /* XMLList Initialiser. */
+ pn->pn_type = TOK_XMLLIST;
+ pn->pn_op = JSOP_TOXMLLIST;
+ PN_INIT_LIST(pn);
+ pn->pn_extra |= PNX_XMLROOT;
+ if (!XMLElementContent(cx, ts, pn, tc))
+ return NULL;
+
+ MUST_MATCH_TOKEN(TOK_XMLTAGC, JSMSG_BAD_XML_LIST_SYNTAX);
+ } else {
+ js_ReportCompileErrorNumber(cx, ts, JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_BAD_XML_NAME_SYNTAX);
+ return NULL;
+ }
+
+ pn->pn_pos.end = CURRENT_TOKEN(ts).pos.end;
+ ts->flags &= ~TSF_XMLTAGMODE;
+ return pn;
+}
+
+static JSParseNode *
+XMLElementOrListRoot(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc,
+ JSBool allowList)
+{
+ uint32 oldopts;
+ JSParseNode *pn;
+
+ /*
+ * Force XML support to be enabled so that comments and CDATA literals
+ * are recognized, instead of <! followed by -- starting an HTML comment
+ * to end of line (used in script tags to hide content from old browsers
+ * that don't recognize <script>).
+ */
+ oldopts = JS_SetOptions(cx, cx->options | JSOPTION_XML);
+ pn = XMLElementOrList(cx, ts, tc, allowList);
+ JS_SetOptions(cx, oldopts);
+ return pn;
+}
+
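+/*
+ * Friend API: parse XML markup from ts into a parse tree rooted at an XML
+ * element or (if allowList) XMLList initialiser, pushing a compiler frame
+ * if necessary and keeping atoms alive for the duration of the parse.
+ */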
+JS_FRIEND_API(JSParseNode *)
+js_ParseXMLTokenStream(JSContext *cx, JSObject *chain, JSTokenStream *ts,
+ JSBool allowList)
+{
+ JSStackFrame *fp, frame;
+ JSParseNode *pn;
+ JSTreeContext tc;
+ JSTokenType tt;
+
+ /*
+ * Push a compiler frame if we have no frames, or if the top frame is a
+ * lightweight function activation, or if its scope chain doesn't match
+ * the one passed to us.
+ */
+ fp = cx->fp;
+ MaybeSetupFrame(cx, chain, fp, &frame);
+ JS_KEEP_ATOMS(cx->runtime);
+ TREE_CONTEXT_INIT(&tc);
+
+ /* Set XML-only mode to turn off special treatment of {expr} in XML. */
+ ts->flags |= TSF_OPERAND | TSF_XMLONLYMODE;
+ tt = js_GetToken(cx, ts);
+ ts->flags &= ~TSF_OPERAND;
+
+ if (tt != TOK_XMLSTAGO) {
+ js_ReportCompileErrorNumber(cx, ts, JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_BAD_XML_MARKUP);
+ pn = NULL;
+ } else {
+ pn = XMLElementOrListRoot(cx, ts, &tc, allowList);
+ }
+
+ ts->flags &= ~TSF_XMLONLYMODE;
+ TREE_CONTEXT_FINISH(&tc);
+ JS_UNKEEP_ATOMS(cx->runtime);
+ cx->fp = fp;
+ return pn;
+}
+
+#endif /* JS_HAS_XML_SUPPORT */
+
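+/*
+ * Parse a primary expression: function expressions, array and object
+ * initialisers (including array comprehensions under JS_HAS_GENERATORS),
+ * let blocks, parenthesized expressions, and sharp variable forms such as
+ * "#1={...}" and "#1#", among the other cases below.
+ */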
+static JSParseNode *
+PrimaryExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc,
+ JSTokenType tt, JSBool afterDot)
+{
+ JSParseNode *pn, *pn2, *pn3;
+ JSOp op;
+
+#if JS_HAS_SHARP_VARS
+ JSParseNode *defsharp;
+ JSBool notsharp;
+
+ defsharp = NULL;
+ notsharp = JS_FALSE;
+ again:
+ /*
+ * Control flows here after #n= is scanned. If the following primary is
+ * not valid after such a "sharp variable" definition, the tt switch case
+ * should set notsharp.
+ */
+#endif
+
+ CHECK_RECURSION();
+
+#if JS_HAS_GETTER_SETTER
+ if (tt == TOK_NAME) {
+ tt = CheckGetterOrSetter(cx, ts, TOK_FUNCTION);
+ if (tt == TOK_ERROR)
+ return NULL;
+ }
+#endif
+
+ switch (tt) {
+ case TOK_FUNCTION:
+#if JS_HAS_XML_SUPPORT
+ ts->flags |= TSF_KEYWORD_IS_NAME;
+ if (js_MatchToken(cx, ts, TOK_DBLCOLON)) {
+ ts->flags &= ~TSF_KEYWORD_IS_NAME;
+ pn2 = NewParseNode(cx, ts, PN_NULLARY, tc);
+ if (!pn2)
+ return NULL;
+ pn2->pn_type = TOK_FUNCTION;
+ pn = QualifiedSuffix(cx, ts, pn2, tc);
+ if (!pn)
+ return NULL;
+ break;
+ }
+ ts->flags &= ~TSF_KEYWORD_IS_NAME;
+#endif
+ pn = FunctionExpr(cx, ts, tc);
+ if (!pn)
+ return NULL;
+ break;
+
+ case TOK_LB:
+ {
+ JSBool matched;
+ jsuint index;
+
+ pn = NewParseNode(cx, ts, PN_LIST, tc);
+ if (!pn)
+ return NULL;
+ pn->pn_type = TOK_RB;
+
+#if JS_HAS_SHARP_VARS
+ if (defsharp) {
+ PN_INIT_LIST_1(pn, defsharp);
+ defsharp = NULL;
+ } else
+#endif
+ PN_INIT_LIST(pn);
+
+ ts->flags |= TSF_OPERAND;
+ matched = js_MatchToken(cx, ts, TOK_RB);
+ ts->flags &= ~TSF_OPERAND;
+ if (!matched) {
+ for (index = 0; ; index++) {
+ if (index == ARRAY_INIT_LIMIT) {
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_ARRAY_INIT_TOO_BIG);
+ return NULL;
+ }
+
+ ts->flags |= TSF_OPERAND;
+ tt = js_PeekToken(cx, ts);
+ ts->flags &= ~TSF_OPERAND;
+ if (tt == TOK_RB) {
+ pn->pn_extra |= PNX_ENDCOMMA;
+ break;
+ }
+
+ if (tt == TOK_COMMA) {
+ /* So CURRENT_TOKEN gets TOK_COMMA and not TOK_LB. */
+ js_MatchToken(cx, ts, TOK_COMMA);
+ pn2 = NewParseNode(cx, ts, PN_NULLARY, tc);
+ } else {
+ pn2 = AssignExpr(cx, ts, tc);
+ }
+ if (!pn2)
+ return NULL;
+ PN_APPEND(pn, pn2);
+
+ if (tt != TOK_COMMA) {
+ /* If we didn't already match TOK_COMMA in above case. */
+ if (!js_MatchToken(cx, ts, TOK_COMMA))
+ break;
+ }
+ }
+
+#if JS_HAS_GENERATORS
+ /*
+ * At this point, (index == 0 && pn->pn_count != 0) implies one
+ * element initialiser was parsed (possibly with a defsharp before
+ * the left bracket).
+ *
+ * An array comprehension of the form:
+ *
+ * [i * j for (i in o) for (j in p) if (i != j)]
+ *
+ * translates to roughly the following let expression:
+ *
+ * let (array = new Array, i, j) {
+ * for (i in o) let {
+ * for (j in p)
+ * if (i != j)
+ * array.push(i * j)
+ * }
+ * array
+ * }
+ *
+ * where array is a nameless block-local variable. The "roughly"
+ * means that an implementation may optimize away the array.push.
+ * An array comprehension opens exactly one block scope, no matter
+ * how many for heads it contains.
+ *
+ * Each let () {...} or for (let ...) ... compiles to:
+ *
+ * JSOP_ENTERBLOCK <o> ... JSOP_LEAVEBLOCK <n>
+ *
+ * where <o> is a literal object representing the block scope,
+ * with <n> properties, naming each var declared in the block.
+ *
+ * Each var declaration in a let-block binds a name in <o> at
+ * compile time, and allocates a slot on the operand stack at
+ * runtime via JSOP_ENTERBLOCK. A block-local var is accessed
+ * by the JSOP_GETLOCAL and JSOP_SETLOCAL ops, and iterated with
+ * JSOP_FORLOCAL. These ops all have an immediate operand, the
+ * local slot's stack index from fp->spbase.
+ *
+ * The array comprehension iteration step, array.push(i * j) in
+ * the example above, is done by <i * j>; JSOP_ARRAYCOMP <array>,
+ * where <array> is the index of array's stack slot.
+ */
+ if (index == 0 &&
+ pn->pn_count != 0 &&
+ js_MatchToken(cx, ts, TOK_FOR)) {
+ JSParseNode **pnp, *pnexp, *pntop, *pnlet;
+ BindData data;
+ JSRuntime *rt;
+ JSStmtInfo stmtInfo;
+ JSAtom *atom;
+
+ /* Relabel pn as an array comprehension node. */
+ pn->pn_type = TOK_ARRAYCOMP;
+
+ /*
+ * Remove the comprehension expression from pn's linked list
+ * and save it via pnexp. We'll re-install it underneath the
+ * ARRAYPUSH node after we parse the rest of the comprehension.
+ */
+ pnexp = PN_LAST(pn);
+ JS_ASSERT(pn->pn_count == 1 || pn->pn_count == 2);
+ pn->pn_tail = (--pn->pn_count == 1)
+ ? &pn->pn_head->pn_next
+ : &pn->pn_head;
+ *pn->pn_tail = NULL;
+
+ /*
+ * Make a parse-node and literal object representing the array
+ * comprehension's block scope.
+ */
+ pntop = PushLexicalScope(cx, ts, tc, &stmtInfo);
+ if (!pntop)
+ return NULL;
+ pnp = &pntop->pn_expr;
+
+ data.pn = NULL;
+ data.ts = ts;
+ data.obj = tc->blockChain;
+ data.op = JSOP_NOP;
+ data.binder = BindLet;
+ data.u.let.index = 0;
+ data.u.let.overflow = JSMSG_ARRAY_INIT_TOO_BIG;
+
+ rt = cx->runtime;
+ do {
+ /*
+ * FOR node is binary, left is control and right is body.
+ * Use index to count each block-local let-variable on the
+ * left-hand side of IN.
+ */
+ pn2 = NewParseNode(cx, ts, PN_BINARY, tc);
+ if (!pn2)
+ return NULL;
+
+ pn2->pn_op = JSOP_FORIN;
+ if (js_MatchToken(cx, ts, TOK_NAME)) {
+ if (CURRENT_TOKEN(ts).t_atom == rt->atomState.eachAtom)
+ pn2->pn_op = JSOP_FOREACH;
+ else
+ js_UngetToken(ts);
+ }
+ MUST_MATCH_TOKEN(TOK_LP, JSMSG_PAREN_AFTER_FOR);
+
+ tt = js_GetToken(cx, ts);
+ switch (tt) {
+#if JS_HAS_DESTRUCTURING
+ case TOK_LB:
+ case TOK_LC:
+ pnlet = DestructuringExpr(cx, &data, tc, tt);
+ if (!pnlet)
+ return NULL;
+
+ if (pnlet->pn_type != TOK_RB || pnlet->pn_count != 2) {
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS |
+ JSREPORT_ERROR,
+ JSMSG_BAD_FOR_LEFTSIDE);
+ return NULL;
+ }
+
+ /* Destructuring requires [key, value] enumeration. */
+ if (pn2->pn_op != JSOP_FOREACH)
+ pn2->pn_op = JSOP_FOREACHKEYVAL;
+ break;
+#endif
+
+ case TOK_NAME:
+ atom = CURRENT_TOKEN(ts).t_atom;
+ if (!data.binder(cx, &data, atom, tc))
+ return NULL;
+
+ /*
+ * Create a name node with op JSOP_NAME. We can't set
+ * op to JSOP_GETLOCAL here, because we don't yet know
+ * the block's depth in the operand stack frame. The
+ * code generator computes that, and it tries to bind
+ * all names to slots, so we must let it do the deed.
+ */
+ pnlet = NewParseNode(cx, ts, PN_NAME, tc);
+ if (!pnlet)
+ return NULL;
+ pnlet->pn_op = JSOP_NAME;
+ pnlet->pn_atom = atom;
+ pnlet->pn_expr = NULL;
+ pnlet->pn_slot = -1;
+ pnlet->pn_attrs = 0;
+ break;
+
+ default:
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS|JSREPORT_ERROR,
+ JSMSG_NO_VARIABLE_NAME);
+ return NULL;
+ }
+
+ MUST_MATCH_TOKEN(TOK_IN, JSMSG_IN_AFTER_FOR_NAME);
+ pn3 = NewBinary(cx, TOK_IN, JSOP_NOP, pnlet,
+ Expr(cx, ts, tc), tc);
+ if (!pn3)
+ return NULL;
+
+ MUST_MATCH_TOKEN(TOK_RP, JSMSG_PAREN_AFTER_FOR_CTRL);
+ pn2->pn_left = pn3;
+ *pnp = pn2;
+ pnp = &pn2->pn_right;
+ } while (js_MatchToken(cx, ts, TOK_FOR));
+
+ if (js_MatchToken(cx, ts, TOK_IF)) {
+ pn2 = NewParseNode(cx, ts, PN_TERNARY, tc);
+ if (!pn2)
+ return NULL;
+ pn2->pn_kid1 = Condition(cx, ts, tc);
+ if (!pn2->pn_kid1)
+ return NULL;
+ pn2->pn_kid2 = NULL;
+ pn2->pn_kid3 = NULL;
+ *pnp = pn2;
+ pnp = &pn2->pn_kid2;
+ }
+
+ pn2 = NewParseNode(cx, ts, PN_UNARY, tc);
+ if (!pn2)
+ return NULL;
+ pn2->pn_type = TOK_ARRAYPUSH;
+ pn2->pn_op = JSOP_ARRAYPUSH;
+ pn2->pn_kid = pnexp;
+ *pnp = pn2;
+ PN_APPEND(pn, pntop);
+
+ js_PopStatement(tc);
+ }
+#endif /* JS_HAS_GENERATORS */
+
+ MUST_MATCH_TOKEN(TOK_RB, JSMSG_BRACKET_AFTER_LIST);
+ }
+ pn->pn_pos.end = CURRENT_TOKEN(ts).pos.end;
+ return pn;
+ }
+
+#if JS_HAS_BLOCK_SCOPE
+ case TOK_LET:
+ pn = LetBlock(cx, ts, tc, JS_FALSE);
+ if (!pn)
+ return NULL;
+ break;
+#endif
+
+ case TOK_LC:
+ {
+ JSBool afterComma;
+
+ pn = NewParseNode(cx, ts, PN_LIST, tc);
+ if (!pn)
+ return NULL;
+ pn->pn_type = TOK_RC;
+
+#if JS_HAS_SHARP_VARS
+ if (defsharp) {
+ PN_INIT_LIST_1(pn, defsharp);
+ defsharp = NULL;
+ } else
+#endif
+ PN_INIT_LIST(pn);
+
+ afterComma = JS_FALSE;
+ for (;;) {
+ ts->flags |= TSF_KEYWORD_IS_NAME;
+ tt = js_GetToken(cx, ts);
+ ts->flags &= ~TSF_KEYWORD_IS_NAME;
+ switch (tt) {
+ case TOK_NUMBER:
+ pn3 = NewParseNode(cx, ts, PN_NULLARY, tc);
+ if (pn3)
+ pn3->pn_dval = CURRENT_TOKEN(ts).t_dval;
+ break;
+ case TOK_NAME:
+#if JS_HAS_GETTER_SETTER
+ {
+ JSAtom *atom;
+ JSRuntime *rt;
+
+ atom = CURRENT_TOKEN(ts).t_atom;
+ rt = cx->runtime;
+ if (atom == rt->atomState.getAtom ||
+ atom == rt->atomState.setAtom) {
+ op = (atom == rt->atomState.getAtom)
+ ? JSOP_GETTER
+ : JSOP_SETTER;
+ if (js_MatchToken(cx, ts, TOK_NAME)) {
+ pn3 = NewParseNode(cx, ts, PN_NAME, tc);
+ if (!pn3)
+ return NULL;
+ pn3->pn_atom = CURRENT_TOKEN(ts).t_atom;
+ pn3->pn_expr = NULL;
+ pn3->pn_slot = -1;
+ pn3->pn_attrs = 0;
+
+ /* We have to fake a 'function' token here. */
+ CURRENT_TOKEN(ts).t_op = JSOP_NOP;
+ CURRENT_TOKEN(ts).type = TOK_FUNCTION;
+ pn2 = FunctionExpr(cx, ts, tc);
+ pn2 = NewBinary(cx, TOK_COLON, op, pn3, pn2, tc);
+ goto skip;
+ }
+ }
+ /* else fall thru ... */
+ }
+#endif
+ case TOK_STRING:
+ pn3 = NewParseNode(cx, ts, PN_NULLARY, tc);
+ if (pn3)
+ pn3->pn_atom = CURRENT_TOKEN(ts).t_atom;
+ break;
+ case TOK_RC:
+ if (afterComma &&
+ !js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS |
+ JSREPORT_WARNING |
+ JSREPORT_STRICT,
+ JSMSG_TRAILING_COMMA)) {
+ return NULL;
+ }
+ goto end_obj_init;
+ default:
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_BAD_PROP_ID);
+ return NULL;
+ }
+
+ tt = js_GetToken(cx, ts);
+#if JS_HAS_GETTER_SETTER
+ if (tt == TOK_NAME) {
+ tt = CheckGetterOrSetter(cx, ts, TOK_COLON);
+ if (tt == TOK_ERROR)
+ return NULL;
+ }
+#endif
+ if (tt != TOK_COLON) {
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_COLON_AFTER_ID);
+ return NULL;
+ }
+ op = CURRENT_TOKEN(ts).t_op;
+ pn2 = NewBinary(cx, TOK_COLON, op, pn3, AssignExpr(cx, ts, tc), tc);
+#if JS_HAS_GETTER_SETTER
+ skip:
+#endif
+ if (!pn2)
+ return NULL;
+ PN_APPEND(pn, pn2);
+
+ tt = js_GetToken(cx, ts);
+ if (tt == TOK_RC)
+ goto end_obj_init;
+ if (tt != TOK_COMMA) {
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_CURLY_AFTER_LIST);
+ return NULL;
+ }
+ afterComma = JS_TRUE;
+ }
+ end_obj_init:
+ pn->pn_pos.end = CURRENT_TOKEN(ts).pos.end;
+ return pn;
+ }
+
+#if JS_HAS_SHARP_VARS
+ case TOK_DEFSHARP:
+ if (defsharp)
+ goto badsharp;
+ defsharp = NewParseNode(cx, ts, PN_UNARY, tc);
+ if (!defsharp)
+ return NULL;
+ defsharp->pn_kid = NULL;
+ defsharp->pn_num = (jsint) CURRENT_TOKEN(ts).t_dval;
+ ts->flags |= TSF_OPERAND;
+ tt = js_GetToken(cx, ts);
+ ts->flags &= ~TSF_OPERAND;
+ goto again;
+
+ case TOK_USESHARP:
+ /* Check for forward/dangling references at runtime, to allow eval. */
+ pn = NewParseNode(cx, ts, PN_NULLARY, tc);
+ if (!pn)
+ return NULL;
+ pn->pn_num = (jsint) CURRENT_TOKEN(ts).t_dval;
+ notsharp = JS_TRUE;
+ break;
+#endif /* JS_HAS_SHARP_VARS */
+
+ case TOK_LP:
+ pn = NewParseNode(cx, ts, PN_UNARY, tc);
+ if (!pn)
+ return NULL;
+ pn2 = BracketedExpr(cx, ts, tc);
+ if (!pn2)
+ return NULL;
+
+ MUST_MATCH_TOKEN(TOK_RP, JSMSG_PAREN_IN_PAREN);
+ if (pn2->pn_type == TOK_RP ||
+ (js_CodeSpec[pn2->pn_op].prec >= js_CodeSpec[JSOP_GETPROP].prec &&
+ !afterDot)) {
+ /*
+ * Avoid redundant JSOP_GROUP opcodes, for efficiency and mainly
+ * to help the decompiler look ahead from a JSOP_ENDINIT to see a
+ * JSOP_GROUP followed by a POP or POPV. That sequence means the
+ * parentheses are mandatory, to disambiguate object initialisers
+ * as expression statements from block statements.
+ *
+ * Also drop pn if pn2 is a member or a primary expression of any
+ * kind. This is required to avoid generating a JSOP_GROUP that
+ * will null the |obj| interpreter register, causing |this| in any
+ * call of that member expression to bind to the global object.
+ */
+ pn->pn_kid = NULL;
+ RecycleTree(pn, tc);
+ pn = pn2;
+ } else {
+ pn->pn_type = TOK_RP;
+ pn->pn_pos.end = CURRENT_TOKEN(ts).pos.end;
+ pn->pn_kid = pn2;
+ }
+ break;
+
+#if JS_HAS_XML_SUPPORT
+ case TOK_STAR:
+ pn = QualifiedIdentifier(cx, ts, tc);
+ if (!pn)
+ return NULL;
+ notsharp = JS_TRUE;
+ break;
+
+ case TOK_AT:
+ pn = AttributeIdentifier(cx, ts, tc);
+ if (!pn)
+ return NULL;
+ notsharp = JS_TRUE;
+ break;
+
+ case TOK_XMLSTAGO:
+ pn = XMLElementOrListRoot(cx, ts, tc, JS_TRUE);
+ if (!pn)
+ return NULL;
+ notsharp = JS_TRUE; /* XXXbe could be sharp? */
+ break;
+#endif /* JS_HAS_XML_SUPPORT */
+
+ case TOK_STRING:
+#if JS_HAS_SHARP_VARS
+ notsharp = JS_TRUE;
+ /* FALL THROUGH */
+#endif
+
+#if JS_HAS_XML_SUPPORT
+ case TOK_XMLCDATA:
+ case TOK_XMLCOMMENT:
+ case TOK_XMLPI:
+#endif
+ case TOK_NAME:
+ case TOK_OBJECT:
+ pn = NewParseNode(cx, ts, PN_NULLARY, tc);
+ if (!pn)
+ return NULL;
+ pn->pn_atom = CURRENT_TOKEN(ts).t_atom;
+#if JS_HAS_XML_SUPPORT
+ if (tt == TOK_XMLPI)
+ pn->pn_atom2 = CURRENT_TOKEN(ts).t_atom2;
+ else
+#endif
+ pn->pn_op = CURRENT_TOKEN(ts).t_op;
+ if (tt == TOK_NAME) {
+ pn->pn_arity = PN_NAME;
+ pn->pn_expr = NULL;
+ pn->pn_slot = -1;
+ pn->pn_attrs = 0;
+
+#if JS_HAS_XML_SUPPORT
+ if (js_MatchToken(cx, ts, TOK_DBLCOLON)) {
+ if (afterDot) {
+ JSString *str;
+
+ /*
+ * Here PrimaryExpr is called after '.' or '..' and we
+ * just scanned .name:: or ..name:: . This is the only
+ * case where a keyword after '.' or '..' is not
+ * treated as a property name.
+ */
+ str = ATOM_TO_STRING(pn->pn_atom);
+ tt = js_CheckKeyword(JSSTRING_CHARS(str),
+ JSSTRING_LENGTH(str));
+ if (tt == TOK_FUNCTION) {
+ pn->pn_arity = PN_NULLARY;
+ pn->pn_type = TOK_FUNCTION;
+ } else if (tt != TOK_EOF) {
+ js_ReportCompileErrorNumber(
+ cx, ts, JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_KEYWORD_NOT_NS);
+ return NULL;
+ }
+ }
+ pn = QualifiedSuffix(cx, ts, pn, tc);
+ if (!pn)
+ return NULL;
+ break;
+ }
+#endif
+
+ /* Unqualified __parent__ and __proto__ uses require activations. */
+ if (pn->pn_atom == cx->runtime->atomState.parentAtom ||
+ pn->pn_atom == cx->runtime->atomState.protoAtom) {
+ tc->flags |= TCF_FUN_HEAVYWEIGHT;
+ } else {
+ JSAtomListElement *ale;
+ JSStackFrame *fp;
+ JSBool loopy;
+
+ /* Measure optimizable global variable uses. */
+ ATOM_LIST_SEARCH(ale, &tc->decls, pn->pn_atom);
+ if (ale &&
+ !(fp = cx->fp)->fun &&
+ fp->scopeChain == fp->varobj &&
+ js_IsGlobalReference(tc, pn->pn_atom, &loopy)) {
+ tc->globalUses++;
+ if (loopy)
+ tc->loopyGlobalUses++;
+ }
+ }
+ }
+ break;
+
+ case TOK_NUMBER:
+ pn = NewParseNode(cx, ts, PN_NULLARY, tc);
+ if (!pn)
+ return NULL;
+ pn->pn_dval = CURRENT_TOKEN(ts).t_dval;
+#if JS_HAS_SHARP_VARS
+ notsharp = JS_TRUE;
+#endif
+ break;
+
+ case TOK_PRIMARY:
+ pn = NewParseNode(cx, ts, PN_NULLARY, tc);
+ if (!pn)
+ return NULL;
+ pn->pn_op = CURRENT_TOKEN(ts).t_op;
+#if JS_HAS_SHARP_VARS
+ notsharp = JS_TRUE;
+#endif
+ break;
+
+#if !JS_HAS_EXPORT_IMPORT
+ case TOK_EXPORT:
+ case TOK_IMPORT:
+#endif
+ case TOK_ERROR:
+ /* The scanner or one of its subroutines reported the error. */
+ return NULL;
+
+ default:
+ js_ReportCompileErrorNumber(cx, ts, JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_SYNTAX_ERROR);
+ return NULL;
+ }
+
+#if JS_HAS_SHARP_VARS
+ if (defsharp) {
+ if (notsharp) {
+ badsharp:
+ js_ReportCompileErrorNumber(cx, ts, JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_BAD_SHARP_VAR_DEF);
+ return NULL;
+ }
+ defsharp->pn_kid = pn;
+ return defsharp;
+ }
+#endif
+ return pn;
+}
+
+/*
+ * Fold from one constant type to another.
+ * XXX handles only strings and numbers for now
+ */
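+/*
+ * For example, FoldType(cx, pn, TOK_NUMBER) applied to the string literal
+ * "42" rewrites pn in place: pn_dval becomes 42, pn_type becomes TOK_NUMBER,
+ * and pn_op becomes JSOP_NUMBER; the TOK_STRING direction atomizes the
+ * number's printable form and sets JSOP_STRING instead.
+ */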
+static JSBool
+FoldType(JSContext *cx, JSParseNode *pn, JSTokenType type)
+{
+ if (pn->pn_type != type) {
+ switch (type) {
+ case TOK_NUMBER:
+ if (pn->pn_type == TOK_STRING) {
+ jsdouble d;
+ if (!js_ValueToNumber(cx, ATOM_KEY(pn->pn_atom), &d))
+ return JS_FALSE;
+ pn->pn_dval = d;
+ pn->pn_type = TOK_NUMBER;
+ pn->pn_op = JSOP_NUMBER;
+ }
+ break;
+
+ case TOK_STRING:
+ if (pn->pn_type == TOK_NUMBER) {
+ JSString *str = js_NumberToString(cx, pn->pn_dval);
+ if (!str)
+ return JS_FALSE;
+ pn->pn_atom = js_AtomizeString(cx, str, 0);
+ if (!pn->pn_atom)
+ return JS_FALSE;
+ pn->pn_type = TOK_STRING;
+ pn->pn_op = JSOP_STRING;
+ }
+ break;
+
+ default:;
+ }
+ }
+ return JS_TRUE;
+}
+
+/*
+ * Fold two numeric constants. Beware that pn1 and pn2 are recycled, unless
+ * one of them aliases pn, so you can't safely fetch pn2->pn_next, e.g., after
+ * a successful call to this function.
+ */
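+/*
+ * For example, folding JSOP_ADD over the constants 2 and 3 leaves pn as a
+ * nullary TOK_NUMBER node with pn_dval 5; the shift cases mask the right
+ * operand with 31 first, following ECMA semantics.
+ */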
+static JSBool
+FoldBinaryNumeric(JSContext *cx, JSOp op, JSParseNode *pn1, JSParseNode *pn2,
+ JSParseNode *pn, JSTreeContext *tc)
+{
+ jsdouble d, d2;
+ int32 i, j;
+ uint32 u;
+
+ JS_ASSERT(pn1->pn_type == TOK_NUMBER && pn2->pn_type == TOK_NUMBER);
+ d = pn1->pn_dval;
+ d2 = pn2->pn_dval;
+ switch (op) {
+ case JSOP_LSH:
+ case JSOP_RSH:
+ if (!js_DoubleToECMAInt32(cx, d, &i))
+ return JS_FALSE;
+ if (!js_DoubleToECMAInt32(cx, d2, &j))
+ return JS_FALSE;
+ j &= 31;
+ d = (op == JSOP_LSH) ? i << j : i >> j;
+ break;
+
+ case JSOP_URSH:
+ if (!js_DoubleToECMAUint32(cx, d, &u))
+ return JS_FALSE;
+ if (!js_DoubleToECMAInt32(cx, d2, &j))
+ return JS_FALSE;
+ j &= 31;
+ d = u >> j;
+ break;
+
+ case JSOP_ADD:
+ d += d2;
+ break;
+
+ case JSOP_SUB:
+ d -= d2;
+ break;
+
+ case JSOP_MUL:
+ d *= d2;
+ break;
+
+ case JSOP_DIV:
+ if (d2 == 0) {
+#if defined(XP_WIN)
+ /* XXX MSVC miscompiles such that (NaN == 0) */
+ if (JSDOUBLE_IS_NaN(d2))
+ d = *cx->runtime->jsNaN;
+ else
+#endif
+ if (d == 0 || JSDOUBLE_IS_NaN(d))
+ d = *cx->runtime->jsNaN;
+ else if ((JSDOUBLE_HI32(d) ^ JSDOUBLE_HI32(d2)) >> 31)
+ d = *cx->runtime->jsNegativeInfinity;
+ else
+ d = *cx->runtime->jsPositiveInfinity;
+ } else {
+ d /= d2;
+ }
+ break;
+
+ case JSOP_MOD:
+ if (d2 == 0) {
+ d = *cx->runtime->jsNaN;
+ } else {
+#if defined(XP_WIN)
+ /* Workaround MS fmod bug where 42 % (1/0) => NaN, not 42. */
+ if (!(JSDOUBLE_IS_FINITE(d) && JSDOUBLE_IS_INFINITE(d2)))
+#endif
+ d = fmod(d, d2);
+ }
+ break;
+
+ default:;
+ }
+
+ /* Take care to allow pn1 or pn2 to alias pn. */
+ if (pn1 != pn)
+ RecycleTree(pn1, tc);
+ if (pn2 != pn)
+ RecycleTree(pn2, tc);
+ pn->pn_type = TOK_NUMBER;
+ pn->pn_op = JSOP_NUMBER;
+ pn->pn_arity = PN_NULLARY;
+ pn->pn_dval = d;
+ return JS_TRUE;
+}
+
+#if JS_HAS_XML_SUPPORT
+
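+/*
+ * Fold adjacent constant pieces of an XML literal list into as few
+ * TOK_XMLTEXT kids as possible, concatenating their strings and recycling
+ * the parse nodes folded away; the surrounding tag brackets are included
+ * only when the PNX_CANTFOLD flag is clear.
+ */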
+static JSBool
+FoldXMLConstants(JSContext *cx, JSParseNode *pn, JSTreeContext *tc)
+{
+ JSTokenType tt;
+ JSParseNode **pnp, *pn1, *pn2;
+ JSString *accum, *str;
+ uint32 i, j;
+
+ JS_ASSERT(pn->pn_arity == PN_LIST);
+ tt = pn->pn_type;
+ pnp = &pn->pn_head;
+ pn1 = *pnp;
+ accum = NULL;
+ if ((pn->pn_extra & PNX_CANTFOLD) == 0) {
+ if (tt == TOK_XMLETAGO)
+ accum = ATOM_TO_STRING(cx->runtime->atomState.etagoAtom);
+ else if (tt == TOK_XMLSTAGO || tt == TOK_XMLPTAGC)
+ accum = ATOM_TO_STRING(cx->runtime->atomState.stagoAtom);
+ }
+
+ for (pn2 = pn1, i = j = 0; pn2; pn2 = pn2->pn_next, i++) {
+ /* The parser already rejected end-tags with attributes. */
+ JS_ASSERT(tt != TOK_XMLETAGO || i == 0);
+ switch (pn2->pn_type) {
+ case TOK_XMLATTR:
+ if (!accum)
+ goto cantfold;
+ /* FALL THROUGH */
+ case TOK_XMLNAME:
+ case TOK_XMLSPACE:
+ case TOK_XMLTEXT:
+ case TOK_STRING:
+ if (pn2->pn_arity == PN_LIST)
+ goto cantfold;
+ str = ATOM_TO_STRING(pn2->pn_atom);
+ break;
+
+ case TOK_XMLCDATA:
+ str = js_MakeXMLCDATAString(cx, ATOM_TO_STRING(pn2->pn_atom));
+ if (!str)
+ return JS_FALSE;
+ break;
+
+ case TOK_XMLCOMMENT:
+ str = js_MakeXMLCommentString(cx, ATOM_TO_STRING(pn2->pn_atom));
+ if (!str)
+ return JS_FALSE;
+ break;
+
+ case TOK_XMLPI:
+ str = js_MakeXMLPIString(cx, ATOM_TO_STRING(pn2->pn_atom),
+ ATOM_TO_STRING(pn2->pn_atom2));
+ if (!str)
+ return JS_FALSE;
+ break;
+
+ cantfold:
+ default:
+ JS_ASSERT(*pnp == pn1);
+ if ((tt == TOK_XMLSTAGO || tt == TOK_XMLPTAGC) &&
+ (i & 1) ^ (j & 1)) {
+#ifdef DEBUG_brendanXXX
+ printf("1: %d, %d => %s\n",
+ i, j, accum ? JS_GetStringBytes(accum) : "NULL");
+#endif
+ } else if (accum && pn1 != pn2) {
+ while (pn1->pn_next != pn2) {
+ pn1 = RecycleTree(pn1, tc);
+ --pn->pn_count;
+ }
+ pn1->pn_type = TOK_XMLTEXT;
+ pn1->pn_op = JSOP_STRING;
+ pn1->pn_arity = PN_NULLARY;
+ pn1->pn_atom = js_AtomizeString(cx, accum, 0);
+ if (!pn1->pn_atom)
+ return JS_FALSE;
+ JS_ASSERT(pnp != &pn1->pn_next);
+ *pnp = pn1;
+ }
+ pnp = &pn2->pn_next;
+ pn1 = *pnp;
+ accum = NULL;
+ continue;
+ }
+
+ if (accum) {
+ str = ((tt == TOK_XMLSTAGO || tt == TOK_XMLPTAGC) && i != 0)
+ ? js_AddAttributePart(cx, i & 1, accum, str)
+ : js_ConcatStrings(cx, accum, str);
+ if (!str)
+ return JS_FALSE;
+#ifdef DEBUG_brendanXXX
+ printf("2: %d, %d => %s (%u)\n",
+ i, j, JS_GetStringBytes(str), JSSTRING_LENGTH(str));
+#endif
+ ++j;
+ }
+ accum = str;
+ }
+
+ if (accum) {
+ str = NULL;
+ if ((pn->pn_extra & PNX_CANTFOLD) == 0) {
+ if (tt == TOK_XMLPTAGC)
+ str = ATOM_TO_STRING(cx->runtime->atomState.ptagcAtom);
+ else if (tt == TOK_XMLSTAGO || tt == TOK_XMLETAGO)
+ str = ATOM_TO_STRING(cx->runtime->atomState.tagcAtom);
+ }
+ if (str) {
+ accum = js_ConcatStrings(cx, accum, str);
+ if (!accum)
+ return JS_FALSE;
+ }
+
+ JS_ASSERT(*pnp == pn1);
+ while (pn1->pn_next) {
+ pn1 = RecycleTree(pn1, tc);
+ --pn->pn_count;
+ }
+ pn1->pn_type = TOK_XMLTEXT;
+ pn1->pn_op = JSOP_STRING;
+ pn1->pn_arity = PN_NULLARY;
+ pn1->pn_atom = js_AtomizeString(cx, accum, 0);
+ if (!pn1->pn_atom)
+ return JS_FALSE;
+ JS_ASSERT(pnp != &pn1->pn_next);
+ *pnp = pn1;
+ }
+
+ if (pn1 && pn->pn_count == 1) {
+ /*
+ * Only one node under pn, and it has been folded: move pn1 onto pn
+ * unless pn is an XML root (in which case we need it to tell the code
+ * generator to emit a JSOP_TOXML or JSOP_TOXMLLIST op). If pn is an
+ * XML root *and* it's a point-tag, rewrite it to TOK_XMLELEM to avoid
+ * extra "<" and "/>" bracketing at runtime.
+ */
+ if (!(pn->pn_extra & PNX_XMLROOT)) {
+ PN_MOVE_NODE(pn, pn1);
+ } else if (tt == TOK_XMLPTAGC) {
+ pn->pn_type = TOK_XMLELEM;
+ pn->pn_op = JSOP_TOXML;
+ }
+ }
+ return JS_TRUE;
+}
+
+#endif /* JS_HAS_XML_SUPPORT */
+
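+/*
+ * Return JS_TRUE if the expression rooted at pn starts, in source order,
+ * with a token of type tt. The TOK_HOOK folding case below uses
+ * StartsWith(pn2, TOK_RC) to keep a folded result that begins with an object
+ * initialiser wrapped in parentheses, so it cannot be re-read as a block.
+ */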
+static JSBool
+StartsWith(JSParseNode *pn, JSTokenType tt)
+{
+#define TAIL_RECURSE(pn2) JS_BEGIN_MACRO pn = (pn2); goto recur; JS_END_MACRO
+
+recur:
+ if (pn->pn_type == tt)
+ return JS_TRUE;
+ switch (pn->pn_arity) {
+ case PN_FUNC:
+ return tt == TOK_FUNCTION;
+ case PN_LIST:
+ if (pn->pn_head)
+ TAIL_RECURSE(pn->pn_head);
+ break;
+ case PN_TERNARY:
+ if (pn->pn_kid1)
+ TAIL_RECURSE(pn->pn_kid1);
+ break;
+ case PN_BINARY:
+ if (pn->pn_left)
+ TAIL_RECURSE(pn->pn_left);
+ break;
+ case PN_UNARY:
+ /* A parenthesized expression starts with a left parenthesis. */
+ if (pn->pn_type == TOK_RP)
+ return tt == TOK_LP;
+ if (pn->pn_kid)
+ TAIL_RECURSE(pn->pn_kid);
+ break;
+ case PN_NAME:
+ if (pn->pn_type == TOK_DOT || pn->pn_type == TOK_DBLDOT)
+ TAIL_RECURSE(pn->pn_expr);
+ /* FALL THROUGH */
+ }
+ return JS_FALSE;
+#undef TAIL_RECURSE
+}
+
+JSBool
+js_FoldConstants(JSContext *cx, JSParseNode *pn, JSTreeContext *tc)
+{
+ JSParseNode *pn1 = NULL, *pn2 = NULL, *pn3 = NULL;
+ int stackDummy;
+
+ if (!JS_CHECK_STACK_SIZE(cx, stackDummy)) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_OVER_RECURSED);
+ return JS_FALSE;
+ }
+
+ switch (pn->pn_arity) {
+ case PN_FUNC:
+ {
+ uint16 oldflags = tc->flags;
+
+ tc->flags = (uint16) pn->pn_flags;
+ if (!js_FoldConstants(cx, pn->pn_body, tc))
+ return JS_FALSE;
+ tc->flags = oldflags;
+ break;
+ }
+
+ case PN_LIST:
+#if 0 /* JS_HAS_XML_SUPPORT */
+ switch (pn->pn_type) {
+ case TOK_XMLELEM:
+ case TOK_XMLLIST:
+ case TOK_XMLPTAGC:
+ /*
+ * Try to fold this XML parse tree once, from the top down, into
+ * a JSXML tree with just one object wrapping the tree root.
+ *
+ * Certain subtrees could be folded similarly, but we'd have to
+ * ensure that none used namespace prefixes declared elsewhere in
+ * its super-tree, and we would have to convert each XML object
+ * created at runtime for such sub-trees back into a string, and
+ * concatenate and re-parse anyway.
+ */
+ if ((pn->pn_extra & (PNX_XMLROOT | PNX_CANTFOLD)) == PNX_XMLROOT &&
+ !(tc->flags & TCF_HAS_DEFXMLNS)) {
+ JSObject *obj;
+ JSAtom *atom;
+
+ obj = js_ParseNodeToXMLObject(cx, pn);
+ if (!obj)
+ return JS_FALSE;
+ atom = js_AtomizeObject(cx, obj, 0);
+ if (!atom)
+ return JS_FALSE;
+ pn->pn_op = JSOP_XMLOBJECT;
+ pn->pn_arity = PN_NULLARY;
+ pn->pn_atom = atom;
+ return JS_TRUE;
+ }
+
+ /*
+ * Can't fold from parse node to XML tree -- try folding strings
+ * as much as possible, and folding XML sub-trees bottom up to
+ * minimize string concatenation and ToXML/ToXMLList operations
+ * at runtime.
+ */
+ break;
+
+ default:;
+ }
+#endif
+
+ /* Save the list head in pn1 for later use. */
+ for (pn1 = pn2 = pn->pn_head; pn2; pn2 = pn2->pn_next) {
+ if (!js_FoldConstants(cx, pn2, tc))
+ return JS_FALSE;
+ }
+ break;
+
+ case PN_TERNARY:
+ /* Any kid may be null (e.g. for (;;)). */
+ pn1 = pn->pn_kid1;
+ pn2 = pn->pn_kid2;
+ pn3 = pn->pn_kid3;
+ if (pn1 && !js_FoldConstants(cx, pn1, tc))
+ return JS_FALSE;
+ if (pn2 && !js_FoldConstants(cx, pn2, tc))
+ return JS_FALSE;
+ if (pn3 && !js_FoldConstants(cx, pn3, tc))
+ return JS_FALSE;
+ break;
+
+ case PN_BINARY:
+ /* First kid may be null (for default case in switch). */
+ pn1 = pn->pn_left;
+ pn2 = pn->pn_right;
+ if (pn1 && !js_FoldConstants(cx, pn1, tc))
+ return JS_FALSE;
+ if (!js_FoldConstants(cx, pn2, tc))
+ return JS_FALSE;
+ break;
+
+ case PN_UNARY:
+ /* Our kid may be null (e.g. return; vs. return e;). */
+ pn1 = pn->pn_kid;
+ if (pn1 && !js_FoldConstants(cx, pn1, tc))
+ return JS_FALSE;
+ break;
+
+ case PN_NAME:
+ /*
+ * Skip pn1 down along a chain of dotted member expressions to avoid
+ * excessive recursion. Our only goal here is to fold constants (if
+ * any) in the primary expression operand to the left of the first
+ * dot in the chain.
+ */
+ pn1 = pn->pn_expr;
+ while (pn1 && pn1->pn_arity == PN_NAME)
+ pn1 = pn1->pn_expr;
+ if (pn1 && !js_FoldConstants(cx, pn1, tc))
+ return JS_FALSE;
+ break;
+
+ case PN_NULLARY:
+ break;
+ }
+
+ switch (pn->pn_type) {
+ case TOK_IF:
+ if (ContainsStmt(pn2, TOK_VAR) || ContainsStmt(pn3, TOK_VAR))
+ break;
+ /* FALL THROUGH */
+
+ case TOK_HOOK:
+ /* Reduce 'if (C) T; else E' into T for true C, E for false. */
+ while (pn1->pn_type == TOK_RP)
+ pn1 = pn1->pn_kid;
+ switch (pn1->pn_type) {
+ case TOK_NUMBER:
+ if (pn1->pn_dval == 0)
+ pn2 = pn3;
+ break;
+ case TOK_STRING:
+ if (JSSTRING_LENGTH(ATOM_TO_STRING(pn1->pn_atom)) == 0)
+ pn2 = pn3;
+ break;
+ case TOK_PRIMARY:
+ if (pn1->pn_op == JSOP_TRUE)
+ break;
+ if (pn1->pn_op == JSOP_FALSE || pn1->pn_op == JSOP_NULL) {
+ pn2 = pn3;
+ break;
+ }
+ /* FALL THROUGH */
+ default:
+ /* Early return to dodge common code that copies pn2 to pn. */
+ return JS_TRUE;
+ }
+
+ if (pn2) {
+ /*
+ * pn2 is the then- or else-statement subtree to compile. Take
+ * care not to expose an object initialiser, which would be parsed
+ * as a block, to the Statement parser via eval(uneval(e)) where e
+ * is '1 ? {p:2, q:3}[i] : r;' or the like.
+ */
+ if (pn->pn_type == TOK_HOOK && StartsWith(pn2, TOK_RC)) {
+ pn->pn_type = TOK_RP;
+ pn->pn_arity = PN_UNARY;
+ pn->pn_kid = pn2;
+ } else {
+ PN_MOVE_NODE(pn, pn2);
+ }
+ }
+ if (!pn2 || (pn->pn_type == TOK_SEMI && !pn->pn_kid)) {
+ /*
+ * False condition and no else, or an empty then-statement was
+ * moved up over pn. Either way, make pn an empty block (not an
+ * empty statement, which does not decompile, even when labeled).
+ * NB: pn must be a TOK_IF as TOK_HOOK can never have a null kid
+ * or an empty statement for a child.
+ */
+ pn->pn_type = TOK_LC;
+ pn->pn_arity = PN_LIST;
+ PN_INIT_LIST(pn);
+ }
+ RecycleTree(pn2, tc);
+ if (pn3 && pn3 != pn2)
+ RecycleTree(pn3, tc);
+ break;
+
+ case TOK_ASSIGN:
+ /*
+ * Compound operators such as *= should be subject to folding, in case
+ * the left-hand side is constant, and so that the decompiler produces
+ * the same string that you get from decompiling a script or function
+ * compiled from that same string. As with +, += is special.
+ */
+ if (pn->pn_op == JSOP_NOP)
+ break;
+ if (pn->pn_op != JSOP_ADD)
+ goto do_binary_op;
+ /* FALL THROUGH */
+
+ case TOK_PLUS:
+ if (pn->pn_arity == PN_LIST) {
+ size_t length, length2;
+ jschar *chars;
+ JSString *str, *str2;
+
+ /*
+ * Any string literal term with all others number or string means
+ * this is a concatenation. If any term is not a string or number
+ * literal, we can't fold.
+ */
+ JS_ASSERT(pn->pn_count > 2);
+ if (pn->pn_extra & PNX_CANTFOLD)
+ return JS_TRUE;
+ if (pn->pn_extra != PNX_STRCAT)
+ goto do_binary_op;
+
+ /* Ok, we're concatenating: convert non-string constant operands. */
+ length = 0;
+ for (pn2 = pn1; pn2; pn2 = pn2->pn_next) {
+ if (!FoldType(cx, pn2, TOK_STRING))
+ return JS_FALSE;
+ /* XXX fold only if all operands convert to string */
+ if (pn2->pn_type != TOK_STRING)
+ return JS_TRUE;
+ length += ATOM_TO_STRING(pn2->pn_atom)->length;
+ }
+
+ /* Allocate a new buffer and string descriptor for the result. */
+ chars = (jschar *) JS_malloc(cx, (length + 1) * sizeof(jschar));
+ if (!chars)
+ return JS_FALSE;
+ str = js_NewString(cx, chars, length, 0);
+ if (!str) {
+ JS_free(cx, chars);
+ return JS_FALSE;
+ }
+
+ /* Fill the buffer, advancing chars and recycling kids as we go. */
+ for (pn2 = pn1; pn2; pn2 = RecycleTree(pn2, tc)) {
+ str2 = ATOM_TO_STRING(pn2->pn_atom);
+ length2 = str2->length;
+ js_strncpy(chars, str2->chars, length2);
+ chars += length2;
+ }
+ *chars = 0;
+
+ /* Atomize the result string and mutate pn to refer to it. */
+ pn->pn_atom = js_AtomizeString(cx, str, 0);
+ if (!pn->pn_atom)
+ return JS_FALSE;
+ pn->pn_type = TOK_STRING;
+ pn->pn_op = JSOP_STRING;
+ pn->pn_arity = PN_NULLARY;
+ break;
+ }
+
+ /* Handle a binary string concatenation. */
+ JS_ASSERT(pn->pn_arity == PN_BINARY);
+ if (pn1->pn_type == TOK_STRING || pn2->pn_type == TOK_STRING) {
+ JSString *left, *right, *str;
+
+ if (!FoldType(cx, (pn1->pn_type != TOK_STRING) ? pn1 : pn2,
+ TOK_STRING)) {
+ return JS_FALSE;
+ }
+ if (pn1->pn_type != TOK_STRING || pn2->pn_type != TOK_STRING)
+ return JS_TRUE;
+ left = ATOM_TO_STRING(pn1->pn_atom);
+ right = ATOM_TO_STRING(pn2->pn_atom);
+ str = js_ConcatStrings(cx, left, right);
+ if (!str)
+ return JS_FALSE;
+ pn->pn_atom = js_AtomizeString(cx, str, 0);
+ if (!pn->pn_atom)
+ return JS_FALSE;
+ pn->pn_type = TOK_STRING;
+ pn->pn_op = JSOP_STRING;
+ pn->pn_arity = PN_NULLARY;
+ RecycleTree(pn1, tc);
+ RecycleTree(pn2, tc);
+ break;
+ }
+
+ /* Can't concatenate string literals, let's try numbers. */
+ goto do_binary_op;
+
+ case TOK_STAR:
+ /* The * in 'import *;' parses as a nullary star node. */
+ if (pn->pn_arity == PN_NULLARY)
+ break;
+ /* FALL THROUGH */
+
+ case TOK_SHOP:
+ case TOK_MINUS:
+ case TOK_DIVOP:
+ do_binary_op:
+ if (pn->pn_arity == PN_LIST) {
+ JS_ASSERT(pn->pn_count > 2);
+ for (pn2 = pn1; pn2; pn2 = pn2->pn_next) {
+ if (!FoldType(cx, pn2, TOK_NUMBER))
+ return JS_FALSE;
+ }
+ for (pn2 = pn1; pn2; pn2 = pn2->pn_next) {
+ /* XXX fold only if all operands convert to number */
+ if (pn2->pn_type != TOK_NUMBER)
+ break;
+ }
+ if (!pn2) {
+ JSOp op = pn->pn_op;
+
+ pn2 = pn1->pn_next;
+ pn3 = pn2->pn_next;
+ if (!FoldBinaryNumeric(cx, op, pn1, pn2, pn, tc))
+ return JS_FALSE;
+ while ((pn2 = pn3) != NULL) {
+ pn3 = pn2->pn_next;
+ if (!FoldBinaryNumeric(cx, op, pn, pn2, pn, tc))
+ return JS_FALSE;
+ }
+ }
+ } else {
+ JS_ASSERT(pn->pn_arity == PN_BINARY);
+ if (!FoldType(cx, pn1, TOK_NUMBER) ||
+ !FoldType(cx, pn2, TOK_NUMBER)) {
+ return JS_FALSE;
+ }
+ if (pn1->pn_type == TOK_NUMBER && pn2->pn_type == TOK_NUMBER) {
+ if (!FoldBinaryNumeric(cx, pn->pn_op, pn1, pn2, pn, tc))
+ return JS_FALSE;
+ }
+ }
+ break;
+
+ case TOK_UNARYOP:
+ while (pn1->pn_type == TOK_RP)
+ pn1 = pn1->pn_kid;
+ if (pn1->pn_type == TOK_NUMBER) {
+ jsdouble d;
+ int32 i;
+
+ /* Operate on one numeric constant. */
+ d = pn1->pn_dval;
+ switch (pn->pn_op) {
+ case JSOP_BITNOT:
+ if (!js_DoubleToECMAInt32(cx, d, &i))
+ return JS_FALSE;
+ d = ~i;
+ break;
+
+ case JSOP_NEG:
+#ifdef HPUX
+ /*
+ * Negation of a zero doesn't produce a negative
+ * zero on HPUX. Perform the operation by bit
+ * twiddling.
+ */
+ JSDOUBLE_HI32(d) ^= JSDOUBLE_HI32_SIGNBIT;
+#else
+ d = -d;
+#endif
+ break;
+
+ case JSOP_POS:
+ break;
+
+ case JSOP_NOT:
+ pn->pn_type = TOK_PRIMARY;
+ pn->pn_op = (d == 0) ? JSOP_TRUE : JSOP_FALSE;
+ pn->pn_arity = PN_NULLARY;
+ /* FALL THROUGH */
+
+ default:
+ /* Return early to dodge the common TOK_NUMBER code. */
+ return JS_TRUE;
+ }
+ pn->pn_type = TOK_NUMBER;
+ pn->pn_op = JSOP_NUMBER;
+ pn->pn_arity = PN_NULLARY;
+ pn->pn_dval = d;
+ RecycleTree(pn1, tc);
+ }
+ break;
+
+#if JS_HAS_XML_SUPPORT
+ case TOK_XMLELEM:
+ case TOK_XMLLIST:
+ case TOK_XMLPTAGC:
+ case TOK_XMLSTAGO:
+ case TOK_XMLETAGO:
+ case TOK_XMLNAME:
+ if (pn->pn_arity == PN_LIST) {
+ JS_ASSERT(pn->pn_type == TOK_XMLLIST || pn->pn_count != 0);
+ if (!FoldXMLConstants(cx, pn, tc))
+ return JS_FALSE;
+ }
+ break;
+
+ case TOK_AT:
+ if (pn1->pn_type == TOK_XMLNAME) {
+ jsval v;
+ JSAtom *atom;
+
+ v = ATOM_KEY(pn1->pn_atom);
+ if (!js_ToAttributeName(cx, &v))
+ return JS_FALSE;
+ JS_ASSERT(!JSVAL_IS_PRIMITIVE(v));
+ atom = js_AtomizeObject(cx, JSVAL_TO_OBJECT(v), 0);
+ if (!atom)
+ return JS_FALSE;
+
+ pn->pn_type = TOK_XMLNAME;
+ pn->pn_op = JSOP_OBJECT;
+ pn->pn_arity = PN_NULLARY;
+ pn->pn_atom = atom;
+ RecycleTree(pn1, tc);
+ }
+ break;
+#endif /* JS_HAS_XML_SUPPORT */
+
+ default:;
+ }
+
+ return JS_TRUE;
+}
diff --git a/third_party/js-1.7/jsparse.h b/third_party/js-1.7/jsparse.h
new file mode 100644
index 0000000..7c23927
--- /dev/null
+++ b/third_party/js-1.7/jsparse.h
@@ -0,0 +1,438 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sw=4 et tw=78:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsparse_h___
+#define jsparse_h___
+/*
+ * JS parser definitions.
+ */
+#include "jsconfig.h"
+#include "jsprvtd.h"
+#include "jspubtd.h"
+#include "jsscan.h"
+
+JS_BEGIN_EXTERN_C
+
+/*
+ * Parsing builds a tree of nodes that directs code generation. This tree is
+ * not a concrete syntax tree in all respects (for example, || and && are left
+ * associative, but (A && B && C) translates into the right-associated tree
+ * <A && <B && C>> so that code generation can emit a left-associative branch
+ * around <B && C> when A is false). Nodes are labeled by token type, with a
+ * JSOp secondary label when needed:
+ *
+ * Label Variant Members
+ * ----- ------- -------
+ * <Definitions>
+ * TOK_FUNCTION func pn_funAtom: atom holding function object containing
+ * arg and var properties. We create the function
+ * object at parse (not emit) time to specialize arg
+ * and var bytecodes early.
+ * pn_body: TOK_LC node for function body statements
+ * pn_flags: TCF_FUN_* flags (see jsemit.h) collected
+ * while parsing the function's body
+ * pn_tryCount: of try statements in function
+ *
+ * <Statements>
+ * TOK_LC list pn_head: list of pn_count statements
+ * TOK_EXPORT list pn_head: list of pn_count TOK_NAMEs or one TOK_STAR
+ * (which is not a multiply node)
+ * TOK_IMPORT list pn_head: list of pn_count sub-trees of the form
+ * a.b.*, a[b].*, a.*, a.b, or a[b] -- but never a.
+ * Each member is expressed with TOK_DOT or TOK_LB.
+ * Each sub-tree's root node has a pn_op in the set
+ * JSOP_IMPORT{ALL,PROP,ELEM}
+ * TOK_IF ternary pn_kid1: cond, pn_kid2: then, pn_kid3: else or null
+ * TOK_SWITCH binary pn_left: discriminant
+ * pn_right: list of TOK_CASE nodes, with at most one
+ * TOK_DEFAULT node, or if there are let bindings
+ * in the top level of the switch body's cases, a
+ * TOK_LEXICALSCOPE node that contains the list of
+ * TOK_CASE nodes.
+ * TOK_CASE, binary pn_left: case expr or null if TOK_DEFAULT
+ * TOK_DEFAULT pn_right: TOK_LC node for this case's statements
+ * pn_val: constant value if lookup or table switch
+ * TOK_WHILE binary pn_left: cond, pn_right: body
+ * TOK_DO binary pn_left: body, pn_right: cond
+ * TOK_FOR binary pn_left: either
+ * for/in loop: a binary TOK_IN node with
+ * pn_left: TOK_VAR or TOK_NAME to left of 'in'
+ * if TOK_VAR, its pn_extra may have PNX_POPVAR
+ * and PNX_FORINVAR bits set
+ * pn_right: object expr to right of 'in'
+ * for(;;) loop: a ternary TOK_RESERVED node with
+ * pn_kid1: init expr before first ';'
+ * pn_kid2: cond expr before second ';'
+ * pn_kid3: update expr after second ';'
+ * any kid may be null
+ * pn_right: body
+ * TOK_THROW unary pn_op: JSOP_THROW, pn_kid: exception
+ * TOK_TRY ternary pn_kid1: try block
+ * pn_kid2: null or TOK_RESERVED list of
+ * TOK_LEXICALSCOPE nodes, each with pn_expr pointing
+ * to a TOK_CATCH node
+ * pn_kid3: null or finally block
+ * TOK_CATCH ternary pn_kid1: TOK_NAME, TOK_RB, or TOK_RC catch var node
+ * (TOK_RB or TOK_RC if destructuring)
+ * pn_kid2: null or the catch guard expression
+ * pn_kid3: catch block statements
+ * TOK_BREAK name pn_atom: label or null
+ * TOK_CONTINUE name pn_atom: label or null
+ * TOK_WITH binary pn_left: head expr, pn_right: body
+ * TOK_VAR list pn_head: list of pn_count TOK_NAME nodes
+ * each name node has
+ * pn_atom: variable name
+ * pn_expr: initializer or null
+ * TOK_RETURN unary pn_kid: return expr or null
+ * TOK_SEMI unary pn_kid: expr or null statement
+ * TOK_COLON name pn_atom: label, pn_expr: labeled statement
+ *
+ * <Expressions>
+ * All left-associated binary trees of the same type are optimized into lists
+ * to avoid recursion when processing expression chains.
+ * TOK_COMMA list pn_head: list of pn_count comma-separated exprs
+ * TOK_ASSIGN binary pn_left: lvalue, pn_right: rvalue
+ * pn_op: JSOP_ADD for +=, etc.
+ * TOK_HOOK ternary pn_kid1: cond, pn_kid2: then, pn_kid3: else
+ * TOK_OR binary pn_left: first in || chain, pn_right: rest of chain
+ * TOK_AND binary pn_left: first in && chain, pn_right: rest of chain
+ * TOK_BITOR binary pn_left: left-assoc | expr, pn_right: ^ expr
+ * TOK_BITXOR binary pn_left: left-assoc ^ expr, pn_right: & expr
+ * TOK_BITAND binary pn_left: left-assoc & expr, pn_right: EQ expr
+ * TOK_EQOP binary pn_left: left-assoc EQ expr, pn_right: REL expr
+ * pn_op: JSOP_EQ, JSOP_NE, JSOP_NEW_EQ, JSOP_NEW_NE
+ * TOK_RELOP binary pn_left: left-assoc REL expr, pn_right: SH expr
+ * pn_op: JSOP_LT, JSOP_LE, JSOP_GT, JSOP_GE
+ * TOK_SHOP binary pn_left: left-assoc SH expr, pn_right: ADD expr
+ * pn_op: JSOP_LSH, JSOP_RSH, JSOP_URSH
+ * TOK_PLUS, binary pn_left: left-assoc ADD expr, pn_right: MUL expr
+ * pn_extra: if a left-associated binary TOK_PLUS
+ * tree has been flattened into a list (see above
+ * under <Expressions>), pn_extra will contain
+ * PNX_STRCAT if at least one list element is a
+ * string literal (TOK_STRING); if such a list has
+ * any non-string, non-number term, pn_extra will
+ * contain PNX_CANTFOLD.
+ * TOK_MINUS pn_op: JSOP_ADD, JSOP_SUB
+ * TOK_STAR, binary pn_left: left-assoc MUL expr, pn_right: UNARY expr
+ * TOK_DIVOP pn_op: JSOP_MUL, JSOP_DIV, JSOP_MOD
+ * TOK_UNARYOP unary pn_kid: UNARY expr, pn_op: JSOP_NEG, JSOP_POS,
+ * JSOP_NOT, JSOP_BITNOT, JSOP_TYPEOF, JSOP_VOID
+ * TOK_INC, unary pn_kid: MEMBER expr
+ * TOK_DEC
+ * TOK_NEW list pn_head: list of ctor, arg1, arg2, ... argN
+ * pn_count: 1 + N (where N is number of args)
+ * ctor is a MEMBER expr
+ * TOK_DELETE unary pn_kid: MEMBER expr
+ * TOK_DOT, name pn_expr: MEMBER expr to left of .
+ * TOK_DBLDOT pn_atom: name to right of .
+ * TOK_LB binary pn_left: MEMBER expr to left of [
+ * pn_right: expr between [ and ]
+ * TOK_LP list pn_head: list of call, arg1, arg2, ... argN
+ * pn_count: 1 + N (where N is number of args)
+ * call is a MEMBER expr naming a callable object
+ * TOK_RB list pn_head: list of pn_count array element exprs
+ * [,,] holes are represented by TOK_COMMA nodes
+ * #n=[...] produces TOK_DEFSHARP at head of list
+ * pn_extra: PNX_ENDCOMMA if extra comma at end
+ * TOK_RC list pn_head: list of pn_count TOK_COLON nodes where
+ * each has pn_left: property id, pn_right: value
+ * #n={...} produces TOK_DEFSHARP at head of list
+ * TOK_DEFSHARP unary pn_num: jsint value of n in #n=
+ * pn_kid: null for #n=[...] and #n={...}, primary
+ * if #n=primary for function, paren, name, object
+ * literal expressions
+ * TOK_USESHARP nullary pn_num: jsint value of n in #n#
+ * TOK_RP unary pn_kid: parenthesized expression
+ * TOK_NAME, name pn_atom: name, string, or object atom
+ * TOK_STRING, pn_op: JSOP_NAME, JSOP_STRING, or JSOP_OBJECT, or
+ * JSOP_REGEXP
+ * TOK_OBJECT If JSOP_NAME, pn_op may be JSOP_*ARG or JSOP_*VAR
+ * with pn_slot >= 0 and pn_attrs telling const-ness
+ * TOK_NUMBER dval pn_dval: double value of numeric literal
+ * TOK_PRIMARY nullary pn_op: JSOp bytecode
+ *
+ * <E4X node descriptions>
+ * TOK_ANYNAME nullary pn_op: JSOP_ANYNAME
+ * pn_atom: cx->runtime->atomState.starAtom
+ * TOK_AT unary pn_op: JSOP_TOATTRNAME; pn_kid attribute id/expr
+ * TOK_DBLCOLON binary pn_op: JSOP_QNAME
+ * pn_left: TOK_ANYNAME or TOK_NAME node
+ * pn_right: TOK_STRING "*" node, or expr within []
+ * name pn_op: JSOP_QNAMECONST
+ * pn_expr: TOK_ANYNAME or TOK_NAME left operand
+ * pn_atom: name on right of ::
+ * TOK_XMLELEM list XML element node
+ * pn_head: start tag, content1, ... contentN, end tag
+ * pn_count: 2 + N where N is number of content nodes
+ * N may be > x.length() if {expr} embedded
+ * TOK_XMLLIST list XML list node
+ * pn_head: content1, ... contentN
+ * TOK_XMLSTAGO, list XML start, end, and point tag contents
+ * TOK_XMLETAGO, pn_head: tag name or {expr}, ... XML attrs ...
+ * TOK_XMLPTAGC
+ * TOK_XMLNAME nullary pn_atom: XML name, with no {expr} embedded
+ * TOK_XMLNAME list pn_head: tag name or {expr}, ... name or {expr}
+ * TOK_XMLATTR, nullary pn_atom: attribute value string; pn_op: JSOP_STRING
+ * TOK_XMLCDATA,
+ * TOK_XMLCOMMENT
+ * TOK_XMLPI nullary pn_atom: XML processing instruction target
+ * pn_atom2: XML PI content, or null if no content
+ * TOK_XMLTEXT nullary pn_atom: marked-up text, or null if empty string
+ * TOK_LC unary {expr} in XML tag or content; pn_kid is expr
+ *
+ * So an XML tag with no {expr} and three attributes is a list with the form:
+ *
+ * (tagname attrname1 attrvalue1 attrname2 attrvalue2 attrname3 attrvalue3)
+ *
+ * An XML tag with embedded expressions like so:
+ *
+ * <name1{expr1} name2{expr2}name3={expr3}>
+ *
+ * would have the form:
+ *
+ * ((name1 {expr1}) (name2 {expr2} name3) {expr3})
+ *
+ * where () bracket a list with elements separated by spaces, and {expr} is a
+ * TOK_LC unary node with expr as its kid.
+ *
+ * Thus, the attribute name/value pairs occupy successive odd and even list
+ * locations, where pn_head is the TOK_XMLNAME node at list location 0. The
+ * parser builds the same sort of structures for elements:
+ *
+ * <a x={x}>Hi there!<b y={y}>How are you?</b><answer>{x + y}</answer></a>
+ *
+ * translates to:
+ *
+ * ((a x {x}) 'Hi there!' ((b y {y}) 'How are you?') ((answer) {x + y}))
+ *
+ * <Non-E4X node descriptions, continued>
+ *
+ * Label Variant Members
+ * ----- ------- -------
+ * TOK_LEXICALSCOPE name pn_op: JSOP_LEAVEBLOCK or JSOP_LEAVEBLOCKEXPR
+ * pn_atom: block object
+ * pn_expr: block body
+ * TOK_ARRAYCOMP list pn_head: list of pn_count (1 or 2) elements
+ * if pn_count is 2, first element is #n=[...]
+ * last element is block enclosing for loop(s)
+ * and optionally if-guarded TOK_ARRAYPUSH
+ * pn_extra: stack slot, used during code gen
+ * TOK_ARRAYPUSH unary pn_op: JSOP_ARRAYCOMP
+ * pn_kid: array comprehension expression
+ */
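+/*
+ * For example, under the labels above the expression a + b * 2 parses to a
+ * binary TOK_PLUS node whose pn_left is a TOK_NAME node for a and whose
+ * pn_right is a binary TOK_STAR node (pn_left: TOK_NAME b, pn_right:
+ * TOK_NUMBER 2).
+ */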
+typedef enum JSParseNodeArity {
+ PN_FUNC = -3,
+ PN_LIST = -2,
+ PN_TERNARY = 3,
+ PN_BINARY = 2,
+ PN_UNARY = 1,
+ PN_NAME = -1,
+ PN_NULLARY = 0
+} JSParseNodeArity;
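+
+/*
+ * The positive arities match the number of kid pointers in the corresponding
+ * ternary, binary, and unary variants of pn_u below; PN_NULLARY nodes carry
+ * no kids, and the negative values mark the func, list, and name variants,
+ * which hold bookkeeping fields rather than a fixed kid count.
+ */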
+
+struct JSParseNode {
+ uint16 pn_type;
+ uint8 pn_op;
+ int8 pn_arity;
+ JSTokenPos pn_pos;
+ ptrdiff_t pn_offset; /* first generated bytecode offset */
+ union {
+ struct { /* TOK_FUNCTION node */
+ JSAtom *funAtom; /* atomized function object */
+ JSParseNode *body; /* TOK_LC list of statements */
+ uint32 flags; /* accumulated tree context flags */
+ uint32 tryCount; /* count of try statements in body */
+ } func;
+ struct { /* list of next-linked nodes */
+ JSParseNode *head; /* first node in list */
+ JSParseNode **tail; /* ptr to ptr to last node in list */
+ uint32 count; /* number of nodes in list */
+ uint32 extra; /* extra flags, see below */
+ } list;
+ struct { /* ternary: if, for(;;), ?: */
+ JSParseNode *kid1; /* condition, discriminant, etc. */
+ JSParseNode *kid2; /* then-part, case list, etc. */
+ JSParseNode *kid3; /* else-part, default case, etc. */
+ } ternary;
+ struct { /* two kids if binary */
+ JSParseNode *left;
+ JSParseNode *right;
+ jsval val; /* switch case value */
+ } binary;
+ struct { /* one kid if unary */
+ JSParseNode *kid;
+ jsint num; /* -1 or sharp variable number */
+ } unary;
+ struct { /* name, labeled statement, etc. */
+ JSAtom *atom; /* name or label atom, null if slot */
+ JSParseNode *expr; /* object or initializer */
+ jsint slot; /* -1 or arg or local var slot */
+ uintN attrs; /* attributes if local var or const */
+ } name;
+ struct {
+ JSAtom *atom; /* first atom in pair */
+ JSAtom *atom2; /* second atom in pair or null */
+ } apair;
+ jsdouble dval; /* aligned numeric literal value */
+ } pn_u;
+ JSParseNode *pn_next; /* to align dval and pn_u on RISCs */
+ JSTokenStream *pn_ts; /* token stream for error reports */
+ JSAtom *pn_source; /* saved source for decompilation */
+};
+
+#define pn_funAtom pn_u.func.funAtom
+#define pn_body pn_u.func.body
+#define pn_flags pn_u.func.flags
+#define pn_tryCount pn_u.func.tryCount
+#define pn_head pn_u.list.head
+#define pn_tail pn_u.list.tail
+#define pn_count pn_u.list.count
+#define pn_extra pn_u.list.extra
+#define pn_kid1 pn_u.ternary.kid1
+#define pn_kid2 pn_u.ternary.kid2
+#define pn_kid3 pn_u.ternary.kid3
+#define pn_left pn_u.binary.left
+#define pn_right pn_u.binary.right
+#define pn_val pn_u.binary.val
+#define pn_kid pn_u.unary.kid
+#define pn_num pn_u.unary.num
+#define pn_atom pn_u.name.atom
+#define pn_expr pn_u.name.expr
+#define pn_slot pn_u.name.slot
+#define pn_attrs pn_u.name.attrs
+#define pn_dval pn_u.dval
+#define pn_atom2 pn_u.apair.atom2
+
+/* PN_LIST pn_extra flags. */
+#define PNX_STRCAT 0x01 /* TOK_PLUS list has string term */
+#define PNX_CANTFOLD 0x02 /* TOK_PLUS list has unfoldable term */
+#define PNX_POPVAR 0x04 /* TOK_VAR last result needs popping */
+#define PNX_FORINVAR 0x08 /* TOK_VAR is left kid of TOK_IN node,
+ which is left kid of TOK_FOR */
+#define PNX_ENDCOMMA 0x10 /* array literal has comma at end */
+#define PNX_XMLROOT 0x20 /* top-most node in XML literal tree */
+#define PNX_GROUPINIT 0x40 /* var [a, b] = [c, d]; unit list */
+#define PNX_NEEDBRACES 0x80 /* braces necessary due to closure */
+
+/*
+ * Move pn2 into pn, preserving pn->pn_pos and pn->pn_offset and handing off
+ * any kids in pn2->pn_u, by clearing pn2.
+ */
+#define PN_MOVE_NODE(pn, pn2) \
+ JS_BEGIN_MACRO \
+ (pn)->pn_type = (pn2)->pn_type; \
+ (pn)->pn_op = (pn2)->pn_op; \
+ (pn)->pn_arity = (pn2)->pn_arity; \
+ (pn)->pn_u = (pn2)->pn_u; \
+ PN_CLEAR_NODE(pn2); \
+ JS_END_MACRO
+
+#define PN_CLEAR_NODE(pn) \
+ JS_BEGIN_MACRO \
+ (pn)->pn_type = TOK_EOF; \
+ (pn)->pn_op = JSOP_NOP; \
+ (pn)->pn_arity = PN_NULLARY; \
+ JS_END_MACRO
+
+/* True if pn is a parsenode representing a literal constant. */
+#define PN_IS_CONSTANT(pn) \
+ ((pn)->pn_type == TOK_NUMBER || \
+ (pn)->pn_type == TOK_STRING || \
+ ((pn)->pn_type == TOK_PRIMARY && (pn)->pn_op != JSOP_THIS))
+
+/*
+ * Compute a pointer to the last JSParseNode element in a singly-linked list.
+ * NB: list must be non-empty for correct PN_LAST usage!
+ */
+#define PN_LAST(list) \
+ ((JSParseNode *)((char *)(list)->pn_tail - offsetof(JSParseNode, pn_next)))
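+
+/*
+ * PN_LAST works because, for a non-empty list, pn_tail points at the pn_next
+ * field of the last node (see PN_INIT_LIST_1 and PN_APPEND below), so backing
+ * up by offsetof(JSParseNode, pn_next) recovers that node's address.
+ */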
+
+#define PN_INIT_LIST(list) \
+ JS_BEGIN_MACRO \
+ (list)->pn_head = NULL; \
+ (list)->pn_tail = &(list)->pn_head; \
+ (list)->pn_count = (list)->pn_extra = 0; \
+ JS_END_MACRO
+
+#define PN_INIT_LIST_1(list, pn) \
+ JS_BEGIN_MACRO \
+ (list)->pn_head = (pn); \
+ (list)->pn_tail = &(pn)->pn_next; \
+ (list)->pn_count = 1; \
+ (list)->pn_extra = 0; \
+ JS_END_MACRO
+
+#define PN_APPEND(list, pn) \
+ JS_BEGIN_MACRO \
+ *(list)->pn_tail = (pn); \
+ (list)->pn_tail = &(pn)->pn_next; \
+ (list)->pn_count++; \
+ JS_END_MACRO
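+
+/*
+ * Typical usage, as in the object initialiser parser in jsparse.c:
+ *
+ *   PN_INIT_LIST(pn);
+ *   while (another TOK_COLON property node pn2 is parsed)
+ *       PN_APPEND(pn, pn2);
+ *
+ * which leaves pn->pn_count equal to the number of appended kids.
+ */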
+
+/*
+ * Parse a top-level JS script.
+ *
+ * The caller must prevent the GC from running while this function is active,
+ * because atoms and function newborns are not rooted yet.
+ */
+extern JS_FRIEND_API(JSParseNode *)
+js_ParseTokenStream(JSContext *cx, JSObject *chain, JSTokenStream *ts);
+
+extern JS_FRIEND_API(JSBool)
+js_CompileTokenStream(JSContext *cx, JSObject *chain, JSTokenStream *ts,
+ JSCodeGenerator *cg);
+
+extern JSBool
+js_CompileFunctionBody(JSContext *cx, JSTokenStream *ts, JSFunction *fun);
+
+extern JSBool
+js_FoldConstants(JSContext *cx, JSParseNode *pn, JSTreeContext *tc);
+
+#if JS_HAS_XML_SUPPORT
+JS_FRIEND_API(JSParseNode *)
+js_ParseXMLTokenStream(JSContext *cx, JSObject *chain, JSTokenStream *ts,
+ JSBool allowList);
+#endif
+
+JS_END_EXTERN_C
+
+#endif /* jsparse_h___ */
diff --git a/third_party/js-1.7/jsprf.c b/third_party/js-1.7/jsprf.c
new file mode 100644
index 0000000..416c16c
--- /dev/null
+++ b/third_party/js-1.7/jsprf.c
@@ -0,0 +1,1264 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+** Portable safe sprintf code.
+**
+** Author: Kipp E.B. Hickman
+*/
+#include "jsstddef.h"
+#include <stdarg.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include "jsprf.h"
+#include "jslong.h"
+#include "jsutil.h" /* Added by JSIFY */
+#include "jspubtd.h"
+#include "jsstr.h"
+
+/*
+** Note: on some platforms va_list is defined as an array,
+** and requires array notation.
+*/
+#ifdef HAVE_VA_COPY
+#define VARARGS_ASSIGN(foo, bar) VA_COPY(foo,bar)
+#elif defined(HAVE_VA_LIST_AS_ARRAY)
+#define VARARGS_ASSIGN(foo, bar) foo[0] = bar[0]
+#else
+#define VARARGS_ASSIGN(foo, bar) (foo) = (bar)
+#endif
+
+/*
+** WARNING: This code may *NOT* call JS_LOG (because JS_LOG calls it)
+*/
+
+/*
+** XXX This needs to be internationalized!
+*/
+
+typedef struct SprintfStateStr SprintfState;
+
+struct SprintfStateStr {
+ int (*stuff)(SprintfState *ss, const char *sp, JSUint32 len);
+
+ char *base;
+ char *cur;
+ JSUint32 maxlen;
+
+ int (*func)(void *arg, const char *sp, JSUint32 len);
+ void *arg;
+};
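+
+/*
+** Every cvt_* helper below funnels its output through ss->stuff and treats
+** a negative return from it as an error to propagate.
+*/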
+
+/*
+** Numbered Argument State
+*/
+struct NumArgState{
+ int type; /* type of the current ap */
+ va_list ap; /* point to the corresponding position on ap */
+};
+
+#define NAS_DEFAULT_NUM 20 /* default number of entries in the NumArgState array */
+
+
+#define TYPE_INT16 0
+#define TYPE_UINT16 1
+#define TYPE_INTN 2
+#define TYPE_UINTN 3
+#define TYPE_INT32 4
+#define TYPE_UINT32 5
+#define TYPE_INT64 6
+#define TYPE_UINT64 7
+#define TYPE_STRING 8
+#define TYPE_DOUBLE 9
+#define TYPE_INTSTR 10
+#define TYPE_WSTRING 11
+#define TYPE_UNKNOWN 20
+
+#define FLAG_LEFT 0x1
+#define FLAG_SIGNED 0x2
+#define FLAG_SPACED 0x4
+#define FLAG_ZEROS 0x8
+#define FLAG_NEG 0x10
+
+/*
+** Fill into the buffer using the data in src
+*/
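+/*
+** For example, fill2(ss, "ab", 2, 5, 0) right-adjusts by emitting three
+** spaces before "ab"; with FLAG_ZEROS the padding becomes '0', and with
+** FLAG_LEFT the padding follows the source data instead.
+*/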
+static int fill2(SprintfState *ss, const char *src, int srclen, int width,
+ int flags)
+{
+ char space = ' ';
+ int rv;
+
+ width -= srclen;
+ if ((width > 0) && ((flags & FLAG_LEFT) == 0)) { /* Right adjusting */
+ if (flags & FLAG_ZEROS) {
+ space = '0';
+ }
+ while (--width >= 0) {
+ rv = (*ss->stuff)(ss, &space, 1);
+ if (rv < 0) {
+ return rv;
+ }
+ }
+ }
+
+ /* Copy out the source data */
+ rv = (*ss->stuff)(ss, src, (JSUint32)srclen);
+ if (rv < 0) {
+ return rv;
+ }
+
+ if ((width > 0) && ((flags & FLAG_LEFT) != 0)) { /* Left adjusting */
+ while (--width >= 0) {
+ rv = (*ss->stuff)(ss, &space, 1);
+ if (rv < 0) {
+ return rv;
+ }
+ }
+ }
+ return 0;
+}
+
+/*
+** Fill a number. The order is: optional-sign zero-filling conversion-digits
+*/
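+/*
+** For example, fill_n(ss, "42", 2, 6, 4, 0, FLAG_NEG) emits " -0042": one
+** leading space to reach the field width, the minus sign, two zeros from
+** the precision, and then the converted digits.
+*/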
+static int fill_n(SprintfState *ss, const char *src, int srclen, int width,
+ int prec, int type, int flags)
+{
+ int zerowidth = 0;
+ int precwidth = 0;
+ int signwidth = 0;
+ int leftspaces = 0;
+ int rightspaces = 0;
+ int cvtwidth;
+ int rv;
+ char sign;
+
+ if ((type & 1) == 0) {
+ if (flags & FLAG_NEG) {
+ sign = '-';
+ signwidth = 1;
+ } else if (flags & FLAG_SIGNED) {
+ sign = '+';
+ signwidth = 1;
+ } else if (flags & FLAG_SPACED) {
+ sign = ' ';
+ signwidth = 1;
+ }
+ }
+ cvtwidth = signwidth + srclen;
+
+ if (prec > 0) {
+ if (prec > srclen) {
+ precwidth = prec - srclen; /* Need zero filling */
+ cvtwidth += precwidth;
+ }
+ }
+
+ if ((flags & FLAG_ZEROS) && (prec < 0)) {
+ if (width > cvtwidth) {
+ zerowidth = width - cvtwidth; /* Zero filling */
+ cvtwidth += zerowidth;
+ }
+ }
+
+ if (flags & FLAG_LEFT) {
+ if (width > cvtwidth) {
+ /* Space filling on the right (i.e. left adjusting) */
+ rightspaces = width - cvtwidth;
+ }
+ } else {
+ if (width > cvtwidth) {
+ /* Space filling on the left (i.e. right adjusting) */
+ leftspaces = width - cvtwidth;
+ }
+ }
+ while (--leftspaces >= 0) {
+ rv = (*ss->stuff)(ss, " ", 1);
+ if (rv < 0) {
+ return rv;
+ }
+ }
+ if (signwidth) {
+ rv = (*ss->stuff)(ss, &sign, 1);
+ if (rv < 0) {
+ return rv;
+ }
+ }
+ while (--precwidth >= 0) {
+ rv = (*ss->stuff)(ss, "0", 1);
+ if (rv < 0) {
+ return rv;
+ }
+ }
+ while (--zerowidth >= 0) {
+ rv = (*ss->stuff)(ss, "0", 1);
+ if (rv < 0) {
+ return rv;
+ }
+ }
+ rv = (*ss->stuff)(ss, src, (JSUint32)srclen);
+ if (rv < 0) {
+ return rv;
+ }
+ while (--rightspaces >= 0) {
+ rv = (*ss->stuff)(ss, " ", 1);
+ if (rv < 0) {
+ return rv;
+ }
+ }
+ return 0;
+}
+
+/*
+** Convert a long into its printable form
+*/
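+/*
+** For example, cvt_l(ss, 255, 0, -1, 16, 0, 0, "0123456789abcdef") builds
+** the digits "ff" backwards in cvtbuf and hands them to fill_n, which adds
+** any sign, zero fill, or field padding requested by the flags.
+*/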
+static int cvt_l(SprintfState *ss, long num, int width, int prec, int radix,
+ int type, int flags, const char *hexp)
+{
+ char cvtbuf[100];
+ char *cvt;
+ int digits;
+
+ /* according to the man page this needs to happen */
+ if ((prec == 0) && (num == 0)) {
+ return 0;
+ }
+
+ /*
+ ** Converting decimal is a little tricky. In the unsigned case we
+ ** need to stop when we hit 10 digits. In the signed case, we can
+ ** stop when the number is zero.
+ */
+ cvt = cvtbuf + sizeof(cvtbuf);
+ digits = 0;
+ while (num) {
+ int digit = (((unsigned long)num) % radix) & 0xF;
+ *--cvt = hexp[digit];
+ digits++;
+ num = (long)(((unsigned long)num) / radix);
+ }
+ if (digits == 0) {
+ *--cvt = '0';
+ digits++;
+ }
+
+ /*
+ ** Now that we have the number converted without its sign, deal with
+ ** the sign and zero padding.
+ */
+ return fill_n(ss, cvt, digits, width, prec, type, flags);
+}
+
+/*
+** Convert a 64-bit integer into its printable form
+*/
+static int cvt_ll(SprintfState *ss, JSInt64 num, int width, int prec, int radix,
+ int type, int flags, const char *hexp)
+{
+ char cvtbuf[100];
+ char *cvt;
+ int digits;
+ JSInt64 rad;
+
+ /* according to the man page this needs to happen */
+ if ((prec == 0) && (JSLL_IS_ZERO(num))) {
+ return 0;
+ }
+
+ /*
+ ** Converting decimal is a little tricky. In the unsigned case we
+ ** need to stop when we hit 10 digits. In the signed case, we can
+ ** stop when the number is zero.
+ */
+ JSLL_I2L(rad, radix);
+ cvt = cvtbuf + sizeof(cvtbuf);
+ digits = 0;
+ while (!JSLL_IS_ZERO(num)) {
+ JSInt32 digit;
+ JSInt64 quot, rem;
+ JSLL_UDIVMOD(&quot, &rem, num, rad);
+ JSLL_L2I(digit, rem);
+ *--cvt = hexp[digit & 0xf];
+ digits++;
+ num = quot;
+ }
+ if (digits == 0) {
+ *--cvt = '0';
+ digits++;
+ }
+
+ /*
+ ** Now that we have the number converted without its sign, deal with
+ ** the sign and zero padding.
+ */
+ return fill_n(ss, cvt, digits, width, prec, type, flags);
+}
+
+/*
+** Convert a double precision floating point number into its printable
+** form.
+**
+** XXX stop using sprintf to convert floating point
+*/
+static int cvt_f(SprintfState *ss, double d, const char *fmt0, const char *fmt1)
+{
+ char fin[20];
+ char fout[300];
+ int amount = fmt1 - fmt0;
+
+ JS_ASSERT((amount > 0) && (amount < (int)sizeof(fin)));
+ if (amount >= (int)sizeof(fin)) {
+ /* Totally bogus % command to sprintf. Just ignore it */
+ return 0;
+ }
+ memcpy(fin, fmt0, (size_t)amount);
+ fin[amount] = 0;
+
+ /* Convert floating point using the native sprintf code */
+#ifdef DEBUG
+ {
+ const char *p = fin;
+ while (*p) {
+ JS_ASSERT(*p != 'L');
+ p++;
+ }
+ }
+#endif
+ sprintf(fout, fin, d);
+
+ /*
+ ** This assert will catch overflow's of fout, when building with
+ ** debugging on. At least this way we can track down the evil piece
+ ** of calling code and fix it!
+ */
+ JS_ASSERT(strlen(fout) < sizeof(fout));
+
+ return (*ss->stuff)(ss, fout, strlen(fout));
+}
+
+/*
+** Convert a string into its printable form. "width" is the output
+** width. "prec" is the maximum number of characters of "s" to output,
+** where -1 means until NUL.
+*/
+static int cvt_s(SprintfState *ss, const char *s, int width, int prec,
+ int flags)
+{
+ int slen;
+
+ if (prec == 0)
+ return 0;
+
+ /* Limit string length by precision value */
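+ /* When s is null, 6 == strlen("(null)"), the placeholder passed to fill2 below. */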
+ slen = s ? strlen(s) : 6;
+ if (prec > 0) {
+ if (prec < slen) {
+ slen = prec;
+ }
+ }
+
+ /* and away we go */
+ return fill2(ss, s ? s : "(null)", slen, width, flags);
+}
+
+static int cvt_ws(SprintfState *ss, const jschar *ws, int width, int prec,
+ int flags)
+{
+ int result;
+ /*
+ * Supply NULL as the JSContext; errors are not reported,
+ * and malloc() is used to allocate the buffer.
+ */
+ if (ws) {
+ int slen = js_strlen(ws);
+ char *s = js_DeflateString(NULL, ws, slen);
+ if (!s)
+ return -1; /* JSStuffFunc error indicator. */
+ result = cvt_s(ss, s, width, prec, flags);
+ free(s);
+ } else {
+ result = cvt_s(ss, NULL, width, prec, flags);
+ }
+ return result;
+}
+
+/*
+** BuildArgArray supports the numbered-argument sprintf style,
+** for example:
+** fmt = "%4$i, %2$d, %3$s, %1$d";
+** The argument numbers must start from 1 and have no gaps among them.
+*/
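+/*
+** BuildArgArray makes three passes over fmt: count the numbered conversions,
+** record each argument's type in nas[], then walk the va_list once so that
+** each nas[cn].ap captures the position of its argument.
+*/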
+
+static struct NumArgState* BuildArgArray( const char *fmt, va_list ap, int* rv, struct NumArgState* nasArray )
+{
+ int number = 0, cn = 0, i;
+ const char *p;
+ char c;
+ struct NumArgState *nas;
+
+
+ /*
+ ** first pass:
+** determine how many legal % conversions we have, then allocate space
+ */
+
+ p = fmt;
+ *rv = 0;
+ i = 0;
+ while( ( c = *p++ ) != 0 ){
+ if( c != '%' )
+ continue;
+ if( ( c = *p++ ) == '%' ) /* skip %% case */
+ continue;
+
+ while( c != 0 ){
+ if( c > '9' || c < '0' ){
+ if( c == '$' ){ /* numbered argument case */
+ if( i > 0 ){
+ *rv = -1;
+ return NULL;
+ }
+ number++;
+ } else { /* non-numbered argument case */
+ if( number > 0 ){
+ *rv = -1;
+ return NULL;
+ }
+ i = 1;
+ }
+ break;
+ }
+
+ c = *p++;
+ }
+ }
+
+ if( number == 0 ){
+ return NULL;
+ }
+
+
+ if( number > NAS_DEFAULT_NUM ){
+ nas = (struct NumArgState*)malloc( number * sizeof( struct NumArgState ) );
+ if( !nas ){
+ *rv = -1;
+ return NULL;
+ }
+ } else {
+ nas = nasArray;
+ }
+
+ for( i = 0; i < number; i++ ){
+ nas[i].type = TYPE_UNKNOWN;
+ }
+
+
+ /*
+ ** second pass:
+ ** set nas[].type
+ */
+
+ p = fmt;
+ while( ( c = *p++ ) != 0 ){
+ if( c != '%' ) continue;
+ c = *p++;
+ if( c == '%' ) continue;
+
+ cn = 0;
+ while( c && c != '$' ){ /* should improve error check later */
+ cn = cn*10 + c - '0';
+ c = *p++;
+ }
+
+ if( !c || cn < 1 || cn > number ){
+ *rv = -1;
+ break;
+ }
+
+ /* nas[cn] starts from 0, and make sure nas[cn].type is not assigned */
+ cn--;
+ if( nas[cn].type != TYPE_UNKNOWN )
+ continue;
+
+ c = *p++;
+
+ /* width */
+ if (c == '*') {
+ /* the '*' width is not supported, because that argument would not be numbered */
+ *rv = -1;
+ break;
+ }
+
+ while ((c >= '0') && (c <= '9')) {
+ c = *p++;
+ }
+
+ /* precision */
+ if (c == '.') {
+ c = *p++;
+ if (c == '*') {
+ /* the '*' precision is not supported, because that argument would not be numbered */
+ *rv = -1;
+ break;
+ }
+
+ while ((c >= '0') && (c <= '9')) {
+ c = *p++;
+ }
+ }
+
+ /* size */
+ nas[cn].type = TYPE_INTN;
+ if (c == 'h') {
+ nas[cn].type = TYPE_INT16;
+ c = *p++;
+ } else if (c == 'L') {
+ /* XXX not quite sure here */
+ nas[cn].type = TYPE_INT64;
+ c = *p++;
+ } else if (c == 'l') {
+ nas[cn].type = TYPE_INT32;
+ c = *p++;
+ if (c == 'l') {
+ nas[cn].type = TYPE_INT64;
+ c = *p++;
+ }
+ }
+
+ /* format */
+ switch (c) {
+ case 'd':
+ case 'c':
+ case 'i':
+ case 'o':
+ case 'u':
+ case 'x':
+ case 'X':
+ break;
+
+ case 'e':
+ case 'f':
+ case 'g':
+ nas[ cn ].type = TYPE_DOUBLE;
+ break;
+
+ case 'p':
+ /* XXX should use cpp */
+ if (sizeof(void *) == sizeof(JSInt32)) {
+ nas[ cn ].type = TYPE_UINT32;
+ } else if (sizeof(void *) == sizeof(JSInt64)) {
+ nas[ cn ].type = TYPE_UINT64;
+ } else if (sizeof(void *) == sizeof(JSIntn)) {
+ nas[ cn ].type = TYPE_UINTN;
+ } else {
+ nas[ cn ].type = TYPE_UNKNOWN;
+ }
+ break;
+
+ case 'C':
+ case 'S':
+ case 'E':
+ case 'G':
+ /* XXX not supported I suppose */
+ JS_ASSERT(0);
+ nas[ cn ].type = TYPE_UNKNOWN;
+ break;
+
+ case 's':
+ nas[ cn ].type = (nas[ cn ].type == TYPE_UINT16) ? TYPE_WSTRING : TYPE_STRING;
+ break;
+
+ case 'n':
+ nas[ cn ].type = TYPE_INTSTR;
+ break;
+
+ default:
+ JS_ASSERT(0);
+ nas[ cn ].type = TYPE_UNKNOWN;
+ break;
+ }
+
+ /* make sure we ended up with a legal parameter type */
+ if( nas[ cn ].type == TYPE_UNKNOWN ){
+ *rv = -1;
+ break;
+ }
+ }
+
+
+ /*
+ ** third pass
+ ** fill the nas[cn].ap
+ */
+
+ if( *rv < 0 ){
+ if( nas != nasArray )
+ free( nas );
+ return NULL;
+ }
+
+ cn = 0;
+ while( cn < number ){
+ if( nas[cn].type == TYPE_UNKNOWN ){
+ cn++;
+ continue;
+ }
+
+ VARARGS_ASSIGN(nas[cn].ap, ap);
+
+ switch( nas[cn].type ){
+ case TYPE_INT16:
+ case TYPE_UINT16:
+ case TYPE_INTN:
+ case TYPE_UINTN: (void)va_arg( ap, JSIntn ); break;
+
+ case TYPE_INT32: (void)va_arg( ap, JSInt32 ); break;
+
+ case TYPE_UINT32: (void)va_arg( ap, JSUint32 ); break;
+
+ case TYPE_INT64: (void)va_arg( ap, JSInt64 ); break;
+
+ case TYPE_UINT64: (void)va_arg( ap, JSUint64 ); break;
+
+ case TYPE_STRING: (void)va_arg( ap, char* ); break;
+
+ case TYPE_WSTRING: (void)va_arg( ap, jschar* ); break;
+
+ case TYPE_INTSTR: (void)va_arg( ap, JSIntn* ); break;
+
+ case TYPE_DOUBLE: (void)va_arg( ap, double ); break;
+
+ default:
+ if( nas != nasArray )
+ free( nas );
+ *rv = -1;
+ return NULL;
+ }
+
+ cn++;
+ }
+
+
+ return nas;
+}
+
+/*
+** The workhorse sprintf code.
+*/
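+/*
+** For example, dosprintf handles "%2$s %1$d" by first letting BuildArgArray
+** capture every argument position, then resetting ap to nas[i-1].ap each
+** time a numbered conversion is encountered below.
+*/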
+static int dosprintf(SprintfState *ss, const char *fmt, va_list ap)
+{
+ char c;
+ int flags, width, prec, radix, type;
+ union {
+ char ch;
+ jschar wch;
+ int i;
+ long l;
+ JSInt64 ll;
+ double d;
+ const char *s;
+ const jschar* ws;
+ int *ip;
+ } u;
+ const char *fmt0;
+ static char *hex = "0123456789abcdef";
+ static char *HEX = "0123456789ABCDEF";
+ char *hexp;
+ int rv, i;
+ struct NumArgState *nas = NULL;
+ struct NumArgState nasArray[ NAS_DEFAULT_NUM ];
+ char pattern[20];
+    const char *dolPt = NULL;          /* in "%4$.2f", dolPt will point to the '.' */
+#ifdef JS_C_STRINGS_ARE_UTF8
+ char utf8buf[6];
+ int utf8len;
+#endif
+
+ /*
+ ** build an argument array, IF the fmt is numbered argument
+ ** list style, to contain the Numbered Argument list pointers
+ */
+
+ nas = BuildArgArray( fmt, ap, &rv, nasArray );
+ if( rv < 0 ){
+        /* the fmt contains an invalid numbered-argument format, jliu@netscape.com */
+ JS_ASSERT(0);
+ return rv;
+ }
+
+ while ((c = *fmt++) != 0) {
+ if (c != '%') {
+ rv = (*ss->stuff)(ss, fmt - 1, 1);
+ if (rv < 0) {
+ return rv;
+ }
+ continue;
+ }
+ fmt0 = fmt - 1;
+
+ /*
+ ** Gobble up the % format string. Hopefully we have handled all
+ ** of the strange cases!
+ */
+ flags = 0;
+ c = *fmt++;
+ if (c == '%') {
+ /* quoting a % with %% */
+ rv = (*ss->stuff)(ss, fmt - 1, 1);
+ if (rv < 0) {
+ return rv;
+ }
+ continue;
+ }
+
+ if( nas != NULL ){
+ /* the fmt contains the Numbered Arguments feature */
+ i = 0;
+            while( c && c != '$' ){         /* should improve error check later */
+ i = ( i * 10 ) + ( c - '0' );
+ c = *fmt++;
+ }
+
+ if( nas[i-1].type == TYPE_UNKNOWN ){
+ if( nas && ( nas != nasArray ) )
+ free( nas );
+ return -1;
+ }
+
+ ap = nas[i-1].ap;
+ dolPt = fmt;
+ c = *fmt++;
+ }
+
+ /*
+ * Examine optional flags. Note that we do not implement the
+ * '#' flag of sprintf(). The ANSI C spec. of the '#' flag is
+ * somewhat ambiguous and not ideal, which is perhaps why
+ * the various sprintf() implementations are inconsistent
+ * on this feature.
+ */
+ while ((c == '-') || (c == '+') || (c == ' ') || (c == '0')) {
+ if (c == '-') flags |= FLAG_LEFT;
+ if (c == '+') flags |= FLAG_SIGNED;
+ if (c == ' ') flags |= FLAG_SPACED;
+ if (c == '0') flags |= FLAG_ZEROS;
+ c = *fmt++;
+ }
+ if (flags & FLAG_SIGNED) flags &= ~FLAG_SPACED;
+ if (flags & FLAG_LEFT) flags &= ~FLAG_ZEROS;
+
+ /* width */
+ if (c == '*') {
+ c = *fmt++;
+ width = va_arg(ap, int);
+ } else {
+ width = 0;
+ while ((c >= '0') && (c <= '9')) {
+ width = (width * 10) + (c - '0');
+ c = *fmt++;
+ }
+ }
+
+ /* precision */
+ prec = -1;
+ if (c == '.') {
+ c = *fmt++;
+ if (c == '*') {
+ c = *fmt++;
+ prec = va_arg(ap, int);
+ } else {
+ prec = 0;
+ while ((c >= '0') && (c <= '9')) {
+ prec = (prec * 10) + (c - '0');
+ c = *fmt++;
+ }
+ }
+ }
+
+ /* size */
+ type = TYPE_INTN;
+ if (c == 'h') {
+ type = TYPE_INT16;
+ c = *fmt++;
+ } else if (c == 'L') {
+ /* XXX not quite sure here */
+ type = TYPE_INT64;
+ c = *fmt++;
+ } else if (c == 'l') {
+ type = TYPE_INT32;
+ c = *fmt++;
+ if (c == 'l') {
+ type = TYPE_INT64;
+ c = *fmt++;
+ }
+ }
+
+ /* format */
+ hexp = hex;
+ switch (c) {
+ case 'd': case 'i': /* decimal/integer */
+ radix = 10;
+ goto fetch_and_convert;
+
+ case 'o': /* octal */
+ radix = 8;
+ type |= 1;
+ goto fetch_and_convert;
+
+ case 'u': /* unsigned decimal */
+ radix = 10;
+ type |= 1;
+ goto fetch_and_convert;
+
+ case 'x': /* unsigned hex */
+ radix = 16;
+ type |= 1;
+ goto fetch_and_convert;
+
+ case 'X': /* unsigned HEX */
+ radix = 16;
+ hexp = HEX;
+ type |= 1;
+ goto fetch_and_convert;
+
+ fetch_and_convert:
+ switch (type) {
+ case TYPE_INT16:
+ u.l = va_arg(ap, int);
+ if (u.l < 0) {
+ u.l = -u.l;
+ flags |= FLAG_NEG;
+ }
+ goto do_long;
+ case TYPE_UINT16:
+ u.l = va_arg(ap, int) & 0xffff;
+ goto do_long;
+ case TYPE_INTN:
+ u.l = va_arg(ap, int);
+ if (u.l < 0) {
+ u.l = -u.l;
+ flags |= FLAG_NEG;
+ }
+ goto do_long;
+ case TYPE_UINTN:
+ u.l = (long)va_arg(ap, unsigned int);
+ goto do_long;
+
+ case TYPE_INT32:
+ u.l = va_arg(ap, JSInt32);
+ if (u.l < 0) {
+ u.l = -u.l;
+ flags |= FLAG_NEG;
+ }
+ goto do_long;
+ case TYPE_UINT32:
+ u.l = (long)va_arg(ap, JSUint32);
+ do_long:
+ rv = cvt_l(ss, u.l, width, prec, radix, type, flags, hexp);
+ if (rv < 0) {
+ return rv;
+ }
+ break;
+
+ case TYPE_INT64:
+ u.ll = va_arg(ap, JSInt64);
+ if (!JSLL_GE_ZERO(u.ll)) {
+ JSLL_NEG(u.ll, u.ll);
+ flags |= FLAG_NEG;
+ }
+ goto do_longlong;
+ case TYPE_UINT64:
+ u.ll = va_arg(ap, JSUint64);
+ do_longlong:
+ rv = cvt_ll(ss, u.ll, width, prec, radix, type, flags, hexp);
+ if (rv < 0) {
+ return rv;
+ }
+ break;
+ }
+ break;
+
+ case 'e':
+ case 'E':
+ case 'f':
+ case 'g':
+ u.d = va_arg(ap, double);
+ if( nas != NULL ){
+ i = fmt - dolPt;
+ if( i < (int)sizeof( pattern ) ){
+ pattern[0] = '%';
+ memcpy( &pattern[1], dolPt, (size_t)i );
+ rv = cvt_f(ss, u.d, pattern, &pattern[i+1] );
+ }
+ } else
+ rv = cvt_f(ss, u.d, fmt0, fmt);
+
+ if (rv < 0) {
+ return rv;
+ }
+ break;
+
+ case 'c':
+ if ((flags & FLAG_LEFT) == 0) {
+ while (width-- > 1) {
+ rv = (*ss->stuff)(ss, " ", 1);
+ if (rv < 0) {
+ return rv;
+ }
+ }
+ }
+ switch (type) {
+ case TYPE_INT16:
+ /* Treat %hc as %c if JS_C_STRINGS_ARE_UTF8 is undefined. */
+#ifdef JS_C_STRINGS_ARE_UTF8
+ u.wch = va_arg(ap, int);
+ utf8len = js_OneUcs4ToUtf8Char (utf8buf, u.wch);
+ rv = (*ss->stuff)(ss, utf8buf, utf8len);
+ break;
+#endif
+ case TYPE_INTN:
+ u.ch = va_arg(ap, int);
+ rv = (*ss->stuff)(ss, &u.ch, 1);
+ break;
+ }
+ if (rv < 0) {
+ return rv;
+ }
+ if (flags & FLAG_LEFT) {
+ while (width-- > 1) {
+ rv = (*ss->stuff)(ss, " ", 1);
+ if (rv < 0) {
+ return rv;
+ }
+ }
+ }
+ break;
+
+ case 'p':
+ if (sizeof(void *) == sizeof(JSInt32)) {
+ type = TYPE_UINT32;
+ } else if (sizeof(void *) == sizeof(JSInt64)) {
+ type = TYPE_UINT64;
+ } else if (sizeof(void *) == sizeof(int)) {
+ type = TYPE_UINTN;
+ } else {
+ JS_ASSERT(0);
+ break;
+ }
+ radix = 16;
+ goto fetch_and_convert;
+
+#if 0
+ case 'C':
+ case 'S':
+ case 'E':
+ case 'G':
+ /* XXX not supported I suppose */
+ JS_ASSERT(0);
+ break;
+#endif
+
+ case 's':
+ if(type == TYPE_INT16) {
+ /*
+ * This would do a simple string/byte conversion
+ * if JS_C_STRINGS_ARE_UTF8 is not defined.
+ */
+ u.ws = va_arg(ap, const jschar*);
+ rv = cvt_ws(ss, u.ws, width, prec, flags);
+ } else {
+ u.s = va_arg(ap, const char*);
+ rv = cvt_s(ss, u.s, width, prec, flags);
+ }
+ if (rv < 0) {
+ return rv;
+ }
+ break;
+
+ case 'n':
+ u.ip = va_arg(ap, int*);
+ if (u.ip) {
+ *u.ip = ss->cur - ss->base;
+ }
+ break;
+
+ default:
+ /* Not a % token after all... skip it */
+#if 0
+ JS_ASSERT(0);
+#endif
+ rv = (*ss->stuff)(ss, "%", 1);
+ if (rv < 0) {
+ return rv;
+ }
+ rv = (*ss->stuff)(ss, fmt - 1, 1);
+ if (rv < 0) {
+ return rv;
+ }
+ }
+ }
+
+ /* Stuff trailing NUL */
+ rv = (*ss->stuff)(ss, "\0", 1);
+
+ if( nas && ( nas != nasArray ) ){
+ free( nas );
+ }
+
+ return rv;
+}
+
+/************************************************************************/
+
+static int FuncStuff(SprintfState *ss, const char *sp, JSUint32 len)
+{
+ int rv;
+
+ rv = (*ss->func)(ss->arg, sp, len);
+ if (rv < 0) {
+ return rv;
+ }
+ ss->maxlen += len;
+ return 0;
+}
+
+JS_PUBLIC_API(JSUint32) JS_sxprintf(JSStuffFunc func, void *arg,
+ const char *fmt, ...)
+{
+ va_list ap;
+ int rv;
+
+ va_start(ap, fmt);
+ rv = JS_vsxprintf(func, arg, fmt, ap);
+ va_end(ap);
+ return rv;
+}
+
+JS_PUBLIC_API(JSUint32) JS_vsxprintf(JSStuffFunc func, void *arg,
+ const char *fmt, va_list ap)
+{
+ SprintfState ss;
+ int rv;
+
+ ss.stuff = FuncStuff;
+ ss.func = func;
+ ss.arg = arg;
+ ss.maxlen = 0;
+ rv = dosprintf(&ss, fmt, ap);
+ return (rv < 0) ? (JSUint32)-1 : ss.maxlen;
+}
+
+/*
+** Stuff routine that automatically grows the malloc'd output buffer
+** before it overflows.
+*/
+static int GrowStuff(SprintfState *ss, const char *sp, JSUint32 len)
+{
+ ptrdiff_t off;
+ char *newbase;
+ JSUint32 newlen;
+
+ off = ss->cur - ss->base;
+ if (off + len >= ss->maxlen) {
+ /* Grow the buffer */
+ newlen = ss->maxlen + ((len > 32) ? len : 32);
+ if (ss->base) {
+ newbase = (char*) realloc(ss->base, newlen);
+ } else {
+ newbase = (char*) malloc(newlen);
+ }
+ if (!newbase) {
+ /* Ran out of memory */
+ return -1;
+ }
+ ss->base = newbase;
+ ss->maxlen = newlen;
+ ss->cur = ss->base + off;
+ }
+
+ /* Copy data */
+ while (len) {
+ --len;
+ *ss->cur++ = *sp++;
+ }
+ JS_ASSERT((JSUint32)(ss->cur - ss->base) <= ss->maxlen);
+ return 0;
+}
+
+/*
+** sprintf into a malloc'd buffer
+*/
+JS_PUBLIC_API(char *) JS_smprintf(const char *fmt, ...)
+{
+ va_list ap;
+ char *rv;
+
+ va_start(ap, fmt);
+ rv = JS_vsmprintf(fmt, ap);
+ va_end(ap);
+ return rv;
+}
+
+/*
+** Free memory allocated, for the caller, by JS_smprintf
+*/
+JS_PUBLIC_API(void) JS_smprintf_free(char *mem)
+{
+ free(mem);
+}
+
+JS_PUBLIC_API(char *) JS_vsmprintf(const char *fmt, va_list ap)
+{
+ SprintfState ss;
+ int rv;
+
+ ss.stuff = GrowStuff;
+ ss.base = 0;
+ ss.cur = 0;
+ ss.maxlen = 0;
+ rv = dosprintf(&ss, fmt, ap);
+ if (rv < 0) {
+ if (ss.base) {
+ free(ss.base);
+ }
+ return 0;
+ }
+ return ss.base;
+}
+
+/*
+** Stuff routine that discards overflow data
+*/
+static int LimitStuff(SprintfState *ss, const char *sp, JSUint32 len)
+{
+ JSUint32 limit = ss->maxlen - (ss->cur - ss->base);
+
+ if (len > limit) {
+ len = limit;
+ }
+ while (len) {
+ --len;
+ *ss->cur++ = *sp++;
+ }
+ return 0;
+}
+
+/*
+** sprintf into a fixed size buffer. Make sure there is a NUL at the end
+** when finished.
+*/
+JS_PUBLIC_API(JSUint32) JS_snprintf(char *out, JSUint32 outlen, const char *fmt, ...)
+{
+ va_list ap;
+ int rv;
+
+ JS_ASSERT((JSInt32)outlen > 0);
+ if ((JSInt32)outlen <= 0) {
+ return 0;
+ }
+
+ va_start(ap, fmt);
+ rv = JS_vsnprintf(out, outlen, fmt, ap);
+ va_end(ap);
+ return rv;
+}
+
+JS_PUBLIC_API(JSUint32) JS_vsnprintf(char *out, JSUint32 outlen,const char *fmt,
+ va_list ap)
+{
+ SprintfState ss;
+ JSUint32 n;
+
+ JS_ASSERT((JSInt32)outlen > 0);
+ if ((JSInt32)outlen <= 0) {
+ return 0;
+ }
+
+ ss.stuff = LimitStuff;
+ ss.base = out;
+ ss.cur = out;
+ ss.maxlen = outlen;
+ (void) dosprintf(&ss, fmt, ap);
+
+ /* If we added chars, and we didn't append a null, do it now. */
+ if( (ss.cur != ss.base) && (ss.cur[-1] != '\0') )
+ ss.cur[-1] = '\0';
+
+ n = ss.cur - ss.base;
+ return n ? n - 1 : n;
+}
+
+JS_PUBLIC_API(char *) JS_sprintf_append(char *last, const char *fmt, ...)
+{
+ va_list ap;
+ char *rv;
+
+ va_start(ap, fmt);
+ rv = JS_vsprintf_append(last, fmt, ap);
+ va_end(ap);
+ return rv;
+}
+
+JS_PUBLIC_API(char *) JS_vsprintf_append(char *last, const char *fmt, va_list ap)
+{
+ SprintfState ss;
+ int rv;
+
+ ss.stuff = GrowStuff;
+ if (last) {
+ int lastlen = strlen(last);
+ ss.base = last;
+ ss.cur = last + lastlen;
+ ss.maxlen = lastlen;
+ } else {
+ ss.base = 0;
+ ss.cur = 0;
+ ss.maxlen = 0;
+ }
+ rv = dosprintf(&ss, fmt, ap);
+ if (rv < 0) {
+ if (ss.base) {
+ free(ss.base);
+ }
+ return 0;
+ }
+ return ss.base;
+}
+
diff --git a/third_party/js-1.7/jsprf.h b/third_party/js-1.7/jsprf.h
new file mode 100644
index 0000000..0eb910f
--- /dev/null
+++ b/third_party/js-1.7/jsprf.h
@@ -0,0 +1,150 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsprf_h___
+#define jsprf_h___
+
+/*
+** API for PR printf like routines. Supports the following formats
+** %d - decimal
+** %u - unsigned decimal
+** %x - unsigned hex
+** %X - unsigned uppercase hex
+** %o - unsigned octal
+** %hd, %hu, %hx, %hX, %ho - 16-bit versions of above
+** %ld, %lu, %lx, %lX, %lo - 32-bit versions of above
+** %lld, %llu, %llx, %llX, %llo - 64 bit versions of above
+** %s - string
+** %hs - 16-bit version of above (only available if compiled with JS_C_STRINGS_ARE_UTF8)
+** %c - character
+** %hc - 16-bit version of above (only available if compiled with JS_C_STRINGS_ARE_UTF8)
+** %p - pointer (deals with machine dependent pointer size)
+** %f - float
+** %g - float
+*/
+#include "jstypes.h"
+#include <stdio.h>
+#include <stdarg.h>
+
+JS_BEGIN_EXTERN_C
+
+/*
+** sprintf into a fixed size buffer. Guarantees that a NUL is at the end
+** of the buffer. Returns the length of the written output, NOT including
+** the NUL, or (JSUint32)-1 if an error occurs.
+*/
+extern JS_PUBLIC_API(JSUint32) JS_snprintf(char *out, JSUint32 outlen, const char *fmt, ...);
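+
+/*
+** Illustrative sketch only (compiled out): formatting into a caller-supplied
+** fixed-size buffer. The buffer and its size are assumptions for the example;
+** the return value is the number of characters written, excluding the NUL.
+*/
+#if 0
+static void example_snprintf(void)
+{
+    char buf[64];
+    JSUint32 n = JS_snprintf(buf, sizeof buf, "%s #%d", "frame", 7);
+    /* buf now holds "frame #7", always NUL-terminated; n == 8 */
+}
+#endif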
+
+/*
+** sprintf into a malloc'd buffer. Return a pointer to the malloc'd
+** buffer on success, NULL on failure. Call "JS_smprintf_free" to release
+** the memory returned.
+*/
+extern JS_PUBLIC_API(char*) JS_smprintf(const char *fmt, ...);
+
+/*
+** Free the memory allocated, for the caller, by JS_smprintf
+*/
+extern JS_PUBLIC_API(void) JS_smprintf_free(char *mem);
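+
+/*
+** Illustrative sketch only (compiled out): pairing JS_smprintf with
+** JS_smprintf_free. The message text is an assumption for the example.
+*/
+#if 0
+static void example_smprintf(void)
+{
+    char *msg = JS_smprintf("pid %u exited with status %d", 1234u, 3);
+    if (msg) {
+        /* ... use msg ... */
+        JS_smprintf_free(msg);      /* release via JS_smprintf_free, as documented above */
+    }
+}
+#endif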
+
+/*
+** "append" sprintf into a malloc'd buffer. "last" is the last value of
+** the malloc'd buffer. sprintf will append data to the end of last,
+** growing it as necessary using realloc. If last is NULL, JS_sprintf_append
+** will allocate the initial string. The return value is the new value of
+** last for subsequent calls, or NULL if there is a malloc failure.
+*/
+extern JS_PUBLIC_API(char*) JS_sprintf_append(char *last, const char *fmt, ...);
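+
+/*
+** Illustrative sketch only (compiled out): building a string incrementally.
+** Passing NULL on the first call makes JS_sprintf_append allocate the buffer;
+** on failure the previous buffer has already been released (see jsprf.c).
+*/
+#if 0
+static char *example_append(void)
+{
+    char *s = NULL;
+    int i;
+
+    for (i = 0; i < 3; i++) {
+        s = JS_sprintf_append(s, "%d ", i);
+        if (!s)
+            return NULL;            /* allocation failure */
+    }
+    return s;                       /* "0 1 2 "; caller owns the malloc'd result */
+}
+#endif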
+
+/*
+** sprintf into a function. The function "f" is called with a string to
+** place into the output. "arg" is an opaque pointer used by the stuff
+** function to hold any state needed to do the storage of the output
+** data. The return value is a count of the number of characters fed to
+** the stuff function, or (JSUint32)-1 if an error occurs.
+*/
+typedef JSIntn (*JSStuffFunc)(void *arg, const char *s, JSUint32 slen);
+
+extern JS_PUBLIC_API(JSUint32) JS_sxprintf(JSStuffFunc f, void *arg, const char *fmt, ...);
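+
+/*
+** Illustrative sketch only (compiled out): a JSStuffFunc that writes each
+** chunk to a FILE*, with the stream passed through 'arg'. The function name
+** and stream are assumptions; note that the trailing NUL is also fed to the
+** stuff function by the formatter.
+*/
+#if 0
+static JSIntn example_stuff_to_file(void *arg, const char *s, JSUint32 slen)
+{
+    return (fwrite(s, 1, slen, (FILE *)arg) == slen) ? 0 : -1;
+}
+
+static void example_sxprintf(void)
+{
+    JS_sxprintf(example_stuff_to_file, stderr, "warning: %s (%d)\n", "low memory", 12);
+}
+#endif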
+
+/*
+** va_list forms of the above.
+*/
+extern JS_PUBLIC_API(JSUint32) JS_vsnprintf(char *out, JSUint32 outlen, const char *fmt, va_list ap);
+extern JS_PUBLIC_API(char*) JS_vsmprintf(const char *fmt, va_list ap);
+extern JS_PUBLIC_API(char*) JS_vsprintf_append(char *last, const char *fmt, va_list ap);
+extern JS_PUBLIC_API(JSUint32) JS_vsxprintf(JSStuffFunc f, void *arg, const char *fmt, va_list ap);
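+
+/*
+** Illustrative sketch only (compiled out): wrapping the va_list form in a
+** custom variadic helper. The helper name and buffer size are assumptions.
+*/
+#if 0
+static void example_log(const char *fmt, ...)
+{
+    char buf[256];
+    va_list ap;
+
+    va_start(ap, fmt);
+    JS_vsnprintf(buf, sizeof buf, fmt, ap);
+    va_end(ap);
+    fputs(buf, stderr);
+}
+#endif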
+
+/*
+***************************************************************************
+** FUNCTION: JS_sscanf
+** DESCRIPTION:
+** JS_sscanf() scans the input character string, performs data
+** conversions, and stores the converted values in the data objects
+** pointed to by its arguments according to the format control
+** string.
+**
+** JS_sscanf() behaves the same way as the sscanf() function in the
+** Standard C Library (stdio.h), with the following exceptions:
+** - JS_sscanf() handles the NSPR integer and floating point types,
+** such as JSInt16, JSInt32, JSInt64, and JSFloat64, whereas
+** sscanf() handles the standard C types like short, int, long,
+** and double.
+** - JS_sscanf() has no multibyte character support, while sscanf()
+** does.
+** INPUTS:
+** const char *buf
+** a character string holding the input to scan
+** const char *fmt
+** the format control string for the conversions
+** ...
+** variable number of arguments, each of them is a pointer to
+** a data object in which the converted value will be stored
+** OUTPUTS: none
+** RETURNS: JSInt32
+** The number of values converted and stored.
+** RESTRICTIONS:
+** Multibyte characters in 'buf' or 'fmt' are not allowed.
+***************************************************************************
+*/
+
+extern JS_PUBLIC_API(JSInt32) JS_sscanf(const char *buf, const char *fmt, ...);
+
+JS_END_EXTERN_C
+
+#endif /* jsprf_h___ */
diff --git a/third_party/js-1.7/jsproto.tbl b/third_party/js-1.7/jsproto.tbl
new file mode 100644
index 0000000..18f2355
--- /dev/null
+++ b/third_party/js-1.7/jsproto.tbl
@@ -0,0 +1,116 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set sw=4 ts=8 et tw=80 ft=c:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is SpiderMonkey 1.7 work in progress, released
+ * February 14, 2006.
+ *
+ * The Initial Developer of the Original Code is
+ * Brendan Eich <brendan@mozilla.org>
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#include "jsconfig.h"
+
+#if JS_HAS_SCRIPT_OBJECT
+# define SCRIPT_INIT js_InitScriptClass
+#else
+# define SCRIPT_INIT js_InitNullClass
+#endif
+
+#if JS_HAS_XML_SUPPORT
+# define XML_INIT js_InitXMLClass
+# define NAMESPACE_INIT js_InitNamespaceClass
+# define QNAME_INIT js_InitQNameClass
+# define ANYNAME_INIT js_InitAnyNameClass
+# define ATTRIBUTE_INIT js_InitAttributeNameClass
+#else
+# define XML_INIT js_InitNullClass
+# define NAMESPACE_INIT js_InitNullClass
+# define QNAME_INIT js_InitNullClass
+# define ANYNAME_INIT js_InitNullClass
+# define ATTRIBUTE_INIT js_InitNullClass
+#endif
+
+#if JS_HAS_GENERATORS
+# define GENERATOR_INIT js_InitIteratorClasses
+#else
+# define GENERATOR_INIT js_InitNullClass
+#endif
+
+#if JS_HAS_FILE_OBJECT
+# define FILE_INIT js_InitFileClass
+#else
+# define FILE_INIT js_InitNullClass
+#endif
+
+/*
+ * Enumerator codes in the second column must not change -- they are part of
+ * the JS XDR API.
+ */
+JS_PROTO(Null, 0, js_InitNullClass)
+JS_PROTO(Object, 1, js_InitFunctionAndObjectClasses)
+JS_PROTO(Function, 2, js_InitFunctionAndObjectClasses)
+JS_PROTO(Array, 3, js_InitArrayClass)
+JS_PROTO(Boolean, 4, js_InitBooleanClass)
+JS_PROTO(Call, 5, js_InitCallClass)
+JS_PROTO(Date, 6, js_InitDateClass)
+JS_PROTO(Math, 7, js_InitMathClass)
+JS_PROTO(Number, 8, js_InitNumberClass)
+JS_PROTO(String, 9, js_InitStringClass)
+JS_PROTO(RegExp, 10, js_InitRegExpClass)
+JS_PROTO(Script, 11, SCRIPT_INIT)
+JS_PROTO(XML, 12, XML_INIT)
+JS_PROTO(Namespace, 13, NAMESPACE_INIT)
+JS_PROTO(QName, 14, QNAME_INIT)
+JS_PROTO(AnyName, 15, ANYNAME_INIT)
+JS_PROTO(AttributeName, 16, ATTRIBUTE_INIT)
+JS_PROTO(Error, 17, js_InitExceptionClasses)
+JS_PROTO(InternalError, 18, js_InitExceptionClasses)
+JS_PROTO(EvalError, 19, js_InitExceptionClasses)
+JS_PROTO(RangeError, 20, js_InitExceptionClasses)
+JS_PROTO(ReferenceError, 21, js_InitExceptionClasses)
+JS_PROTO(SyntaxError, 22, js_InitExceptionClasses)
+JS_PROTO(TypeError, 23, js_InitExceptionClasses)
+JS_PROTO(URIError, 24, js_InitExceptionClasses)
+JS_PROTO(Generator, 25, GENERATOR_INIT)
+JS_PROTO(Iterator, 26, js_InitIteratorClasses)
+JS_PROTO(StopIteration, 27, js_InitIteratorClasses)
+JS_PROTO(UnusedProto28, 28, js_InitNullClass)
+JS_PROTO(File, 29, FILE_INIT)
+JS_PROTO(Block, 30, js_InitBlockClass)
+
+#undef SCRIPT_INIT
+#undef XML_INIT
+#undef NAMESPACE_INIT
+#undef QNAME_INIT
+#undef ANYNAME_INIT
+#undef ATTRIBUTE_INIT
+#undef GENERATOR_INIT
+#undef FILE_INIT
diff --git a/third_party/js-1.7/jsprvtd.h b/third_party/js-1.7/jsprvtd.h
new file mode 100644
index 0000000..f71b9a5
--- /dev/null
+++ b/third_party/js-1.7/jsprvtd.h
@@ -0,0 +1,202 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsprvtd_h___
+#define jsprvtd_h___
+/*
+ * JS private typename definitions.
+ *
+ * This header is included only in other .h files, for convenience and for
+ * simplicity of type naming. The alternative for structures is to use tags,
+ * which are named the same as their typedef names (legal in C/C++, and less
+ * noisy than suffixing the typedef name with "Struct" or "Str"). Instead,
+ * all .h files that include this file may use the same typedef name, whether
+ * declaring a pointer to struct type, or defining a member of struct type.
+ *
+ * A few fundamental scalar types are defined here too. Neither the scalar
+ * nor the struct typedefs should change much, therefore the nearly-global
+ * make dependency induced by this file should not prove painful.
+ */
+
+#include "jspubtd.h"
+
+/* Internal identifier (jsid) macros. */
+#define JSID_ATOM 0x0
+#define JSID_INT 0x1
+#define JSID_OBJECT 0x2
+#define JSID_TAGMASK 0x3
+#define JSID_TAG(id) ((id) & JSID_TAGMASK)
+#define JSID_SETTAG(id,t) ((id) | (t))
+#define JSID_CLRTAG(id) ((id) & ~(jsid)JSID_TAGMASK)
+
+#define JSID_IS_ATOM(id) (JSID_TAG(id) == JSID_ATOM)
+#define JSID_TO_ATOM(id) ((JSAtom *)(id))
+#define ATOM_TO_JSID(atom) ((jsid)(atom))
+#define ATOM_JSID_TO_JSVAL(id) ATOM_KEY(JSID_TO_ATOM(id))
+
+#define JSID_IS_INT(id) ((id) & JSID_INT)
+#define JSID_TO_INT(id) ((jsint)(id) >> 1)
+#define INT_TO_JSID(i) (((jsint)(i) << 1) | JSID_INT)
+#define INT_JSID_TO_JSVAL(id) (id)
+#define INT_JSVAL_TO_JSID(v) (v)
+
+#define JSID_IS_OBJECT(id) (JSID_TAG(id) == JSID_OBJECT)
+#define JSID_TO_OBJECT(id) ((JSObject *) JSID_CLRTAG(id))
+#define OBJECT_TO_JSID(obj) ((jsid)(obj) | JSID_OBJECT)
+#define OBJECT_JSID_TO_JSVAL(id) OBJECT_TO_JSVAL(JSID_CLRTAG(id))
+#define OBJECT_JSVAL_TO_JSID(v) OBJECT_TO_JSID(JSVAL_TO_OBJECT(v))
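+
+/*
+ * Illustrative sketch only (compiled out): the tagging round-trip implied by
+ * the macros above. JS_ASSERT comes from jsutil.h and is used here only for
+ * illustration; atom and object ids rely on pointer alignment leaving the
+ * low JSID_TAGMASK bits clear.
+ */
+#if 0
+static void example_jsid_tags(void)
+{
+    jsid id = INT_TO_JSID(42);          /* ((42 << 1) | JSID_INT) */
+    JS_ASSERT(JSID_IS_INT(id));
+    JS_ASSERT(JSID_TO_INT(id) == 42);   /* the arithmetic shift restores 42 */
+}
+#endif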
+
+/* Scalar typedefs. */
+typedef uint8 jsbytecode;
+typedef uint8 jssrcnote;
+typedef uint32 jsatomid;
+
+/* Struct typedefs. */
+typedef struct JSArgumentFormatMap JSArgumentFormatMap;
+typedef struct JSCodeGenerator JSCodeGenerator;
+typedef struct JSDependentString JSDependentString;
+typedef struct JSGCThing JSGCThing;
+typedef struct JSGenerator JSGenerator;
+typedef struct JSParseNode JSParseNode;
+typedef struct JSSharpObjectMap JSSharpObjectMap;
+typedef struct JSThread JSThread;
+typedef struct JSToken JSToken;
+typedef struct JSTokenPos JSTokenPos;
+typedef struct JSTokenPtr JSTokenPtr;
+typedef struct JSTokenStream JSTokenStream;
+typedef struct JSTreeContext JSTreeContext;
+typedef struct JSTryNote JSTryNote;
+
+/* Friend "Advanced API" typedefs. */
+typedef struct JSAtom JSAtom;
+typedef struct JSAtomList JSAtomList;
+typedef struct JSAtomListElement JSAtomListElement;
+typedef struct JSAtomMap JSAtomMap;
+typedef struct JSAtomState JSAtomState;
+typedef struct JSCodeSpec JSCodeSpec;
+typedef struct JSPrinter JSPrinter;
+typedef struct JSRegExp JSRegExp;
+typedef struct JSRegExpStatics JSRegExpStatics;
+typedef struct JSScope JSScope;
+typedef struct JSScopeOps JSScopeOps;
+typedef struct JSScopeProperty JSScopeProperty;
+typedef struct JSStackHeader JSStackHeader;
+typedef struct JSStringBuffer JSStringBuffer;
+typedef struct JSSubString JSSubString;
+typedef struct JSXML JSXML;
+typedef struct JSXMLNamespace JSXMLNamespace;
+typedef struct JSXMLQName JSXMLQName;
+typedef struct JSXMLArray JSXMLArray;
+typedef struct JSXMLArrayCursor JSXMLArrayCursor;
+
+/* "Friend" types used by jscntxt.h and jsdbgapi.h. */
+typedef enum JSTrapStatus {
+ JSTRAP_ERROR,
+ JSTRAP_CONTINUE,
+ JSTRAP_RETURN,
+ JSTRAP_THROW,
+ JSTRAP_LIMIT
+} JSTrapStatus;
+
+typedef JSTrapStatus
+(* JS_DLL_CALLBACK JSTrapHandler)(JSContext *cx, JSScript *script,
+ jsbytecode *pc, jsval *rval, void *closure);
+
+typedef JSBool
+(* JS_DLL_CALLBACK JSWatchPointHandler)(JSContext *cx, JSObject *obj, jsval id,
+ jsval old, jsval *newp, void *closure);
+
+/* called just after script creation */
+typedef void
+(* JS_DLL_CALLBACK JSNewScriptHook)(JSContext *cx,
+ const char *filename, /* URL of script */
+ uintN lineno, /* first line */
+ JSScript *script,
+ JSFunction *fun,
+ void *callerdata);
+
+/* called just before script destruction */
+typedef void
+(* JS_DLL_CALLBACK JSDestroyScriptHook)(JSContext *cx,
+ JSScript *script,
+ void *callerdata);
+
+typedef void
+(* JS_DLL_CALLBACK JSSourceHandler)(const char *filename, uintN lineno,
+ jschar *str, size_t length,
+ void **listenerTSData, void *closure);
+
+/*
+ * This hook captures high level script execution and function calls (JS or
+ * native). It is used by JS_SetExecuteHook to hook top level scripts and by
+ * JS_SetCallHook to hook function calls. It will get called twice per script
+ * or function call: just before execution begins and just after it finishes.
+ * In both cases the 'current' frame is that of the executing code.
+ *
+ * The 'before' param is JS_TRUE for the hook invocation before the execution
+ * and JS_FALSE for the invocation after the code has run.
+ *
+ * The 'ok' param is significant only on the post execution invocation to
+ * signify whether or not the code completed 'normally'.
+ *
+ * The 'closure' param is as passed to JS_SetExecuteHook or JS_SetCallHook
+ * for the 'before' invocation, but is whatever value is returned from that
+ * invocation for the 'after' invocation. Thus, the hook implementor *could*
+ * allocate a structure in the 'before' invocation and return a pointer to that
+ * structure. The pointer would then be handed to the hook for the 'after'
+ * invocation. Alternately, the 'before' could just return the same value as
+ * in 'closure' to cause the 'after' invocation to be called with the same
+ * 'closure' value as the 'before'.
+ *
+ * Returning NULL in the 'before' hook will cause the 'after' hook *not* to
+ * be called.
+ */
+typedef void *
+(* JS_DLL_CALLBACK JSInterpreterHook)(JSContext *cx, JSStackFrame *fp, JSBool before,
+ JSBool *ok, void *closure);
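+
+/*
+ * Illustrative sketch only (compiled out): an interpreter hook that allocates
+ * per-call state in the 'before' invocation and receives it back as 'closure'
+ * in the 'after' invocation, per the contract above. The state type is an
+ * assumption; malloc/free require <stdlib.h>.
+ */
+#if 0
+static void *
+example_call_hook(JSContext *cx, JSStackFrame *fp, JSBool before,
+                  JSBool *ok, void *closure)
+{
+    if (before) {
+        /* the value returned here becomes 'closure' in the 'after' call;
+           returning NULL would suppress the 'after' call entirely */
+        return malloc(sizeof(int));
+    }
+    /* 'after' invocation: *ok says whether the code completed normally */
+    free(closure);
+    return NULL;
+}
+#endif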
+
+typedef void
+(* JS_DLL_CALLBACK JSObjectHook)(JSContext *cx, JSObject *obj, JSBool isNew,
+ void *closure);
+
+typedef JSBool
+(* JS_DLL_CALLBACK JSDebugErrorHook)(JSContext *cx, const char *message,
+ JSErrorReport *report, void *closure);
+
+#endif /* jsprvtd_h___ */
diff --git a/third_party/js-1.7/jspubtd.h b/third_party/js-1.7/jspubtd.h
new file mode 100644
index 0000000..4e8c92a
--- /dev/null
+++ b/third_party/js-1.7/jspubtd.h
@@ -0,0 +1,667 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jspubtd_h___
+#define jspubtd_h___
+/*
+ * JS public API typedefs.
+ */
+#include "jstypes.h"
+#include "jscompat.h"
+
+JS_BEGIN_EXTERN_C
+
+/* Scalar typedefs. */
+typedef uint16 jschar;
+typedef int32 jsint;
+typedef uint32 jsuint;
+typedef float64 jsdouble;
+typedef jsword jsval;
+typedef jsword jsid;
+typedef int32 jsrefcount; /* PRInt32 if JS_THREADSAFE, see jslock.h */
+
+/*
+ * Run-time version enumeration. See jsconfig.h for compile-time counterparts
+ * to these values that may be selected by the JS_VERSION macro, and tested by
+ * #if expressions.
+ */
+typedef enum JSVersion {
+ JSVERSION_1_0 = 100,
+ JSVERSION_1_1 = 110,
+ JSVERSION_1_2 = 120,
+ JSVERSION_1_3 = 130,
+ JSVERSION_1_4 = 140,
+ JSVERSION_ECMA_3 = 148,
+ JSVERSION_1_5 = 150,
+ JSVERSION_1_6 = 160,
+ JSVERSION_1_7 = 170,
+ JSVERSION_DEFAULT = 0,
+ JSVERSION_UNKNOWN = -1
+} JSVersion;
+
+#define JSVERSION_IS_ECMA(version) \
+ ((version) == JSVERSION_DEFAULT || (version) >= JSVERSION_1_3)
+
+/* Result of typeof operator enumeration. */
+typedef enum JSType {
+ JSTYPE_VOID, /* undefined */
+ JSTYPE_OBJECT, /* object */
+ JSTYPE_FUNCTION, /* function */
+ JSTYPE_STRING, /* string */
+ JSTYPE_NUMBER, /* number */
+ JSTYPE_BOOLEAN, /* boolean */
+ JSTYPE_NULL, /* null */
+ JSTYPE_XML, /* xml object */
+ JSTYPE_LIMIT
+} JSType;
+
+/* Dense index into cached prototypes and class atoms for standard objects. */
+typedef enum JSProtoKey {
+#define JS_PROTO(name,code,init) JSProto_##name = code,
+#include "jsproto.tbl"
+#undef JS_PROTO
+ JSProto_LIMIT
+} JSProtoKey;
+
+/* JSObjectOps.checkAccess mode enumeration. */
+typedef enum JSAccessMode {
+ JSACC_PROTO = 0, /* XXXbe redundant w.r.t. id */
+ JSACC_PARENT = 1, /* XXXbe redundant w.r.t. id */
+ JSACC_IMPORT = 2, /* import foo.bar */
+ JSACC_WATCH = 3, /* a watchpoint on object foo for id 'bar' */
+ JSACC_READ = 4, /* a "get" of foo.bar */
+ JSACC_WRITE = 8, /* a "set" of foo.bar = baz */
+ JSACC_LIMIT
+} JSAccessMode;
+
+#define JSACC_TYPEMASK (JSACC_WRITE - 1)
+
+/*
+ * This enum type is used to control the behavior of a JSObject property
+ * iterator function that has type JSNewEnumerate.
+ */
+typedef enum JSIterateOp {
+ JSENUMERATE_INIT, /* Create new iterator state */
+ JSENUMERATE_NEXT, /* Iterate once */
+ JSENUMERATE_DESTROY /* Destroy iterator state */
+} JSIterateOp;
+
+/* Struct typedefs. */
+typedef struct JSClass JSClass;
+typedef struct JSExtendedClass JSExtendedClass;
+typedef struct JSConstDoubleSpec JSConstDoubleSpec;
+typedef struct JSContext JSContext;
+typedef struct JSErrorReport JSErrorReport;
+typedef struct JSFunction JSFunction;
+typedef struct JSFunctionSpec JSFunctionSpec;
+typedef struct JSIdArray JSIdArray;
+typedef struct JSProperty JSProperty;
+typedef struct JSPropertySpec JSPropertySpec;
+typedef struct JSObject JSObject;
+typedef struct JSObjectMap JSObjectMap;
+typedef struct JSObjectOps JSObjectOps;
+typedef struct JSXMLObjectOps JSXMLObjectOps;
+typedef struct JSRuntime JSRuntime;
+typedef struct JSRuntime JSTaskState; /* XXX deprecated name */
+typedef struct JSScript JSScript;
+typedef struct JSStackFrame JSStackFrame;
+typedef struct JSString JSString;
+typedef struct JSXDRState JSXDRState;
+typedef struct JSExceptionState JSExceptionState;
+typedef struct JSLocaleCallbacks JSLocaleCallbacks;
+
+/* JSClass (and JSObjectOps where appropriate) function pointer typedefs. */
+
+/*
+ * Add, delete, get or set a property named by id in obj. Note the jsval id
+ * type -- id may be a string (Unicode property identifier) or an int (element
+ * index). The *vp out parameter, on success, is the new property value after
+ * an add, get, or set. After a successful delete, *vp is JSVAL_FALSE iff
+ * obj[id] can't be deleted (because it's permanent).
+ */
+typedef JSBool
+(* JS_DLL_CALLBACK JSPropertyOp)(JSContext *cx, JSObject *obj, jsval id,
+ jsval *vp);
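+
+/*
+ * Illustrative sketch only (compiled out): a getter-style JSPropertyOp. The
+ * name and constant value are assumptions; a real hook would typically
+ * consult obj's private data. INT_TO_JSVAL comes from jsapi.h.
+ */
+#if 0
+static JSBool
+example_get_prop(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+    *vp = INT_TO_JSVAL(42);     /* hand the property value back through *vp */
+    return JS_TRUE;
+}
+#endif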
+
+/*
+ * This function type is used for callbacks that enumerate the properties of
+ * a JSObject. The behavior depends on the value of enum_op:
+ *
+ * JSENUMERATE_INIT
+ * A new, opaque iterator state should be allocated and stored in *statep.
+ * (You can use PRIVATE_TO_JSVAL() to tag the pointer to be stored).
+ *
+ * The number of properties that will be enumerated should be returned as
+ * an integer jsval in *idp, if idp is non-null, and provided the number of
+ * enumerable properties is known. If idp is non-null and the number of
+ * enumerable properties can't be computed in advance, *idp should be set
+ * to JSVAL_ZERO.
+ *
+ * JSENUMERATE_NEXT
+ * A previously allocated opaque iterator state is passed in via statep.
+ * Return the next jsid in the iteration using *idp. The opaque iterator
+ * state pointed at by statep is destroyed and *statep is set to JSVAL_NULL
+ * if there are no properties left to enumerate.
+ *
+ * JSENUMERATE_DESTROY
+ * Destroy the opaque iterator state previously allocated in *statep by a
+ * call to this function when enum_op was JSENUMERATE_INIT.
+ *
+ * The return value is used to indicate success, with a value of JS_FALSE
+ * indicating failure.
+ */
+typedef JSBool
+(* JS_DLL_CALLBACK JSNewEnumerateOp)(JSContext *cx, JSObject *obj,
+ JSIterateOp enum_op,
+ jsval *statep, jsid *idp);
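+
+/*
+ * Illustrative sketch only (compiled out): a JSNewEnumerateOp over three
+ * integer ids, following the INIT/NEXT/DESTROY protocol described above.
+ * The fixed count is an assumption; JS_malloc, JS_free, JS_ValueToId and
+ * the jsval macros come from jsapi.h.
+ */
+#if 0
+static JSBool
+example_new_enumerate(JSContext *cx, JSObject *obj, JSIterateOp enum_op,
+                      jsval *statep, jsid *idp)
+{
+    jsint *cursor;
+
+    switch (enum_op) {
+      case JSENUMERATE_INIT:
+        cursor = (jsint *) JS_malloc(cx, sizeof *cursor);
+        if (!cursor)
+            return JS_FALSE;
+        *cursor = 0;
+        *statep = PRIVATE_TO_JSVAL(cursor);     /* opaque iterator state */
+        if (idp)
+            *idp = INT_TO_JSVAL(3);             /* count is known in advance */
+        return JS_TRUE;
+
+      case JSENUMERATE_NEXT:
+        cursor = (jsint *) JSVAL_TO_PRIVATE(*statep);
+        if (*cursor < 3)
+            return JS_ValueToId(cx, INT_TO_JSVAL((*cursor)++), idp);
+        /* FALL THROUGH: nothing left, destroy the state and null *statep */
+
+      case JSENUMERATE_DESTROY:
+        JS_free(cx, JSVAL_TO_PRIVATE(*statep));
+        *statep = JSVAL_NULL;
+        return JS_TRUE;
+
+      default:
+        return JS_FALSE;
+    }
+}
+#endif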
+
+/*
+ * The old-style JSClass.enumerate op should define all lazy properties not
+ * yet reflected in obj.
+ */
+typedef JSBool
+(* JS_DLL_CALLBACK JSEnumerateOp)(JSContext *cx, JSObject *obj);
+
+/*
+ * Resolve a lazy property named by id in obj by defining it directly in obj.
+ * Lazy properties are those reflected from some peer native property space
+ * (e.g., the DOM attributes for a given node reflected as obj) on demand.
+ *
+ * JS looks for a property in an object, and if not found, tries to resolve
+ * the given id. If resolve succeeds, the engine looks again in case resolve
+ * defined obj[id]. If no such property exists directly in obj, the process
+ * is repeated with obj's prototype, etc.
+ *
+ * NB: JSNewResolveOp provides a cheaper way to resolve lazy properties.
+ */
+typedef JSBool
+(* JS_DLL_CALLBACK JSResolveOp)(JSContext *cx, JSObject *obj, jsval id);
+
+/*
+ * Like JSResolveOp, but flags provide contextual information as follows:
+ *
+ * JSRESOLVE_QUALIFIED a qualified property id: obj.id or obj[id], not id
+ * JSRESOLVE_ASSIGNING obj[id] is on the left-hand side of an assignment
+ * JSRESOLVE_DETECTING 'if (o.p)...' or similar detection opcode sequence
+ * JSRESOLVE_DECLARING var, const, or function prolog declaration opcode
+ * JSRESOLVE_CLASSNAME class name used when constructing
+ *
+ * The *objp out parameter, on success, should be null to indicate that id
+ * was not resolved; and non-null, referring to obj or one of its prototypes,
+ * if id was resolved.
+ *
+ * This hook instead of JSResolveOp is called via the JSClass.resolve member
+ * if JSCLASS_NEW_RESOLVE is set in JSClass.flags.
+ *
+ * Setting JSCLASS_NEW_RESOLVE and JSCLASS_NEW_RESOLVE_GETS_START further
+ * extends this hook by passing in the starting object on the prototype chain
+ * via *objp. Thus a resolve hook implementation may define the property id
+ * being resolved in the object in which the id was first sought, rather than
+ * in a prototype object whose class led to the resolve hook being called.
+ *
+ * When using JSCLASS_NEW_RESOLVE_GETS_START, the resolve hook must therefore
+ * null *objp to signify "not resolved". With only JSCLASS_NEW_RESOLVE and no
+ * JSCLASS_NEW_RESOLVE_GETS_START, the hook can assume *objp is null on entry.
+ * This is not good practice, but enough existing hook implementations count
+ * on it that we can't break compatibility by passing the starting object in
+ * *objp without a new JSClass flag.
+ */
+typedef JSBool
+(* JS_DLL_CALLBACK JSNewResolveOp)(JSContext *cx, JSObject *obj, jsval id,
+ uintN flags, JSObject **objp);
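+
+/*
+ * Illustrative sketch only (compiled out): a JSCLASS_NEW_RESOLVE-style hook
+ * that lazily defines a hypothetical "answer" property the first time it is
+ * looked up. JS_DefineProperty, JS_GetStringBytes, JSPROP_ENUMERATE and the
+ * jsval macros come from jsapi.h; strcmp needs <string.h>.
+ */
+#if 0
+static JSBool
+example_new_resolve(JSContext *cx, JSObject *obj, jsval id, uintN flags,
+                    JSObject **objp)
+{
+    *objp = NULL;                               /* default: id not resolved */
+    if (!JSVAL_IS_STRING(id))
+        return JS_TRUE;
+    if (strcmp(JS_GetStringBytes(JSVAL_TO_STRING(id)), "answer") == 0) {
+        if (!JS_DefineProperty(cx, obj, "answer", INT_TO_JSVAL(42),
+                               NULL, NULL, JSPROP_ENUMERATE))
+            return JS_FALSE;
+        *objp = obj;                            /* resolved directly in obj */
+    }
+    return JS_TRUE;
+}
+#endif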
+
+/*
+ * Convert obj to the given type, returning true with the resulting value in
+ * *vp on success, and returning false on error or exception.
+ */
+typedef JSBool
+(* JS_DLL_CALLBACK JSConvertOp)(JSContext *cx, JSObject *obj, JSType type,
+ jsval *vp);
+
+/*
+ * Finalize obj, which the garbage collector has determined to be unreachable
+ * from other live objects or from GC roots. Obviously, finalizers must never
+ * store a reference to obj.
+ */
+typedef void
+(* JS_DLL_CALLBACK JSFinalizeOp)(JSContext *cx, JSObject *obj);
+
+/*
+ * Used by JS_AddExternalStringFinalizer and JS_RemoveExternalStringFinalizer
+ * to extend and reduce the set of string types finalized by the GC.
+ */
+typedef void
+(* JS_DLL_CALLBACK JSStringFinalizeOp)(JSContext *cx, JSString *str);
+
+/*
+ * The signature for JSClass.getObjectOps, used by JS_NewObject's internals
+ * to discover the set of high-level object operations to use for new objects
+ * of the given class. All native objects have a JSClass, which is stored as
+ * a private (int-tagged) pointer in obj->slots[JSSLOT_CLASS]. In contrast,
+ * all native and host objects have a JSObjectMap at obj->map, which may be
+ * shared among a number of objects, and which contains the JSObjectOps *ops
+ * pointer used to dispatch object operations from API calls.
+ *
+ * Thus JSClass (which pre-dates JSObjectOps in the API) provides a low-level
+ * interface to class-specific code and data, while JSObjectOps allows for a
+ * higher level of operation, which does not use the object's class except to
+ * find the class's JSObjectOps struct, by calling clasp->getObjectOps, and to
+ * finalize the object.
+ *
+ * If this seems backwards, that's because it is! API compatibility requires
+ * a JSClass *clasp parameter to JS_NewObject, etc. Most host objects do not
+ * need to implement the larger JSObjectOps, and can share the common JSScope
+ * code and data used by the native (js_ObjectOps, see jsobj.c) ops.
+ *
+ * Further extension to preserve API compatibility: if this function returns
+ * a pointer to JSXMLObjectOps.base, not to JSObjectOps, then the engine calls
+ * extended hooks needed for E4X.
+ */
+typedef JSObjectOps *
+(* JS_DLL_CALLBACK JSGetObjectOps)(JSContext *cx, JSClass *clasp);
+
+/*
+ * JSClass.checkAccess type: check whether obj[id] may be accessed per mode,
+ * returning false on error/exception, true on success with obj[id]'s last-got
+ * value in *vp, and its attributes in *attrsp. As for JSPropertyOp above, id
+ * is either a string or an int jsval.
+ *
+ * See JSCheckAccessIdOp, below, for the JSObjectOps counterpart, which takes
+ * a jsid (a tagged int or aligned, unique identifier pointer) rather than a
+ * jsval. The native js_ObjectOps.checkAccess simply forwards to the object's
+ * clasp->checkAccess, so that both JSClass and JSObjectOps implementors may
+ * specialize access checks.
+ */
+typedef JSBool
+(* JS_DLL_CALLBACK JSCheckAccessOp)(JSContext *cx, JSObject *obj, jsval id,
+ JSAccessMode mode, jsval *vp);
+
+/*
+ * Encode or decode an object, given an XDR state record representing external
+ * data. See jsxdrapi.h.
+ */
+typedef JSBool
+(* JS_DLL_CALLBACK JSXDRObjectOp)(JSXDRState *xdr, JSObject **objp);
+
+/*
+ * Check whether v is an instance of obj. Return false on error or exception,
+ * true on success with JS_TRUE in *bp if v is an instance of obj, JS_FALSE in
+ * *bp otherwise.
+ */
+typedef JSBool
+(* JS_DLL_CALLBACK JSHasInstanceOp)(JSContext *cx, JSObject *obj, jsval v,
+ JSBool *bp);
+
+/*
+ * Function type for JSClass.mark and JSObjectOps.mark, called from the GC to
+ * scan live GC-things reachable from obj's private data structure. For each
+ * such thing, a mark implementation must call
+ *
+ * JS_MarkGCThing(cx, thing, name, arg);
+ *
+ * The trailing name and arg parameters are used for GC_MARK_DEBUG-mode heap
+ * dumping and ref-path tracing. The mark function should pass a (typically
+ * literal) string naming the private data member for name, and it must pass
+ * the opaque arg parameter through from its caller.
+ *
+ * For the JSObjectOps.mark hook, the return value is the number of slots at
+ * obj->slots to scan. For JSClass.mark, the return value is ignored.
+ *
+ * NB: JSMarkOp implementations cannot allocate new GC-things (JS_NewObject
+ * called from a mark function will fail silently, e.g.).
+ */
+typedef uint32
+(* JS_DLL_CALLBACK JSMarkOp)(JSContext *cx, JSObject *obj, void *arg);
+
+/*
+ * The optional JSClass.reserveSlots hook allows a class to make computed
+ * per-instance object slots reservations, in addition to or instead of using
+ * JSCLASS_HAS_RESERVED_SLOTS(n) in the JSClass.flags initializer to reserve
+ * a constant-per-class number of slots. Implementations of this hook should
+ * return the number of slots to reserve, not including any reserved by using
+ * JSCLASS_HAS_RESERVED_SLOTS(n) in JSClass.flags.
+ *
+ * NB: called with obj locked by the JSObjectOps-specific mutual exclusion
+ * mechanism appropriate for obj, so don't nest other operations that might
+ * also lock obj.
+ */
+typedef uint32
+(* JS_DLL_CALLBACK JSReserveSlotsOp)(JSContext *cx, JSObject *obj);
+
+/* JSObjectOps function pointer typedefs. */
+
+/*
+ * Create a new subclass of JSObjectMap (see jsobj.h), with the nrefs and ops
+ * members initialized from the same-named parameters, and with the nslots and
+ * freeslot members initialized according to ops and clasp. Return null on
+ * error, non-null on success.
+ *
+ * JSObjectMaps are reference-counted by generic code in the engine. Usually,
+ * the nrefs parameter to JSObjectOps.newObjectMap will be 1, to count the ref
+ * returned to the caller on success. After a successful construction, some
+ * number of js_HoldObjectMap and js_DropObjectMap calls ensue. When nrefs
+ * reaches 0 due to a js_DropObjectMap call, JSObjectOps.destroyObjectMap will
+ * be called to dispose of the map.
+ */
+typedef JSObjectMap *
+(* JS_DLL_CALLBACK JSNewObjectMapOp)(JSContext *cx, jsrefcount nrefs,
+ JSObjectOps *ops, JSClass *clasp,
+ JSObject *obj);
+
+/*
+ * Generic type for an infallible JSObjectMap operation, used currently by
+ * JSObjectOps.destroyObjectMap.
+ */
+typedef void
+(* JS_DLL_CALLBACK JSObjectMapOp)(JSContext *cx, JSObjectMap *map);
+
+/*
+ * Look for id in obj and its prototype chain, returning false on error or
+ * exception, true on success. On success, return null in *propp if id was
+ * not found. If id was found, return the first object searching from obj
+ * along its prototype chain in which id names a direct property in *objp, and
+ * return a non-null, opaque property pointer in *propp.
+ *
+ * If JSLookupPropOp succeeds and returns with *propp non-null, that pointer
+ * may be passed as the prop parameter to a JSAttributesOp, as a short-cut
+ * that bypasses id re-lookup. In any case, a non-null *propp result after a
+ * successful lookup must be dropped via JSObjectOps.dropProperty.
+ *
+ * NB: successful return with non-null *propp means the implementation may
+ * have locked *objp and added a reference count associated with *propp, so
+ * callers should not risk deadlock by nesting or interleaving other lookups
+ * or any obj-bearing ops before dropping *propp.
+ */
+typedef JSBool
+(* JS_DLL_CALLBACK JSLookupPropOp)(JSContext *cx, JSObject *obj, jsid id,
+ JSObject **objp, JSProperty **propp);
+
+/*
+ * Define obj[id], a direct property of obj named id, having the given initial
+ * value, with the specified getter, setter, and attributes. If the propp out
+ * param is non-null, *propp on successful return contains an opaque property
+ * pointer usable as a speedup hint with JSAttributesOp. But note that propp
+ * may be null, indicating that the caller is not interested in recovering an
+ * opaque pointer to the newly-defined property.
+ *
+ * If propp is non-null and JSDefinePropOp succeeds, its caller must be sure
+ * to drop *propp using JSObjectOps.dropProperty in short order, just as with
+ * JSLookupPropOp.
+ */
+typedef JSBool
+(* JS_DLL_CALLBACK JSDefinePropOp)(JSContext *cx, JSObject *obj,
+ jsid id, jsval value,
+ JSPropertyOp getter, JSPropertyOp setter,
+ uintN attrs, JSProperty **propp);
+
+/*
+ * Get, set, or delete obj[id], returning false on error or exception, true
+ * on success. If getting or setting, the new value is returned in *vp on
+ * success. If deleting without error, *vp will be JSVAL_FALSE if obj[id] is
+ * permanent, and JSVAL_TRUE if id named a direct property of obj that was in
+ * fact deleted, or if id names no direct property of obj (id could name a
+ * prototype property, or no property in obj or its prototype chain).
+ */
+typedef JSBool
+(* JS_DLL_CALLBACK JSPropertyIdOp)(JSContext *cx, JSObject *obj, jsid id,
+ jsval *vp);
+
+/*
+ * Get or set attributes of the property obj[id]. Return false on error or
+ * exception, true with current attributes in *attrsp. If prop is non-null,
+ * it must come from the *propp out parameter of a prior JSDefinePropOp or
+ * JSLookupPropOp call.
+ */
+typedef JSBool
+(* JS_DLL_CALLBACK JSAttributesOp)(JSContext *cx, JSObject *obj, jsid id,
+ JSProperty *prop, uintN *attrsp);
+
+/*
+ * JSObjectOps.checkAccess type: check whether obj[id] may be accessed per
+ * mode, returning false on error/exception, true on success with obj[id]'s
+ * last-got value in *vp, and its attributes in *attrsp.
+ */
+typedef JSBool
+(* JS_DLL_CALLBACK JSCheckAccessIdOp)(JSContext *cx, JSObject *obj, jsid id,
+ JSAccessMode mode, jsval *vp,
+ uintN *attrsp);
+
+/*
+ * A generic type for functions mapping an object to another object, or null
+ * if an error or exception was thrown on cx. Used by JSObjectOps.thisObject
+ * at present.
+ */
+typedef JSObject *
+(* JS_DLL_CALLBACK JSObjectOp)(JSContext *cx, JSObject *obj);
+
+/*
+ * A generic type for functions taking a context, object, and property, with
+ * no return value. Used by JSObjectOps.dropProperty currently (see above,
+ * JSDefinePropOp and JSLookupPropOp, for the object-locking protocol in which
+ * dropProperty participates).
+ */
+typedef void
+(* JS_DLL_CALLBACK JSPropertyRefOp)(JSContext *cx, JSObject *obj,
+ JSProperty *prop);
+
+/*
+ * Function type for JSObjectOps.setProto and JSObjectOps.setParent. These
+ * hooks must check for cycles without deadlocking, and otherwise take special
+ * steps. See jsobj.c, js_SetProtoOrParent, for an example.
+ */
+typedef JSBool
+(* JS_DLL_CALLBACK JSSetObjectSlotOp)(JSContext *cx, JSObject *obj,
+ uint32 slot, JSObject *pobj);
+
+/*
+ * Get and set a required slot, one that should already have been allocated.
+ * These operations are infallible, so required slots must be pre-allocated,
+ * or implementations must suppress out-of-memory errors. The native ops
+ * (js_ObjectOps, see jsobj.c) access slots reserved by including a call to
+ * the JSCLASS_HAS_RESERVED_SLOTS(n) macro in the JSClass.flags initializer.
+ *
+ * NB: the slot parameter is a zero-based index into obj->slots[], unlike the
+ * index parameter to the JS_GetReservedSlot and JS_SetReservedSlot API entry
+ * points, which is a zero-based index into the JSCLASS_RESERVED_SLOTS(clasp)
+ * reserved slots that come after the initial well-known slots: proto, parent,
+ * class, and optionally, the private data slot.
+ */
+typedef jsval
+(* JS_DLL_CALLBACK JSGetRequiredSlotOp)(JSContext *cx, JSObject *obj,
+ uint32 slot);
+
+typedef JSBool
+(* JS_DLL_CALLBACK JSSetRequiredSlotOp)(JSContext *cx, JSObject *obj,
+ uint32 slot, jsval v);
+
+typedef JSObject *
+(* JS_DLL_CALLBACK JSGetMethodOp)(JSContext *cx, JSObject *obj, jsid id,
+ jsval *vp);
+
+typedef JSBool
+(* JS_DLL_CALLBACK JSSetMethodOp)(JSContext *cx, JSObject *obj, jsid id,
+ jsval *vp);
+
+typedef JSBool
+(* JS_DLL_CALLBACK JSEnumerateValuesOp)(JSContext *cx, JSObject *obj,
+ JSIterateOp enum_op,
+ jsval *statep, jsid *idp, jsval *vp);
+
+typedef JSBool
+(* JS_DLL_CALLBACK JSEqualityOp)(JSContext *cx, JSObject *obj, jsval v,
+ JSBool *bp);
+
+typedef JSBool
+(* JS_DLL_CALLBACK JSConcatenateOp)(JSContext *cx, JSObject *obj, jsval v,
+ jsval *vp);
+
+/* Typedef for native functions called by the JS VM. */
+
+typedef JSBool
+(* JS_DLL_CALLBACK JSNative)(JSContext *cx, JSObject *obj, uintN argc,
+ jsval *argv, jsval *rval);
+
+/* Callbacks and their arguments. */
+
+typedef enum JSContextOp {
+ JSCONTEXT_NEW,
+ JSCONTEXT_DESTROY
+} JSContextOp;
+
+/*
+ * The possible values for contextOp when the runtime calls the callback are:
+ *   JSCONTEXT_NEW      JS_NewContext successfully created a new JSContext
+ * instance. The callback can initialize the instance as
+ * required. If the callback returns false, the instance
+ * will be destroyed and JS_NewContext returns null. In
+ * this case the callback is not called again.
+ * JSCONTEXT_DESTROY One of JS_DestroyContext* methods is called. The
+ * callback may perform its own cleanup and must always
+ * return true.
+ * Any other value For future compatibility the callback must do nothing
+ * and return true in this case.
+ */
+typedef JSBool
+(* JS_DLL_CALLBACK JSContextCallback)(JSContext *cx, uintN contextOp);
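+
+/*
+ * Illustrative sketch only (compiled out): a context callback honoring the
+ * contract above. JS_SetVersion comes from jsapi.h; the per-context setup it
+ * stands in for is an assumption.
+ */
+#if 0
+static JSBool
+example_context_callback(JSContext *cx, uintN contextOp)
+{
+    if (contextOp == JSCONTEXT_NEW) {
+        JS_SetVersion(cx, JSVERSION_1_7);   /* per-context initialization */
+        return JS_TRUE;     /* returning JS_FALSE would make JS_NewContext fail */
+    }
+    return JS_TRUE;         /* JSCONTEXT_DESTROY and any future op: do nothing */
+}
+#endif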
+
+typedef enum JSGCStatus {
+ JSGC_BEGIN,
+ JSGC_END,
+ JSGC_MARK_END,
+ JSGC_FINALIZE_END
+} JSGCStatus;
+
+typedef JSBool
+(* JS_DLL_CALLBACK JSGCCallback)(JSContext *cx, JSGCStatus status);
+
+typedef JSBool
+(* JS_DLL_CALLBACK JSBranchCallback)(JSContext *cx, JSScript *script);
+
+typedef void
+(* JS_DLL_CALLBACK JSErrorReporter)(JSContext *cx, const char *message,
+ JSErrorReport *report);
+
+/*
+ * Possible exception types. These types are part of a JSErrorFormatString
+ * structure. They define which error to throw in case of a runtime error.
+ * JSEXN_NONE marks an unthrowable error.
+ */
+typedef enum JSExnType {
+ JSEXN_NONE = -1,
+ JSEXN_ERR,
+ JSEXN_INTERNALERR,
+ JSEXN_EVALERR,
+ JSEXN_RANGEERR,
+ JSEXN_REFERENCEERR,
+ JSEXN_SYNTAXERR,
+ JSEXN_TYPEERR,
+ JSEXN_URIERR,
+ JSEXN_LIMIT
+} JSExnType;
+
+typedef struct JSErrorFormatString {
+ /* The error format string (UTF-8 if JS_C_STRINGS_ARE_UTF8 is defined). */
+ const char *format;
+
+ /* The number of arguments to expand in the formatted error message. */
+ uint16 argCount;
+
+ /* One of the JSExnType constants above. */
+ int16 exnType;
+} JSErrorFormatString;
+
+typedef const JSErrorFormatString *
+(* JS_DLL_CALLBACK JSErrorCallback)(void *userRef, const char *locale,
+ const uintN errorNumber);
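+
+/*
+ * Illustrative sketch only (compiled out): a static table in the layout of
+ * JSErrorFormatString above, consulted by a JSErrorCallback. The messages
+ * and names are assumptions; "{0}" marks where the first argument is
+ * substituted (cf. js.msg).
+ */
+#if 0
+static JSErrorFormatString example_error_formats[] = {
+    { "internal error",     0, JSEXN_INTERNALERR },
+    { "bad argument: {0}",  1, JSEXN_TYPEERR }
+};
+
+static const JSErrorFormatString *
+example_error_callback(void *userRef, const char *locale, const uintN errorNumber)
+{
+    if (errorNumber < sizeof example_error_formats / sizeof example_error_formats[0])
+        return &example_error_formats[errorNumber];
+    return NULL;
+}
+#endif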
+
+#ifdef va_start
+#define JS_ARGUMENT_FORMATTER_DEFINED 1
+
+typedef JSBool
+(* JS_DLL_CALLBACK JSArgumentFormatter)(JSContext *cx, const char *format,
+ JSBool fromJS, jsval **vpp,
+ va_list *app);
+#endif
+
+typedef JSBool
+(* JS_DLL_CALLBACK JSLocaleToUpperCase)(JSContext *cx, JSString *src,
+ jsval *rval);
+
+typedef JSBool
+(* JS_DLL_CALLBACK JSLocaleToLowerCase)(JSContext *cx, JSString *src,
+ jsval *rval);
+
+typedef JSBool
+(* JS_DLL_CALLBACK JSLocaleCompare)(JSContext *cx,
+ JSString *src1, JSString *src2,
+ jsval *rval);
+
+typedef JSBool
+(* JS_DLL_CALLBACK JSLocaleToUnicode)(JSContext *cx, char *src, jsval *rval);
+
+/*
+ * Security protocol types.
+ */
+typedef struct JSPrincipals JSPrincipals;
+
+/*
+ * XDR-encode or -decode a principals instance, based on whether xdr->mode is
+ * JSXDR_ENCODE, in which case *principalsp should be encoded; or JSXDR_DECODE,
+ * in which case implementations must return a held (via JSPRINCIPALS_HOLD),
+ * non-null *principalsp out parameter. Return true on success, false on any
+ * error, which the implementation must have reported.
+ */
+typedef JSBool
+(* JS_DLL_CALLBACK JSPrincipalsTranscoder)(JSXDRState *xdr,
+ JSPrincipals **principalsp);
+
+/*
+ * Return a weak reference to the principals associated with obj, possibly via
+ * the immutable parent chain leading from obj to a top-level container (e.g.,
+ * a window object in the DOM level 0). If there are no principals associated
+ * with obj, return null. Therefore null does not mean an error was reported;
+ * in no event should an error be reported or an exception be thrown by this
+ * callback's implementation.
+ */
+typedef JSPrincipals *
+(* JS_DLL_CALLBACK JSObjectPrincipalsFinder)(JSContext *cx, JSObject *obj);
+
+JS_END_EXTERN_C
+
+#endif /* jspubtd_h___ */
diff --git a/third_party/js-1.7/jsregexp.c b/third_party/js-1.7/jsregexp.c
new file mode 100644
index 0000000..5d2fce4
--- /dev/null
+++ b/third_party/js-1.7/jsregexp.c
@@ -0,0 +1,4206 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set sw=4 ts=8 et tw=78:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * JS regular expressions, after Perl.
+ */
+#include "jsstddef.h"
+#include <stdlib.h>
+#include <string.h>
+#include "jstypes.h"
+#include "jsarena.h" /* Added by JSIFY */
+#include "jsutil.h" /* Added by JSIFY */
+#include "jsapi.h"
+#include "jsarray.h"
+#include "jsatom.h"
+#include "jscntxt.h"
+#include "jsconfig.h"
+#include "jsfun.h"
+#include "jsgc.h"
+#include "jsinterp.h"
+#include "jslock.h"
+#include "jsnum.h"
+#include "jsobj.h"
+#include "jsopcode.h"
+#include "jsregexp.h"
+#include "jsscan.h"
+#include "jsstr.h"
+
+/* Note: contiguity of 'simple opcodes' is important for SimpleMatch() */
+typedef enum REOp {
+ REOP_EMPTY = 0, /* match rest of input against rest of r.e. */
+ REOP_ALT = 1, /* alternative subexpressions in kid and next */
+ REOP_SIMPLE_START = 2, /* start of 'simple opcodes' */
+ REOP_BOL = 2, /* beginning of input (or line if multiline) */
+ REOP_EOL = 3, /* end of input (or line if multiline) */
+ REOP_WBDRY = 4, /* match "" at word boundary */
+ REOP_WNONBDRY = 5, /* match "" at word non-boundary */
+ REOP_DOT = 6, /* stands for any character */
+ REOP_DIGIT = 7, /* match a digit char: [0-9] */
+ REOP_NONDIGIT = 8, /* match a non-digit char: [^0-9] */
+ REOP_ALNUM = 9, /* match an alphanumeric char: [0-9a-z_A-Z] */
+ REOP_NONALNUM = 10, /* match a non-alphanumeric char: [^0-9a-z_A-Z] */
+ REOP_SPACE = 11, /* match a whitespace char */
+ REOP_NONSPACE = 12, /* match a non-whitespace char */
+ REOP_BACKREF = 13, /* back-reference (e.g., \1) to a parenthetical */
+ REOP_FLAT = 14, /* match a flat string */
+ REOP_FLAT1 = 15, /* match a single char */
+ REOP_FLATi = 16, /* case-independent REOP_FLAT */
+ REOP_FLAT1i = 17, /* case-independent REOP_FLAT1 */
+ REOP_UCFLAT1 = 18, /* single Unicode char */
+ REOP_UCFLAT1i = 19, /* case-independent REOP_UCFLAT1 */
+ REOP_UCFLAT = 20, /* flat Unicode string; len immediate counts chars */
+ REOP_UCFLATi = 21, /* case-independent REOP_UCFLAT */
+ REOP_CLASS = 22, /* character class with index */
+ REOP_NCLASS = 23, /* negated character class with index */
+ REOP_SIMPLE_END = 23, /* end of 'simple opcodes' */
+ REOP_QUANT = 25, /* quantified atom: atom{1,2} */
+ REOP_STAR = 26, /* zero or more occurrences of kid */
+ REOP_PLUS = 27, /* one or more occurrences of kid */
+ REOP_OPT = 28, /* optional subexpression in kid */
+ REOP_LPAREN = 29, /* left paren bytecode: kid is u.num'th sub-regexp */
+ REOP_RPAREN = 30, /* right paren bytecode */
+ REOP_JUMP = 31, /* for deoptimized closure loops */
+ REOP_DOTSTAR = 32, /* optimize .* to use a single opcode */
+ REOP_ANCHOR = 33, /* like .* but skips left context to unanchored r.e. */
+ REOP_EOLONLY = 34, /* $ not preceded by any pattern */
+ REOP_BACKREFi = 37, /* case-independent REOP_BACKREF */
+ REOP_LPARENNON = 41, /* non-capturing version of REOP_LPAREN */
+ REOP_ASSERT = 43, /* zero width positive lookahead assertion */
+ REOP_ASSERT_NOT = 44, /* zero width negative lookahead assertion */
+ REOP_ASSERTTEST = 45, /* sentinel at end of assertion child */
+ REOP_ASSERTNOTTEST = 46, /* sentinel at end of !assertion child */
+ REOP_MINIMALSTAR = 47, /* non-greedy version of * */
+ REOP_MINIMALPLUS = 48, /* non-greedy version of + */
+ REOP_MINIMALOPT = 49, /* non-greedy version of ? */
+ REOP_MINIMALQUANT = 50, /* non-greedy version of {} */
+ REOP_ENDCHILD = 51, /* sentinel at end of quantifier child */
+ REOP_REPEAT = 52, /* directs execution of greedy quantifier */
+ REOP_MINIMALREPEAT = 53, /* directs execution of non-greedy quantifier */
+ REOP_ALTPREREQ = 54, /* prerequisite for ALT, either of two chars */
+ REOP_ALTPREREQ2 = 55, /* prerequisite for ALT, a char or a class */
+ REOP_ENDALT = 56, /* end of final alternate */
+ REOP_CONCAT = 57, /* concatenation of terms (parse time only) */
+
+ REOP_END
+} REOp;
+
+#define REOP_IS_SIMPLE(op) ((unsigned)((op) - REOP_SIMPLE_START) < \
+ (unsigned)REOP_SIMPLE_END)
+
+struct RENode {
+ REOp op; /* r.e. op bytecode */
+ RENode *next; /* next in concatenation order */
+ void *kid; /* first operand */
+ union {
+ void *kid2; /* second operand */
+ jsint num; /* could be a number */
+ size_t parenIndex; /* or a parenthesis index */
+ struct { /* or a quantifier range */
+ uintN min;
+ uintN max;
+ JSPackedBool greedy;
+ } range;
+ struct { /* or a character class */
+ size_t startIndex;
+ size_t kidlen; /* length of string at kid, in jschars */
+ size_t index; /* index into class list */
+ uint16 bmsize; /* bitmap size, based on max char code */
+ JSPackedBool sense;
+ } ucclass;
+ struct { /* or a literal sequence */
+ jschar chr; /* of one character */
+ size_t length; /* or many (via the kid) */
+ } flat;
+ struct {
+ RENode *kid2; /* second operand from ALT */
+ jschar ch1; /* match char for ALTPREREQ */
+ jschar ch2; /* ditto, or class index for ALTPREREQ2 */
+ } altprereq;
+ } u;
+};
+
+#define RE_IS_LETTER(c) (((c >= 'A') && (c <= 'Z')) || \
+ ((c >= 'a') && (c <= 'z')) )
+#define RE_IS_LINE_TERM(c) ((c == '\n') || (c == '\r') || \
+ (c == LINE_SEPARATOR) || (c == PARA_SEPARATOR))
+
+#define CLASS_CACHE_SIZE 4
+
+typedef struct CompilerState {
+ JSContext *context;
+ JSTokenStream *tokenStream; /* For reporting errors */
+ const jschar *cpbegin;
+ const jschar *cpend;
+ const jschar *cp;
+ size_t parenCount;
+ size_t classCount; /* number of [] encountered */
+ size_t treeDepth; /* maximum depth of parse tree */
+ size_t progLength; /* estimated bytecode length */
+ RENode *result;
+ size_t classBitmapsMem; /* memory to hold all class bitmaps */
+ struct {
+ const jschar *start; /* small cache of class strings */
+ size_t length; /* since they're often the same */
+ size_t index;
+ } classCache[CLASS_CACHE_SIZE];
+ uint16 flags;
+} CompilerState;
+
+typedef struct EmitStateStackEntry {
+ jsbytecode *altHead; /* start of REOP_ALT* opcode */
+ jsbytecode *nextAltFixup; /* fixup pointer to next-alt offset */
+ jsbytecode *nextTermFixup; /* fixup ptr. to REOP_JUMP offset */
+ jsbytecode *endTermFixup; /* fixup ptr. to REOPT_ALTPREREQ* offset */
+ RENode *continueNode; /* original REOP_ALT* node being stacked */
+ jsbytecode continueOp; /* REOP_JUMP or REOP_ENDALT continuation */
+ JSPackedBool jumpToJumpFlag; /* true if we've patched jump-to-jump to
+ avoid 16-bit unsigned offset overflow */
+} EmitStateStackEntry;
+
+/*
+ * Immediate operand sizes and getter/setters. Unlike the ones in jsopcode.h,
+ * the getters and setters take the pc of the offset, not of the opcode before
+ * the offset.
+ */
+#define ARG_LEN 2
+#define GET_ARG(pc) ((uint16)(((pc)[0] << 8) | (pc)[1]))
+#define SET_ARG(pc, arg) ((pc)[0] = (jsbytecode) ((arg) >> 8), \
+ (pc)[1] = (jsbytecode) (arg))
+
+#define OFFSET_LEN ARG_LEN
+#define OFFSET_MAX (JS_BIT(ARG_LEN * 8) - 1)
+#define GET_OFFSET(pc) GET_ARG(pc)
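+
+/*
+ * For example, SET_ARG(pc, 0x1234) stores pc[0] = 0x12 and pc[1] = 0x34,
+ * and GET_ARG(pc) reads the same 16-bit value back in big-endian order.
+ */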
+
+/*
+ * The maximum supported tree depth is the maximum number of entries the
+ * EmitStateStackEntry stack may hold; for sanity, we cap that stack at 2^24 bytes.
+ */
+#define TREE_DEPTH_MAX (JS_BIT(24) / sizeof(EmitStateStackEntry))
+
+/*
+ * The maximum memory that can be allocated for class bitmaps.
+ * For sanity, we limit it to 2^24 bytes.
+ */
+#define CLASS_BITMAPS_MEM_LIMIT JS_BIT(24)
+
+/*
+ * Functions to get the size of, and to write/read, bytecode that represents
+ * small indexes compactly.
+ * Each byte in the code represents a 7-bit chunk of the index. The 8th bit,
+ * when set, indicates that the following byte carries more bits of the index;
+ * otherwise this is the last byte, holding the highest-order index bits.
+ */
+static size_t
+GetCompactIndexWidth(size_t index)
+{
+ size_t width;
+
+ for (width = 1; (index >>= 7) != 0; ++width) { }
+ return width;
+}
+
+static jsbytecode *
+WriteCompactIndex(jsbytecode *pc, size_t index)
+{
+ size_t next;
+
+ while ((next = index >> 7) != 0) {
+ *pc++ = (jsbytecode)(index | 0x80);
+ index = next;
+ }
+ *pc++ = (jsbytecode)index;
+ return pc;
+}
+
+static jsbytecode *
+ReadCompactIndex(jsbytecode *pc, size_t *result)
+{
+ size_t nextByte;
+
+ nextByte = *pc++;
+ if ((nextByte & 0x80) == 0) {
+ /*
+ * Short-circuit the most common case when compact index <= 127.
+ */
+ *result = nextByte;
+ } else {
+ size_t shift = 7;
+ *result = 0x7F & nextByte;
+ do {
+ nextByte = *pc++;
+ *result |= (nextByte & 0x7F) << shift;
+ shift += 7;
+ } while ((nextByte & 0x80) != 0);
+ }
+ return pc;
+}
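+
+/*
+ * Example: the index 300 (binary 1 0010 1100) is written as two bytes,
+ * 0xAC (the low seven bits, 0101100, with the continuation bit set)
+ * followed by 0x02, so GetCompactIndexWidth(300) == 2 and
+ * ReadCompactIndex() reassembles (0x02 << 7) | 0x2C == 300.
+ */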
+
+typedef struct RECapture {
+ ptrdiff_t index; /* start of contents, -1 for empty */
+ size_t length; /* length of capture */
+} RECapture;
+
+typedef struct REMatchState {
+ const jschar *cp;
+ RECapture parens[1]; /* first of 're->parenCount' captures,
+ allocated at end of this struct */
+} REMatchState;
+
+struct REBackTrackData;
+
+typedef struct REProgState {
+ jsbytecode *continue_pc; /* current continuation data */
+ jsbytecode continue_op;
+ ptrdiff_t index; /* progress in text */
+ size_t parenSoFar; /* highest indexed paren started */
+ union {
+ struct {
+ uintN min; /* current quantifier limits */
+ uintN max;
+ } quantifier;
+ struct {
+ size_t top; /* backtrack stack state */
+ size_t sz;
+ } assertion;
+ } u;
+} REProgState;
+
+typedef struct REBackTrackData {
+ size_t sz; /* size of previous stack entry */
+ jsbytecode *backtrack_pc; /* where to backtrack to */
+ jsbytecode backtrack_op;
+ const jschar *cp; /* index in text of match at backtrack */
+ size_t parenIndex; /* start index of saved paren contents */
+ size_t parenCount; /* # of saved paren contents */
+ size_t saveStateStackTop; /* number of parent states */
+ /* saved parent states follow */
+ /* saved paren contents follow */
+} REBackTrackData;
+
+#define INITIAL_STATESTACK 100
+#define INITIAL_BACKTRACK 8000
+
+typedef struct REGlobalData {
+ JSContext *cx;
+ JSRegExp *regexp; /* the RE in execution */
+ JSBool ok; /* runtime error (out_of_memory only?) */
+ size_t start; /* offset to start at */
+ ptrdiff_t skipped; /* chars skipped anchoring this r.e. */
+ const jschar *cpbegin; /* text base address */
+ const jschar *cpend; /* text limit address */
+
+ REProgState *stateStack; /* stack of state of current parents */
+ size_t stateStackTop;
+ size_t stateStackLimit;
+
+ REBackTrackData *backTrackStack;/* stack of matched-so-far positions */
+ REBackTrackData *backTrackSP;
+ size_t backTrackStackSize;
+ size_t cursz; /* size of current stack entry */
+
+ JSArenaPool pool; /* It's faster to use one malloc'd pool
+ than to malloc/free the three items
+ that are allocated from this pool */
+} REGlobalData;
+
+/*
+ * 1. If IgnoreCase is false, return ch.
+ * 2. Let u be ch converted to upper case as if by calling
+ * String.prototype.toUpperCase on the one-character string ch.
+ * 3. If u does not consist of a single character, return ch.
+ * 4. Let cu be u's character.
+ * 5. If ch's code point value is greater than or equal to decimal 128 and cu's
+ * code point value is less than decimal 128, then return ch.
+ * 6. Return cu.
+ */
+static jschar
+upcase(jschar ch)
+{
+ jschar cu = JS_TOUPPER(ch);
+ if (ch >= 128 && cu < 128)
+ return ch;
+ return cu;
+}
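+
+/*
+ * For example, the Unicode upper case of U+0131 (dotless i) is 'I' (U+0049);
+ * since the input code is >= 128 while the result is < 128, step 5 applies
+ * and upcase() returns the character unchanged.
+ */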
+
+static jschar
+downcase(jschar ch)
+{
+ jschar cl = JS_TOLOWER(ch);
+ if (cl >= 128 && ch < 128)
+ return ch;
+ return cl;
+}
+
+/* Construct and initialize an RENode, returning NULL for out-of-memory */
+static RENode *
+NewRENode(CompilerState *state, REOp op)
+{
+ JSContext *cx;
+ RENode *ren;
+
+ cx = state->context;
+ JS_ARENA_ALLOCATE_CAST(ren, RENode *, &cx->tempPool, sizeof *ren);
+ if (!ren) {
+ JS_ReportOutOfMemory(cx);
+ return NULL;
+ }
+ ren->op = op;
+ ren->next = NULL;
+ ren->kid = NULL;
+ return ren;
+}
+
+/*
+ * Validates c as an ASCII hex digit and converts it to its numeric value.
+ */
+static JSBool
+isASCIIHexDigit(jschar c, uintN *digit)
+{
+ uintN cv = c;
+
+ if (cv < '0')
+ return JS_FALSE;
+ if (cv <= '9') {
+ *digit = cv - '0';
+ return JS_TRUE;
+ }
+ cv |= 0x20;
+ if (cv >= 'a' && cv <= 'f') {
+ *digit = cv - 'a' + 10;
+ return JS_TRUE;
+ }
+ return JS_FALSE;
+}
+
+
+typedef struct {
+ REOp op;
+ const jschar *errPos;
+ size_t parenIndex;
+} REOpData;
+
+
+/*
+ * Process the op against the two top operands, reducing them to a single
+ * operand in the penultimate slot. Update progLength and treeDepth.
+ */
+static JSBool
+ProcessOp(CompilerState *state, REOpData *opData, RENode **operandStack,
+ intN operandSP)
+{
+ RENode *result;
+
+ switch (opData->op) {
+ case REOP_ALT:
+ result = NewRENode(state, REOP_ALT);
+ if (!result)
+ return JS_FALSE;
+ result->kid = operandStack[operandSP - 2];
+ result->u.kid2 = operandStack[operandSP - 1];
+ operandStack[operandSP - 2] = result;
+
+ if (state->treeDepth == TREE_DEPTH_MAX) {
+ js_ReportCompileErrorNumber(state->context, state->tokenStream,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_REGEXP_TOO_COMPLEX);
+ return JS_FALSE;
+ }
+ ++state->treeDepth;
+
+ /*
+ * Look at both alternates to see if there's a FLAT or a CLASS at
+ * the start of each. If so, use a prerequisite match.
+ */
+ if (((RENode *) result->kid)->op == REOP_FLAT &&
+ ((RENode *) result->u.kid2)->op == REOP_FLAT &&
+ (state->flags & JSREG_FOLD) == 0) {
+ result->op = REOP_ALTPREREQ;
+ result->u.altprereq.ch1 = ((RENode *) result->kid)->u.flat.chr;
+ result->u.altprereq.ch2 = ((RENode *) result->u.kid2)->u.flat.chr;
+ /* ALTPREREQ, <end>, uch1, uch2, <next>, ...,
+ JUMP, <end> ... ENDALT */
+ state->progLength += 13;
+ }
+ else
+ if (((RENode *) result->kid)->op == REOP_CLASS &&
+ ((RENode *) result->kid)->u.ucclass.index < 256 &&
+ ((RENode *) result->u.kid2)->op == REOP_FLAT &&
+ (state->flags & JSREG_FOLD) == 0) {
+ result->op = REOP_ALTPREREQ2;
+ result->u.altprereq.ch1 = ((RENode *) result->u.kid2)->u.flat.chr;
+ result->u.altprereq.ch2 = ((RENode *) result->kid)->u.ucclass.index;
+ /* ALTPREREQ2, <end>, uch1, uch2, <next>, ...,
+ JUMP, <end> ... ENDALT */
+ state->progLength += 13;
+ }
+ else
+ if (((RENode *) result->kid)->op == REOP_FLAT &&
+ ((RENode *) result->u.kid2)->op == REOP_CLASS &&
+ ((RENode *) result->u.kid2)->u.ucclass.index < 256 &&
+ (state->flags & JSREG_FOLD) == 0) {
+ result->op = REOP_ALTPREREQ2;
+ result->u.altprereq.ch1 = ((RENode *) result->kid)->u.flat.chr;
+ result->u.altprereq.ch2 =
+ ((RENode *) result->u.kid2)->u.ucclass.index;
+ /* ALTPREREQ2, <end>, uch1, uch2, <next>, ...,
+ JUMP, <end> ... ENDALT */
+ state->progLength += 13;
+ }
+ else {
+ /* ALT, <next>, ..., JUMP, <end> ... ENDALT */
+ state->progLength += 7;
+ }
+ break;
+
+ case REOP_CONCAT:
+ result = operandStack[operandSP - 2];
+ while (result->next)
+ result = result->next;
+ result->next = operandStack[operandSP - 1];
+ break;
+
+ case REOP_ASSERT:
+ case REOP_ASSERT_NOT:
+ case REOP_LPARENNON:
+ case REOP_LPAREN:
+ /* These should have been processed by a close paren. */
+ js_ReportCompileErrorNumberUC(state->context, state->tokenStream,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_MISSING_PAREN, opData->errPos);
+ return JS_FALSE;
+
+ default:;
+ }
+ return JS_TRUE;
+}
+
+/*
+ * Parser forward declarations.
+ */
+static JSBool ParseTerm(CompilerState *state);
+static JSBool ParseQuantifier(CompilerState *state);
+static intN ParseMinMaxQuantifier(CompilerState *state, JSBool ignoreValues);
+
+/*
+ * Top-down regular expression grammar, based closely on Perl4.
+ *
+ * regexp: altern A regular expression is one or more
+ * altern '|' regexp alternatives separated by vertical bar.
+ */
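+/*
+ * For example, parsing /a|bc/ pushes FLAT 'a', an ALT operator, FLAT 'b',
+ * a CONCAT operator and FLAT 'c'; ProcessOp chains 'b' and 'c' via next and
+ * then, because both alternatives start with a FLAT and JSREG_FOLD is off,
+ * rewrites the ALT node into REOP_ALTPREREQ with ch1 = 'a' and ch2 = 'b'.
+ */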
+#define INITIAL_STACK_SIZE 128
+
+static JSBool
+ParseRegExp(CompilerState *state)
+{
+ size_t parenIndex;
+ RENode *operand;
+ REOpData *operatorStack;
+ RENode **operandStack;
+ REOp op;
+ intN i;
+ JSBool result = JS_FALSE;
+
+ intN operatorSP = 0, operatorStackSize = INITIAL_STACK_SIZE;
+ intN operandSP = 0, operandStackSize = INITIAL_STACK_SIZE;
+
+ /* Watch out for empty regexp */
+ if (state->cp == state->cpend) {
+ state->result = NewRENode(state, REOP_EMPTY);
+ return (state->result != NULL);
+ }
+
+ operatorStack = (REOpData *)
+ JS_malloc(state->context, sizeof(REOpData) * operatorStackSize);
+ if (!operatorStack)
+ return JS_FALSE;
+
+ operandStack = (RENode **)
+ JS_malloc(state->context, sizeof(RENode *) * operandStackSize);
+ if (!operandStack)
+ goto out;
+
+ for (;;) {
+ parenIndex = state->parenCount;
+ if (state->cp == state->cpend) {
+ /*
+ * If we are at the end of the regexp and we're short one or more
+ * operands, the regexp must have the form /x|/ or some such, with
+ * left parentheses making us short more than one operand.
+ */
+ if (operatorSP >= operandSP) {
+ operand = NewRENode(state, REOP_EMPTY);
+ if (!operand)
+ goto out;
+ goto pushOperand;
+ }
+ } else {
+ switch (*state->cp) {
+ case '(':
+ ++state->cp;
+ if (state->cp + 1 < state->cpend &&
+ *state->cp == '?' &&
+ (state->cp[1] == '=' ||
+ state->cp[1] == '!' ||
+ state->cp[1] == ':')) {
+ switch (state->cp[1]) {
+ case '=':
+ op = REOP_ASSERT;
+ /* ASSERT, <next>, ... ASSERTTEST */
+ state->progLength += 4;
+ break;
+ case '!':
+ op = REOP_ASSERT_NOT;
+ /* ASSERTNOT, <next>, ... ASSERTNOTTEST */
+ state->progLength += 4;
+ break;
+ default:
+ op = REOP_LPARENNON;
+ break;
+ }
+ state->cp += 2;
+ } else {
+ op = REOP_LPAREN;
+ /* LPAREN, <index>, ... RPAREN, <index> */
+ state->progLength
+ += 2 * (1 + GetCompactIndexWidth(parenIndex));
+ state->parenCount++;
+ if (state->parenCount == 65535) {
+ js_ReportCompileErrorNumber(state->context,
+ state->tokenStream,
+ JSREPORT_TS |
+ JSREPORT_ERROR,
+ JSMSG_TOO_MANY_PARENS);
+ goto out;
+ }
+ }
+ goto pushOperator;
+
+ case ')':
+ /*
+ * If there's no stacked open parenthesis, throw syntax error.
+ */
+ for (i = operatorSP - 1; ; i--) {
+ if (i < 0) {
+ js_ReportCompileErrorNumber(state->context,
+ state->tokenStream,
+ JSREPORT_TS |
+ JSREPORT_ERROR,
+ JSMSG_UNMATCHED_RIGHT_PAREN);
+ goto out;
+ }
+ if (operatorStack[i].op == REOP_ASSERT ||
+ operatorStack[i].op == REOP_ASSERT_NOT ||
+ operatorStack[i].op == REOP_LPARENNON ||
+ operatorStack[i].op == REOP_LPAREN) {
+ break;
+ }
+ }
+ /* FALL THROUGH */
+
+ case '|':
+ /* Expected an operand before these, so make an empty one */
+ operand = NewRENode(state, REOP_EMPTY);
+ if (!operand)
+ goto out;
+ goto pushOperand;
+
+ default:
+ if (!ParseTerm(state))
+ goto out;
+ operand = state->result;
+pushOperand:
+ if (operandSP == operandStackSize) {
+ operandStackSize += operandStackSize;
+ operandStack = (RENode **)
+ JS_realloc(state->context, operandStack,
+ sizeof(RENode *) * operandStackSize);
+ if (!operandStack)
+ goto out;
+ }
+ operandStack[operandSP++] = operand;
+ break;
+ }
+ }
+
+ /* At the end; process remaining operators. */
+restartOperator:
+ if (state->cp == state->cpend) {
+ while (operatorSP) {
+ --operatorSP;
+ if (!ProcessOp(state, &operatorStack[operatorSP],
+ operandStack, operandSP))
+ goto out;
+ --operandSP;
+ }
+ JS_ASSERT(operandSP == 1);
+ state->result = operandStack[0];
+ result = JS_TRUE;
+ goto out;
+ }
+
+ switch (*state->cp) {
+ case '|':
+ /* Process any stacked 'concat' operators */
+ ++state->cp;
+ while (operatorSP &&
+ operatorStack[operatorSP - 1].op == REOP_CONCAT) {
+ --operatorSP;
+ if (!ProcessOp(state, &operatorStack[operatorSP],
+ operandStack, operandSP)) {
+ goto out;
+ }
+ --operandSP;
+ }
+ op = REOP_ALT;
+ goto pushOperator;
+
+ case ')':
+ /*
+ * If there's no stacked open parenthesis, throw syntax error.
+ */
+ for (i = operatorSP - 1; ; i--) {
+ if (i < 0) {
+ js_ReportCompileErrorNumber(state->context,
+ state->tokenStream,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_UNMATCHED_RIGHT_PAREN);
+ goto out;
+ }
+ if (operatorStack[i].op == REOP_ASSERT ||
+ operatorStack[i].op == REOP_ASSERT_NOT ||
+ operatorStack[i].op == REOP_LPARENNON ||
+ operatorStack[i].op == REOP_LPAREN) {
+ break;
+ }
+ }
+ ++state->cp;
+
+ /* Process everything on the stack until the open parenthesis. */
+ for (;;) {
+ JS_ASSERT(operatorSP);
+ --operatorSP;
+ switch (operatorStack[operatorSP].op) {
+ case REOP_ASSERT:
+ case REOP_ASSERT_NOT:
+ case REOP_LPAREN:
+ operand = NewRENode(state, operatorStack[operatorSP].op);
+ if (!operand)
+ goto out;
+ operand->u.parenIndex =
+ operatorStack[operatorSP].parenIndex;
+ JS_ASSERT(operandSP);
+ operand->kid = operandStack[operandSP - 1];
+ operandStack[operandSP - 1] = operand;
+ if (state->treeDepth == TREE_DEPTH_MAX) {
+ js_ReportCompileErrorNumber(state->context,
+ state->tokenStream,
+ JSREPORT_TS |
+ JSREPORT_ERROR,
+ JSMSG_REGEXP_TOO_COMPLEX);
+ goto out;
+ }
+ ++state->treeDepth;
+ /* FALL THROUGH */
+
+ case REOP_LPARENNON:
+ state->result = operandStack[operandSP - 1];
+ if (!ParseQuantifier(state))
+ goto out;
+ operandStack[operandSP - 1] = state->result;
+ goto restartOperator;
+ default:
+ if (!ProcessOp(state, &operatorStack[operatorSP],
+ operandStack, operandSP))
+ goto out;
+ --operandSP;
+ break;
+ }
+ }
+ break;
+
+ case '{':
+ {
+ const jschar *errp = state->cp;
+
+ if (ParseMinMaxQuantifier(state, JS_TRUE) < 0) {
+ /*
+ * This didn't even scan correctly as a quantifier, so we should
+ * treat it as flat.
+ */
+ op = REOP_CONCAT;
+ goto pushOperator;
+ }
+
+ state->cp = errp;
+ /* FALL THROUGH */
+ }
+
+ case '+':
+ case '*':
+ case '?':
+ js_ReportCompileErrorNumberUC(state->context, state->tokenStream,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_BAD_QUANTIFIER, state->cp);
+ result = JS_FALSE;
+ goto out;
+
+ default:
+ /* Anything else is the start of the next term. */
+ op = REOP_CONCAT;
+pushOperator:
+ if (operatorSP == operatorStackSize) {
+ operatorStackSize += operatorStackSize;
+ operatorStack = (REOpData *)
+ JS_realloc(state->context, operatorStack,
+ sizeof(REOpData) * operatorStackSize);
+ if (!operatorStack)
+ goto out;
+ }
+ operatorStack[operatorSP].op = op;
+ operatorStack[operatorSP].errPos = state->cp;
+ operatorStack[operatorSP++].parenIndex = parenIndex;
+ break;
+ }
+ }
+out:
+ if (operatorStack)
+ JS_free(state->context, operatorStack);
+ if (operandStack)
+ JS_free(state->context, operandStack);
+ return result;
+}
+
+/*
+ * Hack two bits in CompilerState.flags, used within FindParenCount to flag
+ * that it is already on the call stack and to propagate errors to its callers.
+ */
+#define JSREG_FIND_PAREN_COUNT 0x8000
+#define JSREG_FIND_PAREN_ERROR 0x4000
+
+/*
+ * Magic return value from FindParenCount and GetDecimalValue, to indicate
+ * overflow beyond GetDecimalValue's max parameter, or a computed maximum if
+ * its findMax parameter is non-null.
+ */
+#define OVERFLOW_VALUE ((uintN)-1)
+
+static uintN
+FindParenCount(CompilerState *state)
+{
+ CompilerState temp;
+ int i;
+
+ if (state->flags & JSREG_FIND_PAREN_COUNT)
+ return OVERFLOW_VALUE;
+
+ /*
+ * Copy state into temp, flag it so we never report an invalid backref,
+ * and reset its members to parse the entire regexp. This is obviously
+ * suboptimal, but GetDecimalValue calls us only if a backref appears to
+ * refer to a forward parenthetical, which is rare.
+ */
+ temp = *state;
+ temp.flags |= JSREG_FIND_PAREN_COUNT;
+ temp.cp = temp.cpbegin;
+ temp.parenCount = 0;
+ temp.classCount = 0;
+ temp.progLength = 0;
+ temp.treeDepth = 0;
+ temp.classBitmapsMem = 0;
+ for (i = 0; i < CLASS_CACHE_SIZE; i++)
+ temp.classCache[i].start = NULL;
+
+ if (!ParseRegExp(&temp)) {
+ state->flags |= JSREG_FIND_PAREN_ERROR;
+ return OVERFLOW_VALUE;
+ }
+ return temp.parenCount;
+}
+
+/*
+ * Extract and return a decimal value at state->cp. The initial character c
+ * has already been read. Return OVERFLOW_VALUE if the result exceeds max.
+ * Callers who pass a non-null findMax should test JSREG_FIND_PAREN_ERROR in
+ * state->flags to discover whether an error occurred under findMax.
+ */
+static uintN
+GetDecimalValue(jschar c, uintN max, uintN (*findMax)(CompilerState *state),
+ CompilerState *state)
+{
+ uintN value = JS7_UNDEC(c);
+ JSBool overflow = (value > max && (!findMax || value > findMax(state)));
+
+ /* The following restriction allows simpler overflow checks. */
+ JS_ASSERT(max <= ((uintN)-1 - 9) / 10);
+ while (state->cp < state->cpend) {
+ c = *state->cp;
+ if (!JS7_ISDEC(c))
+ break;
+ value = 10 * value + JS7_UNDEC(c);
+ if (!overflow && value > max && (!findMax || value > findMax(state)))
+ overflow = JS_TRUE;
+ ++state->cp;
+ }
+ return overflow ? OVERFLOW_VALUE : value;
+}
+
+/*
+ * Calculate the total size of the bitmap required for a class expression.
+ */
+static JSBool
+CalculateBitmapSize(CompilerState *state, RENode *target, const jschar *src,
+ const jschar *end)
+{
+ uintN max = 0;
+ JSBool inRange = JS_FALSE;
+ jschar c, rangeStart = 0;
+ uintN n, digit, nDigits, i;
+
+ target->u.ucclass.bmsize = 0;
+ target->u.ucclass.sense = JS_TRUE;
+
+ if (src == end)
+ return JS_TRUE;
+
+ if (*src == '^') {
+ ++src;
+ target->u.ucclass.sense = JS_FALSE;
+ }
+
+ while (src != end) {
+ uintN localMax = 0;
+ switch (*src) {
+ case '\\':
+ ++src;
+ c = *src++;
+ switch (c) {
+ case 'b':
+ localMax = 0x8;
+ break;
+ case 'f':
+ localMax = 0xC;
+ break;
+ case 'n':
+ localMax = 0xA;
+ break;
+ case 'r':
+ localMax = 0xD;
+ break;
+ case 't':
+ localMax = 0x9;
+ break;
+ case 'v':
+ localMax = 0xB;
+ break;
+ case 'c':
+ if (src < end && RE_IS_LETTER(*src)) {
+ localMax = (jschar) (*src++ & 0x1F);
+ } else {
+ --src;
+ localMax = '\\';
+ }
+ break;
+ case 'x':
+ nDigits = 2;
+ goto lexHex;
+ case 'u':
+ nDigits = 4;
+lexHex:
+ n = 0;
+ for (i = 0; (i < nDigits) && (src < end); i++) {
+ c = *src++;
+ if (!isASCIIHexDigit(c, &digit)) {
+ /*
+ * Back off to accepting the original
+ * '\' as a literal.
+ */
+ src -= i + 1;
+ n = '\\';
+ break;
+ }
+ n = (n << 4) | digit;
+ }
+ localMax = n;
+ break;
+ case 'd':
+ if (inRange) {
+ JS_ReportErrorNumber(state->context,
+ js_GetErrorMessage, NULL,
+ JSMSG_BAD_CLASS_RANGE);
+ return JS_FALSE;
+ }
+ localMax = '9';
+ break;
+ case 'D':
+ case 's':
+ case 'S':
+ case 'w':
+ case 'W':
+ if (inRange) {
+ JS_ReportErrorNumber(state->context,
+ js_GetErrorMessage, NULL,
+ JSMSG_BAD_CLASS_RANGE);
+ return JS_FALSE;
+ }
+ target->u.ucclass.bmsize = 65535;
+ return JS_TRUE;
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ /*
+ * This is a non-ECMA extension - decimal escapes (in this
+ * case, octal!) are supposed to be an error inside class
+ * ranges, but are supported here for backwards compatibility.
+ */
+ n = JS7_UNDEC(c);
+ c = *src;
+ if ('0' <= c && c <= '7') {
+ src++;
+ n = 8 * n + JS7_UNDEC(c);
+ c = *src;
+ if ('0' <= c && c <= '7') {
+ src++;
+ i = 8 * n + JS7_UNDEC(c);
+ if (i <= 0377)
+ n = i;
+ else
+ src--;
+ }
+ }
+ localMax = n;
+ break;
+
+ default:
+ localMax = c;
+ break;
+ }
+ break;
+ default:
+ localMax = *src++;
+ break;
+ }
+ if (state->flags & JSREG_FOLD) {
+ c = JS_MAX(upcase((jschar) localMax), downcase((jschar) localMax));
+ if (c > localMax)
+ localMax = c;
+ }
+ if (inRange) {
+ if (rangeStart > localMax) {
+ JS_ReportErrorNumber(state->context,
+ js_GetErrorMessage, NULL,
+ JSMSG_BAD_CLASS_RANGE);
+ return JS_FALSE;
+ }
+ inRange = JS_FALSE;
+ } else {
+ if (src < end - 1) {
+ if (*src == '-') {
+ ++src;
+ inRange = JS_TRUE;
+ rangeStart = (jschar)localMax;
+ continue;
+ }
+ }
+ }
+ if (localMax > max)
+ max = localMax;
+ }
+ target->u.ucclass.bmsize = max;
+ return JS_TRUE;
+}
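+
+/*
+ * For example, for the class [a-z] the loop leaves max == 'z' (0x7A), so
+ * u.ucclass.bmsize is 0x7A and the bitmap later needs (0x7A >> 3) + 1 == 16
+ * bytes; a class containing \s, \S, \w, \W or \D forces bmsize to 65535.
+ */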
+
+/*
+ * item: assertion An item is either an assertion or
+ * quantatom a quantified atom.
+ *
+ * assertion: '^' Assertions match beginning of string
+ * (or line if the class static property
+ * RegExp.multiline is true).
+ * '$' End of string (or line if the class
+ * static property RegExp.multiline is
+ * true).
+ * '\b' Word boundary (between \w and \W).
+ * '\B' Word non-boundary.
+ *
+ * quantatom: atom An unquantified atom.
+ * quantatom '{' n ',' m '}'
+ * Atom must occur between n and m times.
+ * quantatom '{' n ',' '}' Atom must occur at least n times.
+ * quantatom '{' n '}' Atom must occur exactly n times.
+ * quantatom '*' Zero or more times (same as {0,}).
+ * quantatom '+' One or more times (same as {1,}).
+ * quantatom '?' Zero or one time (same as {0,1}).
+ *
+ * any of which can be optionally followed by '?' for ungreedy
+ *
+ * atom: '(' regexp ')' A parenthesized regexp (what matched
+ * can be addressed using a backreference,
+ * see '\' n below).
+ * '.' Matches any char except '\n'.
+ * '[' classlist ']' A character class.
+ * '[' '^' classlist ']' A negated character class.
+ * '\f' Form Feed.
+ * '\n' Newline (Line Feed).
+ * '\r' Carriage Return.
+ * '\t' Horizontal Tab.
+ * '\v' Vertical Tab.
+ * '\d' A digit (same as [0-9]).
+ * '\D' A non-digit.
+ * '\w' A word character, [0-9a-z_A-Z].
+ * '\W' A non-word character.
+ * '\s' A whitespace character, [ \b\f\n\r\t\v].
+ * '\S' A non-whitespace character.
+ * '\' n A backreference to the nth (n decimal
+ * and positive) parenthesized expression.
+ * '\' octal An octal escape sequence (octal must be
+ * two or three digits long, unless it is
+ * 0 for the null character).
+ * '\x' hex A hex escape (hex must be two digits).
+ * '\u' unicode A unicode escape (must be four digits).
+ * '\c' ctrl A control character, ctrl is a letter.
+ * '\' literalatomchar Any character except one of the above
+ * that follow '\' in an atom.
+ * otheratomchar Any character not first among the other
+ * atom right-hand sides.
+ */
+static JSBool
+ParseTerm(CompilerState *state)
+{
+ jschar c = *state->cp++;
+ uintN nDigits;
+ uintN num, tmp, n, i;
+ const jschar *termStart;
+
+ switch (c) {
+ /* assertions and atoms */
+ case '^':
+ state->result = NewRENode(state, REOP_BOL);
+ if (!state->result)
+ return JS_FALSE;
+ state->progLength++;
+ return JS_TRUE;
+ case '$':
+ state->result = NewRENode(state, REOP_EOL);
+ if (!state->result)
+ return JS_FALSE;
+ state->progLength++;
+ return JS_TRUE;
+ case '\\':
+ if (state->cp >= state->cpend) {
+ /* a trailing '\' is an error */
+ js_ReportCompileErrorNumber(state->context, state->tokenStream,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_TRAILING_SLASH);
+ return JS_FALSE;
+ }
+ c = *state->cp++;
+ switch (c) {
+ /* assertion escapes */
+ case 'b' :
+ state->result = NewRENode(state, REOP_WBDRY);
+ if (!state->result)
+ return JS_FALSE;
+ state->progLength++;
+ return JS_TRUE;
+ case 'B':
+ state->result = NewRENode(state, REOP_WNONBDRY);
+ if (!state->result)
+ return JS_FALSE;
+ state->progLength++;
+ return JS_TRUE;
+ /* Decimal escape */
+ case '0':
+ /* Give a strict warning. See also the note below. */
+ if (!js_ReportCompileErrorNumber(state->context,
+ state->tokenStream,
+ JSREPORT_TS |
+ JSREPORT_WARNING |
+ JSREPORT_STRICT,
+ JSMSG_INVALID_BACKREF)) {
+ return JS_FALSE;
+ }
+ doOctal:
+ num = 0;
+ while (state->cp < state->cpend) {
+ c = *state->cp;
+ if (c < '0' || '7' < c)
+ break;
+ state->cp++;
+ tmp = 8 * num + (uintN)JS7_UNDEC(c);
+ if (tmp > 0377)
+ break;
+ num = tmp;
+ }
+ c = (jschar)num;
+ doFlat:
+ state->result = NewRENode(state, REOP_FLAT);
+ if (!state->result)
+ return JS_FALSE;
+ state->result->u.flat.chr = c;
+ state->result->u.flat.length = 1;
+ state->progLength += 3;
+ break;
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9':
+ termStart = state->cp - 1;
+ num = GetDecimalValue(c, state->parenCount, FindParenCount, state);
+ if (state->flags & JSREG_FIND_PAREN_ERROR)
+ return JS_FALSE;
+ if (num == OVERFLOW_VALUE) {
+ /* Give a strict mode warning. */
+ if (!js_ReportCompileErrorNumber(state->context,
+ state->tokenStream,
+ JSREPORT_TS |
+ JSREPORT_WARNING |
+ JSREPORT_STRICT,
+ (c >= '8')
+ ? JSMSG_INVALID_BACKREF
+ : JSMSG_BAD_BACKREF)) {
+ return JS_FALSE;
+ }
+
+ /*
+ * Note: ECMA 262, 15.10.2.9 says that we should throw a syntax
+ * error here. However, for compatibility with IE, we treat the
+ * whole backref as flat if the first character in it is not a
+ * valid octal character, and as an octal escape otherwise.
+ */
+ state->cp = termStart;
+ if (c >= '8') {
+ /* Treat this as flat. termStart - 1 is the \. */
+ c = '\\';
+ goto asFlat;
+ }
+
+ /* Treat this as an octal escape. */
+ goto doOctal;
+ }
+ JS_ASSERT(1 <= num && num <= 0x10000);
+ state->result = NewRENode(state, REOP_BACKREF);
+ if (!state->result)
+ return JS_FALSE;
+ state->result->u.parenIndex = num - 1;
+ state->progLength
+ += 1 + GetCompactIndexWidth(state->result->u.parenIndex);
+ break;
+ /* Control escape */
+ case 'f':
+ c = 0xC;
+ goto doFlat;
+ case 'n':
+ c = 0xA;
+ goto doFlat;
+ case 'r':
+ c = 0xD;
+ goto doFlat;
+ case 't':
+ c = 0x9;
+ goto doFlat;
+ case 'v':
+ c = 0xB;
+ goto doFlat;
+ /* Control letter */
+ case 'c':
+ if (state->cp < state->cpend && RE_IS_LETTER(*state->cp)) {
+ c = (jschar) (*state->cp++ & 0x1F);
+ } else {
+ /* back off to accepting the original '\' as a literal */
+ --state->cp;
+ c = '\\';
+ }
+ goto doFlat;
+ /* HexEscapeSequence */
+ case 'x':
+ nDigits = 2;
+ goto lexHex;
+ /* UnicodeEscapeSequence */
+ case 'u':
+ nDigits = 4;
+lexHex:
+ n = 0;
+ for (i = 0; i < nDigits && state->cp < state->cpend; i++) {
+ uintN digit;
+ c = *state->cp++;
+ if (!isASCIIHexDigit(c, &digit)) {
+ /*
+ * Back off to accepting the original 'u' or 'x' as a
+ * literal.
+ */
+ state->cp -= i + 2;
+ n = *state->cp++;
+ break;
+ }
+ n = (n << 4) | digit;
+ }
+ c = (jschar) n;
+ goto doFlat;
+ /* Character class escapes */
+ case 'd':
+ state->result = NewRENode(state, REOP_DIGIT);
+doSimple:
+ if (!state->result)
+ return JS_FALSE;
+ state->progLength++;
+ break;
+ case 'D':
+ state->result = NewRENode(state, REOP_NONDIGIT);
+ goto doSimple;
+ case 's':
+ state->result = NewRENode(state, REOP_SPACE);
+ goto doSimple;
+ case 'S':
+ state->result = NewRENode(state, REOP_NONSPACE);
+ goto doSimple;
+ case 'w':
+ state->result = NewRENode(state, REOP_ALNUM);
+ goto doSimple;
+ case 'W':
+ state->result = NewRENode(state, REOP_NONALNUM);
+ goto doSimple;
+ /* IdentityEscape */
+ default:
+ state->result = NewRENode(state, REOP_FLAT);
+ if (!state->result)
+ return JS_FALSE;
+ state->result->u.flat.chr = c;
+ state->result->u.flat.length = 1;
+ state->result->kid = (void *) (state->cp - 1);
+ state->progLength += 3;
+ break;
+ }
+ break;
+ case '[':
+ state->result = NewRENode(state, REOP_CLASS);
+ if (!state->result)
+ return JS_FALSE;
+ termStart = state->cp;
+ state->result->u.ucclass.startIndex = termStart - state->cpbegin;
+ for (;;) {
+ if (state->cp == state->cpend) {
+ js_ReportCompileErrorNumberUC(state->context, state->tokenStream,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_UNTERM_CLASS, termStart);
+
+ return JS_FALSE;
+ }
+ if (*state->cp == '\\') {
+ state->cp++;
+ if (state->cp != state->cpend)
+ state->cp++;
+ continue;
+ }
+ if (*state->cp == ']') {
+ state->result->u.ucclass.kidlen = state->cp - termStart;
+ break;
+ }
+ state->cp++;
+ }
+ for (i = 0; i < CLASS_CACHE_SIZE; i++) {
+ if (!state->classCache[i].start) {
+ state->classCache[i].start = termStart;
+ state->classCache[i].length = state->result->u.ucclass.kidlen;
+ state->classCache[i].index = state->classCount;
+ break;
+ }
+ if (state->classCache[i].length ==
+ state->result->u.ucclass.kidlen) {
+ for (n = 0; ; n++) {
+ if (n == state->classCache[i].length) {
+ state->result->u.ucclass.index
+ = state->classCache[i].index;
+ goto claim;
+ }
+ if (state->classCache[i].start[n] != termStart[n])
+ break;
+ }
+ }
+ }
+ state->result->u.ucclass.index = state->classCount++;
+
+ claim:
+ /*
+ * Call CalculateBitmapSize now as we want any errors it finds
+ * to be reported during the parse phase, not at execution.
+ */
+ if (!CalculateBitmapSize(state, state->result, termStart, state->cp++))
+ return JS_FALSE;
+ /*
+ * Update classBitmapsMem with number of bytes to hold bmsize bits,
+ * which is (bitsCount + 7) / 8 or (highest_bit + 1 + 7) / 8
+ * or highest_bit / 8 + 1 where highest_bit is u.ucclass.bmsize.
+ */
+ n = (state->result->u.ucclass.bmsize >> 3) + 1;
+ if (n > CLASS_BITMAPS_MEM_LIMIT - state->classBitmapsMem) {
+ js_ReportCompileErrorNumber(state->context, state->tokenStream,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_REGEXP_TOO_COMPLEX);
+ return JS_FALSE;
+ }
+ state->classBitmapsMem += n;
+ /* CLASS, <index> */
+ state->progLength
+ += 1 + GetCompactIndexWidth(state->result->u.ucclass.index);
+ break;
+
+ case '.':
+ state->result = NewRENode(state, REOP_DOT);
+ goto doSimple;
+
+ case '{':
+ {
+ const jschar *errp = state->cp--;
+ intN err;
+
+ err = ParseMinMaxQuantifier(state, JS_TRUE);
+ state->cp = errp;
+
+ if (err < 0)
+ goto asFlat;
+
+ /* FALL THROUGH */
+ }
+ case '*':
+ case '+':
+ case '?':
+ js_ReportCompileErrorNumberUC(state->context, state->tokenStream,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_BAD_QUANTIFIER, state->cp - 1);
+ return JS_FALSE;
+ default:
+asFlat:
+ state->result = NewRENode(state, REOP_FLAT);
+ if (!state->result)
+ return JS_FALSE;
+ state->result->u.flat.chr = c;
+ state->result->u.flat.length = 1;
+ state->result->kid = (void *) (state->cp - 1);
+ state->progLength += 3;
+ break;
+ }
+ return ParseQuantifier(state);
+}
+
+static JSBool
+ParseQuantifier(CompilerState *state)
+{
+ RENode *term;
+ term = state->result;
+ if (state->cp < state->cpend) {
+ switch (*state->cp) {
+ case '+':
+ state->result = NewRENode(state, REOP_QUANT);
+ if (!state->result)
+ return JS_FALSE;
+ state->result->u.range.min = 1;
+ state->result->u.range.max = (uintN)-1;
+ /* <PLUS>, <next> ... <ENDCHILD> */
+ state->progLength += 4;
+ goto quantifier;
+ case '*':
+ state->result = NewRENode(state, REOP_QUANT);
+ if (!state->result)
+ return JS_FALSE;
+ state->result->u.range.min = 0;
+ state->result->u.range.max = (uintN)-1;
+ /* <STAR>, <next> ... <ENDCHILD> */
+ state->progLength += 4;
+ goto quantifier;
+ case '?':
+ state->result = NewRENode(state, REOP_QUANT);
+ if (!state->result)
+ return JS_FALSE;
+ state->result->u.range.min = 0;
+ state->result->u.range.max = 1;
+ /* <OPT>, <next> ... <ENDCHILD> */
+ state->progLength += 4;
+ goto quantifier;
+ case '{': /* balance '}' */
+ {
+ intN err;
+ const jschar *errp = state->cp;
+
+ err = ParseMinMaxQuantifier(state, JS_FALSE);
+ if (err == 0)
+ goto quantifier;
+ if (err == -1)
+ return JS_TRUE;
+
+ js_ReportCompileErrorNumberUC(state->context,
+ state->tokenStream,
+ JSREPORT_TS | JSREPORT_ERROR,
+ err, errp);
+ return JS_FALSE;
+ }
+ default:;
+ }
+ }
+ return JS_TRUE;
+
+quantifier:
+ if (state->treeDepth == TREE_DEPTH_MAX) {
+ js_ReportCompileErrorNumber(state->context, state->tokenStream,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_REGEXP_TOO_COMPLEX);
+ return JS_FALSE;
+ }
+
+ ++state->treeDepth;
+ ++state->cp;
+ state->result->kid = term;
+ if (state->cp < state->cpend && *state->cp == '?') {
+ ++state->cp;
+ state->result->u.range.greedy = JS_FALSE;
+ } else {
+ state->result->u.range.greedy = JS_TRUE;
+ }
+ return JS_TRUE;
+}
+
+static intN
+ParseMinMaxQuantifier(CompilerState *state, JSBool ignoreValues)
+{
+ uintN min, max;
+ jschar c;
+ const jschar *errp = state->cp++;
+
+ c = *state->cp;
+ if (JS7_ISDEC(c)) {
+ ++state->cp;
+ min = GetDecimalValue(c, 0xFFFF, NULL, state);
+ c = *state->cp;
+
+ if (!ignoreValues && min == OVERFLOW_VALUE)
+ return JSMSG_MIN_TOO_BIG;
+
+ if (c == ',') {
+ c = *++state->cp;
+ if (JS7_ISDEC(c)) {
+ ++state->cp;
+ max = GetDecimalValue(c, 0xFFFF, NULL, state);
+ c = *state->cp;
+ if (!ignoreValues && max == OVERFLOW_VALUE)
+ return JSMSG_MAX_TOO_BIG;
+ if (!ignoreValues && min > max)
+ return JSMSG_OUT_OF_ORDER;
+ } else {
+ max = (uintN)-1;
+ }
+ } else {
+ max = min;
+ }
+ if (c == '}') {
+ state->result = NewRENode(state, REOP_QUANT);
+ if (!state->result)
+ return JS_FALSE;
+ state->result->u.range.min = min;
+ state->result->u.range.max = max;
+ /*
+ * QUANT, <min>, <max>, <next> ... <ENDCHILD>
+ * where <max> is written as compact(max+1) so that the
+ * (uintN)-1 sentinel occupies 1 byte, not width_of(max)+1.
+ */
+ state->progLength += (1 + GetCompactIndexWidth(min)
+ + GetCompactIndexWidth(max + 1)
+ +3);
+ return 0;
+ }
+ }
+
+ state->cp = errp;
+ return -1;
+}
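+
+/*
+ * For example, a{2,} yields a REOP_QUANT node with min == 2 and
+ * max == (uintN)-1; because the emitter writes compact(max + 1), the
+ * open-ended sentinel costs a single zero byte rather than the full
+ * compact width of (uintN)-1.
+ */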
+
+static JSBool
+SetForwardJumpOffset(jsbytecode *jump, jsbytecode *target)
+{
+ ptrdiff_t offset = target - jump;
+
+ /* Check that target really points forward. */
+ JS_ASSERT(offset >= 2);
+ if ((size_t)offset > OFFSET_MAX)
+ return JS_FALSE;
+
+ jump[0] = JUMP_OFFSET_HI(offset);
+ jump[1] = JUMP_OFFSET_LO(offset);
+ return JS_TRUE;
+}
+
+/*
+ * Generate bytecode for the tree rooted at t using an explicit stack instead
+ * of recursion.
+ */
+static jsbytecode *
+EmitREBytecode(CompilerState *state, JSRegExp *re, size_t treeDepth,
+ jsbytecode *pc, RENode *t)
+{
+ EmitStateStackEntry *emitStateSP, *emitStateStack;
+ RECharSet *charSet;
+ REOp op;
+
+ if (treeDepth == 0) {
+ emitStateStack = NULL;
+ } else {
+ emitStateStack =
+ (EmitStateStackEntry *)JS_malloc(state->context,
+ sizeof(EmitStateStackEntry) *
+ treeDepth);
+ if (!emitStateStack)
+ return NULL;
+ }
+ emitStateSP = emitStateStack;
+ op = t->op;
+
+ for (;;) {
+ *pc++ = op;
+ switch (op) {
+ case REOP_EMPTY:
+ --pc;
+ break;
+
+ case REOP_ALTPREREQ2:
+ case REOP_ALTPREREQ:
+ JS_ASSERT(emitStateSP);
+ emitStateSP->altHead = pc - 1;
+ emitStateSP->endTermFixup = pc;
+ pc += OFFSET_LEN;
+ SET_ARG(pc, t->u.altprereq.ch1);
+ pc += ARG_LEN;
+ SET_ARG(pc, t->u.altprereq.ch2);
+ pc += ARG_LEN;
+
+ emitStateSP->nextAltFixup = pc; /* offset to next alternate */
+ pc += OFFSET_LEN;
+
+ emitStateSP->continueNode = t;
+ emitStateSP->continueOp = REOP_JUMP;
+ emitStateSP->jumpToJumpFlag = JS_FALSE;
+ ++emitStateSP;
+ JS_ASSERT((size_t)(emitStateSP - emitStateStack) <= treeDepth);
+ t = (RENode *) t->kid;
+ op = t->op;
+ continue;
+
+ case REOP_JUMP:
+ emitStateSP->nextTermFixup = pc; /* offset to following term */
+ pc += OFFSET_LEN;
+ if (!SetForwardJumpOffset(emitStateSP->nextAltFixup, pc))
+ goto jump_too_big;
+ emitStateSP->continueOp = REOP_ENDALT;
+ ++emitStateSP;
+ JS_ASSERT((size_t)(emitStateSP - emitStateStack) <= treeDepth);
+ t = t->u.kid2;
+ op = t->op;
+ continue;
+
+ case REOP_ENDALT:
+ /*
+ * If we already patched emitStateSP->nextTermFixup to jump to
+ * a nearer jump, to avoid 16-bit immediate offset overflow, we
+ * are done here.
+ */
+ if (emitStateSP->jumpToJumpFlag)
+ break;
+
+ /*
+ * Fix up the REOP_JUMP offset to go to the op after REOP_ENDALT.
+ * REOP_ENDALT is executed only on successful match of the last
+ * alternate in a group.
+ */
+ if (!SetForwardJumpOffset(emitStateSP->nextTermFixup, pc))
+ goto jump_too_big;
+ if (t->op != REOP_ALT) {
+ if (!SetForwardJumpOffset(emitStateSP->endTermFixup, pc))
+ goto jump_too_big;
+ }
+
+ /*
+ * If the program is bigger than the REOP_JUMP offset range, then
+ * we must check for alternates before this one that are part of
+ * the same group, and fix up their jump offsets to target jumps
+ * close enough to fit in a 16-bit unsigned offset immediate.
+ */
+ if ((size_t)(pc - re->program) > OFFSET_MAX &&
+ emitStateSP > emitStateStack) {
+ EmitStateStackEntry *esp, *esp2;
+ jsbytecode *alt, *jump;
+ ptrdiff_t span, header;
+
+ esp2 = emitStateSP;
+ alt = esp2->altHead;
+ for (esp = esp2 - 1; esp >= emitStateStack; --esp) {
+ if (esp->continueOp == REOP_ENDALT &&
+ !esp->jumpToJumpFlag &&
+ esp->nextTermFixup + OFFSET_LEN == alt &&
+ (size_t)(pc - ((esp->continueNode->op != REOP_ALT)
+ ? esp->endTermFixup
+ : esp->nextTermFixup)) > OFFSET_MAX) {
+ alt = esp->altHead;
+ jump = esp->nextTermFixup;
+
+ /*
+ * The span must be 1 less than the distance from
+ * jump offset to jump offset, so we actually jump
+ * to a REOP_JUMP bytecode, not to its offset!
+ */
+ for (;;) {
+ JS_ASSERT(jump < esp2->nextTermFixup);
+ span = esp2->nextTermFixup - jump - 1;
+ if ((size_t)span <= OFFSET_MAX)
+ break;
+ do {
+ if (--esp2 == esp)
+ goto jump_too_big;
+ } while (esp2->continueOp != REOP_ENDALT);
+ }
+
+ jump[0] = JUMP_OFFSET_HI(span);
+ jump[1] = JUMP_OFFSET_LO(span);
+
+ if (esp->continueNode->op != REOP_ALT) {
+ /*
+ * We must patch the offset at esp->endTermFixup
+ * as well, for the REOP_ALTPREREQ{,2} opcodes.
+ * If we're unlucky and endTermFixup is more than
+ * OFFSET_MAX bytes from its target, we cheat by
+ * jumping 6 bytes to the jump whose offset is at
+ * esp->nextTermFixup, which has the same target.
+ */
+ jump = esp->endTermFixup;
+ header = esp->nextTermFixup - jump;
+ span += header;
+ if ((size_t)span > OFFSET_MAX)
+ span = header;
+
+ jump[0] = JUMP_OFFSET_HI(span);
+ jump[1] = JUMP_OFFSET_LO(span);
+ }
+
+ esp->jumpToJumpFlag = JS_TRUE;
+ }
+ }
+ }
+ break;
+
+ case REOP_ALT:
+ JS_ASSERT(emitStateSP);
+ emitStateSP->altHead = pc - 1;
+ emitStateSP->nextAltFixup = pc; /* offset to next alternate */
+ pc += OFFSET_LEN;
+ emitStateSP->continueNode = t;
+ emitStateSP->continueOp = REOP_JUMP;
+ emitStateSP->jumpToJumpFlag = JS_FALSE;
+ ++emitStateSP;
+ JS_ASSERT((size_t)(emitStateSP - emitStateStack) <= treeDepth);
+ t = t->kid;
+ op = t->op;
+ continue;
+
+ case REOP_FLAT:
+ /*
+ * Coalesce FLATs if possible and if it would not increase bytecode
+ * beyond the preallocated limit. The latter happens only when the
+ * bytecode size for a coalesced string with offset p and length 2
+ * exceeds the 6 bytes preallocated for 2 single-char nodes, i.e. when
+ * 1 + GetCompactIndexWidth(p) + GetCompactIndexWidth(2) > 6 or
+ * GetCompactIndexWidth(p) > 4.
+ * Since when GetCompactIndexWidth(p) <= 4 coalescing of 3 or more
+ * nodes strictly decreases bytecode size, the check has to be
+ * done only for the first coalescing.
+ */
+ if (t->kid &&
+ GetCompactIndexWidth((jschar *)t->kid - state->cpbegin) <= 4)
+ {
+ while (t->next &&
+ t->next->op == REOP_FLAT &&
+ (jschar*)t->kid + t->u.flat.length ==
+ (jschar*)t->next->kid) {
+ t->u.flat.length += t->next->u.flat.length;
+ t->next = t->next->next;
+ }
+ }
+ if (t->kid && t->u.flat.length > 1) {
+ pc[-1] = (state->flags & JSREG_FOLD) ? REOP_FLATi : REOP_FLAT;
+ pc = WriteCompactIndex(pc, (jschar *)t->kid - state->cpbegin);
+ pc = WriteCompactIndex(pc, t->u.flat.length);
+ } else if (t->u.flat.chr < 256) {
+ pc[-1] = (state->flags & JSREG_FOLD) ? REOP_FLAT1i : REOP_FLAT1;
+ *pc++ = (jsbytecode) t->u.flat.chr;
+ } else {
+ pc[-1] = (state->flags & JSREG_FOLD)
+ ? REOP_UCFLAT1i
+ : REOP_UCFLAT1;
+ SET_ARG(pc, t->u.flat.chr);
+ pc += ARG_LEN;
+ }
+ break;
+
+ case REOP_LPAREN:
+ JS_ASSERT(emitStateSP);
+ pc = WriteCompactIndex(pc, t->u.parenIndex);
+ emitStateSP->continueNode = t;
+ emitStateSP->continueOp = REOP_RPAREN;
+ ++emitStateSP;
+ JS_ASSERT((size_t)(emitStateSP - emitStateStack) <= treeDepth);
+ t = (RENode *) t->kid;
+ op = t->op;
+ continue;
+
+ case REOP_RPAREN:
+ pc = WriteCompactIndex(pc, t->u.parenIndex);
+ break;
+
+ case REOP_BACKREF:
+ pc = WriteCompactIndex(pc, t->u.parenIndex);
+ break;
+
+ case REOP_ASSERT:
+ JS_ASSERT(emitStateSP);
+ emitStateSP->nextTermFixup = pc;
+ pc += OFFSET_LEN;
+ emitStateSP->continueNode = t;
+ emitStateSP->continueOp = REOP_ASSERTTEST;
+ ++emitStateSP;
+ JS_ASSERT((size_t)(emitStateSP - emitStateStack) <= treeDepth);
+ t = (RENode *) t->kid;
+ op = t->op;
+ continue;
+
+ case REOP_ASSERTTEST:
+ case REOP_ASSERTNOTTEST:
+ if (!SetForwardJumpOffset(emitStateSP->nextTermFixup, pc))
+ goto jump_too_big;
+ break;
+
+ case REOP_ASSERT_NOT:
+ JS_ASSERT(emitStateSP);
+ emitStateSP->nextTermFixup = pc;
+ pc += OFFSET_LEN;
+ emitStateSP->continueNode = t;
+ emitStateSP->continueOp = REOP_ASSERTNOTTEST;
+ ++emitStateSP;
+ JS_ASSERT((size_t)(emitStateSP - emitStateStack) <= treeDepth);
+ t = (RENode *) t->kid;
+ op = t->op;
+ continue;
+
+ case REOP_QUANT:
+ JS_ASSERT(emitStateSP);
+ if (t->u.range.min == 0 && t->u.range.max == (uintN)-1) {
+ pc[-1] = (t->u.range.greedy) ? REOP_STAR : REOP_MINIMALSTAR;
+ } else if (t->u.range.min == 0 && t->u.range.max == 1) {
+ pc[-1] = (t->u.range.greedy) ? REOP_OPT : REOP_MINIMALOPT;
+ } else if (t->u.range.min == 1 && t->u.range.max == (uintN) -1) {
+ pc[-1] = (t->u.range.greedy) ? REOP_PLUS : REOP_MINIMALPLUS;
+ } else {
+ if (!t->u.range.greedy)
+ pc[-1] = REOP_MINIMALQUANT;
+ pc = WriteCompactIndex(pc, t->u.range.min);
+ /*
+ * Write max + 1 so that the (uintN)-1 sentinel compacts to a
+ * single byte instead of the full width of max.
+ */
+ pc = WriteCompactIndex(pc, t->u.range.max + 1);
+ }
+ emitStateSP->nextTermFixup = pc;
+ pc += OFFSET_LEN;
+ emitStateSP->continueNode = t;
+ emitStateSP->continueOp = REOP_ENDCHILD;
+ ++emitStateSP;
+ JS_ASSERT((size_t)(emitStateSP - emitStateStack) <= treeDepth);
+ t = (RENode *) t->kid;
+ op = t->op;
+ continue;
+
+ case REOP_ENDCHILD:
+ if (!SetForwardJumpOffset(emitStateSP->nextTermFixup, pc))
+ goto jump_too_big;
+ break;
+
+ case REOP_CLASS:
+ if (!t->u.ucclass.sense)
+ pc[-1] = REOP_NCLASS;
+ pc = WriteCompactIndex(pc, t->u.ucclass.index);
+ charSet = &re->classList[t->u.ucclass.index];
+ charSet->converted = JS_FALSE;
+ charSet->length = t->u.ucclass.bmsize;
+ charSet->u.src.startIndex = t->u.ucclass.startIndex;
+ charSet->u.src.length = t->u.ucclass.kidlen;
+ charSet->sense = t->u.ucclass.sense;
+ break;
+
+ default:
+ break;
+ }
+
+ t = t->next;
+ if (t) {
+ op = t->op;
+ } else {
+ if (emitStateSP == emitStateStack)
+ break;
+ --emitStateSP;
+ t = emitStateSP->continueNode;
+ op = emitStateSP->continueOp;
+ }
+ }
+
+ cleanup:
+ if (emitStateStack)
+ JS_free(state->context, emitStateStack);
+ return pc;
+
+ jump_too_big:
+ js_ReportCompileErrorNumber(state->context, state->tokenStream,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_REGEXP_TOO_COMPLEX);
+ pc = NULL;
+ goto cleanup;
+}
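+
+/*
+ * For example, /ab/ compiled without flags becomes the four-byte program
+ * REOP_FLAT compact(0) compact(2) REOP_END: the two single-char FLAT nodes
+ * are coalesced above into one flat string of length 2 starting at source
+ * offset 0, and js_NewRegExp() appends the trailing REOP_END.
+ */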
+
+
+JSRegExp *
+js_NewRegExp(JSContext *cx, JSTokenStream *ts,
+ JSString *str, uintN flags, JSBool flat)
+{
+ JSRegExp *re;
+ void *mark;
+ CompilerState state;
+ size_t resize;
+ jsbytecode *endPC;
+ uintN i;
+ size_t len;
+
+ re = NULL;
+ mark = JS_ARENA_MARK(&cx->tempPool);
+ len = JSSTRING_LENGTH(str);
+
+ state.context = cx;
+ state.tokenStream = ts;
+ state.cp = js_UndependString(cx, str);
+ if (!state.cp)
+ goto out;
+ state.cpbegin = state.cp;
+ state.cpend = state.cp + len;
+ state.flags = flags;
+ state.parenCount = 0;
+ state.classCount = 0;
+ state.progLength = 0;
+ state.treeDepth = 0;
+ state.classBitmapsMem = 0;
+ for (i = 0; i < CLASS_CACHE_SIZE; i++)
+ state.classCache[i].start = NULL;
+
+ if (len != 0 && flat) {
+ state.result = NewRENode(&state, REOP_FLAT);
+ state.result->u.flat.chr = *state.cpbegin;
+ state.result->u.flat.length = len;
+ state.result->kid = (void *) state.cpbegin;
+ /* Flat bytecode: REOP_FLAT compact(string_offset) compact(len). */
+ state.progLength += 1 + GetCompactIndexWidth(0)
+ + GetCompactIndexWidth(len);
+ } else {
+ if (!ParseRegExp(&state))
+ goto out;
+ }
+ resize = offsetof(JSRegExp, program) + state.progLength + 1;
+ re = (JSRegExp *) JS_malloc(cx, resize);
+ if (!re)
+ goto out;
+
+ re->nrefs = 1;
+ JS_ASSERT(state.classBitmapsMem <= CLASS_BITMAPS_MEM_LIMIT);
+ re->classCount = state.classCount;
+ if (re->classCount) {
+ re->classList = (RECharSet *)
+ JS_malloc(cx, re->classCount * sizeof(RECharSet));
+ if (!re->classList) {
+ js_DestroyRegExp(cx, re);
+ re = NULL;
+ goto out;
+ }
+ for (i = 0; i < re->classCount; i++)
+ re->classList[i].converted = JS_FALSE;
+ } else {
+ re->classList = NULL;
+ }
+ endPC = EmitREBytecode(&state, re, state.treeDepth, re->program, state.result);
+ if (!endPC) {
+ js_DestroyRegExp(cx, re);
+ re = NULL;
+ goto out;
+ }
+ *endPC++ = REOP_END;
+ /*
+ * Check whether size was overestimated and shrink using realloc.
+ * This is safe since no pointers to newly parsed regexp or its parts
+ * besides re exist here.
+ */
+ if ((size_t)(endPC - re->program) != state.progLength + 1) {
+ JSRegExp *tmp;
+ JS_ASSERT((size_t)(endPC - re->program) < state.progLength + 1);
+ resize = offsetof(JSRegExp, program) + (endPC - re->program);
+ tmp = (JSRegExp *) JS_realloc(cx, re, resize);
+ if (tmp)
+ re = tmp;
+ }
+
+ re->flags = flags;
+ re->cloneIndex = 0;
+ re->parenCount = state.parenCount;
+ re->source = str;
+
+out:
+ JS_ARENA_RELEASE(&cx->tempPool, mark);
+ return re;
+}
+
+JSRegExp *
+js_NewRegExpOpt(JSContext *cx, JSTokenStream *ts,
+ JSString *str, JSString *opt, JSBool flat)
+{
+ uintN flags;
+ jschar *s;
+ size_t i, n;
+ char charBuf[2];
+
+ flags = 0;
+ if (opt) {
+ s = JSSTRING_CHARS(opt);
+ for (i = 0, n = JSSTRING_LENGTH(opt); i < n; i++) {
+ switch (s[i]) {
+ case 'g':
+ flags |= JSREG_GLOB;
+ break;
+ case 'i':
+ flags |= JSREG_FOLD;
+ break;
+ case 'm':
+ flags |= JSREG_MULTILINE;
+ break;
+ default:
+ charBuf[0] = (char)s[i];
+ charBuf[1] = '\0';
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_BAD_FLAG, charBuf);
+ return NULL;
+ }
+ }
+ }
+ return js_NewRegExp(cx, ts, str, flags, flat);
+}
+
+/*
+ * Save the current state of the match - the position in the input
+ * text as well as the position in the bytecode. The state of any
+ * parent expressions is also saved (preceding state).
+ * The contents of parenCount parentheses starting at parenIndex are also saved.
+ */
+static REBackTrackData *
+PushBackTrackState(REGlobalData *gData, REOp op,
+ jsbytecode *target, REMatchState *x, const jschar *cp,
+ size_t parenIndex, size_t parenCount)
+{
+ size_t i;
+ REBackTrackData *result =
+ (REBackTrackData *) ((char *)gData->backTrackSP + gData->cursz);
+
+ size_t sz = sizeof(REBackTrackData) +
+ gData->stateStackTop * sizeof(REProgState) +
+ parenCount * sizeof(RECapture);
+
+ ptrdiff_t btsize = gData->backTrackStackSize;
+ ptrdiff_t btincr = ((char *)result + sz) -
+ ((char *)gData->backTrackStack + btsize);
+
+ if (btincr > 0) {
+ ptrdiff_t offset = (char *)result - (char *)gData->backTrackStack;
+
+ btincr = JS_ROUNDUP(btincr, btsize);
+ JS_ARENA_GROW_CAST(gData->backTrackStack, REBackTrackData *,
+ &gData->pool, btsize, btincr);
+ if (!gData->backTrackStack) {
+ JS_ReportOutOfMemory(gData->cx);
+ gData->ok = JS_FALSE;
+ return NULL;
+ }
+ gData->backTrackStackSize = btsize + btincr;
+ result = (REBackTrackData *) ((char *)gData->backTrackStack + offset);
+ }
+ gData->backTrackSP = result;
+ result->sz = gData->cursz;
+ gData->cursz = sz;
+
+ result->backtrack_op = op;
+ result->backtrack_pc = target;
+ result->cp = cp;
+ result->parenCount = parenCount;
+
+ result->saveStateStackTop = gData->stateStackTop;
+ JS_ASSERT(gData->stateStackTop);
+ memcpy(result + 1, gData->stateStack,
+ sizeof(REProgState) * result->saveStateStackTop);
+
+ if (parenCount != 0) {
+ result->parenIndex = parenIndex;
+ memcpy((char *)(result + 1) +
+ sizeof(REProgState) * result->saveStateStackTop,
+ &x->parens[parenIndex],
+ sizeof(RECapture) * parenCount);
+ for (i = 0; i != parenCount; i++)
+ x->parens[parenIndex + i].index = -1;
+ }
+
+ return result;
+}
+
+
+/*
+ * Consecutive literal characters.
+ */
+#if 0
+static REMatchState *
+FlatNMatcher(REGlobalData *gData, REMatchState *x, jschar *matchChars,
+ size_t length)
+{
+ size_t i;
+ if (length > gData->cpend - x->cp)
+ return NULL;
+ for (i = 0; i != length; i++) {
+ if (matchChars[i] != x->cp[i])
+ return NULL;
+ }
+ x->cp += length;
+ return x;
+}
+#endif
+
+static REMatchState *
+FlatNIMatcher(REGlobalData *gData, REMatchState *x, jschar *matchChars,
+ size_t length)
+{
+ size_t i;
+ JS_ASSERT(gData->cpend >= x->cp);
+ if (length > (size_t)(gData->cpend - x->cp))
+ return NULL;
+ for (i = 0; i != length; i++) {
+ if (upcase(matchChars[i]) != upcase(x->cp[i]))
+ return NULL;
+ }
+ x->cp += length;
+ return x;
+}
+
+/*
+ * 1. Evaluate DecimalEscape to obtain an EscapeValue E.
+ * 2. If E is not a character then go to step 6.
+ * 3. Let ch be E's character.
+ * 4. Let A be a one-element RECharSet containing the character ch.
+ * 5. Call CharacterSetMatcher(A, false) and return its Matcher result.
+ * 6. E must be an integer. Let n be that integer.
+ * 7. If n=0 or n>NCapturingParens then throw a SyntaxError exception.
+ * 8. Return an internal Matcher closure that takes two arguments, a State x
+ * and a Continuation c, and performs the following:
+ * 1. Let cap be x's captures internal array.
+ * 2. Let s be cap[n].
+ * 3. If s is undefined, then call c(x) and return its result.
+ * 4. Let e be x's endIndex.
+ * 5. Let len be s's length.
+ * 6. Let f be e+len.
+ * 7. If f>InputLength, return failure.
+ * 8. If there exists an integer i between 0 (inclusive) and len (exclusive)
+ * such that Canonicalize(s[i]) is not the same character as
+ * Canonicalize(Input [e+i]), then return failure.
+ * 9. Let y be the State (f, cap).
+ * 10. Call c(y) and return its result.
+ */
+static REMatchState *
+BackrefMatcher(REGlobalData *gData, REMatchState *x, size_t parenIndex)
+{
+ size_t len, i;
+ const jschar *parenContent;
+ RECapture *cap = &x->parens[parenIndex];
+
+ if (cap->index == -1)
+ return x;
+
+ len = cap->length;
+ if (x->cp + len > gData->cpend)
+ return NULL;
+
+ parenContent = &gData->cpbegin[cap->index];
+ if (gData->regexp->flags & JSREG_FOLD) {
+ for (i = 0; i < len; i++) {
+ if (upcase(parenContent[i]) != upcase(x->cp[i]))
+ return NULL;
+ }
+ } else {
+ for (i = 0; i < len; i++) {
+ if (parenContent[i] != x->cp[i])
+ return NULL;
+ }
+ }
+ x->cp += len;
+ return x;
+}
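+
+/*
+ * Illustrative sketch (not part of the engine; all names are hypothetical):
+ * the same backreference rule restated for plain C strings -- an unset
+ * capture matches the empty string, otherwise the captured text must
+ * reappear verbatim at the current position (the JSREG_FOLD variant above
+ * compares upcased characters instead).
+ */
+#if 0
+static int
+ExampleBackrefMatches(const char *input, size_t inputLen, size_t pos,
+                      const char *capChars, long capIndex, size_t capLen)
+{
+    if (capIndex == -1)                 /* undefined capture: trivial match */
+        return 1;
+    if (pos + capLen > inputLen)        /* would run past end of input */
+        return 0;
+    return memcmp(input + pos, capChars, capLen) == 0;
+}
+#endif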
+
+
+/* Add a single character to the RECharSet */
+static void
+AddCharacterToCharSet(RECharSet *cs, jschar c)
+{
+ uintN byteIndex = (uintN)(c >> 3);
+ JS_ASSERT(c <= cs->length);
+ cs->u.bits[byteIndex] |= 1 << (c & 0x7);
+}
+
+
+/* Add a character range, c1 to c2 (inclusive) to the RECharSet */
+static void
+AddCharacterRangeToCharSet(RECharSet *cs, jschar c1, jschar c2)
+{
+ uintN i;
+
+ uintN byteIndex1 = (uintN)(c1 >> 3);
+ uintN byteIndex2 = (uintN)(c2 >> 3);
+
+ JS_ASSERT((c2 <= cs->length) && (c1 <= c2));
+
+ c1 &= 0x7;
+ c2 &= 0x7;
+
+ if (byteIndex1 == byteIndex2) {
+ cs->u.bits[byteIndex1] |= ((uint8)0xFF >> (7 - (c2 - c1))) << c1;
+ } else {
+ cs->u.bits[byteIndex1] |= 0xFF << c1;
+ for (i = byteIndex1 + 1; i < byteIndex2; i++)
+ cs->u.bits[i] = 0xFF;
+ cs->u.bits[byteIndex2] |= (uint8)0xFF >> (7 - c2);
+ }
+}
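+
+/*
+ * Illustrative sketch (hypothetical names, not part of the engine): the class
+ * bitmap is an ordinary bitset -- character c lives in byte c >> 3, at bit
+ * c & 7 -- which is exactly the index arithmetic used by the two helpers
+ * above and by the REOP_CLASS/REOP_NCLASS cases below.
+ */
+#if 0
+static void
+ExampleBitSetAdd(unsigned char *bits, unsigned c)
+{
+    bits[c >> 3] |= (unsigned char)(1 << (c & 0x7));
+}
+
+static int
+ExampleBitSetHas(const unsigned char *bits, unsigned c)
+{
+    return (bits[c >> 3] >> (c & 0x7)) & 1;
+}
+#endif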
+
+/* Compile the source of the class into a RECharSet */
+static JSBool
+ProcessCharSet(REGlobalData *gData, RECharSet *charSet)
+{
+ const jschar *src, *end;
+ JSBool inRange = JS_FALSE;
+ jschar rangeStart = 0;
+ uintN byteLength, n;
+ jschar c, thisCh;
+ intN nDigits, i;
+
+ JS_ASSERT(!charSet->converted);
+ /*
+     * Assert that startIndex and length point to chars inside the [] in the
+     * source string.
+ */
+ JS_ASSERT(1 <= charSet->u.src.startIndex);
+ JS_ASSERT(charSet->u.src.startIndex
+ < JSSTRING_LENGTH(gData->regexp->source));
+ JS_ASSERT(charSet->u.src.length <= JSSTRING_LENGTH(gData->regexp->source)
+ - 1 - charSet->u.src.startIndex);
+
+ charSet->converted = JS_TRUE;
+ src = JSSTRING_CHARS(gData->regexp->source) + charSet->u.src.startIndex;
+ end = src + charSet->u.src.length;
+ JS_ASSERT(src[-1] == '[');
+ JS_ASSERT(end[0] == ']');
+
+ byteLength = (charSet->length >> 3) + 1;
+ charSet->u.bits = (uint8 *)JS_malloc(gData->cx, byteLength);
+ if (!charSet->u.bits) {
+ JS_ReportOutOfMemory(gData->cx);
+ gData->ok = JS_FALSE;
+ return JS_FALSE;
+ }
+ memset(charSet->u.bits, 0, byteLength);
+
+ if (src == end)
+ return JS_TRUE;
+
+ if (*src == '^') {
+ JS_ASSERT(charSet->sense == JS_FALSE);
+ ++src;
+ } else {
+ JS_ASSERT(charSet->sense == JS_TRUE);
+ }
+
+ while (src != end) {
+ switch (*src) {
+ case '\\':
+ ++src;
+ c = *src++;
+ switch (c) {
+ case 'b':
+ thisCh = 0x8;
+ break;
+ case 'f':
+ thisCh = 0xC;
+ break;
+ case 'n':
+ thisCh = 0xA;
+ break;
+ case 'r':
+ thisCh = 0xD;
+ break;
+ case 't':
+ thisCh = 0x9;
+ break;
+ case 'v':
+ thisCh = 0xB;
+ break;
+ case 'c':
+ if (src < end && JS_ISWORD(*src)) {
+ thisCh = (jschar)(*src++ & 0x1F);
+ } else {
+ --src;
+ thisCh = '\\';
+ }
+ break;
+ case 'x':
+ nDigits = 2;
+ goto lexHex;
+ case 'u':
+ nDigits = 4;
+ lexHex:
+ n = 0;
+ for (i = 0; (i < nDigits) && (src < end); i++) {
+ uintN digit;
+ c = *src++;
+ if (!isASCIIHexDigit(c, &digit)) {
+ /*
+ * Back off to accepting the original '\'
+ * as a literal
+ */
+ src -= i + 1;
+ n = '\\';
+ break;
+ }
+ n = (n << 4) | digit;
+ }
+ thisCh = (jschar)n;
+ break;
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ /*
+ * This is a non-ECMA extension - decimal escapes (in this
+ * case, octal!) are supposed to be an error inside class
+ * ranges, but supported here for backwards compatibility.
+ */
+ n = JS7_UNDEC(c);
+ c = *src;
+ if ('0' <= c && c <= '7') {
+ src++;
+ n = 8 * n + JS7_UNDEC(c);
+ c = *src;
+ if ('0' <= c && c <= '7') {
+ src++;
+ i = 8 * n + JS7_UNDEC(c);
+ if (i <= 0377)
+ n = i;
+ else
+ src--;
+ }
+ }
+ thisCh = (jschar)n;
+ break;
+
+ case 'd':
+ AddCharacterRangeToCharSet(charSet, '0', '9');
+ continue; /* don't need range processing */
+ case 'D':
+ AddCharacterRangeToCharSet(charSet, 0, '0' - 1);
+ AddCharacterRangeToCharSet(charSet,
+ (jschar)('9' + 1),
+ (jschar)charSet->length);
+ continue;
+ case 's':
+ for (i = (intN)charSet->length; i >= 0; i--)
+ if (JS_ISSPACE(i))
+ AddCharacterToCharSet(charSet, (jschar)i);
+ continue;
+ case 'S':
+ for (i = (intN)charSet->length; i >= 0; i--)
+ if (!JS_ISSPACE(i))
+ AddCharacterToCharSet(charSet, (jschar)i);
+ continue;
+ case 'w':
+ for (i = (intN)charSet->length; i >= 0; i--)
+ if (JS_ISWORD(i))
+ AddCharacterToCharSet(charSet, (jschar)i);
+ continue;
+ case 'W':
+ for (i = (intN)charSet->length; i >= 0; i--)
+ if (!JS_ISWORD(i))
+ AddCharacterToCharSet(charSet, (jschar)i);
+ continue;
+ default:
+ thisCh = c;
+ break;
+
+ }
+ break;
+
+ default:
+ thisCh = *src++;
+ break;
+
+ }
+ if (inRange) {
+ if (gData->regexp->flags & JSREG_FOLD) {
+ AddCharacterRangeToCharSet(charSet, upcase(rangeStart),
+ upcase(thisCh));
+ AddCharacterRangeToCharSet(charSet, downcase(rangeStart),
+ downcase(thisCh));
+ } else {
+ AddCharacterRangeToCharSet(charSet, rangeStart, thisCh);
+ }
+ inRange = JS_FALSE;
+ } else {
+ if (gData->regexp->flags & JSREG_FOLD) {
+ AddCharacterToCharSet(charSet, upcase(thisCh));
+ AddCharacterToCharSet(charSet, downcase(thisCh));
+ } else {
+ AddCharacterToCharSet(charSet, thisCh);
+ }
+ if (src < end - 1) {
+ if (*src == '-') {
+ ++src;
+ inRange = JS_TRUE;
+ rangeStart = thisCh;
+ }
+ }
+ }
+ }
+ return JS_TRUE;
+}
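+
+/*
+ * Worked example (illustrative only): for the class [a-c], ProcessCharSet
+ * calls AddCharacterRangeToCharSet(charSet, 'a', 'c'). Since 'a' is 97,
+ * 'b' is 98 and 'c' is 99, all three land in byte 97 >> 3 == 12, at bit
+ * positions 1, 2 and 3, so u.bits[12] ends up as 0x0E and every other byte
+ * stays zero.
+ */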
+
+void
+js_DestroyRegExp(JSContext *cx, JSRegExp *re)
+{
+ if (JS_ATOMIC_DECREMENT(&re->nrefs) == 0) {
+ if (re->classList) {
+ uintN i;
+ for (i = 0; i < re->classCount; i++) {
+ if (re->classList[i].converted)
+ JS_free(cx, re->classList[i].u.bits);
+ re->classList[i].u.bits = NULL;
+ }
+ JS_free(cx, re->classList);
+ }
+ JS_free(cx, re);
+ }
+}
+
+static JSBool
+ReallocStateStack(REGlobalData *gData)
+{
+ size_t limit = gData->stateStackLimit;
+ size_t sz = sizeof(REProgState) * limit;
+
+ JS_ARENA_GROW_CAST(gData->stateStack, REProgState *, &gData->pool, sz, sz);
+ if (!gData->stateStack) {
+ gData->ok = JS_FALSE;
+ return JS_FALSE;
+ }
+ gData->stateStackLimit = limit + limit;
+ return JS_TRUE;
+}
+
+#define PUSH_STATE_STACK(data) \
+ JS_BEGIN_MACRO \
+ ++(data)->stateStackTop; \
+ if ((data)->stateStackTop == (data)->stateStackLimit && \
+ !ReallocStateStack((data))) { \
+ return NULL; \
+ } \
+ JS_END_MACRO
+
+/*
+ * Apply the current op against the given input to see if it matches or fails.
+ * Return NULL if we don't get a match, the match state if we do. If updatecp
+ * is true, leave the state's cp advanced past the match; otherwise restore
+ * it. On a match, also update *startpc to the next op.
+ */
+static REMatchState *
+SimpleMatch(REGlobalData *gData, REMatchState *x, REOp op,
+ jsbytecode **startpc, JSBool updatecp)
+{
+ REMatchState *result = NULL;
+ jschar matchCh;
+ size_t parenIndex;
+ size_t offset, length, index;
+ jsbytecode *pc = *startpc; /* pc has already been incremented past op */
+ jschar *source;
+ const jschar *startcp = x->cp;
+ jschar ch;
+ RECharSet *charSet;
+
+ switch (op) {
+ case REOP_BOL:
+ if (x->cp != gData->cpbegin) {
+ if (!gData->cx->regExpStatics.multiline &&
+ !(gData->regexp->flags & JSREG_MULTILINE)) {
+ break;
+ }
+ if (!RE_IS_LINE_TERM(x->cp[-1]))
+ break;
+ }
+ result = x;
+ break;
+ case REOP_EOL:
+ if (x->cp != gData->cpend) {
+ if (!gData->cx->regExpStatics.multiline &&
+ !(gData->regexp->flags & JSREG_MULTILINE)) {
+ break;
+ }
+ if (!RE_IS_LINE_TERM(*x->cp))
+ break;
+ }
+ result = x;
+ break;
+ case REOP_WBDRY:
+ if ((x->cp == gData->cpbegin || !JS_ISWORD(x->cp[-1])) ^
+ !(x->cp != gData->cpend && JS_ISWORD(*x->cp))) {
+ result = x;
+ }
+ break;
+ case REOP_WNONBDRY:
+ if ((x->cp == gData->cpbegin || !JS_ISWORD(x->cp[-1])) ^
+ (x->cp != gData->cpend && JS_ISWORD(*x->cp))) {
+ result = x;
+ }
+ break;
+ case REOP_DOT:
+ if (x->cp != gData->cpend && !RE_IS_LINE_TERM(*x->cp)) {
+ result = x;
+ result->cp++;
+ }
+ break;
+ case REOP_DIGIT:
+ if (x->cp != gData->cpend && JS_ISDIGIT(*x->cp)) {
+ result = x;
+ result->cp++;
+ }
+ break;
+ case REOP_NONDIGIT:
+ if (x->cp != gData->cpend && !JS_ISDIGIT(*x->cp)) {
+ result = x;
+ result->cp++;
+ }
+ break;
+ case REOP_ALNUM:
+ if (x->cp != gData->cpend && JS_ISWORD(*x->cp)) {
+ result = x;
+ result->cp++;
+ }
+ break;
+ case REOP_NONALNUM:
+ if (x->cp != gData->cpend && !JS_ISWORD(*x->cp)) {
+ result = x;
+ result->cp++;
+ }
+ break;
+ case REOP_SPACE:
+ if (x->cp != gData->cpend && JS_ISSPACE(*x->cp)) {
+ result = x;
+ result->cp++;
+ }
+ break;
+ case REOP_NONSPACE:
+ if (x->cp != gData->cpend && !JS_ISSPACE(*x->cp)) {
+ result = x;
+ result->cp++;
+ }
+ break;
+ case REOP_BACKREF:
+ pc = ReadCompactIndex(pc, &parenIndex);
+ JS_ASSERT(parenIndex < gData->regexp->parenCount);
+ result = BackrefMatcher(gData, x, parenIndex);
+ break;
+ case REOP_FLAT:
+ pc = ReadCompactIndex(pc, &offset);
+ JS_ASSERT(offset < JSSTRING_LENGTH(gData->regexp->source));
+ pc = ReadCompactIndex(pc, &length);
+ JS_ASSERT(1 <= length);
+ JS_ASSERT(length <= JSSTRING_LENGTH(gData->regexp->source) - offset);
+ if (length <= (size_t)(gData->cpend - x->cp)) {
+ source = JSSTRING_CHARS(gData->regexp->source) + offset;
+ for (index = 0; index != length; index++) {
+ if (source[index] != x->cp[index])
+ return NULL;
+ }
+ x->cp += length;
+ result = x;
+ }
+ break;
+ case REOP_FLAT1:
+ matchCh = *pc++;
+ if (x->cp != gData->cpend && *x->cp == matchCh) {
+ result = x;
+ result->cp++;
+ }
+ break;
+ case REOP_FLATi:
+ pc = ReadCompactIndex(pc, &offset);
+ JS_ASSERT(offset < JSSTRING_LENGTH(gData->regexp->source));
+ pc = ReadCompactIndex(pc, &length);
+ JS_ASSERT(1 <= length);
+ JS_ASSERT(length <= JSSTRING_LENGTH(gData->regexp->source) - offset);
+ source = JSSTRING_CHARS(gData->regexp->source);
+ result = FlatNIMatcher(gData, x, source + offset, length);
+ break;
+ case REOP_FLAT1i:
+ matchCh = *pc++;
+ if (x->cp != gData->cpend && upcase(*x->cp) == upcase(matchCh)) {
+ result = x;
+ result->cp++;
+ }
+ break;
+ case REOP_UCFLAT1:
+ matchCh = GET_ARG(pc);
+ pc += ARG_LEN;
+ if (x->cp != gData->cpend && *x->cp == matchCh) {
+ result = x;
+ result->cp++;
+ }
+ break;
+ case REOP_UCFLAT1i:
+ matchCh = GET_ARG(pc);
+ pc += ARG_LEN;
+ if (x->cp != gData->cpend && upcase(*x->cp) == upcase(matchCh)) {
+ result = x;
+ result->cp++;
+ }
+ break;
+ case REOP_CLASS:
+ pc = ReadCompactIndex(pc, &index);
+ JS_ASSERT(index < gData->regexp->classCount);
+ if (x->cp != gData->cpend) {
+ charSet = &gData->regexp->classList[index];
+ JS_ASSERT(charSet->converted);
+ ch = *x->cp;
+ index = ch >> 3;
+ if (charSet->length != 0 &&
+ ch <= charSet->length &&
+ (charSet->u.bits[index] & (1 << (ch & 0x7)))) {
+ result = x;
+ result->cp++;
+ }
+ }
+ break;
+ case REOP_NCLASS:
+ pc = ReadCompactIndex(pc, &index);
+ JS_ASSERT(index < gData->regexp->classCount);
+ if (x->cp != gData->cpend) {
+ charSet = &gData->regexp->classList[index];
+ JS_ASSERT(charSet->converted);
+ ch = *x->cp;
+ index = ch >> 3;
+ if (charSet->length == 0 ||
+ ch > charSet->length ||
+ !(charSet->u.bits[index] & (1 << (ch & 0x7)))) {
+ result = x;
+ result->cp++;
+ }
+ }
+ break;
+ default:
+ JS_ASSERT(JS_FALSE);
+ }
+ if (result) {
+ if (!updatecp)
+ x->cp = startcp;
+ *startpc = pc;
+ return result;
+ }
+ x->cp = startcp;
+ return NULL;
+}
+
+static REMatchState *
+ExecuteREBytecode(REGlobalData *gData, REMatchState *x)
+{
+ REMatchState *result = NULL;
+ REBackTrackData *backTrackData;
+ jsbytecode *nextpc, *testpc;
+ REOp nextop;
+ RECapture *cap;
+ REProgState *curState;
+ const jschar *startcp;
+ size_t parenIndex, k;
+ size_t parenSoFar = 0;
+
+ jschar matchCh1, matchCh2;
+ RECharSet *charSet;
+
+ JSBranchCallback onbranch = gData->cx->branchCallback;
+ uintN onbranchCalls = 0;
+#define ONBRANCH_CALLS_MASK 127
+#define CHECK_BRANCH() \
+ JS_BEGIN_MACRO \
+ if (onbranch && \
+ (++onbranchCalls & ONBRANCH_CALLS_MASK) == 0 && \
+ !(*onbranch)(gData->cx, NULL)) { \
+ gData->ok = JS_FALSE; \
+ return NULL; \
+ } \
+ JS_END_MACRO
+
+ JSBool anchor;
+ jsbytecode *pc = gData->regexp->program;
+ REOp op = (REOp) *pc++;
+
+ /*
+ * If the first node is a simple match, step the index into the string
+ * until that match is made, or fail if it can't be found at all.
+ */
+ if (REOP_IS_SIMPLE(op)) {
+ anchor = JS_FALSE;
+ while (x->cp <= gData->cpend) {
+ nextpc = pc; /* reset back to start each time */
+ result = SimpleMatch(gData, x, op, &nextpc, JS_TRUE);
+ if (result) {
+ anchor = JS_TRUE;
+ x = result;
+ pc = nextpc; /* accept skip to next opcode */
+ op = (REOp) *pc++;
+ break;
+ }
+ gData->skipped++;
+ x->cp++;
+ }
+ if (!anchor)
+ return NULL;
+ }
+
+ for (;;) {
+ if (REOP_IS_SIMPLE(op)) {
+ result = SimpleMatch(gData, x, op, &pc, JS_TRUE);
+ } else {
+ curState = &gData->stateStack[gData->stateStackTop];
+ switch (op) {
+ case REOP_EMPTY:
+ result = x;
+ break;
+
+ case REOP_ALTPREREQ2:
+ nextpc = pc + GET_OFFSET(pc); /* start of next op */
+ pc += ARG_LEN;
+ matchCh2 = GET_ARG(pc);
+ pc += ARG_LEN;
+ k = GET_ARG(pc);
+ pc += ARG_LEN;
+
+ if (x->cp != gData->cpend) {
+ if (*x->cp == matchCh2)
+ goto doAlt;
+
+ charSet = &gData->regexp->classList[k];
+ if (!charSet->converted && !ProcessCharSet(gData, charSet))
+ return NULL;
+ matchCh1 = *x->cp;
+ k = matchCh1 >> 3;
+ if ((charSet->length == 0 ||
+ matchCh1 > charSet->length ||
+ !(charSet->u.bits[k] & (1 << (matchCh1 & 0x7)))) ^
+ charSet->sense) {
+ goto doAlt;
+ }
+ }
+ result = NULL;
+ break;
+
+ case REOP_ALTPREREQ:
+ nextpc = pc + GET_OFFSET(pc); /* start of next op */
+ pc += ARG_LEN;
+ matchCh1 = GET_ARG(pc);
+ pc += ARG_LEN;
+ matchCh2 = GET_ARG(pc);
+ pc += ARG_LEN;
+ if (x->cp == gData->cpend ||
+ (*x->cp != matchCh1 && *x->cp != matchCh2)) {
+ result = NULL;
+ break;
+ }
+                /* else fall through... */
+
+ case REOP_ALT:
+ doAlt:
+ nextpc = pc + GET_OFFSET(pc); /* start of next alternate */
+ pc += ARG_LEN; /* start of this alternate */
+ curState->parenSoFar = parenSoFar;
+ PUSH_STATE_STACK(gData);
+ op = (REOp) *pc++;
+ startcp = x->cp;
+ if (REOP_IS_SIMPLE(op)) {
+ if (!SimpleMatch(gData, x, op, &pc, JS_TRUE)) {
+ op = (REOp) *nextpc++;
+ pc = nextpc;
+ continue;
+ }
+ result = x;
+ op = (REOp) *pc++;
+ }
+ nextop = (REOp) *nextpc++;
+ if (!PushBackTrackState(gData, nextop, nextpc, x, startcp, 0, 0))
+ return NULL;
+ continue;
+
+ /*
+ * Occurs at (successful) end of REOP_ALT,
+ */
+ case REOP_JUMP:
+ --gData->stateStackTop;
+ pc += GET_OFFSET(pc);
+ op = (REOp) *pc++;
+ continue;
+
+ /*
+ * Occurs at last (successful) end of REOP_ALT,
+ */
+ case REOP_ENDALT:
+ --gData->stateStackTop;
+ op = (REOp) *pc++;
+ continue;
+
+ case REOP_LPAREN:
+ pc = ReadCompactIndex(pc, &parenIndex);
+ JS_ASSERT(parenIndex < gData->regexp->parenCount);
+ if (parenIndex + 1 > parenSoFar)
+ parenSoFar = parenIndex + 1;
+ x->parens[parenIndex].index = x->cp - gData->cpbegin;
+ x->parens[parenIndex].length = 0;
+ op = (REOp) *pc++;
+ continue;
+
+ case REOP_RPAREN:
+ pc = ReadCompactIndex(pc, &parenIndex);
+ JS_ASSERT(parenIndex < gData->regexp->parenCount);
+ cap = &x->parens[parenIndex];
+
+ /*
+ * FIXME: https://bugzilla.mozilla.org/show_bug.cgi?id=346090
+ * This wallpaper prevents a case where we somehow took a step
+ * backward in input while minimally-matching an empty string.
+ */
+ if (x->cp < gData->cpbegin + cap->index)
+ cap->index = -1;
+ cap->length = x->cp - (gData->cpbegin + cap->index);
+ op = (REOp) *pc++;
+ continue;
+
+ case REOP_ASSERT:
+ nextpc = pc + GET_OFFSET(pc); /* start of term after ASSERT */
+ pc += ARG_LEN; /* start of ASSERT child */
+ op = (REOp) *pc++;
+ testpc = pc;
+ if (REOP_IS_SIMPLE(op) &&
+ !SimpleMatch(gData, x, op, &testpc, JS_FALSE)) {
+ result = NULL;
+ break;
+ }
+ curState->u.assertion.top =
+ (char *)gData->backTrackSP - (char *)gData->backTrackStack;
+ curState->u.assertion.sz = gData->cursz;
+ curState->index = x->cp - gData->cpbegin;
+ curState->parenSoFar = parenSoFar;
+ PUSH_STATE_STACK(gData);
+ if (!PushBackTrackState(gData, REOP_ASSERTTEST,
+ nextpc, x, x->cp, 0, 0)) {
+ return NULL;
+ }
+ continue;
+
+ case REOP_ASSERT_NOT:
+ nextpc = pc + GET_OFFSET(pc);
+ pc += ARG_LEN;
+ op = (REOp) *pc++;
+ testpc = pc;
+ if (REOP_IS_SIMPLE(op) /* Note - fail to fail! */ &&
+ SimpleMatch(gData, x, op, &testpc, JS_FALSE) &&
+ *testpc == REOP_ASSERTNOTTEST) {
+ result = NULL;
+ break;
+ }
+ curState->u.assertion.top
+ = (char *)gData->backTrackSP -
+ (char *)gData->backTrackStack;
+ curState->u.assertion.sz = gData->cursz;
+ curState->index = x->cp - gData->cpbegin;
+ curState->parenSoFar = parenSoFar;
+ PUSH_STATE_STACK(gData);
+ if (!PushBackTrackState(gData, REOP_ASSERTNOTTEST,
+ nextpc, x, x->cp, 0, 0)) {
+ return NULL;
+ }
+ continue;
+
+ case REOP_ASSERTTEST:
+ --gData->stateStackTop;
+ --curState;
+ x->cp = gData->cpbegin + curState->index;
+ gData->backTrackSP =
+ (REBackTrackData *) ((char *)gData->backTrackStack +
+ curState->u.assertion.top);
+ gData->cursz = curState->u.assertion.sz;
+ if (result)
+ result = x;
+ break;
+
+ case REOP_ASSERTNOTTEST:
+ --gData->stateStackTop;
+ --curState;
+ x->cp = gData->cpbegin + curState->index;
+ gData->backTrackSP =
+ (REBackTrackData *) ((char *)gData->backTrackStack +
+ curState->u.assertion.top);
+ gData->cursz = curState->u.assertion.sz;
+ result = (!result) ? x : NULL;
+ break;
+
+ case REOP_END:
+ if (x)
+ return x;
+ break;
+
+ case REOP_STAR:
+ curState->u.quantifier.min = 0;
+ curState->u.quantifier.max = (uintN)-1;
+ goto quantcommon;
+ case REOP_PLUS:
+ curState->u.quantifier.min = 1;
+ curState->u.quantifier.max = (uintN)-1;
+ goto quantcommon;
+ case REOP_OPT:
+ curState->u.quantifier.min = 0;
+ curState->u.quantifier.max = 1;
+ goto quantcommon;
+ case REOP_QUANT:
+ pc = ReadCompactIndex(pc, &k);
+ curState->u.quantifier.min = k;
+ pc = ReadCompactIndex(pc, &k);
+ /* max is k - 1 to use one byte for (uintN)-1 sentinel. */
+ curState->u.quantifier.max = k - 1;
+ JS_ASSERT(curState->u.quantifier.min
+ <= curState->u.quantifier.max);
+ quantcommon:
+ if (curState->u.quantifier.max == 0) {
+ pc = pc + GET_OFFSET(pc);
+ op = (REOp) *pc++;
+ result = x;
+ continue;
+ }
+ /* Step over <next> */
+ nextpc = pc + ARG_LEN;
+ op = (REOp) *nextpc++;
+ startcp = x->cp;
+ if (REOP_IS_SIMPLE(op)) {
+ if (!SimpleMatch(gData, x, op, &nextpc, JS_TRUE)) {
+ if (curState->u.quantifier.min == 0)
+ result = x;
+ else
+ result = NULL;
+ pc = pc + GET_OFFSET(pc);
+ break;
+ }
+ op = (REOp) *nextpc++;
+ result = x;
+ }
+ curState->index = startcp - gData->cpbegin;
+ curState->continue_op = REOP_REPEAT;
+ curState->continue_pc = pc;
+ curState->parenSoFar = parenSoFar;
+ PUSH_STATE_STACK(gData);
+ if (curState->u.quantifier.min == 0 &&
+ !PushBackTrackState(gData, REOP_REPEAT, pc, x, startcp,
+ 0, 0)) {
+ return NULL;
+ }
+ pc = nextpc;
+ continue;
+
+ case REOP_ENDCHILD: /* marks the end of a quantifier child */
+ pc = curState[-1].continue_pc;
+ op = curState[-1].continue_op;
+ continue;
+
+ case REOP_REPEAT:
+ CHECK_BRANCH();
+ --curState;
+ do {
+ --gData->stateStackTop;
+ if (!result) {
+ /* Failed, see if we have enough children. */
+ if (curState->u.quantifier.min == 0)
+ goto repeatDone;
+ goto break_switch;
+ }
+ if (curState->u.quantifier.min == 0 &&
+ x->cp == gData->cpbegin + curState->index) {
+ /* matched an empty string, that'll get us nowhere */
+ result = NULL;
+ goto break_switch;
+ }
+ if (curState->u.quantifier.min != 0)
+ curState->u.quantifier.min--;
+ if (curState->u.quantifier.max != (uintN) -1)
+ curState->u.quantifier.max--;
+ if (curState->u.quantifier.max == 0)
+ goto repeatDone;
+ nextpc = pc + ARG_LEN;
+ nextop = (REOp) *nextpc;
+ startcp = x->cp;
+ if (REOP_IS_SIMPLE(nextop)) {
+ nextpc++;
+ if (!SimpleMatch(gData, x, nextop, &nextpc, JS_TRUE)) {
+ if (curState->u.quantifier.min == 0)
+ goto repeatDone;
+ result = NULL;
+ goto break_switch;
+ }
+ result = x;
+ }
+ curState->index = startcp - gData->cpbegin;
+ PUSH_STATE_STACK(gData);
+ if (curState->u.quantifier.min == 0 &&
+ !PushBackTrackState(gData, REOP_REPEAT,
+ pc, x, startcp,
+ curState->parenSoFar,
+ parenSoFar -
+ curState->parenSoFar)) {
+ return NULL;
+ }
+ } while (*nextpc == REOP_ENDCHILD);
+ pc = nextpc;
+ op = (REOp) *pc++;
+ parenSoFar = curState->parenSoFar;
+ continue;
+
+ repeatDone:
+ result = x;
+ pc += GET_OFFSET(pc);
+ goto break_switch;
+
+ case REOP_MINIMALSTAR:
+ curState->u.quantifier.min = 0;
+ curState->u.quantifier.max = (uintN)-1;
+ goto minimalquantcommon;
+ case REOP_MINIMALPLUS:
+ curState->u.quantifier.min = 1;
+ curState->u.quantifier.max = (uintN)-1;
+ goto minimalquantcommon;
+ case REOP_MINIMALOPT:
+ curState->u.quantifier.min = 0;
+ curState->u.quantifier.max = 1;
+ goto minimalquantcommon;
+ case REOP_MINIMALQUANT:
+ pc = ReadCompactIndex(pc, &k);
+ curState->u.quantifier.min = k;
+ pc = ReadCompactIndex(pc, &k);
+ /* See REOP_QUANT comments about k - 1. */
+ curState->u.quantifier.max = k - 1;
+ JS_ASSERT(curState->u.quantifier.min
+ <= curState->u.quantifier.max);
+ minimalquantcommon:
+ curState->index = x->cp - gData->cpbegin;
+ curState->parenSoFar = parenSoFar;
+ PUSH_STATE_STACK(gData);
+ if (curState->u.quantifier.min != 0) {
+ curState->continue_op = REOP_MINIMALREPEAT;
+ curState->continue_pc = pc;
+ /* step over <next> */
+ pc += OFFSET_LEN;
+ op = (REOp) *pc++;
+ } else {
+ if (!PushBackTrackState(gData, REOP_MINIMALREPEAT,
+ pc, x, x->cp, 0, 0)) {
+ return NULL;
+ }
+ --gData->stateStackTop;
+ pc = pc + GET_OFFSET(pc);
+ op = (REOp) *pc++;
+ }
+ continue;
+
+ case REOP_MINIMALREPEAT:
+ CHECK_BRANCH();
+ --gData->stateStackTop;
+ --curState;
+
+ if (!result) {
+ /*
+ * Non-greedy failure - try to consume another child.
+ */
+ if (curState->u.quantifier.max == (uintN) -1 ||
+ curState->u.quantifier.max > 0) {
+ curState->index = x->cp - gData->cpbegin;
+ curState->continue_op = REOP_MINIMALREPEAT;
+ curState->continue_pc = pc;
+ pc += ARG_LEN;
+ for (k = curState->parenSoFar; k < parenSoFar; k++)
+ x->parens[k].index = -1;
+ PUSH_STATE_STACK(gData);
+ op = (REOp) *pc++;
+ continue;
+ }
+ /* Don't need to adjust pc since we're going to pop. */
+ break;
+ }
+ if (curState->u.quantifier.min == 0 &&
+ x->cp == gData->cpbegin + curState->index) {
+ /* Matched an empty string, that'll get us nowhere. */
+ result = NULL;
+ break;
+ }
+ if (curState->u.quantifier.min != 0)
+ curState->u.quantifier.min--;
+ if (curState->u.quantifier.max != (uintN) -1)
+ curState->u.quantifier.max--;
+ if (curState->u.quantifier.min != 0) {
+ curState->continue_op = REOP_MINIMALREPEAT;
+ curState->continue_pc = pc;
+ pc += ARG_LEN;
+ for (k = curState->parenSoFar; k < parenSoFar; k++)
+ x->parens[k].index = -1;
+ curState->index = x->cp - gData->cpbegin;
+ PUSH_STATE_STACK(gData);
+ op = (REOp) *pc++;
+ continue;
+ }
+ curState->index = x->cp - gData->cpbegin;
+ curState->parenSoFar = parenSoFar;
+ PUSH_STATE_STACK(gData);
+ if (!PushBackTrackState(gData, REOP_MINIMALREPEAT,
+ pc, x, x->cp,
+ curState->parenSoFar,
+ parenSoFar - curState->parenSoFar)) {
+ return NULL;
+ }
+ --gData->stateStackTop;
+ pc = pc + GET_OFFSET(pc);
+ op = (REOp) *pc++;
+ continue;
+
+ default:
+ JS_ASSERT(JS_FALSE);
+ result = NULL;
+ }
+ break_switch:;
+ }
+
+ /*
+ * If the match failed and there's a backtrack option, take it.
+ * Otherwise this is a complete and utter failure.
+ */
+ if (!result) {
+ if (gData->cursz == 0)
+ return NULL;
+ backTrackData = gData->backTrackSP;
+ gData->cursz = backTrackData->sz;
+ gData->backTrackSP =
+ (REBackTrackData *) ((char *)backTrackData - backTrackData->sz);
+ x->cp = backTrackData->cp;
+ pc = backTrackData->backtrack_pc;
+ op = backTrackData->backtrack_op;
+ gData->stateStackTop = backTrackData->saveStateStackTop;
+ JS_ASSERT(gData->stateStackTop);
+
+ memcpy(gData->stateStack, backTrackData + 1,
+ sizeof(REProgState) * backTrackData->saveStateStackTop);
+ curState = &gData->stateStack[gData->stateStackTop - 1];
+
+ if (backTrackData->parenCount) {
+ memcpy(&x->parens[backTrackData->parenIndex],
+ (char *)(backTrackData + 1) +
+ sizeof(REProgState) * backTrackData->saveStateStackTop,
+ sizeof(RECapture) * backTrackData->parenCount);
+ parenSoFar = backTrackData->parenIndex + backTrackData->parenCount;
+ } else {
+ for (k = curState->parenSoFar; k < parenSoFar; k++)
+ x->parens[k].index = -1;
+ parenSoFar = curState->parenSoFar;
+ }
+ continue;
+ }
+ x = result;
+
+ /*
+ * Continue with the expression.
+ */
+ op = (REOp)*pc++;
+ }
+ return NULL;
+}
+
+static REMatchState *
+MatchRegExp(REGlobalData *gData, REMatchState *x)
+{
+ REMatchState *result;
+ const jschar *cp = x->cp;
+ const jschar *cp2;
+ uintN j;
+
+ /*
+ * Have to include the position beyond the last character
+ * in order to detect end-of-input/line condition.
+ */
+ for (cp2 = cp; cp2 <= gData->cpend; cp2++) {
+ gData->skipped = cp2 - cp;
+ x->cp = cp2;
+ for (j = 0; j < gData->regexp->parenCount; j++)
+ x->parens[j].index = -1;
+ result = ExecuteREBytecode(gData, x);
+ if (!gData->ok || result)
+ return result;
+ gData->backTrackSP = gData->backTrackStack;
+ gData->cursz = 0;
+ gData->stateStackTop = 0;
+ cp2 = cp + gData->skipped;
+ }
+ return NULL;
+}
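+
+/*
+ * Illustrative sketch (hypothetical names, not part of the engine): the loop
+ * above is the classic leftmost-match scan -- try the whole pattern at each
+ * successive start position, including the position one past the last
+ * character so that patterns matching only the empty string at end-of-input
+ * (e.g. /$/) still succeed.
+ */
+#if 0
+static long
+ExampleLeftmostMatch(int (*matchesAt)(const char *s, size_t len, size_t pos),
+                     const char *s, size_t len)
+{
+    size_t pos;
+    for (pos = 0; pos <= len; pos++) {  /* note <=, not < */
+        if (matchesAt(s, len, pos))
+            return (long)pos;
+    }
+    return -1;
+}
+#endif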
+
+
+static REMatchState *
+InitMatch(JSContext *cx, REGlobalData *gData, JSRegExp *re)
+{
+ REMatchState *result;
+ uintN i;
+
+ gData->backTrackStackSize = INITIAL_BACKTRACK;
+ JS_ARENA_ALLOCATE_CAST(gData->backTrackStack, REBackTrackData *,
+ &gData->pool,
+ INITIAL_BACKTRACK);
+ if (!gData->backTrackStack)
+ goto bad;
+
+ gData->backTrackSP = gData->backTrackStack;
+ gData->cursz = 0;
+
+ gData->stateStackLimit = INITIAL_STATESTACK;
+ JS_ARENA_ALLOCATE_CAST(gData->stateStack, REProgState *,
+ &gData->pool,
+ sizeof(REProgState) * INITIAL_STATESTACK);
+ if (!gData->stateStack)
+ goto bad;
+
+ gData->stateStackTop = 0;
+ gData->cx = cx;
+ gData->regexp = re;
+ gData->ok = JS_TRUE;
+
+ JS_ARENA_ALLOCATE_CAST(result, REMatchState *,
+ &gData->pool,
+ offsetof(REMatchState, parens)
+ + re->parenCount * sizeof(RECapture));
+ if (!result)
+ goto bad;
+
+ for (i = 0; i < re->classCount; i++) {
+ if (!re->classList[i].converted &&
+ !ProcessCharSet(gData, &re->classList[i])) {
+ return NULL;
+ }
+ }
+
+ return result;
+
+bad:
+ JS_ReportOutOfMemory(cx);
+ gData->ok = JS_FALSE;
+ return NULL;
+}
+
+JSBool
+js_ExecuteRegExp(JSContext *cx, JSRegExp *re, JSString *str, size_t *indexp,
+ JSBool test, jsval *rval)
+{
+ REGlobalData gData;
+ REMatchState *x, *result;
+
+ const jschar *cp, *ep;
+ size_t i, length, start;
+ JSSubString *morepar;
+ JSBool ok;
+ JSRegExpStatics *res;
+ ptrdiff_t matchlen;
+ uintN num, morenum;
+ JSString *parstr, *matchstr;
+ JSObject *obj;
+
+ RECapture *parsub = NULL;
+
+ /*
+ * It's safe to load from cp because JSStrings have a zero at the end,
+ * and we never let cp get beyond cpend.
+ */
+ start = *indexp;
+ length = JSSTRING_LENGTH(str);
+ if (start > length)
+ start = length;
+ cp = JSSTRING_CHARS(str);
+ gData.cpbegin = cp;
+ gData.cpend = cp + length;
+ cp += start;
+ gData.start = start;
+ gData.skipped = 0;
+
+ JS_InitArenaPool(&gData.pool, "RegExpPool", 8096, 4);
+ x = InitMatch(cx, &gData, re);
+ if (!x) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ x->cp = cp;
+
+ /*
+     * Call the matcher to do the real work. Set *rval to null on mismatch,
+     * whether testing or not. On match, set *rval to true when testing,
+     * otherwise to an extended Array object.
+ */
+ result = MatchRegExp(&gData, x);
+ ok = gData.ok;
+ if (!ok)
+ goto out;
+ if (!result) {
+ *rval = JSVAL_NULL;
+ goto out;
+ }
+ cp = result->cp;
+ i = cp - gData.cpbegin;
+ *indexp = i;
+ matchlen = i - (start + gData.skipped);
+ ep = cp;
+ cp -= matchlen;
+
+ if (test) {
+ /*
+ * Testing for a match and updating cx->regExpStatics: don't allocate
+ * an array object, do return true.
+ */
+ *rval = JSVAL_TRUE;
+
+        /* Avoid a warning; gcc doesn't see that obj is needed only if !test. */
+ obj = NULL;
+ } else {
+ /*
+ * The array returned on match has element 0 bound to the matched
+ * string, elements 1 through state.parenCount bound to the paren
+ * matches, an index property telling the length of the left context,
+ * and an input property referring to the input string.
+ */
+ obj = js_NewArrayObject(cx, 0, NULL);
+ if (!obj) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ *rval = OBJECT_TO_JSVAL(obj);
+
+#define DEFVAL(val, id) { \
+ ok = js_DefineProperty(cx, obj, id, val, \
+ JS_PropertyStub, JS_PropertyStub, \
+ JSPROP_ENUMERATE, NULL); \
+ if (!ok) { \
+ cx->weakRoots.newborn[GCX_OBJECT] = NULL; \
+ cx->weakRoots.newborn[GCX_STRING] = NULL; \
+ goto out; \
+ } \
+}
+
+ matchstr = js_NewStringCopyN(cx, cp, matchlen, 0);
+ if (!matchstr) {
+ cx->weakRoots.newborn[GCX_OBJECT] = NULL;
+ ok = JS_FALSE;
+ goto out;
+ }
+ DEFVAL(STRING_TO_JSVAL(matchstr), INT_TO_JSID(0));
+ }
+
+ res = &cx->regExpStatics;
+ res->input = str;
+ res->parenCount = re->parenCount;
+ if (re->parenCount == 0) {
+ res->lastParen = js_EmptySubString;
+ } else {
+ for (num = 0; num < re->parenCount; num++) {
+ parsub = &result->parens[num];
+ if (num < 9) {
+ if (parsub->index == -1) {
+ res->parens[num].chars = NULL;
+ res->parens[num].length = 0;
+ } else {
+ res->parens[num].chars = gData.cpbegin + parsub->index;
+ res->parens[num].length = parsub->length;
+ }
+ } else {
+ morenum = num - 9;
+ morepar = res->moreParens;
+ if (!morepar) {
+ res->moreLength = 10;
+ morepar = (JSSubString*)
+ JS_malloc(cx, 10 * sizeof(JSSubString));
+ } else if (morenum >= res->moreLength) {
+ res->moreLength += 10;
+ morepar = (JSSubString*)
+ JS_realloc(cx, morepar,
+ res->moreLength * sizeof(JSSubString));
+ }
+ if (!morepar) {
+ cx->weakRoots.newborn[GCX_OBJECT] = NULL;
+ cx->weakRoots.newborn[GCX_STRING] = NULL;
+ ok = JS_FALSE;
+ goto out;
+ }
+ res->moreParens = morepar;
+ if (parsub->index == -1) {
+ morepar[morenum].chars = NULL;
+ morepar[morenum].length = 0;
+ } else {
+ morepar[morenum].chars = gData.cpbegin + parsub->index;
+ morepar[morenum].length = parsub->length;
+ }
+ }
+ if (test)
+ continue;
+ if (parsub->index == -1) {
+ ok = js_DefineProperty(cx, obj, INT_TO_JSID(num + 1),
+ JSVAL_VOID, NULL, NULL,
+ JSPROP_ENUMERATE, NULL);
+ } else {
+ parstr = js_NewStringCopyN(cx, gData.cpbegin + parsub->index,
+ parsub->length, 0);
+ if (!parstr) {
+ cx->weakRoots.newborn[GCX_OBJECT] = NULL;
+ cx->weakRoots.newborn[GCX_STRING] = NULL;
+ ok = JS_FALSE;
+ goto out;
+ }
+ ok = js_DefineProperty(cx, obj, INT_TO_JSID(num + 1),
+ STRING_TO_JSVAL(parstr), NULL, NULL,
+ JSPROP_ENUMERATE, NULL);
+ }
+ if (!ok) {
+ cx->weakRoots.newborn[GCX_OBJECT] = NULL;
+ cx->weakRoots.newborn[GCX_STRING] = NULL;
+ goto out;
+ }
+ }
+ if (parsub->index == -1) {
+ res->lastParen = js_EmptySubString;
+ } else {
+ res->lastParen.chars = gData.cpbegin + parsub->index;
+ res->lastParen.length = parsub->length;
+ }
+ }
+
+ if (!test) {
+ /*
+ * Define the index and input properties last for better for/in loop
+ * order (so they come after the elements).
+ */
+ DEFVAL(INT_TO_JSVAL(start + gData.skipped),
+ ATOM_TO_JSID(cx->runtime->atomState.indexAtom));
+ DEFVAL(STRING_TO_JSVAL(str),
+ ATOM_TO_JSID(cx->runtime->atomState.inputAtom));
+ }
+
+#undef DEFVAL
+
+ res->lastMatch.chars = cp;
+ res->lastMatch.length = matchlen;
+
+ /*
+ * For JS1.3 and ECMAv2, emulate Perl5 exactly:
+ *
+ * js1.3 "hi", "hi there" "hihitherehi therebye"
+ */
+ res->leftContext.chars = JSSTRING_CHARS(str);
+ res->leftContext.length = start + gData.skipped;
+ res->rightContext.chars = ep;
+ res->rightContext.length = gData.cpend - ep;
+
+out:
+ JS_FinishArenaPool(&gData.pool);
+ return ok;
+}
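+
+/*
+ * Worked example (illustrative only): matching /b+/ against "aabbbcc" from
+ * start 0 leaves lastMatch == "bbb", leftContext == "aa" (length
+ * start + skipped == 2), rightContext == "cc", and the result array's
+ * 'index' property is 2 with 'input' bound to the whole string.
+ */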
+
+/************************************************************************/
+
+enum regexp_tinyid {
+ REGEXP_SOURCE = -1,
+ REGEXP_GLOBAL = -2,
+ REGEXP_IGNORE_CASE = -3,
+ REGEXP_LAST_INDEX = -4,
+ REGEXP_MULTILINE = -5
+};
+
+#define REGEXP_PROP_ATTRS (JSPROP_PERMANENT|JSPROP_SHARED)
+
+static JSPropertySpec regexp_props[] = {
+ {"source", REGEXP_SOURCE, REGEXP_PROP_ATTRS | JSPROP_READONLY,0,0},
+ {"global", REGEXP_GLOBAL, REGEXP_PROP_ATTRS | JSPROP_READONLY,0,0},
+ {"ignoreCase", REGEXP_IGNORE_CASE, REGEXP_PROP_ATTRS | JSPROP_READONLY,0,0},
+ {"lastIndex", REGEXP_LAST_INDEX, REGEXP_PROP_ATTRS,0,0},
+ {"multiline", REGEXP_MULTILINE, REGEXP_PROP_ATTRS | JSPROP_READONLY,0,0},
+ {0,0,0,0,0}
+};
+
+static JSBool
+regexp_getProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ jsint slot;
+ JSRegExp *re;
+
+ if (!JSVAL_IS_INT(id))
+ return JS_TRUE;
+ slot = JSVAL_TO_INT(id);
+ if (slot == REGEXP_LAST_INDEX)
+ return JS_GetReservedSlot(cx, obj, 0, vp);
+
+ JS_LOCK_OBJ(cx, obj);
+ re = (JSRegExp *) JS_GetInstancePrivate(cx, obj, &js_RegExpClass, NULL);
+ if (re) {
+ switch (slot) {
+ case REGEXP_SOURCE:
+ *vp = STRING_TO_JSVAL(re->source);
+ break;
+ case REGEXP_GLOBAL:
+ *vp = BOOLEAN_TO_JSVAL((re->flags & JSREG_GLOB) != 0);
+ break;
+ case REGEXP_IGNORE_CASE:
+ *vp = BOOLEAN_TO_JSVAL((re->flags & JSREG_FOLD) != 0);
+ break;
+ case REGEXP_MULTILINE:
+ *vp = BOOLEAN_TO_JSVAL((re->flags & JSREG_MULTILINE) != 0);
+ break;
+ }
+ }
+ JS_UNLOCK_OBJ(cx, obj);
+ return JS_TRUE;
+}
+
+static JSBool
+regexp_setProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ JSBool ok;
+ jsint slot;
+ jsdouble lastIndex;
+
+ ok = JS_TRUE;
+ if (!JSVAL_IS_INT(id))
+ return ok;
+ slot = JSVAL_TO_INT(id);
+ if (slot == REGEXP_LAST_INDEX) {
+ if (!js_ValueToNumber(cx, *vp, &lastIndex))
+ return JS_FALSE;
+ lastIndex = js_DoubleToInteger(lastIndex);
+ ok = js_NewNumberValue(cx, lastIndex, vp) &&
+ JS_SetReservedSlot(cx, obj, 0, *vp);
+ }
+ return ok;
+}
+
+/*
+ * RegExp class static properties and their Perl counterparts:
+ *
+ * RegExp.input $_
+ * RegExp.multiline $*
+ * RegExp.lastMatch $&
+ * RegExp.lastParen $+
+ * RegExp.leftContext $`
+ * RegExp.rightContext $'
+ */
+enum regexp_static_tinyid {
+ REGEXP_STATIC_INPUT = -1,
+ REGEXP_STATIC_MULTILINE = -2,
+ REGEXP_STATIC_LAST_MATCH = -3,
+ REGEXP_STATIC_LAST_PAREN = -4,
+ REGEXP_STATIC_LEFT_CONTEXT = -5,
+ REGEXP_STATIC_RIGHT_CONTEXT = -6
+};
+
+JSBool
+js_InitRegExpStatics(JSContext *cx, JSRegExpStatics *res)
+{
+ JS_ClearRegExpStatics(cx);
+ return js_AddRoot(cx, &res->input, "res->input");
+}
+
+void
+js_FreeRegExpStatics(JSContext *cx, JSRegExpStatics *res)
+{
+ if (res->moreParens) {
+ JS_free(cx, res->moreParens);
+ res->moreParens = NULL;
+ }
+ js_RemoveRoot(cx->runtime, &res->input);
+}
+
+static JSBool
+regexp_static_getProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ jsint slot;
+ JSRegExpStatics *res;
+ JSString *str;
+ JSSubString *sub;
+
+ res = &cx->regExpStatics;
+ if (!JSVAL_IS_INT(id))
+ return JS_TRUE;
+ slot = JSVAL_TO_INT(id);
+ switch (slot) {
+ case REGEXP_STATIC_INPUT:
+ *vp = res->input ? STRING_TO_JSVAL(res->input)
+ : JS_GetEmptyStringValue(cx);
+ return JS_TRUE;
+ case REGEXP_STATIC_MULTILINE:
+ *vp = BOOLEAN_TO_JSVAL(res->multiline);
+ return JS_TRUE;
+ case REGEXP_STATIC_LAST_MATCH:
+ sub = &res->lastMatch;
+ break;
+ case REGEXP_STATIC_LAST_PAREN:
+ sub = &res->lastParen;
+ break;
+ case REGEXP_STATIC_LEFT_CONTEXT:
+ sub = &res->leftContext;
+ break;
+ case REGEXP_STATIC_RIGHT_CONTEXT:
+ sub = &res->rightContext;
+ break;
+ default:
+ sub = REGEXP_PAREN_SUBSTRING(res, slot);
+ break;
+ }
+ str = js_NewStringCopyN(cx, sub->chars, sub->length, 0);
+ if (!str)
+ return JS_FALSE;
+ *vp = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+
+static JSBool
+regexp_static_setProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ JSRegExpStatics *res;
+
+ if (!JSVAL_IS_INT(id))
+ return JS_TRUE;
+ res = &cx->regExpStatics;
+ /* XXX use if-else rather than switch to keep MSVC1.52 from crashing */
+ if (JSVAL_TO_INT(id) == REGEXP_STATIC_INPUT) {
+ if (!JSVAL_IS_STRING(*vp) &&
+ !JS_ConvertValue(cx, *vp, JSTYPE_STRING, vp)) {
+ return JS_FALSE;
+ }
+ res->input = JSVAL_TO_STRING(*vp);
+ } else if (JSVAL_TO_INT(id) == REGEXP_STATIC_MULTILINE) {
+ if (!JSVAL_IS_BOOLEAN(*vp) &&
+ !JS_ConvertValue(cx, *vp, JSTYPE_BOOLEAN, vp)) {
+ return JS_FALSE;
+ }
+ res->multiline = JSVAL_TO_BOOLEAN(*vp);
+ }
+ return JS_TRUE;
+}
+
+static JSPropertySpec regexp_static_props[] = {
+ {"input",
+ REGEXP_STATIC_INPUT,
+ JSPROP_ENUMERATE|JSPROP_SHARED,
+ regexp_static_getProperty, regexp_static_setProperty},
+ {"multiline",
+ REGEXP_STATIC_MULTILINE,
+ JSPROP_ENUMERATE|JSPROP_SHARED,
+ regexp_static_getProperty, regexp_static_setProperty},
+ {"lastMatch",
+ REGEXP_STATIC_LAST_MATCH,
+ JSPROP_ENUMERATE|JSPROP_READONLY|JSPROP_SHARED,
+ regexp_static_getProperty, regexp_static_getProperty},
+ {"lastParen",
+ REGEXP_STATIC_LAST_PAREN,
+ JSPROP_ENUMERATE|JSPROP_READONLY|JSPROP_SHARED,
+ regexp_static_getProperty, regexp_static_getProperty},
+ {"leftContext",
+ REGEXP_STATIC_LEFT_CONTEXT,
+ JSPROP_ENUMERATE|JSPROP_READONLY|JSPROP_SHARED,
+ regexp_static_getProperty, regexp_static_getProperty},
+ {"rightContext",
+ REGEXP_STATIC_RIGHT_CONTEXT,
+ JSPROP_ENUMERATE|JSPROP_READONLY|JSPROP_SHARED,
+ regexp_static_getProperty, regexp_static_getProperty},
+
+ /* XXX should have block scope and local $1, etc. */
+ {"$1", 0, JSPROP_ENUMERATE|JSPROP_READONLY|JSPROP_SHARED,
+ regexp_static_getProperty, regexp_static_getProperty},
+ {"$2", 1, JSPROP_ENUMERATE|JSPROP_READONLY|JSPROP_SHARED,
+ regexp_static_getProperty, regexp_static_getProperty},
+ {"$3", 2, JSPROP_ENUMERATE|JSPROP_READONLY|JSPROP_SHARED,
+ regexp_static_getProperty, regexp_static_getProperty},
+ {"$4", 3, JSPROP_ENUMERATE|JSPROP_READONLY|JSPROP_SHARED,
+ regexp_static_getProperty, regexp_static_getProperty},
+ {"$5", 4, JSPROP_ENUMERATE|JSPROP_READONLY|JSPROP_SHARED,
+ regexp_static_getProperty, regexp_static_getProperty},
+ {"$6", 5, JSPROP_ENUMERATE|JSPROP_READONLY|JSPROP_SHARED,
+ regexp_static_getProperty, regexp_static_getProperty},
+ {"$7", 6, JSPROP_ENUMERATE|JSPROP_READONLY|JSPROP_SHARED,
+ regexp_static_getProperty, regexp_static_getProperty},
+ {"$8", 7, JSPROP_ENUMERATE|JSPROP_READONLY|JSPROP_SHARED,
+ regexp_static_getProperty, regexp_static_getProperty},
+ {"$9", 8, JSPROP_ENUMERATE|JSPROP_READONLY|JSPROP_SHARED,
+ regexp_static_getProperty, regexp_static_getProperty},
+
+ {0,0,0,0,0}
+};
+
+static void
+regexp_finalize(JSContext *cx, JSObject *obj)
+{
+ JSRegExp *re;
+
+ re = (JSRegExp *) JS_GetPrivate(cx, obj);
+ if (!re)
+ return;
+ js_DestroyRegExp(cx, re);
+}
+
+/* Forward static prototype. */
+static JSBool
+regexp_exec(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval);
+
+static JSBool
+regexp_call(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ return regexp_exec(cx, JSVAL_TO_OBJECT(argv[-2]), argc, argv, rval);
+}
+
+#if JS_HAS_XDR
+
+#include "jsxdrapi.h"
+
+static JSBool
+regexp_xdrObject(JSXDRState *xdr, JSObject **objp)
+{
+ JSRegExp *re;
+ JSString *source;
+ uint32 flagsword;
+ JSObject *obj;
+
+ if (xdr->mode == JSXDR_ENCODE) {
+ re = (JSRegExp *) JS_GetPrivate(xdr->cx, *objp);
+ if (!re)
+ return JS_FALSE;
+ source = re->source;
+ flagsword = ((uint32)re->cloneIndex << 16) | re->flags;
+ }
+ if (!JS_XDRString(xdr, &source) ||
+ !JS_XDRUint32(xdr, &flagsword)) {
+ return JS_FALSE;
+ }
+ if (xdr->mode == JSXDR_DECODE) {
+ obj = js_NewObject(xdr->cx, &js_RegExpClass, NULL, NULL);
+ if (!obj)
+ return JS_FALSE;
+ re = js_NewRegExp(xdr->cx, NULL, source, (uint16)flagsword, JS_FALSE);
+ if (!re)
+ return JS_FALSE;
+ if (!JS_SetPrivate(xdr->cx, obj, re) ||
+ !js_SetLastIndex(xdr->cx, obj, 0)) {
+ js_DestroyRegExp(xdr->cx, re);
+ return JS_FALSE;
+ }
+ re->cloneIndex = (uint16)(flagsword >> 16);
+ *objp = obj;
+ }
+ return JS_TRUE;
+}
+
+#else /* !JS_HAS_XDR */
+
+#define regexp_xdrObject NULL
+
+#endif /* !JS_HAS_XDR */
+
+static uint32
+regexp_mark(JSContext *cx, JSObject *obj, void *arg)
+{
+ JSRegExp *re = (JSRegExp *) JS_GetPrivate(cx, obj);
+ if (re)
+ GC_MARK(cx, re->source, "source");
+ return 0;
+}
+
+JSClass js_RegExpClass = {
+ js_RegExp_str,
+ JSCLASS_HAS_PRIVATE | JSCLASS_HAS_RESERVED_SLOTS(1) |
+ JSCLASS_HAS_CACHED_PROTO(JSProto_RegExp),
+ JS_PropertyStub, JS_PropertyStub,
+ regexp_getProperty, regexp_setProperty,
+ JS_EnumerateStub, JS_ResolveStub,
+ JS_ConvertStub, regexp_finalize,
+ NULL, NULL,
+ regexp_call, NULL,
+ regexp_xdrObject, NULL,
+ regexp_mark, 0
+};
+
+static const jschar empty_regexp_ucstr[] = {'(', '?', ':', ')', 0};
+
+JSBool
+js_regexp_toString(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSRegExp *re;
+ const jschar *source;
+ jschar *chars;
+ size_t length, nflags;
+ uintN flags;
+ JSString *str;
+
+ if (!JS_InstanceOf(cx, obj, &js_RegExpClass, argv))
+ return JS_FALSE;
+ JS_LOCK_OBJ(cx, obj);
+ re = (JSRegExp *) JS_GetPrivate(cx, obj);
+ if (!re) {
+ JS_UNLOCK_OBJ(cx, obj);
+ *rval = STRING_TO_JSVAL(cx->runtime->emptyString);
+ return JS_TRUE;
+ }
+
+ source = JSSTRING_CHARS(re->source);
+ length = JSSTRING_LENGTH(re->source);
+ if (length == 0) {
+ source = empty_regexp_ucstr;
+ length = sizeof(empty_regexp_ucstr) / sizeof(jschar) - 1;
+ }
+ length += 2;
+ nflags = 0;
+ for (flags = re->flags; flags != 0; flags &= flags - 1)
+ nflags++;
+ chars = (jschar*) JS_malloc(cx, (length + nflags + 1) * sizeof(jschar));
+ if (!chars) {
+ JS_UNLOCK_OBJ(cx, obj);
+ return JS_FALSE;
+ }
+
+ chars[0] = '/';
+ js_strncpy(&chars[1], source, length - 2);
+ chars[length-1] = '/';
+ if (nflags) {
+ if (re->flags & JSREG_GLOB)
+ chars[length++] = 'g';
+ if (re->flags & JSREG_FOLD)
+ chars[length++] = 'i';
+ if (re->flags & JSREG_MULTILINE)
+ chars[length++] = 'm';
+ }
+ JS_UNLOCK_OBJ(cx, obj);
+ chars[length] = 0;
+
+ str = js_NewString(cx, chars, length, 0);
+ if (!str) {
+ JS_free(cx, chars);
+ return JS_FALSE;
+ }
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
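+
+/*
+ * Illustrative sketch (hypothetical name, not part of the engine): the flag
+ * counting loop above is the classic n &= n - 1 population count, clearing
+ * one set bit per iteration.
+ */
+#if 0
+static unsigned
+ExamplePopCount(unsigned v)
+{
+    unsigned n = 0;
+    while (v != 0) {
+        v &= v - 1;             /* clear the lowest set bit */
+        n++;
+    }
+    return n;
+}
+#endif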
+
+static JSBool
+regexp_compile(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSString *opt, *str;
+ JSRegExp *oldre, *re;
+ JSBool ok, ok2;
+ JSObject *obj2;
+ size_t length, nbytes;
+ const jschar *cp, *start, *end;
+ jschar *nstart, *ncp, *tmp;
+
+ if (!JS_InstanceOf(cx, obj, &js_RegExpClass, argv))
+ return JS_FALSE;
+ opt = NULL;
+ if (argc == 0) {
+ str = cx->runtime->emptyString;
+ } else {
+ if (JSVAL_IS_OBJECT(argv[0])) {
+ /*
+ * If we get passed in a RegExp object we construct a new
+ * RegExp that is a duplicate of it by re-compiling the
+ * original source code. ECMA requires that it be an error
+ * here if the flags are specified. (We must use the flags
+ * from the original RegExp also).
+ */
+ obj2 = JSVAL_TO_OBJECT(argv[0]);
+ if (obj2 && OBJ_GET_CLASS(cx, obj2) == &js_RegExpClass) {
+ if (argc >= 2 && !JSVAL_IS_VOID(argv[1])) { /* 'flags' passed */
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_NEWREGEXP_FLAGGED);
+ return JS_FALSE;
+ }
+ JS_LOCK_OBJ(cx, obj2);
+ re = (JSRegExp *) JS_GetPrivate(cx, obj2);
+ if (!re) {
+ JS_UNLOCK_OBJ(cx, obj2);
+ return JS_FALSE;
+ }
+ re = js_NewRegExp(cx, NULL, re->source, re->flags, JS_FALSE);
+ JS_UNLOCK_OBJ(cx, obj2);
+ goto created;
+ }
+ }
+ str = js_ValueToString(cx, argv[0]);
+ if (!str)
+ return JS_FALSE;
+ argv[0] = STRING_TO_JSVAL(str);
+ if (argc > 1) {
+ if (JSVAL_IS_VOID(argv[1])) {
+ opt = NULL;
+ } else {
+ opt = js_ValueToString(cx, argv[1]);
+ if (!opt)
+ return JS_FALSE;
+ argv[1] = STRING_TO_JSVAL(opt);
+ }
+ }
+
+ /* Escape any naked slashes in the regexp source. */
+ length = JSSTRING_LENGTH(str);
+ start = JSSTRING_CHARS(str);
+ end = start + length;
+ nstart = ncp = NULL;
+ for (cp = start; cp < end; cp++) {
+ if (*cp == '/' && (cp == start || cp[-1] != '\\')) {
+ nbytes = (++length + 1) * sizeof(jschar);
+ if (!nstart) {
+ nstart = (jschar *) JS_malloc(cx, nbytes);
+ if (!nstart)
+ return JS_FALSE;
+ ncp = nstart + (cp - start);
+ js_strncpy(nstart, start, cp - start);
+ } else {
+ tmp = (jschar *) JS_realloc(cx, nstart, nbytes);
+ if (!tmp) {
+ JS_free(cx, nstart);
+ return JS_FALSE;
+ }
+ ncp = tmp + (ncp - nstart);
+ nstart = tmp;
+ }
+ *ncp++ = '\\';
+ }
+ if (nstart)
+ *ncp++ = *cp;
+ }
+
+ if (nstart) {
+ /* Don't forget to store the backstop after the new string. */
+ JS_ASSERT((size_t)(ncp - nstart) == length);
+ *ncp = 0;
+ str = js_NewString(cx, nstart, length, 0);
+ if (!str) {
+ JS_free(cx, nstart);
+ return JS_FALSE;
+ }
+ argv[0] = STRING_TO_JSVAL(str);
+ }
+ }
+
+ re = js_NewRegExpOpt(cx, NULL, str, opt, JS_FALSE);
+created:
+ if (!re)
+ return JS_FALSE;
+ JS_LOCK_OBJ(cx, obj);
+ oldre = (JSRegExp *) JS_GetPrivate(cx, obj);
+ ok = JS_SetPrivate(cx, obj, re);
+ ok2 = js_SetLastIndex(cx, obj, 0);
+ JS_UNLOCK_OBJ(cx, obj);
+ if (!ok) {
+ js_DestroyRegExp(cx, re);
+ return JS_FALSE;
+ }
+ if (oldre)
+ js_DestroyRegExp(cx, oldre);
+ *rval = OBJECT_TO_JSVAL(obj);
+ return ok2;
+}
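+
+/*
+ * Worked example (illustrative only): the slash-escaping loop above turns a
+ * source string like a/b into a\/b so that toString() can safely wrap it in
+ * slashes, while an already escaped a\/b is left unchanged.
+ */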
+
+static JSBool
+regexp_exec_sub(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ JSBool test, jsval *rval)
+{
+ JSBool ok;
+ JSRegExp *re;
+ jsdouble lastIndex;
+ JSString *str;
+ size_t i;
+
+ ok = JS_InstanceOf(cx, obj, &js_RegExpClass, argv);
+ if (!ok)
+ return JS_FALSE;
+ JS_LOCK_OBJ(cx, obj);
+ re = (JSRegExp *) JS_GetPrivate(cx, obj);
+ if (!re) {
+ JS_UNLOCK_OBJ(cx, obj);
+ return JS_TRUE;
+ }
+
+    /* NB: from here on we must exit via the out: label, in order to drop re. */
+ HOLD_REGEXP(cx, re);
+ if (re->flags & JSREG_GLOB) {
+ ok = js_GetLastIndex(cx, obj, &lastIndex);
+ } else {
+ lastIndex = 0;
+ }
+ JS_UNLOCK_OBJ(cx, obj);
+ if (!ok)
+ goto out;
+
+ /* Now that obj is unlocked, it's safe to (potentially) grab the GC lock. */
+ if (argc == 0) {
+ str = cx->regExpStatics.input;
+ if (!str) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_NO_INPUT,
+ JS_GetStringBytes(re->source),
+ (re->flags & JSREG_GLOB) ? "g" : "",
+ (re->flags & JSREG_FOLD) ? "i" : "",
+ (re->flags & JSREG_MULTILINE) ? "m" : "");
+ ok = JS_FALSE;
+ goto out;
+ }
+ } else {
+ str = js_ValueToString(cx, argv[0]);
+ if (!str) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ argv[0] = STRING_TO_JSVAL(str);
+ }
+
+ if (lastIndex < 0 || JSSTRING_LENGTH(str) < lastIndex) {
+ ok = js_SetLastIndex(cx, obj, 0);
+ *rval = JSVAL_NULL;
+ } else {
+ i = (size_t) lastIndex;
+ ok = js_ExecuteRegExp(cx, re, str, &i, test, rval);
+ if (ok && (re->flags & JSREG_GLOB))
+ ok = js_SetLastIndex(cx, obj, (*rval == JSVAL_NULL) ? 0 : i);
+ }
+
+out:
+ DROP_REGEXP(cx, re);
+ return ok;
+}
+
+static JSBool
+regexp_exec(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ return regexp_exec_sub(cx, obj, argc, argv, JS_FALSE, rval);
+}
+
+static JSBool
+regexp_test(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ if (!regexp_exec_sub(cx, obj, argc, argv, JS_TRUE, rval))
+ return JS_FALSE;
+ if (*rval != JSVAL_TRUE)
+ *rval = JSVAL_FALSE;
+ return JS_TRUE;
+}
+
+static JSFunctionSpec regexp_methods[] = {
+#if JS_HAS_TOSOURCE
+ {js_toSource_str, js_regexp_toString, 0,0,0},
+#endif
+ {js_toString_str, js_regexp_toString, 0,0,0},
+ {"compile", regexp_compile, 1,0,0},
+ {"exec", regexp_exec, 0,0,0},
+ {"test", regexp_test, 0,0,0},
+ {0,0,0,0,0}
+};
+
+static JSBool
+RegExp(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ if (!(cx->fp->flags & JSFRAME_CONSTRUCTING)) {
+ /*
+ * If first arg is regexp and no flags are given, just return the arg.
+ * (regexp_compile detects the regexp + flags case and throws a
+         * TypeError.) See ECMA-262 15.10.3.1.
+ */
+ if ((argc < 2 || JSVAL_IS_VOID(argv[1])) &&
+ !JSVAL_IS_PRIMITIVE(argv[0]) &&
+ OBJ_GET_CLASS(cx, JSVAL_TO_OBJECT(argv[0])) == &js_RegExpClass) {
+ *rval = argv[0];
+ return JS_TRUE;
+ }
+
+ /* Otherwise, replace obj with a new RegExp object. */
+ obj = js_NewObject(cx, &js_RegExpClass, NULL, NULL);
+ if (!obj)
+ return JS_FALSE;
+
+ /*
+ * regexp_compile does not use rval to root its temporaries
+ * so we can use it to root obj.
+ */
+ *rval = OBJECT_TO_JSVAL(obj);
+ }
+ return regexp_compile(cx, obj, argc, argv, rval);
+}
+
+JSObject *
+js_InitRegExpClass(JSContext *cx, JSObject *obj)
+{
+ JSObject *proto, *ctor;
+ jsval rval;
+
+ proto = JS_InitClass(cx, obj, NULL, &js_RegExpClass, RegExp, 1,
+ regexp_props, regexp_methods,
+ regexp_static_props, NULL);
+
+ if (!proto || !(ctor = JS_GetConstructor(cx, proto)))
+ return NULL;
+ if (!JS_AliasProperty(cx, ctor, "input", "$_") ||
+ !JS_AliasProperty(cx, ctor, "multiline", "$*") ||
+ !JS_AliasProperty(cx, ctor, "lastMatch", "$&") ||
+ !JS_AliasProperty(cx, ctor, "lastParen", "$+") ||
+ !JS_AliasProperty(cx, ctor, "leftContext", "$`") ||
+ !JS_AliasProperty(cx, ctor, "rightContext", "$'")) {
+ goto bad;
+ }
+
+ /* Give RegExp.prototype private data so it matches the empty string. */
+ if (!regexp_compile(cx, proto, 0, NULL, &rval))
+ goto bad;
+ return proto;
+
+bad:
+ JS_DeleteProperty(cx, obj, js_RegExpClass.name);
+ return NULL;
+}
+
+JSObject *
+js_NewRegExpObject(JSContext *cx, JSTokenStream *ts,
+ jschar *chars, size_t length, uintN flags)
+{
+ JSString *str;
+ JSObject *obj;
+ JSRegExp *re;
+ JSTempValueRooter tvr;
+
+ str = js_NewStringCopyN(cx, chars, length, 0);
+ if (!str)
+ return NULL;
+ re = js_NewRegExp(cx, ts, str, flags, JS_FALSE);
+ if (!re)
+ return NULL;
+ JS_PUSH_TEMP_ROOT_STRING(cx, str, &tvr);
+ obj = js_NewObject(cx, &js_RegExpClass, NULL, NULL);
+ if (!obj || !JS_SetPrivate(cx, obj, re)) {
+ js_DestroyRegExp(cx, re);
+ obj = NULL;
+ }
+ if (obj && !js_SetLastIndex(cx, obj, 0))
+ obj = NULL;
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ return obj;
+}
+
+JSObject *
+js_CloneRegExpObject(JSContext *cx, JSObject *obj, JSObject *parent)
+{
+ JSObject *clone;
+ JSRegExp *re;
+
+ JS_ASSERT(OBJ_GET_CLASS(cx, obj) == &js_RegExpClass);
+ clone = js_NewObject(cx, &js_RegExpClass, NULL, parent);
+ if (!clone)
+ return NULL;
+ re = JS_GetPrivate(cx, obj);
+ if (!JS_SetPrivate(cx, clone, re) || !js_SetLastIndex(cx, clone, 0)) {
+ cx->weakRoots.newborn[GCX_OBJECT] = NULL;
+ return NULL;
+ }
+ HOLD_REGEXP(cx, re);
+ return clone;
+}
+
+JSBool
+js_GetLastIndex(JSContext *cx, JSObject *obj, jsdouble *lastIndex)
+{
+ jsval v;
+
+ return JS_GetReservedSlot(cx, obj, 0, &v) &&
+ js_ValueToNumber(cx, v, lastIndex);
+}
+
+JSBool
+js_SetLastIndex(JSContext *cx, JSObject *obj, jsdouble lastIndex)
+{
+ jsval v;
+
+ return js_NewNumberValue(cx, lastIndex, &v) &&
+ JS_SetReservedSlot(cx, obj, 0, v);
+}
diff --git a/third_party/js-1.7/jsregexp.h b/third_party/js-1.7/jsregexp.h
new file mode 100644
index 0000000..5078983
--- /dev/null
+++ b/third_party/js-1.7/jsregexp.h
@@ -0,0 +1,183 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsregexp_h___
+#define jsregexp_h___
+/*
+ * JS regular expression interface.
+ */
+#include <stddef.h>
+#include "jspubtd.h"
+#include "jsstr.h"
+
+#ifdef JS_THREADSAFE
+#include "jsdhash.h"
+#endif
+
+struct JSRegExpStatics {
+ JSString *input; /* input string to match (perl $_, GC root) */
+ JSBool multiline; /* whether input contains newlines (perl $*) */
+ uint16 parenCount; /* number of valid elements in parens[] */
+ uint16 moreLength; /* number of allocated elements in moreParens */
+ JSSubString parens[9]; /* last set of parens matched (perl $1, $2) */
+ JSSubString *moreParens; /* null or realloc'd vector for $10, etc. */
+ JSSubString lastMatch; /* last string matched (perl $&) */
+ JSSubString lastParen; /* last paren matched (perl $+) */
+ JSSubString leftContext; /* input to left of last match (perl $`) */
+ JSSubString rightContext; /* input to right of last match (perl $') */
+};
+
+/*
+ * This struct holds a bitmap representation of a class from a regexp.
+ * There's a list of these referenced by the classList field in the JSRegExp
+ * struct below. The initial state has startIndex set to the offset in the
+ * original regexp source of the beginning of the class contents. The first
+ * use of the class converts the source representation into a bitmap.
+ */
+typedef struct RECharSet {
+ JSPackedBool converted;
+ JSPackedBool sense;
+ uint16 length;
+ union {
+ uint8 *bits;
+ struct {
+ size_t startIndex;
+ size_t length;
+ } src;
+ } u;
+} RECharSet;
+
+/*
+ * This macro is safe because, whenever parenCount > 9, moreParens is
+ * guaranteed to be allocated and big enough to hold parenCount - 9
+ * substrings; it is never dereferenced otherwise.
+ */
+#define REGEXP_PAREN_SUBSTRING(res, num) \
+ (((jsuint)(num) < (jsuint)(res)->parenCount) \
+ ? ((jsuint)(num) < 9) \
+ ? &(res)->parens[num] \
+ : &(res)->moreParens[(num) - 9] \
+ : &js_EmptySubString)
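+
+/*
+ * Illustrative sketch (not part of the API; the function is hypothetical):
+ * the macro above behaves like this accessor -- parens 0..8 live inline in
+ * the statics, later ones in the moreParens vector, and an out-of-range
+ * request yields the shared empty substring.
+ */
+#if 0
+static JSSubString *
+ExampleParenSubstring(JSRegExpStatics *res, jsuint num)
+{
+    if (num >= (jsuint)res->parenCount)
+        return &js_EmptySubString;
+    return (num < 9) ? &res->parens[num] : &res->moreParens[num - 9];
+}
+#endif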
+
+typedef struct RENode RENode;
+
+struct JSRegExp {
+ jsrefcount nrefs; /* reference count */
+ uint16 flags; /* flags, see jsapi.h's JSREG_* defines */
+ uint16 cloneIndex; /* index in fp->vars or funobj->slots of
+ cloned regexp object */
+ size_t parenCount; /* number of parenthesized submatches */
+ size_t classCount; /* count [...] bitmaps */
+ RECharSet *classList; /* list of [...] bitmaps */
+ JSString *source; /* locked source string, sans // */
+ jsbytecode program[1]; /* regular expression bytecode */
+};
+
+extern JSRegExp *
+js_NewRegExp(JSContext *cx, JSTokenStream *ts,
+ JSString *str, uintN flags, JSBool flat);
+
+extern JSRegExp *
+js_NewRegExpOpt(JSContext *cx, JSTokenStream *ts,
+ JSString *str, JSString *opt, JSBool flat);
+
+#define HOLD_REGEXP(cx, re) JS_ATOMIC_INCREMENT(&(re)->nrefs)
+#define DROP_REGEXP(cx, re) js_DestroyRegExp(cx, re)
+
+extern void
+js_DestroyRegExp(JSContext *cx, JSRegExp *re);
+
+/*
+ * Execute re on input str at *indexp, setting *rval to null on mismatch.
+ * On match, set *rval to true if test is true, otherwise to an array object.
+ * Always update *indexp and cx->regExpStatics on match.
+ */
+extern JSBool
+js_ExecuteRegExp(JSContext *cx, JSRegExp *re, JSString *str, size_t *indexp,
+ JSBool test, jsval *rval);
+
+/*
+ * These two add and remove GC roots, respectively, so their calls must be
+ * well-ordered.
+ */
+extern JSBool
+js_InitRegExpStatics(JSContext *cx, JSRegExpStatics *res);
+
+extern void
+js_FreeRegExpStatics(JSContext *cx, JSRegExpStatics *res);
+
+#define JSVAL_IS_REGEXP(cx, v) \
+ (JSVAL_IS_OBJECT(v) && JSVAL_TO_OBJECT(v) && \
+ OBJ_GET_CLASS(cx, JSVAL_TO_OBJECT(v)) == &js_RegExpClass)
+
+extern JSClass js_RegExpClass;
+
+extern JSObject *
+js_InitRegExpClass(JSContext *cx, JSObject *obj);
+
+/*
+ * Export js_regexp_toString to the decompiler.
+ */
+extern JSBool
+js_regexp_toString(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval);
+
+/*
+ * Create, serialize/deserialize, or clone a RegExp object.
+ */
+extern JSObject *
+js_NewRegExpObject(JSContext *cx, JSTokenStream *ts,
+ jschar *chars, size_t length, uintN flags);
+
+extern JSBool
+js_XDRRegExp(JSXDRState *xdr, JSObject **objp);
+
+extern JSObject *
+js_CloneRegExpObject(JSContext *cx, JSObject *obj, JSObject *parent);
+
+/*
+ * Get and set the per-object (clone or clone-parent) lastIndex slot.
+ */
+extern JSBool
+js_GetLastIndex(JSContext *cx, JSObject *obj, jsdouble *lastIndex);
+
+extern JSBool
+js_SetLastIndex(JSContext *cx, JSObject *obj, jsdouble lastIndex);
+
+#endif /* jsregexp_h___ */
diff --git a/third_party/js-1.7/jsscan.c b/third_party/js-1.7/jsscan.c
new file mode 100644
index 0000000..f9f7436
--- /dev/null
+++ b/third_party/js-1.7/jsscan.c
@@ -0,0 +1,2101 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set sw=4 ts=8 et tw=78:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * JS lexical scanner.
+ */
+#include "jsstddef.h"
+#include <stdio.h> /* first to avoid trouble on some systems */
+#include <errno.h>
+#include <limits.h>
+#include <math.h>
+#ifdef HAVE_MEMORY_H
+#include <memory.h>
+#endif
+#include <stdarg.h>
+#include <stdlib.h>
+#include <string.h>
+#include "jstypes.h"
+#include "jsarena.h" /* Added by JSIFY */
+#include "jsutil.h" /* Added by JSIFY */
+#include "jsdtoa.h"
+#include "jsprf.h"
+#include "jsapi.h"
+#include "jsatom.h"
+#include "jscntxt.h"
+#include "jsconfig.h"
+#include "jsemit.h"
+#include "jsexn.h"
+#include "jsnum.h"
+#include "jsopcode.h"
+#include "jsregexp.h"
+#include "jsscan.h"
+#include "jsscript.h"
+
+#if JS_HAS_XML_SUPPORT
+#include "jsparse.h"
+#include "jsxml.h"
+#endif
+
+#define JS_KEYWORD(keyword, type, op, version) \
+ const char js_##keyword##_str[] = #keyword;
+#include "jskeyword.tbl"
+#undef JS_KEYWORD
+
+struct keyword {
+ const char *chars; /* C string with keyword text */
+ JSTokenType tokentype; /* JSTokenType */
+ JSOp op; /* JSOp */
+ JSVersion version; /* JSVersion */
+};
+
+static const struct keyword keyword_defs[] = {
+#define JS_KEYWORD(keyword, type, op, version) \
+ {js_##keyword##_str, type, op, version},
+#include "jskeyword.tbl"
+#undef JS_KEYWORD
+};
+
+#define KEYWORD_COUNT (sizeof keyword_defs / sizeof keyword_defs[0])
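+/*
+ * The two JS_KEYWORD expansions above turn each jskeyword.tbl entry into a
+ * string constant plus a table row.  For a hypothetical entry such as
+ * JS_KEYWORD(if, TOK_IF, JSOP_NOP, JSVERSION_DEFAULT) they would yield:
+ *
+ *     const char js_if_str[] = "if";
+ *     {js_if_str, TOK_IF, JSOP_NOP, JSVERSION_DEFAULT},
+ */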
+
+static const struct keyword *
+FindKeyword(const jschar *s, size_t length)
+{
+ register size_t i;
+ const struct keyword *kw;
+ const char *chars;
+
+ JS_ASSERT(length != 0);
+
+#define JSKW_LENGTH() length
+#define JSKW_AT(column) s[column]
+#define JSKW_GOT_MATCH(index) i = (index); goto got_match;
+#define JSKW_TEST_GUESS(index) i = (index); goto test_guess;
+#define JSKW_NO_MATCH() goto no_match;
+#include "jsautokw.h"
+#undef JSKW_NO_MATCH
+#undef JSKW_TEST_GUESS
+#undef JSKW_GOT_MATCH
+#undef JSKW_AT
+#undef JSKW_LENGTH
+
+ got_match:
+ return &keyword_defs[i];
+
+ test_guess:
+ kw = &keyword_defs[i];
+ chars = kw->chars;
+ do {
+ if (*s++ != (unsigned char)(*chars++))
+ goto no_match;
+ } while (--length != 0);
+ return kw;
+
+ no_match:
+ return NULL;
+}
+
+JSTokenType
+js_CheckKeyword(const jschar *str, size_t length)
+{
+ const struct keyword *kw;
+
+ JS_ASSERT(length != 0);
+ kw = FindKeyword(str, length);
+ return kw ? kw->tokentype : TOK_EOF;
+}
+
+JS_FRIEND_API(void)
+js_MapKeywords(void (*mapfun)(const char *))
+{
+ size_t i;
+
+ for (i = 0; i != KEYWORD_COUNT; ++i)
+ mapfun(keyword_defs[i].chars);
+}
+
+JSTokenStream *
+js_NewTokenStream(JSContext *cx, const jschar *base, size_t length,
+ const char *filename, uintN lineno,
+ JSPrincipals *principals)
+{
+ JSTokenStream *ts;
+
+ ts = js_NewBufferTokenStream(cx, base, length);
+ if (!ts)
+ return NULL;
+ ts->filename = filename;
+ ts->lineno = lineno;
+ if (principals)
+ JSPRINCIPALS_HOLD(cx, principals);
+ ts->principals = principals;
+ return ts;
+}
+
+#define TBMIN 64
+
+static JSBool
+GrowTokenBuf(JSStringBuffer *sb, size_t newlength)
+{
+ JSContext *cx;
+ jschar *base;
+ ptrdiff_t offset, length;
+ size_t tbsize;
+ JSArenaPool *pool;
+
+ cx = sb->data;
+ base = sb->base;
+ offset = PTRDIFF(sb->ptr, base, jschar);
+ pool = &cx->tempPool;
+ if (!base) {
+ tbsize = TBMIN * sizeof(jschar);
+ length = TBMIN - 1;
+ JS_ARENA_ALLOCATE_CAST(base, jschar *, pool, tbsize);
+ } else {
+ length = PTRDIFF(sb->limit, base, jschar);
+ if ((size_t)length >= ~(size_t)0 / sizeof(jschar)) {
+ base = NULL;
+ } else {
+ tbsize = (length + 1) * sizeof(jschar);
+ length += length + 1;
+ JS_ARENA_GROW_CAST(base, jschar *, pool, tbsize, tbsize);
+ }
+ }
+ if (!base) {
+ JS_ReportOutOfMemory(cx);
+ sb->base = STRING_BUFFER_ERROR_BASE;
+ return JS_FALSE;
+ }
+ sb->base = base;
+ sb->limit = base + length;
+ sb->ptr = base + offset;
+ return JS_TRUE;
+}
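+/*
+ * Growth sketch (assuming JS_ARENA_GROW_CAST extends the allocation by
+ * tbsize bytes): the token buffer starts with TBMIN - 1 == 63 usable
+ * jschars, then grows to 127, 255, ... -- length becomes 2*length + 1 on
+ * each call, always leaving one slot free for a terminating NUL.
+ */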
+
+JS_FRIEND_API(JSTokenStream *)
+js_NewBufferTokenStream(JSContext *cx, const jschar *base, size_t length)
+{
+ size_t nb;
+ JSTokenStream *ts;
+
+ nb = sizeof(JSTokenStream) + JS_LINE_LIMIT * sizeof(jschar);
+ JS_ARENA_ALLOCATE_CAST(ts, JSTokenStream *, &cx->tempPool, nb);
+ if (!ts) {
+ JS_ReportOutOfMemory(cx);
+ return NULL;
+ }
+ memset(ts, 0, nb);
+ ts->lineno = 1;
+ ts->linebuf.base = ts->linebuf.limit = ts->linebuf.ptr = (jschar *)(ts + 1);
+ ts->userbuf.base = (jschar *)base;
+ ts->userbuf.limit = (jschar *)base + length;
+ ts->userbuf.ptr = (jschar *)base;
+ ts->tokenbuf.grow = GrowTokenBuf;
+ ts->tokenbuf.data = cx;
+ ts->listener = cx->runtime->sourceHandler;
+ ts->listenerData = cx->runtime->sourceHandlerData;
+ return ts;
+}
+
+JS_FRIEND_API(JSTokenStream *)
+js_NewFileTokenStream(JSContext *cx, const char *filename, FILE *defaultfp)
+{
+ jschar *base;
+ JSTokenStream *ts;
+ FILE *file;
+
+ JS_ARENA_ALLOCATE_CAST(base, jschar *, &cx->tempPool,
+ JS_LINE_LIMIT * sizeof(jschar));
+ if (!base)
+ return NULL;
+ ts = js_NewBufferTokenStream(cx, base, JS_LINE_LIMIT);
+ if (!ts)
+ return NULL;
+ if (!filename || strcmp(filename, "-") == 0) {
+ file = defaultfp;
+ } else {
+ file = fopen(filename, "r");
+ if (!file) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_CANT_OPEN,
+ filename, "No such file or directory");
+ return NULL;
+ }
+ }
+ ts->userbuf.ptr = ts->userbuf.limit;
+ ts->file = file;
+ ts->filename = filename;
+ return ts;
+}
+
+JS_FRIEND_API(JSBool)
+js_CloseTokenStream(JSContext *cx, JSTokenStream *ts)
+{
+ if (ts->flags & TSF_OWNFILENAME)
+ JS_free(cx, (void *) ts->filename);
+ if (ts->principals)
+ JSPRINCIPALS_DROP(cx, ts->principals);
+ return !ts->file || fclose(ts->file) == 0;
+}
+
+JS_FRIEND_API(int)
+js_fgets(char *buf, int size, FILE *file)
+{
+ int n, i, c;
+ JSBool crflag;
+
+ n = size - 1;
+ if (n < 0)
+ return -1;
+
+ crflag = JS_FALSE;
+ for (i = 0; i < n && (c = getc(file)) != EOF; i++) {
+ buf[i] = c;
+ if (c == '\n') { /* any \n ends a line */
+ i++; /* keep the \n; we know there is room for \0 */
+ break;
+ }
+ if (crflag) { /* \r not followed by \n ends line at the \r */
+ ungetc(c, file);
+ break; /* and overwrite c in buf with \0 */
+ }
+ crflag = (c == '\r');
+ }
+
+ buf[i] = '\0';
+ return i;
+}
+
+static int32
+GetChar(JSTokenStream *ts)
+{
+ int32 c;
+ ptrdiff_t i, j, len, olen;
+ JSBool crflag;
+ char cbuf[JS_LINE_LIMIT];
+ jschar *ubuf, *nl;
+
+ if (ts->ungetpos != 0) {
+ c = ts->ungetbuf[--ts->ungetpos];
+ } else {
+ do {
+ if (ts->linebuf.ptr == ts->linebuf.limit) {
+ len = PTRDIFF(ts->userbuf.limit, ts->userbuf.ptr, jschar);
+ if (len <= 0) {
+ if (!ts->file) {
+ ts->flags |= TSF_EOF;
+ return EOF;
+ }
+
+ /* Fill ts->userbuf so that \r and \r\n convert to \n. */
+ crflag = (ts->flags & TSF_CRFLAG) != 0;
+ len = js_fgets(cbuf, JS_LINE_LIMIT - crflag, ts->file);
+ if (len <= 0) {
+ ts->flags |= TSF_EOF;
+ return EOF;
+ }
+ olen = len;
+ ubuf = ts->userbuf.base;
+ i = 0;
+ if (crflag) {
+ ts->flags &= ~TSF_CRFLAG;
+ if (cbuf[0] != '\n') {
+ ubuf[i++] = '\n';
+ len++;
+ ts->linepos--;
+ }
+ }
+ for (j = 0; i < len; i++, j++)
+ ubuf[i] = (jschar) (unsigned char) cbuf[j];
+ ts->userbuf.limit = ubuf + len;
+ ts->userbuf.ptr = ubuf;
+ }
+ if (ts->listener) {
+ ts->listener(ts->filename, ts->lineno, ts->userbuf.ptr, len,
+ &ts->listenerTSData, ts->listenerData);
+ }
+
+ nl = ts->saveEOL;
+ if (!nl) {
+ /*
+ * Any one of \n, \r, or \r\n ends a line (the longest
+ * match wins). Also allow the Unicode line and paragraph
+ * separators.
+ */
+ for (nl = ts->userbuf.ptr; nl < ts->userbuf.limit; nl++) {
+ /*
+ * Try to prevent value-testing on most characters by
+ * filtering out characters that aren't 000x or 202x.
+ */
+ if ((*nl & 0xDFD0) == 0) {
+ if (*nl == '\n')
+ break;
+ if (*nl == '\r') {
+ if (nl + 1 < ts->userbuf.limit && nl[1] == '\n')
+ nl++;
+ break;
+ }
+ if (*nl == LINE_SEPARATOR || *nl == PARA_SEPARATOR)
+ break;
+ }
+ }
+ }
+
+ /*
+ * If there was a line terminator, copy thru it into linebuf.
+ * Else copy JS_LINE_LIMIT-1 bytes into linebuf.
+ */
+ if (nl < ts->userbuf.limit)
+ len = PTRDIFF(nl, ts->userbuf.ptr, jschar) + 1;
+ if (len >= JS_LINE_LIMIT) {
+ len = JS_LINE_LIMIT - 1;
+ ts->saveEOL = nl;
+ } else {
+ ts->saveEOL = NULL;
+ }
+ js_strncpy(ts->linebuf.base, ts->userbuf.ptr, len);
+ ts->userbuf.ptr += len;
+ olen = len;
+
+ /*
+ * Make sure linebuf contains \n for EOL (don't do this in
+ * userbuf because the user's string might be readonly).
+ */
+ if (nl < ts->userbuf.limit) {
+ if (*nl == '\r') {
+ if (ts->linebuf.base[len-1] == '\r') {
+ /*
+ * Does the line segment end in \r? We must check
+ * for a \n at the front of the next segment before
+ * storing a \n into linebuf. This case matters
+ * only when we're reading from a file.
+ */
+ if (nl + 1 == ts->userbuf.limit && ts->file) {
+ len--;
+ ts->flags |= TSF_CRFLAG; /* clear NLFLAG? */
+ if (len == 0) {
+ /*
+ * This can happen when a segment ends in
+ * \r\r. Start over. ptr == limit in this
+ * case, so we'll fall into buffer-filling
+ * code.
+ */
+ return GetChar(ts);
+ }
+ } else {
+ ts->linebuf.base[len-1] = '\n';
+ }
+ }
+ } else if (*nl == '\n') {
+ if (nl > ts->userbuf.base &&
+ nl[-1] == '\r' &&
+ ts->linebuf.base[len-2] == '\r') {
+ len--;
+ JS_ASSERT(ts->linebuf.base[len] == '\n');
+ ts->linebuf.base[len-1] = '\n';
+ }
+ } else if (*nl == LINE_SEPARATOR || *nl == PARA_SEPARATOR) {
+ ts->linebuf.base[len-1] = '\n';
+ }
+ }
+
+ /* Reset linebuf based on adjusted segment length. */
+ ts->linebuf.limit = ts->linebuf.base + len;
+ ts->linebuf.ptr = ts->linebuf.base;
+
+ /* Update position of linebuf within physical userbuf line. */
+ if (!(ts->flags & TSF_NLFLAG))
+ ts->linepos += ts->linelen;
+ else
+ ts->linepos = 0;
+ if (ts->linebuf.limit[-1] == '\n')
+ ts->flags |= TSF_NLFLAG;
+ else
+ ts->flags &= ~TSF_NLFLAG;
+
+ /* Update linelen from original segment length. */
+ ts->linelen = olen;
+ }
+ c = *ts->linebuf.ptr++;
+ } while (JS_ISFORMAT(c));
+ }
+ if (c == '\n')
+ ts->lineno++;
+ return c;
+}
+
+static void
+UngetChar(JSTokenStream *ts, int32 c)
+{
+ if (c == EOF)
+ return;
+ JS_ASSERT(ts->ungetpos < sizeof ts->ungetbuf / sizeof ts->ungetbuf[0]);
+ if (c == '\n')
+ ts->lineno--;
+ ts->ungetbuf[ts->ungetpos++] = (jschar)c;
+}
+
+static int32
+PeekChar(JSTokenStream *ts)
+{
+ int32 c;
+
+ c = GetChar(ts);
+ UngetChar(ts, c);
+ return c;
+}
+
+/*
+ * Peek n chars ahead into ts. Return true if n chars were read, false if
+ * there weren't enough characters in the input stream. This function cannot
+ * be used to peek into or past a newline.
+ */
+static JSBool
+PeekChars(JSTokenStream *ts, intN n, jschar *cp)
+{
+ intN i, j;
+ int32 c;
+
+ for (i = 0; i < n; i++) {
+ c = GetChar(ts);
+ if (c == EOF)
+ break;
+ if (c == '\n') {
+ UngetChar(ts, c);
+ break;
+ }
+ cp[i] = (jschar)c;
+ }
+ for (j = i - 1; j >= 0; j--)
+ UngetChar(ts, cp[j]);
+ return i == n;
+}
+
+static void
+SkipChars(JSTokenStream *ts, intN n)
+{
+ while (--n >= 0)
+ GetChar(ts);
+}
+
+static JSBool
+MatchChar(JSTokenStream *ts, int32 expect)
+{
+ int32 c;
+
+ c = GetChar(ts);
+ if (c == expect)
+ return JS_TRUE;
+ UngetChar(ts, c);
+ return JS_FALSE;
+}
+
+static JSBool
+ReportCompileErrorNumber(JSContext *cx, void *handle, uintN flags,
+ uintN errorNumber, JSErrorReport *report,
+ JSBool charArgs, va_list ap)
+{
+ JSTempValueRooter linetvr;
+ JSString *linestr = NULL;
+ JSTokenStream *ts = NULL;
+ JSCodeGenerator *cg = NULL;
+ JSParseNode *pn = NULL;
+ JSErrorReporter onError;
+ JSTokenPos *tp;
+ JSStackFrame *fp;
+ uintN index;
+ char *message;
+ JSBool warning;
+
+ memset(report, 0, sizeof (struct JSErrorReport));
+ report->flags = flags;
+ report->errorNumber = errorNumber;
+ message = NULL;
+
+ if (!js_ExpandErrorArguments(cx, js_GetErrorMessage, NULL,
+ errorNumber, &message, report, &warning,
+ charArgs, ap)) {
+ return JS_FALSE;
+ }
+
+ JS_PUSH_TEMP_ROOT_STRING(cx, NULL, &linetvr);
+
+ switch (flags & JSREPORT_HANDLE) {
+ case JSREPORT_TS:
+ ts = handle;
+ break;
+ case JSREPORT_CG:
+ cg = handle;
+ break;
+ case JSREPORT_PN:
+ pn = handle;
+ ts = pn->pn_ts;
+ break;
+ }
+
+ JS_ASSERT(!ts || ts->linebuf.limit < ts->linebuf.base + JS_LINE_LIMIT);
+ /*
+ * We are typically called with non-null ts and null cg from jsparse.c.
+ * We can be called with null ts from the regexp compilation functions.
+ * The code generator (jsemit.c) may pass null ts and non-null cg.
+ */
+ do {
+ if (ts) {
+ report->filename = ts->filename;
+ if (pn) {
+ report->lineno = pn->pn_pos.begin.lineno;
+ if (report->lineno != ts->lineno)
+ break;
+ }
+ report->lineno = ts->lineno;
+ linestr = js_NewStringCopyN(cx, ts->linebuf.base,
+ PTRDIFF(ts->linebuf.limit,
+ ts->linebuf.base,
+ jschar),
+ 0);
+ linetvr.u.string = linestr;
+ report->linebuf = linestr
+ ? JS_GetStringBytes(linestr)
+ : NULL;
+ tp = &ts->tokens[(ts->cursor+ts->lookahead) & NTOKENS_MASK].pos;
+ if (pn)
+ tp = &pn->pn_pos;
+
+ /*
+ * FIXME: What should instead happen here is that we should
+ * find error-tokens in userbuf, if !ts->file. That will
+ * allow us to deliver a more helpful error message, which
+ * includes all or part of the bad string or bad token. The
+ * code here yields something that looks truncated.
+ * See https://bugzilla.mozilla.org/show_bug.cgi?id=352970
+ */
+ index = 0;
+ if (tp->begin.lineno == tp->end.lineno) {
+ if (tp->begin.index < ts->linepos)
+ break;
+
+ index = tp->begin.index - ts->linepos;
+ }
+
+ report->tokenptr = linestr ? report->linebuf + index : NULL;
+ report->uclinebuf = linestr ? JS_GetStringChars(linestr) : NULL;
+ report->uctokenptr = linestr ? report->uclinebuf + index : NULL;
+ break;
+ }
+
+ if (cg) {
+ report->filename = cg->filename;
+ report->lineno = CG_CURRENT_LINE(cg);
+ break;
+ }
+
+ /*
+ * If we can't find out where the error was based on the current
+ * frame, see if the next frame has a script/pc combo we can use.
+ */
+ for (fp = cx->fp; fp; fp = fp->down) {
+ if (fp->script && fp->pc) {
+ report->filename = fp->script->filename;
+ report->lineno = js_PCToLineNumber(cx, fp->script, fp->pc);
+ break;
+ }
+ }
+ } while (0);
+
+ /*
+ * If there's a runtime exception type associated with this error
+ * number, set that as the pending exception. For errors occurring at
+ * compile time, this is very likely to be a JSEXN_SYNTAXERR.
+ *
+ * If an exception is thrown but not caught, the JSREPORT_EXCEPTION
+ * flag will be set in report.flags. Proper behavior for an error
+ * reporter is to ignore a report with this flag for all but top-level
+ * compilation errors. The exception will remain pending, and so long
+ * as the non-top-level "load", "eval", or "compile" native function
+ * returns false, the top-level reporter will eventually receive the
+ * uncaught exception report.
+ *
+ * XXX it'd probably be best if there was only one call to this
+ * function, but there seem to be two error reporter call points.
+ */
+ onError = cx->errorReporter;
+
+ /*
+ * Try to raise an exception only if there isn't one already set --
+ * otherwise the exception will describe the last compile-time error,
+ * which is likely spurious.
+ */
+ if (!ts || !(ts->flags & TSF_ERROR)) {
+ if (js_ErrorToException(cx, message, report))
+ onError = NULL;
+ }
+
+ /*
+ * Suppress any compile-time errors that don't occur at the top level.
+ * This may still fail, as interpLevel may be zero in contexts where we
+ * don't really want to call the error reporter, as when js is called
+ * by other code which could catch the error.
+ */
+ if (cx->interpLevel != 0 && !JSREPORT_IS_WARNING(flags))
+ onError = NULL;
+
+ if (onError) {
+ JSDebugErrorHook hook = cx->runtime->debugErrorHook;
+
+ /*
+ * If debugErrorHook is present then we give it a chance to veto
+ * sending the error on to the regular error reporter.
+ */
+ if (hook && !hook(cx, message, report,
+ cx->runtime->debugErrorHookData)) {
+ onError = NULL;
+ }
+ }
+ if (onError)
+ (*onError)(cx, message, report);
+
+ if (message)
+ JS_free(cx, message);
+ if (report->ucmessage)
+ JS_free(cx, (void *)report->ucmessage);
+
+ JS_POP_TEMP_ROOT(cx, &linetvr);
+
+ if (ts && !JSREPORT_IS_WARNING(flags)) {
+ /* Set the error flag to suppress spurious reports. */
+ ts->flags |= TSF_ERROR;
+ }
+
+ return warning;
+}
+
+JSBool
+js_ReportCompileErrorNumber(JSContext *cx, void *handle, uintN flags,
+ uintN errorNumber, ...)
+{
+ va_list ap;
+ JSErrorReport report;
+ JSBool warning;
+
+ if ((flags & JSREPORT_STRICT) && !JS_HAS_STRICT_OPTION(cx))
+ return JS_TRUE;
+
+ va_start(ap, errorNumber);
+ warning = ReportCompileErrorNumber(cx, handle, flags, errorNumber,
+ &report, JS_TRUE, ap);
+ va_end(ap);
+
+ /*
+ * Free each messageArgs entry here: with charArgs set, the arguments were
+ * copied during expansion, whereas js_ReportCompileErrorNumberUC receives
+ * caller-owned jschar strings and frees only the array itself.
+ */
+ if (report.messageArgs) {
+ int i = 0;
+ while (report.messageArgs[i])
+ JS_free(cx, (void *)report.messageArgs[i++]);
+ JS_free(cx, (void *)report.messageArgs);
+ }
+
+ return warning;
+}
+
+JSBool
+js_ReportCompileErrorNumberUC(JSContext *cx, void *handle, uintN flags,
+ uintN errorNumber, ...)
+{
+ va_list ap;
+ JSErrorReport report;
+ JSBool warning;
+
+ if ((flags & JSREPORT_STRICT) && !JS_HAS_STRICT_OPTION(cx))
+ return JS_TRUE;
+
+ va_start(ap, errorNumber);
+ warning = ReportCompileErrorNumber(cx, handle, flags, errorNumber,
+ &report, JS_FALSE, ap);
+ va_end(ap);
+
+ if (report.messageArgs)
+ JS_free(cx, (void *)report.messageArgs);
+
+ return warning;
+}
+
+static JSBool
+GrowStringBuffer(JSStringBuffer *sb, size_t newlength)
+{
+ ptrdiff_t offset;
+ jschar *bp;
+
+ offset = PTRDIFF(sb->ptr, sb->base, jschar);
+ JS_ASSERT(offset >= 0);
+ newlength += offset + 1;
+ if ((size_t)offset < newlength && newlength < ~(size_t)0 / sizeof(jschar))
+ bp = realloc(sb->base, newlength * sizeof(jschar));
+ else
+ bp = NULL;
+ if (!bp) {
+ free(sb->base);
+ sb->base = STRING_BUFFER_ERROR_BASE;
+ return JS_FALSE;
+ }
+ sb->base = bp;
+ sb->ptr = bp + offset;
+ sb->limit = bp + newlength - 1;
+ return JS_TRUE;
+}
+
+static void
+FreeStringBuffer(JSStringBuffer *sb)
+{
+ JS_ASSERT(STRING_BUFFER_OK(sb));
+ if (sb->base)
+ free(sb->base);
+}
+
+void
+js_InitStringBuffer(JSStringBuffer *sb)
+{
+ sb->base = sb->limit = sb->ptr = NULL;
+ sb->data = NULL;
+ sb->grow = GrowStringBuffer;
+ sb->free = FreeStringBuffer;
+}
+
+void
+js_FinishStringBuffer(JSStringBuffer *sb)
+{
+ sb->free(sb);
+}
+
+#define ENSURE_STRING_BUFFER(sb,n) \
+ ((sb)->ptr + (n) <= (sb)->limit || sb->grow(sb, n))
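+/*
+ * ENSURE_STRING_BUFFER evaluates to true when n more jschars fit before
+ * sb->limit, and otherwise to the result of sb->grow(sb, n); callers such
+ * as FastAppendChar below simply bail out when it yields false.
+ */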
+
+static void
+FastAppendChar(JSStringBuffer *sb, jschar c)
+{
+ if (!STRING_BUFFER_OK(sb))
+ return;
+ if (!ENSURE_STRING_BUFFER(sb, 1))
+ return;
+ *sb->ptr++ = c;
+}
+
+void
+js_AppendChar(JSStringBuffer *sb, jschar c)
+{
+ jschar *bp;
+
+ if (!STRING_BUFFER_OK(sb))
+ return;
+ if (!ENSURE_STRING_BUFFER(sb, 1))
+ return;
+ bp = sb->ptr;
+ *bp++ = c;
+ *bp = 0;
+ sb->ptr = bp;
+}
+
+#if JS_HAS_XML_SUPPORT
+
+void
+js_RepeatChar(JSStringBuffer *sb, jschar c, uintN count)
+{
+ jschar *bp;
+
+ if (!STRING_BUFFER_OK(sb) || count == 0)
+ return;
+ if (!ENSURE_STRING_BUFFER(sb, count))
+ return;
+ for (bp = sb->ptr; count; --count)
+ *bp++ = c;
+ *bp = 0;
+ sb->ptr = bp;
+}
+
+void
+js_AppendCString(JSStringBuffer *sb, const char *asciiz)
+{
+ size_t length;
+ jschar *bp;
+
+ if (!STRING_BUFFER_OK(sb) || *asciiz == '\0')
+ return;
+ length = strlen(asciiz);
+ if (!ENSURE_STRING_BUFFER(sb, length))
+ return;
+ for (bp = sb->ptr; length; --length)
+ *bp++ = (jschar) *asciiz++;
+ *bp = 0;
+ sb->ptr = bp;
+}
+
+void
+js_AppendJSString(JSStringBuffer *sb, JSString *str)
+{
+ size_t length;
+ jschar *bp;
+
+ if (!STRING_BUFFER_OK(sb))
+ return;
+ length = JSSTRING_LENGTH(str);
+ if (length == 0 || !ENSURE_STRING_BUFFER(sb, length))
+ return;
+ bp = sb->ptr;
+ js_strncpy(bp, JSSTRING_CHARS(str), length);
+ bp += length;
+ *bp = 0;
+ sb->ptr = bp;
+}
+
+static JSBool
+GetXMLEntity(JSContext *cx, JSTokenStream *ts)
+{
+ ptrdiff_t offset, length, i;
+ int32 c, d;
+ JSBool ispair;
+ jschar *bp, digit;
+ char *bytes;
+ JSErrNum msg;
+
+ /* Put the entity, including the '&' already scanned, in ts->tokenbuf. */
+ offset = PTRDIFF(ts->tokenbuf.ptr, ts->tokenbuf.base, jschar);
+ FastAppendChar(&ts->tokenbuf, '&');
+ while ((c = GetChar(ts)) != ';') {
+ if (c == EOF || c == '\n') {
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_END_OF_XML_ENTITY);
+ return JS_FALSE;
+ }
+ FastAppendChar(&ts->tokenbuf, (jschar) c);
+ }
+
+ /* Let length be the number of jschars after the '&', including the ';'. */
+ length = PTRDIFF(ts->tokenbuf.ptr, ts->tokenbuf.base, jschar) - offset;
+ bp = ts->tokenbuf.base + offset;
+ c = d = 0;
+ ispair = JS_FALSE;
+ if (length > 2 && bp[1] == '#') {
+ /* Match a well-formed XML Character Reference. */
+ i = 2;
+ if (length > 3 && JS_TOLOWER(bp[i]) == 'x') {
+ if (length > 9) /* at most 6 hex digits allowed */
+ goto badncr;
+ while (++i < length) {
+ digit = bp[i];
+ if (!JS7_ISHEX(digit))
+ goto badncr;
+ c = (c << 4) + JS7_UNHEX(digit);
+ }
+ } else {
+ while (i < length) {
+ digit = bp[i++];
+ if (!JS7_ISDEC(digit))
+ goto badncr;
+ c = (c * 10) + JS7_UNDEC(digit);
+ if (c < 0)
+ goto badncr;
+ }
+ }
+
+ if (0x10000 <= c && c <= 0x10FFFF) {
+ /* Form a surrogate pair (c, d) -- c is the high surrogate. */
+ d = 0xDC00 + (c & 0x3FF);
+ c = 0xD7C0 + (c >> 10);
+ ispair = JS_TRUE;
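+ /*
+ * Worked example (illustrative only): for the character reference
+ * &#x10400;, c == 0x10400, so d = 0xDC00 + (0x10400 & 0x3FF) == 0xDC00
+ * and c = 0xD7C0 + (0x10400 >> 10) == 0xD801, i.e. the UTF-16
+ * surrogate pair <0xD801, 0xDC00>.
+ */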
+ } else {
+ /* Enforce the http://www.w3.org/TR/REC-xml/#wf-Legalchar WFC. */
+ if (c != 0x9 && c != 0xA && c != 0xD &&
+ !(0x20 <= c && c <= 0xD7FF) &&
+ !(0xE000 <= c && c <= 0xFFFD)) {
+ goto badncr;
+ }
+ }
+ } else {
+ /* Try to match one of the five XML 1.0 predefined entities. */
+ switch (length) {
+ case 3:
+ if (bp[2] == 't') {
+ if (bp[1] == 'l')
+ c = '<';
+ else if (bp[1] == 'g')
+ c = '>';
+ }
+ break;
+ case 4:
+ if (bp[1] == 'a' && bp[2] == 'm' && bp[3] == 'p')
+ c = '&';
+ break;
+ case 5:
+ if (bp[3] == 'o') {
+ if (bp[1] == 'a' && bp[2] == 'p' && bp[4] == 's')
+ c = '\'';
+ else if (bp[1] == 'q' && bp[2] == 'u' && bp[4] == 't')
+ c = '"';
+ }
+ break;
+ }
+ if (c == 0) {
+ msg = JSMSG_UNKNOWN_XML_ENTITY;
+ goto bad;
+ }
+ }
+
+ /* If we matched, retract ts->tokenbuf and store the entity's value. */
+ *bp++ = (jschar) c;
+ if (ispair)
+ *bp++ = (jschar) d;
+ *bp = 0;
+ ts->tokenbuf.ptr = bp;
+ return JS_TRUE;
+
+badncr:
+ msg = JSMSG_BAD_XML_NCR;
+bad:
+ /* No match: throw a TypeError per ECMA-357 10.3.2.1 step 8(a). */
+ bytes = js_DeflateString(cx, bp + 1,
+ PTRDIFF(ts->tokenbuf.ptr, bp, jschar) - 1);
+ if (bytes) {
+ js_ReportCompileErrorNumber(cx, ts, JSREPORT_TS | JSREPORT_ERROR,
+ msg, bytes);
+ JS_free(cx, bytes);
+ }
+ return JS_FALSE;
+}
+
+#endif /* JS_HAS_XML_SUPPORT */
+
+JSTokenType
+js_PeekToken(JSContext *cx, JSTokenStream *ts)
+{
+ JSTokenType tt;
+
+ if (ts->lookahead != 0) {
+ tt = ts->tokens[(ts->cursor + ts->lookahead) & NTOKENS_MASK].type;
+ } else {
+ tt = js_GetToken(cx, ts);
+ js_UngetToken(ts);
+ }
+ return tt;
+}
+
+JSTokenType
+js_PeekTokenSameLine(JSContext *cx, JSTokenStream *ts)
+{
+ JSTokenType tt;
+
+ if (!ON_CURRENT_LINE(ts, CURRENT_TOKEN(ts).pos))
+ return TOK_EOL;
+ ts->flags |= TSF_NEWLINES;
+ tt = js_PeekToken(cx, ts);
+ ts->flags &= ~TSF_NEWLINES;
+ return tt;
+}
+
+/*
+ * We have encountered a '\': check for a Unicode escape sequence after it.
+ * Return the escaped character code if one is found; otherwise,
+ * non-destructively return the original '\'.
+ */
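+/*
+ * Illustration (hypothetical input): given "\u0041", PeekChars sees
+ * 'u','0','0','4','1', the nested shifts assemble 0x0041, and the scanner
+ * treats the escape as 'A'.  Given, say, "\u12G4", the peeked characters
+ * are pushed back and '\\' is returned.
+ */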
+static int32
+GetUnicodeEscape(JSTokenStream *ts)
+{
+ jschar cp[5];
+ int32 c;
+
+ if (PeekChars(ts, 5, cp) && cp[0] == 'u' &&
+ JS7_ISHEX(cp[1]) && JS7_ISHEX(cp[2]) &&
+ JS7_ISHEX(cp[3]) && JS7_ISHEX(cp[4]))
+ {
+ c = (((((JS7_UNHEX(cp[1]) << 4)
+ + JS7_UNHEX(cp[2])) << 4)
+ + JS7_UNHEX(cp[3])) << 4)
+ + JS7_UNHEX(cp[4]);
+ SkipChars(ts, 5);
+ return c;
+ }
+ return '\\';
+}
+
+static JSToken *
+NewToken(JSTokenStream *ts, ptrdiff_t adjust)
+{
+ JSToken *tp;
+
+ ts->cursor = (ts->cursor + 1) & NTOKENS_MASK;
+ tp = &CURRENT_TOKEN(ts);
+ tp->ptr = ts->linebuf.ptr + adjust;
+ tp->pos.begin.index = ts->linepos +
+ PTRDIFF(tp->ptr, ts->linebuf.base, jschar) -
+ ts->ungetpos;
+ tp->pos.begin.lineno = tp->pos.end.lineno = (uint16)ts->lineno;
+ return tp;
+}
+
+JSTokenType
+js_GetToken(JSContext *cx, JSTokenStream *ts)
+{
+ JSTokenType tt;
+ int32 c, qc;
+ JSToken *tp;
+ JSAtom *atom;
+ JSBool hadUnicodeEscape;
+ const struct keyword *kw;
+
+#define INIT_TOKENBUF() (ts->tokenbuf.ptr = ts->tokenbuf.base)
+#define TOKENBUF_LENGTH() PTRDIFF(ts->tokenbuf.ptr, ts->tokenbuf.base, jschar)
+#define TOKENBUF_OK() STRING_BUFFER_OK(&ts->tokenbuf)
+#define TOKENBUF_TO_ATOM() (TOKENBUF_OK() \
+ ? js_AtomizeChars(cx, \
+ TOKENBUF_BASE(), \
+ TOKENBUF_LENGTH(), \
+ 0) \
+ : NULL)
+#define ADD_TO_TOKENBUF(c) FastAppendChar(&ts->tokenbuf, (jschar) (c))
+
+/* The following 4 macros should only be used when TOKENBUF_OK() is true. */
+#define TOKENBUF_BASE() (ts->tokenbuf.base)
+#define TOKENBUF_CHAR(i) (ts->tokenbuf.base[i])
+#define TRIM_TOKENBUF(i) (ts->tokenbuf.ptr = ts->tokenbuf.base + i)
+#define NUL_TERM_TOKENBUF() (*ts->tokenbuf.ptr = 0)
+
+ /* Check for a pushed-back token resulting from mismatching lookahead. */
+ while (ts->lookahead != 0) {
+ JS_ASSERT(!(ts->flags & TSF_XMLTEXTMODE));
+ ts->lookahead--;
+ ts->cursor = (ts->cursor + 1) & NTOKENS_MASK;
+ tt = CURRENT_TOKEN(ts).type;
+ if (tt != TOK_EOL || (ts->flags & TSF_NEWLINES))
+ return tt;
+ }
+
+ /* If there was a fatal error, keep returning TOK_ERROR. */
+ if (ts->flags & TSF_ERROR)
+ return TOK_ERROR;
+
+#if JS_HAS_XML_SUPPORT
+ if (ts->flags & TSF_XMLTEXTMODE) {
+ tt = TOK_XMLSPACE; /* veto if non-space, return TOK_XMLTEXT */
+ tp = NewToken(ts, 0);
+ INIT_TOKENBUF();
+ qc = (ts->flags & TSF_XMLONLYMODE) ? '<' : '{';
+
+ while ((c = GetChar(ts)) != qc && c != '<' && c != EOF) {
+ if (c == '&' && qc == '<') {
+ if (!GetXMLEntity(cx, ts))
+ goto error;
+ tt = TOK_XMLTEXT;
+ continue;
+ }
+
+ if (!JS_ISXMLSPACE(c))
+ tt = TOK_XMLTEXT;
+ ADD_TO_TOKENBUF(c);
+ }
+ UngetChar(ts, c);
+
+ if (TOKENBUF_LENGTH() == 0) {
+ atom = NULL;
+ } else {
+ atom = TOKENBUF_TO_ATOM();
+ if (!atom)
+ goto error;
+ }
+ tp->pos.end.lineno = (uint16)ts->lineno;
+ tp->t_op = JSOP_STRING;
+ tp->t_atom = atom;
+ goto out;
+ }
+
+ if (ts->flags & TSF_XMLTAGMODE) {
+ tp = NewToken(ts, 0);
+ c = GetChar(ts);
+ if (JS_ISXMLSPACE(c)) {
+ do {
+ c = GetChar(ts);
+ } while (JS_ISXMLSPACE(c));
+ UngetChar(ts, c);
+ tt = TOK_XMLSPACE;
+ goto out;
+ }
+
+ if (c == EOF) {
+ tt = TOK_EOF;
+ goto out;
+ }
+
+ INIT_TOKENBUF();
+ if (JS_ISXMLNSSTART(c)) {
+ JSBool sawColon = JS_FALSE;
+
+ ADD_TO_TOKENBUF(c);
+ while ((c = GetChar(ts)) != EOF && JS_ISXMLNAME(c)) {
+ if (c == ':') {
+ int nextc;
+
+ if (sawColon ||
+ (nextc = PeekChar(ts),
+ ((ts->flags & TSF_XMLONLYMODE) || nextc != '{') &&
+ !JS_ISXMLNAME(nextc))) {
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS |
+ JSREPORT_ERROR,
+ JSMSG_BAD_XML_QNAME);
+ goto error;
+ }
+ sawColon = JS_TRUE;
+ }
+
+ ADD_TO_TOKENBUF(c);
+ }
+
+ UngetChar(ts, c);
+ atom = TOKENBUF_TO_ATOM();
+ if (!atom)
+ goto error;
+ tp->t_op = JSOP_STRING;
+ tp->t_atom = atom;
+ tt = TOK_XMLNAME;
+ goto out;
+ }
+
+ switch (c) {
+ case '{':
+ if (ts->flags & TSF_XMLONLYMODE)
+ goto bad_xml_char;
+ tt = TOK_LC;
+ goto out;
+
+ case '=':
+ tt = TOK_ASSIGN;
+ goto out;
+
+ case '"':
+ case '\'':
+ qc = c;
+ while ((c = GetChar(ts)) != qc) {
+ if (c == EOF) {
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_UNTERMINATED_STRING);
+ goto error;
+ }
+
+ /*
+ * XML attribute values are double-quoted when pretty-printed,
+ * so escape " if it is expressed directly in a single-quoted
+ * attribute value.
+ */
+ if (c == '"' && !(ts->flags & TSF_XMLONLYMODE)) {
+ JS_ASSERT(qc == '\'');
+ js_AppendCString(&ts->tokenbuf, js_quot_entity_str);
+ continue;
+ }
+
+ if (c == '&' && (ts->flags & TSF_XMLONLYMODE)) {
+ if (!GetXMLEntity(cx, ts))
+ goto error;
+ continue;
+ }
+
+ ADD_TO_TOKENBUF(c);
+ }
+ atom = TOKENBUF_TO_ATOM();
+ if (!atom)
+ goto error;
+ tp->pos.end.lineno = (uint16)ts->lineno;
+ tp->t_op = JSOP_STRING;
+ tp->t_atom = atom;
+ tt = TOK_XMLATTR;
+ goto out;
+
+ case '>':
+ tt = TOK_XMLTAGC;
+ goto out;
+
+ case '/':
+ if (MatchChar(ts, '>')) {
+ tt = TOK_XMLPTAGC;
+ goto out;
+ }
+ /* FALL THROUGH */
+
+ bad_xml_char:
+ default:
+ js_ReportCompileErrorNumber(cx, ts, JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_BAD_XML_CHARACTER);
+ goto error;
+ }
+ /* NOTREACHED */
+ }
+#endif /* JS_HAS_XML_SUPPORT */
+
+retry:
+ do {
+ c = GetChar(ts);
+ if (c == '\n') {
+ ts->flags &= ~TSF_DIRTYLINE;
+ if (ts->flags & TSF_NEWLINES)
+ break;
+ }
+ } while (JS_ISSPACE(c));
+
+ tp = NewToken(ts, -1);
+ if (c == EOF) {
+ tt = TOK_EOF;
+ goto out;
+ }
+
+ hadUnicodeEscape = JS_FALSE;
+ if (JS_ISIDSTART(c) ||
+ (c == '\\' &&
+ (c = GetUnicodeEscape(ts),
+ hadUnicodeEscape = JS_ISIDSTART(c)))) {
+ INIT_TOKENBUF();
+ for (;;) {
+ ADD_TO_TOKENBUF(c);
+ c = GetChar(ts);
+ if (c == '\\') {
+ c = GetUnicodeEscape(ts);
+ if (!JS_ISIDENT(c))
+ break;
+ hadUnicodeEscape = JS_TRUE;
+ } else {
+ if (!JS_ISIDENT(c))
+ break;
+ }
+ }
+ UngetChar(ts, c);
+
+ /*
+ * Check for keywords unless we saw a Unicode escape or the parser asked
+ * us to ignore keywords.
+ */
+ if (!hadUnicodeEscape &&
+ !(ts->flags & TSF_KEYWORD_IS_NAME) &&
+ TOKENBUF_OK() &&
+ (kw = FindKeyword(TOKENBUF_BASE(), TOKENBUF_LENGTH()))) {
+ if (kw->tokentype == TOK_RESERVED) {
+ if (!js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS |
+ JSREPORT_WARNING |
+ JSREPORT_STRICT,
+ JSMSG_RESERVED_ID,
+ kw->chars)) {
+ goto error;
+ }
+ } else if (kw->version <= JSVERSION_NUMBER(cx)) {
+ tt = kw->tokentype;
+ tp->t_op = (JSOp) kw->op;
+ goto out;
+ }
+ }
+
+ atom = TOKENBUF_TO_ATOM();
+ if (!atom)
+ goto error;
+ tp->t_op = JSOP_NAME;
+ tp->t_atom = atom;
+ tt = TOK_NAME;
+ goto out;
+ }
+
+ if (JS7_ISDEC(c) || (c == '.' && JS7_ISDEC(PeekChar(ts)))) {
+ jsint radix;
+ const jschar *endptr;
+ jsdouble dval;
+
+ radix = 10;
+ INIT_TOKENBUF();
+
+ if (c == '0') {
+ ADD_TO_TOKENBUF(c);
+ c = GetChar(ts);
+ if (JS_TOLOWER(c) == 'x') {
+ ADD_TO_TOKENBUF(c);
+ c = GetChar(ts);
+ radix = 16;
+ } else if (JS7_ISDEC(c)) {
+ radix = 8;
+ }
+ }
+
+ while (JS7_ISHEX(c)) {
+ if (radix < 16) {
+ if (JS7_ISLET(c))
+ break;
+
+ /*
+ * We permit 08 and 09 as decimal numbers, which makes our
+ * behaviour a superset of the ECMA numeric grammar. We might
+ * not always be so permissive, so we warn about it.
+ */
+ if (radix == 8 && c >= '8') {
+ if (!js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS |
+ JSREPORT_WARNING,
+ JSMSG_BAD_OCTAL,
+ c == '8' ? "08" : "09")) {
+ goto error;
+ }
+ radix = 10;
+ }
+ }
+ ADD_TO_TOKENBUF(c);
+ c = GetChar(ts);
+ }
+
+ if (radix == 10 && (c == '.' || JS_TOLOWER(c) == 'e')) {
+ if (c == '.') {
+ do {
+ ADD_TO_TOKENBUF(c);
+ c = GetChar(ts);
+ } while (JS7_ISDEC(c));
+ }
+ if (JS_TOLOWER(c) == 'e') {
+ ADD_TO_TOKENBUF(c);
+ c = GetChar(ts);
+ if (c == '+' || c == '-') {
+ ADD_TO_TOKENBUF(c);
+ c = GetChar(ts);
+ }
+ if (!JS7_ISDEC(c)) {
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_MISSING_EXPONENT);
+ goto error;
+ }
+ do {
+ ADD_TO_TOKENBUF(c);
+ c = GetChar(ts);
+ } while (JS7_ISDEC(c));
+ }
+ }
+
+ /* Put back the next char and NUL-terminate tokenbuf for js_strto*. */
+ UngetChar(ts, c);
+ ADD_TO_TOKENBUF(0);
+
+ if (!TOKENBUF_OK())
+ goto error;
+ if (radix == 10) {
+ if (!js_strtod(cx, TOKENBUF_BASE(), &endptr, &dval)) {
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_OUT_OF_MEMORY);
+ goto error;
+ }
+ } else {
+ if (!js_strtointeger(cx, TOKENBUF_BASE(), &endptr, radix, &dval)) {
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_OUT_OF_MEMORY);
+ goto error;
+ }
+ }
+ tp->t_dval = dval;
+ tt = TOK_NUMBER;
+ goto out;
+ }
+
+ if (c == '"' || c == '\'') {
+ qc = c;
+ INIT_TOKENBUF();
+ while ((c = GetChar(ts)) != qc) {
+ if (c == '\n' || c == EOF) {
+ UngetChar(ts, c);
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_UNTERMINATED_STRING);
+ goto error;
+ }
+ if (c == '\\') {
+ switch (c = GetChar(ts)) {
+ case 'b': c = '\b'; break;
+ case 'f': c = '\f'; break;
+ case 'n': c = '\n'; break;
+ case 'r': c = '\r'; break;
+ case 't': c = '\t'; break;
+ case 'v': c = '\v'; break;
+
+ default:
+ if ('0' <= c && c < '8') {
+ int32 val = JS7_UNDEC(c);
+
+ c = PeekChar(ts);
+ if ('0' <= c && c < '8') {
+ val = 8 * val + JS7_UNDEC(c);
+ GetChar(ts);
+ c = PeekChar(ts);
+ if ('0' <= c && c < '8') {
+ int32 save = val;
+ val = 8 * val + JS7_UNDEC(c);
+ if (val <= 0377)
+ GetChar(ts);
+ else
+ val = save;
+ }
+ }
+
+ c = (jschar)val;
+ } else if (c == 'u') {
+ jschar cp[4];
+ if (PeekChars(ts, 4, cp) &&
+ JS7_ISHEX(cp[0]) && JS7_ISHEX(cp[1]) &&
+ JS7_ISHEX(cp[2]) && JS7_ISHEX(cp[3])) {
+ c = (((((JS7_UNHEX(cp[0]) << 4)
+ + JS7_UNHEX(cp[1])) << 4)
+ + JS7_UNHEX(cp[2])) << 4)
+ + JS7_UNHEX(cp[3]);
+ SkipChars(ts, 4);
+ }
+ } else if (c == 'x') {
+ jschar cp[2];
+ if (PeekChars(ts, 2, cp) &&
+ JS7_ISHEX(cp[0]) && JS7_ISHEX(cp[1])) {
+ c = (JS7_UNHEX(cp[0]) << 4) + JS7_UNHEX(cp[1]);
+ SkipChars(ts, 2);
+ }
+ } else if (c == '\n' && JS_VERSION_IS_ECMA(cx)) {
+ /* ECMA follows C by removing escaped newlines. */
+ continue;
+ }
+ break;
+ }
+ }
+ ADD_TO_TOKENBUF(c);
+ }
+ atom = TOKENBUF_TO_ATOM();
+ if (!atom)
+ goto error;
+ tp->pos.end.lineno = (uint16)ts->lineno;
+ tp->t_op = JSOP_STRING;
+ tp->t_atom = atom;
+ tt = TOK_STRING;
+ goto out;
+ }
+
+ switch (c) {
+ case '\n': tt = TOK_EOL; goto eol_out;
+ case ';': tt = TOK_SEMI; break;
+ case '[': tt = TOK_LB; break;
+ case ']': tt = TOK_RB; break;
+ case '{': tt = TOK_LC; break;
+ case '}': tt = TOK_RC; break;
+ case '(': tt = TOK_LP; break;
+ case ')': tt = TOK_RP; break;
+ case ',': tt = TOK_COMMA; break;
+ case '?': tt = TOK_HOOK; break;
+
+ case '.':
+#if JS_HAS_XML_SUPPORT
+ if (MatchChar(ts, c))
+ tt = TOK_DBLDOT;
+ else
+#endif
+ tt = TOK_DOT;
+ break;
+
+ case ':':
+#if JS_HAS_XML_SUPPORT
+ if (MatchChar(ts, c)) {
+ tt = TOK_DBLCOLON;
+ break;
+ }
+#endif
+ /*
+ * Default so the compiler can modify this to JSOP_GETTER if 'p getter: v'
+ * appears in an object initializer, and likewise for setter.
+ */
+ tp->t_op = JSOP_NOP;
+ tt = TOK_COLON;
+ break;
+
+ case '|':
+ if (MatchChar(ts, c)) {
+ tt = TOK_OR;
+ } else if (MatchChar(ts, '=')) {
+ tp->t_op = JSOP_BITOR;
+ tt = TOK_ASSIGN;
+ } else {
+ tt = TOK_BITOR;
+ }
+ break;
+
+ case '^':
+ if (MatchChar(ts, '=')) {
+ tp->t_op = JSOP_BITXOR;
+ tt = TOK_ASSIGN;
+ } else {
+ tt = TOK_BITXOR;
+ }
+ break;
+
+ case '&':
+ if (MatchChar(ts, c)) {
+ tt = TOK_AND;
+ } else if (MatchChar(ts, '=')) {
+ tp->t_op = JSOP_BITAND;
+ tt = TOK_ASSIGN;
+ } else {
+ tt = TOK_BITAND;
+ }
+ break;
+
+ case '=':
+ if (MatchChar(ts, c)) {
+ tp->t_op = MatchChar(ts, c) ? JSOP_NEW_EQ : (JSOp)cx->jsop_eq;
+ tt = TOK_EQOP;
+ } else {
+ tp->t_op = JSOP_NOP;
+ tt = TOK_ASSIGN;
+ }
+ break;
+
+ case '!':
+ if (MatchChar(ts, '=')) {
+ tp->t_op = MatchChar(ts, '=') ? JSOP_NEW_NE : (JSOp)cx->jsop_ne;
+ tt = TOK_EQOP;
+ } else {
+ tp->t_op = JSOP_NOT;
+ tt = TOK_UNARYOP;
+ }
+ break;
+
+#if JS_HAS_XML_SUPPORT
+ case '@':
+ tt = TOK_AT;
+ break;
+#endif
+
+ case '<':
+#if JS_HAS_XML_SUPPORT
+ /*
+ * After much testing, it's clear that Postel's advice to protocol
+ * designers ("be liberal in what you accept, and conservative in what
+ * you send") invites a natural-law repercussion for JS as "protocol":
+ *
+ * "If you are liberal in what you accept, others will utterly fail to
+ * be conservative in what they send."
+ *
+ * Which means you will get <!-- comments to end of line in the middle
+ * of .js files, and after if conditions whose then statements are on
+ * the next line, and other wonders. See at least the following bugs:
+ * https://bugzilla.mozilla.org/show_bug.cgi?id=309242
+ * https://bugzilla.mozilla.org/show_bug.cgi?id=309712
+ * https://bugzilla.mozilla.org/show_bug.cgi?id=310993
+ *
+ * So without JSOPTION_XML, we never scan an XML comment or CDATA
+ * literal. We always scan <! as the start of an HTML comment hack
+ * to end of line, used since Netscape 2 to hide script tag content
+ * from script-unaware browsers.
+ */
+ if ((ts->flags & TSF_OPERAND) &&
+ (JS_HAS_XML_OPTION(cx) || PeekChar(ts) != '!')) {
+ /* Check for XML comment or CDATA section. */
+ if (MatchChar(ts, '!')) {
+ INIT_TOKENBUF();
+
+ /* Scan XML comment. */
+ if (MatchChar(ts, '-')) {
+ if (!MatchChar(ts, '-'))
+ goto bad_xml_markup;
+ while ((c = GetChar(ts)) != '-' || !MatchChar(ts, '-')) {
+ if (c == EOF)
+ goto bad_xml_markup;
+ ADD_TO_TOKENBUF(c);
+ }
+ tt = TOK_XMLCOMMENT;
+ tp->t_op = JSOP_XMLCOMMENT;
+ goto finish_xml_markup;
+ }
+
+ /* Scan CDATA section. */
+ if (MatchChar(ts, '[')) {
+ jschar cp[6];
+ if (PeekChars(ts, 6, cp) &&
+ cp[0] == 'C' &&
+ cp[1] == 'D' &&
+ cp[2] == 'A' &&
+ cp[3] == 'T' &&
+ cp[4] == 'A' &&
+ cp[5] == '[') {
+ SkipChars(ts, 6);
+ while ((c = GetChar(ts)) != ']' ||
+ !PeekChars(ts, 2, cp) ||
+ cp[0] != ']' ||
+ cp[1] != '>') {
+ if (c == EOF)
+ goto bad_xml_markup;
+ ADD_TO_TOKENBUF(c);
+ }
+ GetChar(ts); /* discard ] but not > */
+ tt = TOK_XMLCDATA;
+ tp->t_op = JSOP_XMLCDATA;
+ goto finish_xml_markup;
+ }
+ goto bad_xml_markup;
+ }
+ }
+
+ /* Check for processing instruction. */
+ if (MatchChar(ts, '?')) {
+ JSBool inTarget = JS_TRUE;
+ size_t targetLength = 0;
+ ptrdiff_t contentIndex = -1;
+
+ INIT_TOKENBUF();
+ while ((c = GetChar(ts)) != '?' || PeekChar(ts) != '>') {
+ if (c == EOF)
+ goto bad_xml_markup;
+ if (inTarget) {
+ if (JS_ISXMLSPACE(c)) {
+ if (TOKENBUF_LENGTH() == 0)
+ goto bad_xml_markup;
+ inTarget = JS_FALSE;
+ } else {
+ if (!((TOKENBUF_LENGTH() == 0)
+ ? JS_ISXMLNSSTART(c)
+ : JS_ISXMLNS(c))) {
+ goto bad_xml_markup;
+ }
+ ++targetLength;
+ }
+ } else {
+ if (contentIndex < 0 && !JS_ISXMLSPACE(c))
+ contentIndex = TOKENBUF_LENGTH();
+ }
+ ADD_TO_TOKENBUF(c);
+ }
+ if (targetLength == 0)
+ goto bad_xml_markup;
+ if (!TOKENBUF_OK())
+ goto error;
+ if (contentIndex < 0) {
+ atom = cx->runtime->atomState.emptyAtom;
+ } else {
+ atom = js_AtomizeChars(cx,
+ &TOKENBUF_CHAR(contentIndex),
+ TOKENBUF_LENGTH() - contentIndex,
+ 0);
+ if (!atom)
+ goto error;
+ }
+ TRIM_TOKENBUF(targetLength);
+ tp->t_atom2 = atom;
+ tt = TOK_XMLPI;
+
+ finish_xml_markup:
+ if (!MatchChar(ts, '>'))
+ goto bad_xml_markup;
+ atom = TOKENBUF_TO_ATOM();
+ if (!atom)
+ goto error;
+ tp->t_atom = atom;
+ tp->pos.end.lineno = (uint16)ts->lineno;
+ goto out;
+ }
+
+ /* An XML start-of-tag character. */
+ tt = MatchChar(ts, '/') ? TOK_XMLETAGO : TOK_XMLSTAGO;
+ goto out;
+
+ bad_xml_markup:
+ js_ReportCompileErrorNumber(cx, ts, JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_BAD_XML_MARKUP);
+ goto error;
+ }
+#endif /* JS_HAS_XML_SUPPORT */
+
+ /* NB: treat HTML begin-comment as comment-till-end-of-line */
+ if (MatchChar(ts, '!')) {
+ if (MatchChar(ts, '-')) {
+ if (MatchChar(ts, '-')) {
+ ts->flags |= TSF_IN_HTML_COMMENT;
+ goto skipline;
+ }
+ UngetChar(ts, '-');
+ }
+ UngetChar(ts, '!');
+ }
+ if (MatchChar(ts, c)) {
+ tp->t_op = JSOP_LSH;
+ tt = MatchChar(ts, '=') ? TOK_ASSIGN : TOK_SHOP;
+ } else {
+ tp->t_op = MatchChar(ts, '=') ? JSOP_LE : JSOP_LT;
+ tt = TOK_RELOP;
+ }
+ break;
+
+ case '>':
+ if (MatchChar(ts, c)) {
+ tp->t_op = MatchChar(ts, c) ? JSOP_URSH : JSOP_RSH;
+ tt = MatchChar(ts, '=') ? TOK_ASSIGN : TOK_SHOP;
+ } else {
+ tp->t_op = MatchChar(ts, '=') ? JSOP_GE : JSOP_GT;
+ tt = TOK_RELOP;
+ }
+ break;
+
+ case '*':
+ tp->t_op = JSOP_MUL;
+ tt = MatchChar(ts, '=') ? TOK_ASSIGN : TOK_STAR;
+ break;
+
+ case '/':
+ if (MatchChar(ts, '/')) {
+ /*
+ * Hack for source filters such as the Mozilla XUL preprocessor:
+ * "//@line 123\n" sets the number of the *next* line after the
+ * comment to 123.
+ */
+ if (JS_HAS_ATLINE_OPTION(cx)) {
+ jschar cp[5];
+ uintN i, line, temp;
+ char filename[1024];
+
+ if (PeekChars(ts, 5, cp) &&
+ cp[0] == '@' &&
+ cp[1] == 'l' &&
+ cp[2] == 'i' &&
+ cp[3] == 'n' &&
+ cp[4] == 'e') {
+ SkipChars(ts, 5);
+ while ((c = GetChar(ts)) != '\n' && JS_ISSPACE(c))
+ continue;
+ if (JS7_ISDEC(c)) {
+ line = JS7_UNDEC(c);
+ while ((c = GetChar(ts)) != EOF && JS7_ISDEC(c)) {
+ temp = 10 * line + JS7_UNDEC(c);
+ if (temp < line) {
+ /* Ignore overlarge line numbers. */
+ goto skipline;
+ }
+ line = temp;
+ }
+ while (c != '\n' && JS_ISSPACE(c))
+ c = GetChar(ts);
+ i = 0;
+ if (c == '"') {
+ while ((c = GetChar(ts)) != EOF && c != '"') {
+ if (c == '\n') {
+ UngetChar(ts, c);
+ goto skipline;
+ }
+ if ((c >> 8) != 0 || i >= sizeof filename - 1)
+ goto skipline;
+ filename[i++] = (char) c;
+ }
+ if (c == '"') {
+ while ((c = GetChar(ts)) != '\n' &&
+ JS_ISSPACE(c)) {
+ continue;
+ }
+ }
+ }
+ filename[i] = '\0';
+ if (c == '\n') {
+ if (i > 0) {
+ if (ts->flags & TSF_OWNFILENAME)
+ JS_free(cx, (void *) ts->filename);
+ ts->filename = JS_strdup(cx, filename);
+ if (!ts->filename)
+ goto error;
+ ts->flags |= TSF_OWNFILENAME;
+ }
+ ts->lineno = line;
+ }
+ }
+ UngetChar(ts, c);
+ }
+ }
+
+skipline:
+ /* Optimize line skipping if we are not in an HTML comment. */
+ if (ts->flags & TSF_IN_HTML_COMMENT) {
+ while ((c = GetChar(ts)) != EOF && c != '\n') {
+ if (c == '-' && MatchChar(ts, '-') && MatchChar(ts, '>'))
+ ts->flags &= ~TSF_IN_HTML_COMMENT;
+ }
+ } else {
+ while ((c = GetChar(ts)) != EOF && c != '\n')
+ continue;
+ }
+ UngetChar(ts, c);
+ ts->cursor = (ts->cursor - 1) & NTOKENS_MASK;
+ goto retry;
+ }
+
+ if (MatchChar(ts, '*')) {
+ while ((c = GetChar(ts)) != EOF &&
+ !(c == '*' && MatchChar(ts, '/'))) {
+ /* Ignore all characters until comment close. */
+ }
+ if (c == EOF) {
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_UNTERMINATED_COMMENT);
+ goto error;
+ }
+ ts->cursor = (ts->cursor - 1) & NTOKENS_MASK;
+ goto retry;
+ }
+
+ if (ts->flags & TSF_OPERAND) {
+ JSObject *obj;
+ uintN flags;
+ JSBool inCharClass = JS_FALSE;
+
+ INIT_TOKENBUF();
+ for (;;) {
+ c = GetChar(ts);
+ if (c == '\n' || c == EOF) {
+ UngetChar(ts, c);
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_UNTERMINATED_REGEXP);
+ goto error;
+ }
+ if (c == '\\') {
+ ADD_TO_TOKENBUF(c);
+ c = GetChar(ts);
+ } else if (c == '[') {
+ inCharClass = JS_TRUE;
+ } else if (c == ']') {
+ inCharClass = JS_FALSE;
+ } else if (c == '/' && !inCharClass) {
+ /* For compat with IE, allow unescaped / in char classes. */
+ break;
+ }
+ ADD_TO_TOKENBUF(c);
+ }
+ for (flags = 0; ; ) {
+ if (MatchChar(ts, 'g'))
+ flags |= JSREG_GLOB;
+ else if (MatchChar(ts, 'i'))
+ flags |= JSREG_FOLD;
+ else if (MatchChar(ts, 'm'))
+ flags |= JSREG_MULTILINE;
+ else
+ break;
+ }
+ c = PeekChar(ts);
+ if (JS7_ISLET(c)) {
+ tp->ptr = ts->linebuf.ptr - 1;
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_BAD_REGEXP_FLAG);
+ (void) GetChar(ts);
+ goto error;
+ }
+ /* XXXbe fix jsregexp.c so it doesn't depend on NUL termination */
+ if (!TOKENBUF_OK())
+ goto error;
+ NUL_TERM_TOKENBUF();
+ obj = js_NewRegExpObject(cx, ts,
+ TOKENBUF_BASE(),
+ TOKENBUF_LENGTH(),
+ flags);
+ if (!obj)
+ goto error;
+ atom = js_AtomizeObject(cx, obj, 0);
+ if (!atom)
+ goto error;
+
+ /*
+ * If the regexp's script is one-shot, we can avoid the extra
+ * fork-on-exec costs of JSOP_REGEXP by selecting JSOP_OBJECT.
+ * Otherwise, to avoid incorrect proto, parent, and lastIndex
+ * sharing among threads and sequentially across re-execution,
+ * select JSOP_REGEXP.
+ */
+ tp->t_op = (cx->fp->flags & (JSFRAME_EVAL | JSFRAME_COMPILE_N_GO))
+ ? JSOP_OBJECT
+ : JSOP_REGEXP;
+ tp->t_atom = atom;
+ tt = TOK_OBJECT;
+ break;
+ }
+
+ tp->t_op = JSOP_DIV;
+ tt = MatchChar(ts, '=') ? TOK_ASSIGN : TOK_DIVOP;
+ break;
+
+ case '%':
+ tp->t_op = JSOP_MOD;
+ tt = MatchChar(ts, '=') ? TOK_ASSIGN : TOK_DIVOP;
+ break;
+
+ case '~':
+ tp->t_op = JSOP_BITNOT;
+ tt = TOK_UNARYOP;
+ break;
+
+ case '+':
+ if (MatchChar(ts, '=')) {
+ tp->t_op = JSOP_ADD;
+ tt = TOK_ASSIGN;
+ } else if (MatchChar(ts, c)) {
+ tt = TOK_INC;
+ } else {
+ tp->t_op = JSOP_POS;
+ tt = TOK_PLUS;
+ }
+ break;
+
+ case '-':
+ if (MatchChar(ts, '=')) {
+ tp->t_op = JSOP_SUB;
+ tt = TOK_ASSIGN;
+ } else if (MatchChar(ts, c)) {
+ if (PeekChar(ts) == '>' && !(ts->flags & TSF_DIRTYLINE)) {
+ ts->flags &= ~TSF_IN_HTML_COMMENT;
+ goto skipline;
+ }
+ tt = TOK_DEC;
+ } else {
+ tp->t_op = JSOP_NEG;
+ tt = TOK_MINUS;
+ }
+ break;
+
+#if JS_HAS_SHARP_VARS
+ case '#':
+ {
+ uint32 n;
+
+ c = GetChar(ts);
+ if (!JS7_ISDEC(c)) {
+ UngetChar(ts, c);
+ goto badchar;
+ }
+ n = (uint32)JS7_UNDEC(c);
+ for (;;) {
+ c = GetChar(ts);
+ if (!JS7_ISDEC(c))
+ break;
+ n = 10 * n + JS7_UNDEC(c);
+ if (n >= UINT16_LIMIT) {
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_SHARPVAR_TOO_BIG);
+ goto error;
+ }
+ }
+ tp->t_dval = (jsdouble) n;
+ if (JS_HAS_STRICT_OPTION(cx) &&
+ (c == '=' || c == '#')) {
+ char buf[20];
+ JS_snprintf(buf, sizeof buf, "#%u%c", n, c);
+ if (!js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS |
+ JSREPORT_WARNING |
+ JSREPORT_STRICT,
+ JSMSG_DEPRECATED_USAGE,
+ buf)) {
+ goto error;
+ }
+ }
+ if (c == '=')
+ tt = TOK_DEFSHARP;
+ else if (c == '#')
+ tt = TOK_USESHARP;
+ else
+ goto badchar;
+ break;
+ }
+#endif /* JS_HAS_SHARP_VARS */
+
+#if JS_HAS_SHARP_VARS || JS_HAS_XML_SUPPORT
+ badchar:
+#endif
+
+ default:
+ js_ReportCompileErrorNumber(cx, ts, JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_ILLEGAL_CHARACTER);
+ goto error;
+ }
+
+out:
+ JS_ASSERT(tt != TOK_EOL);
+ ts->flags |= TSF_DIRTYLINE;
+
+eol_out:
+ if (!STRING_BUFFER_OK(&ts->tokenbuf))
+ tt = TOK_ERROR;
+ JS_ASSERT(tt < TOK_LIMIT);
+ tp->pos.end.index = ts->linepos +
+ PTRDIFF(ts->linebuf.ptr, ts->linebuf.base, jschar) -
+ ts->ungetpos;
+ tp->type = tt;
+ return tt;
+
+error:
+ tt = TOK_ERROR;
+ ts->flags |= TSF_ERROR;
+ goto out;
+
+#undef INIT_TOKENBUF
+#undef TOKENBUF_LENGTH
+#undef TOKENBUF_OK
+#undef TOKENBUF_TO_ATOM
+#undef ADD_TO_TOKENBUF
+#undef TOKENBUF_BASE
+#undef TOKENBUF_CHAR
+#undef TRIM_TOKENBUF
+#undef NUL_TERM_TOKENBUF
+}
+
+void
+js_UngetToken(JSTokenStream *ts)
+{
+ JS_ASSERT(ts->lookahead < NTOKENS_MASK);
+ ts->lookahead++;
+ ts->cursor = (ts->cursor - 1) & NTOKENS_MASK;
+}
+
+JSBool
+js_MatchToken(JSContext *cx, JSTokenStream *ts, JSTokenType tt)
+{
+ if (js_GetToken(cx, ts) == tt)
+ return JS_TRUE;
+ js_UngetToken(ts);
+ return JS_FALSE;
+}
diff --git a/third_party/js-1.7/jsscan.h b/third_party/js-1.7/jsscan.h
new file mode 100644
index 0000000..08cb095
--- /dev/null
+++ b/third_party/js-1.7/jsscan.h
@@ -0,0 +1,389 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsscan_h___
+#define jsscan_h___
+/*
+ * JS lexical scanner interface.
+ */
+#include <stddef.h>
+#include <stdio.h>
+#include "jsconfig.h"
+#include "jsopcode.h"
+#include "jsprvtd.h"
+#include "jspubtd.h"
+
+JS_BEGIN_EXTERN_C
+
+#define JS_KEYWORD(keyword, type, op, version) \
+ extern const char js_##keyword##_str[];
+#include "jskeyword.tbl"
+#undef JS_KEYWORD
+
+typedef enum JSTokenType {
+ TOK_ERROR = -1, /* well-known as the only code < EOF */
+ TOK_EOF = 0, /* end of file */
+ TOK_EOL = 1, /* end of line */
+ TOK_SEMI = 2, /* semicolon */
+ TOK_COMMA = 3, /* comma operator */
+ TOK_ASSIGN = 4, /* assignment ops (= += -= etc.) */
+ TOK_HOOK = 5, TOK_COLON = 6, /* conditional (?:) */
+ TOK_OR = 7, /* logical or (||) */
+ TOK_AND = 8, /* logical and (&&) */
+ TOK_BITOR = 9, /* bitwise-or (|) */
+ TOK_BITXOR = 10, /* bitwise-xor (^) */
+ TOK_BITAND = 11, /* bitwise-and (&) */
+ TOK_EQOP = 12, /* equality ops (== !=) */
+ TOK_RELOP = 13, /* relational ops (< <= > >=) */
+ TOK_SHOP = 14, /* shift ops (<< >> >>>) */
+ TOK_PLUS = 15, /* plus */
+ TOK_MINUS = 16, /* minus */
+ TOK_STAR = 17, TOK_DIVOP = 18, /* multiply/divide ops (* / %) */
+ TOK_UNARYOP = 19, /* unary prefix operator */
+ TOK_INC = 20, TOK_DEC = 21, /* increment/decrement (++ --) */
+ TOK_DOT = 22, /* member operator (.) */
+ TOK_LB = 23, TOK_RB = 24, /* left and right brackets */
+ TOK_LC = 25, TOK_RC = 26, /* left and right curlies (braces) */
+ TOK_LP = 27, TOK_RP = 28, /* left and right parentheses */
+ TOK_NAME = 29, /* identifier */
+ TOK_NUMBER = 30, /* numeric constant */
+ TOK_STRING = 31, /* string constant */
+ TOK_OBJECT = 32, /* RegExp or other object constant */
+ TOK_PRIMARY = 33, /* true, false, null, this, super */
+ TOK_FUNCTION = 34, /* function keyword */
+ TOK_EXPORT = 35, /* export keyword */
+ TOK_IMPORT = 36, /* import keyword */
+ TOK_IF = 37, /* if keyword */
+ TOK_ELSE = 38, /* else keyword */
+ TOK_SWITCH = 39, /* switch keyword */
+ TOK_CASE = 40, /* case keyword */
+ TOK_DEFAULT = 41, /* default keyword */
+ TOK_WHILE = 42, /* while keyword */
+ TOK_DO = 43, /* do keyword */
+ TOK_FOR = 44, /* for keyword */
+ TOK_BREAK = 45, /* break keyword */
+ TOK_CONTINUE = 46, /* continue keyword */
+ TOK_IN = 47, /* in keyword */
+ TOK_VAR = 48, /* var keyword */
+ TOK_WITH = 49, /* with keyword */
+ TOK_RETURN = 50, /* return keyword */
+ TOK_NEW = 51, /* new keyword */
+ TOK_DELETE = 52, /* delete keyword */
+ TOK_DEFSHARP = 53, /* #n= for object/array initializers */
+ TOK_USESHARP = 54, /* #n# for object/array initializers */
+ TOK_TRY = 55, /* try keyword */
+ TOK_CATCH = 56, /* catch keyword */
+ TOK_FINALLY = 57, /* finally keyword */
+ TOK_THROW = 58, /* throw keyword */
+ TOK_INSTANCEOF = 59, /* instanceof keyword */
+ TOK_DEBUGGER = 60, /* debugger keyword */
+ TOK_XMLSTAGO = 61, /* XML start tag open (<) */
+ TOK_XMLETAGO = 62, /* XML end tag open (</) */
+ TOK_XMLPTAGC = 63, /* XML point tag close (/>) */
+ TOK_XMLTAGC = 64, /* XML start or end tag close (>) */
+ TOK_XMLNAME = 65, /* XML start-tag non-final fragment */
+ TOK_XMLATTR = 66, /* XML quoted attribute value */
+ TOK_XMLSPACE = 67, /* XML whitespace */
+ TOK_XMLTEXT = 68, /* XML text */
+ TOK_XMLCOMMENT = 69, /* XML comment */
+ TOK_XMLCDATA = 70, /* XML CDATA section */
+ TOK_XMLPI = 71, /* XML processing instruction */
+ TOK_AT = 72, /* XML attribute op (@) */
+ TOK_DBLCOLON = 73, /* namespace qualified name op (::) */
+ TOK_ANYNAME = 74, /* XML AnyName singleton (*) */
+ TOK_DBLDOT = 75, /* XML descendant op (..) */
+ TOK_FILTER = 76, /* XML filtering predicate op (.()) */
+ TOK_XMLELEM = 77, /* XML element node type (no token) */
+ TOK_XMLLIST = 78, /* XML list node type (no token) */
+ TOK_YIELD = 79, /* yield from generator function */
+ TOK_ARRAYCOMP = 80, /* array comprehension initialiser */
+ TOK_ARRAYPUSH = 81, /* array push within comprehension */
+ TOK_LEXICALSCOPE = 82, /* block scope AST node label */
+ TOK_LET = 83, /* let keyword */
+ TOK_BODY = 84, /* synthetic body of function with
+ destructuring formal parameters */
+ TOK_RESERVED, /* reserved keywords */
+ TOK_LIMIT /* domain size */
+} JSTokenType;
+
+#define IS_PRIMARY_TOKEN(tt) \
+ ((uintN)((tt) - TOK_NAME) <= (uintN)(TOK_PRIMARY - TOK_NAME))
+
+#define TOKEN_TYPE_IS_XML(tt) \
+ (tt == TOK_AT || tt == TOK_DBLCOLON || tt == TOK_ANYNAME)
+
+#if JS_HAS_BLOCK_SCOPE
+# define TOKEN_TYPE_IS_DECL(tt) ((tt) == TOK_VAR || (tt) == TOK_LET)
+#else
+# define TOKEN_TYPE_IS_DECL(tt) ((tt) == TOK_VAR)
+#endif
+
+struct JSStringBuffer {
+ jschar *base;
+ jschar *limit; /* length limit for quick bounds check */
+ jschar *ptr; /* slot for next non-NUL char to store */
+ void *data;
+ JSBool (*grow)(JSStringBuffer *sb, size_t newlength);
+ void (*free)(JSStringBuffer *sb);
+};
+
+#define STRING_BUFFER_ERROR_BASE ((jschar *) 1)
+#define STRING_BUFFER_OK(sb) ((sb)->base != STRING_BUFFER_ERROR_BASE)
+#define STRING_BUFFER_OFFSET(sb) ((sb)->ptr -(sb)->base)
+
+extern void
+js_InitStringBuffer(JSStringBuffer *sb);
+
+extern void
+js_FinishStringBuffer(JSStringBuffer *sb);
+
+extern void
+js_AppendChar(JSStringBuffer *sb, jschar c);
+
+extern void
+js_RepeatChar(JSStringBuffer *sb, jschar c, uintN count);
+
+extern void
+js_AppendCString(JSStringBuffer *sb, const char *asciiz);
+
+extern void
+js_AppendJSString(JSStringBuffer *sb, JSString *str);
+
+struct JSTokenPtr {
+ uint16 index; /* index of char in physical line */
+ uint16 lineno; /* physical line number */
+};
+
+struct JSTokenPos {
+ JSTokenPtr begin; /* first character and line of token */
+ JSTokenPtr end; /* index 1 past last char, last line */
+};
+
+struct JSToken {
+ JSTokenType type; /* char value or above enumerator */
+ JSTokenPos pos; /* token position in file */
+ jschar *ptr; /* beginning of token in line buffer */
+ union {
+ struct { /* non-numeric literal */
+ JSOp op; /* operator, for minimal parser */
+ JSAtom *atom; /* atom table entry */
+ } s;
+ struct { /* atom pair, for XML PIs */
+ JSAtom *atom2; /* auxiliary atom table entry */
+ JSAtom *atom; /* main atom table entry */
+ } p;
+ jsdouble dval; /* floating point number */
+ } u;
+};
+
+#define t_op u.s.op
+#define t_atom u.s.atom
+#define t_atom2 u.p.atom2
+#define t_dval u.dval
+
+typedef struct JSTokenBuf {
+ jschar *base; /* base of line or stream buffer */
+ jschar *limit; /* limit for quick bounds check */
+ jschar *ptr; /* next char to get, or slot to use */
+} JSTokenBuf;
+
+#define JS_LINE_LIMIT 256 /* logical line buffer size limit --
+ physical line length is unlimited */
+#define NTOKENS 4 /* 1 current + 2 lookahead, rounded */
+#define NTOKENS_MASK (NTOKENS-1) /* to power of 2 to avoid divmod by 3 */
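+/*
+ * Sketch of the ring-buffer arithmetic: with NTOKENS == 4, advancing via
+ * (cursor + 1) & NTOKENS_MASK walks 0, 1, 2, 3, 0, ... and retreating via
+ * (cursor - 1) & NTOKENS_MASK wraps 0 back to 3, so js_GetToken and
+ * js_UngetToken can share the same four-token ring.
+ */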
+
+struct JSTokenStream {
+ JSToken tokens[NTOKENS];/* circular token buffer */
+ uintN cursor; /* index of last parsed token */
+ uintN lookahead; /* count of lookahead tokens */
+ uintN lineno; /* current line number */
+ uintN ungetpos; /* next free char slot in ungetbuf */
+ jschar ungetbuf[6]; /* at most 6, for \uXXXX lookahead */
+ uintN flags; /* flags -- see below */
+ ptrdiff_t linelen; /* physical linebuf segment length */
+ ptrdiff_t linepos; /* linebuf offset in physical line */
+ JSTokenBuf linebuf; /* line buffer for diagnostics */
+ JSTokenBuf userbuf; /* user input buffer if !file */
+ JSStringBuffer tokenbuf; /* current token string buffer */
+ const char *filename; /* input filename or null */
+ FILE *file; /* stdio stream if reading from file */
+ JSPrincipals *principals; /* principals associated with source */
+ JSSourceHandler listener; /* callback for source; e.g., debugger */
+ void *listenerData; /* listener 'this' data */
+ void *listenerTSData;/* listener data for this TokenStream */
+ jschar *saveEOL; /* save next end of line in userbuf, to
+ optimize for very long lines */
+};
+
+#define CURRENT_TOKEN(ts) ((ts)->tokens[(ts)->cursor])
+#define ON_CURRENT_LINE(ts,pos) ((uint16)(ts)->lineno == (pos).end.lineno)
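+
+/*
+ * A standalone sketch (hypothetical "Sketch*" names, standard C only) of the
+ * circular token buffer above: because NTOKENS is a power of two, advancing
+ * or rewinding the cursor is a single mask operation rather than a div/mod
+ * by the logical buffer length of 3 (one current token plus two of lookahead).
+ */
+#if 0 /* illustrative only, not compiled */
+#define SKETCH_NTOKENS      4                    /* power of two >= 3 */
+#define SKETCH_NTOKENS_MASK (SKETCH_NTOKENS - 1)
+
+typedef struct SketchStream {
+    int tokens[SKETCH_NTOKENS];                  /* stands in for the JSToken slots */
+    unsigned cursor;                             /* index of the current token */
+    unsigned lookahead;                          /* tokens already scanned ahead */
+} SketchStream;
+
+/* Step to the next slot; "& MASK" replaces "% SKETCH_NTOKENS". */
+static int *
+SketchNextSlot(SketchStream *ss)
+{
+    ss->cursor = (ss->cursor + 1) & SKETCH_NTOKENS_MASK;
+    return &ss->tokens[ss->cursor];
+}
+
+/* Push the current token back, roughly the bookkeeping js_UngetToken does. */
+static void
+SketchUnget(SketchStream *ss)
+{
+    ss->lookahead++;
+    ss->cursor = (ss->cursor - 1) & SKETCH_NTOKENS_MASK;
+}
+#endif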
+
+/* JSTokenStream flags */
+#define TSF_ERROR 0x01 /* fatal error while compiling */
+#define TSF_EOF 0x02 /* hit end of file */
+#define TSF_NEWLINES 0x04 /* tokenize newlines */
+#define TSF_OPERAND 0x08 /* looking for operand, not operator */
+#define TSF_NLFLAG 0x20 /* last linebuf ended with \n */
+#define TSF_CRFLAG 0x40 /* linebuf would have ended with \r */
+#define TSF_DIRTYLINE 0x80 /* non-whitespace since start of line */
+#define TSF_OWNFILENAME 0x100 /* ts->filename is malloc'd */
+#define TSF_XMLTAGMODE 0x200 /* scanning within an XML tag in E4X */
+#define TSF_XMLTEXTMODE 0x400 /* scanning XMLText terminal from E4X */
+#define TSF_XMLONLYMODE 0x800 /* don't scan {expr} within text/tag */
+
+/* Flag indicating unexpected end of input, i.e. TOK_EOF not at top-level. */
+#define TSF_UNEXPECTED_EOF 0x1000
+
+/*
+ * To handle the hard case of contiguous HTML comments, we want to clear the
+ * TSF_DIRTYLINE flag at the end of each such comment. But we'd rather not
+ * scan for --> within every //-style comment unless we have to. So we set
+ * TSF_IN_HTML_COMMENT when a <!-- is scanned as an HTML begin-comment, and
+ * clear it (and TSF_DIRTYLINE) when we scan --> either on a clean line, or
+ * only if (ts->flags & TSF_IN_HTML_COMMENT), in a //-style comment.
+ *
+ * This still works as before given a malformed comment hiding hack such as:
+ *
+ * <script>
+ * <!-- comment hiding hack #1
+ * code goes here
+ * // --> oops, markup for script-unaware browsers goes here!
+ * </script>
+ *
+ * It does not cope with malformed comment hiding hacks where --> is hidden
+ * by C-style comments, or on a dirty line. Such cases are already broken.
+ */
+#define TSF_IN_HTML_COMMENT 0x2000
+
+/* Ignore keywords and return TOK_NAME instead to the parser. */
+#define TSF_KEYWORD_IS_NAME 0x4000
+
+/* Unicode separators that are treated as line terminators, in addition to \n, \r */
+#define LINE_SEPARATOR 0x2028
+#define PARA_SEPARATOR 0x2029
+
+/*
+ * Create a new token stream, either from an input buffer or from a file.
+ * Return null on file-open or memory-allocation failure.
+ *
+ * NB: All of js_New{,Buffer,File}TokenStream() return a pointer to transient
+ * memory in the current context's temp pool. This memory is deallocated via
+ * JS_ARENA_RELEASE() after parsing is finished.
+ */
+extern JSTokenStream *
+js_NewTokenStream(JSContext *cx, const jschar *base, size_t length,
+ const char *filename, uintN lineno, JSPrincipals *principals);
+
+extern JS_FRIEND_API(JSTokenStream *)
+js_NewBufferTokenStream(JSContext *cx, const jschar *base, size_t length);
+
+extern JS_FRIEND_API(JSTokenStream *)
+js_NewFileTokenStream(JSContext *cx, const char *filename, FILE *defaultfp);
+
+extern JS_FRIEND_API(JSBool)
+js_CloseTokenStream(JSContext *cx, JSTokenStream *ts);
+
+extern JS_FRIEND_API(int)
+js_fgets(char *buf, int size, FILE *file);
+
+/*
+ * If the given char array forms a JavaScript keyword, return the
+ * corresponding token type. Otherwise return TOK_EOF.
+ */
+extern JSTokenType
+js_CheckKeyword(const jschar *chars, size_t length);
+
+#define js_IsKeyword(chars, length) \
+ (js_CheckKeyword(chars, length) != TOK_EOF)
+
+/*
+ * Friend-exported API entry point to call a mapping function on each reserved
+ * identifier in the scanner's keyword table.
+ */
+extern JS_FRIEND_API(void)
+js_MapKeywords(void (*mapfun)(const char *));
+
+/*
+ * Report a compile-time error by its number, using ts or cg to show context.
+ * Return true for a warning, false for an error.
+ */
+extern JSBool
+js_ReportCompileErrorNumber(JSContext *cx, void *handle, uintN flags,
+ uintN errorNumber, ...);
+
+extern JSBool
+js_ReportCompileErrorNumberUC(JSContext *cx, void *handle, uintN flags,
+ uintN errorNumber, ...);
+
+/* Steal some JSREPORT_* bits (see jsapi.h) to tell the handle's type. */
+#define JSREPORT_HANDLE 0x300
+#define JSREPORT_TS 0x000
+#define JSREPORT_CG 0x100
+#define JSREPORT_PN 0x200
+
+/*
+ * Look ahead one token and return its type.
+ */
+extern JSTokenType
+js_PeekToken(JSContext *cx, JSTokenStream *ts);
+
+extern JSTokenType
+js_PeekTokenSameLine(JSContext *cx, JSTokenStream *ts);
+
+/*
+ * Get the next token from ts.
+ */
+extern JSTokenType
+js_GetToken(JSContext *cx, JSTokenStream *ts);
+
+/*
+ * Push back the last scanned token onto ts.
+ */
+extern void
+js_UngetToken(JSTokenStream *ts);
+
+/*
+ * Get the next token from ts if its type is tt.
+ */
+extern JSBool
+js_MatchToken(JSContext *cx, JSTokenStream *ts, JSTokenType tt);
+
+JS_END_EXTERN_C
+
+#endif /* jsscan_h___ */
diff --git a/third_party/js-1.7/jsscope.c b/third_party/js-1.7/jsscope.c
new file mode 100644
index 0000000..49b55a6
--- /dev/null
+++ b/third_party/js-1.7/jsscope.c
@@ -0,0 +1,1776 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sw=4 et tw=78:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * JS symbol tables.
+ */
+#include "jsstddef.h"
+#include <stdlib.h>
+#include <string.h>
+#include "jstypes.h"
+#include "jsarena.h"
+#include "jsbit.h"
+#include "jsclist.h"
+#include "jsdhash.h"
+#include "jsutil.h" /* Added by JSIFY */
+#include "jsapi.h"
+#include "jsatom.h"
+#include "jscntxt.h"
+#include "jsdbgapi.h"
+#include "jslock.h"
+#include "jsnum.h"
+#include "jsscope.h"
+#include "jsstr.h"
+
+JSScope *
+js_GetMutableScope(JSContext *cx, JSObject *obj)
+{
+ JSScope *scope, *newscope;
+
+ scope = OBJ_SCOPE(obj);
+ JS_ASSERT(JS_IS_SCOPE_LOCKED(cx, scope));
+ if (scope->object == obj)
+ return scope;
+ newscope = js_NewScope(cx, 0, scope->map.ops, LOCKED_OBJ_GET_CLASS(obj),
+ obj);
+ if (!newscope)
+ return NULL;
+ JS_LOCK_SCOPE(cx, newscope);
+ obj->map = js_HoldObjectMap(cx, &newscope->map);
+ scope = (JSScope *) js_DropObjectMap(cx, &scope->map, obj);
+ JS_TRANSFER_SCOPE_LOCK(cx, scope, newscope);
+ return newscope;
+}
+
+/*
+ * JSScope uses multiplicative hashing, _a la_ jsdhash.[ch], but specialized
+ * to minimize footprint. But if a scope has fewer than SCOPE_HASH_THRESHOLD
+ * entries, we use linear search and avoid allocating scope->table.
+ */
+#define SCOPE_HASH_THRESHOLD 6
+#define MIN_SCOPE_SIZE_LOG2 4
+#define MIN_SCOPE_SIZE JS_BIT(MIN_SCOPE_SIZE_LOG2)
+#define SCOPE_TABLE_NBYTES(n) ((n) * sizeof(JSScopeProperty *))
+
+static void
+InitMinimalScope(JSScope *scope)
+{
+ scope->hashShift = JS_DHASH_BITS - MIN_SCOPE_SIZE_LOG2;
+ scope->entryCount = scope->removedCount = 0;
+ scope->table = NULL;
+ scope->lastProp = NULL;
+}
+
+static JSBool
+CreateScopeTable(JSContext *cx, JSScope *scope, JSBool report)
+{
+ int sizeLog2;
+ JSScopeProperty *sprop, **spp;
+
+ JS_ASSERT(!scope->table);
+ JS_ASSERT(scope->lastProp);
+
+ if (scope->entryCount > SCOPE_HASH_THRESHOLD) {
+ /*
+ * Ouch: calloc failed at least once already -- let's try again,
+ * overallocating to hold at least twice the current population.
+ */
+ sizeLog2 = JS_CeilingLog2(2 * scope->entryCount);
+ scope->hashShift = JS_DHASH_BITS - sizeLog2;
+ } else {
+ JS_ASSERT(scope->hashShift == JS_DHASH_BITS - MIN_SCOPE_SIZE_LOG2);
+ sizeLog2 = MIN_SCOPE_SIZE_LOG2;
+ }
+
+ scope->table = (JSScopeProperty **)
+ calloc(JS_BIT(sizeLog2), sizeof(JSScopeProperty *));
+ if (!scope->table) {
+ if (report)
+ JS_ReportOutOfMemory(cx);
+ return JS_FALSE;
+ }
+ js_UpdateMallocCounter(cx, JS_BIT(sizeLog2) * sizeof(JSScopeProperty *));
+
+ scope->hashShift = JS_DHASH_BITS - sizeLog2;
+ for (sprop = scope->lastProp; sprop; sprop = sprop->parent) {
+ spp = js_SearchScope(scope, sprop->id, JS_TRUE);
+ SPROP_STORE_PRESERVING_COLLISION(spp, sprop);
+ }
+ return JS_TRUE;
+}
+
+JSScope *
+js_NewScope(JSContext *cx, jsrefcount nrefs, JSObjectOps *ops, JSClass *clasp,
+ JSObject *obj)
+{
+ JSScope *scope;
+
+ scope = (JSScope *) JS_malloc(cx, sizeof(JSScope));
+ if (!scope)
+ return NULL;
+
+ js_InitObjectMap(&scope->map, nrefs, ops, clasp);
+ scope->object = obj;
+ scope->flags = 0;
+ InitMinimalScope(scope);
+
+#ifdef JS_THREADSAFE
+ scope->ownercx = cx;
+ memset(&scope->lock, 0, sizeof scope->lock);
+
+ /*
+ * Set u.link = NULL, not u.count = 0, in case the target architecture's
+ * null pointer has a non-zero integer representation.
+ */
+ scope->u.link = NULL;
+
+#ifdef DEBUG
+ scope->file[0] = scope->file[1] = scope->file[2] = scope->file[3] = NULL;
+ scope->line[0] = scope->line[1] = scope->line[2] = scope->line[3] = 0;
+#endif
+#endif
+
+ JS_RUNTIME_METER(cx->runtime, liveScopes);
+ JS_RUNTIME_METER(cx->runtime, totalScopes);
+ return scope;
+}
+
+#ifdef DEBUG_SCOPE_COUNT
+extern void
+js_unlog_scope(JSScope *scope);
+#endif
+
+void
+js_DestroyScope(JSContext *cx, JSScope *scope)
+{
+#ifdef DEBUG_SCOPE_COUNT
+ js_unlog_scope(scope);
+#endif
+
+#ifdef JS_THREADSAFE
+ /* Scope must be single-threaded at this point, so set scope->ownercx. */
+ JS_ASSERT(scope->u.count == 0);
+ scope->ownercx = cx;
+ js_FinishLock(&scope->lock);
+#endif
+ if (scope->table)
+ JS_free(cx, scope->table);
+
+#ifdef DEBUG
+ JS_LOCK_RUNTIME_VOID(cx->runtime,
+ cx->runtime->liveScopeProps -= scope->entryCount);
+#endif
+ JS_RUNTIME_UNMETER(cx->runtime, liveScopes);
+ JS_free(cx, scope);
+}
+
+#ifdef DUMP_SCOPE_STATS
+typedef struct JSScopeStats {
+ jsrefcount searches;
+ jsrefcount steps;
+ jsrefcount hits;
+ jsrefcount misses;
+ jsrefcount stepHits;
+ jsrefcount stepMisses;
+ jsrefcount adds;
+ jsrefcount redundantAdds;
+ jsrefcount addFailures;
+ jsrefcount changeFailures;
+ jsrefcount compresses;
+ jsrefcount grows;
+ jsrefcount removes;
+ jsrefcount removeFrees;
+ jsrefcount uselessRemoves;
+ jsrefcount shrinks;
+} JSScopeStats;
+
+JS_FRIEND_DATA(JSScopeStats) js_scope_stats;
+
+# define METER(x) JS_ATOMIC_INCREMENT(&js_scope_stats.x)
+#else
+# define METER(x) /* nothing */
+#endif
+
+/*
+ * Double hashing needs the second hash code to be relatively prime to table
+ * size, so we simply make hash2 odd. The inputs to multiplicative hash are
+ * the golden ratio, expressed as a fixed-point 32 bit fraction, and the int
+ * property index or named property's atom number (observe that most objects
+ * have either no indexed properties, or almost all indexed and a few names,
+ * so collisions between index and atom number are unlikely).
+ */
+#define SCOPE_HASH0(id) (HASH_ID(id) * JS_GOLDEN_RATIO)
+#define SCOPE_HASH1(hash0,shift) ((hash0) >> (shift))
+#define SCOPE_HASH2(hash0,log2,shift) ((((hash0) << (log2)) >> (shift)) | 1)
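+
+/*
+ * A condensed, standalone sketch of the probe loop that js_SearchScope builds
+ * from the macros above ("Sketch*" names are hypothetical; removed-entry
+ * sentinels and collision flags are omitted). With a table of 2^log2 slots
+ * (1 <= log2 < 32), an odd second hash is coprime to the table size, so the
+ * probe sequence visits every slot and must reach the key or a free slot.
+ */
+#if 0 /* illustrative only, not compiled */
+#include <stdint.h>
+
+#define SKETCH_GOLDEN_RATIO 0x9E3779B9U          /* same constant as JS_GOLDEN_RATIO */
+
+static uint32_t
+SketchProbe(void **table, unsigned log2, void *key, uint32_t id)
+{
+    unsigned shift = 32 - log2;                       /* plays the role of hashShift */
+    uint32_t hash0 = id * SKETCH_GOLDEN_RATIO;
+    uint32_t hash1 = hash0 >> shift;                  /* primary slot */
+    uint32_t hash2 = ((hash0 << log2) >> shift) | 1;  /* odd step size */
+    uint32_t mask = (1u << log2) - 1;
+
+    while (table[hash1] && table[hash1] != key) {
+        hash1 -= hash2;                               /* double-hash step */
+        hash1 &= mask;
+    }
+    return hash1;                                     /* slot of key, or first free slot */
+}
+#endif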
+
+JS_FRIEND_API(JSScopeProperty **)
+js_SearchScope(JSScope *scope, jsid id, JSBool adding)
+{
+ JSHashNumber hash0, hash1, hash2;
+ int hashShift, sizeLog2;
+ JSScopeProperty *stored, *sprop, **spp, **firstRemoved;
+ uint32 sizeMask;
+
+ METER(searches);
+ if (!scope->table) {
+ /* Not enough properties to justify hashing: search from lastProp. */
+ JS_ASSERT(!SCOPE_HAD_MIDDLE_DELETE(scope));
+ for (spp = &scope->lastProp; (sprop = *spp); spp = &sprop->parent) {
+ if (sprop->id == id) {
+ METER(hits);
+ return spp;
+ }
+ }
+ METER(misses);
+ return spp;
+ }
+
+ /* Compute the primary hash address. */
+ hash0 = SCOPE_HASH0(id);
+ hashShift = scope->hashShift;
+ hash1 = SCOPE_HASH1(hash0, hashShift);
+ spp = scope->table + hash1;
+
+ /* Miss: return space for a new entry. */
+ stored = *spp;
+ if (SPROP_IS_FREE(stored)) {
+ METER(misses);
+ return spp;
+ }
+
+ /* Hit: return entry. */
+ sprop = SPROP_CLEAR_COLLISION(stored);
+ if (sprop && sprop->id == id) {
+ METER(hits);
+ return spp;
+ }
+
+ /* Collision: double hash. */
+ sizeLog2 = JS_DHASH_BITS - hashShift;
+ hash2 = SCOPE_HASH2(hash0, sizeLog2, hashShift);
+ sizeMask = JS_BITMASK(sizeLog2);
+
+ /* Save the first removed entry pointer so we can recycle it if adding. */
+ if (SPROP_IS_REMOVED(stored)) {
+ firstRemoved = spp;
+ } else {
+ firstRemoved = NULL;
+ if (adding && !SPROP_HAD_COLLISION(stored))
+ SPROP_FLAG_COLLISION(spp, sprop);
+ }
+
+ for (;;) {
+ METER(steps);
+ hash1 -= hash2;
+ hash1 &= sizeMask;
+ spp = scope->table + hash1;
+
+ stored = *spp;
+ if (SPROP_IS_FREE(stored)) {
+ METER(stepMisses);
+ return (adding && firstRemoved) ? firstRemoved : spp;
+ }
+
+ sprop = SPROP_CLEAR_COLLISION(stored);
+ if (sprop && sprop->id == id) {
+ METER(stepHits);
+ return spp;
+ }
+
+ if (SPROP_IS_REMOVED(stored)) {
+ if (!firstRemoved)
+ firstRemoved = spp;
+ } else {
+ if (adding && !SPROP_HAD_COLLISION(stored))
+ SPROP_FLAG_COLLISION(spp, sprop);
+ }
+ }
+
+ /* NOTREACHED */
+ return NULL;
+}
+
+static JSBool
+ChangeScope(JSContext *cx, JSScope *scope, int change)
+{
+ int oldlog2, newlog2;
+ uint32 oldsize, newsize, nbytes;
+ JSScopeProperty **table, **oldtable, **spp, **oldspp, *sprop;
+
+ /* Grow, shrink, or compress by changing scope->table. */
+ oldlog2 = JS_DHASH_BITS - scope->hashShift;
+ newlog2 = oldlog2 + change;
+ oldsize = JS_BIT(oldlog2);
+ newsize = JS_BIT(newlog2);
+ nbytes = SCOPE_TABLE_NBYTES(newsize);
+ table = (JSScopeProperty **) calloc(nbytes, 1);
+ if (!table) {
+ JS_ReportOutOfMemory(cx);
+ return JS_FALSE;
+ }
+
+ /* Now that we have a new table allocated, update scope members. */
+ scope->hashShift = JS_DHASH_BITS - newlog2;
+ scope->removedCount = 0;
+ oldtable = scope->table;
+ scope->table = table;
+
+ /* Treat the above calloc as a JS_malloc, to match CreateScopeTable. */
+ cx->runtime->gcMallocBytes += nbytes;
+
+ /* Copy only live entries, leaving removed and free ones behind. */
+ for (oldspp = oldtable; oldsize != 0; oldspp++) {
+ sprop = SPROP_FETCH(oldspp);
+ if (sprop) {
+ spp = js_SearchScope(scope, sprop->id, JS_TRUE);
+ JS_ASSERT(SPROP_IS_FREE(*spp));
+ *spp = sprop;
+ }
+ oldsize--;
+ }
+
+ /* Finally, free the old table storage. */
+ JS_free(cx, oldtable);
+ return JS_TRUE;
+}
+
+/*
+ * Take care to exclude the mark and duplicate bits, in case we're called from
+ * the GC, or we are searching for a property that has not yet been flagged as
+ * a duplicate when making a duplicate formal parameter.
+ */
+#define SPROP_FLAGS_NOT_MATCHED (SPROP_MARK | SPROP_IS_DUPLICATE)
+
+JS_STATIC_DLL_CALLBACK(JSDHashNumber)
+js_HashScopeProperty(JSDHashTable *table, const void *key)
+{
+ const JSScopeProperty *sprop = (const JSScopeProperty *)key;
+ JSDHashNumber hash;
+ JSPropertyOp gsop;
+
+ /* Accumulate from least to most random so the low bits are most random. */
+ hash = 0;
+ gsop = sprop->getter;
+ if (gsop)
+ hash = (hash >> (JS_DHASH_BITS - 4)) ^ (hash << 4) ^ (jsword)gsop;
+ gsop = sprop->setter;
+ if (gsop)
+ hash = (hash >> (JS_DHASH_BITS - 4)) ^ (hash << 4) ^ (jsword)gsop;
+
+ hash = (hash >> (JS_DHASH_BITS - 4)) ^ (hash << 4)
+ ^ (sprop->flags & ~SPROP_FLAGS_NOT_MATCHED);
+
+ hash = (hash >> (JS_DHASH_BITS - 4)) ^ (hash << 4) ^ sprop->attrs;
+ hash = (hash >> (JS_DHASH_BITS - 4)) ^ (hash << 4) ^ sprop->shortid;
+ hash = (hash >> (JS_DHASH_BITS - 4)) ^ (hash << 4) ^ sprop->slot;
+ hash = (hash >> (JS_DHASH_BITS - 4)) ^ (hash << 4) ^ sprop->id;
+ return hash;
+}
+
+#define SPROP_MATCH(sprop, child) \
+ SPROP_MATCH_PARAMS(sprop, (child)->id, (child)->getter, (child)->setter, \
+ (child)->slot, (child)->attrs, (child)->flags, \
+ (child)->shortid)
+
+#define SPROP_MATCH_PARAMS(sprop, aid, agetter, asetter, aslot, aattrs, \
+ aflags, ashortid) \
+ ((sprop)->id == (aid) && \
+ SPROP_MATCH_PARAMS_AFTER_ID(sprop, agetter, asetter, aslot, aattrs, \
+ aflags, ashortid))
+
+#define SPROP_MATCH_PARAMS_AFTER_ID(sprop, agetter, asetter, aslot, aattrs, \
+ aflags, ashortid) \
+ ((sprop)->getter == (agetter) && \
+ (sprop)->setter == (asetter) && \
+ (sprop)->slot == (aslot) && \
+ (sprop)->attrs == (aattrs) && \
+ (((sprop)->flags ^ (aflags)) & ~SPROP_FLAGS_NOT_MATCHED) == 0 && \
+ (sprop)->shortid == (ashortid))
+
+JS_STATIC_DLL_CALLBACK(JSBool)
+js_MatchScopeProperty(JSDHashTable *table,
+ const JSDHashEntryHdr *hdr,
+ const void *key)
+{
+ const JSPropertyTreeEntry *entry = (const JSPropertyTreeEntry *)hdr;
+ const JSScopeProperty *sprop = entry->child;
+ const JSScopeProperty *kprop = (const JSScopeProperty *)key;
+
+ return SPROP_MATCH(sprop, kprop);
+}
+
+static const JSDHashTableOps PropertyTreeHashOps = {
+ JS_DHashAllocTable,
+ JS_DHashFreeTable,
+ JS_DHashGetKeyStub,
+ js_HashScopeProperty,
+ js_MatchScopeProperty,
+ JS_DHashMoveEntryStub,
+ JS_DHashClearEntryStub,
+ JS_DHashFinalizeStub,
+ NULL
+};
+
+/*
+ * A property tree node on rt->propertyFreeList overlays the following prefix
+ * struct on JSScopeProperty.
+ */
+typedef struct FreeNode {
+ jsid id;
+ JSScopeProperty *next;
+ JSScopeProperty **prevp;
+} FreeNode;
+
+#define FREENODE(sprop) ((FreeNode *) (sprop))
+
+#define FREENODE_INSERT(list, sprop) \
+ JS_BEGIN_MACRO \
+ FREENODE(sprop)->next = (list); \
+ FREENODE(sprop)->prevp = &(list); \
+ if (list) \
+ FREENODE(list)->prevp = &FREENODE(sprop)->next; \
+ (list) = (sprop); \
+ JS_END_MACRO
+
+#define FREENODE_REMOVE(sprop) \
+ JS_BEGIN_MACRO \
+ *FREENODE(sprop)->prevp = FREENODE(sprop)->next; \
+ if (FREENODE(sprop)->next) \
+ FREENODE(FREENODE(sprop)->next)->prevp = FREENODE(sprop)->prevp; \
+ JS_END_MACRO
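+
+/*
+ * The FreeNode overlay above gives O(1) insertion and O(1) removal from an
+ * arbitrary position without a full doubly-linked node: prevp stores the
+ * address of whichever pointer currently points at the node. A standalone
+ * sketch of the same idiom (hypothetical "Sketch*" names, standard C only):
+ */
+#if 0 /* illustrative only, not compiled */
+typedef struct SketchNode {
+    struct SketchNode *next;
+    struct SketchNode **prevp;   /* address of the pointer that points at us */
+} SketchNode;
+
+static void
+SketchInsert(SketchNode **listp, SketchNode *node)
+{
+    node->next = *listp;
+    node->prevp = listp;
+    if (*listp)
+        (*listp)->prevp = &node->next;
+    *listp = node;
+}
+
+static void
+SketchRemove(SketchNode *node)
+{
+    /* No list head needed: patch whatever pointed at node. */
+    *node->prevp = node->next;
+    if (node->next)
+        node->next->prevp = node->prevp;
+}
+#endif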
+
+/* NB: Called with the runtime lock held. */
+static JSScopeProperty *
+NewScopeProperty(JSRuntime *rt)
+{
+ JSScopeProperty *sprop;
+
+ sprop = rt->propertyFreeList;
+ if (sprop) {
+ FREENODE_REMOVE(sprop);
+ } else {
+ JS_ARENA_ALLOCATE_CAST(sprop, JSScopeProperty *,
+ &rt->propertyArenaPool,
+ sizeof(JSScopeProperty));
+ if (!sprop)
+ return NULL;
+ }
+
+ JS_RUNTIME_METER(rt, livePropTreeNodes);
+ JS_RUNTIME_METER(rt, totalPropTreeNodes);
+ return sprop;
+}
+
+#define CHUNKY_KIDS_TAG ((jsuword)1)
+#define KIDS_IS_CHUNKY(kids) ((jsuword)(kids) & CHUNKY_KIDS_TAG)
+#define KIDS_TO_CHUNK(kids) ((PropTreeKidsChunk *) \
+ ((jsuword)(kids) & ~CHUNKY_KIDS_TAG))
+#define CHUNK_TO_KIDS(chunk) ((JSScopeProperty *) \
+ ((jsuword)(chunk) | CHUNKY_KIDS_TAG))
+#define MAX_KIDS_PER_CHUNK 10
+
+typedef struct PropTreeKidsChunk PropTreeKidsChunk;
+
+struct PropTreeKidsChunk {
+ JSScopeProperty *kids[MAX_KIDS_PER_CHUNK];
+ PropTreeKidsChunk *next;
+};
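+
+/*
+ * The kids field of a property tree node holds either a single child pointer
+ * or a tagged pointer to a PropTreeKidsChunk; the low bit can carry the tag
+ * because heap allocations are at least word aligned. A standalone sketch of
+ * the tagging idiom (hypothetical "Sketch*" names; uintptr_t stands in for
+ * jsuword):
+ */
+#if 0 /* illustrative only, not compiled */
+#include <stdint.h>
+#include <assert.h>
+
+#define SKETCH_CHUNK_TAG ((uintptr_t) 1)
+
+static void *
+SketchTagChunk(void *chunk)
+{
+    assert(((uintptr_t) chunk & SKETCH_CHUNK_TAG) == 0); /* needs 2-byte alignment */
+    return (void *) ((uintptr_t) chunk | SKETCH_CHUNK_TAG);
+}
+
+static int
+SketchIsChunk(void *kids)
+{
+    return ((uintptr_t) kids & SKETCH_CHUNK_TAG) != 0;
+}
+
+static void *
+SketchUntagChunk(void *kids)
+{
+    return (void *) ((uintptr_t) kids & ~SKETCH_CHUNK_TAG);
+}
+#endif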
+
+static PropTreeKidsChunk *
+NewPropTreeKidsChunk(JSRuntime *rt)
+{
+ PropTreeKidsChunk *chunk;
+
+ chunk = calloc(1, sizeof *chunk);
+ if (!chunk)
+ return NULL;
+ JS_ASSERT(((jsuword)chunk & CHUNKY_KIDS_TAG) == 0);
+ JS_RUNTIME_METER(rt, propTreeKidsChunks);
+ return chunk;
+}
+
+static void
+DestroyPropTreeKidsChunk(JSRuntime *rt, PropTreeKidsChunk *chunk)
+{
+ JS_RUNTIME_UNMETER(rt, propTreeKidsChunks);
+ free(chunk);
+}
+
+/* NB: Called with the runtime lock held. */
+static JSBool
+InsertPropertyTreeChild(JSRuntime *rt, JSScopeProperty *parent,
+ JSScopeProperty *child, PropTreeKidsChunk *sweptChunk)
+{
+ JSPropertyTreeEntry *entry;
+ JSScopeProperty **childp, *kids, *sprop;
+ PropTreeKidsChunk *chunk, **chunkp;
+ uintN i;
+
+ JS_ASSERT(!parent || child->parent != parent);
+
+ if (!parent) {
+ entry = (JSPropertyTreeEntry *)
+ JS_DHashTableOperate(&rt->propertyTreeHash, child, JS_DHASH_ADD);
+ if (!entry)
+ return JS_FALSE;
+ childp = &entry->child;
+ sprop = *childp;
+ if (!sprop) {
+ *childp = child;
+ } else {
+ /*
+ * A "Duplicate child" case.
+ *
+ * We can't do away with child, as at least one live scope entry
+ * still points at it. What's more, that scope's lastProp chains
+ * through an ancestor line to reach child, and js_Enumerate and
+ * others count on this linkage. We must leave child out of the
+ * hash table, and not require it to be there when we eventually
+ * GC it (see RemovePropertyTreeChild, below).
+ *
+ * It is necessary to leave the duplicate child out of the hash
+ * table to preserve entry uniqueness. It is safe to leave the
+ * child out of the hash table (unlike the duplicate child cases
+ * below), because the child's parent link will be null, which
+ * can't dangle.
+ */
+ JS_ASSERT(sprop != child && SPROP_MATCH(sprop, child));
+ JS_RUNTIME_METER(rt, duplicatePropTreeNodes);
+ }
+ } else {
+ childp = &parent->kids;
+ kids = *childp;
+ if (kids) {
+ if (KIDS_IS_CHUNKY(kids)) {
+ chunk = KIDS_TO_CHUNK(kids);
+ do {
+ for (i = 0; i < MAX_KIDS_PER_CHUNK; i++) {
+ childp = &chunk->kids[i];
+ sprop = *childp;
+ if (!sprop)
+ goto insert;
+
+ JS_ASSERT(sprop != child);
+ if (SPROP_MATCH(sprop, child)) {
+ /*
+ * Duplicate child, see comment above. In this
+ * case, we must let the duplicate be inserted at
+ * this level in the tree, so we keep iterating,
+ * looking for an empty slot in which to insert.
+ */
+ JS_ASSERT(sprop != child);
+ JS_RUNTIME_METER(rt, duplicatePropTreeNodes);
+ }
+ }
+ chunkp = &chunk->next;
+ } while ((chunk = *chunkp) != NULL);
+
+ if (sweptChunk) {
+ chunk = sweptChunk;
+ } else {
+ chunk = NewPropTreeKidsChunk(rt);
+ if (!chunk)
+ return JS_FALSE;
+ }
+ *chunkp = chunk;
+ childp = &chunk->kids[0];
+ } else {
+ sprop = kids;
+ JS_ASSERT(sprop != child);
+ if (SPROP_MATCH(sprop, child)) {
+ /*
+ * Duplicate child, see comment above. Once again, we
+ * must let duplicates created by deletion pile up in a
+ * kids-chunk-list, in order to find them when sweeping
+ * and thereby avoid dangling parent pointers.
+ */
+ JS_RUNTIME_METER(rt, duplicatePropTreeNodes);
+ }
+ if (sweptChunk) {
+ chunk = sweptChunk;
+ } else {
+ chunk = NewPropTreeKidsChunk(rt);
+ if (!chunk)
+ return JS_FALSE;
+ }
+ parent->kids = CHUNK_TO_KIDS(chunk);
+ chunk->kids[0] = sprop;
+ childp = &chunk->kids[1];
+ }
+ }
+ insert:
+ *childp = child;
+ }
+
+ child->parent = parent;
+ return JS_TRUE;
+}
+
+/* NB: Called with the runtime lock held. */
+static PropTreeKidsChunk *
+RemovePropertyTreeChild(JSRuntime *rt, JSScopeProperty *child)
+{
+ JSPropertyTreeEntry *entry;
+ JSScopeProperty *parent, *kids, *kid;
+ PropTreeKidsChunk *list, *chunk, **chunkp, *lastChunk;
+ uintN i, j;
+
+ parent = child->parent;
+ if (!parent) {
+ /*
+ * Don't remove child if it is not in rt->propertyTreeHash, but only
+ * matches a root child in the table that has compatible members. See
+ * the "Duplicate child" comments in InsertPropertyTreeChild, above.
+ */
+ entry = (JSPropertyTreeEntry *)
+ JS_DHashTableOperate(&rt->propertyTreeHash, child, JS_DHASH_LOOKUP);
+
+ if (entry->child == child)
+ JS_DHashTableRawRemove(&rt->propertyTreeHash, &entry->hdr);
+ } else {
+ kids = parent->kids;
+ if (KIDS_IS_CHUNKY(kids)) {
+ list = chunk = KIDS_TO_CHUNK(kids);
+ chunkp = &list;
+
+ do {
+ for (i = 0; i < MAX_KIDS_PER_CHUNK; i++) {
+ if (chunk->kids[i] == child) {
+ lastChunk = chunk;
+ if (!lastChunk->next) {
+ j = i + 1;
+ } else {
+ j = 0;
+ do {
+ chunkp = &lastChunk->next;
+ lastChunk = *chunkp;
+ } while (lastChunk->next);
+ }
+ for (; j < MAX_KIDS_PER_CHUNK; j++) {
+ if (!lastChunk->kids[j])
+ break;
+ }
+ --j;
+ if (chunk != lastChunk || j > i)
+ chunk->kids[i] = lastChunk->kids[j];
+ lastChunk->kids[j] = NULL;
+ if (j == 0) {
+ *chunkp = NULL;
+ if (!list)
+ parent->kids = NULL;
+ return lastChunk;
+ }
+ return NULL;
+ }
+ }
+
+ chunkp = &chunk->next;
+ } while ((chunk = *chunkp) != NULL);
+ } else {
+ kid = kids;
+ if (kid == child)
+ parent->kids = NULL;
+ }
+ }
+ return NULL;
+}
+
+/*
+ * Called *without* the runtime lock held, this function acquires that lock
+ * only when inserting a new child. Thus there may be races to find or add
+ * a node that result in duplicates. We expect such races to be rare!
+ */
+static JSScopeProperty *
+GetPropertyTreeChild(JSContext *cx, JSScopeProperty *parent,
+ JSScopeProperty *child)
+{
+ JSRuntime *rt;
+ JSPropertyTreeEntry *entry;
+ JSScopeProperty *sprop;
+ PropTreeKidsChunk *chunk;
+ uintN i;
+
+ rt = cx->runtime;
+ if (!parent) {
+ JS_LOCK_RUNTIME(rt);
+
+ entry = (JSPropertyTreeEntry *)
+ JS_DHashTableOperate(&rt->propertyTreeHash, child, JS_DHASH_ADD);
+ if (!entry)
+ goto out_of_memory;
+
+ sprop = entry->child;
+ if (sprop)
+ goto out;
+ } else {
+ /*
+ * Because chunks are appended at the end and never deleted except by
+ * the GC, we can search without taking the runtime lock. We may miss
+ * a matching sprop added by another thread, and make a duplicate one,
+ * but that is an unlikely, therefore small, cost. The property tree
+ * has extremely low fan-out below its root in popular embeddings with
+ * real-world workloads.
+ *
+ * If workload changes so as to increase fan-out significantly below
+ * the property tree root, we'll want to add another tag bit stored in
+ * parent->kids that indicates a JSDHashTable pointer.
+ */
+ entry = NULL;
+ sprop = parent->kids;
+ if (sprop) {
+ if (KIDS_IS_CHUNKY(sprop)) {
+ chunk = KIDS_TO_CHUNK(sprop);
+ do {
+ for (i = 0; i < MAX_KIDS_PER_CHUNK; i++) {
+ sprop = chunk->kids[i];
+ if (!sprop)
+ goto not_found;
+
+ if (SPROP_MATCH(sprop, child))
+ return sprop;
+ }
+ } while ((chunk = chunk->next) != NULL);
+ } else {
+ if (SPROP_MATCH(sprop, child))
+ return sprop;
+ }
+ }
+
+ not_found:
+ JS_LOCK_RUNTIME(rt);
+ }
+
+ sprop = NewScopeProperty(rt);
+ if (!sprop)
+ goto out_of_memory;
+
+ sprop->id = child->id;
+ sprop->getter = child->getter;
+ sprop->setter = child->setter;
+ sprop->slot = child->slot;
+ sprop->attrs = child->attrs;
+ sprop->flags = child->flags;
+ sprop->shortid = child->shortid;
+ sprop->parent = sprop->kids = NULL;
+ if (!parent) {
+ entry->child = sprop;
+ } else {
+ if (!InsertPropertyTreeChild(rt, parent, sprop, NULL))
+ goto out_of_memory;
+ }
+
+out:
+ JS_UNLOCK_RUNTIME(rt);
+ return sprop;
+
+out_of_memory:
+ JS_UNLOCK_RUNTIME(rt);
+ JS_ReportOutOfMemory(cx);
+ return NULL;
+}
+
+#ifdef DEBUG_notbrendan
+#define CHECK_ANCESTOR_LINE(scope, sparse) \
+ JS_BEGIN_MACRO \
+ if ((scope)->table) CheckAncestorLine(scope, sparse); \
+ JS_END_MACRO
+
+static void
+CheckAncestorLine(JSScope *scope, JSBool sparse)
+{
+ uint32 size;
+ JSScopeProperty **spp, **start, **end, *ancestorLine, *sprop, *aprop;
+ uint32 entryCount, ancestorCount;
+
+ ancestorLine = SCOPE_LAST_PROP(scope);
+ if (ancestorLine)
+ JS_ASSERT(SCOPE_HAS_PROPERTY(scope, ancestorLine));
+
+ entryCount = 0;
+ size = SCOPE_CAPACITY(scope);
+ start = scope->table;
+ for (spp = start, end = start + size; spp < end; spp++) {
+ sprop = SPROP_FETCH(spp);
+ if (sprop) {
+ entryCount++;
+ for (aprop = ancestorLine; aprop; aprop = aprop->parent) {
+ if (aprop == sprop)
+ break;
+ }
+ JS_ASSERT(aprop);
+ }
+ }
+ JS_ASSERT(entryCount == scope->entryCount);
+
+ ancestorCount = 0;
+ for (sprop = ancestorLine; sprop; sprop = sprop->parent) {
+ if (SCOPE_HAD_MIDDLE_DELETE(scope) &&
+ !SCOPE_HAS_PROPERTY(scope, sprop)) {
+ JS_ASSERT(sparse || (sprop->flags & SPROP_IS_DUPLICATE));
+ continue;
+ }
+ ancestorCount++;
+ }
+ JS_ASSERT(ancestorCount == scope->entryCount);
+}
+#else
+#define CHECK_ANCESTOR_LINE(scope, sparse) /* nothing */
+#endif
+
+static void
+ReportReadOnlyScope(JSContext *cx, JSScope *scope)
+{
+ JSString *str;
+
+ str = js_ValueToString(cx, OBJECT_TO_JSVAL(scope->object));
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_READ_ONLY,
+ str
+ ? JS_GetStringBytes(str)
+ : LOCKED_OBJ_GET_CLASS(scope->object)->name);
+}
+
+JSScopeProperty *
+js_AddScopeProperty(JSContext *cx, JSScope *scope, jsid id,
+ JSPropertyOp getter, JSPropertyOp setter, uint32 slot,
+ uintN attrs, uintN flags, intN shortid)
+{
+ JSScopeProperty **spp, *sprop, *overwriting, **spvec, **spp2, child;
+ uint32 size, splen, i;
+ int change;
+ JSTempValueRooter tvr;
+
+ JS_ASSERT(JS_IS_SCOPE_LOCKED(cx, scope));
+ CHECK_ANCESTOR_LINE(scope, JS_TRUE);
+
+ /*
+ * You can't add properties to a sealed scope. But note well that you can
+ * change property attributes in a sealed scope, even though that replaces
+ * a JSScopeProperty * in the scope's hash table -- but no id is added, so
+ * the scope remains sealed.
+ */
+ if (SCOPE_IS_SEALED(scope)) {
+ ReportReadOnlyScope(cx, scope);
+ return NULL;
+ }
+
+ /*
+ * Normalize stub getter and setter values for faster is-stub testing in
+ * the SPROP_CALL_[GS]ETTER macros.
+ */
+ if (getter == JS_PropertyStub)
+ getter = NULL;
+ if (setter == JS_PropertyStub)
+ setter = NULL;
+
+ /*
+ * Search for id in order to claim its entry, allocating a property tree
+ * node if one doesn't already exist for our parameters.
+ */
+ spp = js_SearchScope(scope, id, JS_TRUE);
+ sprop = overwriting = SPROP_FETCH(spp);
+ if (!sprop) {
+ /* Check whether we need to grow, if the load factor is >= .75. */
+ size = SCOPE_CAPACITY(scope);
+ if (scope->entryCount + scope->removedCount >= size - (size >> 2)) {
+ if (scope->removedCount >= size >> 2) {
+ METER(compresses);
+ change = 0;
+ } else {
+ METER(grows);
+ change = 1;
+ }
+ if (!ChangeScope(cx, scope, change) &&
+ scope->entryCount + scope->removedCount == size - 1) {
+ METER(addFailures);
+ return NULL;
+ }
+ spp = js_SearchScope(scope, id, JS_TRUE);
+ JS_ASSERT(!SPROP_FETCH(spp));
+ }
+ } else {
+ /* Property exists: js_SearchScope must have returned a valid entry. */
+ JS_ASSERT(!SPROP_IS_REMOVED(*spp));
+
+ /*
+ * If all property members match, this is a redundant add and we can
+ * return early. If the caller wants to allocate a slot, but doesn't
+ * care which slot, copy sprop->slot into slot so we can match sprop,
+ * if all other members match.
+ */
+ if (!(attrs & JSPROP_SHARED) &&
+ slot == SPROP_INVALID_SLOT &&
+ SPROP_HAS_VALID_SLOT(sprop, scope)) {
+ slot = sprop->slot;
+ }
+ if (SPROP_MATCH_PARAMS_AFTER_ID(sprop, getter, setter, slot, attrs,
+ flags, shortid)) {
+ METER(redundantAdds);
+ return sprop;
+ }
+
+ /*
+ * Duplicate formal parameters require us to leave the old property
+ * on the ancestor line, so the decompiler can find it, even though
+ * its entry in scope->table is overwritten to point at a new property
+ * descending from the old one. The SPROP_IS_DUPLICATE flag helps us
+ * cope with the consequent disparity between ancestor line height and
+ * scope->entryCount.
+ */
+ if (flags & SPROP_IS_DUPLICATE) {
+ sprop->flags |= SPROP_IS_DUPLICATE;
+ } else {
+ /*
+ * If we are clearing sprop to force an existing property to be
+ * overwritten (apart from a duplicate formal parameter), we must
+ * unlink it from the ancestor line at scope->lastProp, lazily if
+ * sprop is not lastProp. And we must remove the entry at *spp,
+ * precisely so the lazy "middle delete" fixup code further below
+ * won't find sprop in scope->table, in spite of sprop being on
+ * the ancestor line.
+ *
+ * When we finally succeed in finding or creating a new sprop
+ * and storing its pointer at *spp, we'll use the |overwriting|
+ * local saved when we first looked up id to decide whether we're
+ * indeed creating a new entry, or merely overwriting an existing
+ * property.
+ */
+ if (sprop == SCOPE_LAST_PROP(scope)) {
+ do {
+ SCOPE_REMOVE_LAST_PROP(scope);
+ if (!SCOPE_HAD_MIDDLE_DELETE(scope))
+ break;
+ sprop = SCOPE_LAST_PROP(scope);
+ } while (sprop && !SCOPE_HAS_PROPERTY(scope, sprop));
+ } else if (!SCOPE_HAD_MIDDLE_DELETE(scope)) {
+ /*
+ * If we have no hash table yet, we need one now. The middle
+ * delete code is simple-minded that way!
+ */
+ if (!scope->table) {
+ if (!CreateScopeTable(cx, scope, JS_TRUE))
+ return NULL;
+ spp = js_SearchScope(scope, id, JS_TRUE);
+ sprop = overwriting = SPROP_FETCH(spp);
+ }
+ SCOPE_SET_MIDDLE_DELETE(scope);
+ }
+ }
+
+ /*
+ * If we fail later on trying to find or create a new sprop, we will
+ * goto fail_overwrite and restore *spp from |overwriting|. Note that
+ * we don't bother to keep scope->removedCount in sync, because we'll
+ * fix up *spp and scope->entryCount shortly, no matter how control
+ * flow returns from this function.
+ */
+ if (scope->table)
+ SPROP_STORE_PRESERVING_COLLISION(spp, NULL);
+ scope->entryCount--;
+ CHECK_ANCESTOR_LINE(scope, JS_TRUE);
+ sprop = NULL;
+ }
+
+ if (!sprop) {
+ /*
+ * If properties were deleted from the middle of the list starting at
+ * scope->lastProp, we may need to fork the property tree and squeeze
+ * all deleted properties out of scope's ancestor line. Otherwise we
+ * risk adding a node with the same id as a "middle" node, violating
+ * the rule that properties along an ancestor line have distinct ids
+ * (unless flagged SPROP_IS_DUPLICATE).
+ */
+ if (SCOPE_HAD_MIDDLE_DELETE(scope)) {
+ JS_ASSERT(scope->table);
+ CHECK_ANCESTOR_LINE(scope, JS_TRUE);
+
+ splen = scope->entryCount;
+ if (splen == 0) {
+ JS_ASSERT(scope->lastProp == NULL);
+ } else {
+ /*
+ * Enumerate live entries in scope->table using a temporary
+ * vector, by walking the (possibly sparse, due to deletions)
+ * ancestor line from scope->lastProp.
+ */
+ spvec = (JSScopeProperty **)
+ JS_malloc(cx, SCOPE_TABLE_NBYTES(splen));
+ if (!spvec)
+ goto fail_overwrite;
+ i = splen;
+ sprop = SCOPE_LAST_PROP(scope);
+ JS_ASSERT(sprop);
+ do {
+ /*
+ * NB: test SCOPE_GET_PROPERTY, not SCOPE_HAS_PROPERTY --
+ * the latter insists that sprop->id maps to sprop, while
+ * the former simply tests whether sprop->id is bound in
+ * scope. We must allow for duplicate formal parameters
+ * along the ancestor line, and fork them as needed.
+ */
+ if (!SCOPE_GET_PROPERTY(scope, sprop->id))
+ continue;
+
+ JS_ASSERT(sprop != overwriting);
+ if (i == 0) {
+ /*
+ * If our original splen estimate, scope->entryCount,
+ * is less than the ancestor line height, there must
+ * be duplicate formal parameters in this (function
+ * object) scope. Count remaining ancestors in order
+ * to realloc spvec.
+ */
+ JSScopeProperty *tmp = sprop;
+ do {
+ if (SCOPE_GET_PROPERTY(scope, tmp->id))
+ i++;
+ } while ((tmp = tmp->parent) != NULL);
+ spp2 = (JSScopeProperty **)
+ JS_realloc(cx, spvec, SCOPE_TABLE_NBYTES(splen+i));
+ if (!spp2) {
+ JS_free(cx, spvec);
+ goto fail_overwrite;
+ }
+
+ spvec = spp2;
+ memmove(spvec + i, spvec, SCOPE_TABLE_NBYTES(splen));
+ splen += i;
+ }
+
+ spvec[--i] = sprop;
+ } while ((sprop = sprop->parent) != NULL);
+ JS_ASSERT(i == 0);
+
+ /*
+ * Now loop forward through spvec, forking the property tree
+ * whenever we see a "parent gap" due to deletions from scope.
+ * NB: sprop is null on first entry to the loop body.
+ */
+ do {
+ if (spvec[i]->parent == sprop) {
+ sprop = spvec[i];
+ } else {
+ sprop = GetPropertyTreeChild(cx, sprop, spvec[i]);
+ if (!sprop) {
+ JS_free(cx, spvec);
+ goto fail_overwrite;
+ }
+
+ spp2 = js_SearchScope(scope, sprop->id, JS_FALSE);
+ JS_ASSERT(SPROP_FETCH(spp2) == spvec[i]);
+ SPROP_STORE_PRESERVING_COLLISION(spp2, sprop);
+ }
+ } while (++i < splen);
+ JS_free(cx, spvec);
+
+ /*
+ * Now sprop points to the last property in scope, where the
+ * ancestor line from sprop to the root is dense w.r.t. scope:
+ * it contains no nodes not mapped by scope->table, apart from
+ * any stinking ECMA-mandated duplicate formal parameters.
+ */
+ scope->lastProp = sprop;
+ CHECK_ANCESTOR_LINE(scope, JS_FALSE);
+ JS_RUNTIME_METER(cx->runtime, middleDeleteFixups);
+ }
+
+ SCOPE_CLR_MIDDLE_DELETE(scope);
+ }
+
+ /*
+ * Aliases share another property's slot, passed in the |slot| param.
+ * Shared properties have no slot. Unshared properties that do not
+ * alias another property's slot get one here, but may lose it due to
+ * a JS_ClearScope call.
+ */
+ if (!(flags & SPROP_IS_ALIAS)) {
+ if (attrs & JSPROP_SHARED) {
+ slot = SPROP_INVALID_SLOT;
+ } else {
+ /*
+ * We may have set slot from a nearly-matching sprop, above.
+ * If so, we're overwriting that nearly-matching sprop, so we
+ * can reuse its slot -- we don't need to allocate a new one.
+ * Callers should therefore pass SPROP_INVALID_SLOT for all
+ * non-alias, unshared property adds.
+ */
+ if (slot != SPROP_INVALID_SLOT)
+ JS_ASSERT(overwriting);
+ else if (!js_AllocSlot(cx, scope->object, &slot))
+ goto fail_overwrite;
+ }
+ }
+
+ /*
+ * Check for a watchpoint on a deleted property; if one exists, change
+ * setter to js_watch_set.
+ * XXXbe this could get expensive with lots of watchpoints...
+ */
+ if (!JS_CLIST_IS_EMPTY(&cx->runtime->watchPointList) &&
+ js_FindWatchPoint(cx->runtime, scope, id)) {
+ JS_PUSH_TEMP_ROOT_SPROP(cx, overwriting, &tvr);
+ setter = js_WrapWatchedSetter(cx, id, attrs, setter);
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ if (!setter)
+ goto fail_overwrite;
+ }
+
+ /* Find or create a property tree node labeled by our arguments. */
+ child.id = id;
+ child.getter = getter;
+ child.setter = setter;
+ child.slot = slot;
+ child.attrs = attrs;
+ child.flags = flags;
+ child.shortid = shortid;
+ sprop = GetPropertyTreeChild(cx, scope->lastProp, &child);
+ if (!sprop)
+ goto fail_overwrite;
+
+ /* Store the tree node pointer in the table entry for id. */
+ if (scope->table)
+ SPROP_STORE_PRESERVING_COLLISION(spp, sprop);
+ scope->entryCount++;
+ scope->lastProp = sprop;
+ CHECK_ANCESTOR_LINE(scope, JS_FALSE);
+ if (!overwriting) {
+ JS_RUNTIME_METER(cx->runtime, liveScopeProps);
+ JS_RUNTIME_METER(cx->runtime, totalScopeProps);
+ }
+
+ /*
+ * If we reach the hashing threshold, try to allocate scope->table.
+ * If we can't (a rare event, preceded by swapping to death on most
+ * modern OSes), stick with linear search rather than whining about
+ * this little set-back. Therefore we must test !scope->table and
+ * scope->entryCount >= SCOPE_HASH_THRESHOLD, not merely whether the
+ * entry count just reached the threshold.
+ */
+ if (!scope->table && scope->entryCount >= SCOPE_HASH_THRESHOLD)
+ (void) CreateScopeTable(cx, scope, JS_FALSE);
+ }
+
+ METER(adds);
+ return sprop;
+
+fail_overwrite:
+ if (overwriting) {
+ /*
+ * We may or may not have forked overwriting out of scope's ancestor
+ * line, so we must check (the alternative is to set a flag above, but
+ * that hurts the common, non-error case). If we did fork overwriting
+ * out, we'll add it back at scope->lastProp. This means enumeration
+ * order can change due to a failure to overwrite an id.
+ * XXXbe very minor incompatibility
+ */
+ for (sprop = SCOPE_LAST_PROP(scope); ; sprop = sprop->parent) {
+ if (!sprop) {
+ sprop = SCOPE_LAST_PROP(scope);
+ if (overwriting->parent == sprop) {
+ scope->lastProp = overwriting;
+ } else {
+ sprop = GetPropertyTreeChild(cx, sprop, overwriting);
+ if (sprop) {
+ JS_ASSERT(sprop != overwriting);
+ scope->lastProp = sprop;
+ }
+ overwriting = sprop;
+ }
+ break;
+ }
+ if (sprop == overwriting)
+ break;
+ }
+ if (overwriting) {
+ if (scope->table)
+ SPROP_STORE_PRESERVING_COLLISION(spp, overwriting);
+ scope->entryCount++;
+ }
+ CHECK_ANCESTOR_LINE(scope, JS_TRUE);
+ }
+ METER(addFailures);
+ return NULL;
+}
+
+JSScopeProperty *
+js_ChangeScopePropertyAttrs(JSContext *cx, JSScope *scope,
+ JSScopeProperty *sprop, uintN attrs, uintN mask,
+ JSPropertyOp getter, JSPropertyOp setter)
+{
+ JSScopeProperty child, *newsprop, **spp;
+
+ CHECK_ANCESTOR_LINE(scope, JS_TRUE);
+
+ /* Allow only shared (slot-less) => unshared (slot-full) transition. */
+ attrs |= sprop->attrs & mask;
+ JS_ASSERT(!((attrs ^ sprop->attrs) & JSPROP_SHARED) ||
+ !(attrs & JSPROP_SHARED));
+ if (getter == JS_PropertyStub)
+ getter = NULL;
+ if (setter == JS_PropertyStub)
+ setter = NULL;
+ if (sprop->attrs == attrs &&
+ sprop->getter == getter &&
+ sprop->setter == setter) {
+ return sprop;
+ }
+
+ child.id = sprop->id;
+ child.getter = getter;
+ child.setter = setter;
+ child.slot = sprop->slot;
+ child.attrs = attrs;
+ child.flags = sprop->flags;
+ child.shortid = sprop->shortid;
+
+ if (SCOPE_LAST_PROP(scope) == sprop) {
+ /*
+ * Optimize the case where the last property added to scope is changed
+ * to have a different attrs, getter, or setter. In the last property
+ * case, we need not fork the property tree. But since we do not call
+ * js_AddScopeProperty, we may need to allocate a new slot directly.
+ */
+ if ((sprop->attrs & JSPROP_SHARED) && !(attrs & JSPROP_SHARED)) {
+ JS_ASSERT(child.slot == SPROP_INVALID_SLOT);
+ if (!js_AllocSlot(cx, scope->object, &child.slot))
+ return NULL;
+ }
+
+ newsprop = GetPropertyTreeChild(cx, sprop->parent, &child);
+ if (newsprop) {
+ spp = js_SearchScope(scope, sprop->id, JS_FALSE);
+ JS_ASSERT(SPROP_FETCH(spp) == sprop);
+
+ if (scope->table)
+ SPROP_STORE_PRESERVING_COLLISION(spp, newsprop);
+ scope->lastProp = newsprop;
+ CHECK_ANCESTOR_LINE(scope, JS_TRUE);
+ }
+ } else {
+ /*
+ * Let js_AddScopeProperty handle this |overwriting| case, including
+ * the conservation of sprop->slot (if it's valid). We must not call
+ * js_RemoveScopeProperty here, it will free a valid sprop->slot and
+ * js_AddScopeProperty won't re-allocate it.
+ */
+ newsprop = js_AddScopeProperty(cx, scope, child.id,
+ child.getter, child.setter, child.slot,
+ child.attrs, child.flags, child.shortid);
+ }
+
+#ifdef DUMP_SCOPE_STATS
+ if (!newsprop)
+ METER(changeFailures);
+#endif
+ return newsprop;
+}
+
+JSBool
+js_RemoveScopeProperty(JSContext *cx, JSScope *scope, jsid id)
+{
+ JSScopeProperty **spp, *stored, *sprop;
+ uint32 size;
+
+ JS_ASSERT(JS_IS_SCOPE_LOCKED(cx, scope));
+ CHECK_ANCESTOR_LINE(scope, JS_TRUE);
+ if (SCOPE_IS_SEALED(scope)) {
+ ReportReadOnlyScope(cx, scope);
+ return JS_FALSE;
+ }
+ METER(removes);
+
+ spp = js_SearchScope(scope, id, JS_FALSE);
+ stored = *spp;
+ sprop = SPROP_CLEAR_COLLISION(stored);
+ if (!sprop) {
+ METER(uselessRemoves);
+ return JS_TRUE;
+ }
+
+ /* Convert from a list to a hash so we can handle "middle deletes". */
+ if (!scope->table && sprop != scope->lastProp) {
+ if (!CreateScopeTable(cx, scope, JS_TRUE))
+ return JS_FALSE;
+ spp = js_SearchScope(scope, id, JS_FALSE);
+ stored = *spp;
+ sprop = SPROP_CLEAR_COLLISION(stored);
+ }
+
+ /* First, if sprop is unshared and not cleared, free its slot number. */
+ if (SPROP_HAS_VALID_SLOT(sprop, scope)) {
+ js_FreeSlot(cx, scope->object, sprop->slot);
+ JS_ATOMIC_INCREMENT(&cx->runtime->propertyRemovals);
+ }
+
+ /* Next, remove id by setting its entry to a removed or free sentinel. */
+ if (SPROP_HAD_COLLISION(stored)) {
+ JS_ASSERT(scope->table);
+ *spp = SPROP_REMOVED;
+ scope->removedCount++;
+ } else {
+ METER(removeFrees);
+ if (scope->table)
+ *spp = NULL;
+ }
+ scope->entryCount--;
+ JS_RUNTIME_UNMETER(cx->runtime, liveScopeProps);
+
+ /* Update scope->lastProp directly, or set its deferred update flag. */
+ if (sprop == SCOPE_LAST_PROP(scope)) {
+ do {
+ SCOPE_REMOVE_LAST_PROP(scope);
+ if (!SCOPE_HAD_MIDDLE_DELETE(scope))
+ break;
+ sprop = SCOPE_LAST_PROP(scope);
+ } while (sprop && !SCOPE_HAS_PROPERTY(scope, sprop));
+ } else if (!SCOPE_HAD_MIDDLE_DELETE(scope)) {
+ SCOPE_SET_MIDDLE_DELETE(scope);
+ }
+ CHECK_ANCESTOR_LINE(scope, JS_TRUE);
+
+ /* Last, consider shrinking scope's table if its load factor is <= .25. */
+ size = SCOPE_CAPACITY(scope);
+ if (size > MIN_SCOPE_SIZE && scope->entryCount <= size >> 2) {
+ METER(shrinks);
+ (void) ChangeScope(cx, scope, -1);
+ }
+
+ return JS_TRUE;
+}
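+
+/*
+ * js_AddScopeProperty and js_RemoveScopeProperty size scope->table by the
+ * same load-factor policy: adds grow (or compress away removed sentinels)
+ * once the table is 3/4 full, and removes shrink once it falls to 1/4 full,
+ * never below MIN_SCOPE_SIZE. A condensed, standalone restatement of that
+ * decision (hypothetical names; 16 stands in for MIN_SCOPE_SIZE):
+ */
+#if 0 /* illustrative only, not compiled */
+#include <stdint.h>
+
+enum { SKETCH_LEAVE = 2 };   /* out-of-band "no resize" answer */
+
+static int
+SketchResizeHint(uint32_t size, uint32_t entryCount, uint32_t removedCount,
+                 int adding)
+{
+    if (adding && entryCount + removedCount >= size - (size >> 2))
+        return (removedCount >= size >> 2) ? 0 : 1;   /* compress : double */
+    if (!adding && size > 16 && entryCount <= size >> 2)
+        return -1;                                    /* halve */
+    return SKETCH_LEAVE;
+}
+#endif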
+
+void
+js_ClearScope(JSContext *cx, JSScope *scope)
+{
+ CHECK_ANCESTOR_LINE(scope, JS_TRUE);
+#ifdef DEBUG
+ JS_LOCK_RUNTIME_VOID(cx->runtime,
+ cx->runtime->liveScopeProps -= scope->entryCount);
+#endif
+
+ if (scope->table)
+ free(scope->table);
+ SCOPE_CLR_MIDDLE_DELETE(scope);
+ InitMinimalScope(scope);
+ JS_ATOMIC_INCREMENT(&cx->runtime->propertyRemovals);
+}
+
+void
+js_MarkId(JSContext *cx, jsid id)
+{
+ if (JSID_IS_ATOM(id))
+ GC_MARK_ATOM(cx, JSID_TO_ATOM(id));
+ else if (JSID_IS_OBJECT(id))
+ GC_MARK(cx, JSID_TO_OBJECT(id), "id");
+ else
+ JS_ASSERT(JSID_IS_INT(id));
+}
+
+#if defined GC_MARK_DEBUG || defined DUMP_SCOPE_STATS
+# include "jsprf.h"
+#endif
+
+void
+js_MarkScopeProperty(JSContext *cx, JSScopeProperty *sprop)
+{
+ sprop->flags |= SPROP_MARK;
+ MARK_ID(cx, sprop->id);
+
+#if JS_HAS_GETTER_SETTER
+ if (sprop->attrs & (JSPROP_GETTER | JSPROP_SETTER)) {
+#ifdef GC_MARK_DEBUG
+ char buf[64];
+ char buf2[11];
+ const char *id;
+
+ if (JSID_IS_ATOM(sprop->id)) {
+ JSAtom *atom = JSID_TO_ATOM(sprop->id);
+
+ id = (atom && ATOM_IS_STRING(atom))
+ ? JS_GetStringBytes(ATOM_TO_STRING(atom))
+ : "unknown";
+ } else if (JSID_IS_INT(sprop->id)) {
+ JS_snprintf(buf2, sizeof buf2, "%d", JSID_TO_INT(sprop->id));
+ id = buf2;
+ } else {
+ id = "<object>";
+ }
+#endif
+
+ if (sprop->attrs & JSPROP_GETTER) {
+#ifdef GC_MARK_DEBUG
+ JS_snprintf(buf, sizeof buf, "%s %s",
+ id, js_getter_str);
+#endif
+ GC_MARK(cx, JSVAL_TO_GCTHING((jsval) sprop->getter), buf);
+ }
+ if (sprop->attrs & JSPROP_SETTER) {
+#ifdef GC_MARK_DEBUG
+ JS_snprintf(buf, sizeof buf, "%s %s",
+ id, js_setter_str);
+#endif
+ GC_MARK(cx, JSVAL_TO_GCTHING((jsval) sprop->setter), buf);
+ }
+ }
+#endif /* JS_HAS_GETTER_SETTER */
+}
+
+#ifdef DUMP_SCOPE_STATS
+
+#include <stdio.h>
+#include <math.h>
+
+uint32 js_nkids_max;
+uint32 js_nkids_sum;
+double js_nkids_sqsum;
+uint32 js_nkids_hist[11];
+
+static void
+MeterKidCount(uintN nkids)
+{
+ if (nkids) {
+ js_nkids_sum += nkids;
+ js_nkids_sqsum += (double)nkids * nkids;
+ if (nkids > js_nkids_max)
+ js_nkids_max = nkids;
+ }
+ js_nkids_hist[JS_MIN(nkids, 10)]++;
+}
+
+static void
+MeterPropertyTree(JSScopeProperty *node)
+{
+ uintN i, nkids;
+ JSScopeProperty *kids, *kid;
+ PropTreeKidsChunk *chunk;
+
+ nkids = 0;
+ kids = node->kids;
+ if (kids) {
+ if (KIDS_IS_CHUNKY(kids)) {
+ for (chunk = KIDS_TO_CHUNK(kids); chunk; chunk = chunk->next) {
+ for (i = 0; i < MAX_KIDS_PER_CHUNK; i++) {
+ kid = chunk->kids[i];
+ if (!kid)
+ break;
+ MeterPropertyTree(kid);
+ nkids++;
+ }
+ }
+ } else {
+ MeterPropertyTree(kids);
+ nkids = 1;
+ }
+ }
+
+ MeterKidCount(nkids);
+}
+
+JS_STATIC_DLL_CALLBACK(JSDHashOperator)
+js_MeterPropertyTree(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 number,
+ void *arg)
+{
+ JSPropertyTreeEntry *entry = (JSPropertyTreeEntry *)hdr;
+
+ MeterPropertyTree(entry->child);
+ return JS_DHASH_NEXT;
+}
+
+static void
+DumpSubtree(JSScopeProperty *sprop, int level, FILE *fp)
+{
+ char buf[10];
+ JSScopeProperty *kids, *kid;
+ PropTreeKidsChunk *chunk;
+ uintN i;
+
+ fprintf(fp, "%*sid %s g/s %p/%p slot %lu attrs %x flags %x shortid %d\n",
+ level, "",
+ JSID_IS_ATOM(sprop->id)
+ ? JS_GetStringBytes(ATOM_TO_STRING(JSID_TO_ATOM(sprop->id)))
+ : JSID_IS_OBJECT(sprop->id)
+ ? js_ValueToPrintableString(cx, OBJECT_JSID_TO_JSVAL(sprop->id))
+ : (JS_snprintf(buf, sizeof buf, "%ld", JSVAL_TO_INT(sprop->id)),
+ buf),
+ (void *) sprop->getter, (void *) sprop->setter,
+ (unsigned long) sprop->slot, sprop->attrs, sprop->flags,
+ sprop->shortid);
+ kids = sprop->kids;
+ if (kids) {
+ ++level;
+ if (KIDS_IS_CHUNKY(kids)) {
+ chunk = KIDS_TO_CHUNK(kids);
+ do {
+ for (i = 0; i < MAX_KIDS_PER_CHUNK; i++) {
+ kid = chunk->kids[i];
+ if (!kid)
+ break;
+ JS_ASSERT(kid->parent == sprop);
+ DumpSubtree(kid, level, fp);
+ }
+ } while ((chunk = chunk->next) != NULL);
+ } else {
+ kid = kids;
+ DumpSubtree(kid, level, fp);
+ }
+ }
+}
+
+#endif /* DUMP_SCOPE_STATS */
+
+void
+js_SweepScopeProperties(JSRuntime *rt)
+{
+ JSArena **ap, *a;
+ JSScopeProperty *limit, *sprop, *parent, *kids, *kid;
+ uintN liveCount;
+ PropTreeKidsChunk *chunk, *nextChunk, *freeChunk;
+ uintN i;
+
+#ifdef DUMP_SCOPE_STATS
+ uint32 livePropCapacity = 0, totalLiveCount = 0;
+ static FILE *logfp;
+ if (!logfp)
+ logfp = fopen("/tmp/proptree.stats", "a");
+
+ MeterKidCount(rt->propertyTreeHash.entryCount);
+ JS_DHashTableEnumerate(&rt->propertyTreeHash, js_MeterPropertyTree, NULL);
+
+ {
+ double mean = 0.0, var = 0.0, sigma = 0.0;
+ double nodesum = rt->livePropTreeNodes;
+ double kidsum = js_nkids_sum;
+ if (nodesum > 0 && kidsum >= 0) {
+ mean = kidsum / nodesum;
+ var = nodesum * js_nkids_sqsum - kidsum * kidsum;
+ if (var < 0.0 || nodesum <= 1)
+ var = 0.0;
+ else
+ var /= nodesum * (nodesum - 1);
+
+ /* Windows says sqrt(0.0) is "-1.#J" (?!) so we must test. */
+ sigma = (var != 0.0) ? sqrt(var) : 0.0;
+ }
+
+ fprintf(logfp,
+ "props %u nodes %g beta %g meankids %g sigma %g max %u",
+ rt->liveScopeProps, nodesum, nodesum / rt->liveScopeProps,
+ mean, sigma, js_nkids_max);
+ }
+
+ fprintf(logfp, " histogram %u %u %u %u %u %u %u %u %u %u %u",
+ js_nkids_hist[0], js_nkids_hist[1],
+ js_nkids_hist[2], js_nkids_hist[3],
+ js_nkids_hist[4], js_nkids_hist[5],
+ js_nkids_hist[6], js_nkids_hist[7],
+ js_nkids_hist[8], js_nkids_hist[9],
+ js_nkids_hist[10]);
+ js_nkids_sum = js_nkids_max = 0;
+ js_nkids_sqsum = 0;
+ memset(js_nkids_hist, 0, sizeof js_nkids_hist);
+#endif
+
+ ap = &rt->propertyArenaPool.first.next;
+ while ((a = *ap) != NULL) {
+ limit = (JSScopeProperty *) a->avail;
+ liveCount = 0;
+ for (sprop = (JSScopeProperty *) a->base; sprop < limit; sprop++) {
+ /* If the id is null, sprop is already on the freelist. */
+ if (sprop->id == JSVAL_NULL)
+ continue;
+
+ /* If the mark bit is set, sprop is alive, so we skip it. */
+ if (sprop->flags & SPROP_MARK) {
+ sprop->flags &= ~SPROP_MARK;
+ liveCount++;
+ continue;
+ }
+
+ /* Ok, sprop is garbage to collect: unlink it from its parent. */
+ freeChunk = RemovePropertyTreeChild(rt, sprop);
+
+ /*
+ * Take care to reparent all sprop's kids to their grandparent.
+ * InsertPropertyTreeChild can potentially fail for two reasons:
+ *
+ * 1. If parent is null, insertion into the root property hash
+ * table may fail. We are forced to leave the kid out of the
+ * table (as can already happen with duplicates) but ensure
+ * that the kid's parent pointer is set to null.
+ *
+ * 2. If parent is non-null, allocation of a new KidsChunk can
+ * fail. To prevent this from happening, we allow sprop's own
+ * chunks to be reused by the grandparent, which removes the
+ * need for InsertPropertyTreeChild to malloc a new KidsChunk.
+ *
+ * If sprop does not have chunky kids, then we rely on the
+ * RemovePropertyTreeChild call above (which removed sprop from
+ * its parent) either leaving one free entry, or else returning
+ * the now-unused chunk to us so we can reuse it.
+ *
+ * We also require the grandparent to have either no kids or else
+ * chunky kids. A single non-chunky kid would force a new chunk to
+ * be malloced in some cases (if sprop had a single non-chunky
+ * kid, or a multiple of MAX_KIDS_PER_CHUNK kids). Note that
+ * RemovePropertyTreeChild never converts a single-entry chunky
+ * kid back to a non-chunky kid, so we are assured of correct
+ * behaviour.
+ */
+ kids = sprop->kids;
+ if (kids) {
+ sprop->kids = NULL;
+ parent = sprop->parent;
+ /* Validate that grandparent has no kids or chunky kids. */
+ JS_ASSERT(!parent || !parent->kids ||
+ KIDS_IS_CHUNKY(parent->kids));
+ if (KIDS_IS_CHUNKY(kids)) {
+ chunk = KIDS_TO_CHUNK(kids);
+ do {
+ nextChunk = chunk->next;
+ chunk->next = NULL;
+ for (i = 0; i < MAX_KIDS_PER_CHUNK; i++) {
+ kid = chunk->kids[i];
+ if (!kid)
+ break;
+ JS_ASSERT(kid->parent == sprop);
+
+ /*
+ * Clear a space in the kids array for possible
+ * re-use by InsertPropertyTreeChild.
+ */
+ chunk->kids[i] = NULL;
+ if (!InsertPropertyTreeChild(rt, parent, kid,
+ chunk)) {
+ /*
+ * This can happen only if we failed to add an
+ * entry to the root property hash table.
+ */
+ JS_ASSERT(!parent);
+ kid->parent = NULL;
+ }
+ }
+ if (!chunk->kids[0]) {
+ /* The chunk wasn't reused, so we must free it. */
+ DestroyPropTreeKidsChunk(rt, chunk);
+ }
+ } while ((chunk = nextChunk) != NULL);
+ } else {
+ kid = kids;
+ if (!InsertPropertyTreeChild(rt, parent, kid, freeChunk)) {
+ /*
+ * This can happen only if we failed to add an entry
+ * to the root property hash table.
+ */
+ JS_ASSERT(!parent);
+ kid->parent = NULL;
+ }
+ }
+ }
+
+ if (freeChunk && !freeChunk->kids[0]) {
+ /* The chunk wasn't reused, so we must free it. */
+ DestroyPropTreeKidsChunk(rt, freeChunk);
+ }
+
+ /* Clear id so we know (above) that sprop is on the freelist. */
+ sprop->id = JSVAL_NULL;
+ FREENODE_INSERT(rt->propertyFreeList, sprop);
+ JS_RUNTIME_UNMETER(rt, livePropTreeNodes);
+ }
+
+ /* If a contains no live properties, return it to the malloc heap. */
+ if (liveCount == 0) {
+ for (sprop = (JSScopeProperty *) a->base; sprop < limit; sprop++)
+ FREENODE_REMOVE(sprop);
+ JS_ARENA_DESTROY(&rt->propertyArenaPool, a, ap);
+ } else {
+#ifdef DUMP_SCOPE_STATS
+ livePropCapacity += limit - (JSScopeProperty *) a->base;
+ totalLiveCount += liveCount;
+#endif
+ ap = &a->next;
+ }
+ }
+
+#ifdef DUMP_SCOPE_STATS
+ fprintf(logfp, " arenautil %g%%\n",
+ (totalLiveCount * 100.0) / livePropCapacity);
+ fflush(logfp);
+#endif
+
+#ifdef DUMP_PROPERTY_TREE
+ {
+ FILE *dumpfp = fopen("/tmp/proptree.dump", "w");
+ if (dumpfp) {
+ JSPropertyTreeEntry *pte, *end;
+
+ pte = (JSPropertyTreeEntry *) rt->propertyTreeHash.entryStore;
+ end = pte + JS_DHASH_TABLE_SIZE(&rt->propertyTreeHash);
+ while (pte < end) {
+ if (pte->child)
+ DumpSubtree(pte->child, 0, dumpfp);
+ pte++;
+ }
+ fclose(dumpfp);
+ }
+ }
+#endif
+}
+
+JSBool
+js_InitPropertyTree(JSRuntime *rt)
+{
+ if (!JS_DHashTableInit(&rt->propertyTreeHash, &PropertyTreeHashOps, NULL,
+ sizeof(JSPropertyTreeEntry), JS_DHASH_MIN_SIZE)) {
+ rt->propertyTreeHash.ops = NULL;
+ return JS_FALSE;
+ }
+ JS_InitArenaPool(&rt->propertyArenaPool, "properties",
+ 256 * sizeof(JSScopeProperty), sizeof(void *));
+ return JS_TRUE;
+}
+
+void
+js_FinishPropertyTree(JSRuntime *rt)
+{
+ if (rt->propertyTreeHash.ops) {
+ JS_DHashTableFinish(&rt->propertyTreeHash);
+ rt->propertyTreeHash.ops = NULL;
+ }
+ JS_FinishArenaPool(&rt->propertyArenaPool);
+}
diff --git a/third_party/js-1.7/jsscope.h b/third_party/js-1.7/jsscope.h
new file mode 100644
index 0000000..0565d4d
--- /dev/null
+++ b/third_party/js-1.7/jsscope.h
@@ -0,0 +1,407 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sw=4 et tw=78:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsscope_h___
+#define jsscope_h___
+/*
+ * JS symbol tables.
+ */
+#include "jstypes.h"
+#include "jsobj.h"
+#include "jsprvtd.h"
+#include "jspubtd.h"
+
+#ifdef JS_THREADSAFE
+# include "jslock.h"
+#endif
+
+/*
+ * Given P independent, non-unique properties each of size S words mapped by
+ * all scopes in a runtime, construct a property tree of N nodes each of size
+ * S+L words (L for tree linkage). A nominal L value is 2 for leftmost-child
+ * and right-sibling links. We hope that the N < P by enough that the space
+ * overhead of L, and the overhead of scope entries pointing at property tree
+ * nodes, is worth it.
+ *
+ * The tree construction goes as follows. If any empty scope in the runtime
+ * has a property X added to it, find or create a node under the tree root
+ * labeled X, and set scope->lastProp to point at that node. If any non-empty
+ * scope whose most recently added property is labeled Y has another property
+ * labeled Z added, find or create a node for Z under the node that was added
+ * for Y, and set scope->lastProp to point at that node.
+ *
+ * A property is labeled by its members' values: id, getter, setter, slot,
+ * attributes, tiny or short id, and a field telling for..in order. Note that
+ * labels are not unique in the tree, but they are unique among a node's kids
+ * (barring rare and benign multi-threaded race condition outcomes, see below)
+ * and along any ancestor line from the tree root to a given leaf node (except
+ * for the hard case of duplicate formal parameters to a function).
+ *
+ * Thus the root of the tree represents all empty scopes, and the first ply
+ * of the tree represents all scopes containing one property, etc. Each node
+ * in the tree can stand for any number of scopes having the same ordered set
+ * of properties, where that node was the last added to the scope. (We need
+ * not store the root of the tree as a node, and do not -- all we need are
+ * links to its kids.)
+ *
+ * Sidebar on for..in loop order: ECMA requires no particular order, but this
+ * implementation has promised and delivered property definition order, and
+ * compatibility is king. We could use an order number per property, which
+ * would require a sort in js_Enumerate, and an entry order generation number
+ * per scope. An order number beats a list, which should be doubly-linked for
+ * O(1) delete. An even better scheme is to use a parent link in the property
+ * tree, so that the ancestor line can be iterated from scope->lastProp when
+ * filling in a JSIdArray from back to front. This parent link also helps the
+ * GC to sweep properties iteratively.
+ *
+ * What if a property Y is deleted from a scope? If Y is the last property in
+ * the scope, we simply adjust the scope's lastProp member after we remove the
+ * scope's hash-table entry pointing at that property node. The parent link
+ * mentioned in the for..in sidebar above makes this adjustment O(1). But if
+ * Y comes between X and Z in the scope, then we might have to "fork" the tree
+ * at X, leaving X->Y->Z in case other scopes have those properties added in
+ * that order; and to finish the fork, we'd add a node labeled Z with the path
+ * X->Z, if it doesn't exist. This could lead to lots of extra nodes, and to
+ * O(n^2) growth when deleting lots of properties.
+ *
+ * Rather, for O(1) growth all around, we should share the path X->Y->Z among
+ * scopes having those three properties added in that order, and among scopes
+ * having only X->Z where Y was deleted. All such scopes have a lastProp that
+ * points to the Z child of Y. But a scope in which Y was deleted does not
+ * have a table entry for Y, and when iterating that scope by traversing the
+ * ancestor line from Z, we will have to test for a table entry for each node,
+ * skipping nodes that lack entries.
+ *
+ * What if we add Y again? X->Y->Z->Y is wrong and we'll enumerate Y twice.
+ * Therefore we must fork in such a case, if not earlier. Because delete is
+ * "bursty", we should not fork eagerly. Delaying a fork till we are at risk
+ * of adding Y after it was deleted already requires a flag in the JSScope, to
+ * wit, SCOPE_MIDDLE_DELETE.
+ *
+ * What about thread safety? If the property tree operations done by requests
+ * are find-node and insert-node, then the only hazard is duplicate insertion.
+ * This is harmless except for minor bloat. When all requests have ended or
+ * been suspended, the GC is free to sweep the tree after marking all nodes
+ * reachable from scopes, performing remove-node operations as needed. Note
+ * also that the stable storage of the property nodes during active requests
+ * permits the property cache (see jsinterp.h) to dereference JSScopeProperty
+ * weak references safely.
+ *
+ * Is the property tree worth it compared to property storage in each table's
+ * entries? To decide, we must find the relation <> between the words used
+ * with a property tree and the words required without a tree.
+ *
+ * Model all scopes as one super-scope of capacity T entries (T a power of 2).
+ * Let alpha be the load factor of this double hash-table. With the property
+ * tree, each entry in the table is a word-sized pointer to a node that can be
+ * shared by many scopes. But all such pointers are overhead compared to the
+ * situation without the property tree, where the table stores property nodes
+ * directly, as entries each of size S words. With the property tree, we need
+ * L=2 extra words per node for siblings and kids pointers. Without the tree,
+ * (1-alpha)*S*T words are wasted on free or removed sentinel-entries required
+ * by double hashing.
+ *
+ * Therefore,
+ *
+ * (property tree) <> (no property tree)
+ * N*(S+L) + T <> S*T
+ * N*(S+L) + T <> P*S + (1-alpha)*S*T
+ * N*(S+L) + alpha*T + (1-alpha)*T <> P*S + (1-alpha)*S*T
+ *
+ * Note that P is alpha*T by definition, so
+ *
+ * N*(S+L) + P + (1-alpha)*T <> P*S + (1-alpha)*S*T
+ * N*(S+L) <> P*S - P + (1-alpha)*S*T - (1-alpha)*T
+ * N*(S+L) <> (P + (1-alpha)*T) * (S-1)
+ * N*(S+L) <> (P + (1-alpha)*P/alpha) * (S-1)
+ * N*(S+L) <> P * (1/alpha) * (S-1)
+ *
+ * Let N = P*beta for a compression ratio beta, beta <= 1:
+ *
+ * P*beta*(S+L) <> P * (1/alpha) * (S-1)
+ * beta*(S+L) <> (S-1)/alpha
+ * beta <> (S-1)/((S+L)*alpha)
+ *
+ * For S = 6 (32-bit architectures) and L = 2, the property tree wins iff
+ *
+ * beta < 5/(8*alpha)
+ *
+ * We ensure that alpha <= .75, so the property tree wins if beta < 5/6
+ * (about .833). An average beta from recent Mozilla browser startups was
+ * around .6.
+ *
+ * Can we reduce L? Observe that the property tree degenerates into a list of
+ * lists if at most one property Y follows X in all scopes. In or near such a
+ * case, we waste a word on the right-sibling link outside of the root ply of
+ * the tree. Note also that the root ply tends to be large, so O(n^2) growth
+ * searching it is likely, indicating the need for hashing (but with increased
+ * thread safety costs).
+ *
+ * If only K out of N nodes in the property tree have more than one child, we
+ * could eliminate the sibling link and overlay a children list or hash-table
+ * pointer on the leftmost-child link (which would then be either null or an
+ * only-child link; the overlay could be tagged in the low bit of the pointer,
+ * or flagged elsewhere in the property tree node, although such a flag must
+ * not be considered when comparing node labels during tree search).
+ *
+ * For such a system, L = 1 + (K * averageChildrenTableSize) / N instead of 2.
+ * If K << N, L approaches 1 and the property tree wins if beta < .95.
+ *
+ * We observe that fan-out below the root ply of the property tree appears to
+ * have extremely low degree (see the MeterPropertyTree code that histograms
+ * child-counts in jsscope.c), so instead of a hash-table we use a linked list
+ * of child node pointer arrays ("kid chunks"). The details are isolated in
+ * jsscope.c; others must treat JSScopeProperty.kids as opaque. We leave it
+ * strongly typed for debug-ability of the common (null or one-kid) cases.
+ *
+ * One final twist (can you stand it?): the mean number of entries per scope
+ * in Mozilla is < 5, with a large standard deviation (~8). Instead of always
+ * allocating scope->table, we leave it null while initializing all the other
+ * scope members as if it were non-null and minimal-length. Until a property
+ * is added that crosses the threshold of 6 or more entries for hashing, or
+ * until a "middle delete" occurs, we use linear search from scope->lastProp
+ * to find a given id, and save on the space overhead of a hash table.
+ */
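A quick numeric check of the break-even condition derived in the comment above, as a stand-alone sketch; the values S = 6, L = 2 and alpha = .75 are the ones quoted there, and the .6 figure is the observed Mozilla-startup average mentioned in the same comment.

#include <stdio.h>

int main(void)
{
    double S = 6.0;       /* words per property, 32-bit case */
    double L = 2.0;       /* extra words for kids/sibling links */
    double alpha = 0.75;  /* maximum double-hash load factor */
    double breakEven = (S - 1.0) / ((S + L) * alpha);   /* 5/(8*.75) = 5/6 */
    double observed = 0.6;                              /* typical startup beta */

    printf("break-even beta = %.3f, observed beta = %.2f -> property tree %s\n",
           breakEven, observed, observed < breakEven ? "wins" : "loses");
    return 0;
}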
+
+struct JSScope {
+ JSObjectMap map; /* base class state */
+ JSObject *object; /* object that owns this scope */
+ uint8 flags; /* flags, see below */
+ int8 hashShift; /* multiplicative hash shift */
+ uint16 spare; /* reserved */
+ uint32 entryCount; /* number of entries in table */
+ uint32 removedCount; /* removed entry sentinels in table */
+ JSScopeProperty **table; /* table of ptrs to shared tree nodes */
+ JSScopeProperty *lastProp; /* pointer to last property added */
+#ifdef JS_THREADSAFE
+ JSContext *ownercx; /* creating context, NULL if shared */
+ JSThinLock lock; /* binary semaphore protecting scope */
+ union { /* union lockful and lock-free state: */
+ jsrefcount count; /* lock entry count for reentrancy */
+ JSScope *link; /* next link in rt->scopeSharingTodo */
+ } u;
+#ifdef DEBUG
+ const char *file[4]; /* file where lock was (re-)taken */
+ unsigned int line[4]; /* line where lock was (re-)taken */
+#endif
+#endif
+};
+
+#define OBJ_SCOPE(obj) ((JSScope *)(obj)->map)
+
+/* By definition, hashShift = JS_DHASH_BITS - log2(capacity). */
+#define SCOPE_CAPACITY(scope) JS_BIT(JS_DHASH_BITS-(scope)->hashShift)
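As a quick orientation to that encoding (a sketch assuming JS_DHASH_BITS == 32, its usual value): a scope whose table holds 16 entries stores hashShift = 32 - log2(16) = 28, and SCOPE_CAPACITY recovers 16 from it.

#include <assert.h>

int main(void)
{
    unsigned dhashBits = 32;   /* assumed JS_DHASH_BITS */
    unsigned hashShift = 28;   /* as stored for a capacity-16 scope */

    assert((1u << (dhashBits - hashShift)) == 16);   /* SCOPE_CAPACITY analogue */
    return 0;
}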
+
+/* Scope flags and some macros to hide them from other files than jsscope.c. */
+#define SCOPE_MIDDLE_DELETE 0x0001
+#define SCOPE_SEALED 0x0002
+
+#define SCOPE_HAD_MIDDLE_DELETE(scope) ((scope)->flags & SCOPE_MIDDLE_DELETE)
+#define SCOPE_SET_MIDDLE_DELETE(scope) ((scope)->flags |= SCOPE_MIDDLE_DELETE)
+#define SCOPE_CLR_MIDDLE_DELETE(scope) ((scope)->flags &= ~SCOPE_MIDDLE_DELETE)
+
+#define SCOPE_IS_SEALED(scope) ((scope)->flags & SCOPE_SEALED)
+#define SCOPE_SET_SEALED(scope) ((scope)->flags |= SCOPE_SEALED)
+#if 0
+/*
+ * Don't define this, it can't be done safely because JS_LOCK_OBJ will avoid
+ * taking the lock if the object owns its scope and the scope is sealed.
+ */
+#define SCOPE_CLR_SEALED(scope) ((scope)->flags &= ~SCOPE_SEALED)
+#endif
+
+/*
+ * A little information hiding for scope->lastProp, in case it ever becomes
+ * a tagged pointer again.
+ */
+#define SCOPE_LAST_PROP(scope) ((scope)->lastProp)
+#define SCOPE_REMOVE_LAST_PROP(scope) ((scope)->lastProp = \
+ (scope)->lastProp->parent)
+
+struct JSScopeProperty {
+ jsid id; /* int-tagged jsval/untagged JSAtom* */
+ JSPropertyOp getter; /* getter and setter hooks or objects */
+ JSPropertyOp setter;
+ uint32 slot; /* index in obj->slots vector */
+ uint8 attrs; /* attributes, see jsapi.h JSPROP_* */
+ uint8 flags; /* flags, see below for defines */
+ int16 shortid; /* tinyid, or local arg/var index */
+ JSScopeProperty *parent; /* parent node, reverse for..in order */
+ JSScopeProperty *kids; /* null, single child, or a tagged ptr
+ to many-kids data structure */
+};
+
+/* JSScopeProperty pointer tag bit indicating a collision. */
+#define SPROP_COLLISION ((jsuword)1)
+#define SPROP_REMOVED ((JSScopeProperty *) SPROP_COLLISION)
+
+/* Macros to get and set sprop pointer values and collision flags. */
+#define SPROP_IS_FREE(sprop) ((sprop) == NULL)
+#define SPROP_IS_REMOVED(sprop) ((sprop) == SPROP_REMOVED)
+#define SPROP_IS_LIVE(sprop) ((sprop) > SPROP_REMOVED)
+#define SPROP_FLAG_COLLISION(spp,sprop) (*(spp) = (JSScopeProperty *) \
+ ((jsuword)(sprop) | SPROP_COLLISION))
+#define SPROP_HAD_COLLISION(sprop) ((jsuword)(sprop) & SPROP_COLLISION)
+#define SPROP_FETCH(spp) SPROP_CLEAR_COLLISION(*(spp))
+
+#define SPROP_CLEAR_COLLISION(sprop) \
+ ((JSScopeProperty *) ((jsuword)(sprop) & ~SPROP_COLLISION))
+
+#define SPROP_STORE_PRESERVING_COLLISION(spp, sprop) \
+ (*(spp) = (JSScopeProperty *) ((jsuword)(sprop) \
+ | SPROP_HAD_COLLISION(*(spp))))
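The macros above rely on node pointers being at least word-aligned, so the low bit is free to record that a removed entry once collided. A minimal stand-alone sketch of the same tagging idiom (hypothetical Node type, not the real JSScopeProperty):

#include <assert.h>
#include <stdint.h>

typedef struct Node { int dummy; } Node;

int main(void)
{
    Node n;
    Node *slot = &n;
    uintptr_t word;

    word = (uintptr_t)slot | 1;                        /* SPROP_FLAG_COLLISION  */
    assert(word & 1);                                  /* SPROP_HAD_COLLISION   */
    assert((Node *)(word & ~(uintptr_t)1) == &n);      /* SPROP_CLEAR_COLLISION */
    return 0;
}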
+
+/* Bits stored in sprop->flags. */
+#define SPROP_MARK 0x01
+#define SPROP_IS_DUPLICATE 0x02
+#define SPROP_IS_ALIAS 0x04
+#define SPROP_HAS_SHORTID 0x08
+#define SPROP_IS_HIDDEN 0x10 /* a normally-hidden property,
+ e.g., function arg or var */
+
+/*
+ * If SPROP_HAS_SHORTID is set in sprop->flags, we use sprop->shortid rather
+ * than id when calling sprop's getter or setter.
+ */
+#define SPROP_USERID(sprop) \
+ (((sprop)->flags & SPROP_HAS_SHORTID) ? INT_TO_JSVAL((sprop)->shortid) \
+ : ID_TO_VALUE((sprop)->id))
+
+#define SPROP_INVALID_SLOT 0xffffffff
+
+#define SLOT_IN_SCOPE(slot,scope) ((slot) < (scope)->map.freeslot)
+#define SPROP_HAS_VALID_SLOT(sprop,scope) SLOT_IN_SCOPE((sprop)->slot, scope)
+
+#define SPROP_HAS_STUB_GETTER(sprop) (!(sprop)->getter)
+#define SPROP_HAS_STUB_SETTER(sprop) (!(sprop)->setter)
+
+/*
+ * NB: SPROP_GET must not be called if SPROP_HAS_STUB_GETTER(sprop).
+ */
+#define SPROP_GET(cx,sprop,obj,obj2,vp) \
+ (((sprop)->attrs & JSPROP_GETTER) \
+ ? js_InternalGetOrSet(cx, obj, (sprop)->id, \
+ OBJECT_TO_JSVAL((sprop)->getter), JSACC_READ, \
+ 0, 0, vp) \
+ : (sprop)->getter(cx, OBJ_THIS_OBJECT(cx,obj), SPROP_USERID(sprop), vp))
+
+/*
+ * NB: SPROP_SET must not be called if (SPROP_HAS_STUB_SETTER(sprop) &&
+ * !(sprop->attrs & JSPROP_GETTER)).
+ */
+#define SPROP_SET(cx,sprop,obj,obj2,vp) \
+ (((sprop)->attrs & JSPROP_SETTER) \
+ ? js_InternalGetOrSet(cx, obj, (sprop)->id, \
+ OBJECT_TO_JSVAL((sprop)->setter), JSACC_WRITE, \
+ 1, vp, vp) \
+ : ((sprop)->attrs & JSPROP_GETTER) \
+ ? (JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, \
+ JSMSG_GETTER_ONLY, NULL), JS_FALSE) \
+ : (sprop)->setter(cx, OBJ_THIS_OBJECT(cx,obj), SPROP_USERID(sprop), vp))
+
+/* Macro for common expression to test for shared permanent attributes. */
+#define SPROP_IS_SHARED_PERMANENT(sprop) \
+ ((~(sprop)->attrs & (JSPROP_SHARED | JSPROP_PERMANENT)) == 0)
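SPROP_IS_SHARED_PERMANENT uses the complement trick to test that both attribute bits are set with a single compare: ~attrs has no bits of the mask left exactly when attrs contains the whole mask. A stand-alone sketch with hypothetical bit values:

#include <assert.h>

int main(void)
{
    unsigned SHARED = 0x40, PERMANENT = 0x04;          /* hypothetical values */
    unsigned both = SHARED | PERMANENT, onlyShared = SHARED;

    assert((~both & (SHARED | PERMANENT)) == 0);        /* both bits present */
    assert((~onlyShared & (SHARED | PERMANENT)) != 0);  /* one bit missing   */
    return 0;
}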
+
+extern JSScope *
+js_GetMutableScope(JSContext *cx, JSObject *obj);
+
+extern JSScope *
+js_NewScope(JSContext *cx, jsrefcount nrefs, JSObjectOps *ops, JSClass *clasp,
+ JSObject *obj);
+
+extern void
+js_DestroyScope(JSContext *cx, JSScope *scope);
+
+#define ID_TO_VALUE(id) (JSID_IS_ATOM(id) ? ATOM_JSID_TO_JSVAL(id) : \
+ JSID_IS_OBJECT(id) ? OBJECT_JSID_TO_JSVAL(id) : \
+ (jsval)(id))
+#define HASH_ID(id) (JSID_IS_ATOM(id) ? JSID_TO_ATOM(id)->number : \
+ JSID_IS_OBJECT(id) ? (jsatomid) JSID_CLRTAG(id) : \
+ (jsatomid) JSID_TO_INT(id))
+
+extern JS_FRIEND_API(JSScopeProperty **)
+js_SearchScope(JSScope *scope, jsid id, JSBool adding);
+
+#define SCOPE_GET_PROPERTY(scope, id) \
+ SPROP_FETCH(js_SearchScope(scope, id, JS_FALSE))
+
+#define SCOPE_HAS_PROPERTY(scope, sprop) \
+ (SCOPE_GET_PROPERTY(scope, (sprop)->id) == (sprop))
+
+extern JSScopeProperty *
+js_AddScopeProperty(JSContext *cx, JSScope *scope, jsid id,
+ JSPropertyOp getter, JSPropertyOp setter, uint32 slot,
+ uintN attrs, uintN flags, intN shortid);
+
+extern JSScopeProperty *
+js_ChangeScopePropertyAttrs(JSContext *cx, JSScope *scope,
+ JSScopeProperty *sprop, uintN attrs, uintN mask,
+ JSPropertyOp getter, JSPropertyOp setter);
+
+extern JSBool
+js_RemoveScopeProperty(JSContext *cx, JSScope *scope, jsid id);
+
+extern void
+js_ClearScope(JSContext *cx, JSScope *scope);
+
+/*
+ * These macros used to inline short code sequences, but they grew over time.
+ * We retain them for internal backward compatibility, and in case one or both
+ * ever shrink to inline-able size.
+ */
+#define MARK_ID(cx,id) js_MarkId(cx, id)
+#define MARK_SCOPE_PROPERTY(cx,sprop) js_MarkScopeProperty(cx, sprop)
+
+extern void
+js_MarkId(JSContext *cx, jsid id);
+
+extern void
+js_MarkScopeProperty(JSContext *cx, JSScopeProperty *sprop);
+
+extern void
+js_SweepScopeProperties(JSRuntime *rt);
+
+extern JSBool
+js_InitPropertyTree(JSRuntime *rt);
+
+extern void
+js_FinishPropertyTree(JSRuntime *rt);
+
+#endif /* jsscope_h___ */
diff --git a/third_party/js-1.7/jsscript.c b/third_party/js-1.7/jsscript.c
new file mode 100644
index 0000000..73298a4
--- /dev/null
+++ b/third_party/js-1.7/jsscript.c
@@ -0,0 +1,1717 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sw=4 et tw=78:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * JS script operations.
+ */
+#include "jsstddef.h"
+#include <string.h>
+#include "jstypes.h"
+#include "jsutil.h" /* Added by JSIFY */
+#include "jsprf.h"
+#include "jsapi.h"
+#include "jsatom.h"
+#include "jscntxt.h"
+#include "jsconfig.h"
+#include "jsdbgapi.h"
+#include "jsemit.h"
+#include "jsfun.h"
+#include "jsinterp.h"
+#include "jslock.h"
+#include "jsnum.h"
+#include "jsopcode.h"
+#include "jsscript.h"
+#if JS_HAS_XDR
+#include "jsxdrapi.h"
+#endif
+
+#if JS_HAS_SCRIPT_OBJECT
+
+static const char js_script_exec[] = "Script.prototype.exec";
+static const char js_script_compile[] = "Script.prototype.compile";
+
+/*
+ * This routine requires that obj has been locked previously.
+ */
+static jsint
+GetScriptExecDepth(JSContext *cx, JSObject *obj)
+{
+ jsval v;
+
+ JS_ASSERT(JS_IS_OBJ_LOCKED(cx, obj));
+ v = LOCKED_OBJ_GET_SLOT(obj, JSSLOT_START(&js_ScriptClass));
+ return JSVAL_TO_INT(v);
+}
+
+static void
+AdjustScriptExecDepth(JSContext *cx, JSObject *obj, jsint delta)
+{
+ jsint execDepth;
+
+ JS_LOCK_OBJ(cx, obj);
+ execDepth = GetScriptExecDepth(cx, obj);
+ LOCKED_OBJ_SET_SLOT(obj, JSSLOT_START(&js_ScriptClass),
+ INT_TO_JSVAL(execDepth + delta));
+ JS_UNLOCK_OBJ(cx, obj);
+}
+
+#if JS_HAS_TOSOURCE
+static JSBool
+script_toSource(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ uint32 indent;
+ JSScript *script;
+ size_t i, j, k, n;
+ char buf[16];
+ jschar *s, *t;
+ JSString *str;
+
+ if (!JS_InstanceOf(cx, obj, &js_ScriptClass, argv))
+ return JS_FALSE;
+
+ indent = 0;
+ if (argc && !js_ValueToECMAUint32(cx, argv[0], &indent))
+ return JS_FALSE;
+
+ script = (JSScript *) JS_GetPrivate(cx, obj);
+
+ /* Let n count the source string length, j the "front porch" length. */
+ j = JS_snprintf(buf, sizeof buf, "(new %s(", js_ScriptClass.name);
+ n = j + 2;
+ if (!script) {
+ /* Let k count the constructor argument string length. */
+ k = 0;
+ s = NULL; /* quell GCC overwarning */
+ } else {
+ str = JS_DecompileScript(cx, script, "Script.prototype.toSource",
+ (uintN)indent);
+ if (!str)
+ return JS_FALSE;
+ str = js_QuoteString(cx, str, '\'');
+ if (!str)
+ return JS_FALSE;
+ s = JSSTRING_CHARS(str);
+ k = JSSTRING_LENGTH(str);
+ n += k;
+ }
+
+ /* Allocate the source string and copy into it. */
+ t = (jschar *) JS_malloc(cx, (n + 1) * sizeof(jschar));
+ if (!t)
+ return JS_FALSE;
+ for (i = 0; i < j; i++)
+ t[i] = buf[i];
+ for (j = 0; j < k; i++, j++)
+ t[i] = s[j];
+ t[i++] = ')';
+ t[i++] = ')';
+ t[i] = 0;
+
+ /* Create and return a JS string for t. */
+ str = JS_NewUCString(cx, t, n);
+ if (!str) {
+ JS_free(cx, t);
+ return JS_FALSE;
+ }
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+#endif /* JS_HAS_TOSOURCE */
+
+static JSBool
+script_toString(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ uint32 indent;
+ JSScript *script;
+ JSString *str;
+
+ indent = 0;
+ if (argc && !js_ValueToECMAUint32(cx, argv[0], &indent))
+ return JS_FALSE;
+
+ if (!JS_InstanceOf(cx, obj, &js_ScriptClass, argv))
+ return JS_FALSE;
+ script = (JSScript *) JS_GetPrivate(cx, obj);
+ if (!script) {
+ *rval = STRING_TO_JSVAL(cx->runtime->emptyString);
+ return JS_TRUE;
+ }
+
+ str = JS_DecompileScript(cx, script, "Script.prototype.toString",
+ (uintN)indent);
+ if (!str)
+ return JS_FALSE;
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+
+static JSBool
+script_compile(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSString *str;
+ JSObject *scopeobj;
+ jsval v;
+ JSScript *script, *oldscript;
+ JSStackFrame *fp, *caller;
+ const char *file;
+ uintN line;
+ JSPrincipals *principals;
+ jsint execDepth;
+
+ /* Make sure obj is a Script object. */
+ if (!JS_InstanceOf(cx, obj, &js_ScriptClass, argv))
+ return JS_FALSE;
+
+ /* If no args, leave private undefined and return early. */
+ if (argc == 0)
+ goto out;
+
+ /* Otherwise, the first arg is the script source to compile. */
+ str = js_ValueToString(cx, argv[0]);
+ if (!str)
+ return JS_FALSE;
+ argv[0] = STRING_TO_JSVAL(str);
+
+ scopeobj = NULL;
+ if (argc >= 2) {
+ if (!js_ValueToObject(cx, argv[1], &scopeobj))
+ return JS_FALSE;
+ argv[1] = OBJECT_TO_JSVAL(scopeobj);
+ }
+
+ /* Compile using the caller's scope chain, which js_Invoke passes to fp. */
+ fp = cx->fp;
+ caller = JS_GetScriptedCaller(cx, fp);
+ JS_ASSERT(!caller || fp->scopeChain == caller->scopeChain);
+
+ if (caller) {
+ if (!scopeobj) {
+ scopeobj = js_GetScopeChain(cx, caller);
+ if (!scopeobj)
+ return JS_FALSE;
+ fp->scopeChain = scopeobj; /* for the compiler's benefit */
+ }
+
+ principals = JS_EvalFramePrincipals(cx, fp, caller);
+ if (principals == caller->script->principals) {
+ file = caller->script->filename;
+ line = js_PCToLineNumber(cx, caller->script, caller->pc);
+ } else {
+ file = principals->codebase;
+ line = 0;
+ }
+ } else {
+ file = NULL;
+ line = 0;
+ principals = NULL;
+ }
+
+ /* Ensure we compile this script with the right (inner) principals. */
+ scopeobj = js_CheckScopeChainValidity(cx, scopeobj, js_script_compile);
+ if (!scopeobj)
+ return JS_FALSE;
+
+ /*
+ * Compile the new script using the caller's scope chain, a la eval().
+ * Unlike jsobj.c:obj_eval, however, we do not set JSFRAME_EVAL in fp's
+ * flags, because compilation is here separated from execution, and the
+ * run-time scope chain may not match the compile-time. JSFRAME_EVAL is
+ * tested in jsemit.c and jsscan.c to optimize based on identity of run-
+ * and compile-time scope.
+ */
+ fp->flags |= JSFRAME_SCRIPT_OBJECT;
+ script = JS_CompileUCScriptForPrincipals(cx, scopeobj, principals,
+ JSSTRING_CHARS(str),
+ JSSTRING_LENGTH(str),
+ file, line);
+ if (!script)
+ return JS_FALSE;
+
+ JS_LOCK_OBJ(cx, obj);
+ execDepth = GetScriptExecDepth(cx, obj);
+
+ /*
+ * execDepth must be 0 to allow compilation here, otherwise the JSScript
+ * struct can be released while running.
+ */
+ if (execDepth > 0) {
+ JS_UNLOCK_OBJ(cx, obj);
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_COMPILE_EXECED_SCRIPT);
+ return JS_FALSE;
+ }
+
+ /* Swap script for obj's old script, if any. */
+ v = LOCKED_OBJ_GET_SLOT(obj, JSSLOT_PRIVATE);
+ oldscript = !JSVAL_IS_VOID(v) ? (JSScript *) JSVAL_TO_PRIVATE(v) : NULL;
+ LOCKED_OBJ_SET_SLOT(obj, JSSLOT_PRIVATE, PRIVATE_TO_JSVAL(script));
+ JS_UNLOCK_OBJ(cx, obj);
+
+ if (oldscript)
+ js_DestroyScript(cx, oldscript);
+
+ script->object = obj;
+ js_CallNewScriptHook(cx, script, NULL);
+
+out:
+ /* Return the object. */
+ *rval = OBJECT_TO_JSVAL(obj);
+ return JS_TRUE;
+}
+
+static JSBool
+script_exec(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSObject *scopeobj, *parent;
+ JSStackFrame *fp, *caller;
+ JSScript *script;
+ JSBool ok;
+
+ if (!JS_InstanceOf(cx, obj, &js_ScriptClass, argv))
+ return JS_FALSE;
+
+ scopeobj = NULL;
+ if (argc) {
+ if (!js_ValueToObject(cx, argv[0], &scopeobj))
+ return JS_FALSE;
+ argv[0] = OBJECT_TO_JSVAL(scopeobj);
+ }
+
+ /*
+ * Emulate eval() by using caller's this, var object, sharp array, etc.,
+ * all propagated by js_Execute via a non-null fourth (down) argument to
+ * js_Execute. If there is no scripted caller, js_Execute uses its second
+ * (chain) argument to set the exec frame's varobj, thisp, and scopeChain.
+ *
+ * Unlike eval, which the compiler detects, Script.prototype.exec may be
+ * called from a lightweight function, or even from native code (in which
+ * case fp->varobj and fp->scopeChain are null). If exec is called from
+ * a lightweight function, we will need to get a Call object representing
+ * its frame, to act as the var object and scope chain head.
+ */
+ fp = cx->fp;
+ caller = JS_GetScriptedCaller(cx, fp);
+ if (caller && !caller->varobj) {
+ /* Called from a lightweight function. */
+ JS_ASSERT(caller->fun && !JSFUN_HEAVYWEIGHT_TEST(caller->fun->flags));
+
+ /* Scope chain links from Call object to callee's parent. */
+ parent = OBJ_GET_PARENT(cx, JSVAL_TO_OBJECT(caller->argv[-2]));
+ if (!js_GetCallObject(cx, caller, parent))
+ return JS_FALSE;
+ }
+
+ if (!scopeobj) {
+ /* No scope object passed in: try to use the caller's scope chain. */
+ if (caller) {
+ /*
+ * Load caller->scopeChain after the conditional js_GetCallObject
+ * call above, which resets scopeChain as well as varobj.
+ */
+ scopeobj = js_GetScopeChain(cx, caller);
+ if (!scopeobj)
+ return JS_FALSE;
+ } else {
+ /*
+ * Called from native code, so we don't know what scope object to
+ * use. We could use parent (see above), but Script.prototype.exec
+ * might be a shared/sealed "superglobal" method. A more general
+ * approach would use cx->globalObject, which will be the same as
+ * exec.__parent__ in the non-superglobal case. In the superglobal
+ * case it's the right object: the global, not the superglobal.
+ */
+ scopeobj = cx->globalObject;
+ }
+ }
+
+ scopeobj = js_CheckScopeChainValidity(cx, scopeobj, js_script_exec);
+ if (!scopeobj)
+ return JS_FALSE;
+
+ /* Keep track of nesting depth for the script. */
+ AdjustScriptExecDepth(cx, obj, 1);
+
+ /* Control must reach the out label after this point. */
+ script = (JSScript *) JS_GetPrivate(cx, obj);
+ if (!script) {
+ ok = JS_FALSE;
+ goto out;
+ }
+
+ /* Belt-and-braces: check that this script object has access to scopeobj. */
+ ok = js_CheckPrincipalsAccess(cx, scopeobj, script->principals,
+ CLASS_ATOM(cx, Script));
+ if (!ok)
+ goto out;
+
+ ok = js_Execute(cx, scopeobj, script, caller, JSFRAME_EVAL, rval);
+
+out:
+ AdjustScriptExecDepth(cx, obj, -1);
+ return ok;
+}
+
+#if JS_HAS_XDR
+
+static JSBool
+XDRAtomMap(JSXDRState *xdr, JSAtomMap *map)
+{
+ JSContext *cx;
+ uint32 natoms, i, index;
+ JSAtom **atoms;
+
+ cx = xdr->cx;
+
+ if (xdr->mode == JSXDR_ENCODE)
+ natoms = (uint32)map->length;
+
+ if (!JS_XDRUint32(xdr, &natoms))
+ return JS_FALSE;
+
+ if (xdr->mode == JSXDR_ENCODE) {
+ atoms = map->vector;
+ } else {
+ if (natoms == 0) {
+ atoms = NULL;
+ } else {
+ atoms = (JSAtom **) JS_malloc(cx, (size_t)natoms * sizeof *atoms);
+ if (!atoms)
+ return JS_FALSE;
+#ifdef DEBUG
+ memset(atoms, 0, (size_t)natoms * sizeof *atoms);
+#endif
+ }
+
+ map->vector = atoms;
+ map->length = natoms;
+ }
+
+ for (i = 0; i != natoms; ++i) {
+ if (xdr->mode == JSXDR_ENCODE)
+ index = i;
+ if (!JS_XDRUint32(xdr, &index))
+ goto bad;
+
+ /*
+ * Assert that, when decoding, the read index is valid and points to
+ * an unoccupied element of atoms array.
+ */
+ JS_ASSERT(index < natoms);
+ JS_ASSERT(xdr->mode == JSXDR_ENCODE || !atoms[index]);
+ if (!js_XDRAtom(xdr, &atoms[index]))
+ goto bad;
+ }
+
+ return JS_TRUE;
+
+ bad:
+ if (xdr->mode == JSXDR_DECODE) {
+ JS_free(cx, atoms);
+ map->vector = NULL;
+ map->length = 0;
+ }
+
+ return JS_FALSE;
+}
+
+JSBool
+js_XDRScript(JSXDRState *xdr, JSScript **scriptp, JSBool *hasMagic)
+{
+ JSContext *cx;
+ JSScript *script, *newscript, *oldscript;
+ uint32 length, lineno, depth, magic, nsrcnotes, ntrynotes;
+ uint32 prologLength, version;
+ JSBool filenameWasSaved;
+ jssrcnote *notes, *sn;
+
+ cx = xdr->cx;
+ script = *scriptp;
+ nsrcnotes = ntrynotes = 0;
+ filenameWasSaved = JS_FALSE;
+ notes = NULL;
+
+ /*
+ * Encode prologLength and version after script->length (_2 or greater),
+ * but decode both new (>= _2) and old, prolog&version-free (_1) scripts.
+ * Version _3 supports principals serialization. Version _4 reorders the
+ * nsrcnotes and ntrynotes fields to come before everything except magic,
+ * length, prologLength, and version, so that srcnote and trynote storage
+ * can be allocated as part of the JSScript (along with bytecode storage).
+ *
+ * So far, the magic number has not been bumped for every jsopcode.tbl change.
+ * We stipulate forward compatibility by requiring old bytecodes never to
+ * change or go away (modulo a few exceptions before the XDR interfaces
+ * evolved, and a few exceptions during active trunk development). With
+ * the addition of JSOP_STOP to support JS_THREADED_INTERP, we make a new
+ * magic number (_5) so that we know to append JSOP_STOP to old scripts
+ * when deserializing.
+ */
+ if (xdr->mode == JSXDR_ENCODE)
+ magic = JSXDR_MAGIC_SCRIPT_CURRENT;
+ if (!JS_XDRUint32(xdr, &magic))
+ return JS_FALSE;
+ JS_ASSERT((uint32)JSXDR_MAGIC_SCRIPT_5 - (uint32)JSXDR_MAGIC_SCRIPT_1 == 4);
+ if (magic - (uint32)JSXDR_MAGIC_SCRIPT_1 > 4) {
+ if (!hasMagic) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_SCRIPT_MAGIC);
+ return JS_FALSE;
+ }
+ *hasMagic = JS_FALSE;
+ return JS_TRUE;
+ }
+ if (hasMagic)
+ *hasMagic = JS_TRUE;
+
+ if (xdr->mode == JSXDR_ENCODE) {
+ length = script->length;
+ prologLength = PTRDIFF(script->main, script->code, jsbytecode);
+ JS_ASSERT((int16)script->version != JSVERSION_UNKNOWN);
+ version = (uint32)script->version | (script->numGlobalVars << 16);
+ lineno = (uint32)script->lineno;
+ depth = (uint32)script->depth;
+
+ /* Count the srcnotes, keeping notes pointing at the first one. */
+ notes = SCRIPT_NOTES(script);
+ for (sn = notes; !SN_IS_TERMINATOR(sn); sn = SN_NEXT(sn))
+ continue;
+ nsrcnotes = PTRDIFF(sn, notes, jssrcnote);
+ nsrcnotes++; /* room for the terminator */
+
+ /* Count the trynotes. */
+ if (script->trynotes) {
+ while (script->trynotes[ntrynotes].catchStart)
+ ntrynotes++;
+ ntrynotes++; /* room for the end marker */
+ }
+ }
+
+ if (!JS_XDRUint32(xdr, &length))
+ return JS_FALSE;
+ if (magic >= JSXDR_MAGIC_SCRIPT_2) {
+ if (!JS_XDRUint32(xdr, &prologLength))
+ return JS_FALSE;
+ if (!JS_XDRUint32(xdr, &version))
+ return JS_FALSE;
+
+ /* To fuse allocations, we need srcnote and trynote counts early. */
+ if (magic >= JSXDR_MAGIC_SCRIPT_4) {
+ if (!JS_XDRUint32(xdr, &nsrcnotes))
+ return JS_FALSE;
+ if (!JS_XDRUint32(xdr, &ntrynotes))
+ return JS_FALSE;
+ }
+ }
+
+ if (xdr->mode == JSXDR_DECODE) {
+ size_t alloclength = length;
+ if (magic < JSXDR_MAGIC_SCRIPT_5)
+ ++alloclength; /* add a byte for JSOP_STOP */
+
+ script = js_NewScript(cx, alloclength, nsrcnotes, ntrynotes);
+ if (!script)
+ return JS_FALSE;
+ if (magic >= JSXDR_MAGIC_SCRIPT_2) {
+ script->main += prologLength;
+ script->version = (JSVersion) (version & 0xffff);
+ script->numGlobalVars = (uint16) (version >> 16);
+
+ /* If we know nsrcnotes, we allocated space for notes in script. */
+ if (magic >= JSXDR_MAGIC_SCRIPT_4)
+ notes = SCRIPT_NOTES(script);
+ }
+ *scriptp = script;
+ }
+
+ /*
+ * Control hereafter must goto error on failure, in order for the DECODE
+ * case to destroy script and conditionally free notes, which if non-null
+ * in the (DECODE and magic < _4) case must point at a temporary vector
+ * allocated just below.
+ */
+ oldscript = xdr->script;
+ xdr->script = script;
+ if (!JS_XDRBytes(xdr, (char *)script->code, length * sizeof(jsbytecode)) ||
+ !XDRAtomMap(xdr, &script->atomMap)) {
+ goto error;
+ }
+
+ if (magic < JSXDR_MAGIC_SCRIPT_5) {
+ if (xdr->mode == JSXDR_DECODE) {
+ /*
+ * Append JSOP_STOP to old scripts, to relieve the interpreter
+ * from having to bounds-check pc. Also take care to increment
+ * length, as it is used below and must count all bytecode.
+ */
+ script->code[length++] = JSOP_STOP;
+ }
+
+ if (magic < JSXDR_MAGIC_SCRIPT_4) {
+ if (!JS_XDRUint32(xdr, &nsrcnotes))
+ goto error;
+ if (xdr->mode == JSXDR_DECODE) {
+ notes = (jssrcnote *)
+ JS_malloc(cx, nsrcnotes * sizeof(jssrcnote));
+ if (!notes)
+ goto error;
+ }
+ }
+ }
+
+ if (!JS_XDRBytes(xdr, (char *)notes, nsrcnotes * sizeof(jssrcnote)) ||
+ !JS_XDRCStringOrNull(xdr, (char **)&script->filename) ||
+ !JS_XDRUint32(xdr, &lineno) ||
+ !JS_XDRUint32(xdr, &depth) ||
+ (magic < JSXDR_MAGIC_SCRIPT_4 && !JS_XDRUint32(xdr, &ntrynotes))) {
+ goto error;
+ }
+
+ /* Script principals transcoding support comes with versions >= _3. */
+ if (magic >= JSXDR_MAGIC_SCRIPT_3) {
+ JSPrincipals *principals;
+ uint32 encodeable;
+
+ if (xdr->mode == JSXDR_ENCODE) {
+ principals = script->principals;
+ encodeable = (cx->runtime->principalsTranscoder != NULL);
+ if (!JS_XDRUint32(xdr, &encodeable))
+ goto error;
+ if (encodeable &&
+ !cx->runtime->principalsTranscoder(xdr, &principals)) {
+ goto error;
+ }
+ } else {
+ if (!JS_XDRUint32(xdr, &encodeable))
+ goto error;
+ if (encodeable) {
+ if (!cx->runtime->principalsTranscoder) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_CANT_DECODE_PRINCIPALS);
+ goto error;
+ }
+ if (!cx->runtime->principalsTranscoder(xdr, &principals))
+ goto error;
+ script->principals = principals;
+ }
+ }
+ }
+
+ if (xdr->mode == JSXDR_DECODE) {
+ const char *filename = script->filename;
+ if (filename) {
+ filename = js_SaveScriptFilename(cx, filename);
+ if (!filename)
+ goto error;
+ JS_free(cx, (void *) script->filename);
+ script->filename = filename;
+ filenameWasSaved = JS_TRUE;
+ }
+ script->lineno = (uintN)lineno;
+ script->depth = (uintN)depth;
+
+ if (magic < JSXDR_MAGIC_SCRIPT_4) {
+ /*
+ * Argh, we have to reallocate script, copy notes into the extra
+ * space after the bytecodes, and free the temporary notes vector.
+ * First, add enough slop to nsrcnotes so we can align the address
+ * after the srcnotes of the first trynote.
+ */
+ uint32 osrcnotes = nsrcnotes;
+
+ if (ntrynotes)
+ nsrcnotes += JSTRYNOTE_ALIGNMASK;
+ newscript = (JSScript *) JS_realloc(cx, script,
+ sizeof(JSScript) +
+ length * sizeof(jsbytecode) +
+ nsrcnotes * sizeof(jssrcnote) +
+ ntrynotes * sizeof(JSTryNote));
+ if (!newscript)
+ goto error;
+
+ *scriptp = script = newscript;
+ script->code = (jsbytecode *)(script + 1);
+ script->main = script->code + prologLength;
+ memcpy(script->code + length, notes, osrcnotes * sizeof(jssrcnote));
+ JS_free(cx, (void *) notes);
+ notes = NULL;
+ if (ntrynotes) {
+ script->trynotes = (JSTryNote *)
+ ((jsword)(SCRIPT_NOTES(script) + nsrcnotes) &
+ ~(jsword)JSTRYNOTE_ALIGNMASK);
+ memset(script->trynotes, 0, ntrynotes * sizeof(JSTryNote));
+ }
+ }
+ }
+
+ while (ntrynotes) {
+ JSTryNote *tn = &script->trynotes[--ntrynotes];
+ uint32 start = (uint32) tn->start,
+ catchLength = (uint32) tn->length,
+ catchStart = (uint32) tn->catchStart;
+
+ if (!JS_XDRUint32(xdr, &start) ||
+ !JS_XDRUint32(xdr, &catchLength) ||
+ !JS_XDRUint32(xdr, &catchStart)) {
+ goto error;
+ }
+ tn->start = (ptrdiff_t) start;
+ tn->length = (ptrdiff_t) catchLength;
+ tn->catchStart = (ptrdiff_t) catchStart;
+ }
+
+ xdr->script = oldscript;
+ return JS_TRUE;
+
+ error:
+ if (xdr->mode == JSXDR_DECODE) {
+ if (script->filename && !filenameWasSaved) {
+ JS_free(cx, (void *) script->filename);
+ script->filename = NULL;
+ }
+ if (notes && magic < JSXDR_MAGIC_SCRIPT_4)
+ JS_free(cx, (void *) notes);
+ js_DestroyScript(cx, script);
+ *scriptp = NULL;
+ }
+ return JS_FALSE;
+}
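For orientation, here is a hedged sketch of driving js_XDRScript for an in-memory encode/decode round trip, mirroring what script_freeze and script_thaw below do; cx and script are assumed valid, most error reporting is omitted, and the helper name is invented for illustration.

static JSBool
EncodeDecodeScriptSketch(JSContext *cx, JSScript *script, JSScript **clonep)
{
    JSXDRState *enc, *dec;
    void *buf;
    uint32 len;
    JSBool hasMagic, ok = JS_FALSE;

    *clonep = NULL;
    enc = JS_XDRNewMem(cx, JSXDR_ENCODE);
    if (!enc)
        return JS_FALSE;
    if (!js_XDRScript(enc, &script, NULL))
        goto out;

    buf = JS_XDRMemGetData(enc, &len);
    if (!buf)
        goto out;

    dec = JS_XDRNewMem(cx, JSXDR_DECODE);
    if (!dec)
        goto out;
    JS_XDRMemSetData(dec, buf, len);
    ok = js_XDRScript(dec, clonep, &hasMagic) && hasMagic;

    /* The buffer still belongs to the encoder; detach it so destroying
       the decoder does not free it (the same trick script_thaw uses). */
    JS_XDRMemSetData(dec, NULL, 0);
    JS_XDRDestroy(dec);
out:
    JS_XDRDestroy(enc);
    return ok;
}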
+
+#if JS_HAS_XDR_FREEZE_THAW
+/*
+ * These cannot be exposed to web content, and chrome does not need them, so
+ * we take them out of the Mozilla client altogether. Fortunately, there is
+ * no way to serialize a native function (see fun_xdrObject in jsfun.c).
+ */
+
+static JSBool
+script_freeze(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSXDRState *xdr;
+ JSScript *script;
+ JSBool ok, hasMagic;
+ uint32 len;
+ void *buf;
+ JSString *str;
+
+ if (!JS_InstanceOf(cx, obj, &js_ScriptClass, argv))
+ return JS_FALSE;
+ script = (JSScript *) JS_GetPrivate(cx, obj);
+ if (!script)
+ return JS_TRUE;
+
+ /* create new XDR */
+ xdr = JS_XDRNewMem(cx, JSXDR_ENCODE);
+ if (!xdr)
+ return JS_FALSE;
+
+ /* write */
+ ok = js_XDRScript(xdr, &script, &hasMagic);
+ if (!ok)
+ goto out;
+ if (!hasMagic) {
+ *rval = JSVAL_VOID;
+ goto out;
+ }
+
+ buf = JS_XDRMemGetData(xdr, &len);
+ if (!buf) {
+ ok = JS_FALSE;
+ goto out;
+ }
+
+ JS_ASSERT((jsword)buf % sizeof(jschar) == 0);
+ len /= sizeof(jschar);
+ str = JS_NewUCStringCopyN(cx, (jschar *)buf, len);
+ if (!str) {
+ ok = JS_FALSE;
+ goto out;
+ }
+
+#if IS_BIG_ENDIAN
+ {
+ jschar *chars;
+ uint32 i;
+
+ /* Swap bytes in Unichars to keep frozen strings machine-independent. */
+ chars = JS_GetStringChars(str);
+ for (i = 0; i < len; i++)
+ chars[i] = JSXDR_SWAB16(chars[i]);
+ }
+#endif
+ *rval = STRING_TO_JSVAL(str);
+
+out:
+ JS_XDRDestroy(xdr);
+ return ok;
+}
+
+static JSBool
+script_thaw(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSXDRState *xdr;
+ JSString *str;
+ void *buf;
+ uint32 len;
+ jsval v;
+ JSScript *script, *oldscript;
+ JSBool ok, hasMagic;
+ jsint execDepth;
+
+ if (!JS_InstanceOf(cx, obj, &js_ScriptClass, argv))
+ return JS_FALSE;
+
+ if (argc == 0)
+ return JS_TRUE;
+ str = js_ValueToString(cx, argv[0]);
+ if (!str)
+ return JS_FALSE;
+ argv[0] = STRING_TO_JSVAL(str);
+
+ /* create new XDR */
+ xdr = JS_XDRNewMem(cx, JSXDR_DECODE);
+ if (!xdr)
+ return JS_FALSE;
+
+ buf = JS_GetStringChars(str);
+ len = JS_GetStringLength(str);
+#if IS_BIG_ENDIAN
+ {
+ jschar *from, *to;
+ uint32 i;
+
+ /* Swap bytes in Unichars to keep frozen strings machine-independent. */
+ from = (jschar *)buf;
+ to = (jschar *) JS_malloc(cx, len * sizeof(jschar));
+ if (!to) {
+ JS_XDRDestroy(xdr);
+ return JS_FALSE;
+ }
+ for (i = 0; i < len; i++)
+ to[i] = JSXDR_SWAB16(from[i]);
+ buf = (char *)to;
+ }
+#endif
+ len *= sizeof(jschar);
+ JS_XDRMemSetData(xdr, buf, len);
+
+ /* XXXbe should magic mismatch be error, or false return value? */
+ ok = js_XDRScript(xdr, &script, &hasMagic);
+ if (!ok)
+ goto out;
+ if (!hasMagic) {
+ *rval = JSVAL_FALSE;
+ goto out;
+ }
+
+ JS_LOCK_OBJ(cx, obj);
+ execDepth = GetScriptExecDepth(cx, obj);
+
+ /*
+ * execDepth must be 0 to allow compilation here, otherwise the JSScript
+ * struct can be released while running.
+ */
+ if (execDepth > 0) {
+ JS_UNLOCK_OBJ(cx, obj);
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_COMPILE_EXECED_SCRIPT);
+ goto out;
+ }
+
+ /* Swap script for obj's old script, if any. */
+ v = LOCKED_OBJ_GET_SLOT(obj, JSSLOT_PRIVATE);
+ oldscript = !JSVAL_IS_VOID(v) ? (JSScript *) JSVAL_TO_PRIVATE(v) : NULL;
+ LOCKED_OBJ_SET_SLOT(obj, JSSLOT_PRIVATE, PRIVATE_TO_JSVAL(script));
+ JS_UNLOCK_OBJ(cx, obj);
+
+ if (oldscript)
+ js_DestroyScript(cx, oldscript);
+
+ script->object = obj;
+ js_CallNewScriptHook(cx, script, NULL);
+
+out:
+ /*
+ * We reset the buffer to be NULL so that it doesn't free the chars
+ * memory owned by str (argv[0]).
+ */
+ JS_XDRMemSetData(xdr, NULL, 0);
+ JS_XDRDestroy(xdr);
+#if IS_BIG_ENDIAN
+ JS_free(cx, buf);
+#endif
+ *rval = JSVAL_TRUE;
+ return ok;
+}
+
+static const char js_thaw_str[] = "thaw";
+
+#endif /* JS_HAS_XDR_FREEZE_THAW */
+#endif /* JS_HAS_XDR */
+
+static JSFunctionSpec script_methods[] = {
+#if JS_HAS_TOSOURCE
+ {js_toSource_str, script_toSource, 0,0,0},
+#endif
+ {js_toString_str, script_toString, 0,0,0},
+ {"compile", script_compile, 2,0,0},
+ {"exec", script_exec, 1,0,0},
+#if JS_HAS_XDR_FREEZE_THAW
+ {"freeze", script_freeze, 0,0,0},
+ {js_thaw_str, script_thaw, 1,0,0},
+#endif /* JS_HAS_XDR_FREEZE_THAW */
+ {0,0,0,0,0}
+};
+
+#endif /* JS_HAS_SCRIPT_OBJECT */
+
+static void
+script_finalize(JSContext *cx, JSObject *obj)
+{
+ JSScript *script;
+
+ script = (JSScript *) JS_GetPrivate(cx, obj);
+ if (script)
+ js_DestroyScript(cx, script);
+}
+
+static JSBool
+script_call(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+#if JS_HAS_SCRIPT_OBJECT
+ return script_exec(cx, JSVAL_TO_OBJECT(argv[-2]), argc, argv, rval);
+#else
+ return JS_FALSE;
+#endif
+}
+
+static uint32
+script_mark(JSContext *cx, JSObject *obj, void *arg)
+{
+ JSScript *script;
+
+ script = (JSScript *) JS_GetPrivate(cx, obj);
+ if (script)
+ js_MarkScript(cx, script);
+ return 0;
+}
+
+#if !JS_HAS_SCRIPT_OBJECT
+const char js_Script_str[] = "Script";
+
+#define JSProto_Script JSProto_Object
+#endif
+
+JS_FRIEND_DATA(JSClass) js_ScriptClass = {
+ js_Script_str,
+ JSCLASS_HAS_PRIVATE | JSCLASS_HAS_CACHED_PROTO(JSProto_Script) |
+ JSCLASS_HAS_RESERVED_SLOTS(1),
+ JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
+ JS_EnumerateStub, JS_ResolveStub, JS_ConvertStub, script_finalize,
+ NULL, NULL, script_call, NULL,/*XXXbe xdr*/
+ NULL, NULL, script_mark, 0
+};
+
+#if JS_HAS_SCRIPT_OBJECT
+
+static JSBool
+Script(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ /* If not constructing, replace obj with a new Script object. */
+ if (!(cx->fp->flags & JSFRAME_CONSTRUCTING)) {
+ obj = js_NewObject(cx, &js_ScriptClass, NULL, NULL);
+ if (!obj)
+ return JS_FALSE;
+
+ /*
+ * script_compile does not use rval to root its temporaries
+ * so we can use it to root obj.
+ */
+ *rval = OBJECT_TO_JSVAL(obj);
+ }
+
+ if (!JS_SetReservedSlot(cx, obj, 0, INT_TO_JSVAL(0)))
+ return JS_FALSE;
+
+ return script_compile(cx, obj, argc, argv, rval);
+}
+
+#if JS_HAS_XDR_FREEZE_THAW
+
+static JSBool
+script_static_thaw(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ obj = js_NewObject(cx, &js_ScriptClass, NULL, NULL);
+ if (!obj)
+ return JS_FALSE;
+ if (!script_thaw(cx, obj, argc, argv, rval))
+ return JS_FALSE;
+ *rval = OBJECT_TO_JSVAL(obj);
+ return JS_TRUE;
+}
+
+static JSFunctionSpec script_static_methods[] = {
+ {js_thaw_str, script_static_thaw, 1,0,0},
+ {0,0,0,0,0}
+};
+
+#else /* !JS_HAS_XDR_FREEZE_THAW */
+
+#define script_static_methods NULL
+
+#endif /* !JS_HAS_XDR_FREEZE_THAW */
+
+JSObject *
+js_InitScriptClass(JSContext *cx, JSObject *obj)
+{
+ return JS_InitClass(cx, obj, NULL, &js_ScriptClass, Script, 1,
+ NULL, script_methods, NULL, script_static_methods);
+}
+
+#endif /* JS_HAS_SCRIPT_OBJECT */
+
+/*
+ * Shared script filename management.
+ */
+JS_STATIC_DLL_CALLBACK(int)
+js_compare_strings(const void *k1, const void *k2)
+{
+ return strcmp(k1, k2) == 0;
+}
+
+/* Shared with jsatom.c to save code space. */
+extern void * JS_DLL_CALLBACK
+js_alloc_table_space(void *priv, size_t size);
+
+extern void JS_DLL_CALLBACK
+js_free_table_space(void *priv, void *item);
+
+/* NB: This struct overlays JSHashEntry -- see jshash.h, do not reorganize. */
+typedef struct ScriptFilenameEntry {
+ JSHashEntry *next; /* hash chain linkage */
+ JSHashNumber keyHash; /* key hash function result */
+ const void *key; /* ptr to filename, below */
+ uint32 flags; /* user-defined filename prefix flags */
+ JSPackedBool mark; /* GC mark flag */
+ char filename[3]; /* two or more bytes, NUL-terminated */
+} ScriptFilenameEntry;
+
+JS_STATIC_DLL_CALLBACK(JSHashEntry *)
+js_alloc_sftbl_entry(void *priv, const void *key)
+{
+ size_t nbytes = offsetof(ScriptFilenameEntry, filename) + strlen(key) + 1;
+
+ return (JSHashEntry *) malloc(JS_MAX(nbytes, sizeof(JSHashEntry)));
+}
+
+JS_STATIC_DLL_CALLBACK(void)
+js_free_sftbl_entry(void *priv, JSHashEntry *he, uintN flag)
+{
+ if (flag != HT_FREE_ENTRY)
+ return;
+ free(he);
+}
+
+static JSHashAllocOps sftbl_alloc_ops = {
+ js_alloc_table_space, js_free_table_space,
+ js_alloc_sftbl_entry, js_free_sftbl_entry
+};
+
+JSBool
+js_InitRuntimeScriptState(JSRuntime *rt)
+{
+#ifdef JS_THREADSAFE
+ JS_ASSERT(!rt->scriptFilenameTableLock);
+ rt->scriptFilenameTableLock = JS_NEW_LOCK();
+ if (!rt->scriptFilenameTableLock)
+ return JS_FALSE;
+#endif
+ JS_ASSERT(!rt->scriptFilenameTable);
+ rt->scriptFilenameTable =
+ JS_NewHashTable(16, JS_HashString, js_compare_strings, NULL,
+ &sftbl_alloc_ops, NULL);
+ if (!rt->scriptFilenameTable) {
+ js_FinishRuntimeScriptState(rt); /* free lock if threadsafe */
+ return JS_FALSE;
+ }
+ JS_INIT_CLIST(&rt->scriptFilenamePrefixes);
+ return JS_TRUE;
+}
+
+typedef struct ScriptFilenamePrefix {
+ JSCList links; /* circular list linkage for easy deletion */
+ const char *name; /* pointer to pinned ScriptFilenameEntry string */
+ size_t length; /* prefix string length, precomputed */
+ uint32 flags; /* user-defined flags to inherit from this prefix */
+} ScriptFilenamePrefix;
+
+void
+js_FinishRuntimeScriptState(JSRuntime *rt)
+{
+ if (rt->scriptFilenameTable) {
+ JS_HashTableDestroy(rt->scriptFilenameTable);
+ rt->scriptFilenameTable = NULL;
+ }
+#ifdef JS_THREADSAFE
+ if (rt->scriptFilenameTableLock) {
+ JS_DESTROY_LOCK(rt->scriptFilenameTableLock);
+ rt->scriptFilenameTableLock = NULL;
+ }
+#endif
+}
+
+void
+js_FreeRuntimeScriptState(JSRuntime *rt)
+{
+ ScriptFilenamePrefix *sfp;
+
+ if (!rt->scriptFilenameTable)
+ return;
+
+ while (!JS_CLIST_IS_EMPTY(&rt->scriptFilenamePrefixes)) {
+ sfp = (ScriptFilenamePrefix *) rt->scriptFilenamePrefixes.next;
+ JS_REMOVE_LINK(&sfp->links);
+ free(sfp);
+ }
+ js_FinishRuntimeScriptState(rt);
+}
+
+#ifdef DEBUG_brendan
+#define DEBUG_SFTBL
+#endif
+#ifdef DEBUG_SFTBL
+size_t sftbl_savings = 0;
+#endif
+
+static ScriptFilenameEntry *
+SaveScriptFilename(JSRuntime *rt, const char *filename, uint32 flags)
+{
+ JSHashTable *table;
+ JSHashNumber hash;
+ JSHashEntry **hep;
+ ScriptFilenameEntry *sfe;
+ size_t length;
+ JSCList *head, *link;
+ ScriptFilenamePrefix *sfp;
+
+ table = rt->scriptFilenameTable;
+ hash = JS_HashString(filename);
+ hep = JS_HashTableRawLookup(table, hash, filename);
+ sfe = (ScriptFilenameEntry *) *hep;
+#ifdef DEBUG_SFTBL
+ if (sfe)
+ sftbl_savings += strlen(sfe->filename);
+#endif
+
+ if (!sfe) {
+ sfe = (ScriptFilenameEntry *)
+ JS_HashTableRawAdd(table, hep, hash, filename, NULL);
+ if (!sfe)
+ return NULL;
+ sfe->key = strcpy(sfe->filename, filename);
+ sfe->flags = 0;
+ sfe->mark = JS_FALSE;
+ }
+
+ /* If saving a prefix, add it to the set in rt->scriptFilenamePrefixes. */
+ if (flags != 0) {
+ /* Search in case filename was saved already; we must be idempotent. */
+ sfp = NULL;
+ length = strlen(filename);
+ for (head = link = &rt->scriptFilenamePrefixes;
+ link->next != head;
+ link = link->next) {
+ /* Lag link behind sfp to insert in non-increasing length order. */
+ sfp = (ScriptFilenamePrefix *) link->next;
+ if (!strcmp(sfp->name, filename))
+ break;
+ if (sfp->length <= length) {
+ sfp = NULL;
+ break;
+ }
+ sfp = NULL;
+ }
+
+ if (!sfp) {
+ /* No such prefix: add one now. */
+ sfp = (ScriptFilenamePrefix *) malloc(sizeof(ScriptFilenamePrefix));
+ if (!sfp)
+ return NULL;
+ JS_INSERT_AFTER(&sfp->links, link);
+ sfp->name = sfe->filename;
+ sfp->length = length;
+ sfp->flags = 0;
+ }
+
+ /*
+ * Accumulate flags in both sfe and sfp: sfe for later access from the
+ * JS_GetScriptedCallerFilenameFlags debug-API, and sfp so that longer
+ * filename entries can inherit by prefix.
+ */
+ sfe->flags |= flags;
+ sfp->flags |= flags;
+ }
+
+ return sfe;
+}
+
+const char *
+js_SaveScriptFilename(JSContext *cx, const char *filename)
+{
+ JSRuntime *rt;
+ ScriptFilenameEntry *sfe;
+ JSCList *head, *link;
+ ScriptFilenamePrefix *sfp;
+
+ rt = cx->runtime;
+ JS_ACQUIRE_LOCK(rt->scriptFilenameTableLock);
+ sfe = SaveScriptFilename(rt, filename, 0);
+ if (!sfe) {
+ JS_RELEASE_LOCK(rt->scriptFilenameTableLock);
+ JS_ReportOutOfMemory(cx);
+ return NULL;
+ }
+
+ /*
+ * Try to inherit flags by prefix. We assume there won't be more than a
+ * few (dozen! ;-) prefixes, so linear search is tolerable.
+ * XXXbe every time I've assumed that in the JS engine, I've been wrong!
+ */
+ for (head = &rt->scriptFilenamePrefixes, link = head->next;
+ link != head;
+ link = link->next) {
+ sfp = (ScriptFilenamePrefix *) link;
+ if (!strncmp(sfp->name, filename, sfp->length)) {
+ sfe->flags |= sfp->flags;
+ break;
+ }
+ }
+ JS_RELEASE_LOCK(rt->scriptFilenameTableLock);
+ return sfe->filename;
+}
+
+const char *
+js_SaveScriptFilenameRT(JSRuntime *rt, const char *filename, uint32 flags)
+{
+ ScriptFilenameEntry *sfe;
+
+ /* This may be called very early, via the jsdbgapi.h entry point. */
+ if (!rt->scriptFilenameTable && !js_InitRuntimeScriptState(rt))
+ return NULL;
+
+ JS_ACQUIRE_LOCK(rt->scriptFilenameTableLock);
+ sfe = SaveScriptFilename(rt, filename, flags);
+ JS_RELEASE_LOCK(rt->scriptFilenameTableLock);
+ if (!sfe)
+ return NULL;
+
+ return sfe->filename;
+}
+
+/*
+ * Back up from a saved filename by its offset within its hash table entry.
+ */
+#define FILENAME_TO_SFE(fn) \
+ ((ScriptFilenameEntry *) ((fn) - offsetof(ScriptFilenameEntry, filename)))
+
+/*
+ * The sfe->key member, redundant given sfe->filename but required by the old
+ * jshash.c code, here gives us a useful sanity check. This assertion will
+ * very likely botch if someone tries to mark a string that wasn't allocated
+ * as an sfe->filename.
+ */
+#define ASSERT_VALID_SFE(sfe) JS_ASSERT((sfe)->key == (sfe)->filename)
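FILENAME_TO_SFE is the usual "recover the containing struct from a pointer to one of its members" idiom, possible here because the filename characters live inside the entry itself. A stand-alone sketch with a hypothetical Entry type (not the real ScriptFilenameEntry):

#include <assert.h>
#include <stddef.h>
#include <string.h>

typedef struct Entry {
    int flags;
    char name[16];
} Entry;

#define NAME_TO_ENTRY(n) ((Entry *) ((n) - offsetof(Entry, name)))

int main(void)
{
    Entry e;
    char *name;

    e.flags = 7;
    strcpy(e.name, "foo.js");
    name = e.name;                          /* only the member pointer escapes */

    assert(NAME_TO_ENTRY(name) == &e);      /* back up to the whole entry */
    assert(NAME_TO_ENTRY(name)->flags == 7);
    return 0;
}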
+
+uint32
+js_GetScriptFilenameFlags(const char *filename)
+{
+ ScriptFilenameEntry *sfe;
+
+ sfe = FILENAME_TO_SFE(filename);
+ ASSERT_VALID_SFE(sfe);
+ return sfe->flags;
+}
+
+void
+js_MarkScriptFilename(const char *filename)
+{
+ ScriptFilenameEntry *sfe;
+
+ sfe = FILENAME_TO_SFE(filename);
+ ASSERT_VALID_SFE(sfe);
+ sfe->mark = JS_TRUE;
+}
+
+JS_STATIC_DLL_CALLBACK(intN)
+js_script_filename_marker(JSHashEntry *he, intN i, void *arg)
+{
+ ScriptFilenameEntry *sfe = (ScriptFilenameEntry *) he;
+
+ sfe->mark = JS_TRUE;
+ return HT_ENUMERATE_NEXT;
+}
+
+void
+js_MarkScriptFilenames(JSRuntime *rt, JSBool keepAtoms)
+{
+ JSCList *head, *link;
+ ScriptFilenamePrefix *sfp;
+
+ if (!rt->scriptFilenameTable)
+ return;
+
+ if (keepAtoms) {
+ JS_HashTableEnumerateEntries(rt->scriptFilenameTable,
+ js_script_filename_marker,
+ rt);
+ }
+ for (head = &rt->scriptFilenamePrefixes, link = head->next;
+ link != head;
+ link = link->next) {
+ sfp = (ScriptFilenamePrefix *) link;
+ js_MarkScriptFilename(sfp->name);
+ }
+}
+
+JS_STATIC_DLL_CALLBACK(intN)
+js_script_filename_sweeper(JSHashEntry *he, intN i, void *arg)
+{
+ ScriptFilenameEntry *sfe = (ScriptFilenameEntry *) he;
+
+ if (!sfe->mark)
+ return HT_ENUMERATE_REMOVE;
+ sfe->mark = JS_FALSE;
+ return HT_ENUMERATE_NEXT;
+}
+
+void
+js_SweepScriptFilenames(JSRuntime *rt)
+{
+ if (!rt->scriptFilenameTable)
+ return;
+
+ JS_HashTableEnumerateEntries(rt->scriptFilenameTable,
+ js_script_filename_sweeper,
+ rt);
+#ifdef DEBUG_notme
+#ifdef DEBUG_SFTBL
+ printf("script filename table savings so far: %u\n", sftbl_savings);
+#endif
+#endif
+}
+
+JSScript *
+js_NewScript(JSContext *cx, uint32 length, uint32 nsrcnotes, uint32 ntrynotes)
+{
+ JSScript *script;
+
+ /* Round up source note count to align script->trynotes for its type. */
+ if (ntrynotes)
+ nsrcnotes += JSTRYNOTE_ALIGNMASK;
+ script = (JSScript *) JS_malloc(cx,
+ sizeof(JSScript) +
+ length * sizeof(jsbytecode) +
+ nsrcnotes * sizeof(jssrcnote) +
+ ntrynotes * sizeof(JSTryNote));
+ if (!script)
+ return NULL;
+ memset(script, 0, sizeof(JSScript));
+ script->code = script->main = (jsbytecode *)(script + 1);
+ script->length = length;
+ script->version = cx->version;
+ if (ntrynotes) {
+ script->trynotes = (JSTryNote *)
+ ((jsword)(SCRIPT_NOTES(script) + nsrcnotes) &
+ ~(jsword)JSTRYNOTE_ALIGNMASK);
+ memset(script->trynotes, 0, ntrynotes * sizeof(JSTryNote));
+ }
+ return script;
+}
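js_NewScript packs the JSScript header, bytecode, source notes, and trynotes into one allocation, padding the note count by JSTRYNOTE_ALIGNMASK so the trynote pointer can be masked down to an aligned address without overlapping the notes. A sketch of that rounding idiom in isolation, with made-up numbers:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uintptr_t mask = sizeof(double) - 1;   /* stand-in for JSTRYNOTE_ALIGNMASK */
    uintptr_t raw = 1003;                  /* pretend end-of-srcnotes address  */
    uintptr_t aligned;

    /* Reserve 'mask' extra bytes past the notes, then mask the end address
       down: the result is aligned and still at or beyond the real end. */
    aligned = (raw + mask) & ~mask;

    assert(aligned >= raw);
    assert((aligned & mask) == 0);
    return 0;
}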
+
+JS_FRIEND_API(JSScript *)
+js_NewScriptFromCG(JSContext *cx, JSCodeGenerator *cg, JSFunction *fun)
+{
+ uint32 mainLength, prologLength, nsrcnotes, ntrynotes;
+ JSScript *script;
+ const char *filename;
+
+ mainLength = CG_OFFSET(cg);
+ prologLength = CG_PROLOG_OFFSET(cg);
+ CG_COUNT_FINAL_SRCNOTES(cg, nsrcnotes);
+ CG_COUNT_FINAL_TRYNOTES(cg, ntrynotes);
+ script = js_NewScript(cx, prologLength + mainLength, nsrcnotes, ntrynotes);
+ if (!script)
+ return NULL;
+
+ /* Now that we have script, error control flow must go to label bad. */
+ script->main += prologLength;
+ memcpy(script->code, CG_PROLOG_BASE(cg), prologLength * sizeof(jsbytecode));
+ memcpy(script->main, CG_BASE(cg), mainLength * sizeof(jsbytecode));
+ script->numGlobalVars = cg->treeContext.numGlobalVars;
+ if (!js_InitAtomMap(cx, &script->atomMap, &cg->atomList))
+ goto bad;
+
+ filename = cg->filename;
+ if (filename) {
+ script->filename = js_SaveScriptFilename(cx, filename);
+ if (!script->filename)
+ goto bad;
+ }
+ script->lineno = cg->firstLine;
+ script->depth = cg->maxStackDepth;
+ if (cg->principals) {
+ script->principals = cg->principals;
+ JSPRINCIPALS_HOLD(cx, script->principals);
+ }
+
+ if (!js_FinishTakingSrcNotes(cx, cg, SCRIPT_NOTES(script)))
+ goto bad;
+ if (script->trynotes)
+ js_FinishTakingTryNotes(cx, cg, script->trynotes);
+
+ /*
+ * We initialize fun->u.script to be the script constructed above
+ * so that the debugger has a valid FUN_SCRIPT(fun).
+ */
+ if (fun) {
+ JS_ASSERT(FUN_INTERPRETED(fun) && !FUN_SCRIPT(fun));
+ fun->u.i.script = script;
+ if (cg->treeContext.flags & TCF_FUN_HEAVYWEIGHT)
+ fun->flags |= JSFUN_HEAVYWEIGHT;
+ }
+
+ /* Tell the debugger about this compiled script. */
+ js_CallNewScriptHook(cx, script, fun);
+ return script;
+
+bad:
+ js_DestroyScript(cx, script);
+ return NULL;
+}
+
+JS_FRIEND_API(void)
+js_CallNewScriptHook(JSContext *cx, JSScript *script, JSFunction *fun)
+{
+ JSRuntime *rt;
+ JSNewScriptHook hook;
+
+ rt = cx->runtime;
+ hook = rt->newScriptHook;
+ if (hook) {
+ JS_KEEP_ATOMS(rt);
+ hook(cx, script->filename, script->lineno, script, fun,
+ rt->newScriptHookData);
+ JS_UNKEEP_ATOMS(rt);
+ }
+}
+
+JS_FRIEND_API(void)
+js_CallDestroyScriptHook(JSContext *cx, JSScript *script)
+{
+ JSRuntime *rt;
+ JSDestroyScriptHook hook;
+
+ rt = cx->runtime;
+ hook = rt->destroyScriptHook;
+ if (hook)
+ hook(cx, script, rt->destroyScriptHookData);
+}
+
+void
+js_DestroyScript(JSContext *cx, JSScript *script)
+{
+ js_CallDestroyScriptHook(cx, script);
+
+ JS_ClearScriptTraps(cx, script);
+ js_FreeAtomMap(cx, &script->atomMap);
+ if (script->principals)
+ JSPRINCIPALS_DROP(cx, script->principals);
+ if (JS_GSN_CACHE(cx).script == script)
+ JS_CLEAR_GSN_CACHE(cx);
+ JS_free(cx, script);
+}
+
+void
+js_MarkScript(JSContext *cx, JSScript *script)
+{
+ JSAtomMap *map;
+ uintN i, length;
+ JSAtom **vector;
+
+ map = &script->atomMap;
+ length = map->length;
+ vector = map->vector;
+ for (i = 0; i < length; i++)
+ GC_MARK_ATOM(cx, vector[i]);
+
+ if (script->filename)
+ js_MarkScriptFilename(script->filename);
+}
+
+typedef struct GSNCacheEntry {
+ JSDHashEntryHdr hdr;
+ jsbytecode *pc;
+ jssrcnote *sn;
+} GSNCacheEntry;
+
+#define GSN_CACHE_THRESHOLD 100
+
+jssrcnote *
+js_GetSrcNoteCached(JSContext *cx, JSScript *script, jsbytecode *pc)
+{
+ ptrdiff_t target, offset;
+ GSNCacheEntry *entry;
+ jssrcnote *sn, *result;
+ uintN nsrcnotes;
+
+ target = PTRDIFF(pc, script->code, jsbytecode);
+ if ((uint32)target >= script->length)
+ return NULL;
+
+ if (JS_GSN_CACHE(cx).script == script) {
+ JS_METER_GSN_CACHE(cx, hits);
+ entry = (GSNCacheEntry *)
+ JS_DHashTableOperate(&JS_GSN_CACHE(cx).table, pc,
+ JS_DHASH_LOOKUP);
+ return entry->sn;
+ }
+
+ JS_METER_GSN_CACHE(cx, misses);
+ offset = 0;
+ for (sn = SCRIPT_NOTES(script); ; sn = SN_NEXT(sn)) {
+ if (SN_IS_TERMINATOR(sn)) {
+ result = NULL;
+ break;
+ }
+ offset += SN_DELTA(sn);
+ if (offset == target && SN_IS_GETTABLE(sn)) {
+ result = sn;
+ break;
+ }
+ }
+
+ if (JS_GSN_CACHE(cx).script != script &&
+ script->length >= GSN_CACHE_THRESHOLD) {
+ JS_CLEAR_GSN_CACHE(cx);
+ nsrcnotes = 0;
+ for (sn = SCRIPT_NOTES(script); !SN_IS_TERMINATOR(sn);
+ sn = SN_NEXT(sn)) {
+ if (SN_IS_GETTABLE(sn))
+ ++nsrcnotes;
+ }
+ if (!JS_DHashTableInit(&JS_GSN_CACHE(cx).table, JS_DHashGetStubOps(),
+ NULL, sizeof(GSNCacheEntry), nsrcnotes)) {
+ JS_GSN_CACHE(cx).table.ops = NULL;
+ } else {
+ pc = script->code;
+ for (sn = SCRIPT_NOTES(script); !SN_IS_TERMINATOR(sn);
+ sn = SN_NEXT(sn)) {
+ pc += SN_DELTA(sn);
+ if (SN_IS_GETTABLE(sn)) {
+ entry = (GSNCacheEntry *)
+ JS_DHashTableOperate(&JS_GSN_CACHE(cx).table, pc,
+ JS_DHASH_ADD);
+ entry->pc = pc;
+ entry->sn = sn;
+ }
+ }
+ JS_GSN_CACHE(cx).script = script;
+ JS_METER_GSN_CACHE(cx, fills);
+ }
+ }
+
+ return result;
+}
+
+uintN
+js_PCToLineNumber(JSContext *cx, JSScript *script, jsbytecode *pc)
+{
+ JSAtom *atom;
+ JSFunction *fun;
+ uintN lineno;
+ ptrdiff_t offset, target;
+ jssrcnote *sn;
+ JSSrcNoteType type;
+
+ /* Cope with JSStackFrame.pc value prior to entering js_Interpret. */
+ if (!pc)
+ return 0;
+
+ /*
+ * Special case: function definition needs no line number note because
+ * the function's script contains its starting line number.
+ */
+ if (*pc == JSOP_DEFFUN ||
+ (*pc == JSOP_LITOPX && pc[1 + LITERAL_INDEX_LEN] == JSOP_DEFFUN)) {
+ atom = js_GetAtom(cx, &script->atomMap,
+ (*pc == JSOP_DEFFUN)
+ ? GET_ATOM_INDEX(pc)
+ : GET_LITERAL_INDEX(pc));
+ fun = (JSFunction *) JS_GetPrivate(cx, ATOM_TO_OBJECT(atom));
+ JS_ASSERT(FUN_INTERPRETED(fun));
+ return fun->u.i.script->lineno;
+ }
+
+ /*
+ * General case: walk through source notes accumulating their deltas,
+ * keeping track of line-number notes, until we pass the note for pc's
+ * offset within script->code.
+ */
+ lineno = script->lineno;
+ offset = 0;
+ target = PTRDIFF(pc, script->code, jsbytecode);
+ for (sn = SCRIPT_NOTES(script); !SN_IS_TERMINATOR(sn); sn = SN_NEXT(sn)) {
+ offset += SN_DELTA(sn);
+ type = (JSSrcNoteType) SN_TYPE(sn);
+ if (type == SRC_SETLINE) {
+ if (offset <= target)
+ lineno = (uintN) js_GetSrcNoteOffset(sn, 0);
+ } else if (type == SRC_NEWLINE) {
+ if (offset <= target)
+ lineno++;
+ }
+ if (offset > target)
+ break;
+ }
+ return lineno;
+}
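
The walk above recovers a line number by accumulating source-note deltas until it passes the note that covers pc's offset. A minimal sketch of the same accumulation, assuming a simplified note record (an explicit struct with hypothetical names instead of the packed jssrcnote encoding):

    /* Sketch: simplified model of the pc -> line walk in js_PCToLineNumber.
     * Each note carries a bytecode-offset delta; SETLINE notes reset the
     * line, NEWLINE notes bump it, and the walk stops once the accumulated
     * offset passes the target. */
    #include <stddef.h>

    typedef enum { NOTE_OTHER, NOTE_SETLINE, NOTE_NEWLINE, NOTE_END } NoteType;

    typedef struct {
        ptrdiff_t delta;    /* offset delta from the previous note */
        NoteType type;
        unsigned setline;   /* line operand, used only by NOTE_SETLINE */
    } Note;

    static unsigned
    pc_to_line(const Note *note, unsigned startLine, ptrdiff_t target)
    {
        unsigned lineno = startLine;
        ptrdiff_t offset = 0;

        for (; note->type != NOTE_END; note++) {
            offset += note->delta;
            if (note->type == NOTE_SETLINE) {
                if (offset <= target)
                    lineno = note->setline;
            } else if (note->type == NOTE_NEWLINE) {
                if (offset <= target)
                    lineno++;
            }
            if (offset > target)
                break;
        }
        return lineno;
    }
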
+
+/* The line number limit is the same as the jssrcnote offset limit. */
+#define SN_LINE_LIMIT (SN_3BYTE_OFFSET_FLAG << 16)
+
+jsbytecode *
+js_LineNumberToPC(JSScript *script, uintN target)
+{
+ ptrdiff_t offset, best;
+ uintN lineno, bestdiff, diff;
+ jssrcnote *sn;
+ JSSrcNoteType type;
+
+ offset = 0;
+ best = -1;
+ lineno = script->lineno;
+ bestdiff = SN_LINE_LIMIT;
+ for (sn = SCRIPT_NOTES(script); !SN_IS_TERMINATOR(sn); sn = SN_NEXT(sn)) {
+ if (lineno == target)
+ goto out;
+ if (lineno > target) {
+ diff = lineno - target;
+ if (diff < bestdiff) {
+ bestdiff = diff;
+ best = offset;
+ }
+ }
+ offset += SN_DELTA(sn);
+ type = (JSSrcNoteType) SN_TYPE(sn);
+ if (type == SRC_SETLINE) {
+ lineno = (uintN) js_GetSrcNoteOffset(sn, 0);
+ } else if (type == SRC_NEWLINE) {
+ lineno++;
+ }
+ }
+ if (best >= 0)
+ offset = best;
+out:
+ return script->code + offset;
+}
+
+JS_FRIEND_API(uintN)
+js_GetScriptLineExtent(JSScript *script)
+{
+ uintN lineno;
+ jssrcnote *sn;
+ JSSrcNoteType type;
+
+ lineno = script->lineno;
+ for (sn = SCRIPT_NOTES(script); !SN_IS_TERMINATOR(sn); sn = SN_NEXT(sn)) {
+ type = (JSSrcNoteType) SN_TYPE(sn);
+ if (type == SRC_SETLINE) {
+ lineno = (uintN) js_GetSrcNoteOffset(sn, 0);
+ } else if (type == SRC_NEWLINE) {
+ lineno++;
+ }
+ }
+ return 1 + lineno - script->lineno;
+}
+
+#if JS_HAS_GENERATORS
+
+jsbytecode *
+js_FindFinallyHandler(JSScript *script, jsbytecode *pc)
+{
+ JSTryNote *tn;
+ ptrdiff_t off;
+ JSOp op2;
+
+ tn = script->trynotes;
+ if (!tn)
+ return NULL;
+
+ off = pc - script->main;
+ if (off < 0)
+ return NULL;
+
+ JS_ASSERT(tn->catchStart != 0);
+ do {
+ if ((jsuword)(off - tn->start) < (jsuword)tn->length) {
+ /*
+ * We have a handler: is it the finally one, or a catch handler?
+ *
+ * Catch bytecode begins with: JSOP_SETSP JSOP_ENTERBLOCK
+ * Finally bytecode begins with: JSOP_SETSP JSOP_(GOSUB|EXCEPTION)
+ */
+ pc = script->main + tn->catchStart;
+ JS_ASSERT(*pc == JSOP_SETSP);
+ op2 = pc[JSOP_SETSP_LENGTH];
+ if (op2 != JSOP_ENTERBLOCK) {
+ JS_ASSERT(op2 == JSOP_GOSUB || op2 == JSOP_EXCEPTION);
+ return pc;
+ }
+ }
+ } while ((++tn)->catchStart != 0);
+ return NULL;
+}
+
+#endif
diff --git a/third_party/js-1.7/jsscript.h b/third_party/js-1.7/jsscript.h
new file mode 100644
index 0000000..18ad373
--- /dev/null
+++ b/third_party/js-1.7/jsscript.h
@@ -0,0 +1,225 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sw=4 et tw=78:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsscript_h___
+#define jsscript_h___
+/*
+ * JS script descriptor.
+ */
+#include "jsatom.h"
+#include "jsprvtd.h"
+
+JS_BEGIN_EXTERN_C
+
+/*
+ * Exception handling runtime information.
+ *
+ * All fields except length are code offsets relative to the main entry point
+ * of the script. If script->trynotes is not null, it points to a vector of
+ * these structs terminated by one with catchStart == 0.
+ */
+struct JSTryNote {
+ ptrdiff_t start; /* start of try statement */
+ ptrdiff_t length; /* count of try statement bytecodes */
+ ptrdiff_t catchStart; /* start of catch block (0 if end) */
+};
+
+#define JSTRYNOTE_GRAIN sizeof(ptrdiff_t)
+#define JSTRYNOTE_ALIGNMASK (JSTRYNOTE_GRAIN - 1)
+
+struct JSScript {
+ jsbytecode *code; /* bytecodes and their immediate operands */
+ uint32 length; /* length of code vector */
+ jsbytecode *main; /* main entry point, after predef'ing prolog */
+ uint16 version; /* JS version under which script was compiled */
+ uint16 numGlobalVars; /* declared global var/const/function count */
+ JSAtomMap atomMap; /* maps immediate index to literal struct */
+ const char *filename; /* source filename or null */
+ uintN lineno; /* base line number of script */
+ uintN depth; /* maximum stack depth in slots */
+ JSTryNote *trynotes; /* exception table for this script */
+ JSPrincipals *principals; /* principals for this script */
+ JSObject *object; /* optional Script-class object wrapper */
+};
+
+/* No need to store script->notes now that it is allocated right after code. */
+#define SCRIPT_NOTES(script) ((jssrcnote*)((script)->code+(script)->length))
+
+#define SCRIPT_FIND_CATCH_START(script, pc, catchpc) \
+ JS_BEGIN_MACRO \
+ JSTryNote *tn_ = (script)->trynotes; \
+ jsbytecode *catchpc_ = NULL; \
+ if (tn_) { \
+ ptrdiff_t off_ = PTRDIFF(pc, (script)->main, jsbytecode); \
+ if (off_ >= 0) { \
+ while ((jsuword)(off_ - tn_->start) >= (jsuword)tn_->length) \
+ ++tn_; \
+ if (tn_->catchStart) \
+ catchpc_ = (script)->main + tn_->catchStart; \
+ } \
+ } \
+ catchpc = catchpc_; \
+ JS_END_MACRO
+
+/*
+ * Find the innermost finally block that handles the given pc. This is a
+ * version of SCRIPT_FIND_CATCH_START that ignores catch blocks and is used
+ * to implement generator.close().
+ */
+jsbytecode *
+js_FindFinallyHandler(JSScript *script, jsbytecode *pc);
+
+extern JS_FRIEND_DATA(JSClass) js_ScriptClass;
+
+extern JSObject *
+js_InitScriptClass(JSContext *cx, JSObject *obj);
+
+/*
+ * On first new context in rt, initialize script runtime state, specifically
+ * the script filename table and its lock.
+ */
+extern JSBool
+js_InitRuntimeScriptState(JSRuntime *rt);
+
+/*
+ * On last context destroy for rt, if script filenames are all GC'd, free the
+ * script filename table and its lock.
+ */
+extern void
+js_FinishRuntimeScriptState(JSRuntime *rt);
+
+/*
+ * On JS_DestroyRuntime(rt), forcibly free script filename prefixes and any
+ * script filename table entries that have not been GC'd, the latter using
+ * js_FinishRuntimeScriptState.
+ *
+ * This allows script filename prefixes to outlive any context in rt.
+ */
+extern void
+js_FreeRuntimeScriptState(JSRuntime *rt);
+
+extern const char *
+js_SaveScriptFilename(JSContext *cx, const char *filename);
+
+extern const char *
+js_SaveScriptFilenameRT(JSRuntime *rt, const char *filename, uint32 flags);
+
+extern uint32
+js_GetScriptFilenameFlags(const char *filename);
+
+extern void
+js_MarkScriptFilename(const char *filename);
+
+extern void
+js_MarkScriptFilenames(JSRuntime *rt, JSBool keepAtoms);
+
+extern void
+js_SweepScriptFilenames(JSRuntime *rt);
+
+/*
+ * Two successively less primitive ways to make a new JSScript. The first
+ * does *not* call a non-null cx->runtime->newScriptHook -- only the second,
+ * js_NewScriptFromCG, calls this optional debugger hook.
+ *
+ * The js_NewScript function can't know whether the script it creates belongs
+ * to a function, or is top-level or eval code, but the debugger wants access
+ * to the newly made script's function, if any -- so callers of js_NewScript
+ * are responsible for notifying the debugger after successfully creating any
+ * kind (function or other) of new JSScript.
+ */
+extern JSScript *
+js_NewScript(JSContext *cx, uint32 length, uint32 snlength, uint32 tnlength);
+
+extern JS_FRIEND_API(JSScript *)
+js_NewScriptFromCG(JSContext *cx, JSCodeGenerator *cg, JSFunction *fun);
+
+/*
+ * New-script-hook calling is factored from js_NewScriptFromCG so that it
+ * and callers of js_XDRScript can share this code. In the case of callers
+ * of js_XDRScript, the hook should be invoked only after successful decode
+ * of any owning function (the fun parameter) or script object (null fun).
+ */
+extern JS_FRIEND_API(void)
+js_CallNewScriptHook(JSContext *cx, JSScript *script, JSFunction *fun);
+
+extern JS_FRIEND_API(void)
+js_CallDestroyScriptHook(JSContext *cx, JSScript *script);
+
+extern void
+js_DestroyScript(JSContext *cx, JSScript *script);
+
+extern void
+js_MarkScript(JSContext *cx, JSScript *script);
+
+/*
+ * To perturb as little code as possible, we introduce a js_GetSrcNote lookup
+ * cache without adding an explicit cx parameter. Thus js_GetSrcNote becomes
+ * a macro that uses cx from its calls' lexical environments.
+ */
+#define js_GetSrcNote(script,pc) js_GetSrcNoteCached(cx, script, pc)
+
+extern jssrcnote *
+js_GetSrcNoteCached(JSContext *cx, JSScript *script, jsbytecode *pc);
+
+/* XXX need cx to lock function objects declared by prolog bytecodes. */
+extern uintN
+js_PCToLineNumber(JSContext *cx, JSScript *script, jsbytecode *pc);
+
+extern jsbytecode *
+js_LineNumberToPC(JSScript *script, uintN lineno);
+
+extern JS_FRIEND_API(uintN)
+js_GetScriptLineExtent(JSScript *script);
+
+/*
+ * If magic is non-null, js_XDRScript succeeds on magic number mismatch but
+ * returns false in *magic; it reflects a match via a true *magic out param.
+ * If magic is null, js_XDRScript returns false on bad magic number errors,
+ * which it reports.
+ *
+ * NB: callers must call js_CallNewScriptHook after successful JSXDR_DECODE
+ * and subsequent set-up of owning function or script object, if any.
+ */
+extern JSBool
+js_XDRScript(JSXDRState *xdr, JSScript **scriptp, JSBool *magic);
+
+JS_END_EXTERN_C
+
+#endif /* jsscript_h___ */
diff --git a/third_party/js-1.7/jsshell.msg b/third_party/js-1.7/jsshell.msg
new file mode 100644
index 0000000..4b811ac
--- /dev/null
+++ b/third_party/js-1.7/jsshell.msg
@@ -0,0 +1,50 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ Error messages for JSShell. See js.msg for format.
+*/
+
+MSG_DEF(JSSMSG_NOT_AN_ERROR, 0, 0, JSEXN_NONE, "<Error #0 is reserved>")
+MSG_DEF(JSSMSG_CANT_OPEN, 1, 2, JSEXN_NONE, "can't open {0}: {1}")
+MSG_DEF(JSSMSG_TRAP_USAGE, 2, 0, JSEXN_NONE, "usage: trap [fun] [pc] expr")
+MSG_DEF(JSSMSG_LINE2PC_USAGE, 3, 0, JSEXN_NONE, "usage: line2pc [fun] line")
+MSG_DEF(JSSMSG_FILE_SCRIPTS_ONLY, 4, 0, JSEXN_NONE, "only works on JS scripts read from files")
+MSG_DEF(JSSMSG_UNEXPECTED_EOF, 5, 1, JSEXN_NONE, "unexpected EOF in {0}")
+MSG_DEF(JSSMSG_DOEXP_USAGE, 6, 0, JSEXN_NONE, "usage: doexp obj id")
diff --git a/third_party/js-1.7/jsstddef.h b/third_party/js-1.7/jsstddef.h
new file mode 100644
index 0000000..addaa88
--- /dev/null
+++ b/third_party/js-1.7/jsstddef.h
@@ -0,0 +1,83 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * stddef is included through this wrapper so that ptrdiff_t is first
+ * declared as a signed long instead of a signed int.
+ */
+
+#ifdef _WINDOWS
+# ifndef XP_WIN
+# define XP_WIN
+# endif
+#if defined(_WIN32) || defined(WIN32)
+# ifndef XP_WIN32
+# define XP_WIN32
+# endif
+#else
+# ifndef XP_WIN16
+# define XP_WIN16
+# endif
+#endif
+#endif
+
+#ifdef XP_WIN16
+#ifndef _PTRDIFF_T_DEFINED
+typedef long ptrdiff_t;
+
+/*
+ * The Win16 compiler treats pointer differences as 16-bit signed values.
+ * This macro allows us to treat them as 17-bit signed values, stored in
+ * a 32-bit type.
+ */
+#define PTRDIFF(p1, p2, type) \
+ ((((unsigned long)(p1)) - ((unsigned long)(p2))) / sizeof(type))
+
+#define _PTRDIFF_T_DEFINED
+#endif /*_PTRDIFF_T_DEFINED*/
+#else /*WIN16*/
+
+#define PTRDIFF(p1, p2, type) \
+ ((p1) - (p2))
+
+#endif
+
+#include <stddef.h>
+
+
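
The PTRDIFF macro above exists because the Win16 compiler truncates raw pointer differences to 16-bit signed values; subtracting unsigned long casts and dividing by the element size preserves the full difference, while every other target gets plain pointer subtraction. A minimal usage sketch, following the call pattern seen in jsscript.c above (the helper name is hypothetical):

    /* Sketch: typical PTRDIFF call pattern (cf. js_GetSrcNoteCached and
     * js_PCToLineNumber above, which apply it to jsbytecode pointers). */
    #include "jsstddef.h"

    static ptrdiff_t
    code_offset(const char *pc, const char *code)
    {
        /* Plain (pc - code) on flat-memory targets; on Win16 the macro
         * subtracts unsigned long casts and divides by sizeof(char). */
        return PTRDIFF(pc, code, char);
    }
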
diff --git a/third_party/js-1.7/jsstr.c b/third_party/js-1.7/jsstr.c
new file mode 100644
index 0000000..e38f652
--- /dev/null
+++ b/third_party/js-1.7/jsstr.c
@@ -0,0 +1,4818 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sw=4 et tw=80:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * JS string type implementation.
+ *
+ * In order to avoid unnecessary js_LockGCThing/js_UnlockGCThing calls, these
+ * native methods store strings (possibly newborn) converted from their 'this'
+ * parameter and arguments on the stack: 'this' conversions at argv[-1], arg
+ * conversions at their index (argv[0], argv[1]). This is a legitimate method
+ * of rooting things that might lose their newborn root due to subsequent GC
+ * allocations in the same native method.
+ */
+#include "jsstddef.h"
+#include <stdlib.h>
+#include <string.h>
+#include "jstypes.h"
+#include "jsutil.h" /* Added by JSIFY */
+#include "jshash.h" /* Added by JSIFY */
+#include "jsprf.h"
+#include "jsapi.h"
+#include "jsarray.h"
+#include "jsatom.h"
+#include "jsbool.h"
+#include "jscntxt.h"
+#include "jsconfig.h"
+#include "jsgc.h"
+#include "jsinterp.h"
+#include "jslock.h"
+#include "jsnum.h"
+#include "jsobj.h"
+#include "jsopcode.h"
+#include "jsregexp.h"
+#include "jsstr.h"
+
+#define JSSTRDEP_RECURSION_LIMIT 100
+
+size_t
+js_MinimizeDependentStrings(JSString *str, int level, JSString **basep)
+{
+ JSString *base;
+ size_t start, length;
+
+ JS_ASSERT(JSSTRING_IS_DEPENDENT(str));
+ base = JSSTRDEP_BASE(str);
+ start = JSSTRDEP_START(str);
+ if (JSSTRING_IS_DEPENDENT(base)) {
+ if (level < JSSTRDEP_RECURSION_LIMIT) {
+ start += js_MinimizeDependentStrings(base, level + 1, &base);
+ } else {
+ do {
+ start += JSSTRDEP_START(base);
+ base = JSSTRDEP_BASE(base);
+ } while (JSSTRING_IS_DEPENDENT(base));
+ }
+ if (start == 0) {
+ JS_ASSERT(JSSTRING_IS_PREFIX(str));
+ JSPREFIX_SET_BASE(str, base);
+ } else if (start <= JSSTRDEP_START_MASK) {
+ length = JSSTRDEP_LENGTH(str);
+ JSSTRDEP_SET_START_AND_LENGTH(str, start, length);
+ JSSTRDEP_SET_BASE(str, base);
+ }
+ }
+ *basep = base;
+ return start;
+}
+
+jschar *
+js_GetDependentStringChars(JSString *str)
+{
+ size_t start;
+ JSString *base;
+
+ start = js_MinimizeDependentStrings(str, 0, &base);
+ JS_ASSERT(!JSSTRING_IS_DEPENDENT(base));
+ JS_ASSERT(start < base->length);
+ return base->chars + start;
+}
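
js_MinimizeDependentStrings collapses a chain of dependent (shared-buffer substring) strings so that each one refers directly to a non-dependent base, and js_GetDependentStringChars then resolves the characters through that flattened chain. A minimal sketch of the same idea, assuming a simplified string record with an explicit base/start rather than SpiderMonkey's packed flag encoding (names are hypothetical):

    /* Sketch: simplified dependent-string model.  A dependent string owns no
     * characters of its own; it is (base, start, length).  Resolving its
     * characters means accumulating starts until a non-dependent base is
     * reached, as js_MinimizeDependentStrings does (it additionally rewrites
     * the string to point straight at that base). */
    #include <stddef.h>

    typedef struct SimpleStr {
        struct SimpleStr *base;   /* NULL if this string owns its chars */
        size_t start;             /* offset into base when dependent */
        size_t length;
        const char *chars;        /* valid only when base == NULL */
    } SimpleStr;

    static const char *
    simple_chars(const SimpleStr *str)
    {
        size_t start = 0;

        while (str->base) {
            start += str->start;
            str = str->base;
        }
        return str->chars + start;
    }
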
+
+jschar *
+js_GetStringChars(JSString *str)
+{
+ if (JSSTRING_IS_DEPENDENT(str) && !js_UndependString(NULL, str))
+ return NULL;
+
+ *js_GetGCThingFlags(str) &= ~GCF_MUTABLE;
+ return str->chars;
+}
+
+JSString *
+js_ConcatStrings(JSContext *cx, JSString *left, JSString *right)
+{
+ size_t rn, ln, lrdist, n;
+ jschar *rs, *ls, *s;
+ JSDependentString *ldep; /* non-null if left should become dependent */
+ JSString *str;
+
+ if (JSSTRING_IS_DEPENDENT(right)) {
+ rn = JSSTRDEP_LENGTH(right);
+ rs = JSSTRDEP_CHARS(right);
+ } else {
+ rn = right->length;
+ rs = right->chars;
+ }
+ if (rn == 0)
+ return left;
+
+ if (JSSTRING_IS_DEPENDENT(left) ||
+ !(*js_GetGCThingFlags(left) & GCF_MUTABLE)) {
+ /* We must copy if left does not own a buffer to realloc. */
+ ln = JSSTRING_LENGTH(left);
+ if (ln == 0)
+ return right;
+ ls = JSSTRING_CHARS(left);
+ s = (jschar *) JS_malloc(cx, (ln + rn + 1) * sizeof(jschar));
+ if (!s)
+ return NULL;
+ js_strncpy(s, ls, ln);
+ ldep = NULL;
+ } else {
+ /* We can realloc left's space and make it depend on our result. */
+ ln = left->length;
+ if (ln == 0)
+ return right;
+ ls = left->chars;
+ s = (jschar *) JS_realloc(cx, ls, (ln + rn + 1) * sizeof(jschar));
+ if (!s)
+ return NULL;
+
+ /* Take care: right could depend on left! */
+ lrdist = (size_t)(rs - ls);
+ if (lrdist < ln)
+ rs = s + lrdist;
+ left->chars = ls = s;
+ ldep = JSSTRDEP(left);
+ }
+
+ js_strncpy(s + ln, rs, rn);
+ n = ln + rn;
+ s[n] = 0;
+ str = js_NewString(cx, s, n, GCF_MUTABLE);
+ if (!str) {
+ /* Out of memory: clean up any space we (re-)allocated. */
+ if (!ldep) {
+ JS_free(cx, s);
+ } else {
+ s = JS_realloc(cx, ls, (ln + 1) * sizeof(jschar));
+ if (s)
+ left->chars = s;
+ }
+ } else {
+ /* Morph left into a dependent prefix if we realloc'd its buffer. */
+ if (ldep) {
+ JSPREFIX_SET_LENGTH(ldep, ln);
+ JSPREFIX_SET_BASE(ldep, str);
+#ifdef DEBUG
+ {
+ JSRuntime *rt = cx->runtime;
+ JS_RUNTIME_METER(rt, liveDependentStrings);
+ JS_RUNTIME_METER(rt, totalDependentStrings);
+ JS_LOCK_RUNTIME_VOID(rt,
+ (rt->strdepLengthSum += (double)ln,
+ rt->strdepLengthSquaredSum += (double)ln * (double)ln));
+ }
+#endif
+ }
+ }
+
+ return str;
+}
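
One subtlety in js_ConcatStrings above: when the left operand's buffer is grown in place with JS_realloc, the right operand may be a dependent string whose characters alias that same buffer, so rs is rebased onto the new block (via lrdist) before the copy. A minimal sketch of that rebasing step, assuming plain malloc'd char buffers rather than JSString, and relying, as the engine does, on the saved distance being meaningful only when the buffers actually alias:

    /* Sketch: rebasing an aliasing pointer after realloc.  If 'right' points
     * into 'left', its old address is dead once realloc moves the block, so
     * recompute it from the saved distance before copying. */
    #include <stdlib.h>
    #include <string.h>

    static char *
    concat_in_place(char *left, size_t ln, const char *right, size_t rn)
    {
        size_t lrdist = (size_t)(right - left);  /* meaningful only if aliased */
        char *s = (char *) realloc(left, ln + rn + 1);

        if (!s)
            return NULL;
        if (lrdist < ln)                 /* right aliased left's buffer */
            right = s + lrdist;
        memcpy(s + ln, right, rn);
        s[ln + rn] = '\0';
        return s;
    }
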
+
+/*
+ * May be called with null cx by js_GetStringChars, above; and by the jslock.c
+ * MAKE_STRING_IMMUTABLE file-local macro.
+ */
+const jschar *
+js_UndependString(JSContext *cx, JSString *str)
+{
+ size_t n, size;
+ jschar *s;
+
+ if (JSSTRING_IS_DEPENDENT(str)) {
+ n = JSSTRDEP_LENGTH(str);
+ size = (n + 1) * sizeof(jschar);
+ s = (jschar *) (cx ? JS_malloc(cx, size) : malloc(size));
+ if (!s)
+ return NULL;
+
+ js_strncpy(s, JSSTRDEP_CHARS(str), n);
+ s[n] = 0;
+ str->length = n;
+ str->chars = s;
+
+#ifdef DEBUG
+ if (cx) {
+ JSRuntime *rt = cx->runtime;
+ JS_RUNTIME_UNMETER(rt, liveDependentStrings);
+ JS_RUNTIME_UNMETER(rt, totalDependentStrings);
+ JS_LOCK_RUNTIME_VOID(rt,
+ (rt->strdepLengthSum -= (double)n,
+ rt->strdepLengthSquaredSum -= (double)n * (double)n));
+ }
+#endif
+ }
+
+ return str->chars;
+}
+
+/*
+ * Forward declarations for URI encode/decode and helper routines
+ */
+static JSBool
+str_decodeURI(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval);
+
+static JSBool
+str_decodeURI_Component(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval);
+
+static JSBool
+str_encodeURI(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval);
+
+static JSBool
+str_encodeURI_Component(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval);
+
+static uint32
+Utf8ToOneUcs4Char(const uint8 *utf8Buffer, int utf8Length);
+
+/*
+ * Contributions from the String class to the set of methods defined for the
+ * global object. escape and unescape used to be defined in the Mocha library,
+ * but as ECMA decided to spec them, they've been moved to the core engine
+ * and made ECMA-compliant. (Incomplete escapes are interpreted as literal
+ * characters by unescape.)
+ */
+
+/*
+ * Stuff to emulate the old libmocha escape, which took a second argument
+ * giving the type of escape to perform. Retained for compatibility, and
+ * copied here to avoid reliance on net.h, mkparse.c/NET_EscapeBytes.
+ */
+
+#define URL_XALPHAS ((uint8) 1)
+#define URL_XPALPHAS ((uint8) 2)
+#define URL_PATH ((uint8) 4)
+
+static const uint8 urlCharType[256] =
+/* Bit 0 xalpha -- the alphas
+ * Bit 1 xpalpha -- as xalpha but
+ * converts spaces to plus and plus to %20
+ * Bit 2 ... path -- as xalphas but doesn't escape '/'
+ */
+ /* 0 1 2 3 4 5 6 7 8 9 A B C D E F */
+ { 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 0x */
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 1x */
+ 0,0,0,0,0,0,0,0,0,0,7,4,0,7,7,4, /* 2x !"#$%&'()*+,-./ */
+ 7,7,7,7,7,7,7,7,7,7,0,0,0,0,0,0, /* 3x 0123456789:;<=>? */
+ 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, /* 4x @ABCDEFGHIJKLMNO */
+ 7,7,7,7,7,7,7,7,7,7,7,0,0,0,0,7, /* 5X PQRSTUVWXYZ[\]^_ */
+ 0,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, /* 6x `abcdefghijklmno */
+ 7,7,7,7,7,7,7,7,7,7,7,0,0,0,0,0, /* 7X pqrstuvwxyz{\}~ DEL */
+ 0, };
+
+/* This matches the ECMA escape set when mask is 7 (default.) */
+
+#define IS_OK(C, mask) (urlCharType[((uint8) (C))] & (mask))
+
+/* See ECMA-262 15.1.2.4. */
+JSBool
+js_str_escape(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSString *str;
+ size_t i, ni, length, newlength;
+ const jschar *chars;
+ jschar *newchars;
+ jschar ch;
+ jsint mask;
+ jsdouble d;
+ const char digits[] = {'0', '1', '2', '3', '4', '5', '6', '7',
+ '8', '9', 'A', 'B', 'C', 'D', 'E', 'F' };
+
+ mask = URL_XALPHAS | URL_XPALPHAS | URL_PATH;
+ if (argc > 1) {
+ if (!js_ValueToNumber(cx, argv[1], &d))
+ return JS_FALSE;
+ if (!JSDOUBLE_IS_FINITE(d) ||
+ (mask = (jsint)d) != d ||
+ mask & ~(URL_XALPHAS | URL_XPALPHAS | URL_PATH))
+ {
+ char numBuf[12];
+ JS_snprintf(numBuf, sizeof numBuf, "%lx", (unsigned long) mask);
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_STRING_MASK, numBuf);
+ return JS_FALSE;
+ }
+ }
+
+ str = js_ValueToString(cx, argv[0]);
+ if (!str)
+ return JS_FALSE;
+ argv[0] = STRING_TO_JSVAL(str);
+
+ chars = JSSTRING_CHARS(str);
+ length = newlength = JSSTRING_LENGTH(str);
+
+ /* Take a first pass and see how big the result string will need to be. */
+ for (i = 0; i < length; i++) {
+ if ((ch = chars[i]) < 128 && IS_OK(ch, mask))
+ continue;
+ if (ch < 256) {
+ if (mask == URL_XPALPHAS && ch == ' ')
+ continue; /* The character will be encoded as '+' */
+ newlength += 2; /* The character will be encoded as %XX */
+ } else {
+ newlength += 5; /* The character will be encoded as %uXXXX */
+ }
+
+ /*
+ * This overflow test works because newlength is incremented by at
+ * most 5 on each iteration.
+ */
+ if (newlength < length) {
+ JS_ReportOutOfMemory(cx);
+ return JS_FALSE;
+ }
+ }
+
+ if (newlength >= ~(size_t)0 / sizeof(jschar)) {
+ JS_ReportOutOfMemory(cx);
+ return JS_FALSE;
+ }
+
+ newchars = (jschar *) JS_malloc(cx, (newlength + 1) * sizeof(jschar));
+ if (!newchars)
+ return JS_FALSE;
+ for (i = 0, ni = 0; i < length; i++) {
+ if ((ch = chars[i]) < 128 && IS_OK(ch, mask)) {
+ newchars[ni++] = ch;
+ } else if (ch < 256) {
+ if (mask == URL_XPALPHAS && ch == ' ') {
+ newchars[ni++] = '+'; /* convert spaces to pluses */
+ } else {
+ newchars[ni++] = '%';
+ newchars[ni++] = digits[ch >> 4];
+ newchars[ni++] = digits[ch & 0xF];
+ }
+ } else {
+ newchars[ni++] = '%';
+ newchars[ni++] = 'u';
+ newchars[ni++] = digits[ch >> 12];
+ newchars[ni++] = digits[(ch & 0xF00) >> 8];
+ newchars[ni++] = digits[(ch & 0xF0) >> 4];
+ newchars[ni++] = digits[ch & 0xF];
+ }
+ }
+ JS_ASSERT(ni == newlength);
+ newchars[newlength] = 0;
+
+ str = js_NewString(cx, newchars, newlength, 0);
+ if (!str) {
+ JS_free(cx, newchars);
+ return JS_FALSE;
+ }
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+#undef IS_OK
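
js_str_escape above sizes its result in a first pass and fills it in a second: each input code unit contributes 1 output unit (kept as-is, or ' ' encoded as '+' under URL_XPALPHAS), 3 units for %XX, or 6 units for %uXXXX, which is why the overflow check only needs to catch a per-iteration increment of at most 5. A minimal sketch of that per-unit sizing rule, with the table lookup abstracted into parameters (the helper name is hypothetical):

    /* Sketch: output units produced per input code unit by the escape()
     * sizing pass.  'is_ok' stands for the urlCharType/IS_OK test and
     * 'plus_spaces' for the URL_XPALPHAS space-to-'+' case. */
    #include <stddef.h>

    static size_t
    escaped_units(unsigned ch, int is_ok, int plus_spaces)
    {
        if (ch < 128 && is_ok)
            return 1;                                    /* copied unchanged */
        if (ch < 256)
            return (plus_spaces && ch == ' ') ? 1 : 3;   /* '+' or %XX */
        return 6;                                        /* %uXXXX */
    }
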
+
+/* See ECMA-262 15.1.2.5 */
+static JSBool
+str_unescape(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSString *str;
+ size_t i, ni, length;
+ const jschar *chars;
+ jschar *newchars;
+ jschar ch;
+
+ str = js_ValueToString(cx, argv[0]);
+ if (!str)
+ return JS_FALSE;
+ argv[0] = STRING_TO_JSVAL(str);
+
+ chars = JSSTRING_CHARS(str);
+ length = JSSTRING_LENGTH(str);
+
+ /* Don't bother allocating less space for the new string. */
+ newchars = (jschar *) JS_malloc(cx, (length + 1) * sizeof(jschar));
+ if (!newchars)
+ return JS_FALSE;
+ ni = i = 0;
+ while (i < length) {
+ ch = chars[i++];
+ if (ch == '%') {
+ if (i + 1 < length &&
+ JS7_ISHEX(chars[i]) && JS7_ISHEX(chars[i + 1]))
+ {
+ ch = JS7_UNHEX(chars[i]) * 16 + JS7_UNHEX(chars[i + 1]);
+ i += 2;
+ } else if (i + 4 < length && chars[i] == 'u' &&
+ JS7_ISHEX(chars[i + 1]) && JS7_ISHEX(chars[i + 2]) &&
+ JS7_ISHEX(chars[i + 3]) && JS7_ISHEX(chars[i + 4]))
+ {
+ ch = (((((JS7_UNHEX(chars[i + 1]) << 4)
+ + JS7_UNHEX(chars[i + 2])) << 4)
+ + JS7_UNHEX(chars[i + 3])) << 4)
+ + JS7_UNHEX(chars[i + 4]);
+ i += 5;
+ }
+ }
+ newchars[ni++] = ch;
+ }
+ newchars[ni] = 0;
+
+ str = js_NewString(cx, newchars, ni, 0);
+ if (!str) {
+ JS_free(cx, newchars);
+ return JS_FALSE;
+ }
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+
+#if JS_HAS_UNEVAL
+static JSBool
+str_uneval(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSString *str;
+
+ str = js_ValueToSource(cx, argv[0]);
+ if (!str)
+ return JS_FALSE;
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+#endif
+
+const char js_escape_str[] = "escape";
+const char js_unescape_str[] = "unescape";
+#if JS_HAS_UNEVAL
+const char js_uneval_str[] = "uneval";
+#endif
+const char js_decodeURI_str[] = "decodeURI";
+const char js_encodeURI_str[] = "encodeURI";
+const char js_decodeURIComponent_str[] = "decodeURIComponent";
+const char js_encodeURIComponent_str[] = "encodeURIComponent";
+
+static JSFunctionSpec string_functions[] = {
+ {js_escape_str, js_str_escape, 1,0,0},
+ {js_unescape_str, str_unescape, 1,0,0},
+#if JS_HAS_UNEVAL
+ {js_uneval_str, str_uneval, 1,0,0},
+#endif
+ {js_decodeURI_str, str_decodeURI, 1,0,0},
+ {js_encodeURI_str, str_encodeURI, 1,0,0},
+ {js_decodeURIComponent_str, str_decodeURI_Component, 1,0,0},
+ {js_encodeURIComponent_str, str_encodeURI_Component, 1,0,0},
+
+ {0,0,0,0,0}
+};
+
+jschar js_empty_ucstr[] = {0};
+JSSubString js_EmptySubString = {0, js_empty_ucstr};
+
+enum string_tinyid {
+ STRING_LENGTH = -1
+};
+
+static JSPropertySpec string_props[] = {
+ {js_length_str, STRING_LENGTH,
+ JSPROP_READONLY|JSPROP_PERMANENT|JSPROP_SHARED, 0,0},
+ {0,0,0,0,0}
+};
+
+static JSBool
+str_getProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ jsval v;
+ JSString *str;
+ jsint slot;
+
+ if (!JSVAL_IS_INT(id))
+ return JS_TRUE;
+
+ slot = JSVAL_TO_INT(id);
+ if (slot == STRING_LENGTH) {
+ if (OBJ_GET_CLASS(cx, obj) == &js_StringClass) {
+ /* Follow ECMA-262 by fetching intrinsic length of our string. */
+ v = OBJ_GET_SLOT(cx, obj, JSSLOT_PRIVATE);
+ JS_ASSERT(JSVAL_IS_STRING(v));
+ str = JSVAL_TO_STRING(v);
+ } else {
+ /* Preserve compatibility: convert obj to a string primitive. */
+ str = js_ValueToString(cx, OBJECT_TO_JSVAL(obj));
+ if (!str)
+ return JS_FALSE;
+ }
+
+ *vp = INT_TO_JSVAL((jsint) JSSTRING_LENGTH(str));
+ }
+ return JS_TRUE;
+}
+
+#define STRING_ELEMENT_ATTRS (JSPROP_ENUMERATE|JSPROP_READONLY|JSPROP_PERMANENT)
+
+static JSBool
+str_enumerate(JSContext *cx, JSObject *obj)
+{
+ jsval v;
+ JSString *str, *str1;
+ size_t i, length;
+
+ v = OBJ_GET_SLOT(cx, obj, JSSLOT_PRIVATE);
+ JS_ASSERT(JSVAL_IS_STRING(v));
+ str = JSVAL_TO_STRING(v);
+
+ length = JSSTRING_LENGTH(str);
+ for (i = 0; i < length; i++) {
+ str1 = js_NewDependentString(cx, str, i, 1, 0);
+ if (!str1)
+ return JS_FALSE;
+ if (!OBJ_DEFINE_PROPERTY(cx, obj, INT_TO_JSID(i),
+ STRING_TO_JSVAL(str1), NULL, NULL,
+ STRING_ELEMENT_ATTRS, NULL)) {
+ return JS_FALSE;
+ }
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+str_resolve(JSContext *cx, JSObject *obj, jsval id, uintN flags,
+ JSObject **objp)
+{
+ jsval v;
+ JSString *str, *str1;
+ jsint slot;
+
+ if (!JSVAL_IS_INT(id) || (flags & JSRESOLVE_ASSIGNING))
+ return JS_TRUE;
+
+ v = OBJ_GET_SLOT(cx, obj, JSSLOT_PRIVATE);
+ JS_ASSERT(JSVAL_IS_STRING(v));
+ str = JSVAL_TO_STRING(v);
+
+ slot = JSVAL_TO_INT(id);
+ if ((size_t)slot < JSSTRING_LENGTH(str)) {
+ str1 = js_NewDependentString(cx, str, (size_t)slot, 1, 0);
+ if (!str1)
+ return JS_FALSE;
+ if (!OBJ_DEFINE_PROPERTY(cx, obj, INT_TO_JSID(slot),
+ STRING_TO_JSVAL(str1), NULL, NULL,
+ STRING_ELEMENT_ATTRS, NULL)) {
+ return JS_FALSE;
+ }
+ *objp = obj;
+ }
+ return JS_TRUE;
+}
+
+JSClass js_StringClass = {
+ js_String_str,
+ JSCLASS_HAS_PRIVATE | JSCLASS_NEW_RESOLVE |
+ JSCLASS_HAS_CACHED_PROTO(JSProto_String),
+ JS_PropertyStub, JS_PropertyStub, str_getProperty, JS_PropertyStub,
+ str_enumerate, (JSResolveOp)str_resolve, JS_ConvertStub, JS_FinalizeStub,
+ JSCLASS_NO_OPTIONAL_MEMBERS
+};
+
+#if JS_HAS_TOSOURCE
+
+/*
+ * String.prototype.quote is generic (as are most string methods), unlike
+ * toSource, toString, and valueOf.
+ */
+static JSBool
+str_quote(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSString *str;
+
+ str = js_ValueToString(cx, OBJECT_TO_JSVAL(obj));
+ if (!str)
+ return JS_FALSE;
+ argv[-1] = STRING_TO_JSVAL(str);
+
+ str = js_QuoteString(cx, str, '"');
+ if (!str)
+ return JS_FALSE;
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+
+static JSBool
+str_toSource(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsval v;
+ JSString *str;
+ size_t i, j, k, n;
+ char buf[16];
+ jschar *s, *t;
+
+ if (JSVAL_IS_STRING((jsval)obj)) {
+ v = (jsval)obj;
+ } else {
+ if (!JS_InstanceOf(cx, obj, &js_StringClass, argv))
+ return JS_FALSE;
+ v = OBJ_GET_SLOT(cx, obj, JSSLOT_PRIVATE);
+ if (!JSVAL_IS_STRING(v))
+ return js_obj_toSource(cx, obj, argc, argv, rval);
+ }
+ str = js_QuoteString(cx, JSVAL_TO_STRING(v), '"');
+ if (!str)
+ return JS_FALSE;
+ j = JS_snprintf(buf, sizeof buf, "(new %s(", js_StringClass.name);
+ s = JSSTRING_CHARS(str);
+ k = JSSTRING_LENGTH(str);
+ n = j + k + 2;
+ t = (jschar *) JS_malloc(cx, (n + 1) * sizeof(jschar));
+ if (!t)
+ return JS_FALSE;
+ for (i = 0; i < j; i++)
+ t[i] = buf[i];
+ for (j = 0; j < k; i++, j++)
+ t[i] = s[j];
+ t[i++] = ')';
+ t[i++] = ')';
+ t[i] = 0;
+ str = js_NewString(cx, t, n, 0);
+ if (!str) {
+ JS_free(cx, t);
+ return JS_FALSE;
+ }
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+
+#endif /* JS_HAS_TOSOURCE */
+
+static JSBool
+str_toString(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsval v;
+
+ if (JSVAL_IS_STRING((jsval)obj)) {
+ *rval = (jsval)obj;
+ return JS_TRUE;
+ }
+ if (!JS_InstanceOf(cx, obj, &js_StringClass, argv))
+ return JS_FALSE;
+ v = OBJ_GET_SLOT(cx, obj, JSSLOT_PRIVATE);
+ if (!JSVAL_IS_STRING(v))
+ return js_obj_toString(cx, obj, argc, argv, rval);
+ *rval = v;
+ return JS_TRUE;
+}
+
+static JSBool
+str_valueOf(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ if (JSVAL_IS_STRING((jsval)obj)) {
+ *rval = (jsval)obj;
+ return JS_TRUE;
+ }
+ if (!JS_InstanceOf(cx, obj, &js_StringClass, argv))
+ return JS_FALSE;
+ *rval = OBJ_GET_SLOT(cx, obj, JSSLOT_PRIVATE);
+ return JS_TRUE;
+}
+
+/*
+ * Java-like string native methods.
+ */
+static JSBool
+str_substring(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSString *str;
+ jsdouble d;
+ jsdouble length, begin, end;
+
+ str = js_ValueToString(cx, OBJECT_TO_JSVAL(obj));
+ if (!str)
+ return JS_FALSE;
+ argv[-1] = STRING_TO_JSVAL(str);
+
+ if (argc != 0) {
+ if (!js_ValueToNumber(cx, argv[0], &d))
+ return JS_FALSE;
+ length = JSSTRING_LENGTH(str);
+ begin = js_DoubleToInteger(d);
+ if (begin < 0)
+ begin = 0;
+ else if (begin > length)
+ begin = length;
+
+ if (argc == 1) {
+ end = length;
+ } else {
+ if (!js_ValueToNumber(cx, argv[1], &d))
+ return JS_FALSE;
+ end = js_DoubleToInteger(d);
+ if (end < 0)
+ end = 0;
+ else if (end > length)
+ end = length;
+ if (end < begin) {
+ /* ECMA emulates old JDK1.0 java.lang.String.substring. */
+ jsdouble tmp = begin;
+ begin = end;
+ end = tmp;
+ }
+ }
+
+ str = js_NewDependentString(cx, str, (size_t)begin,
+ (size_t)(end - begin), 0);
+ if (!str)
+ return JS_FALSE;
+ }
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+
+static JSBool
+str_toLowerCase(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSString *str;
+ size_t i, n;
+ jschar *s, *news;
+
+ str = js_ValueToString(cx, OBJECT_TO_JSVAL(obj));
+ if (!str)
+ return JS_FALSE;
+ argv[-1] = STRING_TO_JSVAL(str);
+
+ n = JSSTRING_LENGTH(str);
+ news = (jschar *) JS_malloc(cx, (n + 1) * sizeof(jschar));
+ if (!news)
+ return JS_FALSE;
+ s = JSSTRING_CHARS(str);
+ for (i = 0; i < n; i++)
+ news[i] = JS_TOLOWER(s[i]);
+ news[n] = 0;
+ str = js_NewString(cx, news, n, 0);
+ if (!str) {
+ JS_free(cx, news);
+ return JS_FALSE;
+ }
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+
+static JSBool
+str_toLocaleLowerCase(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSString *str;
+
+ /*
+ * Forcefully ignore the first (or any) argument and return toLowerCase();
+ * ECMA has reserved that argument, presumably for defining the locale.
+ */
+ if (cx->localeCallbacks && cx->localeCallbacks->localeToLowerCase) {
+ str = js_ValueToString(cx, OBJECT_TO_JSVAL(obj));
+ if (!str)
+ return JS_FALSE;
+ argv[-1] = STRING_TO_JSVAL(str);
+ return cx->localeCallbacks->localeToLowerCase(cx, str, rval);
+ }
+ return str_toLowerCase(cx, obj, 0, argv, rval);
+}
+
+static JSBool
+str_toUpperCase(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSString *str;
+ size_t i, n;
+ jschar *s, *news;
+
+ str = js_ValueToString(cx, OBJECT_TO_JSVAL(obj));
+ if (!str)
+ return JS_FALSE;
+ argv[-1] = STRING_TO_JSVAL(str);
+
+ n = JSSTRING_LENGTH(str);
+ news = (jschar *) JS_malloc(cx, (n + 1) * sizeof(jschar));
+ if (!news)
+ return JS_FALSE;
+ s = JSSTRING_CHARS(str);
+ for (i = 0; i < n; i++)
+ news[i] = JS_TOUPPER(s[i]);
+ news[n] = 0;
+ str = js_NewString(cx, news, n, 0);
+ if (!str) {
+ JS_free(cx, news);
+ return JS_FALSE;
+ }
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+
+static JSBool
+str_toLocaleUpperCase(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSString *str;
+
+ /*
+ * Forcefully ignore the first (or any) argument and return toUpperCase();
+ * ECMA has reserved that argument, presumably for defining the locale.
+ */
+ if (cx->localeCallbacks && cx->localeCallbacks->localeToUpperCase) {
+ str = js_ValueToString(cx, OBJECT_TO_JSVAL(obj));
+ if (!str)
+ return JS_FALSE;
+ argv[-1] = STRING_TO_JSVAL(str);
+ return cx->localeCallbacks->localeToUpperCase(cx, str, rval);
+ }
+ return str_toUpperCase(cx, obj, 0, argv, rval);
+}
+
+static JSBool
+str_localeCompare(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSString *str, *thatStr;
+
+ str = js_ValueToString(cx, OBJECT_TO_JSVAL(obj));
+ if (!str)
+ return JS_FALSE;
+ argv[-1] = STRING_TO_JSVAL(str);
+
+ if (argc == 0) {
+ *rval = JSVAL_ZERO;
+ } else {
+ thatStr = js_ValueToString(cx, argv[0]);
+ if (!thatStr)
+ return JS_FALSE;
+ if (cx->localeCallbacks && cx->localeCallbacks->localeCompare) {
+ argv[0] = STRING_TO_JSVAL(thatStr);
+ return cx->localeCallbacks->localeCompare(cx, str, thatStr, rval);
+ }
+ *rval = INT_TO_JSVAL(js_CompareStrings(str, thatStr));
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+str_charAt(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSString *str;
+ jsdouble d;
+ size_t index;
+
+ str = js_ValueToString(cx, OBJECT_TO_JSVAL(obj));
+ if (!str)
+ return JS_FALSE;
+ argv[-1] = STRING_TO_JSVAL(str);
+
+ if (argc == 0) {
+ d = 0.0;
+ } else {
+ if (!js_ValueToNumber(cx, argv[0], &d))
+ return JS_FALSE;
+ d = js_DoubleToInteger(d);
+ }
+
+ if (d < 0 || JSSTRING_LENGTH(str) <= d) {
+ *rval = JS_GetEmptyStringValue(cx);
+ } else {
+ index = (size_t)d;
+ str = js_NewDependentString(cx, str, index, 1, 0);
+ if (!str)
+ return JS_FALSE;
+ *rval = STRING_TO_JSVAL(str);
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+str_charCodeAt(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSString *str;
+ jsdouble d;
+ size_t index;
+
+ str = js_ValueToString(cx, OBJECT_TO_JSVAL(obj));
+ if (!str)
+ return JS_FALSE;
+ argv[-1] = STRING_TO_JSVAL(str);
+
+ if (argc == 0) {
+ d = 0.0;
+ } else {
+ if (!js_ValueToNumber(cx, argv[0], &d))
+ return JS_FALSE;
+ d = js_DoubleToInteger(d);
+ }
+
+ if (d < 0 || JSSTRING_LENGTH(str) <= d) {
+ *rval = JS_GetNaNValue(cx);
+ } else {
+ index = (size_t)d;
+ *rval = INT_TO_JSVAL((jsint) JSSTRING_CHARS(str)[index]);
+ }
+ return JS_TRUE;
+}
+
+jsint
+js_BoyerMooreHorspool(const jschar *text, jsint textlen,
+ const jschar *pat, jsint patlen,
+ jsint start)
+{
+ jsint i, j, k, m;
+ uint8 skip[BMH_CHARSET_SIZE];
+ jschar c;
+
+ JS_ASSERT(0 < patlen && patlen <= BMH_PATLEN_MAX);
+ for (i = 0; i < BMH_CHARSET_SIZE; i++)
+ skip[i] = (uint8)patlen;
+ m = patlen - 1;
+ for (i = 0; i < m; i++) {
+ c = pat[i];
+ if (c >= BMH_CHARSET_SIZE)
+ return BMH_BAD_PATTERN;
+ skip[c] = (uint8)(m - i);
+ }
+ for (k = start + m;
+ k < textlen;
+ k += ((c = text[k]) >= BMH_CHARSET_SIZE) ? patlen : skip[c]) {
+ for (i = k, j = m; ; i--, j--) {
+ if (j < 0)
+ return i + 1;
+ if (text[i] != pat[j])
+ break;
+ }
+ }
+ return -1;
+}
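
js_BoyerMooreHorspool above is a skip-table (Boyer-Moore-Horspool) search restricted to patterns whose characters fall below BMH_CHARSET_SIZE; str_indexOf below only switches to it for long texts and falls back to the naive scan otherwise. For reference, a minimal standalone sketch of the same algorithm over plain bytes, independent of the jschar/BMH_* machinery:

    /* Sketch: textbook Boyer-Moore-Horspool over unsigned char.  Build a
     * skip table from the pattern, then slide the pattern along the text,
     * jumping by the skip value of the text byte under the pattern's last
     * position. */
    #include <stddef.h>
    #include <string.h>

    static long
    bmh_search(const unsigned char *text, size_t textlen,
               const unsigned char *pat, size_t patlen)
    {
        size_t skip[256];
        size_t i, k;

        if (patlen == 0 || patlen > textlen)
            return -1;
        for (i = 0; i < 256; i++)
            skip[i] = patlen;
        for (i = 0; i + 1 < patlen; i++)
            skip[pat[i]] = patlen - 1 - i;

        for (k = 0; k + patlen <= textlen; k += skip[text[k + patlen - 1]]) {
            if (memcmp(text + k, pat, patlen) == 0)
                return (long) k;
        }
        return -1;
    }
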
+
+static JSBool
+str_indexOf(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSString *str, *str2;
+ jsint i, j, index, textlen, patlen;
+ const jschar *text, *pat;
+ jsdouble d;
+
+ str = js_ValueToString(cx, OBJECT_TO_JSVAL(obj));
+ if (!str)
+ return JS_FALSE;
+ argv[-1] = STRING_TO_JSVAL(str);
+ text = JSSTRING_CHARS(str);
+ textlen = (jsint) JSSTRING_LENGTH(str);
+
+ str2 = js_ValueToString(cx, argv[0]);
+ if (!str2)
+ return JS_FALSE;
+ argv[0] = STRING_TO_JSVAL(str2);
+ pat = JSSTRING_CHARS(str2);
+ patlen = (jsint) JSSTRING_LENGTH(str2);
+
+ if (argc > 1) {
+ if (!js_ValueToNumber(cx, argv[1], &d))
+ return JS_FALSE;
+ d = js_DoubleToInteger(d);
+ if (d < 0)
+ i = 0;
+ else if (d > textlen)
+ i = textlen;
+ else
+ i = (jsint)d;
+ } else {
+ i = 0;
+ }
+ if (patlen == 0) {
+ *rval = INT_TO_JSVAL(i);
+ return JS_TRUE;
+ }
+
+ /* XXX tune the BMH threshold (512) */
+ if ((jsuint)(patlen - 2) <= BMH_PATLEN_MAX - 2 && textlen >= 512) {
+ index = js_BoyerMooreHorspool(text, textlen, pat, patlen, i);
+ if (index != BMH_BAD_PATTERN)
+ goto out;
+ }
+
+ index = -1;
+ j = 0;
+ while (i + j < textlen) {
+ if (text[i + j] == pat[j]) {
+ if (++j == patlen) {
+ index = i;
+ break;
+ }
+ } else {
+ i++;
+ j = 0;
+ }
+ }
+
+out:
+ *rval = INT_TO_JSVAL(index);
+ return JS_TRUE;
+}
+
+static JSBool
+str_lastIndexOf(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSString *str, *str2;
+ const jschar *text, *pat;
+ jsint i, j, textlen, patlen;
+ jsdouble d;
+
+ str = js_ValueToString(cx, OBJECT_TO_JSVAL(obj));
+ if (!str)
+ return JS_FALSE;
+ argv[-1] = STRING_TO_JSVAL(str);
+ text = JSSTRING_CHARS(str);
+ textlen = (jsint) JSSTRING_LENGTH(str);
+
+ str2 = js_ValueToString(cx, argv[0]);
+ if (!str2)
+ return JS_FALSE;
+ argv[0] = STRING_TO_JSVAL(str2);
+ pat = JSSTRING_CHARS(str2);
+ patlen = (jsint) JSSTRING_LENGTH(str2);
+
+ if (argc > 1) {
+ if (!js_ValueToNumber(cx, argv[1], &d))
+ return JS_FALSE;
+ if (JSDOUBLE_IS_NaN(d)) {
+ i = textlen;
+ } else {
+ d = js_DoubleToInteger(d);
+ if (d < 0)
+ i = 0;
+ else if (d > textlen)
+ i = textlen;
+ else
+ i = (jsint)d;
+ }
+ } else {
+ i = textlen;
+ }
+
+ if (patlen == 0) {
+ *rval = INT_TO_JSVAL(i);
+ return JS_TRUE;
+ }
+
+ j = 0;
+ while (i >= 0) {
+ /* Don't assume that text is NUL-terminated: it could be dependent. */
+ if (i + j < textlen && text[i + j] == pat[j]) {
+ if (++j == patlen)
+ break;
+ } else {
+ i--;
+ j = 0;
+ }
+ }
+ *rval = INT_TO_JSVAL(i);
+ return JS_TRUE;
+}
+
+/*
+ * Perl-inspired string functions.
+ */
+typedef struct GlobData {
+ uintN flags; /* inout: mode and flag bits, see below */
+ uintN optarg; /* in: index of optional flags argument */
+ JSString *str; /* out: 'this' parameter object as string */
+ JSRegExp *regexp; /* out: regexp parameter object private data */
+} GlobData;
+
+/*
+ * Mode and flag bit definitions for match_or_replace's GlobData.flags field.
+ */
+#define MODE_MATCH 0x00 /* in: return match array on success */
+#define MODE_REPLACE 0x01 /* in: match and replace */
+#define MODE_SEARCH 0x02 /* in: search only, return match index or -1 */
+#define GET_MODE(f) ((f) & 0x03)
+#define FORCE_FLAT 0x04 /* in: force flat (non-regexp) string match */
+#define KEEP_REGEXP 0x08 /* inout: keep GlobData.regexp alive for caller
+ of match_or_replace; if set on input
+ but clear on output, regexp ownership
+ does not pass to caller */
+#define GLOBAL_REGEXP 0x10 /* out: regexp had the 'g' flag */
+
+static JSBool
+match_or_replace(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ JSBool (*glob)(JSContext *cx, jsint count, GlobData *data),
+ GlobData *data, jsval *rval)
+{
+ JSString *str, *src, *opt;
+ JSObject *reobj;
+ JSRegExp *re;
+ size_t index, length;
+ JSBool ok, test;
+ jsint count;
+
+ str = js_ValueToString(cx, OBJECT_TO_JSVAL(obj));
+ if (!str)
+ return JS_FALSE;
+ argv[-1] = STRING_TO_JSVAL(str);
+ data->str = str;
+
+ if (JSVAL_IS_REGEXP(cx, argv[0])) {
+ reobj = JSVAL_TO_OBJECT(argv[0]);
+ re = (JSRegExp *) JS_GetPrivate(cx, reobj);
+ } else {
+ src = js_ValueToString(cx, argv[0]);
+ if (!src)
+ return JS_FALSE;
+ if (data->optarg < argc) {
+ argv[0] = STRING_TO_JSVAL(src);
+ opt = js_ValueToString(cx, argv[data->optarg]);
+ if (!opt)
+ return JS_FALSE;
+ } else {
+ opt = NULL;
+ }
+ re = js_NewRegExpOpt(cx, NULL, src, opt,
+ (data->flags & FORCE_FLAT) != 0);
+ if (!re)
+ return JS_FALSE;
+ reobj = NULL;
+ }
+ /* From here on, all control flow must reach the matching DROP. */
+ data->regexp = re;
+ HOLD_REGEXP(cx, re);
+
+ if (re->flags & JSREG_GLOB)
+ data->flags |= GLOBAL_REGEXP;
+ index = 0;
+ if (GET_MODE(data->flags) == MODE_SEARCH) {
+ ok = js_ExecuteRegExp(cx, re, str, &index, JS_TRUE, rval);
+ if (ok) {
+ *rval = (*rval == JSVAL_TRUE)
+ ? INT_TO_JSVAL(cx->regExpStatics.leftContext.length)
+ : INT_TO_JSVAL(-1);
+ }
+ } else if (data->flags & GLOBAL_REGEXP) {
+ if (reobj) {
+ /* Set the lastIndex property's reserved slot to 0. */
+ ok = js_SetLastIndex(cx, reobj, 0);
+ } else {
+ ok = JS_TRUE;
+ }
+ if (ok) {
+ length = JSSTRING_LENGTH(str);
+ for (count = 0; index <= length; count++) {
+ ok = js_ExecuteRegExp(cx, re, str, &index, JS_TRUE, rval);
+ if (!ok || *rval != JSVAL_TRUE)
+ break;
+ ok = glob(cx, count, data);
+ if (!ok)
+ break;
+ if (cx->regExpStatics.lastMatch.length == 0) {
+ if (index == length)
+ break;
+ index++;
+ }
+ }
+ }
+ } else {
+ if (GET_MODE(data->flags) == MODE_REPLACE) {
+ test = JS_TRUE;
+ } else {
+ /*
+ * MODE_MATCH implies str_match is being called from a script or a
+ * scripted function. If the caller cares only about testing null
+ * vs. non-null return value, optimize away the array object that
+ * would normally be returned in *rval.
+ */
+ JSStackFrame *fp = cx->fp->down;
+
+ /* Skip Function.prototype.call and .apply frames. */
+ while (fp && !fp->pc) {
+ JS_ASSERT(!fp->script);
+ fp = fp->down;
+ }
+
+ /* Assume a full array result is required, then prove otherwise. */
+ test = JS_FALSE;
+ if (fp) {
+ JS_ASSERT(*fp->pc == JSOP_CALL || *fp->pc == JSOP_NEW);
+ JS_ASSERT(js_CodeSpec[*fp->pc].length == 3);
+ switch (fp->pc[3]) {
+ case JSOP_POP:
+ case JSOP_IFEQ:
+ case JSOP_IFNE:
+ case JSOP_IFEQX:
+ case JSOP_IFNEX:
+ test = JS_TRUE;
+ break;
+ default:;
+ }
+ }
+ }
+ ok = js_ExecuteRegExp(cx, re, str, &index, test, rval);
+ }
+
+ DROP_REGEXP(cx, re);
+ if (reobj) {
+ /* Tell our caller that it doesn't need to destroy data->regexp. */
+ data->flags &= ~KEEP_REGEXP;
+ } else if (!(data->flags & KEEP_REGEXP)) {
+ /* Caller didn't want to keep data->regexp, so null and destroy it. */
+ data->regexp = NULL;
+ js_DestroyRegExp(cx, re);
+ }
+
+ return ok;
+}
+
+typedef struct MatchData {
+ GlobData base;
+ jsval *arrayval; /* NB: local root pointer */
+} MatchData;
+
+static JSBool
+match_glob(JSContext *cx, jsint count, GlobData *data)
+{
+ MatchData *mdata;
+ JSObject *arrayobj;
+ JSSubString *matchsub;
+ JSString *matchstr;
+ jsval v;
+
+ mdata = (MatchData *)data;
+ arrayobj = JSVAL_TO_OBJECT(*mdata->arrayval);
+ if (!arrayobj) {
+ arrayobj = js_ConstructObject(cx, &js_ArrayClass, NULL, NULL, 0, NULL);
+ if (!arrayobj)
+ return JS_FALSE;
+ *mdata->arrayval = OBJECT_TO_JSVAL(arrayobj);
+ }
+ matchsub = &cx->regExpStatics.lastMatch;
+ matchstr = js_NewStringCopyN(cx, matchsub->chars, matchsub->length, 0);
+ if (!matchstr)
+ return JS_FALSE;
+ v = STRING_TO_JSVAL(matchstr);
+ return js_SetProperty(cx, arrayobj, INT_TO_JSID(count), &v);
+}
+
+static JSBool
+str_match(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ MatchData mdata;
+ JSBool ok;
+
+ mdata.base.flags = MODE_MATCH;
+ mdata.base.optarg = 1;
+ mdata.arrayval = &argv[2];
+ *mdata.arrayval = JSVAL_NULL;
+ ok = match_or_replace(cx, obj, argc, argv, match_glob, &mdata.base, rval);
+ if (ok && !JSVAL_IS_NULL(*mdata.arrayval))
+ *rval = *mdata.arrayval;
+ return ok;
+}
+
+static JSBool
+str_search(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ GlobData data;
+
+ data.flags = MODE_SEARCH;
+ data.optarg = 1;
+ return match_or_replace(cx, obj, argc, argv, NULL, &data, rval);
+}
+
+typedef struct ReplaceData {
+ GlobData base; /* base struct state */
+ JSObject *lambda; /* replacement function object or null */
+ JSString *repstr; /* replacement string */
+ jschar *dollar; /* null or pointer to first $ in repstr */
+ jschar *dollarEnd; /* limit pointer for js_strchr_limit */
+ jschar *chars; /* result chars, null initially */
+ size_t length; /* result length, 0 initially */
+ jsint index; /* index in result of next replacement */
+ jsint leftIndex; /* left context index in base.str->chars */
+ JSSubString dollarStr; /* for "$$" interpret_dollar result */
+} ReplaceData;
+
+static JSSubString *
+interpret_dollar(JSContext *cx, jschar *dp, jschar *ep, ReplaceData *rdata,
+ size_t *skip)
+{
+ JSRegExpStatics *res;
+ jschar dc, *cp;
+ uintN num, tmp;
+
+ JS_ASSERT(*dp == '$');
+
+ /* If there is only a dollar, bail now */
+ if (dp + 1 >= ep)
+ return NULL;
+
+ /* Interpret all Perl match-induced dollar variables. */
+ res = &cx->regExpStatics;
+ dc = dp[1];
+ if (JS7_ISDEC(dc)) {
+ /* ECMA-262 Edition 3: 1-9 or 01-99 */
+ num = JS7_UNDEC(dc);
+ if (num > res->parenCount)
+ return NULL;
+
+ cp = dp + 2;
+ if (cp < ep && (dc = *cp, JS7_ISDEC(dc))) {
+ tmp = 10 * num + JS7_UNDEC(dc);
+ if (tmp <= res->parenCount) {
+ cp++;
+ num = tmp;
+ }
+ }
+ if (num == 0)
+ return NULL;
+
+ /* Adjust num from 1 $n-origin to 0 array-index-origin. */
+ num--;
+ *skip = cp - dp;
+ return REGEXP_PAREN_SUBSTRING(res, num);
+ }
+
+ *skip = 2;
+ switch (dc) {
+ case '$':
+ rdata->dollarStr.chars = dp;
+ rdata->dollarStr.length = 1;
+ return &rdata->dollarStr;
+ case '&':
+ return &res->lastMatch;
+ case '+':
+ return &res->lastParen;
+ case '`':
+ return &res->leftContext;
+ case '\'':
+ return &res->rightContext;
+ }
+ return NULL;
+}
+
+static JSBool
+find_replen(JSContext *cx, ReplaceData *rdata, size_t *sizep)
+{
+ JSString *repstr;
+ size_t replen, skip;
+ jschar *dp, *ep;
+ JSSubString *sub;
+ JSObject *lambda;
+
+ lambda = rdata->lambda;
+ if (lambda) {
+ uintN argc, i, j, m, n, p;
+ jsval *sp, *oldsp, rval;
+ void *mark;
+ JSStackFrame *fp;
+ JSBool ok;
+
+ /*
+ * Save the regExpStatics from the current regexp, since they may be
+ * clobbered by a RegExp usage in the lambda function. Note that all
+ * members of JSRegExpStatics are JSSubStrings and thus not GC roots, except
+ * for input, which is rooted otherwise via argv[-1] in str_replace.
+ */
+ JSRegExpStatics save = cx->regExpStatics;
+ JSBool freeMoreParens = JS_FALSE;
+
+ /*
+ * In the lambda case, not only do we find the replacement string's
+ * length, we compute repstr and return it via rdata for use within
+ * do_replace. The lambda is called with arguments ($&, $1, $2, ...,
+ * index, input), i.e., all the properties of a regexp match array.
+ * For $&, etc., we must create string jsvals from cx->regExpStatics.
+ * We grab up stack space to keep the newborn strings GC-rooted.
+ */
+ p = rdata->base.regexp->parenCount;
+ argc = 1 + p + 2;
+ sp = js_AllocStack(cx, 2 + argc, &mark);
+ if (!sp)
+ return JS_FALSE;
+
+ /* Push lambda and its 'this' parameter. */
+ *sp++ = OBJECT_TO_JSVAL(lambda);
+ *sp++ = OBJECT_TO_JSVAL(OBJ_GET_PARENT(cx, lambda));
+
+#define PUSH_REGEXP_STATIC(sub) \
+ JS_BEGIN_MACRO \
+ JSString *str = js_NewStringCopyN(cx, \
+ cx->regExpStatics.sub.chars, \
+ cx->regExpStatics.sub.length, \
+ 0); \
+ if (!str) { \
+ ok = JS_FALSE; \
+ goto lambda_out; \
+ } \
+ *sp++ = STRING_TO_JSVAL(str); \
+ JS_END_MACRO
+
+ /* Push $&, $1, $2, ... */
+ PUSH_REGEXP_STATIC(lastMatch);
+ i = 0;
+ m = cx->regExpStatics.parenCount;
+ n = JS_MIN(m, 9);
+ for (j = 0; i < n; i++, j++)
+ PUSH_REGEXP_STATIC(parens[j]);
+ for (j = 0; i < m; i++, j++)
+ PUSH_REGEXP_STATIC(moreParens[j]);
+
+ /*
+ * We need to clear moreParens in the top-of-stack cx->regExpStatics
+ * so that it won't possibly be realloc'ed, leaving the bottom-of-stack
+ * moreParens pointing to freed memory.
+ */
+ cx->regExpStatics.moreParens = NULL;
+ freeMoreParens = JS_TRUE;
+
+#undef PUSH_REGEXP_STATIC
+
+ /* Make sure to push undefined for any unmatched parens. */
+ for (; i < p; i++)
+ *sp++ = JSVAL_VOID;
+
+ /* Push match index and input string. */
+ *sp++ = INT_TO_JSVAL((jsint)cx->regExpStatics.leftContext.length);
+ *sp++ = STRING_TO_JSVAL(rdata->base.str);
+
+ /* Lift current frame to include the args and do the call. */
+ fp = cx->fp;
+ oldsp = fp->sp;
+ fp->sp = sp;
+ ok = js_Invoke(cx, argc, JSINVOKE_INTERNAL);
+ rval = fp->sp[-1];
+ fp->sp = oldsp;
+
+ if (ok) {
+ /*
+ * NB: we count on the newborn string root to hold any string
+ * created by this js_ValueToString that would otherwise be GC-
+ * able, until we use rdata->repstr in do_replace.
+ */
+ repstr = js_ValueToString(cx, rval);
+ if (!repstr) {
+ ok = JS_FALSE;
+ } else {
+ rdata->repstr = repstr;
+ *sizep = JSSTRING_LENGTH(repstr);
+ }
+ }
+
+ lambda_out:
+ js_FreeStack(cx, mark);
+ if (freeMoreParens)
+ JS_free(cx, cx->regExpStatics.moreParens);
+ cx->regExpStatics = save;
+ return ok;
+ }
+
+ repstr = rdata->repstr;
+ replen = JSSTRING_LENGTH(repstr);
+ for (dp = rdata->dollar, ep = rdata->dollarEnd; dp;
+ dp = js_strchr_limit(dp, '$', ep)) {
+ sub = interpret_dollar(cx, dp, ep, rdata, &skip);
+ if (sub) {
+ replen += sub->length - skip;
+ dp += skip;
+ }
+ else
+ dp++;
+ }
+ *sizep = replen;
+ return JS_TRUE;
+}
+
+static void
+do_replace(JSContext *cx, ReplaceData *rdata, jschar *chars)
+{
+ JSString *repstr;
+ jschar *bp, *cp, *dp, *ep;
+ size_t len, skip;
+ JSSubString *sub;
+
+ repstr = rdata->repstr;
+ bp = cp = JSSTRING_CHARS(repstr);
+ for (dp = rdata->dollar, ep = rdata->dollarEnd; dp;
+ dp = js_strchr_limit(dp, '$', ep)) {
+ len = dp - cp;
+ js_strncpy(chars, cp, len);
+ chars += len;
+ cp = dp;
+ sub = interpret_dollar(cx, dp, ep, rdata, &skip);
+ if (sub) {
+ len = sub->length;
+ js_strncpy(chars, sub->chars, len);
+ chars += len;
+ cp += skip;
+ dp += skip;
+ } else {
+ dp++;
+ }
+ }
+ js_strncpy(chars, cp, JSSTRING_LENGTH(repstr) - (cp - bp));
+}
+
+static JSBool
+replace_glob(JSContext *cx, jsint count, GlobData *data)
+{
+ ReplaceData *rdata;
+ JSString *str;
+ size_t leftoff, leftlen, replen, growth;
+ const jschar *left;
+ jschar *chars;
+
+ rdata = (ReplaceData *)data;
+ str = data->str;
+ leftoff = rdata->leftIndex;
+ left = JSSTRING_CHARS(str) + leftoff;
+ leftlen = cx->regExpStatics.lastMatch.chars - left;
+ rdata->leftIndex = cx->regExpStatics.lastMatch.chars - JSSTRING_CHARS(str);
+ rdata->leftIndex += cx->regExpStatics.lastMatch.length;
+ if (!find_replen(cx, rdata, &replen))
+ return JS_FALSE;
+ growth = leftlen + replen;
+ chars = (jschar *)
+ (rdata->chars
+ ? JS_realloc(cx, rdata->chars, (rdata->length + growth + 1)
+ * sizeof(jschar))
+ : JS_malloc(cx, (growth + 1) * sizeof(jschar)));
+ if (!chars) {
+ JS_free(cx, rdata->chars);
+ rdata->chars = NULL;
+ return JS_FALSE;
+ }
+ rdata->chars = chars;
+ rdata->length += growth;
+ chars += rdata->index;
+ rdata->index += growth;
+ js_strncpy(chars, left, leftlen);
+ chars += leftlen;
+ do_replace(cx, rdata, chars);
+ return JS_TRUE;
+}
+
+static JSBool
+str_replace(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSObject *lambda;
+ JSString *repstr, *str;
+ ReplaceData rdata;
+ JSBool ok;
+ jschar *chars;
+ size_t leftlen, rightlen, length;
+
+ if (JS_TypeOfValue(cx, argv[1]) == JSTYPE_FUNCTION) {
+ lambda = JSVAL_TO_OBJECT(argv[1]);
+ repstr = NULL;
+ } else {
+ if (!JS_ConvertValue(cx, argv[1], JSTYPE_STRING, &argv[1]))
+ return JS_FALSE;
+ repstr = JSVAL_TO_STRING(argv[1]);
+ lambda = NULL;
+ }
+
+ /*
+ * For ECMA Edition 3, the first argument is to be converted to a string
+ * to match in a "flat" sense (without regular expression metachars having
+ * special meanings) UNLESS the first arg is a RegExp object.
+ */
+ rdata.base.flags = MODE_REPLACE | KEEP_REGEXP | FORCE_FLAT;
+ rdata.base.optarg = 2;
+
+ rdata.lambda = lambda;
+ rdata.repstr = repstr;
+ if (repstr) {
+ rdata.dollarEnd = JSSTRING_CHARS(repstr) + JSSTRING_LENGTH(repstr);
+ rdata.dollar = js_strchr_limit(JSSTRING_CHARS(repstr), '$',
+ rdata.dollarEnd);
+ } else {
+ rdata.dollar = rdata.dollarEnd = NULL;
+ }
+ rdata.chars = NULL;
+ rdata.length = 0;
+ rdata.index = 0;
+ rdata.leftIndex = 0;
+
+ ok = match_or_replace(cx, obj, argc, argv, replace_glob, &rdata.base, rval);
+ if (!ok)
+ return JS_FALSE;
+
+ if (!rdata.chars) {
+ if ((rdata.base.flags & GLOBAL_REGEXP) || *rval != JSVAL_TRUE) {
+ /* Didn't match even once. */
+ *rval = STRING_TO_JSVAL(rdata.base.str);
+ goto out;
+ }
+ leftlen = cx->regExpStatics.leftContext.length;
+ ok = find_replen(cx, &rdata, &length);
+ if (!ok)
+ goto out;
+ length += leftlen;
+ chars = (jschar *) JS_malloc(cx, (length + 1) * sizeof(jschar));
+ if (!chars) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ js_strncpy(chars, cx->regExpStatics.leftContext.chars, leftlen);
+ do_replace(cx, &rdata, chars + leftlen);
+ rdata.chars = chars;
+ rdata.length = length;
+ }
+
+ rightlen = cx->regExpStatics.rightContext.length;
+ length = rdata.length + rightlen;
+ chars = (jschar *)
+ JS_realloc(cx, rdata.chars, (length + 1) * sizeof(jschar));
+ if (!chars) {
+ JS_free(cx, rdata.chars);
+ ok = JS_FALSE;
+ goto out;
+ }
+ js_strncpy(chars + rdata.length, cx->regExpStatics.rightContext.chars,
+ rightlen);
+ chars[length] = 0;
+
+ str = js_NewString(cx, chars, length, 0);
+ if (!str) {
+ JS_free(cx, chars);
+ ok = JS_FALSE;
+ goto out;
+ }
+ *rval = STRING_TO_JSVAL(str);
+
+out:
+ /* If KEEP_REGEXP is still set, it's our job to destroy regexp now. */
+ if (rdata.base.flags & KEEP_REGEXP)
+ js_DestroyRegExp(cx, rdata.base.regexp);
+ return ok;
+}
+
+/*
+ * Subroutine used by str_split to find the next split point in str, starting
+ * at offset *ip and looking either for the separator substring given by sep,
+ * or for the next re match. In the re case, return the matched separator in
+ * *sep, and the possibly updated offset in *ip.
+ *
+ * Return -2 on error, -1 on end of string, >= 0 for a valid index of the next
+ * separator occurrence if found, or str->length if no separator is found.
+ */
+static jsint
+find_split(JSContext *cx, JSString *str, JSRegExp *re, jsint *ip,
+ JSSubString *sep)
+{
+ jsint i, j, k;
+ size_t length;
+ jschar *chars;
+
+ /*
+ * Stop if past end of string. If at end of string, we will compare the
+ * null char stored there (by js_NewString*) to sep->chars[j] in the while
+ * loop at the end of this function, so that
+ *
+ * "ab,".split(',') => ["ab", ""]
+ *
+ * and the resulting array converts back to the string "ab," for symmetry.
+ * However, we ape Perl and do this only if there is a sufficiently large
+ * limit argument (see str_split).
+ */
+ i = *ip;
+ length = JSSTRING_LENGTH(str);
+ if ((size_t)i > length)
+ return -1;
+
+ chars = JSSTRING_CHARS(str);
+
+ /*
+ * Match a regular expression against the separator at or above index i.
+ * Call js_ExecuteRegExp with true for the test argument. On successful
+ * match, get the separator from cx->regExpStatics.lastMatch.
+ */
+ if (re) {
+ size_t index;
+ jsval rval;
+
+ again:
+ /* JS1.2 deviated from Perl by never matching at end of string. */
+ index = (size_t)i;
+ if (!js_ExecuteRegExp(cx, re, str, &index, JS_TRUE, &rval))
+ return -2;
+ if (rval != JSVAL_TRUE) {
+ /* Mismatch: ensure our caller advances i past end of string. */
+ sep->length = 1;
+ return length;
+ }
+ i = (jsint)index;
+ *sep = cx->regExpStatics.lastMatch;
+ if (sep->length == 0) {
+ /*
+ * Empty string match: never split on an empty match at the start
+ * of a find_split cycle. Same rule as for an empty global match
+ * in match_or_replace.
+ */
+ if (i == *ip) {
+ /*
+ * "Bump-along" to avoid sticking at an empty match, but don't
+ * bump past end of string -- our caller must do that by adding
+ * sep->length to our return value.
+ */
+ if ((size_t)i == length)
+ return -1;
+ i++;
+ goto again;
+ }
+ if ((size_t)i == length) {
+ /*
+ * If there was a trivial zero-length match at the end of the
+ * split, then we shouldn't output the matched string at the end
+ * of the split array. See ECMA-262 Ed. 3, 15.5.4.14, Step 15.
+ */
+ sep->chars = NULL;
+ }
+ }
+ JS_ASSERT((size_t)i >= sep->length);
+ return i - sep->length;
+ }
+
+ /*
+ * Deviate from ECMA by never splitting an empty string by any separator
+ * string into a non-empty array (an array of length 1 that contains the
+ * empty string).
+ */
+ if (!JS_VERSION_IS_ECMA(cx) && length == 0)
+ return -1;
+
+ /*
+ * Special case: if sep is the empty string, split str into one character
+ * substrings. Let our caller worry about whether to split once at end of
+ * string into an empty substring.
+ */
+ if (sep->length == 0)
+ return ((size_t)i == length) ? -1 : i + 1;
+
+ /*
+ * Now that we know sep is non-empty, search starting at i in str for an
+ * occurrence of all of sep's chars. If we find them, return the index of
+ * the first separator char. Otherwise, return length.
+ */
+ j = 0;
+ while ((size_t)(k = i + j) < length) {
+ if (chars[k] == sep->chars[j]) {
+ if ((size_t)++j == sep->length)
+ return i;
+ } else {
+ i++;
+ j = 0;
+ }
+ }
+ return k;
+}
+
+static JSBool
+str_split(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSString *str, *sub;
+ JSObject *arrayobj;
+ jsval v;
+ JSBool ok, limited;
+ JSRegExp *re;
+ JSSubString *sep, tmp;
+ jsdouble d;
+ jsint i, j;
+ uint32 len, limit;
+
+ str = js_ValueToString(cx, OBJECT_TO_JSVAL(obj));
+ if (!str)
+ return JS_FALSE;
+ argv[-1] = STRING_TO_JSVAL(str);
+
+ arrayobj = js_ConstructObject(cx, &js_ArrayClass, NULL, NULL, 0, NULL);
+ if (!arrayobj)
+ return JS_FALSE;
+ *rval = OBJECT_TO_JSVAL(arrayobj);
+
+ if (argc == 0) {
+ v = STRING_TO_JSVAL(str);
+ ok = JS_SetElement(cx, arrayobj, 0, &v);
+ } else {
+ if (JSVAL_IS_REGEXP(cx, argv[0])) {
+ re = (JSRegExp *) JS_GetPrivate(cx, JSVAL_TO_OBJECT(argv[0]));
+ sep = &tmp;
+
+ /* Set a magic value so we can detect a successful re match. */
+ sep->chars = NULL;
+ sep->length = 0;
+ } else {
+ JSString *str2 = js_ValueToString(cx, argv[0]);
+ if (!str2)
+ return JS_FALSE;
+ argv[0] = STRING_TO_JSVAL(str2);
+
+ /*
+ * Point sep at a local copy of str2's header because find_split
+ * will modify sep->length.
+ */
+ tmp.length = JSSTRING_LENGTH(str2);
+ tmp.chars = JSSTRING_CHARS(str2);
+ sep = &tmp;
+ re = NULL;
+ }
+
+ /* Use the second argument as the split limit, if given. */
+ limited = (argc > 1) && !JSVAL_IS_VOID(argv[1]);
+ limit = 0; /* Avoid warning. */
+ if (limited) {
+ if (!js_ValueToNumber(cx, argv[1], &d))
+ return JS_FALSE;
+
+ /* Clamp limit between 0 and 1 + string length. */
+ if (!js_DoubleToECMAUint32(cx, d, &limit))
+ return JS_FALSE;
+ if (limit > JSSTRING_LENGTH(str))
+ limit = 1 + JSSTRING_LENGTH(str);
+ }
+
+ len = i = 0;
+ while ((j = find_split(cx, str, re, &i, sep)) >= 0) {
+ if (limited && len >= limit)
+ break;
+ sub = js_NewDependentString(cx, str, i, (size_t)(j - i), 0);
+ if (!sub)
+ return JS_FALSE;
+ v = STRING_TO_JSVAL(sub);
+ if (!JS_SetElement(cx, arrayobj, len, &v))
+ return JS_FALSE;
+ len++;
+
+ /*
+ * Imitate perl's feature of including parenthesized substrings
+ * that matched part of the delimiter in the new array, after the
+ * split substring that was delimited.
+ */
+ if (re && sep->chars) {
+ uintN num;
+ JSSubString *parsub;
+
+ for (num = 0; num < cx->regExpStatics.parenCount; num++) {
+ if (limited && len >= limit)
+ break;
+ parsub = REGEXP_PAREN_SUBSTRING(&cx->regExpStatics, num);
+ sub = js_NewStringCopyN(cx, parsub->chars, parsub->length,
+ 0);
+ if (!sub)
+ return JS_FALSE;
+ v = STRING_TO_JSVAL(sub);
+ if (!JS_SetElement(cx, arrayobj, len, &v))
+ return JS_FALSE;
+ len++;
+ }
+ sep->chars = NULL;
+ }
+
+ i = j + sep->length;
+ if (!JS_VERSION_IS_ECMA(cx)) {
+ /*
+ * Deviate from ECMA to imitate Perl, which omits a final
+ * split unless a limit argument is given and big enough.
+ */
+ if (!limited && (size_t)i == JSSTRING_LENGTH(str))
+ break;
+ }
+ }
+ ok = (j != -2);
+ }
+ return ok;
+}
+
+#if JS_HAS_PERL_SUBSTR
+static JSBool
+str_substr(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSString *str;
+ jsdouble d;
+ jsdouble length, begin, end;
+
+ str = js_ValueToString(cx, OBJECT_TO_JSVAL(obj));
+ if (!str)
+ return JS_FALSE;
+ argv[-1] = STRING_TO_JSVAL(str);
+
+ if (argc != 0) {
+ if (!js_ValueToNumber(cx, argv[0], &d))
+ return JS_FALSE;
+ length = JSSTRING_LENGTH(str);
+ begin = js_DoubleToInteger(d);
+ if (begin < 0) {
+ begin += length;
+ if (begin < 0)
+ begin = 0;
+ } else if (begin > length) {
+ begin = length;
+ }
+
+ if (argc == 1) {
+ end = length;
+ } else {
+ if (!js_ValueToNumber(cx, argv[1], &d))
+ return JS_FALSE;
+ end = js_DoubleToInteger(d);
+ if (end < 0)
+ end = 0;
+ end += begin;
+ if (end > length)
+ end = length;
+ }
+
+ str = js_NewDependentString(cx, str, (size_t)begin,
+ (size_t)(end - begin), 0);
+ if (!str)
+ return JS_FALSE;
+ }
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+#endif /* JS_HAS_PERL_SUBSTR */
+
+/*
+ * Python-esque sequence operations.
+ */
+static JSBool
+str_concat(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSString *str, *str2;
+ uintN i;
+
+ str = js_ValueToString(cx, OBJECT_TO_JSVAL(obj));
+ if (!str)
+ return JS_FALSE;
+ argv[-1] = STRING_TO_JSVAL(str);
+
+ for (i = 0; i < argc; i++) {
+ str2 = js_ValueToString(cx, argv[i]);
+ if (!str2)
+ return JS_FALSE;
+ argv[i] = STRING_TO_JSVAL(str2);
+
+ str = js_ConcatStrings(cx, str, str2);
+ if (!str)
+ return JS_FALSE;
+ }
+
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+
+static JSBool
+str_slice(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSString *str;
+ jsdouble d;
+ jsdouble length, begin, end;
+
+ str = js_ValueToString(cx, OBJECT_TO_JSVAL(obj));
+ if (!str)
+ return JS_FALSE;
+ argv[-1] = STRING_TO_JSVAL(str);
+
+ if (argc != 0) {
+ if (!js_ValueToNumber(cx, argv[0], &d))
+ return JS_FALSE;
+ length = JSSTRING_LENGTH(str);
+ begin = js_DoubleToInteger(d);
+ if (begin < 0) {
+ begin += length;
+ if (begin < 0)
+ begin = 0;
+ } else if (begin > length) {
+ begin = length;
+ }
+
+ if (argc == 1) {
+ end = length;
+ } else {
+ if (!js_ValueToNumber(cx, argv[1], &d))
+ return JS_FALSE;
+ end = js_DoubleToInteger(d);
+ if (end < 0) {
+ end += length;
+ if (end < 0)
+ end = 0;
+ } else if (end > length) {
+ end = length;
+ }
+ if (end < begin)
+ end = begin;
+ }
+
+ str = js_NewDependentString(cx, str, (size_t)begin,
+ (size_t)(end - begin), 0);
+ if (!str)
+ return JS_FALSE;
+ }
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+
+#if JS_HAS_STR_HTML_HELPERS
+/*
+ * HTML composition aids.
+ */
+static JSBool
+tagify(JSContext *cx, JSObject *obj, jsval *argv,
+ const char *begin, JSString *param, const char *end,
+ jsval *rval)
+{
+ JSString *str;
+ jschar *tagbuf;
+ size_t beglen, endlen, parlen, taglen;
+ size_t i, j;
+
+ if (JSVAL_IS_STRING((jsval)obj)) {
+ str = JSVAL_TO_STRING((jsval)obj);
+ } else {
+ str = js_ValueToString(cx, OBJECT_TO_JSVAL(obj));
+ if (!str)
+ return JS_FALSE;
+ argv[-1] = STRING_TO_JSVAL(str);
+ }
+
+ if (!end)
+ end = begin;
+
+ beglen = strlen(begin);
+ taglen = 1 + beglen + 1; /* '<begin' + '>' */
+ parlen = 0; /* Avoid warning. */
+ if (param) {
+ parlen = JSSTRING_LENGTH(param);
+ taglen += 2 + parlen + 1; /* '="param"' */
+ }
+ endlen = strlen(end);
+ taglen += JSSTRING_LENGTH(str) + 2 + endlen + 1; /* 'str</end>' */
+
+ if (taglen >= ~(size_t)0 / sizeof(jschar)) {
+ JS_ReportOutOfMemory(cx);
+ return JS_FALSE;
+ }
+
+ tagbuf = (jschar *) JS_malloc(cx, (taglen + 1) * sizeof(jschar));
+ if (!tagbuf)
+ return JS_FALSE;
+
+ j = 0;
+ tagbuf[j++] = '<';
+ for (i = 0; i < beglen; i++)
+ tagbuf[j++] = (jschar)begin[i];
+ if (param) {
+ tagbuf[j++] = '=';
+ tagbuf[j++] = '"';
+ js_strncpy(&tagbuf[j], JSSTRING_CHARS(param), parlen);
+ j += parlen;
+ tagbuf[j++] = '"';
+ }
+ tagbuf[j++] = '>';
+ js_strncpy(&tagbuf[j], JSSTRING_CHARS(str), JSSTRING_LENGTH(str));
+ j += JSSTRING_LENGTH(str);
+ tagbuf[j++] = '<';
+ tagbuf[j++] = '/';
+ for (i = 0; i < endlen; i++)
+ tagbuf[j++] = (jschar)end[i];
+ tagbuf[j++] = '>';
+ JS_ASSERT(j == taglen);
+ tagbuf[j] = 0;
+
+ str = js_NewString(cx, tagbuf, taglen, 0);
+ if (!str) {
+ free((char *)tagbuf);
+ return JS_FALSE;
+ }
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+
+static JSBool
+tagify_value(JSContext *cx, JSObject *obj, jsval *argv,
+ const char *begin, const char *end,
+ jsval *rval)
+{
+ JSString *param;
+
+ param = js_ValueToString(cx, argv[0]);
+ if (!param)
+ return JS_FALSE;
+ argv[0] = STRING_TO_JSVAL(param);
+ return tagify(cx, obj, argv, begin, param, end, rval);
+}
+
+static JSBool
+str_bold(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ return tagify(cx, obj, argv, "b", NULL, NULL, rval);
+}
+
+static JSBool
+str_italics(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ return tagify(cx, obj, argv, "i", NULL, NULL, rval);
+}
+
+static JSBool
+str_fixed(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ return tagify(cx, obj, argv, "tt", NULL, NULL, rval);
+}
+
+static JSBool
+str_fontsize(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ return tagify_value(cx, obj, argv, "font size", "font", rval);
+}
+
+static JSBool
+str_fontcolor(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ return tagify_value(cx, obj, argv, "font color", "font", rval);
+}
+
+static JSBool
+str_link(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ return tagify_value(cx, obj, argv, "a href", "a", rval);
+}
+
+static JSBool
+str_anchor(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ return tagify_value(cx, obj, argv, "a name", "a", rval);
+}
+
+static JSBool
+str_strike(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ return tagify(cx, obj, argv, "strike", NULL, NULL, rval);
+}
+
+static JSBool
+str_small(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ return tagify(cx, obj, argv, "small", NULL, NULL, rval);
+}
+
+static JSBool
+str_big(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ return tagify(cx, obj, argv, "big", NULL, NULL, rval);
+}
+
+static JSBool
+str_blink(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ return tagify(cx, obj, argv, "blink", NULL, NULL, rval);
+}
+
+static JSBool
+str_sup(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ return tagify(cx, obj, argv, "sup", NULL, NULL, rval);
+}
+
+static JSBool
+str_sub(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ return tagify(cx, obj, argv, "sub", NULL, NULL, rval);
+}
+#endif /* JS_HAS_STR_HTML_HELPERS */
+
+static JSFunctionSpec string_methods[] = {
+#if JS_HAS_TOSOURCE
+ {"quote", str_quote, 0,JSFUN_GENERIC_NATIVE|
+ JSFUN_THISP_PRIMITIVE,0},
+ {js_toSource_str, str_toSource, 0,JSFUN_THISP_STRING,0},
+#endif
+
+ /* Java-like methods. */
+ {js_toString_str, str_toString, 0,JSFUN_THISP_STRING,0},
+ {js_valueOf_str, str_valueOf, 0,JSFUN_THISP_STRING,0},
+ {"substring", str_substring, 2,JSFUN_GENERIC_NATIVE|
+ JSFUN_THISP_PRIMITIVE,0},
+ {"toLowerCase", str_toLowerCase, 0,JSFUN_GENERIC_NATIVE|
+ JSFUN_THISP_PRIMITIVE,0},
+ {"toUpperCase", str_toUpperCase, 0,JSFUN_GENERIC_NATIVE|
+ JSFUN_THISP_PRIMITIVE,0},
+ {"charAt", str_charAt, 1,JSFUN_GENERIC_NATIVE|
+ JSFUN_THISP_PRIMITIVE,0},
+ {"charCodeAt", str_charCodeAt, 1,JSFUN_GENERIC_NATIVE|
+ JSFUN_THISP_PRIMITIVE,0},
+ {"indexOf", str_indexOf, 1,JSFUN_GENERIC_NATIVE|
+ JSFUN_THISP_PRIMITIVE,0},
+ {"lastIndexOf", str_lastIndexOf, 1,JSFUN_GENERIC_NATIVE|
+ JSFUN_THISP_PRIMITIVE,0},
+ {"toLocaleLowerCase", str_toLocaleLowerCase, 0,JSFUN_GENERIC_NATIVE|
+ JSFUN_THISP_PRIMITIVE,0},
+ {"toLocaleUpperCase", str_toLocaleUpperCase, 0,JSFUN_GENERIC_NATIVE|
+ JSFUN_THISP_PRIMITIVE,0},
+ {"localeCompare", str_localeCompare, 1,JSFUN_GENERIC_NATIVE|
+ JSFUN_THISP_PRIMITIVE,0},
+
+ /* Perl-ish methods (search is actually Python-esque). */
+ {"match", str_match, 1,JSFUN_GENERIC_NATIVE|
+ JSFUN_THISP_PRIMITIVE,2},
+ {"search", str_search, 1,JSFUN_GENERIC_NATIVE|
+ JSFUN_THISP_PRIMITIVE,0},
+ {"replace", str_replace, 2,JSFUN_GENERIC_NATIVE|
+ JSFUN_THISP_PRIMITIVE,0},
+ {"split", str_split, 2,JSFUN_GENERIC_NATIVE|
+ JSFUN_THISP_PRIMITIVE,0},
+#if JS_HAS_PERL_SUBSTR
+ {"substr", str_substr, 2,JSFUN_GENERIC_NATIVE|
+ JSFUN_THISP_PRIMITIVE,0},
+#endif
+
+ /* Python-esque sequence methods. */
+ {"concat", str_concat, 0,JSFUN_GENERIC_NATIVE|
+ JSFUN_THISP_PRIMITIVE,0},
+ {"slice", str_slice, 0,JSFUN_GENERIC_NATIVE|
+ JSFUN_THISP_PRIMITIVE,0},
+
+ /* HTML string methods. */
+#if JS_HAS_STR_HTML_HELPERS
+ {"bold", str_bold, 0,JSFUN_THISP_PRIMITIVE,0},
+ {"italics", str_italics, 0,JSFUN_THISP_PRIMITIVE,0},
+ {"fixed", str_fixed, 0,JSFUN_THISP_PRIMITIVE,0},
+ {"fontsize", str_fontsize, 1,JSFUN_THISP_PRIMITIVE,0},
+ {"fontcolor", str_fontcolor, 1,JSFUN_THISP_PRIMITIVE,0},
+ {"link", str_link, 1,JSFUN_THISP_PRIMITIVE,0},
+ {"anchor", str_anchor, 1,JSFUN_THISP_PRIMITIVE,0},
+ {"strike", str_strike, 0,JSFUN_THISP_PRIMITIVE,0},
+ {"small", str_small, 0,JSFUN_THISP_PRIMITIVE,0},
+ {"big", str_big, 0,JSFUN_THISP_PRIMITIVE,0},
+ {"blink", str_blink, 0,JSFUN_THISP_PRIMITIVE,0},
+ {"sup", str_sup, 0,JSFUN_THISP_PRIMITIVE,0},
+ {"sub", str_sub, 0,JSFUN_THISP_PRIMITIVE,0},
+#endif
+
+ {0,0,0,0,0}
+};
+
+static JSBool
+String(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSString *str;
+
+ if (argc > 0) {
+ str = js_ValueToString(cx, argv[0]);
+ if (!str)
+ return JS_FALSE;
+ argv[0] = STRING_TO_JSVAL(str);
+ } else {
+ str = cx->runtime->emptyString;
+ }
+ if (!(cx->fp->flags & JSFRAME_CONSTRUCTING)) {
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+ }
+ OBJ_SET_SLOT(cx, obj, JSSLOT_PRIVATE, STRING_TO_JSVAL(str));
+ return JS_TRUE;
+}
+
+static JSBool
+str_fromCharCode(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ jschar *chars;
+ uintN i;
+ uint16 code;
+ JSString *str;
+
+ JS_ASSERT(argc < ARRAY_INIT_LIMIT);
+ chars = (jschar *) JS_malloc(cx, (argc + 1) * sizeof(jschar));
+ if (!chars)
+ return JS_FALSE;
+ for (i = 0; i < argc; i++) {
+ if (!js_ValueToUint16(cx, argv[i], &code)) {
+ JS_free(cx, chars);
+ return JS_FALSE;
+ }
+ chars[i] = (jschar)code;
+ }
+ chars[i] = 0;
+ str = js_NewString(cx, chars, argc, 0);
+ if (!str) {
+ JS_free(cx, chars);
+ return JS_FALSE;
+ }
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+
+static JSFunctionSpec string_static_methods[] = {
+ {"fromCharCode", str_fromCharCode, 1,0,0},
+ {0,0,0,0,0}
+};
+
+JSBool
+js_InitRuntimeStringState(JSContext *cx)
+{
+ JSRuntime *rt;
+ JSString *empty;
+ JSAtom *atom;
+
+ rt = cx->runtime;
+
+ /* Initialize string cache */
+#ifdef JS_THREADSAFE
+ JS_ASSERT(!rt->deflatedStringCacheLock);
+ rt->deflatedStringCacheLock = JS_NEW_LOCK();
+ if (!rt->deflatedStringCacheLock)
+ return JS_FALSE;
+#endif
+
+ /* Make a permanently locked empty string. */
+ JS_ASSERT(!rt->emptyString);
+ empty = js_NewStringCopyN(cx, js_empty_ucstr, 0, GCF_LOCK);
+ if (!empty)
+ goto bad;
+
+ /* Atomize it for scripts that use '' + x to convert x to string. */
+ atom = js_AtomizeString(cx, empty, ATOM_PINNED);
+ if (!atom)
+ goto bad;
+
+ rt->emptyString = empty;
+ rt->atomState.emptyAtom = atom;
+
+ return JS_TRUE;
+
+ bad:
+#ifdef JS_THREADSAFE
+ JS_DESTROY_LOCK(rt->deflatedStringCacheLock);
+ rt->deflatedStringCacheLock = NULL;
+#endif
+ return JS_FALSE;
+}
+
+void
+js_FinishRuntimeStringState(JSContext *cx)
+{
+ JSRuntime *rt = cx->runtime;
+
+ js_UnlockGCThingRT(rt, rt->emptyString);
+ rt->emptyString = NULL;
+}
+
+void
+js_FinishDeflatedStringCache(JSRuntime *rt)
+{
+ if (rt->deflatedStringCache) {
+ JS_HashTableDestroy(rt->deflatedStringCache);
+ rt->deflatedStringCache = NULL;
+ }
+#ifdef JS_THREADSAFE
+ if (rt->deflatedStringCacheLock) {
+ JS_DESTROY_LOCK(rt->deflatedStringCacheLock);
+ rt->deflatedStringCacheLock = NULL;
+ }
+#endif
+}
+
+JSObject *
+js_InitStringClass(JSContext *cx, JSObject *obj)
+{
+ JSObject *proto;
+
+ /* Define the escape, unescape functions in the global object. */
+ if (!JS_DefineFunctions(cx, obj, string_functions))
+ return NULL;
+
+ proto = JS_InitClass(cx, obj, NULL, &js_StringClass, String, 1,
+ string_props, string_methods,
+ NULL, string_static_methods);
+ if (!proto)
+ return NULL;
+ OBJ_SET_SLOT(cx, proto, JSSLOT_PRIVATE,
+ STRING_TO_JSVAL(cx->runtime->emptyString));
+ return proto;
+}
+
+JSString *
+js_NewString(JSContext *cx, jschar *chars, size_t length, uintN gcflag)
+{
+ JSString *str;
+
+ if (length > JSSTRING_LENGTH_MASK) {
+ JS_ReportOutOfMemory(cx);
+ return NULL;
+ }
+
+ str = (JSString *) js_NewGCThing(cx, gcflag | GCX_STRING, sizeof(JSString));
+ if (!str)
+ return NULL;
+ str->length = length;
+ str->chars = chars;
+#ifdef DEBUG
+ {
+ JSRuntime *rt = cx->runtime;
+ JS_RUNTIME_METER(rt, liveStrings);
+ JS_RUNTIME_METER(rt, totalStrings);
+ JS_LOCK_RUNTIME_VOID(rt,
+ (rt->lengthSum += (double)length,
+ rt->lengthSquaredSum += (double)length * (double)length));
+ }
+#endif
+ return str;
+}
+
+JSString *
+js_NewDependentString(JSContext *cx, JSString *base, size_t start,
+ size_t length, uintN gcflag)
+{
+ JSDependentString *ds;
+
+ if (length == 0)
+ return cx->runtime->emptyString;
+
+ if (start == 0 && length == JSSTRING_LENGTH(base))
+ return base;
+
+ if (start > JSSTRDEP_START_MASK ||
+ (start != 0 && length > JSSTRDEP_LENGTH_MASK)) {
+ return js_NewStringCopyN(cx, JSSTRING_CHARS(base) + start, length,
+ gcflag);
+ }
+
+ ds = (JSDependentString *)
+ js_NewGCThing(cx, gcflag | GCX_MUTABLE_STRING, sizeof(JSString));
+ if (!ds)
+ return NULL;
+ if (start == 0) {
+ JSPREFIX_SET_LENGTH(ds, length);
+ JSPREFIX_SET_BASE(ds, base);
+ } else {
+ JSSTRDEP_SET_START_AND_LENGTH(ds, start, length);
+ JSSTRDEP_SET_BASE(ds, base);
+ }
+#ifdef DEBUG
+ {
+ JSRuntime *rt = cx->runtime;
+ JS_RUNTIME_METER(rt, liveDependentStrings);
+ JS_RUNTIME_METER(rt, totalDependentStrings);
+ JS_RUNTIME_METER(rt, liveStrings);
+ JS_RUNTIME_METER(rt, totalStrings);
+ JS_LOCK_RUNTIME_VOID(rt,
+ (rt->strdepLengthSum += (double)length,
+ rt->strdepLengthSquaredSum += (double)length * (double)length));
+ JS_LOCK_RUNTIME_VOID(rt,
+ (rt->lengthSum += (double)length,
+ rt->lengthSquaredSum += (double)length * (double)length));
+ }
+#endif
+ return (JSString *)ds;
+}
+
+#ifdef DEBUG
+#include <math.h>
+
+void printJSStringStats(JSRuntime *rt) {
+ double mean = 0., var = 0., sigma = 0.;
+ jsrefcount count = rt->totalStrings;
+ if (count > 0 && rt->lengthSum >= 0) {
+ mean = rt->lengthSum / count;
+ var = count * rt->lengthSquaredSum - rt->lengthSum * rt->lengthSum;
+ if (var < 0.0 || count <= 1)
+ var = 0.0;
+ else
+ var /= count * (count - 1);
+
+ /* Windows says sqrt(0.0) is "-1.#J" (?!) so we must test. */
+ sigma = (var != 0.) ? sqrt(var) : 0.;
+ }
+ fprintf(stderr, "%lu total strings, mean length %g (sigma %g)\n",
+ (unsigned long)count, mean, sigma);
+
+ mean = var = sigma = 0.;
+ count = rt->totalDependentStrings;
+ if (count > 0 && rt->strdepLengthSum >= 0) {
+ mean = rt->strdepLengthSum / count;
+ var = count * rt->strdepLengthSquaredSum
+ - rt->strdepLengthSum * rt->strdepLengthSum;
+ if (var < 0.0 || count <= 1)
+ var = 0.0;
+ else
+ var /= count * (count - 1);
+
+ /* Windows says sqrt(0.0) is "-1.#J" (?!) so we must test. */
+ sigma = (var != 0.) ? sqrt(var) : 0.;
+ }
+ fprintf(stderr, "%lu total dependent strings, mean length %g (sigma %g)\n",
+ (unsigned long)count, mean, sigma);
+}
+#endif
+
+JSString *
+js_NewStringCopyN(JSContext *cx, const jschar *s, size_t n, uintN gcflag)
+{
+ jschar *news;
+ JSString *str;
+
+ news = (jschar *)JS_malloc(cx, (n + 1) * sizeof(jschar));
+ if (!news)
+ return NULL;
+ js_strncpy(news, s, n);
+ news[n] = 0;
+ str = js_NewString(cx, news, n, gcflag);
+ if (!str)
+ JS_free(cx, news);
+ return str;
+}
+
+JSString *
+js_NewStringCopyZ(JSContext *cx, const jschar *s, uintN gcflag)
+{
+ size_t n, m;
+ jschar *news;
+ JSString *str;
+
+ n = js_strlen(s);
+ m = (n + 1) * sizeof(jschar);
+ news = (jschar *) JS_malloc(cx, m);
+ if (!news)
+ return NULL;
+ memcpy(news, s, m);
+ str = js_NewString(cx, news, n, gcflag);
+ if (!str)
+ JS_free(cx, news);
+ return str;
+}
+
+JS_STATIC_DLL_CALLBACK(JSHashNumber)
+js_hash_string_pointer(const void *key)
+{
+ return (JSHashNumber)JS_PTR_TO_UINT32(key) >> JSVAL_TAGBITS;
+}
+
+void
+js_PurgeDeflatedStringCache(JSRuntime *rt, JSString *str)
+{
+ JSHashNumber hash;
+ JSHashEntry *he, **hep;
+
+ if (!rt->deflatedStringCache)
+ return;
+
+ hash = js_hash_string_pointer(str);
+ JS_ACQUIRE_LOCK(rt->deflatedStringCacheLock);
+ hep = JS_HashTableRawLookup(rt->deflatedStringCache, hash, str);
+ he = *hep;
+ if (he) {
+#ifdef DEBUG
+ rt->deflatedStringCacheBytes -= JSSTRING_LENGTH(str);
+#endif
+ free(he->value);
+ JS_HashTableRawRemove(rt->deflatedStringCache, hep, he);
+ }
+ JS_RELEASE_LOCK(rt->deflatedStringCacheLock);
+}
+
+void
+js_FinalizeString(JSContext *cx, JSString *str)
+{
+ js_FinalizeStringRT(cx->runtime, str);
+}
+
+void
+js_FinalizeStringRT(JSRuntime *rt, JSString *str)
+{
+ JSBool valid;
+
+ JS_RUNTIME_UNMETER(rt, liveStrings);
+ if (JSSTRING_IS_DEPENDENT(str)) {
+ /* If JSSTRFLAG_DEPENDENT is set, this string must be valid. */
+ JS_ASSERT(JSSTRDEP_BASE(str));
+ JS_RUNTIME_UNMETER(rt, liveDependentStrings);
+ valid = JS_TRUE;
+ } else {
+ /* A stillborn string has null chars, so is not valid. */
+ valid = (str->chars != NULL);
+ if (valid)
+ free(str->chars);
+ }
+ if (valid) {
+ js_PurgeDeflatedStringCache(rt, str);
+ str->chars = NULL;
+ }
+ str->length = 0;
+}
+
+JSObject *
+js_StringToObject(JSContext *cx, JSString *str)
+{
+ JSObject *obj;
+
+ obj = js_NewObject(cx, &js_StringClass, NULL, NULL);
+ if (!obj)
+ return NULL;
+ OBJ_SET_SLOT(cx, obj, JSSLOT_PRIVATE, STRING_TO_JSVAL(str));
+ return obj;
+}
+
+JS_FRIEND_API(const char *)
+js_ValueToPrintable(JSContext *cx, jsval v, JSValueToStringFun v2sfun)
+{
+ JSString *str;
+ const char *bytes;
+
+ str = v2sfun(cx, v);
+ if (!str)
+ return NULL;
+ str = js_QuoteString(cx, str, 0);
+ if (!str)
+ return NULL;
+ bytes = js_GetStringBytes(cx->runtime, str);
+ if (!bytes)
+ JS_ReportOutOfMemory(cx);
+ return bytes;
+}
+
+JS_FRIEND_API(JSString *)
+js_ValueToString(JSContext *cx, jsval v)
+{
+ JSObject *obj;
+ JSString *str;
+
+ if (JSVAL_IS_OBJECT(v)) {
+ obj = JSVAL_TO_OBJECT(v);
+ if (!obj)
+ return ATOM_TO_STRING(cx->runtime->atomState.nullAtom);
+ if (!OBJ_DEFAULT_VALUE(cx, obj, JSTYPE_STRING, &v))
+ return NULL;
+ }
+ if (JSVAL_IS_STRING(v)) {
+ str = JSVAL_TO_STRING(v);
+ } else if (JSVAL_IS_INT(v)) {
+ str = js_NumberToString(cx, JSVAL_TO_INT(v));
+ } else if (JSVAL_IS_DOUBLE(v)) {
+ str = js_NumberToString(cx, *JSVAL_TO_DOUBLE(v));
+ } else if (JSVAL_IS_BOOLEAN(v)) {
+ str = js_BooleanToString(cx, JSVAL_TO_BOOLEAN(v));
+ } else {
+ str = ATOM_TO_STRING(cx->runtime->atomState.typeAtoms[JSTYPE_VOID]);
+ }
+ return str;
+}
+
+JS_FRIEND_API(JSString *)
+js_ValueToSource(JSContext *cx, jsval v)
+{
+ JSTempValueRooter tvr;
+ JSString *str;
+
+ if (JSVAL_IS_STRING(v))
+ return js_QuoteString(cx, JSVAL_TO_STRING(v), '"');
+ if (JSVAL_IS_PRIMITIVE(v)) {
+ /* Special case to preserve negative zero, _contra_ toString. */
+ if (JSVAL_IS_DOUBLE(v) && JSDOUBLE_IS_NEGZERO(*JSVAL_TO_DOUBLE(v))) {
+ /* NB: _ucNstr rather than _ucstr to indicate non-terminated. */
+ static const jschar js_negzero_ucNstr[] = {'-', '0'};
+
+ return js_NewStringCopyN(cx, js_negzero_ucNstr, 2, 0);
+ }
+ return js_ValueToString(cx, v);
+ }
+
+ JS_PUSH_SINGLE_TEMP_ROOT(cx, JSVAL_NULL, &tvr);
+ if (!js_TryMethod(cx, JSVAL_TO_OBJECT(v),
+ cx->runtime->atomState.toSourceAtom,
+ 0, NULL, &tvr.u.value)) {
+ str = NULL;
+ } else {
+ str = js_ValueToString(cx, tvr.u.value);
+ }
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ return str;
+}
+
+JSHashNumber
+js_HashString(JSString *str)
+{
+ JSHashNumber h;
+ const jschar *s;
+ size_t n;
+
+ h = 0;
+ for (s = JSSTRING_CHARS(str), n = JSSTRING_LENGTH(str); n; s++, n--)
+ h = (h >> (JS_HASH_BITS - 4)) ^ (h << 4) ^ *s;
+ return h;
+}
+
+intN
+js_CompareStrings(JSString *str1, JSString *str2)
+{
+ size_t l1, l2, n, i;
+ const jschar *s1, *s2;
+ intN cmp;
+
+ JS_ASSERT(str1);
+ JS_ASSERT(str2);
+
+ /* Fast case: pointer equality could be a quick win. */
+ if (str1 == str2)
+ return 0;
+
+ l1 = JSSTRING_LENGTH(str1), l2 = JSSTRING_LENGTH(str2);
+ s1 = JSSTRING_CHARS(str1), s2 = JSSTRING_CHARS(str2);
+ n = JS_MIN(l1, l2);
+ for (i = 0; i < n; i++) {
+ cmp = s1[i] - s2[i];
+ if (cmp != 0)
+ return cmp;
+ }
+ return (intN)(l1 - l2);
+}
+
+JSBool
+js_EqualStrings(JSString *str1, JSString *str2)
+{
+ size_t n;
+ const jschar *s1, *s2;
+
+ JS_ASSERT(str1);
+ JS_ASSERT(str2);
+
+ /* Fast case: pointer equality could be a quick win. */
+ if (str1 == str2)
+ return JS_TRUE;
+
+ n = JSSTRING_LENGTH(str1);
+ if (n != JSSTRING_LENGTH(str2))
+ return JS_FALSE;
+
+ if (n == 0)
+ return JS_TRUE;
+
+ s1 = JSSTRING_CHARS(str1), s2 = JSSTRING_CHARS(str2);
+ do {
+ if (*s1 != *s2)
+ return JS_FALSE;
+ ++s1, ++s2;
+ } while (--n != 0);
+
+ return JS_TRUE;
+}
+
+size_t
+js_strlen(const jschar *s)
+{
+ const jschar *t;
+
+ for (t = s; *t != 0; t++)
+ continue;
+ return (size_t)(t - s);
+}
+
+jschar *
+js_strchr(const jschar *s, jschar c)
+{
+ while (*s != 0) {
+ if (*s == c)
+ return (jschar *)s;
+ s++;
+ }
+ return NULL;
+}
+
+jschar *
+js_strchr_limit(const jschar *s, jschar c, const jschar *limit)
+{
+ while (s < limit) {
+ if (*s == c)
+ return (jschar *)s;
+ s++;
+ }
+ return NULL;
+}
+
+const jschar *
+js_SkipWhiteSpace(const jschar *s)
+{
+ /* JS_ISSPACE is false on a null. */
+ while (JS_ISSPACE(*s))
+ s++;
+ return s;
+}
+
+#ifdef JS_C_STRINGS_ARE_UTF8
+
+jschar *
+js_InflateString(JSContext *cx, const char *bytes, size_t *length)
+{
+ jschar *chars = NULL;
+ size_t dstlen = 0;
+
+ if (!js_InflateStringToBuffer(cx, bytes, *length, NULL, &dstlen))
+ return NULL;
+ chars = (jschar *) JS_malloc(cx, (dstlen + 1) * sizeof (jschar));
+ if (!chars)
+ return NULL;
+ js_InflateStringToBuffer(cx, bytes, *length, chars, &dstlen);
+ chars[dstlen] = 0;
+ *length = dstlen;
+ return chars;
+}
+
+/*
+ * May be called with null cx by js_GetStringBytes, see below.
+ */
+char *
+js_DeflateString(JSContext *cx, const jschar *chars, size_t length)
+{
+ size_t size = 0;
+ char *bytes = NULL;
+ if (!js_DeflateStringToBuffer(cx, chars, length, NULL, &size))
+ return NULL;
+ bytes = (char *) (cx ? JS_malloc(cx, size+1) : malloc(size+1));
+ if (!bytes)
+ return NULL;
+ js_DeflateStringToBuffer(cx, chars, length, bytes, &size);
+ bytes[size] = 0;
+ return bytes;
+}
+
+JSBool
+js_DeflateStringToBuffer(JSContext *cx, const jschar *src, size_t srclen,
+ char *dst, size_t *dstlenp)
+{
+ size_t i, utf8Len, dstlen = *dstlenp, origDstlen = dstlen;
+ jschar c, c2;
+ uint32 v;
+ uint8 utf8buf[6];
+
+ if (!dst)
+ dstlen = origDstlen = (size_t) -1;
+
+ while (srclen) {
+ c = *src++;
+ srclen--;
+ if ((c >= 0xDC00) && (c <= 0xDFFF))
+ goto badSurrogate;
+ if (c < 0xD800 || c > 0xDBFF) {
+ v = c;
+ } else {
+ if (srclen < 1)
+ goto bufferTooSmall;
+ c2 = *src++;
+ srclen--;
+ if ((c2 < 0xDC00) || (c2 > 0xDFFF)) {
+ c = c2;
+ goto badSurrogate;
+ }
+ v = ((c - 0xD800) << 10) + (c2 - 0xDC00) + 0x10000;
+ }
+ if (v < 0x0080) {
+ /* no encoding necessary - performance hack */
+ if (!dstlen)
+ goto bufferTooSmall;
+ if (dst)
+ *dst++ = (char) v;
+ utf8Len = 1;
+ } else {
+ utf8Len = js_OneUcs4ToUtf8Char(utf8buf, v);
+ if (utf8Len > dstlen)
+ goto bufferTooSmall;
+ if (dst) {
+ for (i = 0; i < utf8Len; i++)
+ *dst++ = (char) utf8buf[i];
+ }
+ }
+ dstlen -= utf8Len;
+ }
+ *dstlenp = (origDstlen - dstlen);
+ return JS_TRUE;
+
+badSurrogate:
+ *dstlenp = (origDstlen - dstlen);
+ if (cx) {
+ char buffer[10];
+ JS_snprintf(buffer, 10, "0x%x", c);
+ JS_ReportErrorFlagsAndNumber(cx, JSREPORT_ERROR,
+ js_GetErrorMessage, NULL,
+ JSMSG_BAD_SURROGATE_CHAR,
+ buffer);
+ }
+ return JS_FALSE;
+
+bufferTooSmall:
+ *dstlenp = (origDstlen - dstlen);
+ if (cx) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BUFFER_TOO_SMALL);
+ }
+ return JS_FALSE;
+}
+
+JSBool
+js_InflateStringToBuffer(JSContext *cx, const char *src, size_t srclen,
+ jschar *dst, size_t *dstlenp)
+{
+ uint32 v;
+ size_t offset = 0, j, n, dstlen = *dstlenp, origDstlen = dstlen;
+
+ if (!dst)
+ dstlen = origDstlen = (size_t) -1;
+
+ while (srclen) {
+ v = (uint8) *src;
+ n = 1;
+ if (v & 0x80) {
+ while (v & (0x80 >> n))
+ n++;
+ if (n > srclen)
+ goto bufferTooSmall;
+ if (n == 1 || n > 6)
+ goto badCharacter;
+ for (j = 1; j < n; j++) {
+ if ((src[j] & 0xC0) != 0x80)
+ goto badCharacter;
+ }
+ v = Utf8ToOneUcs4Char(src, n);
+ if (v >= 0x10000) {
+ v -= 0x10000;
+ if (v > 0xFFFFF || dstlen < 2) {
+ *dstlenp = (origDstlen - dstlen);
+ if (cx) {
+ char buffer[10];
+ JS_snprintf(buffer, 10, "0x%x", v + 0x10000);
+ JS_ReportErrorFlagsAndNumber(cx,
+ JSREPORT_ERROR,
+ js_GetErrorMessage, NULL,
+ JSMSG_UTF8_CHAR_TOO_LARGE,
+ buffer);
+ }
+ return JS_FALSE;
+ }
+ if (dstlen < 2)
+ goto bufferTooSmall;
+ if (dst) {
+ *dst++ = (jschar)((v >> 10) + 0xD800);
+ v = (jschar)((v & 0x3FF) + 0xDC00);
+ }
+ dstlen--;
+ }
+ }
+ if (!dstlen)
+ goto bufferTooSmall;
+ if (dst)
+ *dst++ = (jschar) v;
+ dstlen--;
+ offset += n;
+ src += n;
+ srclen -= n;
+ }
+ *dstlenp = (origDstlen - dstlen);
+ return JS_TRUE;
+
+badCharacter:
+ *dstlenp = (origDstlen - dstlen);
+ if (cx) {
+ char buffer[10];
+ JS_snprintf(buffer, 10, "%d", offset);
+ JS_ReportErrorFlagsAndNumber(cx, JSREPORT_ERROR,
+ js_GetErrorMessage, NULL,
+ JSMSG_MALFORMED_UTF8_CHAR,
+ buffer);
+ }
+ return JS_FALSE;
+
+bufferTooSmall:
+ *dstlenp = (origDstlen - dstlen);
+ if (cx) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BUFFER_TOO_SMALL);
+ }
+ return JS_FALSE;
+}
+
+#else
+
+JSBool
+js_InflateStringToBuffer(JSContext* cx, const char *bytes, size_t length,
+ jschar *chars, size_t* charsLength)
+{
+ size_t i;
+
+ if (length > *charsLength) {
+ for (i = 0; i < *charsLength; i++)
+ chars[i] = (unsigned char) bytes[i];
+ if (cx) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BUFFER_TOO_SMALL);
+ }
+ return JS_FALSE;
+ }
+ for (i = 0; i < length; i++)
+ chars[i] = (unsigned char) bytes[i];
+ *charsLength = length;
+ return JS_TRUE;
+}
+
+jschar *
+js_InflateString(JSContext *cx, const char *bytes, size_t *bytesLength)
+{
+ jschar *chars;
+ size_t i, length = *bytesLength;
+
+ chars = (jschar *) JS_malloc(cx, (length + 1) * sizeof(jschar));
+ if (!chars) {
+ *bytesLength = 0;
+ return NULL;
+ }
+ for (i = 0; i < length; i++)
+ chars[i] = (unsigned char) bytes[i];
+ chars[length] = 0;
+ *bytesLength = length;
+ return chars;
+}
+
+JSBool
+js_DeflateStringToBuffer(JSContext* cx, const jschar *chars, size_t length,
+ char *bytes, size_t* bytesLength)
+{
+ size_t i;
+
+ if (length > *bytesLength) {
+ for (i = 0; i < *bytesLength; i++)
+ bytes[i] = (char) chars[i];
+ if (cx) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BUFFER_TOO_SMALL);
+ }
+ return JS_FALSE;
+ }
+ for (i = 0; i < length; i++)
+ bytes[i] = (char) chars[i];
+ *bytesLength = length;
+ return JS_TRUE;
+}
+
+/*
+ * May be called with null cx by js_GetStringBytes, see below.
+ */
+char *
+js_DeflateString(JSContext *cx, const jschar *chars, size_t length)
+{
+ size_t i, size;
+ char *bytes;
+
+ size = (length + 1) * sizeof(char);
+ bytes = (char *) (cx ? JS_malloc(cx, size) : malloc(size));
+ if (!bytes)
+ return NULL;
+
+ for (i = 0; i < length; i++)
+ bytes[i] = (char) chars[i];
+
+ bytes[length] = 0;
+ return bytes;
+}
+
+#endif
+
+static JSHashTable *
+GetDeflatedStringCache(JSRuntime *rt)
+{
+ JSHashTable *cache;
+
+ cache = rt->deflatedStringCache;
+ if (!cache) {
+ cache = JS_NewHashTable(8, js_hash_string_pointer,
+ JS_CompareValues, JS_CompareValues,
+ NULL, NULL);
+ rt->deflatedStringCache = cache;
+ }
+ return cache;
+}
+
+JSBool
+js_SetStringBytes(JSRuntime *rt, JSString *str, char *bytes, size_t length)
+{
+ JSHashTable *cache;
+ JSBool ok;
+ JSHashNumber hash;
+ JSHashEntry **hep;
+
+ JS_ACQUIRE_LOCK(rt->deflatedStringCacheLock);
+
+ cache = GetDeflatedStringCache(rt);
+ if (!cache) {
+ ok = JS_FALSE;
+ } else {
+ hash = js_hash_string_pointer(str);
+ hep = JS_HashTableRawLookup(cache, hash, str);
+ JS_ASSERT(*hep == NULL);
+ ok = JS_HashTableRawAdd(cache, hep, hash, str, bytes) != NULL;
+#ifdef DEBUG
+ if (ok)
+ rt->deflatedStringCacheBytes += length;
+#endif
+ }
+
+ JS_RELEASE_LOCK(rt->deflatedStringCacheLock);
+ return ok;
+}
+
+char *
+js_GetStringBytes(JSRuntime *rt, JSString *str)
+{
+ JSHashTable *cache;
+ char *bytes;
+ JSHashNumber hash;
+ JSHashEntry *he, **hep;
+
+ JS_ACQUIRE_LOCK(rt->deflatedStringCacheLock);
+
+ cache = GetDeflatedStringCache(rt);
+ if (!cache) {
+ bytes = NULL;
+ } else {
+ hash = js_hash_string_pointer(str);
+ hep = JS_HashTableRawLookup(cache, hash, str);
+ he = *hep;
+ if (he) {
+ bytes = (char *) he->value;
+
+ /* Try to catch failure to JS_ShutDown between runtime epochs. */
+ JS_ASSERT((*bytes == '\0' && JSSTRING_LENGTH(str) == 0) ||
+ *bytes == (char) JSSTRING_CHARS(str)[0]);
+ } else {
+ bytes = js_DeflateString(NULL, JSSTRING_CHARS(str),
+ JSSTRING_LENGTH(str));
+ if (bytes) {
+ if (JS_HashTableRawAdd(cache, hep, hash, str, bytes)) {
+#ifdef DEBUG
+ rt->deflatedStringCacheBytes += JSSTRING_LENGTH(str);
+#endif
+ } else {
+ free(bytes);
+ bytes = NULL;
+ }
+ }
+ }
+ }
+
+ JS_RELEASE_LOCK(rt->deflatedStringCacheLock);
+ return bytes;
+}
+
+/*
+ * From java.lang.Character.java:
+ *
+ * The character properties are currently encoded into 32 bits in the
+ * following manner:
+ *
+ * 10 bits signed offset used for converting case
+ * 1 bit if 1, adding the signed offset converts the character to
+ * lowercase
+ * 1 bit if 1, subtracting the signed offset converts the character to
+ * uppercase
+ * 1 bit if 1, character has a titlecase equivalent (possibly itself)
+ * 3 bits 0 may not be part of an identifier
+ * 1 ignorable control; may continue a Unicode identifier or JS
+ * identifier
+ * 2 may continue a JS identifier but not a Unicode identifier
+ * (unused)
+ * 3 may continue a Unicode identifier or JS identifier
+ * 4 is a JS whitespace character
+ * 5 may start or continue a JS identifier;
+ * may continue but not start a Unicode identifier (_)
+ * 6 may start or continue a JS identifier but not a Unicode
+ * identifier ($)
+ * 7 may start or continue a Unicode identifier or JS identifier
+ * Thus:
+ * 5, 6, 7 may start a JS identifier
+ * 1, 2, 3, 5, 6, 7 may continue a JS identifier
+ * 7 may start a Unicode identifier
+ * 1, 3, 5, 7 may continue a Unicode identifier
+ * 1 is ignorable within an identifier
+ * 4 is JS whitespace
+ * 2 bits 0 this character has no numeric property
+ * 1 adding the digit offset to the character code and then
+ * masking with 0x1F will produce the desired numeric value
+ * 2 this character has a "strange" numeric value
+ * 3 a JS supradecimal digit: adding the digit offset to the
+ * character code, then masking with 0x1F, then adding 10
+ * will produce the desired numeric value
+ * 5 bits digit offset
+ * 1 bit XML 1.0 name start character
+ * 1 bit XML 1.0 name character
+ * 2 bits reserved for future use
+ * 5 bits character type
+ */
+
+/* The X table has 1024 entries for a total of 1024 bytes. */
+
+const uint8 js_X[] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, /* 0x0000 */
+ 8, 9, 10, 11, 12, 13, 14, 15, /* 0x0200 */
+ 16, 17, 18, 19, 20, 21, 22, 23, /* 0x0400 */
+ 24, 25, 26, 27, 28, 28, 28, 28, /* 0x0600 */
+ 28, 28, 28, 28, 29, 30, 31, 32, /* 0x0800 */
+ 33, 34, 35, 36, 37, 38, 39, 40, /* 0x0A00 */
+ 41, 42, 43, 44, 45, 46, 28, 28, /* 0x0C00 */
+ 47, 48, 49, 50, 51, 52, 53, 28, /* 0x0E00 */
+ 28, 28, 54, 55, 56, 57, 58, 59, /* 0x1000 */
+ 28, 28, 28, 28, 28, 28, 28, 28, /* 0x1200 */
+ 28, 28, 28, 28, 28, 28, 28, 28, /* 0x1400 */
+ 28, 28, 28, 28, 28, 28, 28, 28, /* 0x1600 */
+ 28, 28, 28, 28, 28, 28, 28, 28, /* 0x1800 */
+ 28, 28, 28, 28, 28, 28, 28, 28, /* 0x1A00 */
+ 28, 28, 28, 28, 28, 28, 28, 28, /* 0x1C00 */
+ 60, 60, 61, 62, 63, 64, 65, 66, /* 0x1E00 */
+ 67, 68, 69, 70, 71, 72, 73, 74, /* 0x2000 */
+ 75, 75, 75, 76, 77, 78, 28, 28, /* 0x2200 */
+ 79, 80, 81, 82, 83, 83, 84, 85, /* 0x2400 */
+ 86, 85, 28, 28, 87, 88, 89, 28, /* 0x2600 */
+ 28, 28, 28, 28, 28, 28, 28, 28, /* 0x2800 */
+ 28, 28, 28, 28, 28, 28, 28, 28, /* 0x2A00 */
+ 28, 28, 28, 28, 28, 28, 28, 28, /* 0x2C00 */
+ 28, 28, 28, 28, 28, 28, 28, 28, /* 0x2E00 */
+ 90, 91, 92, 93, 94, 56, 95, 28, /* 0x3000 */
+ 96, 97, 98, 99, 83, 100, 83, 101, /* 0x3200 */
+ 28, 28, 28, 28, 28, 28, 28, 28, /* 0x3400 */
+ 28, 28, 28, 28, 28, 28, 28, 28, /* 0x3600 */
+ 28, 28, 28, 28, 28, 28, 28, 28, /* 0x3800 */
+ 28, 28, 28, 28, 28, 28, 28, 28, /* 0x3A00 */
+ 28, 28, 28, 28, 28, 28, 28, 28, /* 0x3C00 */
+ 28, 28, 28, 28, 28, 28, 28, 28, /* 0x3E00 */
+ 28, 28, 28, 28, 28, 28, 28, 28, /* 0x4000 */
+ 28, 28, 28, 28, 28, 28, 28, 28, /* 0x4200 */
+ 28, 28, 28, 28, 28, 28, 28, 28, /* 0x4400 */
+ 28, 28, 28, 28, 28, 28, 28, 28, /* 0x4600 */
+ 28, 28, 28, 28, 28, 28, 28, 28, /* 0x4800 */
+ 28, 28, 28, 28, 28, 28, 28, 28, /* 0x4A00 */
+ 28, 28, 28, 28, 28, 28, 28, 28, /* 0x4C00 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x4E00 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x5000 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x5200 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x5400 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x5600 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x5800 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x5A00 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x5C00 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x5E00 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x6000 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x6200 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x6400 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x6600 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x6800 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x6A00 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x6C00 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x6E00 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x7000 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x7200 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x7400 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x7600 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x7800 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x7A00 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x7C00 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x7E00 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x8000 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x8200 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x8400 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x8600 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x8800 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x8A00 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x8C00 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x8E00 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x9000 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x9200 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x9400 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x9600 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x9800 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x9A00 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x9C00 */
+ 56, 56, 56, 56, 56, 56, 102, 28, /* 0x9E00 */
+ 28, 28, 28, 28, 28, 28, 28, 28, /* 0xA000 */
+ 28, 28, 28, 28, 28, 28, 28, 28, /* 0xA200 */
+ 28, 28, 28, 28, 28, 28, 28, 28, /* 0xA400 */
+ 28, 28, 28, 28, 28, 28, 28, 28, /* 0xA600 */
+ 28, 28, 28, 28, 28, 28, 28, 28, /* 0xA800 */
+ 28, 28, 28, 28, 28, 28, 28, 28, /* 0xAA00 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0xAC00 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0xAE00 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0xB000 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0xB200 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0xB400 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0xB600 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0xB800 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0xBA00 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0xBC00 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0xBE00 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0xC000 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0xC200 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0xC400 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0xC600 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0xC800 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0xCA00 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0xCC00 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0xCE00 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0xD000 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0xD200 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0xD400 */
+ 56, 56, 56, 56, 56, 56, 103, 28, /* 0xD600 */
+104, 104, 104, 104, 104, 104, 104, 104, /* 0xD800 */
+104, 104, 104, 104, 104, 104, 104, 104, /* 0xDA00 */
+104, 104, 104, 104, 104, 104, 104, 104, /* 0xDC00 */
+104, 104, 104, 104, 104, 104, 104, 104, /* 0xDE00 */
+105, 105, 105, 105, 105, 105, 105, 105, /* 0xE000 */
+105, 105, 105, 105, 105, 105, 105, 105, /* 0xE200 */
+105, 105, 105, 105, 105, 105, 105, 105, /* 0xE400 */
+105, 105, 105, 105, 105, 105, 105, 105, /* 0xE600 */
+105, 105, 105, 105, 105, 105, 105, 105, /* 0xE800 */
+105, 105, 105, 105, 105, 105, 105, 105, /* 0xEA00 */
+105, 105, 105, 105, 105, 105, 105, 105, /* 0xEC00 */
+105, 105, 105, 105, 105, 105, 105, 105, /* 0xEE00 */
+105, 105, 105, 105, 105, 105, 105, 105, /* 0xF000 */
+105, 105, 105, 105, 105, 105, 105, 105, /* 0xF200 */
+105, 105, 105, 105, 105, 105, 105, 105, /* 0xF400 */
+105, 105, 105, 105, 105, 105, 105, 105, /* 0xF600 */
+105, 105, 105, 105, 56, 56, 56, 56, /* 0xF800 */
+106, 28, 28, 28, 107, 108, 109, 110, /* 0xFA00 */
+ 56, 56, 56, 56, 111, 112, 113, 114, /* 0xFC00 */
+115, 116, 56, 117, 118, 119, 120, 121 /* 0xFE00 */
+};
+
+/* The Y table has 7808 entries for a total of 7808 bytes. */
+
+const uint8 js_Y[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, /* 0 */
+ 0, 1, 1, 1, 1, 1, 0, 0, /* 0 */
+ 0, 0, 0, 0, 0, 0, 0, 0, /* 0 */
+ 0, 0, 0, 0, 0, 0, 0, 0, /* 0 */
+ 2, 3, 3, 3, 4, 3, 3, 3, /* 0 */
+ 5, 6, 3, 7, 3, 8, 3, 3, /* 0 */
+ 9, 9, 9, 9, 9, 9, 9, 9, /* 0 */
+ 9, 9, 3, 3, 7, 7, 7, 3, /* 0 */
+ 3, 10, 10, 10, 10, 10, 10, 10, /* 1 */
+ 10, 10, 10, 10, 10, 10, 10, 10, /* 1 */
+ 10, 10, 10, 10, 10, 10, 10, 10, /* 1 */
+ 10, 10, 10, 5, 3, 6, 11, 12, /* 1 */
+ 11, 13, 13, 13, 13, 13, 13, 13, /* 1 */
+ 13, 13, 13, 13, 13, 13, 13, 13, /* 1 */
+ 13, 13, 13, 13, 13, 13, 13, 13, /* 1 */
+ 13, 13, 13, 5, 7, 6, 7, 0, /* 1 */
+ 0, 0, 0, 0, 0, 0, 0, 0, /* 2 */
+ 0, 0, 0, 0, 0, 0, 0, 0, /* 2 */
+ 0, 0, 0, 0, 0, 0, 0, 0, /* 2 */
+ 0, 0, 0, 0, 0, 0, 0, 0, /* 2 */
+ 2, 3, 4, 4, 4, 4, 15, 15, /* 2 */
+ 11, 15, 16, 5, 7, 8, 15, 11, /* 2 */
+ 15, 7, 17, 17, 11, 16, 15, 3, /* 2 */
+ 11, 18, 16, 6, 19, 19, 19, 3, /* 2 */
+ 20, 20, 20, 20, 20, 20, 20, 20, /* 3 */
+ 20, 20, 20, 20, 20, 20, 20, 20, /* 3 */
+ 20, 20, 20, 20, 20, 20, 20, 7, /* 3 */
+ 20, 20, 20, 20, 20, 20, 20, 16, /* 3 */
+ 21, 21, 21, 21, 21, 21, 21, 21, /* 3 */
+ 21, 21, 21, 21, 21, 21, 21, 21, /* 3 */
+ 21, 21, 21, 21, 21, 21, 21, 7, /* 3 */
+ 21, 21, 21, 21, 21, 21, 21, 22, /* 3 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 4 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 4 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 4 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 4 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 4 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 4 */
+ 25, 26, 23, 24, 23, 24, 23, 24, /* 4 */
+ 16, 23, 24, 23, 24, 23, 24, 23, /* 4 */
+ 24, 23, 24, 23, 24, 23, 24, 23, /* 5 */
+ 24, 16, 23, 24, 23, 24, 23, 24, /* 5 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 5 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 5 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 5 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 5 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 5 */
+ 27, 23, 24, 23, 24, 23, 24, 28, /* 5 */
+ 16, 29, 23, 24, 23, 24, 30, 23, /* 6 */
+ 24, 31, 31, 23, 24, 16, 32, 32, /* 6 */
+ 33, 23, 24, 31, 34, 16, 35, 36, /* 6 */
+ 23, 24, 16, 16, 35, 37, 16, 38, /* 6 */
+ 23, 24, 23, 24, 23, 24, 38, 23, /* 6 */
+ 24, 39, 40, 16, 23, 24, 39, 23, /* 6 */
+ 24, 41, 41, 23, 24, 23, 24, 42, /* 6 */
+ 23, 24, 16, 40, 23, 24, 40, 40, /* 6 */
+ 40, 40, 40, 40, 43, 44, 45, 43, /* 7 */
+ 44, 45, 43, 44, 45, 23, 24, 23, /* 7 */
+ 24, 23, 24, 23, 24, 23, 24, 23, /* 7 */
+ 24, 23, 24, 23, 24, 16, 23, 24, /* 7 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 7 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 7 */
+ 16, 43, 44, 45, 23, 24, 46, 46, /* 7 */
+ 46, 46, 23, 24, 23, 24, 23, 24, /* 7 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 8 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 8 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 8 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 8 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 8 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 8 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 8 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 8 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 9 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 9 */
+ 16, 16, 16, 47, 48, 16, 49, 49, /* 9 */
+ 50, 50, 16, 51, 16, 16, 16, 16, /* 9 */
+ 49, 16, 16, 52, 16, 16, 16, 16, /* 9 */
+ 53, 54, 16, 16, 16, 16, 16, 54, /* 9 */
+ 16, 16, 55, 16, 16, 16, 16, 16, /* 9 */
+ 16, 16, 16, 16, 16, 16, 16, 16, /* 9 */
+ 16, 16, 16, 56, 16, 16, 16, 16, /* 10 */
+ 56, 16, 57, 57, 16, 16, 16, 16, /* 10 */
+ 16, 16, 58, 16, 16, 16, 16, 16, /* 10 */
+ 16, 16, 16, 16, 16, 16, 16, 16, /* 10 */
+ 16, 16, 16, 16, 16, 16, 16, 16, /* 10 */
+ 16, 46, 46, 46, 46, 46, 46, 46, /* 10 */
+ 59, 59, 59, 59, 59, 59, 59, 59, /* 10 */
+ 59, 11, 11, 59, 59, 59, 59, 59, /* 10 */
+ 59, 59, 11, 11, 11, 11, 11, 11, /* 11 */
+ 11, 11, 11, 11, 11, 11, 11, 11, /* 11 */
+ 59, 59, 11, 11, 11, 11, 11, 11, /* 11 */
+ 11, 11, 11, 11, 11, 11, 11, 46, /* 11 */
+ 59, 59, 59, 59, 59, 11, 11, 11, /* 11 */
+ 11, 11, 46, 46, 46, 46, 46, 46, /* 11 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 11 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 11 */
+ 60, 60, 60, 60, 60, 60, 60, 60, /* 12 */
+ 60, 60, 60, 60, 60, 60, 60, 60, /* 12 */
+ 60, 60, 60, 60, 60, 60, 60, 60, /* 12 */
+ 60, 60, 60, 60, 60, 60, 60, 60, /* 12 */
+ 60, 60, 60, 60, 60, 60, 60, 60, /* 12 */
+ 60, 60, 60, 60, 60, 60, 60, 60, /* 12 */
+ 60, 60, 60, 60, 60, 60, 60, 60, /* 12 */
+ 60, 60, 60, 60, 60, 60, 60, 60, /* 12 */
+ 60, 60, 60, 60, 60, 60, 46, 46, /* 13 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 13 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 13 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 13 */
+ 60, 60, 46, 46, 46, 46, 46, 46, /* 13 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 13 */
+ 46, 46, 46, 46, 3, 3, 46, 46, /* 13 */
+ 46, 46, 59, 46, 46, 46, 3, 46, /* 13 */
+ 46, 46, 46, 46, 11, 11, 61, 3, /* 14 */
+ 62, 62, 62, 46, 63, 46, 64, 64, /* 14 */
+ 16, 20, 20, 20, 20, 20, 20, 20, /* 14 */
+ 20, 20, 20, 20, 20, 20, 20, 20, /* 14 */
+ 20, 20, 46, 20, 20, 20, 20, 20, /* 14 */
+ 20, 20, 20, 20, 65, 66, 66, 66, /* 14 */
+ 16, 21, 21, 21, 21, 21, 21, 21, /* 14 */
+ 21, 21, 21, 21, 21, 21, 21, 21, /* 14 */
+ 21, 21, 16, 21, 21, 21, 21, 21, /* 15 */
+ 21, 21, 21, 21, 67, 68, 68, 46, /* 15 */
+ 69, 70, 38, 38, 38, 71, 72, 46, /* 15 */
+ 46, 46, 38, 46, 38, 46, 38, 46, /* 15 */
+ 38, 46, 23, 24, 23, 24, 23, 24, /* 15 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 15 */
+ 73, 74, 16, 40, 46, 46, 46, 46, /* 15 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 15 */
+ 46, 75, 75, 75, 75, 75, 75, 75, /* 16 */
+ 75, 75, 75, 75, 75, 46, 75, 75, /* 16 */
+ 20, 20, 20, 20, 20, 20, 20, 20, /* 16 */
+ 20, 20, 20, 20, 20, 20, 20, 20, /* 16 */
+ 20, 20, 20, 20, 20, 20, 20, 20, /* 16 */
+ 20, 20, 20, 20, 20, 20, 20, 20, /* 16 */
+ 21, 21, 21, 21, 21, 21, 21, 21, /* 16 */
+ 21, 21, 21, 21, 21, 21, 21, 21, /* 16 */
+ 21, 21, 21, 21, 21, 21, 21, 21, /* 17 */
+ 21, 21, 21, 21, 21, 21, 21, 21, /* 17 */
+ 46, 74, 74, 74, 74, 74, 74, 74, /* 17 */
+ 74, 74, 74, 74, 74, 46, 74, 74, /* 17 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 17 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 17 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 17 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 17 */
+ 23, 24, 15, 60, 60, 60, 60, 46, /* 18 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 18 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 18 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 18 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 18 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 18 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 18 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 18 */
+ 40, 23, 24, 23, 24, 46, 46, 23, /* 19 */
+ 24, 46, 46, 23, 24, 46, 46, 46, /* 19 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 19 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 19 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 19 */
+ 23, 24, 23, 24, 46, 46, 23, 24, /* 19 */
+ 23, 24, 23, 24, 23, 24, 46, 46, /* 19 */
+ 23, 24, 46, 46, 46, 46, 46, 46, /* 19 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 20 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 20 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 20 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 20 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 20 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 20 */
+ 46, 76, 76, 76, 76, 76, 76, 76, /* 20 */
+ 76, 76, 76, 76, 76, 76, 76, 76, /* 20 */
+ 76, 76, 76, 76, 76, 76, 76, 76, /* 21 */
+ 76, 76, 76, 76, 76, 76, 76, 76, /* 21 */
+ 76, 76, 76, 76, 76, 76, 76, 46, /* 21 */
+ 46, 59, 3, 3, 3, 3, 3, 3, /* 21 */
+ 46, 77, 77, 77, 77, 77, 77, 77, /* 21 */
+ 77, 77, 77, 77, 77, 77, 77, 77, /* 21 */
+ 77, 77, 77, 77, 77, 77, 77, 77, /* 21 */
+ 77, 77, 77, 77, 77, 77, 77, 77, /* 21 */
+ 77, 77, 77, 77, 77, 77, 77, 16, /* 22 */
+ 46, 3, 46, 46, 46, 46, 46, 46, /* 22 */
+ 46, 60, 60, 60, 60, 60, 60, 60, /* 22 */
+ 60, 60, 60, 60, 60, 60, 60, 60, /* 22 */
+ 60, 60, 46, 60, 60, 60, 60, 60, /* 22 */
+ 60, 60, 60, 60, 60, 60, 60, 60, /* 22 */
+ 60, 60, 60, 60, 60, 60, 60, 60, /* 22 */
+ 60, 60, 46, 60, 60, 60, 3, 60, /* 22 */
+ 3, 60, 60, 3, 60, 46, 46, 46, /* 23 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 23 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 23 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 23 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 23 */
+ 40, 40, 40, 46, 46, 46, 46, 46, /* 23 */
+ 40, 40, 40, 3, 3, 46, 46, 46, /* 23 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 23 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 24 */
+ 46, 46, 46, 46, 3, 46, 46, 46, /* 24 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 24 */
+ 46, 46, 46, 3, 46, 46, 46, 3, /* 24 */
+ 46, 40, 40, 40, 40, 40, 40, 40, /* 24 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 24 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 24 */
+ 40, 40, 40, 46, 46, 46, 46, 46, /* 24 */
+ 59, 40, 40, 40, 40, 40, 40, 40, /* 25 */
+ 40, 40, 40, 60, 60, 60, 60, 60, /* 25 */
+ 60, 60, 60, 46, 46, 46, 46, 46, /* 25 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 25 */
+ 78, 78, 78, 78, 78, 78, 78, 78, /* 25 */
+ 78, 78, 3, 3, 3, 3, 46, 46, /* 25 */
+ 60, 40, 40, 40, 40, 40, 40, 40, /* 25 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 25 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 26 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 26 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 26 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 26 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 26 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 26 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 26 */
+ 46, 46, 40, 40, 40, 40, 40, 46, /* 26 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 27 */
+ 40, 40, 40, 40, 40, 40, 40, 46, /* 27 */
+ 40, 40, 40, 40, 3, 40, 60, 60, /* 27 */
+ 60, 60, 60, 60, 60, 79, 79, 60, /* 27 */
+ 60, 60, 60, 60, 60, 59, 59, 60, /* 27 */
+ 60, 15, 60, 60, 60, 60, 46, 46, /* 27 */
+ 9, 9, 9, 9, 9, 9, 9, 9, /* 27 */
+ 9, 9, 46, 46, 46, 46, 46, 46, /* 27 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 28 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 28 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 28 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 28 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 28 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 28 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 28 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 28 */
+ 46, 60, 60, 80, 46, 40, 40, 40, /* 29 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 29 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 29 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 29 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 29 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 29 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 29 */
+ 40, 40, 46, 46, 60, 40, 80, 80, /* 29 */
+ 80, 60, 60, 60, 60, 60, 60, 60, /* 30 */
+ 60, 80, 80, 80, 80, 60, 46, 46, /* 30 */
+ 15, 60, 60, 60, 60, 46, 46, 46, /* 30 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 30 */
+ 40, 40, 60, 60, 3, 3, 81, 81, /* 30 */
+ 81, 81, 81, 81, 81, 81, 81, 81, /* 30 */
+ 3, 46, 46, 46, 46, 46, 46, 46, /* 30 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 30 */
+ 46, 60, 80, 80, 46, 40, 40, 40, /* 31 */
+ 40, 40, 40, 40, 40, 46, 46, 40, /* 31 */
+ 40, 46, 46, 40, 40, 40, 40, 40, /* 31 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 31 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 31 */
+ 40, 46, 40, 40, 40, 40, 40, 40, /* 31 */
+ 40, 46, 40, 46, 46, 46, 40, 40, /* 31 */
+ 40, 40, 46, 46, 60, 46, 80, 80, /* 31 */
+ 80, 60, 60, 60, 60, 46, 46, 80, /* 32 */
+ 80, 46, 46, 80, 80, 60, 46, 46, /* 32 */
+ 46, 46, 46, 46, 46, 46, 46, 80, /* 32 */
+ 46, 46, 46, 46, 40, 40, 46, 40, /* 32 */
+ 40, 40, 60, 60, 46, 46, 81, 81, /* 32 */
+ 81, 81, 81, 81, 81, 81, 81, 81, /* 32 */
+ 40, 40, 4, 4, 82, 82, 82, 82, /* 32 */
+ 19, 83, 15, 46, 46, 46, 46, 46, /* 32 */
+ 46, 46, 60, 46, 46, 40, 40, 40, /* 33 */
+ 40, 40, 40, 46, 46, 46, 46, 40, /* 33 */
+ 40, 46, 46, 40, 40, 40, 40, 40, /* 33 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 33 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 33 */
+ 40, 46, 40, 40, 40, 40, 40, 40, /* 33 */
+ 40, 46, 40, 40, 46, 40, 40, 46, /* 33 */
+ 40, 40, 46, 46, 60, 46, 80, 80, /* 33 */
+ 80, 60, 60, 46, 46, 46, 46, 60, /* 34 */
+ 60, 46, 46, 60, 60, 60, 46, 46, /* 34 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 34 */
+ 46, 40, 40, 40, 40, 46, 40, 46, /* 34 */
+ 46, 46, 46, 46, 46, 46, 81, 81, /* 34 */
+ 81, 81, 81, 81, 81, 81, 81, 81, /* 34 */
+ 60, 60, 40, 40, 40, 46, 46, 46, /* 34 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 34 */
+ 46, 60, 60, 80, 46, 40, 40, 40, /* 35 */
+ 40, 40, 40, 40, 46, 40, 46, 40, /* 35 */
+ 40, 40, 46, 40, 40, 40, 40, 40, /* 35 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 35 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 35 */
+ 40, 46, 40, 40, 40, 40, 40, 40, /* 35 */
+ 40, 46, 40, 40, 46, 40, 40, 40, /* 35 */
+ 40, 40, 46, 46, 60, 40, 80, 80, /* 35 */
+ 80, 60, 60, 60, 60, 60, 46, 60, /* 36 */
+ 60, 80, 46, 80, 80, 60, 46, 46, /* 36 */
+ 15, 46, 46, 46, 46, 46, 46, 46, /* 36 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 36 */
+ 40, 46, 46, 46, 46, 46, 81, 81, /* 36 */
+ 81, 81, 81, 81, 81, 81, 81, 81, /* 36 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 36 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 36 */
+ 46, 60, 80, 80, 46, 40, 40, 40, /* 37 */
+ 40, 40, 40, 40, 40, 46, 46, 40, /* 37 */
+ 40, 46, 46, 40, 40, 40, 40, 40, /* 37 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 37 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 37 */
+ 40, 46, 40, 40, 40, 40, 40, 40, /* 37 */
+ 40, 46, 40, 40, 46, 46, 40, 40, /* 37 */
+ 40, 40, 46, 46, 60, 40, 80, 60, /* 37 */
+ 80, 60, 60, 60, 46, 46, 46, 80, /* 38 */
+ 80, 46, 46, 80, 80, 60, 46, 46, /* 38 */
+ 46, 46, 46, 46, 46, 46, 60, 80, /* 38 */
+ 46, 46, 46, 46, 40, 40, 46, 40, /* 38 */
+ 40, 40, 46, 46, 46, 46, 81, 81, /* 38 */
+ 81, 81, 81, 81, 81, 81, 81, 81, /* 38 */
+ 15, 46, 46, 46, 46, 46, 46, 46, /* 38 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 38 */
+ 46, 46, 60, 80, 46, 40, 40, 40, /* 39 */
+ 40, 40, 40, 46, 46, 46, 40, 40, /* 39 */
+ 40, 46, 40, 40, 40, 40, 46, 46, /* 39 */
+ 46, 40, 40, 46, 40, 46, 40, 40, /* 39 */
+ 46, 46, 46, 40, 40, 46, 46, 46, /* 39 */
+ 40, 40, 40, 46, 46, 46, 40, 40, /* 39 */
+ 40, 40, 40, 40, 40, 40, 46, 40, /* 39 */
+ 40, 40, 46, 46, 46, 46, 80, 80, /* 39 */
+ 60, 80, 80, 46, 46, 46, 80, 80, /* 40 */
+ 80, 46, 80, 80, 80, 60, 46, 46, /* 40 */
+ 46, 46, 46, 46, 46, 46, 46, 80, /* 40 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 40 */
+ 46, 46, 46, 46, 46, 46, 46, 81, /* 40 */
+ 81, 81, 81, 81, 81, 81, 81, 81, /* 40 */
+ 84, 19, 19, 46, 46, 46, 46, 46, /* 40 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 40 */
+ 46, 80, 80, 80, 46, 40, 40, 40, /* 41 */
+ 40, 40, 40, 40, 40, 46, 40, 40, /* 41 */
+ 40, 46, 40, 40, 40, 40, 40, 40, /* 41 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 41 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 41 */
+ 40, 46, 40, 40, 40, 40, 40, 40, /* 41 */
+ 40, 40, 40, 40, 46, 40, 40, 40, /* 41 */
+ 40, 40, 46, 46, 46, 46, 60, 60, /* 41 */
+ 60, 80, 80, 80, 80, 46, 60, 60, /* 42 */
+ 60, 46, 60, 60, 60, 60, 46, 46, /* 42 */
+ 46, 46, 46, 46, 46, 60, 60, 46, /* 42 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 42 */
+ 40, 40, 46, 46, 46, 46, 81, 81, /* 42 */
+ 81, 81, 81, 81, 81, 81, 81, 81, /* 42 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 42 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 42 */
+ 46, 46, 80, 80, 46, 40, 40, 40, /* 43 */
+ 40, 40, 40, 40, 40, 46, 40, 40, /* 43 */
+ 40, 46, 40, 40, 40, 40, 40, 40, /* 43 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 43 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 43 */
+ 40, 46, 40, 40, 40, 40, 40, 40, /* 43 */
+ 40, 40, 40, 40, 46, 40, 40, 40, /* 43 */
+ 40, 40, 46, 46, 46, 46, 80, 60, /* 43 */
+ 80, 80, 80, 80, 80, 46, 60, 80, /* 44 */
+ 80, 46, 80, 80, 60, 60, 46, 46, /* 44 */
+ 46, 46, 46, 46, 46, 80, 80, 46, /* 44 */
+ 46, 46, 46, 46, 46, 46, 40, 46, /* 44 */
+ 40, 40, 46, 46, 46, 46, 81, 81, /* 44 */
+ 81, 81, 81, 81, 81, 81, 81, 81, /* 44 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 44 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 44 */
+ 46, 46, 80, 80, 46, 40, 40, 40, /* 45 */
+ 40, 40, 40, 40, 40, 46, 40, 40, /* 45 */
+ 40, 46, 40, 40, 40, 40, 40, 40, /* 45 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 45 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 45 */
+ 40, 46, 40, 40, 40, 40, 40, 40, /* 45 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 45 */
+ 40, 40, 46, 46, 46, 46, 80, 80, /* 45 */
+ 80, 60, 60, 60, 46, 46, 80, 80, /* 46 */
+ 80, 46, 80, 80, 80, 60, 46, 46, /* 46 */
+ 46, 46, 46, 46, 46, 46, 46, 80, /* 46 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 46 */
+ 40, 40, 46, 46, 46, 46, 81, 81, /* 46 */
+ 81, 81, 81, 81, 81, 81, 81, 81, /* 46 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 46 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 46 */
+ 46, 40, 40, 40, 40, 40, 40, 40, /* 47 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 47 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 47 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 47 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 47 */
+ 40, 40, 40, 40, 40, 40, 40, 3, /* 47 */
+ 40, 60, 40, 40, 60, 60, 60, 60, /* 47 */
+ 60, 60, 60, 46, 46, 46, 46, 4, /* 47 */
+ 40, 40, 40, 40, 40, 40, 59, 60, /* 48 */
+ 60, 60, 60, 60, 60, 60, 60, 15, /* 48 */
+ 9, 9, 9, 9, 9, 9, 9, 9, /* 48 */
+ 9, 9, 3, 3, 46, 46, 46, 46, /* 48 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 48 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 48 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 48 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 48 */
+ 46, 40, 40, 46, 40, 46, 46, 40, /* 49 */
+ 40, 46, 40, 46, 46, 40, 46, 46, /* 49 */
+ 46, 46, 46, 46, 40, 40, 40, 40, /* 49 */
+ 46, 40, 40, 40, 40, 40, 40, 40, /* 49 */
+ 46, 40, 40, 40, 46, 40, 46, 40, /* 49 */
+ 46, 46, 40, 40, 46, 40, 40, 3, /* 49 */
+ 40, 60, 40, 40, 60, 60, 60, 60, /* 49 */
+ 60, 60, 46, 60, 60, 40, 46, 46, /* 49 */
+ 40, 40, 40, 40, 40, 46, 59, 46, /* 50 */
+ 60, 60, 60, 60, 60, 60, 46, 46, /* 50 */
+ 9, 9, 9, 9, 9, 9, 9, 9, /* 50 */
+ 9, 9, 46, 46, 40, 40, 46, 46, /* 50 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 50 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 50 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 50 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 50 */
+ 15, 15, 15, 15, 3, 3, 3, 3, /* 51 */
+ 3, 3, 3, 3, 3, 3, 3, 3, /* 51 */
+ 3, 3, 3, 15, 15, 15, 15, 15, /* 51 */
+ 60, 60, 15, 15, 15, 15, 15, 15, /* 51 */
+ 78, 78, 78, 78, 78, 78, 78, 78, /* 51 */
+ 78, 78, 85, 85, 85, 85, 85, 85, /* 51 */
+ 85, 85, 85, 85, 15, 60, 15, 60, /* 51 */
+ 15, 60, 5, 6, 5, 6, 80, 80, /* 51 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 52 */
+ 46, 40, 40, 40, 40, 40, 40, 40, /* 52 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 52 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 52 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 52 */
+ 40, 40, 46, 46, 46, 46, 46, 46, /* 52 */
+ 46, 60, 60, 60, 60, 60, 60, 60, /* 52 */
+ 60, 60, 60, 60, 60, 60, 60, 80, /* 52 */
+ 60, 60, 60, 60, 60, 3, 60, 60, /* 53 */
+ 60, 60, 60, 60, 46, 46, 46, 46, /* 53 */
+ 60, 60, 60, 60, 60, 60, 46, 60, /* 53 */
+ 46, 60, 60, 60, 60, 60, 60, 60, /* 53 */
+ 60, 60, 60, 60, 60, 60, 60, 60, /* 53 */
+ 60, 60, 60, 60, 60, 60, 46, 46, /* 53 */
+ 46, 60, 60, 60, 60, 60, 60, 60, /* 53 */
+ 46, 60, 46, 46, 46, 46, 46, 46, /* 53 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 54 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 54 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 54 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 54 */
+ 76, 76, 76, 76, 76, 76, 76, 76, /* 54 */
+ 76, 76, 76, 76, 76, 76, 76, 76, /* 54 */
+ 76, 76, 76, 76, 76, 76, 76, 76, /* 54 */
+ 76, 76, 76, 76, 76, 76, 76, 76, /* 54 */
+ 76, 76, 76, 76, 76, 76, 46, 46, /* 55 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 55 */
+ 16, 16, 16, 16, 16, 16, 16, 16, /* 55 */
+ 16, 16, 16, 16, 16, 16, 16, 16, /* 55 */
+ 16, 16, 16, 16, 16, 16, 16, 16, /* 55 */
+ 16, 16, 16, 16, 16, 16, 16, 16, /* 55 */
+ 16, 16, 16, 16, 16, 16, 16, 46, /* 55 */
+ 46, 46, 46, 3, 46, 46, 46, 46, /* 55 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 56 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 56 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 56 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 56 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 56 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 56 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 56 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 56 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 57 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 57 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 57 */
+ 40, 40, 46, 46, 46, 46, 46, 40, /* 57 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 57 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 57 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 57 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 57 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 58 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 58 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 58 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 58 */
+ 40, 40, 40, 46, 46, 46, 46, 46, /* 58 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 58 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 58 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 58 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 59 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 59 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 59 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 59 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 59 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 59 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 59 */
+ 40, 40, 46, 46, 46, 46, 46, 46, /* 59 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 60 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 60 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 60 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 60 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 60 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 60 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 60 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 60 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 61 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 61 */
+ 23, 24, 23, 24, 23, 24, 16, 16, /* 61 */
+ 16, 16, 16, 16, 46, 46, 46, 46, /* 61 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 61 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 61 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 61 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 61 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 62 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 62 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 62 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 62 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 62 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 62 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 62 */
+ 23, 24, 46, 46, 46, 46, 46, 46, /* 62 */
+ 86, 86, 86, 86, 86, 86, 86, 86, /* 63 */
+ 87, 87, 87, 87, 87, 87, 87, 87, /* 63 */
+ 86, 86, 86, 86, 86, 86, 46, 46, /* 63 */
+ 87, 87, 87, 87, 87, 87, 46, 46, /* 63 */
+ 86, 86, 86, 86, 86, 86, 86, 86, /* 63 */
+ 87, 87, 87, 87, 87, 87, 87, 87, /* 63 */
+ 86, 86, 86, 86, 86, 86, 86, 86, /* 63 */
+ 87, 87, 87, 87, 87, 87, 87, 87, /* 63 */
+ 86, 86, 86, 86, 86, 86, 46, 46, /* 64 */
+ 87, 87, 87, 87, 87, 87, 46, 46, /* 64 */
+ 16, 86, 16, 86, 16, 86, 16, 86, /* 64 */
+ 46, 87, 46, 87, 46, 87, 46, 87, /* 64 */
+ 86, 86, 86, 86, 86, 86, 86, 86, /* 64 */
+ 87, 87, 87, 87, 87, 87, 87, 87, /* 64 */
+ 88, 88, 89, 89, 89, 89, 90, 90, /* 64 */
+ 91, 91, 92, 92, 93, 93, 46, 46, /* 64 */
+ 86, 86, 86, 86, 86, 86, 86, 86, /* 65 */
+ 87, 87, 87, 87, 87, 87, 87, 87, /* 65 */
+ 86, 86, 86, 86, 86, 86, 86, 86, /* 65 */
+ 87, 87, 87, 87, 87, 87, 87, 87, /* 65 */
+ 86, 86, 86, 86, 86, 86, 86, 86, /* 65 */
+ 87, 87, 87, 87, 87, 87, 87, 87, /* 65 */
+ 86, 86, 16, 94, 16, 46, 16, 16, /* 65 */
+ 87, 87, 95, 95, 96, 11, 38, 11, /* 65 */
+ 11, 11, 16, 94, 16, 46, 16, 16, /* 66 */
+ 97, 97, 97, 97, 96, 11, 11, 11, /* 66 */
+ 86, 86, 16, 16, 46, 46, 16, 16, /* 66 */
+ 87, 87, 98, 98, 46, 11, 11, 11, /* 66 */
+ 86, 86, 16, 16, 16, 99, 16, 16, /* 66 */
+ 87, 87, 100, 100, 101, 11, 11, 11, /* 66 */
+ 46, 46, 16, 94, 16, 46, 16, 16, /* 66 */
+102, 102, 103, 103, 96, 11, 11, 46, /* 66 */
+ 2, 2, 2, 2, 2, 2, 2, 2, /* 67 */
+ 2, 2, 2, 2, 104, 104, 104, 104, /* 67 */
+ 8, 8, 8, 8, 8, 8, 3, 3, /* 67 */
+ 5, 6, 5, 5, 5, 6, 5, 5, /* 67 */
+ 3, 3, 3, 3, 3, 3, 3, 3, /* 67 */
+105, 106, 104, 104, 104, 104, 104, 46, /* 67 */
+ 3, 3, 3, 3, 3, 3, 3, 3, /* 67 */
+ 3, 5, 6, 3, 3, 3, 3, 12, /* 67 */
+ 12, 3, 3, 3, 7, 5, 6, 46, /* 68 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 68 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 68 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 68 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 68 */
+ 46, 46, 104, 104, 104, 104, 104, 104, /* 68 */
+ 17, 46, 46, 46, 17, 17, 17, 17, /* 68 */
+ 17, 17, 7, 7, 7, 5, 6, 16, /* 68 */
+107, 107, 107, 107, 107, 107, 107, 107, /* 69 */
+107, 107, 7, 7, 7, 5, 6, 46, /* 69 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 69 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 69 */
+ 4, 4, 4, 4, 4, 4, 4, 4, /* 69 */
+ 4, 4, 4, 4, 46, 46, 46, 46, /* 69 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 69 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 69 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 70 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 70 */
+ 60, 60, 60, 60, 60, 60, 60, 60, /* 70 */
+ 60, 60, 60, 60, 60, 79, 79, 79, /* 70 */
+ 79, 60, 46, 46, 46, 46, 46, 46, /* 70 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 70 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 70 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 70 */
+ 15, 15, 38, 15, 15, 15, 15, 38, /* 71 */
+ 15, 15, 16, 38, 38, 38, 16, 16, /* 71 */
+ 38, 38, 38, 16, 15, 38, 15, 15, /* 71 */
+ 38, 38, 38, 38, 38, 38, 15, 15, /* 71 */
+ 15, 15, 15, 15, 38, 15, 38, 15, /* 71 */
+ 38, 15, 38, 38, 38, 38, 16, 16, /* 71 */
+ 38, 38, 15, 38, 16, 40, 40, 40, /* 71 */
+ 40, 46, 46, 46, 46, 46, 46, 46, /* 71 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 72 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 72 */
+ 46, 46, 46, 19, 19, 19, 19, 19, /* 72 */
+ 19, 19, 19, 19, 19, 19, 19, 108, /* 72 */
+109, 109, 109, 109, 109, 109, 109, 109, /* 72 */
+109, 109, 109, 109, 110, 110, 110, 110, /* 72 */
+111, 111, 111, 111, 111, 111, 111, 111, /* 72 */
+111, 111, 111, 111, 112, 112, 112, 112, /* 72 */
+113, 113, 113, 46, 46, 46, 46, 46, /* 73 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 73 */
+ 7, 7, 7, 7, 7, 15, 15, 15, /* 73 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 73 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 73 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 73 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 73 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 73 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 74 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 74 */
+ 15, 15, 7, 15, 7, 15, 15, 15, /* 74 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 74 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 74 */
+ 15, 15, 15, 46, 46, 46, 46, 46, /* 74 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 74 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 74 */
+ 7, 7, 7, 7, 7, 7, 7, 7, /* 75 */
+ 7, 7, 7, 7, 7, 7, 7, 7, /* 75 */
+ 7, 7, 7, 7, 7, 7, 7, 7, /* 75 */
+ 7, 7, 7, 7, 7, 7, 7, 7, /* 75 */
+ 7, 7, 7, 7, 7, 7, 7, 7, /* 75 */
+ 7, 7, 7, 7, 7, 7, 7, 7, /* 75 */
+ 7, 7, 7, 7, 7, 7, 7, 7, /* 75 */
+ 7, 7, 7, 7, 7, 7, 7, 7, /* 75 */
+ 7, 7, 7, 7, 7, 7, 7, 7, /* 76 */
+ 7, 7, 7, 7, 7, 7, 7, 7, /* 76 */
+ 7, 7, 7, 7, 7, 7, 7, 7, /* 76 */
+ 7, 7, 7, 7, 7, 7, 7, 7, /* 76 */
+ 7, 7, 7, 7, 7, 7, 7, 7, /* 76 */
+ 7, 7, 7, 7, 7, 7, 7, 7, /* 76 */
+ 7, 7, 46, 46, 46, 46, 46, 46, /* 76 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 76 */
+ 15, 46, 15, 15, 15, 15, 15, 15, /* 77 */
+ 7, 7, 7, 7, 15, 15, 15, 15, /* 77 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 77 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 77 */
+ 7, 7, 15, 15, 15, 15, 15, 15, /* 77 */
+ 15, 5, 6, 15, 15, 15, 15, 15, /* 77 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 77 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 77 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 78 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 78 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 78 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 78 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 78 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 78 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 78 */
+ 15, 15, 15, 46, 46, 46, 46, 46, /* 78 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 79 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 79 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 79 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 79 */
+ 15, 15, 15, 15, 15, 46, 46, 46, /* 79 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 79 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 79 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 79 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 80 */
+ 15, 15, 15, 46, 46, 46, 46, 46, /* 80 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 80 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 80 */
+114, 114, 114, 114, 114, 114, 114, 114, /* 80 */
+114, 114, 114, 114, 114, 114, 114, 114, /* 80 */
+114, 114, 114, 114, 82, 82, 82, 82, /* 80 */
+ 82, 82, 82, 82, 82, 82, 82, 82, /* 80 */
+ 82, 82, 82, 82, 82, 82, 82, 82, /* 81 */
+115, 115, 115, 115, 115, 115, 115, 115, /* 81 */
+115, 115, 115, 115, 115, 115, 115, 115, /* 81 */
+115, 115, 115, 115, 15, 15, 15, 15, /* 81 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 81 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 81 */
+ 15, 15, 15, 15, 15, 15, 116, 116, /* 81 */
+116, 116, 116, 116, 116, 116, 116, 116, /* 81 */
+116, 116, 116, 116, 116, 116, 116, 116, /* 82 */
+116, 116, 116, 116, 116, 116, 116, 116, /* 82 */
+117, 117, 117, 117, 117, 117, 117, 117, /* 82 */
+117, 117, 117, 117, 117, 117, 117, 117, /* 82 */
+117, 117, 117, 117, 117, 117, 117, 117, /* 82 */
+117, 117, 118, 46, 46, 46, 46, 46, /* 82 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 82 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 82 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 83 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 83 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 83 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 83 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 83 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 83 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 83 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 83 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 84 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 84 */
+ 15, 15, 15, 15, 15, 15, 46, 46, /* 84 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 84 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 84 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 84 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 84 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 84 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 85 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 85 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 85 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 85 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 85 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 85 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 85 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 85 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 86 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 86 */
+ 15, 15, 15, 15, 46, 46, 46, 46, /* 86 */
+ 46, 46, 15, 15, 15, 15, 15, 15, /* 86 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 86 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 86 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 86 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 86 */
+ 46, 15, 15, 15, 15, 46, 15, 15, /* 87 */
+ 15, 15, 46, 46, 15, 15, 15, 15, /* 87 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 87 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 87 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 87 */
+ 46, 15, 15, 15, 15, 15, 15, 15, /* 87 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 87 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 87 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 88 */
+ 15, 15, 15, 15, 46, 15, 46, 15, /* 88 */
+ 15, 15, 15, 46, 46, 46, 15, 46, /* 88 */
+ 15, 15, 15, 15, 15, 15, 15, 46, /* 88 */
+ 46, 15, 15, 15, 15, 15, 15, 15, /* 88 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 88 */
+ 46, 46, 46, 46, 46, 46, 119, 119, /* 88 */
+119, 119, 119, 119, 119, 119, 119, 119, /* 88 */
+114, 114, 114, 114, 114, 114, 114, 114, /* 89 */
+114, 114, 83, 83, 83, 83, 83, 83, /* 89 */
+ 83, 83, 83, 83, 15, 46, 46, 46, /* 89 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 89 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 89 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 89 */
+ 46, 15, 15, 15, 15, 15, 15, 15, /* 89 */
+ 15, 15, 15, 15, 15, 15, 15, 46, /* 89 */
+ 2, 3, 3, 3, 15, 59, 3, 120, /* 90 */
+ 5, 6, 5, 6, 5, 6, 5, 6, /* 90 */
+ 5, 6, 15, 15, 5, 6, 5, 6, /* 90 */
+ 5, 6, 5, 6, 8, 5, 6, 5, /* 90 */
+ 15, 121, 121, 121, 121, 121, 121, 121, /* 90 */
+121, 121, 60, 60, 60, 60, 60, 60, /* 90 */
+ 8, 59, 59, 59, 59, 59, 15, 15, /* 90 */
+ 46, 46, 46, 46, 46, 46, 46, 15, /* 90 */
+ 46, 40, 40, 40, 40, 40, 40, 40, /* 91 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 91 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 91 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 91 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 91 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 91 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 91 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 91 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 92 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 92 */
+ 40, 40, 40, 40, 40, 46, 46, 46, /* 92 */
+ 46, 60, 60, 59, 59, 59, 59, 46, /* 92 */
+ 46, 40, 40, 40, 40, 40, 40, 40, /* 92 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 92 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 92 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 92 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 93 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 93 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 93 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 93 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 93 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 93 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 93 */
+ 40, 40, 40, 3, 59, 59, 59, 46, /* 93 */
+ 46, 46, 46, 46, 46, 40, 40, 40, /* 94 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 94 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 94 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 94 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 94 */
+ 40, 40, 40, 40, 40, 46, 46, 46, /* 94 */
+ 46, 40, 40, 40, 40, 40, 40, 40, /* 94 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 94 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 95 */
+ 40, 40, 40, 40, 40, 40, 40, 46, /* 95 */
+ 15, 15, 85, 85, 85, 85, 15, 15, /* 95 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 95 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 95 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 95 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 95 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 95 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 96 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 96 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 96 */
+ 15, 15, 15, 15, 15, 46, 46, 46, /* 96 */
+ 85, 85, 85, 85, 85, 85, 85, 85, /* 96 */
+ 85, 85, 15, 15, 15, 15, 15, 15, /* 96 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 96 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 96 */
+ 15, 15, 15, 15, 46, 46, 46, 46, /* 97 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 97 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 97 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 97 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 97 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 97 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 97 */
+ 15, 15, 15, 15, 46, 46, 46, 15, /* 97 */
+114, 114, 114, 114, 114, 114, 114, 114, /* 98 */
+114, 114, 15, 15, 15, 15, 15, 15, /* 98 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 98 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 98 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 98 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 98 */
+ 15, 46, 46, 46, 46, 46, 46, 46, /* 98 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 98 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 99 */
+ 15, 15, 15, 15, 46, 46, 46, 46, /* 99 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 99 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 99 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 99 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 99 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 99 */
+ 15, 15, 15, 15, 15, 15, 15, 46, /* 99 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 100 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 100 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 100 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 100 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 100 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 100 */
+ 15, 15, 15, 15, 15, 15, 15, 46, /* 100 */
+ 46, 46, 46, 15, 15, 15, 15, 15, /* 100 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 101 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 101 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 101 */
+ 15, 15, 15, 15, 15, 15, 46, 46, /* 101 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 101 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 101 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 101 */
+ 15, 15, 15, 15, 15, 15, 15, 46, /* 101 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 102 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 102 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 102 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 102 */
+ 40, 40, 40, 40, 40, 40, 46, 46, /* 102 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 102 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 102 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 102 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 103 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 103 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 103 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 103 */
+ 40, 40, 40, 40, 46, 46, 46, 46, /* 103 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 103 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 103 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 103 */
+122, 122, 122, 122, 122, 122, 122, 122, /* 104 */
+122, 122, 122, 122, 122, 122, 122, 122, /* 104 */
+122, 122, 122, 122, 122, 122, 122, 122, /* 104 */
+122, 122, 122, 122, 122, 122, 122, 122, /* 104 */
+122, 122, 122, 122, 122, 122, 122, 122, /* 104 */
+122, 122, 122, 122, 122, 122, 122, 122, /* 104 */
+122, 122, 122, 122, 122, 122, 122, 122, /* 104 */
+122, 122, 122, 122, 122, 122, 122, 122, /* 104 */
+123, 123, 123, 123, 123, 123, 123, 123, /* 105 */
+123, 123, 123, 123, 123, 123, 123, 123, /* 105 */
+123, 123, 123, 123, 123, 123, 123, 123, /* 105 */
+123, 123, 123, 123, 123, 123, 123, 123, /* 105 */
+123, 123, 123, 123, 123, 123, 123, 123, /* 105 */
+123, 123, 123, 123, 123, 123, 123, 123, /* 105 */
+123, 123, 123, 123, 123, 123, 123, 123, /* 105 */
+123, 123, 123, 123, 123, 123, 123, 123, /* 105 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 106 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 106 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 106 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 106 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 106 */
+ 40, 40, 40, 40, 40, 40, 46, 46, /* 106 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 106 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 106 */
+ 16, 16, 16, 16, 16, 16, 16, 46, /* 107 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 107 */
+ 46, 46, 46, 16, 16, 16, 16, 16, /* 107 */
+ 46, 46, 46, 46, 46, 46, 60, 40, /* 107 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 107 */
+ 40, 7, 40, 40, 40, 40, 40, 40, /* 107 */
+ 40, 40, 40, 40, 40, 40, 40, 46, /* 107 */
+ 40, 40, 40, 40, 40, 46, 40, 46, /* 107 */
+ 40, 40, 46, 40, 40, 46, 40, 40, /* 108 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 108 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 108 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 108 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 108 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 108 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 108 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 108 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 109 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 109 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 109 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 109 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 109 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 109 */
+ 40, 40, 46, 46, 46, 46, 46, 46, /* 109 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 109 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 110 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 110 */
+ 46, 46, 46, 40, 40, 40, 40, 40, /* 110 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 110 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 110 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 110 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 110 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 110 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 111 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 111 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 111 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 111 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 111 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 111 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 111 */
+ 40, 40, 40, 40, 40, 40, 5, 6, /* 111 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 112 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 112 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 112 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 112 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 112 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 112 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 112 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 112 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 113 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 113 */
+ 46, 46, 40, 40, 40, 40, 40, 40, /* 113 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 113 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 113 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 113 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 113 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 113 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 114 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 114 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 114 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 114 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 114 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 114 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 114 */
+ 40, 40, 40, 40, 46, 46, 46, 46, /* 114 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 115 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 115 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 115 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 115 */
+ 60, 60, 60, 60, 46, 46, 46, 46, /* 115 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 115 */
+ 3, 8, 8, 12, 12, 5, 6, 5, /* 115 */
+ 6, 5, 6, 5, 6, 5, 6, 5, /* 115 */
+ 6, 5, 6, 5, 6, 46, 46, 46, /* 116 */
+ 46, 3, 3, 3, 3, 12, 12, 12, /* 116 */
+ 3, 3, 3, 46, 3, 3, 3, 3, /* 116 */
+ 8, 5, 6, 5, 6, 5, 6, 3, /* 116 */
+ 3, 3, 7, 8, 7, 7, 7, 46, /* 116 */
+ 3, 4, 3, 3, 46, 46, 46, 46, /* 116 */
+ 40, 40, 40, 46, 40, 46, 40, 40, /* 116 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 116 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 117 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 117 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 117 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 117 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 117 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 117 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 117 */
+ 40, 40, 40, 40, 40, 46, 46, 104, /* 117 */
+ 46, 3, 3, 3, 4, 3, 3, 3, /* 118 */
+ 5, 6, 3, 7, 3, 8, 3, 3, /* 118 */
+ 9, 9, 9, 9, 9, 9, 9, 9, /* 118 */
+ 9, 9, 3, 3, 7, 7, 7, 3, /* 118 */
+ 3, 10, 10, 10, 10, 10, 10, 10, /* 118 */
+ 10, 10, 10, 10, 10, 10, 10, 10, /* 118 */
+ 10, 10, 10, 10, 10, 10, 10, 10, /* 118 */
+ 10, 10, 10, 5, 3, 6, 11, 12, /* 118 */
+ 11, 13, 13, 13, 13, 13, 13, 13, /* 119 */
+ 13, 13, 13, 13, 13, 13, 13, 13, /* 119 */
+ 13, 13, 13, 13, 13, 13, 13, 13, /* 119 */
+ 13, 13, 13, 5, 7, 6, 7, 46, /* 119 */
+ 46, 3, 5, 6, 3, 3, 40, 40, /* 119 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 119 */
+ 59, 40, 40, 40, 40, 40, 40, 40, /* 119 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 119 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 120 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 120 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 120 */
+ 40, 40, 40, 40, 40, 40, 59, 59, /* 120 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 120 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 120 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 120 */
+ 40, 40, 40, 40, 40, 40, 40, 46, /* 120 */
+ 46, 46, 40, 40, 40, 40, 40, 40, /* 121 */
+ 46, 46, 40, 40, 40, 40, 40, 40, /* 121 */
+ 46, 46, 40, 40, 40, 40, 40, 40, /* 121 */
+ 46, 46, 40, 40, 40, 46, 46, 46, /* 121 */
+ 4, 4, 7, 11, 15, 4, 4, 46, /* 121 */
+ 7, 7, 7, 7, 7, 15, 15, 46, /* 121 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 121 */
+ 46, 46, 46, 46, 46, 15, 46, 46 /* 121 */
+};
+
+/* The A table has 124 entries for a total of 496 bytes. */
+
+const uint32 js_A[] = {
+0x0001000F, /* 0 Cc, ignorable */
+0x0004000F, /* 1 Cc, whitespace */
+0x0004000C, /* 2 Zs, whitespace */
+0x00000018, /* 3 Po */
+0x0006001A, /* 4 Sc, currency */
+0x00000015, /* 5 Ps */
+0x00000016, /* 6 Pe */
+0x00000019, /* 7 Sm */
+0x00000014, /* 8 Pd */
+0x00036089, /* 9 Nd, identifier part, decimal 16 */
+0x0827FF81, /* 10 Lu, hasLower (add 32), identifier start, supradecimal 31 */
+0x0000001B, /* 11 Sk */
+0x00050017, /* 12 Pc, underscore */
+0x0817FF82, /* 13 Ll, hasUpper (subtract 32), identifier start, supradecimal 31 */
+0x0000000C, /* 14 Zs */
+0x0000001C, /* 15 So */
+0x00070182, /* 16 Ll, identifier start */
+0x0000600B, /* 17 No, decimal 16 */
+0x0000500B, /* 18 No, decimal 8 */
+0x0000800B, /* 19 No, strange */
+0x08270181, /* 20 Lu, hasLower (add 32), identifier start */
+0x08170182, /* 21 Ll, hasUpper (subtract 32), identifier start */
+0xE1D70182, /* 22 Ll, hasUpper (subtract -121), identifier start */
+0x00670181, /* 23 Lu, hasLower (add 1), identifier start */
+0x00570182, /* 24 Ll, hasUpper (subtract 1), identifier start */
+0xCE670181, /* 25 Lu, hasLower (add -199), identifier start */
+0x3A170182, /* 26 Ll, hasUpper (subtract 232), identifier start */
+0xE1E70181, /* 27 Lu, hasLower (add -121), identifier start */
+0x4B170182, /* 28 Ll, hasUpper (subtract 300), identifier start */
+0x34A70181, /* 29 Lu, hasLower (add 210), identifier start */
+0x33A70181, /* 30 Lu, hasLower (add 206), identifier start */
+0x33670181, /* 31 Lu, hasLower (add 205), identifier start */
+0x32A70181, /* 32 Lu, hasLower (add 202), identifier start */
+0x32E70181, /* 33 Lu, hasLower (add 203), identifier start */
+0x33E70181, /* 34 Lu, hasLower (add 207), identifier start */
+0x34E70181, /* 35 Lu, hasLower (add 211), identifier start */
+0x34670181, /* 36 Lu, hasLower (add 209), identifier start */
+0x35670181, /* 37 Lu, hasLower (add 213), identifier start */
+0x00070181, /* 38 Lu, identifier start */
+0x36A70181, /* 39 Lu, hasLower (add 218), identifier start */
+0x00070185, /* 40 Lo, identifier start */
+0x36670181, /* 41 Lu, hasLower (add 217), identifier start */
+0x36E70181, /* 42 Lu, hasLower (add 219), identifier start */
+0x00AF0181, /* 43 Lu, hasLower (add 2), hasTitle, identifier start */
+0x007F0183, /* 44 Lt, hasUpper (subtract 1), hasLower (add 1), hasTitle, identifier start */
+0x009F0182, /* 45 Ll, hasUpper (subtract 2), hasTitle, identifier start */
+0x00000000, /* 46 unassigned */
+0x34970182, /* 47 Ll, hasUpper (subtract 210), identifier start */
+0x33970182, /* 48 Ll, hasUpper (subtract 206), identifier start */
+0x33570182, /* 49 Ll, hasUpper (subtract 205), identifier start */
+0x32970182, /* 50 Ll, hasUpper (subtract 202), identifier start */
+0x32D70182, /* 51 Ll, hasUpper (subtract 203), identifier start */
+0x33D70182, /* 52 Ll, hasUpper (subtract 207), identifier start */
+0x34570182, /* 53 Ll, hasUpper (subtract 209), identifier start */
+0x34D70182, /* 54 Ll, hasUpper (subtract 211), identifier start */
+0x35570182, /* 55 Ll, hasUpper (subtract 213), identifier start */
+0x36970182, /* 56 Ll, hasUpper (subtract 218), identifier start */
+0x36570182, /* 57 Ll, hasUpper (subtract 217), identifier start */
+0x36D70182, /* 58 Ll, hasUpper (subtract 219), identifier start */
+0x00070084, /* 59 Lm, identifier start */
+0x00030086, /* 60 Mn, identifier part */
+0x09A70181, /* 61 Lu, hasLower (add 38), identifier start */
+0x09670181, /* 62 Lu, hasLower (add 37), identifier start */
+0x10270181, /* 63 Lu, hasLower (add 64), identifier start */
+0x0FE70181, /* 64 Lu, hasLower (add 63), identifier start */
+0x09970182, /* 65 Ll, hasUpper (subtract 38), identifier start */
+0x09570182, /* 66 Ll, hasUpper (subtract 37), identifier start */
+0x10170182, /* 67 Ll, hasUpper (subtract 64), identifier start */
+0x0FD70182, /* 68 Ll, hasUpper (subtract 63), identifier start */
+0x0F970182, /* 69 Ll, hasUpper (subtract 62), identifier start */
+0x0E570182, /* 70 Ll, hasUpper (subtract 57), identifier start */
+0x0BD70182, /* 71 Ll, hasUpper (subtract 47), identifier start */
+0x0D970182, /* 72 Ll, hasUpper (subtract 54), identifier start */
+0x15970182, /* 73 Ll, hasUpper (subtract 86), identifier start */
+0x14170182, /* 74 Ll, hasUpper (subtract 80), identifier start */
+0x14270181, /* 75 Lu, hasLower (add 80), identifier start */
+0x0C270181, /* 76 Lu, hasLower (add 48), identifier start */
+0x0C170182, /* 77 Ll, hasUpper (subtract 48), identifier start */
+0x00034089, /* 78 Nd, identifier part, decimal 0 */
+0x00000087, /* 79 Me */
+0x00030088, /* 80 Mc, identifier part */
+0x00037489, /* 81 Nd, identifier part, decimal 26 */
+0x00005A0B, /* 82 No, decimal 13 */
+0x00006E0B, /* 83 No, decimal 23 */
+0x0000740B, /* 84 No, decimal 26 */
+0x0000000B, /* 85 No */
+0xFE170182, /* 86 Ll, hasUpper (subtract -8), identifier start */
+0xFE270181, /* 87 Lu, hasLower (add -8), identifier start */
+0xED970182, /* 88 Ll, hasUpper (subtract -74), identifier start */
+0xEA970182, /* 89 Ll, hasUpper (subtract -86), identifier start */
+0xE7170182, /* 90 Ll, hasUpper (subtract -100), identifier start */
+0xE0170182, /* 91 Ll, hasUpper (subtract -128), identifier start */
+0xE4170182, /* 92 Ll, hasUpper (subtract -112), identifier start */
+0xE0970182, /* 93 Ll, hasUpper (subtract -126), identifier start */
+0xFDD70182, /* 94 Ll, hasUpper (subtract -9), identifier start */
+0xEDA70181, /* 95 Lu, hasLower (add -74), identifier start */
+0xFDE70181, /* 96 Lu, hasLower (add -9), identifier start */
+0xEAA70181, /* 97 Lu, hasLower (add -86), identifier start */
+0xE7270181, /* 98 Lu, hasLower (add -100), identifier start */
+0xFE570182, /* 99 Ll, hasUpper (subtract -7), identifier start */
+0xE4270181, /* 100 Lu, hasLower (add -112), identifier start */
+0xFE670181, /* 101 Lu, hasLower (add -7), identifier start */
+0xE0270181, /* 102 Lu, hasLower (add -128), identifier start */
+0xE0A70181, /* 103 Lu, hasLower (add -126), identifier start */
+0x00010010, /* 104 Cf, ignorable */
+0x0004000D, /* 105 Zl, whitespace */
+0x0004000E, /* 106 Zp, whitespace */
+0x0000400B, /* 107 No, decimal 0 */
+0x0000440B, /* 108 No, decimal 2 */
+0x0427438A, /* 109 Nl, hasLower (add 16), identifier start, decimal 1 */
+0x0427818A, /* 110 Nl, hasLower (add 16), identifier start, strange */
+0x0417638A, /* 111 Nl, hasUpper (subtract 16), identifier start, decimal 17 */
+0x0417818A, /* 112 Nl, hasUpper (subtract 16), identifier start, strange */
+0x0007818A, /* 113 Nl, identifier start, strange */
+0x0000420B, /* 114 No, decimal 1 */
+0x0000720B, /* 115 No, decimal 25 */
+0x06A0001C, /* 116 So, hasLower (add 26) */
+0x0690001C, /* 117 So, hasUpper (subtract 26) */
+0x00006C0B, /* 118 No, decimal 22 */
+0x0000560B, /* 119 No, decimal 11 */
+0x0007738A, /* 120 Nl, identifier start, decimal 25 */
+0x0007418A, /* 121 Nl, identifier start, decimal 0 */
+0x00000013, /* 122 Cs */
+0x00000012 /* 123 Co */
+};
+
+const jschar js_uriReservedPlusPound_ucstr[] =
+ {';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '#', 0};
+const jschar js_uriUnescaped_ucstr[] =
+ {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
+ 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
+ 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
+ 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
+ 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
+ '-', '_', '.', '!', '~', '*', '\'', '(', ')', 0};
+
+#define URI_CHUNK 64U
+
+/* Concatenate jschars onto an unshared/newborn JSString. */
+static JSBool
+AddCharsToURI(JSContext *cx, JSString *str, const jschar *chars, size_t length)
+{
+ size_t total;
+
+ JS_ASSERT(!JSSTRING_IS_DEPENDENT(str));
+ total = str->length + length + 1;
+ if (!str->chars ||
+ JS_HOWMANY(total, URI_CHUNK) > JS_HOWMANY(str->length + 1, URI_CHUNK)) {
+ total = JS_ROUNDUP(total, URI_CHUNK);
+ str->chars = JS_realloc(cx, str->chars, total * sizeof(jschar));
+ if (!str->chars)
+ return JS_FALSE;
+ }
+ js_strncpy(str->chars + str->length, chars, length);
+ str->length += length;
+ str->chars[str->length] = 0;
+ return JS_TRUE;
+}
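+
+/*
+ * Worked example (editorial sketch): growth happens in URI_CHUNK-sized steps.
+ * Assuming the usual ceiling-division helpers JS_HOWMANY(x,y) == ((x)+(y)-1)/(y)
+ * and JS_ROUNDUP(x,y) == JS_HOWMANY(x,y)*(y): appending 3 jschars to a
+ * 62-jschar string needs total == 62 + 3 + 1 == 66 slots, and
+ * JS_HOWMANY(66, 64) == 2 exceeds JS_HOWMANY(63, 64) == 1, so the buffer is
+ * reallocated to JS_ROUNDUP(66, 64) == 128 jschars.  Appending one jschar to
+ * a 10-jschar string stays inside the current 64-jschar chunk and skips the
+ * realloc (unless str->chars is still NULL for a newborn string).
+ */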
+
+/*
+ * ECMA 3, 15.1.3 URI Handling Function Properties
+ *
+ * The following are implementations of the algorithms
+ * given in the ECMA specification for the hidden functions
+ * 'Encode' and 'Decode'.
+ */
+static JSBool
+Encode(JSContext *cx, JSString *str, const jschar *unescapedSet,
+ const jschar *unescapedSet2, jsval *rval)
+{
+ size_t length, j, k, L;
+ jschar *chars, c, c2;
+ uint32 v;
+ uint8 utf8buf[6];
+ jschar hexBuf[4];
+ static const char HexDigits[] = "0123456789ABCDEF"; /* NB: uppercase */
+ JSString *R;
+
+ length = JSSTRING_LENGTH(str);
+ if (length == 0) {
+ *rval = STRING_TO_JSVAL(cx->runtime->emptyString);
+ return JS_TRUE;
+ }
+
+ R = js_NewString(cx, NULL, 0, 0);
+ if (!R)
+ return JS_FALSE;
+
+ hexBuf[0] = '%';
+ hexBuf[3] = 0;
+ chars = JSSTRING_CHARS(str);
+ for (k = 0; k < length; k++) {
+ c = chars[k];
+ if (js_strchr(unescapedSet, c) ||
+ (unescapedSet2 && js_strchr(unescapedSet2, c))) {
+ if (!AddCharsToURI(cx, R, &c, 1))
+ return JS_FALSE;
+ } else {
+ if ((c >= 0xDC00) && (c <= 0xDFFF)) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_URI, NULL);
+ return JS_FALSE;
+ }
+ if (c < 0xD800 || c > 0xDBFF) {
+ v = c;
+ } else {
+ k++;
+ if (k == length) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_URI, NULL);
+ return JS_FALSE;
+ }
+ c2 = chars[k];
+ if ((c2 < 0xDC00) || (c2 > 0xDFFF)) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_URI, NULL);
+ return JS_FALSE;
+ }
+ v = ((c - 0xD800) << 10) + (c2 - 0xDC00) + 0x10000;
+ }
+ L = js_OneUcs4ToUtf8Char(utf8buf, v);
+ for (j = 0; j < L; j++) {
+ hexBuf[1] = HexDigits[utf8buf[j] >> 4];
+ hexBuf[2] = HexDigits[utf8buf[j] & 0xf];
+ if (!AddCharsToURI(cx, R, hexBuf, 3))
+ return JS_FALSE;
+ }
+ }
+ }
+
+ /*
+ * Shrinking realloc can fail (e.g., with a BSD-style allocator), but we
+ * don't worry about that case here. Worst case, R hangs onto URI_CHUNK-1
+ * more jschars than it needs.
+ */
+ chars = (jschar *) JS_realloc(cx, R->chars, (R->length+1) * sizeof(jschar));
+ if (chars)
+ R->chars = chars;
+ *rval = STRING_TO_JSVAL(R);
+ return JS_TRUE;
+}
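+
+/*
+ * Worked example (editorial note): for U+20AC, which is in neither set,
+ * v == 0x20AC, js_OneUcs4ToUtf8Char() yields the three bytes E2 82 AC, and
+ * Encode appends "%E2%82%AC".  For the surrogate pair 0xD83D 0xDE00,
+ * v == ((0xD83D - 0xD800) << 10) + (0xDE00 - 0xDC00) + 0x10000 == 0x1F600,
+ * which becomes the four bytes F0 9F 98 80, i.e. "%F0%9F%98%80".  A lone or
+ * out-of-order surrogate reports JSMSG_BAD_URI instead.
+ */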
+
+static JSBool
+Decode(JSContext *cx, JSString *str, const jschar *reservedSet, jsval *rval)
+{
+ size_t length, start, k;
+ jschar *chars, c, H;
+ uint32 v;
+ jsuint B;
+ uint8 octets[6];
+ JSString *R;
+ intN j, n;
+
+ length = JSSTRING_LENGTH(str);
+ if (length == 0) {
+ *rval = STRING_TO_JSVAL(cx->runtime->emptyString);
+ return JS_TRUE;
+ }
+
+ R = js_NewString(cx, NULL, 0, 0);
+ if (!R)
+ return JS_FALSE;
+
+ chars = JSSTRING_CHARS(str);
+ for (k = 0; k < length; k++) {
+ c = chars[k];
+ if (c == '%') {
+ start = k;
+ if ((k + 2) >= length)
+ goto bad;
+ if (!JS7_ISHEX(chars[k+1]) || !JS7_ISHEX(chars[k+2]))
+ goto bad;
+ B = JS7_UNHEX(chars[k+1]) * 16 + JS7_UNHEX(chars[k+2]);
+ k += 2;
+ if (!(B & 0x80)) {
+ c = (jschar)B;
+ } else {
+ n = 1;
+ while (B & (0x80 >> n))
+ n++;
+ if (n == 1 || n > 6)
+ goto bad;
+ octets[0] = (uint8)B;
+ if (k + 3 * (n - 1) >= length)
+ goto bad;
+ for (j = 1; j < n; j++) {
+ k++;
+ if (chars[k] != '%')
+ goto bad;
+ if (!JS7_ISHEX(chars[k+1]) || !JS7_ISHEX(chars[k+2]))
+ goto bad;
+ B = JS7_UNHEX(chars[k+1]) * 16 + JS7_UNHEX(chars[k+2]);
+ if ((B & 0xC0) != 0x80)
+ goto bad;
+ k += 2;
+ octets[j] = (char)B;
+ }
+ v = Utf8ToOneUcs4Char(octets, n);
+ if (v >= 0x10000) {
+ v -= 0x10000;
+ if (v > 0xFFFFF)
+ goto bad;
+ c = (jschar)((v & 0x3FF) + 0xDC00);
+ H = (jschar)((v >> 10) + 0xD800);
+ if (!AddCharsToURI(cx, R, &H, 1))
+ return JS_FALSE;
+ } else {
+ c = (jschar)v;
+ }
+ }
+ if (js_strchr(reservedSet, c)) {
+ if (!AddCharsToURI(cx, R, &chars[start], (k - start + 1)))
+ return JS_FALSE;
+ } else {
+ if (!AddCharsToURI(cx, R, &c, 1))
+ return JS_FALSE;
+ }
+ } else {
+ if (!AddCharsToURI(cx, R, &c, 1))
+ return JS_FALSE;
+ }
+ }
+
+ /*
+ * Shrinking realloc can fail (e.g., with a BSD-style allocator), but we
+ * don't worry about that case here. Worst case, R hangs onto URI_CHUNK-1
+ * more jschars than it needs.
+ */
+ chars = (jschar *) JS_realloc(cx, R->chars, (R->length+1) * sizeof(jschar));
+ if (chars)
+ R->chars = chars;
+ *rval = STRING_TO_JSVAL(R);
+ return JS_TRUE;
+
+bad:
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_BAD_URI);
+ return JS_FALSE;
+}
+
+static JSBool
+str_decodeURI(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSString *str;
+
+ str = js_ValueToString(cx, argv[0]);
+ if (!str)
+ return JS_FALSE;
+ argv[0] = STRING_TO_JSVAL(str);
+ return Decode(cx, str, js_uriReservedPlusPound_ucstr, rval);
+}
+
+static JSBool
+str_decodeURI_Component(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSString *str;
+
+ str = js_ValueToString(cx, argv[0]);
+ if (!str)
+ return JS_FALSE;
+ argv[0] = STRING_TO_JSVAL(str);
+ return Decode(cx, str, js_empty_ucstr, rval);
+}
+
+static JSBool
+str_encodeURI(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSString *str;
+
+ str = js_ValueToString(cx, argv[0]);
+ if (!str)
+ return JS_FALSE;
+ argv[0] = STRING_TO_JSVAL(str);
+ return Encode(cx, str, js_uriReservedPlusPound_ucstr, js_uriUnescaped_ucstr,
+ rval);
+}
+
+static JSBool
+str_encodeURI_Component(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSString *str;
+
+ str = js_ValueToString(cx, argv[0]);
+ if (!str)
+ return JS_FALSE;
+ argv[0] = STRING_TO_JSVAL(str);
+ return Encode(cx, str, js_uriUnescaped_ucstr, NULL, rval);
+}
+
+/*
+ * Convert one UCS-4 char and write it into a UTF-8 buffer, which must be at
+ * least 6 bytes long. Return the number of UTF-8 bytes of data written.
+ */
+int
+js_OneUcs4ToUtf8Char(uint8 *utf8Buffer, uint32 ucs4Char)
+{
+ int utf8Length = 1;
+
+ JS_ASSERT(ucs4Char <= 0x7FFFFFFF);
+ if (ucs4Char < 0x80) {
+ *utf8Buffer = (uint8)ucs4Char;
+ } else {
+ int i;
+ uint32 a = ucs4Char >> 11;
+ utf8Length = 2;
+ while (a) {
+ a >>= 5;
+ utf8Length++;
+ }
+ i = utf8Length;
+ while (--i) {
+ utf8Buffer[i] = (uint8)((ucs4Char & 0x3F) | 0x80);
+ ucs4Char >>= 6;
+ }
+ *utf8Buffer = (uint8)(0x100 - (1 << (8-utf8Length)) + ucs4Char);
+ }
+ return utf8Length;
+}
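+
+/*
+ * Usage sketch (illustrative only, not from the original source; the helper
+ * name is hypothetical): encoding U+20AC takes the multi-byte branch, writing
+ * 0xE2 0x82 0xAC and returning 3.
+ */
+static void
+ExampleOneUcs4ToUtf8(void)
+{
+    uint8 buf[6];
+    int n;
+
+    n = js_OneUcs4ToUtf8Char(buf, 0x20AC);
+    JS_ASSERT(n == 3);
+    JS_ASSERT(buf[0] == 0xE2 && buf[1] == 0x82 && buf[2] == 0xAC);
+}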
+
+/*
+ * Convert a utf8 character sequence into a UCS-4 character and return that
+ * character. It is assumed that the caller already checked that the sequence
+ * is valid.
+ */
+static uint32
+Utf8ToOneUcs4Char(const uint8 *utf8Buffer, int utf8Length)
+{
+ uint32 ucs4Char;
+ uint32 minucs4Char;
+ /* from Unicode 3.1, non-shortest form is illegal */
+ static const uint32 minucs4Table[] = {
+ 0x00000080, 0x00000800, 0x0001000, 0x0020000, 0x0400000
+ };
+
+ JS_ASSERT(utf8Length >= 1 && utf8Length <= 6);
+ if (utf8Length == 1) {
+ ucs4Char = *utf8Buffer;
+ JS_ASSERT(!(ucs4Char & 0x80));
+ } else {
+ JS_ASSERT((*utf8Buffer & (0x100 - (1 << (7-utf8Length)))) ==
+ (0x100 - (1 << (8-utf8Length))));
+ ucs4Char = *utf8Buffer++ & ((1<<(7-utf8Length))-1);
+ minucs4Char = minucs4Table[utf8Length-2];
+ while (--utf8Length) {
+ JS_ASSERT((*utf8Buffer & 0xC0) == 0x80);
+ ucs4Char = ucs4Char<<6 | (*utf8Buffer++ & 0x3F);
+ }
+ if (ucs4Char < minucs4Char ||
+ ucs4Char == 0xFFFE || ucs4Char == 0xFFFF) {
+ ucs4Char = 0xFFFD;
+ }
+ }
+ return ucs4Char;
+}
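+
+/*
+ * Worked example (editorial note): decoding "%E2%82%AC" reads B == 0xE2,
+ * counts n == 3 leading one-bits, gathers the octets E2 82 AC (each
+ * continuation byte passes the (B & 0xC0) == 0x80 check), and
+ * Utf8ToOneUcs4Char() folds them back as 0x02 -> 0x82 -> 0x20AC, which is at
+ * least minucs4Table[1] and so is appended as the single jschar 0x20AC.  A
+ * reserved character such as "%23" ('#') under decodeURI is kept as the
+ * literal text "%23", because '#' is in js_uriReservedPlusPound_ucstr.
+ */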
diff --git a/third_party/js-1.7/jsstr.h b/third_party/js-1.7/jsstr.h
new file mode 100644
index 0000000..708c69a
--- /dev/null
+++ b/third_party/js-1.7/jsstr.h
@@ -0,0 +1,500 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsstr_h___
+#define jsstr_h___
+/*
+ * JS string type implementation.
+ *
+ * A JS string is a counted array of Unicode characters. To support handoff
+ * of API client memory, the chars are allocated separately from the length,
+ * necessitating a pointer after the count, to form a separately allocated
+ * string descriptor. String descriptors are GC'ed, while their chars are
+ * allocated from the malloc heap.
+ *
+ * When a string is treated as an object (by following it with . or []), the
+ * runtime wraps it with a JSObject whose valueOf method returns the unwrapped
+ * string descriptor.
+ */
+#include <ctype.h>
+#include "jspubtd.h"
+#include "jsprvtd.h"
+#include "jshash.h"
+
+JS_BEGIN_EXTERN_C
+
+/*
+ * The original GC-thing "string" type, a flat character string owned by its
+ * GC-thing descriptor. The chars member points to a vector having byte size
+ * (length + 1) * sizeof(jschar), terminated at index length by a zero jschar.
+ * The terminator is purely a backstop, in case the chars pointer flows out to
+ * native code that requires \u0000 termination.
+ *
+ * NB: Always use the JSSTRING_LENGTH and JSSTRING_CHARS accessor macros,
+ * unless you guard str->member uses with !JSSTRING_IS_DEPENDENT(str).
+ */
+struct JSString {
+ size_t length;
+ jschar *chars;
+};
+
+/*
+ * Overlay structure for a string that depends on another string's characters.
+ * Distinguished by the JSSTRFLAG_DEPENDENT bit being set in length. The base
+ * member may point to another dependent string if JSSTRING_CHARS has not been
+ * called yet. The length chars in a dependent string are stored starting at
+ * base->chars + start, and are not necessarily zero-terminated. If start is
+ * 0, it is not stored, length is a full size_t (minus the JSSTRFLAG_* bits in
+ * the high two positions), and the JSSTRFLAG_PREFIX flag is set.
+ */
+struct JSDependentString {
+ size_t length;
+ JSString *base;
+};
+
+/* Definitions for flags stored in the high order bits of JSString.length. */
+#define JSSTRFLAG_BITS 2
+#define JSSTRFLAG_SHIFT(flg) ((size_t)(flg) << JSSTRING_LENGTH_BITS)
+#define JSSTRFLAG_MASK JSSTRFLAG_SHIFT(JS_BITMASK(JSSTRFLAG_BITS))
+#define JSSTRFLAG_DEPENDENT JSSTRFLAG_SHIFT(1)
+#define JSSTRFLAG_PREFIX JSSTRFLAG_SHIFT(2)
+
+/* Universal JSString type inquiry and accessor macros. */
+#define JSSTRING_BIT(n) ((size_t)1 << (n))
+#define JSSTRING_BITMASK(n) (JSSTRING_BIT(n) - 1)
+#define JSSTRING_HAS_FLAG(str,flg) ((str)->length & (flg))
+#define JSSTRING_IS_DEPENDENT(str) JSSTRING_HAS_FLAG(str, JSSTRFLAG_DEPENDENT)
+#define JSSTRING_IS_PREFIX(str) JSSTRING_HAS_FLAG(str, JSSTRFLAG_PREFIX)
+#define JSSTRING_CHARS(str) (JSSTRING_IS_DEPENDENT(str) \
+ ? JSSTRDEP_CHARS(str) \
+ : (str)->chars)
+#define JSSTRING_LENGTH(str) (JSSTRING_IS_DEPENDENT(str) \
+ ? JSSTRDEP_LENGTH(str) \
+ : (str)->length)
+#define JSSTRING_LENGTH_BITS (sizeof(size_t) * JS_BITS_PER_BYTE \
+ - JSSTRFLAG_BITS)
+#define JSSTRING_LENGTH_MASK JSSTRING_BITMASK(JSSTRING_LENGTH_BITS)
+
+/* Specific JSDependentString shift/mask accessor and mutator macros. */
+#define JSSTRDEP_START_BITS (JSSTRING_LENGTH_BITS-JSSTRDEP_LENGTH_BITS)
+#define JSSTRDEP_START_SHIFT JSSTRDEP_LENGTH_BITS
+#define JSSTRDEP_START_MASK JSSTRING_BITMASK(JSSTRDEP_START_BITS)
+#define JSSTRDEP_LENGTH_BITS (JSSTRING_LENGTH_BITS / 2)
+#define JSSTRDEP_LENGTH_MASK JSSTRING_BITMASK(JSSTRDEP_LENGTH_BITS)
+
+#define JSSTRDEP(str) ((JSDependentString *)(str))
+#define JSSTRDEP_START(str) (JSSTRING_IS_PREFIX(str) ? 0 \
+ : ((JSSTRDEP(str)->length \
+ >> JSSTRDEP_START_SHIFT) \
+ & JSSTRDEP_START_MASK))
+#define JSSTRDEP_LENGTH(str) (JSSTRDEP(str)->length \
+ & (JSSTRING_IS_PREFIX(str) \
+ ? JSSTRING_LENGTH_MASK \
+ : JSSTRDEP_LENGTH_MASK))
+
+#define JSSTRDEP_SET_START_AND_LENGTH(str,off,len) \
+ (JSSTRDEP(str)->length = JSSTRFLAG_DEPENDENT \
+ | ((off) << JSSTRDEP_START_SHIFT) \
+ | (len))
+#define JSPREFIX_SET_LENGTH(str,len) \
+ (JSSTRDEP(str)->length = JSSTRFLAG_DEPENDENT | JSSTRFLAG_PREFIX | (len))
+
+#define JSSTRDEP_BASE(str) (JSSTRDEP(str)->base)
+#define JSSTRDEP_SET_BASE(str,bstr) (JSSTRDEP(str)->base = (bstr))
+#define JSPREFIX_BASE(str) JSSTRDEP_BASE(str)
+#define JSPREFIX_SET_BASE(str,bstr) JSSTRDEP_SET_BASE(str,bstr)
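+
+/*
+ * Worked example (editorial note): with a 32-bit size_t,
+ * JSSTRING_LENGTH_BITS is 30 and JSSTRDEP_START_BITS == JSSTRDEP_LENGTH_BITS
+ * == 15, so JSSTRDEP_SET_START_AND_LENGTH(str, 5, 10) stores
+ * JSSTRFLAG_DEPENDENT | (5 << 15) | 10 in str->length, from which
+ * JSSTRDEP_START(str) recovers 5 and JSSTRDEP_LENGTH(str) recovers 10.
+ * A prefix string (start == 0) instead uses JSPREFIX_SET_LENGTH, which sets
+ * both flag bits and keeps the full 30-bit length.
+ */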
+
+#define JSSTRDEP_CHARS(str) \
+ (JSSTRING_IS_DEPENDENT(JSSTRDEP_BASE(str)) \
+ ? js_GetDependentStringChars(str) \
+ : JSSTRDEP_BASE(str)->chars + JSSTRDEP_START(str))
+
+extern size_t
+js_MinimizeDependentStrings(JSString *str, int level, JSString **basep);
+
+extern jschar *
+js_GetDependentStringChars(JSString *str);
+
+extern jschar *
+js_GetStringChars(JSString *str);
+
+extern JSString *
+js_ConcatStrings(JSContext *cx, JSString *left, JSString *right);
+
+extern const jschar *
+js_UndependString(JSContext *cx, JSString *str);
+
+struct JSSubString {
+ size_t length;
+ const jschar *chars;
+};
+
+extern jschar js_empty_ucstr[];
+extern JSSubString js_EmptySubString;
+
+/* Unicode character attribute lookup tables. */
+extern const uint8 js_X[];
+extern const uint8 js_Y[];
+extern const uint32 js_A[];
+
+/* Enumerated Unicode general category types. */
+typedef enum JSCharType {
+ JSCT_UNASSIGNED = 0,
+ JSCT_UPPERCASE_LETTER = 1,
+ JSCT_LOWERCASE_LETTER = 2,
+ JSCT_TITLECASE_LETTER = 3,
+ JSCT_MODIFIER_LETTER = 4,
+ JSCT_OTHER_LETTER = 5,
+ JSCT_NON_SPACING_MARK = 6,
+ JSCT_ENCLOSING_MARK = 7,
+ JSCT_COMBINING_SPACING_MARK = 8,
+ JSCT_DECIMAL_DIGIT_NUMBER = 9,
+ JSCT_LETTER_NUMBER = 10,
+ JSCT_OTHER_NUMBER = 11,
+ JSCT_SPACE_SEPARATOR = 12,
+ JSCT_LINE_SEPARATOR = 13,
+ JSCT_PARAGRAPH_SEPARATOR = 14,
+ JSCT_CONTROL = 15,
+ JSCT_FORMAT = 16,
+ JSCT_PRIVATE_USE = 18,
+ JSCT_SURROGATE = 19,
+ JSCT_DASH_PUNCTUATION = 20,
+ JSCT_START_PUNCTUATION = 21,
+ JSCT_END_PUNCTUATION = 22,
+ JSCT_CONNECTOR_PUNCTUATION = 23,
+ JSCT_OTHER_PUNCTUATION = 24,
+ JSCT_MATH_SYMBOL = 25,
+ JSCT_CURRENCY_SYMBOL = 26,
+ JSCT_MODIFIER_SYMBOL = 27,
+ JSCT_OTHER_SYMBOL = 28
+} JSCharType;
+
+/* Character classifying and mapping macros, based on java.lang.Character. */
+#define JS_CCODE(c) (js_A[js_Y[(js_X[(uint16)(c)>>6]<<6)|((c)&0x3F)]])
+#define JS_CTYPE(c) (JS_CCODE(c) & 0x1F)
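+
+/*
+ * Illustrative sketch (not part of the original header): JS_CCODE splits the
+ * 16-bit code unit into a block index (c >> 6) and a 64-entry offset
+ * (c & 0x3F); js_X maps the block to a page, js_Y maps page<<6|offset to an
+ * index into js_A, and the low five bits of that packed attribute word are
+ * the JSCharType tested by the predicates below (each predicate builds a
+ * bitmask of categories and shifts it right by JS_CTYPE(c)).
+ */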
+
+#define JS_ISALPHA(c) ((((1 << JSCT_UPPERCASE_LETTER) | \
+ (1 << JSCT_LOWERCASE_LETTER) | \
+ (1 << JSCT_TITLECASE_LETTER) | \
+ (1 << JSCT_MODIFIER_LETTER) | \
+ (1 << JSCT_OTHER_LETTER)) \
+ >> JS_CTYPE(c)) & 1)
+
+#define JS_ISALNUM(c) ((((1 << JSCT_UPPERCASE_LETTER) | \
+ (1 << JSCT_LOWERCASE_LETTER) | \
+ (1 << JSCT_TITLECASE_LETTER) | \
+ (1 << JSCT_MODIFIER_LETTER) | \
+ (1 << JSCT_OTHER_LETTER) | \
+ (1 << JSCT_DECIMAL_DIGIT_NUMBER)) \
+ >> JS_CTYPE(c)) & 1)
+
+/* A unicode letter, suitable for use in an identifier. */
+#define JS_ISLETTER(c) ((((1 << JSCT_UPPERCASE_LETTER) | \
+ (1 << JSCT_LOWERCASE_LETTER) | \
+ (1 << JSCT_TITLECASE_LETTER) | \
+ (1 << JSCT_MODIFIER_LETTER) | \
+ (1 << JSCT_OTHER_LETTER) | \
+ (1 << JSCT_LETTER_NUMBER)) \
+ >> JS_CTYPE(c)) & 1)
+
+/*
+ * An 'IdentifierPart', per the ECMA grammar, is a Unicode letter, combining
+ * mark, digit, or connector punctuation.
+ */
+#define JS_ISIDPART(c) ((((1 << JSCT_UPPERCASE_LETTER) | \
+ (1 << JSCT_LOWERCASE_LETTER) | \
+ (1 << JSCT_TITLECASE_LETTER) | \
+ (1 << JSCT_MODIFIER_LETTER) | \
+ (1 << JSCT_OTHER_LETTER) | \
+ (1 << JSCT_LETTER_NUMBER) | \
+ (1 << JSCT_NON_SPACING_MARK) | \
+ (1 << JSCT_COMBINING_SPACING_MARK) | \
+ (1 << JSCT_DECIMAL_DIGIT_NUMBER) | \
+ (1 << JSCT_CONNECTOR_PUNCTUATION)) \
+ >> JS_CTYPE(c)) & 1)
+
+/* Unicode control-format characters, ignored in input */
+#define JS_ISFORMAT(c) (((1 << JSCT_FORMAT) >> JS_CTYPE(c)) & 1)
+
+/*
+ * Per ECMA-262 15.10.2.6, these characters are the only ones that make up a
+ * "word", as far as a RegExp is concerned. If we want a Unicode-friendlier
+ * definition of "word", we should rename this macro to something regexp-y.
+ */
+#define JS_ISWORD(c) ((c) < 128 && (isalnum(c) || (c) == '_'))
+
+#define JS_ISIDSTART(c) (JS_ISLETTER(c) || (c) == '_' || (c) == '$')
+#define JS_ISIDENT(c) (JS_ISIDPART(c) || (c) == '_' || (c) == '$')
+
+#define JS_ISXMLSPACE(c) ((c) == ' ' || (c) == '\t' || (c) == '\r' || \
+ (c) == '\n')
+#define JS_ISXMLNSSTART(c) ((JS_CCODE(c) & 0x00000100) || (c) == '_')
+#define JS_ISXMLNS(c) ((JS_CCODE(c) & 0x00000080) || (c) == '.' || \
+ (c) == '-' || (c) == '_')
+#define JS_ISXMLNAMESTART(c) (JS_ISXMLNSSTART(c) || (c) == ':')
+#define JS_ISXMLNAME(c) (JS_ISXMLNS(c) || (c) == ':')
+
+#define JS_ISDIGIT(c) (JS_CTYPE(c) == JSCT_DECIMAL_DIGIT_NUMBER)
+
+/* XXXbe unify on A/X/Y tbls, avoid ctype.h? */
+/* XXXbe fs, etc. ? */
+#define JS_ISSPACE(c) ((JS_CCODE(c) & 0x00070000) == 0x00040000)
+#define JS_ISPRINT(c) ((c) < 128 && isprint(c))
+
+#define JS_ISUPPER(c) (JS_CTYPE(c) == JSCT_UPPERCASE_LETTER)
+#define JS_ISLOWER(c) (JS_CTYPE(c) == JSCT_LOWERCASE_LETTER)
+
+#define JS_TOUPPER(c) ((jschar) ((JS_CCODE(c) & 0x00100000) \
+ ? (c) - ((int32)JS_CCODE(c) >> 22) \
+ : (c)))
+#define JS_TOLOWER(c) ((jschar) ((JS_CCODE(c) & 0x00200000) \
+ ? (c) + ((int32)JS_CCODE(c) >> 22) \
+ : (c)))
+
+/*
+ * Shorthands for ASCII (7-bit) decimal and hex conversion.
+ * Manually inline isdigit for performance; MSVC doesn't do this for us.
+ */
+#define JS7_ISDEC(c) ((((unsigned)(c)) - '0') <= 9)
+#define JS7_UNDEC(c) ((c) - '0')
+#define JS7_ISHEX(c) ((c) < 128 && isxdigit(c))
+#define JS7_UNHEX(c) (uintN)(JS7_ISDEC(c) ? (c) - '0' : 10 + tolower(c) - 'a')
+#define JS7_ISLET(c) ((c) < 128 && isalpha(c))
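+
+/*
+ * Illustrative sketch (not part of the original header): decoding a "%XX"
+ * escape with the shorthands above, assuming both digits already passed
+ * JS7_ISHEX:
+ *
+ *     uintN byte = (JS7_UNHEX(hi) << 4) | JS7_UNHEX(lo);
+ */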
+
+/* Initialize per-runtime string state for the first context in the runtime. */
+extern JSBool
+js_InitRuntimeStringState(JSContext *cx);
+
+extern void
+js_FinishRuntimeStringState(JSContext *cx);
+
+extern void
+js_FinishDeflatedStringCache(JSRuntime *rt);
+
+/* Initialize the String class, returning its prototype object. */
+extern JSClass js_StringClass;
+
+extern JSObject *
+js_InitStringClass(JSContext *cx, JSObject *obj);
+
+extern const char js_escape_str[];
+extern const char js_unescape_str[];
+extern const char js_uneval_str[];
+extern const char js_decodeURI_str[];
+extern const char js_encodeURI_str[];
+extern const char js_decodeURIComponent_str[];
+extern const char js_encodeURIComponent_str[];
+
+/* GC-allocate a string descriptor for the given malloc-allocated chars. */
+extern JSString *
+js_NewString(JSContext *cx, jschar *chars, size_t length, uintN gcflag);
+
+extern JSString *
+js_NewDependentString(JSContext *cx, JSString *base, size_t start,
+ size_t length, uintN gcflag);
+
+/* Copy a counted string and GC-allocate a descriptor for it. */
+extern JSString *
+js_NewStringCopyN(JSContext *cx, const jschar *s, size_t n, uintN gcflag);
+
+/* Copy a C string and GC-allocate a descriptor for it. */
+extern JSString *
+js_NewStringCopyZ(JSContext *cx, const jschar *s, uintN gcflag);
+
+/* Free the chars held by str when it is finalized by the GC. */
+extern void
+js_FinalizeString(JSContext *cx, JSString *str);
+
+extern void
+js_FinalizeStringRT(JSRuntime *rt, JSString *str);
+
+/* Wrap a string value in a String object. */
+extern JSObject *
+js_StringToObject(JSContext *cx, JSString *str);
+
+/*
+ * Convert a value to a printable C string.
+ */
+typedef JSString *(*JSValueToStringFun)(JSContext *cx, jsval v);
+
+extern JS_FRIEND_API(const char *)
+js_ValueToPrintable(JSContext *cx, jsval v, JSValueToStringFun v2sfun);
+
+#define js_ValueToPrintableString(cx,v) \
+ js_ValueToPrintable(cx, v, js_ValueToString)
+
+#define js_ValueToPrintableSource(cx,v) \
+ js_ValueToPrintable(cx, v, js_ValueToSource)
+
+/*
+ * Convert a value to a string, returning null after reporting an error,
+ * otherwise returning a new string reference.
+ */
+extern JS_FRIEND_API(JSString *)
+js_ValueToString(JSContext *cx, jsval v);
+
+/*
+ * Convert a value to its source expression, returning null after reporting
+ * an error, otherwise returning a new string reference.
+ */
+extern JS_FRIEND_API(JSString *)
+js_ValueToSource(JSContext *cx, jsval v);
+
+#ifdef HT_ENUMERATE_NEXT /* XXX don't require jshash.h */
+/*
+ * Compute a hash function from str.
+ */
+extern JSHashNumber
+js_HashString(JSString *str);
+#endif
+
+/*
+ * Return less than, equal to, or greater than zero depending on whether
+ * str1 is less than, equal to, or greater than str2.
+ */
+extern intN
+js_CompareStrings(JSString *str1, JSString *str2);
+
+/*
+ * Test if strings are equal.
+ */
+extern JSBool
+js_EqualStrings(JSString *str1, JSString *str2);
+
+/*
+ * Boyer-Moore-Horspool superlinear search for pat:patlen in text:textlen.
+ * The patlen argument must be positive and no greater than BMH_PATLEN_MAX.
+ * The start argument tells where in text to begin the search.
+ *
+ * Return the index of pat in text, or -1 if not found.
+ */
+#define BMH_CHARSET_SIZE 256 /* ISO-Latin-1 */
+#define BMH_PATLEN_MAX 255 /* skip table element is uint8 */
+
+#define BMH_BAD_PATTERN (-2) /* return value if pat is not ISO-Latin-1 */
+
+extern jsint
+js_BoyerMooreHorspool(const jschar *text, jsint textlen,
+ const jschar *pat, jsint patlen,
+ jsint start);
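+
+/*
+ * Illustrative sketch (not part of the original header): a typical call,
+ * guarding against patterns too long for the uint8 skip table.
+ *
+ *     jsint i = (patlen > 0 && patlen <= BMH_PATLEN_MAX)
+ *               ? js_BoyerMooreHorspool(text, textlen, pat, patlen, 0)
+ *               : -1;   // caller must fall back to a naive scan
+ *
+ * A result >= 0 is the match offset, -1 means not found, and
+ * BMH_BAD_PATTERN means pat contains non-ISO-Latin-1 characters.
+ */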
+
+extern size_t
+js_strlen(const jschar *s);
+
+extern jschar *
+js_strchr(const jschar *s, jschar c);
+
+extern jschar *
+js_strchr_limit(const jschar *s, jschar c, const jschar *limit);
+
+#define js_strncpy(t, s, n) memcpy((t), (s), (n) * sizeof(jschar))
+
+/*
+ * Return s advanced past any Unicode white space characters.
+ */
+extern const jschar *
+js_SkipWhiteSpace(const jschar *s);
+
+/*
+ * Inflate bytes to JS chars and vice versa. Report out of memory via cx
+ * and return null on error, otherwise return the jschar or byte vector that
+ * was JS_malloc'ed. *length is updated to the length of the new string, in
+ * jschars.
+ */
+extern jschar *
+js_InflateString(JSContext *cx, const char *bytes, size_t *length);
+
+extern char *
+js_DeflateString(JSContext *cx, const jschar *chars, size_t length);
+
+/*
+ * Inflate bytes to JS chars into a buffer.
+ * 'chars' must be large enough for 'length' jschars.
+ * The buffer is NOT null-terminated.
+ * cx may be NULL, which means no errors are thrown.
+ * On entry *charsLength must hold the buffer size in jschars; on return it
+ * holds the number of jschars written.
+ */
+extern JSBool
+js_InflateStringToBuffer(JSContext* cx, const char *bytes, size_t length,
+ jschar *chars, size_t* charsLength);
+
+/*
+ * Deflate JS chars to bytes into a buffer.
+ * 'bytes' must be large enough for 'length' chars.
+ * The buffer is NOT null-terminated.
+ * cx may be NULL, which means no errors are thrown.
+ * On entry *length must hold the buffer size in bytes; on return it holds
+ * the number of bytes written.
+ */
+extern JSBool
+js_DeflateStringToBuffer(JSContext* cx, const jschar *chars,
+ size_t charsLength, char *bytes, size_t* length);
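+
+/*
+ * Illustrative sketch (not part of the original header): deflating a jschar
+ * buffer into a fixed-size byte buffer on the stack.
+ *
+ *     char out[64];
+ *     size_t outlen = sizeof out;   // in: capacity, out: bytes written
+ *     if (js_DeflateStringToBuffer(cx, chars, nchars, out, &outlen)) {
+ *         // out[0..outlen-1] holds the deflated bytes (not NUL-terminated)
+ *     }
+ */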
+
+/*
+ * Associate bytes with str in the deflated string cache, returning true on
+ * successful association, false on out of memory.
+ */
+extern JSBool
+js_SetStringBytes(JSRuntime *rt, JSString *str, char *bytes, size_t length);
+
+/*
+ * Find or create a deflated string cache entry for str that contains its
+ * characters chopped from Unicode code points into bytes.
+ */
+extern char *
+js_GetStringBytes(JSRuntime *rt, JSString *str);
+
+/* Remove a deflated string cache entry associated with str if any. */
+extern void
+js_PurgeDeflatedStringCache(JSRuntime *rt, JSString *str);
+
+JSBool
+js_str_escape(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval);
+
+/*
+ * Convert one UCS-4 char and write it into a UTF-8 buffer, which must be at
+ * least 6 bytes long. Return the number of UTF-8 bytes of data written.
+ */
+extern int
+js_OneUcs4ToUtf8Char(uint8 *utf8Buffer, uint32 ucs4Char);
+
+JS_END_EXTERN_C
+
+#endif /* jsstr_h___ */
diff --git a/third_party/js-1.7/jstypes.h b/third_party/js-1.7/jstypes.h
new file mode 100644
index 0000000..8aca929
--- /dev/null
+++ b/third_party/js-1.7/jstypes.h
@@ -0,0 +1,464 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ * IBM Corp.
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+** File: jstypes.h
+** Description: Definitions of NSPR's basic types
+**
+** Prototypes and macros used to make up for deficiencies in ANSI environments
+** that we have found.
+**
+** Since we do not wrap <stdlib.h> and all the other standard headers, authors
+** of portable code will not know in general that they need these definitions.
+** Instead of requiring these authors to find the dependent uses in their code
+** and take the following steps only in those C files, we take steps once here
+** for all C files.
+**/
+
+#ifndef jstypes_h___
+#define jstypes_h___
+
+#include <stddef.h>
+
+/***********************************************************************
+** MACROS: JS_EXTERN_API
+** JS_EXPORT_API
+** DESCRIPTION:
+** These are only for externally visible routines and globals. For
+** internal routines, just use "extern" for type checking and that
+** will not export internal cross-file or forward-declared symbols.
+** Define a macro for declaring procedures' return types. We use this to
+** deal with Windows-specific type hackery for DLL definitions. Use
+** JS_EXTERN_API when the prototype for the method is declared. Use
+** JS_EXPORT_API for the implementation of the method.
+**
+** Example:
+** in dowhim.h
+** JS_EXTERN_API( void ) DoWhatIMean( void );
+** in dowhim.c
+** JS_EXPORT_API( void ) DoWhatIMean( void ) { return; }
+**
+**
+***********************************************************************/
+#ifdef WIN32
+/* These also work for __MWERKS__ */
+#define JS_EXTERN_API(__type) extern __declspec(dllexport) __type
+#define JS_EXPORT_API(__type) __declspec(dllexport) __type
+#define JS_EXTERN_DATA(__type) extern __declspec(dllexport) __type
+#define JS_EXPORT_DATA(__type) __declspec(dllexport) __type
+
+#define JS_DLL_CALLBACK
+#define JS_STATIC_DLL_CALLBACK(__x) static __x
+
+#elif defined(XP_OS2) && defined(__declspec)
+
+#define JS_EXTERN_API(__type) extern __declspec(dllexport) __type
+#define JS_EXPORT_API(__type) __declspec(dllexport) __type
+#define JS_EXTERN_DATA(__type) extern __declspec(dllexport) __type
+#define JS_EXPORT_DATA(__type) __declspec(dllexport) __type
+
+#define JS_DLL_CALLBACK
+#define JS_STATIC_DLL_CALLBACK(__x) static __x
+
+#elif defined(WIN16)
+
+#ifdef _WINDLL
+#define JS_EXTERN_API(__type) extern __type _cdecl _export _loadds
+#define JS_EXPORT_API(__type) __type _cdecl _export _loadds
+#define JS_EXTERN_DATA(__type) extern __type _export
+#define JS_EXPORT_DATA(__type) __type _export
+
+#define JS_DLL_CALLBACK __cdecl __loadds
+#define JS_STATIC_DLL_CALLBACK(__x) static __x CALLBACK
+
+#else /* this must be .EXE */
+#define JS_EXTERN_API(__type) extern __type _cdecl _export
+#define JS_EXPORT_API(__type) __type _cdecl _export
+#define JS_EXTERN_DATA(__type) extern __type _export
+#define JS_EXPORT_DATA(__type) __type _export
+
+#define JS_DLL_CALLBACK __cdecl __loadds
+#define JS_STATIC_DLL_CALLBACK(__x) __x JS_DLL_CALLBACK
+#endif /* _WINDLL */
+
+#else /* Unix */
+
+#ifdef HAVE_VISIBILITY_ATTRIBUTE
+#define JS_EXTERNAL_VIS __attribute__((visibility ("default")))
+#else
+#define JS_EXTERNAL_VIS
+#endif
+
+#define JS_EXTERN_API(__type) extern JS_EXTERNAL_VIS __type
+#define JS_EXPORT_API(__type) JS_EXTERNAL_VIS __type
+#define JS_EXTERN_DATA(__type) extern JS_EXTERNAL_VIS __type
+#define JS_EXPORT_DATA(__type) JS_EXTERNAL_VIS __type
+
+#define JS_DLL_CALLBACK
+#define JS_STATIC_DLL_CALLBACK(__x) static __x
+
+#endif
+
+#ifdef _WIN32
+# if defined(__MWERKS__) || defined(__GNUC__)
+# define JS_IMPORT_API(__x) __x
+# else
+# define JS_IMPORT_API(__x) __declspec(dllimport) __x
+# endif
+#elif defined(XP_OS2) && defined(__declspec)
+# define JS_IMPORT_API(__x) __declspec(dllimport) __x
+#else
+# define JS_IMPORT_API(__x) JS_EXPORT_API (__x)
+#endif
+
+#if defined(_WIN32) && !defined(__MWERKS__)
+# define JS_IMPORT_DATA(__x) __declspec(dllimport) __x
+#elif defined(XP_OS2) && defined(__declspec)
+# define JS_IMPORT_DATA(__x) __declspec(dllimport) __x
+#else
+# define JS_IMPORT_DATA(__x) JS_EXPORT_DATA (__x)
+#endif
+
+/*
+ * The linkage of JS API functions differs depending on whether the file is
+ * used within the JS library or not. Any source file within the JS
+ * interpreter should define EXPORT_JS_API whereas any client of the library
+ * should not.
+ */
+#ifdef EXPORT_JS_API
+#define JS_PUBLIC_API(t) JS_EXPORT_API(t)
+#define JS_PUBLIC_DATA(t) JS_EXPORT_DATA(t)
+#else
+#define JS_PUBLIC_API(t) JS_IMPORT_API(t)
+#define JS_PUBLIC_DATA(t) JS_IMPORT_DATA(t)
+#endif
+
+#define JS_FRIEND_API(t) JS_PUBLIC_API(t)
+#define JS_FRIEND_DATA(t) JS_PUBLIC_DATA(t)
+
+#ifdef _WIN32
+# define JS_INLINE __inline
+#elif defined(__GNUC__)
+# define JS_INLINE
+#else
+# define JS_INLINE
+#endif
+
+/***********************************************************************
+** MACROS: JS_BEGIN_MACRO
+** JS_END_MACRO
+** DESCRIPTION:
+** Macro body brackets so that macros with compound statement definitions
+** behave syntactically more like functions when called.
+***********************************************************************/
+#define JS_BEGIN_MACRO do {
+#define JS_END_MACRO } while (0)
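+
+/*
+ * Illustrative sketch (not part of the original header): FREE_AND_CLEAR is a
+ * hypothetical macro showing the intended use.
+ *
+ *     #define FREE_AND_CLEAR(cx, p)   \
+ *         JS_BEGIN_MACRO              \
+ *             JS_free(cx, p);         \
+ *             (p) = NULL;             \
+ *         JS_END_MACRO
+ *
+ * The do/while(0) wrapper lets the macro sit in an unbraced if/else branch
+ * and be followed by a semicolon, just like a function call.
+ */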
+
+/***********************************************************************
+** MACROS: JS_BEGIN_EXTERN_C
+** JS_END_EXTERN_C
+** DESCRIPTION:
+** Macro shorthands for conditional C++ extern block delimiters.
+***********************************************************************/
+#ifdef __cplusplus
+#define JS_BEGIN_EXTERN_C extern "C" {
+#define JS_END_EXTERN_C }
+#else
+#define JS_BEGIN_EXTERN_C
+#define JS_END_EXTERN_C
+#endif
+
+/***********************************************************************
+** MACROS: JS_BIT
+** JS_BITMASK
+** DESCRIPTION:
+** Bit masking macros. XXX n must be <= 31 to be portable
+***********************************************************************/
+#define JS_BIT(n) ((JSUint32)1 << (n))
+#define JS_BITMASK(n) (JS_BIT(n) - 1)
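+
+/*
+ * Illustrative sketch (not part of the original header): JS_BIT(3) == 0x8 and
+ * JS_BITMASK(3) == 0x7, so (x & JS_BITMASK(3)) extracts the low three bits.
+ */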
+
+/***********************************************************************
+** MACROS: JS_PTR_TO_INT32
+** JS_PTR_TO_UINT32
+** JS_INT32_TO_PTR
+** JS_UINT32_TO_PTR
+** DESCRIPTION:
+** Integer to pointer and pointer to integer conversion macros.
+***********************************************************************/
+#define JS_PTR_TO_INT32(x) ((jsint)((char *)(x) - (char *)0))
+#define JS_PTR_TO_UINT32(x) ((jsuint)((char *)(x) - (char *)0))
+#define JS_INT32_TO_PTR(x) ((void *)((char *)0 + (jsint)(x)))
+#define JS_UINT32_TO_PTR(x) ((void *)((char *)0 + (jsuint)(x)))
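+
+/*
+ * Illustrative sketch (not part of the original header): stashing a small
+ * integer id in a void* slot and recovering it later.
+ *
+ *     void *slot = JS_INT32_TO_PTR(42);
+ *     jsint id = JS_PTR_TO_INT32(slot);   // id == 42
+ */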
+
+/***********************************************************************
+** MACROS: JS_HOWMANY
+** JS_ROUNDUP
+** JS_MIN
+** JS_MAX
+** DESCRIPTION:
+** Commonly used macros for operations on compatible types.
+***********************************************************************/
+#define JS_HOWMANY(x,y) (((x)+(y)-1)/(y))
+#define JS_ROUNDUP(x,y) (JS_HOWMANY(x,y)*(y))
+#define JS_MIN(x,y) ((x)<(y)?(x):(y))
+#define JS_MAX(x,y) ((x)>(y)?(x):(y))
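+
+/*
+ * Illustrative sketch (not part of the original header): JS_HOWMANY(13, 8)
+ * == 2 and JS_ROUNDUP(13, 8) == 16, i.e. 13 bytes occupy two 8-byte blocks
+ * totalling 16 bytes.
+ */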
+
+#if (defined(XP_WIN) && !defined(CROSS_COMPILE)) || defined (WINCE)
+# include "jscpucfg.h" /* Use standard Mac or Windows configuration */
+#elif defined(XP_UNIX) || defined(XP_BEOS) || defined(XP_OS2) || defined(CROSS_COMPILE)
+# include "jsautocfg.h" /* Use auto-detected configuration */
+# include "jsosdep.h" /* ...and platform-specific flags */
+#else
+# error "Must define one of XP_BEOS, XP_OS2, XP_WIN or XP_UNIX"
+#endif
+
+JS_BEGIN_EXTERN_C
+
+/************************************************************************
+** TYPES: JSUint8
+** JSInt8
+** DESCRIPTION:
+** The int8 types are known to be 8 bits each. There is no type that
+** is equivalent to a plain "char".
+************************************************************************/
+#if JS_BYTES_PER_BYTE == 1
+typedef unsigned char JSUint8;
+typedef signed char JSInt8;
+#else
+#error No suitable type for JSInt8/JSUint8
+#endif
+
+/************************************************************************
+** TYPES: JSUint16
+** JSInt16
+** DESCRIPTION:
+** The int16 types are known to be 16 bits each.
+************************************************************************/
+#if JS_BYTES_PER_SHORT == 2
+typedef unsigned short JSUint16;
+typedef short JSInt16;
+#else
+#error No suitable type for JSInt16/JSUint16
+#endif
+
+/************************************************************************
+** TYPES: JSUint32
+** JSInt32
+** DESCRIPTION:
+** The int32 types are known to be 32 bits each.
+************************************************************************/
+#if JS_BYTES_PER_INT == 4
+typedef unsigned int JSUint32;
+typedef int JSInt32;
+#define JS_INT32(x) x
+#define JS_UINT32(x) x ## U
+#elif JS_BYTES_PER_LONG == 4
+typedef unsigned long JSUint32;
+typedef long JSInt32;
+#define JS_INT32(x) x ## L
+#define JS_UINT32(x) x ## UL
+#else
+#error No suitable type for JSInt32/JSUint32
+#endif
+
+/************************************************************************
+** TYPES: JSUint64
+** JSInt64
+** DESCRIPTION:
+** The int64 types are known to be 64 bits each. Care must be used when
+** declaring variables of type JSUint64 or JSInt64. Different hardware
+** architectures and even different compilers have varying support for
+** 64 bit values. The only guaranteed portability requires the use of
+** the JSLL_ macros (see jslong.h).
+************************************************************************/
+#ifdef JS_HAVE_LONG_LONG
+#if JS_BYTES_PER_LONG == 8
+typedef long JSInt64;
+typedef unsigned long JSUint64;
+#elif defined(WIN16)
+typedef __int64 JSInt64;
+typedef unsigned __int64 JSUint64;
+#elif defined(WIN32) && !defined(__GNUC__)
+typedef __int64 JSInt64;
+typedef unsigned __int64 JSUint64;
+#else
+typedef long long JSInt64;
+typedef unsigned long long JSUint64;
+#endif /* JS_BYTES_PER_LONG == 8 */
+#else /* !JS_HAVE_LONG_LONG */
+typedef struct {
+#ifdef IS_LITTLE_ENDIAN
+ JSUint32 lo, hi;
+#else
+ JSUint32 hi, lo;
+#endif
+} JSInt64;
+typedef JSInt64 JSUint64;
+#endif /* !JS_HAVE_LONG_LONG */
+
+/************************************************************************
+** TYPES: JSUintn
+** JSIntn
+** DESCRIPTION:
+** The JSIntn types are most appropriate for automatic variables. They are
+** guaranteed to be at least 16 bits, though various architectures may
+** define them to be wider (e.g., 32 or even 64 bits). These types are
+** never valid for fields of a structure.
+************************************************************************/
+#if JS_BYTES_PER_INT >= 2
+typedef int JSIntn;
+typedef unsigned int JSUintn;
+#else
+#error 'sizeof(int)' not sufficient for platform use
+#endif
+
+/************************************************************************
+** TYPES: JSFloat64
+** DESCRIPTION:
+** NSPR's floating point type is always 64 bits.
+************************************************************************/
+typedef double JSFloat64;
+
+/************************************************************************
+** TYPES: JSSize
+** DESCRIPTION:
+** A type for representing the size of objects.
+************************************************************************/
+typedef size_t JSSize;
+
+/************************************************************************
+** TYPES: JSPtrdiff
+** DESCRIPTION:
+** A type for pointer difference. Variables of this type are suitable
+** for storing a pointer or pointer subtraction.
+************************************************************************/
+typedef ptrdiff_t JSPtrdiff;
+
+/************************************************************************
+** TYPES: JSUptrdiff
+** DESCRIPTION:
+** An unsigned type for pointer difference. Variables of this type are
+** suitable for storing a pointer or pointer subtraction.
+************************************************************************/
+#if JS_BYTES_PER_WORD == 8 && JS_BYTES_PER_LONG != 8
+typedef JSUint64 JSUptrdiff;
+#else
+typedef unsigned long JSUptrdiff;
+#endif
+
+/************************************************************************
+** TYPES: JSBool
+** DESCRIPTION:
+** Use JSBool for variables and parameter types. Use JS_FALSE and JS_TRUE
+** for clarity of target type in assignments and actual arguments. Use
+** 'if (bool)', 'while (!bool)', '(bool) ? x : y' etc., to test booleans
+** just as you would C int-valued conditions.
+************************************************************************/
+typedef JSIntn JSBool;
+#define JS_TRUE (JSIntn)1
+#define JS_FALSE (JSIntn)0
+
+/************************************************************************
+** TYPES: JSPackedBool
+** DESCRIPTION:
+** Use JSPackedBool within structs where bitfields are not desirable
+** but minimum and consistent overhead matters.
+************************************************************************/
+typedef JSUint8 JSPackedBool;
+
+/*
+** A JSWord is an integer that is the same size as a void*
+*/
+#if JS_BYTES_PER_WORD == 8 && JS_BYTES_PER_LONG != 8
+typedef JSInt64 JSWord;
+typedef JSUint64 JSUword;
+#else
+typedef long JSWord;
+typedef unsigned long JSUword;
+#endif
+
+#include "jsotypes.h"
+
+/***********************************************************************
+** MACROS: JS_LIKELY
+** JS_UNLIKELY
+** DESCRIPTION:
+** These macros allow you to give a hint to the compiler about branch
+** probability so that it can better optimize. Use them like this:
+**
+** if (JS_LIKELY(v == 1)) {
+** ... expected code path ...
+** }
+**
+** if (JS_UNLIKELY(v == 0)) {
+** ... non-expected code path ...
+** }
+**
+***********************************************************************/
+#if defined(__GNUC__) && (__GNUC__ > 2)
+#define JS_LIKELY(x) (__builtin_expect((x), 1))
+#define JS_UNLIKELY(x) (__builtin_expect((x), 0))
+#else
+#define JS_LIKELY(x) (x)
+#define JS_UNLIKELY(x) (x)
+#endif
+
+/***********************************************************************
+** MACROS: JS_ARRAY_LENGTH
+** JS_ARRAY_END
+** DESCRIPTION:
+** Macros to get the number of elements and the pointer to one past the
+** last element of a C array. Use them like this:
+**
+** jschar buf[10], *s;
+** JSString *str;
+** ...
+** for (s = buf; s != JS_ARRAY_END(buf); ++s) *s = ...;
+** ...
+** str = JS_NewStringCopyN(cx, buf, JS_ARRAY_LENGTH(buf));
+** ...
+**
+***********************************************************************/
+
+#define JS_ARRAY_LENGTH(array) (sizeof (array) / sizeof (array)[0])
+#define JS_ARRAY_END(array) ((array) + JS_ARRAY_LENGTH(array))
+
+JS_END_EXTERN_C
+
+#endif /* jstypes_h___ */
diff --git a/third_party/js-1.7/jsutil.c b/third_party/js-1.7/jsutil.c
new file mode 100644
index 0000000..1bb9f93
--- /dev/null
+++ b/third_party/js-1.7/jsutil.c
@@ -0,0 +1,198 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ * IBM Corp.
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * PR assertion checker.
+ */
+#include "jsstddef.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "jstypes.h"
+#include "jsutil.h"
+
+#ifdef WIN32
+# include <windows.h>
+#endif
+
+JS_PUBLIC_API(void) JS_Assert(const char *s, const char *file, JSIntn ln)
+{
+ fprintf(stderr, "Assertion failure: %s, at %s:%d\n", s, file, ln);
+#if defined(WIN32)
+ DebugBreak();
+ exit(3);
+#elif defined(XP_OS2) || (defined(__GNUC__) && defined(__i386))
+ asm("int $3");
+#endif
+ abort();
+}
+
+#if defined DEBUG_notme && defined XP_UNIX
+
+#define __USE_GNU 1
+#include <dlfcn.h>
+#include <string.h>
+#include "jshash.h"
+#include "jsprf.h"
+
+JSCallsite js_calltree_root = {0, NULL, NULL, 0, NULL, NULL, NULL, NULL};
+
+static JSCallsite *
+CallTree(void **bp)
+{
+ void **bpup, **bpdown, *pc;
+ JSCallsite *parent, *site, **csp;
+ Dl_info info;
+ int ok, offset;
+ const char *symbol;
+ char *method;
+
+ /* Reverse the stack frame list to avoid recursion. */
+ bpup = NULL;
+ for (;;) {
+ bpdown = (void**) bp[0];
+ bp[0] = (void*) bpup;
+ if ((void**) bpdown[0] < bpdown)
+ break;
+ bpup = bp;
+ bp = bpdown;
+ }
+
+ /* Reverse the stack again, finding and building a path in the tree. */
+ parent = &js_calltree_root;
+ do {
+ bpup = (void**) bp[0];
+ bp[0] = (void*) bpdown;
+ pc = bp[1];
+
+ csp = &parent->kids;
+ while ((site = *csp) != NULL) {
+ if (site->pc == pc) {
+ /* Put the most recently used site at the front of siblings. */
+ *csp = site->siblings;
+ site->siblings = parent->kids;
+ parent->kids = site;
+
+ /* Site already built -- go up the stack. */
+ goto upward;
+ }
+ csp = &site->siblings;
+ }
+
+ /* Check for recursion: see if pc is on our ancestor line. */
+ for (site = parent; site; site = site->parent) {
+ if (site->pc == pc)
+ goto upward;
+ }
+
+ /*
+ * Not in tree at all: let's find our symbolic callsite info.
+ * XXX static syms are masked by nearest lower global
+ */
+ info.dli_fname = info.dli_sname = NULL;
+ ok = dladdr(pc, &info);
+ if (ok < 0) {
+ fprintf(stderr, "dladdr failed!\n");
+ return NULL;
+ }
+
+/* XXXbe sub 0x08040000? or something, see dbaron bug with tenthumbs comment */
+ symbol = info.dli_sname;
+ offset = (char*)pc - (char*)info.dli_fbase;
+ method = symbol
+ ? strdup(symbol)
+ : JS_smprintf("%s+%X",
+ info.dli_fname ? info.dli_fname : "main",
+ offset);
+ if (!method)
+ return NULL;
+
+ /* Create a new callsite record. */
+ site = (JSCallsite *) malloc(sizeof(JSCallsite));
+ if (!site)
+ return NULL;
+
+ /* Insert the new site into the tree. */
+ site->pc = pc;
+ site->name = method;
+ site->library = info.dli_fname;
+ site->offset = offset;
+ site->parent = parent;
+ site->siblings = parent->kids;
+ parent->kids = site;
+ site->kids = NULL;
+
+ upward:
+ parent = site;
+ bpdown = bp;
+ bp = bpup;
+ } while (bp);
+
+ return site;
+}
+
+JSCallsite *
+JS_Backtrace(int skip)
+{
+ void **bp, **bpdown;
+
+ /* Stack walking code adapted from Kipp's "leaky". */
+#if defined(__i386)
+ __asm__( "movl %%ebp, %0" : "=g"(bp));
+#elif defined(__x86_64__)
+ __asm__( "movq %%rbp, %0" : "=g"(bp));
+#else
+ /*
+ * It would be nice if this worked uniformly, but at least on i386 and
+ * x86_64, it stopped working with gcc 4.1, because it points to the
+ * end of the saved registers instead of the start.
+ */
+ bp = (void**) __builtin_frame_address(0);
+#endif
+ while (--skip >= 0) {
+ bpdown = (void**) *bp++;
+ if (bpdown < bp)
+ break;
+ bp = bpdown;
+ }
+
+ return CallTree(bp);
+}
+
+#endif /* DEBUG_notme && XP_UNIX */
diff --git a/third_party/js-1.7/jsutil.h b/third_party/js-1.7/jsutil.h
new file mode 100644
index 0000000..efcb614
--- /dev/null
+++ b/third_party/js-1.7/jsutil.h
@@ -0,0 +1,106 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * PR assertion checker.
+ */
+
+#ifndef jsutil_h___
+#define jsutil_h___
+
+JS_BEGIN_EXTERN_C
+
+#ifdef DEBUG
+
+extern JS_PUBLIC_API(void)
+JS_Assert(const char *s, const char *file, JSIntn ln);
+#define JS_ASSERT(_expr) \
+ ((_expr)?((void)0):JS_Assert(# _expr,__FILE__,__LINE__))
+
+#define JS_NOT_REACHED(_reasonStr) \
+ JS_Assert(_reasonStr,__FILE__,__LINE__)
+
+#else
+
+#define JS_ASSERT(expr) ((void) 0)
+#define JS_NOT_REACHED(reasonStr)
+
+#endif /* defined(DEBUG) */
+
+/*
+ * Compile-time assert. "condition" must be a constant expression.
+ * The macro should be used only once per source line in places where
+ * a "typedef" declaration is allowed.
+ */
+#define JS_STATIC_ASSERT(condition) \
+ JS_STATIC_ASSERT_IMPL(condition, __LINE__)
+#define JS_STATIC_ASSERT_IMPL(condition, line) \
+ JS_STATIC_ASSERT_IMPL2(condition, line)
+#define JS_STATIC_ASSERT_IMPL2(condition, line) \
+ typedef int js_static_assert_line_##line[(condition) ? 1 : -1]
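+
+/*
+ * Illustrative sketch (not part of the original header):
+ *
+ *     JS_STATIC_ASSERT(sizeof(JSUint32) == 4);
+ *
+ * expands to a typedef of an int array whose size is -1 when the condition
+ * is false, so a violated assertion fails to compile at that line.
+ */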
+
+/*
+** Abort the process in a non-graceful manner. This will produce a core file,
+** a break into the debugger, or the moral equivalent, as well as stopping
+** the entire process.
+*/
+extern JS_PUBLIC_API(void) JS_Abort(void);
+
+#ifdef XP_UNIX
+
+typedef struct JSCallsite JSCallsite;
+
+struct JSCallsite {
+ uint32 pc;
+ char *name;
+ const char *library;
+ int offset;
+ JSCallsite *parent;
+ JSCallsite *siblings;
+ JSCallsite *kids;
+ void *handy;
+};
+
+extern JSCallsite *JS_Backtrace(int skip);
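+
+/*
+ * Illustrative sketch (not part of the original header; JS_Backtrace is only
+ * compiled when the DEBUG_notme instrumentation in jsutil.c is enabled):
+ * walk from the innermost recorded callsite toward the root, skipping this
+ * frame. Requires <stdio.h>.
+ *
+ *     JSCallsite *site;
+ *     for (site = JS_Backtrace(1); site && site->name; site = site->parent)
+ *         fprintf(stderr, "%s+%d\n", site->name, site->offset);
+ */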
+
+#endif
+
+JS_END_EXTERN_C
+
+#endif /* jsutil_h___ */
diff --git a/third_party/js-1.7/jsxdrapi.c b/third_party/js-1.7/jsxdrapi.c
new file mode 100644
index 0000000..2855c60
--- /dev/null
+++ b/third_party/js-1.7/jsxdrapi.c
@@ -0,0 +1,835 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+#include "jsstddef.h"
+#include "jsconfig.h"
+
+#if JS_HAS_XDR
+
+#include <string.h>
+#include "jstypes.h"
+#include "jsutil.h" /* Added by JSIFY */
+#include "jsdhash.h"
+#include "jsprf.h"
+#include "jsapi.h"
+#include "jscntxt.h"
+#include "jsnum.h"
+#include "jsobj.h" /* js_XDRObject */
+#include "jsscript.h" /* js_XDRScript */
+#include "jsstr.h"
+#include "jsxdrapi.h"
+
+#ifdef DEBUG
+#define DBG(x) x
+#else
+#define DBG(x) ((void)0)
+#endif
+
+typedef struct JSXDRMemState {
+ JSXDRState state;
+ char *base;
+ uint32 count;
+ uint32 limit;
+} JSXDRMemState;
+
+#define MEM_BLOCK 8192
+#define MEM_PRIV(xdr) ((JSXDRMemState *)(xdr))
+
+#define MEM_BASE(xdr) (MEM_PRIV(xdr)->base)
+#define MEM_COUNT(xdr) (MEM_PRIV(xdr)->count)
+#define MEM_LIMIT(xdr) (MEM_PRIV(xdr)->limit)
+
+#define MEM_LEFT(xdr, bytes) \
+ JS_BEGIN_MACRO \
+ if ((xdr)->mode == JSXDR_DECODE && \
+ MEM_COUNT(xdr) + bytes > MEM_LIMIT(xdr)) { \
+ JS_ReportErrorNumber((xdr)->cx, js_GetErrorMessage, NULL, \
+ JSMSG_END_OF_DATA); \
+ return 0; \
+ } \
+ JS_END_MACRO
+
+#define MEM_NEED(xdr, bytes) \
+ JS_BEGIN_MACRO \
+ if ((xdr)->mode == JSXDR_ENCODE) { \
+ if (MEM_LIMIT(xdr) && \
+ MEM_COUNT(xdr) + bytes > MEM_LIMIT(xdr)) { \
+ uint32 limit_ = JS_ROUNDUP(MEM_COUNT(xdr) + bytes, MEM_BLOCK);\
+ void *data_ = JS_realloc((xdr)->cx, MEM_BASE(xdr), limit_); \
+ if (!data_) \
+ return 0; \
+ MEM_BASE(xdr) = data_; \
+ MEM_LIMIT(xdr) = limit_; \
+ } \
+ } else { \
+ MEM_LEFT(xdr, bytes); \
+ } \
+ JS_END_MACRO
+
+#define MEM_DATA(xdr) ((void *)(MEM_BASE(xdr) + MEM_COUNT(xdr)))
+#define MEM_INCR(xdr,bytes) (MEM_COUNT(xdr) += (bytes))
+
+static JSBool
+mem_get32(JSXDRState *xdr, uint32 *lp)
+{
+ MEM_LEFT(xdr, 4);
+ *lp = *(uint32 *)MEM_DATA(xdr);
+ MEM_INCR(xdr, 4);
+ return JS_TRUE;
+}
+
+static JSBool
+mem_set32(JSXDRState *xdr, uint32 *lp)
+{
+ MEM_NEED(xdr, 4);
+ *(uint32 *)MEM_DATA(xdr) = *lp;
+ MEM_INCR(xdr, 4);
+ return JS_TRUE;
+}
+
+static JSBool
+mem_getbytes(JSXDRState *xdr, char *bytes, uint32 len)
+{
+ MEM_LEFT(xdr, len);
+ memcpy(bytes, MEM_DATA(xdr), len);
+ MEM_INCR(xdr, len);
+ return JS_TRUE;
+}
+
+static JSBool
+mem_setbytes(JSXDRState *xdr, char *bytes, uint32 len)
+{
+ MEM_NEED(xdr, len);
+ memcpy(MEM_DATA(xdr), bytes, len);
+ MEM_INCR(xdr, len);
+ return JS_TRUE;
+}
+
+static void *
+mem_raw(JSXDRState *xdr, uint32 len)
+{
+ void *data;
+ if (xdr->mode == JSXDR_ENCODE) {
+ MEM_NEED(xdr, len);
+ } else if (xdr->mode == JSXDR_DECODE) {
+ MEM_LEFT(xdr, len);
+ }
+ data = MEM_DATA(xdr);
+ MEM_INCR(xdr, len);
+ return data;
+}
+
+static JSBool
+mem_seek(JSXDRState *xdr, int32 offset, JSXDRWhence whence)
+{
+ switch (whence) {
+ case JSXDR_SEEK_CUR:
+ if ((int32)MEM_COUNT(xdr) + offset < 0) {
+ JS_ReportErrorNumber(xdr->cx, js_GetErrorMessage, NULL,
+ JSMSG_SEEK_BEYOND_START);
+ return JS_FALSE;
+ }
+ if (offset > 0)
+ MEM_NEED(xdr, offset);
+ MEM_COUNT(xdr) += offset;
+ return JS_TRUE;
+ case JSXDR_SEEK_SET:
+ if (offset < 0) {
+ JS_ReportErrorNumber(xdr->cx, js_GetErrorMessage, NULL,
+ JSMSG_SEEK_BEYOND_START);
+ return JS_FALSE;
+ }
+ if (xdr->mode == JSXDR_ENCODE) {
+ if ((uint32)offset > MEM_COUNT(xdr))
+ MEM_NEED(xdr, offset - MEM_COUNT(xdr));
+ MEM_COUNT(xdr) = offset;
+ } else {
+ if ((uint32)offset > MEM_LIMIT(xdr)) {
+ JS_ReportErrorNumber(xdr->cx, js_GetErrorMessage, NULL,
+ JSMSG_SEEK_BEYOND_END);
+ return JS_FALSE;
+ }
+ MEM_COUNT(xdr) = offset;
+ }
+ return JS_TRUE;
+ case JSXDR_SEEK_END:
+ if (offset >= 0 ||
+ xdr->mode == JSXDR_ENCODE ||
+ (int32)MEM_LIMIT(xdr) + offset < 0) {
+ JS_ReportErrorNumber(xdr->cx, js_GetErrorMessage, NULL,
+ JSMSG_END_SEEK);
+ return JS_FALSE;
+ }
+ MEM_COUNT(xdr) = MEM_LIMIT(xdr) + offset;
+ return JS_TRUE;
+ default: {
+ char numBuf[12];
+ JS_snprintf(numBuf, sizeof numBuf, "%d", whence);
+ JS_ReportErrorNumber(xdr->cx, js_GetErrorMessage, NULL,
+ JSMSG_WHITHER_WHENCE, numBuf);
+ return JS_FALSE;
+ }
+ }
+}
+
+static uint32
+mem_tell(JSXDRState *xdr)
+{
+ return MEM_COUNT(xdr);
+}
+
+static void
+mem_finalize(JSXDRState *xdr)
+{
+ JS_free(xdr->cx, MEM_BASE(xdr));
+}
+
+static JSXDROps xdrmem_ops = {
+ mem_get32, mem_set32, mem_getbytes, mem_setbytes,
+ mem_raw, mem_seek, mem_tell, mem_finalize
+};
+
+JS_PUBLIC_API(void)
+JS_XDRInitBase(JSXDRState *xdr, JSXDRMode mode, JSContext *cx)
+{
+ xdr->mode = mode;
+ xdr->cx = cx;
+ xdr->registry = NULL;
+ xdr->numclasses = xdr->maxclasses = 0;
+ xdr->reghash = NULL;
+ xdr->userdata = NULL;
+ xdr->script = NULL;
+}
+
+JS_PUBLIC_API(JSXDRState *)
+JS_XDRNewMem(JSContext *cx, JSXDRMode mode)
+{
+ JSXDRState *xdr = (JSXDRState *) JS_malloc(cx, sizeof(JSXDRMemState));
+ if (!xdr)
+ return NULL;
+ JS_XDRInitBase(xdr, mode, cx);
+ if (mode == JSXDR_ENCODE) {
+ if (!(MEM_BASE(xdr) = JS_malloc(cx, MEM_BLOCK))) {
+ JS_free(cx, xdr);
+ return NULL;
+ }
+ } else {
+ /* XXXbe ok, so better not deref MEM_BASE(xdr) if not ENCODE */
+ MEM_BASE(xdr) = NULL;
+ }
+ xdr->ops = &xdrmem_ops;
+ MEM_COUNT(xdr) = 0;
+ MEM_LIMIT(xdr) = MEM_BLOCK;
+ return xdr;
+}
+
+JS_PUBLIC_API(void *)
+JS_XDRMemGetData(JSXDRState *xdr, uint32 *lp)
+{
+ if (xdr->ops != &xdrmem_ops)
+ return NULL;
+ *lp = MEM_COUNT(xdr);
+ return MEM_BASE(xdr);
+}
+
+JS_PUBLIC_API(void)
+JS_XDRMemSetData(JSXDRState *xdr, void *data, uint32 len)
+{
+ if (xdr->ops != &xdrmem_ops)
+ return;
+ MEM_LIMIT(xdr) = len;
+ MEM_BASE(xdr) = data;
+ MEM_COUNT(xdr) = 0;
+}
+
+JS_PUBLIC_API(uint32)
+JS_XDRMemDataLeft(JSXDRState *xdr)
+{
+ if (xdr->ops != &xdrmem_ops)
+ return 0;
+ return MEM_LIMIT(xdr) - MEM_COUNT(xdr);
+}
+
+JS_PUBLIC_API(void)
+JS_XDRMemResetData(JSXDRState *xdr)
+{
+ if (xdr->ops != &xdrmem_ops)
+ return;
+ MEM_COUNT(xdr) = 0;
+}
+
+JS_PUBLIC_API(void)
+JS_XDRDestroy(JSXDRState *xdr)
+{
+ JSContext *cx = xdr->cx;
+ xdr->ops->finalize(xdr);
+ if (xdr->registry) {
+ JS_free(cx, xdr->registry);
+ if (xdr->reghash)
+ JS_DHashTableDestroy(xdr->reghash);
+ }
+ JS_free(cx, xdr);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_XDRUint8(JSXDRState *xdr, uint8 *b)
+{
+ uint32 l = *b;
+ if (!JS_XDRUint32(xdr, &l))
+ return JS_FALSE;
+ *b = (uint8) l;
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_XDRUint16(JSXDRState *xdr, uint16 *s)
+{
+ uint32 l = *s;
+ if (!JS_XDRUint32(xdr, &l))
+ return JS_FALSE;
+ *s = (uint16) l;
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_XDRUint32(JSXDRState *xdr, uint32 *lp)
+{
+ JSBool ok = JS_TRUE;
+ if (xdr->mode == JSXDR_ENCODE) {
+ uint32 xl = JSXDR_SWAB32(*lp);
+ ok = xdr->ops->set32(xdr, &xl);
+ } else if (xdr->mode == JSXDR_DECODE) {
+ ok = xdr->ops->get32(xdr, lp);
+ *lp = JSXDR_SWAB32(*lp);
+ }
+ return ok;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_XDRBytes(JSXDRState *xdr, char *bytes, uint32 len)
+{
+ uint32 padlen;
+ static char padbuf[JSXDR_ALIGN-1];
+
+ if (xdr->mode == JSXDR_ENCODE) {
+ if (!xdr->ops->setbytes(xdr, bytes, len))
+ return JS_FALSE;
+ } else {
+ if (!xdr->ops->getbytes(xdr, bytes, len))
+ return JS_FALSE;
+ }
+ len = xdr->ops->tell(xdr);
+ if (len % JSXDR_ALIGN) {
+ padlen = JSXDR_ALIGN - (len % JSXDR_ALIGN);
+ if (xdr->mode == JSXDR_ENCODE) {
+ if (!xdr->ops->setbytes(xdr, padbuf, padlen))
+ return JS_FALSE;
+ } else {
+ if (!xdr->ops->seek(xdr, padlen, JSXDR_SEEK_CUR))
+ return JS_FALSE;
+ }
+ }
+ return JS_TRUE;
+}
+
+/**
+ * Convert between a C string and the XDR representation:
+ * leading 32-bit count, then counted vector of chars,
+ * then possibly \0 padding to multiple of 4.
+ */
+JS_PUBLIC_API(JSBool)
+JS_XDRCString(JSXDRState *xdr, char **sp)
+{
+ uint32 len;
+
+ if (xdr->mode == JSXDR_ENCODE)
+ len = strlen(*sp);
+ JS_XDRUint32(xdr, &len);
+ if (xdr->mode == JSXDR_DECODE) {
+ if (!(*sp = (char *) JS_malloc(xdr->cx, len + 1)))
+ return JS_FALSE;
+ }
+ if (!JS_XDRBytes(xdr, *sp, len)) {
+ if (xdr->mode == JSXDR_DECODE)
+ JS_free(xdr->cx, *sp);
+ return JS_FALSE;
+ }
+ if (xdr->mode == JSXDR_DECODE) {
+ (*sp)[len] = '\0';
+ } else if (xdr->mode == JSXDR_FREE) {
+ JS_free(xdr->cx, *sp);
+ *sp = NULL;
+ }
+ return JS_TRUE;
+}
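+
+/*
+ * Illustrative sketch (not part of the original file): encoding the C string
+ * "hi" writes the 32-bit count 2, the bytes 'h' 'i', then two bytes of \0
+ * padding so the stream stays aligned to JSXDR_ALIGN (4), for 8 bytes total.
+ */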
+
+JS_PUBLIC_API(JSBool)
+JS_XDRCStringOrNull(JSXDRState *xdr, char **sp)
+{
+ uint32 null = (*sp == NULL);
+ if (!JS_XDRUint32(xdr, &null))
+ return JS_FALSE;
+ if (null) {
+ *sp = NULL;
+ return JS_TRUE;
+ }
+ return JS_XDRCString(xdr, sp);
+}
+
+static JSBool
+XDRChars(JSXDRState *xdr, jschar *chars, uint32 nchars)
+{
+ uint32 i, padlen, nbytes;
+ jschar *raw;
+
+ nbytes = nchars * sizeof(jschar);
+ padlen = nbytes % JSXDR_ALIGN;
+ if (padlen) {
+ padlen = JSXDR_ALIGN - padlen;
+ nbytes += padlen;
+ }
+ if (!(raw = (jschar *) xdr->ops->raw(xdr, nbytes)))
+ return JS_FALSE;
+ if (xdr->mode == JSXDR_ENCODE) {
+ for (i = 0; i != nchars; i++)
+ raw[i] = JSXDR_SWAB16(chars[i]);
+ if (padlen)
+ memset((char *)raw + nbytes - padlen, 0, padlen);
+ } else if (xdr->mode == JSXDR_DECODE) {
+ for (i = 0; i != nchars; i++)
+ chars[i] = JSXDR_SWAB16(raw[i]);
+ }
+ return JS_TRUE;
+}
+
+/*
+ * Convert between a JS (Unicode) string and the XDR representation.
+ */
+JS_PUBLIC_API(JSBool)
+JS_XDRString(JSXDRState *xdr, JSString **strp)
+{
+ uint32 nchars;
+ jschar *chars;
+
+ if (xdr->mode == JSXDR_ENCODE)
+ nchars = JSSTRING_LENGTH(*strp);
+ if (!JS_XDRUint32(xdr, &nchars))
+ return JS_FALSE;
+
+ if (xdr->mode == JSXDR_DECODE) {
+ chars = (jschar *) JS_malloc(xdr->cx, (nchars + 1) * sizeof(jschar));
+ if (!chars)
+ return JS_FALSE;
+ } else {
+ chars = JSSTRING_CHARS(*strp);
+ }
+
+ if (!XDRChars(xdr, chars, nchars))
+ goto bad;
+ if (xdr->mode == JSXDR_DECODE) {
+ chars[nchars] = 0;
+ *strp = JS_NewUCString(xdr->cx, chars, nchars);
+ if (!*strp)
+ goto bad;
+ }
+ return JS_TRUE;
+
+bad:
+ if (xdr->mode == JSXDR_DECODE)
+ JS_free(xdr->cx, chars);
+ return JS_FALSE;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_XDRStringOrNull(JSXDRState *xdr, JSString **strp)
+{
+ uint32 null = (*strp == NULL);
+ if (!JS_XDRUint32(xdr, &null))
+ return JS_FALSE;
+ if (null) {
+ *strp = NULL;
+ return JS_TRUE;
+ }
+ return JS_XDRString(xdr, strp);
+}
+
+static JSBool
+XDRDoubleValue(JSXDRState *xdr, jsdouble *dp)
+{
+ jsdpun u;
+
+ if (xdr->mode == JSXDR_ENCODE)
+ u.d = *dp;
+ if (!JS_XDRUint32(xdr, &u.s.lo) || !JS_XDRUint32(xdr, &u.s.hi))
+ return JS_FALSE;
+ if (xdr->mode == JSXDR_DECODE)
+ *dp = u.d;
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_XDRDouble(JSXDRState *xdr, jsdouble **dpp)
+{
+ jsdouble d;
+
+ if (xdr->mode == JSXDR_ENCODE)
+ d = **dpp;
+ if (!XDRDoubleValue(xdr, &d))
+ return JS_FALSE;
+ if (xdr->mode == JSXDR_DECODE) {
+ *dpp = JS_NewDouble(xdr->cx, d);
+ if (!*dpp)
+ return JS_FALSE;
+ }
+ return JS_TRUE;
+}
+
+/* These are magic pseudo-tags: see jsapi.h, near the top, for real tags. */
+#define JSVAL_XDRNULL 0x8
+#define JSVAL_XDRVOID 0xA
+
+static JSBool
+XDRValueBody(JSXDRState *xdr, uint32 type, jsval *vp)
+{
+ switch (type) {
+ case JSVAL_XDRNULL:
+ *vp = JSVAL_NULL;
+ break;
+ case JSVAL_XDRVOID:
+ *vp = JSVAL_VOID;
+ break;
+ case JSVAL_STRING: {
+ JSString *str;
+ if (xdr->mode == JSXDR_ENCODE)
+ str = JSVAL_TO_STRING(*vp);
+ if (!JS_XDRString(xdr, &str))
+ return JS_FALSE;
+ if (xdr->mode == JSXDR_DECODE)
+ *vp = STRING_TO_JSVAL(str);
+ break;
+ }
+ case JSVAL_DOUBLE: {
+ jsdouble *dp;
+ if (xdr->mode == JSXDR_ENCODE)
+ dp = JSVAL_TO_DOUBLE(*vp);
+ if (!JS_XDRDouble(xdr, &dp))
+ return JS_FALSE;
+ if (xdr->mode == JSXDR_DECODE)
+ *vp = DOUBLE_TO_JSVAL(dp);
+ break;
+ }
+ case JSVAL_OBJECT: {
+ JSObject *obj;
+ if (xdr->mode == JSXDR_ENCODE)
+ obj = JSVAL_TO_OBJECT(*vp);
+ if (!js_XDRObject(xdr, &obj))
+ return JS_FALSE;
+ if (xdr->mode == JSXDR_DECODE)
+ *vp = OBJECT_TO_JSVAL(obj);
+ break;
+ }
+ case JSVAL_BOOLEAN: {
+ uint32 b;
+ if (xdr->mode == JSXDR_ENCODE)
+ b = (uint32) JSVAL_TO_BOOLEAN(*vp);
+ if (!JS_XDRUint32(xdr, &b))
+ return JS_FALSE;
+ if (xdr->mode == JSXDR_DECODE)
+ *vp = BOOLEAN_TO_JSVAL((JSBool) b);
+ break;
+ }
+ default: {
+ uint32 i;
+
+ JS_ASSERT(type & JSVAL_INT);
+ if (xdr->mode == JSXDR_ENCODE)
+ i = (uint32) JSVAL_TO_INT(*vp);
+ if (!JS_XDRUint32(xdr, &i))
+ return JS_FALSE;
+ if (xdr->mode == JSXDR_DECODE)
+ *vp = INT_TO_JSVAL((int32) i);
+ break;
+ }
+ }
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_XDRValue(JSXDRState *xdr, jsval *vp)
+{
+ uint32 type;
+
+ if (xdr->mode == JSXDR_ENCODE) {
+ if (JSVAL_IS_NULL(*vp))
+ type = JSVAL_XDRNULL;
+ else if (JSVAL_IS_VOID(*vp))
+ type = JSVAL_XDRVOID;
+ else
+ type = JSVAL_TAG(*vp);
+ }
+ return JS_XDRUint32(xdr, &type) && XDRValueBody(xdr, type, vp);
+}
+
+JSBool
+js_XDRAtom(JSXDRState *xdr, JSAtom **atomp)
+{
+ jsval v;
+ uint32 type;
+ jsdouble d;
+ JSAtom *atom;
+
+ if (xdr->mode == JSXDR_ENCODE) {
+ v = ATOM_KEY(*atomp);
+ return JS_XDRValue(xdr, &v);
+ }
+
+ /*
+ * Inline JS_XDRValue when decoding to avoid creation of GC things when
+ * the corresponding atom already exists. See bug 321985.
+ */
+ if (!JS_XDRUint32(xdr, &type))
+ return JS_FALSE;
+ if (type == JSVAL_STRING)
+ return js_XDRStringAtom(xdr, atomp);
+
+ if (type == JSVAL_DOUBLE) {
+ if (!XDRDoubleValue(xdr, &d))
+ return JS_FALSE;
+ atom = js_AtomizeDouble(xdr->cx, d, 0);
+ } else {
+ if (!XDRValueBody(xdr, type, &v))
+ return JS_FALSE;
+ atom = js_AtomizeValue(xdr->cx, v, 0);
+ }
+
+ if (!atom)
+ return JS_FALSE;
+ *atomp = atom;
+ return JS_TRUE;
+}
+
+extern JSBool
+js_XDRStringAtom(JSXDRState *xdr, JSAtom **atomp)
+{
+ JSString *str;
+ uint32 nchars;
+ JSAtom *atom;
+ JSContext *cx;
+ void *mark;
+ jschar *chars;
+
+ if (xdr->mode == JSXDR_ENCODE) {
+ JS_ASSERT(ATOM_IS_STRING(*atomp));
+ str = ATOM_TO_STRING(*atomp);
+ return JS_XDRString(xdr, &str);
+ }
+
+ /*
+ * Inline JS_XDRString when decoding to avoid JSString allocation
+ * for already existing atoms. See bug 321985.
+ */
+ if (!JS_XDRUint32(xdr, &nchars))
+ return JS_FALSE;
+ atom = NULL;
+ cx = xdr->cx;
+ mark = JS_ARENA_MARK(&cx->tempPool);
+ JS_ARENA_ALLOCATE_CAST(chars, jschar *, &cx->tempPool,
+ nchars * sizeof(jschar));
+ if (!chars)
+ JS_ReportOutOfMemory(cx);
+ else if (XDRChars(xdr, chars, nchars))
+ atom = js_AtomizeChars(cx, chars, nchars, 0);
+ JS_ARENA_RELEASE(&cx->tempPool, mark);
+ if (!atom)
+ return JS_FALSE;
+ *atomp = atom;
+ return JS_TRUE;
+}
+
+/*
+ * FIXME: This performs lossy conversion; we need to switch to
+ * js_XDRStringAtom while still allowing older XDR files to be read. See
+ * bug 325202.
+ */
+JSBool
+js_XDRCStringAtom(JSXDRState *xdr, JSAtom **atomp)
+{
+ char *bytes;
+ uint32 nbytes;
+ JSAtom *atom;
+ JSContext *cx;
+ void *mark;
+
+ if (xdr->mode == JSXDR_ENCODE) {
+ JS_ASSERT(ATOM_IS_STRING(*atomp));
+ bytes = JS_GetStringBytes(ATOM_TO_STRING(*atomp));
+ return JS_XDRCString(xdr, &bytes);
+ }
+
+ /*
+ * Inline JS_XDRCString when decoding to avoid mallocing a temporary buffer
+ * just to free it after atomization. See bug 321985.
+ */
+ if (!JS_XDRUint32(xdr, &nbytes))
+ return JS_FALSE;
+ atom = NULL;
+ cx = xdr->cx;
+ mark = JS_ARENA_MARK(&cx->tempPool);
+ JS_ARENA_ALLOCATE_CAST(bytes, char *, &cx->tempPool,
+ nbytes * sizeof *bytes);
+ if (!bytes)
+ JS_ReportOutOfMemory(cx);
+ else if (JS_XDRBytes(xdr, bytes, nbytes))
+ atom = js_Atomize(cx, bytes, nbytes, 0);
+ JS_ARENA_RELEASE(&cx->tempPool, mark);
+ if (!atom)
+ return JS_FALSE;
+ *atomp = atom;
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_XDRScript(JSXDRState *xdr, JSScript **scriptp)
+{
+ if (!js_XDRScript(xdr, scriptp, NULL))
+ return JS_FALSE;
+ if (xdr->mode == JSXDR_DECODE)
+ js_CallNewScriptHook(xdr->cx, *scriptp, NULL);
+ return JS_TRUE;
+}
+
+#define CLASS_REGISTRY_MIN 8
+#define CLASS_INDEX_TO_ID(i) ((i)+1)
+#define CLASS_ID_TO_INDEX(id) ((id)-1)
+
+typedef struct JSRegHashEntry {
+ JSDHashEntryHdr hdr;
+ const char *name;
+ uint32 index;
+} JSRegHashEntry;
+
+JS_PUBLIC_API(JSBool)
+JS_XDRRegisterClass(JSXDRState *xdr, JSClass *clasp, uint32 *idp)
+{
+ uintN numclasses, maxclasses;
+ JSClass **registry;
+
+ numclasses = xdr->numclasses;
+ maxclasses = xdr->maxclasses;
+ if (numclasses == maxclasses) {
+ maxclasses = (maxclasses == 0) ? CLASS_REGISTRY_MIN : maxclasses << 1;
+ registry = (JSClass **)
+ JS_realloc(xdr->cx, xdr->registry, maxclasses * sizeof(JSClass *));
+ if (!registry)
+ return JS_FALSE;
+ xdr->registry = registry;
+ xdr->maxclasses = maxclasses;
+ } else {
+ JS_ASSERT(numclasses && numclasses < maxclasses);
+ registry = xdr->registry;
+ }
+
+ registry[numclasses] = clasp;
+ if (xdr->reghash) {
+ JSRegHashEntry *entry = (JSRegHashEntry *)
+ JS_DHashTableOperate(xdr->reghash, clasp->name, JS_DHASH_ADD);
+ if (!entry) {
+ JS_ReportOutOfMemory(xdr->cx);
+ return JS_FALSE;
+ }
+ entry->name = clasp->name;
+ entry->index = numclasses;
+ }
+ *idp = CLASS_INDEX_TO_ID(numclasses);
+ xdr->numclasses = ++numclasses;
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(uint32)
+JS_XDRFindClassIdByName(JSXDRState *xdr, const char *name)
+{
+ uintN i, numclasses;
+
+ numclasses = xdr->numclasses;
+ if (numclasses >= 10) {
+ JSRegHashEntry *entry;
+
+ /* Bootstrap reghash from registry on first overpopulated Find. */
+ if (!xdr->reghash) {
+ xdr->reghash = JS_NewDHashTable(JS_DHashGetStubOps(), NULL,
+ sizeof(JSRegHashEntry),
+ numclasses);
+ if (xdr->reghash) {
+ for (i = 0; i < numclasses; i++) {
+ JSClass *clasp = xdr->registry[i];
+ entry = (JSRegHashEntry *)
+ JS_DHashTableOperate(xdr->reghash, clasp->name,
+ JS_DHASH_ADD);
+ entry->name = clasp->name;
+ entry->index = i;
+ }
+ }
+ }
+
+ /* If we managed to create reghash, use it for O(1) Find. */
+ if (xdr->reghash) {
+ entry = (JSRegHashEntry *)
+ JS_DHashTableOperate(xdr->reghash, name, JS_DHASH_LOOKUP);
+ if (JS_DHASH_ENTRY_IS_BUSY(&entry->hdr))
+ return CLASS_INDEX_TO_ID(entry->index);
+ }
+ }
+
+ /* Only a few classes, or we couldn't malloc reghash: use linear search. */
+ for (i = 0; i < numclasses; i++) {
+ if (!strcmp(name, xdr->registry[i]->name))
+ return CLASS_INDEX_TO_ID(i);
+ }
+ return 0;
+}
+
+JS_PUBLIC_API(JSClass *)
+JS_XDRFindClassById(JSXDRState *xdr, uint32 id)
+{
+ uintN i = CLASS_ID_TO_INDEX(id);
+
+ if (i >= xdr->numclasses)
+ return NULL;
+ return xdr->registry[i];
+}
+
+#endif /* JS_HAS_XDR */
diff --git a/third_party/js-1.7/jsxdrapi.h b/third_party/js-1.7/jsxdrapi.h
new file mode 100644
index 0000000..35d9918
--- /dev/null
+++ b/third_party/js-1.7/jsxdrapi.h
@@ -0,0 +1,223 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sw=4 et tw=78:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsxdrapi_h___
+#define jsxdrapi_h___
+
+/*
+ * JS external data representation interface API.
+ *
+ * The XDR system is comprised of three major parts:
+ *
+ * - the state serialization/deserialization APIs, which allow consumers
+ * of the API to serialize JS runtime state (script bytecodes, atom maps,
+ * object graphs, etc.) for later restoration. These portions
+ * are implemented in various appropriate files, such as jsscript.c
+ * for the script portions and jsobj.c for object state.
+ * - the callback APIs through which the runtime requests an opaque
+ * representation of a native object, and through which the runtime
+ * constructs a live native object from an opaque representation. These
+ * portions are the responsibility of the native object implementor.
+ * - utility functions for en/decoding of primitive types, such as
+ * JSStrings. This portion is implemented in jsxdrapi.c.
+ *
+ * Spiritually guided by Sun's XDR, where appropriate.
+ */
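+
+/*
+ * Editor's illustrative sketch (not part of the original SpiderMonkey
+ * source): round-tripping a uint32 through the in-memory XDR streams
+ * declared below. Kept under "#if 0" because it is documentation only;
+ * error paths are abbreviated and would leak the XDR states in real code.
+ */
+#if 0
+static JSBool
+XDRRoundTripExample(JSContext *cx, uint32 valueIn, uint32 *valueOut)
+{
+    JSXDRState *enc, *dec;
+    void *data;
+    uint32 len;
+
+    enc = JS_XDRNewMem(cx, JSXDR_ENCODE);
+    if (!enc || !JS_XDRUint32(enc, &valueIn))
+        return JS_FALSE;
+    data = JS_XDRMemGetData(enc, &len);     /* little-endian serialized form */
+
+    dec = JS_XDRNewMem(cx, JSXDR_DECODE);
+    if (!dec)
+        return JS_FALSE;
+    JS_XDRMemSetData(dec, data, len);       /* decoder borrows enc's buffer */
+    if (!JS_XDRUint32(dec, valueOut))
+        return JS_FALSE;
+
+    JS_XDRMemSetData(dec, NULL, 0);         /* detach so enc keeps ownership */
+    JS_XDRDestroy(dec);
+    JS_XDRDestroy(enc);
+    return JS_TRUE;
+}
+#endif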
+
+#include "jspubtd.h"
+#include "jsprvtd.h"
+
+JS_BEGIN_EXTERN_C
+
+/* We use little-endian byteorder for all encoded data */
+
+#if defined IS_LITTLE_ENDIAN
+#define JSXDR_SWAB32(x) x
+#define JSXDR_SWAB16(x) x
+#elif defined IS_BIG_ENDIAN
+#define JSXDR_SWAB32(x) (((uint32)(x) >> 24) | \
+ (((uint32)(x) >> 8) & 0xff00) | \
+ (((uint32)(x) << 8) & 0xff0000) | \
+ ((uint32)(x) << 24))
+#define JSXDR_SWAB16(x) (((uint16)(x) >> 8) | ((uint16)(x) << 8))
+#else
+#error "unknown byte order"
+#endif
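+
+/*
+ * Editor's illustrative sketch (not part of the original source): a stream
+ * implementation applies JSXDR_SWAB32 before storing a word, so the encoded
+ * bytes come out little-endian on any host (the macro is a no-op on
+ * little-endian machines). Needs <string.h> for memcpy.
+ */
+#if 0
+static void
+StoreUint32LE(uint32 hostValue, char *buf)
+{
+    uint32 w = JSXDR_SWAB32(hostValue);
+
+    memcpy(buf, &w, sizeof w);
+}
+#endif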
+
+#define JSXDR_ALIGN 4
+
+typedef enum JSXDRMode {
+ JSXDR_ENCODE,
+ JSXDR_DECODE,
+ JSXDR_FREE
+} JSXDRMode;
+
+typedef enum JSXDRWhence {
+ JSXDR_SEEK_SET,
+ JSXDR_SEEK_CUR,
+ JSXDR_SEEK_END
+} JSXDRWhence;
+
+typedef struct JSXDROps {
+ JSBool (*get32)(JSXDRState *, uint32 *);
+ JSBool (*set32)(JSXDRState *, uint32 *);
+ JSBool (*getbytes)(JSXDRState *, char *, uint32);
+ JSBool (*setbytes)(JSXDRState *, char *, uint32);
+ void * (*raw)(JSXDRState *, uint32);
+ JSBool (*seek)(JSXDRState *, int32, JSXDRWhence);
+ uint32 (*tell)(JSXDRState *);
+ void (*finalize)(JSXDRState *);
+} JSXDROps;
+
+struct JSXDRState {
+ JSXDRMode mode;
+ JSXDROps *ops;
+ JSContext *cx;
+ JSClass **registry;
+ uintN numclasses;
+ uintN maxclasses;
+ void *reghash;
+ void *userdata;
+ JSScript *script;
+};
+
+extern JS_PUBLIC_API(void)
+JS_XDRInitBase(JSXDRState *xdr, JSXDRMode mode, JSContext *cx);
+
+extern JS_PUBLIC_API(JSXDRState *)
+JS_XDRNewMem(JSContext *cx, JSXDRMode mode);
+
+extern JS_PUBLIC_API(void *)
+JS_XDRMemGetData(JSXDRState *xdr, uint32 *lp);
+
+extern JS_PUBLIC_API(void)
+JS_XDRMemSetData(JSXDRState *xdr, void *data, uint32 len);
+
+extern JS_PUBLIC_API(uint32)
+JS_XDRMemDataLeft(JSXDRState *xdr);
+
+extern JS_PUBLIC_API(void)
+JS_XDRMemResetData(JSXDRState *xdr);
+
+extern JS_PUBLIC_API(void)
+JS_XDRDestroy(JSXDRState *xdr);
+
+extern JS_PUBLIC_API(JSBool)
+JS_XDRUint8(JSXDRState *xdr, uint8 *b);
+
+extern JS_PUBLIC_API(JSBool)
+JS_XDRUint16(JSXDRState *xdr, uint16 *s);
+
+extern JS_PUBLIC_API(JSBool)
+JS_XDRUint32(JSXDRState *xdr, uint32 *lp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_XDRBytes(JSXDRState *xdr, char *bytes, uint32 len);
+
+extern JS_PUBLIC_API(JSBool)
+JS_XDRCString(JSXDRState *xdr, char **sp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_XDRCStringOrNull(JSXDRState *xdr, char **sp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_XDRString(JSXDRState *xdr, JSString **strp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_XDRStringOrNull(JSXDRState *xdr, JSString **strp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_XDRDouble(JSXDRState *xdr, jsdouble **dp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_XDRValue(JSXDRState *xdr, jsval *vp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_XDRScript(JSXDRState *xdr, JSScript **scriptp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_XDRRegisterClass(JSXDRState *xdr, JSClass *clasp, uint32 *lp);
+
+extern JS_PUBLIC_API(uint32)
+JS_XDRFindClassIdByName(JSXDRState *xdr, const char *name);
+
+extern JS_PUBLIC_API(JSClass *)
+JS_XDRFindClassById(JSXDRState *xdr, uint32 id);
+
+/*
+ * Magic numbers.
+ */
+#define JSXDR_MAGIC_SCRIPT_1 0xdead0001
+#define JSXDR_MAGIC_SCRIPT_2 0xdead0002
+#define JSXDR_MAGIC_SCRIPT_3 0xdead0003
+#define JSXDR_MAGIC_SCRIPT_4 0xdead0004
+#define JSXDR_MAGIC_SCRIPT_5 0xdead0005
+#define JSXDR_MAGIC_SCRIPT_CURRENT JSXDR_MAGIC_SCRIPT_5
+
+/*
+ * Bytecode version number. Decrement the second term whenever JS bytecode
+ * changes incompatibly.
+ *
+ * This version number should be XDR'ed once near the front of any file or
+ * larger storage unit containing XDR'ed bytecode and other data, and checked
+ * before deserialization of bytecode. If the saved version does not match
+ * the current version, abort deserialization and invalidate the file.
+ */
+#define JSXDR_BYTECODE_VERSION (0xb973c0de - 16)
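+
+/*
+ * Editor's illustrative sketch (not part of the original source): the version
+ * check described above, as a caller would perform it. In ENCODE mode this
+ * writes the constant; in DECODE mode it reads the saved value and rejects a
+ * mismatch.
+ */
+#if 0
+static JSBool
+XDRCheckBytecodeVersion(JSXDRState *xdr)
+{
+    uint32 version = JSXDR_BYTECODE_VERSION;
+
+    if (!JS_XDRUint32(xdr, &version))
+        return JS_FALSE;
+    if (xdr->mode == JSXDR_DECODE && version != JSXDR_BYTECODE_VERSION) {
+        JS_ReportError(xdr->cx, "XDR bytecode version mismatch");
+        return JS_FALSE;
+    }
+    return JS_TRUE;
+}
+#endif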
+
+/*
+ * Library-private functions.
+ */
+extern JSBool
+js_XDRAtom(JSXDRState *xdr, JSAtom **atomp);
+
+extern JSBool
+js_XDRStringAtom(JSXDRState *xdr, JSAtom **atomp);
+
+/*
+ * FIXME: This is a non-Unicode version of js_XDRStringAtom that performs
+ * lossy conversion. Do not use it in new code! See bug 325202.
+ */
+extern JSBool
+js_XDRCStringAtom(JSXDRState *xdr, JSAtom **atomp);
+
+JS_END_EXTERN_C
+
+#endif /* ! jsxdrapi_h___ */
diff --git a/third_party/js-1.7/jsxml.c b/third_party/js-1.7/jsxml.c
new file mode 100644
index 0000000..1266255
--- /dev/null
+++ b/third_party/js-1.7/jsxml.c
@@ -0,0 +1,8357 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=4 sw=4 et tw=78:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is SpiderMonkey E4X code, released August, 2004.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#include "jsstddef.h"
+#include "jsconfig.h"
+
+#if JS_HAS_XML_SUPPORT
+
+#include <math.h>
+#include <stdlib.h>
+#include <string.h>
+#include "jstypes.h"
+#include "jsbit.h"
+#include "jsprf.h"
+#include "jsutil.h"
+#include "jsapi.h"
+#include "jsarray.h"
+#include "jsatom.h"
+#include "jsbool.h"
+#include "jscntxt.h"
+#include "jsfun.h"
+#include "jsgc.h"
+#include "jsinterp.h"
+#include "jslock.h"
+#include "jsnum.h"
+#include "jsobj.h"
+#include "jsopcode.h"
+#include "jsparse.h"
+#include "jsscan.h"
+#include "jsscope.h"
+#include "jsscript.h"
+#include "jsstr.h"
+#include "jsxml.h"
+
+#ifdef DEBUG
+#include <string.h> /* for #ifdef DEBUG memset calls */
+#endif
+
+/*
+ * NOTES
+ * - in the js shell, you must use the -x command line option, or call
+ * options('xml') before compiling anything that uses XML literals
+ *
+ * TODO
+ * - XXXbe patrol
+ * - Fuse objects and their JSXML* private data into single GC-things
+ * - fix function::foo vs. x.(foo == 42) collision using proper namespacing
+ * - fix the !TCF_HAS_DEFXMLNS optimization in js_FoldConstants
+ * - JSCLASS_DOCUMENT_OBSERVER support -- live two-way binding to Gecko's DOM!
+ * - JS_TypeOfValue sure could use a cleaner interface to "types"
+ */
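+
+/*
+ * Editor's illustrative sketch (not part of the original source): an embedder
+ * enables E4X for a context from C via the public options API, the
+ * embedding-side counterpart of the shell's -x flag / options('xml') noted
+ * above (JSOPTION_XML comes from jsapi.h).
+ */
+#if 0
+static void
+EnableE4XForContext(JSContext *cx)
+{
+    JS_SetOptions(cx, JS_GetOptions(cx) | JSOPTION_XML);
+}
+#endif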
+
+#ifdef DEBUG_brendan
+#define METERING 1
+#endif
+
+#ifdef METERING
+static struct {
+ jsrefcount qname;
+ jsrefcount qnameobj;
+ jsrefcount liveqname;
+ jsrefcount liveqnameobj;
+ jsrefcount namespace;
+ jsrefcount namespaceobj;
+ jsrefcount livenamespace;
+ jsrefcount livenamespaceobj;
+ jsrefcount xml;
+ jsrefcount xmlobj;
+ jsrefcount livexml;
+ jsrefcount livexmlobj;
+} xml_stats;
+
+#define METER(x) JS_ATOMIC_INCREMENT(&(x))
+#define UNMETER(x) JS_ATOMIC_DECREMENT(&(x))
+#else
+#define METER(x) /* nothing */
+#define UNMETER(x) /* nothing */
+#endif
+
+/*
+ * Random utilities and global functions.
+ */
+const char js_isXMLName_str[] = "isXMLName";
+const char js_XMLList_str[] = "XMLList";
+const char js_localName_str[] = "localName";
+const char js_xml_parent_str[] = "parent";
+const char js_prefix_str[] = "prefix";
+const char js_toXMLString_str[] = "toXMLString";
+const char js_uri_str[] = "uri";
+
+const char js_amp_entity_str[] = "&amp;";
+const char js_gt_entity_str[] = "&gt;";
+const char js_lt_entity_str[] = "&lt;";
+const char js_quot_entity_str[] = "&quot;";
+
+#define IS_EMPTY(str) (JSSTRING_LENGTH(str) == 0)
+#define IS_STAR(str) (JSSTRING_LENGTH(str) == 1 && *JSSTRING_CHARS(str) == '*')
+
+static JSBool
+xml_isXMLName(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ *rval = BOOLEAN_TO_JSVAL(js_IsXMLName(cx, argv[0]));
+ return JS_TRUE;
+}
+
+/*
+ * Namespace class and library functions.
+ */
+enum namespace_tinyid {
+ NAMESPACE_PREFIX = -1,
+ NAMESPACE_URI = -2
+};
+
+static JSBool
+namespace_getProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ JSXMLNamespace *ns;
+
+ if (!JSVAL_IS_INT(id))
+ return JS_TRUE;
+
+ ns = (JSXMLNamespace *)
+ JS_GetInstancePrivate(cx, obj, &js_NamespaceClass.base, NULL);
+ if (!ns)
+ return JS_TRUE;
+
+ switch (JSVAL_TO_INT(id)) {
+ case NAMESPACE_PREFIX:
+ *vp = ns->prefix ? STRING_TO_JSVAL(ns->prefix) : JSVAL_VOID;
+ break;
+ case NAMESPACE_URI:
+ *vp = STRING_TO_JSVAL(ns->uri);
+ break;
+ }
+ return JS_TRUE;
+}
+
+static void
+namespace_finalize(JSContext *cx, JSObject *obj)
+{
+ JSXMLNamespace *ns;
+ JSRuntime *rt;
+
+ ns = (JSXMLNamespace *) JS_GetPrivate(cx, obj);
+ if (!ns)
+ return;
+ JS_ASSERT(ns->object == obj);
+ ns->object = NULL;
+ UNMETER(xml_stats.livenamespaceobj);
+
+ rt = cx->runtime;
+ if (rt->functionNamespaceObject == obj)
+ rt->functionNamespaceObject = NULL;
+}
+
+static void
+namespace_mark_vector(JSContext *cx, JSXMLNamespace **vec, uint32 len)
+{
+ uint32 i;
+ JSXMLNamespace *ns;
+
+ for (i = 0; i < len; i++) {
+ ns = vec[i];
+ {
+#ifdef GC_MARK_DEBUG
+ char buf[100];
+
+ JS_snprintf(buf, sizeof buf, "%s=%s",
+ ns->prefix ? JS_GetStringBytes(ns->prefix) : "",
+ JS_GetStringBytes(ns->uri));
+#endif
+ GC_MARK(cx, ns, buf);
+ }
+ }
+}
+
+static uint32
+namespace_mark(JSContext *cx, JSObject *obj, void *arg)
+{
+ JSXMLNamespace *ns;
+
+ ns = (JSXMLNamespace *) JS_GetPrivate(cx, obj);
+ GC_MARK(cx, ns, "private");
+ return 0;
+}
+
+static JSBool
+namespace_equality(JSContext *cx, JSObject *obj, jsval v, JSBool *bp)
+{
+ JSXMLNamespace *ns, *ns2;
+ JSObject *obj2;
+
+ ns = (JSXMLNamespace *) JS_GetPrivate(cx, obj);
+ JS_ASSERT(JSVAL_IS_OBJECT(v));
+ obj2 = JSVAL_TO_OBJECT(v);
+ if (!obj2 || OBJ_GET_CLASS(cx, obj2) != &js_NamespaceClass.base) {
+ *bp = JS_FALSE;
+ } else {
+ ns2 = (JSXMLNamespace *) JS_GetPrivate(cx, obj2);
+ *bp = js_EqualStrings(ns->uri, ns2->uri);
+ }
+ return JS_TRUE;
+}
+
+JS_FRIEND_DATA(JSExtendedClass) js_NamespaceClass = {
+ { "Namespace",
+ JSCLASS_HAS_PRIVATE | JSCLASS_CONSTRUCT_PROTOTYPE | JSCLASS_IS_EXTENDED |
+ JSCLASS_HAS_CACHED_PROTO(JSProto_Namespace),
+ JS_PropertyStub, JS_PropertyStub, namespace_getProperty, NULL,
+ JS_EnumerateStub, JS_ResolveStub, JS_ConvertStub, namespace_finalize,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, namespace_mark, NULL },
+ namespace_equality,NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL
+};
+
+#define NAMESPACE_ATTRS \
+ (JSPROP_ENUMERATE | JSPROP_READONLY | JSPROP_PERMANENT | JSPROP_SHARED)
+
+static JSPropertySpec namespace_props[] = {
+ {js_prefix_str, NAMESPACE_PREFIX, NAMESPACE_ATTRS, 0, 0},
+ {js_uri_str, NAMESPACE_URI, NAMESPACE_ATTRS, 0, 0},
+ {0,0,0,0,0}
+};
+
+static JSBool
+namespace_toString(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSXMLNamespace *ns;
+
+ ns = (JSXMLNamespace *)
+ JS_GetInstancePrivate(cx, obj, &js_NamespaceClass.base, argv);
+ if (!ns)
+ return JS_FALSE;
+
+ *rval = STRING_TO_JSVAL(ns->uri);
+ return JS_TRUE;
+}
+
+static JSFunctionSpec namespace_methods[] = {
+ {js_toString_str, namespace_toString, 0,0,0},
+ {0,0,0,0,0}
+};
+
+JSXMLNamespace *
+js_NewXMLNamespace(JSContext *cx, JSString *prefix, JSString *uri,
+ JSBool declared)
+{
+ JSXMLNamespace *ns;
+
+ ns = (JSXMLNamespace *)
+ js_NewGCThing(cx, GCX_NAMESPACE, sizeof(JSXMLNamespace));
+ if (!ns)
+ return NULL;
+ ns->object = NULL;
+ ns->prefix = prefix;
+ ns->uri = uri;
+ ns->declared = declared;
+ METER(xml_stats.namespace);
+ METER(xml_stats.livenamespace);
+ return ns;
+}
+
+void
+js_MarkXMLNamespace(JSContext *cx, JSXMLNamespace *ns)
+{
+ GC_MARK(cx, ns->object, "object");
+ GC_MARK(cx, ns->prefix, "prefix");
+ GC_MARK(cx, ns->uri, "uri");
+}
+
+void
+js_FinalizeXMLNamespace(JSContext *cx, JSXMLNamespace *ns)
+{
+ UNMETER(xml_stats.livenamespace);
+}
+
+JSObject *
+js_NewXMLNamespaceObject(JSContext *cx, JSString *prefix, JSString *uri,
+ JSBool declared)
+{
+ JSXMLNamespace *ns;
+
+ ns = js_NewXMLNamespace(cx, prefix, uri, declared);
+ if (!ns)
+ return NULL;
+ return js_GetXMLNamespaceObject(cx, ns);
+}
+
+JSObject *
+js_GetXMLNamespaceObject(JSContext *cx, JSXMLNamespace *ns)
+{
+ JSObject *obj;
+
+ obj = ns->object;
+ if (obj) {
+ JS_ASSERT(JS_GetPrivate(cx, obj) == ns);
+ return obj;
+ }
+ obj = js_NewObject(cx, &js_NamespaceClass.base, NULL, NULL);
+ if (!obj || !JS_SetPrivate(cx, obj, ns)) {
+ cx->weakRoots.newborn[GCX_OBJECT] = NULL;
+ return NULL;
+ }
+ ns->object = obj;
+ METER(xml_stats.namespaceobj);
+ METER(xml_stats.livenamespaceobj);
+ return obj;
+}
+
+/*
+ * QName class and library functions.
+ */
+enum qname_tinyid {
+ QNAME_URI = -1,
+ QNAME_LOCALNAME = -2
+};
+
+static JSBool
+qname_getProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ JSXMLQName *qn;
+
+ if (!JSVAL_IS_INT(id))
+ return JS_TRUE;
+
+ qn = (JSXMLQName *)
+ JS_GetInstancePrivate(cx, obj, &js_QNameClass.base, NULL);
+ if (!qn)
+ return JS_TRUE;
+
+ switch (JSVAL_TO_INT(id)) {
+ case QNAME_URI:
+ *vp = qn->uri ? STRING_TO_JSVAL(qn->uri) : JSVAL_NULL;
+ break;
+ case QNAME_LOCALNAME:
+ *vp = STRING_TO_JSVAL(qn->localName);
+ break;
+ }
+ return JS_TRUE;
+}
+
+static void
+qname_finalize(JSContext *cx, JSObject *obj)
+{
+ JSXMLQName *qn;
+
+ qn = (JSXMLQName *) JS_GetPrivate(cx, obj);
+ if (!qn)
+ return;
+ JS_ASSERT(qn->object == obj);
+ qn->object = NULL;
+ UNMETER(xml_stats.liveqnameobj);
+}
+
+static void
+anyname_finalize(JSContext* cx, JSObject* obj)
+{
+ JSRuntime *rt;
+
+ /* Make sure the next call to js_GetAnyName doesn't try to use obj. */
+ rt = cx->runtime;
+ if (rt->anynameObject == obj)
+ rt->anynameObject = NULL;
+
+ qname_finalize(cx, obj);
+}
+
+static uint32
+qname_mark(JSContext *cx, JSObject *obj, void *arg)
+{
+ JSXMLQName *qn;
+
+ qn = (JSXMLQName *) JS_GetPrivate(cx, obj);
+ GC_MARK(cx, qn, "private");
+ return 0;
+}
+
+static JSBool
+qname_identity(JSXMLQName *qna, JSXMLQName *qnb)
+{
+ if (!qna->uri ^ !qnb->uri)
+ return JS_FALSE;
+ if (qna->uri && !js_EqualStrings(qna->uri, qnb->uri))
+ return JS_FALSE;
+ return js_EqualStrings(qna->localName, qnb->localName);
+}
+
+static JSBool
+qname_equality(JSContext *cx, JSObject *obj, jsval v, JSBool *bp)
+{
+ JSXMLQName *qn, *qn2;
+ JSObject *obj2;
+
+ qn = (JSXMLQName *) JS_GetPrivate(cx, obj);
+ JS_ASSERT(JSVAL_IS_OBJECT(v));
+ obj2 = JSVAL_TO_OBJECT(v);
+ if (!obj2 || OBJ_GET_CLASS(cx, obj2) != &js_QNameClass.base) {
+ *bp = JS_FALSE;
+ } else {
+ qn2 = (JSXMLQName *) JS_GetPrivate(cx, obj2);
+ *bp = qname_identity(qn, qn2);
+ }
+ return JS_TRUE;
+}
+
+JS_FRIEND_DATA(JSExtendedClass) js_QNameClass = {
+ { "QName",
+ JSCLASS_HAS_PRIVATE | JSCLASS_CONSTRUCT_PROTOTYPE | JSCLASS_IS_EXTENDED |
+ JSCLASS_HAS_CACHED_PROTO(JSProto_QName),
+ JS_PropertyStub, JS_PropertyStub, qname_getProperty, NULL,
+ JS_EnumerateStub, JS_ResolveStub, JS_ConvertStub, qname_finalize,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, qname_mark, NULL },
+ qname_equality, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL
+};
+
+/*
+ * Classes for the ECMA-357-internal types AttributeName and AnyName, which
+ * are like QName, except that they have no property getters. They share the
+ * qname_toString method, and therefore are exposed as constructable objects
+ * in this implementation.
+ */
+JS_FRIEND_DATA(JSClass) js_AttributeNameClass = {
+ js_AttributeName_str,
+ JSCLASS_HAS_PRIVATE | JSCLASS_CONSTRUCT_PROTOTYPE |
+ JSCLASS_HAS_CACHED_PROTO(JSProto_AttributeName),
+ JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
+ JS_EnumerateStub, JS_ResolveStub, JS_ConvertStub, qname_finalize,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, qname_mark, NULL
+};
+
+JS_FRIEND_DATA(JSClass) js_AnyNameClass = {
+ js_AnyName_str,
+ JSCLASS_HAS_PRIVATE | JSCLASS_CONSTRUCT_PROTOTYPE |
+ JSCLASS_HAS_CACHED_PROTO(JSProto_AnyName),
+ JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
+ JS_EnumerateStub, JS_ResolveStub, JS_ConvertStub, anyname_finalize,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, qname_mark, NULL
+};
+
+#define QNAME_ATTRS \
+ (JSPROP_ENUMERATE | JSPROP_READONLY | JSPROP_PERMANENT | JSPROP_SHARED)
+
+static JSPropertySpec qname_props[] = {
+ {js_uri_str, QNAME_URI, QNAME_ATTRS, 0, 0},
+ {js_localName_str, QNAME_LOCALNAME, QNAME_ATTRS, 0, 0},
+ {0,0,0,0,0}
+};
+
+static JSBool
+qname_toString(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSClass *clasp;
+ JSXMLQName *qn;
+ JSString *str, *qualstr;
+ size_t length;
+ jschar *chars;
+
+ clasp = OBJ_GET_CLASS(cx, obj);
+ if (clasp == &js_AttributeNameClass || clasp == &js_AnyNameClass) {
+ qn = (JSXMLQName *) JS_GetPrivate(cx, obj);
+ } else {
+ qn = (JSXMLQName *)
+ JS_GetInstancePrivate(cx, obj, &js_QNameClass.base, argv);
+ if (!qn)
+ return JS_FALSE;
+ }
+
+ if (!qn->uri) {
+ /* No uri means wildcard qualifier. */
+ str = ATOM_TO_STRING(cx->runtime->atomState.starQualifierAtom);
+ } else if (IS_EMPTY(qn->uri)) {
+ /* Empty string for uri means localName is in no namespace. */
+ str = cx->runtime->emptyString;
+ } else {
+ qualstr = ATOM_TO_STRING(cx->runtime->atomState.qualifierAtom);
+ str = js_ConcatStrings(cx, qn->uri, qualstr);
+ if (!str)
+ return JS_FALSE;
+ }
+ str = js_ConcatStrings(cx, str, qn->localName);
+ if (!str)
+ return JS_FALSE;
+
+ if (str && clasp == &js_AttributeNameClass) {
+ length = JSSTRING_LENGTH(str);
+ chars = (jschar *) JS_malloc(cx, (length + 2) * sizeof(jschar));
+ if (!chars)
+ return JS_FALSE;
+ *chars = '@';
+ js_strncpy(chars + 1, JSSTRING_CHARS(str), length);
+ chars[++length] = 0;
+ str = js_NewString(cx, chars, length, 0);
+ if (!str) {
+ JS_free(cx, chars);
+ return JS_FALSE;
+ }
+ }
+
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+
+static JSFunctionSpec qname_methods[] = {
+ {js_toString_str, qname_toString, 0,0,0},
+ {0,0,0,0,0}
+};
+
+JSXMLQName *
+js_NewXMLQName(JSContext *cx, JSString *uri, JSString *prefix,
+ JSString *localName)
+{
+ JSXMLQName *qn;
+
+ qn = (JSXMLQName *) js_NewGCThing(cx, GCX_QNAME, sizeof(JSXMLQName));
+ if (!qn)
+ return NULL;
+ qn->object = NULL;
+ qn->uri = uri;
+ qn->prefix = prefix;
+ qn->localName = localName;
+ METER(xml_stats.qname);
+ METER(xml_stats.liveqname);
+ return qn;
+}
+
+void
+js_MarkXMLQName(JSContext *cx, JSXMLQName *qn)
+{
+ GC_MARK(cx, qn->object, "object");
+ GC_MARK(cx, qn->uri, "uri");
+ GC_MARK(cx, qn->prefix, "prefix");
+ GC_MARK(cx, qn->localName, "localName");
+}
+
+void
+js_FinalizeXMLQName(JSContext *cx, JSXMLQName *qn)
+{
+ UNMETER(xml_stats.liveqname);
+}
+
+JSObject *
+js_NewXMLQNameObject(JSContext *cx, JSString *uri, JSString *prefix,
+ JSString *localName)
+{
+ JSXMLQName *qn;
+
+ qn = js_NewXMLQName(cx, uri, prefix, localName);
+ if (!qn)
+ return NULL;
+ return js_GetXMLQNameObject(cx, qn);
+}
+
+JSObject *
+js_GetXMLQNameObject(JSContext *cx, JSXMLQName *qn)
+{
+ JSObject *obj;
+
+ obj = qn->object;
+ if (obj) {
+ JS_ASSERT(JS_GetPrivate(cx, obj) == qn);
+ return obj;
+ }
+ obj = js_NewObject(cx, &js_QNameClass.base, NULL, NULL);
+ if (!obj || !JS_SetPrivate(cx, obj, qn)) {
+ cx->weakRoots.newborn[GCX_OBJECT] = NULL;
+ return NULL;
+ }
+ qn->object = obj;
+ METER(xml_stats.qnameobj);
+ METER(xml_stats.liveqnameobj);
+ return obj;
+}
+
+JSObject *
+js_GetAttributeNameObject(JSContext *cx, JSXMLQName *qn)
+{
+ JSObject *obj;
+
+ obj = qn->object;
+ if (obj) {
+ if (OBJ_GET_CLASS(cx, obj) == &js_AttributeNameClass)
+ return obj;
+ qn = js_NewXMLQName(cx, qn->uri, qn->prefix, qn->localName);
+ if (!qn)
+ return NULL;
+ }
+
+ obj = js_NewObject(cx, &js_AttributeNameClass, NULL, NULL);
+ if (!obj || !JS_SetPrivate(cx, obj, qn)) {
+ cx->weakRoots.newborn[GCX_OBJECT] = NULL;
+ return NULL;
+ }
+
+ qn->object = obj;
+ METER(xml_stats.qnameobj);
+ METER(xml_stats.liveqnameobj);
+ return obj;
+}
+
+JSObject *
+js_ConstructXMLQNameObject(JSContext *cx, jsval nsval, jsval lnval)
+{
+ jsval argv[2];
+
+ /*
+ * ECMA-357 11.1.2,
+ * The _QualifiedIdentifier : PropertySelector :: PropertySelector_
+ * production, step 2.
+ */
+ if (!JSVAL_IS_PRIMITIVE(nsval) &&
+ OBJ_GET_CLASS(cx, JSVAL_TO_OBJECT(nsval)) == &js_AnyNameClass) {
+ nsval = JSVAL_NULL;
+ }
+
+ argv[0] = nsval;
+ argv[1] = lnval;
+ return js_ConstructObject(cx, &js_QNameClass.base, NULL, NULL, 2, argv);
+}
+
+static JSBool
+IsXMLName(const jschar *cp, size_t n)
+{
+ JSBool rv;
+ jschar c;
+
+ rv = JS_FALSE;
+ if (n != 0 && JS_ISXMLNSSTART(*cp)) {
+ while (--n != 0) {
+ c = *++cp;
+ if (!JS_ISXMLNS(c))
+ return rv;
+ }
+ rv = JS_TRUE;
+ }
+ return rv;
+}
+
+JSBool
+js_IsXMLName(JSContext *cx, jsval v)
+{
+ JSClass *clasp;
+ JSXMLQName *qn;
+ JSString *name;
+ JSErrorReporter older;
+
+ /*
+ * Inline specialization of the QName constructor called with v passed as
+ * the only argument, to compute the localName for the constructed qname,
+ * without actually allocating the object or computing its uri and prefix.
+ * See ECMA-357 13.1.2.1 step 1 and 13.3.2.
+ */
+ if (!JSVAL_IS_PRIMITIVE(v) &&
+ (clasp = OBJ_GET_CLASS(cx, JSVAL_TO_OBJECT(v)),
+ clasp == &js_QNameClass.base ||
+ clasp == &js_AttributeNameClass ||
+ clasp == &js_AnyNameClass)) {
+ qn = (JSXMLQName *) JS_GetPrivate(cx, JSVAL_TO_OBJECT(v));
+ name = qn->localName;
+ } else {
+ older = JS_SetErrorReporter(cx, NULL);
+ name = js_ValueToString(cx, v);
+ JS_SetErrorReporter(cx, older);
+ if (!name) {
+ JS_ClearPendingException(cx);
+ return JS_FALSE;
+ }
+ }
+
+ return IsXMLName(JSSTRING_CHARS(name), JSSTRING_LENGTH(name));
+}
+
+static JSBool
+Namespace(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsval urival, prefixval;
+ JSObject *uriobj;
+ JSBool isNamespace, isQName;
+ JSClass *clasp;
+ JSString *empty, *prefix;
+ JSXMLNamespace *ns, *ns2;
+ JSXMLQName *qn;
+
+ urival = argv[argc > 1];
+ isNamespace = isQName = JS_FALSE;
+ if (!JSVAL_IS_PRIMITIVE(urival)) {
+ uriobj = JSVAL_TO_OBJECT(urival);
+ clasp = OBJ_GET_CLASS(cx, uriobj);
+ isNamespace = (clasp == &js_NamespaceClass.base);
+ isQName = (clasp == &js_QNameClass.base);
+ }
+#ifdef __GNUC__ /* suppress bogus gcc warnings */
+ else uriobj = NULL;
+#endif
+
+ if (!(cx->fp->flags & JSFRAME_CONSTRUCTING)) {
+ /* Namespace called as function. */
+ if (argc == 1 && isNamespace) {
+ /* Namespace called with one Namespace argument is identity. */
+ *rval = urival;
+ return JS_TRUE;
+ }
+
+        /* Create and return a new Namespace object exactly as if constructed. */
+ obj = js_NewObject(cx, &js_NamespaceClass.base, NULL, NULL);
+ if (!obj)
+ return JS_FALSE;
+ *rval = OBJECT_TO_JSVAL(obj);
+ }
+ METER(xml_stats.namespaceobj);
+ METER(xml_stats.livenamespaceobj);
+
+ /*
+ * Create and connect private data to rooted obj early, so we don't have
+ * to worry about rooting string newborns hanging off of the private data
+ * further below.
+ */
+ empty = cx->runtime->emptyString;
+ ns = js_NewXMLNamespace(cx, empty, empty, JS_FALSE);
+ if (!ns)
+ return JS_FALSE;
+ if (!JS_SetPrivate(cx, obj, ns))
+ return JS_FALSE;
+ ns->object = obj;
+
+ if (argc == 1) {
+ if (isNamespace) {
+ ns2 = (JSXMLNamespace *) JS_GetPrivate(cx, uriobj);
+ ns->uri = ns2->uri;
+ ns->prefix = ns2->prefix;
+ } else if (isQName &&
+ (qn = (JSXMLQName *) JS_GetPrivate(cx, uriobj))->uri) {
+ ns->uri = qn->uri;
+ ns->prefix = qn->prefix;
+ } else {
+ ns->uri = js_ValueToString(cx, urival);
+ if (!ns->uri)
+ return JS_FALSE;
+
+ /* NULL here represents *undefined* in ECMA-357 13.2.2 3(c)iii. */
+ if (!IS_EMPTY(ns->uri))
+ ns->prefix = NULL;
+ }
+ } else if (argc == 2) {
+ if (isQName &&
+ (qn = (JSXMLQName *) JS_GetPrivate(cx, uriobj))->uri) {
+ ns->uri = qn->uri;
+ } else {
+ ns->uri = js_ValueToString(cx, urival);
+ if (!ns->uri)
+ return JS_FALSE;
+ }
+
+ prefixval = argv[0];
+ if (IS_EMPTY(ns->uri)) {
+ if (!JSVAL_IS_VOID(prefixval)) {
+ prefix = js_ValueToString(cx, prefixval);
+ if (!prefix)
+ return JS_FALSE;
+ if (!IS_EMPTY(prefix)) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_XML_NAMESPACE,
+ js_ValueToPrintableString(cx,
+ STRING_TO_JSVAL(prefix)));
+ return JS_FALSE;
+ }
+ }
+ } else if (JSVAL_IS_VOID(prefixval) || !js_IsXMLName(cx, prefixval)) {
+ /* NULL here represents *undefined* in ECMA-357 13.2.2 4(d) etc. */
+ ns->prefix = NULL;
+ } else {
+ prefix = js_ValueToString(cx, prefixval);
+ if (!prefix)
+ return JS_FALSE;
+ ns->prefix = prefix;
+ }
+ }
+
+ return JS_TRUE;
+}
+
+static JSBool
+QName(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsval nameval, nsval;
+ JSBool isQName, isNamespace;
+ JSXMLQName *qn;
+ JSString *uri, *prefix, *name;
+ JSObject *nsobj;
+ JSClass *clasp;
+ JSXMLNamespace *ns;
+
+ nameval = argv[argc > 1];
+ isQName =
+ !JSVAL_IS_PRIMITIVE(nameval) &&
+ OBJ_GET_CLASS(cx, JSVAL_TO_OBJECT(nameval)) == &js_QNameClass.base;
+
+ if (!(cx->fp->flags & JSFRAME_CONSTRUCTING)) {
+ /* QName called as function. */
+ if (argc == 1 && isQName) {
+ /* QName called with one QName argument is identity. */
+ *rval = nameval;
+ return JS_TRUE;
+ }
+
+ /*
+ * Create and return a new QName object exactly as if constructed.
+ * Use the constructor's clasp so we can be shared by AttributeName
+ * (see below after this function).
+ */
+ obj = js_NewObject(cx,
+ JS_ValueToFunction(cx, argv[-2])->clasp,
+ NULL, NULL);
+ if (!obj)
+ return JS_FALSE;
+ *rval = OBJECT_TO_JSVAL(obj);
+ }
+ METER(xml_stats.qnameobj);
+ METER(xml_stats.liveqnameobj);
+
+ if (isQName) {
+ /* If namespace is not specified and name is a QName, clone it. */
+ qn = (JSXMLQName *) JS_GetPrivate(cx, JSVAL_TO_OBJECT(nameval));
+ if (argc == 1) {
+ uri = qn->uri;
+ prefix = qn->prefix;
+ name = qn->localName;
+ goto out;
+ }
+
+ /* Namespace and qname were passed -- use the qname's localName. */
+ nameval = STRING_TO_JSVAL(qn->localName);
+ }
+
+ if (argc == 0) {
+ name = cx->runtime->emptyString;
+ } else {
+ name = js_ValueToString(cx, nameval);
+ if (!name)
+ return JS_FALSE;
+
+ /* Use argv[1] as a local root for name, even if it was not passed. */
+ argv[1] = STRING_TO_JSVAL(name);
+ }
+
+ nsval = argv[0];
+ if (argc == 1 || JSVAL_IS_VOID(nsval)) {
+ if (IS_STAR(name)) {
+ nsval = JSVAL_NULL;
+ } else {
+ if (!js_GetDefaultXMLNamespace(cx, &nsval))
+ return JS_FALSE;
+ }
+ }
+
+ if (JSVAL_IS_NULL(nsval)) {
+ /* NULL prefix represents *undefined* in ECMA-357 13.3.2 5(a). */
+ uri = prefix = NULL;
+ } else {
+ /*
+ * Inline specialization of the Namespace constructor called with
+ * nsval passed as the only argument, to compute the uri and prefix
+ * for the constructed namespace, without actually allocating the
+ * object or computing other members. See ECMA-357 13.3.2 6(a) and
+ * 13.2.2.
+ */
+ isNamespace = isQName = JS_FALSE;
+ if (!JSVAL_IS_PRIMITIVE(nsval)) {
+ nsobj = JSVAL_TO_OBJECT(nsval);
+ clasp = OBJ_GET_CLASS(cx, nsobj);
+ isNamespace = (clasp == &js_NamespaceClass.base);
+ isQName = (clasp == &js_QNameClass.base);
+ }
+#ifdef __GNUC__ /* suppress bogus gcc warnings */
+ else nsobj = NULL;
+#endif
+
+ if (isNamespace) {
+ ns = (JSXMLNamespace *) JS_GetPrivate(cx, nsobj);
+ uri = ns->uri;
+ prefix = ns->prefix;
+ } else if (isQName &&
+ (qn = (JSXMLQName *) JS_GetPrivate(cx, nsobj))->uri) {
+ uri = qn->uri;
+ prefix = qn->prefix;
+ } else {
+ uri = js_ValueToString(cx, nsval);
+ if (!uri)
+ return JS_FALSE;
+ argv[0] = STRING_TO_JSVAL(uri); /* local root */
+
+ /* NULL here represents *undefined* in ECMA-357 13.2.2 3(c)iii. */
+ prefix = IS_EMPTY(uri) ? cx->runtime->emptyString : NULL;
+ }
+ }
+
+out:
+ qn = js_NewXMLQName(cx, uri, prefix, name);
+ if (!qn)
+ return JS_FALSE;
+ if (!JS_SetPrivate(cx, obj, qn))
+ return JS_FALSE;
+ qn->object = obj;
+ return JS_TRUE;
+}
+
+static JSBool
+AttributeName(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ /*
+ * Since js_AttributeNameClass was initialized, obj will have that as its
+ * class, not js_QNameClass.
+ */
+ return QName(cx, obj, argc, argv, rval);
+}
+
+/*
+ * XMLArray library functions.
+ */
+static JSBool
+namespace_identity(const void *a, const void *b)
+{
+ const JSXMLNamespace *nsa = (const JSXMLNamespace *) a;
+ const JSXMLNamespace *nsb = (const JSXMLNamespace *) b;
+
+ if (nsa->prefix && nsb->prefix) {
+ if (!js_EqualStrings(nsa->prefix, nsb->prefix))
+ return JS_FALSE;
+ } else {
+ if (nsa->prefix || nsb->prefix)
+ return JS_FALSE;
+ }
+ return js_EqualStrings(nsa->uri, nsb->uri);
+}
+
+static JSBool
+attr_identity(const void *a, const void *b)
+{
+ const JSXML *xmla = (const JSXML *) a;
+ const JSXML *xmlb = (const JSXML *) b;
+
+ return qname_identity(xmla->name, xmlb->name);
+}
+
+static void
+XMLArrayCursorInit(JSXMLArrayCursor *cursor, JSXMLArray *array)
+{
+ JSXMLArrayCursor *next;
+
+ cursor->array = array;
+ cursor->index = 0;
+ next = cursor->next = array->cursors;
+ if (next)
+ next->prevp = &cursor->next;
+ cursor->prevp = &array->cursors;
+ array->cursors = cursor;
+ cursor->root = NULL;
+}
+
+static void
+XMLArrayCursorFinish(JSXMLArrayCursor *cursor)
+{
+ JSXMLArrayCursor *next;
+
+ if (!cursor->array)
+ return;
+ next = cursor->next;
+ if (next)
+ next->prevp = cursor->prevp;
+ *cursor->prevp = next;
+ cursor->array = NULL;
+}
+
+static void *
+XMLArrayCursorNext(JSXMLArrayCursor *cursor)
+{
+ JSXMLArray *array;
+
+ array = cursor->array;
+ if (!array || cursor->index >= array->length)
+ return NULL;
+ return cursor->root = array->vector[cursor->index++];
+}
+
+static void *
+XMLArrayCursorItem(JSXMLArrayCursor *cursor)
+{
+ JSXMLArray *array;
+
+ array = cursor->array;
+ if (!array || cursor->index >= array->length)
+ return NULL;
+ return cursor->root = array->vector[cursor->index];
+}
+
+static void
+XMLArrayCursorMark(JSContext *cx, JSXMLArrayCursor *cursor)
+{
+ while (cursor) {
+ GC_MARK(cx, cursor->root, "cursor->root");
+ cursor = cursor->next;
+ }
+}
+
+/* NB: called with null cx from the GC, via xml_mark => XMLArrayTrim. */
+static JSBool
+XMLArraySetCapacity(JSContext *cx, JSXMLArray *array, uint32 capacity)
+{
+ void **vector;
+
+ if (capacity == 0) {
+ /* We could let realloc(p, 0) free this, but purify gets confused. */
+ if (array->vector)
+ free(array->vector);
+ vector = NULL;
+ } else {
+ if ((size_t)capacity > ~(size_t)0 / sizeof(void *) ||
+ !(vector = (void **)
+ realloc(array->vector, capacity * sizeof(void *)))) {
+ if (cx)
+ JS_ReportOutOfMemory(cx);
+ return JS_FALSE;
+ }
+ }
+ array->capacity = JSXML_PRESET_CAPACITY | capacity;
+ array->vector = vector;
+ return JS_TRUE;
+}
+
+static void
+XMLArrayTrim(JSXMLArray *array)
+{
+ if (array->capacity & JSXML_PRESET_CAPACITY)
+ return;
+ if (array->length < array->capacity)
+ XMLArraySetCapacity(NULL, array, array->length);
+}
+
+static JSBool
+XMLArrayInit(JSContext *cx, JSXMLArray *array, uint32 capacity)
+{
+ array->length = array->capacity = 0;
+ array->vector = NULL;
+ array->cursors = NULL;
+ return capacity == 0 || XMLArraySetCapacity(cx, array, capacity);
+}
+
+static void
+XMLArrayFinish(JSContext *cx, JSXMLArray *array)
+{
+ JSXMLArrayCursor *cursor;
+
+ JS_free(cx, array->vector);
+
+ while ((cursor = array->cursors) != NULL)
+ XMLArrayCursorFinish(cursor);
+
+#ifdef DEBUG
+ memset(array, 0xd5, sizeof *array);
+#endif
+}
+
+#define XML_NOT_FOUND ((uint32) -1)
+
+static uint32
+XMLArrayFindMember(const JSXMLArray *array, void *elt, JSIdentityOp identity)
+{
+ void **vector;
+ uint32 i, n;
+
+ /* The identity op must not reallocate array->vector. */
+ vector = array->vector;
+ if (identity) {
+ for (i = 0, n = array->length; i < n; i++) {
+ if (identity(vector[i], elt))
+ return i;
+ }
+ } else {
+ for (i = 0, n = array->length; i < n; i++) {
+ if (vector[i] == elt)
+ return i;
+ }
+ }
+ return XML_NOT_FOUND;
+}
+
+/*
+ * Grow array vector capacity by powers of two to LINEAR_THRESHOLD, and after
+ * that, grow by LINEAR_INCREMENT. Both must be powers of two, and threshold
+ * should be greater than increment.
+ */
+#define LINEAR_THRESHOLD 256
+#define LINEAR_INCREMENT 32
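+
+/*
+ * Editor's illustrative sketch (not part of the original source): the growth
+ * policy described above, factored out for clarity. XMLArrayAddMember below
+ * computes the same capacities inline; e.g. index 5 grows the vector to 8
+ * slots (next power of two), while index 300 grows it to
+ * JS_ROUNDUP(301, 32) == 320 slots.
+ */
+#if 0
+static uint32
+NextXMLArrayCapacity(uint32 index)
+{
+    uint32 capacity = index + 1;
+    int log2;
+
+    if (index >= LINEAR_THRESHOLD)
+        return JS_ROUNDUP(capacity, LINEAR_INCREMENT);
+    JS_CEILING_LOG2(log2, capacity);
+    return JS_BIT(log2);
+}
+#endif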
+
+static JSBool
+XMLArrayAddMember(JSContext *cx, JSXMLArray *array, uint32 index, void *elt)
+{
+ uint32 capacity, i;
+ int log2;
+ void **vector;
+
+ if (index >= array->length) {
+ if (index >= JSXML_CAPACITY(array)) {
+ /* Arrange to clear JSXML_PRESET_CAPACITY from array->capacity. */
+ capacity = index + 1;
+ if (index >= LINEAR_THRESHOLD) {
+ capacity = JS_ROUNDUP(capacity, LINEAR_INCREMENT);
+ } else {
+ JS_CEILING_LOG2(log2, capacity);
+ capacity = JS_BIT(log2);
+ }
+ if ((size_t)capacity > ~(size_t)0 / sizeof(void *) ||
+ !(vector = (void **)
+ realloc(array->vector, capacity * sizeof(void *)))) {
+ JS_ReportOutOfMemory(cx);
+ return JS_FALSE;
+ }
+ array->capacity = capacity;
+ array->vector = vector;
+ for (i = array->length; i < index; i++)
+ vector[i] = NULL;
+ }
+ array->length = index + 1;
+ }
+
+ array->vector[index] = elt;
+ return JS_TRUE;
+}
+
+static JSBool
+XMLArrayInsert(JSContext *cx, JSXMLArray *array, uint32 i, uint32 n)
+{
+ uint32 j;
+ JSXMLArrayCursor *cursor;
+
+ j = array->length;
+ JS_ASSERT(i <= j);
+ if (!XMLArraySetCapacity(cx, array, j + n))
+ return JS_FALSE;
+
+ array->length = j + n;
+ JS_ASSERT(n != (uint32)-1);
+ while (j != i) {
+ --j;
+ array->vector[j + n] = array->vector[j];
+ }
+
+ for (cursor = array->cursors; cursor; cursor = cursor->next) {
+ if (cursor->index > i)
+ cursor->index += n;
+ }
+ return JS_TRUE;
+}
+
+static void *
+XMLArrayDelete(JSContext *cx, JSXMLArray *array, uint32 index, JSBool compress)
+{
+ uint32 length;
+ void **vector, *elt;
+ JSXMLArrayCursor *cursor;
+
+ length = array->length;
+ if (index >= length)
+ return NULL;
+
+ vector = array->vector;
+ elt = vector[index];
+ if (compress) {
+ while (++index < length)
+ vector[index-1] = vector[index];
+ array->length = length - 1;
+ array->capacity = JSXML_CAPACITY(array);
+ } else {
+ vector[index] = NULL;
+ }
+
+ for (cursor = array->cursors; cursor; cursor = cursor->next) {
+ if (cursor->index > index)
+ --cursor->index;
+ }
+ return elt;
+}
+
+static void
+XMLArrayTruncate(JSContext *cx, JSXMLArray *array, uint32 length)
+{
+ void **vector;
+
+ JS_ASSERT(!array->cursors);
+ if (length >= array->length)
+ return;
+
+ if (length == 0) {
+ if (array->vector)
+ free(array->vector);
+ vector = NULL;
+ } else {
+ vector = realloc(array->vector, length * sizeof(void *));
+ if (!vector)
+ return;
+ }
+
+ if (array->length > length)
+ array->length = length;
+ array->capacity = length;
+ array->vector = vector;
+}
+
+#define XMLARRAY_FIND_MEMBER(a,e,f) XMLArrayFindMember(a, (void *)(e), f)
+#define XMLARRAY_HAS_MEMBER(a,e,f) (XMLArrayFindMember(a, (void *)(e), f) != \
+ XML_NOT_FOUND)
+#define XMLARRAY_MEMBER(a,i,t) (((i) < (a)->length) \
+ ? (t *) (a)->vector[i] \
+ : NULL)
+#define XMLARRAY_SET_MEMBER(a,i,e) JS_BEGIN_MACRO \
+ if ((a)->length <= (i)) \
+ (a)->length = (i) + 1; \
+ ((a)->vector[i] = (void *)(e)); \
+ JS_END_MACRO
+#define XMLARRAY_ADD_MEMBER(x,a,i,e)XMLArrayAddMember(x, a, i, (void *)(e))
+#define XMLARRAY_INSERT(x,a,i,n) XMLArrayInsert(x, a, i, n)
+#define XMLARRAY_APPEND(x,a,e) XMLARRAY_ADD_MEMBER(x, a, (a)->length, (e))
+#define XMLARRAY_DELETE(x,a,i,c,t) ((t *) XMLArrayDelete(x, a, i, c))
+#define XMLARRAY_TRUNCATE(x,a,n) XMLArrayTruncate(x, a, n)
+
+/*
+ * Define XML setting property strings and constants early, so everyone can
+ * use the same names and their magic numbers (tinyids, flags).
+ */
+static const char js_ignoreComments_str[] = "ignoreComments";
+static const char js_ignoreProcessingInstructions_str[]
+ = "ignoreProcessingInstructions";
+static const char js_ignoreWhitespace_str[] = "ignoreWhitespace";
+static const char js_prettyPrinting_str[] = "prettyPrinting";
+static const char js_prettyIndent_str[] = "prettyIndent";
+
+/*
+ * NB: These XML static property tinyids must
+ * (a) not collide with the generic negative tinyids at the top of jsfun.c;
+ * (b) index their corresponding xml_static_props array elements.
+ * Don't change 'em!
+ */
+enum xml_static_tinyid {
+ XML_IGNORE_COMMENTS,
+ XML_IGNORE_PROCESSING_INSTRUCTIONS,
+ XML_IGNORE_WHITESPACE,
+ XML_PRETTY_PRINTING,
+ XML_PRETTY_INDENT
+};
+
+static JSBool
+xml_setting_getter(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ return JS_TRUE;
+}
+
+static JSBool
+xml_setting_setter(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ JSBool b;
+ uint8 flag;
+
+ JS_ASSERT(JSVAL_IS_INT(id));
+ if (!js_ValueToBoolean(cx, *vp, &b))
+ return JS_FALSE;
+
+ flag = JS_BIT(JSVAL_TO_INT(id));
+ if (b)
+ cx->xmlSettingFlags |= flag;
+ else
+ cx->xmlSettingFlags &= ~flag;
+ return JS_TRUE;
+}
+
+static JSPropertySpec xml_static_props[] = {
+ {js_ignoreComments_str, XML_IGNORE_COMMENTS, JSPROP_PERMANENT,
+ xml_setting_getter, xml_setting_setter},
+ {js_ignoreProcessingInstructions_str,
+ XML_IGNORE_PROCESSING_INSTRUCTIONS, JSPROP_PERMANENT,
+ xml_setting_getter, xml_setting_setter},
+ {js_ignoreWhitespace_str, XML_IGNORE_WHITESPACE, JSPROP_PERMANENT,
+ xml_setting_getter, xml_setting_setter},
+ {js_prettyPrinting_str, XML_PRETTY_PRINTING, JSPROP_PERMANENT,
+ xml_setting_getter, xml_setting_setter},
+ {js_prettyIndent_str, XML_PRETTY_INDENT, JSPROP_PERMANENT,
+ xml_setting_getter, NULL},
+ {0,0,0,0,0}
+};
+
+/* Derive cx->xmlSettingFlags bits from xml_static_props tinyids. */
+#define XSF_IGNORE_COMMENTS JS_BIT(XML_IGNORE_COMMENTS)
+#define XSF_IGNORE_PROCESSING_INSTRUCTIONS \
+ JS_BIT(XML_IGNORE_PROCESSING_INSTRUCTIONS)
+#define XSF_IGNORE_WHITESPACE JS_BIT(XML_IGNORE_WHITESPACE)
+#define XSF_PRETTY_PRINTING JS_BIT(XML_PRETTY_PRINTING)
+#define XSF_CACHE_VALID JS_BIT(XML_PRETTY_INDENT)
+
+/*
+ * Extra, unrelated but necessarily disjoint flag used by ParseNodeToXML.
+ * This flag means a couple of things:
+ *
+ * - The top JSXML created for a parse tree must have an object owning it.
+ *
+ * - The default namespace normally inherited from the temporary
+ *   <parent xmlns='...'> tag that wraps a runtime-concatenated XML source
+ *   string must, in the case of a precompiled XML object tree, be inherited
+ *   via ad-hoc code in ParseNodeToXML.
+ *
+ * Because of the second purpose, we name this flag XSF_PRECOMPILED_ROOT.
+ */
+#define XSF_PRECOMPILED_ROOT (XSF_CACHE_VALID << 1)
+
+/* Macros for special-casing xml:, xmlns= and xmlns:foo= in ParseNodeToQName. */
+#define IS_XML(str) \
+ (JSSTRING_LENGTH(str) == 3 && IS_XML_CHARS(JSSTRING_CHARS(str)))
+
+#define IS_XMLNS(str) \
+ (JSSTRING_LENGTH(str) == 5 && IS_XMLNS_CHARS(JSSTRING_CHARS(str)))
+
+#define IS_XML_CHARS(chars) \
+ (JS_TOLOWER((chars)[0]) == 'x' && \
+ JS_TOLOWER((chars)[1]) == 'm' && \
+ JS_TOLOWER((chars)[2]) == 'l')
+
+#define HAS_NS_AFTER_XML(chars) \
+ (JS_TOLOWER((chars)[3]) == 'n' && \
+ JS_TOLOWER((chars)[4]) == 's')
+
+#define IS_XMLNS_CHARS(chars) \
+ (IS_XML_CHARS(chars) && HAS_NS_AFTER_XML(chars))
+
+#define STARTS_WITH_XML(chars,length) \
+ (length >= 3 && IS_XML_CHARS(chars))
+
+static const char xml_namespace_str[] = "http://www.w3.org/XML/1998/namespace";
+static const char xmlns_namespace_str[] = "http://www.w3.org/2000/xmlns/";
+
+static JSXMLQName *
+ParseNodeToQName(JSContext *cx, JSParseNode *pn, JSXMLArray *inScopeNSes,
+ JSBool isAttributeName)
+{
+ JSString *str, *uri, *prefix, *localName;
+ size_t length, offset;
+ const jschar *start, *limit, *colon;
+ uint32 n;
+ JSXMLNamespace *ns;
+
+ JS_ASSERT(pn->pn_arity == PN_NULLARY);
+ str = ATOM_TO_STRING(pn->pn_atom);
+ length = JSSTRING_LENGTH(str);
+ start = JSSTRING_CHARS(str);
+ JS_ASSERT(length != 0 && *start != '@');
+ JS_ASSERT(length != 1 || *start != '*');
+
+ uri = cx->runtime->emptyString;
+ limit = start + length;
+ colon = js_strchr_limit(start, ':', limit);
+ if (colon) {
+ offset = PTRDIFF(colon, start, jschar);
+ prefix = js_NewDependentString(cx, str, 0, offset, 0);
+ if (!prefix)
+ return NULL;
+
+ if (STARTS_WITH_XML(start, offset)) {
+ if (offset == 3) {
+ uri = JS_InternString(cx, xml_namespace_str);
+ if (!uri)
+ return NULL;
+ } else if (offset == 5 && HAS_NS_AFTER_XML(start)) {
+ uri = JS_InternString(cx, xmlns_namespace_str);
+ if (!uri)
+ return NULL;
+ } else {
+ uri = NULL;
+ }
+ } else {
+ uri = NULL;
+ n = inScopeNSes->length;
+ while (n != 0) {
+ --n;
+ ns = XMLARRAY_MEMBER(inScopeNSes, n, JSXMLNamespace);
+ if (ns->prefix && js_EqualStrings(ns->prefix, prefix)) {
+ uri = ns->uri;
+ break;
+ }
+ }
+ }
+
+ if (!uri) {
+ js_ReportCompileErrorNumber(cx, pn,
+ JSREPORT_PN | JSREPORT_ERROR,
+ JSMSG_BAD_XML_NAMESPACE,
+ js_ValueToPrintableString(cx,
+ STRING_TO_JSVAL(prefix)));
+ return NULL;
+ }
+
+ localName = js_NewStringCopyN(cx, colon + 1, length - (offset + 1), 0);
+ if (!localName)
+ return NULL;
+ } else {
+ if (isAttributeName) {
+ /*
+ * An unprefixed attribute is not in any namespace, so set prefix
+ * as well as uri to the empty string.
+ */
+ prefix = uri;
+ } else {
+ /*
+ * Loop from back to front looking for the closest declared default
+ * namespace.
+ */
+ n = inScopeNSes->length;
+ while (n != 0) {
+ --n;
+ ns = XMLARRAY_MEMBER(inScopeNSes, n, JSXMLNamespace);
+ if (!ns->prefix || IS_EMPTY(ns->prefix)) {
+ uri = ns->uri;
+ break;
+ }
+ }
+ prefix = IS_EMPTY(uri) ? cx->runtime->emptyString : NULL;
+ }
+ localName = str;
+ }
+
+ return js_NewXMLQName(cx, uri, prefix, localName);
+}
+
+static JSString *
+ChompXMLWhitespace(JSContext *cx, JSString *str)
+{
+ size_t length, newlength, offset;
+ const jschar *cp, *start, *end;
+ jschar c;
+
+ length = JSSTRING_LENGTH(str);
+ for (cp = start = JSSTRING_CHARS(str), end = cp + length; cp < end; cp++) {
+ c = *cp;
+ if (!JS_ISXMLSPACE(c))
+ break;
+ }
+ while (end > cp) {
+ c = end[-1];
+ if (!JS_ISXMLSPACE(c))
+ break;
+ --end;
+ }
+ newlength = PTRDIFF(end, cp, jschar);
+ if (newlength == length)
+ return str;
+ offset = PTRDIFF(cp, start, jschar);
+ return js_NewDependentString(cx, str, offset, newlength, 0);
+}
+
+static JSXML *
+ParseNodeToXML(JSContext *cx, JSParseNode *pn, JSXMLArray *inScopeNSes,
+ uintN flags)
+{
+ JSXML *xml, *kid, *attr, *attrj;
+ JSString *str;
+ uint32 length, n, i, j;
+ JSParseNode *pn2, *pn3, *head, **pnp;
+ JSXMLNamespace *ns;
+ JSXMLQName *qn, *attrjqn;
+ JSXMLClass xml_class;
+ int stackDummy;
+
+ if (!JS_CHECK_STACK_SIZE(cx, stackDummy)) {
+ js_ReportCompileErrorNumber(cx, pn, JSREPORT_PN | JSREPORT_ERROR,
+ JSMSG_OVER_RECURSED);
+ return NULL;
+ }
+
+#define PN2X_SKIP_CHILD ((JSXML *) 1)
+
+ /*
+ * Cases return early to avoid common code that gets an outermost xml's
+ * object, which protects GC-things owned by xml and its descendants from
+ * garbage collection.
+ */
+ xml = NULL;
+ if (!js_EnterLocalRootScope(cx))
+ return NULL;
+ switch (pn->pn_type) {
+ case TOK_XMLELEM:
+ length = inScopeNSes->length;
+ pn2 = pn->pn_head;
+ xml = ParseNodeToXML(cx, pn2, inScopeNSes, flags);
+ if (!xml)
+ goto fail;
+
+ flags &= ~XSF_PRECOMPILED_ROOT;
+ n = pn->pn_count;
+ JS_ASSERT(n >= 2);
+ n -= 2;
+ if (!XMLArraySetCapacity(cx, &xml->xml_kids, n))
+ goto fail;
+
+ i = 0;
+ while ((pn2 = pn2->pn_next) != NULL) {
+ if (!pn2->pn_next) {
+ /* Don't append the end tag! */
+ JS_ASSERT(pn2->pn_type == TOK_XMLETAGO);
+ break;
+ }
+
+ if ((flags & XSF_IGNORE_WHITESPACE) &&
+ n > 1 && pn2->pn_type == TOK_XMLSPACE) {
+ --n;
+ continue;
+ }
+
+ kid = ParseNodeToXML(cx, pn2, inScopeNSes, flags);
+ if (kid == PN2X_SKIP_CHILD) {
+ --n;
+ continue;
+ }
+
+ if (!kid)
+ goto fail;
+
+ /* Store kid in xml right away, to protect it from GC. */
+ XMLARRAY_SET_MEMBER(&xml->xml_kids, i, kid);
+ kid->parent = xml;
+ ++i;
+
+ /* XXX where is this documented in an XML spec, or in E4X? */
+ if ((flags & XSF_IGNORE_WHITESPACE) &&
+ n > 1 && kid->xml_class == JSXML_CLASS_TEXT) {
+ str = ChompXMLWhitespace(cx, kid->xml_value);
+ if (!str)
+ goto fail;
+ kid->xml_value = str;
+ }
+ }
+
+ JS_ASSERT(i == n);
+ if (n < pn->pn_count - 2)
+ XMLArrayTrim(&xml->xml_kids);
+ XMLARRAY_TRUNCATE(cx, inScopeNSes, length);
+ break;
+
+ case TOK_XMLLIST:
+ xml = js_NewXML(cx, JSXML_CLASS_LIST);
+ if (!xml)
+ goto fail;
+
+ n = pn->pn_count;
+ if (!XMLArraySetCapacity(cx, &xml->xml_kids, n))
+ goto fail;
+
+ i = 0;
+ for (pn2 = pn->pn_head; pn2; pn2 = pn2->pn_next) {
+ /*
+ * Always ignore insignificant whitespace in lists -- we shouldn't
+ * condition this on an XML.ignoreWhitespace setting when the list
+ * constructor is XMLList (note XML/XMLList unification hazard).
+ */
+ if (pn2->pn_type == TOK_XMLSPACE) {
+ --n;
+ continue;
+ }
+
+ kid = ParseNodeToXML(cx, pn2, inScopeNSes, flags);
+ if (kid == PN2X_SKIP_CHILD) {
+ --n;
+ continue;
+ }
+
+ if (!kid)
+ goto fail;
+
+ XMLARRAY_SET_MEMBER(&xml->xml_kids, i, kid);
+ ++i;
+ }
+
+ if (n < pn->pn_count)
+ XMLArrayTrim(&xml->xml_kids);
+ break;
+
+ case TOK_XMLSTAGO:
+ case TOK_XMLPTAGC:
+ length = inScopeNSes->length;
+ pn2 = pn->pn_head;
+ JS_ASSERT(pn2->pn_type == TOK_XMLNAME);
+ if (pn2->pn_arity == PN_LIST)
+ goto syntax;
+
+ xml = js_NewXML(cx, JSXML_CLASS_ELEMENT);
+ if (!xml)
+ goto fail;
+
+ /* First pass: check syntax and process namespace declarations. */
+ JS_ASSERT(pn->pn_count >= 1);
+ n = pn->pn_count - 1;
+ pnp = &pn2->pn_next;
+ head = *pnp;
+ while ((pn2 = *pnp) != NULL) {
+ size_t length;
+ const jschar *chars;
+
+ if (pn2->pn_type != TOK_XMLNAME || pn2->pn_arity != PN_NULLARY)
+ goto syntax;
+
+ /* Enforce "Well-formedness constraint: Unique Att Spec". */
+ for (pn3 = head; pn3 != pn2; pn3 = pn3->pn_next->pn_next) {
+ if (pn3->pn_atom == pn2->pn_atom) {
+ js_ReportCompileErrorNumber(cx, pn2,
+ JSREPORT_PN | JSREPORT_ERROR,
+ JSMSG_DUPLICATE_XML_ATTR,
+ js_ValueToPrintableString(cx,
+ ATOM_KEY(pn2->pn_atom)));
+ goto fail;
+ }
+ }
+
+ str = ATOM_TO_STRING(pn2->pn_atom);
+ pn2 = pn2->pn_next;
+ JS_ASSERT(pn2);
+ if (pn2->pn_type != TOK_XMLATTR)
+ goto syntax;
+
+ length = JSSTRING_LENGTH(str);
+ chars = JSSTRING_CHARS(str);
+ if (length >= 5 &&
+ IS_XMLNS_CHARS(chars) &&
+ (length == 5 || chars[5] == ':')) {
+ JSString *uri, *prefix;
+
+ uri = ATOM_TO_STRING(pn2->pn_atom);
+ if (length == 5) {
+ /* 10.3.2.1. Step 6(h)(i)(1)(a). */
+ prefix = cx->runtime->emptyString;
+ } else {
+ prefix = js_NewStringCopyN(cx, chars + 6, length - 6, 0);
+ if (!prefix)
+ goto fail;
+ }
+
+ /*
+ * Once the new ns is appended to xml->xml_namespaces, it is
+ * protected from GC by the object that owns xml -- which is
+ * either xml->object if outermost, or the object owning xml's
+ * oldest ancestor if !outermost.
+ */
+ ns = js_NewXMLNamespace(cx, prefix, uri, JS_TRUE);
+ if (!ns)
+ goto fail;
+
+ /*
+ * Don't add a namespace that's already in scope. If someone
+ * extracts a child property from its parent via [[Get]], then
+ * we enforce the invariant, noted many times in ECMA-357, that
+ * the child's namespaces form a possibly-improper superset of
+ * its ancestors' namespaces.
+ */
+ if (!XMLARRAY_HAS_MEMBER(inScopeNSes, ns, namespace_identity)) {
+ if (!XMLARRAY_APPEND(cx, inScopeNSes, ns) ||
+ !XMLARRAY_APPEND(cx, &xml->xml_namespaces, ns)) {
+ goto fail;
+ }
+ }
+
+ JS_ASSERT(n >= 2);
+ n -= 2;
+ *pnp = pn2->pn_next;
+ /* XXXbe recycle pn2 */
+ continue;
+ }
+
+ pnp = &pn2->pn_next;
+ }
+
+ /*
+ * If called from js_ParseNodeToXMLObject, emulate the effect of the
+ * <parent xmlns='%s'>...</parent> wrapping done by "ToXML Applied to
+ * the String Type" (ECMA-357 10.3.1).
+ */
+ if (flags & XSF_PRECOMPILED_ROOT) {
+ JS_ASSERT(length >= 1);
+ ns = XMLARRAY_MEMBER(inScopeNSes, 0, JSXMLNamespace);
+ JS_ASSERT(!XMLARRAY_HAS_MEMBER(&xml->xml_namespaces, ns,
+ namespace_identity));
+ ns = js_NewXMLNamespace(cx, ns->prefix, ns->uri, JS_FALSE);
+ if (!ns)
+ goto fail;
+ if (!XMLARRAY_APPEND(cx, &xml->xml_namespaces, ns))
+ goto fail;
+ }
+ XMLArrayTrim(&xml->xml_namespaces);
+
+ /* Second pass: process tag name and attributes, using namespaces. */
+ pn2 = pn->pn_head;
+ qn = ParseNodeToQName(cx, pn2, inScopeNSes, JS_FALSE);
+ if (!qn)
+ goto fail;
+ xml->name = qn;
+
+ JS_ASSERT((n & 1) == 0);
+ n >>= 1;
+ if (!XMLArraySetCapacity(cx, &xml->xml_attrs, n))
+ goto fail;
+
+ for (i = 0; (pn2 = pn2->pn_next) != NULL; i++) {
+ qn = ParseNodeToQName(cx, pn2, inScopeNSes, JS_TRUE);
+ if (!qn) {
+ xml->xml_attrs.length = i;
+ goto fail;
+ }
+
+ /*
+ * Enforce "Well-formedness constraint: Unique Att Spec", part 2:
+ * this time checking local name and namespace URI.
+ */
+ for (j = 0; j < i; j++) {
+ attrj = XMLARRAY_MEMBER(&xml->xml_attrs, j, JSXML);
+ attrjqn = attrj->name;
+ if (js_EqualStrings(attrjqn->uri, qn->uri) &&
+ js_EqualStrings(attrjqn->localName, qn->localName)) {
+ js_ReportCompileErrorNumber(cx, pn2,
+ JSREPORT_PN | JSREPORT_ERROR,
+ JSMSG_DUPLICATE_XML_ATTR,
+ js_ValueToPrintableString(cx,
+ ATOM_KEY(pn2->pn_atom)));
+ goto fail;
+ }
+ }
+
+ pn2 = pn2->pn_next;
+ JS_ASSERT(pn2);
+ JS_ASSERT(pn2->pn_type == TOK_XMLATTR);
+
+ attr = js_NewXML(cx, JSXML_CLASS_ATTRIBUTE);
+ if (!attr)
+ goto fail;
+
+ XMLARRAY_SET_MEMBER(&xml->xml_attrs, i, attr);
+ attr->parent = xml;
+ attr->name = qn;
+ attr->xml_value = ATOM_TO_STRING(pn2->pn_atom);
+ }
+
+ /* Point tag closes its own namespace scope. */
+ if (pn->pn_type == TOK_XMLPTAGC)
+ XMLARRAY_TRUNCATE(cx, inScopeNSes, length);
+ break;
+
+ case TOK_XMLSPACE:
+ case TOK_XMLTEXT:
+ case TOK_XMLCDATA:
+ case TOK_XMLCOMMENT:
+ case TOK_XMLPI:
+ str = ATOM_TO_STRING(pn->pn_atom);
+ qn = NULL;
+ if (pn->pn_type == TOK_XMLCOMMENT) {
+ if (flags & XSF_IGNORE_COMMENTS)
+ goto skip_child;
+ xml_class = JSXML_CLASS_COMMENT;
+ } else if (pn->pn_type == TOK_XMLPI) {
+ if (IS_XML(str)) {
+ js_ReportCompileErrorNumber(cx, pn,
+ JSREPORT_PN | JSREPORT_ERROR,
+ JSMSG_RESERVED_ID,
+ js_ValueToPrintableString(cx,
+ STRING_TO_JSVAL(str)));
+ goto fail;
+ }
+
+ if (flags & XSF_IGNORE_PROCESSING_INSTRUCTIONS)
+ goto skip_child;
+
+ qn = ParseNodeToQName(cx, pn, inScopeNSes, JS_FALSE);
+ if (!qn)
+ goto fail;
+
+ str = pn->pn_atom2
+ ? ATOM_TO_STRING(pn->pn_atom2)
+ : cx->runtime->emptyString;
+ xml_class = JSXML_CLASS_PROCESSING_INSTRUCTION;
+ } else {
+ /* CDATA section content, or element text. */
+ xml_class = JSXML_CLASS_TEXT;
+ }
+
+ xml = js_NewXML(cx, xml_class);
+ if (!xml)
+ goto fail;
+ xml->name = qn;
+ if (pn->pn_type == TOK_XMLSPACE)
+ xml->xml_flags |= XMLF_WHITESPACE_TEXT;
+ xml->xml_value = str;
+ break;
+
+ default:
+ goto syntax;
+ }
+
+ js_LeaveLocalRootScopeWithResult(cx, (jsval) xml);
+ if ((flags & XSF_PRECOMPILED_ROOT) && !js_GetXMLObject(cx, xml))
+ return NULL;
+ return xml;
+
+skip_child:
+ js_LeaveLocalRootScope(cx);
+ return PN2X_SKIP_CHILD;
+
+#undef PN2X_SKIP_CHILD
+
+syntax:
+ js_ReportCompileErrorNumber(cx, pn, JSREPORT_PN | JSREPORT_ERROR,
+ JSMSG_BAD_XML_MARKUP);
+fail:
+ js_LeaveLocalRootScope(cx);
+ return NULL;
+}
+
+/*
+ * XML helper, object-ops, and library functions. We start with the helpers,
+ * in ECMA-357 order, but merging XML (9.1) and XMLList (9.2) helpers.
+ */
+static JSBool
+GetXMLSetting(JSContext *cx, const char *name, jsval *vp)
+{
+ jsval v;
+
+ if (!js_FindClassObject(cx, NULL, INT_TO_JSID(JSProto_XML), &v))
+ return JS_FALSE;
+ if (!VALUE_IS_FUNCTION(cx, v)) {
+ *vp = JSVAL_VOID;
+ return JS_TRUE;
+ }
+ return JS_GetProperty(cx, JSVAL_TO_OBJECT(v), name, vp);
+}
+
+static JSBool
+FillSettingsCache(JSContext *cx)
+{
+ int i;
+ const char *name;
+ jsval v;
+ JSBool isSet;
+
+ /* Note: XML_PRETTY_INDENT is not a boolean setting. */
+ for (i = XML_IGNORE_COMMENTS; i < XML_PRETTY_INDENT; i++) {
+ name = xml_static_props[i].name;
+ if (!GetXMLSetting(cx, name, &v) || !js_ValueToBoolean(cx, v, &isSet))
+ return JS_FALSE;
+ if (isSet)
+ cx->xmlSettingFlags |= JS_BIT(i);
+ else
+ cx->xmlSettingFlags &= ~JS_BIT(i);
+ }
+
+ cx->xmlSettingFlags |= XSF_CACHE_VALID;
+ return JS_TRUE;
+}
+
+static JSBool
+GetBooleanXMLSetting(JSContext *cx, const char *name, JSBool *bp)
+{
+ int i;
+
+ if (!(cx->xmlSettingFlags & XSF_CACHE_VALID) && !FillSettingsCache(cx))
+ return JS_FALSE;
+
+ for (i = 0; xml_static_props[i].name; i++) {
+ if (!strcmp(xml_static_props[i].name, name)) {
+ *bp = (cx->xmlSettingFlags & JS_BIT(i)) != 0;
+ return JS_TRUE;
+ }
+ }
+ *bp = JS_FALSE;
+ return JS_TRUE;
+}
+
+static JSBool
+GetUint32XMLSetting(JSContext *cx, const char *name, uint32 *uip)
+{
+ jsval v;
+
+ return GetXMLSetting(cx, name, &v) && js_ValueToECMAUint32(cx, v, uip);
+}
+
+static JSBool
+GetXMLSettingFlags(JSContext *cx, uintN *flagsp)
+{
+ JSBool flag;
+
+ /* Just get the first flag to validate the setting flags cache. */
+ if (!GetBooleanXMLSetting(cx, js_ignoreComments_str, &flag))
+ return JS_FALSE;
+ *flagsp = cx->xmlSettingFlags;
+ return JS_TRUE;
+}
+
+static JSXML *
+ParseXMLSource(JSContext *cx, JSString *src)
+{
+ jsval nsval;
+ JSXMLNamespace *ns;
+ size_t urilen, srclen, length, offset, dstlen;
+ jschar *chars;
+ const jschar *srcp, *endp;
+ void *mark;
+ JSTokenStream *ts;
+ uintN lineno;
+ JSStackFrame *fp;
+ JSOp op;
+ JSParseNode *pn;
+ JSXML *xml;
+ JSXMLArray nsarray;
+ uintN flags;
+
+ static const char prefix[] = "<parent xmlns='";
+ static const char middle[] = "'>";
+ static const char suffix[] = "</parent>";
+
+#define constrlen(constr) (sizeof(constr) - 1)
+
+ if (!js_GetDefaultXMLNamespace(cx, &nsval))
+ return NULL;
+ ns = (JSXMLNamespace *) JS_GetPrivate(cx, JSVAL_TO_OBJECT(nsval));
+
+ urilen = JSSTRING_LENGTH(ns->uri);
+ srclen = JSSTRING_LENGTH(src);
+ length = constrlen(prefix) + urilen + constrlen(middle) + srclen +
+ constrlen(suffix);
+
+ chars = (jschar *) JS_malloc(cx, (length + 1) * sizeof(jschar));
+ if (!chars)
+ return NULL;
+
+ dstlen = length;
+ js_InflateStringToBuffer(cx, prefix, constrlen(prefix), chars, &dstlen);
+ offset = dstlen;
+ js_strncpy(chars + offset, JSSTRING_CHARS(ns->uri), urilen);
+ offset += urilen;
+ dstlen = length - offset + 1;
+ js_InflateStringToBuffer(cx, middle, constrlen(middle), chars + offset,
+ &dstlen);
+ offset += dstlen;
+ srcp = JSSTRING_CHARS(src);
+ js_strncpy(chars + offset, srcp, srclen);
+ offset += srclen;
+ dstlen = length - offset + 1;
+ js_InflateStringToBuffer(cx, suffix, constrlen(suffix), chars + offset,
+ &dstlen);
+ chars [offset + dstlen] = 0;
+
+ mark = JS_ARENA_MARK(&cx->tempPool);
+ ts = js_NewBufferTokenStream(cx, chars, length);
+ if (!ts)
+ return NULL;
+ for (fp = cx->fp; fp && !fp->pc; fp = fp->down)
+ continue;
+ if (fp) {
+ op = (JSOp) *fp->pc;
+ if (op == JSOP_TOXML || op == JSOP_TOXMLLIST) {
+ ts->filename = fp->script->filename;
+ lineno = js_PCToLineNumber(cx, fp->script, fp->pc);
+ for (endp = srcp + srclen; srcp < endp; srcp++)
+ if (*srcp == '\n')
+ --lineno;
+ ts->lineno = lineno;
+ }
+ }
+
+ JS_KEEP_ATOMS(cx->runtime);
+ pn = js_ParseXMLTokenStream(cx, cx->fp->scopeChain, ts, JS_FALSE);
+ xml = NULL;
+ if (pn && XMLArrayInit(cx, &nsarray, 1)) {
+ if (GetXMLSettingFlags(cx, &flags))
+ xml = ParseNodeToXML(cx, pn, &nsarray, flags);
+
+ XMLArrayFinish(cx, &nsarray);
+ }
+ JS_UNKEEP_ATOMS(cx->runtime);
+
+ JS_ARENA_RELEASE(&cx->tempPool, mark);
+ JS_free(cx, chars);
+ return xml;
+
+#undef constrlen
+}
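+
+/*
+ * Sketch of the buffer built above (the URI is only an example): with a
+ * default XML namespace of "http://example.com/ns" in scope and src equal
+ * to "<a>hi</a>", the inflated source handed to the token stream is
+ *
+ *     <parent xmlns='http://example.com/ns'><a>hi</a></parent>
+ *
+ * so the parsed <a> element inherits the default namespace, and the
+ * lineno adjustment keeps error reports pointing at the original XML
+ * literal rather than at this synthetic wrapper.
+ */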
+
+/*
+ * Errata in 10.3.1, 10.4.1, and 13.4.4.24 (at least).
+ *
+ * 10.3.1 Step 6(a) fails to NOTE that implementations that do not enforce
+ * the constraint:
+ *
+ * for all x belonging to XML:
+ * x.[[InScopeNamespaces]] >= x.[[Parent]].[[InScopeNamespaces]]
+ *
+ * must union x.[[InScopeNamespaces]] into x[0].[[InScopeNamespaces]] here
+ * (in new sub-step 6(a), renumbering the others to (b) and (c)).
+ *
+ * Same goes for 10.4.1 Step 7(a).
+ *
+ * In order for XML.prototype.namespaceDeclarations() to work correctly, the
+ * default namespace thereby unioned into x[0].[[InScopeNamespaces]] must be
+ * flagged as not declared, so that 13.4.4.24 Step 8(a) can exclude all such
+ * undeclared namespaces associated with x not belonging to ancestorNS.
+ */
+static JSXML *
+OrphanXMLChild(JSContext *cx, JSXML *xml, uint32 i)
+{
+ JSXMLNamespace *ns;
+
+ ns = XMLARRAY_MEMBER(&xml->xml_namespaces, 0, JSXMLNamespace);
+ xml = XMLARRAY_MEMBER(&xml->xml_kids, i, JSXML);
+ if (!ns || !xml)
+ return xml;
+ if (xml->xml_class == JSXML_CLASS_ELEMENT) {
+ if (!XMLARRAY_APPEND(cx, &xml->xml_namespaces, ns))
+ return NULL;
+ ns->declared = JS_FALSE;
+ }
+ xml->parent = NULL;
+ return xml;
+}
+
+static JSObject *
+ToXML(JSContext *cx, jsval v)
+{
+ JSObject *obj;
+ JSXML *xml;
+ JSClass *clasp;
+ JSString *str;
+ uint32 length;
+
+ if (JSVAL_IS_PRIMITIVE(v)) {
+ if (JSVAL_IS_NULL(v) || JSVAL_IS_VOID(v))
+ goto bad;
+ } else {
+ obj = JSVAL_TO_OBJECT(v);
+ if (OBJECT_IS_XML(cx, obj)) {
+ xml = (JSXML *) JS_GetPrivate(cx, obj);
+ if (xml->xml_class == JSXML_CLASS_LIST) {
+ if (xml->xml_kids.length != 1)
+ goto bad;
+ xml = XMLARRAY_MEMBER(&xml->xml_kids, 0, JSXML);
+ if (xml) {
+ JS_ASSERT(xml->xml_class != JSXML_CLASS_LIST);
+ return js_GetXMLObject(cx, xml);
+ }
+ }
+ return obj;
+ }
+
+ clasp = OBJ_GET_CLASS(cx, obj);
+ if (clasp->flags & JSCLASS_DOCUMENT_OBSERVER) {
+ JS_ASSERT(0);
+ }
+
+ if (clasp != &js_StringClass &&
+ clasp != &js_NumberClass &&
+ clasp != &js_BooleanClass) {
+ goto bad;
+ }
+ }
+
+ str = js_ValueToString(cx, v);
+ if (!str)
+ return NULL;
+ if (IS_EMPTY(str)) {
+ length = 0;
+#ifdef __GNUC__ /* suppress bogus gcc warnings */
+ xml = NULL;
+#endif
+ } else {
+ xml = ParseXMLSource(cx, str);
+ if (!xml)
+ return NULL;
+ length = JSXML_LENGTH(xml);
+ }
+
+ if (length == 0) {
+ obj = js_NewXMLObject(cx, JSXML_CLASS_TEXT);
+ if (!obj)
+ return NULL;
+ } else if (length == 1) {
+ xml = OrphanXMLChild(cx, xml, 0);
+ if (!xml)
+ return NULL;
+ obj = js_GetXMLObject(cx, xml);
+ if (!obj)
+ return NULL;
+ } else {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_SYNTAX_ERROR);
+ return NULL;
+ }
+ return obj;
+
+bad:
+ str = js_DecompileValueGenerator(cx, JSDVG_IGNORE_STACK, v, NULL);
+ if (str) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_XML_CONVERSION,
+ JS_GetStringBytes(str));
+ }
+ return NULL;
+}
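+
+/*
+ * For example, following the code above: ToXML("") yields a fresh empty
+ * text node, ToXML("<a/>") yields the lone <a> element orphaned from the
+ * synthetic <parent> wrapper, and ToXML("<a/><b/>") reports
+ * JSMSG_SYNTAX_ERROR because the parse produces two children.
+ */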
+
+static JSBool
+Append(JSContext *cx, JSXML *list, JSXML *kid);
+
+static JSObject *
+ToXMLList(JSContext *cx, jsval v)
+{
+ JSObject *obj, *listobj;
+ JSXML *xml, *list, *kid;
+ JSClass *clasp;
+ JSString *str;
+ uint32 i, length;
+
+ if (JSVAL_IS_PRIMITIVE(v)) {
+ if (JSVAL_IS_NULL(v) || JSVAL_IS_VOID(v))
+ goto bad;
+ } else {
+ obj = JSVAL_TO_OBJECT(v);
+ if (OBJECT_IS_XML(cx, obj)) {
+ xml = (JSXML *) JS_GetPrivate(cx, obj);
+ if (xml->xml_class != JSXML_CLASS_LIST) {
+ listobj = js_NewXMLObject(cx, JSXML_CLASS_LIST);
+ if (!listobj)
+ return NULL;
+ list = (JSXML *) JS_GetPrivate(cx, listobj);
+ if (!Append(cx, list, xml))
+ return NULL;
+ return listobj;
+ }
+ return obj;
+ }
+
+ clasp = OBJ_GET_CLASS(cx, obj);
+ if (clasp->flags & JSCLASS_DOCUMENT_OBSERVER) {
+ JS_ASSERT(0);
+ }
+
+ if (clasp != &js_StringClass &&
+ clasp != &js_NumberClass &&
+ clasp != &js_BooleanClass) {
+ goto bad;
+ }
+ }
+
+ str = js_ValueToString(cx, v);
+ if (!str)
+ return NULL;
+ if (IS_EMPTY(str)) {
+ xml = NULL;
+ length = 0;
+ } else {
+ if (!js_EnterLocalRootScope(cx))
+ return NULL;
+ xml = ParseXMLSource(cx, str);
+ if (!xml) {
+ js_LeaveLocalRootScope(cx);
+ return NULL;
+ }
+ length = JSXML_LENGTH(xml);
+ }
+
+ listobj = js_NewXMLObject(cx, JSXML_CLASS_LIST);
+ if (listobj) {
+ list = (JSXML *) JS_GetPrivate(cx, listobj);
+ for (i = 0; i < length; i++) {
+ kid = OrphanXMLChild(cx, xml, i);
+ if (!kid || !Append(cx, list, kid)) {
+ listobj = NULL;
+ break;
+ }
+ }
+ }
+
+ if (xml)
+ js_LeaveLocalRootScopeWithResult(cx, (jsval) listobj);
+ return listobj;
+
+bad:
+ str = js_DecompileValueGenerator(cx, JSDVG_IGNORE_STACK, v, NULL);
+ if (str) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_XMLLIST_CONVERSION,
+ JS_GetStringBytes(str));
+ }
+ return NULL;
+}
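+
+/*
+ * For example: ToXMLList("<a/><b/>") builds a two-kid list by orphaning
+ * each child of the synthetic wrapper and appending it; passing an XML
+ * element wraps it in a one-element list, while passing an existing
+ * XMLList object returns that object unchanged.
+ */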
+
+/*
+ * ECMA-357 10.2.1 Steps 5-7 pulled out as common subroutines of XMLToXMLString
+ * and their library-public js_* counterparts. The guts of MakeXMLCDataString,
+ * MakeXMLCommentString, and MakeXMLPIString are further factored into a common
+ * MakeXMLSpecialString subroutine.
+ *
+ * These functions take ownership of sb->base, if sb is non-null, in all cases
+ * of success or failure.
+ */
+static JSString *
+MakeXMLSpecialString(JSContext *cx, JSStringBuffer *sb,
+ JSString *str, JSString *str2,
+ const jschar *prefix, size_t prefixlength,
+ const jschar *suffix, size_t suffixlength)
+{
+ JSStringBuffer localSB;
+ size_t length, length2, newlength;
+ jschar *bp, *base;
+
+ if (!sb) {
+ sb = &localSB;
+ js_InitStringBuffer(sb);
+ }
+
+ length = JSSTRING_LENGTH(str);
+ length2 = str2 ? JSSTRING_LENGTH(str2) : 0;
+ newlength = STRING_BUFFER_OFFSET(sb) +
+ prefixlength + length + ((length2 != 0) ? 1 + length2 : 0) +
+ suffixlength;
+ bp = base = (jschar *)
+ JS_realloc(cx, sb->base, (newlength + 1) * sizeof(jschar));
+ if (!bp) {
+ js_FinishStringBuffer(sb);
+ return NULL;
+ }
+
+ bp += STRING_BUFFER_OFFSET(sb);
+ js_strncpy(bp, prefix, prefixlength);
+ bp += prefixlength;
+ js_strncpy(bp, JSSTRING_CHARS(str), length);
+ bp += length;
+ if (length2 != 0) {
+ *bp++ = (jschar) ' ';
+ js_strncpy(bp, JSSTRING_CHARS(str2), length2);
+ bp += length2;
+ }
+ js_strncpy(bp, suffix, suffixlength);
+ bp[suffixlength] = 0;
+
+ str = js_NewString(cx, base, newlength, 0);
+ if (!str)
+ free(base);
+ return str;
+}
+
+static JSString *
+MakeXMLCDATAString(JSContext *cx, JSStringBuffer *sb, JSString *str)
+{
+ static const jschar cdata_prefix_ucNstr[] = {'<', '!', '[',
+ 'C', 'D', 'A', 'T', 'A',
+ '['};
+ static const jschar cdata_suffix_ucNstr[] = {']', ']', '>'};
+
+ return MakeXMLSpecialString(cx, sb, str, NULL,
+ cdata_prefix_ucNstr, 9,
+ cdata_suffix_ucNstr, 3);
+}
+
+static JSString *
+MakeXMLCommentString(JSContext *cx, JSStringBuffer *sb, JSString *str)
+{
+ static const jschar comment_prefix_ucNstr[] = {'<', '!', '-', '-'};
+ static const jschar comment_suffix_ucNstr[] = {'-', '-', '>'};
+
+ return MakeXMLSpecialString(cx, sb, str, NULL,
+ comment_prefix_ucNstr, 4,
+ comment_suffix_ucNstr, 3);
+}
+
+static JSString *
+MakeXMLPIString(JSContext *cx, JSStringBuffer *sb, JSString *name,
+ JSString *value)
+{
+ static const jschar pi_prefix_ucNstr[] = {'<', '?'};
+ static const jschar pi_suffix_ucNstr[] = {'?', '>'};
+
+ return MakeXMLSpecialString(cx, sb, name, value,
+ pi_prefix_ucNstr, 2,
+ pi_suffix_ucNstr, 2);
+}
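+
+/*
+ * The three wrappers above produce, respectively:
+ *
+ *     <![CDATA[str]]>      <!--str-->      <?name value?>
+ *
+ * MakeXMLSpecialString inserts the single space between name and value
+ * only when the second string is non-empty, so a PI with an empty value
+ * renders as <?name?>.
+ */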
+
+/*
+ * ECMA-357 10.2.1 17(d-g) pulled out into a common subroutine that appends
+ * equals, a double quote, an attribute value, and a closing double quote.
+ */
+static void
+AppendAttributeValue(JSContext *cx, JSStringBuffer *sb, JSString *valstr)
+{
+ js_AppendCString(sb, "=\"");
+ valstr = js_EscapeAttributeValue(cx, valstr);
+ if (!valstr) {
+ free(sb->base);
+ sb->base = STRING_BUFFER_ERROR_BASE;
+ return;
+ }
+ js_AppendJSString(sb, valstr);
+ js_AppendChar(sb, '"');
+}
+
+/*
+ * ECMA-357 10.2.1.1 EscapeElementValue helper method.
+ *
+ * This function takes ownership of sb->base, if sb is non-null, in all cases
+ * of success or failure.
+ */
+static JSString *
+EscapeElementValue(JSContext *cx, JSStringBuffer *sb, JSString *str)
+{
+ size_t length, newlength;
+ const jschar *cp, *start, *end;
+ jschar c;
+
+ length = newlength = JSSTRING_LENGTH(str);
+ for (cp = start = JSSTRING_CHARS(str), end = cp + length; cp < end; cp++) {
+ c = *cp;
+ if (c == '<' || c == '>')
+ newlength += 3;
+ else if (c == '&')
+ newlength += 4;
+
+ if (newlength < length) {
+ JS_ReportOutOfMemory(cx);
+ return NULL;
+ }
+ }
+ if ((sb && STRING_BUFFER_OFFSET(sb) != 0) || newlength > length) {
+ JSStringBuffer localSB;
+ if (!sb) {
+ sb = &localSB;
+ js_InitStringBuffer(sb);
+ }
+ if (!sb->grow(sb, newlength)) {
+ JS_ReportOutOfMemory(cx);
+ return NULL;
+ }
+ for (cp = start; cp < end; cp++) {
+ c = *cp;
+ if (c == '<')
+ js_AppendCString(sb, js_lt_entity_str);
+ else if (c == '>')
+ js_AppendCString(sb, js_gt_entity_str);
+ else if (c == '&')
+ js_AppendCString(sb, js_amp_entity_str);
+ else
+ js_AppendChar(sb, c);
+ }
+ JS_ASSERT(STRING_BUFFER_OK(sb));
+ str = js_NewString(cx, sb->base, STRING_BUFFER_OFFSET(sb), 0);
+ if (!str)
+ js_FinishStringBuffer(sb);
+ }
+ return str;
+}
+
+/*
+ * ECMA-357 10.2.1.2 EscapeAttributeValue helper method.
+ * This function takes ownership of sb->base, if sb is non-null, in all cases.
+ */
+static JSString *
+EscapeAttributeValue(JSContext *cx, JSStringBuffer *sb, JSString *str)
+{
+ size_t length, newlength;
+ const jschar *cp, *start, *end;
+ jschar c;
+
+ length = newlength = JSSTRING_LENGTH(str);
+ for (cp = start = JSSTRING_CHARS(str), end = cp + length; cp < end; cp++) {
+ c = *cp;
+ if (c == '"')
+ newlength += 5;
+ else if (c == '<')
+ newlength += 3;
+ else if (c == '&' || c == '\n' || c == '\r' || c == '\t')
+ newlength += 4;
+
+ if (newlength < length) {
+ JS_ReportOutOfMemory(cx);
+ return NULL;
+ }
+ }
+ if ((sb && STRING_BUFFER_OFFSET(sb) != 0) || newlength > length) {
+ JSStringBuffer localSB;
+ if (!sb) {
+ sb = &localSB;
+ js_InitStringBuffer(sb);
+ }
+ if (!sb->grow(sb, newlength)) {
+ JS_ReportOutOfMemory(cx);
+ return NULL;
+ }
+ for (cp = start; cp < end; cp++) {
+ c = *cp;
+ if (c == '"')
+ js_AppendCString(sb, js_quot_entity_str);
+ else if (c == '<')
+ js_AppendCString(sb, js_lt_entity_str);
+ else if (c == '&')
+ js_AppendCString(sb, js_amp_entity_str);
+ else if (c == '\n')
+ js_AppendCString(sb, "&#xA;");
+ else if (c == '\r')
+ js_AppendCString(sb, "&#xD;");
+ else if (c == '\t')
+ js_AppendCString(sb, "&#x9;");
+ else
+ js_AppendChar(sb, c);
+ }
+ JS_ASSERT(STRING_BUFFER_OK(sb));
+ str = js_NewString(cx, sb->base, STRING_BUFFER_OFFSET(sb), 0);
+ if (!str)
+ js_FinishStringBuffer(sb);
+ }
+ return str;
+}
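+
+/*
+ * Escaping summary for the two routines above: EscapeElementValue maps
+ * '<', '>' and '&' to &lt;, &gt; and &amp;; EscapeAttributeValue maps
+ * '"', '<' and '&' to &quot;, &lt; and &amp;, and '\n', '\r', '\t' to
+ * &#xA;, &#xD; and &#x9; (note that '>' is left alone inside attribute
+ * values). When nothing needs escaping and no string buffer was passed
+ * in, the input string is returned unchanged.
+ */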
+
+/* 13.3.5.4 [[GetNamespace]]([InScopeNamespaces]) */
+static JSXMLNamespace *
+GetNamespace(JSContext *cx, JSXMLQName *qn, const JSXMLArray *inScopeNSes)
+{
+ JSXMLNamespace *match, *ns;
+ uint32 i, n;
+ jsval argv[2];
+ JSObject *nsobj;
+
+ JS_ASSERT(qn->uri);
+ if (!qn->uri) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_XML_NAMESPACE,
+ qn->prefix
+ ? js_ValueToPrintableString(cx,
+ STRING_TO_JSVAL(qn->prefix))
+ : js_type_strs[JSTYPE_VOID]);
+ return NULL;
+ }
+
+ /* Look for a matching namespace in inScopeNSes, if provided. */
+ match = NULL;
+ if (inScopeNSes) {
+ for (i = 0, n = inScopeNSes->length; i < n; i++) {
+ ns = XMLARRAY_MEMBER(inScopeNSes, i, JSXMLNamespace);
+ if (!ns)
+ continue;
+
+ /*
+ * Erratum, very tricky, and not specified in ECMA-357 13.3.5.4:
+ * If we preserve prefixes, we must match null qn->prefix against
+ * an empty ns->prefix, in order to avoid generating redundant
+ * prefixed and default namespaces for cases such as:
+ *
+ * x = <t xmlns="http://foo.com"/>
+ * print(x.toXMLString());
+ *
+ * Per 10.3.2.1, the namespace attribute in t has an empty string
+ * prefix (*not* a null prefix), per 10.3.2.1 Step 6(h)(i)(1):
+ *
+ * 1. If the [local name] property of a is "xmlns"
+ * a. Map ns.prefix to the empty string
+ *
+ * But t's name has a null prefix in this implementation, meaning
+ * *undefined*, per 10.3.2.1 Step 6(c)'s NOTE (which refers to
+ * the http://www.w3.org/TR/xml-infoset/ spec, item 2.2.3, without
+ * saying how "no value" maps to an ECMA-357 value -- but it must
+ * map to the *undefined* prefix value).
+ *
+ * Since "" != undefined (or null, in the current implementation)
+ * the ECMA-357 spec will fail to match in [[GetNamespace]] called
+ * on t with argument {} U {(prefix="", uri="http://foo.com")}.
+ * This spec bug leads to ToXMLString results that duplicate the
+ * declared namespace.
+ */
+ if (js_EqualStrings(ns->uri, qn->uri) &&
+ (ns->prefix == qn->prefix ||
+ ((ns->prefix && qn->prefix)
+ ? js_EqualStrings(ns->prefix, qn->prefix)
+ : IS_EMPTY(ns->prefix ? ns->prefix : qn->prefix)))) {
+ match = ns;
+ break;
+ }
+ }
+ }
+
+ /* If we didn't match, make a new namespace from qn. */
+ if (!match) {
+ argv[0] = qn->prefix ? STRING_TO_JSVAL(qn->prefix) : JSVAL_VOID;
+ argv[1] = STRING_TO_JSVAL(qn->uri);
+ nsobj = js_ConstructObject(cx, &js_NamespaceClass.base, NULL, NULL,
+ 2, argv);
+ if (!nsobj)
+ return NULL;
+ match = (JSXMLNamespace *) JS_GetPrivate(cx, nsobj);
+ }
+ return match;
+}
+
+static JSString *
+GeneratePrefix(JSContext *cx, JSString *uri, JSXMLArray *decls)
+{
+ const jschar *cp, *start, *end;
+ size_t length, newlength, offset;
+ uint32 i, n, m, serial;
+ jschar *bp, *dp;
+ JSBool done;
+ JSXMLNamespace *ns;
+ JSString *prefix;
+
+ JS_ASSERT(!IS_EMPTY(uri));
+
+ /*
+ * If there are no *declared* namespaces, skip all collision detection and
+ * return a short prefix quickly; an example of such a situation:
+ *
+ * var x = <f/>;
+ * var n = new Namespace("http://example.com/");
+ * x.@n::att = "val";
+ * x.toXMLString();
+ *
+ * This is necessary for various log10 uses below to be valid.
+ */
+ if (decls->length == 0)
+ return JS_NewStringCopyZ(cx, "a");
+
+ /*
+ * Try peeling off the last filename suffix or pathname component till
+ * we have a valid XML name. This heuristic will prefer "xul" given
+ * ".../there.is.only.xul", "xbl" given ".../xbl", and "xbl2" given any
+ * likely URI of the form ".../xbl2/2005".
+ */
+ start = JSSTRING_CHARS(uri);
+ cp = end = start + JSSTRING_LENGTH(uri);
+ while (--cp > start) {
+ if (*cp == '.' || *cp == '/' || *cp == ':') {
+ ++cp;
+ length = PTRDIFF(end, cp, jschar);
+ if (IsXMLName(cp, length) && !STARTS_WITH_XML(cp, length))
+ break;
+ end = --cp;
+ }
+ }
+ length = PTRDIFF(end, cp, jschar);
+
+ /*
+ * If the namespace consisted only of non-XML names or names that begin
+ * case-insensitively with "xml", arbitrarily create a prefix consisting
+ * of 'a's of size length (allowing dp-calculating code to work with or
+ * without this branch executing) plus the space for storing a hyphen and
+ * the serial number (avoiding reallocation if a collision happens).
+ */
+ bp = (jschar *) cp;
+ newlength = length;
+ if (STARTS_WITH_XML(cp, length) || !IsXMLName(cp, length)) {
+ newlength = length + 2 + (size_t) log10(decls->length);
+ bp = (jschar *)
+ JS_malloc(cx, (newlength + 1) * sizeof(jschar));
+ if (!bp)
+ return NULL;
+
+ bp[newlength] = 0;
+ for (i = 0; i < newlength; i++)
+ bp[i] = 'a';
+ }
+
+ /*
+ * Now search through decls looking for a collision. If we collide with
+ * an existing prefix, start tacking on a hyphen and a serial number.
+ */
+ serial = 0;
+ do {
+ done = JS_TRUE;
+ for (i = 0, n = decls->length; i < n; i++) {
+ ns = XMLARRAY_MEMBER(decls, i, JSXMLNamespace);
+ if (ns && ns->prefix &&
+ JSSTRING_LENGTH(ns->prefix) == newlength &&
+ !memcmp(JSSTRING_CHARS(ns->prefix), bp,
+ newlength * sizeof(jschar))) {
+ if (bp == cp) {
+ newlength = length + 2 + (size_t) log10(n);
+ bp = (jschar *)
+ JS_malloc(cx, (newlength + 1) * sizeof(jschar));
+ if (!bp)
+ return NULL;
+ js_strncpy(bp, cp, length);
+ }
+
+ ++serial;
+ JS_ASSERT(serial <= n);
+ dp = bp + length + 2 + (size_t) log10(serial);
+ *dp = 0;
+ for (m = serial; m != 0; m /= 10)
+ *--dp = (jschar)('0' + m % 10);
+ *--dp = '-';
+ JS_ASSERT(dp == bp + length);
+
+ done = JS_FALSE;
+ break;
+ }
+ }
+ } while (!done);
+
+ if (bp == cp) {
+ offset = PTRDIFF(cp, start, jschar);
+ prefix = js_NewDependentString(cx, uri, offset, length, 0);
+ } else {
+ prefix = js_NewString(cx, bp, newlength, 0);
+ if (!prefix)
+ JS_free(cx, bp);
+ }
+ return prefix;
+}
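+
+/*
+ * Example of the collision handling above: for the uri
+ * "http://www.mozilla.org/xbl" the peeling heuristic proposes "xbl"; if
+ * some member of decls already uses that prefix, the retry loop yields
+ * "xbl-1", then "xbl-2" on a further collision, and so on, with at most
+ * decls->length attempts needed.
+ */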
+
+static JSBool
+namespace_match(const void *a, const void *b)
+{
+ const JSXMLNamespace *nsa = (const JSXMLNamespace *) a;
+ const JSXMLNamespace *nsb = (const JSXMLNamespace *) b;
+
+ if (nsb->prefix)
+ return nsa->prefix && js_EqualStrings(nsa->prefix, nsb->prefix);
+ return js_EqualStrings(nsa->uri, nsb->uri);
+}
+
+/* ECMA-357 10.2.1 and 10.2.2 */
+static JSString *
+XMLToXMLString(JSContext *cx, JSXML *xml, const JSXMLArray *ancestorNSes,
+ uintN indentLevel)
+{
+ JSBool pretty, indentKids;
+ JSStringBuffer sb;
+ JSString *str, *prefix, *kidstr;
+ JSXMLArrayCursor cursor;
+ uint32 i, n;
+ JSXMLArray empty, decls, ancdecls;
+ JSXMLNamespace *ns, *ns2;
+ uintN nextIndentLevel;
+ JSXML *attr, *kid;
+
+ if (!GetBooleanXMLSetting(cx, js_prettyPrinting_str, &pretty))
+ return NULL;
+
+ js_InitStringBuffer(&sb);
+ if (pretty)
+ js_RepeatChar(&sb, ' ', indentLevel);
+ str = NULL;
+
+ switch (xml->xml_class) {
+ case JSXML_CLASS_TEXT:
+ /* Step 4. */
+ if (pretty) {
+ str = ChompXMLWhitespace(cx, xml->xml_value);
+ if (!str)
+ return NULL;
+ } else {
+ str = xml->xml_value;
+ }
+ return EscapeElementValue(cx, &sb, str);
+
+ case JSXML_CLASS_ATTRIBUTE:
+ /* Step 5. */
+ return EscapeAttributeValue(cx, &sb, xml->xml_value);
+
+ case JSXML_CLASS_COMMENT:
+ /* Step 6. */
+ return MakeXMLCommentString(cx, &sb, xml->xml_value);
+
+ case JSXML_CLASS_PROCESSING_INSTRUCTION:
+ /* Step 7. */
+ return MakeXMLPIString(cx, &sb, xml->name->localName, xml->xml_value);
+
+ case JSXML_CLASS_LIST:
+ /* ECMA-357 10.2.2. */
+ XMLArrayCursorInit(&cursor, &xml->xml_kids);
+ i = 0;
+ while ((kid = (JSXML *) XMLArrayCursorNext(&cursor)) != NULL) {
+ if (pretty && i != 0)
+ js_AppendChar(&sb, '\n');
+
+ kidstr = XMLToXMLString(cx, kid, ancestorNSes, indentLevel);
+ if (!kidstr)
+ break;
+
+ js_AppendJSString(&sb, kidstr);
+ ++i;
+ }
+ XMLArrayCursorFinish(&cursor);
+ if (kid)
+ goto list_out;
+
+ if (!sb.base) {
+ if (!STRING_BUFFER_OK(&sb)) {
+ JS_ReportOutOfMemory(cx);
+ return NULL;
+ }
+ return cx->runtime->emptyString;
+ }
+
+ str = js_NewString(cx, sb.base, STRING_BUFFER_OFFSET(&sb), 0);
+ list_out:
+ if (!str)
+ js_FinishStringBuffer(&sb);
+ return str;
+
+ default:;
+ }
+
+ /* After this point, control must flow through label out: to exit. */
+ if (!js_EnterLocalRootScope(cx))
+ return NULL;
+
+ /* ECMA-357 10.2.1 step 8 onward: handle ToXMLString on an XML element. */
+ if (!ancestorNSes) {
+ XMLArrayInit(cx, &empty, 0);
+ ancestorNSes = &empty;
+ }
+ XMLArrayInit(cx, &decls, 0);
+ ancdecls.capacity = 0;
+
+ /* Clone in-scope namespaces not in ancestorNSes into decls. */
+ XMLArrayCursorInit(&cursor, &xml->xml_namespaces);
+ while ((ns = (JSXMLNamespace *) XMLArrayCursorNext(&cursor)) != NULL) {
+ if (!ns->declared)
+ continue;
+ if (!XMLARRAY_HAS_MEMBER(ancestorNSes, ns, namespace_identity)) {
+ /* NOTE: may want to exclude unused namespaces here. */
+ ns2 = js_NewXMLNamespace(cx, ns->prefix, ns->uri, JS_TRUE);
+ if (!ns2 || !XMLARRAY_APPEND(cx, &decls, ns2))
+ break;
+ }
+ }
+ XMLArrayCursorFinish(&cursor);
+ if (ns)
+ goto out;
+
+ /*
+ * Union ancestorNSes and decls into ancdecls. Note that ancdecls does
+ * not own its member references. In the spec, ancdecls has no name, but
+ * is always written out as (AncestorNamespaces U namespaceDeclarations).
+ */
+ if (!XMLArrayInit(cx, &ancdecls, ancestorNSes->length + decls.length))
+ goto out;
+ for (i = 0, n = ancestorNSes->length; i < n; i++) {
+ ns2 = XMLARRAY_MEMBER(ancestorNSes, i, JSXMLNamespace);
+ if (!ns2)
+ continue;
+ JS_ASSERT(!XMLARRAY_HAS_MEMBER(&decls, ns2, namespace_identity));
+ if (!XMLARRAY_APPEND(cx, &ancdecls, ns2))
+ goto out;
+ }
+ for (i = 0, n = decls.length; i < n; i++) {
+ ns2 = XMLARRAY_MEMBER(&decls, i, JSXMLNamespace);
+ if (!ns2)
+ continue;
+ JS_ASSERT(!XMLARRAY_HAS_MEMBER(&ancdecls, ns2, namespace_identity));
+ if (!XMLARRAY_APPEND(cx, &ancdecls, ns2))
+ goto out;
+ }
+
+ /* Step 11, except we don't clone ns unless its prefix is undefined. */
+ ns = GetNamespace(cx, xml->name, &ancdecls);
+ if (!ns)
+ goto out;
+
+ /* Step 12 (NULL means *undefined* here), plus the deferred ns cloning. */
+ if (!ns->prefix) {
+ /*
+ * Create a namespace prefix that isn't used by any member of decls.
+ * Assign the new prefix to a copy of ns. Flag this namespace as if
+ * it were declared, for assertion-testing's sake later below.
+ *
+ * Erratum: if ns->prefix and xml->name are both null (*undefined* in
+ * ECMA-357), we know that xml was named using the default namespace
+ * (proof: see GetNamespace and the Namespace constructor called with
+ * two arguments). So we ought not generate a new prefix here, when
+ * we can declare ns as the default namespace for xml.
+ *
+ * This helps descendants inherit the namespace instead of redundantly
+ * redeclaring it with generated prefixes in each descendant.
+ */
+ if (!xml->name->prefix) {
+ prefix = cx->runtime->emptyString;
+ } else {
+ prefix = GeneratePrefix(cx, ns->uri, &ancdecls);
+ if (!prefix)
+ goto out;
+ }
+ ns = js_NewXMLNamespace(cx, prefix, ns->uri, JS_TRUE);
+ if (!ns)
+ goto out;
+
+ /*
+ * If the xml->name was unprefixed, we must remove any declared default
+ * namespace from decls before appending ns. How can you get a default
+ * namespace in decls that doesn't match the one from name? Apparently
+ * by calling x.setNamespace(ns) where ns has no prefix. The other way
+ * to fix this is to update x's in-scope namespaces when setNamespace
+ * is called, but that's not specified by ECMA-357.
+ *
+ * Likely Erratum here, depending on whether the lack of update to x's
+ * in-scope namespace in XML.prototype.setNamespace (13.4.4.36) is an
+ * erratum or not. Note that changing setNamespace to update the list
+ * of in-scope namespaces will change x.namespaceDeclarations().
+ */
+ if (IS_EMPTY(prefix)) {
+ i = XMLArrayFindMember(&decls, ns, namespace_match);
+ if (i != XML_NOT_FOUND)
+ XMLArrayDelete(cx, &decls, i, JS_TRUE);
+ }
+
+ /*
+ * In the spec, ancdecls has no name, but is always written out as
+ * (AncestorNamespaces U namespaceDeclarations). Since we compute
+ * that union in ancdecls, any time we append a namespace strong
+ * ref to decls, we must also append a weak ref to ancdecls. Order
+ * matters here: code at label out: releases strong refs in decls.
+ */
+ if (!XMLARRAY_APPEND(cx, &ancdecls, ns) ||
+ !XMLARRAY_APPEND(cx, &decls, ns)) {
+ goto out;
+ }
+ }
+
+ /* Format the element or point-tag into sb. */
+ js_AppendChar(&sb, '<');
+
+ if (ns->prefix && !IS_EMPTY(ns->prefix)) {
+ js_AppendJSString(&sb, ns->prefix);
+ js_AppendChar(&sb, ':');
+ }
+ js_AppendJSString(&sb, xml->name->localName);
+
+ /*
+ * Step 16 makes a union to avoid writing two loops in step 17, to share
+ * common attribute value appending spec-code. We prefer two loops for
+ * faster code and less data overhead.
+ */
+
+ /* Step 17(b): append attributes. */
+ XMLArrayCursorInit(&cursor, &xml->xml_attrs);
+ while ((attr = (JSXML *) XMLArrayCursorNext(&cursor)) != NULL) {
+ js_AppendChar(&sb, ' ');
+ ns2 = GetNamespace(cx, attr->name, &ancdecls);
+ if (!ns2)
+ break;
+
+ /* 17(b)(ii): NULL means *undefined* here. */
+ if (!ns2->prefix) {
+ prefix = GeneratePrefix(cx, ns2->uri, &ancdecls);
+ if (!prefix)
+ break;
+
+ /* Again, we avoid copying ns2 until we know it's prefix-less. */
+ ns2 = js_NewXMLNamespace(cx, prefix, ns2->uri, JS_TRUE);
+ if (!ns2)
+ break;
+
+ /*
+ * In the spec, ancdecls has no name, but is always written out as
+ * (AncestorNamespaces U namespaceDeclarations). Since we compute
+ * that union in ancdecls, any time we append a namespace strong
+ * ref to decls, we must also append a weak ref to ancdecls. Order
+ * matters here: code at label out: releases strong refs in decls.
+ */
+ if (!XMLARRAY_APPEND(cx, &ancdecls, ns2) ||
+ !XMLARRAY_APPEND(cx, &decls, ns2)) {
+ break;
+ }
+ }
+
+ /* 17(b)(iii). */
+ if (!IS_EMPTY(ns2->prefix)) {
+ js_AppendJSString(&sb, ns2->prefix);
+ js_AppendChar(&sb, ':');
+ }
+
+ /* 17(b)(iv). */
+ js_AppendJSString(&sb, attr->name->localName);
+
+ /* 17(d-g). */
+ AppendAttributeValue(cx, &sb, attr->xml_value);
+ }
+ XMLArrayCursorFinish(&cursor);
+ if (attr)
+ goto out;
+
+ /* Step 17(c): append XML namespace declarations. */
+ XMLArrayCursorInit(&cursor, &decls);
+ while ((ns2 = (JSXMLNamespace *) XMLArrayCursorNext(&cursor)) != NULL) {
+ JS_ASSERT(ns2->declared);
+
+ js_AppendCString(&sb, " xmlns");
+
+ /* 17(c)(ii): NULL means *undefined* here. */
+ if (!ns2->prefix) {
+ prefix = GeneratePrefix(cx, ns2->uri, &ancdecls);
+ if (!prefix)
+ break;
+ ns2->prefix = prefix;
+ }
+
+ /* 17(c)(iii). */
+ if (!IS_EMPTY(ns2->prefix)) {
+ js_AppendChar(&sb, ':');
+ js_AppendJSString(&sb, ns2->prefix);
+ }
+
+ /* 17(d-g). */
+ AppendAttributeValue(cx, &sb, ns2->uri);
+ }
+ XMLArrayCursorFinish(&cursor);
+ if (ns2)
+ goto out;
+
+ /* Step 18: handle point tags. */
+ n = xml->xml_kids.length;
+ if (n == 0) {
+ js_AppendCString(&sb, "/>");
+ } else {
+ /* Steps 19 through 25: handle element content, and open the end-tag. */
+ js_AppendChar(&sb, '>');
+ indentKids = n > 1 ||
+ (n == 1 &&
+ (kid = XMLARRAY_MEMBER(&xml->xml_kids, 0, JSXML)) &&
+ kid->xml_class != JSXML_CLASS_TEXT);
+
+ if (pretty && indentKids) {
+ if (!GetUint32XMLSetting(cx, js_prettyIndent_str, &i))
+ goto out;
+ nextIndentLevel = indentLevel + i;
+ } else {
+ nextIndentLevel = 0;
+ }
+
+ XMLArrayCursorInit(&cursor, &xml->xml_kids);
+ while ((kid = (JSXML *) XMLArrayCursorNext(&cursor)) != NULL) {
+ if (pretty && indentKids)
+ js_AppendChar(&sb, '\n');
+
+ kidstr = XMLToXMLString(cx, kid, &ancdecls, nextIndentLevel);
+ if (!kidstr)
+ break;
+
+ js_AppendJSString(&sb, kidstr);
+ }
+ XMLArrayCursorFinish(&cursor);
+ if (kid)
+ goto out;
+
+ if (pretty && indentKids) {
+ js_AppendChar(&sb, '\n');
+ js_RepeatChar(&sb, ' ', indentLevel);
+ }
+ js_AppendCString(&sb, "</");
+
+ /* Step 26. */
+ if (ns->prefix && !IS_EMPTY(ns->prefix)) {
+ js_AppendJSString(&sb, ns->prefix);
+ js_AppendChar(&sb, ':');
+ }
+
+ /* Step 27. */
+ js_AppendJSString(&sb, xml->name->localName);
+ js_AppendChar(&sb, '>');
+ }
+
+ if (!STRING_BUFFER_OK(&sb)) {
+ JS_ReportOutOfMemory(cx);
+ goto out;
+ }
+
+ str = js_NewString(cx, sb.base, STRING_BUFFER_OFFSET(&sb), 0);
+out:
+ js_LeaveLocalRootScopeWithResult(cx, STRING_TO_JSVAL(str));
+ if (!str && STRING_BUFFER_OK(&sb))
+ js_FinishStringBuffer(&sb);
+ XMLArrayFinish(cx, &decls);
+ if (ancdecls.capacity != 0)
+ XMLArrayFinish(cx, &ancdecls);
+ return str;
+}
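+
+/*
+ * Pretty-printing sketch: with XML.prettyPrinting enabled and
+ * XML.prettyIndent at its default of 2, serializing <a><b>text</b></a>
+ * through the element path above produces
+ *
+ *     <a>
+ *       <b>text</b>
+ *     </a>
+ *
+ * because <a>'s lone kid is an element (indentKids is true), while <b>'s
+ * lone kid is text, so <b> is emitted on one line at indent level 2.
+ */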
+
+/* ECMA-357 10.2 */
+static JSString *
+ToXMLString(JSContext *cx, jsval v)
+{
+ JSObject *obj;
+ JSString *str;
+ JSXML *xml;
+
+ if (JSVAL_IS_NULL(v) || JSVAL_IS_VOID(v)) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_XML_CONVERSION,
+ js_type_strs[JSVAL_IS_NULL(v)
+ ? JSTYPE_NULL
+ : JSTYPE_VOID]);
+ return NULL;
+ }
+
+ if (JSVAL_IS_BOOLEAN(v) || JSVAL_IS_NUMBER(v))
+ return js_ValueToString(cx, v);
+
+ if (JSVAL_IS_STRING(v))
+ return EscapeElementValue(cx, NULL, JSVAL_TO_STRING(v));
+
+ obj = JSVAL_TO_OBJECT(v);
+ if (!OBJECT_IS_XML(cx, obj)) {
+ if (!OBJ_DEFAULT_VALUE(cx, obj, JSTYPE_STRING, &v))
+ return NULL;
+ str = js_ValueToString(cx, v);
+ if (!str)
+ return NULL;
+ return EscapeElementValue(cx, NULL, str);
+ }
+
+ /* XMLToXMLString handles the non-element cases in its own switch. */
+ xml = (JSXML *) JS_GetPrivate(cx, obj);
+ return XMLToXMLString(cx, xml, NULL, 0);
+}
+
+static JSXMLQName *
+ToAttributeName(JSContext *cx, jsval v)
+{
+ JSString *name, *uri, *prefix;
+ JSObject *obj;
+ JSClass *clasp;
+ JSXMLQName *qn;
+ JSTempValueRooter tvr;
+
+ if (JSVAL_IS_STRING(v)) {
+ name = JSVAL_TO_STRING(v);
+ uri = prefix = cx->runtime->emptyString;
+ } else {
+ if (JSVAL_IS_PRIMITIVE(v)) {
+ name = js_DecompileValueGenerator(cx, JSDVG_IGNORE_STACK, v, NULL);
+ if (name) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_XML_ATTR_NAME,
+ JS_GetStringBytes(name));
+ }
+ return NULL;
+ }
+
+ obj = JSVAL_TO_OBJECT(v);
+ clasp = OBJ_GET_CLASS(cx, obj);
+ if (clasp == &js_AttributeNameClass)
+ return (JSXMLQName *) JS_GetPrivate(cx, obj);
+
+ if (clasp == &js_QNameClass.base) {
+ qn = (JSXMLQName *) JS_GetPrivate(cx, obj);
+ uri = qn->uri;
+ prefix = qn->prefix;
+ name = qn->localName;
+ } else {
+ if (clasp == &js_AnyNameClass) {
+ name = ATOM_TO_STRING(cx->runtime->atomState.starAtom);
+ } else {
+ name = js_ValueToString(cx, v);
+ if (!name)
+ return NULL;
+ }
+ uri = prefix = cx->runtime->emptyString;
+ }
+ }
+
+ qn = js_NewXMLQName(cx, uri, prefix, name);
+ if (!qn)
+ return NULL;
+
+ JS_PUSH_TEMP_ROOT_GCTHING(cx, qn, &tvr);
+ obj = js_GetAttributeNameObject(cx, qn);
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ if (!obj)
+ return NULL;
+ return qn;
+}
+
+static JSXMLQName *
+ToXMLName(JSContext *cx, jsval v, jsid *funidp)
+{
+ JSString *name;
+ JSObject *obj;
+ JSClass *clasp;
+ uint32 index;
+ JSXMLQName *qn;
+ JSAtom *atom;
+
+ if (JSVAL_IS_STRING(v)) {
+ name = JSVAL_TO_STRING(v);
+ } else {
+ if (JSVAL_IS_PRIMITIVE(v)) {
+ name = js_DecompileValueGenerator(cx, JSDVG_IGNORE_STACK, v, NULL);
+ if (name)
+ goto bad;
+ return NULL;
+ }
+
+ obj = JSVAL_TO_OBJECT(v);
+ clasp = OBJ_GET_CLASS(cx, obj);
+ if (clasp == &js_AttributeNameClass || clasp == &js_QNameClass.base)
+ goto out;
+ if (clasp == &js_AnyNameClass) {
+ name = ATOM_TO_STRING(cx->runtime->atomState.starAtom);
+ goto construct;
+ }
+ name = js_ValueToString(cx, v);
+ if (!name)
+ return NULL;
+ }
+
+ /*
+ * ECMA-357 10.6.1 step 1 seems to be incorrect. The spec says:
+ *
+ * 1. If ToString(ToNumber(P)) == ToString(P), throw a TypeError exception
+ *
+ * First, _P_ should be _s_, to refer to the given string.
+ *
+ * Second, why does ToXMLName applied to the string type throw TypeError
+ * only for numeric literals without any leading or trailing whitespace?
+ *
+ * If the idea is to reject uint32 property names, then the check needs to
+ * be stricter, to exclude hexadecimal and floating point literals.
+ */
+ if (js_IdIsIndex(STRING_TO_JSVAL(name), &index))
+ goto bad;
+
+ if (*JSSTRING_CHARS(name) == '@') {
+ name = js_NewDependentString(cx, name, 1, JSSTRING_LENGTH(name) - 1, 0);
+ if (!name)
+ return NULL;
+ *funidp = 0;
+ return ToAttributeName(cx, STRING_TO_JSVAL(name));
+ }
+
+construct:
+ v = STRING_TO_JSVAL(name);
+ obj = js_ConstructObject(cx, &js_QNameClass.base, NULL, NULL, 1, &v);
+ if (!obj)
+ return NULL;
+
+out:
+ qn = (JSXMLQName *) JS_GetPrivate(cx, obj);
+ atom = cx->runtime->atomState.lazy.functionNamespaceURIAtom;
+ if (qn->uri && atom &&
+ (qn->uri == ATOM_TO_STRING(atom) ||
+ js_EqualStrings(qn->uri, ATOM_TO_STRING(atom)))) {
+ if (!JS_ValueToId(cx, STRING_TO_JSVAL(qn->localName), funidp))
+ return NULL;
+ } else {
+ *funidp = 0;
+ }
+ return qn;
+
+bad:
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_XML_NAME,
+ js_ValueToPrintableString(cx, STRING_TO_JSVAL(name)));
+ return NULL;
+}
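+
+/*
+ * For example: ToXMLName("@id") strips the '@' and hands "id" to
+ * ToAttributeName; ToXMLName("3") takes the js_IdIsIndex branch and
+ * reports JSMSG_BAD_XML_NAME per the erratum note above; QName and
+ * AttributeName objects pass straight through to the out: label.
+ */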
+
+/* ECMA-357 9.1.1.13 XML [[AddInScopeNamespace]]. */
+static JSBool
+AddInScopeNamespace(JSContext *cx, JSXML *xml, JSXMLNamespace *ns)
+{
+ JSXMLNamespace *match, *ns2;
+ uint32 i, n, m;
+
+ if (xml->xml_class != JSXML_CLASS_ELEMENT)
+ return JS_TRUE;
+
+ /* NULL means *undefined* here -- see ECMA-357 9.1.1.13 step 2. */
+ if (!ns->prefix) {
+ match = NULL;
+ for (i = 0, n = xml->xml_namespaces.length; i < n; i++) {
+ ns2 = XMLARRAY_MEMBER(&xml->xml_namespaces, i, JSXMLNamespace);
+ if (ns2 && js_EqualStrings(ns2->uri, ns->uri)) {
+ match = ns2;
+ break;
+ }
+ }
+ if (!match && !XMLARRAY_ADD_MEMBER(cx, &xml->xml_namespaces, n, ns))
+ return JS_FALSE;
+ } else {
+ if (IS_EMPTY(ns->prefix) && IS_EMPTY(xml->name->uri))
+ return JS_TRUE;
+ match = NULL;
+#ifdef __GNUC__ /* suppress bogus gcc warnings */
+ m = XML_NOT_FOUND;
+#endif
+ for (i = 0, n = xml->xml_namespaces.length; i < n; i++) {
+ ns2 = XMLARRAY_MEMBER(&xml->xml_namespaces, i, JSXMLNamespace);
+ if (ns2 && ns2->prefix &&
+ js_EqualStrings(ns2->prefix, ns->prefix)) {
+ match = ns2;
+ m = i;
+ break;
+ }
+ }
+ if (match && !js_EqualStrings(match->uri, ns->uri)) {
+ ns2 = XMLARRAY_DELETE(cx, &xml->xml_namespaces, m, JS_TRUE,
+ JSXMLNamespace);
+ JS_ASSERT(ns2 == match);
+ match->prefix = NULL;
+ if (!AddInScopeNamespace(cx, xml, match))
+ return JS_FALSE;
+ }
+ if (!XMLARRAY_APPEND(cx, &xml->xml_namespaces, ns))
+ return JS_FALSE;
+ }
+
+ /* OPTION: enforce that descendants have superset namespaces. */
+ return JS_TRUE;
+}
+
+/* ECMA-357 9.2.1.6 XMLList [[Append]]. */
+static JSBool
+Append(JSContext *cx, JSXML *list, JSXML *xml)
+{
+ uint32 i, j, k, n;
+ JSXML *kid;
+
+ JS_ASSERT(list->xml_class == JSXML_CLASS_LIST);
+ i = list->xml_kids.length;
+ n = 1;
+ if (xml->xml_class == JSXML_CLASS_LIST) {
+ list->xml_target = xml->xml_target;
+ list->xml_targetprop = xml->xml_targetprop;
+ n = JSXML_LENGTH(xml);
+ k = i + n;
+ if (!XMLArraySetCapacity(cx, &list->xml_kids, k))
+ return JS_FALSE;
+ for (j = 0; j < n; j++) {
+ kid = XMLARRAY_MEMBER(&xml->xml_kids, j, JSXML);
+ if (kid)
+ XMLARRAY_SET_MEMBER(&list->xml_kids, i + j, kid);
+ }
+ return JS_TRUE;
+ }
+
+ list->xml_target = xml->parent;
+ if (xml->xml_class == JSXML_CLASS_PROCESSING_INSTRUCTION)
+ list->xml_targetprop = NULL;
+ else
+ list->xml_targetprop = xml->name;
+ if (!XMLARRAY_ADD_MEMBER(cx, &list->xml_kids, i, xml))
+ return JS_FALSE;
+ return JS_TRUE;
+}
+
+/* ECMA-357 9.1.1.7 XML [[DeepCopy]] and 9.2.1.7 XMLList [[DeepCopy]]. */
+static JSXML *
+DeepCopyInLRS(JSContext *cx, JSXML *xml, uintN flags);
+
+static JSXML *
+DeepCopy(JSContext *cx, JSXML *xml, JSObject *obj, uintN flags)
+{
+ JSXML *copy;
+ JSBool ok;
+
+ /* Our caller may not be protecting newborns with a local root scope. */
+ if (!js_EnterLocalRootScope(cx))
+ return NULL;
+ copy = DeepCopyInLRS(cx, xml, flags);
+ if (copy) {
+ if (obj) {
+ /* Caller provided the object for this copy, hook 'em up. */
+ ok = JS_SetPrivate(cx, obj, copy);
+ if (ok)
+ copy->object = obj;
+ } else {
+ ok = js_GetXMLObject(cx, copy) != NULL;
+ }
+ if (!ok)
+ copy = NULL;
+ }
+ js_LeaveLocalRootScopeWithResult(cx, (jsval) copy);
+ return copy;
+}
+
+/*
+ * (i) We must be in a local root scope (InLRS).
+ * (ii) parent must have a rooted object.
+ * (iii) from's owning object must be locked if not thread-local.
+ */
+static JSBool
+DeepCopySetInLRS(JSContext *cx, JSXMLArray *from, JSXMLArray *to, JSXML *parent,
+ uintN flags)
+{
+ uint32 j, n;
+ JSXMLArrayCursor cursor;
+ JSBool ok;
+ JSXML *kid, *kid2;
+ JSString *str;
+
+ JS_ASSERT(cx->localRootStack);
+
+ n = from->length;
+ if (!XMLArraySetCapacity(cx, to, n))
+ return JS_FALSE;
+
+ XMLArrayCursorInit(&cursor, from);
+ j = 0;
+ ok = JS_TRUE;
+ while ((kid = (JSXML *) XMLArrayCursorNext(&cursor)) != NULL) {
+ if ((flags & XSF_IGNORE_COMMENTS) &&
+ kid->xml_class == JSXML_CLASS_COMMENT) {
+ continue;
+ }
+ if ((flags & XSF_IGNORE_PROCESSING_INSTRUCTIONS) &&
+ kid->xml_class == JSXML_CLASS_PROCESSING_INSTRUCTION) {
+ continue;
+ }
+ if ((flags & XSF_IGNORE_WHITESPACE) &&
+ (kid->xml_flags & XMLF_WHITESPACE_TEXT)) {
+ continue;
+ }
+ kid2 = DeepCopyInLRS(cx, kid, flags);
+ if (!kid2) {
+ to->length = j;
+ ok = JS_FALSE;
+ break;
+ }
+
+ if ((flags & XSF_IGNORE_WHITESPACE) &&
+ n > 1 && kid2->xml_class == JSXML_CLASS_TEXT) {
+ str = ChompXMLWhitespace(cx, kid2->xml_value);
+ if (!str) {
+ to->length = j;
+ ok = JS_FALSE;
+ break;
+ }
+ kid2->xml_value = str;
+ }
+
+ XMLARRAY_SET_MEMBER(to, j, kid2);
+ ++j;
+ if (parent->xml_class != JSXML_CLASS_LIST)
+ kid2->parent = parent;
+ }
+ XMLArrayCursorFinish(&cursor);
+ if (!ok)
+ return JS_FALSE;
+
+ if (j < n)
+ XMLArrayTrim(to);
+ return JS_TRUE;
+}
+
+static JSXML *
+DeepCopyInLRS(JSContext *cx, JSXML *xml, uintN flags)
+{
+ JSXML *copy;
+ JSXMLQName *qn;
+ JSBool ok;
+ uint32 i, n;
+ JSXMLNamespace *ns, *ns2;
+
+ /* Our caller must be protecting newborn objects. */
+ JS_ASSERT(cx->localRootStack);
+
+ copy = js_NewXML(cx, xml->xml_class);
+ if (!copy)
+ return NULL;
+ qn = xml->name;
+ if (qn) {
+ qn = js_NewXMLQName(cx, qn->uri, qn->prefix, qn->localName);
+ if (!qn) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ }
+ copy->name = qn;
+ copy->xml_flags = xml->xml_flags;
+
+ if (JSXML_HAS_VALUE(xml)) {
+ copy->xml_value = xml->xml_value;
+ ok = JS_TRUE;
+ } else {
+ ok = DeepCopySetInLRS(cx, &xml->xml_kids, &copy->xml_kids, copy, flags);
+ if (!ok)
+ goto out;
+
+ if (xml->xml_class == JSXML_CLASS_LIST) {
+ copy->xml_target = xml->xml_target;
+ copy->xml_targetprop = xml->xml_targetprop;
+ } else {
+ n = xml->xml_namespaces.length;
+ ok = XMLArraySetCapacity(cx, &copy->xml_namespaces, n);
+ if (!ok)
+ goto out;
+ for (i = 0; i < n; i++) {
+ ns = XMLARRAY_MEMBER(&xml->xml_namespaces, i, JSXMLNamespace);
+ if (!ns)
+ continue;
+ ns2 = js_NewXMLNamespace(cx, ns->prefix, ns->uri, ns->declared);
+ if (!ns2) {
+ copy->xml_namespaces.length = i;
+ ok = JS_FALSE;
+ goto out;
+ }
+ XMLARRAY_SET_MEMBER(&copy->xml_namespaces, i, ns2);
+ }
+
+ ok = DeepCopySetInLRS(cx, &xml->xml_attrs, &copy->xml_attrs, copy,
+ 0);
+ if (!ok)
+ goto out;
+ }
+ }
+
+out:
+ if (!ok)
+ return NULL;
+ return copy;
+}
+
+static void
+ReportBadXMLName(JSContext *cx, jsval id)
+{
+ JSString *name;
+
+ name = js_DecompileValueGenerator(cx, JSDVG_IGNORE_STACK, id, NULL);
+ if (name) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_XML_NAME,
+ JS_GetStringBytes(name));
+ }
+}
+
+/* ECMA-357 9.1.1.4 XML [[DeleteByIndex]]. */
+static JSBool
+DeleteByIndex(JSContext *cx, JSXML *xml, jsval id, jsval *vp)
+{
+ uint32 index;
+ JSXML *kid;
+
+ if (!js_IdIsIndex(id, &index)) {
+ ReportBadXMLName(cx, id);
+ return JS_FALSE;
+ }
+
+ if (JSXML_HAS_KIDS(xml) && index < xml->xml_kids.length) {
+ kid = XMLARRAY_MEMBER(&xml->xml_kids, index, JSXML);
+ if (kid)
+ kid->parent = NULL;
+ XMLArrayDelete(cx, &xml->xml_kids, index, JS_TRUE);
+ }
+
+ *vp = JSVAL_TRUE;
+ return JS_TRUE;
+}
+
+typedef JSBool (*JSXMLNameMatcher)(JSXMLQName *nameqn, JSXML *xml);
+
+static JSBool
+MatchAttrName(JSXMLQName *nameqn, JSXML *attr)
+{
+ JSXMLQName *attrqn = attr->name;
+
+ return (IS_STAR(nameqn->localName) ||
+ js_EqualStrings(attrqn->localName, nameqn->localName)) &&
+ (!nameqn->uri ||
+ js_EqualStrings(attrqn->uri, nameqn->uri));
+}
+
+static JSBool
+MatchElemName(JSXMLQName *nameqn, JSXML *elem)
+{
+ return (IS_STAR(nameqn->localName) ||
+ (elem->xml_class == JSXML_CLASS_ELEMENT &&
+ js_EqualStrings(elem->name->localName, nameqn->localName))) &&
+ (!nameqn->uri ||
+ (elem->xml_class == JSXML_CLASS_ELEMENT &&
+ js_EqualStrings(elem->name->uri, nameqn->uri)));
+}
+
+/* ECMA-357 9.1.1.8 XML [[Descendants]] and 9.2.1.8 XMLList [[Descendants]]. */
+static JSBool
+DescendantsHelper(JSContext *cx, JSXML *xml, JSXMLQName *nameqn, JSXML *list)
+{
+ uint32 i, n;
+ JSXML *attr, *kid;
+
+ if (xml->xml_class == JSXML_CLASS_ELEMENT &&
+ OBJ_GET_CLASS(cx, nameqn->object) == &js_AttributeNameClass) {
+ for (i = 0, n = xml->xml_attrs.length; i < n; i++) {
+ attr = XMLARRAY_MEMBER(&xml->xml_attrs, i, JSXML);
+ if (attr && MatchAttrName(nameqn, attr)) {
+ if (!Append(cx, list, attr))
+ return JS_FALSE;
+ }
+ }
+ }
+
+ for (i = 0, n = JSXML_LENGTH(xml); i < n; i++) {
+ kid = XMLARRAY_MEMBER(&xml->xml_kids, i, JSXML);
+ if (!kid)
+ continue;
+ if (OBJ_GET_CLASS(cx, nameqn->object) != &js_AttributeNameClass &&
+ MatchElemName(nameqn, kid)) {
+ if (!Append(cx, list, kid))
+ return JS_FALSE;
+ }
+ if (!DescendantsHelper(cx, kid, nameqn, list))
+ return JS_FALSE;
+ }
+ return JS_TRUE;
+}
+
+static JSXML *
+Descendants(JSContext *cx, JSXML *xml, jsval id)
+{
+ jsid funid;
+ JSXMLQName *nameqn;
+ JSObject *listobj;
+ JSXML *list, *kid;
+ uint32 i, n;
+ JSBool ok;
+
+ nameqn = ToXMLName(cx, id, &funid);
+ if (!nameqn)
+ return NULL;
+
+ listobj = js_NewXMLObject(cx, JSXML_CLASS_LIST);
+ if (!listobj)
+ return NULL;
+ list = (JSXML *) JS_GetPrivate(cx, listobj);
+ if (funid)
+ return list;
+
+ /*
+ * Protect nameqn's object and strings from GC by linking list to it
+ * temporarily. The cx->newborn[GCX_OBJECT] GC root protects listobj,
+ * which protects list. Any other object allocations occurring beneath
+ * DescendantsHelper use local roots.
+ */
+ list->name = nameqn;
+ if (!js_EnterLocalRootScope(cx))
+ return NULL;
+ if (xml->xml_class == JSXML_CLASS_LIST) {
+ ok = JS_TRUE;
+ for (i = 0, n = xml->xml_kids.length; i < n; i++) {
+ kid = XMLARRAY_MEMBER(&xml->xml_kids, i, JSXML);
+ if (kid && kid->xml_class == JSXML_CLASS_ELEMENT) {
+ ok = DescendantsHelper(cx, kid, nameqn, list);
+ if (!ok)
+ break;
+ }
+ }
+ } else {
+ ok = DescendantsHelper(cx, xml, nameqn, list);
+ }
+ js_LeaveLocalRootScopeWithResult(cx, (jsval) list);
+ if (!ok)
+ return NULL;
+ list->name = NULL;
+ return list;
+}
+
+static JSBool
+xml_equality(JSContext *cx, JSObject *obj, jsval v, JSBool *bp);
+
+/* Recursive (JSXML *) parameterized version of Equals. */
+static JSBool
+XMLEquals(JSContext *cx, JSXML *xml, JSXML *vxml, JSBool *bp)
+{
+ JSXMLQName *qn, *vqn;
+ uint32 i, j, n;
+ JSXMLArrayCursor cursor, vcursor;
+ JSXML *kid, *vkid, *attr, *vattr;
+ JSBool ok;
+ JSObject *xobj, *vobj;
+
+retry:
+ if (xml->xml_class != vxml->xml_class) {
+ if (xml->xml_class == JSXML_CLASS_LIST && xml->xml_kids.length == 1) {
+ xml = XMLARRAY_MEMBER(&xml->xml_kids, 0, JSXML);
+ if (xml)
+ goto retry;
+ }
+ if (vxml->xml_class == JSXML_CLASS_LIST && vxml->xml_kids.length == 1) {
+ vxml = XMLARRAY_MEMBER(&vxml->xml_kids, 0, JSXML);
+ if (vxml)
+ goto retry;
+ }
+ *bp = JS_FALSE;
+ return JS_TRUE;
+ }
+
+ qn = xml->name;
+ vqn = vxml->name;
+ if (qn) {
+ *bp = vqn &&
+ js_EqualStrings(qn->localName, vqn->localName) &&
+ js_EqualStrings(qn->uri, vqn->uri);
+ } else {
+ *bp = vqn == NULL;
+ }
+ if (!*bp)
+ return JS_TRUE;
+
+ if (JSXML_HAS_VALUE(xml)) {
+ *bp = js_EqualStrings(xml->xml_value, vxml->xml_value);
+ } else if (xml->xml_kids.length != vxml->xml_kids.length) {
+ *bp = JS_FALSE;
+ } else {
+ XMLArrayCursorInit(&cursor, &xml->xml_kids);
+ XMLArrayCursorInit(&vcursor, &vxml->xml_kids);
+ for (;;) {
+ kid = (JSXML *) XMLArrayCursorNext(&cursor);
+ vkid = (JSXML *) XMLArrayCursorNext(&vcursor);
+ if (!kid || !vkid) {
+ *bp = !kid && !vkid;
+ ok = JS_TRUE;
+ break;
+ }
+ xobj = js_GetXMLObject(cx, kid);
+ vobj = js_GetXMLObject(cx, vkid);
+ ok = xobj && vobj &&
+ xml_equality(cx, xobj, OBJECT_TO_JSVAL(vobj), bp);
+ if (!ok || !*bp)
+ break;
+ }
+ XMLArrayCursorFinish(&vcursor);
+ XMLArrayCursorFinish(&cursor);
+ if (!ok)
+ return JS_FALSE;
+
+ if (*bp && xml->xml_class == JSXML_CLASS_ELEMENT) {
+ n = xml->xml_attrs.length;
+ if (n != vxml->xml_attrs.length)
+ *bp = JS_FALSE;
+ for (i = 0; *bp && i < n; i++) {
+ attr = XMLARRAY_MEMBER(&xml->xml_attrs, i, JSXML);
+ if (!attr)
+ continue;
+ j = XMLARRAY_FIND_MEMBER(&vxml->xml_attrs, attr, attr_identity);
+ if (j == XML_NOT_FOUND) {
+ *bp = JS_FALSE;
+ break;
+ }
+ vattr = XMLARRAY_MEMBER(&vxml->xml_attrs, j, JSXML);
+ if (!vattr)
+ continue;
+ *bp = js_EqualStrings(attr->xml_value, vattr->xml_value);
+ }
+ }
+ }
+
+ return JS_TRUE;
+}
+
+/* ECMA-357 9.1.1.9 XML [[Equals]] and 9.2.1.9 XMLList [[Equals]]. */
+static JSBool
+Equals(JSContext *cx, JSXML *xml, jsval v, JSBool *bp)
+{
+ JSObject *vobj;
+ JSXML *vxml;
+
+ if (JSVAL_IS_PRIMITIVE(v)) {
+ *bp = JS_FALSE;
+ if (xml->xml_class == JSXML_CLASS_LIST) {
+ if (xml->xml_kids.length == 1) {
+ vxml = XMLARRAY_MEMBER(&xml->xml_kids, 0, JSXML);
+ if (!vxml)
+ return JS_TRUE;
+ vobj = js_GetXMLObject(cx, vxml);
+ if (!vobj)
+ return JS_FALSE;
+ return js_XMLObjectOps.equality(cx, vobj, v, bp);
+ }
+ if (JSVAL_IS_VOID(v) && xml->xml_kids.length == 0)
+ *bp = JS_TRUE;
+ }
+ } else {
+ vobj = JSVAL_TO_OBJECT(v);
+ if (!OBJECT_IS_XML(cx, vobj)) {
+ *bp = JS_FALSE;
+ } else {
+ vxml = (JSXML *) JS_GetPrivate(cx, vobj);
+ if (!XMLEquals(cx, xml, vxml, bp))
+ return JS_FALSE;
+ }
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+CheckCycle(JSContext *cx, JSXML *xml, JSXML *kid)
+{
+ JS_ASSERT(kid->xml_class != JSXML_CLASS_LIST);
+
+ do {
+ if (xml == kid) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_CYCLIC_VALUE, js_XML_str);
+ return JS_FALSE;
+ }
+ } while ((xml = xml->parent) != NULL);
+
+ return JS_TRUE;
+}
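+
+/*
+ * CheckCycle walks up from xml through its ancestors, so inserting an
+ * element into itself or into one of its own descendants (for example
+ * x.appendChild(x)) is rejected with JSMSG_CYCLIC_VALUE instead of
+ * building an unbounded tree.
+ */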
+
+/* ECMA-357 9.1.1.11 XML [[Insert]]. */
+static JSBool
+Insert(JSContext *cx, JSXML *xml, uint32 i, jsval v)
+{
+ uint32 j, n;
+ JSXML *vxml, *kid;
+ JSObject *vobj;
+ JSString *str;
+
+ if (!JSXML_HAS_KIDS(xml))
+ return JS_TRUE;
+
+ n = 1;
+ vxml = NULL;
+ if (!JSVAL_IS_PRIMITIVE(v)) {
+ vobj = JSVAL_TO_OBJECT(v);
+ if (OBJECT_IS_XML(cx, vobj)) {
+ vxml = (JSXML *) JS_GetPrivate(cx, vobj);
+ if (vxml->xml_class == JSXML_CLASS_LIST) {
+ n = vxml->xml_kids.length;
+ if (n == 0)
+ return JS_TRUE;
+ for (j = 0; j < n; j++) {
+ kid = XMLARRAY_MEMBER(&vxml->xml_kids, j, JSXML);
+ if (!kid)
+ continue;
+ if (!CheckCycle(cx, xml, kid))
+ return JS_FALSE;
+ }
+ } else if (vxml->xml_class == JSXML_CLASS_ELEMENT) {
+ /* OPTION: enforce that descendants have superset namespaces. */
+ if (!CheckCycle(cx, xml, vxml))
+ return JS_FALSE;
+ }
+ }
+ }
+ if (!vxml) {
+ str = js_ValueToString(cx, v);
+ if (!str)
+ return JS_FALSE;
+
+ vxml = js_NewXML(cx, JSXML_CLASS_TEXT);
+ if (!vxml)
+ return JS_FALSE;
+ vxml->xml_value = str;
+ }
+
+ if (i > xml->xml_kids.length)
+ i = xml->xml_kids.length;
+
+ if (!XMLArrayInsert(cx, &xml->xml_kids, i, n))
+ return JS_FALSE;
+
+ if (vxml->xml_class == JSXML_CLASS_LIST) {
+ for (j = 0; j < n; j++) {
+ kid = XMLARRAY_MEMBER(&vxml->xml_kids, j, JSXML);
+ if (!kid)
+ continue;
+ kid->parent = xml;
+ XMLARRAY_SET_MEMBER(&xml->xml_kids, i + j, kid);
+
+ /* OPTION: enforce that descendants have superset namespaces. */
+ }
+ } else {
+ vxml->parent = xml;
+ XMLARRAY_SET_MEMBER(&xml->xml_kids, i, vxml);
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+IndexToIdVal(JSContext *cx, uint32 index, jsval *idvp)
+{
+ JSString *str;
+
+ if (index <= JSVAL_INT_MAX) {
+ *idvp = INT_TO_JSVAL(index);
+ } else {
+ str = js_NumberToString(cx, (jsdouble) index);
+ if (!str)
+ return JS_FALSE;
+ *idvp = STRING_TO_JSVAL(str);
+ }
+ return JS_TRUE;
+}
+
+/* ECMA-357 9.1.1.12 XML [[Replace]]. */
+static JSBool
+Replace(JSContext *cx, JSXML *xml, jsval id, jsval v)
+{
+ uint32 i, n;
+ JSXML *vxml, *kid;
+ JSObject *vobj;
+ jsval junk;
+ JSString *str;
+
+ if (!JSXML_HAS_KIDS(xml))
+ return JS_TRUE;
+
+ if (!js_IdIsIndex(id, &i)) {
+ ReportBadXMLName(cx, id);
+ return JS_FALSE;
+ }
+
+ /*
+ * 9.1.1.12
+ * [[Replace]] handles _i >= x.[[Length]]_ by incrementing _x.[[Length]]_.
+ * It should therefore constrain callers to pass in _i <= x.[[Length]]_.
+ */
+ n = xml->xml_kids.length;
+ if (i >= n) {
+ if (!IndexToIdVal(cx, n, &id))
+ return JS_FALSE;
+ i = n;
+ }
+
+ vxml = NULL;
+ if (!JSVAL_IS_PRIMITIVE(v)) {
+ vobj = JSVAL_TO_OBJECT(v);
+ if (OBJECT_IS_XML(cx, vobj))
+ vxml = (JSXML *) JS_GetPrivate(cx, vobj);
+ }
+
+ switch (vxml ? vxml->xml_class : JSXML_CLASS_LIMIT) {
+ case JSXML_CLASS_ELEMENT:
+ /* OPTION: enforce that descendants have superset namespaces. */
+ if (!CheckCycle(cx, xml, vxml))
+ return JS_FALSE;
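+ /* FALL THROUGH to the value classes, which jump to do_replace. */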
+ case JSXML_CLASS_COMMENT:
+ case JSXML_CLASS_PROCESSING_INSTRUCTION:
+ case JSXML_CLASS_TEXT:
+ goto do_replace;
+
+ case JSXML_CLASS_LIST:
+ if (i < n && !DeleteByIndex(cx, xml, id, &junk))
+ return JS_FALSE;
+ if (!Insert(cx, xml, i, v))
+ return JS_FALSE;
+ break;
+
+ default:
+ str = js_ValueToString(cx, v);
+ if (!str)
+ return JS_FALSE;
+
+ vxml = js_NewXML(cx, JSXML_CLASS_TEXT);
+ if (!vxml)
+ return JS_FALSE;
+ vxml->xml_value = str;
+
+ do_replace:
+ vxml->parent = xml;
+ if (i < n) {
+ kid = XMLARRAY_MEMBER(&xml->xml_kids, i, JSXML);
+ if (kid)
+ kid->parent = NULL;
+ }
+ if (!XMLARRAY_ADD_MEMBER(cx, &xml->xml_kids, i, vxml))
+ return JS_FALSE;
+ break;
+ }
+
+ return JS_TRUE;
+}
+
+/* Forward declared -- its implementation uses other statics that call it. */
+static JSBool
+ResolveValue(JSContext *cx, JSXML *list, JSXML **result);
+
+/* ECMA-357 9.1.1.3 XML [[Delete]] and 9.2.1.3 XMLList [[Delete]]. */
+static JSBool
+DeleteProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ JSXML *xml, *kid, *parent;
+ JSBool isIndex;
+ JSXMLArray *array;
+ uint32 length, index, kidIndex, deleteCount;
+ JSXMLQName *nameqn;
+ jsid funid;
+ JSObject *nameobj, *kidobj;
+ JSXMLNameMatcher matcher;
+
+ xml = (JSXML *) JS_GetPrivate(cx, obj);
+ isIndex = js_IdIsIndex(id, &index);
+ if (JSXML_HAS_KIDS(xml)) {
+ array = &xml->xml_kids;
+ length = array->length;
+ } else {
+ array = NULL;
+ length = 0;
+ }
+
+ if (xml->xml_class == JSXML_CLASS_LIST) {
+ /* ECMA-357 9.2.1.3. */
+ if (isIndex && index < length) {
+ kid = XMLARRAY_MEMBER(array, index, JSXML);
+ if (!kid)
+ goto out;
+ parent = kid->parent;
+ if (parent) {
+ JS_ASSERT(parent != xml);
+ JS_ASSERT(JSXML_HAS_KIDS(parent));
+
+ if (kid->xml_class == JSXML_CLASS_ATTRIBUTE) {
+ nameqn = kid->name;
+ nameobj = js_GetAttributeNameObject(cx, nameqn);
+ if (!nameobj || !js_GetXMLObject(cx, parent))
+ return JS_FALSE;
+
+ id = OBJECT_TO_JSVAL(nameobj);
+ if (!DeleteProperty(cx, parent->object, id, vp))
+ return JS_FALSE;
+ } else {
+ kidIndex = XMLARRAY_FIND_MEMBER(&parent->xml_kids, kid,
+ NULL);
+ JS_ASSERT(kidIndex != XML_NOT_FOUND);
+ if (!IndexToIdVal(cx, kidIndex, &id))
+ return JS_FALSE;
+ if (!DeleteByIndex(cx, parent, id, vp))
+ return JS_FALSE;
+ }
+ }
+
+ XMLArrayDelete(cx, array, index, JS_TRUE);
+ } else {
+ for (index = 0; index < length; index++) {
+ kid = XMLARRAY_MEMBER(array, index, JSXML);
+ if (kid && kid->xml_class == JSXML_CLASS_ELEMENT) {
+ kidobj = js_GetXMLObject(cx, kid);
+ if (!kidobj || !DeleteProperty(cx, kidobj, id, vp))
+ return JS_FALSE;
+ }
+ }
+ }
+ } else {
+ /* ECMA-357 9.1.1.3. */
+ if (isIndex) {
+ /* See NOTE in spec: this variation is reserved for future use. */
+ ReportBadXMLName(cx, id);
+ return JS_FALSE;
+ }
+
+ nameqn = ToXMLName(cx, id, &funid);
+ if (!nameqn)
+ return JS_FALSE;
+ if (funid)
+ goto out;
+ nameobj = nameqn->object;
+
+ if (OBJ_GET_CLASS(cx, nameobj) == &js_AttributeNameClass) {
+ if (xml->xml_class != JSXML_CLASS_ELEMENT)
+ goto out;
+ array = &xml->xml_attrs;
+ length = array->length;
+ matcher = MatchAttrName;
+ } else {
+ matcher = MatchElemName;
+ }
+ if (length != 0) {
+ deleteCount = 0;
+ for (index = 0; index < length; index++) {
+ kid = XMLARRAY_MEMBER(array, index, JSXML);
+ if (kid && matcher(nameqn, kid)) {
+ kid->parent = NULL;
+ XMLArrayDelete(cx, array, index, JS_FALSE);
+ ++deleteCount;
+ } else if (deleteCount != 0) {
+ XMLARRAY_SET_MEMBER(array,
+ index - deleteCount,
+ array->vector[index]);
+ }
+ }
+ array->length -= deleteCount;
+ }
+ }
+
+out:
+ *vp = JSVAL_TRUE;
+ return JS_TRUE;
+}
+
+static JSBool
+SyncInScopeNamespaces(JSContext *cx, JSXML *xml)
+{
+ JSXMLArray *nsarray;
+ uint32 i, n;
+ JSXMLNamespace *ns;
+
+ nsarray = &xml->xml_namespaces;
+ while ((xml = xml->parent) != NULL) {
+ for (i = 0, n = xml->xml_namespaces.length; i < n; i++) {
+ ns = XMLARRAY_MEMBER(&xml->xml_namespaces, i, JSXMLNamespace);
+ if (ns && !XMLARRAY_HAS_MEMBER(nsarray, ns, namespace_identity)) {
+ if (!XMLARRAY_APPEND(cx, nsarray, ns))
+ return JS_FALSE;
+ }
+ }
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+GetNamedProperty(JSContext *cx, JSXML *xml, JSXMLQName* nameqn,
+ JSBool attributes, JSXML *list)
+{
+ JSXMLArray *array;
+ JSXMLNameMatcher matcher;
+ JSXMLArrayCursor cursor;
+ JSXML *kid;
+ JSBool ok;
+
+ if (!JSXML_HAS_KIDS(xml))
+ return JS_TRUE;
+
+ if (attributes) {
+ array = &xml->xml_attrs;
+ matcher = MatchAttrName;
+ } else {
+ array = &xml->xml_kids;
+ matcher = MatchElemName;
+ }
+
+ XMLArrayCursorInit(&cursor, array);
+ while ((kid = (JSXML *) XMLArrayCursorNext(&cursor)) != NULL) {
+ if (matcher(nameqn, kid)) {
+ if (!attributes && kid->xml_class == JSXML_CLASS_ELEMENT) {
+ ok = SyncInScopeNamespaces(cx, kid);
+ if (!ok)
+ goto out;
+ }
+ ok = Append(cx, list, kid);
+ if (!ok)
+ goto out;
+ }
+ }
+ ok = JS_TRUE;
+
+ out:
+ XMLArrayCursorFinish(&cursor);
+ return ok;
+}
+
+/* ECMA-357 9.1.1.1 XML [[Get]] and 9.2.1.1 XMLList [[Get]]. */
+static JSBool
+GetProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ JSXML *xml, *list, *kid;
+ uint32 index;
+ JSObject *kidobj, *listobj;
+ JSXMLQName *nameqn;
+ jsid funid;
+ jsval roots[2];
+ JSTempValueRooter tvr;
+ JSBool attributes;
+ JSXMLArrayCursor cursor;
+
+ xml = (JSXML *) JS_GetInstancePrivate(cx, obj, &js_XMLClass, NULL);
+ if (!xml)
+ return JS_TRUE;
+
+ if (js_IdIsIndex(id, &index)) {
+ if (xml->xml_class != JSXML_CLASS_LIST) {
+ *vp = (index == 0) ? OBJECT_TO_JSVAL(obj) : JSVAL_VOID;
+ } else {
+ /*
+ * ECMA-357 9.2.1.1 starts here.
+ *
+ * Erratum: 9.2 is not completely clear that indexed properties
+ * correspond to kids, but that's what it seems to say, and it's
+ * what any sane user would want.
+ */
+ if (index < xml->xml_kids.length) {
+ kid = XMLARRAY_MEMBER(&xml->xml_kids, index, JSXML);
+ if (!kid) {
+ *vp = JSVAL_VOID;
+ return JS_TRUE;
+ }
+ kidobj = js_GetXMLObject(cx, kid);
+ if (!kidobj)
+ return JS_FALSE;
+
+ *vp = OBJECT_TO_JSVAL(kidobj);
+ } else {
+ *vp = JSVAL_VOID;
+ }
+ }
+ return JS_TRUE;
+ }
+
+ /*
+ * ECMA-357 9.2.1.1/9.1.1.1 qname case.
+ */
+ nameqn = ToXMLName(cx, id, &funid);
+ if (!nameqn)
+ return JS_FALSE;
+ if (funid)
+ return js_GetXMLFunction(cx, obj, funid, vp);
+
+ roots[0] = OBJECT_TO_JSVAL(nameqn->object);
+ JS_PUSH_TEMP_ROOT(cx, 1, roots, &tvr);
+
+ listobj = js_NewXMLObject(cx, JSXML_CLASS_LIST);
+ if (listobj) {
+ roots[1] = OBJECT_TO_JSVAL(listobj);
+ tvr.count++;
+
+ list = (JSXML *) JS_GetPrivate(cx, listobj);
+ attributes = (OBJ_GET_CLASS(cx, nameqn->object) ==
+ &js_AttributeNameClass);
+
+ if (xml->xml_class == JSXML_CLASS_LIST) {
+ XMLArrayCursorInit(&cursor, &xml->xml_kids);
+ while ((kid = (JSXML *) XMLArrayCursorNext(&cursor)) != NULL) {
+ if (kid->xml_class == JSXML_CLASS_ELEMENT &&
+ !GetNamedProperty(cx, kid, nameqn, attributes, list)) {
+ listobj = NULL;
+ break;
+ }
+ }
+ XMLArrayCursorFinish(&cursor);
+ } else {
+ if (!GetNamedProperty(cx, xml, nameqn, attributes, list))
+ listobj = NULL;
+ }
+
+ /*
+ * Erratum: ECMA-357 9.1.1.1 misses that [[Append]] sets the given
+ * list's [[TargetProperty]] to the property that is being appended.
+ * This means that any use of the internal [[Get]] property returns
+ * a list which, when used by e.g. [[Insert]], duplicates the last
+ * element matched by id.
+ * See bug 336921.
+ */
+ list->xml_target = xml;
+ list->xml_targetprop = nameqn;
+ *vp = OBJECT_TO_JSVAL(listobj);
+ }
+
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ return listobj != NULL;
+}
+
+static JSXML *
+CopyOnWrite(JSContext *cx, JSXML *xml, JSObject *obj)
+{
+ JS_ASSERT(xml->object != obj);
+
+ xml = DeepCopy(cx, xml, obj, 0);
+ if (!xml)
+ return NULL;
+
+ JS_ASSERT(xml->object == obj);
+ return xml;
+}
+
+#define CHECK_COPY_ON_WRITE(cx,xml,obj) \
+ (xml->object == obj ? xml : CopyOnWrite(cx, xml, obj))
+
+static JSString *
+KidToString(JSContext *cx, JSXML *xml, uint32 index)
+{
+ JSXML *kid;
+ JSObject *kidobj;
+
+ kid = XMLARRAY_MEMBER(&xml->xml_kids, index, JSXML);
+ if (!kid)
+ return cx->runtime->emptyString;
+ kidobj = js_GetXMLObject(cx, kid);
+ if (!kidobj)
+ return NULL;
+ return js_ValueToString(cx, OBJECT_TO_JSVAL(kidobj));
+}
+
+/* ECMA-357 9.1.1.2 XML [[Put]] and 9.2.1.2 XMLList [[Put]]. */
+static JSBool
+PutProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ JSBool ok, primitiveAssign;
+ enum { OBJ_ROOT, ID_ROOT, VAL_ROOT };
+ jsval roots[3];
+ JSTempValueRooter tvr;
+ JSXML *xml, *vxml, *rxml, *kid, *attr, *parent, *copy, *kid2, *match;
+ JSObject *vobj, *nameobj, *attrobj, *parentobj, *kidobj, *copyobj;
+ JSXMLQName *targetprop, *nameqn, *attrqn;
+ uint32 index, i, j, k, n, q;
+ jsval attrval, nsval, junk;
+ jsid funid;
+ JSString *left, *right, *space;
+ JSXMLNamespace *ns;
+
+ xml = (JSXML *) JS_GetInstancePrivate(cx, obj, &js_XMLClass, NULL);
+ if (!xml)
+ return JS_TRUE;
+
+ xml = CHECK_COPY_ON_WRITE(cx, xml, obj);
+ if (!xml)
+ return JS_FALSE;
+
+ /* Precompute vxml for 9.2.1.2 2(c)(vii)(2-3) and 2(d) and 9.1.1.2 1. */
+ vxml = NULL;
+ if (!JSVAL_IS_PRIMITIVE(*vp)) {
+ vobj = JSVAL_TO_OBJECT(*vp);
+ if (OBJECT_IS_XML(cx, vobj))
+ vxml = (JSXML *) JS_GetPrivate(cx, vobj);
+ }
+
+ /* Control flow after here must exit via label out. */
+ ok = js_EnterLocalRootScope(cx);
+ if (!ok)
+ return JS_FALSE;
+ roots[OBJ_ROOT] = OBJECT_TO_JSVAL(obj);
+ roots[ID_ROOT] = id;
+ roots[VAL_ROOT] = *vp;
+ JS_PUSH_TEMP_ROOT(cx, 3, roots, &tvr);
+
+ if (xml->xml_class == JSXML_CLASS_LIST) {
+ /* ECMA-357 9.2.1.2. */
+ if (js_IdIsIndex(id, &index)) {
+ /* Step 1 sets i to the property index. */
+ i = index;
+
+ /* 2(a-b). */
+ if (xml->xml_target) {
+ ok = ResolveValue(cx, xml->xml_target, &rxml);
+ if (!ok)
+ goto out;
+ if (!rxml)
+ goto out;
+ JS_ASSERT(rxml->object);
+ } else {
+ rxml = NULL;
+ }
+
+ /* 2(c). */
+ if (index >= xml->xml_kids.length) {
+ /* 2(c)(i). */
+ if (rxml) {
+ if (rxml->xml_class == JSXML_CLASS_LIST) {
+ if (rxml->xml_kids.length != 1)
+ goto out;
+ rxml = XMLARRAY_MEMBER(&rxml->xml_kids, 0, JSXML);
+ if (!rxml)
+ goto out;
+ ok = js_GetXMLObject(cx, rxml) != NULL;
+ if (!ok)
+ goto out;
+ }
+
+ /*
+ * Erratum: ECMA-357 9.2.1.2 step 2(c)(ii) sets
+ * _y.[[Parent]] = r_ where _r_ is the result of
+ * [[ResolveValue]] called on _x.[[TargetObject]] in
+ * 2(a)(i). This can result in text parenting text:
+ *
+ * var MYXML = new XML();
+ * MYXML.appendChild(new XML("<TEAM>Giants</TEAM>"));
+ *
+ * (testcase from Werner Sharp <wsharp@macromedia.com>).
+ *
+ * To match insertChildAfter, insertChildBefore,
+ * prependChild, and setChildren, we should silently
+ * do nothing in this case.
+ */
+ if (!JSXML_HAS_KIDS(rxml))
+ goto out;
+ }
+
+ /* 2(c)(ii) is distributed below as several js_NewXML calls. */
+ targetprop = xml->xml_targetprop;
+ if (!targetprop || IS_STAR(targetprop->localName)) {
+ /* 2(c)(iv)(1-2), out of order w.r.t. 2(c)(iii). */
+ kid = js_NewXML(cx, JSXML_CLASS_TEXT);
+ if (!kid)
+ goto bad;
+ } else {
+ nameobj = js_GetXMLQNameObject(cx, targetprop);
+ if (!nameobj)
+ goto bad;
+ if (OBJ_GET_CLASS(cx, nameobj) == &js_AttributeNameClass) {
+ /*
+ * 2(c)(iii)(1-3).
+ * Note that rxml can't be null here, because target
+ * and targetprop are non-null.
+ */
+ ok = GetProperty(cx, rxml->object, id, &attrval);
+ if (!ok)
+ goto out;
+ if (JSVAL_IS_PRIMITIVE(attrval)) /* no such attribute */
+ goto out;
+ attrobj = JSVAL_TO_OBJECT(attrval);
+ attr = (JSXML *) JS_GetPrivate(cx, attrobj);
+ if (JSXML_LENGTH(attr) != 0)
+ goto out;
+
+ kid = js_NewXML(cx, JSXML_CLASS_ATTRIBUTE);
+ } else {
+ /* 2(c)(v). */
+ kid = js_NewXML(cx, JSXML_CLASS_ELEMENT);
+ }
+ if (!kid)
+ goto bad;
+
+ /* An important bit of 2(c)(ii). */
+ kid->name = targetprop;
+ }
+
+ /* Final important bit of 2(c)(ii). */
+ kid->parent = rxml;
+
+ /* 2(c)(vi-vii). */
+ i = xml->xml_kids.length;
+ if (kid->xml_class != JSXML_CLASS_ATTRIBUTE) {
+ /*
+ * 2(c)(vii)(1) tests whether _y.[[Parent]]_ is not null.
+ * y.[[Parent]] is here called kid->parent, which we know
+ * from 2(c)(ii) is _r_, here called rxml. So let's just
+ * test that! Erratum, the spec should be simpler here.
+ */
+ if (rxml) {
+ JS_ASSERT(JSXML_HAS_KIDS(rxml));
+ n = rxml->xml_kids.length;
+ j = n - 1;
+ if (n != 0 && i != 0) {
+ for (n = j, j = 0; j < n; j++) {
+ if (rxml->xml_kids.vector[j] ==
+ xml->xml_kids.vector[i-1]) {
+ break;
+ }
+ }
+ }
+
+ kidobj = js_GetXMLObject(cx, kid);
+ if (!kidobj)
+ goto bad;
+ ok = Insert(cx, rxml, j + 1, OBJECT_TO_JSVAL(kidobj));
+ if (!ok)
+ goto out;
+ }
+
+ /*
+ * 2(c)(vii)(2-3).
+ * Erratum: [[PropertyName]] in 2(c)(vii)(3) must be a
+ * typo for [[TargetProperty]].
+ */
+ if (vxml) {
+ kid->name = (vxml->xml_class == JSXML_CLASS_LIST)
+ ? vxml->xml_targetprop
+ : vxml->name;
+ }
+ }
+
+ /* 2(c)(viii). */
+ ok = Append(cx, xml, kid);
+ if (!ok)
+ goto out;
+ }
+
+ /* 2(d). */
+ if (!vxml ||
+ vxml->xml_class == JSXML_CLASS_TEXT ||
+ vxml->xml_class == JSXML_CLASS_ATTRIBUTE) {
+ ok = JS_ConvertValue(cx, *vp, JSTYPE_STRING, vp);
+ if (!ok)
+ goto out;
+ roots[VAL_ROOT] = *vp;
+ }
+
+ /* 2(e). */
+ kid = XMLARRAY_MEMBER(&xml->xml_kids, i, JSXML);
+ if (!kid)
+ goto out;
+ parent = kid->parent;
+ if (kid->xml_class == JSXML_CLASS_ATTRIBUTE) {
+ nameobj = js_GetAttributeNameObject(cx, kid->name);
+ if (!nameobj)
+ goto bad;
+ id = OBJECT_TO_JSVAL(nameobj);
+
+ if (parent) {
+ /* 2(e)(i). */
+ parentobj = js_GetXMLObject(cx, parent);
+ if (!parentobj)
+ goto bad;
+ ok = PutProperty(cx, parentobj, id, vp);
+ if (!ok)
+ goto out;
+
+ /* 2(e)(ii). */
+ ok = GetProperty(cx, parentobj, id, vp);
+ if (!ok)
+ goto out;
+ attr = (JSXML *) JS_GetPrivate(cx, JSVAL_TO_OBJECT(*vp));
+
+ /* 2(e)(iii). */
+ xml->xml_kids.vector[i] = attr->xml_kids.vector[0];
+ }
+ }
+
+ /* 2(f). */
+ else if (vxml && vxml->xml_class == JSXML_CLASS_LIST) {
+ /* 2(f)(i) Create a shallow copy _c_ of _V_. */
+ copyobj = js_NewXMLObject(cx, JSXML_CLASS_LIST);
+ if (!copyobj)
+ goto bad;
+ copy = (JSXML *) JS_GetPrivate(cx, copyobj);
+ n = vxml->xml_kids.length;
+ ok = XMLArraySetCapacity(cx, &copy->xml_kids, n);
+ if (!ok)
+ goto out;
+ for (k = 0; k < n; k++) {
+ kid2 = XMLARRAY_MEMBER(&vxml->xml_kids, k, JSXML);
+ XMLARRAY_SET_MEMBER(&copy->xml_kids, k, kid2);
+ }
+
+ JS_ASSERT(parent != xml);
+ if (parent) {
+ q = XMLARRAY_FIND_MEMBER(&parent->xml_kids, kid, NULL);
+ JS_ASSERT(q != XML_NOT_FOUND);
+
+ ok = IndexToIdVal(cx, q, &id);
+ if (!ok)
+ goto out;
+ ok = Replace(cx, parent, id, OBJECT_TO_JSVAL(copyobj));
+ if (!ok)
+ goto out;
+
+#ifdef DEBUG
+ /* Erratum: this loop in the spec is useless. */
+ for (j = 0, n = copy->xml_kids.length; j < n; j++) {
+ kid2 = XMLARRAY_MEMBER(&parent->xml_kids, q + j, JSXML);
+ JS_ASSERT(XMLARRAY_MEMBER(&copy->xml_kids, j, JSXML)
+ == kid2);
+ }
+#endif
+ }
+
+ /*
+ * 2(f)(iv-vi).
+ * Erratum: notice the unhandled zero-length V basis case and
+ * the off-by-one errors for the n != 0 cases in the spec.
+ */
+ if (n == 0) {
+ XMLArrayDelete(cx, &xml->xml_kids, i, JS_TRUE);
+ } else {
+ ok = XMLArrayInsert(cx, &xml->xml_kids, i + 1, n - 1);
+ if (!ok)
+ goto out;
+
+ for (j = 0; j < n; j++)
+ xml->xml_kids.vector[i + j] = copy->xml_kids.vector[j];
+ }
+ }
+
+ /* 2(g). */
+ else if (vxml || JSXML_HAS_VALUE(kid)) {
+ if (parent) {
+ q = XMLARRAY_FIND_MEMBER(&parent->xml_kids, kid, NULL);
+ JS_ASSERT(q != XML_NOT_FOUND);
+
+ ok = IndexToIdVal(cx, q, &id);
+ if (!ok)
+ goto out;
+ ok = Replace(cx, parent, id, *vp);
+ if (!ok)
+ goto out;
+
+ vxml = XMLARRAY_MEMBER(&parent->xml_kids, q, JSXML);
+ if (!vxml)
+ goto out;
+ roots[VAL_ROOT] = *vp = OBJECT_TO_JSVAL(vxml->object);
+ }
+
+ /*
+ * 2(g)(iii).
+ * Erratum: _V_ may not be of type XML, but all index-named
+ * properties _x[i]_ in an XMLList _x_ must be of type XML,
+ * according to 9.2.1.1 Overview and other places in the spec.
+ *
+ * Thanks to 2(d), we know _V_ (*vp here) is either a string
+ * or an XML/XMLList object. If *vp is a string, call ToXML
+ * on it to satisfy the constraint.
+ */
+ if (!vxml) {
+ JS_ASSERT(JSVAL_IS_STRING(*vp));
+ vobj = ToXML(cx, *vp);
+ if (!vobj)
+ goto bad;
+ roots[VAL_ROOT] = *vp = OBJECT_TO_JSVAL(vobj);
+ vxml = (JSXML *) JS_GetPrivate(cx, vobj);
+ }
+ XMLARRAY_SET_MEMBER(&xml->xml_kids, i, vxml);
+ }
+
+ /* 2(h). */
+ else {
+ kidobj = js_GetXMLObject(cx, kid);
+ if (!kidobj)
+ goto bad;
+ id = ATOM_KEY(cx->runtime->atomState.starAtom);
+ ok = PutProperty(cx, kidobj, id, vp);
+ if (!ok)
+ goto out;
+ }
+ } else {
+ /*
+ * 3.
+ * Erratum: if x.[[Length]] > 1 or [[ResolveValue]] returns null
+ * or an r with r.[[Length]] != 1, throw TypeError.
+ */
+ n = JSXML_LENGTH(xml);
+ if (n > 1)
+ goto type_error;
+ if (n == 0) {
+ ok = ResolveValue(cx, xml, &rxml);
+ if (!ok)
+ goto out;
+ if (!rxml || JSXML_LENGTH(rxml) != 1)
+ goto type_error;
+ ok = Append(cx, xml, rxml);
+ if (!ok)
+ goto out;
+ }
+ JS_ASSERT(JSXML_LENGTH(xml) == 1);
+ kid = XMLARRAY_MEMBER(&xml->xml_kids, 0, JSXML);
+ if (!kid)
+ goto out;
+ kidobj = js_GetXMLObject(cx, kid);
+ if (!kidobj)
+ goto bad;
+ ok = PutProperty(cx, kidobj, id, vp);
+ if (!ok)
+ goto out;
+ }
+ } else {
+ /*
+ * ECMA-357 9.1.1.2.
+ * Erratum: move steps 3 and 4 to before 1 and 2, to avoid wasted
+ * effort in ToString or [[DeepCopy]].
+ */
+ if (js_IdIsIndex(id, &index)) {
+ /* See NOTE in spec: this variation is reserved for future use. */
+ ReportBadXMLName(cx, id);
+ goto bad;
+ }
+
+ nameqn = ToXMLName(cx, id, &funid);
+ if (!nameqn)
+ goto bad;
+ if (funid) {
+ ok = js_SetProperty(cx, obj, funid, vp);
+ goto out;
+ }
+ nameobj = nameqn->object;
+
+ if (JSXML_HAS_VALUE(xml))
+ goto out;
+
+ if (!vxml ||
+ vxml->xml_class == JSXML_CLASS_TEXT ||
+ vxml->xml_class == JSXML_CLASS_ATTRIBUTE) {
+ ok = JS_ConvertValue(cx, *vp, JSTYPE_STRING, vp);
+ if (!ok)
+ goto out;
+ } else {
+ rxml = DeepCopyInLRS(cx, vxml, 0);
+ if (!rxml || !js_GetXMLObject(cx, rxml))
+ goto bad;
+ vxml = rxml;
+ *vp = OBJECT_TO_JSVAL(vxml->object);
+ }
+ roots[VAL_ROOT] = *vp;
+
+ /*
+ * 6.
+ * Erratum: why is this done here, so early? use is way later....
+ */
+ ok = js_GetDefaultXMLNamespace(cx, &nsval);
+ if (!ok)
+ goto out;
+
+ if (OBJ_GET_CLASS(cx, nameobj) == &js_AttributeNameClass) {
+ /* 7(a). */
+ if (!js_IsXMLName(cx, OBJECT_TO_JSVAL(nameobj)))
+ goto out;
+
+ /* 7(b-c). */
+ if (vxml && vxml->xml_class == JSXML_CLASS_LIST) {
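+/*
+ * Method prologs: XML_METHOD_PROLOG loads the JSXML private from obj (failing
+ * with a class error for non-XML objects), while NON_LIST_XML_METHOD_PROLOG
+ * additionally routes a one-kid XMLList to its single kid through
+ * StartNonListXMLMethod and rejects longer lists.
+ */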
+ n = vxml->xml_kids.length;
+ if (n == 0) {
+ *vp = STRING_TO_JSVAL(cx->runtime->emptyString);
+ } else {
+ left = KidToString(cx, vxml, 0);
+ if (!left)
+ goto bad;
+
+ space = ATOM_TO_STRING(cx->runtime->atomState.spaceAtom);
+ for (i = 1; i < n; i++) {
+ left = js_ConcatStrings(cx, left, space);
+ if (!left)
+ goto bad;
+ right = KidToString(cx, vxml, i);
+ if (!right)
+ goto bad;
+ left = js_ConcatStrings(cx, left, right);
+ if (!left)
+ goto bad;
+ }
+
+ roots[VAL_ROOT] = *vp = STRING_TO_JSVAL(left);
+ }
+ } else {
+ ok = JS_ConvertValue(cx, *vp, JSTYPE_STRING, vp);
+ if (!ok)
+ goto out;
+ roots[VAL_ROOT] = *vp;
+ }
+
+ /* 7(d-e). */
+ match = NULL;
+ for (i = 0, n = xml->xml_attrs.length; i < n; i++) {
+ attr = XMLARRAY_MEMBER(&xml->xml_attrs, i, JSXML);
+ if (!attr)
+ continue;
+ attrqn = attr->name;
+ if (js_EqualStrings(attrqn->localName, nameqn->localName) &&
+ (!nameqn->uri ||
+ js_EqualStrings(attrqn->uri, nameqn->uri))) {
+ if (!match) {
+ match = attr;
+ } else {
+ nameobj = js_GetAttributeNameObject(cx, attrqn);
+ if (!nameobj)
+ goto bad;
+
+ id = OBJECT_TO_JSVAL(nameobj);
+ ok = DeleteProperty(cx, obj, id, &junk);
+ if (!ok)
+ goto out;
+ --i;
+ }
+ }
+ }
+
+ /* 7(f). */
+ attr = match;
+ if (!attr) {
+ /* 7(f)(i-ii). */
+ if (!nameqn->uri) {
+ left = right = cx->runtime->emptyString;
+ } else {
+ left = nameqn->uri;
+ right = nameqn->prefix;
+ }
+ nameqn = js_NewXMLQName(cx, left, right, nameqn->localName);
+ if (!nameqn)
+ goto bad;
+
+ /* 7(f)(iii). */
+ attr = js_NewXML(cx, JSXML_CLASS_ATTRIBUTE);
+ if (!attr)
+ goto bad;
+ attr->parent = xml;
+ attr->name = nameqn;
+
+ /* 7(f)(iv). */
+ ok = XMLARRAY_ADD_MEMBER(cx, &xml->xml_attrs, n, attr);
+ if (!ok)
+ goto out;
+
+ /* 7(f)(v-vi). */
+ ns = GetNamespace(cx, nameqn, NULL);
+ if (!ns)
+ goto bad;
+ ok = AddInScopeNamespace(cx, xml, ns);
+ if (!ok)
+ goto out;
+ }
+
+ /* 7(g). */
+ attr->xml_value = JSVAL_TO_STRING(*vp);
+ goto out;
+ }
+
+ /* 8-9. */
+ if (!js_IsXMLName(cx, OBJECT_TO_JSVAL(nameobj)) &&
+ !IS_STAR(nameqn->localName)) {
+ goto out;
+ }
+
+ /* 10-11. */
+ id = JSVAL_VOID;
+ primitiveAssign = !vxml && !IS_STAR(nameqn->localName);
+
+ /* 12. */
+ k = n = xml->xml_kids.length;
+ kid2 = NULL;
+ while (k != 0) {
+ --k;
+ kid = XMLARRAY_MEMBER(&xml->xml_kids, k, JSXML);
+ if (kid && MatchElemName(nameqn, kid)) {
+ if (!JSVAL_IS_VOID(id)) {
+ ok = DeleteByIndex(cx, xml, id, &junk);
+ if (!ok)
+ goto out;
+ }
+ ok = IndexToIdVal(cx, k, &id);
+ if (!ok)
+ goto out;
+ kid2 = kid;
+ }
+ }
+
+ /*
+ * Erratum: ECMA-357 specified child insertion inconsistently:
+ * insertChildBefore and insertChildAfter insert an arbitrary XML
+ * instance, and therefore can create cycles, but appendChild as
+ * specified by the "Overview" of 13.4.4.3 calls [[DeepCopy]] on
+ * its argument. But the "Semantics" in 13.4.4.3 do not include
+ * any [[DeepCopy]] call.
+ *
+ * Fixing this (https://bugzilla.mozilla.org/show_bug.cgi?id=312692)
+ * required adding cycle detection, and allowing duplicate kids to
+ * be created (see comment 6 in the bug). Allowing duplicate kid
+ * references means the loop above will delete all but the lowest
+ * indexed reference, and each [[DeleteByIndex]] nulls the kid's
+ * parent. Thus the need to restore parent here. This is covered
+ * by https://bugzilla.mozilla.org/show_bug.cgi?id=327564.
+ */
+ if (kid2) {
+ JS_ASSERT(kid2->parent == xml || !kid2->parent);
+ if (!kid2->parent)
+ kid2->parent = xml;
+ }
+
+ /* 13. */
+ if (JSVAL_IS_VOID(id)) {
+ /* 13(a). */
+ ok = IndexToIdVal(cx, n, &id);
+ if (!ok)
+ goto out;
+
+ /* 13(b). */
+ if (primitiveAssign) {
+ if (!nameqn->uri) {
+ ns = (JSXMLNamespace *)
+ JS_GetPrivate(cx, JSVAL_TO_OBJECT(nsval));
+ left = ns->uri;
+ right = ns->prefix;
+ } else {
+ left = nameqn->uri;
+ right = nameqn->prefix;
+ }
+ nameqn = js_NewXMLQName(cx, left, right, nameqn->localName);
+ if (!nameqn)
+ goto bad;
+
+ /* 13(b)(iii). */
+ vobj = js_NewXMLObject(cx, JSXML_CLASS_ELEMENT);
+ if (!vobj)
+ goto bad;
+ vxml = (JSXML *) JS_GetPrivate(cx, vobj);
+ vxml->parent = xml;
+ vxml->name = nameqn;
+
+ /* 13(b)(iv-vi). */
+ ns = GetNamespace(cx, nameqn, NULL);
+ if (!ns)
+ goto bad;
+ ok = Replace(cx, xml, id, OBJECT_TO_JSVAL(vobj));
+ if (!ok)
+ goto out;
+ ok = AddInScopeNamespace(cx, vxml, ns);
+ if (!ok)
+ goto out;
+ }
+ }
+
+ /* 14. */
+ if (primitiveAssign) {
+ JSXMLArrayCursor cursor;
+
+ js_IdIsIndex(id, &index);
+ XMLArrayCursorInit(&cursor, &xml->xml_kids);
+ cursor.index = index;
+ kid = (JSXML *) XMLArrayCursorItem(&cursor);
+ if (JSXML_HAS_KIDS(kid)) {
+ XMLArrayFinish(cx, &kid->xml_kids);
+ ok = XMLArrayInit(cx, &kid->xml_kids, 1);
+ }
+
+ /* 14(b-c). */
+ /* XXXbe Erratum? redundant w.r.t. 7(b-c) else clause above */
+ if (ok) {
+ ok = JS_ConvertValue(cx, *vp, JSTYPE_STRING, vp);
+ if (ok && !IS_EMPTY(JSVAL_TO_STRING(*vp))) {
+ roots[VAL_ROOT] = *vp;
+ if ((JSXML *) XMLArrayCursorItem(&cursor) == kid)
+ ok = Replace(cx, kid, JSVAL_ZERO, *vp);
+ }
+ }
+ XMLArrayCursorFinish(&cursor);
+ } else {
+ /* 15(a). */
+ ok = Replace(cx, xml, id, *vp);
+ }
+ }
+
+out:
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ js_LeaveLocalRootScope(cx);
+ return ok;
+
+type_error:
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_XMLLIST_PUT,
+ js_ValueToPrintableString(cx, id));
+bad:
+ ok = JS_FALSE;
+ goto out;
+}
+
+/* ECMA-357 9.1.1.10 XML [[ResolveValue]], 9.2.1.10 XMLList [[ResolveValue]]. */
+static JSBool
+ResolveValue(JSContext *cx, JSXML *list, JSXML **result)
+{
+ JSXML *target, *base;
+ JSXMLQName *targetprop;
+ JSObject *targetpropobj;
+ jsval id, tv;
+
+ /* Our caller must be protecting newborn objects. */
+ JS_ASSERT(cx->localRootStack);
+
+ if (list->xml_class != JSXML_CLASS_LIST || list->xml_kids.length != 0) {
+ if (!js_GetXMLObject(cx, list))
+ return JS_FALSE;
+ *result = list;
+ return JS_TRUE;
+ }
+
+ target = list->xml_target;
+ targetprop = list->xml_targetprop;
+ if (!target || !targetprop || IS_STAR(targetprop->localName)) {
+ *result = NULL;
+ return JS_TRUE;
+ }
+
+ targetpropobj = js_GetXMLQNameObject(cx, targetprop);
+ if (!targetpropobj)
+ return JS_FALSE;
+ if (OBJ_GET_CLASS(cx, targetpropobj) == &js_AttributeNameClass) {
+ *result = NULL;
+ return JS_TRUE;
+ }
+
+ if (!ResolveValue(cx, target, &base))
+ return JS_FALSE;
+ if (!base) {
+ *result = NULL;
+ return JS_TRUE;
+ }
+ if (!js_GetXMLObject(cx, base))
+ return JS_FALSE;
+
+ id = OBJECT_TO_JSVAL(targetpropobj);
+ if (!GetProperty(cx, base->object, id, &tv))
+ return JS_FALSE;
+ target = (JSXML *) JS_GetPrivate(cx, JSVAL_TO_OBJECT(tv));
+
+ if (JSXML_LENGTH(target) == 0) {
+ if (base->xml_class == JSXML_CLASS_LIST && JSXML_LENGTH(base) > 1) {
+ *result = NULL;
+ return JS_TRUE;
+ }
+ tv = STRING_TO_JSVAL(cx->runtime->emptyString);
+ if (!PutProperty(cx, base->object, id, &tv))
+ return JS_FALSE;
+ if (!GetProperty(cx, base->object, id, &tv))
+ return JS_FALSE;
+ target = (JSXML *) JS_GetPrivate(cx, JSVAL_TO_OBJECT(tv));
+ }
+
+ *result = target;
+ return JS_TRUE;
+}
+
+/*
+ * HasProperty must be able to return a found JSProperty and the object in
+ * which it was found, if id is of the form function::name. For other ids,
+ * if they index or name an XML child, we return FOUND_XML_PROPERTY in *propp
+ * and null in *objp.
+ *
+ * DROP_PROPERTY helps HasProperty callers drop function properties without
+ * trying to drop the magic FOUND_XML_PROPERTY cookie.
+ */
+#define FOUND_XML_PROPERTY ((JSProperty *) 1)
+#define DROP_PROPERTY(cx,pobj,prop) (((prop) != FOUND_XML_PROPERTY) \
+ ? OBJ_DROP_PROPERTY(cx, pobj, prop) \
+ : (void) 0)
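+
+/*
+ * The usual caller pattern (see FoundProperty further below) is to treat a
+ * non-null *propp as "found" and release it with DROP_PROPERTY, which is a
+ * no-op for the FOUND_XML_PROPERTY cookie:
+ *
+ * if (!HasProperty(cx, obj, id, &pobj, &prop))
+ * return JS_FALSE;
+ * if (prop)
+ * DROP_PROPERTY(cx, pobj, prop);
+ * found = (prop != NULL);
+ */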
+
+/* ECMA-357 9.1.1.6 XML [[HasProperty]] and 9.2.1.5 XMLList [[HasProperty]]. */
+static JSBool
+HasProperty(JSContext *cx, JSObject *obj, jsval id, JSObject **objp,
+ JSProperty **propp)
+{
+ JSXML *xml, *kid;
+ JSXMLArrayCursor cursor;
+ JSObject *kidobj;
+ JSXMLQName *qn;
+ jsid funid;
+ JSXMLArray *array;
+ JSXMLNameMatcher matcher;
+ uint32 i, n;
+
+ *objp = NULL;
+ *propp = NULL;
+
+ xml = (JSXML *) JS_GetPrivate(cx, obj);
+ if (xml->xml_class == JSXML_CLASS_LIST) {
+ n = JSXML_LENGTH(xml);
+ if (js_IdIsIndex(id, &i)) {
+ if (i < n)
+ *propp = FOUND_XML_PROPERTY;
+ return JS_TRUE;
+ }
+
+ XMLArrayCursorInit(&cursor, &xml->xml_kids);
+ while ((kid = (JSXML *) XMLArrayCursorNext(&cursor)) != NULL) {
+ if (kid->xml_class == JSXML_CLASS_ELEMENT) {
+ kidobj = js_GetXMLObject(cx, kid);
+ if (!kidobj || !HasProperty(cx, kidobj, id, objp, propp))
+ break;
+ if (*propp)
+ break;
+ }
+ }
+ XMLArrayCursorFinish(&cursor);
+ if (kid)
+ return *propp != NULL;
+ } else {
+ if (xml->xml_class == JSXML_CLASS_ELEMENT && js_IdIsIndex(id, &i)) {
+ if (i == 0)
+ *propp = FOUND_XML_PROPERTY;
+ return JS_TRUE;
+ }
+
+ qn = ToXMLName(cx, id, &funid);
+ if (!qn)
+ return JS_FALSE;
+ if (funid)
+ return js_LookupProperty(cx, obj, funid, objp, propp);
+
+ if (xml->xml_class != JSXML_CLASS_ELEMENT)
+ return JS_TRUE;
+
+ if (OBJ_GET_CLASS(cx, qn->object) == &js_AttributeNameClass) {
+ array = &xml->xml_attrs;
+ matcher = MatchAttrName;
+ } else {
+ array = &xml->xml_kids;
+ matcher = MatchElemName;
+ }
+ for (i = 0, n = array->length; i < n; i++) {
+ kid = XMLARRAY_MEMBER(array, i, JSXML);
+ if (kid && matcher(qn, kid)) {
+ *propp = FOUND_XML_PROPERTY;
+ return JS_TRUE;
+ }
+ }
+ }
+
+ return JS_TRUE;
+}
+
+static void
+xml_finalize(JSContext *cx, JSObject *obj)
+{
+ JSXML *xml;
+
+ xml = (JSXML *) JS_GetPrivate(cx, obj);
+ if (!xml)
+ return;
+ if (xml->object == obj)
+ xml->object = NULL;
+ UNMETER(xml_stats.livexmlobj);
+}
+
+static void
+xml_mark_vector(JSContext *cx, JSXML **vec, uint32 len)
+{
+ uint32 i;
+ JSXML *elt;
+
+ for (i = 0; i < len; i++) {
+ elt = vec[i];
+ {
+#ifdef GC_MARK_DEBUG
+ char buf[120];
+
+ if (elt->xml_class == JSXML_CLASS_LIST) {
+ strcpy(buf, js_XMLList_str);
+ } else if (JSXML_HAS_NAME(elt)) {
+ JSXMLQName *qn = elt->name;
+
+ JS_snprintf(buf, sizeof buf, "%s::%s",
+ qn->uri ? JS_GetStringBytes(qn->uri) : "*",
+ JS_GetStringBytes(qn->localName));
+ } else {
+ JSString *str = elt->xml_value;
+ size_t srclen = JSSTRING_LENGTH(str);
+ size_t dstlen = sizeof buf;
+
+ if (srclen >= sizeof buf / 6)
+ srclen = sizeof buf / 6 - 1;
+ js_DeflateStringToBuffer(cx, JSSTRING_CHARS(str), srclen,
+ buf, &dstlen);
+ }
+#endif
+ GC_MARK(cx, elt, buf);
+ }
+ }
+}
+
+/*
+ * js_XMLObjectOps.newObjectMap == js_NewObjectMap, so XML objects appear to
+ * be native. Therefore, xml_lookupProperty must return a valid JSProperty
+ * pointer parameter via *propp to signify "property found". Since the only
+ * call to xml_lookupProperty is via OBJ_LOOKUP_PROPERTY, and then only from
+ * js_FindXMLProperty (in this file), js_FindProperty (in jsobj.c, called from
+ * jsinterp.c) or from JSOP_IN case in the interpreter, the only time we add a
+ * JSScopeProperty here is when an unqualified name or XML name is being
+ * accessed or when "name in xml" is called.
+ *
+ * This scope property keeps the JSOP_NAME code in js_Interpret happy by
+ * giving it an sprop with (getter, setter) == (GetProperty, PutProperty).
+ *
+ * NB: xml_deleteProperty must take care to remove any property added here.
+ *
+ * FIXME This clashes with the function namespace implementation which also
+ * uses native properties. Effectively after xml_lookupProperty any property
+ * stored previously using assignments to xml.function::name will be removed.
+ * We partially workaround the problem in js_GetXMLFunction. There we take
+ * advantage of the fact that typically function:: is used to access the
+ * functions from XML.prototype. So when js_GetProperty returns a non-function
+ * property, we assume that it represents the result of GetProperty setter
+ * hiding the function and use an extra prototype chain lookup to recover it.
+ * For a proper solution see bug 355257.
+ */
+static JSBool
+xml_lookupProperty(JSContext *cx, JSObject *obj, jsid id, JSObject **objp,
+ JSProperty **propp)
+{
+ JSScopeProperty *sprop;
+
+ if (!HasProperty(cx, obj, ID_TO_VALUE(id), objp, propp))
+ return JS_FALSE;
+
+ if (*propp == FOUND_XML_PROPERTY) {
+ sprop = js_AddNativeProperty(cx, obj, id, GetProperty, PutProperty,
+ SPROP_INVALID_SLOT, JSPROP_ENUMERATE,
+ 0, 0);
+ if (!sprop)
+ return JS_FALSE;
+
+ JS_LOCK_OBJ(cx, obj);
+ *objp = obj;
+ *propp = (JSProperty *) sprop;
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+xml_defineProperty(JSContext *cx, JSObject *obj, jsid id, jsval value,
+ JSPropertyOp getter, JSPropertyOp setter, uintN attrs,
+ JSProperty **propp)
+{
+ if (VALUE_IS_FUNCTION(cx, value) || getter || setter ||
+ (attrs & JSPROP_ENUMERATE) == 0 ||
+ (attrs & (JSPROP_READONLY | JSPROP_PERMANENT | JSPROP_SHARED))) {
+ return js_DefineProperty(cx, obj, id, value, getter, setter, attrs,
+ propp);
+ }
+
+ if (!PutProperty(cx, obj, ID_TO_VALUE(id), &value))
+ return JS_FALSE;
+ if (propp)
+ *propp = NULL;
+ return JS_TRUE;
+}
+
+static JSBool
+xml_getProperty(JSContext *cx, JSObject *obj, jsid id, jsval *vp)
+{
+ if (id == JS_DEFAULT_XML_NAMESPACE_ID) {
+ *vp = JSVAL_VOID;
+ return JS_TRUE;
+ }
+
+ return GetProperty(cx, obj, ID_TO_VALUE(id), vp);
+}
+
+static JSBool
+xml_setProperty(JSContext *cx, JSObject *obj, jsid id, jsval *vp)
+{
+ return PutProperty(cx, obj, ID_TO_VALUE(id), vp);
+}
+
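+/*
+ * Set *foundp to whether id names a property of obj, reusing prop when the
+ * caller has already resolved it and dropping whatever HasProperty finds.
+ */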
+static JSBool
+FoundProperty(JSContext *cx, JSObject *obj, jsid id, JSProperty *prop,
+ JSBool *foundp)
+{
+ JSObject *pobj;
+
+ if (prop) {
+ *foundp = JS_TRUE;
+ } else {
+ if (!HasProperty(cx, obj, ID_TO_VALUE(id), &pobj, &prop))
+ return JS_FALSE;
+ if (prop)
+ DROP_PROPERTY(cx, pobj, prop);
+ *foundp = (prop != NULL);
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+xml_getAttributes(JSContext *cx, JSObject *obj, jsid id, JSProperty *prop,
+ uintN *attrsp)
+{
+ JSBool found;
+
+ if (!FoundProperty(cx, obj, id, prop, &found))
+ return JS_FALSE;
+ *attrsp = found ? JSPROP_ENUMERATE : 0;
+ return JS_TRUE;
+}
+
+static JSBool
+xml_setAttributes(JSContext *cx, JSObject *obj, jsid id, JSProperty *prop,
+ uintN *attrsp)
+{
+ JSBool found;
+
+ if (!FoundProperty(cx, obj, id, prop, &found))
+ return JS_FALSE;
+ if (found) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_CANT_SET_XML_ATTRS);
+ }
+ return !found;
+}
+
+static JSBool
+xml_deleteProperty(JSContext *cx, JSObject *obj, jsid id, jsval *rval)
+{
+ /*
+ * If this object has its own (mutable) scope, and if id isn't an index,
+ * then we may have added a property to the scope in xml_lookupProperty
+ * for it to return to mean "found" and to provide a handle for access
+ * operations to call the property's getter or setter. The property also
+ * helps speed up unqualified accesses via the property cache, avoiding
+ * what amounts to two HasProperty searches.
+ *
+ * But now it's time to remove any such property, to purge the property
+ * cache and remove the scope entry.
+ */
+ if (OBJ_SCOPE(obj)->object == obj && !JSID_IS_INT(id)) {
+ if (!js_DeleteProperty(cx, obj, id, rval))
+ return JS_FALSE;
+ }
+
+ return DeleteProperty(cx, obj, ID_TO_VALUE(id), rval);
+}
+
+static JSBool
+xml_defaultValue(JSContext *cx, JSObject *obj, JSType hint, jsval *vp)
+{
+ JSXML *xml;
+
+ if (hint == JSTYPE_OBJECT) {
+ /* Called from for..in code in js_Interpret: return an XMLList. */
+ xml = (JSXML *) JS_GetPrivate(cx, obj);
+ if (xml->xml_class != JSXML_CLASS_LIST) {
+ obj = ToXMLList(cx, OBJECT_TO_JSVAL(obj));
+ if (!obj)
+ return JS_FALSE;
+ }
+ *vp = OBJECT_TO_JSVAL(obj);
+ return JS_TRUE;
+ }
+
+ return JS_CallFunctionName(cx, obj, js_toString_str, 0, NULL, vp);
+}
+
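+/*
+ * Iterator hook implementing the JSENUMERATE_INIT/NEXT/DESTROY protocol over
+ * xml->xml_kids: INIT allocates a cursor (or none for an empty list), NEXT
+ * yields successive kid indexes as jsids, and DESTROY frees the cursor.
+ */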
+static JSBool
+xml_enumerate(JSContext *cx, JSObject *obj, JSIterateOp enum_op,
+ jsval *statep, jsid *idp)
+{
+ JSXML *xml;
+ uint32 length, index;
+ JSXMLArrayCursor *cursor;
+
+ xml = (JSXML *) JS_GetPrivate(cx, obj);
+ length = JSXML_LENGTH(xml);
+
+ switch (enum_op) {
+ case JSENUMERATE_INIT:
+ if (length == 0) {
+ cursor = NULL;
+ } else {
+ cursor = (JSXMLArrayCursor *) JS_malloc(cx, sizeof *cursor);
+ if (!cursor)
+ return JS_FALSE;
+ XMLArrayCursorInit(cursor, &xml->xml_kids);
+ }
+ *statep = PRIVATE_TO_JSVAL(cursor);
+ if (idp)
+ *idp = INT_TO_JSID(length);
+ break;
+
+ case JSENUMERATE_NEXT:
+ cursor = JSVAL_TO_PRIVATE(*statep);
+ if (cursor && cursor->array && (index = cursor->index) < length) {
+ *idp = INT_TO_JSID(index);
+ cursor->index = index + 1;
+ break;
+ }
+ /* FALL THROUGH */
+
+ case JSENUMERATE_DESTROY:
+ cursor = JSVAL_TO_PRIVATE(*statep);
+ if (cursor) {
+ XMLArrayCursorFinish(cursor);
+ JS_free(cx, cursor);
+ }
+ *statep = JSVAL_NULL;
+ break;
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+xml_hasInstance(JSContext *cx, JSObject *obj, jsval v, JSBool *bp)
+{
+ return JS_TRUE;
+}
+
+static uint32
+xml_mark(JSContext *cx, JSObject *obj, void *arg)
+{
+ JSXML *xml;
+
+ xml = (JSXML *) JS_GetPrivate(cx, obj);
+ GC_MARK(cx, xml, "private");
+ return js_Mark(cx, obj, NULL);
+}
+
+static void
+xml_clear(JSContext *cx, JSObject *obj)
+{
+}
+
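+/*
+ * Helper behind hasSimpleContent(): comments and processing instructions are
+ * never simple, a one-kid list defers to that kid, and anything else is
+ * simple exactly when it has no element kids.
+ */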
+static JSBool
+HasSimpleContent(JSXML *xml)
+{
+ JSXML *kid;
+ JSBool simple;
+ uint32 i, n;
+
+again:
+ switch (xml->xml_class) {
+ case JSXML_CLASS_COMMENT:
+ case JSXML_CLASS_PROCESSING_INSTRUCTION:
+ return JS_FALSE;
+ case JSXML_CLASS_LIST:
+ if (xml->xml_kids.length == 0)
+ return JS_TRUE;
+ if (xml->xml_kids.length == 1) {
+ kid = XMLARRAY_MEMBER(&xml->xml_kids, 0, JSXML);
+ if (kid) {
+ xml = kid;
+ goto again;
+ }
+ }
+ /* FALL THROUGH */
+ default:
+ simple = JS_TRUE;
+ for (i = 0, n = JSXML_LENGTH(xml); i < n; i++) {
+ kid = XMLARRAY_MEMBER(&xml->xml_kids, i, JSXML);
+ if (kid && kid->xml_class == JSXML_CLASS_ELEMENT) {
+ simple = JS_FALSE;
+ break;
+ }
+ }
+ return simple;
+ }
+}
+
+/*
+ * 11.2.2.1 Step 3(d) onward.
+ */
+static JSObject *
+xml_getMethod(JSContext *cx, JSObject *obj, jsid id, jsval *vp)
+{
+ JSTempValueRooter tvr;
+
+ JS_ASSERT(JS_InstanceOf(cx, obj, &js_XMLClass, NULL));
+
+ /*
+ * As our callers have a bad habit of passing a pointer to an unrooted
+ * local value as vp, we use a proper root here.
+ */
+ JS_PUSH_SINGLE_TEMP_ROOT(cx, JSVAL_NULL, &tvr);
+ if (!js_GetXMLFunction(cx, obj, id, &tvr.u.value))
+ obj = NULL;
+ *vp = tvr.u.value;
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ return obj;
+}
+
+static JSBool
+xml_setMethod(JSContext *cx, JSObject *obj, jsid id, jsval *vp)
+{
+ return js_SetProperty(cx, obj, id, vp);
+}
+
+static JSBool
+xml_enumerateValues(JSContext *cx, JSObject *obj, JSIterateOp enum_op,
+ jsval *statep, jsid *idp, jsval *vp)
+{
+ JSXML *xml, *kid;
+ uint32 length, index;
+ JSXMLArrayCursor *cursor;
+ JSObject *kidobj;
+
+ xml = (JSXML *) JS_GetPrivate(cx, obj);
+ length = JSXML_LENGTH(xml);
+ JS_ASSERT(INT_FITS_IN_JSVAL(length));
+
+ switch (enum_op) {
+ case JSENUMERATE_INIT:
+ if (length == 0) {
+ cursor = NULL;
+ } else {
+ cursor = (JSXMLArrayCursor *) JS_malloc(cx, sizeof *cursor);
+ if (!cursor)
+ return JS_FALSE;
+ XMLArrayCursorInit(cursor, &xml->xml_kids);
+ }
+ *statep = PRIVATE_TO_JSVAL(cursor);
+ if (idp)
+ *idp = INT_TO_JSID(length);
+ if (vp)
+ *vp = JSVAL_VOID;
+ break;
+
+ case JSENUMERATE_NEXT:
+ cursor = JSVAL_TO_PRIVATE(*statep);
+ if (cursor && cursor->array && (index = cursor->index) < length) {
+ while (!(kid = XMLARRAY_MEMBER(&xml->xml_kids, index, JSXML))) {
+ if (++index == length)
+ goto destroy;
+ }
+ kidobj = js_GetXMLObject(cx, kid);
+ if (!kidobj)
+ return JS_FALSE;
+ JS_ASSERT(INT_FITS_IN_JSVAL(index));
+ *idp = INT_TO_JSID(index);
+ *vp = OBJECT_TO_JSVAL(kidobj);
+ cursor->index = index + 1;
+ break;
+ }
+ /* FALL THROUGH */
+
+ case JSENUMERATE_DESTROY:
+ cursor = JSVAL_TO_PRIVATE(*statep);
+ if (cursor) {
+ destroy:
+ XMLArrayCursorFinish(cursor);
+ JS_free(cx, cursor);
+ }
+ *statep = JSVAL_NULL;
+ break;
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+xml_equality(JSContext *cx, JSObject *obj, jsval v, JSBool *bp)
+{
+ JSXML *xml, *vxml;
+ JSObject *vobj;
+ JSBool ok;
+ JSString *str, *vstr;
+ jsdouble d, d2;
+
+ xml = (JSXML *) JS_GetPrivate(cx, obj);
+ vxml = NULL;
+ if (!JSVAL_IS_PRIMITIVE(v)) {
+ vobj = JSVAL_TO_OBJECT(v);
+ if (OBJECT_IS_XML(cx, vobj))
+ vxml = (JSXML *) JS_GetPrivate(cx, vobj);
+ }
+
+ if (xml->xml_class == JSXML_CLASS_LIST) {
+ ok = Equals(cx, xml, v, bp);
+ } else if (vxml) {
+ if (vxml->xml_class == JSXML_CLASS_LIST) {
+ ok = Equals(cx, vxml, OBJECT_TO_JSVAL(obj), bp);
+ } else {
+ if (((xml->xml_class == JSXML_CLASS_TEXT ||
+ xml->xml_class == JSXML_CLASS_ATTRIBUTE) &&
+ HasSimpleContent(vxml)) ||
+ ((vxml->xml_class == JSXML_CLASS_TEXT ||
+ vxml->xml_class == JSXML_CLASS_ATTRIBUTE) &&
+ HasSimpleContent(xml))) {
+ ok = js_EnterLocalRootScope(cx);
+ if (ok) {
+ str = js_ValueToString(cx, OBJECT_TO_JSVAL(obj));
+ vstr = js_ValueToString(cx, v);
+ ok = str && vstr;
+ if (ok)
+ *bp = js_EqualStrings(str, vstr);
+ js_LeaveLocalRootScope(cx);
+ }
+ } else {
+ ok = XMLEquals(cx, xml, vxml, bp);
+ }
+ }
+ } else {
+ ok = js_EnterLocalRootScope(cx);
+ if (ok) {
+ if (HasSimpleContent(xml)) {
+ str = js_ValueToString(cx, OBJECT_TO_JSVAL(obj));
+ vstr = js_ValueToString(cx, v);
+ ok = str && vstr;
+ if (ok)
+ *bp = js_EqualStrings(str, vstr);
+ } else if (JSVAL_IS_STRING(v) || JSVAL_IS_NUMBER(v)) {
+ str = js_ValueToString(cx, OBJECT_TO_JSVAL(obj));
+ if (!str) {
+ ok = JS_FALSE;
+ } else if (JSVAL_IS_STRING(v)) {
+ *bp = js_EqualStrings(str, JSVAL_TO_STRING(v));
+ } else {
+ ok = js_ValueToNumber(cx, STRING_TO_JSVAL(str), &d);
+ if (ok) {
+ d2 = JSVAL_IS_INT(v) ? JSVAL_TO_INT(v)
+ : *JSVAL_TO_DOUBLE(v);
+ *bp = JSDOUBLE_COMPARE(d, ==, d2, JS_FALSE);
+ }
+ }
+ } else {
+ *bp = JS_FALSE;
+ }
+ js_LeaveLocalRootScope(cx);
+ }
+ }
+ return ok;
+}
+
+static JSBool
+xml_concatenate(JSContext *cx, JSObject *obj, jsval v, jsval *vp)
+{
+ JSBool ok;
+ JSObject *listobj, *robj;
+ JSXML *list, *lxml, *rxml;
+
+ ok = js_EnterLocalRootScope(cx);
+ if (!ok)
+ return JS_FALSE;
+
+ listobj = js_NewXMLObject(cx, JSXML_CLASS_LIST);
+ if (!listobj) {
+ ok = JS_FALSE;
+ goto out;
+ }
+
+ list = (JSXML *) JS_GetPrivate(cx, listobj);
+ lxml = (JSXML *) JS_GetPrivate(cx, obj);
+ ok = Append(cx, list, lxml);
+ if (!ok)
+ goto out;
+
+ if (VALUE_IS_XML(cx, v)) {
+ rxml = (JSXML *) JS_GetPrivate(cx, JSVAL_TO_OBJECT(v));
+ } else {
+ robj = ToXML(cx, v);
+ if (!robj) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ rxml = (JSXML *) JS_GetPrivate(cx, robj);
+ }
+ ok = Append(cx, list, rxml);
+ if (!ok)
+ goto out;
+
+ *vp = OBJECT_TO_JSVAL(listobj);
+out:
+ js_LeaveLocalRootScopeWithResult(cx, *vp);
+ return ok;
+}
+
+/* Use js_NewObjectMap so XML objects satisfy OBJ_IS_NATIVE tests. */
+JS_FRIEND_DATA(JSXMLObjectOps) js_XMLObjectOps = {
+ { js_NewObjectMap, js_DestroyObjectMap,
+ xml_lookupProperty, xml_defineProperty,
+ xml_getProperty, xml_setProperty,
+ xml_getAttributes, xml_setAttributes,
+ xml_deleteProperty, xml_defaultValue,
+ xml_enumerate, js_CheckAccess,
+ NULL, NULL,
+ NULL, NULL,
+ NULL, xml_hasInstance,
+ js_SetProtoOrParent, js_SetProtoOrParent,
+ xml_mark, xml_clear,
+ NULL, NULL },
+ xml_getMethod, xml_setMethod,
+ xml_enumerateValues, xml_equality,
+ xml_concatenate
+};
+
+static JSObjectOps *
+xml_getObjectOps(JSContext *cx, JSClass *clasp)
+{
+ return &js_XMLObjectOps.base;
+}
+
+JS_FRIEND_DATA(JSClass) js_XMLClass = {
+ js_XML_str,
+ JSCLASS_HAS_PRIVATE | JSCLASS_HAS_CACHED_PROTO(JSProto_XML),
+ JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
+ JS_EnumerateStub, JS_ResolveStub, JS_ConvertStub, xml_finalize,
+ xml_getObjectOps, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL
+};
+
+static JSObject *
+CallConstructorFunction(JSContext *cx, JSObject *obj, JSClass *clasp,
+ uintN argc, jsval *argv)
+{
+ JSObject *tmp;
+ jsval rval;
+
+ while ((tmp = OBJ_GET_PARENT(cx, obj)) != NULL)
+ obj = tmp;
+ if (!JS_CallFunctionName(cx, obj, clasp->name, argc, argv, &rval))
+ return NULL;
+ JS_ASSERT(!JSVAL_IS_PRIMITIVE(rval));
+ return JSVAL_TO_OBJECT(rval);
+}
+
+static JSXML *
+StartNonListXMLMethod(JSContext *cx, JSObject **objp, jsval *argv)
+{
+ JSXML *xml;
+ JSFunction *fun;
+
+ JS_ASSERT(VALUE_IS_FUNCTION(cx, argv[-2]));
+
+ xml = (JSXML *) JS_GetInstancePrivate(cx, *objp, &js_XMLClass, argv);
+ if (!xml || xml->xml_class != JSXML_CLASS_LIST)
+ return xml;
+
+ if (xml->xml_kids.length == 1) {
+ xml = XMLARRAY_MEMBER(&xml->xml_kids, 0, JSXML);
+ if (xml) {
+ *objp = js_GetXMLObject(cx, xml);
+ if (!*objp)
+ return NULL;
+ argv[-1] = OBJECT_TO_JSVAL(*objp);
+ return xml;
+ }
+ }
+
+ fun = (JSFunction *) JS_GetPrivate(cx, JSVAL_TO_OBJECT(argv[-2]));
+ if (fun) {
+ char numBuf[12];
+ JS_snprintf(numBuf, sizeof numBuf, "%u", xml->xml_kids.length);
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_NON_LIST_XML_METHOD,
+ JS_GetFunctionName(fun), numBuf);
+ }
+ return NULL;
+}
+
+#define XML_METHOD_PROLOG \
+ JS_BEGIN_MACRO \
+ xml = (JSXML *) JS_GetInstancePrivate(cx, obj, &js_XMLClass, argv); \
+ if (!xml) \
+ return JS_FALSE; \
+ JS_END_MACRO
+
+#define NON_LIST_XML_METHOD_PROLOG \
+ JS_BEGIN_MACRO \
+ xml = StartNonListXMLMethod(cx, &obj, argv); \
+ if (!xml) \
+ return JS_FALSE; \
+ JS_ASSERT(xml->xml_class != JSXML_CLASS_LIST); \
+ JS_END_MACRO
+
+static JSBool
+xml_addNamespace(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSXML *xml;
+ JSObject *nsobj;
+ JSXMLNamespace *ns;
+
+ NON_LIST_XML_METHOD_PROLOG;
+ if (xml->xml_class != JSXML_CLASS_ELEMENT)
+ return JS_TRUE;
+ xml = CHECK_COPY_ON_WRITE(cx, xml, obj);
+ if (!xml)
+ return JS_FALSE;
+
+ nsobj = CallConstructorFunction(cx, obj, &js_NamespaceClass.base, 1, argv);
+ if (!nsobj)
+ return JS_FALSE;
+ argv[0] = OBJECT_TO_JSVAL(nsobj);
+
+ ns = (JSXMLNamespace *) JS_GetPrivate(cx, nsobj);
+ if (!AddInScopeNamespace(cx, xml, ns))
+ return JS_FALSE;
+ ns->declared = JS_TRUE;
+ *rval = OBJECT_TO_JSVAL(obj);
+ return JS_TRUE;
+}
+
+static JSBool
+xml_appendChild(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSXML *xml, *vxml;
+ jsval name, v;
+ JSObject *vobj;
+
+ NON_LIST_XML_METHOD_PROLOG;
+ xml = CHECK_COPY_ON_WRITE(cx, xml, obj);
+ if (!xml)
+ return JS_FALSE;
+
+ if (!js_GetAnyName(cx, &name))
+ return JS_FALSE;
+
+ if (!GetProperty(cx, obj, name, &v))
+ return JS_FALSE;
+
+ JS_ASSERT(!JSVAL_IS_PRIMITIVE(v));
+ vobj = JSVAL_TO_OBJECT(v);
+ JS_ASSERT(OBJECT_IS_XML(cx, vobj));
+ vxml = (JSXML *) JS_GetPrivate(cx, vobj);
+ JS_ASSERT(vxml->xml_class == JSXML_CLASS_LIST);
+
+ if (!IndexToIdVal(cx, vxml->xml_kids.length, &name))
+ return JS_FALSE;
+ if (!PutProperty(cx, JSVAL_TO_OBJECT(v), name, &argv[0]))
+ return JS_FALSE;
+
+ *rval = OBJECT_TO_JSVAL(obj);
+ return JS_TRUE;
+}
+
+/* XML and XMLList */
+static JSBool
+xml_attribute(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSXMLQName *qn;
+
+ qn = ToAttributeName(cx, argv[0]);
+ if (!qn)
+ return JS_FALSE;
+ argv[0] = OBJECT_TO_JSVAL(qn->object); /* local root */
+ return GetProperty(cx, obj, argv[0], rval);
+}
+
+/* XML and XMLList */
+static JSBool
+xml_attributes(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ jsval name;
+ JSXMLQName *qn;
+ JSTempValueRooter tvr;
+ JSBool ok;
+
+ name = ATOM_KEY(cx->runtime->atomState.starAtom);
+ qn = ToAttributeName(cx, name);
+ if (!qn)
+ return JS_FALSE;
+ name = OBJECT_TO_JSVAL(qn->object);
+ JS_PUSH_SINGLE_TEMP_ROOT(cx, name, &tvr);
+ ok = GetProperty(cx, obj, name, rval);
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ return ok;
+}
+
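+/*
+ * Create an empty XMLList whose [[TargetObject]] is xml and store it in
+ * *rval; return the list, or NULL on failure.
+ */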
+static JSXML *
+xml_list_helper(JSContext *cx, JSXML *xml, jsval *rval)
+{
+ JSObject *listobj;
+ JSXML *list;
+
+ listobj = js_NewXMLObject(cx, JSXML_CLASS_LIST);
+ if (!listobj)
+ return NULL;
+
+ *rval = OBJECT_TO_JSVAL(listobj);
+ list = (JSXML *) JS_GetPrivate(cx, listobj);
+ list->xml_target = xml;
+ return list;
+}
+
+static JSBool
+xml_child_helper(JSContext *cx, JSObject *obj, JSXML *xml, jsval name,
+ jsval *rval)
+{
+ uint32 index;
+ JSXML *kid;
+ JSObject *kidobj;
+
+ /* ECMA-357 13.4.4.6 */
+ JS_ASSERT(xml->xml_class != JSXML_CLASS_LIST);
+
+ if (js_IdIsIndex(name, &index)) {
+ if (index >= JSXML_LENGTH(xml)) {
+ *rval = JSVAL_VOID;
+ } else {
+ kid = XMLARRAY_MEMBER(&xml->xml_kids, index, JSXML);
+ if (!kid) {
+ *rval = JSVAL_VOID;
+ } else {
+ kidobj = js_GetXMLObject(cx, kid);
+ if (!kidobj)
+ return JS_FALSE;
+ *rval = OBJECT_TO_JSVAL(kidobj);
+ }
+ }
+ return JS_TRUE;
+ }
+
+ return GetProperty(cx, obj, name, rval);
+}
+
+/* XML and XMLList */
+static JSBool
+xml_child(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSXML *xml, *list, *kid, *vxml;
+ JSXMLArrayCursor cursor;
+ jsval name, v;
+ JSObject *kidobj;
+
+ XML_METHOD_PROLOG;
+ name = argv[0];
+ if (xml->xml_class == JSXML_CLASS_LIST) {
+ /* ECMA-357 13.5.4.4 */
+ list = xml_list_helper(cx, xml, rval);
+ if (!list)
+ return JS_FALSE;
+
+ XMLArrayCursorInit(&cursor, &xml->xml_kids);
+ while ((kid = (JSXML *) XMLArrayCursorNext(&cursor)) != NULL) {
+ kidobj = js_GetXMLObject(cx, kid);
+ if (!kidobj)
+ break;
+ if (!xml_child_helper(cx, kidobj, kid, name, &v))
+ break;
+ if (JSVAL_IS_VOID(v)) {
+ /* The property didn't exist in this kid. */
+ continue;
+ }
+
+ JS_ASSERT(!JSVAL_IS_PRIMITIVE(v));
+ vxml = (JSXML *) JS_GetPrivate(cx, JSVAL_TO_OBJECT(v));
+ if ((!JSXML_HAS_KIDS(vxml) || vxml->xml_kids.length != 0) &&
+ !Append(cx, list, vxml)) {
+ break;
+ }
+ }
+ XMLArrayCursorFinish(&cursor);
+ return !kid;
+ }
+
+ /* ECMA-357 Edition 2 13.3.4.6 (note 13.3, not 13.4 as in Edition 1). */
+ if (!xml_child_helper(cx, obj, xml, name, rval))
+ return JS_FALSE;
+ if (JSVAL_IS_VOID(*rval) && !xml_list_helper(cx, xml, rval))
+ return JS_FALSE;
+ return JS_TRUE;
+}
+
+static JSBool
+xml_childIndex(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSXML *xml, *parent;
+ uint32 i, n;
+
+ NON_LIST_XML_METHOD_PROLOG;
+ parent = xml->parent;
+ if (!parent || xml->xml_class == JSXML_CLASS_ATTRIBUTE) {
+ *rval = DOUBLE_TO_JSVAL(cx->runtime->jsNaN);
+ return JS_TRUE;
+ }
+ for (i = 0, n = JSXML_LENGTH(parent); i < n; i++) {
+ if (XMLARRAY_MEMBER(&parent->xml_kids, i, JSXML) == xml)
+ break;
+ }
+ JS_ASSERT(i < n);
+ return js_NewNumberValue(cx, i, rval);
+}
+
+/* XML and XMLList */
+static JSBool
+xml_children(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ jsval name;
+
+ name = ATOM_KEY(cx->runtime->atomState.starAtom);
+ return GetProperty(cx, obj, name, rval);
+}
+
+/* XML and XMLList */
+static JSBool
+xml_comments(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSXML *xml, *list, *kid, *vxml;
+ JSBool ok;
+ uint32 i, n;
+ JSObject *kidobj;
+ jsval v;
+
+ XML_METHOD_PROLOG;
+ list = xml_list_helper(cx, xml, rval);
+ if (!list)
+ return JS_FALSE;
+
+ ok = JS_TRUE;
+
+ if (xml->xml_class == JSXML_CLASS_LIST) {
+ /* 13.5.4.6 Step 2. */
+ for (i = 0, n = JSXML_LENGTH(xml); i < n; i++) {
+ kid = XMLARRAY_MEMBER(&xml->xml_kids, i, JSXML);
+ if (kid && kid->xml_class == JSXML_CLASS_ELEMENT) {
+ ok = js_EnterLocalRootScope(cx);
+ if (!ok)
+ break;
+ kidobj = js_GetXMLObject(cx, kid);
+ if (kidobj) {
+ ok = xml_comments(cx, kidobj, argc, argv, &v);
+ } else {
+ ok = JS_FALSE;
+ v = JSVAL_NULL;
+ }
+ js_LeaveLocalRootScopeWithResult(cx, v);
+ if (!ok)
+ break;
+ vxml = (JSXML *) JS_GetPrivate(cx, JSVAL_TO_OBJECT(v));
+ if (JSXML_LENGTH(vxml) != 0) {
+ ok = Append(cx, list, vxml);
+ if (!ok)
+ break;
+ }
+ }
+ }
+ } else {
+ /* 13.4.4.9 Step 2. */
+ for (i = 0, n = JSXML_LENGTH(xml); i < n; i++) {
+ kid = XMLARRAY_MEMBER(&xml->xml_kids, i, JSXML);
+ if (kid && kid->xml_class == JSXML_CLASS_COMMENT) {
+ ok = Append(cx, list, kid);
+ if (!ok)
+ break;
+ }
+ }
+ }
+
+ return ok;
+}
+
+/* XML and XMLList */
+static JSBool
+xml_contains(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSXML *xml, *kid;
+ jsval value;
+ JSBool eq;
+ JSXMLArrayCursor cursor;
+ JSObject *kidobj;
+
+ XML_METHOD_PROLOG;
+ value = argv[0];
+ if (xml->xml_class == JSXML_CLASS_LIST) {
+ eq = JS_FALSE;
+ XMLArrayCursorInit(&cursor, &xml->xml_kids);
+ while ((kid = (JSXML *) XMLArrayCursorNext(&cursor)) != NULL) {
+ kidobj = js_GetXMLObject(cx, kid);
+ if (!kidobj || !xml_equality(cx, kidobj, value, &eq))
+ break;
+ if (eq)
+ break;
+ }
+ XMLArrayCursorFinish(&cursor);
+ if (kid && !eq)
+ return JS_FALSE;
+ } else {
+ if (!xml_equality(cx, obj, value, &eq))
+ return JS_FALSE;
+ }
+ *rval = BOOLEAN_TO_JSVAL(eq);
+ return JS_TRUE;
+}
+
+/* XML and XMLList */
+static JSBool
+xml_copy(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSXML *xml, *copy;
+
+ XML_METHOD_PROLOG;
+ copy = DeepCopy(cx, xml, NULL, 0);
+ if (!copy)
+ return JS_FALSE;
+ *rval = OBJECT_TO_JSVAL(copy->object);
+ return JS_TRUE;
+}
+
+/* XML and XMLList */
+static JSBool
+xml_descendants(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSXML *xml, *list;
+ jsval name;
+
+ XML_METHOD_PROLOG;
+ name = (argc == 0) ? ATOM_KEY(cx->runtime->atomState.starAtom) : argv[0];
+ list = Descendants(cx, xml, name);
+ if (!list)
+ return JS_FALSE;
+ *rval = OBJECT_TO_JSVAL(list->object);
+ return JS_TRUE;
+}
+
+/* XML and XMLList */
+static JSBool
+xml_elements(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSXML *xml, *list, *kid, *vxml;
+ jsval name, v;
+ JSXMLQName *nameqn;
+ jsid funid;
+ JSBool ok;
+ JSXMLArrayCursor cursor;
+ JSObject *kidobj;
+ uint32 i, n;
+
+ XML_METHOD_PROLOG;
+ name = (argc == 0) ? ATOM_KEY(cx->runtime->atomState.starAtom) : argv[0];
+ nameqn = ToXMLName(cx, name, &funid);
+ if (!nameqn)
+ return JS_FALSE;
+ argv[0] = OBJECT_TO_JSVAL(nameqn->object);
+
+ list = xml_list_helper(cx, xml, rval);
+ if (!list)
+ return JS_FALSE;
+ if (funid)
+ return JS_TRUE;
+
+ list->xml_targetprop = nameqn;
+ ok = JS_TRUE;
+
+ if (xml->xml_class == JSXML_CLASS_LIST) {
+ /* 13.5.4.6 */
+ XMLArrayCursorInit(&cursor, &xml->xml_kids);
+ while ((kid = (JSXML *) XMLArrayCursorNext(&cursor)) != NULL) {
+ if (kid->xml_class == JSXML_CLASS_ELEMENT) {
+ ok = js_EnterLocalRootScope(cx);
+ if (!ok)
+ break;
+ kidobj = js_GetXMLObject(cx, kid);
+ if (kidobj) {
+ ok = xml_elements(cx, kidobj, argc, argv, &v);
+ } else {
+ ok = JS_FALSE;
+ v = JSVAL_NULL;
+ }
+ js_LeaveLocalRootScopeWithResult(cx, v);
+ if (!ok)
+ break;
+ vxml = (JSXML *) JS_GetPrivate(cx, JSVAL_TO_OBJECT(v));
+ if (JSXML_LENGTH(vxml) != 0) {
+ ok = Append(cx, list, vxml);
+ if (!ok)
+ break;
+ }
+ }
+ }
+ XMLArrayCursorFinish(&cursor);
+ } else {
+ for (i = 0, n = JSXML_LENGTH(xml); i < n; i++) {
+ kid = XMLARRAY_MEMBER(&xml->xml_kids, i, JSXML);
+ if (kid && kid->xml_class == JSXML_CLASS_ELEMENT &&
+ MatchElemName(nameqn, kid)) {
+ ok = Append(cx, list, kid);
+ if (!ok)
+ break;
+ }
+ }
+ }
+
+ return ok;
+}
+
+/* XML and XMLList */
+static JSBool
+xml_hasOwnProperty(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ jsval name;
+ JSObject *pobj;
+ JSProperty *prop;
+
+ if (!JS_InstanceOf(cx, obj, &js_XMLClass, argv))
+ return JS_FALSE;
+
+ name = argv[0];
+ if (!HasProperty(cx, obj, name, &pobj, &prop))
+ return JS_FALSE;
+ if (!prop) {
+ return js_HasOwnPropertyHelper(cx, obj, js_LookupProperty, argc, argv,
+ rval);
+ }
+ DROP_PROPERTY(cx, pobj, prop);
+ *rval = JSVAL_TRUE;
+ return JS_TRUE;
+}
+
+/* XML and XMLList */
+static JSBool
+xml_hasComplexContent(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSXML *xml, *kid;
+ JSObject *kidobj;
+ uint32 i, n;
+
+ XML_METHOD_PROLOG;
+again:
+ switch (xml->xml_class) {
+ case JSXML_CLASS_ATTRIBUTE:
+ case JSXML_CLASS_COMMENT:
+ case JSXML_CLASS_PROCESSING_INSTRUCTION:
+ case JSXML_CLASS_TEXT:
+ *rval = JSVAL_FALSE;
+ break;
+ case JSXML_CLASS_LIST:
+ if (xml->xml_kids.length == 0) {
+ *rval = JSVAL_TRUE;
+ } else if (xml->xml_kids.length == 1) {
+ kid = XMLARRAY_MEMBER(&xml->xml_kids, 0, JSXML);
+ if (kid) {
+ kidobj = js_GetXMLObject(cx, kid);
+ if (!kidobj)
+ return JS_FALSE;
+ obj = kidobj;
+ xml = (JSXML *) JS_GetPrivate(cx, obj);
+ goto again;
+ }
+ }
+ /* FALL THROUGH */
+ default:
+ *rval = JSVAL_FALSE;
+ for (i = 0, n = xml->xml_kids.length; i < n; i++) {
+ kid = XMLARRAY_MEMBER(&xml->xml_kids, i, JSXML);
+ if (kid && kid->xml_class == JSXML_CLASS_ELEMENT) {
+ *rval = JSVAL_TRUE;
+ break;
+ }
+ }
+ break;
+ }
+ return JS_TRUE;
+}
+
+/* XML and XMLList */
+static JSBool
+xml_hasSimpleContent(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSXML *xml;
+
+ XML_METHOD_PROLOG;
+ *rval = BOOLEAN_TO_JSVAL(HasSimpleContent(xml));
+ return JS_TRUE;
+}
+
+typedef struct JSTempRootedNSArray {
+ JSTempValueRooter tvr;
+ JSXMLArray array;
+ jsval value; /* extra root for temporaries */
+} JSTempRootedNSArray;
+
+JS_STATIC_DLL_CALLBACK(void)
+mark_temp_ns_array(JSContext *cx, JSTempValueRooter *tvr)
+{
+ JSTempRootedNSArray *tmp = (JSTempRootedNSArray *)tvr;
+
+ namespace_mark_vector(cx,
+ (JSXMLNamespace **)tmp->array.vector,
+ tmp->array.length);
+ XMLArrayCursorMark(cx, tmp->array.cursors);
+ if (JSVAL_IS_GCTHING(tmp->value))
+ GC_MARK(cx, JSVAL_TO_GCTHING(tmp->value), "temp_ns_array_value");
+}
+
+static void
+InitTempNSArray(JSContext *cx, JSTempRootedNSArray *tmp)
+{
+ XMLArrayInit(cx, &tmp->array, 0);
+ tmp->value = JSVAL_NULL;
+ JS_PUSH_TEMP_ROOT_MARKER(cx, mark_temp_ns_array, &tmp->tvr);
+}
+
+static void
+FinishTempNSArray(JSContext *cx, JSTempRootedNSArray *tmp)
+{
+ JS_ASSERT(tmp->tvr.u.marker == mark_temp_ns_array);
+ JS_POP_TEMP_ROOT(cx, &tmp->tvr);
+ XMLArrayFinish(cx, &tmp->array);
+}
+
+/*
+ * Populate a new JS array with elements of JSTempRootedNSArray.array and
+ * place the result into rval. rval must point to a rooted location.
+ */
+static JSBool
+TempNSArrayToJSArray(JSContext *cx, JSTempRootedNSArray *tmp, jsval *rval)
+{
+ JSObject *arrayobj;
+ uint32 i, n;
+ JSXMLNamespace *ns;
+ JSObject *nsobj;
+
+ arrayobj = js_NewArrayObject(cx, 0, NULL);
+ if (!arrayobj)
+ return JS_FALSE;
+ *rval = OBJECT_TO_JSVAL(arrayobj);
+ for (i = 0, n = tmp->array.length; i < n; i++) {
+ ns = XMLARRAY_MEMBER(&tmp->array, i, JSXMLNamespace);
+ if (!ns)
+ continue;
+ nsobj = js_GetXMLNamespaceObject(cx, ns);
+ if (!nsobj)
+ return JS_FALSE;
+ tmp->value = OBJECT_TO_JSVAL(nsobj);
+ if (!OBJ_SET_PROPERTY(cx, arrayobj, INT_TO_JSID(i), &tmp->value))
+ return JS_FALSE;
+ }
+ return JS_TRUE;
+}
+
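+/*
+ * Accumulate into nsarray the namespaces in scope at xml, walking from xml up
+ * through its ancestors and skipping a namespace when an entry with the same
+ * prefix (or, if either prefix is null, the same URI) is already present.
+ */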
+static JSBool
+FindInScopeNamespaces(JSContext *cx, JSXML *xml, JSXMLArray *nsarray)
+{
+ uint32 length, i, j, n;
+ JSXMLNamespace *ns, *ns2;
+
+ length = nsarray->length;
+ do {
+ if (xml->xml_class != JSXML_CLASS_ELEMENT)
+ continue;
+ for (i = 0, n = xml->xml_namespaces.length; i < n; i++) {
+ ns = XMLARRAY_MEMBER(&xml->xml_namespaces, i, JSXMLNamespace);
+ if (!ns)
+ continue;
+
+ for (j = 0; j < length; j++) {
+ ns2 = XMLARRAY_MEMBER(nsarray, j, JSXMLNamespace);
+ if (ns2 &&
+ ((ns2->prefix && ns->prefix)
+ ? js_EqualStrings(ns2->prefix, ns->prefix)
+ : js_EqualStrings(ns2->uri, ns->uri))) {
+ break;
+ }
+ }
+
+ if (j == length) {
+ if (!XMLARRAY_APPEND(cx, nsarray, ns))
+ return JS_FALSE;
+ ++length;
+ }
+ }
+ } while ((xml = xml->parent) != NULL);
+ JS_ASSERT(length == nsarray->length);
+
+ return JS_TRUE;
+}
+
+static JSBool
+xml_inScopeNamespaces(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSXML *xml;
+ JSTempRootedNSArray namespaces;
+ JSBool ok;
+
+ NON_LIST_XML_METHOD_PROLOG;
+
+ InitTempNSArray(cx, &namespaces);
+ ok = FindInScopeNamespaces(cx, xml, &namespaces.array) &&
+ TempNSArrayToJSArray(cx, &namespaces, rval);
+ FinishTempNSArray(cx, &namespaces);
+ return ok;
+}
+
+static JSBool
+xml_insertChildAfter(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSXML *xml, *kid;
+ jsval arg;
+ uint32 i;
+
+ NON_LIST_XML_METHOD_PROLOG;
+ if (!JSXML_HAS_KIDS(xml))
+ return JS_TRUE;
+
+ arg = argv[0];
+ if (JSVAL_IS_NULL(arg)) {
+ kid = NULL;
+ i = 0;
+ } else {
+ if (!VALUE_IS_XML(cx, arg))
+ return JS_TRUE;
+ kid = (JSXML *) JS_GetPrivate(cx, JSVAL_TO_OBJECT(arg));
+ i = XMLARRAY_FIND_MEMBER(&xml->xml_kids, kid, NULL);
+ if (i == XML_NOT_FOUND)
+ return JS_TRUE;
+ ++i;
+ }
+
+ xml = CHECK_COPY_ON_WRITE(cx, xml, obj);
+ if (!xml)
+ return JS_FALSE;
+ if (!Insert(cx, xml, i, argv[1]))
+ return JS_FALSE;
+ *rval = OBJECT_TO_JSVAL(obj);
+ return JS_TRUE;
+}
+
+static JSBool
+xml_insertChildBefore(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSXML *xml, *kid;
+ jsval arg;
+ uint32 i;
+
+ NON_LIST_XML_METHOD_PROLOG;
+ if (!JSXML_HAS_KIDS(xml))
+ return JS_TRUE;
+
+ arg = argv[0];
+ if (JSVAL_IS_NULL(arg)) {
+ kid = NULL;
+ i = xml->xml_kids.length;
+ } else {
+ if (!VALUE_IS_XML(cx, arg))
+ return JS_TRUE;
+ kid = (JSXML *) JS_GetPrivate(cx, JSVAL_TO_OBJECT(arg));
+ i = XMLARRAY_FIND_MEMBER(&xml->xml_kids, kid, NULL);
+ if (i == XML_NOT_FOUND)
+ return JS_TRUE;
+ }
+
+ xml = CHECK_COPY_ON_WRITE(cx, xml, obj);
+ if (!xml)
+ return JS_FALSE;
+ if (!Insert(cx, xml, i, argv[1]))
+ return JS_FALSE;
+ *rval = OBJECT_TO_JSVAL(obj);
+ return JS_TRUE;
+}
+
+/* XML and XMLList */
+static JSBool
+xml_length(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSXML *xml;
+
+ XML_METHOD_PROLOG;
+ if (xml->xml_class != JSXML_CLASS_LIST) {
+ *rval = JSVAL_ONE;
+ } else {
+ if (!js_NewNumberValue(cx, xml->xml_kids.length, rval))
+ return JS_FALSE;
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+xml_localName(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSXML *xml;
+
+ NON_LIST_XML_METHOD_PROLOG;
+ *rval = xml->name ? STRING_TO_JSVAL(xml->name->localName) : JSVAL_NULL;
+ return JS_TRUE;
+}
+
+static JSBool
+xml_name(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSXML *xml;
+ JSObject *nameobj;
+
+ NON_LIST_XML_METHOD_PROLOG;
+ if (!xml->name) {
+ *rval = JSVAL_NULL;
+ } else {
+ nameobj = js_GetXMLQNameObject(cx, xml->name);
+ if (!nameobj)
+ return JS_FALSE;
+ *rval = OBJECT_TO_JSVAL(nameobj);
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+xml_namespace(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSXML *xml;
+ JSString *prefix;
+ JSTempRootedNSArray inScopeNSes;
+ JSBool ok;
+ jsuint i, length;
+ JSXMLNamespace *ns;
+ JSObject *nsobj;
+
+ NON_LIST_XML_METHOD_PROLOG;
+ if (argc == 0 && !JSXML_HAS_NAME(xml)) {
+ *rval = JSVAL_NULL;
+ return JS_TRUE;
+ }
+
+ if (argc == 0) {
+ prefix = NULL;
+ } else {
+ prefix = js_ValueToString(cx, argv[0]);
+ if (!prefix)
+ return JS_FALSE;
+ argv[0] = STRING_TO_JSVAL(prefix); /* local root */
+ }
+
+ /* After this point the control must flow through label out. */
+ InitTempNSArray(cx, &inScopeNSes);
+ ok = FindInScopeNamespaces(cx, xml, &inScopeNSes.array);
+ if (!ok)
+ goto out;
+
+ if (!prefix) {
+ ns = GetNamespace(cx, xml->name, &inScopeNSes.array);
+ if (!ns) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ } else {
+ ns = NULL;
+ for (i = 0, length = inScopeNSes.array.length; i < length; i++) {
+ ns = XMLARRAY_MEMBER(&inScopeNSes.array, i, JSXMLNamespace);
+ if (ns && ns->prefix && js_EqualStrings(ns->prefix, prefix))
+ break;
+ ns = NULL;
+ }
+ }
+
+ if (!ns) {
+ *rval = JSVAL_VOID;
+ } else {
+ nsobj = js_GetXMLNamespaceObject(cx, ns);
+ if (!nsobj) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ *rval = OBJECT_TO_JSVAL(nsobj);
+ }
+
+ out:
+ FinishTempNSArray(cx, &inScopeNSes);
+ return ok;
+}
+
+static JSBool
+xml_namespaceDeclarations(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSXML *xml, *yml;
+ JSBool ok;
+ JSTempRootedNSArray ancestors, declared;
+ uint32 i, n;
+ JSXMLNamespace *ns;
+
+ NON_LIST_XML_METHOD_PROLOG;
+ if (JSXML_HAS_VALUE(xml))
+ return JS_TRUE;
+
+ /* From here, control flow must goto out to finish these arrays. */
+ ok = JS_TRUE;
+ InitTempNSArray(cx, &ancestors);
+ InitTempNSArray(cx, &declared);
+ yml = xml;
+
+ while ((yml = yml->parent) != NULL) {
+ JS_ASSERT(yml->xml_class == JSXML_CLASS_ELEMENT);
+ for (i = 0, n = yml->xml_namespaces.length; i < n; i++) {
+ ns = XMLARRAY_MEMBER(&yml->xml_namespaces, i, JSXMLNamespace);
+ if (ns &&
+ !XMLARRAY_HAS_MEMBER(&ancestors.array, ns, namespace_match)) {
+ ok = XMLARRAY_APPEND(cx, &ancestors.array, ns);
+ if (!ok)
+ goto out;
+ }
+ }
+ }
+
+ for (i = 0, n = xml->xml_namespaces.length; i < n; i++) {
+ ns = XMLARRAY_MEMBER(&xml->xml_namespaces, i, JSXMLNamespace);
+ if (!ns)
+ continue;
+ if (!ns->declared)
+ continue;
+ if (!XMLARRAY_HAS_MEMBER(&ancestors.array, ns, namespace_match)) {
+ ok = XMLARRAY_APPEND(cx, &declared.array, ns);
+ if (!ok)
+ goto out;
+ }
+ }
+
+ ok = TempNSArrayToJSArray(cx, &declared, rval);
+
+out:
+ /* Finishing must be in reverse order of initialization to follow LIFO. */
+ FinishTempNSArray(cx, &declared);
+ FinishTempNSArray(cx, &ancestors);
+ return ok;
+}
+
+static const char js_attribute_str[] = "attribute";
+static const char js_text_str[] = "text";
+
+/* Exported to jsgc.c #ifdef GC_MARK_DEBUG. */
+const char *js_xml_class_str[] = {
+ "list",
+ "element",
+ js_attribute_str,
+ "processing-instruction",
+ js_text_str,
+ "comment"
+};
+
+static JSBool
+xml_nodeKind(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSXML *xml;
+ JSString *str;
+
+ NON_LIST_XML_METHOD_PROLOG;
+ str = JS_InternString(cx, js_xml_class_str[xml->xml_class]);
+ if (!str)
+ return JS_FALSE;
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+
+static JSBool
+NormalizingDelete(JSContext *cx, JSObject *obj, JSXML *xml, jsval id)
+{
+ jsval junk;
+
+ if (xml->xml_class == JSXML_CLASS_LIST)
+ return DeleteProperty(cx, obj, id, &junk);
+ return DeleteByIndex(cx, xml, id, &junk);
+}
+
+/*
+ * Erratum? the testcase js/tests/e4x/XML/13.4.4.26.js wants all-whitespace
+ * text between tags to be removed by normalize.
+ */
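+/*
+ * Illustrative sketch (hypothetical input; assumes XML.ignoreWhitespace is
+ * set to false so the whitespace-only text kids survive parsing):
+ *
+ * var x = <a>  <b/>  </a>; // kids: "  ", <b/>, "  "
+ * x.normalize(); // both whitespace-only text kids go away
+ * x.children().length(); // 1
+ */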
+static JSBool
+IsXMLSpace(JSString *str)
+{
+ const jschar *cp, *end;
+
+ cp = JSSTRING_CHARS(str);
+ end = cp + JSSTRING_LENGTH(str);
+ while (cp < end) {
+ if (!JS_ISXMLSPACE(*cp))
+ return JS_FALSE;
+ ++cp;
+ }
+ return JS_TRUE;
+}
+
+/* XML and XMLList */
+static JSBool
+xml_normalize(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSXML *xml, *kid, *kid2;
+ uint32 i, n;
+ JSObject *kidobj;
+ JSString *str;
+ jsval junk;
+
+ XML_METHOD_PROLOG;
+ *rval = OBJECT_TO_JSVAL(obj);
+ if (!JSXML_HAS_KIDS(xml))
+ return JS_TRUE;
+
+ xml = CHECK_COPY_ON_WRITE(cx, xml, obj);
+ if (!xml)
+ return JS_FALSE;
+
+ for (i = 0, n = xml->xml_kids.length; i < n; i++) {
+ kid = XMLARRAY_MEMBER(&xml->xml_kids, i, JSXML);
+ if (!kid)
+ continue;
+ if (kid->xml_class == JSXML_CLASS_ELEMENT) {
+ kidobj = js_GetXMLObject(cx, kid);
+ if (!kidobj || !xml_normalize(cx, kidobj, argc, argv, &junk))
+ return JS_FALSE;
+ } else if (kid->xml_class == JSXML_CLASS_TEXT) {
+ while (i + 1 < n &&
+ (kid2 = XMLARRAY_MEMBER(&xml->xml_kids, i + 1, JSXML)) &&
+ kid2->xml_class == JSXML_CLASS_TEXT) {
+ str = js_ConcatStrings(cx, kid->xml_value, kid2->xml_value);
+ if (!str)
+ return JS_FALSE;
+ if (!NormalizingDelete(cx, obj, xml, INT_TO_JSVAL(i + 1)))
+ return JS_FALSE;
+ n = xml->xml_kids.length;
+ kid->xml_value = str;
+ }
+ if (IS_EMPTY(kid->xml_value) || IsXMLSpace(kid->xml_value)) {
+ if (!NormalizingDelete(cx, obj, xml, INT_TO_JSVAL(i)))
+ return JS_FALSE;
+ n = xml->xml_kids.length;
+ --i;
+ }
+ }
+ }
+
+ return JS_TRUE;
+}
+
+/* XML and XMLList */
+static JSBool
+xml_parent(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSXML *xml, *parent, *kid;
+ uint32 i, n;
+ JSObject *parentobj;
+
+ XML_METHOD_PROLOG;
+ parent = xml->parent;
+ if (xml->xml_class == JSXML_CLASS_LIST) {
+ *rval = JSVAL_VOID;
+ n = xml->xml_kids.length;
+ if (n == 0)
+ return JS_TRUE;
+
+ kid = XMLARRAY_MEMBER(&xml->xml_kids, 0, JSXML);
+ if (!kid)
+ return JS_TRUE;
+ parent = kid->parent;
+ for (i = 1; i < n; i++) {
+ kid = XMLARRAY_MEMBER(&xml->xml_kids, i, JSXML);
+ if (kid && kid->parent != parent)
+ return JS_TRUE;
+ }
+ }
+
+ if (!parent) {
+ *rval = JSVAL_NULL;
+ return JS_TRUE;
+ }
+
+ parentobj = js_GetXMLObject(cx, parent);
+ if (!parentobj)
+ return JS_FALSE;
+ *rval = OBJECT_TO_JSVAL(parentobj);
+ return JS_TRUE;
+}
+
+/* XML and XMLList */
+static JSBool
+xml_processingInstructions(JSContext *cx, JSObject *obj, uintN argc,
+ jsval *argv, jsval *rval)
+{
+ JSXML *xml, *list, *kid, *vxml;
+ jsval name, v;
+ JSXMLQName *nameqn;
+ jsid funid;
+ JSBool ok;
+ JSXMLArrayCursor cursor;
+ JSObject *kidobj;
+ uint32 i, n;
+
+ XML_METHOD_PROLOG;
+ name = (argc == 0) ? ATOM_KEY(cx->runtime->atomState.starAtom) : argv[0];
+ nameqn = ToXMLName(cx, name, &funid);
+ if (!nameqn)
+ return JS_FALSE;
+ argv[0] = OBJECT_TO_JSVAL(nameqn->object);
+
+ list = xml_list_helper(cx, xml, rval);
+ if (!list)
+ return JS_FALSE;
+ if (funid)
+ return JS_TRUE;
+
+ list->xml_targetprop = nameqn;
+ ok = JS_TRUE;
+
+ if (xml->xml_class == JSXML_CLASS_LIST) {
+ /* 13.5.4.17 Step 4 (misnumbered 9 -- Erratum?). */
+ XMLArrayCursorInit(&cursor, &xml->xml_kids);
+ while ((kid = (JSXML *) XMLArrayCursorNext(&cursor)) != NULL) {
+ if (kid->xml_class == JSXML_CLASS_ELEMENT) {
+ ok = js_EnterLocalRootScope(cx);
+ if (!ok)
+ break;
+ kidobj = js_GetXMLObject(cx, kid);
+ if (kidobj) {
+ ok = xml_processingInstructions(cx, kidobj, argc, argv, &v);
+ } else {
+ ok = JS_FALSE;
+ v = JSVAL_NULL;
+ }
+ js_LeaveLocalRootScopeWithResult(cx, v);
+ if (!ok)
+ break;
+ vxml = (JSXML *) JS_GetPrivate(cx, JSVAL_TO_OBJECT(v));
+ if (JSXML_LENGTH(vxml) != 0) {
+ ok = Append(cx, list, vxml);
+ if (!ok)
+ break;
+ }
+ }
+ }
+ XMLArrayCursorFinish(&cursor);
+ } else {
+ /* 13.4.4.28 Step 4. */
+ for (i = 0, n = JSXML_LENGTH(xml); i < n; i++) {
+ kid = XMLARRAY_MEMBER(&xml->xml_kids, i, JSXML);
+ if (kid && kid->xml_class == JSXML_CLASS_PROCESSING_INSTRUCTION &&
+ (IS_STAR(nameqn->localName) ||
+ js_EqualStrings(nameqn->localName, kid->name->localName))) {
+ ok = Append(cx, list, kid);
+ if (!ok)
+ break;
+ }
+ }
+ }
+
+ return ok;
+}
+
+static JSBool
+xml_prependChild(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSXML *xml;
+
+ NON_LIST_XML_METHOD_PROLOG;
+ xml = CHECK_COPY_ON_WRITE(cx, xml, obj);
+ if (!xml)
+ return JS_FALSE;
+ *rval = OBJECT_TO_JSVAL(obj);
+ return Insert(cx, xml, 0, argv[0]);
+}
+
+/* XML and XMLList */
+static JSBool
+xml_propertyIsEnumerable(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSXML *xml;
+ jsval name;
+ uint32 index;
+
+ XML_METHOD_PROLOG;
+ name = argv[0];
+ *rval = JSVAL_FALSE;
+ if (js_IdIsIndex(name, &index)) {
+ if (xml->xml_class == JSXML_CLASS_LIST) {
+ /* 13.5.4.18. */
+ *rval = BOOLEAN_TO_JSVAL(index < xml->xml_kids.length);
+ } else {
+ /* 13.4.4.30. */
+ *rval = BOOLEAN_TO_JSVAL(index == 0);
+ }
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+namespace_full_match(const void *a, const void *b)
+{
+ const JSXMLNamespace *nsa = (const JSXMLNamespace *) a;
+ const JSXMLNamespace *nsb = (const JSXMLNamespace *) b;
+
+ if (nsa->prefix && nsb->prefix &&
+ !js_EqualStrings(nsa->prefix, nsb->prefix)) {
+ return JS_FALSE;
+ }
+ return js_EqualStrings(nsa->uri, nsb->uri);
+}
+
+static JSBool
+xml_removeNamespace_helper(JSContext *cx, JSXML *xml, JSXMLNamespace *ns)
+{
+ JSXMLNamespace *thisns, *attrns;
+ uint32 i, n;
+ JSXML *attr, *kid;
+
+ thisns = GetNamespace(cx, xml->name, &xml->xml_namespaces);
+ JS_ASSERT(thisns);
+ if (thisns == ns)
+ return JS_TRUE;
+
+ for (i = 0, n = xml->xml_attrs.length; i < n; i++) {
+ attr = XMLARRAY_MEMBER(&xml->xml_attrs, i, JSXML);
+ if (!attr)
+ continue;
+ attrns = GetNamespace(cx, attr->name, &xml->xml_namespaces);
+ JS_ASSERT(attrns);
+ if (attrns == ns)
+ return JS_TRUE;
+ }
+
+ i = XMLARRAY_FIND_MEMBER(&xml->xml_namespaces, ns, namespace_full_match);
+ if (i != XML_NOT_FOUND)
+ XMLArrayDelete(cx, &xml->xml_namespaces, i, JS_TRUE);
+
+ for (i = 0, n = xml->xml_kids.length; i < n; i++) {
+ kid = XMLARRAY_MEMBER(&xml->xml_kids, i, JSXML);
+ if (kid && kid->xml_class == JSXML_CLASS_ELEMENT) {
+ if (!xml_removeNamespace_helper(cx, kid, ns))
+ return JS_FALSE;
+ }
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+xml_removeNamespace(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSXML *xml;
+ JSObject *nsobj;
+ JSXMLNamespace *ns;
+
+ NON_LIST_XML_METHOD_PROLOG;
+ *rval = OBJECT_TO_JSVAL(obj);
+ if (xml->xml_class != JSXML_CLASS_ELEMENT)
+ return JS_TRUE;
+ xml = CHECK_COPY_ON_WRITE(cx, xml, obj);
+ if (!xml)
+ return JS_FALSE;
+
+ nsobj = CallConstructorFunction(cx, obj, &js_NamespaceClass.base, 1, argv);
+ if (!nsobj)
+ return JS_FALSE;
+ argv[0] = OBJECT_TO_JSVAL(nsobj);
+ ns = (JSXMLNamespace *) JS_GetPrivate(cx, nsobj);
+
+ /* NOTE: remove ns from each ancestor if not used by that ancestor. */
+ return xml_removeNamespace_helper(cx, xml, ns);
+}
+
+static JSBool
+xml_replace(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSXML *xml, *vxml, *kid;
+ jsval name, value, id, junk;
+ uint32 index;
+ JSObject *nameobj;
+ JSXMLQName *nameqn;
+
+ NON_LIST_XML_METHOD_PROLOG;
+ *rval = OBJECT_TO_JSVAL(obj);
+ if (xml->xml_class != JSXML_CLASS_ELEMENT)
+ return JS_TRUE;
+
+ value = argv[1];
+ vxml = VALUE_IS_XML(cx, value)
+ ? (JSXML *) JS_GetPrivate(cx, JSVAL_TO_OBJECT(value))
+ : NULL;
+ if (!vxml) {
+ if (!JS_ConvertValue(cx, value, JSTYPE_STRING, &argv[1]))
+ return JS_FALSE;
+ value = argv[1];
+ } else {
+ vxml = DeepCopy(cx, vxml, NULL, 0);
+ if (!vxml)
+ return JS_FALSE;
+ value = argv[1] = OBJECT_TO_JSVAL(vxml->object);
+ }
+
+ xml = CHECK_COPY_ON_WRITE(cx, xml, obj);
+ if (!xml)
+ return JS_FALSE;
+
+ name = argv[0];
+ if (js_IdIsIndex(name, &index))
+ return Replace(cx, xml, name, value);
+
+ /* Call function QName per spec, not ToXMLName, to avoid attribute names. */
+ nameobj = CallConstructorFunction(cx, obj, &js_QNameClass.base, 1, &name);
+ if (!nameobj)
+ return JS_FALSE;
+ argv[0] = OBJECT_TO_JSVAL(nameobj);
+ nameqn = (JSXMLQName *) JS_GetPrivate(cx, nameobj);
+
+ id = JSVAL_VOID;
+ index = xml->xml_kids.length;
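+ /*
+ * Scan kids from the end: every matching kid except the lowest-index
+ * match is deleted; Replace then overwrites that remaining match below.
+ */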
+ while (index != 0) {
+ --index;
+ kid = XMLARRAY_MEMBER(&xml->xml_kids, index, JSXML);
+ if (kid && MatchElemName(nameqn, kid)) {
+ if (!JSVAL_IS_VOID(id) && !DeleteByIndex(cx, xml, id, &junk))
+ return JS_FALSE;
+ if (!IndexToIdVal(cx, index, &id))
+ return JS_FALSE;
+ }
+ }
+ if (JSVAL_IS_VOID(id))
+ return JS_TRUE;
+ return Replace(cx, xml, id, value);
+}
+
+static JSBool
+xml_setChildren(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ if (!StartNonListXMLMethod(cx, &obj, argv))
+ return JS_FALSE;
+
+ if (!PutProperty(cx, obj, ATOM_KEY(cx->runtime->atomState.starAtom),
+ &argv[0])) {
+ return JS_FALSE;
+ }
+
+ *rval = OBJECT_TO_JSVAL(obj);
+ return JS_TRUE;
+}
+
+static JSBool
+xml_setLocalName(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSXML *xml;
+ jsval name;
+ JSXMLQName *nameqn;
+ JSString *namestr;
+
+ NON_LIST_XML_METHOD_PROLOG;
+ if (!JSXML_HAS_NAME(xml))
+ return JS_TRUE;
+
+ name = argv[0];
+ if (!JSVAL_IS_PRIMITIVE(name) &&
+ OBJ_GET_CLASS(cx, JSVAL_TO_OBJECT(name)) == &js_QNameClass.base) {
+ nameqn = (JSXMLQName *) JS_GetPrivate(cx, JSVAL_TO_OBJECT(name));
+ namestr = nameqn->localName;
+ } else {
+ if (!JS_ConvertValue(cx, name, JSTYPE_STRING, &argv[0]))
+ return JS_FALSE;
+ name = argv[0];
+ namestr = JSVAL_TO_STRING(name);
+ }
+
+ xml = CHECK_COPY_ON_WRITE(cx, xml, obj);
+ if (!xml)
+ return JS_FALSE;
+ xml->name->localName = namestr;
+ return JS_TRUE;
+}
+
+static JSBool
+xml_setName(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSXML *xml, *nsowner;
+ jsval name;
+ JSXMLQName *nameqn;
+ JSObject *nameobj;
+ JSXMLArray *nsarray;
+ uint32 i, n;
+ JSXMLNamespace *ns;
+
+ NON_LIST_XML_METHOD_PROLOG;
+ if (!JSXML_HAS_NAME(xml))
+ return JS_TRUE;
+
+ name = argv[0];
+ if (!JSVAL_IS_PRIMITIVE(name) &&
+ OBJ_GET_CLASS(cx, JSVAL_TO_OBJECT(name)) == &js_QNameClass.base &&
+ !(nameqn = (JSXMLQName *) JS_GetPrivate(cx, JSVAL_TO_OBJECT(name)))
+ ->uri) {
+ name = argv[0] = STRING_TO_JSVAL(nameqn->localName);
+ }
+
+ nameobj = js_ConstructObject(cx, &js_QNameClass.base, NULL, NULL, 1, &name);
+ if (!nameobj)
+ return JS_FALSE;
+ nameqn = (JSXMLQName *) JS_GetPrivate(cx, nameobj);
+
+ /* ECMA-357 13.4.4.35 Step 4. */
+ if (xml->xml_class == JSXML_CLASS_PROCESSING_INSTRUCTION)
+ nameqn->uri = cx->runtime->emptyString;
+
+ xml = CHECK_COPY_ON_WRITE(cx, xml, obj);
+ if (!xml)
+ return JS_FALSE;
+ xml->name = nameqn;
+
+ /*
+ * Erratum: nothing in 13.4.4.35 talks about making the name match the
+ * in-scope namespaces, either by finding an in-scope namespace with a
+ * matching uri and setting the new name's prefix to that namespace's
+ * prefix, or by extending the in-scope namespaces for xml (which are in
+ * xml->parent if xml is an attribute or a PI).
+ */
+ if (xml->xml_class == JSXML_CLASS_ELEMENT) {
+ nsowner = xml;
+ } else {
+ if (!xml->parent || xml->parent->xml_class != JSXML_CLASS_ELEMENT)
+ return JS_TRUE;
+ nsowner = xml->parent;
+ }
+
+ if (nameqn->prefix) {
+ /*
+ * The name being set has a prefix, which originally came from some
+ * namespace object (which may be the null namespace, where both the
+ * prefix and uri are the empty string). We must go through a full
+ * GetNamespace in case that namespace is in-scope in nsowner.
+ *
+ * If we find such an in-scope namespace, we return true right away,
+ * in this block. Otherwise, we fall through to the final return of
+ * AddInScopeNamespace(cx, nsowner, ns).
+ */
+ ns = GetNamespace(cx, nameqn, &nsowner->xml_namespaces);
+ if (!ns)
+ return JS_FALSE;
+
+ /* XXXbe have to test membership to see whether GetNamespace added */
+ if (XMLARRAY_HAS_MEMBER(&nsowner->xml_namespaces, ns, NULL))
+ return JS_TRUE;
+ } else {
+ /*
+ * At this point, we know nameqn->prefix is null, so nameqn->uri can't
+ * be the empty string (the null namespace always uses the empty string
+ * for both prefix and uri).
+ *
+ * This means we must inline GetNamespace and specialize it to match
+ * uri only, never prefix. If we find a namespace with nameqn's uri
+ * already in nsowner->xml_namespaces, then all that we need do is set
+ * nameqn->prefix to that namespace's prefix.
+ *
+ * If no such namespace exists, we can create one without going through
+ * the constructor, because we know nameqn->uri is non-empty (so prefix
+ * does not need to be converted from null to empty by QName).
+ */
+ JS_ASSERT(!IS_EMPTY(nameqn->uri));
+
+ nsarray = &nsowner->xml_namespaces;
+ for (i = 0, n = nsarray->length; i < n; i++) {
+ ns = XMLARRAY_MEMBER(nsarray, i, JSXMLNamespace);
+ if (ns && js_EqualStrings(ns->uri, nameqn->uri)) {
+ nameqn->prefix = ns->prefix;
+ return JS_TRUE;
+ }
+ }
+
+ ns = js_NewXMLNamespace(cx, NULL, nameqn->uri, JS_TRUE);
+ if (!ns)
+ return JS_FALSE;
+ }
+
+ return AddInScopeNamespace(cx, nsowner, ns);
+}
+
+static JSBool
+xml_setNamespace(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSXML *xml, *nsowner;
+ JSObject *nsobj, *qnobj;
+ JSXMLNamespace *ns;
+ jsval qnargv[2];
+
+ NON_LIST_XML_METHOD_PROLOG;
+ if (!JSXML_HAS_NAME(xml))
+ return JS_TRUE;
+
+ xml = CHECK_COPY_ON_WRITE(cx, xml, obj);
+ if (!xml || !js_GetXMLQNameObject(cx, xml->name))
+ return JS_FALSE;
+
+ nsobj = js_ConstructObject(cx, &js_NamespaceClass.base, NULL, obj, 1, argv);
+ if (!nsobj)
+ return JS_FALSE;
+ ns = (JSXMLNamespace *) JS_GetPrivate(cx, nsobj);
+ ns->declared = JS_TRUE;
+
+ qnargv[0] = argv[0] = OBJECT_TO_JSVAL(nsobj);
+ qnargv[1] = OBJECT_TO_JSVAL(xml->name->object);
+ qnobj = js_ConstructObject(cx, &js_QNameClass.base, NULL, NULL, 2, qnargv);
+ if (!qnobj)
+ return JS_FALSE;
+
+ xml->name = (JSXMLQName *) JS_GetPrivate(cx, qnobj);
+
+ /*
+ * Erratum: the spec fails to update the governing in-scope namespaces.
+ * See the erratum noted in xml_setName, above.
+ */
+ if (xml->xml_class == JSXML_CLASS_ELEMENT) {
+ nsowner = xml;
+ } else {
+ if (!xml->parent || xml->parent->xml_class != JSXML_CLASS_ELEMENT)
+ return JS_TRUE;
+ nsowner = xml->parent;
+ }
+ return AddInScopeNamespace(cx, nsowner, ns);
+}
+
+/* XML and XMLList */
+static JSBool
+xml_text(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSXML *xml, *list, *kid, *vxml;
+ uint32 i, n;
+ JSBool ok;
+ JSObject *kidobj;
+ jsval v;
+
+ XML_METHOD_PROLOG;
+ list = xml_list_helper(cx, xml, rval);
+ if (!list)
+ return JS_FALSE;
+
+ if (xml->xml_class == JSXML_CLASS_LIST) {
+ ok = JS_TRUE;
+ for (i = 0, n = xml->xml_kids.length; i < n; i++) {
+ kid = XMLARRAY_MEMBER(&xml->xml_kids, i, JSXML);
+ if (kid && kid->xml_class == JSXML_CLASS_ELEMENT) {
+ ok = js_EnterLocalRootScope(cx);
+ if (!ok)
+ break;
+ kidobj = js_GetXMLObject(cx, kid);
+ if (kidobj) {
+ ok = xml_text(cx, kidobj, argc, argv, &v);
+ } else {
+ ok = JS_FALSE;
+ v = JSVAL_NULL;
+ }
+ js_LeaveLocalRootScopeWithResult(cx, v);
+ if (!ok)
+ return JS_FALSE;
+ vxml = (JSXML *) JS_GetPrivate(cx, JSVAL_TO_OBJECT(v));
+ if (JSXML_LENGTH(vxml) != 0 && !Append(cx, list, vxml))
+ return JS_FALSE;
+ }
+ }
+ } else {
+ for (i = 0, n = JSXML_LENGTH(xml); i < n; i++) {
+ kid = XMLARRAY_MEMBER(&xml->xml_kids, i, JSXML);
+ if (kid && kid->xml_class == JSXML_CLASS_TEXT) {
+ if (!Append(cx, list, kid))
+ return JS_FALSE;
+ }
+ }
+ }
+ return JS_TRUE;
+}
+
+/* XML and XMLList */
+static JSBool
+xml_toXMLString(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSString *str;
+
+ str = ToXMLString(cx, OBJECT_TO_JSVAL(obj));
+ if (!str)
+ return JS_FALSE;
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+
+/* XML and XMLList */
+static JSString *
+xml_toString_helper(JSContext *cx, JSXML *xml)
+{
+ JSString *str, *kidstr;
+ JSXML *kid;
+ JSXMLArrayCursor cursor;
+
+ if (xml->xml_class == JSXML_CLASS_ATTRIBUTE ||
+ xml->xml_class == JSXML_CLASS_TEXT) {
+ return xml->xml_value;
+ }
+
+ if (!HasSimpleContent(xml))
+ return ToXMLString(cx, OBJECT_TO_JSVAL(xml->object));
+
+ str = cx->runtime->emptyString;
+ js_EnterLocalRootScope(cx);
+ XMLArrayCursorInit(&cursor, &xml->xml_kids);
+ while ((kid = (JSXML *) XMLArrayCursorNext(&cursor)) != NULL) {
+ if (kid->xml_class != JSXML_CLASS_COMMENT &&
+ kid->xml_class != JSXML_CLASS_PROCESSING_INSTRUCTION) {
+ kidstr = xml_toString_helper(cx, kid);
+ if (!kidstr) {
+ str = NULL;
+ break;
+ }
+ str = js_ConcatStrings(cx, str, kidstr);
+ if (!str)
+ break;
+ }
+ }
+ XMLArrayCursorFinish(&cursor);
+ js_LeaveLocalRootScopeWithResult(cx, STRING_TO_JSVAL(str));
+ return str;
+}
+
+static JSBool
+xml_toString(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSXML *xml;
+ JSString *str;
+
+ XML_METHOD_PROLOG;
+ str = xml_toString_helper(cx, xml);
+ if (!str)
+ return JS_FALSE;
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+
+/* XML and XMLList */
+static JSBool
+xml_valueOf(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ *rval = OBJECT_TO_JSVAL(obj);
+ return JS_TRUE;
+}
+
+static JSFunctionSpec xml_methods[] = {
+ {"addNamespace", xml_addNamespace, 1,0,0},
+ {"appendChild", xml_appendChild, 1,0,0},
+ {js_attribute_str, xml_attribute, 1,0,0},
+ {"attributes", xml_attributes, 0,0,0},
+ {"child", xml_child, 1,0,0},
+ {"childIndex", xml_childIndex, 0,0,0},
+ {"children", xml_children, 0,0,0},
+ {"comments", xml_comments, 0,0,0},
+ {"contains", xml_contains, 1,0,0},
+ {"copy", xml_copy, 0,0,0},
+ {"descendants", xml_descendants, 1,0,0},
+ {"elements", xml_elements, 1,0,0},
+ {"hasOwnProperty", xml_hasOwnProperty, 1,0,0},
+ {"hasComplexContent", xml_hasComplexContent, 1,0,0},
+ {"hasSimpleContent", xml_hasSimpleContent, 1,0,0},
+ {"inScopeNamespaces", xml_inScopeNamespaces, 0,0,0},
+ {"insertChildAfter", xml_insertChildAfter, 2,0,0},
+ {"insertChildBefore", xml_insertChildBefore, 2,0,0},
+ {js_length_str, xml_length, 0,0,0},
+ {js_localName_str, xml_localName, 0,0,0},
+ {js_name_str, xml_name, 0,0,0},
+ {js_namespace_str, xml_namespace, 1,0,0},
+ {"namespaceDeclarations", xml_namespaceDeclarations, 0,0,0},
+ {"nodeKind", xml_nodeKind, 0,0,0},
+ {"normalize", xml_normalize, 0,0,0},
+ {js_xml_parent_str, xml_parent, 0,0,0},
+ {"processingInstructions",xml_processingInstructions,1,0,0},
+ {"prependChild", xml_prependChild, 1,0,0},
+ {"propertyIsEnumerable", xml_propertyIsEnumerable, 1,0,0},
+ {"removeNamespace", xml_removeNamespace, 1,0,0},
+ {"replace", xml_replace, 2,0,0},
+ {"setChildren", xml_setChildren, 1,0,0},
+ {"setLocalName", xml_setLocalName, 1,0,0},
+ {"setName", xml_setName, 1,0,0},
+ {"setNamespace", xml_setNamespace, 1,0,0},
+ {js_text_str, xml_text, 0,0,0},
+ {js_toString_str, xml_toString, 0,0,0},
+ {js_toXMLString_str, xml_toXMLString, 0,0,0},
+ {js_toSource_str, xml_toXMLString, 0,0,0},
+ {js_valueOf_str, xml_valueOf, 0,0,0},
+ {0,0,0,0,0}
+};
+
+static JSBool
+CopyXMLSettings(JSContext *cx, JSObject *from, JSObject *to)
+{
+ int i;
+ const char *name;
+ jsval v;
+
+ for (i = XML_IGNORE_COMMENTS; i < XML_PRETTY_INDENT; i++) {
+ name = xml_static_props[i].name;
+ if (!JS_GetProperty(cx, from, name, &v))
+ return JS_FALSE;
+ if (JSVAL_IS_BOOLEAN(v) && !JS_SetProperty(cx, to, name, &v))
+ return JS_FALSE;
+ }
+
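+ /* The loop leaves i == XML_PRETTY_INDENT; copy the numeric indent value. */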
+ name = xml_static_props[i].name;
+ if (!JS_GetProperty(cx, from, name, &v))
+ return JS_FALSE;
+ if (JSVAL_IS_NUMBER(v) && !JS_SetProperty(cx, to, name, &v))
+ return JS_FALSE;
+ return JS_TRUE;
+}
+
+static JSBool
+SetDefaultXMLSettings(JSContext *cx, JSObject *obj)
+{
+ int i;
+ jsval v;
+
+ for (i = XML_IGNORE_COMMENTS; i < XML_PRETTY_INDENT; i++) {
+ v = JSVAL_TRUE;
+ if (!JS_SetProperty(cx, obj, xml_static_props[i].name, &v))
+ return JS_FALSE;
+ }
+ v = INT_TO_JSVAL(2);
+ return JS_SetProperty(cx, obj, xml_static_props[i].name, &v);
+}
+
+static JSBool
+xml_settings(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSObject *settings;
+
+ settings = JS_NewObject(cx, NULL, NULL, NULL);
+ if (!settings)
+ return JS_FALSE;
+ *rval = OBJECT_TO_JSVAL(settings);
+ return CopyXMLSettings(cx, obj, settings);
+}
+
+static JSBool
+xml_setSettings(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ jsval v;
+ JSBool ok;
+ JSObject *settings;
+
+ v = argv[0];
+ if (JSVAL_IS_NULL(v) || JSVAL_IS_VOID(v)) {
+ cx->xmlSettingFlags = 0;
+ ok = SetDefaultXMLSettings(cx, obj);
+ } else {
+ if (JSVAL_IS_PRIMITIVE(v))
+ return JS_TRUE;
+ settings = JSVAL_TO_OBJECT(v);
+ cx->xmlSettingFlags = 0;
+ ok = CopyXMLSettings(cx, settings, obj);
+ }
+ if (ok)
+ cx->xmlSettingFlags |= XSF_CACHE_VALID;
+ return ok;
+}
+
+static JSBool
+xml_defaultSettings(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSObject *settings;
+
+ settings = JS_NewObject(cx, NULL, NULL, NULL);
+ if (!settings)
+ return JS_FALSE;
+ *rval = OBJECT_TO_JSVAL(settings);
+ return SetDefaultXMLSettings(cx, settings);
+}
+
+static JSFunctionSpec xml_static_methods[] = {
+ {"settings", xml_settings, 0,0,0},
+ {"setSettings", xml_setSettings, 1,0,0},
+ {"defaultSettings", xml_defaultSettings, 0,0,0},
+ {0,0,0,0,0}
+};
+
+static JSBool
+XML(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsval v;
+ JSXML *xml, *copy;
+ JSObject *xobj, *vobj;
+ JSClass *clasp;
+
+ v = argv[0];
+ if (JSVAL_IS_NULL(v) || JSVAL_IS_VOID(v))
+ v = STRING_TO_JSVAL(cx->runtime->emptyString);
+
+ xobj = ToXML(cx, v);
+ if (!xobj)
+ return JS_FALSE;
+ *rval = OBJECT_TO_JSVAL(xobj);
+ xml = (JSXML *) JS_GetPrivate(cx, xobj);
+
+ if ((cx->fp->flags & JSFRAME_CONSTRUCTING) && !JSVAL_IS_PRIMITIVE(v)) {
+ vobj = JSVAL_TO_OBJECT(v);
+ clasp = OBJ_GET_CLASS(cx, vobj);
+ if (clasp == &js_XMLClass ||
+ (clasp->flags & JSCLASS_DOCUMENT_OBSERVER)) {
+ /* No need to lock obj, it's newly constructed and thread local. */
+ copy = DeepCopy(cx, xml, obj, 0);
+ if (!copy)
+ return JS_FALSE;
+ JS_ASSERT(copy->object == obj);
+ *rval = OBJECT_TO_JSVAL(obj);
+ return JS_TRUE;
+ }
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+XMLList(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsval v;
+ JSObject *vobj, *listobj;
+ JSXML *xml, *list;
+
+ v = argv[0];
+ if (JSVAL_IS_NULL(v) || JSVAL_IS_VOID(v))
+ v = STRING_TO_JSVAL(cx->runtime->emptyString);
+
+ if ((cx->fp->flags & JSFRAME_CONSTRUCTING) && !JSVAL_IS_PRIMITIVE(v)) {
+ vobj = JSVAL_TO_OBJECT(v);
+ if (OBJECT_IS_XML(cx, vobj)) {
+ xml = (JSXML *) JS_GetPrivate(cx, vobj);
+ if (xml->xml_class == JSXML_CLASS_LIST) {
+ listobj = js_NewXMLObject(cx, JSXML_CLASS_LIST);
+ if (!listobj)
+ return JS_FALSE;
+ *rval = OBJECT_TO_JSVAL(listobj);
+
+ list = (JSXML *) JS_GetPrivate(cx, listobj);
+ if (!Append(cx, list, xml))
+ return JS_FALSE;
+ return JS_TRUE;
+ }
+ }
+ }
+
+ /* Toggle on XML support since the script has explicitly requested it. */
+ listobj = ToXMLList(cx, v);
+ if (!listobj)
+ return JS_FALSE;
+
+ *rval = OBJECT_TO_JSVAL(listobj);
+ return JS_TRUE;
+}
+
+#define JSXML_LIST_SIZE (offsetof(JSXML, u) + sizeof(struct JSXMLListVar))
+#define JSXML_ELEMENT_SIZE (offsetof(JSXML, u) + sizeof(struct JSXMLVar))
+#define JSXML_LEAF_SIZE (offsetof(JSXML, u) + sizeof(JSString *))
+
+static size_t sizeof_JSXML[JSXML_CLASS_LIMIT] = {
+ JSXML_LIST_SIZE, /* JSXML_CLASS_LIST */
+ JSXML_ELEMENT_SIZE, /* JSXML_CLASS_ELEMENT */
+ JSXML_LEAF_SIZE, /* JSXML_CLASS_ATTRIBUTE */
+ JSXML_LEAF_SIZE, /* JSXML_CLASS_PROCESSING_INSTRUCTION */
+ JSXML_LEAF_SIZE, /* JSXML_CLASS_TEXT */
+ JSXML_LEAF_SIZE /* JSXML_CLASS_COMMENT */
+};
+
+#ifdef DEBUG_notme
+JSCList xml_leaks = JS_INIT_STATIC_CLIST(&xml_leaks);
+uint32 xml_serial;
+#endif
+
+JSXML *
+js_NewXML(JSContext *cx, JSXMLClass xml_class)
+{
+ JSXML *xml;
+
+ xml = (JSXML *) js_NewGCThing(cx, GCX_XML, sizeof_JSXML[xml_class]);
+ if (!xml)
+ return NULL;
+
+ xml->object = NULL;
+ xml->domnode = NULL;
+ xml->parent = NULL;
+ xml->name = NULL;
+ xml->xml_class = xml_class;
+ xml->xml_flags = 0;
+ if (JSXML_CLASS_HAS_VALUE(xml_class)) {
+ xml->xml_value = cx->runtime->emptyString;
+ } else {
+ XMLArrayInit(cx, &xml->xml_kids, 0);
+ if (xml_class == JSXML_CLASS_LIST) {
+ xml->xml_target = NULL;
+ xml->xml_targetprop = NULL;
+ } else {
+ XMLArrayInit(cx, &xml->xml_namespaces, 0);
+ XMLArrayInit(cx, &xml->xml_attrs, 0);
+ }
+ }
+
+#ifdef DEBUG_notme
+ JS_APPEND_LINK(&xml->links, &xml_leaks);
+ xml->serial = xml_serial++;
+#endif
+ METER(xml_stats.xml);
+ METER(xml_stats.livexml);
+ return xml;
+}
+
+void
+js_MarkXML(JSContext *cx, JSXML *xml)
+{
+ GC_MARK(cx, xml->object, "object");
+ GC_MARK(cx, xml->name, "name");
+ GC_MARK(cx, xml->parent, "xml_parent");
+
+ if (JSXML_HAS_VALUE(xml)) {
+ GC_MARK(cx, xml->xml_value, "value");
+ return;
+ }
+
+ xml_mark_vector(cx,
+ (JSXML **) xml->xml_kids.vector,
+ xml->xml_kids.length);
+ XMLArrayCursorMark(cx, xml->xml_kids.cursors);
+ XMLArrayTrim(&xml->xml_kids);
+
+ if (xml->xml_class == JSXML_CLASS_LIST) {
+ if (xml->xml_target)
+ GC_MARK(cx, xml->xml_target, "target");
+ if (xml->xml_targetprop)
+ GC_MARK(cx, xml->xml_targetprop, "targetprop");
+ } else {
+ namespace_mark_vector(cx,
+ (JSXMLNamespace **) xml->xml_namespaces.vector,
+ xml->xml_namespaces.length);
+ XMLArrayCursorMark(cx, xml->xml_namespaces.cursors);
+ XMLArrayTrim(&xml->xml_namespaces);
+
+ xml_mark_vector(cx,
+ (JSXML **) xml->xml_attrs.vector,
+ xml->xml_attrs.length);
+ XMLArrayCursorMark(cx, xml->xml_attrs.cursors);
+ XMLArrayTrim(&xml->xml_attrs);
+ }
+}
+
+void
+js_FinalizeXML(JSContext *cx, JSXML *xml)
+{
+ if (JSXML_HAS_KIDS(xml)) {
+ XMLArrayFinish(cx, &xml->xml_kids);
+ if (xml->xml_class == JSXML_CLASS_ELEMENT) {
+ XMLArrayFinish(cx, &xml->xml_namespaces);
+ XMLArrayFinish(cx, &xml->xml_attrs);
+ }
+ }
+
+#ifdef DEBUG_notme
+ JS_REMOVE_LINK(&xml->links);
+#endif
+
+ UNMETER(xml_stats.livexml);
+}
+
+JSObject *
+js_ParseNodeToXMLObject(JSContext *cx, JSParseNode *pn)
+{
+ jsval nsval;
+ JSXMLNamespace *ns;
+ JSXMLArray nsarray;
+ JSXML *xml;
+
+ if (!js_GetDefaultXMLNamespace(cx, &nsval))
+ return NULL;
+ JS_ASSERT(!JSVAL_IS_PRIMITIVE(nsval));
+ ns = (JSXMLNamespace *) JS_GetPrivate(cx, JSVAL_TO_OBJECT(nsval));
+
+ if (!XMLArrayInit(cx, &nsarray, 1))
+ return NULL;
+
+ XMLARRAY_APPEND(cx, &nsarray, ns);
+ xml = ParseNodeToXML(cx, pn, &nsarray, XSF_PRECOMPILED_ROOT);
+ XMLArrayFinish(cx, &nsarray);
+ if (!xml)
+ return NULL;
+
+ return xml->object;
+}
+
+JSObject *
+js_NewXMLObject(JSContext *cx, JSXMLClass xml_class)
+{
+ JSXML *xml;
+ JSObject *obj;
+ JSTempValueRooter tvr;
+
+ xml = js_NewXML(cx, xml_class);
+ if (!xml)
+ return NULL;
+ JS_PUSH_TEMP_ROOT_GCTHING(cx, xml, &tvr);
+ obj = js_GetXMLObject(cx, xml);
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ return obj;
+}
+
+static JSObject *
+NewXMLObject(JSContext *cx, JSXML *xml)
+{
+ JSObject *obj;
+
+ obj = js_NewObject(cx, &js_XMLClass, NULL, NULL);
+ if (!obj || !JS_SetPrivate(cx, obj, xml)) {
+ cx->weakRoots.newborn[GCX_OBJECT] = NULL;
+ return NULL;
+ }
+ METER(xml_stats.xmlobj);
+ METER(xml_stats.livexmlobj);
+ return obj;
+}
+
+JSObject *
+js_GetXMLObject(JSContext *cx, JSXML *xml)
+{
+ JSObject *obj;
+
+ obj = xml->object;
+ if (obj) {
+ JS_ASSERT(JS_GetPrivate(cx, obj) == xml);
+ return obj;
+ }
+
+ /*
+ * A JSXML cannot be shared among threads unless it has an object.
+ * A JSXML cannot be given an object unless:
+ * (a) it has no parent; or
+ * (b) its parent has no object (therefore is thread-private); or
+ * (c) its parent's object is locked.
+ *
+ * Once given an object, a JSXML is immutable.
+ */
+ JS_ASSERT(!xml->parent ||
+ !xml->parent->object ||
+ JS_IS_OBJ_LOCKED(cx, xml->parent->object));
+
+ obj = NewXMLObject(cx, xml);
+ if (!obj)
+ return NULL;
+ xml->object = obj;
+ return obj;
+}
+
+JSObject *
+js_InitNamespaceClass(JSContext *cx, JSObject *obj)
+{
+ return JS_InitClass(cx, obj, NULL, &js_NamespaceClass.base, Namespace, 2,
+ namespace_props, namespace_methods, NULL, NULL);
+}
+
+JSObject *
+js_InitQNameClass(JSContext *cx, JSObject *obj)
+{
+ return JS_InitClass(cx, obj, NULL, &js_QNameClass.base, QName, 2,
+ qname_props, qname_methods, NULL, NULL);
+}
+
+JSObject *
+js_InitAttributeNameClass(JSContext *cx, JSObject *obj)
+{
+ return JS_InitClass(cx, obj, NULL, &js_AttributeNameClass, AttributeName, 2,
+ qname_props, qname_methods, NULL, NULL);
+}
+
+JSObject *
+js_InitAnyNameClass(JSContext *cx, JSObject *obj)
+{
+ jsval v;
+
+ if (!js_GetAnyName(cx, &v))
+ return NULL;
+ return JSVAL_TO_OBJECT(v);
+}
+
+JSObject *
+js_InitXMLClass(JSContext *cx, JSObject *obj)
+{
+ JSObject *proto, *pobj, *ctor;
+ JSFunction *fun;
+ JSXML *xml;
+ JSProperty *prop;
+ JSScopeProperty *sprop;
+ jsval cval, argv[1], junk;
+
+ /* Define the isXMLName function. */
+ if (!JS_DefineFunction(cx, obj, js_isXMLName_str, xml_isXMLName, 1, 0))
+ return NULL;
+
+ /* Define the XML class constructor and prototype. */
+ proto = JS_InitClass(cx, obj, NULL, &js_XMLClass, XML, 1,
+ NULL, xml_methods,
+ xml_static_props, xml_static_methods);
+ if (!proto)
+ return NULL;
+
+ xml = js_NewXML(cx, JSXML_CLASS_TEXT);
+ if (!xml || !JS_SetPrivate(cx, proto, xml))
+ return NULL;
+ xml->object = proto;
+ METER(xml_stats.xmlobj);
+ METER(xml_stats.livexmlobj);
+
+ /*
+ * Prepare to set default settings on the XML constructor we just made.
+ * NB: We can't use JS_GetConstructor, because it calls OBJ_GET_PROPERTY,
+ * which is xml_getProperty, which creates a new XMLList every time! We
+ * must instead call js_LookupProperty directly.
+ */
+ if (!js_LookupProperty(cx, proto,
+ ATOM_TO_JSID(cx->runtime->atomState.constructorAtom),
+ &pobj, &prop)) {
+ return NULL;
+ }
+ JS_ASSERT(prop);
+ sprop = (JSScopeProperty *) prop;
+ JS_ASSERT(SPROP_HAS_VALID_SLOT(sprop, OBJ_SCOPE(pobj)));
+ cval = OBJ_GET_SLOT(cx, pobj, sprop->slot);
+ OBJ_DROP_PROPERTY(cx, pobj, prop);
+ JS_ASSERT(VALUE_IS_FUNCTION(cx, cval));
+
+ /* Set default settings. */
+ ctor = JSVAL_TO_OBJECT(cval);
+ argv[0] = JSVAL_VOID;
+ if (!xml_setSettings(cx, ctor, 1, argv, &junk))
+ return NULL;
+
+ /* Define the XMLList function and give it the same prototype as XML. */
+ fun = JS_DefineFunction(cx, obj, js_XMLList_str, XMLList, 1, 0);
+ if (!fun)
+ return NULL;
+ if (!js_SetClassPrototype(cx, fun->object, proto,
+ JSPROP_READONLY | JSPROP_PERMANENT)) {
+ return NULL;
+ }
+ return proto;
+}
+
+JSObject *
+js_InitXMLClasses(JSContext *cx, JSObject *obj)
+{
+ if (!js_InitNamespaceClass(cx, obj))
+ return NULL;
+ if (!js_InitQNameClass(cx, obj))
+ return NULL;
+ if (!js_InitAttributeNameClass(cx, obj))
+ return NULL;
+ if (!js_InitAnyNameClass(cx, obj))
+ return NULL;
+ return js_InitXMLClass(cx, obj);
+}
+
+JSBool
+js_GetFunctionNamespace(JSContext *cx, jsval *vp)
+{
+ JSRuntime *rt;
+ JSObject *obj;
+ JSAtom *atom;
+ JSString *prefix, *uri;
+
+ /* An invalid URI, for internal use only, guaranteed not to collide. */
+ static const char anti_uri[] = "@mozilla.org/js/function";
+
+ /* Optimize by avoiding JS_LOCK_GC(rt) for the common case. */
+ rt = cx->runtime;
+ obj = rt->functionNamespaceObject;
+ if (!obj) {
+ JS_LOCK_GC(rt);
+ obj = rt->functionNamespaceObject;
+ if (!obj) {
+ JS_UNLOCK_GC(rt);
+ atom = js_Atomize(cx, js_function_str, 8, 0);
+ JS_ASSERT(atom);
+ prefix = ATOM_TO_STRING(atom);
+
+ /*
+ * Note that any race to atomize anti_uri here is resolved by
+ * the atom table code, such that at most one atom for anti_uri
+ * is created. We store in rt->atomState.lazy unconditionally,
+ * since we are guaranteed to overwrite either null or the same
+ * atom pointer.
+ */
+ atom = js_Atomize(cx, anti_uri, sizeof anti_uri - 1, ATOM_PINNED);
+ if (!atom)
+ return JS_FALSE;
+ rt->atomState.lazy.functionNamespaceURIAtom = atom;
+
+ uri = ATOM_TO_STRING(atom);
+ obj = js_NewXMLNamespaceObject(cx, prefix, uri, JS_FALSE);
+ if (!obj)
+ return JS_FALSE;
+
+ /*
+ * Avoid entraining any in-scope Object.prototype. The loss of
+ * Namespace.prototype is not detectable, as there is no way to
+ * refer to this instance in scripts. When used to qualify method
+ * names, its prefix and uri references are copied to the QName.
+ */
+ OBJ_SET_PROTO(cx, obj, NULL);
+ OBJ_SET_PARENT(cx, obj, NULL);
+
+ JS_LOCK_GC(rt);
+ if (!rt->functionNamespaceObject)
+ rt->functionNamespaceObject = obj;
+ else
+ obj = rt->functionNamespaceObject;
+ }
+ JS_UNLOCK_GC(rt);
+ }
+ *vp = OBJECT_TO_JSVAL(obj);
+ return JS_TRUE;
+}
+
+/*
+ * Note the asymmetry between js_GetDefaultXMLNamespace and js_SetDefaultXML-
+ * Namespace. Get searches fp->scopeChain for JS_DEFAULT_XML_NAMESPACE_ID,
+ * while Set sets JS_DEFAULT_XML_NAMESPACE_ID in fp->varobj (unless fp is a
+ * lightweight function activation). There's no requirement that fp->varobj
+ * lie directly on fp->scopeChain, although it should be reachable using the
+ * prototype chain from a scope object (cf. JSOPTION_VAROBJFIX in jsapi.h).
+ *
+ * If Get can't find JS_DEFAULT_XML_NAMESPACE_ID along the scope chain, it
+ * creates a default namespace via 'new Namespace()'. In contrast, Set uses
+ * its v argument as the uri of a new Namespace, with "" as the prefix. See
+ * ECMA-357 12.1 and 12.1.1. Note that if Set is called with a Namespace n,
+ * the default XML namespace will be set to ("", n.uri). So the uri string
+ * is really the only usefully stored value of the default namespace.
+ */
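+/*
+ * Script-level sketch of the pair (ECMA-357 12.1; illustrative only):
+ *
+ *   default xml namespace = "http://example.org/ns"; // Set stores the uri
+ *   var e = <item/>;            // evaluating the initializer consults Get
+ *   e.name().uri;               // "http://example.org/ns"
+ */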
+JSBool
+js_GetDefaultXMLNamespace(JSContext *cx, jsval *vp)
+{
+ JSStackFrame *fp;
+ JSObject *nsobj, *obj, *tmp;
+ jsval v;
+
+ fp = cx->fp;
+ nsobj = fp->xmlNamespace;
+ if (nsobj) {
+ *vp = OBJECT_TO_JSVAL(nsobj);
+ return JS_TRUE;
+ }
+
+ obj = NULL;
+ for (tmp = fp->scopeChain; tmp; tmp = OBJ_GET_PARENT(cx, obj)) {
+ obj = tmp;
+ if (!OBJ_GET_PROPERTY(cx, obj, JS_DEFAULT_XML_NAMESPACE_ID, &v))
+ return JS_FALSE;
+ if (!JSVAL_IS_PRIMITIVE(v)) {
+ fp->xmlNamespace = JSVAL_TO_OBJECT(v);
+ *vp = v;
+ return JS_TRUE;
+ }
+ }
+
+ nsobj = js_ConstructObject(cx, &js_NamespaceClass.base, NULL, obj, 0, NULL);
+ if (!nsobj)
+ return JS_FALSE;
+ v = OBJECT_TO_JSVAL(nsobj);
+ if (obj &&
+ !OBJ_DEFINE_PROPERTY(cx, obj, JS_DEFAULT_XML_NAMESPACE_ID, v,
+ JS_PropertyStub, JS_PropertyStub,
+ JSPROP_PERMANENT, NULL)) {
+ return JS_FALSE;
+ }
+ fp->xmlNamespace = nsobj;
+ *vp = v;
+ return JS_TRUE;
+}
+
+JSBool
+js_SetDefaultXMLNamespace(JSContext *cx, jsval v)
+{
+ jsval argv[2];
+ JSObject *nsobj, *varobj;
+ JSStackFrame *fp;
+
+ argv[0] = STRING_TO_JSVAL(cx->runtime->emptyString);
+ argv[1] = v;
+ nsobj = js_ConstructObject(cx, &js_NamespaceClass.base, NULL, NULL,
+ 2, argv);
+ if (!nsobj)
+ return JS_FALSE;
+ v = OBJECT_TO_JSVAL(nsobj);
+
+ fp = cx->fp;
+ varobj = fp->varobj;
+ if (varobj) {
+ if (!OBJ_DEFINE_PROPERTY(cx, varobj, JS_DEFAULT_XML_NAMESPACE_ID, v,
+ JS_PropertyStub, JS_PropertyStub,
+ JSPROP_PERMANENT, NULL)) {
+ return JS_FALSE;
+ }
+ } else {
+ JS_ASSERT(fp->fun && !JSFUN_HEAVYWEIGHT_TEST(fp->fun->flags));
+ }
+ fp->xmlNamespace = JSVAL_TO_OBJECT(v);
+ return JS_TRUE;
+}
+
+JSBool
+js_ToAttributeName(JSContext *cx, jsval *vp)
+{
+ JSXMLQName *qn;
+
+ qn = ToAttributeName(cx, *vp);
+ if (!qn)
+ return JS_FALSE;
+ *vp = OBJECT_TO_JSVAL(qn->object);
+ return JS_TRUE;
+}
+
+JSString *
+js_EscapeAttributeValue(JSContext *cx, JSString *str)
+{
+ return EscapeAttributeValue(cx, NULL, str);
+}
+
+JSString *
+js_AddAttributePart(JSContext *cx, JSBool isName, JSString *str, JSString *str2)
+{
+ size_t len, len2, newlen;
+ jschar *chars;
+
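+ /*
+ * If str is a dependent string or lacks the mutable flag, make a private
+ * copy first so it can safely be grown in place below.
+ */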
+ if (JSSTRING_IS_DEPENDENT(str) ||
+ !(*js_GetGCThingFlags(str) & GCF_MUTABLE)) {
+ str = js_NewStringCopyN(cx, JSSTRING_CHARS(str), JSSTRING_LENGTH(str),
+ 0);
+ if (!str)
+ return NULL;
+ }
+
+ len = str->length;
+ len2 = JSSTRING_LENGTH(str2);
+ newlen = (isName) ? len + 1 + len2 : len + 2 + len2 + 1;
+ chars = (jschar *) JS_realloc(cx, str->chars, (newlen+1) * sizeof(jschar));
+ if (!chars)
+ return NULL;
+
+ /*
+ * Reallocating str (because we know it has no other references) requires
+ * purging any deflated string cached for it.
+ */
+ js_PurgeDeflatedStringCache(cx->runtime, str);
+
+ str->chars = chars;
+ str->length = newlen;
+ chars += len;
+ if (isName) {
+ *chars++ = ' ';
+ js_strncpy(chars, JSSTRING_CHARS(str2), len2);
+ chars += len2;
+ } else {
+ *chars++ = '=';
+ *chars++ = '"';
+ js_strncpy(chars, JSSTRING_CHARS(str2), len2);
+ chars += len2;
+ *chars++ = '"';
+ }
+ *chars = 0;
+ return str;
+}
+
+JSString *
+js_EscapeElementValue(JSContext *cx, JSString *str)
+{
+ return EscapeElementValue(cx, NULL, str);
+}
+
+JSString *
+js_ValueToXMLString(JSContext *cx, jsval v)
+{
+ return ToXMLString(cx, v);
+}
+
+static JSBool
+anyname_toString(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ *rval = ATOM_KEY(cx->runtime->atomState.starAtom);
+ return JS_TRUE;
+}
+
+JSBool
+js_GetAnyName(JSContext *cx, jsval *vp)
+{
+ JSRuntime *rt;
+ JSObject *obj;
+ JSXMLQName *qn;
+ JSBool ok;
+
+ /* Optimize by avoiding JS_LOCK_GC(rt) for the common case. */
+ rt = cx->runtime;
+ obj = rt->anynameObject;
+ if (!obj) {
+ JS_LOCK_GC(rt);
+ obj = rt->anynameObject;
+ if (!obj) {
+ JS_UNLOCK_GC(rt);
+
+ /*
+ * Protect multiple newborns created below, in the do-while(0)
+ * loop used to ensure that we leave this local root scope.
+ */
+ ok = js_EnterLocalRootScope(cx);
+ if (!ok)
+ return JS_FALSE;
+
+ do {
+ qn = js_NewXMLQName(cx, rt->emptyString, rt->emptyString,
+ ATOM_TO_STRING(rt->atomState.starAtom));
+ if (!qn) {
+ ok = JS_FALSE;
+ break;
+ }
+
+ obj = js_NewObject(cx, &js_AnyNameClass, NULL, NULL);
+ if (!obj || !JS_SetPrivate(cx, obj, qn)) {
+ cx->weakRoots.newborn[GCX_OBJECT] = NULL;
+ ok = JS_FALSE;
+ break;
+ }
+ qn->object = obj;
+ METER(xml_stats.qnameobj);
+ METER(xml_stats.liveqnameobj);
+
+ /*
+ * Avoid entraining any Object.prototype found via cx's scope
+ * chain or global object. This loses the default toString,
+ * but no big deal: we want to customize toString anyway for
+ * clearer diagnostics.
+ */
+ if (!JS_DefineFunction(cx, obj, js_toString_str,
+ anyname_toString, 0, 0)) {
+ ok = JS_FALSE;
+ break;
+ }
+ OBJ_SET_PROTO(cx, obj, NULL);
+ JS_ASSERT(!OBJ_GET_PARENT(cx, obj));
+ } while (0);
+
+ js_LeaveLocalRootScopeWithResult(cx, OBJECT_TO_JSVAL(obj));
+ if (!ok)
+ return JS_FALSE;
+
+ JS_LOCK_GC(rt);
+ if (!rt->anynameObject)
+ rt->anynameObject = obj;
+ else
+ obj = rt->anynameObject;
+ }
+ JS_UNLOCK_GC(rt);
+ }
+ *vp = OBJECT_TO_JSVAL(obj);
+ return JS_TRUE;
+}
+
+JSBool
+js_FindXMLProperty(JSContext *cx, jsval name, JSObject **objp, jsval *namep)
+{
+ JSXMLQName *qn;
+ jsid funid, id;
+ JSObject *obj, *pobj, *lastobj;
+ JSProperty *prop;
+ const char *printable;
+
+ qn = ToXMLName(cx, name, &funid);
+ if (!qn)
+ return JS_FALSE;
+ id = OBJECT_TO_JSID(qn->object);
+
+ obj = cx->fp->scopeChain;
+ do {
+ if (!OBJ_LOOKUP_PROPERTY(cx, obj, id, &pobj, &prop))
+ return JS_FALSE;
+ if (prop) {
+ OBJ_DROP_PROPERTY(cx, pobj, prop);
+
+ /*
+ * Call OBJ_THIS_OBJECT to skip any With object that wraps an XML
+ * object to carry scope chain linkage in js_FilterXMLList.
+ */
+ pobj = OBJ_THIS_OBJECT(cx, obj);
+ if (OBJECT_IS_XML(cx, pobj)) {
+ *objp = pobj;
+ *namep = ID_TO_VALUE(id);
+ return JS_TRUE;
+ }
+ }
+
+ lastobj = obj;
+ } while ((obj = OBJ_GET_PARENT(cx, obj)) != NULL);
+
+ printable = js_ValueToPrintableString(cx, name);
+ if (printable) {
+ JS_ReportErrorFlagsAndNumber(cx, JSREPORT_ERROR,
+ js_GetErrorMessage, NULL,
+ JSMSG_UNDEFINED_XML_NAME, printable);
+ }
+ return JS_FALSE;
+}
+
+JSBool
+js_GetXMLProperty(JSContext *cx, JSObject *obj, jsval name, jsval *vp)
+{
+ return GetProperty(cx, obj, name, vp);
+}
+
+JSBool
+js_GetXMLFunction(JSContext *cx, JSObject *obj, jsid id, jsval *vp)
+{
+ JSObject *target;
+ JSXML *xml;
+ JSTempValueRooter tvr;
+ JSBool ok;
+
+ JS_ASSERT(OBJECT_IS_XML(cx, obj));
+
+ /* After this point, control must flow through label out: to exit. */
+ JS_PUSH_TEMP_ROOT_OBJECT(cx, NULL, &tvr);
+
+ /*
+ * See comments before xml_lookupProperty about the need for the proto
+ * chain lookup.
+ */
+ target = obj;
+ for (;;) {
+ ok = js_GetProperty(cx, target, id, vp);
+ if (!ok)
+ goto out;
+ if (VALUE_IS_FUNCTION(cx, *vp)) {
+ ok = JS_TRUE;
+ goto out;
+ }
+ target = OBJ_GET_PROTO(cx, target);
+ if (target == NULL)
+ break;
+ tvr.u.object = target;
+ }
+
+ xml = (JSXML *) JS_GetPrivate(cx, obj);
+ if (HasSimpleContent(xml)) {
+ /* Search in String.prototype to implement 11.2.2.1 Step 3(f). */
+ ok = js_GetClassPrototype(cx, NULL, INT_TO_JSID(JSProto_String),
+ &tvr.u.object);
+ if (!ok)
+ goto out;
+ JS_ASSERT(tvr.u.object);
+ ok = OBJ_GET_PROPERTY(cx, tvr.u.object, id, vp);
+ }
+
+ out:
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ return ok;
+}
+
+JSBool
+js_SetXMLProperty(JSContext *cx, JSObject *obj, jsval name, jsval *vp)
+{
+ return PutProperty(cx, obj, name, vp);
+}
+
+static JSXML *
+GetPrivate(JSContext *cx, JSObject *obj, const char *method)
+{
+ JSXML *xml;
+
+ xml = (JSXML *) JS_GetInstancePrivate(cx, obj, &js_XMLClass, NULL);
+ if (!xml) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_INCOMPATIBLE_METHOD,
+ js_XML_str, method, OBJ_GET_CLASS(cx, obj)->name);
+ }
+ return xml;
+}
+
+JSBool
+js_GetXMLDescendants(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ JSXML *xml, *list;
+
+ xml = GetPrivate(cx, obj, "descendants internal method");
+ if (!xml)
+ return JS_FALSE;
+
+ list = Descendants(cx, xml, id);
+ if (!list)
+ return JS_FALSE;
+ *vp = OBJECT_TO_JSVAL(list->object);
+ return JS_TRUE;
+}
+
+JSBool
+js_DeleteXMLListElements(JSContext *cx, JSObject *listobj)
+{
+ JSXML *list;
+ uint32 n;
+ jsval junk;
+
+ list = (JSXML *) JS_GetPrivate(cx, listobj);
+ for (n = list->xml_kids.length; n != 0; --n) {
+ if (!DeleteProperty(cx, listobj, INT_TO_JSID(0), &junk))
+ return JS_FALSE;
+ }
+ return JS_TRUE;
+}
+
+JSBool
+js_FilterXMLList(JSContext *cx, JSObject *obj, jsbytecode *pc, jsval *vp)
+{
+ JSBool ok, match;
+ JSStackFrame *fp;
+ uint32 flags;
+ JSObject *scobj, *listobj, *resobj, *withobj, *kidobj;
+ JSXML *xml, *list, *result, *kid;
+ JSXMLArrayCursor cursor;
+
+ ok = js_EnterLocalRootScope(cx);
+ if (!ok)
+ return JS_FALSE;
+
+ /* All control flow after this point must exit via label out or bad. */
+ *vp = JSVAL_NULL;
+ fp = cx->fp;
+ flags = fp->flags;
+ fp->flags = flags | JSFRAME_FILTERING;
+ scobj = js_GetScopeChain(cx, fp);
+ withobj = NULL;
+ if (!scobj)
+ goto bad;
+ xml = GetPrivate(cx, obj, "filtering predicate operator");
+ if (!xml)
+ goto bad;
+
+ if (xml->xml_class == JSXML_CLASS_LIST) {
+ list = xml;
+ } else {
+ listobj = js_NewXMLObject(cx, JSXML_CLASS_LIST);
+ if (!listobj)
+ goto bad;
+ list = (JSXML *) JS_GetPrivate(cx, listobj);
+ ok = Append(cx, list, xml);
+ if (!ok)
+ goto out;
+ }
+
+ resobj = js_NewXMLObject(cx, JSXML_CLASS_LIST);
+ if (!resobj)
+ goto bad;
+ result = (JSXML *) JS_GetPrivate(cx, resobj);
+
+ /* Hoist the scope chain update out of the loop over kids. */
+ withobj = js_NewWithObject(cx, NULL, scobj, -1);
+ if (!withobj)
+ goto bad;
+ fp->scopeChain = withobj;
+
+ XMLArrayCursorInit(&cursor, &list->xml_kids);
+ while ((kid = (JSXML *) XMLArrayCursorNext(&cursor)) != NULL) {
+ kidobj = js_GetXMLObject(cx, kid);
+ if (!kidobj)
+ break;
+ OBJ_SET_PROTO(cx, withobj, kidobj);
+ ok = js_Interpret(cx, pc, vp) && js_ValueToBoolean(cx, *vp, &match);
+ if (ok && match)
+ ok = Append(cx, result, kid);
+ if (!ok)
+ break;
+ }
+ XMLArrayCursorFinish(&cursor);
+ if (!ok)
+ goto out;
+ if (kid)
+ goto bad;
+
+ *vp = OBJECT_TO_JSVAL(resobj);
+
+out:
+ fp->flags = flags | (fp->flags & JSFRAME_POP_BLOCKS);
+ if (withobj) {
+ fp->scopeChain = scobj;
+ JS_SetPrivate(cx, withobj, NULL);
+ }
+ js_LeaveLocalRootScopeWithResult(cx, *vp);
+ return ok;
+bad:
+ ok = JS_FALSE;
+ goto out;
+}
+
+JSObject *
+js_ValueToXMLObject(JSContext *cx, jsval v)
+{
+ return ToXML(cx, v);
+}
+
+JSObject *
+js_ValueToXMLListObject(JSContext *cx, jsval v)
+{
+ return ToXMLList(cx, v);
+}
+
+JSObject *
+js_CloneXMLObject(JSContext *cx, JSObject *obj)
+{
+ uintN flags;
+ JSXML *xml;
+
+ if (!GetXMLSettingFlags(cx, &flags))
+ return NULL;
+ xml = (JSXML *) JS_GetPrivate(cx, obj);
+ if (flags & (XSF_IGNORE_COMMENTS |
+ XSF_IGNORE_PROCESSING_INSTRUCTIONS |
+ XSF_IGNORE_WHITESPACE)) {
+ xml = DeepCopy(cx, xml, NULL, flags);
+ if (!xml)
+ return NULL;
+ return xml->object;
+ }
+ return NewXMLObject(cx, xml);
+}
+
+JSObject *
+js_NewXMLSpecialObject(JSContext *cx, JSXMLClass xml_class, JSString *name,
+ JSString *value)
+{
+ uintN flags;
+ JSObject *obj;
+ JSXML *xml;
+ JSXMLQName *qn;
+
+ if (!GetXMLSettingFlags(cx, &flags))
+ return NULL;
+
+ if ((xml_class == JSXML_CLASS_COMMENT &&
+ (flags & XSF_IGNORE_COMMENTS)) ||
+ (xml_class == JSXML_CLASS_PROCESSING_INSTRUCTION &&
+ (flags & XSF_IGNORE_PROCESSING_INSTRUCTIONS))) {
+ return js_NewXMLObject(cx, JSXML_CLASS_TEXT);
+ }
+
+ obj = js_NewXMLObject(cx, xml_class);
+ if (!obj)
+ return NULL;
+ xml = (JSXML *) JS_GetPrivate(cx, obj);
+ if (name) {
+ qn = js_NewXMLQName(cx, cx->runtime->emptyString, NULL, name);
+ if (!qn)
+ return NULL;
+ xml->name = qn;
+ }
+ xml->xml_value = value;
+ return obj;
+}
+
+JSString *
+js_MakeXMLCDATAString(JSContext *cx, JSString *str)
+{
+ return MakeXMLCDATAString(cx, NULL, str);
+}
+
+JSString *
+js_MakeXMLCommentString(JSContext *cx, JSString *str)
+{
+ return MakeXMLCommentString(cx, NULL, str);
+}
+
+JSString *
+js_MakeXMLPIString(JSContext *cx, JSString *name, JSString *str)
+{
+ return MakeXMLPIString(cx, NULL, name, str);
+}
+
+#endif /* JS_HAS_XML_SUPPORT */
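
The methods above fill out the ECMA-357 (E4X) XML.prototype surface wired up
in xml_methods. A minimal script-level sketch of the behavior they implement,
assuming an E4X-enabled build such as the js-1.7 shell (illustrative
annotation only, not part of the added file):

    var order = <order>
                  <item>fee</item>
                  <item>fie</item>
                </order>;

    order.nodeKind();                    // "element"              (xml_nodeKind)
    order.item.length();                 // 2                      (xml_length)
    order..item.toXMLString();           // serialized items       (xml_toXMLString)
    order.setNamespace("http://example.org/po");               // (xml_setNamespace)
    order.normalize();                   // coalesce/drop text kids (xml_normalize)
    XML.settings().prettyIndent;         // 2, per SetDefaultXMLSettings

Serialization details depend on the prettyPrinting and prettyIndent settings
managed by CopyXMLSettings and SetDefaultXMLSettings above.
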
diff --git a/third_party/js-1.7/jsxml.h b/third_party/js-1.7/jsxml.h
new file mode 100644
index 0000000..71e591a
--- /dev/null
+++ b/third_party/js-1.7/jsxml.h
@@ -0,0 +1,332 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is SpiderMonkey E4X code, released August, 2004.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsxml_h___
+#define jsxml_h___
+
+#include "jsstddef.h"
+#include "jspubtd.h"
+
+extern const char js_AnyName_str[];
+extern const char js_AttributeName_str[];
+extern const char js_isXMLName_str[];
+extern const char js_XMLList_str[];
+
+extern const char js_amp_entity_str[];
+extern const char js_gt_entity_str[];
+extern const char js_lt_entity_str[];
+extern const char js_quot_entity_str[];
+
+struct JSXMLNamespace {
+ JSObject *object;
+ JSString *prefix;
+ JSString *uri;
+ JSBool declared; /* true if declared in its XML tag */
+};
+
+extern JSXMLNamespace *
+js_NewXMLNamespace(JSContext *cx, JSString *prefix, JSString *uri,
+ JSBool declared);
+
+extern void
+js_MarkXMLNamespace(JSContext *cx, JSXMLNamespace *ns);
+
+extern void
+js_FinalizeXMLNamespace(JSContext *cx, JSXMLNamespace *ns);
+
+extern JSObject *
+js_NewXMLNamespaceObject(JSContext *cx, JSString *prefix, JSString *uri,
+ JSBool declared);
+
+extern JSObject *
+js_GetXMLNamespaceObject(JSContext *cx, JSXMLNamespace *ns);
+
+struct JSXMLQName {
+ JSObject *object;
+ JSString *uri;
+ JSString *prefix;
+ JSString *localName;
+};
+
+extern JSXMLQName *
+js_NewXMLQName(JSContext *cx, JSString *uri, JSString *prefix,
+ JSString *localName);
+
+extern void
+js_MarkXMLQName(JSContext *cx, JSXMLQName *qn);
+
+extern void
+js_FinalizeXMLQName(JSContext *cx, JSXMLQName *qn);
+
+extern JSObject *
+js_NewXMLQNameObject(JSContext *cx, JSString *uri, JSString *prefix,
+ JSString *localName);
+
+extern JSObject *
+js_GetXMLQNameObject(JSContext *cx, JSXMLQName *qn);
+
+extern JSObject *
+js_GetAttributeNameObject(JSContext *cx, JSXMLQName *qn);
+
+extern JSObject *
+js_ConstructXMLQNameObject(JSContext *cx, jsval nsval, jsval lnval);
+
+typedef JSBool
+(* JS_DLL_CALLBACK JSIdentityOp)(const void *a, const void *b);
+
+struct JSXMLArray {
+ uint32 length;
+ uint32 capacity;
+ void **vector;
+ JSXMLArrayCursor *cursors;
+};
+
+#define JSXML_PRESET_CAPACITY JS_BIT(31)
+#define JSXML_CAPACITY_MASK JS_BITMASK(31)
+#define JSXML_CAPACITY(array) ((array)->capacity & JSXML_CAPACITY_MASK)
+
+struct JSXMLArrayCursor {
+ JSXMLArray *array;
+ uint32 index;
+ JSXMLArrayCursor *next;
+ JSXMLArrayCursor **prevp;
+ void *root;
+};
+
+/*
+ * NB: don't reorder this enum without changing all array initializers that
+ * depend on it in jsxml.c.
+ */
+typedef enum JSXMLClass {
+ JSXML_CLASS_LIST,
+ JSXML_CLASS_ELEMENT,
+ JSXML_CLASS_ATTRIBUTE,
+ JSXML_CLASS_PROCESSING_INSTRUCTION,
+ JSXML_CLASS_TEXT,
+ JSXML_CLASS_COMMENT,
+ JSXML_CLASS_LIMIT
+} JSXMLClass;
+
+#define JSXML_CLASS_HAS_KIDS(class_) ((class_) < JSXML_CLASS_ATTRIBUTE)
+#define JSXML_CLASS_HAS_VALUE(class_) ((class_) >= JSXML_CLASS_ATTRIBUTE)
+#define JSXML_CLASS_HAS_NAME(class_) \
+ ((uintN)((class_) - JSXML_CLASS_ELEMENT) <= \
+ (uintN)(JSXML_CLASS_PROCESSING_INSTRUCTION - JSXML_CLASS_ELEMENT))
+
+#ifdef DEBUG_notme
+#include "jsclist.h"
+#endif
+
+struct JSXML {
+#ifdef DEBUG_notme
+ JSCList links;
+ uint32 serial;
+#endif
+ JSObject *object;
+ void *domnode; /* DOM node if mapped info item */
+ JSXML *parent;
+ JSXMLQName *name;
+ uint16 xml_class; /* discriminates u, below */
+ uint16 xml_flags; /* flags, see below */
+ union {
+ struct JSXMLListVar {
+ JSXMLArray kids; /* NB: must come first */
+ JSXML *target;
+ JSXMLQName *targetprop;
+ } list;
+ struct JSXMLVar {
+ JSXMLArray kids; /* NB: must come first */
+ JSXMLArray namespaces;
+ JSXMLArray attrs;
+ } elem;
+ JSString *value;
+ } u;
+
+ /* Don't add anything after u -- see js_NewXML for why. */
+};
+
+/* union member shorthands */
+#define xml_kids u.list.kids
+#define xml_target u.list.target
+#define xml_targetprop u.list.targetprop
+#define xml_namespaces u.elem.namespaces
+#define xml_attrs u.elem.attrs
+#define xml_value u.value
+
+/* xml_flags values */
+#define XMLF_WHITESPACE_TEXT 0x1
+
+/* xml_class-testing macros */
+#define JSXML_HAS_KIDS(xml) JSXML_CLASS_HAS_KIDS((xml)->xml_class)
+#define JSXML_HAS_VALUE(xml) JSXML_CLASS_HAS_VALUE((xml)->xml_class)
+#define JSXML_HAS_NAME(xml) JSXML_CLASS_HAS_NAME((xml)->xml_class)
+#define JSXML_LENGTH(xml) (JSXML_CLASS_HAS_KIDS((xml)->xml_class) \
+ ? (xml)->xml_kids.length \
+ : 0)
+
+extern JSXML *
+js_NewXML(JSContext *cx, JSXMLClass xml_class);
+
+extern void
+js_MarkXML(JSContext *cx, JSXML *xml);
+
+extern void
+js_FinalizeXML(JSContext *cx, JSXML *xml);
+
+extern JSObject *
+js_ParseNodeToXMLObject(JSContext *cx, JSParseNode *pn);
+
+extern JSObject *
+js_NewXMLObject(JSContext *cx, JSXMLClass xml_class);
+
+extern JSObject *
+js_GetXMLObject(JSContext *cx, JSXML *xml);
+
+extern JS_FRIEND_DATA(JSXMLObjectOps) js_XMLObjectOps;
+extern JS_FRIEND_DATA(JSClass) js_XMLClass;
+extern JS_FRIEND_DATA(JSExtendedClass) js_NamespaceClass;
+extern JS_FRIEND_DATA(JSExtendedClass) js_QNameClass;
+extern JS_FRIEND_DATA(JSClass) js_AttributeNameClass;
+extern JS_FRIEND_DATA(JSClass) js_AnyNameClass;
+
+/*
+ * Macros to test whether an object or a value is of type "xml" (per typeof).
+ * NB: jsapi.h must be included before any call to VALUE_IS_XML.
+ */
+#define OBJECT_IS_XML(cx,obj) ((obj)->map->ops == &js_XMLObjectOps.base)
+#define VALUE_IS_XML(cx,v) (!JSVAL_IS_PRIMITIVE(v) && \
+ OBJECT_IS_XML(cx, JSVAL_TO_OBJECT(v)))
+
+extern JSObject *
+js_InitNamespaceClass(JSContext *cx, JSObject *obj);
+
+extern JSObject *
+js_InitQNameClass(JSContext *cx, JSObject *obj);
+
+extern JSObject *
+js_InitAttributeNameClass(JSContext *cx, JSObject *obj);
+
+extern JSObject *
+js_InitAnyNameClass(JSContext *cx, JSObject *obj);
+
+extern JSObject *
+js_InitXMLClass(JSContext *cx, JSObject *obj);
+
+extern JSObject *
+js_InitXMLClasses(JSContext *cx, JSObject *obj);
+
+extern JSBool
+js_GetFunctionNamespace(JSContext *cx, jsval *vp);
+
+extern JSBool
+js_GetDefaultXMLNamespace(JSContext *cx, jsval *vp);
+
+extern JSBool
+js_SetDefaultXMLNamespace(JSContext *cx, jsval v);
+
+/*
+ * Return true if v is an XML QName object, or if it converts to a string
+ * that contains a valid XML qualified name (one containing no colon); false
+ * otherwise.
+ * NB: This function is an infallible predicate, it hides exceptions.
+ */
+extern JSBool
+js_IsXMLName(JSContext *cx, jsval v);
+
+extern JSBool
+js_ToAttributeName(JSContext *cx, jsval *vp);
+
+extern JSString *
+js_EscapeAttributeValue(JSContext *cx, JSString *str);
+
+extern JSString *
+js_AddAttributePart(JSContext *cx, JSBool isName, JSString *str,
+ JSString *str2);
+
+extern JSString *
+js_EscapeElementValue(JSContext *cx, JSString *str);
+
+extern JSString *
+js_ValueToXMLString(JSContext *cx, jsval v);
+
+extern JSBool
+js_GetAnyName(JSContext *cx, jsval *vp);
+
+extern JSBool
+js_FindXMLProperty(JSContext *cx, jsval name, JSObject **objp, jsval *namep);
+
+extern JSBool
+js_GetXMLProperty(JSContext *cx, JSObject *obj, jsval name, jsval *vp);
+
+extern JSBool
+js_GetXMLFunction(JSContext *cx, JSObject *obj, jsid id, jsval *vp);
+
+extern JSBool
+js_SetXMLProperty(JSContext *cx, JSObject *obj, jsval name, jsval *vp);
+
+extern JSBool
+js_GetXMLDescendants(JSContext *cx, JSObject *obj, jsval id, jsval *vp);
+
+extern JSBool
+js_DeleteXMLListElements(JSContext *cx, JSObject *listobj);
+
+extern JSBool
+js_FilterXMLList(JSContext *cx, JSObject *obj, jsbytecode *pc, jsval *vp);
+
+extern JSObject *
+js_ValueToXMLObject(JSContext *cx, jsval v);
+
+extern JSObject *
+js_ValueToXMLListObject(JSContext *cx, jsval v);
+
+extern JSObject *
+js_CloneXMLObject(JSContext *cx, JSObject *obj);
+
+extern JSObject *
+js_NewXMLSpecialObject(JSContext *cx, JSXMLClass xml_class, JSString *name,
+ JSString *value);
+
+extern JSString *
+js_MakeXMLCDATAString(JSContext *cx, JSString *str);
+
+extern JSString *
+js_MakeXMLCommentString(JSContext *cx, JSString *str);
+
+extern JSString *
+js_MakeXMLPIString(JSContext *cx, JSString *name, JSString *str);
+
+#endif /* jsxml_h___ */
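
The JSXMLNamespace and JSXMLQName structs declared above back the
script-visible Namespace and QName objects, while js_AttributeNameClass and
js_AnyNameClass back @-names and the * wildcard. A minimal sketch of that
mapping, again assuming an E4X-enabled shell (illustrative only):

    var ns = new Namespace("po", "http://example.org/po");
    ns.prefix;                 // "po"                     -> JSXMLNamespace.prefix
    ns.uri;                    // "http://example.org/po"  -> JSXMLNamespace.uri

    var q = new QName(ns, "item");
    q.uri;                     // "http://example.org/po"  -> JSXMLQName.uri
    q.localName;               // "item"                   -> JSXMLQName.localName

    var x = <x/>;
    x.@id = "1";               // attribute name (js_AttributeNameClass)
    x.@*;                      // all attributes (AnyName wildcard)
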
diff --git a/third_party/js-1.7/lock_SunOS.s b/third_party/js-1.7/lock_SunOS.s
new file mode 100644
index 0000000..7a842d1
--- /dev/null
+++ b/third_party/js-1.7/lock_SunOS.s
@@ -0,0 +1,114 @@
+!
+! The contents of this file are subject to the Netscape Public
+! License Version 1.1 (the "License"); you may not use this file
+! except in compliance with the License. You may obtain a copy of
+! the License at http://www.mozilla.org/NPL/
+!
+! Software distributed under the License is distributed on an "AS
+! IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+! implied. See the License for the specific language governing
+! rights and limitations under the License.
+!
+! The Original Code is Mozilla Communicator client code, released
+! March 31, 1998.
+!
+! The Initial Developer of the Original Code is Netscape
+! Communications Corporation. Portions created by Netscape are
+! Copyright (C) 1998-1999 Netscape Communications Corporation. All
+! Rights Reserved.
+!
+! Contributor(s):
+!
+! Alternatively, the contents of this file may be used under the
+! terms of the GNU Public License (the "GPL"), in which case the
+! provisions of the GPL are applicable instead of those above.
+! If you wish to allow use of your version of this file only
+! under the terms of the GPL and not to allow others to use your
+! version of this file under the NPL, indicate your decision by
+! deleting the provisions above and replace them with the notice
+! and other provisions required by the GPL. If you do not delete
+! the provisions above, a recipient may use your version of this
+! file under either the NPL or the GPL.
+!
+
+!
+! atomic compare-and-swap routines for V8 sparc
+! and for V8+ (ultrasparc)
+!
+!
+! standard asm linkage macros; this module must be compiled
+! with the -P option (use C preprocessor)
+
+#include <sys/asm_linkage.h>
+
+! ======================================================================
+!
+! Perform the sequence *a = b atomically with respect to the previous
+! value of *a (a0). If *a == a0, assign b to *a, all in one atomic
+! operation. Returns 1 if the assignment happened, and 0 otherwise.
+!
+! usage : swapped = compare_and_swap(address, oldval, newval)
+!
+! -----------------------
+! Note on REGISTER USAGE:
+! as this is a LEAF procedure, a new stack frame is not created;
+! we use the caller stack frame so what would normally be %i (input)
+! registers are actually %o (output registers). Also, we must not
+! overwrite the contents of %l (local) registers as they are not
+! assumed to be volatile during calls.
+!
+! So, the registers used are:
+! %o0 [input] - the address of the value to compare and swap
+! %o1 [input] - the old value to compare with
+! %o2 [input] - the new value to set for [%o0]
+! %o3 [local] - work register
+! -----------------------
+#ifndef ULTRA_SPARC
+! v8
+
+ ENTRY(compare_and_swap) ! standard assembler/ELF prologue
+
+ stbar
+ mov -1,%o3 ! busy flag
+ swap [%o0],%o3 ! get current value
+l1: cmp %o3,-1 ! busy?
+ be,a l1 ! if so, spin
+ swap [%o0],%o3 ! using branch-delay to swap back value
+ cmp %o1,%o3 ! compare old with current
+ be,a l2 ! if equal then swap in new value
+ swap [%o0],%o2 ! done.
+ swap [%o0],%o3 ! otherwise, swap back current value
+ retl
+ mov 0,%o0 ! return false
+l2: retl
+ mov 1,%o0 ! return true
+
+ SET_SIZE(compare_and_swap) ! standard assembler/ELF epilogue
+
+!
+! end
+!
+#else /* ULTRA_SPARC */
+! ======================================================================
+!
+! v9
+
+ ENTRY(compare_and_swap) ! standard assembler/ELF prologue
+
+ stbar
+ cas [%o0],%o1,%o2 ! compare *w with old value and set to new if equal
+ cmp %o1,%o2 ! did we succeed?
+ be,a m1 ! yes
+ mov 1,%o0 ! return true (annulled when no jump)
+ mov 0,%o0 ! return false
+m1: retl
+ nop
+
+ SET_SIZE(compare_and_swap) ! standard assembler/ELF epilogue
+
+!
+! end
+!
+! ======================================================================
+!
+#endif
diff --git a/third_party/js-1.7/perfect.js b/third_party/js-1.7/perfect.js
new file mode 100644
index 0000000..aeca121
--- /dev/null
+++ b/third_party/js-1.7/perfect.js
@@ -0,0 +1,39 @@
+// Some simple testing of new, eval and some string stuff.
+
+// constructor -- expression array initialization
+function ExprArray(n,v)
+{
+ // Initializes n values to v coerced to a string.
+ for (var i = 0; i < n; i++) {
+ this[i] = "" + v;
+ }
+}
+
+
+// Print the perfect numbers up to n and the sum expression for n's divisors.
+function perfect(n)
+{
+ print("The perfect numbers up to " + n + " are:");
+
+ // We build sumOfDivisors[i] to hold a string expression for
+ // the sum of the divisors of i, excluding i itself.
+ var sumOfDivisors = new ExprArray(n+1,1);
+ for (var divisor = 2; divisor <= n; divisor++) {
+ for (var j = divisor + divisor; j <= n; j += divisor) {
+ sumOfDivisors[j] += " + " + divisor;
+ }
+ // At this point everything up to 'divisor' has its sumOfDivisors
+ // expression calculated, so we can determine whether it's perfect
+ // already by evaluating.
+ if (eval(sumOfDivisors[divisor]) == divisor) {
+ print("" + divisor + " = " + sumOfDivisors[divisor]);
+ }
+ }
+ print("That's all.");
+}
+
+
+print("\nA number is 'perfect' if it is equal to the sum of its")
+print("divisors (excluding itself).\n");
+perfect(500);
+
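
The comments above describe the approach: for every divisor, add it to a running sum held for each of its multiples, and a number is perfect when that accumulated sum equals the number itself. As a sketch of the same divisor-sum sieve without the string/eval indirection (C is used here purely for illustration; it is not part of this commit):

    #include <stdio.h>

    #define LIMIT 500   /* same bound as the JS test above */

    /* Sketch: the divisor-sum sieve from perfect.js, with sum[j]
     * accumulating the proper divisors of j directly. */
    int main(void)
    {
        static int sum[LIMIT + 1];
        int divisor, j;

        for (j = 1; j <= LIMIT; j++)
            sum[j] = 1;                    /* 1 divides everything; j itself is excluded */
        for (divisor = 2; divisor <= LIMIT; divisor++) {
            for (j = divisor + divisor; j <= LIMIT; j += divisor)
                sum[j] += divisor;
            if (sum[divisor] == divisor)   /* all proper divisors are already summed */
                printf("%d is perfect\n", divisor);
        }
        return 0;
    }
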
diff --git a/third_party/js-1.7/plify_jsdhash.sed b/third_party/js-1.7/plify_jsdhash.sed
new file mode 100644
index 0000000..eff4901
--- /dev/null
+++ b/third_party/js-1.7/plify_jsdhash.sed
@@ -0,0 +1,33 @@
+/ * Double hashing implementation./a\
+ * GENERATED BY js/src/plify_jsdhash.sed -- DO NOT EDIT!!!
+/ * Double hashing, a la Knuth 6./a\
+ * GENERATED BY js/src/plify_jsdhash.sed -- DO NOT EDIT!!!
+s/jsdhash_h___/pldhash_h___/
+s/jsdhash\.bigdump/pldhash.bigdump/
+s/jstypes\.h/nscore.h/
+s/jsbit\.h/prbit.h/
+s/jsdhash\.h/pldhash.h/
+s/jsdhash\.c/pldhash.c/
+s/jsdhash:/pldhash:/
+s/jsutil\.h/nsDebug.h/
+s/JS_DHASH/PL_DHASH/g
+s/JS_DHash/PL_DHash/g
+s/JSDHash/PLDHash/g
+s/JSHash/PLHash/g
+s/uint32 /PRUint32/g
+s/\([^U]\)int32 /\1PRInt32/g
+s/uint16 /PRUint16/g
+s/\([^U]\)int16 /\1PRInt16/g
+s/uint32/PRUint32/g
+s/\([^U]\)int32/\1PRInt32/g
+s/uint16/PRUint16/g
+s/\([^U]\)int16/\1PRInt16/g
+s/JSBool/PRBool/g
+s/extern JS_PUBLIC_API(\([^()]*\))/NS_COM_GLUE \1/
+s/JS_PUBLIC_API(\([^()]*\))/\1/
+s/JS_DLL_CALLBACK/PR_CALLBACK/
+s/JS_STATIC_DLL_CALLBACK/PR_STATIC_CALLBACK/
+s/JS_NewDHashTable/PL_NewDHashTable/
+s/JS_ASSERT(0)/NS_NOTREACHED("0")/
+s/\( *\)JS_ASSERT(\(.*\));/\1NS_ASSERTION(\2,\n\1 "\2");/
+s/JS_/PR_/g
diff --git a/third_party/js-1.7/prmjtime.c b/third_party/js-1.7/prmjtime.c
new file mode 100644
index 0000000..3228af8
--- /dev/null
+++ b/third_party/js-1.7/prmjtime.c
@@ -0,0 +1,439 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * PR time code.
+ */
+#include "jsstddef.h"
+#ifdef SOLARIS
+#define _REENTRANT 1
+#endif
+#include <string.h>
+#include <time.h>
+#include "jstypes.h"
+#include "jsutil.h"
+
+#include "jsprf.h"
+#include "prmjtime.h"
+
+#define PRMJ_DO_MILLISECONDS 1
+
+#ifdef XP_OS2
+#include <sys/timeb.h>
+#endif
+#ifdef XP_WIN
+#include <windows.h>
+#endif
+
+#if defined(XP_UNIX) || defined(XP_BEOS)
+
+#ifdef _SVID_GETTOD /* Defined only on Solaris, see Solaris <sys/types.h> */
+extern int gettimeofday(struct timeval *tv);
+#endif
+
+#include <sys/time.h>
+
+#endif /* XP_UNIX */
+
+#define IS_LEAP(year) \
+ (year != 0 && ((((year & 0x3) == 0) && \
+ ((year - ((year/100) * 100)) != 0)) || \
+ (year - ((year/400) * 400)) == 0))
+
+#define PRMJ_HOUR_SECONDS 3600L
+#define PRMJ_DAY_SECONDS (24L * PRMJ_HOUR_SECONDS)
+#define PRMJ_YEAR_SECONDS (PRMJ_DAY_SECONDS * 365L)
+#define PRMJ_MAX_UNIX_TIMET 2145859200L /*time_t value equiv. to 12/31/2037 */
+/* function prototypes */
+static void PRMJ_basetime(JSInt64 tsecs, PRMJTime *prtm);
+/*
+ * get the difference in seconds between this time zone and UTC (GMT)
+ */
+JSInt32
+PRMJ_LocalGMTDifference()
+{
+#if defined(XP_UNIX) || defined(XP_WIN) || defined(XP_OS2) || defined(XP_BEOS)
+ struct tm ltime;
+
+ /* get the difference between this time zone and GMT */
+ memset((char *)&ltime,0,sizeof(ltime));
+ ltime.tm_mday = 2;
+ ltime.tm_year = 70;
+#ifdef SUNOS4
+ ltime.tm_zone = 0;
+ ltime.tm_gmtoff = 0;
+ return timelocal(&ltime) - (24 * 3600);
+#else
+ return mktime(&ltime) - (24L * 3600L);
+#endif
+#endif
+}
+
+/* Constants for GMT offset from 1970 */
+#define G1970GMTMICROHI 0x00dcdcad /* micro secs to 1970 hi */
+#define G1970GMTMICROLOW 0x8b3fa000 /* micro secs to 1970 low */
+
+#define G2037GMTMICROHI 0x00e45fab /* micro secs to 2037 high */
+#define G2037GMTMICROLOW 0x7a238000 /* micro secs to 2037 low */
+
+/* Convert from base time to extended time */
+static JSInt64
+PRMJ_ToExtendedTime(JSInt32 base_time)
+{
+ JSInt64 exttime;
+ JSInt64 g1970GMTMicroSeconds;
+ JSInt64 low;
+ JSInt32 diff;
+ JSInt64 tmp;
+ JSInt64 tmp1;
+
+ diff = PRMJ_LocalGMTDifference();
+ JSLL_UI2L(tmp, PRMJ_USEC_PER_SEC);
+ JSLL_I2L(tmp1,diff);
+ JSLL_MUL(tmp,tmp,tmp1);
+
+ JSLL_UI2L(g1970GMTMicroSeconds,G1970GMTMICROHI);
+ JSLL_UI2L(low,G1970GMTMICROLOW);
+#ifndef JS_HAVE_LONG_LONG
+ JSLL_SHL(g1970GMTMicroSeconds,g1970GMTMicroSeconds,16);
+ JSLL_SHL(g1970GMTMicroSeconds,g1970GMTMicroSeconds,16);
+#else
+ JSLL_SHL(g1970GMTMicroSeconds,g1970GMTMicroSeconds,32);
+#endif
+ JSLL_ADD(g1970GMTMicroSeconds,g1970GMTMicroSeconds,low);
+
+ JSLL_I2L(exttime,base_time);
+ JSLL_ADD(exttime,exttime,g1970GMTMicroSeconds);
+ JSLL_SUB(exttime,exttime,tmp);
+ return exttime;
+}
+
+JSInt64
+PRMJ_Now(void)
+{
+#ifdef XP_OS2
+ JSInt64 s, us, ms2us, s2us;
+ struct timeb b;
+#endif
+#ifdef XP_WIN
+ JSInt64 s, us,
+ win2un = JSLL_INIT(0x19DB1DE, 0xD53E8000),
+ ten = JSLL_INIT(0, 10);
+ FILETIME time, midnight;
+#endif
+#if defined(XP_UNIX) || defined(XP_BEOS)
+ struct timeval tv;
+ JSInt64 s, us, s2us;
+#endif /* XP_UNIX */
+
+#ifdef XP_OS2
+ ftime(&b);
+ JSLL_UI2L(ms2us, PRMJ_USEC_PER_MSEC);
+ JSLL_UI2L(s2us, PRMJ_USEC_PER_SEC);
+ JSLL_UI2L(s, b.time);
+ JSLL_UI2L(us, b.millitm);
+ JSLL_MUL(us, us, ms2us);
+ JSLL_MUL(s, s, s2us);
+ JSLL_ADD(s, s, us);
+ return s;
+#endif
+#ifdef XP_WIN
+    /* The Windows epoch is 1601-01-01; the Unix epoch is 1970-01-01.
+       win2un is the difference (in Windows time units, which are 10 times
+       more precise than the JS time unit). */
+ GetSystemTimeAsFileTime(&time);
+ /* Win9x gets confused at midnight
+ http://support.microsoft.com/default.aspx?scid=KB;en-us;q224423
+ So if the low part (precision <8mins) is 0 then we get the time
+ again. */
+ if (!time.dwLowDateTime) {
+ GetSystemTimeAsFileTime(&midnight);
+ time.dwHighDateTime = midnight.dwHighDateTime;
+ }
+ JSLL_UI2L(s, time.dwHighDateTime);
+ JSLL_UI2L(us, time.dwLowDateTime);
+ JSLL_SHL(s, s, 32);
+ JSLL_ADD(s, s, us);
+ JSLL_SUB(s, s, win2un);
+ JSLL_DIV(s, s, ten);
+ return s;
+#endif
+
+#if defined(XP_UNIX) || defined(XP_BEOS)
+#ifdef _SVID_GETTOD /* Defined only on Solaris, see Solaris <sys/types.h> */
+ gettimeofday(&tv);
+#else
+ gettimeofday(&tv, 0);
+#endif /* _SVID_GETTOD */
+ JSLL_UI2L(s2us, PRMJ_USEC_PER_SEC);
+ JSLL_UI2L(s, tv.tv_sec);
+ JSLL_UI2L(us, tv.tv_usec);
+ JSLL_MUL(s, s, s2us);
+ JSLL_ADD(s, s, us);
+ return s;
+#endif /* XP_UNIX */
+}
+
+/* Get the DST timezone offset for the time passed in */
+JSInt64
+PRMJ_DSTOffset(JSInt64 local_time)
+{
+ JSInt64 us2s;
+ time_t local;
+ JSInt32 diff;
+ JSInt64 maxtimet;
+ struct tm tm;
+ PRMJTime prtm;
+#ifndef HAVE_LOCALTIME_R
+ struct tm *ptm;
+#endif
+
+
+ JSLL_UI2L(us2s, PRMJ_USEC_PER_SEC);
+ JSLL_DIV(local_time, local_time, us2s);
+
+ /* get the maximum of time_t value */
+ JSLL_UI2L(maxtimet,PRMJ_MAX_UNIX_TIMET);
+
+ if(JSLL_CMP(local_time,>,maxtimet)){
+ JSLL_UI2L(local_time,PRMJ_MAX_UNIX_TIMET);
+ } else if(!JSLL_GE_ZERO(local_time)){
+ /*go ahead a day to make localtime work (does not work with 0) */
+ JSLL_UI2L(local_time,PRMJ_DAY_SECONDS);
+ }
+ JSLL_L2UI(local,local_time);
+ PRMJ_basetime(local_time,&prtm);
+#ifndef HAVE_LOCALTIME_R
+ ptm = localtime(&local);
+ if(!ptm){
+ return JSLL_ZERO;
+ }
+ tm = *ptm;
+#else
+ localtime_r(&local,&tm); /* get dst information */
+#endif
+
+ diff = ((tm.tm_hour - prtm.tm_hour) * PRMJ_HOUR_SECONDS) +
+ ((tm.tm_min - prtm.tm_min) * 60);
+
+ if(diff < 0){
+ diff += PRMJ_DAY_SECONDS;
+ }
+
+ JSLL_UI2L(local_time,diff);
+
+ JSLL_MUL(local_time,local_time,us2s);
+
+ return(local_time);
+}
+
+/* Format a time value into a buffer. Same semantics as strftime() */
+size_t
+PRMJ_FormatTime(char *buf, int buflen, char *fmt, PRMJTime *prtm)
+{
+#if defined(XP_UNIX) || defined(XP_WIN) || defined(XP_OS2) || defined(XP_BEOS)
+ struct tm a;
+
+ /* Zero out the tm struct. Linux, SunOS 4 struct tm has extra members int
+ * tm_gmtoff, char *tm_zone; when tm_zone is garbage, strftime gets
+ * confused and dumps core. NSPR20 prtime.c attempts to fill these in by
+ * calling mktime on the partially filled struct, but this doesn't seem to
+ * work as well; the result string has "can't get timezone" for ECMA-valid
+ * years. Might still make sense to use this, but find the range of years
+ * for which valid tz information exists, and map (per ECMA hint) from the
+ * given year into that range.
+
+ * N.B. This hasn't been tested with anything that actually _uses_
+ * tm_gmtoff; zero might be the wrong thing to set it to if you really need
+ * to format a time. This fix is for jsdate.c, which only uses
+ * JS_FormatTime to get a string representing the time zone. */
+ memset(&a, 0, sizeof(struct tm));
+
+ a.tm_sec = prtm->tm_sec;
+ a.tm_min = prtm->tm_min;
+ a.tm_hour = prtm->tm_hour;
+ a.tm_mday = prtm->tm_mday;
+ a.tm_mon = prtm->tm_mon;
+ a.tm_wday = prtm->tm_wday;
+ a.tm_year = prtm->tm_year - 1900;
+ a.tm_yday = prtm->tm_yday;
+ a.tm_isdst = prtm->tm_isdst;
+
+ /* Even with the above, SunOS 4 seems to detonate if tm_zone and tm_gmtoff
+ * are null. This doesn't quite work, though - the timezone is off by
+ * tzoff + dst. (And mktime seems to return -1 for the exact dst
+ * changeover time.)
+
+ */
+
+#if defined(SUNOS4)
+ if (mktime(&a) == -1) {
+ /* Seems to fail whenever the requested date is outside of the 32-bit
+ * UNIX epoch. We could proceed at this point (setting a.tm_zone to
+ * "") but then strftime returns a string with a 2-digit field of
+ * garbage for the year. So we return 0 and hope jsdate.c
+ * will fall back on toString.
+ */
+ return 0;
+ }
+#endif
+
+ return strftime(buf, buflen, fmt, &a);
+#endif
+}
+
+/* table for number of days in a month */
+static int mtab[] = {
+ /* jan, feb,mar,apr,may,jun */
+ 31,28,31,30,31,30,
+ /* july,aug,sep,oct,nov,dec */
+ 31,31,30,31,30,31
+};
+
+/*
+ * basic time calculation functionality for localtime and gmtime
+ * sets up the prtm argument with correct values based upon the input number
+ * of seconds.
+ */
+static void
+PRMJ_basetime(JSInt64 tsecs, PRMJTime *prtm)
+{
+ /* convert tsecs back to year,month,day,hour,secs */
+ JSInt32 year = 0;
+ JSInt32 month = 0;
+ JSInt32 yday = 0;
+ JSInt32 mday = 0;
+ JSInt32 wday = 6; /* start on a Sunday */
+ JSInt32 days = 0;
+ JSInt32 seconds = 0;
+ JSInt32 minutes = 0;
+ JSInt32 hours = 0;
+ JSInt32 isleap = 0;
+ JSInt64 result;
+ JSInt64 result1;
+ JSInt64 result2;
+ JSInt64 base;
+
+ JSLL_UI2L(result,0);
+ JSLL_UI2L(result1,0);
+ JSLL_UI2L(result2,0);
+
+ /* get the base time via UTC */
+ base = PRMJ_ToExtendedTime(0);
+ JSLL_UI2L(result, PRMJ_USEC_PER_SEC);
+ JSLL_DIV(base,base,result);
+ JSLL_ADD(tsecs,tsecs,base);
+
+ JSLL_UI2L(result, PRMJ_YEAR_SECONDS);
+ JSLL_UI2L(result1,PRMJ_DAY_SECONDS);
+ JSLL_ADD(result2,result,result1);
+
+ /* get the year */
+ while ((isleap == 0) ? !JSLL_CMP(tsecs,<,result) : !JSLL_CMP(tsecs,<,result2)) {
+ /* subtract a year from tsecs */
+ JSLL_SUB(tsecs,tsecs,result);
+ days += 365;
+ /* is it a leap year ? */
+ if(IS_LEAP(year)){
+ JSLL_SUB(tsecs,tsecs,result1);
+ days++;
+ }
+ year++;
+ isleap = IS_LEAP(year);
+ }
+
+ JSLL_UI2L(result1,PRMJ_DAY_SECONDS);
+
+ JSLL_DIV(result,tsecs,result1);
+ JSLL_L2I(mday,result);
+
+ /* let's find the month */
+ while(((month == 1 && isleap) ?
+ (mday >= mtab[month] + 1) :
+ (mday >= mtab[month]))){
+ yday += mtab[month];
+ days += mtab[month];
+
+ mday -= mtab[month];
+
+ /* it's a Feb, check if this is a leap year */
+ if(month == 1 && isleap != 0){
+ yday++;
+ days++;
+ mday--;
+ }
+ month++;
+ }
+
+ /* now adjust tsecs */
+ JSLL_MUL(result,result,result1);
+ JSLL_SUB(tsecs,tsecs,result);
+
+    mday++; /* day of month always starts at 1 */
+ days += mday;
+ wday = (days + wday) % 7;
+
+ yday += mday;
+
+ /* get the hours */
+ JSLL_UI2L(result1,PRMJ_HOUR_SECONDS);
+ JSLL_DIV(result,tsecs,result1);
+ JSLL_L2I(hours,result);
+ JSLL_MUL(result,result,result1);
+ JSLL_SUB(tsecs,tsecs,result);
+
+ /* get minutes */
+ JSLL_UI2L(result1,60);
+ JSLL_DIV(result,tsecs,result1);
+ JSLL_L2I(minutes,result);
+ JSLL_MUL(result,result,result1);
+ JSLL_SUB(tsecs,tsecs,result);
+
+ JSLL_L2I(seconds,tsecs);
+
+ prtm->tm_usec = 0L;
+ prtm->tm_sec = (JSInt8)seconds;
+ prtm->tm_min = (JSInt8)minutes;
+ prtm->tm_hour = (JSInt8)hours;
+ prtm->tm_mday = (JSInt8)mday;
+ prtm->tm_mon = (JSInt8)month;
+ prtm->tm_wday = (JSInt8)wday;
+ prtm->tm_year = (JSInt16)year;
+ prtm->tm_yday = (JSInt16)yday;
+}
diff --git a/third_party/js-1.7/prmjtime.h b/third_party/js-1.7/prmjtime.h
new file mode 100644
index 0000000..b74fe84
--- /dev/null
+++ b/third_party/js-1.7/prmjtime.h
@@ -0,0 +1,95 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef prmjtime_h___
+#define prmjtime_h___
+/*
+ * PR date stuff for mocha and java. Placed here temporarily not to break
+ * Navigator and localize changes to mocha.
+ */
+#include <time.h>
+#include "jslong.h"
+#ifdef MOZILLA_CLIENT
+#include "jscompat.h"
+#endif
+
+JS_BEGIN_EXTERN_C
+
+typedef struct PRMJTime PRMJTime;
+
+/*
+ * Broken down form of 64 bit time value.
+ */
+struct PRMJTime {
+ JSInt32 tm_usec; /* microseconds of second (0-999999) */
+ JSInt8 tm_sec; /* seconds of minute (0-59) */
+ JSInt8 tm_min; /* minutes of hour (0-59) */
+ JSInt8 tm_hour; /* hour of day (0-23) */
+ JSInt8 tm_mday; /* day of month (1-31) */
+ JSInt8 tm_mon; /* month of year (0-11) */
+ JSInt8 tm_wday; /* 0=sunday, 1=monday, ... */
+ JSInt16 tm_year; /* absolute year, AD */
+ JSInt16 tm_yday; /* day of year (0 to 365) */
+ JSInt8 tm_isdst; /* non-zero if DST in effect */
+};
+
+/* Some handy constants */
+#define PRMJ_USEC_PER_SEC 1000000L
+#define PRMJ_USEC_PER_MSEC 1000L
+
+/* Return the current local time in micro-seconds */
+extern JSInt64
+PRMJ_Now(void);
+
+/* get the difference between this time zone and gmt timezone in seconds */
+extern JSInt32
+PRMJ_LocalGMTDifference(void);
+
+/* Format a time value into a buffer. Same semantics as strftime() */
+extern size_t
+PRMJ_FormatTime(char *buf, int buflen, char *fmt, PRMJTime *tm);
+
+/* Get the DST offset for the local time passed in */
+extern JSInt64
+PRMJ_DSTOffset(JSInt64 local_time);
+
+JS_END_EXTERN_C
+
+#endif /* prmjtime_h___ */
+
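
The header above is the whole public surface: PRMJ_Now for the current time in microseconds, PRMJ_LocalGMTDifference for the zone offset in seconds, PRMJ_DSTOffset for the DST correction, and PRMJ_FormatTime as an strftime-style formatter over a PRMJTime. A minimal caller sketch, assuming only what these declarations state (the broken-down field values below are illustrative):

    #include <stdio.h>
    #include "prmjtime.h"

    int main(void)
    {
        JSInt64 now;
        JSInt32 tzoff;
        PRMJTime t;
        char buf[64];

        now = PRMJ_Now();                    /* microseconds since the epoch */
        tzoff = PRMJ_LocalGMTDifference();   /* this zone's offset from UTC, in seconds */

        t.tm_usec = 0;  t.tm_sec = 0;  t.tm_min = 30; t.tm_hour = 12;
        t.tm_mday = 31; t.tm_mon = 2;  t.tm_wday = 2; t.tm_year = 1998;
        t.tm_yday = 89; t.tm_isdst = 0;

        /* strftime-style formatting of the broken-down time */
        if (PRMJ_FormatTime(buf, (int)sizeof buf, "%Y-%m-%d %H:%M", &t) > 0)
            printf("%s (zone offset %ld s)\n", buf, (long)tzoff);

        (void)now;
        return 0;
    }
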
diff --git a/third_party/js-1.7/resource.h b/third_party/js-1.7/resource.h
new file mode 100644
index 0000000..9301810
--- /dev/null
+++ b/third_party/js-1.7/resource.h
@@ -0,0 +1,15 @@
+//{{NO_DEPENDENCIES}}
+// Microsoft Developer Studio generated include file.
+// Used by js3240.rc
+//
+
+// Next default values for new objects
+//
+#ifdef APSTUDIO_INVOKED
+#ifndef APSTUDIO_READONLY_SYMBOLS
+#define _APS_NEXT_RESOURCE_VALUE 101
+#define _APS_NEXT_COMMAND_VALUE 40001
+#define _APS_NEXT_CONTROL_VALUE 1000
+#define _APS_NEXT_SYMED_VALUE 101
+#endif
+#endif
diff --git a/third_party/js-1.7/rules.mk b/third_party/js-1.7/rules.mk
new file mode 100644
index 0000000..8d484db
--- /dev/null
+++ b/third_party/js-1.7/rules.mk
@@ -0,0 +1,193 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998-1999
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Michael Ang <mang@subcarrier.org>
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either of the GNU General Public License Version 2 or later (the "GPL"),
+# or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# JSRef GNUmake makefile rules
+#
+
+ifdef USE_MSVC
+LIB_OBJS = $(addprefix $(OBJDIR)/, $(LIB_CFILES:.c=.obj))
+PROG_OBJS = $(addprefix $(OBJDIR)/, $(PROG_CFILES:.c=.obj))
+else
+LIB_OBJS = $(addprefix $(OBJDIR)/, $(LIB_CFILES:.c=.o))
+LIB_OBJS += $(addprefix $(OBJDIR)/, $(LIB_ASFILES:.s=.o))
+PROG_OBJS = $(addprefix $(OBJDIR)/, $(PROG_CFILES:.c=.o))
+endif
+
+CFILES = $(LIB_CFILES) $(PROG_CFILES)
+OBJS = $(LIB_OBJS) $(PROG_OBJS)
+
+ifdef USE_MSVC
+# TARGETS = $(LIBRARY) # $(PROGRAM) not supported for MSVC yet
+TARGETS += $(SHARED_LIBRARY) $(PROGRAM) # it is now
+else
+TARGETS += $(LIBRARY) $(SHARED_LIBRARY) $(PROGRAM)
+endif
+
+all:
+ +$(LOOP_OVER_PREDIRS)
+ifneq "$(strip $(TARGETS))" ""
+ $(MAKE) -f Makefile.ref $(TARGETS)
+endif
+ +$(LOOP_OVER_DIRS)
+
+$(OBJDIR)/%: %.c
+ @$(MAKE_OBJDIR)
+ $(CC) -o $@ $(CFLAGS) $*.c $(LDFLAGS)
+
+# This rule must come before the rule with no dep on header
+$(OBJDIR)/%.o: %.c %.h
+ @$(MAKE_OBJDIR)
+ $(CC) -o $@ -c $(CFLAGS) $*.c
+
+
+$(OBJDIR)/%.o: %.c
+ @$(MAKE_OBJDIR)
+ $(CC) -o $@ -c $(CFLAGS) $*.c
+
+$(OBJDIR)/%.o: %.s
+ @$(MAKE_OBJDIR)
+ $(AS) -o $@ $(ASFLAGS) $*.s
+
+# This rule must come before rule with no dep on header
+$(OBJDIR)/%.obj: %.c %.h
+ @$(MAKE_OBJDIR)
+ $(CC) -Fo$(OBJDIR)/ -c $(CFLAGS) $(JSDLL_CFLAGS) $*.c
+
+$(OBJDIR)/%.obj: %.c
+ @$(MAKE_OBJDIR)
+ $(CC) -Fo$(OBJDIR)/ -c $(CFLAGS) $(JSDLL_CFLAGS) $*.c
+
+$(OBJDIR)/js.obj: js.c
+ @$(MAKE_OBJDIR)
+ $(CC) -Fo$(OBJDIR)/ -c $(CFLAGS) $<
+
+ifeq ($(OS_ARCH),OS2)
+$(LIBRARY): $(LIB_OBJS)
+ $(AR) $@ $? $(AR_OS2_SUFFIX)
+ $(RANLIB) $@
+else
+ifdef USE_MSVC
+$(SHARED_LIBRARY): $(LIB_OBJS)
+ link.exe $(LIB_LINK_FLAGS) /base:0x61000000 $(OTHER_LIBS) \
+ /out:"$@" /pdb:none\
+ /implib:"$(OBJDIR)/$(@F:.dll=.lib)" $^
+else
+$(LIBRARY): $(LIB_OBJS)
+ $(AR) rv $@ $?
+ $(RANLIB) $@
+
+$(SHARED_LIBRARY): $(LIB_OBJS)
+ $(MKSHLIB) -o $@ $(LIB_OBJS) $(LDFLAGS) $(OTHER_LIBS)
+endif
+endif
+
+# Java stuff
+$(CLASSDIR)/$(OBJDIR)/$(JARPATH)/%.class: %.java
+ mkdir -p $(@D)
+ $(JAVAC) $(JAVAC_FLAGS) $<
+
+define MAKE_OBJDIR
+if test ! -d $(@D); then rm -rf $(@D); mkdir -p $(@D); fi
+endef
+
+ifdef DIRS
+LOOP_OVER_DIRS = \
+ @for d in $(DIRS); do \
+ if test -d $$d; then \
+ set -e; \
+ echo "cd $$d; $(MAKE) -f Makefile.ref $@"; \
+ cd $$d; $(MAKE) -f Makefile.ref $@; cd ..; \
+ set +e; \
+ else \
+ echo "Skipping non-directory $$d..."; \
+ fi; \
+ done
+endif
+
+ifdef PREDIRS
+LOOP_OVER_PREDIRS = \
+ @for d in $(PREDIRS); do \
+ if test -d $$d; then \
+ set -e; \
+ echo "cd $$d; $(MAKE) -f Makefile.ref $@"; \
+ cd $$d; $(MAKE) -f Makefile.ref $@; cd ..; \
+ set +e; \
+ else \
+ echo "Skipping non-directory $$d..."; \
+ fi; \
+ done
+endif
+
+export:
+ +$(LOOP_OVER_PREDIRS)
+ mkdir -p $(DIST)/include $(DIST)/$(LIBDIR) $(DIST)/bin
+ifneq "$(strip $(HFILES))" ""
+ $(CP) $(HFILES) $(DIST)/include
+endif
+ifneq "$(strip $(LIBRARY))" ""
+ $(CP) $(LIBRARY) $(DIST)/$(LIBDIR)
+endif
+ifneq "$(strip $(JARS))" ""
+ $(CP) $(JARS) $(DIST)/$(LIBDIR)
+endif
+ifneq "$(strip $(SHARED_LIBRARY))" ""
+ $(CP) $(SHARED_LIBRARY) $(DIST)/$(LIBDIR)
+endif
+ifneq "$(strip $(PROGRAM))" ""
+ $(CP) $(PROGRAM) $(DIST)/bin
+endif
+ +$(LOOP_OVER_DIRS)
+
+clean:
+ rm -rf $(OBJS) $(GARBAGE)
+ @cd fdlibm; $(MAKE) -f Makefile.ref clean
+
+clobber:
+ rm -rf $(OBJS) $(TARGETS) $(DEPENDENCIES)
+ @cd fdlibm; $(MAKE) -f Makefile.ref clobber
+
+depend:
+ gcc -MM $(CFLAGS) $(LIB_CFILES)
+
+tar:
+ tar cvf $(TARNAME) $(TARFILES)
+ gzip $(TARNAME)
+
diff --git a/third_party/js-1.7/win32.order b/third_party/js-1.7/win32.order
new file mode 100644
index 0000000..cf4e8c4
--- /dev/null
+++ b/third_party/js-1.7/win32.order
@@ -0,0 +1,391 @@
+js_MarkGCThing ; 5893956
+JS_GetPrivate ; 2090130
+JS_HashTableRawLookup ; 1709984
+js_Mark ; 1547496
+js_GetToken ; 1406677
+js_UngetToken ; 1154416
+js_MarkAtom ; 992874
+js_MatchToken ; 980277
+js_CompareStrings ; 662772
+js_Lock ; 628184
+js_Unlock ; 628184
+js_AtomizeString ; 611102
+js_HashString ; 611102
+js_DropScopeProperty ; 546476
+JS_malloc ; 484350
+js_Atomize ; 464433
+js_InflateStringToBuffer ; 460739
+js_HoldScopeProperty ; 442612
+JS_free ; 382991
+js_MarkScript ; 376942
+js_HashId ; 365238
+JS_CompareValues ; 352366
+js_IdToValue ; 337594
+JS_GetClass ; 325296
+js_LookupProperty ; 324680
+js_GetAtom ; 244669
+js_DropProperty ; 223217
+JS_GetParent ; 209680
+js_LiveContext ; 205767
+js_PeekToken ; 200646
+js_GetSlotThreadSafe ; 198839
+JS_GetStringChars ; 190862
+JS_HashTableRawAdd ; 179156
+js_FoldConstants ; 162626
+js_EmitTree ; 145634
+JS_EnumerateStub ; 140640
+js_NewSrcNote ; 136983
+js_GetProperty ; 135639
+js_NewScopeProperty ; 135057
+js_MutateScope ; 135057
+js_GetMutableScope ; 135057
+js_AllocSlot ; 132401
+JS_GetRuntime ; 127316
+JS_FrameIterator ; 121963
+JS_GetFrameFunctionObject ; 120567
+js_AllocGCThing ; 119828
+js_DestroyScopeProperty ; 115989
+js_Emit3 ; 109135
+js_AtomizeChars ; 108038
+JS_HashTableLookup ; 107154
+JS_InstanceOf ; 103905
+js_DefineProperty ; 99514
+js_strncpy ; 88276
+js_PeekTokenSameLine ; 87197
+js_HoldObjectMap ; 79084
+js_DropObjectMap ; 77824
+js_NewObject ; 72421
+js_ValueToString ; 72143
+js_GetClassPrototype ; 66235
+js_UnlockRuntime ; 64699
+js_LockRuntime ; 64699
+js_ContextIterator ; 64586
+JS_ClearWatchPointsForObject ; 64155
+js_FinalizeObject ; 63925
+js_IndexAtom ; 63789
+JS_SetPrivate ; 63702
+JS_GetGlobalObject ; 63546
+js_Emit1 ; 63012
+JS_ContextIterator ; 57847
+JS_GetInstancePrivate ; 57817
+JS_HashTableRawRemove ; 57057
+js_AllocRawStack ; 54181
+js_Invoke ; 53568
+js_FindProperty ; 53150
+JS_GetFrameScript ; 51395
+js_LinkFunctionObject ; 50651
+js_SetSrcNoteOffset ; 47735
+js_InWithStatement ; 47346
+js_NewFunction ; 47074
+js_NewSrcNote2 ; 46165
+JS_HashTableAdd ; 45503
+JS_HashTableRemove ; 45213
+js_InCatchBlock ; 42198
+js_AddRootRT ; 40587
+js_AddRoot ; 40587
+js_SetProperty ; 40558
+JS_AddNamedRoot ; 40462
+js_RemoveRoot ; 40384
+JS_RemoveRootRT ; 38129
+js_NewString ; 37471
+js_DefineFunction ; 36629
+JS_GetContextThread ; 36498
+JS_LookupProperty ; 35137
+JS_ValueToString ; 34072
+JS_realloc ; 33776
+JS_DefineFunction ; 33268
+JS_SetErrorReporter ; 32851
+js_FinalizeString ; 30311
+js_FinalizeStringRT ; 30311
+JS_ArenaAllocate ; 30099
+JS_BeginRequest ; 29323
+JS_EndRequest ; 29323
+JS_GetContextPrivate ; 29189
+JS_CompactArenaPool ; 28874
+js_ValueToStringAtom ; 27934
+JS_ValueToId ; 26517
+js_ValueToBoolean ; 25908
+JS_InternString ; 25467
+js_PopStatement ; 24364
+js_PushStatement ; 24364
+js_NewStringCopyN ; 23911
+js_FlushPropertyCacheByProp ; 23883
+js_GetStringBytes ; 23421
+JS_ArenaRelease ; 23267
+JS_GetStringBytes ; 23106
+js_FreeStack ; 22399
+js_AllocStack ; 22399
+JS_SetProperty ; 21240
+js_InitObjectMap ; 19991
+js_NewScope ; 19991
+js_strlen ; 19070
+JS_GetScriptPrincipals ; 18063
+js_SrcNoteLength ; 17369
+js_DestroyObjectMap ; 17198
+js_DestroyScope ; 17198
+JS_GetStringLength ; 16306
+js_PopStatementCG ; 15418
+JS_GetFrameAnnotation ; 14949
+js_FreeRawStack ; 14032
+js_Interpret ; 14032
+js_TransferScopeLock ; 13899
+JS_ResolveStandardClass ; 13645
+JS_ResumeRequest ; 12837
+JS_SuspendRequest ; 12837
+JS_GetProperty ; 12488
+JS_NewObject ; 11660
+js_AllocTryNotes ; 11418
+js_NewNumberValue ; 10859
+js_InternalInvoke ; 10051
+js_NewDouble ; 9936
+js_SetJumpOffset ; 9886
+js_SkipWhiteSpace ; 9299
+js_NewDoubleValue ; 7474
+JS_GetPendingException ; 7404
+js_NewObjectMap ; 7236
+JS_ClearPendingException ; 7092
+JS_strtod ; 7053
+js_strtod ; 7053
+js_InflateString ; 7004
+JS_GetFunctionName ; 6808
+JS_NewHashTable ; 6794
+JS_NewFunction ; 6575
+js_FreeSlot ; 6476
+js_LockScope ; 6332
+JS_HashTableEnumerateEntries ; 6285
+js_GetLengthProperty ; 6162
+js_LockObj ; 6149
+JS_NewUCStringCopyN ; 5994
+JS_NewNumberValue ; 5904
+js_NewStringCopyZ ; 5809
+JS_NewUCStringCopyZ ; 5809
+js_DeflateString ; 5612
+js_ValueToNumber ; 5456
+JS_SetOptions ; 5322
+js_NewScript ; 4941
+js_InitCodeGenerator ; 4810
+js_FinishTakingSrcNotes ; 4810
+js_NewScriptFromParams ; 4810
+js_InitAtomMap ; 4810
+js_FinishTakingTryNotes ; 4810
+js_NewScriptFromCG ; 4810
+js_FinishCodeGenerator ; 4810
+JS_strdup ; 4534
+JS_HashTableDestroy ; 4119
+js_CheckRedeclaration ; 3965
+JS_DefineFunctions ; 3808
+js_EmitFunctionBody ; 3739
+js_TryMethod ; 3685
+js_DefaultValue ; 3610
+js_CloneFunctionObject ; 3577
+JS_InitClass ; 3546
+js_SetClassPrototype ; 3377
+JS_GetPrototype ; 3268
+JS_DefineProperties ; 3115
+js_FindVariable ; 3093
+js_DestroyScript ; 3041
+JS_ClearScriptTraps ; 3041
+js_FreeAtomMap ; 3041
+JS_NewStringCopyZ ; 2953
+js_AtomizeObject ; 2709
+JS_ValueToBoolean ; 2643
+js_SetLengthProperty ; 2637
+JS_GetOptions ; 2593
+js_ValueToObject ; 2522
+js_ValueToNonNullObject ; 2510
+js_StringToObject ; 2482
+JS_SetElement ; 2448
+js_NumberToString ; 2407
+JS_TypeOfValue ; 2275
+js_NewBufferTokenStream ; 2253
+js_NewTokenStream ; 2253
+js_CloseTokenStream ; 2253
+JS_RemoveRoot ; 2148
+JS_NewDouble ; 2129
+JS_vsnprintf ; 1937
+JS_snprintf ; 1937
+JS_CallFunctionValue ; 1844
+JS_DHashVoidPtrKeyStub ; 1840
+JS_DHashTableOperate ; 1840
+js_SetProtoOrParent ; 1758
+js_DoubleToInteger ; 1729
+JS_SetVersion ; 1531
+js_ValueToFunction ; 1476
+JS_SetPrototype ; 1408
+JS_CeilingLog2 ; 1317
+js_Execute ; 1199
+js_CompileFunctionBody ; 1182
+JS_CompileUCFunctionForPrincipals ; 1182
+js_GetSrcNoteOffset ; 1139
+JS_DHashMatchEntryStub ; 1094
+JS_VersionToString ; 1090
+JS_CompileUCScriptForPrincipals ; 1071
+js_CompileTokenStream ; 1071
+js_CurrentThreadId ; 1058
+JS_IdToValue ; 1046
+js_ConstructObject ; 974
+JS_DestroyScript ; 967
+js_PCToLineNumber ; 967
+JS_DefineProperty ; 930
+JS_GetScriptFilename ; 924
+JS_GetFramePC ; 899
+JS_EvaluateUCScriptForPrincipals ; 892
+JS_PCToLineNumber ; 848
+JS_StringToVersion ; 761
+js_ExecuteRegExp ; 755
+JS_MaybeGC ; 717
+JS_ValueToNumber ; 698
+JS_GetVersion ; 698
+JS_AliasProperty ; 693
+js_AtomizeValue ; 664
+js_BooleanToString ; 664
+js_SetSlotThreadSafe ; 596
+JS_DHashClearEntryStub ; 584
+JS_DHashTableRawRemove ; 584
+JS_DefineObject ; 557
+js_PutCallObject ; 516
+js_GetCallObject ; 516
+js_strchr ; 511
+JS_DefineUCProperty ; 480
+JS_dtostr ; 475
+JS_ValueToInt32 ; 464
+js_ValueToInt32 ; 464
+JS_FinishArenaPool ; 453
+js_NewTryNote ; 441
+js_strtointeger ; 437
+JS_vsmprintf ; 428
+JS_DHashTableInit ; 423
+JS_DHashAllocTable ; 423
+JS_DHashGetStubOps ; 423
+JS_NewDHashTable ; 423
+JS_DHashTableDestroy ; 423
+JS_DHashFreeTable ; 423
+JS_DHashTableFinish ; 423
+js_EmitBreak ; 412
+js_GetAttributes ; 412
+JS_DefineConstDoubles ; 407
+JS_ArenaGrow ; 374
+js_AtomizeInt ; 372
+JS_SetParent ; 345
+JS_CloneFunctionObject ; 343
+JS_IsNativeFrame ; 343
+JS_ReportErrorNumber ; 340
+js_ErrorToException ; 340
+js_ReportErrorNumberVA ; 340
+js_GetErrorMessage ; 340
+js_ExpandErrorArguments ; 340
+js_ReportUncaughtException ; 315
+JS_IsExceptionPending ; 315
+js_ReportErrorAgain ; 315
+js_ErrorFromException ; 315
+JS_LookupUCProperty ; 307
+JS_InitArenaPool ; 293
+PRMJ_Now ; 262
+DllMain@12 ; 235
+JS_ExecuteScript ; 232
+JS_GetFrameFunction ; 226
+PRMJ_LocalGMTDifference ; 175
+JS_GetConstructor ; 175
+JS_SetGlobalObject ; 164
+js_LockGCThing ; 155
+js_NewRegExpObject ; 152
+js_NewRegExp ; 152
+js_InitObjectClass ; 131
+js_InitFunctionClass ; 131
+js_EmitN ; 128
+JS_ArenaFinish ; 124
+js_GC ; 124
+js_SweepAtomState ; 124
+js_MarkAtomState ; 124
+JS_ArenaRealloc ; 124
+js_ForceGC ; 124
+js_FlushPropertyCache ; 122
+js_InitNumberClass ; 114
+JS_smprintf ; 112
+js_DoubleToECMAInt32 ; 112
+js_ValueToECMAInt32 ; 111
+JS_ValueToECMAInt32 ; 111
+JS_SetContextPrivate ; 109
+PRMJ_DSTOffset ; 108
+js_Clear ; 105
+JS_ClearScope ; 105
+JS_NewScriptObject ; 104
+JS_smprintf_free ; 104
+JS_ConvertValue ; 99
+js_GetSrcNote ; 98
+JS_ValueToECMAUint32 ; 93
+js_ValueToECMAUint32 ; 93
+js_printf ; 93
+js_DoubleToECMAUint32 ; 93
+js_DestroyRegExp ; 89
+js_UnlockGCThing ; 89
+js_TryValueOf ; 87
+js_NewSrcNote3 ; 86
+JS_ConvertStub ; 81
+JS_SetPendingException ; 80
+js_InitStringClass ; 79
+JS_GC ; 78
+js_InitArrayClass ; 74
+js_InitDateClass ; 67
+JS_NewContext ; 64
+JS_AddArgumentFormatter ; 64
+js_InitContextForLocking ; 64
+js_NewContext ; 64
+JS_SetBranchCallback ; 64
+JS_ClearRegExpStatics ; 64
+js_InitRegExpStatics ; 64
+js_InitCallClass ; 63
+js_InitRegExpClass ; 61
+js_Enumerate ; 58
+JS_DestroyContext ; 46
+js_DestroyContext ; 46
+js_FreeRegExpStatics ; 46
+js_InitScanner ; 39
+js_NewPrinter ; 36
+js_DestroyPrinter ; 36
+js_GetPrinterOutput ; 36
+JS_FreeArenaPool ; 36
+js_DecompileCode ; 34
+js_EmitContinue ; 33
+js_CheckAccess ; 30
+js_DecompileValueGenerator ; 28
+js_InitMathClass ; 27
+js_InitExceptionClasses ; 25
+js_NewArrayObject ; 24
+js_InitArgumentsClass ; 21
+js_puts ; 20
+js_InitBooleanClass ; 19
+JS_InitStandardClasses ; 19
+js_InitScriptClass ; 19
+js_obj_toString ; 15
+js_GetArgsValue ; 14
+js_GetArgsObject ; 14
+js_AtomizeDouble ; 12
+JS_DestroyIdArray ; 11
+js_NewIdArray ; 11
+JS_GetElement ; 11
+JS_EvaluateScript ; 9
+JS_EvaluateUCScript ; 9
+JS_DecompileFunction ; 8
+js_DecompileFunction ; 8
+JS_NewString ; 8
+js_SetStringBytes ; 8
+JS_GetArrayLength ; 7
+JS_NewArrayObject ; 7
+JS_IsArrayObject ; 7
+JS_ValueToObject ; 7
+JS_DefineElement ; 6
+js_DecompileScript ; 6
+JS_PushArguments ; 4
+JS_PopArguments ; 4
+JS_PushArgumentsVA ; 4
+js_PutArgsObject ; 2
+JS_SetGCCallbackRT ; 2
+JS_Init ; 1
+js_SetupLocks ; 1
+js_InitRuntimeNumberState ; 1
+js_InitRuntimeStringState ; 1
+js_InitLock ; 1
+js_InitGC ; 1
+js_InitAtomState ; 1
+js_InitStringGlobals ; 1
diff --git a/third_party/linenoise/Makefile b/third_party/linenoise/Makefile
new file mode 100644
index 0000000..a285410
--- /dev/null
+++ b/third_party/linenoise/Makefile
@@ -0,0 +1,7 @@
+linenoise_example: linenoise.h linenoise.c
+
+linenoise_example: linenoise.c example.c
+ $(CC) -Wall -W -Os -g -o linenoise_example linenoise.c example.c
+
+clean:
+ rm -f linenoise_example
diff --git a/third_party/linenoise/README.markdown b/third_party/linenoise/README.markdown
new file mode 100644
index 0000000..6c693ed
--- /dev/null
+++ b/third_party/linenoise/README.markdown
@@ -0,0 +1,47 @@
+# Linenoise
+
+A minimal, zero-config, BSD licensed, readline replacement.
+
+News: linenoise now includes minimal completion support, thanks to Pieter Noordhuis (@pnoordhuis).
+
+News: linenoise is now part of [Android](http://android.git.kernel.org/?p=platform/system/core.git;a=tree;f=liblinenoise;h=56450eaed7f783760e5e6a5993ef75cde2e29dea;hb=HEAD "Android")!
+
+## Can a line editing library be 20k lines of code?
+
+Line editing with some support for history is a really important feature for command line utilities. Instead of retyping almost the same stuff again and again, it's just much better to hit the up arrow and edit after a syntax error, or to try a slightly different command. But apparently code dealing with terminals is some sort of Black Magic: readline is 30k lines of code, libedit 20k. Is it reasonable to link small utilities against huge libraries just to get minimal support for line editing?
+
+So what usually happens is either:
+
+ * Large programs with configure scripts disabling line editing if readline is not present in the system, or not supporting it at all since readline is GPL licensed and libedit (the BSD clone) is not as well known and available as readline is (real-world example of this problem: Tclsh).
+ * Smaller programs that don't use a configure script and don't support line editing at all (a problem we had with Redis-cli, for instance).
+
+The result is a pollution of binaries without line editing support.
+
+So I spent more or less two hours doing a reality check, resulting in this little library: is it *really* necessary for a line editing library to be 20k lines of code? Apparently not: it is possible to get a very small, zero-configuration, trivial-to-embed library that solves the problem. Smaller programs will just include this, supporting line editing out of the box. Larger programs may use this little library, or just check with configure whether readline/libedit is available and resort to linenoise if not.
+
+## Terminals, in 2010.
+
+Apparently almost every terminal you can happen to use today has some kind of support for VT100-like escape sequences. So I tried to write a lib using just very basic VT100 features. The resulting library appears to work everywhere I tried to use it.
+
+Since it's so young I guess there are a few bugs, or the lib may not compile or work with some operating system, but it's a matter of a few weeks and eventually we'll get it right, and there will be no excuse for shipping command line tools without built-in line editing support.
+
+The library is currently less than 400 lines of code. To use it in your project, just look at the *example.c* file in the source distribution; it is trivial. Linenoise is BSD code, so you can use it in both free software and commercial software.
+
+## Tested with...
+
+ * Linux text only console ($TERM = linux)
+ * Linux KDE terminal application ($TERM = xterm)
+ * Linux xterm ($TERM = xterm)
+ * Mac OS X iTerm ($TERM = xterm)
+ * Mac OS X default Terminal.app ($TERM = xterm)
+ * OpenBSD 4.5 through an OSX Terminal.app ($TERM = screen)
+ * IBM AIX 6.1
+ * FreeBSD xterm ($TERM = xterm)
+
+Please test it everywhere you can and report back!
+
+## Let's push this forward!
+
+Please fork it, add something interesting, and send me a pull request. Especially interesting are fixes, new key bindings, and completion.
+
+Send feedback to antirez at gmail
diff --git a/third_party/linenoise/example.c b/third_party/linenoise/example.c
new file mode 100644
index 0000000..ea0b515
--- /dev/null
+++ b/third_party/linenoise/example.c
@@ -0,0 +1,27 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include "linenoise.h"
+
+
+void completion(const char *buf, linenoiseCompletions *lc) {
+ if (buf[0] == 'h') {
+ linenoiseAddCompletion(lc,"hello");
+ linenoiseAddCompletion(lc,"hello there");
+ }
+}
+
+int main(void) {
+ char *line;
+
+ linenoiseSetCompletionCallback(completion);
+ linenoiseHistoryLoad("history.txt"); /* Load the history at startup */
+ while((line = linenoise("hello> ")) != NULL) {
+ if (line[0] != '\0') {
+ printf("echo: '%s'\n", line);
+ linenoiseHistoryAdd(line);
+ linenoiseHistorySave("history.txt"); /* Save every new entry */
+ }
+ free(line);
+ }
+ return 0;
+}
diff --git a/third_party/linenoise/history.txt b/third_party/linenoise/history.txt
new file mode 100644
index 0000000..70858d8
--- /dev/null
+++ b/third_party/linenoise/history.txt
@@ -0,0 +1,3 @@
+hi
+this is fun
+hel
diff --git a/third_party/linenoise/linenoise.cpp b/third_party/linenoise/linenoise.cpp
new file mode 100644
index 0000000..ce3e3c5
--- /dev/null
+++ b/third_party/linenoise/linenoise.cpp
@@ -0,0 +1,836 @@
+/* linenoise.c -- guerrilla line editing library against the idea that a
+ * line editing lib needs to be 20,000 lines of C code.
+ *
+ * You can find the latest source code at:
+ *
+ * http://github.com/antirez/linenoise
+ *
+ * Makes a number of crazy assumptions that happen to be true in 99.9999% of
+ * the 2010 UNIX computers around.
+ *
+ * Copyright (c) 2010, Salvatore Sanfilippo <antirez at gmail dot com>
+ * Copyright (c) 2010, Pieter Noordhuis <pcnoordhuis at gmail dot com>
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Redis nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * References:
+ * - http://invisible-island.net/xterm/ctlseqs/ctlseqs.html
+ * - http://www.3waylabs.com/nw/WWW/products/wizcon/vt220.html
+ *
+ * Todo list:
+ * - Switch to gets() if $TERM is something we can't support.
+ * - Filter bogus Ctrl+<char> combinations.
+ * - Win32 support
+ *
+ * Bloat:
+ * - Completion?
+ * - History search like Ctrl+r in readline?
+ *
+ * List of escape sequences used by this program; we do everything with
+ * just three sequences. In order to be so cheap we may get some
+ * flickering on slow terminals, but the fewer the sequences,
+ * the more compatible we are.
+ *
+ * CHA (Cursor Horizontal Absolute)
+ * Sequence: ESC [ n G
+ * Effect: moves cursor to column n (1 based)
+ *
+ * EL (Erase Line)
+ * Sequence: ESC [ n K
+ * Effect: if n is 0 or missing, clear from cursor to end of line
+ * Effect: if n is 1, clear from beginning of line to cursor
+ * Effect: if n is 2, clear entire line
+ *
+ * CUF (CUrsor Forward)
+ * Sequence: ESC [ n C
+ * Effect: moves cursor forward of n chars
+ *
+ * The following are used to clear the screen: ESC [ H ESC [ 2 J
+ * This is actually composed of two sequences:
+ *
+ * cursorhome
+ * Sequence: ESC [ H
+ * Effect: moves the cursor to upper left corner
+ *
+ * ED2 (Clear entire screen)
+ * Sequence: ESC [ 2 J
+ * Effect: clear the whole screen
+ *
+ */
+
+#ifdef _WIN32
+
+#include <conio.h>
+#include <windows.h>
+#include <stdio.h>
+#include <io.h>
+#include <errno.h>
+#define snprintf _snprintf
+#define strcasecmp _stricmp
+#define strdup _strdup
+#define isatty _isatty
+#define write _write
+#define STDIN_FILENO 0
+
+static HANDLE console_in, console_out;
+static DWORD oldMode;
+
+
+#else /* _WIN32 */
+
+#include <termios.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+
+static struct termios orig_termios; /* in order to restore at exit */
+#endif /* _WIN32 */
+
+#include "linenoise.h"
+
+#define LINENOISE_DEFAULT_HISTORY_MAX_LEN 100
+#define LINENOISE_MAX_LINE 4096
+static const char *unsupported_term[] = {"dumb","cons25",NULL};
+static linenoiseCompletionCallback *completionCallback = NULL;
+
+static int rawmode = 0; /* for atexit() function to check if restore is needed*/
+static int atexit_registered = 0; /* register atexit just 1 time */
+static int history_max_len = LINENOISE_DEFAULT_HISTORY_MAX_LEN;
+static int history_len = 0;
+static int history_index = 0;
+char **history = NULL;
+
+static void linenoiseAtExit(void);
+int linenoiseHistoryAdd(const char *line);
+
+static int isUnsupportedTerm(void) {
+ char *term = getenv("TERM");
+ int j;
+
+ if (term == NULL) return 0;
+ for (j = 0; unsupported_term[j]; j++)
+ if (!strcasecmp(term,unsupported_term[j])) return 1;
+ return 0;
+}
+
+static void freeHistory(void) {
+ if (history) {
+ int j;
+
+ for (j = 0; j < history_len; j++)
+ free(history[j]);
+ free(history);
+ }
+}
+
+static int enableRawMode(int fd) {
+#ifdef _WIN32
+ if (!console_in) {
+ console_in = GetStdHandle(STD_INPUT_HANDLE);
+ console_out = GetStdHandle(STD_OUTPUT_HANDLE);
+
+ GetConsoleMode(console_in, &oldMode);
+        SetConsoleMode(console_in, oldMode & ~(ENABLE_LINE_INPUT | ENABLE_ECHO_INPUT));
+ }
+ return 0;
+#else
+ struct termios raw;
+
+ if (!isatty(STDIN_FILENO)) goto fatal;
+ if (!atexit_registered) {
+ atexit(linenoiseAtExit);
+ atexit_registered = 1;
+ }
+ if (tcgetattr(fd,&orig_termios) == -1) goto fatal;
+
+ raw = orig_termios; /* modify the original mode */
+ /* input modes: no break, no CR to NL, no parity check, no strip char,
+ * no start/stop output control. */
+ raw.c_iflag &= ~(BRKINT | ICRNL | INPCK | ISTRIP | IXON);
+ /* output modes - disable post processing */
+ raw.c_oflag &= ~(OPOST);
+ /* control modes - set 8 bit chars */
+ raw.c_cflag |= (CS8);
+    /* local modes - echoing off, canonical off, no extended functions,
+ * no signal chars (^Z,^C) */
+ raw.c_lflag &= ~(ECHO | ICANON | IEXTEN | ISIG);
+ /* control chars - set return condition: min number of bytes and timer.
+ * We want read to return every single byte, without timeout. */
+ raw.c_cc[VMIN] = 1; raw.c_cc[VTIME] = 0; /* 1 byte, no timer */
+
+ /* put terminal in raw mode after flushing */
+ if (tcsetattr(fd,TCSADRAIN,&raw) < 0) goto fatal;
+ rawmode = 1;
+ return 0;
+
+fatal:
+ errno = ENOTTY;
+ return -1;
+#endif
+}
+
+static void disableRawMode(int fd) {
+#ifdef _WIN32
+ SetConsoleMode(console_in, oldMode);
+ console_in = 0;
+ console_out = 0;
+#else
+ /* Don't even check the return value as it's too late. */
+ if (rawmode && tcsetattr(fd,TCSADRAIN,&orig_termios) != -1)
+ rawmode = 0;
+#endif
+}
+
+/* At exit we'll try to fix the terminal to the initial conditions. */
+static void linenoiseAtExit(void) {
+ disableRawMode(STDIN_FILENO);
+ freeHistory();
+}
+
+static int getColumns(void) {
+#ifdef _WIN32
+ CONSOLE_SCREEN_BUFFER_INFO inf = { 0 };
+ GetConsoleScreenBufferInfo(console_out, &inf);
+ return inf.dwSize.X;
+#else
+ struct winsize ws;
+
+ if (ioctl(1, TIOCGWINSZ, &ws) == -1) return 80;
+ return ws.ws_col;
+#endif
+}
+
+#ifdef _WIN32
+static void output(const char* str, size_t len, int x, int y)
+{
+ COORD pos = { (SHORT)x, (SHORT)y };
+ DWORD count = 0;
+ WriteConsoleOutputCharacterA(console_out, str, len, pos, &count);
+}
+#endif
+
+static void refreshLine(int fd, const char *prompt, char *buf, size_t len, size_t pos, size_t cols) {
+ size_t plen = strlen(prompt);
+
+ while((plen+pos) >= cols) {
+ buf++;
+ len--;
+ pos--;
+ }
+ while (plen+len > cols) {
+ len--;
+ }
+
+#ifdef _WIN32
+ CONSOLE_SCREEN_BUFFER_INFO inf = { 0 };
+ GetConsoleScreenBufferInfo(console_out, &inf);
+ output(prompt, plen, 0, inf.dwCursorPosition.Y);
+ output(buf, len, plen, inf.dwCursorPosition.Y);
+ if (plen + len < (size_t)inf.dwSize.X) {
+ /* Blank to EOL */
+ char* tmp = (char*)malloc(inf.dwSize.X - (plen + len));
+ memset(tmp, ' ', inf.dwSize.X - (plen + len));
+ output(tmp, inf.dwSize.X - (plen + len), len + plen, inf.dwCursorPosition.Y);
+ free(tmp);
+ }
+ inf.dwCursorPosition.X = (SHORT)(pos + plen);
+ SetConsoleCursorPosition(console_out, inf.dwCursorPosition);
+#else
+ {
+ char seq[64];
+ int highlight = -1;
+
+ if (pos < len) {
+ /* this scans for a brace matching buf[pos] to highlight */
+ int scanDirection = 0;
+ if (strchr("}])", buf[pos]))
+ scanDirection = -1; /* backwards */
+ else if (strchr("{[(", buf[pos]))
+ scanDirection = 1; /* forwards */
+
+ if (scanDirection) {
+ int unmatched = scanDirection;
+ int i;
+ for(i = pos + scanDirection; i >= 0 && i < (int)len; i += scanDirection){
+ /* TODO: the right thing when inside a string */
+ if (strchr("}])", buf[i]))
+ unmatched--;
+ else if (strchr("{[(", buf[i]))
+ unmatched++;
+
+ if (unmatched == 0) {
+ highlight = i;
+ break;
+ }
+ }
+ }
+ }
+
+ /* Cursor to left edge */
+ snprintf(seq,64,"\x1b[1G");
+ if (write(fd,seq,strlen(seq)) == -1) return;
+ /* Write the prompt and the current buffer content */
+ if (write(fd,prompt,strlen(prompt)) == -1) return;
+
+ if (highlight == -1) {
+ if (write(fd,buf,len) == -1) return;
+ } else {
+ if (write(fd,buf,highlight) == -1) return;
+ if (write(fd,"\x1b[1;34m",7) == -1) return; /* bright blue (visible with both B&W bg) */
+ if (write(fd,&buf[highlight],1) == -1) return;
+ if (write(fd,"\x1b[0m",4) == -1) return; /* reset */
+ if (write(fd,buf+highlight+1,len-highlight-1) == -1) return;
+ }
+
+ /* Erase to right */
+ snprintf(seq,64,"\x1b[0K");
+ if (write(fd,seq,strlen(seq)) == -1) return;
+ /* Move cursor to original position. */
+ snprintf(seq,64,"\x1b[1G\x1b[%dC", (int)(pos+plen));
+ if (write(fd,seq,strlen(seq)) == -1) return;
+ }
+#endif
+}
+
+/* Note that this should parse some special keys into their emacs ctrl-key combos
+ * Return of -1 signifies unrecognized code
+ */
+static char linenoiseReadChar(int fd){
+#ifdef _WIN32
+ INPUT_RECORD rec;
+ DWORD count;
+ do {
+ ReadConsoleInputA(console_in, &rec, 1, &count);
+ } while (rec.EventType != KEY_EVENT || !rec.Event.KeyEvent.bKeyDown);
+
+ if (rec.Event.KeyEvent.uChar.AsciiChar == 0) {
+ /* handle keys that aren't converted to ASCII */
+ switch (rec.Event.KeyEvent.wVirtualKeyCode) {
+ case VK_LEFT: return 2; /* ctrl-b */
+ case VK_RIGHT: return 6; /* ctrl-f */
+ case VK_UP: return 16; /* ctrl-p */
+ case VK_DOWN: return 14; /* ctrl-n */
+ case VK_DELETE: return 127; /* ascii DEL byte */
+ case VK_HOME: return 1; /* ctrl-a */
+ case VK_END: return 5; /* ctrl-e */
+ default: return -1;
+ }
+ }
+ return rec.Event.KeyEvent.uChar.AsciiChar;
+#else
+ char c;
+ int nread;
+ char seq[2], seq2[2];
+
+ nread = read(fd,&c,1);
+ if (nread <= 0) return 0;
+
+#if defined(_DEBUG)
+ if (c == 28) { /* ctrl-\ */
+ /* special debug mode. prints all keys hit. ctrl-c to get out */
+ printf("\x1b[1G\n"); /* go to first column of new line */
+ while (true) {
+ char keys[10];
+ int ret = read(fd, keys, 10);
+ int i;
+
+ if (ret <= 0) {
+ printf("\nret: %d\n", ret);
+ }
+
+ for (i=0; i < ret; i++)
+ printf("%d ", (int)keys[i]);
+ printf("\x1b[1G\n"); /* go to first column of new line */
+
+ if (keys[0] == 3) /* ctrl-c. may cause signal instead */
+ return -1;
+ }
+ }
+#endif
+
+ if (c == 27) { /* escape */
+ if (read(fd,seq,2) == -1) return 0;
+ if (seq[0] == 91){
+ if (seq[1] == 68) { /* left arrow */
+ return 2; /* ctrl-b */
+ } else if (seq[1] == 67) { /* right arrow */
+ return 6; /* ctrl-f */
+ } else if (seq[1] == 65) { /* up arrow */
+ return 16; /* ctrl-p */
+ } else if (seq[1] == 66) { /* down arrow */
+ return 14; /* ctrl-n */
+ } else if (seq[1] > 48 && seq[1] < 57) {
+ /* extended escape */
+ if (read(fd,seq2,2) == -1) return 0;
+ if (seq2[0] == 126) {
+ if (seq[1] == 49 || seq[1] == 55) { /* home (linux console and rxvt based) */
+ return 1; /* ctrl-a */
+ } else if (seq[1] == 52 || seq[1] == 56 ) { /* end (linux console and rxvt based) */
+ return 5; /* ctrl-e */
+ } else if (seq[1] == 51) { /* delete */
+ return 127; /* ascii DEL byte */
+ } else {
+ return -1;
+ }
+ } else {
+ return -1;
+ }
+ if (seq[1] == 51 && seq2[0] == 126) { /* delete */
+ return 127; /* ascii DEL byte */
+ } else {
+ return -1;
+ }
+ } else {
+ return -1;
+ }
+ } else if (seq[0] == 79){
+ if (seq[1] == 72) { /* home (xterm based) */
+ return 1; /* ctrl-a */
+ } else if (seq[1] == 70) { /* end (xterm based) */
+ return 5; /* ctrl-e */
+ } else {
+ return -1;
+ }
+ } else {
+ return -1;
+ }
+ } else if (c == 127) {
+ /* some consoles use 127 for backspace rather than delete.
+ * we only use it for delete */
+ return 8;
+ }
+
+ return c; /* normalish character */
+#endif
+}
+
+static void beep() {
+ /* doesn't do anything on windows but harmless */
+ fprintf(stderr, "\x7");
+ fflush(stderr);
+}
+
+static void freeCompletions(linenoiseCompletions *lc) {
+ size_t i;
+ for (i = 0; i < lc->len; i++)
+ free(lc->cvec[i]);
+ if (lc->cvec != NULL)
+ free(lc->cvec);
+}
+
+static int completeLine(int fd, const char *prompt, char *buf, size_t buflen, size_t *len, size_t *pos, size_t cols) {
+ linenoiseCompletions lc = { 0, NULL };
+ int nwritten;
+ char c = 0;
+
+ completionCallback(buf,&lc);
+ if (lc.len == 0) {
+ beep();
+ } else {
+ size_t stop = 0, i = 0;
+ size_t clen;
+
+ while(!stop) {
+ /* Show completion or original buffer */
+ if (i < lc.len) {
+ clen = strlen(lc.cvec[i]);
+ refreshLine(fd,prompt,lc.cvec[i],clen,clen,cols);
+ } else {
+ refreshLine(fd,prompt,buf,*len,*pos,cols);
+ }
+
+ do {
+ c = linenoiseReadChar(fd);
+ } while (c == (char)-1);
+
+ switch(c) {
+ case 0:
+ freeCompletions(&lc);
+ return -1;
+ case 9: /* tab */
+ i = (i+1) % (lc.len+1);
+ if (i == lc.len) beep();
+ break;
+ case 27: /* escape */
+ /* Re-show original buffer */
+ if (i < lc.len) {
+ refreshLine(fd,prompt,buf,*len,*pos,cols);
+ }
+ stop = 1;
+ break;
+ default:
+ /* Update buffer and return */
+ if (i < lc.len) {
+ nwritten = snprintf(buf,buflen,"%s",lc.cvec[i]);
+ *len = *pos = nwritten;
+ }
+ stop = 1;
+ break;
+ }
+ }
+ }
+
+ freeCompletions(&lc);
+ return c; /* Return last read character */
+}
+
+void linenoiseClearScreen(void) {
+#ifdef _WIN32
+ COORD coord = {0, 0};
+ CONSOLE_SCREEN_BUFFER_INFO inf;
+ DWORD count;
+ DWORD size;
+
+ GetConsoleScreenBufferInfo(console_out, &inf);
+ size = inf.dwSize.X * inf.dwSize.Y;
+ FillConsoleOutputCharacterA(console_out, ' ', size, coord, &count );
+ SetConsoleCursorPosition(console_out, coord);
+#else
+ if (write(1,"\x1b[H\x1b[2J",7) <= 0) {
+ /* nothing to do, just to avoid warning. */
+ }
+#endif
+}
+
+static int linenoisePrompt(int fd, char *buf, size_t buflen, const char *prompt) {
+ size_t plen = strlen(prompt);
+ size_t pos = 0;
+ size_t len = 0;
+ size_t cols = getColumns();
+
+ buf[0] = '\0';
+ buflen--; /* Make sure there is always space for the nulterm */
+
+    /* The latest history entry is always our current buffer, which
+     * initially is just an empty string. */
+ linenoiseHistoryAdd("");
+ history_index = history_len-1;
+
+ if (write(1,prompt,plen) == -1) return -1;
+ while(1) {
+ char c = linenoiseReadChar(fd);
+
+ if (c == 0) return len;
+ if (c == (char)-1) {
+ refreshLine(fd,prompt,buf,len,pos,cols);
+ continue;
+ }
+
+ /* Only autocomplete when the callback is set. It returns < 0 when
+ * there was an error reading from fd. Otherwise it will return the
+ * character that should be handled next. */
+ if (c == 9 && completionCallback != NULL) { /* tab */
+ /* ignore tabs used for indentation */
+ if (pos == 0) continue;
+
+ c = completeLine(fd,prompt,buf,buflen,&len,&pos,cols);
+ /* Return on errors */
+ if (c < 0) return len;
+ /* Read next character when 0 */
+ if (c == 0) continue;
+ }
+
+ switch(c) {
+ case 13: /* enter */
+ history_len--;
+ free(history[history_len]);
+ return (int)len;
+ case 3: /* ctrl-c */
+ errno = EAGAIN;
+ return -1;
+ case 127: /* delete */
+ if (len > 0 && pos < len) {
+ memmove(buf+pos,buf+pos+1,len-pos-1);
+ len--;
+ buf[len] = '\0';
+ refreshLine(fd,prompt,buf,len,pos,cols);
+ }
+ break;
+ case 8: /* backspace or ctrl-h */
+ if (pos > 0 && len > 0) {
+ memmove(buf+pos-1,buf+pos,len-pos);
+ pos--;
+ len--;
+ buf[len] = '\0';
+ refreshLine(fd,prompt,buf,len,pos,cols);
+ }
+ break;
+ case 4: /* ctrl-d, remove char at right of cursor */
+ if (len > 1 && pos < (len-1)) {
+ memmove(buf+pos,buf+pos+1,len-pos);
+ len--;
+ buf[len] = '\0';
+ refreshLine(fd,prompt,buf,len,pos,cols);
+ } else if (len == 0) {
+ history_len--;
+ free(history[history_len]);
+ return -1;
+ }
+ break;
+ case 20: /* ctrl-t */
+ if (pos > 0 && pos < len) {
+ int aux = buf[pos-1];
+ buf[pos-1] = buf[pos];
+ buf[pos] = aux;
+ if (pos != len-1) pos++;
+ refreshLine(fd,prompt,buf,len,pos,cols);
+ }
+ break;
+ case 2: /* ctrl-b */ /* left arrow */
+ if (pos > 0) {
+ pos--;
+ refreshLine(fd,prompt,buf,len,pos,cols);
+ }
+ break;
+ case 6: /* ctrl-f */
+ /* right arrow */
+ if (pos != len) {
+ pos++;
+ refreshLine(fd,prompt,buf,len,pos,cols);
+ }
+ break;
+ case 16: /* ctrl-p */
+ case 14: /* ctrl-n */
+ /* up and down arrow: history */
+ if (history_len > 1) {
+                /* Update the current history entry before
+                 * overwriting it with the next one. */
+ free(history[history_index]);
+ history[history_index] = strdup(buf);
+ /* Show the new entry */
+ history_index += (c == 16) ? -1 : 1;
+ if (history_index < 0) {
+ history_index = 0;
+ break;
+ } else if (history_index >= history_len) {
+ history_index = history_len-1;
+ break;
+ }
+ strncpy(buf,history[history_index],buflen);
+ buf[buflen] = '\0';
+ len = pos = strlen(buf);
+ refreshLine(fd,prompt,buf,len,pos,cols);
+ }
+ break;
+ case 27: /* escape sequence */
+ break; /* should be handled by linenoiseReadChar */
+ default:
+ if (len < buflen) {
+ if (len == pos) {
+ buf[pos] = c;
+ pos++;
+ len++;
+ buf[len] = '\0';
+ if (plen+len < cols) {
+ /* Avoid a full update of the line in the
+ * trivial case. */
+ if (write(1,&c,1) == -1) return -1;
+ } else {
+ refreshLine(fd,prompt,buf,len,pos,cols);
+ }
+ } else {
+ memmove(buf+pos+1,buf+pos,len-pos);
+ buf[pos] = c;
+ len++;
+ pos++;
+ buf[len] = '\0';
+ refreshLine(fd,prompt,buf,len,pos,cols);
+ }
+ }
+ break;
+ case 21: /* Ctrl+u, delete the whole line. */
+ buf[0] = '\0';
+ pos = len = 0;
+ refreshLine(fd,prompt,buf,len,pos,cols);
+ break;
+ case 11: /* Ctrl+k, delete from current to end of line. */
+ buf[pos] = '\0';
+ len = pos;
+ refreshLine(fd,prompt,buf,len,pos,cols);
+ break;
+ case 1: /* Ctrl+a, go to the start of the line */
+ pos = 0;
+ refreshLine(fd,prompt,buf,len,pos,cols);
+ break;
+ case 5: /* ctrl+e, go to the end of the line */
+ pos = len;
+ refreshLine(fd,prompt,buf,len,pos,cols);
+ break;
+ case 12: /* ctrl+l, clear screen */
+ linenoiseClearScreen();
+ refreshLine(fd,prompt,buf,len,pos,cols);
+ }
+ }
+ return len;
+}
+
+static int linenoiseRaw(char *buf, size_t buflen, const char *prompt) {
+ int fd = STDIN_FILENO;
+ int count;
+
+ if (buflen == 0) {
+ errno = EINVAL;
+ return -1;
+ }
+ if (!isatty(STDIN_FILENO)) {
+ if (fgets(buf, buflen, stdin) == NULL) return -1;
+ count = strlen(buf);
+ if (count && buf[count-1] == '\n') {
+ count--;
+ buf[count] = '\0';
+ }
+ } else {
+ if (enableRawMode(fd) == -1) return -1;
+ count = linenoisePrompt(fd, buf, buflen, prompt);
+ disableRawMode(fd);
+ printf("\n");
+ }
+ return count;
+}
+
+char *linenoise(const char *prompt) {
+ char buf[LINENOISE_MAX_LINE];
+ int count;
+
+ if (isUnsupportedTerm()) {
+ size_t len;
+
+ printf("%s",prompt);
+ fflush(stdout);
+ if (fgets(buf,LINENOISE_MAX_LINE,stdin) == NULL) return NULL;
+ len = strlen(buf);
+ while(len && (buf[len-1] == '\n' || buf[len-1] == '\r')) {
+ len--;
+ buf[len] = '\0';
+ }
+ return strdup(buf);
+ } else {
+ count = linenoiseRaw(buf,LINENOISE_MAX_LINE,prompt);
+ if (count == -1) return NULL;
+ return strdup(buf);
+ }
+}
+
+/* Register a callback function to be called for tab-completion. */
+void linenoiseSetCompletionCallback(linenoiseCompletionCallback *fn) {
+ completionCallback = fn;
+}
+
+void linenoiseAddCompletion(linenoiseCompletions *lc, const char *str) {
+ size_t len = strlen(str);
+ char *copy = (char*)malloc(len+1);
+ memcpy(copy,str,len+1);
+ lc->cvec = (char**)realloc(lc->cvec,sizeof(char*)*(lc->len+1));
+ lc->cvec[lc->len++] = copy;
+}
+
+/* Using a circular buffer is smarter, but a bit more complex to handle. */
+int linenoiseHistoryAdd(const char *line) {
+ char *linecopy;
+
+ if (history_max_len == 0) return 0;
+ if (history == NULL) {
+ history = (char**)malloc(sizeof(char*)*history_max_len);
+ if (history == NULL) return 0;
+ memset(history,0,(sizeof(char*)*history_max_len));
+ }
+ linecopy = strdup(line);
+ if (!linecopy) return 0;
+ if (history_len == history_max_len) {
+ free(history[0]);
+ memmove(history,history+1,sizeof(char*)*(history_max_len-1));
+ history_len--;
+ }
+ history[history_len] = linecopy;
+ history_len++;
+ return 1;
+}
+
+int linenoiseHistorySetMaxLen(int len) {
+ char **newHistory;
+
+ if (len < 1) return 0;
+ if (history) {
+ int tocopy = history_len;
+
+ newHistory = (char**)malloc(sizeof(char*)*len);
+ if (newHistory == NULL) return 0;
+ if (len < tocopy) tocopy = len;
+ memcpy(newHistory,history+(history_max_len-tocopy), sizeof(char*)*tocopy);
+ free(history);
+ history = newHistory;
+ }
+ history_max_len = len;
+ if (history_len > history_max_len)
+ history_len = history_max_len;
+ return 1;
+}
+
+/* Save the history in the specified file. On success 0 is returned;
+ * otherwise -1 is returned. */
+int linenoiseHistorySave(const char *filename) {
+ FILE *fp = fopen(filename,"w");
+ int j;
+
+ if (fp == NULL) return -1;
+ for (j = 0; j < history_len; j++){
+ if (history[j][0] != '\0')
+ fprintf(fp,"%s\n",history[j]);
+ }
+ fclose(fp);
+ return 0;
+}
+
+/* Load the history from the specified file. If the file does not exist
+ * zero is returned and no operation is performed.
+ *
+ * If the file exists and the operation succeeded 0 is returned, otherwise
+ * on error -1 is returned. */
+int linenoiseHistoryLoad(const char *filename) {
+ FILE *fp = fopen(filename,"r");
+ char buf[LINENOISE_MAX_LINE];
+
+ if (fp == NULL) return -1;
+
+ while (fgets(buf,LINENOISE_MAX_LINE,fp) != NULL) {
+ char *p;
+
+ p = strchr(buf,'\r');
+ if (!p) p = strchr(buf,'\n');
+ if (p) *p = '\0';
+ if (p != buf)
+ linenoiseHistoryAdd(buf);
+ }
+ fclose(fp);
+ return 0;
+}
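
For illustration only (not part of the patch), a minimal sketch of how a caller might combine the history helpers above with the linenoise() entry point; the ".demo_history" file name and the "demo> " prompt are hypothetical:

    /* Minimal sketch, assuming only the linenoise API added in this commit.
     * ".demo_history" is an illustrative file name. */
    #include <stdio.h>
    #include <stdlib.h>
    #include "linenoise.h"

    int main(void) {
        char *line;
        linenoiseHistorySetMaxLen(200);            /* keep at most 200 entries */
        linenoiseHistoryLoad(".demo_history");     /* best effort; fails if the file is absent */
        while ((line = linenoise("demo> ")) != NULL) {
            if (line[0] != '\0') {
                linenoiseHistoryAdd(line);             /* record non-empty lines */
                linenoiseHistorySave(".demo_history"); /* rewrite the history file */
            }
            free(line);                                /* linenoise() returns strdup'd memory */
        }
        return 0;
    }
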
diff --git a/third_party/linenoise/linenoise.h b/third_party/linenoise/linenoise.h
new file mode 100644
index 0000000..72916e5
--- /dev/null
+++ b/third_party/linenoise/linenoise.h
@@ -0,0 +1,55 @@
+/* linenoise.h -- guerrilla line editing library against the idea that a
+ * line editing lib needs to be 20,000 lines of C code.
+ *
+ * See linenoise.c for more information.
+ *
+ * Copyright (c) 2010, Salvatore Sanfilippo <antirez at gmail dot com>
+ * Copyright (c) 2010, Pieter Noordhuis <pcnoordhuis at gmail dot com>
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Redis nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __LINENOISE_H
+#define __LINENOISE_H
+
+typedef struct linenoiseCompletions {
+ size_t len;
+ char **cvec;
+} linenoiseCompletions;
+
+typedef void(linenoiseCompletionCallback)(const char *, linenoiseCompletions *);
+void linenoiseSetCompletionCallback(linenoiseCompletionCallback *);
+void linenoiseAddCompletion(linenoiseCompletions *, const char *);
+
+char *linenoise(const char *prompt);
+int linenoiseHistoryAdd(const char *line);
+int linenoiseHistorySetMaxLen(int len);
+int linenoiseHistorySave(const char *filename);
+int linenoiseHistoryLoad(const char *filename);
+void linenoiseClearScreen(void);
+
+#endif /* __LINENOISE_H */
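
As a usage illustration (not part of the patch), a short sketch of registering the tab-completion callback declared above; the "help"/"hello" candidate strings are hypothetical:

    /* Minimal sketch, assuming the completion API declared in linenoise.h. */
    #include "linenoise.h"

    static void demoCompletion(const char *buf, linenoiseCompletions *lc) {
        if (buf[0] == 'h') {                     /* offer candidates for lines starting with 'h' */
            linenoiseAddCompletion(lc, "help");
            linenoiseAddCompletion(lc, "hello");
        }
    }

    void setupLineEditing(void) {
        linenoiseSetCompletionCallback(demoCompletion);  /* call once before reading input */
    }
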
diff --git a/third_party/linenoise/linenoise_win32.cpp b/third_party/linenoise/linenoise_win32.cpp
new file mode 100644
index 0000000..0a2c334
--- /dev/null
+++ b/third_party/linenoise/linenoise_win32.cpp
@@ -0,0 +1,442 @@
+/* linenoise_win32.c -- Linenoise win32 port.
+ *
+ * Modifications copyright 2010, Jon Griffiths <jon_p_griffiths at yahoo dot com>.
+ * All rights reserved.
+ * Based on linenoise, copyright 2010, Salvatore Sanfilippo <antirez at gmail dot com>.
+ * The original linenoise can be found at: http://github.com/antirez/linenoise
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Redis nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Todo list:
+ * Actually switch to/from raw mode so emacs key combos work.
+ *  Set a console handler to clean up on exit.
+ */
+#include <conio.h>
+#include <windows.h>
+#include <stdio.h>
+
+/* If ALT_KEYS is defined, emacs key combos using ALT instead of CTRL are
+ * available. At this time, you don't get key repeats when enabled though. */
+/* #define ALT_KEYS */
+
+static HANDLE console_in, console_out;
+
+#define LINENOISE_DEFAULT_HISTORY_MAX_LEN 100
+#define LINENOISE_MAX_LINE 4096
+
+static int history_max_len = LINENOISE_DEFAULT_HISTORY_MAX_LEN;
+static int history_len = 0;
+char** history = NULL;
+
+int linenoiseHistoryAdd(const char* line);
+
+static int enableRawMode()
+{
+ if (!console_in)
+ {
+ console_in = GetStdHandle(STD_INPUT_HANDLE);
+ console_out = GetStdHandle(STD_OUTPUT_HANDLE);
+ }
+ return 0;
+}
+
+static void disableRawMode()
+{
+ /* Nothing to do yet */
+}
+
+static void output(const char* str,
+ size_t len,
+ int x,
+ int y)
+{
+ COORD pos = { (SHORT)x, (SHORT)y };
+ DWORD count = 0;
+ WriteConsoleOutputCharacterA(console_out, str, len, pos, &count);
+}
+
+static void refreshLine(const char* prompt,
+ char* buf,
+ size_t len,
+ size_t pos,
+ size_t cols)
+{
+ size_t plen = strlen(prompt);
+
+ while ((plen + pos) >= cols)
+ {
+ buf++;
+ len--;
+ pos--;
+ }
+ while (plen + len > cols)
+ {
+ len--;
+ }
+
+ CONSOLE_SCREEN_BUFFER_INFO inf = { 0 };
+ GetConsoleScreenBufferInfo(console_out, &inf);
+ size_t prompt_len = strlen(prompt);
+ output(prompt, prompt_len, 0, inf.dwCursorPosition.Y);
+ output(buf, len, prompt_len, inf.dwCursorPosition.Y);
+ if (prompt_len + len < (size_t)inf.dwSize.X)
+ {
+ /* Blank to EOL */
+ char* tmp = (char*)malloc(inf.dwSize.X - (prompt_len + len));
+ memset(tmp, ' ', inf.dwSize.X - (prompt_len + len));
+ output(tmp, inf.dwSize.X - (prompt_len + len), len + prompt_len, inf.dwCursorPosition.Y);
+ free(tmp);
+ }
+ inf.dwCursorPosition.X = (SHORT)(pos + prompt_len);
+ SetConsoleCursorPosition(console_out, inf.dwCursorPosition);
+}
+
+static int linenoisePrompt(char* buf,
+ size_t buflen,
+ const char* prompt)
+{
+ size_t plen = strlen(prompt);
+ size_t pos = 0;
+ size_t len = 0;
+ int history_index = 0;
+#ifdef ALT_KEYS
+ unsigned char last_down = 0;
+#endif
+ buf[0] = '\0';
+ buflen--; /* Make sure there is always space for the nulterm */
+
+    /* The latest history entry is always our current buffer, which
+     * initially is just an empty string. */
+ linenoiseHistoryAdd("");
+
+ CONSOLE_SCREEN_BUFFER_INFO inf = { 0 };
+ GetConsoleScreenBufferInfo(console_out, &inf);
+ size_t cols = inf.dwSize.X;
+ output(prompt, plen, 0, inf.dwCursorPosition.Y);
+ inf.dwCursorPosition.X = (SHORT)plen;
+ SetConsoleCursorPosition(console_out, inf.dwCursorPosition);
+
+ for ( ; ; )
+ {
+ INPUT_RECORD rec;
+ DWORD count;
+ ReadConsoleInputA(console_in, &rec, 1, &count);
+ if (rec.EventType != KEY_EVENT)
+ continue;
+#ifdef ALT_KEYS
+ if (rec.Event.KeyEvent.bKeyDown)
+ {
+ last_down = rec.Event.KeyEvent.uChar.AsciiChar;
+ continue;
+ }
+#else
+ if (!rec.Event.KeyEvent.bKeyDown)
+ {
+ continue;
+ }
+#endif
+ switch (rec.Event.KeyEvent.wVirtualKeyCode)
+ {
+ case VK_RETURN: /* enter */
+ history_len--;
+ free(history[history_len]);
+ return (int)len;
+ case VK_BACK: /* backspace */
+#ifdef ALT_KEYS
+ backspace:
+#endif
+ if (pos > 0 && len > 0)
+ {
+ memmove(buf + pos - 1, buf + pos, len - pos);
+ pos--;
+ len--;
+ buf[len] = '\0';
+ refreshLine(prompt, buf, len, pos, cols);
+ }
+ break;
+ case VK_LEFT:
+#ifdef ALT_KEYS
+ left_arrow:
+#endif
+ /* left arrow */
+ if (pos > 0)
+ {
+ pos--;
+ refreshLine(prompt, buf, len, pos, cols);
+ }
+ break;
+ case VK_RIGHT:
+#ifdef ALT_KEYS
+ right_arrow:
+#endif
+ /* right arrow */
+ if (pos != len)
+ {
+ pos++;
+ refreshLine(prompt, buf, len, pos, cols);
+ }
+ break;
+ case VK_UP:
+ case VK_DOWN:
+#ifdef ALT_KEYS
+ up_down_arrow:
+#endif
+ /* up and down arrow: history */
+ if (history_len > 1)
+ {
+                /* Update the current history entry before
+                 * overwriting it with the next one. */
+ free(history[history_len - 1 - history_index]);
+ history[history_len - 1 - history_index] = _strdup(buf);
+ /* Show the new entry */
+ history_index += (rec.Event.KeyEvent.wVirtualKeyCode == VK_UP) ? 1 : -1;
+ if (history_index < 0)
+ {
+ history_index = 0;
+ break;
+ }
+ else if (history_index >= history_len)
+ {
+ history_index = history_len - 1;
+ break;
+ }
+ strncpy(buf, history[history_len - 1 - history_index], buflen);
+ buf[buflen] = '\0';
+ len = pos = strlen(buf);
+ refreshLine(prompt, buf, len, pos, cols);
+ }
+ break;
+ case VK_DELETE:
+ /* delete */
+ if (len > 0 && pos < len)
+ {
+ memmove(buf + pos, buf + pos + 1, len - pos - 1);
+ len--;
+ buf[len] = '\0';
+ refreshLine(prompt, buf, len, pos, cols);
+ }
+ break;
+ case VK_HOME: /* Ctrl+a, go to the start of the line */
+#ifdef ALT_KEYS
+ home:
+#endif
+ pos = 0;
+ refreshLine(prompt, buf, len, pos, cols);
+ break;
+ case VK_END: /* ctrl+e, go to the end of the line */
+#ifdef ALT_KEYS
+ end:
+#endif
+ pos = len;
+ refreshLine(prompt, buf, len, pos, cols);
+ break;
+ default:
+#ifdef ALT_KEYS
+ /* Use alt instead of CTRL since windows eats CTRL+char combos */
+ if (rec.Event.KeyEvent.dwControlKeyState & (LEFT_ALT_PRESSED | RIGHT_ALT_PRESSED))
+ {
+ switch (last_down)
+ {
+                case 'a': /* ctrl-a */
+ goto home;
+                case 'e': /* ctrl-e */
+ goto end;
+ case 't': /* ctrl-t */
+ if (pos > 0 && pos < len)
+ {
+ int aux = buf[pos - 1];
+ buf[pos - 1] = buf[pos];
+ buf[pos] = aux;
+ if (pos != len - 1)
+ pos++;
+ refreshLine(prompt, buf, len, pos, cols);
+ }
+ break;
+ case 'h': /* ctrl-h */
+ goto backspace;
+ case 'b': /* ctrl-b */
+ goto left_arrow;
+ case 'f': /* ctrl-f */
+ goto right_arrow;
+ case 'p': /* ctrl-p */
+ rec.Event.KeyEvent.wVirtualKeyCode = VK_UP;
+ goto up_down_arrow;
+ case 'n': /* ctrl-n */
+ rec.Event.KeyEvent.wVirtualKeyCode = VK_DOWN;
+ goto up_down_arrow;
+ case 'u': /* Ctrl+u, delete the whole line. */
+ buf[0] = '\0';
+ pos = len = 0;
+ refreshLine(prompt, buf, len, pos, cols);
+ break;
+ case 'k': /* Ctrl+k, delete from current to end of line. */
+ buf[pos] = '\0';
+ len = pos;
+ refreshLine(prompt, buf, len, pos, cols);
+ break;
+ }
+ continue;
+ }
+#endif /* ALT_KEYS */
+ if (rec.Event.KeyEvent.uChar.AsciiChar < ' ' ||
+ rec.Event.KeyEvent.uChar.AsciiChar > '~')
+ continue;
+
+ if (len < buflen)
+ {
+ if (len != pos)
+ memmove(buf + pos + 1, buf + pos, len - pos);
+ buf[pos] = rec.Event.KeyEvent.uChar.AsciiChar;
+ len++;
+ pos++;
+ buf[len] = '\0';
+ refreshLine(prompt, buf, len, pos, cols);
+ }
+ break;
+ }
+ }
+}
+
+static int linenoiseRaw(char* buf,
+ size_t buflen,
+ const char* prompt)
+{
+ int count = -1;
+
+ if (buflen != 0)
+ {
+ if (enableRawMode() == -1)
+ return -1;
+ count = linenoisePrompt(buf, buflen, prompt);
+ disableRawMode();
+ printf("\n");
+ }
+ return count;
+}
+
+char* linenoise(const char* prompt)
+{
+ char buf[LINENOISE_MAX_LINE];
+ int count = linenoiseRaw(buf, LINENOISE_MAX_LINE, prompt);
+ if (count == -1)
+ return NULL;
+ return _strdup(buf);
+}
+
+/* Using a circular buffer is smarter, but a bit more complex to handle. */
+int linenoiseHistoryAdd(const char* line)
+{
+ char* linecopy;
+
+ if (history_max_len == 0)
+ return 0;
+ if (history == NULL)
+ {
+ history = (char**)malloc(sizeof(char*) * history_max_len);
+ if (history == NULL)
+ return 0;
+ memset(history, 0, (sizeof(char*) * history_max_len));
+ }
+ linecopy = _strdup(line);
+ if (!linecopy)
+ return 0;
+ if (history_len == history_max_len)
+ {
+ free(history[0]);
+ memmove(history, history + 1, sizeof(char*) * (history_max_len - 1));
+ history_len--;
+ }
+ history[history_len] = linecopy;
+ history_len++;
+ return 1;
+}
+
+int linenoiseHistorySetMaxLen(int len)
+{
+ char** new_history;
+
+ if (len < 1)
+ return 0;
+ if (history)
+ {
+ int tocopy = history_len;
+
+ new_history = (char**)malloc(sizeof(char*) * len);
+ if (new_history == NULL)
+ return 0;
+ if (len < tocopy)
+ tocopy = len;
+ memcpy(new_history, history + (history_max_len - tocopy), sizeof(char*) * tocopy);
+ free(history);
+ history = new_history;
+ }
+ history_max_len = len;
+ if (history_len > history_max_len)
+ history_len = history_max_len;
+ return 1;
+}
+
+/* Save the history in the specified file. On success 0 is returned;
+ * otherwise -1 is returned. */
+int linenoiseHistorySave(const char* filename)
+{
+ FILE* fp = fopen(filename, "w");
+ int j;
+
+ if (fp == NULL)
+ return -1;
+ for (j = 0; j < history_len; j++)
+ fprintf(fp, "%s\n", history[j]);
+ fclose(fp);
+ return 0;
+}
+
+/* Load the history from the specified file. If the file does not exist
+ * zero is returned and no operation is performed.
+ *
+ * If the file exists and the operation succeeded 0 is returned, otherwise
+ * on error -1 is returned. */
+int linenoiseHistoryLoad(const char* filename)
+{
+ FILE* fp = fopen(filename, "r");
+ char buf[LINENOISE_MAX_LINE];
+
+ if (fp == NULL)
+ return -1;
+
+ while (fgets(buf, LINENOISE_MAX_LINE, fp) != NULL)
+ {
+ char* p;
+
+ p = strchr(buf, '\r');
+ if (!p)
+ p = strchr(buf, '\n');
+ if (p)
+ *p = '\0';
+ linenoiseHistoryAdd(buf);
+ }
+ fclose(fp);
+ return 0;
+}
diff --git a/pcre-7.4/config-cmake.h.in b/third_party/pcre-7.4/config-cmake.h.in
index 27a2d02..27a2d02 100644
--- a/pcre-7.4/config-cmake.h.in
+++ b/third_party/pcre-7.4/config-cmake.h.in
diff --git a/pcre-7.4/config.h b/third_party/pcre-7.4/config.h
index 1dda8bd..29fe726 100644
--- a/pcre-7.4/config.h
+++ b/third_party/pcre-7.4/config.h
@@ -79,7 +79,9 @@ them both to 0; an emulation function will be used. */
/* Define to 1 if you have the `strtoll' function. */
// dm: visual studio
-//#define HAVE_STRTOLL 1
+#ifndef _WIN32
+#define HAVE_STRTOLL 1
+#endif
/* Define to 1 if you have the `strtoq' function. */
// dm: visual studio
@@ -106,7 +108,9 @@ them both to 0; an emulation function will be used. */
/* Define to 1 if you have the `_strtoi64' function. */
/* #undef HAVE__STRTOI64 */
// dm: visual studio
+#ifdef _WIN32
#define HAVE__STRTOI64 1
+#endif
/* The value of LINK_SIZE determines the number of bytes used to store links
as offsets within the compiled regex. The default is 2, which allows for
@@ -161,6 +165,11 @@ them both to 0; an emulation function will be used. */
that support it, "configure" can be used to set this in the Makefile (use
--disable-stack-for-recursion). */
/* #undef NO_RECURSE */
+/* mongodb: don't recurse, don't want to use much stack or blow stack */
+#ifndef __sunos__
+/* TODO this doesn't compile on sunos?? */
+#define NO_RECURSE 1
+#endif
/* Name of package */
#define PACKAGE "pcre"
@@ -193,7 +202,8 @@ them both to 0; an emulation function will be used. */
/* #undef PCRE_EXP_DEFN */
/* Define if linking statically (TODO: make nice with Libtool) */
-/* #undef PCRE_STATIC */
+/* mongodb */
+#define PCRE_STATIC
/* When calling PCRE via the POSIX interface, additional working storage is
required for holding the pointers to capturing substrings because PCRE
@@ -209,12 +219,14 @@ them both to 0; an emulation function will be used. */
#define STDC_HEADERS 1
/* Define to enable support for Unicode properties */
-/* #undef SUPPORT_UCP */
+/* mongodb */
+#define SUPPORT_UCP
+
/* Define to enable support for the UTF-8 Unicode encoding. */
-#if( !defined(SUPPORT_UTF8) )
+/* mongodb */
#define SUPPORT_UTF8
-#endif
+
/* Version number of package */
#define VERSION "7.4"
diff --git a/pcre-7.4/config.h.generic b/third_party/pcre-7.4/config.h.generic
index b6d7ab8..b6d7ab8 100644
--- a/pcre-7.4/config.h.generic
+++ b/third_party/pcre-7.4/config.h.generic
diff --git a/pcre-7.4/config.h.in b/third_party/pcre-7.4/config.h.in
index c4ea3c4..c4ea3c4 100644
--- a/pcre-7.4/config.h.in
+++ b/third_party/pcre-7.4/config.h.in
diff --git a/pcre-7.4/dftables.c b/third_party/pcre-7.4/dftables.c
index 67bca53..67bca53 100644
--- a/pcre-7.4/dftables.c
+++ b/third_party/pcre-7.4/dftables.c
diff --git a/pcre-7.4/pcre.h b/third_party/pcre-7.4/pcre.h
index 701699b..701699b 100644
--- a/pcre-7.4/pcre.h
+++ b/third_party/pcre-7.4/pcre.h
diff --git a/pcre-7.4/pcre.h.generic b/third_party/pcre-7.4/pcre.h.generic
index 58a83c3..58a83c3 100644
--- a/pcre-7.4/pcre.h.generic
+++ b/third_party/pcre-7.4/pcre.h.generic
diff --git a/pcre-7.4/pcre.h.in b/third_party/pcre-7.4/pcre.h.in
index 8bebbb4..8bebbb4 100644
--- a/pcre-7.4/pcre.h.in
+++ b/third_party/pcre-7.4/pcre.h.in
diff --git a/pcre-7.4/pcre_chartables.c b/third_party/pcre-7.4/pcre_chartables.c
index ae45db0..ae45db0 100644
--- a/pcre-7.4/pcre_chartables.c
+++ b/third_party/pcre-7.4/pcre_chartables.c
diff --git a/pcre-7.4/pcre_chartables.c.dist b/third_party/pcre-7.4/pcre_chartables.c.dist
index ae45db0..ae45db0 100644
--- a/pcre-7.4/pcre_chartables.c.dist
+++ b/third_party/pcre-7.4/pcre_chartables.c.dist
diff --git a/pcre-7.4/pcre_compile.c b/third_party/pcre-7.4/pcre_compile.c
index 3994781..3994781 100644
--- a/pcre-7.4/pcre_compile.c
+++ b/third_party/pcre-7.4/pcre_compile.c
diff --git a/pcre-7.4/pcre_config.c b/third_party/pcre-7.4/pcre_config.c
index 220ef93..220ef93 100644
--- a/pcre-7.4/pcre_config.c
+++ b/third_party/pcre-7.4/pcre_config.c
diff --git a/pcre-7.4/pcre_dfa_exec.c b/third_party/pcre-7.4/pcre_dfa_exec.c
index e590fbb..e590fbb 100644
--- a/pcre-7.4/pcre_dfa_exec.c
+++ b/third_party/pcre-7.4/pcre_dfa_exec.c
diff --git a/pcre-7.4/pcre_exec.c b/third_party/pcre-7.4/pcre_exec.c
index 6db7c35..657c142 100644
--- a/pcre-7.4/pcre_exec.c
+++ b/third_party/pcre-7.4/pcre_exec.c
@@ -257,7 +257,7 @@ argument of match(), which never changes. */
#define RMATCH(ra,rb,rc,rd,re,rf,rg,rw)\
{\
- heapframe *newframe = (pcre_stack_malloc)(sizeof(heapframe));\
+ heapframe *newframe = ((heapframe*)(pcre_stack_malloc)(sizeof(heapframe)));\
frame->Xwhere = rw; \
newframe->Xeptr = ra;\
newframe->Xecode = rb;\
@@ -420,7 +420,7 @@ heap storage. Set up the top-level frame here; others are obtained from the
heap whenever RMATCH() does a "recursion". See the macro definitions above. */
#ifdef NO_RECURSE
-heapframe *frame = (pcre_stack_malloc)(sizeof(heapframe));
+heapframe *frame = (heapframe*) ((pcre_stack_malloc)(sizeof(heapframe)));
frame->Xprevframe = NULL; /* Marks the top level */
/* Copy in the original argument variables */
diff --git a/pcre-7.4/pcre_fullinfo.c b/third_party/pcre-7.4/pcre_fullinfo.c
index 04e31f6..04e31f6 100644
--- a/pcre-7.4/pcre_fullinfo.c
+++ b/third_party/pcre-7.4/pcre_fullinfo.c
diff --git a/pcre-7.4/pcre_get.c b/third_party/pcre-7.4/pcre_get.c
index fc283c8..fc283c8 100644
--- a/pcre-7.4/pcre_get.c
+++ b/third_party/pcre-7.4/pcre_get.c
diff --git a/pcre-7.4/pcre_globals.c b/third_party/pcre-7.4/pcre_globals.c
index 4794819..4794819 100644
--- a/pcre-7.4/pcre_globals.c
+++ b/third_party/pcre-7.4/pcre_globals.c
diff --git a/pcre-7.4/pcre_info.c b/third_party/pcre-7.4/pcre_info.c
index 9bcccbc..9bcccbc 100644
--- a/pcre-7.4/pcre_info.c
+++ b/third_party/pcre-7.4/pcre_info.c
diff --git a/pcre-7.4/pcre_internal.h b/third_party/pcre-7.4/pcre_internal.h
index 5fbb344..5fbb344 100644
--- a/pcre-7.4/pcre_internal.h
+++ b/third_party/pcre-7.4/pcre_internal.h
diff --git a/pcre-7.4/pcre_maketables.c b/third_party/pcre-7.4/pcre_maketables.c
index 352bea9..352bea9 100644
--- a/pcre-7.4/pcre_maketables.c
+++ b/third_party/pcre-7.4/pcre_maketables.c
diff --git a/pcre-7.4/pcre_newline.c b/third_party/pcre-7.4/pcre_newline.c
index 1708d93..1708d93 100644
--- a/pcre-7.4/pcre_newline.c
+++ b/third_party/pcre-7.4/pcre_newline.c
diff --git a/pcre-7.4/pcre_ord2utf8.c b/third_party/pcre-7.4/pcre_ord2utf8.c
index d3904c6..d3904c6 100644
--- a/pcre-7.4/pcre_ord2utf8.c
+++ b/third_party/pcre-7.4/pcre_ord2utf8.c
diff --git a/pcre-7.4/pcre_refcount.c b/third_party/pcre-7.4/pcre_refcount.c
index b14103c..b14103c 100644
--- a/pcre-7.4/pcre_refcount.c
+++ b/third_party/pcre-7.4/pcre_refcount.c
diff --git a/pcre-7.4/pcre_scanner.cc b/third_party/pcre-7.4/pcre_scanner.cc
index a817a68..a817a68 100644
--- a/pcre-7.4/pcre_scanner.cc
+++ b/third_party/pcre-7.4/pcre_scanner.cc
diff --git a/pcre-7.4/pcre_scanner.h b/third_party/pcre-7.4/pcre_scanner.h
index f32e9e0..8d2265f 100644
--- a/pcre-7.4/pcre_scanner.h
+++ b/third_party/pcre-7.4/pcre_scanner.h
@@ -48,8 +48,8 @@
#include <string>
#include <vector>
-#include <pcrecpp.h>
-#include <pcre_stringpiece.h>
+#include "pcrecpp.h"
+#include "pcre_stringpiece.h"
namespace pcrecpp {
diff --git a/pcre-7.4/pcre_scanner_unittest.cc b/third_party/pcre-7.4/pcre_scanner_unittest.cc
index 284c8ea..284c8ea 100644
--- a/pcre-7.4/pcre_scanner_unittest.cc
+++ b/third_party/pcre-7.4/pcre_scanner_unittest.cc
diff --git a/pcre-7.4/pcre_stringpiece.cc b/third_party/pcre-7.4/pcre_stringpiece.cc
index 67c0f1f..67c0f1f 100644
--- a/pcre-7.4/pcre_stringpiece.cc
+++ b/third_party/pcre-7.4/pcre_stringpiece.cc
diff --git a/pcre-7.4/pcre_stringpiece.h b/third_party/pcre-7.4/pcre_stringpiece.h
index 599a351..599a351 100644
--- a/pcre-7.4/pcre_stringpiece.h
+++ b/third_party/pcre-7.4/pcre_stringpiece.h
diff --git a/pcre-7.4/pcre_stringpiece.h.in b/third_party/pcre-7.4/pcre_stringpiece.h.in
index b017661..b017661 100644
--- a/pcre-7.4/pcre_stringpiece.h.in
+++ b/third_party/pcre-7.4/pcre_stringpiece.h.in
diff --git a/pcre-7.4/pcre_stringpiece_unittest.cc b/third_party/pcre-7.4/pcre_stringpiece_unittest.cc
index 1e821ab..1e821ab 100644
--- a/pcre-7.4/pcre_stringpiece_unittest.cc
+++ b/third_party/pcre-7.4/pcre_stringpiece_unittest.cc
diff --git a/pcre-7.4/pcre_study.c b/third_party/pcre-7.4/pcre_study.c
index 1c28384..1c28384 100644
--- a/pcre-7.4/pcre_study.c
+++ b/third_party/pcre-7.4/pcre_study.c
diff --git a/pcre-7.4/pcre_tables.c b/third_party/pcre-7.4/pcre_tables.c
index 4b14fd1..4b14fd1 100644
--- a/pcre-7.4/pcre_tables.c
+++ b/third_party/pcre-7.4/pcre_tables.c
diff --git a/pcre-7.4/pcre_try_flipped.c b/third_party/pcre-7.4/pcre_try_flipped.c
index 412902b..412902b 100644
--- a/pcre-7.4/pcre_try_flipped.c
+++ b/third_party/pcre-7.4/pcre_try_flipped.c
diff --git a/pcre-7.4/pcre_ucp_searchfuncs.c b/third_party/pcre-7.4/pcre_ucp_searchfuncs.c
index 316163e..316163e 100644
--- a/pcre-7.4/pcre_ucp_searchfuncs.c
+++ b/third_party/pcre-7.4/pcre_ucp_searchfuncs.c
diff --git a/pcre-7.4/pcre_valid_utf8.c b/third_party/pcre-7.4/pcre_valid_utf8.c
index 1899142..1899142 100644
--- a/pcre-7.4/pcre_valid_utf8.c
+++ b/third_party/pcre-7.4/pcre_valid_utf8.c
diff --git a/pcre-7.4/pcre_version.c b/third_party/pcre-7.4/pcre_version.c
index c3b9cee..c3b9cee 100644
--- a/pcre-7.4/pcre_version.c
+++ b/third_party/pcre-7.4/pcre_version.c
diff --git a/pcre-7.4/pcre_xclass.c b/third_party/pcre-7.4/pcre_xclass.c
index cdf1af1..cdf1af1 100644
--- a/pcre-7.4/pcre_xclass.c
+++ b/third_party/pcre-7.4/pcre_xclass.c
diff --git a/pcre-7.4/pcrecpp.cc b/third_party/pcre-7.4/pcrecpp.cc
index a0c2b83..ffb7932 100644
--- a/pcre-7.4/pcrecpp.cc
+++ b/third_party/pcre-7.4/pcrecpp.cc
@@ -356,7 +356,7 @@ static int NewlineMode(int pcre_options) {
else if (newline == -2)
newline_mode = PCRE_NEWLINE_ANYCRLF;
else
- assert("" == "Unexpected return value from pcre_config(NEWLINE)");
+ assert( ! "Unexpected return value from pcre_config(NEWLINE)");
}
return newline_mode;
}
diff --git a/pcre-7.4/pcrecpp.h b/third_party/pcre-7.4/pcrecpp.h
index 5a0d597..5a0d597 100644
--- a/pcre-7.4/pcrecpp.h
+++ b/third_party/pcre-7.4/pcrecpp.h
diff --git a/pcre-7.4/pcrecpp_internal.h b/third_party/pcre-7.4/pcrecpp_internal.h
index 0af9478..0af9478 100644
--- a/pcre-7.4/pcrecpp_internal.h
+++ b/third_party/pcre-7.4/pcrecpp_internal.h
diff --git a/pcre-7.4/pcrecpp_unittest.cc b/third_party/pcre-7.4/pcrecpp_unittest.cc
index 463a11c..463a11c 100644
--- a/pcre-7.4/pcrecpp_unittest.cc
+++ b/third_party/pcre-7.4/pcrecpp_unittest.cc
diff --git a/pcre-7.4/pcrecpparg.h b/third_party/pcre-7.4/pcrecpparg.h
index c5bfae0..c5bfae0 100644
--- a/pcre-7.4/pcrecpparg.h
+++ b/third_party/pcre-7.4/pcrecpparg.h
diff --git a/pcre-7.4/pcrecpparg.h.in b/third_party/pcre-7.4/pcrecpparg.h.in
index 83cc44b..83cc44b 100644
--- a/pcre-7.4/pcrecpparg.h.in
+++ b/third_party/pcre-7.4/pcrecpparg.h.in
diff --git a/pcre-7.4/pcredemo.c b/third_party/pcre-7.4/pcredemo.c
index 4068e3e..4068e3e 100644
--- a/pcre-7.4/pcredemo.c
+++ b/third_party/pcre-7.4/pcredemo.c
diff --git a/pcre-7.4/pcregrep.c b/third_party/pcre-7.4/pcregrep.c
index b44574e..b44574e 100644
--- a/pcre-7.4/pcregrep.c
+++ b/third_party/pcre-7.4/pcregrep.c
diff --git a/pcre-7.4/pcreposix.c b/third_party/pcre-7.4/pcreposix.c
index 24f2109..24f2109 100644
--- a/pcre-7.4/pcreposix.c
+++ b/third_party/pcre-7.4/pcreposix.c
diff --git a/pcre-7.4/pcreposix.h b/third_party/pcre-7.4/pcreposix.h
index 875e1ff..875e1ff 100644
--- a/pcre-7.4/pcreposix.h
+++ b/third_party/pcre-7.4/pcreposix.h
diff --git a/pcre-7.4/pcretest.c b/third_party/pcre-7.4/pcretest.c
index a222146..a222146 100644
--- a/pcre-7.4/pcretest.c
+++ b/third_party/pcre-7.4/pcretest.c
diff --git a/pcre-7.4/ucp.h b/third_party/pcre-7.4/ucp.h
index 3a4179b..3a4179b 100644
--- a/pcre-7.4/ucp.h
+++ b/third_party/pcre-7.4/ucp.h
diff --git a/pcre-7.4/ucpinternal.h b/third_party/pcre-7.4/ucpinternal.h
index 811a373..811a373 100644
--- a/pcre-7.4/ucpinternal.h
+++ b/third_party/pcre-7.4/ucpinternal.h
diff --git a/pcre-7.4/ucptable.h b/third_party/pcre-7.4/ucptable.h
index 07eaced..07eaced 100644
--- a/pcre-7.4/ucptable.h
+++ b/third_party/pcre-7.4/ucptable.h
diff --git a/third_party/pcre.py b/third_party/pcre.py
new file mode 100644
index 0000000..a200722
--- /dev/null
+++ b/third_party/pcre.py
@@ -0,0 +1,38 @@
+
+import os
+
+def getFiles():
+
+ root = "third_party/pcre-7.4"
+
+ def pcreFilter(x):
+ if x.endswith( "dftables.c" ):
+ return False
+ if x.endswith( "pcredemo.c" ):
+ return False
+ if x.endswith( "pcretest.c" ):
+ return False
+ if x.endswith( "unittest.cc" ):
+ return False
+ if x.endswith( "pcregrep.c" ):
+ return False
+ return x.endswith( ".c" ) or x.endswith( ".cc" )
+
+ files = [ root + "/" + x for x in filter( pcreFilter , os.listdir( root ) ) ]
+
+ return files
+
+def configure( env , fileLists , options ):
+ #fileLists = { "serverOnlyFiles" : [] }
+
+ env.Prepend( CPPPATH=["./third_party/pcre-7.4/"] )
+
+ myenv = env.Clone()
+ myenv.Append( CPPDEFINES=["HAVE_CONFIG_H"] )
+ fileLists["commonFiles"] += [ myenv.Object(f) for f in getFiles() ]
+
+
+
+if __name__ == "__main__":
+ for x in getFiles():
+ print( x )
diff --git a/third_party/sm.py b/third_party/sm.py
new file mode 100644
index 0000000..53e7984
--- /dev/null
+++ b/third_party/sm.py
@@ -0,0 +1,100 @@
+import os
+import buildscripts.utils
+
+basicFiles = [ "jsapi.c" ,
+ "jsarena.c" ,
+ "jsarray.c" ,
+ "jsatom.c" ,
+ "jsbool.c" ,
+ "jscntxt.c" ,
+ "jsdate.c" ,
+ "jsdbgapi.c" ,
+ "jsdhash.c" ,
+ "jsdtoa.c" ,
+ "jsemit.c" ,
+ "jsexn.c" ,
+ "jsfun.c" ,
+ "jsgc.c" ,
+ "jshash.c" ,
+ "jsiter.c" ,
+ "jsinterp.c" ,
+ "jslock.c" ,
+ "jslog2.c" ,
+ "jslong.c" ,
+ "jsmath.c" ,
+ "jsnum.c" ,
+ "jsobj.c" ,
+ "jsopcode.c" ,
+ "jsparse.c" ,
+ "jsprf.c" ,
+ "jsregexp.c" ,
+ "jsscan.c" ,
+ "jsscope.c" ,
+ "jsscript.c" ,
+ "jsstr.c" ,
+ "jsutil.c" ,
+ "jsxdrapi.c" ,
+ "jsxml.c" ,
+ "prmjtime.c" ]
+
+root = "third_party/js-1.7"
+
+def r(x):
+ return "%s/%s" % ( root , x )
+
+def configure( env , fileLists , options ):
+ if not options["usesm"]:
+ return
+
+ if options["windows"]:
+ env.Append( CPPDEFINES=[ "XP_WIN" ] )
+ else:
+ env.Append( CPPDEFINES=[ "XP_UNIX" ] )
+
+ env.Prepend( CPPPATH=[root] )
+
+ myenv = env.Clone()
+ myenv.Append( CPPDEFINES=[ "JSFILE" , "EXPORT_JS_API" , "JS_C_STRINGS_ARE_UTF8" ] )
+ myenv["CPPFLAGS"] = myenv["CPPFLAGS"].replace( "-Werror" , "" )
+
+ if options["windows"]:
+ myenv["CPPFLAGS"] = myenv["CPPFLAGS"].replace( "/TP" , "" )
+ myenv["CPPFLAGS"] = myenv["CPPFLAGS"].replace( "/O2" , "" )
+ myenv["CPPFLAGS"] = myenv["CPPFLAGS"].replace( "/Gy" , "" )
+ myenv.Append( CPPFLAGS=" /wd4748 " )
+
+
+ if "NDEBUG" in myenv["CPPDEFINES"]:
+ myenv["CPPDEFINES"].remove( "NDEBUG" )
+
+ if os.sys.platform.startswith( "linux" ) or os.sys.platform == "darwin":
+ myenv["CPPDEFINES"] += [ "HAVE_VA_COPY" , "VA_COPY=va_copy" ]
+
+ elif "sunos5" == os.sys.platform:
+ myenv.Append( CPPDEFINES=[ "SOLARIS" , "HAVE_VA_LIST_AS_ARRAY" , "SVR4" , "SYSV" , "HAVE_LOCALTIME_R" ] )
+
+ fileLists["scriptingFiles"] += [ myenv.Object(root + "/" + f) for f in basicFiles ]
+
+ jskwgen = str( myenv.Program( r("jskwgen") , [ r("jskwgen.c") ] )[0] )
+ jscpucfg = str( myenv.Program( r("jscpucfg") , [ r("jscpucfg.c") ] )[0] )
+
+ def buildAutoFile( target , source , env ):
+ outFile = str( target[0] )
+
+ cmd = str( source[0] )
+ if options["nix"]:
+ cmd = "./" + cmd
+
+ output = buildscripts.utils.execsys( cmd )[0]
+ output = output.replace( '\r' , '\n' )
+ out = open( outFile , 'w' )
+ out.write( output )
+ return None
+
+ autoBuilder = myenv.Builder( action = buildAutoFile , suffix = '.h')
+
+ myenv.Append( BUILDERS={ 'Auto' : autoBuilder } )
+ myenv.Auto( r("jsautokw.h") , [ jskwgen ] )
+ myenv.Auto( r("jsautocfg.h") , [ jscpucfg ] )
+
+ myenv.Depends( r("jsscan.c") , r("jsautokw.h") )
diff --git a/third_party/snappy.py b/third_party/snappy.py
new file mode 100644
index 0000000..c70cb28
--- /dev/null
+++ b/third_party/snappy.py
@@ -0,0 +1,11 @@
+
+def configure( env , fileLists , options ):
+ #fileLists = { "serverOnlyFiles" : [] }
+
+ myenv = env.Clone()
+ if not options["windows"]:
+ myenv.Append(CPPFLAGS=" -Wno-sign-compare -Wno-unused-function ") #snappy doesn't compile cleanly
+
+ files = ["third_party/snappy/snappy.cc", "third_party/snappy/snappy-sinksource.cc"]
+
+ fileLists["serverOnlyFiles"] += [ myenv.Object(f) for f in files ]
diff --git a/third_party/snappy/COPYING b/third_party/snappy/COPYING
new file mode 100755
index 0000000..8d6bd9f
--- /dev/null
+++ b/third_party/snappy/COPYING
@@ -0,0 +1,28 @@
+Copyright 2011, Google Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/third_party/snappy/README b/third_party/snappy/README
new file mode 100755
index 0000000..df8f0e1
--- /dev/null
+++ b/third_party/snappy/README
@@ -0,0 +1,135 @@
+Snappy, a fast compressor/decompressor.
+
+
+Introduction
+============
+
+Snappy is a compression/decompression library. It does not aim for maximum
+compression, or compatibility with any other compression library; instead,
+it aims for very high speeds and reasonable compression. For instance,
+compared to the fastest mode of zlib, Snappy is an order of magnitude faster
+for most inputs, but the resulting compressed files are anywhere from 20% to
+100% bigger. (For more information, see "Performance", below.)
+
+Snappy has the following properties:
+
+ * Fast: Compression speeds at 250 MB/sec and beyond, with no assembler code.
+ See "Performance" below.
+ * Stable: Over the last few years, Snappy has compressed and decompressed
+ petabytes of data in Google's production environment. The Snappy bitstream
+ format is stable and will not change between versions.
+ * Robust: The Snappy decompressor is designed not to crash in the face of
+ corrupted or malicious input.
+ * Free and open source software: Snappy is licensed under a BSD-type license.
+ For more information, see the included COPYING file.
+
+Snappy has previously been called "Zippy" in some Google presentations
+and the like.
+
+
+Performance
+===========
+
+Snappy is intended to be fast. On a single core of a Core i7 processor
+in 64-bit mode, it compresses at about 250 MB/sec or more and decompresses at
+about 500 MB/sec or more. (These numbers are for the slowest inputs in our
+benchmark suite; others are much faster.) In our tests, Snappy usually
+is faster than algorithms in the same class (e.g. LZO, LZF, FastLZ, QuickLZ,
+etc.) while achieving comparable compression ratios.
+
+Typical compression ratios (based on the benchmark suite) are about 1.5-1.7x
+for plain text, about 2-4x for HTML, and of course 1.0x for JPEGs, PNGs and
+other already-compressed data. Similar numbers for zlib in its fastest mode
+are 2.6-2.8x, 3-7x and 1.0x, respectively. More sophisticated algorithms are
+capable of achieving yet higher compression rates, although usually at the
+expense of speed. Of course, compression ratio will vary significantly with
+the input.
+
+Although Snappy should be fairly portable, it is primarily optimized
+for 64-bit x86-compatible processors, and may run slower in other environments.
+In particular:
+
+ - Snappy uses 64-bit operations in several places to process more data at
+ once than would otherwise be possible.
+ - Snappy assumes unaligned 32- and 64-bit loads and stores are cheap.
+ On some platforms, these must be emulated with single-byte loads
+ and stores, which is much slower.
+ - Snappy assumes little-endian throughout, and needs to byte-swap data in
+ several places if running on a big-endian platform.
+
+Experience has shown that even heavily tuned code can be improved.
+Performance optimizations, whether for 64-bit x86 or other platforms,
+are of course most welcome; see "Contact", below.
+
+
+Usage
+=====
+
+Note that Snappy, both the implementation and the main interface,
+is written in C++. However, several third-party bindings to other languages
+are available; see the Google Code page at http://code.google.com/p/snappy/
+for more information. Also, if you want to use Snappy from C code, you can
+use the included C bindings in snappy-c.h.
+
+To use Snappy from your own C++ program, include the file "snappy.h" from
+your calling file, and link against the compiled library.
+
+There are many ways to call Snappy, but the simplest possible is
+
+ snappy::Compress(input, &output);
+
+and similarly
+
+ snappy::Uncompress(input, &output);
+
+where "input" and "output" are both instances of std::string.
+
+There are other interfaces that are more flexible in various ways, including
+support for custom (non-array) input sources. See the header file for more
+information.
+
+
+Tests and benchmarks
+====================
+
+When you compile Snappy, snappy_unittest is compiled in addition to the
+library itself. You do not need it to use the compressor from your own library,
+but it contains several useful components for Snappy development.
+
+First of all, it contains unit tests, verifying correctness on your machine in
+various scenarios. If you want to change or optimize Snappy, please run the
+tests to verify you have not broken anything. Note that if you have the
+Google Test library installed, unit test behavior (especially failures) will be
+significantly more user-friendly. You can find Google Test at
+
+ http://code.google.com/p/googletest/
+
+You probably also want the gflags library for handling of command-line flags;
+you can find it at
+
+ http://code.google.com/p/google-gflags/
+
+In addition to the unit tests, snappy contains microbenchmarks used to
+tune compression and decompression performance. These are automatically run
+before the unit tests, but you can disable them using the flag
+--run_microbenchmarks=false if you have gflags installed (otherwise you will
+need to edit the source).
+
+Finally, snappy can benchmark Snappy against a few other compression libraries
+(zlib, LZO, LZF, FastLZ and QuickLZ), if they were detected at configure time.
+To benchmark using a given file, give the compression algorithm you want to test
+Snappy against (e.g. --zlib) and then a list of one or more file names on the
+command line. The testdata/ directory contains the files used by the
+microbenchmark, which should provide a reasonably balanced starting point for
+benchmarking. (Note that baddata[1-3].snappy are not intended as benchmarks; they
+are used to verify correctness in the presence of corrupted data in the unit
+test.)
+
+
+Contact
+=======
+
+Snappy is distributed through Google Code. For the latest version, a bug tracker,
+and other information, see
+
+ http://code.google.com/p/snappy/
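
The Usage section above can be exercised end to end with a short round trip. A hedged sketch using the three-argument forms declared in snappy.h (the input string and main() wrapper are illustrative, not part of the vendored sources):

    // Minimal sketch of the compress/uncompress round trip described in the
    // README above.
    #include <cassert>
    #include <string>
    #include "snappy.h"

    int main() {
        std::string original(1000, 'a');          // highly compressible input
        std::string compressed, restored;

        snappy::Compress(original.data(), original.size(), &compressed);
        assert(snappy::IsValidCompressedBuffer(compressed.data(), compressed.size()));
        assert(snappy::Uncompress(compressed.data(), compressed.size(), &restored));
        assert(restored == original);
        return 0;
    }
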
diff --git a/third_party/snappy/config.h b/third_party/snappy/config.h
new file mode 100755
index 0000000..bfc3b30
--- /dev/null
+++ b/third_party/snappy/config.h
@@ -0,0 +1,124 @@
+/* config.h.in. Generated from configure.ac by autoheader. */
+
+/* Define if building universal (internal helper macro) */
+//#undef AC_APPLE_UNIVERSAL_BUILD
+
+#if defined(_WIN32)
+// signed/unsigned mismatch
+#pragma warning( disable : 4018 )
+#endif
+
+/* Define to 1 if the compiler supports __builtin_ctz and friends. */
+#if defined(__GNUC__)
+#define HAVE_BUILTIN_CTZ 1
+#endif
+
+/* Define to 1 if the compiler supports __builtin_expect. */
+#if defined(__GNUC__)
+#define HAVE_BUILTIN_EXPECT 1
+#endif
+
+/* Define to 1 if you have the <dlfcn.h> header file. */
+#if !defined(_WIN32)
+#define HAVE_DLFCN_H 1
+#endif
+
+/* Use the gflags package for command-line parsing. */
+#undef HAVE_GFLAGS
+
+/* Defined when Google Test is available. */
+#undef HAVE_GTEST
+
+/* Define to 1 if you have the <inttypes.h> header file. */
+#define HAVE_INTTYPES_H 1
+
+/* Define to 1 if you have the `fastlz' library (-lfastlz). */
+#undef HAVE_LIBFASTLZ
+
+/* Define to 1 if you have the `lzf' library (-llzf). */
+#undef HAVE_LIBLZF
+
+/* Define to 1 if you have the `lzo2' library (-llzo2). */
+#undef HAVE_LIBLZO2
+
+/* Define to 1 if you have the `quicklz' library (-lquicklz). */
+#undef HAVE_LIBQUICKLZ
+
+/* Define to 1 if you have the `z' library (-lz). */
+#undef HAVE_LIBZ
+
+/* Define to 1 if you have the <memory.h> header file. */
+#define HAVE_MEMORY_H 1
+
+/* Define to 1 if you have the <stddef.h> header file. */
+#define HAVE_STDDEF_H 1
+
+/* Define to 1 if you have the <stdint.h> header file. */
+#define HAVE_STDINT_H 1
+
+/* Define to 1 if you have the <stdlib.h> header file. */
+#define HAVE_STDLIB_H 1
+
+/* Define to 1 if you have the <strings.h> header file. */
+#define HAVE_STRINGS_H 1
+
+/* Define to 1 if you have the <string.h> header file. */
+#define HAVE_STRING_H 1
+
+/* Define to 1 if you have the <sys/mman.h> header file. */
+#if !defined(_WIN32)
+#define HAVE_SYS_MMAN_H 1
+#endif
+
+/* Define to 1 if you have the <sys/resource.h> header file. */
+#define HAVE_SYS_RESOURCE_H 1
+
+/* Define to 1 if you have the <sys/stat.h> header file. */
+#define HAVE_SYS_STAT_H 1
+
+/* Define to 1 if you have the <sys/types.h> header file. */
+#define HAVE_SYS_TYPES_H 1
+
+/* Define to 1 if you have the <unistd.h> header file. */
+#define HAVE_UNISTD_H 1
+
+/* Define to 1 if you have the <windows.h> header file. */
+#if defined(_WIN32)
+#define HAVE_WINDOWS_H 1
+#endif
+
+/* Define to the sub-directory in which libtool stores uninstalled libraries.
+ */
+#define LT_OBJDIR "libs/"
+
+/* Name of package */
+#define PACKAGE "snappy"
+
+#define PACKAGE_BUGREPORT ""
+
+/* Define to the full name of this package. */
+#define PACKAGE_NAME "snappy"
+
+/* Define to the full name and version of this package. */
+#define PACKAGE_STRING "snappy 1.0.3"
+
+/* Define to the one symbol short name of this package. */
+#define PACKAGE_TARNAME "snappy"
+
+/* Define to the home page for this package. */
+#define PACKAGE_URL ""
+
+/* Define to the version of this package. */
+#define PACKAGE_VERSION "1.0.3"
+
+/* Define to 1 if you have the ANSI C header files. */
+#define STDC_HEADERS 1
+
+/* Version number of package */
+#define VERSION "1.0.3"
+
+/* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most
+ significant byte first (like Motorola and SPARC, unlike Intel). */
+#if defined(__BIG_ENDIAN__)
+#define WORDS_BIGENDIAN 1
+#endif
diff --git a/third_party/snappy/snappy-internal.h b/third_party/snappy/snappy-internal.h
new file mode 100755
index 0000000..a32eda5
--- /dev/null
+++ b/third_party/snappy/snappy-internal.h
@@ -0,0 +1,150 @@
+// Copyright 2008 Google Inc. All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Internals shared between the Snappy implementation and its unittest.
+
+#ifndef UTIL_SNAPPY_SNAPPY_INTERNAL_H_
+#define UTIL_SNAPPY_SNAPPY_INTERNAL_H_
+
+#include "snappy-stubs-internal.h"
+
+namespace snappy {
+namespace internal {
+
+class WorkingMemory {
+ public:
+ WorkingMemory() : large_table_(NULL) { }
+ ~WorkingMemory() { delete[] large_table_; }
+
+ // Allocates and clears a hash table using memory in "*this",
+ // stores the number of buckets in "*table_size" and returns a pointer to
+ // the base of the hash table.
+ uint16* GetHashTable(size_t input_size, int* table_size);
+
+ private:
+ uint16 small_table_[1<<10]; // 2KB
+ uint16* large_table_; // Allocated only when needed
+
+ DISALLOW_COPY_AND_ASSIGN(WorkingMemory);
+};
+
+// Flat array compression that does not emit the "uncompressed length"
+// prefix. Compresses "input" string to the "*op" buffer.
+//
+// REQUIRES: "input_length <= kBlockSize"
+// REQUIRES: "op" points to an array of memory that is at least
+// "MaxCompressedLength(input_length)" in size.
+// REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero.
+// REQUIRES: "table_size" is a power of two
+//
+// Returns an "end" pointer into "op" buffer.
+// "end - op" is the compressed size of "input".
+char* CompressFragment(const char* input,
+ size_t input_length,
+ char* op,
+ uint16* table,
+ const int table_size);
+
+// Return the largest n such that
+//
+// s1[0,n-1] == s2[0,n-1]
+// and n <= (s2_limit - s2).
+//
+// Does not read *s2_limit or beyond.
+// Does not read *(s1 + (s2_limit - s2)) or beyond.
+// Requires that s2_limit >= s2.
+//
+// Separate implementation for x86_64, for speed. Uses the fact that
+// x86_64 is little endian.
+#if defined(ARCH_K8)
+static inline int FindMatchLength(const char* s1,
+ const char* s2,
+ const char* s2_limit) {
+ DCHECK_GE(s2_limit, s2);
+ int matched = 0;
+
+ // Find out how long the match is. We loop over the data 64 bits at a
+ // time until we find a 64-bit block that doesn't match; then we find
+ // the first non-matching bit and use that to calculate the total
+ // length of the match.
+ while (PREDICT_TRUE(s2 <= s2_limit - 8)) {
+ if (PREDICT_FALSE(UNALIGNED_LOAD64(s2) == UNALIGNED_LOAD64(s1 + matched))) {
+ s2 += 8;
+ matched += 8;
+ } else {
+ // On current (mid-2008) Opteron models there is a 3% more
+ // efficient code sequence to find the first non-matching byte.
+ // However, what follows is ~10% better on Intel Core 2 and newer,
+ // and we expect AMD's bsf instruction to improve.
+ uint64 x = UNALIGNED_LOAD64(s2) ^ UNALIGNED_LOAD64(s1 + matched);
+ int matching_bits = Bits::FindLSBSetNonZero64(x);
+ matched += matching_bits >> 3;
+ return matched;
+ }
+ }
+ while (PREDICT_TRUE(s2 < s2_limit)) {
+ if (PREDICT_TRUE(s1[matched] == *s2)) {
+ ++s2;
+ ++matched;
+ } else {
+ return matched;
+ }
+ }
+ return matched;
+}
+#else
+static inline int FindMatchLength(const char* s1,
+ const char* s2,
+ const char* s2_limit) {
+ // Implementation based on the x86-64 version, above.
+ DCHECK_GE(s2_limit, s2);
+ int matched = 0;
+
+ while (s2 <= s2_limit - 4 &&
+ UNALIGNED_LOAD32(s2) == UNALIGNED_LOAD32(s1 + matched)) {
+ s2 += 4;
+ matched += 4;
+ }
+ if (LittleEndian::IsLittleEndian() && s2 <= s2_limit - 4) {
+ uint32 x = UNALIGNED_LOAD32(s2) ^ UNALIGNED_LOAD32(s1 + matched);
+ int matching_bits = Bits::FindLSBSetNonZero(x);
+ matched += matching_bits >> 3;
+ } else {
+ while ((s2 < s2_limit) && (s1[matched] == *s2)) {
+ ++s2;
+ ++matched;
+ }
+ }
+ return matched;
+}
+#endif
+
+} // end namespace internal
+} // end namespace snappy
+
+#endif // UTIL_SNAPPY_SNAPPY_INTERNAL_H_
diff --git a/third_party/snappy/snappy-sinksource.cc b/third_party/snappy/snappy-sinksource.cc
new file mode 100755
index 0000000..1017895
--- /dev/null
+++ b/third_party/snappy/snappy-sinksource.cc
@@ -0,0 +1,72 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <string.h>
+
+#include "snappy-sinksource.h"
+
+namespace snappy {
+
+Source::~Source() { }
+
+Sink::~Sink() { }
+
+char* Sink::GetAppendBuffer(size_t length, char* scratch) {
+ return scratch;
+}
+
+ByteArraySource::~ByteArraySource() { }
+
+size_t ByteArraySource::Available() const { return left_; }
+
+const char* ByteArraySource::Peek(size_t* len) {
+ *len = left_;
+ return ptr_;
+}
+
+void ByteArraySource::Skip(size_t n) {
+ left_ -= n;
+ ptr_ += n;
+}
+
+UncheckedByteArraySink::~UncheckedByteArraySink() { }
+
+void UncheckedByteArraySink::Append(const char* data, size_t n) {
+ // Do no copying if the caller filled in the result of GetAppendBuffer()
+ if (data != dest_) {
+ memcpy(dest_, data, n);
+ }
+ dest_ += n;
+}
+
+char* UncheckedByteArraySink::GetAppendBuffer(size_t len, char* scratch) {
+ return dest_;
+}
+
+
+}
diff --git a/third_party/snappy/snappy-sinksource.h b/third_party/snappy/snappy-sinksource.h
new file mode 100755
index 0000000..430baea
--- /dev/null
+++ b/third_party/snappy/snappy-sinksource.h
@@ -0,0 +1,136 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef UTIL_SNAPPY_SNAPPY_SINKSOURCE_H_
+#define UTIL_SNAPPY_SNAPPY_SINKSOURCE_H_
+
+#include <stddef.h>
+
+
+namespace snappy {
+
+// A Sink is an interface that consumes a sequence of bytes.
+class Sink {
+ public:
+ Sink() { }
+ virtual ~Sink();
+
+ // Append "bytes[0,n-1]" to this.
+ virtual void Append(const char* bytes, size_t n) = 0;
+
+ // Returns a writable buffer of the specified length for appending.
+ // May return a pointer to the caller-owned scratch buffer which
+ // must have at least the indicated length. The returned buffer is
+ // only valid until the next operation on this Sink.
+ //
+ // After writing at most "length" bytes, call Append() with the
+ // pointer returned from this function and the number of bytes
+ // written. Many Append() implementations will avoid copying
+ // bytes if this function returned an internal buffer.
+ //
+ // If a non-scratch buffer is returned, the caller may only pass a
+ // prefix of it to Append(). That is, it is not correct to pass an
+ // interior pointer of the returned array to Append().
+ //
+ // The default implementation always returns the scratch buffer.
+ virtual char* GetAppendBuffer(size_t length, char* scratch);
+
+ private:
+ // No copying
+ Sink(const Sink&);
+ void operator=(const Sink&);
+};
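+// Illustrative sketch only (not part of the upstream library): a minimal
+// Sink that copies everything it is given into a std::string, relying on
+// the default scratch-based GetAppendBuffer(). Assumes <string> is
+// available to the including translation unit.
+//
+//   class StringAppendSink : public Sink {
+//    public:
+//     explicit StringAppendSink(std::string* dest) : dest_(dest) { }
+//     virtual void Append(const char* bytes, size_t n) {
+//       dest_->append(bytes, n);  // copy out of the caller's buffer
+//     }
+//    private:
+//     std::string* dest_;
+//   };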
+
+// A Source is an interface that yields a sequence of bytes
+class Source {
+ public:
+ Source() { }
+ virtual ~Source();
+
+ // Return the number of bytes left to read from the source
+ virtual size_t Available() const = 0;
+
+ // Peek at the next flat region of the source. Does not reposition
+ // the source. The returned region is empty iff Available()==0.
+ //
+ // Returns a pointer to the beginning of the region and stores its
+ // length in *len.
+ //
+ // The returned region is valid until the next call to Skip() or
+ // until this object is destroyed, whichever occurs first.
+ //
+ // The returned region may be larger than Available() (for example
+ // if this Source is a view on a substring of a larger source).
+ // The caller is responsible for ensuring that it only reads the
+ // Available() bytes.
+ virtual const char* Peek(size_t* len) = 0;
+
+ // Skip the next n bytes. Invalidates any buffer returned by
+ // a previous call to Peek().
+ // REQUIRES: Available() >= n
+ virtual void Skip(size_t n) = 0;
+
+ private:
+ // No copying
+ Source(const Source&);
+ void operator=(const Source&);
+};
+
+// A Source implementation that yields the contents of a flat array
+class ByteArraySource : public Source {
+ public:
+ ByteArraySource(const char* p, size_t n) : ptr_(p), left_(n) { }
+ virtual ~ByteArraySource();
+ virtual size_t Available() const;
+ virtual const char* Peek(size_t* len);
+ virtual void Skip(size_t n);
+ private:
+ const char* ptr_;
+ size_t left_;
+};
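+// Usage sketch (illustration only, not shipped code): draining a
+// ByteArraySource through Peek()/Skip(). Peek() here always exposes the
+// whole remaining range, so the caller chooses how much to consume.
+//
+//   const char data[] = "abcdef";
+//   ByteArraySource src(data, 6);
+//   while (src.Available() > 0) {
+//     size_t len;
+//     const char* p = src.Peek(&len);    // len == bytes left
+//     size_t take = (len < 2) ? len : 2;
+//     // ... use p[0 .. take-1] ...
+//     src.Skip(take);                    // invalidates p
+//   }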
+
+// A Sink implementation that writes to a flat array without any bound checks.
+class UncheckedByteArraySink : public Sink {
+ public:
+ explicit UncheckedByteArraySink(char* dest) : dest_(dest) { }
+ virtual ~UncheckedByteArraySink();
+ virtual void Append(const char* data, size_t n);
+ virtual char* GetAppendBuffer(size_t len, char* scratch);
+
+ // Return the current output pointer so that a caller can see how
+ // many bytes were produced.
+ // Note: this is not a Sink method.
+ char* CurrentDestination() const { return dest_; }
+ private:
+ char* dest_;
+};
+
+
+}
+
+#endif // UTIL_SNAPPY_SNAPPY_SINKSOURCE_H_
diff --git a/third_party/snappy/snappy-stubs-internal.cc b/third_party/snappy/snappy-stubs-internal.cc
new file mode 100755
index 0000000..6ed3343
--- /dev/null
+++ b/third_party/snappy/snappy-stubs-internal.cc
@@ -0,0 +1,42 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <algorithm>
+#include <string>
+
+#include "snappy-stubs-internal.h"
+
+namespace snappy {
+
+void Varint::Append32(string* s, uint32 value) {
+ char buf[Varint::kMax32];
+ const char* p = Varint::Encode32(buf, value);
+ s->append(buf, p - buf);
+}
+
+} // namespace snappy
diff --git a/third_party/snappy/snappy-stubs-internal.h b/third_party/snappy/snappy-stubs-internal.h
new file mode 100755
index 0000000..355a06b
--- /dev/null
+++ b/third_party/snappy/snappy-stubs-internal.h
@@ -0,0 +1,478 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Various stubs for the open-source version of Snappy.
+
+#ifndef UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_
+#define UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <iostream>
+#include <string>
+
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+
+#ifdef HAVE_SYS_MMAN
+#include <sys/mman.h>
+#endif
+
+#include "snappy-stubs-public.h"
+
+#if defined(__x86_64__)
+
+// Enable 64-bit optimized versions of some routines.
+#define ARCH_K8 1
+
+#endif
+
+// Needed by OS X, among others.
+#ifndef MAP_ANONYMOUS
+#define MAP_ANONYMOUS MAP_ANON
+#endif
+
+// Pull in std::min, std::ostream, and the like. This is safe because this
+// header file is never used from any public header files.
+using namespace std;
+
+// The size of an array, if known at compile-time.
+// Will give unexpected results if used on a pointer.
+// We undefine it first, since some compilers already have a definition.
+#ifdef ARRAYSIZE
+#undef ARRAYSIZE
+#endif
+#define ARRAYSIZE(a) (sizeof(a) / sizeof(*(a)))
+
+// Static prediction hints.
+#ifdef HAVE_BUILTIN_EXPECT
+#define PREDICT_FALSE(x) (__builtin_expect(x, 0))
+#define PREDICT_TRUE(x) (__builtin_expect(!!(x), 1))
+#else
+#define PREDICT_FALSE(x) x
+#define PREDICT_TRUE(x) x
+#endif
+
+// This is only used for recomputing the tag byte table used during
+// decompression; for simplicity we just remove it from the open-source
+// version (anyone who wants to regenerate it can just do the call
+// themselves within main()).
+#define DEFINE_bool(flag_name, default_value, description) \
+ bool FLAGS_ ## flag_name = default_value;
+#define DECLARE_bool(flag_name) \
+ extern bool FLAGS_ ## flag_name;
+#define REGISTER_MODULE_INITIALIZER(name, code)
+
+namespace snappy {
+
+static const uint32 kuint32max = static_cast<uint32>(0xFFFFFFFF);
+static const int64 kint64max = static_cast<int64>(0x7FFFFFFFFFFFFFFFLL);
+
+// Logging.
+
+#define LOG(level) LogMessage()
+#define VLOG(level) true ? (void)0 : \
+ snappy::LogMessageVoidify() & snappy::LogMessage()
+
+class LogMessage {
+ public:
+ LogMessage() { }
+ ~LogMessage() {
+ cerr << endl;
+ }
+
+ LogMessage& operator<<(const std::string& msg) {
+ cerr << msg;
+ return *this;
+ }
+ LogMessage& operator<<(int x) {
+ cerr << x;
+ return *this;
+ }
+};
+
+// Asserts, both versions activated in debug mode only,
+// and ones that are always active.
+
+#define CRASH_UNLESS(condition) \
+ PREDICT_TRUE(condition) ? (void)0 : \
+ snappy::LogMessageVoidify() & snappy::LogMessageCrash()
+
+class LogMessageCrash : public LogMessage {
+ public:
+ LogMessageCrash() { }
+ ~LogMessageCrash() {
+ cerr << endl;
+ abort();
+ }
+};
+
+// This class is used to explicitly ignore values in the conditional
+// logging macros. This avoids compiler warnings like "value computed
+// is not used" and "statement has no effect".
+
+class LogMessageVoidify {
+ public:
+ LogMessageVoidify() { }
+ // This has to be an operator with a precedence lower than << but
+ // higher than ?:
+ void operator&(const LogMessage&) { }
+};
+
+#define CHECK(cond) CRASH_UNLESS(cond)
+#define CHECK_LE(a, b) CRASH_UNLESS((a) <= (b))
+#define CHECK_GE(a, b) CRASH_UNLESS((a) >= (b))
+#define CHECK_EQ(a, b) CRASH_UNLESS((a) == (b))
+#define CHECK_NE(a, b) CRASH_UNLESS((a) != (b))
+#define CHECK_LT(a, b) CRASH_UNLESS((a) < (b))
+#define CHECK_GT(a, b) CRASH_UNLESS((a) > (b))
+
+#ifdef NDEBUG
+
+#define DCHECK(cond) CRASH_UNLESS(true)
+#define DCHECK_LE(a, b) CRASH_UNLESS(true)
+#define DCHECK_GE(a, b) CRASH_UNLESS(true)
+#define DCHECK_EQ(a, b) CRASH_UNLESS(true)
+#define DCHECK_NE(a, b) CRASH_UNLESS(true)
+#define DCHECK_LT(a, b) CRASH_UNLESS(true)
+#define DCHECK_GT(a, b) CRASH_UNLESS(true)
+
+#else
+
+#define DCHECK(cond) CHECK(cond)
+#define DCHECK_LE(a, b) CHECK_LE(a, b)
+#define DCHECK_GE(a, b) CHECK_GE(a, b)
+#define DCHECK_EQ(a, b) CHECK_EQ(a, b)
+#define DCHECK_NE(a, b) CHECK_NE(a, b)
+#define DCHECK_LT(a, b) CHECK_LT(a, b)
+#define DCHECK_GT(a, b) CHECK_GT(a, b)
+
+#endif
+
+// Potentially unaligned loads and stores.
+
+#if 1
+//#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__) || defined(_WIN32)
+
+#define UNALIGNED_LOAD16(_p) (*reinterpret_cast<const uint16 *>(_p))
+#define UNALIGNED_LOAD32(_p) (*reinterpret_cast<const uint32 *>(_p))
+#define UNALIGNED_LOAD64(_p) (*reinterpret_cast<const uint64 *>(_p))
+
+#define UNALIGNED_STORE16(_p, _val) (*reinterpret_cast<uint16 *>(_p) = (_val))
+#define UNALIGNED_STORE32(_p, _val) (*reinterpret_cast<uint32 *>(_p) = (_val))
+#define UNALIGNED_STORE64(_p, _val) (*reinterpret_cast<uint64 *>(_p) = (_val))
+
+#else
+
+// These functions are provided for architectures that don't support
+// unaligned loads and stores.
+
+inline uint16 UNALIGNED_LOAD16(const void *p) {
+ uint16 t;
+ memcpy(&t, p, sizeof t);
+ return t;
+}
+
+inline uint32 UNALIGNED_LOAD32(const void *p) {
+ uint32 t;
+ memcpy(&t, p, sizeof t);
+ return t;
+}
+
+inline uint64 UNALIGNED_LOAD64(const void *p) {
+ uint64 t;
+ memcpy(&t, p, sizeof t);
+ return t;
+}
+
+inline void UNALIGNED_STORE16(void *p, uint16 v) {
+ memcpy(p, &v, sizeof v);
+}
+
+inline void UNALIGNED_STORE32(void *p, uint32 v) {
+ memcpy(p, &v, sizeof v);
+}
+
+inline void UNALIGNED_STORE64(void *p, uint64 v) {
+ memcpy(p, &v, sizeof v);
+}
+
+#endif
+
+// The following guarantees declaration of the byte swap functions.
+#ifdef WORDS_BIGENDIAN
+
+#ifdef _MSC_VER
+#include <stdlib.h>
+#define bswap_16(x) _byteswap_ushort(x)
+#define bswap_32(x) _byteswap_ulong(x)
+#define bswap_64(x) _byteswap_uint64(x)
+
+#elif defined(__APPLE__)
+// Mac OS X / Darwin features
+#include <libkern/OSByteOrder.h>
+#define bswap_16(x) OSSwapInt16(x)
+#define bswap_32(x) OSSwapInt32(x)
+#define bswap_64(x) OSSwapInt64(x)
+
+#else
+#include <byteswap.h>
+#endif
+
+#endif // WORDS_BIGENDIAN
+
+// Convert to little-endian storage, opposite of network format.
+// Convert x from host to little endian: x = LittleEndian.FromHost(x);
+// convert x from little endian to host: x = LittleEndian.ToHost(x);
+//
+// Store values into unaligned memory converting to little endian order:
+// LittleEndian.Store16(p, x);
+//
+// Load unaligned values stored in little endian converting to host order:
+// x = LittleEndian.Load16(p);
+class LittleEndian {
+ public:
+ // Conversion functions.
+#ifdef WORDS_BIGENDIAN
+
+ static uint16 FromHost16(uint16 x) { return bswap_16(x); }
+ static uint16 ToHost16(uint16 x) { return bswap_16(x); }
+
+ static uint32 FromHost32(uint32 x) { return bswap_32(x); }
+ static uint32 ToHost32(uint32 x) { return bswap_32(x); }
+
+ static bool IsLittleEndian() { return false; }
+
+#else // !defined(WORDS_BIGENDIAN)
+
+ static uint16 FromHost16(uint16 x) { return x; }
+ static uint16 ToHost16(uint16 x) { return x; }
+
+ static uint32 FromHost32(uint32 x) { return x; }
+ static uint32 ToHost32(uint32 x) { return x; }
+
+ static bool IsLittleEndian() { return true; }
+
+#endif // !defined(WORDS_BIGENDIAN)
+
+ // Functions to do unaligned loads and stores in little-endian order.
+ static uint16 Load16(const void *p) {
+ return ToHost16(UNALIGNED_LOAD16(p));
+ }
+
+ static void Store16(void *p, uint16 v) {
+ UNALIGNED_STORE16(p, FromHost16(v));
+ }
+
+ static uint32 Load32(const void *p) {
+ return ToHost32(UNALIGNED_LOAD32(p));
+ }
+
+ static void Store32(void *p, uint32 v) {
+ UNALIGNED_STORE32(p, FromHost32(v));
+ }
+};
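+// Worked sketch (illustrative, not part of the library): a 16-bit round
+// trip through the little-endian accessors. On a little-endian host the
+// conversions are no-ops; on a big-endian host the bswap_16() path
+// produces the same stored bytes.
+//
+//   char buf[2];
+//   LittleEndian::Store16(buf, 0x1234);    // buf[0] == 0x34, buf[1] == 0x12
+//   uint16 v = LittleEndian::Load16(buf);  // v == 0x1234 on either host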
+
+// Some bit-manipulation functions.
+class Bits {
+ public:
+ // Return floor(log2(n)) for positive integer n. Returns -1 iff n == 0.
+ static int Log2Floor(uint32 n);
+
+ // Return the index of the least significant set bit, 0-indexed. Returns an
+ // undefined value if n == 0. FindLSBSetNonZero() is similar to ffs() except
+ // that it's 0-indexed.
+ static int FindLSBSetNonZero(uint32 n);
+ static int FindLSBSetNonZero64(uint64 n);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(Bits);
+};
+
+#ifdef HAVE_BUILTIN_CTZ
+
+inline int Bits::Log2Floor(uint32 n) {
+ return n == 0 ? -1 : 31 ^ __builtin_clz(n);
+}
+
+inline int Bits::FindLSBSetNonZero(uint32 n) {
+ return __builtin_ctz(n);
+}
+
+inline int Bits::FindLSBSetNonZero64(uint64 n) {
+ return __builtin_ctzll(n);
+}
+
+#else // Portable versions.
+
+inline int Bits::Log2Floor(uint32 n) {
+ if (n == 0)
+ return -1;
+ int log = 0;
+ uint32 value = n;
+ for (int i = 4; i >= 0; --i) {
+ int shift = (1 << i);
+ uint32 x = value >> shift;
+ if (x != 0) {
+ value = x;
+ log += shift;
+ }
+ }
+ assert(value == 1);
+ return log;
+}
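+// Worked trace of the loop above (illustration): for n == 1000 only the
+// i == 3 and i == 0 rounds fire (1000 >> 8 == 3, then 3 >> 1 == 1), giving
+// log == 8 + 1 == 9, which matches 2^9 <= 1000 < 2^10.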
+
+inline int Bits::FindLSBSetNonZero(uint32 n) {
+ int rc = 31;
+ for (int i = 4, shift = 1 << 4; i >= 0; --i) {
+ const uint32 x = n << shift;
+ if (x != 0) {
+ n = x;
+ rc -= shift;
+ }
+ shift >>= 1;
+ }
+ return rc;
+}
+
+// FindLSBSetNonZero64() is defined in terms of FindLSBSetNonZero().
+inline int Bits::FindLSBSetNonZero64(uint64 n) {
+ const uint32 bottombits = static_cast<uint32>(n);
+ if (bottombits == 0) {
+ // Bottom bits are zero, so scan in top bits
+ return 32 + FindLSBSetNonZero(static_cast<uint32>(n >> 32));
+ } else {
+ return FindLSBSetNonZero(bottombits);
+ }
+}
+
+#endif // End portable versions.
+
+// Variable-length integer encoding.
+class Varint {
+ public:
+ // Maximum lengths of varint encoding of uint32.
+ static const int kMax32 = 5;
+
+ // Attempts to parse a varint32 from a prefix of the bytes in [ptr,limit-1].
+ // Never reads a character at or beyond limit. If a valid/terminated varint32
+ // was found in the range, stores it in *OUTPUT and returns a pointer just
+ // past the last byte of the varint32. Else returns NULL. On success,
+ // "result <= limit".
+ static const char* Parse32WithLimit(const char* ptr, const char* limit,
+ uint32* OUTPUT);
+
+ // REQUIRES "ptr" points to a buffer of length sufficient to hold "v".
+ // EFFECTS Encodes "v" into "ptr" and returns a pointer to the
+ // byte just past the last encoded byte.
+ static char* Encode32(char* ptr, uint32 v);
+
+ // EFFECTS Appends the varint representation of "value" to "*s".
+ static void Append32(string* s, uint32 value);
+};
+
+inline const char* Varint::Parse32WithLimit(const char* p,
+ const char* l,
+ uint32* OUTPUT) {
+ const unsigned char* ptr = reinterpret_cast<const unsigned char*>(p);
+ const unsigned char* limit = reinterpret_cast<const unsigned char*>(l);
+ uint32 b, result;
+ if (ptr >= limit) return NULL;
+ b = *(ptr++); result = b & 127; if (b < 128) goto done;
+ if (ptr >= limit) return NULL;
+ b = *(ptr++); result |= (b & 127) << 7; if (b < 128) goto done;
+ if (ptr >= limit) return NULL;
+ b = *(ptr++); result |= (b & 127) << 14; if (b < 128) goto done;
+ if (ptr >= limit) return NULL;
+ b = *(ptr++); result |= (b & 127) << 21; if (b < 128) goto done;
+ if (ptr >= limit) return NULL;
+ b = *(ptr++); result |= (b & 127) << 28; if (b < 16) goto done;
+ return NULL; // Value is too long to be a varint32
+ done:
+ *OUTPUT = result;
+ return reinterpret_cast<const char*>(ptr);
+}
+
+inline char* Varint::Encode32(char* sptr, uint32 v) {
+ // Operate on characters as unsigneds
+ unsigned char* ptr = reinterpret_cast<unsigned char*>(sptr);
+ static const int B = 128;
+ if (v < (1<<7)) {
+ *(ptr++) = v;
+ } else if (v < (1<<14)) {
+ *(ptr++) = v | B;
+ *(ptr++) = v>>7;
+ } else if (v < (1<<21)) {
+ *(ptr++) = v | B;
+ *(ptr++) = (v>>7) | B;
+ *(ptr++) = v>>14;
+ } else if (v < (1<<28)) {
+ *(ptr++) = v | B;
+ *(ptr++) = (v>>7) | B;
+ *(ptr++) = (v>>14) | B;
+ *(ptr++) = v>>21;
+ } else {
+ *(ptr++) = v | B;
+ *(ptr++) = (v>>7) | B;
+ *(ptr++) = (v>>14) | B;
+ *(ptr++) = (v>>21) | B;
+ *(ptr++) = v>>28;
+ }
+ return reinterpret_cast<char*>(ptr);
+}
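+// Round-trip sketch (illustration only): the value 300 encodes as the two
+// bytes 0xac 0x02 (low 7 bits 0x2c with the continuation bit set, then
+// 300 >> 7 == 2), and Parse32WithLimit() recovers it.
+//
+//   char buf[Varint::kMax32];
+//   char* end = Varint::Encode32(buf, 300);   // end - buf == 2
+//   uint32 value;
+//   const char* p = Varint::Parse32WithLimit(buf, end, &value);
+//   // p == end and value == 300; p would be NULL for truncated input.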
+
+// If you know the internal layout of the std::string in use, you can
+// replace this function with one that resizes the string without
+// filling the new space with zeros (if applicable) --
+// it will be non-portable but faster.
+inline void STLStringResizeUninitialized(string* s, size_t new_size) {
+ s->resize(new_size);
+}
+
+// Return a mutable char* pointing to a string's internal buffer,
+// which may not be null-terminated. Writing through this pointer will
+// modify the string.
+//
+// string_as_array(&str)[i] is valid for 0 <= i < str.size() until the
+// next call to a string method that invalidates iterators.
+//
+// As of 2006-04, there is no standard-blessed way of getting a
+// mutable reference to a string's internal buffer. However, issue 530
+// (http://www.open-std.org/JTC1/SC22/WG21/docs/lwg-defects.html#530)
+// proposes this as the method. It will officially be part of the standard
+// for C++0x. This should already work on all current implementations.
+inline char* string_as_array(string* str) {
+ return str->empty() ? NULL : &*str->begin();
+}
+
+} // namespace snappy
+
+#endif // UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_
diff --git a/third_party/snappy/snappy-stubs-public.h b/third_party/snappy/snappy-stubs-public.h
new file mode 100755
index 0000000..074d463
--- /dev/null
+++ b/third_party/snappy/snappy-stubs-public.h
@@ -0,0 +1,85 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+// Author: sesse@google.com (Steinar H. Gunderson)
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Various type stubs for the open-source version of Snappy.
+//
+// This file cannot include config.h, as it is included from snappy.h,
+// which is a public header. Instead, snappy-stubs-public.h is generated
+// from snappy-stubs-public.h.in at configure time.
+
+#ifndef UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
+#define UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
+
+#if !defined(_WIN32)
+#include <stdint.h>
+#endif
+
+#if 1
+#include <stddef.h>
+#endif
+
+#define SNAPPY_MAJOR 1
+#define SNAPPY_MINOR 0
+#define SNAPPY_PATCHLEVEL 3
+#define SNAPPY_VERSION \
+ ((SNAPPY_MAJOR << 16) | (SNAPPY_MINOR << 8) | SNAPPY_PATCHLEVEL)
+
+#include <string>
+
+namespace snappy {
+
+#if !defined(_WIN32)
+typedef int8_t int8;
+typedef uint8_t uint8;
+typedef int16_t int16;
+typedef uint16_t uint16;
+typedef int32_t int32;
+typedef uint32_t uint32;
+typedef int64_t int64;
+typedef uint64_t uint64;
+#else
+typedef signed char int8;
+typedef unsigned char uint8;
+typedef short int16;
+typedef unsigned short uint16;
+typedef int int32;
+typedef unsigned int uint32;
+typedef long long int64;
+typedef unsigned long long uint64;
+#endif
+
+typedef std::string string;
+
+#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
+ TypeName(const TypeName&); \
+ void operator=(const TypeName&)
+
+} // namespace snappy
+
+#endif // UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
diff --git a/third_party/snappy/snappy.cc b/third_party/snappy/snappy.cc
new file mode 100755
index 0000000..fdc67e8
--- /dev/null
+++ b/third_party/snappy/snappy.cc
@@ -0,0 +1,1026 @@
+// Copyright 2005 Google Inc. All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "snappy.h"
+#include "snappy-internal.h"
+#include "snappy-sinksource.h"
+
+#include <stdio.h>
+
+#include <algorithm>
+#include <string>
+#include <vector>
+
+
+namespace snappy {
+
+// Any hash function will produce a valid compressed bitstream, but a good
+// hash function reduces the number of collisions and thus yields better
+// compression for compressible input, and more speed for incompressible
+// input. Of course, it doesn't hurt if the hash function is reasonably fast
+// either, as it gets called a lot.
+static inline uint32 HashBytes(uint32 bytes, int shift) {
+ uint32 kMul = 0x1e35a7bd;
+ return (bytes * kMul) >> shift;
+}
+static inline uint32 Hash(const char* p, int shift) {
+ return HashBytes(UNALIGNED_LOAD32(p), shift);
+}
+
+size_t MaxCompressedLength(size_t source_len) {
+ // Compressed data can be defined as:
+ // compressed := item* literal*
+ // item := literal* copy
+ //
+ // The trailing literal sequence has a space blowup of at most 62/60
+ // since a literal of length 60 needs one tag byte + one extra byte
+ // for length information.
+ //
+ // Item blowup is trickier to measure. Suppose the "copy" op copies
+ // 4 bytes of data. Because of a special check in the encoding code,
+ // we produce a 4-byte copy only if the offset is < 65536. Therefore
+ // the copy op takes 3 bytes to encode, and this type of item leads
+ // to at most the 62/60 blowup for representing literals.
+ //
+ // Suppose the "copy" op copies 5 bytes of data. If the offset is big
+ // enough, it will take 5 bytes to encode the copy op. Therefore the
+ // worst case here is a one-byte literal followed by a five-byte copy.
+ // I.e., 6 bytes of input turn into 7 bytes of "compressed" data.
+ //
+ // This last factor dominates the blowup, so the final estimate is:
+ return 32 + source_len + source_len/6;
+}
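+// Concrete instance of the bound above (for illustration): a 1 MB input
+// gives 32 + 1048576 + 1048576/6 == 1223370 bytes, i.e. roughly 17%
+// worst-case expansion plus the fixed 32-byte slack.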
+
+enum {
+ LITERAL = 0,
+ COPY_1_BYTE_OFFSET = 1, // 3 bit length + 3 bits of offset in opcode
+ COPY_2_BYTE_OFFSET = 2,
+ COPY_4_BYTE_OFFSET = 3
+};
+
+// Copy "len" bytes from "src" to "op", one byte at a time. Used for
+// handling COPY operations where the input and output regions may
+// overlap. For example, suppose:
+// src == "ab"
+// op == src + 2
+// len == 20
+// After IncrementalCopy(src, op, len), the result will have
+// eleven copies of "ab"
+// ababababababababababab
+// Note that this does not match the semantics of either memcpy()
+// or memmove().
+static inline void IncrementalCopy(const char* src, char* op, int len) {
+ DCHECK_GT(len, 0);
+ do {
+ *op++ = *src++;
+ } while (--len > 0);
+}
+
+// Equivalent to IncrementalCopy except that it can write up to ten extra
+// bytes after the end of the copy, and that it is faster.
+//
+// The main part of this loop is a simple copy of eight bytes at a time until
+// we've copied (at least) the requested amount of bytes. However, if op and
+// src are less than eight bytes apart (indicating a repeating pattern of
+// length < 8), we first need to expand the pattern in order to get the correct
+// results. For instance, if the buffer looks like this, with the eight-byte
+// <src> and <op> patterns marked as intervals:
+//
+// abxxxxxxxxxxxx
+// [------] src
+// [------] op
+//
+// a single eight-byte copy from <src> to <op> will repeat the pattern once,
+// after which we can move <op> two bytes without moving <src>:
+//
+// ababxxxxxxxxxx
+// [------] src
+// [------] op
+//
+// and repeat the exercise until the two no longer overlap.
+//
+// This allows us to do very well in the special case of one single byte
+// repeated many times, without taking a big hit for more general cases.
+//
+// The worst case of extra writing past the end of the match occurs when
+// op - src == 1 and len == 1; the last copy will read from byte positions
+// [0..7] and write to [4..11], whereas it was only supposed to write to
+// position 1. Thus, ten excess bytes.
+
+namespace {
+
+const int kMaxIncrementCopyOverflow = 10;
+
+} // namespace
+
+static inline void IncrementalCopyFastPath(const char* src, char* op, int len) {
+ while (op - src < 8) {
+ UNALIGNED_STORE64(op, UNALIGNED_LOAD64(src));
+ len -= op - src;
+ op += op - src;
+ }
+ while (len > 0) {
+ UNALIGNED_STORE64(op, UNALIGNED_LOAD64(src));
+ src += 8;
+ op += 8;
+ len -= 8;
+ }
+}
+
+static inline char* EmitLiteral(char* op,
+ const char* literal,
+ int len,
+ bool allow_fast_path) {
+ int n = len - 1; // Zero-length literals are disallowed
+ if (n < 60) {
+ // Fits in tag byte
+ *op++ = LITERAL | (n << 2);
+
+ // The vast majority of copies are below 16 bytes, for which a
+ // call to memcpy is overkill. This fast path can sometimes
+ // copy up to 15 bytes too much, but that is okay in the
+ // main loop, since we have a bit to go on for both sides:
+ //
+ // - The input will always have kInputMarginBytes = 15 extra
+ // available bytes, as long as we're in the main loop, and
+ // if not, allow_fast_path = false.
+ // - The output will always have 32 spare bytes (see
+ // MaxCompressedLength).
+ if (allow_fast_path && len <= 16) {
+ UNALIGNED_STORE64(op, UNALIGNED_LOAD64(literal));
+ UNALIGNED_STORE64(op + 8, UNALIGNED_LOAD64(literal + 8));
+ return op + len;
+ }
+ } else {
+ // Encode in upcoming bytes
+ char* base = op;
+ int count = 0;
+ op++;
+ while (n > 0) {
+ *op++ = n & 0xff;
+ n >>= 8;
+ count++;
+ }
+ assert(count >= 1);
+ assert(count <= 4);
+ *base = LITERAL | ((59+count) << 2);
+ }
+ memcpy(op, literal, len);
+ return op + len;
+}
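+// Worked sketch of the two literal encodings above (illustrative values):
+//
+//   EmitLiteral(op, p, /*len=*/5, ...)   -> tag LITERAL | (4 << 2) == 0x10,
+//                                           then the 5 literal bytes.
+//   EmitLiteral(op, p, /*len=*/100, ...) -> n == 99 needs one length byte,
+//                                           so tag LITERAL | (60 << 2) == 0xf0,
+//                                           then 0x63, then the 100 bytes.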
+
+static inline char* EmitCopyLessThan64(char* op, int offset, int len) {
+ DCHECK_LE(len, 64);
+ DCHECK_GE(len, 4);
+ DCHECK_LT(offset, 65536);
+
+ if ((len < 12) && (offset < 2048)) {
+ int len_minus_4 = len - 4;
+ assert(len_minus_4 < 8); // Must fit in 3 bits
+ *op++ = COPY_1_BYTE_OFFSET | ((len_minus_4) << 2) | ((offset >> 8) << 5);
+ *op++ = offset & 0xff;
+ } else {
+ *op++ = COPY_2_BYTE_OFFSET | ((len-1) << 2);
+ LittleEndian::Store16(op, offset);
+ op += 2;
+ }
+ return op;
+}
+
+static inline char* EmitCopy(char* op, int offset, int len) {
+ // Emit 64-byte copies, but keep at least four bytes in reserve so the final copy below always has len >= 4
+ while (len >= 68) {
+ op = EmitCopyLessThan64(op, offset, 64);
+ len -= 64;
+ }
+
+ // Emit an extra 60-byte copy if we have too much data to fit in one copy
+ if (len > 64) {
+ op = EmitCopyLessThan64(op, offset, 60);
+ len -= 60;
+ }
+
+ // Emit remainder
+ op = EmitCopyLessThan64(op, offset, len);
+ return op;
+}
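+// Worked sketch of the copy encodings emitted above (illustrative values):
+//
+//   EmitCopyLessThan64(op, /*offset=*/100, /*len=*/7)
+//     -> len-4 == 3 fits in 3 bits and offset < 2048, so the single tag
+//        byte COPY_1_BYTE_OFFSET | (3 << 2) | ((100 >> 8) << 5) == 0x0d,
+//        followed by the low offset byte 0x64.
+//
+//   EmitCopyLessThan64(op, /*offset=*/5000, /*len=*/40)
+//     -> tag byte COPY_2_BYTE_OFFSET | (39 << 2) == 0x9e, followed by the
+//        16-bit little-endian offset bytes 0x88 0x13.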
+
+
+bool GetUncompressedLength(const char* start, size_t n, size_t* result) {
+ uint32 v = 0;
+ const char* limit = start + n;
+ if (Varint::Parse32WithLimit(start, limit, &v) != NULL) {
+ *result = v;
+ return true;
+ } else {
+ return false;
+ }
+}
+
+namespace internal {
+uint16* WorkingMemory::GetHashTable(size_t input_size, int* table_size) {
+ // Use smaller hash table when input.size() is smaller, since we
+ // fill the table, incurring O(hash table size) overhead for
+ // compression, and if the input is short, we won't need that
+ // many hash table entries anyway.
+ assert(kMaxHashTableSize >= 256);
+ int htsize = 256;
+ while (htsize < kMaxHashTableSize && htsize < input_size) {
+ htsize <<= 1;
+ }
+ CHECK_EQ(0, htsize & (htsize - 1)) << ": must be power of two";
+ CHECK_LE(htsize, kMaxHashTableSize) << ": hash table too large";
+
+ uint16* table;
+ if (htsize <= ARRAYSIZE(small_table_)) {
+ table = small_table_;
+ } else {
+ if (large_table_ == NULL) {
+ large_table_ = new uint16[kMaxHashTableSize];
+ }
+ table = large_table_;
+ }
+
+ *table_size = htsize;
+ memset(table, 0, htsize * sizeof(*table));
+ return table;
+}
+} // end namespace internal
+
+#if defined(_WIN32)
+// signed/unsigned mismatch
+# pragma warning( disable : 4244 )
+#endif
+
+// For 0 <= offset <= 4, GetUint32AtOffset(UNALIGNED_LOAD64(p), offset) will
+// equal UNALIGNED_LOAD32(p + offset). Motivation: On x86-64 hardware we have
+// empirically found that overlapping loads such as
+// UNALIGNED_LOAD32(p) ... UNALIGNED_LOAD32(p+1) ... UNALIGNED_LOAD32(p+2)
+// are slower than UNALIGNED_LOAD64(p) followed by shifts and casts to uint32.
+static inline uint32 GetUint32AtOffset(uint64 v, int offset) {
+ DCHECK(0 <= offset && offset <= 4) << offset;
+ return v >> (LittleEndian::IsLittleEndian() ? 8 * offset : 32 - 8 * offset);
+}
+
+// Flat array compression that does not emit the "uncompressed length"
+// prefix. Compresses "input" string to the "*op" buffer.
+//
+// REQUIRES: "input" is at most "kBlockSize" bytes long.
+// REQUIRES: "op" points to an array of memory that is at least
+// "MaxCompressedLength(input.size())" in size.
+// REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero.
+// REQUIRES: "table_size" is a power of two
+//
+// Returns an "end" pointer into "op" buffer.
+// "end - op" is the compressed size of "input".
+namespace internal {
+char* CompressFragment(const char* const input,
+ const size_t input_size,
+ char* op,
+ uint16* table,
+ const int table_size) {
+ // "ip" is the input pointer, and "op" is the output pointer.
+ const char* ip = input;
+ CHECK_LE(input_size, kBlockSize);
+ CHECK_EQ(table_size & (table_size - 1), 0) << ": table must be power of two";
+ const int shift = 32 - Bits::Log2Floor(table_size);
+ DCHECK_EQ(kuint32max >> shift, table_size - 1);
+ const char* ip_end = input + input_size;
+ const char* base_ip = ip;
+ // Bytes in [next_emit, ip) will be emitted as literal bytes. Or
+ // [next_emit, ip_end) after the main loop.
+ const char* next_emit = ip;
+
+ const int kInputMarginBytes = 15;
+ if (PREDICT_TRUE(input_size >= kInputMarginBytes)) {
+ const char* ip_limit = input + input_size - kInputMarginBytes;
+
+ for (uint32 next_hash = Hash(++ip, shift); ; ) {
+ DCHECK_LT(next_emit, ip);
+ // The body of this loop calls EmitLiteral once and then EmitCopy one or
+ // more times. (The exception is that when we're close to exhausting
+ // the input we goto emit_remainder.)
+ //
+ // In the first iteration of this loop we're just starting, so
+ // there's nothing to copy, so calling EmitLiteral once is
+ // necessary. And we only start a new iteration when the
+ // current iteration has determined that a call to EmitLiteral will
+ // precede the next call to EmitCopy (if any).
+ //
+ // Step 1: Scan forward in the input looking for a 4-byte-long match.
+ // If we get close to exhausting the input then goto emit_remainder.
+ //
+ // Heuristic match skipping: If 32 bytes are scanned with no matches
+ // found, start looking only at every other byte. If 32 more bytes are
+ // scanned, look at every third byte, etc.. When a match is found,
+ // immediately go back to looking at every byte. This is a small loss
+ // (~5% performance, ~0.1% density) for compressible data due to more
+ // bookkeeping, but for non-compressible data (such as JPEG) it's a huge
+ // win since the compressor quickly "realizes" the data is incompressible
+ // and doesn't bother looking for matches everywhere.
+ //
+ // The "skip" variable keeps track of how many bytes there are since the
+ // last match; dividing it by 32 (ie. right-shifting by five) gives the
+ // number of bytes to move ahead for each iteration.
+ uint32 skip = 32;
+
+ const char* next_ip = ip;
+ const char* candidate;
+ do {
+ ip = next_ip;
+ uint32 hash = next_hash;
+ DCHECK_EQ(hash, Hash(ip, shift));
+ uint32 bytes_between_hash_lookups = skip++ >> 5;
+ next_ip = ip + bytes_between_hash_lookups;
+ if (PREDICT_FALSE(next_ip > ip_limit)) {
+ goto emit_remainder;
+ }
+ next_hash = Hash(next_ip, shift);
+ candidate = base_ip + table[hash];
+ DCHECK_GE(candidate, base_ip);
+ DCHECK_LT(candidate, ip);
+
+ table[hash] = ip - base_ip;
+ } while (PREDICT_TRUE(UNALIGNED_LOAD32(ip) !=
+ UNALIGNED_LOAD32(candidate)));
+
+ // Step 2: A 4-byte match has been found. We'll later see if more
+ // than 4 bytes match. But, prior to the match, input
+ // bytes [next_emit, ip) are unmatched. Emit them as "literal bytes."
+ DCHECK_LE(next_emit + 16, ip_end);
+ op = EmitLiteral(op, next_emit, ip - next_emit, true);
+
+ // Step 3: Call EmitCopy, and then see if another EmitCopy could
+ // be our next move. Repeat until we find no match for the
+ // input immediately after what was consumed by the last EmitCopy call.
+ //
+ // If we exit this loop normally then we need to call EmitLiteral next,
+ // though we don't yet know how big the literal will be. We handle that
+ // by proceeding to the next iteration of the main loop. We also can exit
+ // this loop via goto if we get close to exhausting the input.
+ uint64 input_bytes = 0;
+ uint32 candidate_bytes = 0;
+
+ do {
+ // We have a 4-byte match at ip, and no need to emit any
+ // "literal bytes" prior to ip.
+ const char* base = ip;
+ int matched = 4 + FindMatchLength(candidate + 4, ip + 4, ip_end);
+ ip += matched;
+ int offset = base - candidate;
+ DCHECK_EQ(0, memcmp(base, candidate, matched));
+ op = EmitCopy(op, offset, matched);
+ // We could immediately start working at ip now, but to improve
+ // compression we first update table[Hash(ip - 1, ...)].
+ const char* insert_tail = ip - 1;
+ next_emit = ip;
+ if (PREDICT_FALSE(ip >= ip_limit)) {
+ goto emit_remainder;
+ }
+ input_bytes = UNALIGNED_LOAD64(insert_tail);
+ uint32 prev_hash = HashBytes(GetUint32AtOffset(input_bytes, 0), shift);
+ table[prev_hash] = ip - base_ip - 1;
+ uint32 cur_hash = HashBytes(GetUint32AtOffset(input_bytes, 1), shift);
+ candidate = base_ip + table[cur_hash];
+ candidate_bytes = UNALIGNED_LOAD32(candidate);
+ table[cur_hash] = ip - base_ip;
+ } while (GetUint32AtOffset(input_bytes, 1) == candidate_bytes);
+
+ next_hash = HashBytes(GetUint32AtOffset(input_bytes, 2), shift);
+ ++ip;
+ }
+ }
+
+ emit_remainder:
+ // Emit the remaining bytes as a literal
+ if (next_emit < ip_end) {
+ op = EmitLiteral(op, next_emit, ip_end - next_emit, false);
+ }
+
+ return op;
+}
+} // end namespace internal
+
+// Signature of output types needed by decompression code.
+// The decompression code is templatized on a type that obeys this
+// signature so that we do not pay virtual function call overhead in
+// the middle of a tight decompression loop.
+//
+// class DecompressionWriter {
+// public:
+// // Called before decompression
+// void SetExpectedLength(size_t length);
+//
+// // Called after decompression
+// bool CheckLength() const;
+//
+// // Called repeatedly during decompression
+// bool Append(const char* ip, uint32 length, bool allow_fast_path);
+// bool AppendFromSelf(uint32 offset, uint32 length);
+// };
+//
+// "allow_fast_path" is a parameter that says if there is at least 16
+// readable bytes in "ip". It is currently only used by SnappyArrayWriter.
+
+// -----------------------------------------------------------------------
+// Lookup table for decompression code. Generated by ComputeTable() below.
+// -----------------------------------------------------------------------
+
+// Mapping from i in range [0,4] to a mask to extract the bottom 8*i bits
+static const uint32 wordmask[] = {
+ 0u, 0xffu, 0xffffu, 0xffffffu, 0xffffffffu
+};
+
+// Data stored per entry in lookup table:
+// Range Bits-used Description
+// ------------------------------------
+// 1..64 0..7 Literal/copy length encoded in opcode byte
+// 0..7 8..10 Copy offset encoded in opcode byte / 256
+// 0..4 11..13 Extra bytes after opcode
+//
+// We use eight bits for the length even though 7 would have sufficed,
+// for efficiency reasons:
+// (1) Extracting a byte is faster than a bit-field
+// (2) It properly aligns copy offset so we do not need a <<8
+static const uint16 char_table[256] = {
+ 0x0001, 0x0804, 0x1001, 0x2001, 0x0002, 0x0805, 0x1002, 0x2002,
+ 0x0003, 0x0806, 0x1003, 0x2003, 0x0004, 0x0807, 0x1004, 0x2004,
+ 0x0005, 0x0808, 0x1005, 0x2005, 0x0006, 0x0809, 0x1006, 0x2006,
+ 0x0007, 0x080a, 0x1007, 0x2007, 0x0008, 0x080b, 0x1008, 0x2008,
+ 0x0009, 0x0904, 0x1009, 0x2009, 0x000a, 0x0905, 0x100a, 0x200a,
+ 0x000b, 0x0906, 0x100b, 0x200b, 0x000c, 0x0907, 0x100c, 0x200c,
+ 0x000d, 0x0908, 0x100d, 0x200d, 0x000e, 0x0909, 0x100e, 0x200e,
+ 0x000f, 0x090a, 0x100f, 0x200f, 0x0010, 0x090b, 0x1010, 0x2010,
+ 0x0011, 0x0a04, 0x1011, 0x2011, 0x0012, 0x0a05, 0x1012, 0x2012,
+ 0x0013, 0x0a06, 0x1013, 0x2013, 0x0014, 0x0a07, 0x1014, 0x2014,
+ 0x0015, 0x0a08, 0x1015, 0x2015, 0x0016, 0x0a09, 0x1016, 0x2016,
+ 0x0017, 0x0a0a, 0x1017, 0x2017, 0x0018, 0x0a0b, 0x1018, 0x2018,
+ 0x0019, 0x0b04, 0x1019, 0x2019, 0x001a, 0x0b05, 0x101a, 0x201a,
+ 0x001b, 0x0b06, 0x101b, 0x201b, 0x001c, 0x0b07, 0x101c, 0x201c,
+ 0x001d, 0x0b08, 0x101d, 0x201d, 0x001e, 0x0b09, 0x101e, 0x201e,
+ 0x001f, 0x0b0a, 0x101f, 0x201f, 0x0020, 0x0b0b, 0x1020, 0x2020,
+ 0x0021, 0x0c04, 0x1021, 0x2021, 0x0022, 0x0c05, 0x1022, 0x2022,
+ 0x0023, 0x0c06, 0x1023, 0x2023, 0x0024, 0x0c07, 0x1024, 0x2024,
+ 0x0025, 0x0c08, 0x1025, 0x2025, 0x0026, 0x0c09, 0x1026, 0x2026,
+ 0x0027, 0x0c0a, 0x1027, 0x2027, 0x0028, 0x0c0b, 0x1028, 0x2028,
+ 0x0029, 0x0d04, 0x1029, 0x2029, 0x002a, 0x0d05, 0x102a, 0x202a,
+ 0x002b, 0x0d06, 0x102b, 0x202b, 0x002c, 0x0d07, 0x102c, 0x202c,
+ 0x002d, 0x0d08, 0x102d, 0x202d, 0x002e, 0x0d09, 0x102e, 0x202e,
+ 0x002f, 0x0d0a, 0x102f, 0x202f, 0x0030, 0x0d0b, 0x1030, 0x2030,
+ 0x0031, 0x0e04, 0x1031, 0x2031, 0x0032, 0x0e05, 0x1032, 0x2032,
+ 0x0033, 0x0e06, 0x1033, 0x2033, 0x0034, 0x0e07, 0x1034, 0x2034,
+ 0x0035, 0x0e08, 0x1035, 0x2035, 0x0036, 0x0e09, 0x1036, 0x2036,
+ 0x0037, 0x0e0a, 0x1037, 0x2037, 0x0038, 0x0e0b, 0x1038, 0x2038,
+ 0x0039, 0x0f04, 0x1039, 0x2039, 0x003a, 0x0f05, 0x103a, 0x203a,
+ 0x003b, 0x0f06, 0x103b, 0x203b, 0x003c, 0x0f07, 0x103c, 0x203c,
+ 0x0801, 0x0f08, 0x103d, 0x203d, 0x1001, 0x0f09, 0x103e, 0x203e,
+ 0x1801, 0x0f0a, 0x103f, 0x203f, 0x2001, 0x0f0b, 0x1040, 0x2040
+};
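+// Example reading of one entry (illustration): opcode byte 0x01 is
+// COPY_1_BYTE_OFFSET with length 4 and zero offset high bits, so
+// char_table[0x01] == 0x0804: length 4 in bits 0..7, copy_offset/256 == 0
+// in bits 8..10, and one extra byte after the opcode in bits 11..13.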
+
+// In debug mode, allow optional computation of the table at startup.
+// Also, check that the decompression table is correct.
+#ifndef NDEBUG
+DEFINE_bool(snappy_dump_decompression_table, false,
+ "If true, we print the decompression table at startup.");
+
+static uint16 MakeEntry(unsigned int extra,
+ unsigned int len,
+ unsigned int copy_offset) {
+ // Check that all of the fields fit within the allocated space
+ DCHECK_EQ(extra, extra & 0x7); // At most 3 bits
+ DCHECK_EQ(copy_offset, copy_offset & 0x7); // At most 3 bits
+ DCHECK_EQ(len, len & 0x7f); // At most 7 bits
+ return len | (copy_offset << 8) | (extra << 11);
+}
+
+static void ComputeTable() {
+ uint16 dst[256];
+
+ // Fill every slot with an invalid entry to detect missing initialization
+ int assigned = 0;
+ for (int i = 0; i < 256; i++) {
+ dst[i] = 0xffff;
+ }
+
+ // Small LITERAL entries. We store (len-1) in the top 6 bits.
+ for (unsigned int len = 1; len <= 60; len++) {
+ dst[LITERAL | ((len-1) << 2)] = MakeEntry(0, len, 0);
+ assigned++;
+ }
+
+ // Large LITERAL entries. We use 60..63 in the high 6 bits to
+ // encode the number of bytes of length info that follow the opcode.
+ for (unsigned int extra_bytes = 1; extra_bytes <= 4; extra_bytes++) {
+ // We set the length field in the lookup table to 1 because extra
+ // bytes encode len-1.
+ dst[LITERAL | ((extra_bytes+59) << 2)] = MakeEntry(extra_bytes, 1, 0);
+ assigned++;
+ }
+
+ // COPY_1_BYTE_OFFSET.
+ //
+ // The tag byte in the compressed data stores len-4 in 3 bits, and
+ // offset/256 in the top 3 bits. offset%256 is stored in the next byte.
+ //
+ // This format is used for length in range [4..11] and offset in
+ // range [0..2047]
+ for (unsigned int len = 4; len < 12; len++) {
+ for (unsigned int offset = 0; offset < 2048; offset += 256) {
+ dst[COPY_1_BYTE_OFFSET | ((len-4)<<2) | ((offset>>8)<<5)] =
+ MakeEntry(1, len, offset>>8);
+ assigned++;
+ }
+ }
+
+ // COPY_2_BYTE_OFFSET.
+ // Tag contains len-1 in top 6 bits, and offset in next two bytes.
+ for (unsigned int len = 1; len <= 64; len++) {
+ dst[COPY_2_BYTE_OFFSET | ((len-1)<<2)] = MakeEntry(2, len, 0);
+ assigned++;
+ }
+
+ // COPY_4_BYTE_OFFSET.
+ // Tag contains len-1 in top 6 bits, and offset in next four bytes.
+ for (unsigned int len = 1; len <= 64; len++) {
+ dst[COPY_4_BYTE_OFFSET | ((len-1)<<2)] = MakeEntry(4, len, 0);
+ assigned++;
+ }
+
+ // Check that each entry was initialized exactly once.
+ CHECK_EQ(assigned, 256);
+ for (int i = 0; i < 256; i++) {
+ CHECK_NE(dst[i], 0xffff);
+ }
+
+ if (FLAGS_snappy_dump_decompression_table) {
+ printf("static const uint16 char_table[256] = {\n ");
+ for (int i = 0; i < 256; i++) {
+ printf("0x%04x%s",
+ dst[i],
+ ((i == 255) ? "\n" : (((i%8) == 7) ? ",\n " : ", ")));
+ }
+ printf("};\n");
+ }
+
+ // Check that the computed table matches the recorded table
+ for (int i = 0; i < 256; i++) {
+ CHECK_EQ(dst[i], char_table[i]);
+ }
+}
+REGISTER_MODULE_INITIALIZER(snappy, ComputeTable());
+#endif /* !NDEBUG */
+
+// Helper class for decompression
+class SnappyDecompressor {
+ private:
+ Source* reader_; // Underlying source of bytes to decompress
+ const char* ip_; // Points to next buffered byte
+ const char* ip_limit_; // Points just past buffered bytes
+ uint32 peeked_; // Bytes peeked from reader (need to skip)
+ bool eof_; // Hit end of input without an error?
+ char scratch_[5]; // Temporary buffer used by RefillTag()
+
+ // Ensure that all of the tag metadata for the next tag is available
+ // in [ip_..ip_limit_-1]. Also ensures that [ip,ip+4] is readable even
+ // if (ip_limit_ - ip_ < 5).
+ //
+ // Returns true on success, false on error or end of input.
+ bool RefillTag();
+
+ public:
+ explicit SnappyDecompressor(Source* reader)
+ : reader_(reader),
+ ip_(NULL),
+ ip_limit_(NULL),
+ peeked_(0),
+ eof_(false) {
+ }
+
+ ~SnappyDecompressor() {
+ // Advance past any bytes we peeked at from the reader
+ reader_->Skip(peeked_);
+ }
+
+ // Returns true iff we have hit the end of the input without an error.
+ bool eof() const {
+ return eof_;
+ }
+
+ // Read the uncompressed length stored at the start of the compressed data.
+ // On success, stores the length in *result and returns true.
+ // On failure, returns false.
+ bool ReadUncompressedLength(uint32* result) {
+ DCHECK(ip_ == NULL); // Must not have read anything yet
+ // Length is encoded in 1..5 bytes
+ *result = 0;
+ uint32 shift = 0;
+ while (true) {
+ if (shift >= 32) return false;
+ size_t n;
+ const char* ip = reader_->Peek(&n);
+ if (n == 0) return false;
+ const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip));
+ reader_->Skip(1);
+ *result |= static_cast<uint32>(c & 0x7f) << shift;
+ if (c < 128) {
+ break;
+ }
+ shift += 7;
+ }
+ return true;
+ }
+
+ // Process all tags found in the input, writing the output through *writer.
+ // Returns when the input is exhausted or when the writer reports an error.
+ template <class Writer>
+ void DecompressAllTags(Writer* writer) {
+ const char* ip = ip_;
+ for ( ;; ) {
+ if (ip_limit_ - ip < 5) {
+ ip_ = ip;
+ if (!RefillTag()) return;
+ ip = ip_;
+ }
+
+ const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip++));
+ const uint32 entry = char_table[c];
+ const uint32 trailer = LittleEndian::Load32(ip) & wordmask[entry >> 11];
+ ip += entry >> 11;
+ const uint32 length = entry & 0xff;
+
+ if ((c & 0x3) == LITERAL) {
+ uint32 literal_length = length + trailer;
+ uint32 avail = ip_limit_ - ip;
+ while (avail < literal_length) {
+ bool allow_fast_path = (avail >= 16);
+ if (!writer->Append(ip, avail, allow_fast_path)) return;
+ literal_length -= avail;
+ reader_->Skip(peeked_);
+ size_t n;
+ ip = reader_->Peek(&n);
+ avail = n;
+ peeked_ = avail;
+ if (avail == 0) return; // Premature end of input
+ ip_limit_ = ip + avail;
+ }
+ bool allow_fast_path = (avail >= 16);
+ if (!writer->Append(ip, literal_length, allow_fast_path)) {
+ return;
+ }
+ ip += literal_length;
+ } else {
+ // copy_offset/256 is encoded in bits 8..10. By just fetching
+ // those bits, we get copy_offset (since the bit-field starts at
+ // bit 8).
+ const uint32 copy_offset = entry & 0x700;
+ if (!writer->AppendFromSelf(copy_offset + trailer, length)) {
+ return;
+ }
+ }
+ }
+ }
+};
+
+bool SnappyDecompressor::RefillTag() {
+ const char* ip = ip_;
+ if (ip == ip_limit_) {
+ // Fetch a new fragment from the reader
+ reader_->Skip(peeked_); // All peeked bytes are used up
+ size_t n;
+ ip = reader_->Peek(&n);
+ peeked_ = n;
+ if (n == 0) {
+ eof_ = true;
+ return false;
+ }
+ ip_limit_ = ip + n;
+ }
+
+ // Read the tag character
+ DCHECK_LT(ip, ip_limit_);
+ const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip));
+ const uint32 entry = char_table[c];
+ const uint32 needed = (entry >> 11) + 1; // +1 byte for 'c'
+ DCHECK_LE(needed, sizeof(scratch_));
+
+ // Read more bytes from reader if needed
+ uint32 nbuf = ip_limit_ - ip;
+ if (nbuf < needed) {
+ // Stitch together bytes from ip and reader to form the word
+ // contents. We store the needed bytes in "scratch_". They
+ // will be consumed immediately by the caller since we do not
+ // read more than we need.
+ memmove(scratch_, ip, nbuf);
+ reader_->Skip(peeked_); // All peeked bytes are used up
+ peeked_ = 0;
+ while (nbuf < needed) {
+ size_t length;
+ const char* src = reader_->Peek(&length);
+ if (length == 0) return false;
+ uint32 to_add = min<uint32>(needed - nbuf, length);
+ memcpy(scratch_ + nbuf, src, to_add);
+ nbuf += to_add;
+ reader_->Skip(to_add);
+ }
+ DCHECK_EQ(nbuf, needed);
+ ip_ = scratch_;
+ ip_limit_ = scratch_ + needed;
+ } else if (nbuf < 5) {
+ // Have enough bytes, but move into scratch_ so that we do not
+ // read past end of input
+ memmove(scratch_, ip, nbuf);
+ reader_->Skip(peeked_); // All peeked bytes are used up
+ peeked_ = 0;
+ ip_ = scratch_;
+ ip_limit_ = scratch_ + nbuf;
+ } else {
+ // Pass pointer to buffer returned by reader_.
+ ip_ = ip;
+ }
+ return true;
+}
+
+template <typename Writer>
+static bool InternalUncompress(Source* r,
+ Writer* writer,
+ uint32 max_len) {
+ // Read the uncompressed length from the front of the compressed input
+ SnappyDecompressor decompressor(r);
+ uint32 uncompressed_len = 0;
+ if (!decompressor.ReadUncompressedLength(&uncompressed_len)) return false;
+ // Protect against possible DoS attack
+ if (static_cast<uint64>(uncompressed_len) > max_len) {
+ return false;
+ }
+
+ writer->SetExpectedLength(uncompressed_len);
+
+ // Process the entire input
+ decompressor.DecompressAllTags(writer);
+ return (decompressor.eof() && writer->CheckLength());
+}
+
+bool GetUncompressedLength(Source* source, uint32* result) {
+ SnappyDecompressor decompressor(source);
+ return decompressor.ReadUncompressedLength(result);
+}
+
+size_t Compress(Source* reader, Sink* writer) {
+ size_t written = 0;
+ int N = reader->Available();
+ char ulength[Varint::kMax32];
+ char* p = Varint::Encode32(ulength, N);
+ writer->Append(ulength, p-ulength);
+ written += (p - ulength);
+
+ internal::WorkingMemory wmem;
+ char* scratch = NULL;
+ char* scratch_output = NULL;
+
+ while (N > 0) {
+ // Get next block to compress (without copying if possible)
+ size_t fragment_size;
+ const char* fragment = reader->Peek(&fragment_size);
+ DCHECK_NE(fragment_size, 0) << ": premature end of input";
+ const int num_to_read = min(N, kBlockSize);
+ size_t bytes_read = fragment_size;
+
+ int pending_advance = 0;
+ if (bytes_read >= num_to_read) {
+ // Buffer returned by reader is large enough
+ pending_advance = num_to_read;
+ fragment_size = num_to_read;
+ } else {
+ // Read into scratch buffer
+ if (scratch == NULL) {
+ // If this is the last iteration, we want to allocate N bytes
+ // of space, otherwise the max possible kBlockSize space.
+ // num_to_read contains exactly the correct value
+ scratch = new char[num_to_read];
+ }
+ memcpy(scratch, fragment, bytes_read);
+ reader->Skip(bytes_read);
+
+ while (bytes_read < num_to_read) {
+ fragment = reader->Peek(&fragment_size);
+ size_t n = min<size_t>(fragment_size, num_to_read - bytes_read);
+ memcpy(scratch + bytes_read, fragment, n);
+ bytes_read += n;
+ reader->Skip(n);
+ }
+ DCHECK_EQ(bytes_read, num_to_read);
+ fragment = scratch;
+ fragment_size = num_to_read;
+ }
+ DCHECK_EQ(fragment_size, num_to_read);
+
+ // Get encoding table for compression
+ int table_size;
+ uint16* table = wmem.GetHashTable(num_to_read, &table_size);
+
+ // Compress input_fragment and append to dest
+ const int max_output = MaxCompressedLength(num_to_read);
+
+ // Need a scratch buffer for the output, in case the byte sink doesn't
+ // have room for us directly.
+ if (scratch_output == NULL) {
+ scratch_output = new char[max_output];
+ } else {
+ // Since we encode kBlockSize regions followed by a region
+ // which is <= kBlockSize in length, a previously allocated
+ // scratch_output[] region is big enough for this iteration.
+ }
+ char* dest = writer->GetAppendBuffer(max_output, scratch_output);
+ char* end = internal::CompressFragment(fragment, fragment_size,
+ dest, table, table_size);
+ writer->Append(dest, end - dest);
+ written += (end - dest);
+
+ N -= num_to_read;
+ reader->Skip(pending_advance);
+ }
+
+ delete[] scratch;
+ delete[] scratch_output;
+
+ return written;
+}
+
+// -----------------------------------------------------------------------
+// Flat array interfaces
+// -----------------------------------------------------------------------
+
+// A type that writes to a flat array.
+// Note that this is not a "ByteSink", but a type that matches the
+// Writer template argument to SnappyDecompressor::DecompressAllTags().
+class SnappyArrayWriter {
+ private:
+ char* base_;
+ char* op_;
+ char* op_limit_;
+
+ public:
+ inline explicit SnappyArrayWriter(char* dst)
+ : base_(dst),
+ op_(dst) {
+ }
+
+ inline void SetExpectedLength(size_t len) {
+ op_limit_ = op_ + len;
+ }
+
+ inline bool CheckLength() const {
+ return op_ == op_limit_;
+ }
+
+ inline bool Append(const char* ip, uint32 len, bool allow_fast_path) {
+ char* op = op_;
+ const int space_left = op_limit_ - op;
+ if (allow_fast_path && len <= 16 && space_left >= 16) {
+ // Fast path, used for the majority (about 90%) of dynamic invocations.
+ UNALIGNED_STORE64(op, UNALIGNED_LOAD64(ip));
+ UNALIGNED_STORE64(op + 8, UNALIGNED_LOAD64(ip + 8));
+ } else {
+ if (space_left < len) {
+ return false;
+ }
+ memcpy(op, ip, len);
+ }
+ op_ = op + len;
+ return true;
+ }
+
+ inline bool AppendFromSelf(uint32 offset, uint32 len) {
+ char* op = op_;
+ const int space_left = op_limit_ - op;
+
+ if (op - base_ <= offset - 1u) { // -1u catches offset==0
+ return false;
+ }
+ if (len <= 16 && offset >= 8 && space_left >= 16) {
+ // Fast path, used for the majority (70-80%) of dynamic invocations.
+ UNALIGNED_STORE64(op, UNALIGNED_LOAD64(op - offset));
+ UNALIGNED_STORE64(op + 8, UNALIGNED_LOAD64(op - offset + 8));
+ } else {
+ if (space_left >= len + kMaxIncrementCopyOverflow) {
+ IncrementalCopyFastPath(op - offset, op, len);
+ } else {
+ if (space_left < len) {
+ return false;
+ }
+ IncrementalCopy(op - offset, op, len);
+ }
+ }
+
+ op_ = op + len;
+ return true;
+ }
+};
+
+bool RawUncompress(const char* compressed, size_t n, char* uncompressed) {
+ ByteArraySource reader(compressed, n);
+ return RawUncompress(&reader, uncompressed);
+}
+
+bool RawUncompress(Source* compressed, char* uncompressed) {
+ SnappyArrayWriter output(uncompressed);
+ return InternalUncompress(compressed, &output, kuint32max);
+}
+
+bool Uncompress(const char* compressed, size_t n, string* uncompressed) {
+ size_t ulength;
+ if (!GetUncompressedLength(compressed, n, &ulength)) {
+ return false;
+ }
+ // Protect against possible DoS attack
+ if ((static_cast<uint64>(ulength) + uncompressed->size()) >
+ uncompressed->max_size()) {
+ return false;
+ }
+ STLStringResizeUninitialized(uncompressed, ulength);
+ return RawUncompress(compressed, n, string_as_array(uncompressed));
+}
+
+
+// A Writer that drops everything on the floor and just does validation
+class SnappyDecompressionValidator {
+ private:
+ size_t expected_;
+ size_t produced_;
+
+ public:
+ inline SnappyDecompressionValidator() : produced_(0) { }
+ inline void SetExpectedLength(size_t len) {
+ expected_ = len;
+ }
+ inline bool CheckLength() const {
+ return expected_ == produced_;
+ }
+ inline bool Append(const char* ip, uint32 len, bool allow_fast_path) {
+ produced_ += len;
+ return produced_ <= expected_;
+ }
+ inline bool AppendFromSelf(uint32 offset, uint32 len) {
+ if (produced_ <= offset - 1u) return false; // -1u catches offset==0
+ produced_ += len;
+ return produced_ <= expected_;
+ }
+};
+
+bool IsValidCompressedBuffer(const char* compressed, size_t n) {
+ ByteArraySource reader(compressed, n);
+ SnappyDecompressionValidator writer;
+ return InternalUncompress(&reader, &writer, kuint32max);
+}
+
+void RawCompress(const char* input,
+ size_t input_length,
+ char* compressed,
+ size_t* compressed_length) {
+ ByteArraySource reader(input, input_length);
+ UncheckedByteArraySink writer(compressed);
+ Compress(&reader, &writer);
+
+ // Compute how many bytes were added
+ *compressed_length = (writer.CurrentDestination() - compressed);
+}
+
+size_t Compress(const char* input, size_t input_length, string* compressed) {
+ // Pre-grow the buffer to the max length of the compressed output
+ compressed->resize(MaxCompressedLength(input_length));
+
+ size_t compressed_length;
+ RawCompress(input, input_length, string_as_array(compressed),
+ &compressed_length);
+ compressed->resize(compressed_length);
+ return compressed_length;
+}
+
+
+} // end namespace snappy
+
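SnappyArrayWriter and SnappyDecompressionValidator above both satisfy the informal Writer contract that DecompressAllTags drives: SetExpectedLength/CheckLength plus Append for literal bytes and AppendFromSelf for back-references. A minimal sketch of that contract, using a hypothetical byte-counting writer that is not part of this patch:

    // Sketch only: a hypothetical Writer that tallies output bytes, mirroring the
    // interface that SnappyArrayWriter and SnappyDecompressionValidator implement.
    #include <cstddef>

    class CountingWriter {
      private:
        size_t expected_;
        size_t produced_;
      public:
        CountingWriter() : expected_(0), produced_(0) { }
        void SetExpectedLength(size_t len) { expected_ = len; }      // from the stream header
        bool CheckLength() const { return produced_ == expected_; }  // all bytes accounted for?
        // Literal bytes copied straight from the input stream.
        bool Append(const char* /*ip*/, unsigned len, bool /*allow_fast_path*/) {
            produced_ += len;
            return produced_ <= expected_;
        }
        // Back-reference: copy 'len' bytes starting 'offset' bytes behind the output cursor.
        bool AppendFromSelf(unsigned offset, unsigned len) {
            if (produced_ <= offset - 1u) return false;  // -1u catches offset == 0
            produced_ += len;
            return produced_ <= expected_;
        }
    };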
diff --git a/third_party/snappy/snappy.h b/third_party/snappy/snappy.h
new file mode 100755
index 0000000..8d6ef22
--- /dev/null
+++ b/third_party/snappy/snappy.h
@@ -0,0 +1,155 @@
+// Copyright 2005 and onwards Google Inc.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// A light-weight compression algorithm. It is designed for speed of
+// compression and decompression, rather than for the utmost in space
+// savings.
+//
+// For getting better compression ratios when you are compressing data
+// with long repeated sequences or compressing data that is similar to
+// other data, while still compressing fast, you might look at first
+// using BMDiff and then compressing the output of BMDiff with
+// Snappy.
+
+#ifndef UTIL_SNAPPY_SNAPPY_H__
+#define UTIL_SNAPPY_SNAPPY_H__
+
+#include <stddef.h>
+#include <string>
+
+#include "snappy-stubs-public.h"
+
+namespace snappy {
+ class Source;
+ class Sink;
+
+ // ------------------------------------------------------------------------
+ // Generic compression/decompression routines.
+ // ------------------------------------------------------------------------
+
+ // Compress the bytes read from "*source" and append to "*sink". Return the
+ // number of bytes written.
+ size_t Compress(Source* source, Sink* sink);
+
+ bool GetUncompressedLength(Source* source, uint32* result);
+
+ // ------------------------------------------------------------------------
+ // Higher-level string based routines (should be sufficient for most users)
+ // ------------------------------------------------------------------------
+
+ // Sets "*output" to the compressed version of "input[0,input_length-1]".
+ // Original contents of *output are lost.
+ //
+ // REQUIRES: "input[]" is not an alias of "*output".
+ size_t Compress(const char* input, size_t input_length, string* output);
+
+ // Decompresses "compressed[0,compressed_length-1]" to "*uncompressed".
+ // Original contents of "*uncompressed" are lost.
+ //
+ // REQUIRES: "compressed[]" is not an alias of "*uncompressed".
+ //
+ // returns false if the message is corrupted and could not be decompressed
+ bool Uncompress(const char* compressed, size_t compressed_length,
+ string* uncompressed);
+
+
+ // ------------------------------------------------------------------------
+ // Lower-level character array based routines. May be useful for
+ // efficiency reasons in certain circumstances.
+ // ------------------------------------------------------------------------
+
+ // REQUIRES: "compressed" must point to an area of memory that is at
+ // least "MaxCompressedLength(input_length)" bytes in length.
+ //
+ // Takes the data stored in "input[0..input_length-1]" and stores
+ // it in the array pointed to by "compressed".
+ //
+ // "*compressed_length" is set to the length of the compressed output.
+ //
+ // Example:
+ // char* output = new char[snappy::MaxCompressedLength(input_length)];
+ // size_t output_length;
+ // RawCompress(input, input_length, output, &output_length);
+ // ... Process(output, output_length) ...
+ // delete [] output;
+ void RawCompress(const char* input,
+ size_t input_length,
+ char* compressed,
+ size_t* compressed_length);
+
+ // Given data in "compressed[0..compressed_length-1]" generated by
+ // calling the Snappy::Compress routine, this routine
+ // stores the uncompressed data to
+ // uncompressed[0..GetUncompressedLength(compressed)-1]
+ // returns false if the message is corrupted and could not be decompressed
+ bool RawUncompress(const char* compressed, size_t compressed_length,
+ char* uncompressed);
+
+ // Given data from the byte source 'compressed' generated by calling
+ // the Snappy::Compress routine, this routine stores the uncompressed
+ // data to
+ // uncompressed[0..GetUncompressedLength(compressed,compressed_length)-1]
+ // returns false if the message is corrupted and could not be decompressed
+ bool RawUncompress(Source* compressed, char* uncompressed);
+
+ // Returns the maximal size of the compressed representation of
+ // input data that is "source_bytes" bytes in length.
+ size_t MaxCompressedLength(size_t source_bytes);
+
+ // REQUIRES: "compressed[]" was produced by RawCompress() or Compress()
+ // Returns true and stores the length of the uncompressed data in
+ // *result normally. Returns false on parsing error.
+ // This operation takes O(1) time.
+ bool GetUncompressedLength(const char* compressed, size_t compressed_length,
+ size_t* result);
+
+ // Returns true iff the contents of "compressed[]" can be uncompressed
+ // successfully. Does not return the uncompressed data. Takes
+ // time proportional to compressed_length, but is usually at least
+ // a factor of four faster than actual decompression.
+ bool IsValidCompressedBuffer(const char* compressed,
+ size_t compressed_length);
+
+ // *** DO NOT CHANGE THE VALUE OF kBlockSize ***
+ //
+ // New Compression code chops up the input into blocks of at most
+ // the following size. This ensures that back-references in the
+ // output never cross kBlockSize block boundaries. This can be
+ // helpful in implementing blocked decompression. However the
+ // decompression code should not rely on this guarantee since older
+ // compression code may not obey it.
+ static const int kBlockLog = 15;
+ static const int kBlockSize = 1 << kBlockLog;
+
+ static const int kMaxHashTableBits = 14;
+ static const int kMaxHashTableSize = 1 << kMaxHashTableBits;
+
+} // end namespace snappy
+
+
+#endif // UTIL_SNAPPY_SNAPPY_H__
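A usage sketch of the string-based API declared above (Compress, Uncompress, GetUncompressedLength, IsValidCompressedBuffer); it assumes snappy.h is on the include path and the snappy objects are linked in:

    // Sketch: round-tripping a buffer through the string-based snappy API.
    #include <cassert>
    #include <iostream>
    #include <string>
    #include "snappy.h"

    int main() {
        std::string input(10000, 'x');            // highly repetitive, compresses well
        std::string compressed;
        size_t clen = snappy::Compress(input.data(), input.size(), &compressed);
        std::cout << input.size() << " -> " << clen << " bytes" << std::endl;

        // Cheap validation and O(1) length query, no output allocation needed.
        assert(snappy::IsValidCompressedBuffer(compressed.data(), compressed.size()));
        size_t ulen = 0;
        assert(snappy::GetUncompressedLength(compressed.data(), compressed.size(), &ulen));
        assert(ulen == input.size());

        std::string restored;
        assert(snappy::Uncompress(compressed.data(), compressed.size(), &restored));
        assert(restored == input);
        return 0;
    }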
diff --git a/tools/bridge.cpp b/tools/bridge.cpp
index 86dea0a..341a1da 100644
--- a/tools/bridge.cpp
+++ b/tools/bridge.cpp
@@ -17,7 +17,8 @@
*/
#include "pch.h"
-#include "../util/message.h"
+#include "../util/net/message.h"
+#include "../util/net/listen.h"
#include "../client/dbclient.h"
#include "../db/dbmessage.h"
@@ -41,7 +42,7 @@ public:
try {
m.reset();
if ( !mp_.recv( m ) ) {
- cout << "end connection " << mp_.farEnd.toString() << endl;
+ cout << "end connection " << mp_.remoteString() << endl;
mp_.shutdown();
break;
}
@@ -87,7 +88,7 @@ set<MessagingPort*> ports;
class MyListener : public Listener {
public:
- MyListener( int port ) : Listener( "", port ) {}
+ MyListener( int port ) : Listener( "bridge" , "", port ) {}
virtual void accepted(MessagingPort *mp) {
ports.insert( mp );
Forwarder f( *mp );
@@ -108,7 +109,7 @@ void cleanup( int sig ) {
void myterminate() {
rawOut( "bridge terminate() called, printing stack:" );
printStackTrace();
- abort();
+ ::abort();
}
void setupSignals() {
diff --git a/tools/dump.cpp b/tools/dump.cpp
index 29553f4..a1690b2 100644
--- a/tools/dump.cpp
+++ b/tools/dump.cpp
@@ -18,6 +18,7 @@
#include "../pch.h"
#include "../client/dbclient.h"
+#include "../db/db.h"
#include "tool.h"
#include <fcntl.h>
@@ -27,6 +28,14 @@ using namespace mongo;
namespace po = boost::program_options;
class Dump : public Tool {
+ class FilePtr : boost::noncopyable {
+ public:
+ /*implicit*/ FilePtr(FILE* f) : _f(f) {}
+ ~FilePtr() { fclose(_f); }
+ operator FILE*() { return _f; }
+ private:
+ FILE* _f;
+ };
public:
Dump() : Tool( "dump" , ALL , "*" , "*" , false ) {
add_options()
@@ -34,15 +43,24 @@ public:
("query,q", po::value<string>() , "json query" )
("oplog", "Use oplog for point-in-time snapshotting" )
("repair", "try to recover a crashed database" )
+ ("forceTableScan", "force a table scan (do not use $snapshot)" )
;
}
// This is a functor that writes a BSONObj to a file
struct Writer {
- Writer(ostream& out, ProgressMeter* m) :_out(out), _m(m) {}
+ Writer(FILE* out, ProgressMeter* m) :_out(out), _m(m) {}
void operator () (const BSONObj& obj) {
- _out.write( obj.objdata() , obj.objsize() );
+ size_t toWrite = obj.objsize();
+ size_t written = 0;
+
+ while (toWrite) {
+ size_t ret = fwrite( obj.objdata()+written, 1, toWrite, _out );
+ uassert(14035, errnoWithPrefix("couldn't write to file"), ret);
+ toWrite -= ret;
+ written += ret;
+ }
// if there's a progress bar, hit it
if (_m) {
@@ -50,21 +68,19 @@ public:
}
}
- ostream& _out;
+ FILE* _out;
ProgressMeter* _m;
};
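The Writer functor above loops on fwrite because a single call may write fewer bytes than requested; the tool uasserts (code 14035) when fwrite returns zero. A standalone sketch of the same short-write loop, with perror/exit standing in for the mongo error helpers:

    // Sketch: write a whole buffer with fwrite, tolerating short writes.
    #include <cstdio>
    #include <cstdlib>

    void writeAll(FILE* out, const char* data, size_t toWrite) {
        size_t written = 0;
        while (toWrite) {
            size_t ret = fwrite(data + written, 1, toWrite, out);
            if (ret == 0) {                 // the tool uasserts here instead
                perror("couldn't write to file");
                exit(1);
            }
            toWrite -= ret;                 // advance by whatever was actually written
            written += ret;
        }
    }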
- void doCollection( const string coll , ostream &out , ProgressMeter *m ) {
- Query q;
- if ( _query.isEmpty() && !hasParam("dbpath"))
- q.snapshot();
- else
- q = _query;
+ void doCollection( const string coll , FILE* out , ProgressMeter *m ) {
+ Query q = _query;
int queryOptions = QueryOption_SlaveOk | QueryOption_NoCursorTimeout;
if (startsWith(coll.c_str(), "local.oplog."))
queryOptions |= QueryOption_OplogReplay;
-
+ else if ( _query.isEmpty() && !hasParam("dbpath") && !hasParam("forceTableScan") )
+ q.snapshot();
+
DBClientBase& connBase = conn(true);
Writer writer(out, m);
@@ -86,21 +102,18 @@ public:
void writeCollectionFile( const string coll , path outputFile ) {
cout << "\t" << coll << " to " << outputFile.string() << endl;
- ofstream out;
- out.open( outputFile.string().c_str() , ios_base::out | ios_base::binary );
- assertStreamGood( 10262 , "couldn't open file" , out );
+ FilePtr f (fopen(outputFile.string().c_str(), "wb"));
+ uassert(10262, errnoWithPrefix("couldn't open file"), f);
ProgressMeter m( conn( true ).count( coll.c_str() , BSONObj() , QueryOption_SlaveOk ) );
- doCollection(coll, out, &m);
+ doCollection(coll, f, &m);
cout << "\t\t " << m.done() << " objects" << endl;
-
- out.close();
}
void writeCollectionStdout( const string coll ) {
- doCollection(coll, cout, NULL);
+ doCollection(coll, stdout, NULL);
}
void go( const string db , const path outdir ) {
@@ -113,10 +126,14 @@ public:
auto_ptr<DBClientCursor> cursor = conn( true ).query( sns.c_str() , Query() , 0 , 0 , 0 , QueryOption_SlaveOk | QueryOption_NoCursorTimeout );
while ( cursor->more() ) {
BSONObj obj = cursor->nextSafe();
- if ( obj.toString().find( ".$" ) != string::npos )
+ const string name = obj.getField( "name" ).valuestr();
+
+ // only skip namespaces containing $ when no specific collection was requested
+ if ( _coll == "*" && name.find( ".$" ) != string::npos ) {
+ log(1) << "\tskipping collection: " << name << endl;
continue;
+ }
- const string name = obj.getField( "name" ).valuestr();
const string filename = name.substr( db.size() + 1 );
if ( _coll != "*" && db + "." + _coll != name && _coll != name )
@@ -139,18 +156,13 @@ public:
return -1;
}
- if ( hasParam( "collection" ) ){
- cout << "repair mode can't work with collection, only on full db" << endl;
- return -1;
- }
-
string dbname = getParam( "db" );
log() << "going to try and recover data from: " << dbname << endl;
return _repair( dbname );
}
- DiskLoc _repairExtent( Database* db , string ns, bool forward , DiskLoc eLoc ){
+ DiskLoc _repairExtent( Database* db , string ns, bool forward , DiskLoc eLoc , Writer& w ){
LogIndentLevel lil;
if ( eLoc.getOfs() <= 0 ){
@@ -170,22 +182,49 @@ public:
LogIndentLevel lil2;
+ set<DiskLoc> seen;
+
DiskLoc loc = forward ? e->firstRecord : e->lastRecord;
while ( ! loc.isNull() ){
+
+ if ( ! seen.insert( loc ).second ) {
+ error() << "infinite loop in extend, seen: " << loc << " before" << endl;
+ break;
+ }
+
if ( loc.getOfs() <= 0 ){
error() << "offset is 0 for record which should be impossible" << endl;
break;
}
- log() << loc << endl;
+ log(1) << loc << endl;
Record* rec = loc.rec();
- log() << loc.obj() << endl;
+ BSONObj obj;
+ try {
+ obj = loc.obj();
+ assert( obj.valid() );
+ LOG(1) << obj << endl;
+ w( obj );
+ }
+ catch ( std::exception& e ) {
+ log() << "found invalid document @ " << loc << " " << e.what() << endl;
+ if ( ! obj.isEmpty() ) {
+ try {
+ BSONElement e = obj.firstElement();
+ stringstream ss;
+ ss << "first element: " << e;
+ log() << ss.str();
+ }
+ catch ( std::exception& ) {
+ }
+ }
+ }
loc = forward ? rec->getNext( loc ) : rec->getPrev( loc );
}
return forward ? e->xnext : e->xprev;
}
- void _repair( Database* db , string ns ){
+ void _repair( Database* db , string ns , path outfile ){
NamespaceDetails * nsd = nsdetails( ns.c_str() );
log() << "nrecords: " << nsd->stats.nrecords
<< " datasize: " << nsd->stats.datasize
@@ -201,36 +240,46 @@ public:
log() << " ERROR fisrtExtent is not valid" << endl;
return;
}
+
+ outfile /= ( ns.substr( ns.find( "." ) + 1 ) + ".bson" );
+ log() << "writing to: " << outfile.string() << endl;
+ FilePtr f (fopen(outfile.string().c_str(), "wb"));
+
+ ProgressMeter m( nsd->stats.nrecords * 2 );
+
+ Writer w( f , &m );
+
try {
log() << "forward extent pass" << endl;
LogIndentLevel lil;
DiskLoc eLoc = nsd->firstExtent;
while ( ! eLoc.isNull() ){
log() << "extent loc: " << eLoc << endl;
- eLoc = _repairExtent( db , ns , true , eLoc );
+ eLoc = _repairExtent( db , ns , true , eLoc , w );
}
}
catch ( DBException& e ){
error() << "forward extent pass failed:" << e.toString() << endl;
}
-
+
try {
log() << "backwards extent pass" << endl;
LogIndentLevel lil;
DiskLoc eLoc = nsd->lastExtent;
while ( ! eLoc.isNull() ){
log() << "extent loc: " << eLoc << endl;
- eLoc = _repairExtent( db , ns , false , eLoc );
+ eLoc = _repairExtent( db , ns , false , eLoc , w );
}
}
catch ( DBException& e ){
error() << "ERROR: backwards extent pass failed:" << e.toString() << endl;
}
+ log() << "\t\t " << m.done() << " objects" << endl;
}
- int _repair( string dbname ){
+ int _repair( string dbname ) {
dblock lk;
Client::Context cx( dbname );
Database * db = cx.db();
@@ -238,16 +287,28 @@ public:
list<string> namespaces;
db->namespaceIndex.getNamespaces( namespaces );
+ path root = getParam( "out" );
+ root /= dbname;
+ create_directories( root );
+
for ( list<string>::iterator i=namespaces.begin(); i!=namespaces.end(); ++i ){
LogIndentLevel lil;
string ns = *i;
+
if ( str::endsWith( ns , ".system.namespaces" ) )
continue;
+
+ if ( str::contains( ns , ".tmp.mr." ) )
+ continue;
+
+ if ( _coll != "*" && ! str::endsWith( ns , _coll ) )
+ continue;
+
log() << "trying to recover: " << ns << endl;
LogIndentLevel lil2;
try {
- _repair( db , ns );
+ _repair( db , ns , root );
}
catch ( DBException& e ){
log() << "ERROR recovering: " << ns << " " << e.toString() << endl;
@@ -318,12 +379,7 @@ public:
}
}
- {
- // TODO: when mongos supports QueryOption_Exaust add a version check (SERVER-2628)
- BSONObj isdbgrid;
- conn("true").simpleCommand("admin", &isdbgrid, "isdbgrid");
- _usingMongos = isdbgrid["isdbgrid"].trueValue();
- }
+ _usingMongos = isMongos();
path root( out );
string db = _db;
diff --git a/tools/export.cpp b/tools/export.cpp
index 0262c4b..c3a5420 100644
--- a/tools/export.cpp
+++ b/tools/export.cpp
@@ -40,10 +40,78 @@ public:
("csv","export to csv instead of json")
("out,o", po::value<string>(), "output file; if not specified, stdout is used")
("jsonArray", "output to a json array rather than one object per line")
+ ("slaveOk,k", po::value<bool>()->default_value(true) , "use secondaries for export if available, default true")
;
_usesstdout = false;
}
+ // Turn every double quote character into two double quote characters
+ // If hasSurroundingQuotes is true, doesn't escape the first and last
+ // characters of the string; if it's false, adds double quote characters
+ // around the whole string.
+ string csvEscape(string str, bool hasSurroundingQuotes = false) {
+ size_t index = hasSurroundingQuotes ? 1 : 0;
+ while (((index = str.find('"', index)) != string::npos)
+ && (index < (hasSurroundingQuotes ? str.size() - 1 : str.size()))) {
+ str.replace(index, 1, "\"\"");
+ index += 2;
+ }
+ return hasSurroundingQuotes ? str : "\"" + str + "\"";
+ }
+
+ // Gets the string representation of a BSON object that can be correctly written to a CSV file
+ string csvString (const BSONElement& object) {
+ const char* binData; // Only used with BinData type
+
+ switch (object.type()) {
+ case MinKey:
+ return "$MinKey";
+ case MaxKey:
+ return "$MaxKey";
+ case NumberInt:
+ case NumberDouble:
+ case NumberLong:
+ case Bool:
+ return object.toString(false);
+ case String:
+ case Symbol:
+ return csvEscape(object.toString(false), true);
+ case Object:
+ return csvEscape(object.jsonString(Strict, false));
+ case Array:
+ return csvEscape(object.jsonString(Strict, false));
+ case BinData:
+ int len;
+ binData = object.binDataClean(len);
+ return toHex(binData, len);
+ case jstOID:
+ return "ObjectID(" + object.OID().toString() + ")"; // OIDs are always 24 bytes
+ case Date:
+ return timeToISOString(object.Date() / 1000);
+ case Timestamp:
+ return csvEscape(object.jsonString(Strict, false));
+ case RegEx:
+ return csvEscape("/" + string(object.regex()) + "/" + string(object.regexFlags()));
+ case Code:
+ return csvEscape(object.toString(false));
+ case CodeWScope:
+ if (string(object.codeWScopeScopeData()) == "") {
+ return csvEscape(object.toString(false));
+ } else {
+ return csvEscape(object.jsonString(Strict, false));
+ }
+ case EOO:
+ case Undefined:
+ case DBRef:
+ case jstNULL:
+ cerr << "Invalid BSON object type for CSV output: " << object.type() << endl;
+ return "";
+ }
+ // Can never get here
+ assert(false);
+ return "";
+ }
+
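csvEscape above follows the usual CSV rule: embedded double quotes are doubled and the field is wrapped in quotes unless it already carries them. A self-contained sketch of that rule, independent of the BSON types handled by csvString:

    // Sketch: RFC-4180 style quoting, mirroring csvEscape above.
    #include <iostream>
    #include <string>

    std::string csvQuote(const std::string& s) {
        std::string out = "\"";
        for (std::string::size_type i = 0; i < s.size(); ++i) {
            if (s[i] == '"') out += "\"\"";   // double every embedded quote
            else out += s[i];
        }
        out += "\"";
        return out;
    }

    int main() {
        std::cout << csvQuote("say \"hi\", then leave") << std::endl;
        // prints: "say ""hi"", then leave"
        return 0;
    }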
int run() {
string ns;
const bool csv = hasParam( "csv" );
@@ -110,7 +178,9 @@ public:
if ( q.getFilter().isEmpty() && !hasParam("dbpath"))
q.snapshot();
- auto_ptr<DBClientCursor> cursor = conn().query( ns.c_str() , q , 0 , 0 , fieldsToReturn , QueryOption_SlaveOk | QueryOption_NoCursorTimeout );
+ bool slaveOk = _params["slaveOk"].as<bool>();
+
+ auto_ptr<DBClientCursor> cursor = conn().query( ns.c_str() , q , 0 , 0 , fieldsToReturn , ( slaveOk ? QueryOption_SlaveOk : 0 ) | QueryOption_NoCursorTimeout );
if ( csv ) {
for ( vector<string>::iterator i=_fields.begin(); i != _fields.end(); i++ ) {
@@ -134,7 +204,7 @@ public:
out << ",";
const BSONElement & e = obj.getFieldDotted(i->c_str());
if ( ! e.eoo() ) {
- out << e.jsonString( Strict , false );
+ out << csvString(e);
}
}
out << endl;
diff --git a/tools/import.cpp b/tools/import.cpp
index 6b59bdc..16980b0 100644
--- a/tools/import.cpp
+++ b/tools/import.cpp
@@ -27,6 +27,7 @@
#include <iostream>
#include <boost/program_options.hpp>
+#include <boost/algorithm/string.hpp>
using namespace mongo;
@@ -44,100 +45,215 @@ class Import : public Tool {
bool _doimport;
bool _jsonArray;
vector<string> _upsertFields;
+ static const int BUF_SIZE = 1024 * 1024 * 4;
+
+ string trimWhitespace(const string& str) {
+ if (str.size() == 0) {
+ return str;
+ }
+ size_t begin = 0;
+ size_t end = str.size() - 1;
+ while (begin < str.size() && isspace(str[begin])) { ++begin; } // Finds index of first non-whitespace character
+ while (end > 0 && isspace(str[end])) { --end; } // Finds index of last non-whitespace character
+ return str.substr(begin, end - begin + 1);
+ }
+
+ void csvTokenizeRow(const string& row, vector<string>& tokens) {
+ bool inQuotes = false;
+ bool prevWasQuote = false;
+ bool tokenQuoted = false;
+ string curtoken = "";
+ for (string::const_iterator it = row.begin(); it != row.end(); ++it) {
+ char element = *it;
+ if (element == '"') {
+ if (!inQuotes) {
+ inQuotes = true;
+ tokenQuoted = true;
+ curtoken = "";
+ } else {
+ if (prevWasQuote) {
+ curtoken += "\"";
+ prevWasQuote = false;
+ } else {
+ prevWasQuote = true;
+ }
+ }
+ } else {
+ if (inQuotes && prevWasQuote) {
+ inQuotes = false;
+ prevWasQuote = false;
+ tokens.push_back(curtoken);
+ }
+
+ if (element == ',' && !inQuotes) {
+ if (!tokenQuoted) { // If token was quoted, it's already been added
+ tokens.push_back(trimWhitespace(curtoken));
+ }
+ curtoken = "";
+ tokenQuoted = false;
+ } else {
+ curtoken += element;
+ }
+ }
+ }
+ if (!tokenQuoted || (inQuotes && prevWasQuote)) {
+ tokens.push_back(trimWhitespace(curtoken));
+ }
+ }
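csvTokenizeRow above tracks quote state character by character, so doubled quotes collapse to a literal quote and commas inside quoted fields do not split tokens. A simplified standalone tokenizer following the same rules (no whitespace trimming), for illustration only:

    // Sketch: a standalone tokenizer following the same quoting rules as csvTokenizeRow.
    #include <iostream>
    #include <string>
    #include <vector>

    void tokenize(const std::string& row, std::vector<std::string>& tokens) {
        std::string cur;
        bool inQuotes = false;
        for (size_t i = 0; i < row.size(); ++i) {
            char c = row[i];
            if (inQuotes) {
                if (c == '"') {
                    if (i + 1 < row.size() && row[i + 1] == '"') { cur += '"'; ++i; } // "" -> literal quote
                    else inQuotes = false;                                            // closing quote
                }
                else cur += c;
            }
            else if (c == '"') inQuotes = true;                           // opening quote
            else if (c == ',') { tokens.push_back(cur); cur.clear(); }    // field separator
            else cur += c;
        }
        tokens.push_back(cur);
    }

    int main() {
        std::vector<std::string> t;
        tokenize("1,\"a, \"\"quoted\"\" field\",3", t);
        for (size_t i = 0; i < t.size(); ++i)
            std::cout << i << ": [" << t[i] << "]\n";
        // 0: [1]   1: [a, "quoted" field]   2: [3]
        return 0;
    }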
void _append( BSONObjBuilder& b , const string& fieldName , const string& data ) {
- if ( b.appendAsNumber( fieldName , data ) )
+ if ( _ignoreBlanks && data.size() == 0 )
return;
- if ( _ignoreBlanks && data.size() == 0 )
+ if ( b.appendAsNumber( fieldName , data ) )
return;
// TODO: other types?
- b.append( fieldName , data );
+ b.append ( fieldName , data );
+ }
+
+ /*
+ * Reads one line from in into buf.
+ * Returns the number of bytes that should be skipped - the caller should
+ * increment buf by this amount.
+ */
+ int getLine(istream* in, char* buf) {
+ if (_jsonArray) {
+ in->read(buf, BUF_SIZE);
+ uassert(13295, "JSONArray file too large", (in->rdstate() & ios_base::eofbit));
+ buf[ in->gcount() ] = '\0';
+ }
+ else {
+ in->getline( buf , BUF_SIZE );
+ log(1) << "got line:" << buf << endl;
+ }
+ uassert( 10263 , "unknown error reading file" ,
+ (!(in->rdstate() & ios_base::badbit)) &&
+ (!(in->rdstate() & ios_base::failbit) || (in->rdstate() & ios_base::eofbit)) );
+
+ int numBytesSkipped = 0;
+ if (strncmp("\xEF\xBB\xBF", buf, 3) == 0) { // UTF-8 BOM (notepad is stupid)
+ buf += 3;
+ numBytesSkipped += 3;
+ }
+
+ uassert(13289, "Invalid UTF8 character detected", isValidUTF8(buf));
+ return numBytesSkipped;
}
- BSONObj parseLine( char * line ) {
- uassert(13289, "Invalid UTF8 character detected", isValidUTF8(line));
+ /*
+ * Parses a BSON object out of a JSON array.
+ * Returns number of bytes processed on success and -1 on failure.
+ */
+ int parseJSONArray(char* buf, BSONObj& o) {
+ int len = 0;
+ while (buf[0] != '{' && buf[0] != '\0') {
+ len++;
+ buf++;
+ }
+ if (buf[0] == '\0')
+ return -1;
+
+ int jslen;
+ o = fromjson(buf, &jslen);
+ len += jslen;
- if ( _type == JSON ) {
+ return len;
+ }
+
+ /*
+ * Parses one object from the input file. This usually corresponds to one line in the input
+ * file, unless the file is a CSV and contains a newline within a quoted string entry.
+ * Returns true if a BSONObj was successfully created and false if not.
+ */
+ bool parseRow(istream* in, BSONObj& o, int& numBytesRead) {
+ boost::scoped_array<char> buffer(new char[BUF_SIZE+2]);
+ char* line = buffer.get();
+
+ numBytesRead = getLine(in, line);
+ line += numBytesRead;
+
+ if (line[0] == '\0') {
+ return false;
+ }
+ numBytesRead += strlen( line );
+
+ if (_type == JSON) {
+ // Strip out trailing whitespace
char * end = ( line + strlen( line ) ) - 1;
- while ( isspace(*end) ) {
+ while ( end >= line && isspace(*end) ) {
*end = 0;
end--;
}
- return fromjson( line );
+ o = fromjson( line );
+ return true;
}
- BSONObjBuilder b;
+ vector<string> tokens;
+ if (_type == CSV) {
+ string row;
+ bool inside_quotes = false;
+ size_t last_quote = 0;
+ while (true) {
+ string lineStr(line);
+ // Deal with line breaks in quoted strings
+ last_quote = lineStr.find_first_of('"');
+ while (last_quote != string::npos) {
+ inside_quotes = !inside_quotes;
+ last_quote = lineStr.find_first_of('"', last_quote+1);
+ }
- unsigned int pos=0;
- while ( line[0] ) {
- string name;
- if ( pos < _fields.size() ) {
- name = _fields[pos];
+ row.append(lineStr);
+
+ if (inside_quotes) {
+ row.append("\n");
+ int num = getLine(in, line);
+ line += num;
+ numBytesRead += num;
+
+ uassert (15854, "CSV file ends while inside quoted field", line[0] != '\0');
+ numBytesRead += strlen( line );
+ } else {
+ break;
+ }
}
- else {
- stringstream ss;
- ss << "field" << pos;
- name = ss.str();
+ // now 'row' is a string corresponding to one row of the CSV file
+ // (which may span multiple lines) and represents one BSONObj
+ csvTokenizeRow(row, tokens);
+ }
+ else { // _type == TSV
+ while (line[0] != '\t' && isspace(line[0])) { // Strip leading whitespace, but not tabs
+ line++;
}
- pos++;
-
- bool done = false;
- string data;
- char * end;
- if ( _type == CSV && line[0] == '"' ) {
- line++; //skip first '"'
-
- while (true) {
- end = strchr( line , '"' );
- if (!end) {
- data += line;
- done = true;
- break;
- }
- else if (end[1] == '"') {
- // two '"'s get appended as one
- data.append(line, end-line+1); //include '"'
- line = end+2; //skip both '"'s
- }
- else if (end[-1] == '\\') {
- // "\\\"" gets appended as '"'
- data.append(line, end-line-1); //exclude '\\'
- data.append("\"");
- line = end+1; //skip the '"'
- }
- else {
- data.append(line, end-line);
- line = end+2; //skip '"' and ','
- break;
- }
- }
+
+ boost::split(tokens, line, boost::is_any_of(_sep));
+ }
+
+ // Now that the row is tokenized, create a BSONObj out of it.
+ BSONObjBuilder b;
+ unsigned int pos=0;
+ for (vector<string>::iterator it = tokens.begin(); it != tokens.end(); ++it) {
+ string token = *it;
+ if ( _headerLine ) {
+ _fields.push_back(token);
}
else {
- end = strstr( line , _sep );
- if ( ! end ) {
- done = true;
- data = string( line );
+ string name;
+ if ( pos < _fields.size() ) {
+ name = _fields[pos];
}
else {
- data = string( line , end - line );
- line = end+1;
+ stringstream ss;
+ ss << "field" << pos;
+ name = ss.str();
}
- }
+ pos++;
- if ( _headerLine ) {
- while ( isspace( data[0] ) )
- data = data.substr( 1 );
- _fields.push_back( data );
+ _append( b , name , token );
}
- else
- _append( b , name , data );
-
- if ( done )
- break;
}
- return b.obj();
+ o = b.obj();
+ return true;
}
public:
@@ -255,68 +371,37 @@ public:
_jsonArray = true;
}
- int errors = 0;
-
- int num = 0;
-
time_t start = time(0);
-
log(1) << "filesize: " << fileSize << endl;
ProgressMeter pm( fileSize );
- const int BUF_SIZE = 1024 * 1024 * 4;
- boost::scoped_array<char> line(new char[BUF_SIZE+2]);
- char * buf = line.get();
- while ( _jsonArray || in->rdstate() == 0 ) {
- if (_jsonArray) {
- if (buf == line.get()) { //first pass
- in->read(buf, BUF_SIZE);
- uassert(13295, "JSONArray file too large", (in->rdstate() & ios_base::eofbit));
- buf[ in->gcount() ] = '\0';
- }
- }
- else {
- buf = line.get();
- in->getline( buf , BUF_SIZE );
- log(1) << "got line:" << buf << endl;
- }
- uassert( 10263 , "unknown error reading file" ,
- (!(in->rdstate() & ios_base::badbit)) &&
- (!(in->rdstate() & ios_base::failbit) || (in->rdstate() & ios_base::eofbit)) );
-
- int len = 0;
- if (strncmp("\xEF\xBB\xBF", buf, 3) == 0) { // UTF-8 BOM (notepad is stupid)
- buf += 3;
- len += 3;
- }
-
- if (_jsonArray) {
- while (buf[0] != '{' && buf[0] != '\0') {
- len++;
- buf++;
- }
- if (buf[0] == '\0')
- break;
- }
- else {
- while (isspace( buf[0] )) {
- len++;
- buf++;
- }
- if (buf[0] == '\0')
- continue;
- len += strlen( buf );
- }
+ int num = 0;
+ int errors = 0;
+ int len = 0;
+ // buffer and line are only used when parsing a jsonArray
+ boost::scoped_array<char> buffer(new char[BUF_SIZE+2]);
+ char* line = buffer.get();
+ while ( _jsonArray || in->rdstate() == 0 ) {
try {
BSONObj o;
if (_jsonArray) {
- int jslen;
- o = fromjson(buf, &jslen);
- len += jslen;
- buf += jslen;
+ int bytesProcessed = 0;
+ if (line == buffer.get()) { // Only read on first pass - the whole array must be on one line.
+ bytesProcessed = getLine(in, line);
+ line += bytesProcessed;
+ len += bytesProcessed;
+ }
+ if ((bytesProcessed = parseJSONArray(line, o)) < 0) {
+ len += bytesProcessed;
+ break;
+ }
+ len += bytesProcessed;
+ line += len;
}
else {
- o = parseLine( buf );
+ if (!parseRow(in, o, len)) {
+ continue;
+ }
}
if ( _headerLine ) {
@@ -348,7 +433,7 @@ public:
}
catch ( std::exception& e ) {
cout << "exception:" << e.what() << endl;
- cout << buf << endl;
+ cout << line << endl;
errors++;
if (hasParam("stopOnError") || _jsonArray)
diff --git a/tools/restore.cpp b/tools/restore.cpp
index 9a18c00..c08c14f 100644
--- a/tools/restore.cpp
+++ b/tools/restore.cpp
@@ -1,4 +1,4 @@
-// restore.cpp
+// @file restore.cpp
/**
* Copyright (C) 2008 10gen Inc.
@@ -25,6 +25,7 @@
#include <boost/program_options.hpp>
#include <fcntl.h>
+#include <set>
using namespace mongo;
@@ -38,13 +39,16 @@ class Restore : public BSONTool {
public:
bool _drop;
+ bool _keepIndexVersion;
string _curns;
string _curdb;
+ set<string> _users; // For restoring users with --drop
Restore() : BSONTool( "restore" ) , _drop(false) {
add_options()
("drop" , "drop each collection before import" )
("oplogReplay" , "replay oplog for point-in-time restore")
+ ("keepIndexVersion" , "don't upgrade indexes to newest version")
;
add_hidden_options()
("dir", po::value<string>()->default_value("dump"), "directory to restore from")
@@ -67,6 +71,7 @@ public:
}
_drop = hasParam( "drop" );
+ _keepIndexVersion = hasParam("keepIndexVersion");
bool doOplog = hasParam( "oplogReplay" );
if (doOplog) {
@@ -168,7 +173,7 @@ public:
if ( ! ( endsWith( root.string().c_str() , ".bson" ) ||
endsWith( root.string().c_str() , ".bin" ) ) ) {
- cerr << "don't know what to do with [" << root.string() << "]" << endl;
+ cerr << "don't know what to do with file [" << root.string() << "]" << endl;
return;
}
@@ -208,13 +213,31 @@ public:
out() << "\t going into namespace [" << ns << "]" << endl;
if ( _drop ) {
- out() << "\t dropping" << endl;
- conn().dropCollection( ns );
+ if (root.leaf() != "system.users.bson" ) {
+ out() << "\t dropping" << endl;
+ conn().dropCollection( ns );
+ } else {
+ // Create map of the users currently in the DB
+ BSONObj fields = BSON("user" << 1);
+ scoped_ptr<DBClientCursor> cursor(conn().query(ns, Query(), 0, 0, &fields));
+ while (cursor->more()) {
+ BSONObj user = cursor->next();
+ _users.insert(user["user"].String());
+ }
+ }
}
_curns = ns.c_str();
_curdb = NamespaceString(_curns).db;
processFile( root );
+ if (_drop && root.leaf() == "system.users.bson") {
+ // Delete any users that used to exist but weren't in the dump file
+ for (set<string>::iterator it = _users.begin(); it != _users.end(); ++it) {
+ BSONObj userMatch = BSON("user" << *it);
+ conn().remove(ns, Query(userMatch));
+ }
+ _users.clear();
+ }
}
virtual void gotObject( const BSONObj& obj ) {
@@ -245,7 +268,7 @@ public:
string s = _curdb + "." + n.coll;
bo.append("ns", s);
}
- else {
+ else if (strcmp(e.fieldName(), "v") != 0 || _keepIndexVersion) { // Remove index version number
bo.append(e);
}
}
@@ -257,10 +280,16 @@ public:
cerr << "Error creating index " << o["ns"].String();
cerr << ": " << err["code"].Int() << " " << err["err"].String() << endl;
cerr << "To resume index restoration, run " << _name << " on file" << _fileName << " manually." << endl;
- abort();
+ ::abort();
}
}
- else {
+ else if (_drop && endsWith(_curns.c_str(), ".system.users") && _users.count(obj["user"].String())) {
+ // Since system collections can't be dropped, we have to manually
+ // replace the contents of the system.users collection
+ BSONObj userMatch = BSON("user" << obj["user"].String());
+ conn().update(_curns, Query(userMatch), obj);
+ _users.erase(obj["user"].String());
+ } else {
conn().insert( _curns , obj );
}
}
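With --drop, system.users cannot simply be dropped, so the code above snapshots the existing user names, replaces or inserts each user found in the dump, and finally removes the names that were never seen. A sketch of that bookkeeping with plain containers standing in for the queries against the live collection:

    // Sketch: keep users from the dump, drop the rest, without dropping the collection.
    #include <iostream>
    #include <set>
    #include <string>
    #include <vector>

    int main() {
        std::set<std::string> existing;          // users currently in system.users
        existing.insert("alice");
        existing.insert("bob");
        existing.insert("carol");

        std::vector<std::string> dump;           // users present in the dump being restored
        dump.push_back("alice");
        dump.push_back("dave");

        for (size_t i = 0; i < dump.size(); ++i) {
            // the real tool calls conn().update()/insert() here to write the dumped document
            existing.erase(dump[i]);             // seen in the dump, so keep this user
        }
        // Whatever is left existed before but is not in the dump: remove it.
        for (std::set<std::string>::iterator it = existing.begin(); it != existing.end(); ++it)
            std::cout << "removing stale user: " << *it << std::endl;  // conn().remove(...) in the tool
        return 0;
    }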
diff --git a/tools/sniffer.cpp b/tools/sniffer.cpp
index 0422f87..aeab808 100644
--- a/tools/sniffer.cpp
+++ b/tools/sniffer.cpp
@@ -26,7 +26,7 @@
killcursors
*/
-
+#include "../pch.h"
#include <pcap.h>
#ifdef _WIN32
@@ -35,7 +35,7 @@
#endif
#include "../bson/util/builder.h"
-#include "../util/message.h"
+#include "../util/net/message.h"
#include "../util/mmap.h"
#include "../db/dbmessage.h"
#include "../client/dbclient.h"
@@ -69,6 +69,11 @@ using mongo::DBClientConnection;
using mongo::QueryResult;
using mongo::MemoryMappedFile;
+mongo::CmdLine mongo::cmdLine;
+namespace mongo {
+ void setupSignals( bool inFork ){}
+}
+
#define SNAP_LEN 65535
int captureHeaderSize;
@@ -99,7 +104,10 @@ struct sniff_ip {
#define IP_V(ip) (((ip)->ip_vhl) >> 4)
/* TCP header */
-typedef u_int32_t tcp_seq;
+#ifdef _WIN32
+typedef unsigned __int32 uint32_t;
+#endif
+typedef uint32_t tcp_seq;
struct sniff_tcp {
u_short th_sport; /* source port */
@@ -271,7 +279,7 @@ void processMessage( Connection& c , Message& m ) {
if ( m.operation() == mongo::opReply )
out() << " - " << (unsigned)m.header()->responseTo;
- out() << endl;
+ out() << '\n';
try {
switch( m.operation() ) {
@@ -279,14 +287,23 @@ void processMessage( Connection& c , Message& m ) {
mongo::QueryResult* r = (mongo::QueryResult*)m.singleData();
out() << "\treply" << " n:" << r->nReturned << " cursorId: " << r->cursorId << endl;
if ( r->nReturned ) {
- mongo::BSONObj o( r->data() , 0 );
+ mongo::BSONObj o( r->data() );
out() << "\t" << o << endl;
}
break;
}
case mongo::dbQuery: {
mongo::QueryMessage q(d);
- out() << "\tquery: " << q.query << " ntoreturn: " << q.ntoreturn << " ntoskip: " << q.ntoskip << endl;
+ out() << "\tquery: " << q.query << " ntoreturn: " << q.ntoreturn << " ntoskip: " << q.ntoskip;
+ if( !q.fields.isEmpty() )
+ out() << " hasfields";
+ if( q.queryOptions & mongo::QueryOption_SlaveOk )
+ out() << " SlaveOk";
+ if( q.queryOptions & mongo::QueryOption_NoCursorTimeout )
+ out() << " NoCursorTimeout";
+ if( q.queryOptions & ~(mongo::QueryOption_SlaveOk | mongo::QueryOption_NoCursorTimeout) )
+ out() << " queryOptions:" << hex << q.queryOptions;
+ out() << endl;
break;
}
case mongo::dbUpdate: {
@@ -323,6 +340,7 @@ void processMessage( Connection& c , Message& m ) {
break;
}
default:
+ out() << "\tunknown opcode " << m.operation() << endl;
cerr << "*** CANNOT HANDLE TYPE: " << m.operation() << endl;
}
}
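The dbQuery branch above decodes the query-options word bit by bit: known bits get a label and any remaining bits are dumped in hex. A minimal sketch of that flag decoding; the constants here are stand-ins for illustration, the tool uses the enums from dbclient.h:

    // Sketch: decoding a query-options bitmask the way the sniffer prints it.
    #include <iostream>
    #include <iomanip>

    // Stand-in flag values for illustration only.
    const int kSlaveOk         = 1 << 2;
    const int kNoCursorTimeout = 1 << 4;

    void printOptions(int queryOptions) {
        if (queryOptions & kSlaveOk)          std::cout << " SlaveOk";
        if (queryOptions & kNoCursorTimeout)  std::cout << " NoCursorTimeout";
        if (queryOptions & ~(kSlaveOk | kNoCursorTimeout))   // any bits we didn't name
            std::cout << " queryOptions:" << std::hex << queryOptions << std::dec;
        std::cout << std::endl;
    }

    int main() {
        printOptions(kSlaveOk);                                // " SlaveOk"
        printOptions(kSlaveOk | kNoCursorTimeout | (1 << 6));  // named flags plus hex dump
        return 0;
    }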
diff --git a/tools/stat.cpp b/tools/stat.cpp
index fa6be31..7483222 100644
--- a/tools/stat.cpp
+++ b/tools/stat.cpp
@@ -19,14 +19,11 @@
#include "pch.h"
#include "client/dbclient.h"
#include "db/json.h"
-#include "../util/httpclient.h"
+#include "../util/net/httpclient.h"
#include "../util/text.h"
-
#include "tool.h"
-
#include <fstream>
#include <iostream>
-
#include <boost/program_options.hpp>
namespace po = boost::program_options;
@@ -65,24 +62,31 @@ namespace mongo {
virtual void printExtraHelpAfter( ostream & out ) {
out << "\n";
out << " Fields\n";
- out << " inserts \t- # of inserts per second\n";
- out << " query \t- # of queries per second\n";
- out << " update \t- # of updates per second\n";
- out << " delete \t- # of deletes per second\n";
- out << " getmore \t- # of get mores (cursor batch) per second\n";
- out << " command \t- # of commands per second\n";
- out << " flushes \t- # of fsync flushes per second\n";
- out << " mapped \t- amount of data mmaped (total data size) megabytes\n";
- out << " visze \t- virtual size of process in megabytes\n";
- out << " res \t- resident size of process in megabytes\n";
- out << " faults \t- # of pages faults per sec (linux only)\n";
- out << " locked \t- percent of time in global write lock\n";
- out << " idx miss \t- percent of btree page misses (sampled)\n";
- out << " qr|qw \t- queue lengths for clients waiting (read|write)\n";
- out << " ar|aw \t- active clients (read|write)\n";
- out << " netIn \t- network traffic in - bits\n";
- out << " netOut \t- network traffic out - bits\n";
- out << " conn \t- number of open connections\n";
+ out << " inserts \t- # of inserts per second (* means replicated op)\n";
+ out << " query \t- # of queries per second\n";
+ out << " update \t- # of updates per second\n";
+ out << " delete \t- # of deletes per second\n";
+ out << " getmore \t- # of get mores (cursor batch) per second\n";
+ out << " command \t- # of commands per second, on a slave its local|replicated\n";
+ out << " flushes \t- # of fsync flushes per second\n";
+ out << " mapped \t- amount of data mmaped (total data size) megabytes\n";
+ out << " vsize \t- virtual size of process in megabytes\n";
+ out << " res \t- resident size of process in megabytes\n";
+ out << " faults \t- # of pages faults per sec (linux only)\n";
+ out << " locked \t- percent of time in global write lock\n";
+ out << " idx miss \t- percent of btree page misses (sampled)\n";
+ out << " qr|qw \t- queue lengths for clients waiting (read|write)\n";
+ out << " ar|aw \t- active clients (read|write)\n";
+ out << " netIn \t- network traffic in - bits\n";
+ out << " netOut \t- network traffic out - bits\n";
+ out << " conn \t- number of open connections\n";
+ out << " set \t- replica set name\n";
+ out << " repl \t- replication type \n";
+ out << " \t M - master\n";
+ out << " \t SEC - secondary\n";
+ out << " \t REC - recovering\n";
+ out << " \t UNK - unknown\n";
+ out << " \t SLV - slave\n";
}
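Each counter column described above is the delta between two consecutive serverStatus samples divided by the sampling interval, and locked % is the ratio of the lockTime delta to the totalTime delta. A back-of-the-envelope sketch with invented numbers:

    // Sketch: how the per-second and percentage columns fall out of two samples.
    #include <iostream>

    int main() {
        // Two serverStatus snapshots taken one second apart (values invented).
        long long insertA = 10450, insertB = 10750;           // opcounters.insert
        long long lockTimeA = 91000,  lockTimeB = 103000;     // globalLock.lockTime (micros)
        long long totalTimeA = 5000000, totalTimeB = 6000000; // globalLock.totalTime (micros)
        double intervalSecs = 1.0;

        double insertsPerSec = (insertB - insertA) / intervalSecs;
        double lockedPercent = 100.0 * (lockTimeB - lockTimeA) / (totalTimeB - totalTimeA);

        std::cout << "inserts: " << insertsPerSec << "/s, locked: " << lockedPercent << "%\n";
        // inserts: 300/s, locked: 1.2%
        return 0;
    }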
@@ -196,6 +200,8 @@ namespace mongo {
BSONObj doRow( const BSONObj& a , const BSONObj& b ) {
BSONObjBuilder result;
+ bool isMongos = b["shardCursorType"].type() == Object; // TODO: should have a better check
+
if ( a["opcounters"].isABSONObj() && b["opcounters"].isABSONObj() ) {
BSONObj ax = a["opcounters"].embeddedObject();
BSONObj bx = b["opcounters"].embeddedObject();
@@ -251,11 +257,12 @@ namespace mongo {
if ( b.getFieldDotted("mem.supported").trueValue() ) {
BSONObj bx = b["mem"].embeddedObject();
BSONObjIterator i( bx );
- _appendMem( result , "mapped" , 6 , bx["mapped"].numberInt() );
+ if (!isMongos)
+ _appendMem( result , "mapped" , 6 , bx["mapped"].numberInt() );
_appendMem( result , "vsize" , 6 , bx["virtual"].numberInt() );
_appendMem( result , "res" , 6 , bx["resident"].numberInt() );
- if ( _all )
+ if ( !isMongos && _all )
_appendMem( result , "non-mapped" , 6 , bx["virtual"].numberInt() - bx["mapped"].numberInt() );
}
@@ -266,8 +273,10 @@ namespace mongo {
_append( result , "faults" , 6 , (int)diff( "page_faults" , ax , bx ) );
}
- _append( result , "locked %" , 8 , percent( "globalLock.totalTime" , "globalLock.lockTime" , a , b ) );
- _append( result , "idx miss %" , 8 , percent( "indexCounters.btree.accesses" , "indexCounters.btree.misses" , a , b ) );
+ if (!isMongos) {
+ _append( result , "locked %" , 8 , percent( "globalLock.totalTime" , "globalLock.lockTime" , a , b ) );
+ _append( result , "idx miss %" , 8 , percent( "indexCounters.btree.accesses" , "indexCounters.btree.misses" , a , b ) );
+ }
if ( b.getFieldDotted( "globalLock.currentQueue" ).type() == Object ) {
int r = b.getFieldDotted( "globalLock.currentQueue.readers" ).numberInt();
@@ -320,9 +329,7 @@ namespace mongo {
_append( result , "repl" , 4 , ss.str() );
}
- else if ( b["shardCursorType"].type() == Object ) {
- // is a mongos
- // TODO: should have a better check
+ else if ( isMongos ) {
_append( result , "repl" , 4 , "RTR" );
}
@@ -353,12 +360,16 @@ namespace mongo {
}
if ( hasParam( "discover" ) ) {
- _noconnection = true;
_many = true;
}
}
int run() {
+ if ( !(_username.empty() || _password.empty()) && isMongos()) {
+ cout << "You cannot use mongostat on a mongos running with authentication enabled" << endl;
+ return -1;
+ }
+
_sleep = getParam( "sleep" , _sleep );
_all = hasParam( "all" );
if ( _many )
@@ -593,7 +604,6 @@ namespace mongo {
int runMany() {
StateMap threads;
-
{
string orig = getParam( "host" );
if ( orig == "" )
diff --git a/tools/tool.cpp b/tools/tool.cpp
index 54dc5df..e8c23d5 100644
--- a/tools/tool.cpp
+++ b/tools/tool.cpp
@@ -21,10 +21,11 @@
#include <iostream>
#include <boost/filesystem/operations.hpp>
-#include <pcrecpp.h>
+#include "pcrecpp.h"
#include "util/file_allocator.h"
#include "util/password.h"
+#include "util/version.h"
using namespace std;
using namespace mongo;
@@ -44,6 +45,7 @@ namespace mongo {
_options->add_options()
("help","produce help message")
("verbose,v", "be more verbose (include multiple times for more verbosity e.g. -vvvvv)")
+ ("version", "print the program's version and exit" )
;
if ( access & REMOTE_SERVER )
@@ -51,6 +53,9 @@ namespace mongo {
("host,h",po::value<string>(), "mongo host to connect to ( <set name>/s1,s2 for sets)" )
("port",po::value<string>(), "server port. Can also use --host hostname:port" )
("ipv6", "enable IPv6 support (disabled by default)")
+#ifdef MONGO_SSL
+ ("ssl", "use all for connections")
+#endif
("username,u",po::value<string>(), "username" )
("password,p", new PasswordValue( &_password ), "password" )
@@ -63,6 +68,7 @@ namespace mongo {
"server - needs to lock the data directory, so cannot be "
"used if a mongod is currently accessing the same path" )
("directoryperdb", "if dbpath specified, each db is in a separate directory" )
+ ("journal", "enable journaling" )
;
if ( access & SPECIFY_DBCOL )
@@ -92,6 +98,12 @@ namespace mongo {
printExtraHelpAfter(out);
}
+ void Tool::printVersion(ostream &out) {
+ out << _name << " version " << mongo::versionString;
+ if (mongo::versionString[strlen(mongo::versionString)-1] == '-')
+ out << " (commit " << mongo::gitVersion() << ")";
+ out << endl;
+ }
int Tool::main( int argc , char ** argv ) {
static StaticObserver staticObserver;
@@ -146,6 +158,11 @@ namespace mongo {
return 0;
}
+ if ( _params.count( "version" ) ) {
+ printVersion(cout);
+ return 0;
+ }
+
if ( _params.count( "verbose" ) ) {
logLevel = 1;
}
@@ -156,6 +173,13 @@ namespace mongo {
}
}
+
+#ifdef MONGO_SSL
+ if (_params.count("ssl")) {
+ mongo::cmdLine.sslOnNormalPorts = true;
+ }
+#endif
+
preSetup();
bool useDirectClient = hasParam( "dbpath" );
@@ -195,6 +219,11 @@ namespace mongo {
directoryperdb = true;
}
assert( lastError.get( true ) );
+
+ if (_params.count("journal")){
+ cmdLine.dur = true;
+ }
+
Client::initThread("tools");
_conn = new DBDirectClient();
_host = "DIRECT";
@@ -212,6 +241,8 @@ namespace mongo {
}
FileAllocator::get()->start();
+
+ dur::startup();
}
if ( _params.count( "db" ) )
@@ -239,6 +270,33 @@ namespace mongo {
cerr << "assertion: " << e.toString() << endl;
ret = -1;
}
+ catch(const boost::filesystem::filesystem_error &fse) {
+ /*
+ https://jira.mongodb.org/browse/SERVER-2904
+
+ Simple tools that don't access the database, such as
+ bsondump, aren't throwing DBExceptions, but are throwing
+ boost exceptions.
+
+ The currently available set of error codes doesn't seem to match the
+ boost documentation. boost::filesystem::not_found_error
+ (from http://www.boost.org/doc/libs/1_31_0/libs/filesystem/doc/exception.htm)
+ doesn't seem to exist in our headers. Also, fse.code() isn't
+ boost::system::errc::no_such_file_or_directory when this
+ happens, as you would expect. We also determined from
+ experimentation that the command-line argument gets turned into
+ "\\?" instead of "/?" !!!
+ */
+#if defined(_WIN32)
+ if (/*(fse.code() == boost::system::errc::no_such_file_or_directory) &&*/
+ (fse.path1() == "\\?"))
+ printHelp(cerr);
+ else
+#endif // _WIN32
+ cerr << "error: " << fse.what() << endl;
+
+ ret = -1;
+ }
if ( currentClient.get() )
currentClient->shutdown();
@@ -275,6 +333,13 @@ namespace mongo {
return true;
}
+ bool Tool::isMongos() {
+ // TODO: when mongos supports QueryOption_Exhaust add a version check (SERVER-2628)
+ BSONObj isdbgrid;
+ conn("true").simpleCommand("admin", &isdbgrid, "isdbgrid");
+ return isdbgrid["isdbgrid"].trueValue();
+ }
+
void Tool::addFieldOptions() {
add_options()
("fields,f" , po::value<string>() , "comma separated list of field names e.g. -f name,age" )
@@ -332,8 +397,15 @@ namespace mongo {
if ( ! dbname.size() )
dbname = _db;
- if ( ! ( _username.size() || _password.size() ) )
+ if ( ! ( _username.size() || _password.size() ) ) {
+ // Make sure that we don't need authentication to connect to this db
+ // findOne throws an AssertionException if it's not authenticated.
+ if (_coll.size() > 0) {
+ // BSONTools don't have a collection
+ conn().findOne(getNS(), Query("{}"));
+ }
return;
+ }
string errmsg;
if ( _conn->auth( dbname , _username , _password , errmsg ) )
@@ -348,7 +420,7 @@ namespace mongo {
}
BSONTool::BSONTool( const char * name, DBAccess access , bool objcheck )
- : Tool( name , access , "" , "" ) , _objcheck( objcheck ) {
+ : Tool( name , access , "" , "" , false ) , _objcheck( objcheck ) {
add_options()
("objcheck" , "validate object before inserting" )
@@ -441,9 +513,9 @@ namespace mongo {
fclose( file );
uassert( 10265 , "counts don't match" , m.done() == fileLength );
- out() << "\t " << m.hits() << " objects found" << endl;
+ (_usesstdout ? cout : cerr ) << m.hits() << " objects found" << endl;
if ( _matcher.get() )
- out() << "\t " << processed << " objects processed" << endl;
+ (_usesstdout ? cout : cerr ) << processed << " objects processed" << endl;
return processed;
}
diff --git a/tools/tool.h b/tools/tool.h
index f6124b8..e6694f3 100644
--- a/tools/tool.h
+++ b/tools/tool.h
@@ -28,6 +28,7 @@
#include "client/dbclient.h"
#include "db/instance.h"
+#include "db/matcher.h"
using std::string;
@@ -82,6 +83,7 @@ namespace mongo {
}
bool isMaster();
+ bool isMongos();
virtual void preSetup() {}
@@ -92,6 +94,8 @@ namespace mongo {
virtual void printExtraHelp( ostream & out ) {}
virtual void printExtraHelpAfter( ostream & out ) {}
+ virtual void printVersion(ostream &out);
+
protected:
mongo::DBClientBase &conn( bool slaveIfPaired = false );
diff --git a/tools/top.cpp b/tools/top.cpp
new file mode 100644
index 0000000..42e4568
--- /dev/null
+++ b/tools/top.cpp
@@ -0,0 +1,196 @@
+// top.cpp
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "client/dbclient.h"
+#include "db/json.h"
+#include "../util/text.h"
+#include "tool.h"
+#include <fstream>
+#include <iostream>
+#include <boost/program_options.hpp>
+
+namespace po = boost::program_options;
+
+namespace mongo {
+
+ class TopTool : public Tool {
+ public:
+
+ TopTool() : Tool( "top" , REMOTE_SERVER , "admin" ) {
+ _sleep = 1;
+
+ add_hidden_options()
+ ( "sleep" , po::value<int>() , "time to sleep between calls" )
+ ;
+ addPositionArg( "sleep" , 1 );
+
+ _autoreconnect = true;
+ }
+
+ BSONObj getData() {
+ BSONObj out;
+ if ( ! conn().simpleCommand( _db , &out , "top" ) ) {
+ cout << "error: " << out << endl;
+ return BSONObj();
+ }
+ return out.getOwned();
+ }
+
+ void printDiff( BSONObj prev , BSONObj now ) {
+ if ( ! prev["totals"].isABSONObj() ||
+ ! now["totals"].isABSONObj() ) {
+ cout << "." << endl;
+ return;
+ }
+
+ prev = prev["totals"].Obj();
+ now = now["totals"].Obj();
+
+ vector<NSInfo> data;
+
+ unsigned longest = 30;
+
+ BSONObjIterator i( now );
+ while ( i.more() ) {
+ BSONElement e = i.next();
+
+ // invalid, data fixed in 1.8.0
+ if ( e.fieldName()[0] == '?' )
+ continue;
+
+ if ( ! str::contains( e.fieldName() , '.' ) )
+ continue;
+
+ BSONElement old = prev[e.fieldName()];
+ if ( old.eoo() )
+ continue;
+
+ if ( strlen( e.fieldName() ) > longest )
+ longest = strlen(e.fieldName());
+
+ data.push_back( NSInfo( e.fieldName() , old.Obj() , e.Obj() ) );
+ }
+
+ std::sort( data.begin() , data.end() );
+
+ cout << "\n"
+ << setw(longest) << "ns"
+ << "\ttotal "
+ << "\tread "
+ << "\twrite "
+ << "\t\t" << terseCurrentTime()
+ << endl;
+ for ( int i=data.size()-1; i>=0 && data.size() - i < 10 ; i-- ) {
+ cout << setw(longest) << data[i].ns
+ << "\t" << setprecision(3) << data[i].diffTimeMS( "total" ) << "ms"
+ << "\t" << setprecision(3) << data[i].diffTimeMS( "readLock" ) << "ms"
+ << "\t" << setprecision(3) << data[i].diffTimeMS( "writeLock" ) << "ms"
+ << endl;
+ }
+ }
+
+ int run() {
+ _sleep = getParam( "sleep" , _sleep );
+
+ BSONObj prev = getData();
+
+ while ( true ) {
+ sleepsecs( _sleep );
+
+ BSONObj now;
+ try {
+ now = getData();
+ }
+ catch ( std::exception& e ) {
+ cout << "can't get data: " << e.what() << endl;
+ continue;
+ }
+
+ if ( now.isEmpty() )
+ return -2;
+
+ try {
+ printDiff( prev , now );
+ }
+ catch ( AssertionException& e ) {
+ cout << "\nerror: " << e.what() << "\n"
+ << now
+ << endl;
+ }
+
+
+ prev = now;
+ }
+
+ return 0;
+ }
+
+ struct NSInfo {
+ NSInfo( string thens , BSONObj a , BSONObj b ) {
+ ns = thens;
+ prev = a;
+ cur = b;
+
+ timeDiff = diffTime( "total" );
+ }
+
+
+ int diffTimeMS( const char * field ) const {
+ return (int)(diffTime( field ) / 1000);
+ }
+
+ double diffTime( const char * field ) const {
+ return diff( field , "time" );
+ }
+
+ double diffCount( const char * field ) const {
+ return diff( field , "count" );
+ }
+
+ /**
+ * @param field total,readLock, etc...
+ * @param type time or count
+ */
+ double diff( const char * field , const char * type ) const {
+ return cur[field].Obj()[type].number() - prev[field].Obj()[type].number();
+ }
+
+ bool operator<(const NSInfo& r) const {
+ return timeDiff < r.timeDiff;
+ }
+
+ string ns;
+
+ BSONObj prev;
+ BSONObj cur;
+
+ double timeDiff; // time diff between prev and cur
+ };
+
+ private:
+ int _sleep;
+ };
+
+}
+
+int main( int argc , char ** argv ) {
+ mongo::TopTool top;
+ return top.main( argc , argv );
+}
+
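NSInfo above holds two snapshots of the top command totals for one namespace and prints the difference of the nested time counters, converted from microseconds to milliseconds. A numeric sketch of that computation with invented values:

    // Sketch: the per-namespace time delta mongotop prints, with invented numbers.
    #include <iostream>

    struct Snapshot {
        long long totalTime, readLockTime, writeLockTime;  // micros, as in top.totals.<ns>.*.time
    };

    int main() {
        Snapshot prev = { 1200000,  900000, 300000 };
        Snapshot cur  = { 1325000, 1000000, 325000 };

        // diffTimeMS(field) == (cur.time - prev.time) / 1000
        std::cout << "total " << (cur.totalTime     - prev.totalTime)     / 1000 << "ms  "
                  << "read "  << (cur.readLockTime  - prev.readLockTime)  / 1000 << "ms  "
                  << "write " << (cur.writeLockTime - prev.writeLockTime) / 1000 << "ms" << std::endl;
        // total 125ms  read 100ms  write 25ms
        return 0;
    }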
diff --git a/util/alignedbuilder.cpp b/util/alignedbuilder.cpp
index 1734431..b2e0461 100644
--- a/util/alignedbuilder.cpp
+++ b/util/alignedbuilder.cpp
@@ -29,6 +29,35 @@ namespace mongo {
BOOST_STATIC_ASSERT(sizeof(void*) == sizeof(size_t));
+ /** reset for a re-use. shrinks if > 128MB */
+ void AlignedBuilder::reset() {
+ _len = 0;
+ RARELY {
+ const unsigned sizeCap = 128*1024*1024;
+ if (_p._size > sizeCap)
+ _realloc(sizeCap, _len);
+ }
+ }
+
+ /** reset with a hint as to the upcoming needed size */
+ void AlignedBuilder::reset(unsigned sz) {
+ _len = 0;
+ unsigned Q = 32 * 1024 * 1024 - 1;
+ unsigned want = (sz+Q) & (~Q);
+ if( _p._size == want ) {
+ return;
+ }
+ if( _p._size > want ) {
+ if( _p._size <= 64 * 1024 * 1024 )
+ return;
+ bool downsize = false;
+ RARELY { downsize = true; }
+ if( !downsize )
+ return;
+ }
+ _realloc(want, _len);
+ }
+
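reset(sz) above rounds the requested size up to the next 32MB multiple with a mask: Q is 32MB-1, so (sz+Q) & ~Q adds just under one chunk and then clears the low bits. A quick sketch of the arithmetic:

    // Sketch: round a byte count up to the next 32MB boundary, as reset(unsigned) does.
    #include <iostream>

    int main() {
        const unsigned Q = 32 * 1024 * 1024 - 1;            // low 25 bits set
        unsigned sizes[] = { 1, 32u*1024*1024, 33u*1024*1024, 100u*1024*1024 };
        for (int i = 0; i < 4; ++i) {
            unsigned want = (sizes[i] + Q) & (~Q);          // round up to a multiple of 32MB
            std::cout << sizes[i] << " -> " << want / (1024 * 1024) << "MB" << std::endl;
        }
        // 1 -> 32MB, 32MB -> 32MB, 33MB -> 64MB, 100MB -> 128MB
        return 0;
    }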
void AlignedBuilder::mallocSelfAligned(unsigned sz) {
assert( sz == _p._size );
void *p = malloc(sz + Alignment - 1);
@@ -44,10 +73,20 @@ namespace mongo {
/* "slow"/infrequent portion of 'grow()' */
void NOINLINE_DECL AlignedBuilder::growReallocate(unsigned oldLen) {
+ dassert( _len > _p._size );
unsigned a = _p._size;
assert( a );
while( 1 ) {
- a *= 2;
+ if( a < 128 * 1024 * 1024 )
+ a *= 2;
+ else if( sizeof(int*) == 4 )
+ a += 32 * 1024 * 1024;
+ else
+ a += 64 * 1024 * 1024;
+ DEV if( a > 256*1024*1024 ) {
+ log() << "dur AlignedBuilder too big, aborting in _DEBUG build" << endl;
+ abort();
+ }
wassert( a <= 256*1024*1024 );
assert( a <= 512*1024*1024 );
if( _len < a )
diff --git a/util/alignedbuilder.h b/util/alignedbuilder.h
index 452cec2..1d246a9 100644
--- a/util/alignedbuilder.h
+++ b/util/alignedbuilder.h
@@ -28,13 +28,11 @@ namespace mongo {
AlignedBuilder(unsigned init_size);
~AlignedBuilder() { kill(); }
+ /** reset with a hint as to the upcoming needed size */
+ void reset(unsigned sz);
+
/** reset for a re-use. shrinks if > 128MB */
- void reset() {
- _len = 0;
- const unsigned sizeCap = 128*1024*1024;
- if (_p._size > sizeCap)
- _realloc(sizeCap, _len);
- }
+ void reset();
/** note this may be deallocated (realloced) if you keep writing or reset(). */
const char* buf() const { return _p._data; }
@@ -48,8 +46,12 @@ namespace mongo {
return l;
}
+ /** if buffer grows pointer no longer valid */
char* atOfs(unsigned ofs) { return _p._data + ofs; }
+ /** if buffer grows pointer no longer valid */
+ char* cur() { return _p._data + _len; }
+
void appendChar(char j) {
*((char*)grow(sizeof(char))) = j;
}
@@ -99,7 +101,7 @@ namespace mongo {
inline char* grow(unsigned by) {
unsigned oldlen = _len;
_len += by;
- if ( _len > _p._size ) {
+ if (MONGO_unlikely( _len > _p._size )) {
growReallocate(oldlen);
}
return _p._data + oldlen;
diff --git a/util/array.h b/util/array.h
index bf705a4..1282225 100644
--- a/util/array.h
+++ b/util/array.h
@@ -18,6 +18,12 @@
namespace mongo {
+ /*
+ * simple array class that does no allocations
+ * same api as vector
+ * fixed buffer, so once capacity is exceeded, will assert
+ * meant to be re-used with clear()
+ */
template<typename T>
class FastArray {
public:
@@ -44,6 +50,7 @@ namespace mongo {
}
void push_back( const T& t ) {
+ assert( _size < _capacity );
_data[_size++] = t;
}
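FastArray, per the comment above, trades growth for zero allocations: fixed capacity, a vector-like push_back that now asserts on overflow, and clear() for reuse. A sketch of the same idea with a plain buffer; FastArray's exact constructor signature is not shown in this hunk, so nothing here claims to be its API:

    // Sketch of the idea only: a fixed buffer, assert instead of reallocating.
    #include <cassert>
    #include <iostream>

    int main() {
        const int capacity = 4;
        int data[capacity];
        int size = 0;

        for (int i = 0; i < 3; ++i) {
            assert(size < capacity);      // what push_back now enforces
            data[size++] = i * i;
        }
        size = 0;                         // "clear()": reuse the same buffer, no free/alloc
        assert(size < capacity);
        data[size++] = 42;
        std::cout << "reused buffer, first element: " << data[0] << std::endl;
        return 0;
    }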
diff --git a/util/assert_util.cpp b/util/assert_util.cpp
index 8280d8b..da039c0 100644
--- a/util/assert_util.cpp
+++ b/util/assert_util.cpp
@@ -62,18 +62,34 @@ namespace mongo {
b.append( c , code );
}
-
string getDbContext();
/* "warning" assert -- safe to continue, so we don't throw exception. */
- void wasserted(const char *msg, const char *file, unsigned line) {
- problem() << "Assertion failure " << msg << ' ' << file << ' ' << dec << line << endl;
+ NOINLINE_DECL void wasserted(const char *msg, const char *file, unsigned line) {
+ static bool rateLimited;
+ static time_t lastWhen;
+ static unsigned lastLine;
+ if( lastLine == line && time(0)-lastWhen < 5 ) {
+ if( rateLimited++ == 0 ) {
+ log() << "rate limiting wassert" << endl;
+ }
+ return;
+ }
+ lastWhen = time(0);
+ lastLine = line;
+
+ problem() << "warning assertion failure " << msg << ' ' << file << ' ' << dec << line << endl;
sayDbContext();
raiseError(0,msg && *msg ? msg : "wassertion failure");
assertionCount.condrollover( ++assertionCount.warning );
+#if defined(_DEBUG) || defined(_DURABLEDEFAULTON) || defined(_DURABLEDEFAULTOFF)
+ // this is so we notice in buildbot
+ log() << "\n\n***aborting after wassert() failure in a debug/test build\n\n" << endl;
+ abort();
+#endif
}
- void asserted(const char *msg, const char *file, unsigned line) {
+ NOINLINE_DECL void asserted(const char *msg, const char *file, unsigned line) {
assertionCount.condrollover( ++assertionCount.regular );
problem() << "Assertion failure " << msg << ' ' << file << ' ' << dec << line << endl;
sayDbContext();
@@ -82,6 +98,28 @@ namespace mongo {
temp << "assertion " << file << ":" << line;
AssertionException e(temp.str(),0);
breakpoint();
+#if defined(_DEBUG) || defined(_DURABLEDEFAULTON) || defined(_DURABLEDEFAULTOFF)
+ // this is so we notice in buildbot
+ log() << "\n\n***aborting after assert() failure in a debug/test build\n\n" << endl;
+ abort();
+#endif
+ throw e;
+ }
+
+ NOINLINE_DECL void verifyFailed( int msgid ) {
+ assertionCount.condrollover( ++assertionCount.regular );
+ problem() << "Assertion failure " << msgid << endl;
+ sayDbContext();
+ raiseError(0,"assertion failure");
+ stringstream temp;
+ temp << msgid;
+ AssertionException e(temp.str(),0);
+ breakpoint();
+#if defined(_DEBUG) || defined(_DURABLEDEFAULTON) || defined(_DURABLEDEFAULTOFF)
+ // this is so we notice in buildbot
+ log() << "\n\n***aborting after verify() failure in a debug/test build\n\n" << endl;
+ abort();
+#endif
throw e;
}
@@ -89,14 +127,14 @@ namespace mongo {
raiseError(0,msg);
}
- void uasserted(int msgid, const char *msg) {
+ NOINLINE_DECL void uasserted(int msgid, const char *msg) {
assertionCount.condrollover( ++assertionCount.user );
LOG(1) << "User Assertion: " << msgid << ":" << msg << endl;
raiseError(msgid,msg);
throw UserException(msgid, msg);
}
- void msgasserted(int msgid, const char *msg) {
+ NOINLINE_DECL void msgasserted(int msgid, const char *msg) {
assertionCount.condrollover( ++assertionCount.warning );
tlog() << "Assertion: " << msgid << ":" << msg << endl;
raiseError(msgid,msg && *msg ? msg : "massert failure");
@@ -105,14 +143,14 @@ namespace mongo {
throw MsgAssertionException(msgid, msg);
}
- void msgassertedNoTrace(int msgid, const char *msg) {
+ NOINLINE_DECL void msgassertedNoTrace(int msgid, const char *msg) {
assertionCount.condrollover( ++assertionCount.warning );
log() << "Assertion: " << msgid << ":" << msg << endl;
raiseError(msgid,msg && *msg ? msg : "massert failure");
throw MsgAssertionException(msgid, msg);
}
- void streamNotGood( int code , string msg , std::ios& myios ) {
+ NOINLINE_DECL void streamNotGood( int code , string msg , std::ios& myios ) {
stringstream ss;
// errno might not work on all systems for streams
// if it doesn't for a system should deal with here
@@ -144,5 +182,22 @@ namespace mongo {
#endif
}
+ NOINLINE_DECL ErrorMsg::ErrorMsg(const char *msg, char ch) {
+ int l = strlen(msg);
+ assert( l < 128);
+ memcpy(buf, msg, l);
+ char *p = buf + l;
+ p[0] = ch;
+ p[1] = 0;
+ }
+
+ NOINLINE_DECL ErrorMsg::ErrorMsg(const char *msg, unsigned val) {
+ int l = strlen(msg);
+ assert( l < 128);
+ memcpy(buf, msg, l);
+ char *p = buf + l;
+ sprintf(p, "%u", val);
+ }
+
}
diff --git a/util/assert_util.h b/util/assert_util.h
index 151e950..b4c68b7 100644
--- a/util/assert_util.h
+++ b/util/assert_util.h
@@ -20,6 +20,13 @@
#include "../db/lasterror.h"
+// MONGO_NORETURN undefed at end of file
+#ifdef __GNUC__
+# define MONGO_NORETURN __attribute__((__noreturn__))
+#else
+# define MONGO_NORETURN
+#endif
+
namespace mongo {
enum CommonErrorCodes {
@@ -53,11 +60,28 @@ namespace mongo {
void append( BSONObjBuilder& b , const char * m = "$err" , const char * c = "code" ) const ;
string toString() const { stringstream ss; ss << "exception: " << code << " " << msg; return ss.str(); }
bool empty() const { return msg.empty(); }
+
+ void reset(){ msg = ""; code=-1; }
string msg;
int code;
};
+ /** helper class that builds error strings. lighter weight than a StringBuilder, albeit less flexible.
+ NOINLINE_DECL used in the constructor implementations as we are assuming this is a cold code path when used.
+
+ example:
+ throw UserException(123, ErrorMsg("blah", num_val));
+ */
+ class ErrorMsg {
+ public:
+ ErrorMsg(const char *msg, char ch);
+ ErrorMsg(const char *msg, unsigned val);
+ operator string() const { return buf; }
+ private:
+ char buf[256];
+ };
+
class DBException : public std::exception {
public:
DBException( const ExceptionInfo& ei ) : _ei(ei) {}
@@ -117,14 +141,14 @@ namespace mongo {
virtual void appendPrefix( stringstream& ss ) const { ss << "massert:"; }
};
-
- void asserted(const char *msg, const char *file, unsigned line);
+ void asserted(const char *msg, const char *file, unsigned line) MONGO_NORETURN;
void wasserted(const char *msg, const char *file, unsigned line);
-
+ void verifyFailed( int msgid );
+
/** a "user assertion". throws UserAssertion. logs. typically used for errors that a user
- could cause, such as dupliate key, disk full, etc.
+ could cause, such as duplicate key, disk full, etc.
*/
- void uasserted(int msgid, const char *msg);
+ void uasserted(int msgid, const char *msg) MONGO_NORETURN;
inline void uasserted(int msgid , string msg) { uasserted(msgid, msg.c_str()); }
/** reported via lasterror, but don't throw exception */
@@ -133,24 +157,33 @@ namespace mongo {
/** msgassert and massert are for errors that are internal but have a well defined error text string.
a stack trace is logged.
*/
- void msgassertedNoTrace(int msgid, const char *msg);
+ void msgassertedNoTrace(int msgid, const char *msg) MONGO_NORETURN;
inline void msgassertedNoTrace(int msgid, const string& msg) { msgassertedNoTrace( msgid , msg.c_str() ); }
- void msgasserted(int msgid, const char *msg);
+ void msgasserted(int msgid, const char *msg) MONGO_NORETURN;
inline void msgasserted(int msgid, string msg) { msgasserted(msgid, msg.c_str()); }
+ /* convert various types of exceptions to strings */
+ inline string causedBy( const char* e ){ return (string)" :: caused by :: " + e; }
+ inline string causedBy( const DBException& e ){ return causedBy( e.toString().c_str() ); }
+ inline string causedBy( const std::exception& e ){ return causedBy( e.what() ); }
+ inline string causedBy( const string& e ){ return causedBy( e.c_str() ); }
+
+ /** in the mongodb source, use verify() instead of assert(). verify is always evaluated even in release builds. */
+ inline void verify( int msgid , bool testOK ) { if ( ! testOK ) verifyFailed( msgid ); }
+
#ifdef assert
#undef assert
#endif
-#define MONGO_assert(_Expression) (void)( (!!(_Expression)) || (mongo::asserted(#_Expression, __FILE__, __LINE__), 0) )
+#define MONGO_assert(_Expression) (void)( MONGO_likely(!!(_Expression)) || (mongo::asserted(#_Expression, __FILE__, __LINE__), 0) )
#define assert MONGO_assert
/* "user assert". if asserts, user did something wrong, not our code */
-#define MONGO_uassert(msgid, msg, expr) (void)( (!!(expr)) || (mongo::uasserted(msgid, msg), 0) )
+#define MONGO_uassert(msgid, msg, expr) (void)( MONGO_likely(!!(expr)) || (mongo::uasserted(msgid, msg), 0) )
#define uassert MONGO_uassert
/* warning only - keeps going */
-#define MONGO_wassert(_Expression) (void)( (!!(_Expression)) || (mongo::wasserted(#_Expression, __FILE__, __LINE__), 0) )
+#define MONGO_wassert(_Expression) (void)( MONGO_likely(!!(_Expression)) || (mongo::wasserted(#_Expression, __FILE__, __LINE__), 0) )
#define wassert MONGO_wassert
/* display a message, no context, and throw assertionexception
@@ -158,7 +191,7 @@ namespace mongo {
easy way to throw an exception and log something without our stack trace
display happening.
*/
-#define MONGO_massert(msgid, msg, expr) (void)( (!!(expr)) || (mongo::msgasserted(msgid, msg), 0) )
+#define MONGO_massert(msgid, msg, expr) (void)( MONGO_likely(!!(expr)) || (mongo::msgasserted(msgid, msg), 0) )
#define massert MONGO_massert
/* dassert is 'debug assert' -- might want to turn off for production as these
@@ -179,7 +212,7 @@ namespace mongo {
enum { ASSERT_ID_DUPKEY = 11000 };
/* throws a uassertion with an appropriate msg */
- void streamNotGood( int code , string msg , std::ios& myios );
+ void streamNotGood( int code , string msg , std::ios& myios ) MONGO_NORETURN;
inline void assertStreamGood(unsigned msgid, string msg, std::ios& myios) {
if( !myios.good() ) streamNotGood(msgid, msg, myios);
@@ -195,10 +228,21 @@ namespace mongo {
expression; \
} catch ( const std::exception &e ) { \
stringstream ss; \
- ss << "caught boost exception: " << e.what(); \
- msgasserted( 13294 , ss.str() ); \
+ ss << "caught boost exception: " << e.what() << ' ' << __FILE__ << ' ' << __LINE__; \
+ msgasserted( 13294 , ss.str() ); \
+ } catch ( ... ) { \
+ massert( 10437 , "unknown boost failed" , false ); \
+ }
+
+#define MONGO_BOOST_CHECK_EXCEPTION_WITH_MSG( expression, msg ) \
+ try { \
+ expression; \
+ } catch ( const std::exception &e ) { \
+ stringstream ss; \
+ ss << msg << " caught boost exception: " << e.what(); \
+ msgasserted( 14043 , ss.str() ); \
} catch ( ... ) { \
- massert( 10437 , "unknown boost failed" , false ); \
+ msgasserted( 14044 , string("unknown boost failed ") + msg ); \
}
#define DESTRUCTOR_GUARD MONGO_DESTRUCTOR_GUARD
@@ -210,3 +254,5 @@ namespace mongo {
} catch ( ... ) { \
problem() << "caught unknown exception in destructor (" << __FUNCTION__ << ")" << endl; \
}
+
+#undef MONGO_NORETURN
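
Not part of the patch: a minimal usage sketch of the helpers added above (causedBy() and the new always-evaluated verify()). The caller, parseConfig(), and the 14xxx error codes are hypothetical.

    // illustration only -- parseConfig() and the 14xxx codes are hypothetical
    void loadConfig( const char* buf ) {
        verify( 14999 , buf != NULL );          // checked even in release builds, unlike dassert
        try {
            parseConfig( buf );
        }
        catch ( DBException& e ) {
            // causedBy() yields " :: caused by :: <details>" so error messages chain
            uasserted( 14998 , string("could not load config") + causedBy( e ) );
        }
    }
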
diff --git a/util/background.cpp b/util/background.cpp
index 746d14c..215b271 100644
--- a/util/background.cpp
+++ b/util/background.cpp
@@ -18,8 +18,11 @@
#include "pch.h"
#include "concurrency/mutex.h"
+#include "concurrency/spin_lock.h"
#include "background.h"
+#include "time_support.h"
+#include "timer.h"
#include "mongoutils/str.h"
@@ -80,6 +83,7 @@ namespace mongo {
}
bool BackgroundJob::wait( unsigned msTimeOut ) {
+ assert( !_status->deleteSelf ); // you cannot call wait on a self-deleting job
scoped_lock l( _status->m );
while ( _status->state != Done ) {
if ( msTimeOut ) {
@@ -117,4 +121,70 @@ namespace mongo {
return _status->state == Running;
}
+ // -------------------------
+
+ PeriodicTask::PeriodicTask() {
+ if ( ! theRunner )
+ theRunner = new Runner();
+ theRunner->add( this );
+ }
+
+ PeriodicTask::~PeriodicTask() {
+ theRunner->remove( this );
+ }
+
+ void PeriodicTask::Runner::add( PeriodicTask* task ) {
+ scoped_spinlock lk( _lock );
+ _tasks.push_back( task );
+ }
+
+ void PeriodicTask::Runner::remove( PeriodicTask* task ) {
+ scoped_spinlock lk( _lock );
+ for ( size_t i=0; i<_tasks.size(); i++ ) {
+ if ( _tasks[i] == task ) {
+ _tasks[i] = 0;
+ break;
+ }
+ }
+ }
+
+ void PeriodicTask::Runner::run() {
+ int sleeptime = 60;
+ DEV sleeptime = 5; // to catch race conditions
+
+ while ( ! inShutdown() ) {
+
+ sleepsecs( sleeptime );
+
+ scoped_spinlock lk( _lock );
+
+ size_t size = _tasks.size();
+
+ for ( size_t i=0; i<size; i++ ) {
+ PeriodicTask * t = _tasks[i];
+ if ( ! t )
+ continue;
+
+ if ( inShutdown() )
+ break;
+
+ Timer timer;
+ try {
+ t->taskDoWork();
+ }
+ catch ( std::exception& e ) {
+ error() << "task: " << t->taskName() << " failed: " << e.what() << endl;
+ }
+ catch ( ... ) {
+ error() << "task: " << t->taskName() << " failed with unknown error" << endl;
+ }
+
+ int ms = timer.millis();
+ LOG( ms <= 3 ? 1 : 0 ) << "task: " << t->taskName() << " took: " << ms << "ms" << endl;
+ }
+ }
+ }
+
+ PeriodicTask::Runner* PeriodicTask::theRunner = 0;
+
} // namespace mongo
diff --git a/util/background.h b/util/background.h
index 861df9b..496a1f4 100644
--- a/util/background.h
+++ b/util/background.h
@@ -17,6 +17,8 @@
#pragma once
+#include "concurrency/spin_lock.h"
+
namespace mongo {
/**
@@ -102,5 +104,52 @@ namespace mongo {
void jobBody( boost::shared_ptr<JobStatus> status );
};
+
+ /**
+ * these run "roughly" every minute
+ * instantiate statically
+ * class MyTask : public PeriodicTask {
+ * public:
+ * virtual string taskName() const { return "MyTask"; }
+ * virtual void taskDoWork() { log() << "hi" << endl; }
+ * } myTask;
+ */
+ class PeriodicTask {
+ public:
+ PeriodicTask();
+ virtual ~PeriodicTask();
+
+ virtual void taskDoWork() = 0;
+ virtual string taskName() const = 0;
+
+ class Runner : public BackgroundJob {
+ public:
+ virtual ~Runner(){}
+
+ virtual string name() const { return "PeriodicTask::Runner"; }
+
+ virtual void run();
+
+ void add( PeriodicTask* task );
+ void remove( PeriodicTask* task );
+
+ private:
+
+ SpinLock _lock;
+
+ // these are NOT owned by Runner
+ // Runner will not delete these
+ // this never gets smaller
+ // only fields replaced with nulls
+ vector<PeriodicTask*> _tasks;
+
+ };
+
+ static Runner* theRunner;
+
+ };
+
+
+
} // namespace mongo
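
Not part of the patch: a sketch of how a task would plug into the PeriodicTask interface above (the virtuals are taskName()/taskDoWork()). The CacheSweeper name is hypothetical.

    // illustration only
    class CacheSweeper : public PeriodicTask {
    public:
        virtual string taskName() const { return "CacheSweeper"; }
        virtual void taskDoWork() { log() << "sweeping cache" << endl; }
    } cacheSweeper;   // instantiated statically; Runner calls taskDoWork() roughly once a minute
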
diff --git a/util/bson_util.h b/util/bson_util.h
new file mode 100644
index 0000000..973e31f
--- /dev/null
+++ b/util/bson_util.h
@@ -0,0 +1,42 @@
+// bson_util.h
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "../pch.h"
+
+namespace mongo {
+
+template <typename T>
+void bsonArrToNumVector(BSONElement el, vector<T>& results){
+
+ if(el.type() == Array){
+
+ vector<BSONElement> elements = el.Array();
+
+ for(vector<BSONElement>::iterator i = elements.begin(); i != elements.end(); ++i){
+ results.push_back( (T) (*i).Number() );
+ }
+ }
+ else if(el.isNumber()){
+ results.push_back( (T) el.Number() );
+ }
+
+}
+
+
+}
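
Not part of the patch: an illustrative call of the new helper, assuming the usual BSON builder macros are available.

    // illustration only
    BSONObj o = BSON( "weights" << BSON_ARRAY( 1 << 2.5 << 3 ) );
    vector<double> w;
    bsonArrToNumVector<double>( o["weights"] , w );   // w == { 1, 2.5, 3 }
    bsonArrToNumVector<double>( o["missing"] , w );   // neither array nor number: w unchanged
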
diff --git a/util/bufreader.h b/util/bufreader.h
index a0dcefa..53f0ba7 100644
--- a/util/bufreader.h
+++ b/util/bufreader.h
@@ -28,6 +28,7 @@ namespace mongo {
public:
class eof : public std::exception {
public:
+ eof() { }
virtual const char * what() { return "BufReader eof"; }
};
@@ -88,6 +89,7 @@ namespace mongo {
}
const void* pos() { return _pos; }
+ const void* start() { return _start; }
private:
const void *_start;
diff --git a/util/checksum.h b/util/checksum.h
new file mode 100644
index 0000000..009ab56
--- /dev/null
+++ b/util/checksum.h
@@ -0,0 +1,37 @@
+#pragma once
+#include "../pch.h"
+namespace mongo {
+ /** a simple, rather dumb, but very fast checksum. see perftests.cpp for unit tests. */
+ struct Checksum {
+ union {
+ unsigned char bytes[16];
+ unsigned long long words[2];
+ };
+
+ // if you change this you must bump dur::CurrentVersion
+ void gen(const void *buf, unsigned len) {
+ wassert( ((size_t)buf) % 8 == 0 ); // performance warning
+ unsigned n = len / 8 / 2;
+ const unsigned long long *p = (const unsigned long long *) buf;
+ unsigned long long a = 0;
+ for( unsigned i = 0; i < n; i++ ) {
+ a += (*p ^ i);
+ p++;
+ }
+ unsigned long long b = 0;
+ for( unsigned i = 0; i < n; i++ ) {
+ b += (*p ^ i);
+ p++;
+ }
+ unsigned long long c = 0;
+ for( unsigned i = n * 2 * 8; i < len; i++ ) { // 0-7 bytes left
+ c = (c << 8) | ((const char *)buf)[i];
+ }
+ words[0] = a ^ len;
+ words[1] = b ^ c;
+ }
+
+ bool operator==(const Checksum& rhs) const { return words[0]==rhs.words[0] && words[1]==rhs.words[1]; }
+ bool operator!=(const Checksum& rhs) const { return words[0]!=rhs.words[0] || words[1]!=rhs.words[1]; }
+ };
+}
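
Not part of the patch: an illustrative round trip with the checksum above; alignedBuf and len stand in for an 8-byte-aligned buffer and its length.

    // illustration only -- alignedBuf and len are assumed to exist
    Checksum before, after;
    before.gen( alignedBuf , len );
    // ... write the buffer out and read it back ...
    after.gen( alignedBuf , len );
    if( before != after )
        log() << "checksum mismatch" << endl;
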
diff --git a/util/compress.cpp b/util/compress.cpp
new file mode 100644
index 0000000..bcde488
--- /dev/null
+++ b/util/compress.cpp
@@ -0,0 +1,31 @@
+// @file compress.cpp
+
+#include "../third_party/snappy/snappy.h"
+#include "compress.h"
+#include <string>
+#include <string.h>
+#include <assert.h>
+
+namespace mongo {
+
+ void rawCompress(const char* input,
+ size_t input_length,
+ char* compressed,
+ size_t* compressed_length)
+ {
+ snappy::RawCompress(input, input_length, compressed, compressed_length);
+ }
+
+ size_t maxCompressedLength(size_t source_len) {
+ return snappy::MaxCompressedLength(source_len);
+ }
+
+ size_t compress(const char* input, size_t input_length, std::string* output) {
+ return snappy::Compress(input, input_length, output);
+ }
+
+ bool uncompress(const char* compressed, size_t compressed_length, std::string* uncompressed) {
+ return snappy::Uncompress(compressed, compressed_length, uncompressed);
+ }
+
+}
diff --git a/util/compress.h b/util/compress.h
new file mode 100644
index 0000000..5bc5a33
--- /dev/null
+++ b/util/compress.h
@@ -0,0 +1,21 @@
+// @file compress.h
+
+#pragma once
+
+#include <string>
+
+namespace mongo {
+
+ size_t compress(const char* input, size_t input_length, std::string* output);
+
+ bool uncompress(const char* compressed, size_t compressed_length, std::string* uncompressed);
+
+ size_t maxCompressedLength(size_t source_len);
+ void rawCompress(const char* input,
+ size_t input_length,
+ char* compressed,
+ size_t* compressed_length);
+
+}
+
+
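
Not part of the patch: an illustrative round trip through the snappy wrappers declared above.

    // illustration only
    std::string compressed, restored;
    const char* raw = "aaaaaaaaaabbbbbbbbbbcccccccccc";
    size_t n = mongo::compress( raw , 30 , &compressed );    // returns compressed size
    bool ok  = mongo::uncompress( compressed.data() , compressed.size() , &restored );
    assert( ok && n == compressed.size() && restored == std::string( raw , 30 ) );
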
diff --git a/util/concurrency/list.h b/util/concurrency/list.h
index e5eaec6..01bae6f 100644
--- a/util/concurrency/list.h
+++ b/util/concurrency/list.h
@@ -42,38 +42,54 @@ namespace mongo {
friend class List1;
T *_next;
public:
+ Base() : _next(0){}
+ ~Base() { wassert(false); } // we never want this to happen
T* next() const { return _next; }
};
- T* head() const { return _head; }
+ /** note this is safe:
+
+ T* p = mylist.head();
+ if( p )
+ use(p);
+
+ and this is not:
+
+ if( mylist.head() )
+ use( mylist.head() ); // could become 0
+ */
+ T* head() const { return (T*) _head; }
void push(T* t) {
+ assert( t->_next == 0 );
scoped_lock lk(_m);
- t->_next = _head;
+ t->_next = (T*) _head;
_head = t;
}
- // intentionally leak.
+ // intentionally leaks.
void orphanAll() {
+ scoped_lock lk(_m);
_head = 0;
}
/* t is not deleted, but is removed from the list. (orphaned) */
void orphan(T* t) {
scoped_lock lk(_m);
- T *&prev = _head;
+ T *&prev = (T*&) _head;
T *n = prev;
while( n != t ) {
+ uassert( 14050 , "List1: item to orphan not in list", n );
prev = n->_next;
n = prev;
}
prev = t->_next;
if( ++_orphans > 500 )
- log() << "warning orphans=" << _orphans << '\n';
+ log() << "warning List1 orphans=" << _orphans << '\n';
}
private:
- T *_head;
+ volatile T *_head;
mongo::mutex _m;
int _orphans;
};
diff --git a/util/concurrency/mutex.h b/util/concurrency/mutex.h
index c463498..f17c3f0 100644
--- a/util/concurrency/mutex.h
+++ b/util/concurrency/mutex.h
@@ -19,11 +19,12 @@
#include <map>
#include <set>
-
#include "../heapcheck.h"
namespace mongo {
+ void printStackTrace( ostream &o );
+
class mutex;
inline boost::xtime incxtimemillis( long long s ) {
@@ -50,7 +51,6 @@ namespace mongo {
map< mid, set<mid> > followers;
boost::mutex &x;
unsigned magic;
-
void aBreakPoint() { } // for debugging
public:
// set these to create an assert that
@@ -147,20 +147,16 @@ namespace mongo {
~StaticObserver() { _destroyingStatics = true; }
};
- /** On pthread systems, it is an error to destroy a mutex while held. Static global
- * mutexes may be held upon shutdown in our implementation, and this way we avoid
- * destroying them.
- * NOT recursive.
+ /** On pthread systems, it is an error to destroy a mutex while held (boost mutex
+ * may use pthread). Static global mutexes may be held upon shutdown in our
+ * implementation, and this way we avoid destroying them.
+ * NOT recursive.
*/
class mutex : boost::noncopyable {
public:
#if defined(_DEBUG)
const char * const _name;
-#endif
-
-#if defined(_DEBUG)
- mutex(const char *name)
- : _name(name)
+ mutex(const char *name) : _name(name)
#else
mutex(const char *)
#endif
@@ -184,44 +180,47 @@ namespace mongo {
#else
ok( _l.locked() )
#endif
- {
- }
-
- ~try_lock() {
- }
-
+ { }
private:
boost::timed_mutex::scoped_timed_lock _l;
-
public:
const bool ok;
};
-
class scoped_lock : boost::noncopyable {
+ public:
#if defined(_DEBUG)
- mongo::mutex *mut;
+ struct PostStaticCheck {
+ PostStaticCheck() {
+ if ( StaticObserver::_destroyingStatics ) {
+ cout << "trying to lock a mongo::mutex during static shutdown" << endl;
+ printStackTrace( cout );
+ }
+ }
+ };
+
+ PostStaticCheck _check;
+ mongo::mutex * const _mut;
#endif
- public:
- scoped_lock( mongo::mutex &m ) : _l( m.boost() ) {
+ scoped_lock( mongo::mutex &m ) :
+#if defined(_DEBUG)
+ _mut(&m),
+#endif
+ _l( m.boost() ) {
#if defined(_DEBUG)
- mut = &m;
- mutexDebugger.entering(mut->_name);
+ mutexDebugger.entering(_mut->_name);
#endif
}
~scoped_lock() {
#if defined(_DEBUG)
- mutexDebugger.leaving(mut->_name);
+ mutexDebugger.leaving(_mut->_name);
#endif
}
boost::timed_mutex::scoped_lock &boost() { return _l; }
private:
boost::timed_mutex::scoped_lock _l;
};
-
-
private:
-
boost::timed_mutex &boost() { return *_m; }
boost::timed_mutex *_m;
};
@@ -229,4 +228,52 @@ namespace mongo {
typedef mutex::scoped_lock scoped_lock;
typedef boost::recursive_mutex::scoped_lock recursive_scoped_lock;
+ /** The concept with SimpleMutex is that it is a basic lock/unlock with no
+ special functionality (such as try and try timeout). Thus it can be
+ implemented using OS-specific facilities in all environments (if desired).
+ On Windows, the implementation below is faster than boost mutex.
+ */
+#if defined(_WIN32)
+ class SimpleMutex : boost::noncopyable {
+ CRITICAL_SECTION _cs;
+ public:
+ SimpleMutex(const char *name) { InitializeCriticalSection(&_cs); }
+ ~SimpleMutex() { DeleteCriticalSection(&_cs); }
+
+ void lock() { EnterCriticalSection(&_cs); }
+ void unlock() { LeaveCriticalSection(&_cs); }
+
+ class scoped_lock : boost::noncopyable {
+ SimpleMutex& _m;
+ public:
+ scoped_lock( SimpleMutex &m ) : _m(m) { _m.lock(); }
+ ~scoped_lock() { _m.unlock(); }
+ };
+ };
+#else
+ class SimpleMutex : boost::noncopyable {
+ public:
+ SimpleMutex(const char* name) { assert( pthread_mutex_init(&_lock,0) == 0 ); }
+ ~SimpleMutex(){
+ if ( ! StaticObserver::_destroyingStatics ) {
+ assert( pthread_mutex_destroy(&_lock) == 0 );
+ }
+ }
+
+ void lock() { assert( pthread_mutex_lock(&_lock) == 0 ); }
+ void unlock() { assert( pthread_mutex_unlock(&_lock) == 0 ); }
+
+ class scoped_lock : boost::noncopyable {
+ SimpleMutex& _m;
+ public:
+ scoped_lock( SimpleMutex &m ) : _m(m) { _m.lock(); }
+ ~scoped_lock() { _m.unlock(); }
+ };
+
+ private:
+ pthread_mutex_t _lock;
+ };
+
+#endif
+
}
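
Not part of the patch: a minimal sketch of the new SimpleMutex (plain lock/unlock, no try or timed variants). The counter is hypothetical.

    // illustration only
    static SimpleMutex countersMutex( "counters" );
    static long long requestsHandled = 0;
    void noteRequest() {
        SimpleMutex::scoped_lock lk( countersMutex );
        requestsHandled++;
    }
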
diff --git a/util/concurrency/race.h b/util/concurrency/race.h
index 0b8338c..4644e37 100644
--- a/util/concurrency/race.h
+++ b/util/concurrency/race.h
@@ -19,15 +19,56 @@ namespace mongo {
the same time. Also detects and disallows recursion.
*/
+#ifdef _WIN32
+ typedef unsigned threadId_t;
+#else
+ typedef pthread_t threadId_t;
+#endif
+
+
#if defined(_DEBUG)
+ namespace race {
+
+ class CodePoint {
+ public:
+ string lastName;
+ threadId_t lastTid;
+ string file;
+ CodePoint(string f) : lastTid(0), file(f) { }
+ };
+ class Check {
+ public:
+ Check(CodePoint& p) {
+ threadId_t t = GetCurrentThreadId();
+ if( p.lastTid == 0 ) {
+ p.lastTid = t;
+ p.lastName = getThreadName();
+ }
+ else if( t != p.lastTid ) {
+ log() << "\n\n\n\n\nRACE? error assert\n " << p.file << '\n'
+ << " " << p.lastName
+ << " " << getThreadName() << "\n\n" << endl;
+ mongoAbort("racecheck");
+ }
+ };
+ };
+
+ }
+
+#define RACECHECK
+ // dm TODO - the right code for this file is in a different branch at the moment (merge)
+ //#define RACECHECK
+ //static race::CodePoint __cp(__FILE__);
+ //race::Check __ck(__cp);
+
class CodeBlock {
volatile int n;
- unsigned tid;
+ threadId_t tid;
void fail() {
log() << "synchronization (race condition) failure" << endl;
printStackTrace();
- abort();
+ ::abort();
}
void enter() {
if( ++n != 1 ) fail();
@@ -58,6 +99,8 @@ namespace mongo {
#else
+#define RACECHECK
+
class CodeBlock{
public:
class Within {
@@ -69,4 +112,4 @@ namespace mongo {
#endif
-}
+} // namespace
diff --git a/util/concurrency/rwlock.h b/util/concurrency/rwlock.h
index ca81a9f..d8a11ea 100644
--- a/util/concurrency/rwlock.h
+++ b/util/concurrency/rwlock.h
@@ -21,9 +21,11 @@
#include "mutex.h"
#include "../time_support.h"
-// this requires Vista+ to work
+// this requires newer windows versions
// it works better than sharable_mutex under high contention
+#if defined(_WIN64)
//#define MONGO_USE_SRW_ON_WINDOWS 1
+#endif
#if !defined(MONGO_USE_SRW_ON_WINDOWS)
@@ -55,131 +57,153 @@ namespace mongo {
#if defined(MONGO_USE_SRW_ON_WINDOWS) && defined(_WIN32)
- class RWLock {
+ // Windows RWLock implementation (requires newer versions of windows, hence the macro above)
+ class RWLock : boost::noncopyable {
public:
- RWLock(const char *) { InitializeSRWLock(&_lock); }
+ RWLock(const char *, int lowPriorityWaitMS=0 ) : _lowPriorityWaitMS(lowPriorityWaitMS)
+ { InitializeSRWLock(&_lock); }
~RWLock() { }
+ const char * implType() const { return "WINSRW"; }
+ int lowPriorityWaitMS() const { return _lowPriorityWaitMS; }
void lock() { AcquireSRWLockExclusive(&_lock); }
void unlock() { ReleaseSRWLockExclusive(&_lock); }
void lock_shared() { AcquireSRWLockShared(&_lock); }
void unlock_shared() { ReleaseSRWLockShared(&_lock); }
bool lock_shared_try( int millis ) {
+ if( TryAcquireSRWLockShared(&_lock) )
+ return true;
+ if( millis == 0 )
+ return false;
unsigned long long end = curTimeMicros64() + millis*1000;
while( 1 ) {
+ Sleep(1);
if( TryAcquireSRWLockShared(&_lock) )
return true;
if( curTimeMicros64() >= end )
break;
- Sleep(1);
}
return false;
}
bool lock_try( int millis = 0 ) {
+ if( TryAcquireSRWLockExclusive(&_lock) ) // quick check to optimistically avoid calling curTimeMicros64
+ return true;
+ if( millis == 0 )
+ return false;
unsigned long long end = curTimeMicros64() + millis*1000;
- while( 1 ) {
+ do {
+ Sleep(1);
if( TryAcquireSRWLockExclusive(&_lock) )
return true;
- if( curTimeMicros64() >= end )
- break;
- Sleep(1);
- }
+ } while( curTimeMicros64() < end );
return false;
}
private:
SRWLOCK _lock;
+ const int _lowPriorityWaitMS;
};
#elif defined(BOOST_RWLOCK)
- class RWLock {
+
+ // Boost based RWLock implementation
+ class RWLock : boost::noncopyable {
shared_mutex _m;
+ const int _lowPriorityWaitMS;
public:
-#if defined(_DEBUG)
- const char *_name;
- RWLock(const char *name) : _name(name) { }
-#else
- RWLock(const char *) { }
-#endif
+ const char * const _name;
+
+ RWLock(const char *name, int lowPriorityWait=0) : _lowPriorityWaitMS(lowPriorityWait) , _name(name) { }
+
+ const char * implType() const { return "boost"; }
+
+ int lowPriorityWaitMS() const { return _lowPriorityWaitMS; }
+
void lock() {
- _m.lock();
-#if defined(_DEBUG)
- mutexDebugger.entering(_name);
-#endif
+ _m.lock();
+ DEV mutexDebugger.entering(_name);
}
+
+ /*void lock() {
+ // This sequence gives us the lock semantics we want: specifically that write lock acquisition is
+ // greedy EXCEPT when someone already is in upgradable state.
+ lockAsUpgradable();
+ upgrade();
+ DEV mutexDebugger.entering(_name);
+ }*/
+
void unlock() {
-#if defined(_DEBUG)
- mutexDebugger.leaving(_name);
-#endif
+ DEV mutexDebugger.leaving(_name);
_m.unlock();
}
+ void lockAsUpgradable() {
+ _m.lock_upgrade();
+ }
+ void unlockFromUpgradable() { // upgradable -> unlocked
+ _m.unlock_upgrade();
+ }
+ void upgrade() { // upgradable -> exclusive lock
+ _m.unlock_upgrade_and_lock();
+ }
+
void lock_shared() {
_m.lock_shared();
}
-
void unlock_shared() {
_m.unlock_shared();
}
bool lock_shared_try( int millis ) {
- boost::system_time until = get_system_time();
- until += boost::posix_time::milliseconds(millis);
- if( _m.timed_lock_shared( until ) ) {
+ if( _m.timed_lock_shared( boost::posix_time::milliseconds(millis) ) ) {
return true;
}
return false;
}
bool lock_try( int millis = 0 ) {
- boost::system_time until = get_system_time();
- until += boost::posix_time::milliseconds(millis);
- if( _m.timed_lock( until ) ) {
-#if defined(_DEBUG)
- mutexDebugger.entering(_name);
-#endif
+ if( _m.timed_lock( boost::posix_time::milliseconds(millis) ) ) {
+ DEV mutexDebugger.entering(_name);
return true;
}
return false;
}
-
-
};
+
#else
- class RWLock {
- pthread_rwlock_t _lock;
- inline void check( int x ) {
- if( x == 0 )
+ // Posix RWLock implementation
+ class RWLock : boost::noncopyable {
+ pthread_rwlock_t _lock;
+ const int _lowPriorityWaitMS;
+ static void check( int x ) {
+ if( MONGO_likely(x == 0) )
return;
log() << "pthread rwlock failed: " << x << endl;
assert( x == 0 );
}
-
+
public:
-#if defined(_DEBUG)
const char *_name;
- RWLock(const char *name) : _name(name) {
-#else
- RWLock(const char *) {
-#endif
+ RWLock(const char *name, int lowPriorityWaitMS=0) : _lowPriorityWaitMS(lowPriorityWaitMS), _name(name)
+ {
check( pthread_rwlock_init( &_lock , 0 ) );
}
-
+
~RWLock() {
if ( ! StaticObserver::_destroyingStatics ) {
- check( pthread_rwlock_destroy( &_lock ) );
+ wassert( pthread_rwlock_destroy( &_lock ) == 0 ); // wassert as don't want to throw from a destructor
}
}
+ const char * implType() const { return "posix"; }
+
+ int lowPriorityWaitMS() const { return _lowPriorityWaitMS; }
+
void lock() {
check( pthread_rwlock_wrlock( &_lock ) );
-#if defined(_DEBUG)
- mutexDebugger.entering(_name);
-#endif
+ DEV mutexDebugger.entering(_name);
}
void unlock() {
-#if defined(_DEBUG)
mutexDebugger.leaving(_name);
-#endif
check( pthread_rwlock_unlock( &_lock ) );
}
@@ -197,9 +221,7 @@ namespace mongo {
bool lock_try( int millis = 0 ) {
if( _try( millis , true ) ) {
-#if defined(_DEBUG)
- mutexDebugger.entering(_name);
-#endif
+ DEV mutexDebugger.entering(_name);
return true;
}
return false;
@@ -233,7 +255,7 @@ namespace mongo {
#endif
/** throws on failure to acquire in the specified time period. */
- class rwlock_try_write {
+ class rwlock_try_write : boost::noncopyable {
public:
struct exception { };
rwlock_try_write(RWLock& l, int millis = 0) : _l(l) {
@@ -245,16 +267,57 @@ namespace mongo {
RWLock& _l;
};
+ class rwlock_shared : boost::noncopyable {
+ public:
+ rwlock_shared(RWLock& rwlock) : _r(rwlock) {_r.lock_shared(); }
+ ~rwlock_shared() { _r.unlock_shared(); }
+ private:
+ RWLock& _r;
+ };
+
/* scoped lock for RWLock */
- class rwlock {
+ class rwlock : boost::noncopyable {
public:
- rwlock( const RWLock& lock , bool write , bool alreadyHaveLock = false )
+ /**
+ * @param write acquire a write lock if true, a sharable (read) lock if false
+ * @param lowPriorityWaitMS if > 0, try to get the lock non-greedily for that many ms
+ */
+ rwlock( const RWLock& lock , bool write, /* bool alreadyHaveLock = false , */int lowPriorityWaitMS = 0 )
: _lock( (RWLock&)lock ) , _write( write ) {
- if ( ! alreadyHaveLock ) {
- if ( _write )
- _lock.lock();
- else
+
+ {
+ if ( _write ) {
+
+ if ( ! lowPriorityWaitMS && lock.lowPriorityWaitMS() )
+ lowPriorityWaitMS = lock.lowPriorityWaitMS();
+
+ if ( lowPriorityWaitMS ) {
+ bool got = false;
+ for ( int i=0; i<lowPriorityWaitMS; i++ ) {
+ if ( _lock.lock_try(0) ) {
+ got = true;
+ break;
+ }
+
+ int sleep = 1;
+ if ( i > ( lowPriorityWaitMS / 20 ) )
+ sleep = 10;
+ sleepmillis(sleep);
+ i += ( sleep - 1 );
+ }
+ if ( ! got ) {
+ log() << "couldn't get lazy rwlock" << endl;
+ _lock.lock();
+ }
+ }
+ else {
+ _lock.lock();
+ }
+
+ }
+ else {
_lock.lock_shared();
+ }
}
}
~rwlock() {
@@ -267,4 +330,67 @@ namespace mongo {
RWLock& _lock;
const bool _write;
};
+
+ /** recursive on shared locks is ok for this implementation */
+ class RWLockRecursive : boost::noncopyable {
+ ThreadLocalValue<int> _state;
+ RWLock _lk;
+ friend class Exclusive;
+ public:
+ /** @param lpwaitms lazy wait */
+ RWLockRecursive(const char *name, int lpwaitms) : _lk(name, lpwaitms) { }
+
+ void assertExclusivelyLocked() {
+ dassert( _state.get() < 0 );
+ }
+
+ // RWLockRecursive::Exclusive scoped lock
+ class Exclusive : boost::noncopyable {
+ RWLockRecursive& _r;
+ rwlock *_scopedLock;
+ public:
+ Exclusive(RWLockRecursive& r) : _r(r), _scopedLock(0) {
+ int s = _r._state.get();
+ dassert( s <= 0 );
+ if( s == 0 )
+ _scopedLock = new rwlock(_r._lk, true);
+ _r._state.set(s-1);
+ }
+ ~Exclusive() {
+ int s = _r._state.get();
+ DEV wassert( s < 0 ); // wassert: don't throw from destructors
+ _r._state.set(s+1);
+ delete _scopedLock;
+ }
+ };
+
+ // RWLockRecursive::Shared scoped lock
+ class Shared : boost::noncopyable {
+ RWLockRecursive& _r;
+ bool _alreadyExclusive;
+ public:
+ Shared(RWLockRecursive& r) : _r(r) {
+ int s = _r._state.get();
+ _alreadyExclusive = s < 0;
+ if( !_alreadyExclusive ) {
+ dassert( s >= 0 ); // -1 would mean exclusive
+ if( s == 0 )
+ _r._lk.lock_shared();
+ _r._state.set(s+1);
+ }
+ }
+ ~Shared() {
+ if( _alreadyExclusive ) {
+ DEV wassert( _r._state.get() < 0 );
+ }
+ else {
+ int s = _r._state.get() - 1;
+ if( s == 0 )
+ _r._lk.unlock_shared();
+ _r._state.set(s);
+ DEV wassert( s >= 0 );
+ }
+ }
+ };
+ };
}
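
Not part of the patch: a sketch of the low-priority (lazy) write acquisition and the new RWLockRecursive scoped locks; the lock names are hypothetical.

    // illustration only
    static RWLock indexLock( "indexLock" , /*lowPriorityWaitMS=*/ 1000 );
    void reader() { rwlock_shared r( indexLock ); /* shared section */ }
    void writer() { rwlock w( indexLock , /*write=*/ true ); /* retries lock_try(0) up to ~1s before blocking */ }

    static RWLockRecursive fsLock( "fs" , /*lpwaitms=*/ 0 );
    void flush() {
        RWLockRecursive::Exclusive ex( fsLock );   // Shared sections may nest inside this
        fsLock.assertExclusivelyLocked();
    }
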
diff --git a/util/concurrency/shared_mutex_win.hpp b/util/concurrency/shared_mutex_win.hpp
index 5356cf2..e850fc6 100755..100644
--- a/util/concurrency/shared_mutex_win.hpp
+++ b/util/concurrency/shared_mutex_win.hpp
@@ -7,10 +7,31 @@
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
-// MongoDB :
-//
-// Slightly modified boost file to not die above 127 pending writes
-//
+/* MongoDB :
+ Slightly modified boost file to not die above 127 pending writes
+ Here is what changed (from boost 1.42.0 shared_mutex.hpp):
+ 1,2c1,2
+ < #ifndef BOOST_THREAD_WIN32_SHARED_MUTEX_HPP
+ < #define BOOST_THREAD_WIN32_SHARED_MUTEX_HPP
+ ---
+ > #ifndef BOOST_THREAD_WIN32_SHARED_MUTEX_HPP_MODIFIED
+ > #define BOOST_THREAD_WIN32_SHARED_MUTEX_HPP_MODIFIED
+ 22c27
+ < class shared_mutex:
+ ---
+ > class modified_shared_mutex:
+ 73c78
+ < shared_mutex():
+ ---
+ > modified_shared_mutex():
+ 84c89
+ < ~shared_mutex()
+ ---
+ > ~modified_shared_mutex()
+ 283a289,290
+ > if( new_state.exclusive_waiting == 127 ) // the maximum already!
+ > break;
+*/
#include <boost/assert.hpp>
#include <boost/detail/interlocked.hpp>
diff --git a/util/concurrency/spin_lock.cpp b/util/concurrency/spin_lock.cpp
index 0f33609..1811f15 100644
--- a/util/concurrency/spin_lock.cpp
+++ b/util/concurrency/spin_lock.cpp
@@ -25,20 +25,28 @@ namespace mongo {
SpinLock::~SpinLock() {
#if defined(_WIN32)
DeleteCriticalSection(&_cs);
+#elif defined(__USE_XOPEN2K)
+ pthread_spin_destroy(&_lock);
#endif
}
SpinLock::SpinLock()
-#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
- : _locked( false ) { }
-#elif defined(_WIN32)
+#if defined(_WIN32)
{ InitializeCriticalSectionAndSpinCount(&_cs, 4000); }
+#elif defined(__USE_XOPEN2K)
+ { pthread_spin_init( &_lock , 0 ); }
+#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
+ : _locked( false ) { }
#else
- : _mutex( "SpinLock" ) { }
+ : _mutex( "SpinLock" ) { }
#endif
void SpinLock::lock() {
-#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
+#if defined(_WIN32)
+ EnterCriticalSection(&_cs);
+#elif defined(__USE_XOPEN2K)
+ pthread_spin_lock( &_lock );
+#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
// fast path
if (!_locked && !__sync_lock_test_and_set(&_locked, true)) {
return;
@@ -55,8 +63,6 @@ namespace mongo {
while (__sync_lock_test_and_set(&_locked, true)) {
nanosleep(&t, NULL);
}
-#elif defined(_WIN32)
- EnterCriticalSection(&_cs);
#else
// WARNING Missing spin lock in this platform. This can potentially
// be slow.
@@ -66,19 +72,28 @@ namespace mongo {
}
void SpinLock::unlock() {
-#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
-
- __sync_lock_release(&_locked);
-
-#elif defined(WIN32)
-
+#if defined(_WIN32)
LeaveCriticalSection(&_cs);
-
+#elif defined(__USE_XOPEN2K)
+ pthread_spin_unlock(&_lock);
+#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
+ __sync_lock_release(&_locked);
#else
-
_mutex.unlock();
+#endif
+ }
+ bool SpinLock::isfast() {
+#if defined(_WIN32)
+ return true;
+#elif defined(__USE_XOPEN2K)
+ return true;
+#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
+ return true;
+#else
+ return false;
#endif
}
+
} // namespace mongo
diff --git a/util/concurrency/spin_lock.h b/util/concurrency/spin_lock.h
index 02a8797..65ecb15 100644
--- a/util/concurrency/spin_lock.h
+++ b/util/concurrency/spin_lock.h
@@ -18,8 +18,7 @@
#pragma once
-#include "pch.h"
-#include "rwlock.h"
+#include "mutex.h"
namespace mongo {
@@ -27,7 +26,7 @@ namespace mongo {
* The spinlock currently requires late GCC support routines to be efficient.
* Other platforms default to a mutex implementation.
*/
- class SpinLock {
+ class SpinLock : boost::noncopyable {
public:
SpinLock();
~SpinLock();
@@ -35,30 +34,30 @@ namespace mongo {
void lock();
void unlock();
+ static bool isfast(); // true if a real spinlock on this platform
+
private:
-#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
- volatile bool _locked;
-#elif defined(_WIN32)
+#if defined(_WIN32)
CRITICAL_SECTION _cs;
+#elif defined(__USE_XOPEN2K)
+ pthread_spinlock_t _lock;
+#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
+ volatile bool _locked;
#else
- // default to a scoped mutex if not implemented
- RWLock _mutex;
+ // default to a mutex if not implemented
+ SimpleMutex _mutex;
#endif
-
- // Non-copyable, non-assignable
- SpinLock(SpinLock&);
- SpinLock& operator=(SpinLock&);
};
- struct scoped_spinlock {
- scoped_spinlock( SpinLock& l ) : _l(l){
+ class scoped_spinlock : boost::noncopyable {
+ public:
+ scoped_spinlock( SpinLock& l ) : _l(l) {
_l.lock();
}
~scoped_spinlock() {
- _l.unlock();
- }
+ _l.unlock();}
+ private:
SpinLock& _l;
};
} // namespace mongo
-
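
Not part of the patch: a sketch of scoped_spinlock and the new isfast() probe; the counter is hypothetical.

    // illustration only
    static SpinLock statsLock;
    static long long opsHandled = 0;
    void countOp() {
        scoped_spinlock lk( statsLock );   // keep the critical section very short
        opsHandled++;
    }
    bool realSpin = SpinLock::isfast();    // false only where the SimpleMutex fallback is in use
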
diff --git a/util/concurrency/synchronization.cpp b/util/concurrency/synchronization.cpp
index 12e2894..ce2547c 100644
--- a/util/concurrency/synchronization.cpp
+++ b/util/concurrency/synchronization.cpp
@@ -20,7 +20,8 @@
namespace mongo {
- Notification::Notification() : _mutex ( "Notification" ) , _notified( false ) { }
+ Notification::Notification() : _mutex ( "Notification" ) , _notified( false ) {
+ }
Notification::~Notification() { }
@@ -37,19 +38,40 @@ namespace mongo {
_condition.notify_one();
}
- NotifyAll::NotifyAll() : _mutex("NotifyAll"), _counter(0) { }
+ /* --- NotifyAll --- */
+
+ NotifyAll::NotifyAll() : _mutex("NotifyAll") {
+ _lastDone = 0;
+ _lastReturned = 0;
+ _nWaiting = 0;
+ }
+
+ NotifyAll::When NotifyAll::now() {
+ scoped_lock lock( _mutex );
+ return ++_lastReturned;
+ }
+
+ void NotifyAll::waitFor(When e) {
+ scoped_lock lock( _mutex );
+ ++_nWaiting;
+ while( _lastDone < e ) {
+ _condition.wait( lock.boost() );
+ }
+ }
- void NotifyAll::wait() {
+ void NotifyAll::awaitBeyondNow() {
scoped_lock lock( _mutex );
- unsigned long long old = _counter;
- while( old == _counter ) {
+ ++_nWaiting;
+ When e = ++_lastReturned;
+ while( _lastDone <= e ) {
_condition.wait( lock.boost() );
}
}
- void NotifyAll::notifyAll() {
+ void NotifyAll::notifyAll(When e) {
scoped_lock lock( _mutex );
- ++_counter;
+ _lastDone = e;
+ _nWaiting = 0;
_condition.notify_all();
}
diff --git a/util/concurrency/synchronization.h b/util/concurrency/synchronization.h
index ac2fcab..a0e89f7 100644
--- a/util/concurrency/synchronization.h
+++ b/util/concurrency/synchronization.h
@@ -56,18 +56,30 @@ namespace mongo {
public:
NotifyAll();
+ typedef unsigned long long When;
+
+ When now();
+
/** awaits the next notifyAll() call by another thread. notifications that precede this
call are ignored -- we are looking for a fresh event.
*/
- void wait();
+ void waitFor(When);
+
+ /** a bit faster than waitFor( now() ) */
+ void awaitBeyondNow();
/** may be called multiple times. notifies all waiters */
- void notifyAll();
+ void notifyAll(When);
+
+ /** indicates how many threads are waiting for a notify. */
+ unsigned nWaiting() const { return _nWaiting; }
private:
mongo::mutex _mutex;
- unsigned long long _counter;
boost::condition _condition;
+ When _lastDone;
+ When _lastReturned;
+ unsigned _nWaiting;
};
} // namespace mongo
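
Not part of the patch: a sketch of the When-based handshake declared above; durNotify and the thread roles are hypothetical.

    // illustration only
    NotifyAll durNotify;

    // requesting thread:
    NotifyAll::When e = durNotify.now();   // capture a point in time
    // ... enqueue work tagged with e ...
    durNotify.waitFor( e );                // returns once notifyAll(x) has run with x >= e

    // worker thread, after completing everything requested up to e:
    durNotify.notifyAll( e );              // wakes all threads waiting for <= e
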
diff --git a/util/concurrency/value.h b/util/concurrency/value.h
index 0a0ef85..c66977b 100644
--- a/util/concurrency/value.h
+++ b/util/concurrency/value.h
@@ -1,5 +1,5 @@
/* @file value.h
- concurrency helpers Atomic<T> and DiagStr
+ concurrency helpers DiagStr, Guarded
*/
/**
@@ -20,44 +20,29 @@
#pragma once
+#include "mutex.h"
+
namespace mongo {
- extern mutex _atomicMutex;
+ /** declares that a variable is "guarded" by a mutex.
- /** atomic wrapper for a value. enters a mutex on each access. must
- be copyable.
- */
- template<typename T>
- class Atomic : boost::noncopyable {
- T val;
- public:
- Atomic<T>() { }
+ The decl documents the rule. For example "counta and countb are guarded by xyzMutex":
- void operator=(const T& a) {
- scoped_lock lk(_atomicMutex);
- val = a;
- }
+ Guarded<int, xyzMutex> counta;
+ Guarded<int, xyzMutex> countb;
- operator T() const {
- scoped_lock lk(_atomicMutex);
- return val;
+ Upon use, specify the scoped_lock object. This makes it hard for someone
+ later to forget to hold the lock. In _DEBUG builds a runtime check verifies
+ that it is the right lock.
+ */
+ template <typename T, mutex& BY>
+ class Guarded {
+ T _val;
+ public:
+ T& ref(const scoped_lock& lk) {
+ dassert( lk._mut == &BY );
+ return _val;
}
-
- /** example:
- Atomic<int> q;
- ...
- {
- Atomic<int>::tran t(q);
- if( q.ref() > 0 )
- q.ref()--;
- }
- */
- class tran : private scoped_lock {
- Atomic<T>& _a;
- public:
- tran(Atomic<T>& a) : scoped_lock(_atomicMutex), _a(a) { }
- T& ref() { return _a.val; }
- };
};
class DiagStr {
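
Not part of the patch: the Guarded pattern documented above in use; xyzMutex is hypothetical and must be a mutex with external linkage (it is a reference template parameter).

    // illustration only
    extern mongo::mutex xyzMutex;
    Guarded<int, xyzMutex> counta;
    void bump() {
        scoped_lock lk( xyzMutex );
        counta.ref( lk )++;    // in _DEBUG builds ref() dasserts that lk really holds xyzMutex
    }
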
diff --git a/util/concurrency/vars.cpp b/util/concurrency/vars.cpp
index 19b58eb..213e576 100644
--- a/util/concurrency/vars.cpp
+++ b/util/concurrency/vars.cpp
@@ -17,15 +17,13 @@
*/
#include "pch.h"
-#include "value.h"
#include "mutex.h"
+#include "value.h"
namespace mongo {
mutex DiagStr::m("diags");
- mongo::mutex _atomicMutex("_atomicMutex");
-
// intentional leak. otherwise destructor orders can be problematic at termination.
MutexDebugger &mutexDebugger = *(new MutexDebugger());
diff --git a/util/file.h b/util/file.h
index 0a973e3..368e692 100644
--- a/util/file.h
+++ b/util/file.h
@@ -23,10 +23,8 @@
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
-#else
-#include <windows.h>
+#include <sys/statvfs.h>
#endif
-
#include "text.h"
namespace mongo {
@@ -37,6 +35,8 @@ namespace mongo {
typedef boost::uint64_t fileofs;
#endif
+ /* NOTE: not thread-safe (at least the windows implementation isn't). */
+
class FileInterface {
public:
void open(const char *fn) {}
@@ -46,6 +46,12 @@ namespace mongo {
bool is_open() {return false;}
fileofs len() { return 0; }
void fsync() { assert(false); }
+
+ // shrink file to size bytes. No-op if file already smaller.
+ void truncate(fileofs size);
+
+ /** @return -1 if error or unavailable */
+ static boost::intmax_t freeSpace(const string &path) { assert(false); return -1; }
};
#if defined(_WIN32)
@@ -54,10 +60,11 @@ namespace mongo {
class File : public FileInterface {
HANDLE fd;
bool _bad;
+ string _name;
void err(BOOL b=false) { /* false = error happened */
if( !b && !_bad ) {
_bad = true;
- log() << "File I/O error " << GetLastError() << '\n';
+ log() << "File " << _name << "I/O error " << GetLastError() << '\n';
}
}
public:
@@ -69,7 +76,8 @@ namespace mongo {
if( is_open() ) CloseHandle(fd);
fd = INVALID_HANDLE_VALUE;
}
- void open(const char *filename, bool readOnly=false ) {
+ void open(const char *filename, bool readOnly=false , bool direct=false) {
+ _name = filename;
fd = CreateFile(
toNativeString(filename).c_str(),
( readOnly ? 0 : GENERIC_WRITE ) | GENERIC_READ, FILE_SHARE_WRITE|FILE_SHARE_READ,
@@ -81,6 +89,15 @@ namespace mongo {
else
_bad = false;
}
+ static boost::intmax_t freeSpace(const string &path) {
+ ULARGE_INTEGER avail;
+ if( GetDiskFreeSpaceEx(toNativeString(path.c_str()).c_str(), &avail, NULL, NULL) ) {
+ return avail.QuadPart;
+ }
+ DWORD e = GetLastError();
+ log() << "GetDiskFreeSpaceEx fails errno: " << e << endl;
+ return -1;
+ }
void write(fileofs o, const char *data, unsigned len) {
LARGE_INTEGER li;
li.QuadPart = o;
@@ -111,6 +128,20 @@ namespace mongo {
return li.QuadPart;
}
void fsync() { FlushFileBuffers(fd); }
+
+ void truncate(fileofs size) {
+ if (len() <= size)
+ return;
+
+ LARGE_INTEGER li;
+ li.QuadPart = size;
+ if (SetFilePointerEx(fd, li, NULL, FILE_BEGIN) == 0){
+ err(false);
+ return; //couldn't seek
+ }
+
+ err(SetEndOfFile(fd));
+ }
};
#else
@@ -140,9 +171,13 @@ namespace mongo {
#define O_NOATIME 0
#endif
- void open(const char *filename, bool readOnly=false ) {
+ void open(const char *filename, bool readOnly=false , bool direct=false) {
fd = ::open(filename,
- O_CREAT | ( readOnly ? 0 : ( O_RDWR | O_NOATIME ) ) ,
+ O_CREAT | ( readOnly ? 0 : ( O_RDWR | O_NOATIME ) )
+#if defined(O_DIRECT)
+ | ( direct ? O_DIRECT : 0 )
+#endif
+ ,
S_IRUSR | S_IWUSR);
if ( fd <= 0 ) {
out() << "couldn't open " << filename << ' ' << errnoWithDescription() << endl;
@@ -154,14 +189,37 @@ namespace mongo {
err( ::pwrite(fd, data, len, o) == (int) len );
}
void read(fileofs o, char *data, unsigned len) {
- err( ::pread(fd, data, len, o) == (int) len );
+ ssize_t s = ::pread(fd, data, len, o);
+ if( s == -1 ) {
+ err(false);
+ }
+ else if( s != (int) len ) {
+ _bad = true;
+ log() << "File error read:" << s << " bytes, wanted:" << len << " ofs:" << o << endl;
+ }
}
bool bad() { return _bad; }
bool is_open() { return fd > 0; }
fileofs len() {
- return lseek(fd, 0, SEEK_END);
+ off_t o = lseek(fd, 0, SEEK_END);
+ if( o != (off_t) -1 )
+ return o;
+ err(false);
+ return 0;
}
void fsync() { ::fsync(fd); }
+ static boost::intmax_t freeSpace ( const string &path ) {
+ struct statvfs info;
+ assert( !statvfs( path.c_str() , &info ) );
+ return boost::intmax_t( info.f_bavail ) * info.f_frsize;
+ }
+
+ void truncate(fileofs size) {
+ if (len() <= size)
+ return;
+
+ err(ftruncate(fd, size) == 0);
+ }
};
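
Not part of the patch: the new FileInterface members in use; the path and desiredLen are hypothetical.

    // illustration only
    boost::intmax_t bytesFree = File::freeSpace( "/data/db" );   // -1 if unavailable
    File f;
    f.open( "/data/db/journal/prealloc.0" , /*readOnly=*/ false );
    if( f.is_open() && f.len() > desiredLen )
        f.truncate( desiredLen );                                // no-op when already smaller
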
diff --git a/util/file_allocator.cpp b/util/file_allocator.cpp
index 54590ed..b0572f9 100644
--- a/util/file_allocator.cpp
+++ b/util/file_allocator.cpp
@@ -32,19 +32,22 @@ using namespace mongoutils;
#endif
#include "file_allocator.h"
+#include "paths.h"
namespace mongo {
- void ensureParentDirCreated(const boost::filesystem::path& p){
+ boost::filesystem::path ensureParentDirCreated(const boost::filesystem::path& p){
const boost::filesystem::path parent = p.branch_path();
-
+
if (! boost::filesystem::exists(parent)){
ensureParentDirCreated(parent);
log() << "creating directory " << parent.string() << endl;
boost::filesystem::create_directory(parent);
+ flushMyDirectory(parent); // flushes grandparent to ensure parent exists after crash
}
-
+
assert(boost::filesystem::is_directory(parent));
+ return parent;
}
#if defined(_WIN32)
@@ -74,6 +77,10 @@ namespace mongo {
// TODO : we should to avoid fragmentation
}
+ bool FileAllocator::hasFailed() const {
+ return false;
+ }
+
#else
FileAllocator::FileAllocator()
@@ -174,6 +181,10 @@ namespace mongo {
}
}
+ bool FileAllocator::hasFailed() const {
+ return _failed;
+ }
+
void FileAllocator::checkFailure() {
if (_failed) {
// we want to log the problem (diskfull.js expects it) but we do not want to dump a stack trace
@@ -197,6 +208,19 @@ namespace mongo {
return false;
}
+ string makeTempFileName( path root ) {
+ while( 1 ) {
+ path p = root / "_tmp";
+ stringstream ss;
+ ss << (unsigned) rand();
+ p /= ss.str();
+ string fn = p.string();
+ if( !boost::filesystem::exists(p) )
+ return fn;
+ }
+ return "";
+ }
+
void FileAllocator::run( FileAllocator * fa ) {
setThreadName( "FileAllocator" );
while( 1 ) {
@@ -215,19 +239,25 @@ namespace mongo {
name = fa->_pending.front();
size = fa->_pendingSize[ name ];
}
+
+ string tmp;
+ long fd = 0;
try {
log() << "allocating new datafile " << name << ", filling with zeroes..." << endl;
- ensureParentDirCreated(name);
- long fd = open(name.c_str(), O_CREAT | O_RDWR | O_NOATIME, S_IRUSR | S_IWUSR);
+
+ boost::filesystem::path parent = ensureParentDirCreated(name);
+ tmp = makeTempFileName( parent );
+ ensureParentDirCreated(tmp);
+
+ fd = open(tmp.c_str(), O_CREAT | O_RDWR | O_NOATIME, S_IRUSR | S_IWUSR);
if ( fd <= 0 ) {
- stringstream ss;
- ss << "FileAllocator: couldn't open " << name << ' ' << errnoWithDescription();
- uassert( 10439 , ss.str(), fd <= 0 );
+ log() << "FileAllocator: couldn't create " << name << " (" << tmp << ") " << errnoWithDescription() << endl;
+ uasserted(10439, "");
}
#if defined(POSIX_FADV_DONTNEED)
if( posix_fadvise(fd, 0, size, POSIX_FADV_DONTNEED) ) {
- log() << "warning: posix_fadvise fails " << name << ' ' << errnoWithDescription() << endl;
+ log() << "warning: posix_fadvise fails " << name << " (" << tmp << ") " << errnoWithDescription() << endl;
}
#endif
@@ -236,18 +266,32 @@ namespace mongo {
/* make sure the file is the full desired length */
ensureLength( fd , size );
+ close( fd );
+ fd = 0;
+
+ if( rename(tmp.c_str(), name.c_str()) ) {
+ log() << "error: couldn't rename " << tmp << " to " << name << ' ' << errnoWithDescription() << endl;
+ uasserted(13653, "");
+ }
+ flushMyDirectory(name);
+
log() << "done allocating datafile " << name << ", "
<< "size: " << size/1024/1024 << "MB, "
<< " took " << ((double)t.millis())/1000.0 << " secs"
<< endl;
- close( fd );
-
+ // no longer in a failed state. allow new writers.
+ fa->_failed = false;
}
catch ( ... ) {
+ if ( fd > 0 )
+ close( fd );
log() << "error failed to allocate new file: " << name
- << " size: " << size << ' ' << errnoWithDescription() << endl;
+ << " size: " << size << ' ' << errnoWithDescription() << warnings;
+ log() << " will try again in 10 seconds" << endl; // not going to warning logs
try {
+ if ( tmp.size() )
+ BOOST_CHECK_EXCEPTION( boost::filesystem::remove( tmp ) );
BOOST_CHECK_EXCEPTION( boost::filesystem::remove( name ) );
}
catch ( ... ) {
@@ -256,7 +300,10 @@ namespace mongo {
fa->_failed = true;
// not erasing from pending
fa->_pendingUpdated.notify_all();
- return; // no more allocation
+
+
+ sleepsecs(10);
+ continue;
}
{
diff --git a/util/file_allocator.h b/util/file_allocator.h
index 6cc7b2d..7c3cacb 100644
--- a/util/file_allocator.h
+++ b/util/file_allocator.h
@@ -47,12 +47,14 @@ namespace mongo {
void allocateAsap( const string &name, unsigned long long &size );
void waitUntilFinished() const;
+
+ bool hasFailed() const;
static void ensureLength(int fd , long size);
/** @return the singleton */
static FileAllocator * get();
-
+
private:
FileAllocator();
@@ -84,6 +86,6 @@ namespace mongo {
};
/** like "mkdir -p" but on parent dir of p rather than p itself */
- void ensureParentDirCreated(const boost::filesystem::path& p);
+ boost::filesystem::path ensureParentDirCreated(const boost::filesystem::path& p);
} // namespace mongo
diff --git a/util/goodies.h b/util/goodies.h
index 53a74c2..65bfbab 100644
--- a/util/goodies.h
+++ b/util/goodies.h
@@ -109,49 +109,12 @@ namespace mongo {
// PRINTFL; prints file:line
#define MONGO_PRINTFL cout << __FILE__ ":" << __LINE__ << endl
#define PRINTFL MONGO_PRINTFL
+#define MONGO_FLOG log() << __FILE__ ":" << __LINE__ << endl
+#define FLOG MONGO_FLOG
#undef assert
#define assert MONGO_assert
- struct WrappingInt {
- WrappingInt() {
- x = 0;
- }
- WrappingInt(unsigned z) : x(z) { }
- unsigned x;
- operator unsigned() const {
- return x;
- }
-
-
- static int diff(unsigned a, unsigned b) {
- return a-b;
- }
- bool operator<=(WrappingInt r) {
- // platform dependent
- int df = (r.x - x);
- return df >= 0;
- }
- bool operator>(WrappingInt r) {
- return !(r<=*this);
- }
- };
-
- /*
-
- class DebugMutex : boost::noncopyable {
- friend class lock;
- mongo::mutex m;
- int locked;
- public:
- DebugMutex() : locked(0); { }
- bool isLocked() { return locked; }
- };
-
- */
-
-//typedef scoped_lock lock;
-
inline bool startsWith(const char *str, const char *prefix) {
size_t l = strlen(prefix);
if ( strlen(str) < l ) return false;
@@ -236,6 +199,7 @@ namespace mongo {
_active = 0;
}
+ // typically you use a ProgressMeterHolder rather than calling this directly
void reset( unsigned long long total , int secondsBetween = 3 , int checkInterval = 100 ) {
_total = total;
_secondsBetween = secondsBetween;
@@ -257,6 +221,7 @@ namespace mongo {
}
/**
+ * @param n how far along we are relative to the total # we set in CurOp::setMessage
* @return if row was printed
*/
bool hit( int n = 1 ) {
@@ -282,13 +247,15 @@ namespace mongo {
return true;
}
- unsigned long long done() {
- return _done;
+ void setTotalWhileRunning( unsigned long long total ) {
+ _total = total;
}
- unsigned long long hits() {
- return _hits;
- }
+ unsigned long long done() const { return _done; }
+
+ unsigned long long hits() const { return _hits; }
+
+ unsigned long long total() const { return _total; }
string toString() const {
if ( ! _active )
@@ -314,6 +281,10 @@ namespace mongo {
int _lastTime;
};
+ // e.g.:
+ // CurOp * op = cc().curop();
+ // ProgressMeterHolder pm( op->setMessage( "index: (1/3) external sort" , d->stats.nrecords , 10 ) );
+ // loop { pm.hit(); }
class ProgressMeterHolder : boost::noncopyable {
public:
ProgressMeterHolder( ProgressMeter& pm )
@@ -417,7 +388,7 @@ namespace mongo {
class ThreadSafeString {
public:
ThreadSafeString( size_t size=256 )
- : _size( 256 ) , _buf( new char[256] ) {
+ : _size( size ) , _buf( new char[size] ) {
memset( _buf , 0 , _size );
}
@@ -468,97 +439,6 @@ namespace mongo {
ostream& operator<<( ostream &s, const ThreadSafeString &o );
- inline bool isNumber( char c ) {
- return c >= '0' && c <= '9';
- }
-
- inline unsigned stringToNum(const char *str) {
- unsigned x = 0;
- const char *p = str;
- while( 1 ) {
- if( !isNumber(*p) ) {
- if( *p == 0 && p != str )
- break;
- throw 0;
- }
- x = x * 10 + *p++ - '0';
- }
- return x;
- }
-
- // for convenience, '{' is greater than anything and stops number parsing
- inline int lexNumCmp( const char *s1, const char *s2 ) {
- //cout << "START : " << s1 << "\t" << s2 << endl;
- while( *s1 && *s2 ) {
-
- bool p1 = ( *s1 == (char)255 );
- bool p2 = ( *s2 == (char)255 );
- //cout << "\t\t " << p1 << "\t" << p2 << endl;
- if ( p1 && !p2 )
- return 1;
- if ( p2 && !p1 )
- return -1;
-
- bool n1 = isNumber( *s1 );
- bool n2 = isNumber( *s2 );
-
- if ( n1 && n2 ) {
- // get rid of leading 0s
- while ( *s1 == '0' ) s1++;
- while ( *s2 == '0' ) s2++;
-
- char * e1 = (char*)s1;
- char * e2 = (char*)s2;
-
- // find length
- // if end of string, will break immediately ('\0')
- while ( isNumber (*e1) ) e1++;
- while ( isNumber (*e2) ) e2++;
-
- int len1 = (int)(e1-s1);
- int len2 = (int)(e2-s2);
-
- int result;
- // if one is longer than the other, return
- if ( len1 > len2 ) {
- return 1;
- }
- else if ( len2 > len1 ) {
- return -1;
- }
- // if the lengths are equal, just strcmp
- else if ( (result = strncmp(s1, s2, len1)) != 0 ) {
- return result;
- }
-
- // otherwise, the numbers are equal
- s1 = e1;
- s2 = e2;
- continue;
- }
-
- if ( n1 )
- return 1;
-
- if ( n2 )
- return -1;
-
- if ( *s1 > *s2 )
- return 1;
-
- if ( *s2 > *s1 )
- return -1;
-
- s1++; s2++;
- }
-
- if ( *s1 )
- return 1;
- if ( *s2 )
- return -1;
- return 0;
- }
-
/** A generic pointer type for function arguments.
* It will convert from any pointer type except auto_ptr.
* Semantics are the same as passing the pointer returned from get()
@@ -597,6 +477,8 @@ namespace mongo {
T* _p;
};
+
+
/** Hmmmm */
using namespace boost;
diff --git a/util/hashtab.h b/util/hashtab.h
index 6818bef..f1a3306 100644
--- a/util/hashtab.h
+++ b/util/hashtab.h
@@ -54,7 +54,7 @@ namespace mongo {
}
};
void* _buf;
- int n;
+ int n; // number of hashtable buckets
int maxChain;
Node& nodes(int i) {
@@ -156,9 +156,9 @@ namespace mongo {
typedef void (*IteratorCallback)( const Key& k , Type& v );
void iterAll( IteratorCallback callback ) {
for ( int i=0; i<n; i++ ) {
- if ( ! nodes(i).inUse() )
- continue;
- callback( nodes(i).k , nodes(i).value );
+ if ( nodes(i).inUse() ) {
+ callback( nodes(i).k , nodes(i).value );
+ }
}
}
@@ -166,9 +166,9 @@ namespace mongo {
typedef void (*IteratorCallback2)( const Key& k , Type& v , void * extra );
void iterAll( IteratorCallback2 callback , void * extra ) {
for ( int i=0; i<n; i++ ) {
- if ( ! nodes(i).inUse() )
- continue;
- callback( nodes(i).k , nodes(i).value , extra );
+ if ( nodes(i).inUse() ) {
+ callback( nodes(i).k , nodes(i).value , extra );
+ }
}
}
diff --git a/util/log.cpp b/util/log.cpp
index eb1cbae..bc48584 100644
--- a/util/log.cpp
+++ b/util/log.cpp
@@ -19,16 +19,18 @@
#include "pch.h"
#include "assert_util.h"
#include "assert.h"
-//#include "file.h"
#include <cmath>
+#include "time_support.h"
using namespace std;
-#ifndef _WIN32
-#include <cxxabi.h>
-#include <sys/file.h>
+#ifdef _WIN32
+# include <io.h>
+#else
+# include <cxxabi.h>
+# include <sys/file.h>
#endif
-#include "../db/jsobj.h"
+//#include "../db/jsobj.h"
namespace mongo {
@@ -47,6 +49,8 @@ namespace mongo {
uassert( 10268 , "LoggingManager already started" , ! _enabled );
_append = append;
+ bool exists = boost::filesystem::exists(lp);
+
// test path
FILE * test = fopen( lp.c_str() , _append ? "a" : "w" );
if ( ! test ) {
@@ -59,6 +63,14 @@ namespace mongo {
dbexit( EXIT_BADOPTIONS );
assert( 0 );
}
+
+ if (append && exists){
+ // two blank lines before and after
+ const string msg = "\n\n***** SERVER RESTARTED *****\n\n\n";
+ massert(14036, errnoWithPrefix("couldn't write to log file"),
+ fwrite(msg.data(), 1, msg.size(), test) == msg.size());
+ }
+
fclose( test );
_path = lp;
@@ -74,7 +86,7 @@ namespace mongo {
if ( _file ) {
#ifdef _WIN32
- cout << "log rotation doesn't work on windows" << endl;
+ cout << "log rotation net yet supported on windows" << endl;
return;
#else
struct tm t;
@@ -95,8 +107,21 @@ namespace mongo {
assert(0);
}
+#ifdef _WIN32 // windows has these functions, it just gives them funny names
+# define dup2 _dup2
+# define fileno _fileno
+#endif
+ // redirect stderr to log file
+ dup2(fileno(tmp), 2);
+
Logstream::setLogFile(tmp); // after this point no thread will be using old file
+#if 0 // enable to test redirection
+ cout << "written to cout" << endl;
+ cerr << "written to cerr" << endl;
+ log() << "written to log()" << endl;
+#endif
+
_file = tmp;
_opened = time(0);
}
@@ -125,4 +150,3 @@ namespace mongo {
FILE* Logstream::logfile = stdout;
}
-
diff --git a/util/log.h b/util/log.h
index 86aae1c..d5c7e55 100644
--- a/util/log.h
+++ b/util/log.h
@@ -46,6 +46,39 @@ namespace mongo {
}
}
+ class LabeledLevel {
+ public:
+
+ LabeledLevel( int level ) : _level( level ) {}
+ LabeledLevel( const char* label, int level ) : _label( label ), _level( level ) {}
+ LabeledLevel( const string& label, int level ) : _label( label ), _level( level ) {}
+
+ LabeledLevel operator+( int i ) const {
+ return LabeledLevel( _label, _level + i );
+ }
+
+ LabeledLevel operator+( const char* label ) const {
+ if( _label == "" )
+ return LabeledLevel( label, _level );
+ return LabeledLevel( _label + string("::") + label, _level );
+ }
+
+ LabeledLevel operator+( string& label ) const {
+ return LabeledLevel( _label + string("::") + label, _level );
+ }
+
+ LabeledLevel operator-( int i ) const {
+ return LabeledLevel( _label, _level - i );
+ }
+
+ const string& getLabel() const { return _label; }
+ int getLevel() const { return _level; }
+
+ private:
+ string _label;
+ int _level;
+ };
+
class LazyString {
public:
virtual ~LazyString() {}
@@ -104,6 +137,9 @@ namespace mongo {
virtual Nullstream& operator<<(unsigned) {
return *this;
}
+ virtual Nullstream& operator<<(unsigned short) {
+ return *this;
+ }
virtual Nullstream& operator<<(double) {
return *this;
}
@@ -209,6 +245,7 @@ namespace mongo {
Logstream& operator<<(long x) { ss << x; return *this; }
Logstream& operator<<(unsigned long x) { ss << x; return *this; }
Logstream& operator<<(unsigned x) { ss << x; return *this; }
+ Logstream& operator<<(unsigned short x){ ss << x; return *this; }
Logstream& operator<<(double x) { ss << x; return *this; }
Logstream& operator<<(void *x) { ss << x; return *this; }
Logstream& operator<<(const void *x) { ss << x; return *this; }
@@ -261,6 +298,9 @@ namespace mongo {
}
public:
static Logstream& get() {
+ if ( StaticObserver::_destroyingStatics ) {
+ cout << "Logstream::get called in uninitialized state" << endl;
+ }
Logstream *p = tsp.get();
if( p == 0 )
tsp.reset( p = new Logstream() );
@@ -291,7 +331,7 @@ namespace mongo {
return Logstream::get();
}
- /** logging which we may not want during unit tests runs.
+ /** logging which we may not want during unit test (dbtests) runs.
set tlogLevel to -1 to suppress tlog() output in a test program. */
inline Nullstream& tlog( int level = 0 ) {
if ( level > tlogLevel || level > logLevel )
@@ -305,13 +345,19 @@ namespace mongo {
return Logstream::get().prolog();
}
-#define MONGO_LOG(level) if ( logLevel >= (level) ) log( level )
+#define MONGO_LOG(level) if ( MONGO_unlikely(logLevel >= (level)) ) log( level )
#define LOG MONGO_LOG
inline Nullstream& log( LogLevel l ) {
return Logstream::get().prolog().setLogLevel( l );
}
+ inline Nullstream& log( const LabeledLevel& ll ) {
+ Nullstream& stream = log( ll.getLevel() );
+ if( ll.getLabel() != "" )
+ stream << "[" << ll.getLabel() << "] ";
+ return stream;
+ }
inline Nullstream& log() {
return Logstream::get().prolog();
@@ -392,7 +438,6 @@ namespace mongo {
string errnoWithPrefix( const char * prefix );
void Logstream::logLockless( const StringData& s ) {
-
if ( s.size() == 0 )
return;
@@ -475,4 +520,6 @@ namespace mongo {
}
};
+ extern Tee* const warnings; // Things put here go in serverStatus
+
} // namespace mongo
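The new LabeledLevel type bundles a verbosity level with a subsystem label, and the log(const LabeledLevel&) overload prints "[label] " ahead of the message. A minimal usage sketch; the "repl" label and the level values are illustrative and not part of the patch:

    static const LabeledLevel replLog( "repl" , 1 );

    void example() {
        // emitted as "[repl] syncing from primary" once logLevel >= 1
        log( replLog ) << "syncing from primary" << endl;

        // operator+ composes labels and raises the level: "repl::heartbeat" at level 2
        log( replLog + "heartbeat" + 1 ) << "ping" << endl;
    }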
diff --git a/util/logfile.cpp b/util/logfile.cpp
index 0386a59..609edb8 100644
--- a/util/logfile.cpp
+++ b/util/logfile.cpp
@@ -77,18 +77,35 @@ namespace mongo {
CloseHandle(_fd);
}
- void LogFile::synchronousAppend(const void *buf, size_t len) {
- assert(_fd);
- DWORD written;
- if( !WriteFile(_fd, buf, len, &written, NULL) ) {
- DWORD e = GetLastError();
- if( e == 87 )
- massert(13519, "error appending to file - misaligned direct write?", false);
- else
- uasserted(13517, str::stream() << "error appending to file " << errnoWithDescription(e));
+ void LogFile::truncate() {
+ verify(15870, _fd != INVALID_HANDLE_VALUE);
+
+ if (!SetEndOfFile(_fd)){
+ msgasserted(15871, "Couldn't truncate file: " + errnoWithDescription());
}
- else {
- dassert( written == len );
+ }
+
+ void LogFile::synchronousAppend(const void *_buf, size_t _len) {
+ const size_t BlockSize = 8 * 1024 * 1024;
+ assert(_fd);
+ assert(_len % 4096 == 0);
+ const char *buf = (const char *) _buf;
+ size_t left = _len;
+ while( left ) {
+ size_t toWrite = min(left, BlockSize);
+ DWORD written;
+ if( !WriteFile(_fd, buf, toWrite, &written, NULL) ) {
+ DWORD e = GetLastError();
+ if( e == 87 )
+ msgasserted(13519, "error 87 appending to file - invalid parameter");
+ else
+ uasserted(13517, str::stream() << "error appending to file " << _name << ' ' << _len << ' ' << toWrite << ' ' << errnoWithDescription(e));
+ }
+ else {
+ dassert( written == toWrite );
+ }
+ left -= written;
+ buf += written;
}
}
@@ -96,28 +113,44 @@ namespace mongo {
#else
+// posix
+
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
+#include "paths.h"
namespace mongo {
LogFile::LogFile(string name) : _name(name) {
- _fd = open(name.c_str(),
- O_CREAT
- | O_WRONLY
+ int options = O_CREAT
+ | O_WRONLY
#if defined(O_DIRECT)
- | O_DIRECT
+ | O_DIRECT
#endif
#if defined(O_NOATIME)
- | O_NOATIME
+ | O_NOATIME
+#endif
+ ;
+
+ _fd = open(name.c_str(), options, S_IRUSR | S_IWUSR);
+
+#if defined(O_DIRECT)
+ _direct = true;
+ if( _fd < 0 ) {
+ _direct = false;
+ options &= ~O_DIRECT;
+ _fd = open(name.c_str(), options, S_IRUSR | S_IWUSR);
+ }
+#else
+ _direct = false;
#endif
- ,
- S_IRUSR | S_IWUSR);
+
if( _fd < 0 ) {
uasserted(13516, str::stream() << "couldn't open file " << name << " for writing " << errnoWithDescription());
}
+ flushMyDirectory(name);
}
LogFile::~LogFile() {
@@ -126,7 +159,21 @@ namespace mongo {
_fd = -1;
}
+ void LogFile::truncate() {
+ verify(15872, _fd >= 0);
+
+ BOOST_STATIC_ASSERT(sizeof(off_t) == 8); // we don't want overflow here
+ const off_t pos = lseek(_fd, 0, SEEK_CUR); // doesn't actually seek
+ if (ftruncate(_fd, pos) != 0){
+ msgasserted(15873, "Couldn't truncate file: " + errnoWithDescription());
+ }
+ }
+
void LogFile::synchronousAppend(const void *b, size_t len) {
+#ifdef POSIX_FADV_DONTNEED
+ const off_t pos = lseek(_fd, 0, SEEK_CUR); // doesn't actually seek
+#endif
+
const char *buf = (char *) b;
assert(_fd);
assert(((size_t)buf)%4096==0); // aligned
@@ -150,6 +197,10 @@ namespace mongo {
uasserted(13514, str::stream() << "error appending to file on fsync " << ' ' << errnoWithDescription());
}
+#ifdef POSIX_FADV_DONTNEED
+ if (!_direct)
+ posix_fadvise(_fd, pos, len, POSIX_FADV_DONTNEED);
+#endif
}
}
diff --git a/util/logfile.h b/util/logfile.h
index 9085161..f6d1c94 100644
--- a/util/logfile.h
+++ b/util/logfile.h
@@ -38,6 +38,8 @@ namespace mongo {
const string _name;
+ void truncate(); // Removes extra data after current position
+
private:
#if defined(_WIN32)
typedef HANDLE fd_type;
@@ -45,6 +47,7 @@ namespace mongo {
typedef int fd_type;
#endif
fd_type _fd;
+ bool _direct; // are we using direct I/O
};
}
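LogFile is built for direct, synchronous appends: the POSIX path asserts the buffer address is 4096-byte aligned, the Windows path asserts the length is a multiple of 4096, and the new truncate() drops anything past the current position. A sketch of a conforming caller on POSIX, with a hypothetical file name (Windows would use _aligned_malloc in place of posix_memalign):

    #include <cstdlib>
    #include <cstring>

    void appendOneBlock() {
        const size_t kBlock = 4096;
        void* p = 0;
        if ( posix_memalign( &p, kBlock, kBlock ) != 0 )   // aligned address, aligned length
            return;
        memset( p, 0, kBlock );
        mongo::LogFile f( "journal.example" );             // hypothetical path
        f.synchronousAppend( p, kBlock );                  // intended to be durable when it returns
        free( p );
    }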
diff --git a/util/message.cpp b/util/message.cpp
deleted file mode 100644
index bcb1772..0000000
--- a/util/message.cpp
+++ /dev/null
@@ -1,764 +0,0 @@
-/* message
-
- todo: authenticate; encrypt?
-*/
-
-/* Copyright 2009 10gen Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "pch.h"
-#include "message.h"
-#include <time.h>
-#include "../util/goodies.h"
-#include "../util/background.h"
-#include <fcntl.h>
-#include <errno.h>
-#include "../db/cmdline.h"
-#include "../client/dbclient.h"
-#include "../util/time_support.h"
-
-#ifndef _WIN32
-# ifndef __sunos__
-# include <ifaddrs.h>
-# endif
-# include <sys/resource.h>
-# include <sys/stat.h>
-#else
-
-// errno doesn't work for winsock.
-#undef errno
-#define errno WSAGetLastError()
-
-#endif
-
-namespace mongo {
-
- bool noUnixSocket = false;
-
- bool objcheck = false;
-
- void checkTicketNumbers();
-
-// if you want trace output:
-#define mmm(x)
-
-#ifdef MSG_NOSIGNAL
- const int portSendFlags = MSG_NOSIGNAL;
- const int portRecvFlags = MSG_NOSIGNAL;
-#else
- const int portSendFlags = 0;
- const int portRecvFlags = 0;
-#endif
-
- const Listener* Listener::_timeTracker;
-
- string SocketException::toString() const {
- stringstream ss;
- ss << _ei.code << " socket exception [" << _type << "] ";
-
- if ( _server.size() )
- ss << "server [" << _server << "] ";
-
- if ( _extra.size() )
- ss << _extra;
-
- return ss.str();
- }
-
-
- vector<SockAddr> ipToAddrs(const char* ips, int port) {
- vector<SockAddr> out;
- if (*ips == '\0') {
- out.push_back(SockAddr("0.0.0.0", port)); // IPv4 all
-
- if (IPv6Enabled())
- out.push_back(SockAddr("::", port)); // IPv6 all
-#ifndef _WIN32
- if (!noUnixSocket)
- out.push_back(SockAddr(makeUnixSockPath(port).c_str(), port)); // Unix socket
-#endif
- return out;
- }
-
- while(*ips) {
- string ip;
- const char * comma = strchr(ips, ',');
- if (comma) {
- ip = string(ips, comma - ips);
- ips = comma + 1;
- }
- else {
- ip = string(ips);
- ips = "";
- }
-
- SockAddr sa(ip.c_str(), port);
- out.push_back(sa);
-
-#ifndef _WIN32
- if (!noUnixSocket && (sa.getAddr() == "127.0.0.1" || sa.getAddr() == "0.0.0.0")) // only IPv4
- out.push_back(SockAddr(makeUnixSockPath(port).c_str(), port));
-#endif
- }
- return out;
-
- }
-
- /* listener ------------------------------------------------------------------- */
-
- void Listener::initAndListen() {
- checkTicketNumbers();
- vector<SockAddr> mine = ipToAddrs(_ip.c_str(), _port);
- vector<int> socks;
- SOCKET maxfd = 0; // needed for select()
-
- for (vector<SockAddr>::iterator it=mine.begin(), end=mine.end(); it != end; ++it) {
- SockAddr& me = *it;
-
- SOCKET sock = ::socket(me.getType(), SOCK_STREAM, 0);
- if ( sock == INVALID_SOCKET ) {
- log() << "ERROR: listen(): invalid socket? " << errnoWithDescription() << endl;
- }
-
- if (me.getType() == AF_UNIX) {
-#if !defined(_WIN32)
- if (unlink(me.getAddr().c_str()) == -1) {
- int x = errno;
- if (x != ENOENT) {
- log() << "couldn't unlink socket file " << me << errnoWithDescription(x) << " skipping" << endl;
- continue;
- }
- }
-#endif
- }
- else if (me.getType() == AF_INET6) {
- // IPv6 can also accept IPv4 connections as mapped addresses (::ffff:127.0.0.1)
- // That causes a conflict if we don't do set it to IPV6_ONLY
- const int one = 1;
- setsockopt(sock, IPPROTO_IPV6, IPV6_V6ONLY, (const char*) &one, sizeof(one));
- }
-
- prebindOptions( sock );
-
- if ( ::bind(sock, me.raw(), me.addressSize) != 0 ) {
- int x = errno;
- log() << "listen(): bind() failed " << errnoWithDescription(x) << " for socket: " << me.toString() << endl;
- if ( x == EADDRINUSE )
- log() << " addr already in use" << endl;
- closesocket(sock);
- return;
- }
-
-#if !defined(_WIN32)
- if (me.getType() == AF_UNIX) {
- if (chmod(me.getAddr().c_str(), 0777) == -1) {
- log() << "couldn't chmod socket file " << me << errnoWithDescription() << endl;
- }
-
- ListeningSockets::get()->addPath( me.getAddr() );
- }
-#endif
-
- if ( ::listen(sock, 128) != 0 ) {
- log() << "listen(): listen() failed " << errnoWithDescription() << endl;
- closesocket(sock);
- return;
- }
-
- ListeningSockets::get()->add( sock );
-
- socks.push_back(sock);
- if (sock > maxfd)
- maxfd = sock;
- }
-
- static long connNumber = 0;
- struct timeval maxSelectTime;
- while ( ! inShutdown() ) {
- fd_set fds[1];
- FD_ZERO(fds);
-
- for (vector<int>::iterator it=socks.begin(), end=socks.end(); it != end; ++it) {
- FD_SET(*it, fds);
- }
-
- maxSelectTime.tv_sec = 0;
- maxSelectTime.tv_usec = 10000;
- const int ret = select(maxfd+1, fds, NULL, NULL, &maxSelectTime);
-
- if (ret == 0) {
-#if defined(__linux__)
- _elapsedTime += ( 10000 - maxSelectTime.tv_usec ) / 1000;
-#else
- _elapsedTime += 10;
-#endif
- continue;
- }
- _elapsedTime += ret; // assume 1ms to grab connection. very rough
-
- if (ret < 0) {
- int x = errno;
-#ifdef EINTR
- if ( x == EINTR ) {
- log() << "select() signal caught, continuing" << endl;
- continue;
- }
-#endif
- if ( ! inShutdown() )
- log() << "select() failure: ret=" << ret << " " << errnoWithDescription(x) << endl;
- return;
- }
-
- for (vector<int>::iterator it=socks.begin(), end=socks.end(); it != end; ++it) {
- if (! (FD_ISSET(*it, fds)))
- continue;
-
- SockAddr from;
- int s = accept(*it, from.raw(), &from.addressSize);
- if ( s < 0 ) {
- int x = errno; // so no global issues
- if ( x == ECONNABORTED || x == EBADF ) {
- log() << "Listener on port " << _port << " aborted" << endl;
- return;
- }
- if ( x == 0 && inShutdown() ) {
- return; // socket closed
- }
- if( !inShutdown() )
- log() << "Listener: accept() returns " << s << " " << errnoWithDescription(x) << endl;
- continue;
- }
- if (from.getType() != AF_UNIX)
- disableNagle(s);
- if ( _logConnect && ! cmdLine.quiet )
- log() << "connection accepted from " << from.toString() << " #" << ++connNumber << endl;
- accepted(s, from);
- }
- }
- }
-
- void Listener::accepted(int sock, const SockAddr& from) {
- accepted( new MessagingPort(sock, from) );
- }
-
- /* messagingport -------------------------------------------------------------- */
-
- class PiggyBackData {
- public:
- PiggyBackData( MessagingPort * port ) {
- _port = port;
- _buf = new char[1300];
- _cur = _buf;
- }
-
- ~PiggyBackData() {
- DESTRUCTOR_GUARD (
- flush();
- delete[]( _cur );
- );
- }
-
- void append( Message& m ) {
- assert( m.header()->len <= 1300 );
-
- if ( len() + m.header()->len > 1300 )
- flush();
-
- memcpy( _cur , m.singleData() , m.header()->len );
- _cur += m.header()->len;
- }
-
- void flush() {
- if ( _buf == _cur )
- return;
-
- _port->send( _buf , len(), "flush" );
- _cur = _buf;
- }
-
- int len() const { return _cur - _buf; }
-
- private:
- MessagingPort* _port;
- char * _buf;
- char * _cur;
- };
-
- class Ports {
- set<MessagingPort*> ports;
- mongo::mutex m;
- public:
- Ports() : ports(), m("Ports") {}
- void closeAll(unsigned skip_mask) {
- scoped_lock bl(m);
- for ( set<MessagingPort*>::iterator i = ports.begin(); i != ports.end(); i++ ) {
- if( (*i)->tag & skip_mask )
- continue;
- (*i)->shutdown();
- }
- }
- void insert(MessagingPort* p) {
- scoped_lock bl(m);
- ports.insert(p);
- }
- void erase(MessagingPort* p) {
- scoped_lock bl(m);
- ports.erase(p);
- }
- };
-
- // we "new" this so it is still be around when other automatic global vars
- // are being destructed during termination.
- Ports& ports = *(new Ports());
-
- void MessagingPort::closeAllSockets(unsigned mask) {
- ports.closeAll(mask);
- }
-
- MessagingPort::MessagingPort(int _sock, const SockAddr& _far) : sock(_sock), piggyBackData(0), _bytesIn(0), _bytesOut(0), farEnd(_far), _timeout(), tag(0) {
- _logLevel = 0;
- ports.insert(this);
- }
-
- MessagingPort::MessagingPort( double timeout, int ll ) : _bytesIn(0), _bytesOut(0), tag(0) {
- _logLevel = ll;
- ports.insert(this);
- sock = -1;
- piggyBackData = 0;
- _timeout = timeout;
- }
-
- void MessagingPort::shutdown() {
- if ( sock >= 0 ) {
- closesocket(sock);
- sock = -1;
- }
- }
-
- MessagingPort::~MessagingPort() {
- if ( piggyBackData )
- delete( piggyBackData );
- shutdown();
- ports.erase(this);
- }
-
- class ConnectBG : public BackgroundJob {
- public:
- ConnectBG(int sock, SockAddr farEnd) : _sock(sock), _farEnd(farEnd) { }
-
- void run() { _res = ::connect(_sock, _farEnd.raw(), _farEnd.addressSize); }
- string name() const { return "ConnectBG"; }
- int inError() const { return _res; }
-
- private:
- int _sock;
- int _res;
- SockAddr _farEnd;
- };
-
- bool MessagingPort::connect(SockAddr& _far) {
- farEnd = _far;
-
- sock = socket(farEnd.getType(), SOCK_STREAM, 0);
- if ( sock == INVALID_SOCKET ) {
- log(_logLevel) << "ERROR: connect invalid socket " << errnoWithDescription() << endl;
- return false;
- }
-
- if ( _timeout > 0 ) {
- setSockTimeouts( sock, _timeout );
- }
-
- ConnectBG bg(sock, farEnd);
- bg.go();
- if ( bg.wait(5000) ) {
- if ( bg.inError() ) {
- closesocket(sock);
- sock = -1;
- return false;
- }
- }
- else {
- // time out the connect
- closesocket(sock);
- sock = -1;
- bg.wait(); // so bg stays in scope until bg thread terminates
- return false;
- }
-
- if (farEnd.getType() != AF_UNIX)
- disableNagle(sock);
-
-#ifdef SO_NOSIGPIPE
- // osx
- const int one = 1;
- setsockopt(sock, SOL_SOCKET, SO_NOSIGPIPE, &one, sizeof(int));
-#endif
-
- /*
- // SO_LINGER is bad
- #ifdef SO_LINGER
- struct linger ling;
- ling.l_onoff = 1;
- ling.l_linger = 0;
- setsockopt(sock, SOL_SOCKET, SO_LINGER, (char *) &ling, sizeof(ling));
- #endif
- */
- return true;
- }
-
- bool MessagingPort::recv(Message& m) {
- try {
-again:
- mmm( log() << "* recv() sock:" << this->sock << endl; )
- int len = -1;
-
- char *lenbuf = (char *) &len;
- int lft = 4;
- recv( lenbuf, lft );
-
- if ( len < 16 || len > 48000000 ) { // messages must be large enough for headers
- if ( len == -1 ) {
- // Endian check from the client, after connecting, to see what mode server is running in.
- unsigned foo = 0x10203040;
- send( (char *) &foo, 4, "endian" );
- goto again;
- }
-
- if ( len == 542393671 ) {
- // an http GET
- log(_logLevel) << "looks like you're trying to access db over http on native driver port. please add 1000 for webserver" << endl;
- string msg = "You are trying to access MongoDB on the native driver port. For http diagnostic access, add 1000 to the port number\n";
- stringstream ss;
- ss << "HTTP/1.0 200 OK\r\nConnection: close\r\nContent-Type: text/plain\r\nContent-Length: " << msg.size() << "\r\n\r\n" << msg;
- string s = ss.str();
- send( s.c_str(), s.size(), "http" );
- return false;
- }
- log(0) << "recv(): message len " << len << " is too large" << len << endl;
- return false;
- }
-
- int z = (len+1023)&0xfffffc00;
- assert(z>=len);
- MsgData *md = (MsgData *) malloc(z);
- assert(md);
- md->len = len;
-
- char *p = (char *) &md->id;
- int left = len -4;
-
- try {
- recv( p, left );
- }
- catch (...) {
- free(md);
- throw;
- }
-
- _bytesIn += len;
- m.setData(md, true);
- return true;
-
- }
- catch ( const SocketException & e ) {
- log(_logLevel + (e.shouldPrint() ? 0 : 1) ) << "SocketException: remote: " << remote() << " error: " << e << endl;
- m.reset();
- return false;
- }
- }
-
- void MessagingPort::reply(Message& received, Message& response) {
- say(/*received.from, */response, received.header()->id);
- }
-
- void MessagingPort::reply(Message& received, Message& response, MSGID responseTo) {
- say(/*received.from, */response, responseTo);
- }
-
- bool MessagingPort::call(Message& toSend, Message& response) {
- mmm( log() << "*call()" << endl; )
- say(toSend);
- return recv( toSend , response );
- }
-
- bool MessagingPort::recv( const Message& toSend , Message& response ) {
- while ( 1 ) {
- bool ok = recv(response);
- if ( !ok )
- return false;
- //log() << "got response: " << response.data->responseTo << endl;
- if ( response.header()->responseTo == toSend.header()->id )
- break;
- error() << "MessagingPort::call() wrong id got:" << hex << (unsigned)response.header()->responseTo << " expect:" << (unsigned)toSend.header()->id << '\n'
- << dec
- << " toSend op: " << (unsigned)toSend.operation() << '\n'
- << " response msgid:" << (unsigned)response.header()->id << '\n'
- << " response len: " << (unsigned)response.header()->len << '\n'
- << " response op: " << response.operation() << '\n'
- << " farEnd: " << farEnd << endl;
- assert(false);
- response.reset();
- }
- mmm( log() << "*call() end" << endl; )
- return true;
- }
-
- void MessagingPort::say(Message& toSend, int responseTo) {
- assert( !toSend.empty() );
- mmm( log() << "* say() sock:" << this->sock << " thr:" << GetCurrentThreadId() << endl; )
- toSend.header()->id = nextMessageId();
- toSend.header()->responseTo = responseTo;
-
- if ( piggyBackData && piggyBackData->len() ) {
- mmm( log() << "* have piggy back" << endl; )
- if ( ( piggyBackData->len() + toSend.header()->len ) > 1300 ) {
- // won't fit in a packet - so just send it off
- piggyBackData->flush();
- }
- else {
- piggyBackData->append( toSend );
- piggyBackData->flush();
- return;
- }
- }
-
- toSend.send( *this, "say" );
- }
-
- // sends all data or throws an exception
- void MessagingPort::send( const char * data , int len, const char *context ) {
- _bytesOut += len;
- while( len > 0 ) {
- int ret = ::send( sock , data , len , portSendFlags );
- if ( ret == -1 ) {
- if ( errno != EAGAIN || _timeout == 0 ) {
- SocketException::Type t = SocketException::SEND_ERROR;
-#if defined(_WINDOWS)
- if( e == WSAETIMEDOUT ) t = SocketException::SEND_TIMEOUT;
-#endif
- log(_logLevel) << "MessagingPort " << context << " send() " << errnoWithDescription() << ' ' << farEnd.toString() << endl;
- throw SocketException( t );
- }
- else {
- if ( !serverAlive( farEnd.toString() ) ) {
- log(_logLevel) << "MessagingPort " << context << " send() remote dead " << farEnd.toString() << endl;
- throw SocketException( SocketException::SEND_ERROR );
- }
- }
- }
- else {
- assert( ret <= len );
- len -= ret;
- data += ret;
- }
- }
- }
-
- // sends all data or throws an exception
- void MessagingPort::send( const vector< pair< char *, int > > &data, const char *context ) {
-#if defined(_WIN32)
- // TODO use scatter/gather api
- for( vector< pair< char *, int > >::const_iterator i = data.begin(); i != data.end(); ++i ) {
- char * data = i->first;
- int len = i->second;
- send( data, len, context );
- }
-#else
- vector< struct iovec > d( data.size() );
- int i = 0;
- for( vector< pair< char *, int > >::const_iterator j = data.begin(); j != data.end(); ++j ) {
- if ( j->second > 0 ) {
- d[ i ].iov_base = j->first;
- d[ i ].iov_len = j->second;
- ++i;
- }
- }
- struct msghdr meta;
- memset( &meta, 0, sizeof( meta ) );
- meta.msg_iov = &d[ 0 ];
- meta.msg_iovlen = d.size();
-
- while( meta.msg_iovlen > 0 ) {
- int ret = ::sendmsg( sock , &meta , portSendFlags );
- if ( ret == -1 ) {
- if ( errno != EAGAIN || _timeout == 0 ) {
- log(_logLevel) << "MessagingPort " << context << " send() " << errnoWithDescription() << ' ' << farEnd.toString() << endl;
- throw SocketException( SocketException::SEND_ERROR );
- }
- else {
- if ( !serverAlive( farEnd.toString() ) ) {
- log(_logLevel) << "MessagingPort " << context << " send() remote dead " << farEnd.toString() << endl;
- throw SocketException( SocketException::SEND_ERROR );
- }
- }
- }
- else {
- struct iovec *& i = meta.msg_iov;
- while( ret > 0 ) {
- if ( i->iov_len > unsigned( ret ) ) {
- i->iov_len -= ret;
- i->iov_base = (char*)(i->iov_base) + ret;
- ret = 0;
- }
- else {
- ret -= i->iov_len;
- ++i;
- --(meta.msg_iovlen);
- }
- }
- }
- }
-#endif
- }
-
- void MessagingPort::recv( char * buf , int len ) {
- unsigned retries = 0;
- while( len > 0 ) {
- int ret = ::recv( sock , buf , len , portRecvFlags );
- if ( ret > 0 ) {
- if ( len <= 4 && ret != len )
- log(_logLevel) << "MessagingPort recv() got " << ret << " bytes wanted len=" << len << endl;
- assert( ret <= len );
- len -= ret;
- buf += ret;
- }
- else if ( ret == 0 ) {
- log(3) << "MessagingPort recv() conn closed? " << farEnd.toString() << endl;
- throw SocketException( SocketException::CLOSED );
- }
- else { /* ret < 0 */
- int e = errno;
-
-#if defined(EINTR) && !defined(_WIN32)
- if( e == EINTR ) {
- if( ++retries == 1 ) {
- log() << "EINTR retry" << endl;
- continue;
- }
- }
-#endif
- if ( ( e == EAGAIN
-#ifdef _WINDOWS
- || e == WSAETIMEDOUT
-#endif
- ) && _timeout > 0 ) {
- // this is a timeout
- log(_logLevel) << "MessagingPort recv() timeout " << farEnd.toString() <<endl;
- throw SocketException(SocketException::RECV_TIMEOUT);
- }
-
- log(_logLevel) << "MessagingPort recv() " << errnoWithDescription(e) << " " << farEnd.toString() <<endl;
- throw SocketException(SocketException::RECV_ERROR);
- }
- }
- }
-
- int MessagingPort::unsafe_recv( char *buf, int max ) {
- return ::recv( sock , buf , max , portRecvFlags );
- }
-
- void MessagingPort::piggyBack( Message& toSend , int responseTo ) {
-
- if ( toSend.header()->len > 1300 ) {
- // not worth saving because its almost an entire packet
- say( toSend );
- return;
- }
-
- // we're going to be storing this, so need to set it up
- toSend.header()->id = nextMessageId();
- toSend.header()->responseTo = responseTo;
-
- if ( ! piggyBackData )
- piggyBackData = new PiggyBackData( this );
-
- piggyBackData->append( toSend );
- }
-
- unsigned MessagingPort::remotePort() const {
- return farEnd.getPort();
- }
-
- HostAndPort MessagingPort::remote() const {
- if ( _farEndParsed.port() == -1 )
- _farEndParsed = HostAndPort( farEnd );
- return _farEndParsed;
- }
-
-
- MSGID NextMsgId;
-
- struct MsgStart {
- MsgStart() {
- NextMsgId = (((unsigned) time(0)) << 16) ^ curTimeMillis();
- assert(MsgDataHeaderSize == 16);
- }
- } msgstart;
-
- MSGID nextMessageId() {
- MSGID msgid = NextMsgId++;
- return msgid;
- }
-
- bool doesOpGetAResponse( int op ) {
- return op == dbQuery || op == dbGetMore;
- }
-
- const int DEFAULT_MAX_CONN = 20000;
- const int MAX_MAX_CONN = 20000;
-
- int getMaxConnections() {
-#ifdef _WIN32
- return DEFAULT_MAX_CONN;
-#else
- struct rlimit limit;
- assert( getrlimit(RLIMIT_NOFILE,&limit) == 0 );
-
- int max = (int)(limit.rlim_cur * .8);
-
- log(1) << "fd limit"
- << " hard:" << limit.rlim_max
- << " soft:" << limit.rlim_cur
- << " max conn: " << max
- << endl;
-
- if ( max > MAX_MAX_CONN )
- max = MAX_MAX_CONN;
-
- return max;
-#endif
- }
-
- void checkTicketNumbers() {
- int want = getMaxConnections();
- int current = connTicketHolder.outof();
- if ( current != DEFAULT_MAX_CONN ) {
- if ( current < want ) {
- // they want fewer than they can handle
- // which is fine
- log(1) << " only allowing " << current << " connections" << endl;
- return;
- }
- if ( current > want ) {
- log() << " --maxConns too high, can only handle " << want << endl;
- }
- }
- connTicketHolder.resize( want );
- }
-
- TicketHolder connTicketHolder(DEFAULT_MAX_CONN);
-
-} // namespace mongo
diff --git a/util/mmap.cpp b/util/mmap.cpp
index 18edc34..fa9ab73 100644
--- a/util/mmap.cpp
+++ b/util/mmap.cpp
@@ -20,6 +20,7 @@
#include "processinfo.h"
#include "concurrency/rwlock.h"
#include "../db/namespace.h"
+#include "../db/cmdline.h"
namespace mongo {
@@ -61,7 +62,7 @@ namespace mongo {
this is the administrative stuff
*/
- RWLock MongoFile::mmmutex("rw:mmmutex");
+ RWLockRecursive MongoFile::mmmutex("mmmutex",10*60*1000 /* 10 minutes */);
/* subclass must call in destructor (or at close).
removes this from pathToFile and other maps
@@ -69,7 +70,7 @@ namespace mongo {
ideal to call close to the close, if the close is well before object destruction
*/
void MongoFile::destroyed() {
- rwlock lk( mmmutex , true );
+ mmmutex.assertExclusivelyLocked();
mmfiles.erase(this);
pathToFile.erase( filename() );
}
@@ -83,11 +84,12 @@ namespace mongo {
}
++closingAllFiles;
- rwlock lk( mmmutex , true );
+ RWLockRecursive::Exclusive lk(mmmutex);
ProgressMeter pm( mmfiles.size() , 2 , 1 );
- for ( set<MongoFile*>::iterator i = mmfiles.begin(); i != mmfiles.end(); i++ ) {
- (*i)->close();
+ set<MongoFile*> temp = mmfiles;
+ for ( set<MongoFile*>::iterator i = temp.begin(); i != temp.end(); i++ ) {
+ (*i)->close(); // close() now removes from mmfiles
pm.hit();
}
message << "closeAllFiles() finished";
@@ -97,7 +99,7 @@ namespace mongo {
/*static*/ long long MongoFile::totalMappedLength() {
unsigned long long total = 0;
- rwlock lk( mmmutex , false );
+ RWLockRecursive::Shared lk(mmmutex);
for ( set<MongoFile*>::iterator i = mmfiles.begin(); i != mmfiles.end(); i++ )
total += (*i)->length();
@@ -120,7 +122,7 @@ namespace mongo {
/*static*/ int MongoFile::_flushAll( bool sync ) {
if ( ! sync ) {
int num = 0;
- rwlock lk( mmmutex , false );
+ RWLockRecursive::Shared lk(mmmutex);
for ( set<MongoFile*>::iterator i = mmfiles.begin(); i != mmfiles.end(); i++ ) {
num++;
MongoFile * mmf = *i;
@@ -137,7 +139,7 @@ namespace mongo {
while ( true ) {
auto_ptr<Flushable> f;
{
- rwlock lk( mmmutex , false );
+ RWLockRecursive::Shared lk(mmmutex);
for ( set<MongoFile*>::iterator i = mmfiles.begin(); i != mmfiles.end(); i++ ) {
MongoFile * mmf = *i;
if ( ! mmf )
@@ -158,12 +160,12 @@ namespace mongo {
}
void MongoFile::created() {
- rwlock lk( mmmutex , true );
+ RWLockRecursive::Exclusive lk(mmmutex);
mmfiles.insert(this);
}
void MongoFile::setFilename(string fn) {
- rwlock( mmmutex, true );
+ RWLockRecursive::Exclusive lk(mmmutex);
assert( _filename.empty() );
_filename = fn;
MongoFile *&ptf = pathToFile[fn];
@@ -173,7 +175,9 @@ namespace mongo {
#if defined(_DEBUG)
void MongoFile::markAllWritable() {
- rwlock lk( mmmutex , false );
+ if( cmdLine.dur )
+ return;
+ RWLockRecursive::Shared lk(mmmutex);
for ( set<MongoFile*>::iterator i = mmfiles.begin(); i != mmfiles.end(); i++ ) {
MongoFile * mmf = *i;
if (mmf) mmf->_lock();
@@ -181,7 +185,9 @@ namespace mongo {
}
void MongoFile::unmarkAllWritable() {
- rwlock lk( mmmutex , false );
+ if( cmdLine.dur )
+ return;
+ RWLockRecursive::Shared lk(mmmutex);
for ( set<MongoFile*>::iterator i = mmfiles.begin(); i != mmfiles.end(); i++ ) {
MongoFile * mmf = *i;
if (mmf) mmf->_unlock();
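The bookkeeping around the mapped-file registry moves from a plain RWLock with ad-hoc rwlock guards to RWLockRecursive with scoped helpers, and close()/destroyed() now verify that the caller already holds the lock. The locking pattern the patch establishes, shown in isolation:

    // readers take a shared lock while walking the registry
    {
        RWLockRecursive::Shared lk( MongoFile::mmmutex );
        // ... iterate mmfiles / pathToFile ...
    }

    // writers take an exclusive lock; callees can check it with
    // MongoFile::mmmutex.assertExclusivelyLocked()
    {
        RWLockRecursive::Exclusive lk( MongoFile::mmmutex );
        // ... insert/erase entries, close files ...
    }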
diff --git a/util/mmap.h b/util/mmap.h
index 2ef4176..c1b14bb 100644
--- a/util/mmap.h
+++ b/util/mmap.h
@@ -21,6 +21,15 @@
namespace mongo {
+ class MAdvise {
+ void *_p;
+ unsigned _len;
+ public:
+ enum Advice { Sequential=1 };
+ MAdvise(void *p, unsigned len, Advice a);
+ ~MAdvise(); // destructor resets the range to MADV_NORMAL
+ };
+
/* the administrative-ish stuff here */
class MongoFile : boost::noncopyable {
public:
@@ -44,7 +53,8 @@ namespace mongo {
template < class F >
static void forEach( F fun );
- /** note: you need to be in mmmutex when using this. forEach (above) handles that for you automatically. */
+ /** note: you need to be in mmmutex when using this. forEach (above) handles that for you automatically.
+*/
static set<MongoFile*>& getAllFiles() { return mmfiles; }
// callbacks if you need them
@@ -100,7 +110,9 @@ namespace mongo {
static set<MongoFile*> mmfiles;
public:
static map<string,MongoFile*> pathToFile;
- static RWLock mmmutex;
+
+ // lock order: lock dbMutex before this if you lock both
+ static RWLockRecursive mmmutex;
};
/** look up a MMF by filename. scoped mutex locking convention.
@@ -111,7 +123,7 @@ namespace mongo {
*/
class MongoFileFinder : boost::noncopyable {
public:
- MongoFileFinder() : _lk(MongoFile::mmmutex,false) { }
+ MongoFileFinder() : _lk(MongoFile::mmmutex) { }
/** @return The MongoFile object associated with the specified file name. If no file is open
with the specified name, returns null.
@@ -122,7 +134,7 @@ namespace mongo {
}
private:
- rwlock _lk;
+ RWLockRecursive::Shared _lk;
};
struct MongoFileAllowWrites {
@@ -135,11 +147,18 @@ namespace mongo {
};
class MemoryMappedFile : public MongoFile {
+ protected:
+ virtual void* viewForFlushing() {
+ if( views.size() == 0 )
+ return 0;
+ assert( views.size() == 1 );
+ return views[0];
+ }
public:
MemoryMappedFile();
virtual ~MemoryMappedFile() {
- destroyed(); // cleans up from the master list of mmaps
+ RWLockRecursive::Exclusive lk(mmmutex);
close();
}
@@ -147,6 +166,8 @@ namespace mongo {
// Throws exception if file doesn't exist. (dm may2010: not sure if this is always true?)
void* map(const char *filename);
+
+ /** @param options see MongoFile::Options */
void* mapWithOptions(const char *filename, int options);
/* Creates with length if DNE, otherwise uses existing file length,
@@ -187,7 +208,7 @@ namespace mongo {
HANDLE maphandle;
vector<void *> views;
unsigned long long len;
-
+
#ifdef _WIN32
boost::shared_ptr<mutex> _flushMutex;
void clearWritableBits(void *privateView);
@@ -212,7 +233,7 @@ namespace mongo {
/** p is called from within a mutex that MongoFile uses. so be careful not to deadlock. */
template < class F >
inline void MongoFile::forEach( F p ) {
- rwlock lk( mmmutex , false );
+ RWLockRecursive::Shared lklk(mmmutex);
for ( set<MongoFile*>::iterator i = mmfiles.begin(); i != mmfiles.end(); i++ )
p(*i);
}
@@ -227,10 +248,11 @@ namespace mongo {
bool get(unsigned i) const {
unsigned x = i / 32;
assert( x < MemoryMappedFile::NChunks );
- return bits[x] & (1 << (i%32));
+ return (bits[x] & (1 << (i%32))) != 0;
}
void set(unsigned i) {
unsigned x = i / 32;
+ wassert( x < (MemoryMappedFile::NChunks*2/3) ); // warn if getting close to limit
assert( x < MemoryMappedFile::NChunks );
bits[x] |= (1 << (i%32));
}
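MAdvise is a small RAII helper: the constructor issues madvise(MADV_SEQUENTIAL) over the range and the destructor restores MADV_NORMAL (both are no-ops on Solaris and Windows, per the platform files below). A usage sketch over a hypothetical mapped view:

    void scanOnce( void* view, unsigned len ) {
        MAdvise adv( view, len, MAdvise::Sequential );   // hint: about to read front to back
        // ... sequential pass over the mapping ...
    }                                                    // destructor resets to MADV_NORMAL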
diff --git a/util/mmap_posix.cpp b/util/mmap_posix.cpp
index f47a06f..5b5e86d 100644
--- a/util/mmap_posix.cpp
+++ b/util/mmap_posix.cpp
@@ -19,13 +19,12 @@
#include "mmap.h"
#include "file_allocator.h"
#include "../db/concurrency.h"
-
#include <errno.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
-
+#include "../util/processinfo.h"
#include "mongoutils/str.h"
using namespace mongoutils;
@@ -39,6 +38,7 @@ namespace mongo {
}
void MemoryMappedFile::close() {
+ mmmutex.assertExclusivelyLocked();
for( vector<void*>::iterator i = views.begin(); i != views.end(); i++ ) {
munmap(*i,len);
}
@@ -47,6 +47,7 @@ namespace mongo {
if ( fd )
::close(fd);
fd = 0;
+ destroyed(); // cleans up from the master list of mmaps
}
#ifndef O_NOATIME
@@ -57,6 +58,19 @@ namespace mongo {
#define MAP_NORESERVE (0)
#endif
+#if defined(__sunos__)
+ MAdvise::MAdvise(void *,unsigned, Advice) { }
+ MAdvise::~MAdvise() { }
+#else
+ MAdvise::MAdvise(void *p, unsigned len, Advice a) : _p(p), _len(len) {
+ assert( a == Sequential ); // more later
+ madvise(_p,_len,MADV_SEQUENTIAL);
+ }
+ MAdvise::~MAdvise() {
+ madvise(_p,_len,MADV_NORMAL);
+ }
+#endif
+
void* MemoryMappedFile::map(const char *filename, unsigned long long &length, int options) {
// length may be updated by callee.
setFilename(filename);
@@ -68,6 +82,7 @@ namespace mongo {
fd = open(filename, O_RDWR | O_NOATIME);
if ( fd <= 0 ) {
log() << "couldn't open " << filename << ' ' << errnoWithDescription() << endl;
+ fd = 0; // our sentinel for not opened
return 0;
}
@@ -149,6 +164,7 @@ namespace mongo {
int err = errno;
error() << "13601 Couldn't remap private view: " << errnoWithDescription(err) << endl;
log() << "aborting" << endl;
+ printMemInfo();
abort();
}
assert( x == oldPrivateAddr );
@@ -158,7 +174,7 @@ namespace mongo {
void MemoryMappedFile::flush(bool sync) {
if ( views.empty() || fd == 0 )
return;
- if ( msync(views[0], len, sync ? MS_SYNC : MS_ASYNC) )
+ if ( msync(viewForFlushing(), len, sync ? MS_SYNC : MS_ASYNC) )
problem() << "msync " << errnoWithDescription() << endl;
}
@@ -181,7 +197,7 @@ namespace mongo {
};
MemoryMappedFile::Flushable * MemoryMappedFile::prepareFlush() {
- return new PosixFlushable( views.empty() ? 0 : views[0] , fd , len );
+ return new PosixFlushable( viewForFlushing() , fd , len );
}
void MemoryMappedFile::_lock() {
diff --git a/util/mmap_win.cpp b/util/mmap_win.cpp
index 0b0b834..9173d7b 100644
--- a/util/mmap_win.cpp
+++ b/util/mmap_win.cpp
@@ -18,7 +18,6 @@
#include "pch.h"
#include "mmap.h"
#include "text.h"
-#include <windows.h>
#include "../db/mongommf.h"
#include "../db/concurrency.h"
@@ -27,6 +26,9 @@ namespace mongo {
mutex mapViewMutex("mapView");
ourbitset writable;
+ MAdvise::MAdvise(void *,unsigned, Advice) { }
+ MAdvise::~MAdvise() { }
+
/** notification on unmapping so we can clear writable bits */
void MemoryMappedFile::clearWritableBits(void *p) {
for( unsigned i = ((size_t)p)/ChunkSize; i <= (((size_t)p)+len)/ChunkSize; i++ ) {
@@ -44,6 +46,7 @@ namespace mongo {
}
void MemoryMappedFile::close() {
+ mmmutex.assertExclusivelyLocked();
for( vector<void*>::iterator i = views.begin(); i != views.end(); i++ ) {
clearWritableBits(*i);
UnmapViewOfFile(*i);
@@ -55,6 +58,7 @@ namespace mongo {
if ( fd )
CloseHandle(fd);
fd = 0;
+ destroyed(); // cleans up from the master list of mmaps
}
unsigned long long mapped = 0;
@@ -138,7 +142,8 @@ namespace mongo {
}
if ( view == 0 ) {
DWORD e = GetLastError();
- log() << "MapViewOfFile failed " << filename << " " << errnoWithDescription(e) << endl;
+ log() << "MapViewOfFile failed " << filename << " " << errnoWithDescription(e) <<
+ ((sizeof(void*)==4)?" (32 bit build)":"") << endl;
close();
}
else {
@@ -183,13 +188,13 @@ namespace mongo {
void MemoryMappedFile::flush(bool sync) {
uassert(13056, "Async flushing not supported on windows", sync);
if( !views.empty() ) {
- WindowsFlushable f( views[0] , fd , filename() , _flushMutex);
+ WindowsFlushable f( viewForFlushing() , fd , filename() , _flushMutex);
f.flush();
}
}
MemoryMappedFile::Flushable * MemoryMappedFile::prepareFlush() {
- return new WindowsFlushable( views.empty() ? 0 : views[0] , fd , filename() , _flushMutex );
+ return new WindowsFlushable( viewForFlushing() , fd , filename() , _flushMutex );
}
void MemoryMappedFile::_lock() {}
void MemoryMappedFile::_unlock() {}
diff --git a/util/mongoutils/README b/util/mongoutils/README
index fd2a589..f61277c 100755
--- a/util/mongoutils/README
+++ b/util/mongoutils/README
@@ -11,3 +11,5 @@
So basically, easy to use, general purpose stuff, with no arduous dependencies to drop into
any new project.
+
+ *** PLACE UNIT TESTS IN mongoutils/test.cpp ***
diff --git a/util/mongoutils/str.h b/util/mongoutils/str.h
index ea8f938..57b94fa 100644
--- a/util/mongoutils/str.h
+++ b/util/mongoutils/str.h
@@ -29,6 +29,8 @@
#include <string>
#include <sstream>
+
+// this violates the README rules for mongoutils:
#include "../../bson/util/builder.h"
namespace mongoutils {
@@ -48,13 +50,11 @@ namespace mongoutils {
class stream {
public:
mongo::StringBuilder ss;
-
template<class T>
stream& operator<<(const T& v) {
ss << v;
return *this;
}
-
operator std::string () const { return ss.str(); }
};
@@ -106,13 +106,13 @@ namespace mongoutils {
return strchr(s.c_str(), x) != 0;
}
- /** @return everything befor the character x, else entire string */
+ /** @return everything before the character x, else entire string */
inline string before(const string& s, char x) {
const char *p = strchr(s.c_str(), x);
return (p != 0) ? s.substr(0, p-s.c_str()) : s;
}
- /** @return everything befor the string x, else entire string */
+ /** @return everything before the string x, else entire string */
inline string before(const string& s, const string& x) {
const char *p = strstr(s.c_str(), x.c_str());
return (p != 0) ? s.substr(0, p-s.c_str()) : s;
diff --git a/util/mongoutils/test.cpp b/util/mongoutils/test.cpp
index d8ee461..45268c5 100644
--- a/util/mongoutils/test.cpp
+++ b/util/mongoutils/test.cpp
@@ -19,9 +19,9 @@
* limitations under the License.
*/
+#include <assert.h>
#include "str.h"
#include "html.h"
-#include <assert.h>
using namespace std;
using namespace mongoutils;
diff --git a/util/hostandport.h b/util/net/hostandport.h
index fd27296..573e8ee 100644
--- a/util/hostandport.h
+++ b/util/net/hostandport.h
@@ -18,8 +18,8 @@
#pragma once
#include "sock.h"
-#include "../db/cmdline.h"
-#include "mongoutils/str.h"
+#include "../../db/cmdline.h"
+#include "../mongoutils/str.h"
namespace mongo {
@@ -70,8 +70,10 @@ namespace mongo {
bool isLocalHost() const;
- // @returns host:port
- string toString() const;
+ /**
+ * @param includePort if true return "host:port", otherwise return just the host
+ */
+ string toString( bool includePort=true ) const;
operator string() const { return toString(); }
@@ -87,24 +89,6 @@ namespace mongo {
int _port; // -1 indicates unspecified
};
- /** returns true if strings seem to be the same hostname.
- "nyc1", "nyc1.acme", and "nyc1.acme.com" are treated as the same.
- */
- inline bool sameHostname(const string& a, const string& b) {
- size_t prefixLen = str::shareCommonPrefix(a.c_str(), b.c_str());
-
- if (prefixLen == a.size()) { // (a == b) or (a isPrefixOf b)
- if ( b[prefixLen] == '.' || b[prefixLen] == '\0')
- return true;
- }
- else if(prefixLen == b.size()) { // (b isPrefixOf a)
- if ( a[prefixLen] == '.') // can't be '\0'
- return true;
- }
-
- return false;
- }
-
inline HostAndPort HostAndPort::Me() {
const char* ips = cmdLine.bind_ip.c_str();
while(*ips) {
@@ -130,7 +114,10 @@ namespace mongo {
return HostAndPort(h, cmdLine.port);
}
- inline string HostAndPort::toString() const {
+ inline string HostAndPort::toString( bool includePort ) const {
+ if ( ! includePort )
+ return _host;
+
stringstream ss;
ss << _host;
if ( _port != -1 ) {
@@ -150,7 +137,12 @@ namespace mongo {
}
inline bool HostAndPort::isLocalHost() const {
- return _host == "localhost" || startsWith(_host.c_str(), "127.") || _host == "::1";
+ return ( _host == "localhost"
+ || startsWith(_host.c_str(), "127.")
+ || _host == "::1"
+ || _host == "anonymous unix socket"
+ || _host.c_str()[0] == '/' // unix socket
+ );
}
inline HostAndPort::HostAndPort(string s) {
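toString() grows an includePort flag and isLocalHost() now also recognizes unix-domain endpoints. A quick sketch of the new behaviour, assuming the usual host[:port] string constructor and an illustrative host name:

    HostAndPort hp( string("db1.example.com:27018") );
    string full = hp.toString();          // "db1.example.com:27018"
    string bare = hp.toString( false );   // "db1.example.com" (the new includePort=false form)
    bool local  = HostAndPort( string("127.0.0.1:27017") ).isLocalHost();   // true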
diff --git a/util/httpclient.cpp b/util/net/httpclient.cpp
index 61d5671..16eaa0a 100644
--- a/util/httpclient.cpp
+++ b/util/net/httpclient.cpp
@@ -19,7 +19,9 @@
#include "httpclient.h"
#include "sock.h"
#include "message.h"
-#include "../bson/util/builder.h"
+#include "message_port.h"
+#include "../mongoutils/str.h"
+#include "../../bson/util/builder.h"
namespace mongo {
@@ -36,8 +38,15 @@ namespace mongo {
}
int HttpClient::_go( const char * command , string url , const char * body , Result * result ) {
- uassert( 10271 , "invalid url" , url.find( "http://" ) == 0 );
- url = url.substr( 7 );
+ bool ssl = false;
+ if ( url.find( "https://" ) == 0 ) {
+ ssl = true;
+ url = url.substr( 8 );
+ }
+ else {
+ uassert( 10271 , "invalid url" , url.find( "http://" ) == 0 );
+ url = url.substr( 7 );
+ }
string host , path;
if ( url.find( "/" ) == string::npos ) {
@@ -54,7 +63,7 @@ namespace mongo {
HD( "path [" << path << "]" );
string server = host;
- int port = 80;
+ int port = ssl ? 443 : 80;
string::size_type idx = host.find( ":" );
if ( idx != string::npos ) {
@@ -87,18 +96,27 @@ namespace mongo {
SockAddr addr( server.c_str() , port );
HD( "addr: " << addr.toString() );
- MessagingPort p;
- if ( ! p.connect( addr ) )
+ Socket sock;
+ if ( ! sock.connect( addr ) )
return -1;
+
+ if ( ssl ) {
+#ifdef MONGO_SSL
+ _checkSSLManager();
+ sock.secure( _sslManager.get() );
+#else
+ uasserted( 15862 , "no ssl support" );
+#endif
+ }
{
const char * out = req.c_str();
int toSend = req.size();
- p.send( out , toSend, "_go" );
+ sock.send( out , toSend, "_go" );
}
char buf[4096];
- int got = p.unsafe_recv( buf , 4096 );
+ int got = sock.unsafe_recv( buf , 4096 );
buf[got] = 0;
int rc;
@@ -110,7 +128,7 @@ namespace mongo {
if ( result )
sb << buf;
- while ( ( got = p.unsafe_recv( buf , 4096 ) ) > 0) {
+ while ( ( got = sock.unsafe_recv( buf , 4096 ) ) > 0) {
if ( result )
sb << buf;
}
@@ -141,10 +159,19 @@ namespace mongo {
if ( h.size() == 0 )
break;
+
+ i = h.find( ':' );
+ if ( i != string::npos )
+ _headers[h.substr(0,i)] = str::ltrim(h.substr(i+1));
}
_body = entire;
}
+#ifdef MONGO_SSL
+ void HttpClient::_checkSSLManager() {
+ _sslManager.reset( new SSLManager( true ) );
+ }
+#endif
}
diff --git a/util/httpclient.h b/util/net/httpclient.h
index d66544e..c3f8c82 100644
--- a/util/httpclient.h
+++ b/util/net/httpclient.h
@@ -17,13 +17,16 @@
#pragma once
-#include "../pch.h"
+#include "../../pch.h"
+#include "sock.h"
namespace mongo {
- class HttpClient {
+ class HttpClient : boost::noncopyable {
public:
+ typedef map<string,string> Headers;
+
class Result {
public:
Result() {}
@@ -32,7 +35,7 @@ namespace mongo {
return _entireResponse;
}
- const map<string,string> getHeaders() const {
+ const Headers getHeaders() const {
return _headers;
}
@@ -47,7 +50,7 @@ namespace mongo {
int _code;
string _entireResponse;
- map<string,string> _headers;
+ Headers _headers;
string _body;
friend class HttpClient;
@@ -66,6 +69,11 @@ namespace mongo {
private:
int _go( const char * command , string url , const char * body , Result * result );
+#ifdef MONGO_SSL
+ void _checkSSLManager();
+
+ scoped_ptr<SSLManager> _sslManager;
+#endif
};
}
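The _go() change teaches HttpClient to peel an https:// scheme off the URL, mark the connection for SSL, and default the port to 443 instead of 80 (an explicit :port in the host still wins). The scheme handling in isolation, as a standalone sketch rather than the code above:

    #include <cassert>
    #include <string>
    using namespace std;

    // strips the scheme, reports whether SSL is wanted, and picks the default port
    static bool parseScheme( string& url , int& port ) {
        if ( url.find( "https://" ) == 0 ) {
            url = url.substr( 8 );
            port = 443;
            return true;
        }
        assert( url.find( "http://" ) == 0 );   // the real code uasserts(10271, "invalid url")
        url = url.substr( 7 );
        port = 80;
        return false;
    }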
diff --git a/util/net/listen.cpp b/util/net/listen.cpp
new file mode 100644
index 0000000..6ee25b4
--- /dev/null
+++ b/util/net/listen.cpp
@@ -0,0 +1,391 @@
+// listen.cpp
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include "pch.h"
+#include "listen.h"
+#include "message_port.h"
+
+#ifndef _WIN32
+
+# ifndef __sunos__
+# include <ifaddrs.h>
+# endif
+# include <sys/resource.h>
+# include <sys/stat.h>
+
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+#include <netinet/in.h>
+#include <netinet/tcp.h>
+#include <arpa/inet.h>
+#include <errno.h>
+#include <netdb.h>
+#ifdef __openbsd__
+# include <sys/uio.h>
+#endif
+
+#else
+
+// errno doesn't work for winsock.
+#undef errno
+#define errno WSAGetLastError()
+
+#endif
+
+namespace mongo {
+
+
+ void checkTicketNumbers();
+
+
+ // ----- Listener -------
+
+ const Listener* Listener::_timeTracker;
+
+ vector<SockAddr> ipToAddrs(const char* ips, int port, bool useUnixSockets) {
+ vector<SockAddr> out;
+ if (*ips == '\0') {
+ out.push_back(SockAddr("0.0.0.0", port)); // IPv4 all
+
+ if (IPv6Enabled())
+ out.push_back(SockAddr("::", port)); // IPv6 all
+#ifndef _WIN32
+ if (useUnixSockets)
+ out.push_back(SockAddr(makeUnixSockPath(port).c_str(), port)); // Unix socket
+#endif
+ return out;
+ }
+
+ while(*ips) {
+ string ip;
+ const char * comma = strchr(ips, ',');
+ if (comma) {
+ ip = string(ips, comma - ips);
+ ips = comma + 1;
+ }
+ else {
+ ip = string(ips);
+ ips = "";
+ }
+
+ SockAddr sa(ip.c_str(), port);
+ out.push_back(sa);
+
+#ifndef _WIN32
+ if (useUnixSockets && (sa.getAddr() == "127.0.0.1" || sa.getAddr() == "0.0.0.0")) // only IPv4
+ out.push_back(SockAddr(makeUnixSockPath(port).c_str(), port));
+#endif
+ }
+ return out;
+
+ }
+
+ Listener::Listener(const string& name, const string &ip, int port, bool logConnect )
+ : _port(port), _name(name), _ip(ip), _logConnect(logConnect), _elapsedTime(0) {
+#ifdef MONGO_SSL
+ _ssl = 0;
+ _sslPort = 0;
+
+ if ( cmdLine.sslOnNormalPorts && cmdLine.sslServerManager ) {
+ secure( cmdLine.sslServerManager );
+ }
+#endif
+ }
+
+ Listener::~Listener() {
+ if ( _timeTracker == this )
+ _timeTracker = 0;
+ }
+
+#ifdef MONGO_SSL
+ void Listener::secure( SSLManager* manager ) {
+ _ssl = manager;
+ }
+
+ void Listener::addSecurePort( SSLManager* manager , int additionalPort ) {
+ _ssl = manager;
+ _sslPort = additionalPort;
+ }
+
+#endif
+
+ bool Listener::_setupSockets( const vector<SockAddr>& mine , vector<int>& socks ) {
+ for (vector<SockAddr>::const_iterator it=mine.begin(), end=mine.end(); it != end; ++it) {
+ const SockAddr& me = *it;
+
+ SOCKET sock = ::socket(me.getType(), SOCK_STREAM, 0);
+ massert( 15863 , str::stream() << "listen(): invalid socket? " << errnoWithDescription() , sock >= 0 );
+
+ if (me.getType() == AF_UNIX) {
+#if !defined(_WIN32)
+ if (unlink(me.getAddr().c_str()) == -1) {
+ int x = errno;
+ if (x != ENOENT) {
+ log() << "couldn't unlink socket file " << me << errnoWithDescription(x) << " skipping" << endl;
+ continue;
+ }
+ }
+#endif
+ }
+ else if (me.getType() == AF_INET6) {
+ // IPv6 can also accept IPv4 connections as mapped addresses (::ffff:127.0.0.1)
+ // That causes a conflict if we don't set it to IPV6_V6ONLY
+ const int one = 1;
+ setsockopt(sock, IPPROTO_IPV6, IPV6_V6ONLY, (const char*) &one, sizeof(one));
+ }
+
+#if !defined(_WIN32)
+ {
+ const int one = 1;
+ if ( setsockopt( sock , SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one)) < 0 )
+ out() << "Failed to set socket opt, SO_REUSEADDR" << endl;
+ }
+#endif
+
+ if ( ::bind(sock, me.raw(), me.addressSize) != 0 ) {
+ int x = errno;
+ error() << "listen(): bind() failed " << errnoWithDescription(x) << " for socket: " << me.toString() << endl;
+ if ( x == EADDRINUSE )
+ error() << " addr already in use" << endl;
+ closesocket(sock);
+ return false;
+ }
+
+#if !defined(_WIN32)
+ if (me.getType() == AF_UNIX) {
+ if (chmod(me.getAddr().c_str(), 0777) == -1) {
+ error() << "couldn't chmod socket file " << me << errnoWithDescription() << endl;
+ }
+ ListeningSockets::get()->addPath( me.getAddr() );
+ }
+#endif
+
+ if ( ::listen(sock, 128) != 0 ) {
+ error() << "listen(): listen() failed " << errnoWithDescription() << endl;
+ closesocket(sock);
+ return false;
+ }
+
+ ListeningSockets::get()->add( sock );
+
+ socks.push_back(sock);
+ }
+
+ return true;
+ }
+
+ void Listener::initAndListen() {
+ checkTicketNumbers();
+ vector<int> socks;
+ set<int> sslSocks;
+
+ { // normal sockets
+ vector<SockAddr> mine = ipToAddrs(_ip.c_str(), _port, (!cmdLine.noUnixSocket && useUnixSockets()));
+ if ( ! _setupSockets( mine , socks ) )
+ return;
+ }
+
+#ifdef MONGO_SSL
+ if ( _ssl && _sslPort > 0 ) {
+ unsigned prev = socks.size();
+
+ vector<SockAddr> mine = ipToAddrs(_ip.c_str(), _sslPort, false );
+ if ( ! _setupSockets( mine , socks ) )
+ return;
+
+ for ( unsigned i=prev; i<socks.size(); i++ ) {
+ sslSocks.insert( socks[i] );
+ }
+
+ }
+#endif
+
+ SOCKET maxfd = 0; // needed for select()
+ for ( unsigned i=0; i<socks.size(); i++ ) {
+ if ( socks[i] > maxfd )
+ maxfd = socks[i];
+ }
+
+#ifdef MONGO_SSL
+ if ( _ssl == 0 ) {
+ _logListen( _port , false );
+ }
+ else if ( _sslPort == 0 ) {
+ _logListen( _port , true );
+ }
+ else {
+ // both
+ _logListen( _port , false );
+ _logListen( _sslPort , true );
+ }
+#else
+ _logListen( _port , false );
+#endif
+
+ static long connNumber = 0;
+ struct timeval maxSelectTime;
+ while ( ! inShutdown() ) {
+ fd_set fds[1];
+ FD_ZERO(fds);
+
+ for (vector<int>::iterator it=socks.begin(), end=socks.end(); it != end; ++it) {
+ FD_SET(*it, fds);
+ }
+
+ maxSelectTime.tv_sec = 0;
+ maxSelectTime.tv_usec = 10000;
+ const int ret = select(maxfd+1, fds, NULL, NULL, &maxSelectTime);
+
+ if (ret == 0) {
+#if defined(__linux__)
+ _elapsedTime += ( 10000 - maxSelectTime.tv_usec ) / 1000;
+#else
+ _elapsedTime += 10;
+#endif
+ continue;
+ }
+
+ if (ret < 0) {
+ int x = errno;
+#ifdef EINTR
+ if ( x == EINTR ) {
+ log() << "select() signal caught, continuing" << endl;
+ continue;
+ }
+#endif
+ if ( ! inShutdown() )
+ log() << "select() failure: ret=" << ret << " " << errnoWithDescription(x) << endl;
+ return;
+ }
+
+#if defined(__linux__)
+ _elapsedTime += max(ret, (int)(( 10000 - maxSelectTime.tv_usec ) / 1000));
+#else
+ _elapsedTime += ret; // assume 1ms to grab connection. very rough
+#endif
+
+ for (vector<int>::iterator it=socks.begin(), end=socks.end(); it != end; ++it) {
+ if (! (FD_ISSET(*it, fds)))
+ continue;
+
+ SockAddr from;
+ int s = accept(*it, from.raw(), &from.addressSize);
+ if ( s < 0 ) {
+ int x = errno; // so no global issues
+ if ( x == ECONNABORTED || x == EBADF ) {
+ log() << "Listener on port " << _port << " aborted" << endl;
+ return;
+ }
+ if ( x == 0 && inShutdown() ) {
+ return; // socket closed
+ }
+ if( !inShutdown() ) {
+ log() << "Listener: accept() returns " << s << " " << errnoWithDescription(x) << endl;
+ if (x == EMFILE || x == ENFILE) {
+ // Connection still in listen queue but we can't accept it yet
+ error() << "Out of file descriptors. Waiting one second before trying to accept more connections." << warnings;
+ sleepsecs(1);
+ }
+ }
+ continue;
+ }
+ if (from.getType() != AF_UNIX)
+ disableNagle(s);
+ if ( _logConnect && ! cmdLine.quiet )
+ log() << "connection accepted from " << from.toString() << " #" << ++connNumber << endl;
+
+ Socket newSock = Socket(s, from);
+#ifdef MONGO_SSL
+ if ( _ssl && ( _sslPort == 0 || sslSocks.count(*it) ) ) {
+ newSock.secureAccepted( _ssl );
+ }
+#endif
+ accepted( newSock );
+ }
+ }
+ }
+
+ void Listener::_logListen( int port , bool ssl ) {
+ log() << _name << ( _name.size() ? " " : "" ) << "waiting for connections on port " << port << ( ssl ? " ssl" : "" ) << endl;
+ }
+
+
+ void Listener::accepted(Socket socket) {
+ accepted( new MessagingPort(socket) );
+ }
+
+ void Listener::accepted(MessagingPort *mp) {
+ assert(!"You must overwrite one of the accepted methods");
+ }
+
+ // ----- ListeningSockets -------
+
+ ListeningSockets* ListeningSockets::_instance = new ListeningSockets();
+
+ ListeningSockets* ListeningSockets::get() {
+ return _instance;
+ }
+
+ // ------ connection ticket and control ------
+
+ const int DEFAULT_MAX_CONN = 20000;
+ const int MAX_MAX_CONN = 20000;
+
+ int getMaxConnections() {
+#ifdef _WIN32
+ return DEFAULT_MAX_CONN;
+#else
+ struct rlimit limit;
+ assert( getrlimit(RLIMIT_NOFILE,&limit) == 0 );
+
+ int max = (int)(limit.rlim_cur * .8);
+
+ log(1) << "fd limit"
+ << " hard:" << limit.rlim_max
+ << " soft:" << limit.rlim_cur
+ << " max conn: " << max
+ << endl;
+
+ if ( max > MAX_MAX_CONN )
+ max = MAX_MAX_CONN;
+
+ return max;
+#endif
+ }
+
+ void checkTicketNumbers() {
+ int want = getMaxConnections();
+ int current = connTicketHolder.outof();
+ if ( current != DEFAULT_MAX_CONN ) {
+ if ( current < want ) {
+ // they want fewer than they can handle
+ // which is fine
+ log(1) << " only allowing " << current << " connections" << endl;
+ return;
+ }
+ if ( current > want ) {
+ log() << " --maxConns too high, can only handle " << want << endl;
+ }
+ }
+ connTicketHolder.resize( want );
+ }
+
+ TicketHolder connTicketHolder(DEFAULT_MAX_CONN);
+
+}
diff --git a/util/net/listen.h b/util/net/listen.h
new file mode 100644
index 0000000..415db1e
--- /dev/null
+++ b/util/net/listen.h
@@ -0,0 +1,190 @@
+// listen.h
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "sock.h"
+
+namespace mongo {
+
+ class MessagingPort;
+
+ class Listener : boost::noncopyable {
+ public:
+
+ Listener(const string& name, const string &ip, int port, bool logConnect=true );
+
+ virtual ~Listener();
+
+#ifdef MONGO_SSL
+ /**
+ * make this an ssl socket
+ * ownership of SSLManager remains with the caller
+ */
+ void secure( SSLManager* manager );
+
+ void addSecurePort( SSLManager* manager , int additionalPort );
+#endif
+
+ void initAndListen(); // never returns unless error (start a thread)
+
+ /* spawn a thread, etc., then return */
+ virtual void accepted(Socket socket);
+ virtual void accepted(MessagingPort *mp);
+
+ const int _port;
+
+ /**
+ * @return a rough estimate of elapsed time since the server started
+ */
+ long long getMyElapsedTimeMillis() const { return _elapsedTime; }
+
+ void setAsTimeTracker() {
+ _timeTracker = this;
+ }
+
+ static const Listener* getTimeTracker() {
+ return _timeTracker;
+ }
+
+ static long long getElapsedTimeMillis() {
+ if ( _timeTracker )
+ return _timeTracker->getMyElapsedTimeMillis();
+
+ // should this assert or throw? seems like callers may not expect to get zero back, certainly not forever.
+ return 0;
+ }
+
+ private:
+ string _name;
+ string _ip;
+ bool _logConnect;
+ long long _elapsedTime;
+
+#ifdef MONGO_SSL
+ SSLManager* _ssl;
+ int _sslPort;
+#endif
+
+ /**
+ * @return true iff everything went ok
+ */
+ bool _setupSockets( const vector<SockAddr>& mine , vector<int>& socks );
+
+ void _logListen( int port , bool ssl );
+
+ static const Listener* _timeTracker;
+
+ virtual bool useUnixSockets() const { return false; }
+ };
+
+ /**
+ * keep track of elapsed time
+ * after a set amount of time, tells you to do something
+ * only in this file because it depends on Listener
+ */
+ class ElapsedTracker {
+ public:
+ ElapsedTracker( int hitsBetweenMarks , int msBetweenMarks )
+ : _h( hitsBetweenMarks ) , _ms( msBetweenMarks ) , _pings(0) {
+ _last = Listener::getElapsedTimeMillis();
+ }
+
+ /**
+ * call this for every iteration
+ * returns true if one of the triggers has gone off
+ */
+ bool ping() {
+ if ( ( ++_pings % _h ) == 0 ) {
+ _last = Listener::getElapsedTimeMillis();
+ return true;
+ }
+
+ long long now = Listener::getElapsedTimeMillis();
+ if ( now - _last > _ms ) {
+ _last = now;
+ return true;
+ }
+
+ return false;
+ }
+
+ private:
+ int _h;
+ int _ms;
+
+ unsigned long long _pings;
+
+ long long _last;
+
+ };
+
+ class ListeningSockets {
+ public:
+ ListeningSockets()
+ : _mutex("ListeningSockets")
+ , _sockets( new set<int>() )
+ , _socketPaths( new set<string>() )
+ { }
+ void add( int sock ) {
+ scoped_lock lk( _mutex );
+ _sockets->insert( sock );
+ }
+ void addPath( string path ) {
+ scoped_lock lk( _mutex );
+ _socketPaths->insert( path );
+ }
+ void remove( int sock ) {
+ scoped_lock lk( _mutex );
+ _sockets->erase( sock );
+ }
+ void closeAll() {
+ set<int>* sockets;
+ set<string>* paths;
+
+ {
+ scoped_lock lk( _mutex );
+ sockets = _sockets;
+ _sockets = new set<int>();
+ paths = _socketPaths;
+ _socketPaths = new set<string>();
+ }
+
+ for ( set<int>::iterator i=sockets->begin(); i!=sockets->end(); i++ ) {
+ int sock = *i;
+ log() << "closing listening socket: " << sock << endl;
+ closesocket( sock );
+ }
+
+ for ( set<string>::iterator i=paths->begin(); i!=paths->end(); i++ ) {
+ string path = *i;
+ log() << "removing socket file: " << path << endl;
+ ::remove( path.c_str() );
+ }
+ }
+ static ListeningSockets* get();
+ private:
+ mongo::mutex _mutex;
+ set<int>* _sockets;
+ set<string>* _socketPaths; // for unix domain sockets
+ static ListeningSockets* _instance;
+ };
+
+
+ extern TicketHolder connTicketHolder;
+
+}
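ElapsedTracker fires either every hitsBetweenMarks calls or every msBetweenMarks milliseconds of Listener time, whichever comes first, and callers poll it with ping(). Typical use in a long-running loop; the 128/2000 values and the Item/workOn names are illustrative stand-ins:

    void processMany( const vector<Item>& items ) {
        ElapsedTracker tracker( 128 , 2000 );     // every 128 hits or every 2 seconds
        for ( unsigned i = 0; i < items.size(); i++ ) {
            workOn( items[i] );                   // hypothetical per-item work
            if ( tracker.ping() ) {
                // periodic housekeeping: log progress, yield, check for shutdown, ...
            }
        }
    }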
diff --git a/util/net/message.cpp b/util/net/message.cpp
new file mode 100644
index 0000000..a84e5c4
--- /dev/null
+++ b/util/net/message.cpp
@@ -0,0 +1,64 @@
+// message.cpp
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "pch.h"
+
+#include <fcntl.h>
+#include <errno.h>
+#include <time.h>
+
+#include "message.h"
+#include "message_port.h"
+#include "listen.h"
+
+#include "../goodies.h"
+#include "../../client/dbclient.h"
+
+namespace mongo {
+
+ void Message::send( MessagingPort &p, const char *context ) {
+ if ( empty() ) {
+ return;
+ }
+ if ( _buf != 0 ) {
+ p.send( (char*)_buf, _buf->len, context );
+ }
+ else {
+ p.send( _data, context );
+ }
+ }
+
+ MSGID NextMsgId;
+
+ /*struct MsgStart {
+ MsgStart() {
+ NextMsgId = (((unsigned) time(0)) << 16) ^ curTimeMillis();
+ assert(MsgDataHeaderSize == 16);
+ }
+ } msgstart;*/
+
+ MSGID nextMessageId() {
+ MSGID msgid = NextMsgId++;
+ return msgid;
+ }
+
+ bool doesOpGetAResponse( int op ) {
+ return op == dbQuery || op == dbGetMore;
+ }
+
+
+} // namespace mongo
diff --git a/util/message.h b/util/net/message.h
index f114445..16da5d6 100644
--- a/util/message.h
+++ b/util/net/message.h
@@ -1,4 +1,4 @@
-// Message.h
+// message.h
/* Copyright 2009 10gen Inc.
*
@@ -17,154 +17,17 @@
#pragma once
-#include "../util/sock.h"
-#include "../bson/util/atomic_int.h"
+#include "sock.h"
+#include "../../bson/util/atomic_int.h"
#include "hostandport.h"
namespace mongo {
- extern bool noUnixSocket;
-
class Message;
class MessagingPort;
class PiggyBackData;
- typedef AtomicUInt MSGID;
-
- class Listener : boost::noncopyable {
- public:
- Listener(const string &ip, int p, bool logConnect=true ) : _port(p), _ip(ip), _logConnect(logConnect), _elapsedTime(0) { }
- virtual ~Listener() {
- if ( _timeTracker == this )
- _timeTracker = 0;
- }
- void initAndListen(); // never returns unless error (start a thread)
-
- /* spawn a thread, etc., then return */
- virtual void accepted(int sock, const SockAddr& from);
- virtual void accepted(MessagingPort *mp) {
- assert(!"You must overwrite one of the accepted methods");
- }
-
- const int _port;
-
- /**
- * @return a rough estimate of elepased time since the server started
- */
- long long getMyElapsedTimeMillis() const { return _elapsedTime; }
-
- void setAsTimeTracker() {
- _timeTracker = this;
- }
-
- static const Listener* getTimeTracker() {
- return _timeTracker;
- }
-
- static long long getElapsedTimeMillis() {
- if ( _timeTracker )
- return _timeTracker->getMyElapsedTimeMillis();
-
- // should this assert or throw? seems like callers may not expect to get zero back, certainly not forever.
- return 0;
- }
-
- private:
- string _ip;
- bool _logConnect;
- long long _elapsedTime;
-
- static const Listener* _timeTracker;
- };
-
- class AbstractMessagingPort : boost::noncopyable {
- public:
- virtual ~AbstractMessagingPort() { }
- virtual void reply(Message& received, Message& response, MSGID responseTo) = 0; // like the reply below, but doesn't rely on received.data still being available
- virtual void reply(Message& received, Message& response) = 0;
-
- virtual HostAndPort remote() const = 0;
- virtual unsigned remotePort() const = 0;
-
- private:
- int _clientId;
- };
-
- class MessagingPort : public AbstractMessagingPort {
- public:
- MessagingPort(int sock, const SockAddr& farEnd);
-
- // in some cases the timeout will actually be 2x this value - eg we do a partial send,
- // then the timeout fires, then we try to send again, then the timeout fires again with
- // no data sent, then we detect that the other side is down
- MessagingPort(double so_timeout = 0, int logLevel = 0 );
-
- virtual ~MessagingPort();
-
- void shutdown();
-
- bool connect(SockAddr& farEnd);
-
- /* it's assumed if you reuse a message object, that it doesn't cross MessagingPort's.
- also, the Message data will go out of scope on the subsequent recv call.
- */
- bool recv(Message& m);
- void reply(Message& received, Message& response, MSGID responseTo);
- void reply(Message& received, Message& response);
- bool call(Message& toSend, Message& response);
-
- void say(Message& toSend, int responseTo = -1);
-
- /**
- * this is used for doing 'async' queries
- * instead of doing call( to , from )
- * you would do
- * say( to )
- * recv( from )
- * Note: if you fail to call recv and someone else uses this port,
- * horrible things will happend
- */
- bool recv( const Message& sent , Message& response );
-
- void piggyBack( Message& toSend , int responseTo = -1 );
-
- virtual unsigned remotePort() const;
- virtual HostAndPort remote() const;
-
- // send len or throw SocketException
- void send( const char * data , int len, const char *context );
- void send( const vector< pair< char *, int > > &data, const char *context );
-
- // recv len or throw SocketException
- void recv( char * data , int len );
-
- int unsafe_recv( char *buf, int max );
- void clearCounters() { _bytesIn = 0; _bytesOut = 0; }
- long long getBytesIn() const { return _bytesIn; }
- long long getBytesOut() const { return _bytesOut; }
- private:
- int sock;
- PiggyBackData * piggyBackData;
-
- long long _bytesIn;
- long long _bytesOut;
-
- // this is the parsed version of farEnd
- // mutable because its initialized only on call to remote()
- mutable HostAndPort _farEndParsed;
-
- public:
- SockAddr farEnd;
- double _timeout;
- int _logLevel; // passed to log() when logging errors
-
- static void closeAllSockets(unsigned tagMask = 0xffffffff);
-
- /* ports can be tagged with various classes. see closeAllSockets(tag). defaults to 0. */
- unsigned tag;
-
- friend class PiggyBackData;
- };
+ typedef AtomicUInt MSGID;
enum Operations {
opReply = 1, /* reply. responseTo is set. */
@@ -425,17 +288,9 @@ namespace mongo {
return _freeIt;
}
- void send( MessagingPort &p, const char *context ) {
- if ( empty() ) {
- return;
- }
- if ( _buf != 0 ) {
- p.send( (char*)_buf, _buf->len, context );
- }
- else {
- p.send( _data, context );
- }
- }
+ void send( MessagingPort &p, const char *context );
+
+ string toString() const;
private:
void _setData( MsgData *d, bool freeIt ) {
@@ -450,59 +305,8 @@ namespace mongo {
bool _freeIt;
};
- class SocketException : public DBException {
- public:
- const enum Type { CLOSED , RECV_ERROR , SEND_ERROR, RECV_TIMEOUT, SEND_TIMEOUT, FAILED_STATE, CONNECT_ERROR } _type;
-
- SocketException( Type t , string server="" , int code = 9001 , string extra="" ) : DBException( "socket exception" , code ) , _type(t) , _server(server), _extra(extra){ }
- virtual ~SocketException() throw() {}
-
- bool shouldPrint() const { return _type != CLOSED; }
- virtual string toString() const;
-
- private:
- string _server;
- string _extra;
- };
MSGID nextMessageId();
- extern TicketHolder connTicketHolder;
-
- class ElapsedTracker {
- public:
- ElapsedTracker( int hitsBetweenMarks , int msBetweenMarks )
- : _h( hitsBetweenMarks ) , _ms( msBetweenMarks ) , _pings(0) {
- _last = Listener::getElapsedTimeMillis();
- }
-
- /**
- * call this for every iteration
- * returns true if one of the triggers has gone off
- */
- bool ping() {
- if ( ( ++_pings % _h ) == 0 ) {
- _last = Listener::getElapsedTimeMillis();
- return true;
- }
-
- long long now = Listener::getElapsedTimeMillis();
- if ( now - _last > _ms ) {
- _last = now;
- return true;
- }
-
- return false;
- }
-
- private:
- int _h;
- int _ms;
-
- unsigned long long _pings;
-
- long long _last;
-
- };
} // namespace mongo
diff --git a/util/net/message_port.cpp b/util/net/message_port.cpp
new file mode 100644
index 0000000..9abfaf7
--- /dev/null
+++ b/util/net/message_port.cpp
@@ -0,0 +1,298 @@
+// message_port.cpp
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "pch.h"
+
+#include <fcntl.h>
+#include <errno.h>
+#include <time.h>
+
+#include "message.h"
+#include "message_port.h"
+#include "listen.h"
+
+#include "../goodies.h"
+#include "../background.h"
+#include "../time_support.h"
+#include "../../db/cmdline.h"
+#include "../../client/dbclient.h"
+
+
+#ifndef _WIN32
+# ifndef __sunos__
+# include <ifaddrs.h>
+# endif
+# include <sys/resource.h>
+# include <sys/stat.h>
+#else
+
+// errno doesn't work for winsock.
+#undef errno
+#define errno WSAGetLastError()
+
+#endif
+
+namespace mongo {
+
+
+// if you want trace output:
+#define mmm(x)
+
+ /* messagingport -------------------------------------------------------------- */
+
+ class PiggyBackData {
+ public:
+ PiggyBackData( MessagingPort * port ) {
+ _port = port;
+ _buf = new char[1300];
+ _cur = _buf;
+ }
+
+ ~PiggyBackData() {
+ DESTRUCTOR_GUARD (
+ flush();
+ delete[]( _cur );
+ );
+ }
+
+ void append( Message& m ) {
+ assert( m.header()->len <= 1300 );
+
+ if ( len() + m.header()->len > 1300 )
+ flush();
+
+ memcpy( _cur , m.singleData() , m.header()->len );
+ _cur += m.header()->len;
+ }
+
+ void flush() {
+ if ( _buf == _cur )
+ return;
+
+ _port->send( _buf , len(), "flush" );
+ _cur = _buf;
+ }
+
+ int len() const { return _cur - _buf; }
+
+ private:
+ MessagingPort* _port;
+ char * _buf;
+ char * _cur;
+ };
+
+ class Ports {
+ set<MessagingPort*> ports;
+ mongo::mutex m;
+ public:
+ Ports() : ports(), m("Ports") {}
+ void closeAll(unsigned skip_mask) {
+ scoped_lock bl(m);
+ for ( set<MessagingPort*>::iterator i = ports.begin(); i != ports.end(); i++ ) {
+ if( (*i)->tag & skip_mask )
+ continue;
+ (*i)->shutdown();
+ }
+ }
+ void insert(MessagingPort* p) {
+ scoped_lock bl(m);
+ ports.insert(p);
+ }
+ void erase(MessagingPort* p) {
+ scoped_lock bl(m);
+ ports.erase(p);
+ }
+ };
+
+ // we "new" this so it is still around when other automatic global vars
+ // are being destructed during termination.
+ Ports& ports = *(new Ports());
+
+ void MessagingPort::closeAllSockets(unsigned mask) {
+ ports.closeAll(mask);
+ }
+
+ MessagingPort::MessagingPort(int fd, const SockAddr& remote)
+ : Socket( fd , remote ) , piggyBackData(0) {
+ ports.insert(this);
+ }
+
+ MessagingPort::MessagingPort( double timeout, int ll )
+ : Socket( timeout, ll ) {
+ ports.insert(this);
+ piggyBackData = 0;
+ }
+
+ MessagingPort::MessagingPort( Socket& sock )
+ : Socket( sock ) , piggyBackData( 0 ) {
+ }
+
+ void MessagingPort::shutdown() {
+ close();
+ }
+
+ MessagingPort::~MessagingPort() {
+ if ( piggyBackData )
+ delete( piggyBackData );
+ shutdown();
+ ports.erase(this);
+ }
+
+ bool MessagingPort::recv(Message& m) {
+ try {
+again:
+ mmm( log() << "* recv() sock:" << this->sock << endl; )
+ int len = -1;
+
+ char *lenbuf = (char *) &len;
+ int lft = 4;
+ Socket::recv( lenbuf, lft );
+
+ if ( len < 16 || len > 48000000 ) { // messages must be large enough for headers
+ if ( len == -1 ) {
+ // Endian check from the client, after connecting, to see what mode server is running in.
+ unsigned foo = 0x10203040;
+ send( (char *) &foo, 4, "endian" );
+ goto again;
+ }
+
+ if ( len == 542393671 ) {
+ // an http GET
+ log(_logLevel) << "looks like you're trying to access db over http on native driver port. please add 1000 for webserver" << endl;
+ string msg = "You are trying to access MongoDB on the native driver port. For http diagnostic access, add 1000 to the port number\n";
+ stringstream ss;
+ ss << "HTTP/1.0 200 OK\r\nConnection: close\r\nContent-Type: text/plain\r\nContent-Length: " << msg.size() << "\r\n\r\n" << msg;
+ string s = ss.str();
+ send( s.c_str(), s.size(), "http" );
+ return false;
+ }
+ log(0) << "recv(): message len " << len << " is too large" << endl;
+ return false;
+ }
+
+ int z = (len+1023)&0xfffffc00;
+ assert(z>=len);
+ MsgData *md = (MsgData *) malloc(z);
+ assert(md);
+ md->len = len;
+
+ char *p = (char *) &md->id;
+ int left = len -4;
+
+ try {
+ Socket::recv( p, left );
+ }
+ catch (...) {
+ free(md);
+ throw;
+ }
+
+ m.setData(md, true);
+ return true;
+
+ }
+ catch ( const SocketException & e ) {
+ log(_logLevel + (e.shouldPrint() ? 0 : 1) ) << "SocketException: remote: " << remote() << " error: " << e << endl;
+ m.reset();
+ return false;
+ }
+ }
+
+ void MessagingPort::reply(Message& received, Message& response) {
+ say(/*received.from, */response, received.header()->id);
+ }
+
+ void MessagingPort::reply(Message& received, Message& response, MSGID responseTo) {
+ say(/*received.from, */response, responseTo);
+ }
+
+ bool MessagingPort::call(Message& toSend, Message& response) {
+ mmm( log() << "*call()" << endl; )
+ say(toSend);
+ return recv( toSend , response );
+ }
+
+ bool MessagingPort::recv( const Message& toSend , Message& response ) {
+ while ( 1 ) {
+ bool ok = recv(response);
+ if ( !ok )
+ return false;
+ //log() << "got response: " << response.data->responseTo << endl;
+ if ( response.header()->responseTo == toSend.header()->id )
+ break;
+ error() << "MessagingPort::call() wrong id got:" << hex << (unsigned)response.header()->responseTo << " expect:" << (unsigned)toSend.header()->id << '\n'
+ << dec
+ << " toSend op: " << (unsigned)toSend.operation() << '\n'
+ << " response msgid:" << (unsigned)response.header()->id << '\n'
+ << " response len: " << (unsigned)response.header()->len << '\n'
+ << " response op: " << response.operation() << '\n'
+ << " remote: " << remoteString() << endl;
+ assert(false);
+ response.reset();
+ }
+ mmm( log() << "*call() end" << endl; )
+ return true;
+ }
+
+ void MessagingPort::say(Message& toSend, int responseTo) {
+ assert( !toSend.empty() );
+ mmm( log() << "* say() sock:" << this->sock << " thr:" << GetCurrentThreadId() << endl; )
+ toSend.header()->id = nextMessageId();
+ toSend.header()->responseTo = responseTo;
+
+ if ( piggyBackData && piggyBackData->len() ) {
+ mmm( log() << "* have piggy back" << endl; )
+ if ( ( piggyBackData->len() + toSend.header()->len ) > 1300 ) {
+ // won't fit in a packet - so just send it off
+ piggyBackData->flush();
+ }
+ else {
+ piggyBackData->append( toSend );
+ piggyBackData->flush();
+ return;
+ }
+ }
+
+ toSend.send( *this, "say" );
+ }
+
+ void MessagingPort::piggyBack( Message& toSend , int responseTo ) {
+
+ if ( toSend.header()->len > 1300 ) {
+ // not worth saving because it's almost an entire packet
+ say( toSend );
+ return;
+ }
+
+ // we're going to be storing this, so need to set it up
+ toSend.header()->id = nextMessageId();
+ toSend.header()->responseTo = responseTo;
+
+ if ( ! piggyBackData )
+ piggyBackData = new PiggyBackData( this );
+
+ piggyBackData->append( toSend );
+ }
+
+ HostAndPort MessagingPort::remote() const {
+ if ( ! _remoteParsed.hasPort() )
+ _remoteParsed = HostAndPort( remoteAddr() );
+ return _remoteParsed;
+ }
+
+
+} // namespace mongo
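For readers puzzling over the constant 542393671 in MessagingPort::recv() above: messages are framed by a 4-byte little-endian length prefix, so the first four bytes of an HTTP request ("GET ") show up as exactly that "length". A standalone check (not part of this patch):

    // standalone illustration -- not code from this patch
    #include <assert.h>
    #include <string.h>
    #include <stdio.h>

    int main() {
        const char http[4] = { 'G', 'E', 'T', ' ' };
        int len;
        memcpy( &len, http, sizeof(len) );      // same as reading the length prefix
        assert( len == 542393671 );             // holds on a little-endian host
        printf( "\"GET \" read as a message length = %d\n", len );
        return 0;
    }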
diff --git a/util/net/message_port.h b/util/net/message_port.h
new file mode 100644
index 0000000..22ecafe
--- /dev/null
+++ b/util/net/message_port.h
@@ -0,0 +1,107 @@
+// message_port.h
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "sock.h"
+#include "message.h"
+
+namespace mongo {
+
+ class MessagingPort;
+ class PiggyBackData;
+
+ typedef AtomicUInt MSGID;
+
+ class AbstractMessagingPort : boost::noncopyable {
+ public:
+ AbstractMessagingPort() : tag(0) {}
+ virtual ~AbstractMessagingPort() { }
+ virtual void reply(Message& received, Message& response, MSGID responseTo) = 0; // like the reply below, but doesn't rely on received.data still being available
+ virtual void reply(Message& received, Message& response) = 0;
+
+ virtual HostAndPort remote() const = 0;
+ virtual unsigned remotePort() const = 0;
+
+ private:
+
+ public:
+ // TODO make this private with some helpers
+
+ /* ports can be tagged with various classes. see closeAllSockets(tag). defaults to 0. */
+ unsigned tag;
+
+ };
+
+ class MessagingPort : public AbstractMessagingPort , public Socket {
+ public:
+ MessagingPort(int fd, const SockAddr& remote);
+
+ // in some cases the timeout will actually be 2x this value - eg we do a partial send,
+ // then the timeout fires, then we try to send again, then the timeout fires again with
+ // no data sent, then we detect that the other side is down
+ MessagingPort(double so_timeout = 0, int logLevel = 0 );
+
+ MessagingPort(Socket& socket);
+
+ virtual ~MessagingPort();
+
+ void shutdown();
+
+ /* it's assumed that if you reuse a message object, it doesn't cross MessagingPorts.
+ also, the Message data will go out of scope on the subsequent recv call.
+ */
+ bool recv(Message& m);
+ void reply(Message& received, Message& response, MSGID responseTo);
+ void reply(Message& received, Message& response);
+ bool call(Message& toSend, Message& response);
+
+ void say(Message& toSend, int responseTo = -1);
+
+ /**
+ * this is used for doing 'async' queries
+ * instead of doing call( to , from )
+ * you would do
+ * say( to )
+ * recv( from )
+ * Note: if you fail to call recv and someone else uses this port,
+ * horrible things will happen
+ */
+ bool recv( const Message& sent , Message& response );
+
+ void piggyBack( Message& toSend , int responseTo = -1 );
+
+ unsigned remotePort() const { return Socket::remotePort(); }
+ virtual HostAndPort remote() const;
+
+
+ private:
+
+ PiggyBackData * piggyBackData;
+
+ // this is the parsed version of remote
+ // mutable because it's initialized only on call to remote()
+ mutable HostAndPort _remoteParsed;
+
+ public:
+ static void closeAllSockets(unsigned tagMask = 0xffffffff);
+
+ friend class PiggyBackData;
+ };
+
+
+} // namespace mongo
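A sketch of the call() versus say()/recv() patterns documented in the class above (assumes the server tree; buildRequest() is a placeholder for the usual Message builders; connect() comes from the Socket base class):

    // hypothetical sketch -- not code from this patch
    void roundTripSketch() {
        MessagingPort p( 30 /* socket timeout in seconds */ );
        SockAddr server( "127.0.0.1" , 27017 );
        if ( ! p.connect( server ) )
            return;

        Message request;
        buildRequest( request );                // placeholder for the usual Message builders
        Message response;

        // synchronous round trip:
        p.call( request , response );

        // 'async' form documented above: send now, pick up the reply later.
        // nothing else may use this port in between.
        response.reset();
        p.say( request );
        // ... other work ...
        p.recv( request , response );           // returns when responseTo == request id
    }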
diff --git a/util/message_server.h b/util/net/message_server.h
index defae0b..ae77b97 100644
--- a/util/message_server.h
+++ b/util/net/message_server.h
@@ -22,16 +22,28 @@
#pragma once
-#include "../pch.h"
+#include "../../pch.h"
namespace mongo {
class MessageHandler {
public:
virtual ~MessageHandler() {}
-
+
+ /**
+ * called once when a socket is connected
+ */
virtual void connected( AbstractMessagingPort* p ) = 0;
+
+ /**
+ * called every time a message comes in
+ * handler is responsible for responding to client
+ */
virtual void process( Message& m , AbstractMessagingPort* p , LastError * err ) = 0;
+
+ /**
+ * called once when a socket is disconnected
+ */
virtual void disconnected( AbstractMessagingPort* p ) = 0;
};
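A minimal MessageHandler, sketched for orientation (illustrative names; assumes the server tree):

    // hypothetical sketch -- not code from this patch
    class LoggingHandler : public MessageHandler {
    public:
        virtual void connected( AbstractMessagingPort* p ) {
            log() << "client connected: " << p->remote().toString() << endl;
        }
        virtual void process( Message& m , AbstractMessagingPort* p , LastError* le ) {
            // a real handler dispatches on m.operation() and, for operations that
            // expect an answer (dbQuery, dbGetMore), builds one and calls p->reply(...)
            log() << "got op " << m.operation() << endl;
        }
        virtual void disconnected( AbstractMessagingPort* p ) {
            log() << "client disconnected" << endl;
        }
    };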
diff --git a/util/message_server_asio.cpp b/util/net/message_server_asio.cpp
index 0c6a7d9..0c6a7d9 100644
--- a/util/message_server_asio.cpp
+++ b/util/net/message_server_asio.cpp
diff --git a/util/message_server_port.cpp b/util/net/message_server_port.cpp
index 409b0c7..ca0b13d 100644
--- a/util/message_server_port.cpp
+++ b/util/net/message_server_port.cpp
@@ -20,13 +20,15 @@
#ifndef USE_ASIO
#include "message.h"
+#include "message_port.h"
#include "message_server.h"
+#include "listen.h"
-#include "../db/cmdline.h"
-#include "../db/lasterror.h"
-#include "../db/stats/counters.h"
+#include "../../db/cmdline.h"
+#include "../../db/lasterror.h"
+#include "../../db/stats/counters.h"
-#ifdef __linux__
+#ifdef __linux__ // TODO: consider making this ifndef _WIN32
# include <sys/resource.h>
#endif
@@ -38,13 +40,15 @@ namespace mongo {
void threadRun( MessagingPort * inPort) {
TicketHolderReleaser connTicketReleaser( &connTicketHolder );
-
- assert( inPort );
setThreadName( "conn" );
-
+
+ assert( inPort );
+ inPort->setLogLevel(1);
scoped_ptr<MessagingPort> p( inPort );
+ p->postFork();
+
string otherSide;
Message m;
@@ -52,11 +56,11 @@ namespace mongo {
LastError * le = new LastError();
lastError.reset( le ); // lastError now has ownership
- otherSide = p->farEnd.toString();
+ otherSide = p->remoteString();
handler->connected( p.get() );
- while ( 1 ) {
+ while ( ! inShutdown() ) {
m.reset();
p->clearCounters();
@@ -71,14 +75,25 @@ namespace mongo {
networkCounter.hit( p->getBytesIn() , p->getBytesOut() );
}
}
- catch ( const SocketException& ) {
- log() << "unclean socket shutdown from: " << otherSide << endl;
+ catch ( AssertionException& e ) {
+ log() << "AssertionException handling request, closing client connection: " << e << endl;
+ p->shutdown();
+ }
+ catch ( SocketException& e ) {
+ log() << "SocketException handling request, closing client connection: " << e << endl;
+ p->shutdown();
}
- catch ( const std::exception& e ) {
- problem() << "uncaught exception (" << e.what() << ")(" << demangleName( typeid(e) ) <<") in PortMessageServer::threadRun, closing connection" << endl;
+ catch ( const ClockSkewException & ) {
+ log() << "ClockSkewException - shutting down" << endl;
+ exitCleanly( EXIT_CLOCK_SKEW );
+ }
+ catch ( std::exception &e ) {
+ error() << "Uncaught std::exception: " << e.what() << ", terminating" << endl;
+ dbexit( EXIT_UNCAUGHT );
}
catch ( ... ) {
- problem() << "uncaught exception in PortMessageServer::threadRun, closing connection" << endl;
+ error() << "Uncaught exception, terminating" << endl;
+ dbexit( EXIT_UNCAUGHT );
}
handler->disconnected( p.get() );
@@ -89,7 +104,7 @@ namespace mongo {
class PortMessageServer : public MessageServer , public Listener {
public:
PortMessageServer( const MessageServer::Options& opts, MessageHandler * handler ) :
- Listener( opts.ipList, opts.port ) {
+ Listener( "" , opts.ipList, opts.port ) {
uassert( 10275 , "multiple PortMessageServer not supported" , ! pms::handler );
pms::handler = handler;
@@ -116,19 +131,19 @@ namespace mongo {
pthread_attr_init(&attrs);
pthread_attr_setdetachstate(&attrs, PTHREAD_CREATE_DETACHED);
- static const size_t STACK_SIZE = 4*1024*1024;
+ static const size_t STACK_SIZE = 1024*1024; // if we change this we need to update the warning
struct rlimit limits;
- assert(getrlimit(RLIMIT_STACK, &limits) == 0);
+ verify(15887, getrlimit(RLIMIT_STACK, &limits) == 0);
if (limits.rlim_cur > STACK_SIZE) {
pthread_attr_setstacksize(&attrs, (DEBUG_BUILD
? (STACK_SIZE / 2)
: STACK_SIZE));
- }
- else if (limits.rlim_cur < 1024*1024) {
- warning() << "Stack size set to " << (limits.rlim_cur/1024) << "KB. We suggest at least 1MB" << endl;
+ } else if (limits.rlim_cur < 1024*1024) {
+ warning() << "Stack size set to " << (limits.rlim_cur/1024) << "KB. We suggest 1MB" << endl;
}
+
pthread_t thread;
int failed = pthread_create(&thread, &attrs, (void*(*)(void*)) &pms::threadRun, p);
@@ -149,6 +164,16 @@ namespace mongo {
sleepmillis(2);
}
+ catch ( ... ) {
+ connTicketHolder.release();
+ log() << "unknown error accepting new socket" << endl;
+
+ p->shutdown();
+ delete p;
+
+ sleepmillis(2);
+ }
+
}
virtual void setAsTimeTracker() {
@@ -159,6 +184,7 @@ namespace mongo {
initAndListen();
}
+ virtual bool useUnixSockets() const { return true; }
};
diff --git a/util/miniwebserver.cpp b/util/net/miniwebserver.cpp
index e700112..0793100 100644
--- a/util/miniwebserver.cpp
+++ b/util/net/miniwebserver.cpp
@@ -17,14 +17,14 @@
#include "pch.h"
#include "miniwebserver.h"
-#include "hex.h"
+#include "../hex.h"
-#include <pcrecpp.h>
+#include "pcrecpp.h"
namespace mongo {
- MiniWebServer::MiniWebServer(const string &ip, int port)
- : Listener(ip, port, false)
+ MiniWebServer::MiniWebServer(const string& name, const string &ip, int port)
+ : Listener(name, ip, port, false)
{}
string MiniWebServer::parseURL( const char * buf ) {
@@ -108,17 +108,18 @@ namespace mongo {
return false;
}
- void MiniWebServer::accepted(int s, const SockAddr &from) {
- setSockTimeouts(s, 8);
+ void MiniWebServer::accepted(Socket sock) {
+ sock.postFork();
+ sock.setTimeout(8);
char buf[4096];
int len = 0;
while ( 1 ) {
int left = sizeof(buf) - 1 - len;
if( left == 0 )
break;
- int x = ::recv(s, buf + len, left, 0);
+ int x = sock.unsafe_recv( buf + len , left );
if ( x <= 0 ) {
- closesocket(s);
+ sock.close();
return;
}
len += x;
@@ -134,7 +135,7 @@ namespace mongo {
vector<string> headers;
try {
- doRequest(buf, parseURL( buf ), responseMsg, responseCode, headers, from);
+ doRequest(buf, parseURL( buf ), responseMsg, responseCode, headers, sock.remoteAddr() );
}
catch ( std::exception& e ) {
responseCode = 500;
@@ -165,8 +166,8 @@ namespace mongo {
ss << responseMsg;
string response = ss.str();
- ::send(s, response.c_str(), response.size(), 0);
- closesocket(s);
+ sock.send( response.c_str(), response.size() , "http response" );
+ sock.close();
}
string MiniWebServer::getHeader( const char * req , string wanted ) {
diff --git a/util/miniwebserver.h b/util/net/miniwebserver.h
index b385afc..1fb6b3f 100644
--- a/util/miniwebserver.h
+++ b/util/net/miniwebserver.h
@@ -17,15 +17,17 @@
#pragma once
-#include "../pch.h"
+#include "../../pch.h"
#include "message.h"
-#include "../db/jsobj.h"
+#include "message_port.h"
+#include "listen.h"
+#include "../../db/jsobj.h"
namespace mongo {
class MiniWebServer : public Listener {
public:
- MiniWebServer(const string &ip, int _port);
+ MiniWebServer(const string& name, const string &ip, int _port);
virtual ~MiniWebServer() {}
virtual void doRequest(
@@ -51,7 +53,7 @@ namespace mongo {
static string urlDecode(string s) {return urlDecode(s.c_str());}
private:
- void accepted(int s, const SockAddr &from);
+ void accepted(Socket socket);
static bool fullReceive( const char *buf );
};
diff --git a/util/net/sock.cpp b/util/net/sock.cpp
new file mode 100644
index 0000000..69c42f2
--- /dev/null
+++ b/util/net/sock.cpp
@@ -0,0 +1,713 @@
+// @file sock.cpp
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "pch.h"
+#include "sock.h"
+#include "../background.h"
+
+#if !defined(_WIN32)
+# include <sys/socket.h>
+# include <sys/types.h>
+# include <sys/socket.h>
+# include <sys/un.h>
+# include <netinet/in.h>
+# include <netinet/tcp.h>
+# include <arpa/inet.h>
+# include <errno.h>
+# include <netdb.h>
+# if defined(__openbsd__)
+# include <sys/uio.h>
+# endif
+#endif
+
+#ifdef MONGO_SSL
+#include <openssl/err.h>
+#include <openssl/ssl.h>
+#endif
+
+
+namespace mongo {
+
+ static bool ipv6 = false;
+ void enableIPv6(bool state) { ipv6 = state; }
+ bool IPv6Enabled() { return ipv6; }
+
+ void setSockTimeouts(int sock, double secs) {
+ struct timeval tv;
+ tv.tv_sec = (int)secs;
+ tv.tv_usec = (int)((long long)(secs*1000*1000) % (1000*1000));
+ bool report = logLevel > 3; // solaris doesn't provide these
+ DEV report = true;
+ bool ok = setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO, (char *) &tv, sizeof(tv) ) == 0;
+ if( report && !ok ) log() << "unable to set SO_RCVTIMEO" << endl;
+ ok = setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO, (char *) &tv, sizeof(tv) ) == 0;
+ DEV if( report && !ok ) log() << "unable to set SO_SNDTIMEO" << endl;
+ }
+
+#if defined(_WIN32)
+ void disableNagle(int sock) {
+ int x = 1;
+ if ( setsockopt(sock, IPPROTO_TCP, TCP_NODELAY, (char *) &x, sizeof(x)) )
+ error() << "disableNagle failed" << endl;
+ if ( setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE, (char *) &x, sizeof(x)) )
+ error() << "SO_KEEPALIVE failed" << endl;
+ }
+#else
+
+ void disableNagle(int sock) {
+ int x = 1;
+
+#ifdef SOL_TCP
+ int level = SOL_TCP;
+#else
+ int level = SOL_SOCKET;
+#endif
+
+ if ( setsockopt(sock, level, TCP_NODELAY, (char *) &x, sizeof(x)) )
+ error() << "disableNagle failed: " << errnoWithDescription() << endl;
+
+#ifdef SO_KEEPALIVE
+ if ( setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE, (char *) &x, sizeof(x)) )
+ error() << "SO_KEEPALIVE failed: " << errnoWithDescription() << endl;
+
+# ifdef __linux__
+ socklen_t len = sizeof(x);
+ if ( getsockopt(sock, level, TCP_KEEPIDLE, (char *) &x, &len) )
+ error() << "can't get TCP_KEEPIDLE: " << errnoWithDescription() << endl;
+
+ if (x > 300) {
+ x = 300;
+ if ( setsockopt(sock, level, TCP_KEEPIDLE, (char *) &x, sizeof(x)) ) {
+ error() << "can't set TCP_KEEPIDLE: " << errnoWithDescription() << endl;
+ }
+ }
+
+ len = sizeof(x); // just in case it changed
+ if ( getsockopt(sock, level, TCP_KEEPINTVL, (char *) &x, &len) )
+ error() << "can't get TCP_KEEPINTVL: " << errnoWithDescription() << endl;
+
+ if (x > 300) {
+ x = 300;
+ if ( setsockopt(sock, level, TCP_KEEPINTVL, (char *) &x, sizeof(x)) ) {
+ error() << "can't set TCP_KEEPINTVL: " << errnoWithDescription() << endl;
+ }
+ }
+# endif
+#endif
+
+ }
+
+#endif
+
+ string getAddrInfoStrError(int code) {
+#if !defined(_WIN32)
+ return gai_strerror(code);
+#else
+ /* gai_strerrorA is not threadsafe on windows. don't use it. */
+ return errnoWithDescription(code);
+#endif
+ }
+
+
+ // --- SockAddr
+
+ SockAddr::SockAddr(int sourcePort) {
+ memset(as<sockaddr_in>().sin_zero, 0, sizeof(as<sockaddr_in>().sin_zero));
+ as<sockaddr_in>().sin_family = AF_INET;
+ as<sockaddr_in>().sin_port = htons(sourcePort);
+ as<sockaddr_in>().sin_addr.s_addr = htonl(INADDR_ANY);
+ addressSize = sizeof(sockaddr_in);
+ }
+
+ SockAddr::SockAddr(const char * iporhost , int port) {
+ if (!strcmp(iporhost, "localhost"))
+ iporhost = "127.0.0.1";
+
+ if (strchr(iporhost, '/')) {
+#ifdef _WIN32
+ uassert(13080, "no unix socket support on windows", false);
+#endif
+ uassert(13079, "path to unix socket too long", strlen(iporhost) < sizeof(as<sockaddr_un>().sun_path));
+ as<sockaddr_un>().sun_family = AF_UNIX;
+ strcpy(as<sockaddr_un>().sun_path, iporhost);
+ addressSize = sizeof(sockaddr_un);
+ }
+ else {
+ addrinfo* addrs = NULL;
+ addrinfo hints;
+ memset(&hints, 0, sizeof(addrinfo));
+ hints.ai_socktype = SOCK_STREAM;
+ //hints.ai_flags = AI_ADDRCONFIG; // This is often recommended but don't do it. SERVER-1579
+ hints.ai_flags |= AI_NUMERICHOST; // first pass tries w/o DNS lookup
+ hints.ai_family = (IPv6Enabled() ? AF_UNSPEC : AF_INET);
+
+ StringBuilder ss;
+ ss << port;
+ int ret = getaddrinfo(iporhost, ss.str().c_str(), &hints, &addrs);
+
+ // old C compilers on IPv6-capable hosts return EAI_NODATA error
+#ifdef EAI_NODATA
+ int nodata = (ret == EAI_NODATA);
+#else
+ int nodata = false;
+#endif
+ if (ret == EAI_NONAME || nodata) {
+ // iporhost isn't an IP address, allow DNS lookup
+ hints.ai_flags &= ~AI_NUMERICHOST;
+ ret = getaddrinfo(iporhost, ss.str().c_str(), &hints, &addrs);
+ }
+
+ if (ret) {
+ // don't log this, as it is a CRT construction and log() may not work yet.
+ if( strcmp("0.0.0.0", iporhost) ) {
+ log() << "getaddrinfo(\"" << iporhost << "\") failed: " << gai_strerror(ret) << endl;
+ }
+ *this = SockAddr(port);
+ }
+ else {
+ //TODO: handle other addresses in linked list;
+ assert(addrs->ai_addrlen <= sizeof(sa));
+ memcpy(&sa, addrs->ai_addr, addrs->ai_addrlen);
+ addressSize = addrs->ai_addrlen;
+ freeaddrinfo(addrs);
+ }
+ }
+ }
+
+ bool SockAddr::isLocalHost() const {
+ switch (getType()) {
+ case AF_INET: return getAddr() == "127.0.0.1";
+ case AF_INET6: return getAddr() == "::1";
+ case AF_UNIX: return true;
+ default: return false;
+ }
+ assert(false);
+ return false;
+ }
+
+ string SockAddr::toString(bool includePort) const {
+ string out = getAddr();
+ if (includePort && getType() != AF_UNIX && getType() != AF_UNSPEC)
+ out += mongoutils::str::stream() << ':' << getPort();
+ return out;
+ }
+
+ sa_family_t SockAddr::getType() const {
+ return sa.ss_family;
+ }
+
+ unsigned SockAddr::getPort() const {
+ switch (getType()) {
+ case AF_INET: return ntohs(as<sockaddr_in>().sin_port);
+ case AF_INET6: return ntohs(as<sockaddr_in6>().sin6_port);
+ case AF_UNIX: return 0;
+ case AF_UNSPEC: return 0;
+ default: massert(SOCK_FAMILY_UNKNOWN_ERROR, "unsupported address family", false); return 0;
+ }
+ }
+
+ string SockAddr::getAddr() const {
+ switch (getType()) {
+ case AF_INET:
+ case AF_INET6: {
+ const int buflen=128;
+ char buffer[buflen];
+ int ret = getnameinfo(raw(), addressSize, buffer, buflen, NULL, 0, NI_NUMERICHOST);
+ massert(13082, getAddrInfoStrError(ret), ret == 0);
+ return buffer;
+ }
+
+ case AF_UNIX: return (addressSize > 2 ? as<sockaddr_un>().sun_path : "anonymous unix socket");
+ case AF_UNSPEC: return "(NONE)";
+ default: massert(SOCK_FAMILY_UNKNOWN_ERROR, "unsupported address family", false); return "";
+ }
+ }
+
+ bool SockAddr::operator==(const SockAddr& r) const {
+ if (getType() != r.getType())
+ return false;
+
+ if (getPort() != r.getPort())
+ return false;
+
+ switch (getType()) {
+ case AF_INET: return as<sockaddr_in>().sin_addr.s_addr == r.as<sockaddr_in>().sin_addr.s_addr;
+ case AF_INET6: return memcmp(as<sockaddr_in6>().sin6_addr.s6_addr, r.as<sockaddr_in6>().sin6_addr.s6_addr, sizeof(in6_addr)) == 0;
+ case AF_UNIX: return strcmp(as<sockaddr_un>().sun_path, r.as<sockaddr_un>().sun_path) == 0;
+ case AF_UNSPEC: return true; // assume all unspecified addresses are the same
+ default: massert(SOCK_FAMILY_UNKNOWN_ERROR, "unsupported address family", false);
+ }
+ return false;
+ }
+
+ bool SockAddr::operator!=(const SockAddr& r) const {
+ return !(*this == r);
+ }
+
+ bool SockAddr::operator<(const SockAddr& r) const {
+ if (getType() < r.getType())
+ return true;
+ else if (getType() > r.getType())
+ return false;
+
+ if (getPort() < r.getPort())
+ return true;
+ else if (getPort() > r.getPort())
+ return false;
+
+ switch (getType()) {
+ case AF_INET: return as<sockaddr_in>().sin_addr.s_addr < r.as<sockaddr_in>().sin_addr.s_addr;
+ case AF_INET6: return memcmp(as<sockaddr_in6>().sin6_addr.s6_addr, r.as<sockaddr_in6>().sin6_addr.s6_addr, sizeof(in6_addr)) < 0;
+ case AF_UNIX: return strcmp(as<sockaddr_un>().sun_path, r.as<sockaddr_un>().sun_path) < 0;
+ case AF_UNSPEC: return false;
+ default: massert(SOCK_FAMILY_UNKNOWN_ERROR, "unsupported address family", false);
+ }
+ return false;
+ }
+
+ SockAddr unknownAddress( "0.0.0.0", 0 );
+
+ // ------ hostname -------------------
+
+ string hostbyname(const char *hostname) {
+ string addr = SockAddr(hostname, 0).getAddr();
+ if (addr == "0.0.0.0")
+ return "";
+ else
+ return addr;
+ }
+
+ // --- my --
+
+ string getHostName() {
+ char buf[256];
+ int ec = gethostname(buf, 127);
+ if ( ec || *buf == 0 ) {
+ log() << "can't get this server's hostname " << errnoWithDescription() << endl;
+ return "";
+ }
+ return buf;
+ }
+
+
+ string _hostNameCached;
+ static void _hostNameCachedInit() {
+ _hostNameCached = getHostName();
+ }
+ boost::once_flag _hostNameCachedInitFlags = BOOST_ONCE_INIT;
+
+ string getHostNameCached() {
+ boost::call_once( _hostNameCachedInit , _hostNameCachedInitFlags );
+ return _hostNameCached;
+ }
+
+ // --------- SocketException ----------
+
+#ifdef MSG_NOSIGNAL
+ const int portSendFlags = MSG_NOSIGNAL;
+ const int portRecvFlags = MSG_NOSIGNAL;
+#else
+ const int portSendFlags = 0;
+ const int portRecvFlags = 0;
+#endif
+
+ string SocketException::toString() const {
+ stringstream ss;
+ ss << _ei.code << " socket exception [" << _type << "] ";
+
+ if ( _server.size() )
+ ss << "server [" << _server << "] ";
+
+ if ( _extra.size() )
+ ss << _extra;
+
+ return ss.str();
+ }
+
+
+ // ------------ SSLManager -----------------
+
+#ifdef MONGO_SSL
+ SSLManager::SSLManager( bool client ) {
+ _client = client;
+ SSL_library_init();
+ SSL_load_error_strings();
+ ERR_load_crypto_strings();
+
+ _context = SSL_CTX_new( client ? SSLv23_client_method() : SSLv23_server_method() );
+ massert( 15864 , mongoutils::str::stream() << "can't create SSL Context: " << ERR_error_string(ERR_get_error(), NULL) , _context );
+
+ SSL_CTX_set_options( _context, SSL_OP_ALL);
+ }
+
+ void SSLManager::setupPubPriv( const string& privateKeyFile , const string& publicKeyFile ) {
+ massert( 15865 ,
+ mongoutils::str::stream() << "Can't read SSL certificate from file "
+ << publicKeyFile << ":" << ERR_error_string(ERR_get_error(), NULL) ,
+ SSL_CTX_use_certificate_file(_context, publicKeyFile.c_str(), SSL_FILETYPE_PEM) );
+
+
+ massert( 15866 ,
+ mongoutils::str::stream() << "Can't read SSL private key from file "
+ << privateKeyFile << " : " << ERR_error_string(ERR_get_error(), NULL) ,
+ SSL_CTX_use_PrivateKey_file(_context, privateKeyFile.c_str(), SSL_FILETYPE_PEM) );
+ }
+
+
+ int SSLManager::password_cb(char *buf,int num, int rwflag,void *userdata){
+ SSLManager* sm = (SSLManager*)userdata;
+ string pass = sm->_password;
+ strcpy(buf,pass.c_str());
+ return(pass.size());
+ }
+
+ void SSLManager::setupPEM( const string& keyFile , const string& password ) {
+ _password = password;
+
+ massert( 15867 , "Can't read certificate file" , SSL_CTX_use_certificate_chain_file( _context , keyFile.c_str() ) );
+
+ SSL_CTX_set_default_passwd_cb_userdata( _context , this );
+ SSL_CTX_set_default_passwd_cb( _context, &SSLManager::password_cb );
+
+ massert( 15868 , "Can't read key file" , SSL_CTX_use_PrivateKey_file( _context , keyFile.c_str() , SSL_FILETYPE_PEM ) );
+ }
+
+ SSL * SSLManager::secure( int fd ) {
+ SSL * ssl = SSL_new( _context );
+ massert( 15861 , "can't create SSL" , ssl );
+ SSL_set_fd( ssl , fd );
+ return ssl;
+ }
+
+
+#endif
+
+ // ------------ Socket -----------------
+
+ Socket::Socket(int fd , const SockAddr& remote) :
+ _fd(fd), _remote(remote), _timeout(0) {
+ _logLevel = 0;
+ _init();
+ }
+
+ Socket::Socket( double timeout, int ll ) {
+ _logLevel = ll;
+ _fd = -1;
+ _timeout = timeout;
+ _init();
+ }
+
+ void Socket::_init() {
+ _bytesOut = 0;
+ _bytesIn = 0;
+#ifdef MONGO_SSL
+ _sslAccepted = 0;
+#endif
+ }
+
+ void Socket::close() {
+#ifdef MONGO_SSL
+ _ssl.reset();
+#endif
+ if ( _fd >= 0 ) {
+ closesocket( _fd );
+ _fd = -1;
+ }
+ }
+
+#ifdef MONGO_SSL
+ void Socket::secure( SSLManager * ssl ) {
+ assert( ssl );
+ assert( _fd >= 0 );
+ _ssl.reset( ssl->secure( _fd ) );
+ SSL_connect( _ssl.get() );
+ }
+
+ void Socket::secureAccepted( SSLManager * ssl ) {
+ _sslAccepted = ssl;
+ }
+#endif
+
+ void Socket::postFork() {
+#ifdef MONGO_SSL
+ if ( _sslAccepted ) {
+ assert( _fd );
+ _ssl.reset( _sslAccepted->secure( _fd ) );
+ SSL_accept( _ssl.get() );
+ _sslAccepted = 0;
+ }
+#endif
+ }
+
+ class ConnectBG : public BackgroundJob {
+ public:
+ ConnectBG(int sock, SockAddr remote) : _sock(sock), _remote(remote) { }
+
+ void run() { _res = ::connect(_sock, _remote.raw(), _remote.addressSize); }
+ string name() const { return "ConnectBG"; }
+ int inError() const { return _res; }
+
+ private:
+ int _sock;
+ int _res;
+ SockAddr _remote;
+ };
+
+ bool Socket::connect(SockAddr& remote) {
+ _remote = remote;
+
+ _fd = socket(remote.getType(), SOCK_STREAM, 0);
+ if ( _fd == INVALID_SOCKET ) {
+ log(_logLevel) << "ERROR: connect invalid socket " << errnoWithDescription() << endl;
+ return false;
+ }
+
+ if ( _timeout > 0 ) {
+ setTimeout( _timeout );
+ }
+
+ ConnectBG bg(_fd, remote);
+ bg.go();
+ if ( bg.wait(5000) ) {
+ if ( bg.inError() ) {
+ close();
+ return false;
+ }
+ }
+ else {
+ // time out the connect
+ close();
+ bg.wait(); // so bg stays in scope until bg thread terminates
+ return false;
+ }
+
+ if (remote.getType() != AF_UNIX)
+ disableNagle(_fd);
+
+#ifdef SO_NOSIGPIPE
+ // osx
+ const int one = 1;
+ setsockopt( _fd , SOL_SOCKET, SO_NOSIGPIPE, &one, sizeof(int));
+#endif
+
+ return true;
+ }
+
+ int Socket::_send( const char * data , int len ) {
+#ifdef MONGO_SSL
+ if ( _ssl ) {
+ return SSL_write( _ssl.get() , data , len );
+ }
+#endif
+ return ::send( _fd , data , len , portSendFlags );
+ }
+
+ // sends all data or throws an exception
+ void Socket::send( const char * data , int len, const char *context ) {
+ while( len > 0 ) {
+ int ret = _send( data , len );
+ if ( ret == -1 ) {
+
+#ifdef MONGO_SSL
+ if ( _ssl ) {
+ log() << "SSL Error ret: " << ret << " err: " << SSL_get_error( _ssl.get() , ret )
+ << " " << ERR_error_string(ERR_get_error(), NULL)
+ << endl;
+ }
+#endif
+
+#if defined(_WIN32)
+ if ( WSAGetLastError() == WSAETIMEDOUT && _timeout != 0 ) {
+#else
+ if ( ( errno == EAGAIN || errno == EWOULDBLOCK ) && _timeout != 0 ) {
+#endif
+ log(_logLevel) << "Socket " << context << " send() timed out " << _remote.toString() << endl;
+ throw SocketException( SocketException::SEND_TIMEOUT , remoteString() );
+ }
+ else {
+ SocketException::Type t = SocketException::SEND_ERROR;
+ log(_logLevel) << "Socket " << context << " send() "
+ << errnoWithDescription() << ' ' << remoteString() << endl;
+ throw SocketException( t , remoteString() );
+ }
+ }
+ else {
+ _bytesOut += ret;
+
+ assert( ret <= len );
+ len -= ret;
+ data += ret;
+ }
+ }
+ }
+
+ void Socket::_send( const vector< pair< char *, int > > &data, const char *context ) {
+ for( vector< pair< char *, int > >::const_iterator i = data.begin(); i != data.end(); ++i ) {
+ char * data = i->first;
+ int len = i->second;
+ send( data, len, context );
+ }
+ }
+
+ // sends all data or throws an exception
+ void Socket::send( const vector< pair< char *, int > > &data, const char *context ) {
+
+#ifdef MONGO_SSL
+ if ( _ssl ) {
+ _send( data , context );
+ return;
+ }
+#endif
+
+#if defined(_WIN32)
+ // TODO use scatter/gather api
+ _send( data , context );
+#else
+ vector< struct iovec > d( data.size() );
+ int i = 0;
+ for( vector< pair< char *, int > >::const_iterator j = data.begin(); j != data.end(); ++j ) {
+ if ( j->second > 0 ) {
+ d[ i ].iov_base = j->first;
+ d[ i ].iov_len = j->second;
+ ++i;
+ _bytesOut += j->second;
+ }
+ }
+ struct msghdr meta;
+ memset( &meta, 0, sizeof( meta ) );
+ meta.msg_iov = &d[ 0 ];
+ meta.msg_iovlen = d.size();
+
+ while( meta.msg_iovlen > 0 ) {
+ int ret = ::sendmsg( _fd , &meta , portSendFlags );
+ if ( ret == -1 ) {
+ if ( errno != EAGAIN || _timeout == 0 ) {
+ log(_logLevel) << "Socket " << context << " send() " << errnoWithDescription() << ' ' << remoteString() << endl;
+ throw SocketException( SocketException::SEND_ERROR , remoteString() );
+ }
+ else {
+ log(_logLevel) << "Socket " << context << " send() remote timeout " << remoteString() << endl;
+ throw SocketException( SocketException::SEND_TIMEOUT , remoteString() );
+ }
+ }
+ else {
+ struct iovec *& i = meta.msg_iov;
+ while( ret > 0 ) {
+ if ( i->iov_len > unsigned( ret ) ) {
+ i->iov_len -= ret;
+ i->iov_base = (char*)(i->iov_base) + ret;
+ ret = 0;
+ }
+ else {
+ ret -= i->iov_len;
+ ++i;
+ --(meta.msg_iovlen);
+ }
+ }
+ }
+ }
+#endif
+ }
+
+ void Socket::recv( char * buf , int len ) {
+ unsigned retries = 0;
+ while( len > 0 ) {
+ int ret = unsafe_recv( buf , len );
+ if ( ret > 0 ) {
+ if ( len <= 4 && ret != len )
+ log(_logLevel) << "Socket recv() got " << ret << " bytes wanted len=" << len << endl;
+ assert( ret <= len );
+ len -= ret;
+ buf += ret;
+ }
+ else if ( ret == 0 ) {
+ log(3) << "Socket recv() conn closed? " << remoteString() << endl;
+ throw SocketException( SocketException::CLOSED , remoteString() );
+ }
+ else { /* ret < 0 */
+#if defined(_WIN32)
+ int e = WSAGetLastError();
+#else
+ int e = errno;
+# if defined(EINTR)
+ if( e == EINTR ) {
+ if( ++retries == 1 ) {
+ log() << "EINTR retry" << endl;
+ continue;
+ }
+ }
+# endif
+#endif
+ if ( ( e == EAGAIN
+#if defined(_WIN32)
+ || e == WSAETIMEDOUT
+#endif
+ ) && _timeout > 0 )
+ {
+ // this is a timeout
+ log(_logLevel) << "Socket recv() timeout " << remoteString() <<endl;
+ throw SocketException( SocketException::RECV_TIMEOUT, remoteString() );
+ }
+
+ log(_logLevel) << "Socket recv() " << errnoWithDescription(e) << " " << remoteString() <<endl;
+ throw SocketException( SocketException::RECV_ERROR , remoteString() );
+ }
+ }
+ }
+
+ int Socket::unsafe_recv( char *buf, int max ) {
+ int x = _recv( buf , max );
+ _bytesIn += x;
+ return x;
+ }
+
+
+ int Socket::_recv( char *buf, int max ) {
+#ifdef MONGO_SSL
+ if ( _ssl ){
+ return SSL_read( _ssl.get() , buf , max );
+ }
+#endif
+ return ::recv( _fd , buf , max , portRecvFlags );
+ }
+
+ void Socket::setTimeout( double secs ) {
+ struct timeval tv;
+ tv.tv_sec = (int)secs;
+ tv.tv_usec = (int)((long long)(secs*1000*1000) % (1000*1000));
+ bool report = logLevel > 3; // solaris doesn't provide these
+ DEV report = true;
+ bool ok = setsockopt(_fd, SOL_SOCKET, SO_RCVTIMEO, (char *) &tv, sizeof(tv) ) == 0;
+ if( report && !ok ) log() << "unable to set SO_RCVTIMEO" << endl;
+ ok = setsockopt(_fd, SOL_SOCKET, SO_SNDTIMEO, (char *) &tv, sizeof(tv) ) == 0;
+ DEV if( report && !ok ) log() << "unable to set SO_SNDTIMEO" << endl;
+ }
+
+#if defined(_WIN32)
+ struct WinsockInit {
+ WinsockInit() {
+ WSADATA d;
+ if ( WSAStartup(MAKEWORD(2,2), &d) != 0 ) {
+ out() << "ERROR: wsastartup failed " << errnoWithDescription() << endl;
+ problem() << "ERROR: wsastartup failed " << errnoWithDescription() << endl;
+ dbexit( EXIT_NTSERVICE_ERROR );
+ }
+ }
+ } winsock_init;
+#endif
+
+} // namespace mongo
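The least obvious piece of the new vector overload of Socket::send() is the bookkeeping after a partial sendmsg(): fully-sent iovecs are dropped and the first partially-sent one is advanced in place. The same logic on plain data, as a standalone POSIX-only check mirroring the non-Windows branch (not part of this patch):

    // standalone illustration of the partial-send bookkeeping -- not code from this patch
    #include <sys/uio.h>
    #include <stdio.h>

    // mirror of the loop Socket::send() runs between sendmsg() calls
    static void advance( struct iovec*& iov , size_t& iovcnt , size_t sent ) {
        while ( sent > 0 ) {
            if ( iov->iov_len > sent ) {            // this buffer was only partially sent
                iov->iov_len -= sent;
                iov->iov_base = (char*)iov->iov_base + sent;
                sent = 0;
            }
            else {                                  // this buffer was fully sent
                sent -= iov->iov_len;
                ++iov;
                --iovcnt;
            }
        }
    }

    int main() {
        char a[10], b[20];
        struct iovec vecs[2] = { { a, sizeof(a) } , { b, sizeof(b) } };
        struct iovec* iov = vecs;
        size_t n = 2;
        advance( iov , n , 15 );                    // pretend sendmsg() reported 15 bytes written
        printf( "%zu iovec(s) left, first has %zu bytes\n" , n , (size_t)iov->iov_len ); // 1, 15
        return 0;
    }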
diff --git a/util/net/sock.h b/util/net/sock.h
new file mode 100644
index 0000000..1cd5133
--- /dev/null
+++ b/util/net/sock.h
@@ -0,0 +1,256 @@
+// @file sock.h
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "../../pch.h"
+
+#include <stdio.h>
+#include <sstream>
+#include "../goodies.h"
+#include "../../db/cmdline.h"
+#include "../mongoutils/str.h"
+
+#ifndef _WIN32
+
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+#include <errno.h>
+
+#ifdef __openbsd__
+# include <sys/uio.h>
+#endif
+
+#endif // _WIN32
+
+#ifdef MONGO_SSL
+#include <openssl/ssl.h>
+#endif
+
+namespace mongo {
+
+ const int SOCK_FAMILY_UNKNOWN_ERROR=13078;
+
+ void disableNagle(int sock);
+
+#if defined(_WIN32)
+
+ typedef short sa_family_t;
+ typedef int socklen_t;
+
+ // This won't actually be used on windows
+ struct sockaddr_un {
+ short sun_family;
+ char sun_path[108]; // length from unix header
+ };
+
+#else // _WIN32
+
+ inline void closesocket(int s) { close(s); }
+ const int INVALID_SOCKET = -1;
+ typedef int SOCKET;
+
+#endif // _WIN32
+
+ inline string makeUnixSockPath(int port) {
+ return mongoutils::str::stream() << cmdLine.socket << "/mongodb-" << port << ".sock";
+ }
+
+ // If an ip address is passed in, just return that. If a hostname is passed
+ // in, look up its ip and return that. Returns "" on failure.
+ string hostbyname(const char *hostname);
+
+ void enableIPv6(bool state=true);
+ bool IPv6Enabled();
+ void setSockTimeouts(int sock, double secs);
+
+ /**
+ * wrapper around the os representation of a network address
+ */
+ struct SockAddr {
+ SockAddr() {
+ addressSize = sizeof(sa);
+ memset(&sa, 0, sizeof(sa));
+ sa.ss_family = AF_UNSPEC;
+ }
+ SockAddr(int sourcePort); /* listener side */
+ SockAddr(const char *ip, int port); /* EndPoint (remote) side, or if you want to specify which interface locally */
+
+ template <typename T> T& as() { return *(T*)(&sa); }
+ template <typename T> const T& as() const { return *(const T*)(&sa); }
+
+ string toString(bool includePort=true) const;
+
+ /**
+ * @return one of AF_INET, AF_INET6, or AF_UNIX
+ */
+ sa_family_t getType() const;
+
+ unsigned getPort() const;
+
+ string getAddr() const;
+
+ bool isLocalHost() const;
+
+ bool operator==(const SockAddr& r) const;
+
+ bool operator!=(const SockAddr& r) const;
+
+ bool operator<(const SockAddr& r) const;
+
+ const sockaddr* raw() const {return (sockaddr*)&sa;}
+ sockaddr* raw() {return (sockaddr*)&sa;}
+
+ socklen_t addressSize;
+ private:
+ struct sockaddr_storage sa;
+ };
+
+ extern SockAddr unknownAddress; // ( "0.0.0.0", 0 )
+
+ /** this is not cached and does a syscall */
+ string getHostName();
+
+ /** this is cached, so if the hostname changes during the process lifetime
+ * the cached value will be stale */
+ string getHostNameCached();
+
+ /**
+ * thrown by Socket and SockAddr
+ */
+ class SocketException : public DBException {
+ public:
+ const enum Type { CLOSED , RECV_ERROR , SEND_ERROR, RECV_TIMEOUT, SEND_TIMEOUT, FAILED_STATE, CONNECT_ERROR } _type;
+
+ SocketException( Type t , string server , int code = 9001 , string extra="" )
+ : DBException( "socket exception" , code ) , _type(t) , _server(server), _extra(extra){ }
+ virtual ~SocketException() throw() {}
+
+ bool shouldPrint() const { return _type != CLOSED; }
+ virtual string toString() const;
+
+ private:
+ string _server;
+ string _extra;
+ };
+
+#ifdef MONGO_SSL
+ class SSLManager : boost::noncopyable {
+ public:
+ SSLManager( bool client );
+
+ void setupPEM( const string& keyFile , const string& password );
+ void setupPubPriv( const string& privateKeyFile , const string& publicKeyFile );
+
+ /**
+ * creates an SSL object (using this manager's context) for the given file descriptor
+ * ownership passes to the caller
+ */
+ SSL * secure( int fd );
+
+ static int password_cb( char *buf,int num, int rwflag,void *userdata );
+
+ private:
+ bool _client;
+ SSL_CTX* _context;
+ string _password;
+ };
+#endif
+
+ /**
+ * thin wrapper around a file descriptor and socket system calls
+ * todo: ssl
+ */
+ class Socket {
+ public:
+ Socket(int sock, const SockAddr& farEnd);
+
+ /** In some cases the timeout will actually be 2x this value - eg we do a partial send,
+ then the timeout fires, then we try to send again, then the timeout fires again with
+ no data sent, then we detect that the other side is down.
+
+ Generally you don't want a timeout, you should be very prepared for errors if you set one.
+ */
+ Socket(double so_timeout = 0, int logLevel = 0 );
+
+ bool connect(SockAddr& farEnd);
+ void close();
+
+ void send( const char * data , int len, const char *context );
+ void send( const vector< pair< char *, int > > &data, const char *context );
+
+ // recv len or throw SocketException
+ void recv( char * data , int len );
+ int unsafe_recv( char *buf, int max );
+
+ int getLogLevel() const { return _logLevel; }
+ void setLogLevel( int ll ) { _logLevel = ll; }
+
+ SockAddr remoteAddr() const { return _remote; }
+ string remoteString() const { return _remote.toString(); }
+ unsigned remotePort() const { return _remote.getPort(); }
+
+ void clearCounters() { _bytesIn = 0; _bytesOut = 0; }
+ long long getBytesIn() const { return _bytesIn; }
+ long long getBytesOut() const { return _bytesOut; }
+
+ void setTimeout( double secs );
+
+#ifdef MONGO_SSL
+ /** secures inline */
+ void secure( SSLManager * ssl );
+
+ void secureAccepted( SSLManager * ssl );
+#endif
+
+ /**
+ * call this after a fork for server sockets
+ */
+ void postFork();
+
+ private:
+ void _init();
+ /** raw send, same semantics as ::send */
+ int _send( const char * data , int len );
+
+ /** sends dumbly, just each buffer at a time */
+ void _send( const vector< pair< char *, int > > &data, const char *context );
+
+ /** raw recv, same semantics as ::recv */
+ int _recv( char * buf , int max );
+
+ int _fd;
+ SockAddr _remote;
+ double _timeout;
+
+ long long _bytesIn;
+ long long _bytesOut;
+
+#ifdef MONGO_SSL
+ shared_ptr<SSL> _ssl;
+ SSLManager * _sslAccepted;
+#endif
+
+ protected:
+ int _logLevel; // passed to log() when logging errors
+
+ };
+
+
+} // namespace mongo
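A sketch of the bare Socket API from this header, for orientation (the bytes sent are meaningless to a real server; assumes the server tree):

    // hypothetical sketch -- not code from this patch
    void rawSocketSketch() {
        Socket sock( 10 /* timeout in seconds; 0 means no timeout */ );
        SockAddr addr( "127.0.0.1" , 27017 );
        if ( ! sock.connect( addr ) ) {
            log() << "couldn't connect to " << addr.toString() << endl;
            return;
        }
        const char payload[] = "hello";
        sock.send( payload , sizeof(payload) , "sketch" );   // sends everything or throws SocketException
        char buf[4];
        sock.recv( buf , sizeof(buf) );                      // reads exactly 4 bytes or throws
        sock.close();
    }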
diff --git a/util/optime.h b/util/optime.h
index 7e6be4d..9f78fda 100644
--- a/util/optime.h
+++ b/util/optime.h
@@ -26,7 +26,7 @@ namespace mongo {
ClockSkewException() : DBException( "clock skew exception" , 20001 ) {}
};
- /* replsets use RSOpTime.
+ /* replsets used to use RSOpTime.
M/S uses OpTime.
But this is useable from both.
*/
@@ -36,9 +36,10 @@ namespace mongo {
*/
#pragma pack(4)
class OpTime {
- unsigned i;
+ unsigned i; // ordinal comes first so we can do a single 64 bit compare on little endian
unsigned secs;
static OpTime last;
+ static OpTime skewed();
public:
static void setLast(const Date_t &date) {
last = OpTime(date);
@@ -46,47 +47,48 @@ namespace mongo {
unsigned getSecs() const {
return secs;
}
+ unsigned getInc() const {
+ return i;
+ }
OpTime(Date_t date) {
reinterpret_cast<unsigned long long&>(*this) = date.millis;
+ dassert( (int)secs >= 0 );
}
OpTime(ReplTime x) {
reinterpret_cast<unsigned long long&>(*this) = x;
+ dassert( (int)secs >= 0 );
}
OpTime(unsigned a, unsigned b) {
secs = a;
i = b;
+ dassert( (int)secs >= 0 );
}
OpTime( const OpTime& other ) {
secs = other.secs;
i = other.i;
+ dassert( (int)secs >= 0 );
}
OpTime() {
secs = 0;
i = 0;
}
- static OpTime now() {
+ // it generally isn't safe to call this without holding the lock, so use now(); some tests call this directly.
+ static OpTime now_inlock() {
unsigned t = (unsigned) time(0);
- if ( t < last.secs ) {
- bool toLog = false;
- ONCE toLog = true;
- RARELY toLog = true;
- if ( last.i & 0x80000000 )
- toLog = true;
- if ( toLog )
- log() << "clock skew detected prev: " << last.secs << " now: " << t << " trying to handle..." << endl;
- if ( last.i & 0x80000000 ) {
- log() << "ERROR Large clock skew detected, shutting down" << endl;
- throw ClockSkewException();
- }
- t = last.secs;
- }
if ( last.secs == t ) {
last.i++;
return last;
}
+ if ( t < last.secs ) {
+ return skewed(); // separate function to keep out of the hot code path
+ }
last = OpTime(t, 1);
return last;
}
+ static OpTime now() {
+ DEV dbMutex.assertWriteLocked();
+ return now_inlock();
+ }
/* We store OpTime's in the database as BSON Date datatype -- we needed some sort of
64 bit "container" for these values. While these are not really "Dates", that seems a
diff --git a/util/paths.h b/util/paths.h
index ce0a378..2297a9a 100644
--- a/util/paths.h
+++ b/util/paths.h
@@ -19,10 +19,13 @@
#pragma once
#include "mongoutils/str.h"
-
-using namespace mongoutils;
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
namespace mongo {
+
+ using namespace mongoutils;
extern string dbpath;
@@ -76,4 +79,39 @@ namespace mongo {
};
+ inline dev_t getPartition(const string& path){
+ struct stat stats;
+
+ if (stat(path.c_str(), &stats) != 0){
+ uasserted(13646, str::stream() << "stat() failed for file: " << path << " " << errnoWithDescription());
+ }
+
+ return stats.st_dev;
+ }
+
+ inline bool onSamePartition(const string& path1, const string& path2){
+ dev_t dev1 = getPartition(path1);
+ dev_t dev2 = getPartition(path2);
+
+ return dev1 == dev2;
+ }
+
+ inline void flushMyDirectory(const boost::filesystem::path& file){
+#ifdef __linux__ // this isn't needed elsewhere
+ massert(13652, str::stream() << "Couldn't find parent dir for file: " << file.string(), file.has_branch_path());
+ boost::filesystem::path dir = file.branch_path(); // parent_path in new boosts
+
+ log(1) << "flushing directory " << dir.string() << endl;
+
+ int fd = ::open(dir.string().c_str(), O_RDONLY); // DO NOT THROW OR ASSERT BEFORE CLOSING
+ massert(13650, str::stream() << "Couldn't open directory '" << dir.string() << "' for flushing: " << errnoWithDescription(), fd >= 0);
+ if (fsync(fd) != 0){
+ int e = errno;
+ close(fd);
+ massert(13651, str::stream() << "Couldn't fsync directory '" << dir.string() << "': " << errnoWithDescription(e), false);
+ }
+ close(fd);
+#endif
+ }
+
}
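How the new helpers are meant to be used, sketched (the paths are illustrative; note getPartition() uasserts if a path does not exist):

    // hypothetical sketch -- not code from this patch
    void durabilitySketch() {
        if ( ! onSamePartition( "/data/db" , "/data/db/journal" ) )
            log() << "journal and datafiles are on different partitions" << endl;

        // after creating or renaming a file, flush its parent directory so the
        // directory entry itself is durable (a no-op everywhere except Linux)
        flushMyDirectory( boost::filesystem::path( "/data/db/journal/j._0" ) );
    }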
diff --git a/util/processinfo.h b/util/processinfo.h
index b10e6fe..5272831 100644
--- a/util/processinfo.h
+++ b/util/processinfo.h
@@ -53,8 +53,8 @@ namespace mongo {
bool supported();
- bool blockCheckSupported();
- bool blockInMemory( char * start );
+ static bool blockCheckSupported();
+ static bool blockInMemory( char * start );
private:
pid_t _pid;
@@ -62,6 +62,6 @@ namespace mongo {
void writePidFile( const std::string& path );
- void printMemInfo( const char * where );
+ void printMemInfo( const char * whereContextStr = 0 );
}
diff --git a/util/processinfo_darwin.cpp b/util/processinfo_darwin.cpp
index c1190ae..9f73cbf 100644
--- a/util/processinfo_darwin.cpp
+++ b/util/processinfo_darwin.cpp
@@ -19,15 +19,14 @@
#include "processinfo.h"
#include "log.h"
-
+#include <mach/vm_statistics.h>
#include <mach/task_info.h>
-
#include <mach/mach_init.h>
#include <mach/mach_host.h>
#include <mach/mach_traps.h>
#include <mach/task.h>
#include <mach/vm_map.h>
-#include <mach/shared_memory_server.h>
+#include <mach/shared_region.h>
#include <iostream>
#include <sys/types.h>
diff --git a/util/processinfo_win32.cpp b/util/processinfo_win32.cpp
index d62b21b..ec66aec 100644
--- a/util/processinfo_win32.cpp
+++ b/util/processinfo_win32.cpp
@@ -17,10 +17,7 @@
#include "pch.h"
#include "processinfo.h"
-
#include <iostream>
-
-#include <windows.h>
#include <psapi.h>
using namespace std;
diff --git a/util/queue.h b/util/queue.h
index 6a1e33a..4223bd6 100644
--- a/util/queue.h
+++ b/util/queue.h
@@ -43,6 +43,12 @@ namespace mongo {
return _queue.empty();
}
+ size_t size() const {
+ scoped_lock l( _lock );
+ return _queue.size();
+ }
+
+
bool tryPop( T & t ) {
scoped_lock l( _lock );
if ( _queue.empty() )
diff --git a/util/ramlog.cpp b/util/ramlog.cpp
new file mode 100644
index 0000000..69ffc17
--- /dev/null
+++ b/util/ramlog.cpp
@@ -0,0 +1,190 @@
+// ramlog.cpp
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "pch.h"
+#include "log.h"
+#include "ramlog.h"
+#include "mongoutils/html.h"
+#include "mongoutils/str.h"
+
+namespace mongo {
+
+ using namespace mongoutils;
+
+ RamLog::RamLog( string name ) : _name(name), _lastWrite(0) {
+ h = 0; n = 0;
+ for( int i = 0; i < N; i++ )
+ lines[i][C-1] = 0;
+
+ if ( name.size() ) {
+
+ if ( ! _namedLock )
+ _namedLock = new mongo::mutex("RamLog::_namedLock");
+
+ scoped_lock lk( *_namedLock );
+ if ( ! _named )
+ _named = new RM();
+ (*_named)[name] = this;
+ }
+
+ }
+
+ RamLog::~RamLog() {
+
+ }
+
+ void RamLog::write(LogLevel ll, const string& str) {
+ _lastWrite = time(0);
+
+ char *p = lines[(h+n)%N];
+
+ unsigned sz = str.size();
+ if( sz < C ) {
+ if ( str.c_str()[sz-1] == '\n' ) {
+ memcpy(p, str.c_str(), sz-1);
+ p[sz-1] = 0;
+ }
+ else
+ strcpy(p, str.c_str());
+ }
+ else {
+ memcpy(p, str.c_str(), C-1);
+ }
+
+ if( n < N ) n++;
+ else h = (h+1) % N;
+ }
+
+ void RamLog::get( vector<const char*>& v) const {
+ for( unsigned x=0, i=h; x++ < n; i=(i+1)%N )
+ v.push_back(lines[i]);
+ }
+
+ int RamLog::repeats(const vector<const char *>& v, int i) {
+ for( int j = i-1; j >= 0 && j+8 > i; j-- ) {
+ if( strcmp(v[i]+20,v[j]+20) == 0 ) {
+ for( int x = 1; ; x++ ) {
+ if( j+x == i ) return j;
+ if( i+x>=(int) v.size() ) return -1;
+ if( strcmp(v[i+x]+20,v[j+x]+20) ) return -1;
+ }
+ return -1;
+ }
+ }
+ return -1;
+ }
+
+
+ string RamLog::clean(const vector<const char *>& v, int i, string line ) {
+ if( line.empty() ) line = v[i];
+ if( i > 0 && strncmp(v[i], v[i-1], 11) == 0 )
+ return string(" ") + line.substr(11);
+ return v[i];
+ }
+
+ string RamLog::color(string line) {
+ string s = str::after(line, "replSet ");
+ if( str::startsWith(s, "warning") || startsWith(s, "error") )
+ return html::red(line);
+ if( str::startsWith(s, "info") ) {
+ if( str::endsWith(s, " up\n") )
+ return html::green(line);
+ else if( str::contains(s, " down ") || str::endsWith(s, " down\n") )
+ return html::yellow(line);
+ return line; //html::blue(line);
+ }
+
+ return line;
+ }
+
+ /* turn http:... into an anchor */
+ string RamLog::linkify(const char *s) {
+ const char *p = s;
+ const char *h = strstr(p, "http://");
+ if( h == 0 ) return s;
+
+ const char *sp = h + 7;
+ while( *sp && *sp != ' ' ) sp++;
+
+ string url(h, sp-h);
+ stringstream ss;
+ ss << string(s, h-s) << "<a href=\"" << url << "\">" << url << "</a>" << sp;
+ return ss.str();
+ }
+
+ void RamLog::toHTML(stringstream& s) {
+ vector<const char*> v;
+ get( v );
+
+ s << "<pre>\n";
+ for( int i = 0; i < (int)v.size(); i++ ) {
+ assert( strlen(v[i]) > 20 );
+ int r = repeats(v, i);
+ if( r < 0 ) {
+ s << color( linkify( clean(v,i).c_str() ) );
+ }
+ else {
+ stringstream x;
+ x << string(v[i], 0, 20);
+ int nr = (i-r);
+ int last = i+nr-1;
+ for( ; r < i ; r++ ) x << '.';
+ if( 1 ) {
+ stringstream r;
+ if( nr == 1 ) r << "repeat last line";
+ else r << "repeats last " << nr << " lines; ends " << string(v[last]+4,0,15);
+ s << html::a("", r.str(), clean(v,i,x.str()));
+ }
+ else s << x.str();
+ s << '\n';
+ i = last;
+ }
+ }
+ s << "</pre>\n";
+ }
+
+ // ---------------
+ // static things
+ // ---------------
+
+ RamLog* RamLog::get( string name ) {
+ if ( ! _named )
+ return 0;
+
+ scoped_lock lk( *_namedLock );
+ RM::iterator i = _named->find( name );
+ if ( i == _named->end() )
+ return 0;
+ return i->second;
+ }
+
+ void RamLog::getNames( vector<string>& names ) {
+ if ( ! _named )
+ return;
+
+ scoped_lock lk( *_namedLock );
+ for ( RM::iterator i=_named->begin(); i!=_named->end(); ++i ) {
+ if ( i->second->n )
+ names.push_back( i->first );
+ }
+ }
+
+ mongo::mutex* RamLog::_namedLock;
+ RamLog::RM* RamLog::_named = 0;
+
+ Tee* const warnings = new RamLog("warnings"); // Things put here go in serverStatus
+}
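
A brief hedged sketch of how a named RamLog is used (not part of the diff; the LL_INFO level name is assumed from log.h):

    RamLog* rs = new RamLog( "rs" );                 // registers itself under "rs"; intentionally never deleted
    rs->write( LL_INFO, "replSet member is up\n" );
    vector<const char*> v;
    if ( RamLog* found = RamLog::get( "rs" ) )
        found->get( v );                             // oldest-to-newest view of the ring buffer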
diff --git a/util/ramlog.h b/util/ramlog.h
index b2f3aa0..d3d5c8f 100644
--- a/util/ramlog.h
+++ b/util/ramlog.h
@@ -1,4 +1,4 @@
-// log.h
+// ramlog.h
/* Copyright 2009 10gen Inc.
*
@@ -18,124 +18,48 @@
#pragma once
#include "log.h"
-#include "mongoutils/html.h"
namespace mongo {
class RamLog : public Tee {
- enum {
- N = 128,
- C = 256
- };
- char lines[N][C];
- unsigned h, n;
-
public:
- RamLog() {
- h = 0; n = 0;
- for( int i = 0; i < N; i++ )
- lines[i][C-1] = 0;
- }
-
- virtual void write(LogLevel ll, const string& str) {
- char *p = lines[(h+n)%N];
- if( str.size() < C )
- strcpy(p, str.c_str());
- else
- memcpy(p, str.c_str(), C-1);
- if( n < N ) n++;
- else h = (h+1) % N;
- }
-
- void get( vector<const char*>& v) const {
- for( unsigned x=0, i=h; x++ < n; i=(i+1)%N )
- v.push_back(lines[i]);
- }
-
- static int repeats(const vector<const char *>& v, int i) {
- for( int j = i-1; j >= 0 && j+8 > i; j-- ) {
- if( strcmp(v[i]+20,v[j]+20) == 0 ) {
- for( int x = 1; ; x++ ) {
- if( j+x == i ) return j;
- if( i+x>=(int) v.size() ) return -1;
- if( strcmp(v[i+x]+20,v[j+x]+20) ) return -1;
- }
- return -1;
- }
- }
- return -1;
- }
-
-
- static string clean(const vector<const char *>& v, int i, string line="") {
- if( line.empty() ) line = v[i];
- if( i > 0 && strncmp(v[i], v[i-1], 11) == 0 )
- return string(" ") + line.substr(11);
- return v[i];
- }
-
- static string color(string line) {
- string s = str::after(line, "replSet ");
- if( str::startsWith(s, "warning") || startsWith(s, "error") )
- return html::red(line);
- if( str::startsWith(s, "info") ) {
- if( str::endsWith(s, " up\n") )
- return html::green(line);
- else if( str::contains(s, " down ") || str::endsWith(s, " down\n") )
- return html::yellow(line);
- return line; //html::blue(line);
- }
-
- return line;
- }
+ RamLog( string name );
+
+ virtual void write(LogLevel ll, const string& str);
+
+ void get( vector<const char*>& v) const;
+
+ void toHTML(stringstream& s);
+
+ static RamLog* get( string name );
+ static void getNames( vector<string>& names );
+
+ time_t lastWrite() { return _lastWrite; } // 0 if no writes
+
+ protected:
+ static int repeats(const vector<const char *>& v, int i);
+ static string clean(const vector<const char *>& v, int i, string line="");
+ static string color(string line);
/* turn http:... into an anchor */
- string linkify(const char *s) {
- const char *p = s;
- const char *h = strstr(p, "http://");
- if( h == 0 ) return s;
-
- const char *sp = h + 7;
- while( *sp && *sp != ' ' ) sp++;
-
- string url(h, sp-h);
- stringstream ss;
- ss << string(s, h-s) << "<a href=\"" << url << "\">" << url << "</a>" << sp;
- return ss.str();
- }
-
- void toHTML(stringstream& s) {
- vector<const char*> v;
- get( v );
-
- s << "<pre>\n";
- for( int i = 0; i < (int)v.size(); i++ ) {
- assert( strlen(v[i]) > 20 );
- int r = repeats(v, i);
- if( r < 0 ) {
- s << color( linkify( clean(v,i).c_str() ) );
- }
- else {
- stringstream x;
- x << string(v[i], 0, 20);
- int nr = (i-r);
- int last = i+nr-1;
- for( ; r < i ; r++ ) x << '.';
- if( 1 ) {
- stringstream r;
- if( nr == 1 ) r << "repeat last line";
- else r << "repeats last " << nr << " lines; ends " << string(v[last]+4,0,15);
- s << html::a("", r.str(), clean(v,i,x.str()));
- }
- else s << x.str();
- s << '\n';
- i = last;
- }
- }
- s << "</pre>\n";
- }
+ static string linkify(const char *s);
+ private:
+ ~RamLog(); // private: RamLogs are intentionally leaked so they can be used until the very end
+ enum {
+ N = 128, // number of lines kept
+ C = 256 // max size of line
+ };
+ char lines[N][C];
+ unsigned h; // current position
+ unsigned n; // number of lines stored, 0 to N
+ string _name;
+
+ typedef map<string,RamLog*> RM;
+ static mongo::mutex* _namedLock;
+ static RM* _named;
+ time_t _lastWrite;
};
}
diff --git a/util/sock.cpp b/util/sock.cpp
deleted file mode 100644
index ef3ed0e..0000000
--- a/util/sock.cpp
+++ /dev/null
@@ -1,235 +0,0 @@
-// @file sock.cpp
-
-/* Copyright 2009 10gen Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "pch.h"
-#include "sock.h"
-
-namespace mongo {
-
- static mongo::mutex sock_mutex("sock_mutex");
-
- static bool ipv6 = false;
- void enableIPv6(bool state) { ipv6 = state; }
- bool IPv6Enabled() { return ipv6; }
-
- string getAddrInfoStrError(int code) {
-#if !defined(_WIN32)
- return gai_strerror(code);
-#else
- /* gai_strerrorA is not threadsafe on windows. don't use it. */
- return errnoWithDescription(code);
-#endif
- }
-
- SockAddr::SockAddr(int sourcePort) {
- memset(as<sockaddr_in>().sin_zero, 0, sizeof(as<sockaddr_in>().sin_zero));
- as<sockaddr_in>().sin_family = AF_INET;
- as<sockaddr_in>().sin_port = htons(sourcePort);
- as<sockaddr_in>().sin_addr.s_addr = htonl(INADDR_ANY);
- addressSize = sizeof(sockaddr_in);
- }
-
- SockAddr::SockAddr(const char * iporhost , int port) {
- if (!strcmp(iporhost, "localhost"))
- iporhost = "127.0.0.1";
-
- if (strchr(iporhost, '/')) {
-#ifdef _WIN32
- uassert(13080, "no unix socket support on windows", false);
-#endif
- uassert(13079, "path to unix socket too long", strlen(iporhost) < sizeof(as<sockaddr_un>().sun_path));
- as<sockaddr_un>().sun_family = AF_UNIX;
- strcpy(as<sockaddr_un>().sun_path, iporhost);
- addressSize = sizeof(sockaddr_un);
- }
- else {
- addrinfo* addrs = NULL;
- addrinfo hints;
- memset(&hints, 0, sizeof(addrinfo));
- hints.ai_socktype = SOCK_STREAM;
- //hints.ai_flags = AI_ADDRCONFIG; // This is often recommended but don't do it. SERVER-1579
- hints.ai_flags |= AI_NUMERICHOST; // first pass tries w/o DNS lookup
- hints.ai_family = (IPv6Enabled() ? AF_UNSPEC : AF_INET);
-
- stringstream ss;
- ss << port;
- int ret = getaddrinfo(iporhost, ss.str().c_str(), &hints, &addrs);
-
- // old C compilers on IPv6-capable hosts return EAI_NODATA error
-#ifdef EAI_NODATA
- int nodata = (ret == EAI_NODATA);
-#else
- int nodata = false;
-#endif
- if (ret == EAI_NONAME || nodata) {
- // iporhost isn't an IP address, allow DNS lookup
- hints.ai_flags &= ~AI_NUMERICHOST;
- ret = getaddrinfo(iporhost, ss.str().c_str(), &hints, &addrs);
- }
-
- if (ret) {
- log() << "getaddrinfo(\"" << iporhost << "\") failed: " << gai_strerror(ret) << endl;
- *this = SockAddr(port);
- }
- else {
- //TODO: handle other addresses in linked list;
- assert(addrs->ai_addrlen <= sizeof(sa));
- memcpy(&sa, addrs->ai_addr, addrs->ai_addrlen);
- addressSize = addrs->ai_addrlen;
- freeaddrinfo(addrs);
- }
- }
- }
-
- bool SockAddr::isLocalHost() const {
- switch (getType()) {
- case AF_INET: return getAddr() == "127.0.0.1";
- case AF_INET6: return getAddr() == "::1";
- case AF_UNIX: return true;
- default: return false;
- }
- assert(false);
- return false;
- }
-
- string hostbyname(const char *hostname) {
- string addr = SockAddr(hostname, 0).getAddr();
- if (addr == "0.0.0.0")
- return "";
- else
- return addr;
- }
-
- class UDPConnection {
- public:
- UDPConnection() {
- sock = 0;
- }
- ~UDPConnection() {
- if ( sock ) {
- closesocket(sock);
- sock = 0;
- }
- }
- bool init(const SockAddr& myAddr);
- int recvfrom(char *buf, int len, SockAddr& sender);
- int sendto(char *buf, int len, const SockAddr& EndPoint);
- int mtu(const SockAddr& sa) {
- return sa.isLocalHost() ? 16384 : 1480;
- }
-
- SOCKET sock;
- };
-
- inline int UDPConnection::recvfrom(char *buf, int len, SockAddr& sender) {
- return ::recvfrom(sock, buf, len, 0, sender.raw(), &sender.addressSize);
- }
-
- inline int UDPConnection::sendto(char *buf, int len, const SockAddr& EndPoint) {
- if ( 0 && rand() < (RAND_MAX>>4) ) {
- out() << " NOTSENT ";
- return 0;
- }
- return ::sendto(sock, buf, len, 0, EndPoint.raw(), EndPoint.addressSize);
- }
-
- inline bool UDPConnection::init(const SockAddr& myAddr) {
- sock = socket(myAddr.getType(), SOCK_DGRAM, IPPROTO_UDP);
- if ( sock == INVALID_SOCKET ) {
- out() << "invalid socket? " << errnoWithDescription() << endl;
- return false;
- }
- if ( ::bind(sock, myAddr.raw(), myAddr.addressSize) != 0 ) {
- out() << "udp init failed" << endl;
- closesocket(sock);
- sock = 0;
- return false;
- }
- socklen_t optLen;
- int rcvbuf;
- if (getsockopt(sock,
- SOL_SOCKET,
- SO_RCVBUF,
- (char*)&rcvbuf,
- &optLen) != -1)
- out() << "SO_RCVBUF:" << rcvbuf << endl;
- return true;
- }
-
- void sendtest() {
- out() << "sendtest\n";
- SockAddr me(27016);
- SockAddr dest("127.0.0.1", 27015);
- UDPConnection c;
- if ( c.init(me) ) {
- char buf[256];
- out() << "sendto: ";
- out() << c.sendto(buf, sizeof(buf), dest) << " " << errnoWithDescription() << endl;
- }
- out() << "end\n";
- }
-
- void listentest() {
- out() << "listentest\n";
- SockAddr me(27015);
- SockAddr sender;
- UDPConnection c;
- if ( c.init(me) ) {
- char buf[256];
- out() << "recvfrom: ";
- out() << c.recvfrom(buf, sizeof(buf), sender) << " " << errnoWithDescription() << endl;
- }
- out() << "end listentest\n";
- }
-
- void xmain();
-
-#if defined(_WIN32)
- namespace {
- struct WinsockInit {
- WinsockInit() {
- WSADATA d;
- if ( WSAStartup(MAKEWORD(2,2), &d) != 0 ) {
- out() << "ERROR: wsastartup failed " << errnoWithDescription() << endl;
- problem() << "ERROR: wsastartup failed " << errnoWithDescription() << endl;
- dbexit( EXIT_NTSERVICE_ERROR );
- }
- }
- } winsock_init;
- }
-#endif
-
- SockAddr unknownAddress( "0.0.0.0", 0 );
-
- ListeningSockets* ListeningSockets::_instance = new ListeningSockets();
-
- ListeningSockets* ListeningSockets::get() {
- return _instance;
- }
-
- string _hostNameCached;
- static void _hostNameCachedInit() {
- _hostNameCached = getHostName();
- }
- boost::once_flag _hostNameCachedInitFlags = BOOST_ONCE_INIT;
-
- string getHostNameCached() {
- boost::call_once( _hostNameCachedInit , _hostNameCachedInitFlags );
- return _hostNameCached;
- }
-
-} // namespace mongo
diff --git a/util/sock.h b/util/sock.h
deleted file mode 100644
index 54dfb49..0000000
--- a/util/sock.h
+++ /dev/null
@@ -1,303 +0,0 @@
-// @file sock.h
-
-/* Copyright 2009 10gen Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-#include "../pch.h"
-
-#include <stdio.h>
-#include <sstream>
-#include "goodies.h"
-#include "../db/jsobj.h"
-#include "../db/cmdline.h"
-
-namespace mongo {
-
- const int SOCK_FAMILY_UNKNOWN_ERROR=13078;
- string getAddrInfoStrError(int code);
-
-#if defined(_WIN32)
-
- typedef short sa_family_t;
- typedef int socklen_t;
- inline int getLastError() { return WSAGetLastError(); }
- inline void disableNagle(int sock) {
- int x = 1;
- if ( setsockopt(sock, IPPROTO_TCP, TCP_NODELAY, (char *) &x, sizeof(x)) )
- out() << "ERROR: disableNagle failed" << endl;
- if ( setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE, (char *) &x, sizeof(x)) )
- out() << "ERROR: SO_KEEPALIVE failed" << endl;
- }
- inline void prebindOptions( int sock ) { }
-
- // This won't actually be used on windows
- struct sockaddr_un {
- short sun_family;
- char sun_path[108]; // length from unix header
- };
-
-#else
-
- extern CmdLine cmdLine;
-
-} // namespace mongo
-
-#include <sys/socket.h>
-#include <sys/types.h>
-#include <sys/socket.h>
-#include <sys/un.h>
-#include <netinet/in.h>
-#include <netinet/tcp.h>
-#include <arpa/inet.h>
-#include <errno.h>
-#include <netdb.h>
-#ifdef __openbsd__
-# include <sys/uio.h>
-#endif
-
-#ifndef AI_ADDRCONFIG
-# define AI_ADDRCONFIG 0
-#endif
-
-namespace mongo {
-
- inline void closesocket(int s) {
- close(s);
- }
- const int INVALID_SOCKET = -1;
- typedef int SOCKET;
-
- inline void disableNagle(int sock) {
- int x = 1;
-
-#ifdef SOL_TCP
- int level = SOL_TCP;
-#else
- int level = SOL_SOCKET;
-#endif
-
- if ( setsockopt(sock, level, TCP_NODELAY, (char *) &x, sizeof(x)) )
- log() << "ERROR: disableNagle failed: " << errnoWithDescription() << endl;
-
-#ifdef SO_KEEPALIVE
- if ( setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE, (char *) &x, sizeof(x)) )
- log() << "ERROR: SO_KEEPALIVE failed: " << errnoWithDescription() << endl;
-#endif
-
- }
- inline void prebindOptions( int sock ) {
- DEV log() << "doing prebind option" << endl;
- int x = 1;
- if ( setsockopt( sock , SOL_SOCKET, SO_REUSEADDR, &x, sizeof(x)) < 0 )
- out() << "Failed to set socket opt, SO_REUSEADDR" << endl;
- }
-
-
-#endif
-
- inline string makeUnixSockPath(int port) {
- return cmdLine.socket + "/mongodb-" + BSONObjBuilder::numStr(port) + ".sock";
- }
-
- inline void setSockTimeouts(int sock, double secs) {
- struct timeval tv;
- tv.tv_sec = (int)secs;
- tv.tv_usec = (int)((long long)(secs*1000*1000) % (1000*1000));
- bool report = logLevel > 3; // solaris doesn't provide these
- DEV report = true;
- bool ok = setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO, (char *) &tv, sizeof(tv) ) == 0;
- if( report && !ok ) log() << "unabled to set SO_RCVTIMEO" << endl;
- ok = setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO, (char *) &tv, sizeof(tv) ) == 0;
- DEV if( report && !ok ) log() << "unabled to set SO_RCVTIMEO" << endl;
- }
-
- // If an ip address is passed in, just return that. If a hostname is passed
- // in, look up its ip and return that. Returns "" on failure.
- string hostbyname(const char *hostname);
-
- void enableIPv6(bool state=true);
- bool IPv6Enabled();
-
- struct SockAddr {
- SockAddr() {
- addressSize = sizeof(sa);
- memset(&sa, 0, sizeof(sa));
- sa.ss_family = AF_UNSPEC;
- }
- SockAddr(int sourcePort); /* listener side */
- SockAddr(const char *ip, int port); /* EndPoint (remote) side, or if you want to specify which interface locally */
-
- template <typename T>
- T& as() { return *(T*)(&sa); }
- template <typename T>
- const T& as() const { return *(const T*)(&sa); }
-
- string toString(bool includePort=true) const {
- string out = getAddr();
- if (includePort && getType() != AF_UNIX && getType() != AF_UNSPEC)
- out += ':' + BSONObjBuilder::numStr(getPort());
- return out;
- }
-
- // returns one of AF_INET, AF_INET6, or AF_UNIX
- sa_family_t getType() const {
- return sa.ss_family;
- }
-
- unsigned getPort() const {
- switch (getType()) {
- case AF_INET: return ntohs(as<sockaddr_in>().sin_port);
- case AF_INET6: return ntohs(as<sockaddr_in6>().sin6_port);
- case AF_UNIX: return 0;
- case AF_UNSPEC: return 0;
- default: massert(SOCK_FAMILY_UNKNOWN_ERROR, "unsupported address family", false); return 0;
- }
- }
-
- string getAddr() const {
- switch (getType()) {
- case AF_INET:
- case AF_INET6: {
- const int buflen=128;
- char buffer[buflen];
- int ret = getnameinfo(raw(), addressSize, buffer, buflen, NULL, 0, NI_NUMERICHOST);
- massert(13082, getAddrInfoStrError(ret), ret == 0);
- return buffer;
- }
-
- case AF_UNIX: return (addressSize > 2 ? as<sockaddr_un>().sun_path : "anonymous unix socket");
- case AF_UNSPEC: return "(NONE)";
- default: massert(SOCK_FAMILY_UNKNOWN_ERROR, "unsupported address family", false); return "";
- }
- }
-
- bool isLocalHost() const;
-
- bool operator==(const SockAddr& r) const {
- if (getType() != r.getType())
- return false;
-
- if (getPort() != r.getPort())
- return false;
-
- switch (getType()) {
- case AF_INET: return as<sockaddr_in>().sin_addr.s_addr == r.as<sockaddr_in>().sin_addr.s_addr;
- case AF_INET6: return memcmp(as<sockaddr_in6>().sin6_addr.s6_addr, r.as<sockaddr_in6>().sin6_addr.s6_addr, sizeof(in6_addr)) == 0;
- case AF_UNIX: return strcmp(as<sockaddr_un>().sun_path, r.as<sockaddr_un>().sun_path) == 0;
- case AF_UNSPEC: return true; // assume all unspecified addresses are the same
- default: massert(SOCK_FAMILY_UNKNOWN_ERROR, "unsupported address family", false); return false;
- }
- }
- bool operator!=(const SockAddr& r) const {
- return !(*this == r);
- }
- bool operator<(const SockAddr& r) const {
- if (getType() < r.getType())
- return true;
- else if (getType() > r.getType())
- return false;
-
- if (getPort() < r.getPort())
- return true;
- else if (getPort() > r.getPort())
- return false;
-
- switch (getType()) {
- case AF_INET: return as<sockaddr_in>().sin_addr.s_addr < r.as<sockaddr_in>().sin_addr.s_addr;
- case AF_INET6: return memcmp(as<sockaddr_in6>().sin6_addr.s6_addr, r.as<sockaddr_in6>().sin6_addr.s6_addr, sizeof(in6_addr)) < 0;
- case AF_UNIX: return strcmp(as<sockaddr_un>().sun_path, r.as<sockaddr_un>().sun_path) < 0;
- case AF_UNSPEC: return false;
- default: massert(SOCK_FAMILY_UNKNOWN_ERROR, "unsupported address family", false); return false;
- }
- }
-
- const sockaddr* raw() const {return (sockaddr*)&sa;}
- sockaddr* raw() {return (sockaddr*)&sa;}
-
- socklen_t addressSize;
- private:
- struct sockaddr_storage sa;
- };
-
- extern SockAddr unknownAddress; // ( "0.0.0.0", 0 )
-
- const int MaxMTU = 16384;
-
- inline string getHostName() {
- char buf[256];
- int ec = gethostname(buf, 127);
- if ( ec || *buf == 0 ) {
- log() << "can't get this server's hostname " << errnoWithDescription() << endl;
- return "";
- }
- return buf;
- }
-
- string getHostNameCached();
-
- class ListeningSockets {
- public:
- ListeningSockets()
- : _mutex("ListeningSockets")
- , _sockets( new set<int>() )
- , _socketPaths( new set<string>() )
- { }
- void add( int sock ) {
- scoped_lock lk( _mutex );
- _sockets->insert( sock );
- }
- void addPath( string path ) {
- scoped_lock lk( _mutex );
- _socketPaths->insert( path );
- }
- void remove( int sock ) {
- scoped_lock lk( _mutex );
- _sockets->erase( sock );
- }
- void closeAll() {
- set<int>* sockets;
- set<string>* paths;
-
- {
- scoped_lock lk( _mutex );
- sockets = _sockets;
- _sockets = new set<int>();
- paths = _socketPaths;
- _socketPaths = new set<string>();
- }
-
- for ( set<int>::iterator i=sockets->begin(); i!=sockets->end(); i++ ) {
- int sock = *i;
- log() << "closing listening socket: " << sock << endl;
- closesocket( sock );
- }
-
- for ( set<string>::iterator i=paths->begin(); i!=paths->end(); i++ ) {
- string path = *i;
- log() << "removing socket file: " << path << endl;
- ::remove( path.c_str() );
- }
- }
- static ListeningSockets* get();
- private:
- mongo::mutex _mutex;
- set<int>* _sockets;
- set<string>* _socketPaths; // for unix domain sockets
- static ListeningSockets* _instance;
- };
-
-} // namespace mongo
diff --git a/util/stringutils.h b/util/stringutils.h
index 60571e6..93598aa 100644
--- a/util/stringutils.h
+++ b/util/stringutils.h
@@ -15,12 +15,12 @@
* limitations under the License.
*/
-#ifndef UTIL_STRING_UTILS_HEADER
-#define UTIL_STRING_UTILS_HEADER
+#pragma once
namespace mongo {
// see also mongoutils/str.h - perhaps move these there?
+ // see also text.h
void splitStringDelim( const string& str , vector<string>* res , char delim );
@@ -40,6 +40,100 @@ namespace mongo {
return string(copy);
}
-} // namespace mongo
+ /**
+ * Non numeric characters are compared lexicographically; numeric substrings
+ * are compared numerically; dots separate ordered comparable subunits.
+ * For convenience, character 255 is greater than anything else.
+ */
+ inline int lexNumCmp( const char *s1, const char *s2 ) {
+ //cout << "START : " << s1 << "\t" << s2 << endl;
+
+ bool startWord = true;
+
+ while( *s1 && *s2 ) {
+
+ bool d1 = ( *s1 == '.' );
+ bool d2 = ( *s2 == '.' );
+ if ( d1 && !d2 )
+ return -1;
+ if ( d2 && !d1 )
+ return 1;
+ if ( d1 && d2 ) {
+ ++s1; ++s2;
+ startWord = true;
+ continue;
+ }
+
+ bool p1 = ( *s1 == (char)255 );
+ bool p2 = ( *s2 == (char)255 );
+ //cout << "\t\t " << p1 << "\t" << p2 << endl;
+ if ( p1 && !p2 )
+ return 1;
+ if ( p2 && !p1 )
+ return -1;
+
+ bool n1 = isNumber( *s1 );
+ bool n2 = isNumber( *s2 );
+
+ if ( n1 && n2 ) {
+ // get rid of leading 0s
+ if ( startWord ) {
+ while ( *s1 == '0' ) s1++;
+ while ( *s2 == '0' ) s2++;
+ }
+
+ char * e1 = (char*)s1;
+ char * e2 = (char*)s2;
+
+ // find length
+ // if end of string, will break immediately ('\0')
+ while ( isNumber (*e1) ) e1++;
+ while ( isNumber (*e2) ) e2++;
+
+ int len1 = (int)(e1-s1);
+ int len2 = (int)(e2-s2);
+
+ int result;
+ // if one is longer than the other, return
+ if ( len1 > len2 ) {
+ return 1;
+ }
+ else if ( len2 > len1 ) {
+ return -1;
+ }
+ // if the lengths are equal, just strcmp
+ else if ( (result = strncmp(s1, s2, len1)) != 0 ) {
+ return result;
+ }
-#endif // UTIL_STRING_UTILS_HEADER
+ // otherwise, the numbers are equal
+ s1 = e1;
+ s2 = e2;
+ startWord = false;
+ continue;
+ }
+
+ if ( n1 )
+ return 1;
+
+ if ( n2 )
+ return -1;
+
+ if ( *s1 > *s2 )
+ return 1;
+
+ if ( *s2 > *s1 )
+ return -1;
+
+ s1++; s2++;
+ startWord = false;
+ }
+
+ if ( *s1 )
+ return 1;
+ if ( *s2 )
+ return -1;
+ return 0;
+ }
+
+} // namespace mongo
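
Illustrative comparisons under the rules stated in the comment above (signs only; not part of the diff):

    lexNumCmp( "a.2" , "a.10" );    // < 0 : numeric substrings compare numerically
    lexNumCmp( "a2"  , "a10"  );    // < 0
    lexNumCmp( "b"   , "a10"  );    // > 0 : non-numeric characters compare lexicographically
    lexNumCmp( "a.b.c" , "a.b" );   // > 0 : dots separate ordered comparable subunits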
diff --git a/util/time_support.h b/util/time_support.h
index 5dedec9..ca17807 100644
--- a/util/time_support.h
+++ b/util/time_support.h
@@ -52,6 +52,16 @@ namespace mongo {
return buf;
}
+ inline string timeToISOString(time_t time) {
+ struct tm t;
+ time_t_to_Struct( time, &t );
+
+ const char* fmt = "%Y-%m-%dT%H:%M:%SZ";
+ char buf[32];
+ assert(strftime(buf, sizeof(buf), fmt, &t) == 20);
+ return buf;
+ }
+
inline boost::gregorian::date currentDate() {
boost::posix_time::ptime now = boost::posix_time::second_clock::local_time();
return now.date();
@@ -161,35 +171,52 @@ namespace mongo {
}
#endif
- // note this wraps
- inline int tdiff(unsigned told, unsigned tnew) {
- return WrappingInt::diff(tnew, told);
+ extern long long jsTime_virtual_skew;
+ extern boost::thread_specific_ptr<long long> jsTime_virtual_thread_skew;
+
+ // DO NOT TOUCH except for testing
+ inline void jsTimeVirtualSkew( long long skew ){
+ jsTime_virtual_skew = skew;
+ }
+ inline long long getJSTimeVirtualSkew(){
+ return jsTime_virtual_skew;
}
- /** curTimeMillis will overflow - use curTimeMicros64 instead if you care about that. */
- inline unsigned curTimeMillis() {
- boost::xtime xt;
- boost::xtime_get(&xt, boost::TIME_UTC);
- unsigned t = xt.nsec / 1000000;
- return (xt.sec & 0xfffff) * 1000 + t;
+ inline void jsTimeVirtualThreadSkew( long long skew ){
+ jsTime_virtual_thread_skew.reset(new long long(skew));
+ }
+ inline long long getJSTimeVirtualThreadSkew(){
+ if(jsTime_virtual_thread_skew.get()){
+ return *(jsTime_virtual_thread_skew.get());
+ }
+ else return 0;
}
/** Date_t is milliseconds since epoch */
+ inline Date_t jsTime();
+
+ /** warning this will wrap */
+ inline unsigned curTimeMicros();
+
+ inline unsigned long long curTimeMicros64();
+#ifdef _WIN32 // no gettimeofday on windows
+ inline unsigned long long curTimeMillis64() {
+ boost::xtime xt;
+ boost::xtime_get(&xt, boost::TIME_UTC);
+ return ((unsigned long long)xt.sec) * 1000 + xt.nsec / 1000000;
+ }
inline Date_t jsTime() {
boost::xtime xt;
boost::xtime_get(&xt, boost::TIME_UTC);
unsigned long long t = xt.nsec / 1000000;
- return ((unsigned long long) xt.sec * 1000) + t;
+ return ((unsigned long long) xt.sec * 1000) + t + getJSTimeVirtualSkew() + getJSTimeVirtualThreadSkew();
}
-
inline unsigned long long curTimeMicros64() {
boost::xtime xt;
boost::xtime_get(&xt, boost::TIME_UTC);
unsigned long long t = xt.nsec / 1000;
return (((unsigned long long) xt.sec) * 1000000) + t;
- }
-
- // measures up to 1024 seconds. or, 512 seconds with tdiff that is...
+ }
inline unsigned curTimeMicros() {
boost::xtime xt;
boost::xtime_get(&xt, boost::TIME_UTC);
@@ -197,5 +224,30 @@ namespace mongo {
unsigned secs = xt.sec % 1024;
return secs*1000000 + t;
}
+#else
+# include <sys/time.h>
+ inline unsigned long long curTimeMillis64() {
+ timeval tv;
+ gettimeofday(&tv, NULL);
+ return ((unsigned long long)tv.tv_sec) * 1000 + tv.tv_usec / 1000;
+ }
+ inline Date_t jsTime() {
+ timeval tv;
+ gettimeofday(&tv, NULL);
+ unsigned long long t = tv.tv_usec / 1000;
+ return ((unsigned long long) tv.tv_sec * 1000) + t + getJSTimeVirtualSkew() + getJSTimeVirtualThreadSkew();
+ }
+ inline unsigned long long curTimeMicros64() {
+ timeval tv;
+ gettimeofday(&tv, NULL);
+ return (((unsigned long long) tv.tv_sec) * 1000*1000) + tv.tv_usec;
+ }
+ inline unsigned curTimeMicros() {
+ timeval tv;
+ gettimeofday(&tv, NULL);
+ unsigned secs = tv.tv_sec % 1024;
+ return secs*1000*1000 + tv.tv_usec;
+ }
+#endif
} // namespace mongo
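
A hedged sketch of the virtual-skew hooks, which exist only for tests (values illustrative):

    Date_t before = jsTime();
    jsTimeVirtualSkew( 5000 );      // shift this process's notion of "now" forward ~5 seconds
    Date_t after = jsTime();        // roughly before + 5000 ms
    jsTimeVirtualSkew( 0 );         // restore; production code must never leave a skew set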
diff --git a/util/timer.h b/util/timer.h
index f5a21f8..cbfe859 100644
--- a/util/timer.h
+++ b/util/timer.h
@@ -24,44 +24,81 @@ namespace mongo {
/**
* simple scoped timer
*/
- class Timer {
+ class Timer /*copyable*/ {
public:
- Timer() {
- reset();
- }
-
- Timer( unsigned long long start ) {
- old = start;
- }
-
- int seconds() const {
- return (int)(micros() / 1000000);
- }
+ Timer() { reset(); }
+ Timer( unsigned long long startMicros ) { old = startMicros; }
+ int seconds() const { return (int)(micros() / 1000000); }
+ int millis() const { return (int)(micros() / 1000); }
+ int minutes() const { return seconds() / 60; }
+
- int millis() const {
- return (long)(micros() / 1000);
+ /** gets the time interval and resets at the same time. this way we can call curTimeMicros64
+ once instead of twice when one wants millis() followed by reset().
+ @return time in millis
+ */
+ int millisReset() {
+ unsigned long long now = curTimeMicros64();
+ int m = (int)((now-old)/1000);
+ old = now;
+ return m;
}
+ // note: it is dubious that the resolution is anywhere near as high as the method name implies!
unsigned long long micros() const {
unsigned long long n = curTimeMicros64();
return n - old;
}
-
unsigned long long micros(unsigned long long & n) const { // returns cur time in addition to timer result
n = curTimeMicros64();
return n - old;
}
- unsigned long long startTime() {
- return old;
- }
-
- void reset() {
- old = curTimeMicros64();
- }
-
+ unsigned long long startTime() const { return old; }
+ void reset() { old = curTimeMicros64(); }
private:
unsigned long long old;
};
+#if 1
+ class DevTimer {
+ public:
+ class scoped {
+ public:
+ scoped(DevTimer& dt) { }
+ ~scoped() { }
+ };
+ DevTimer(string) { }
+ ~DevTimer() { }
+ };
+#elif defined(_WIN32)
+ class DevTimer {
+ const string _name;
+ public:
+ unsigned long long _ticks;
+ class scoped {
+ DevTimer& _dt;
+ unsigned long long _start;
+ public:
+ scoped(DevTimer& dt) : _dt(dt) {
+ LARGE_INTEGER i;
+ QueryPerformanceCounter(&i);
+ _start = i.QuadPart;
+ }
+ ~scoped() {
+ LARGE_INTEGER i;
+ QueryPerformanceCounter(&i);
+ _dt._ticks += (i.QuadPart - _start);
+ }
+ };
+ DevTimer(string name) : _name(name), _ticks(0) {
+ }
+ ~DevTimer() {
+ LARGE_INTEGER freq;
+ assert( QueryPerformanceFrequency(&freq) );
+ cout << "devtimer\t" << _name << '\t' << _ticks*1000.0/freq.QuadPart << "ms" << endl;
+ }
+ };
+#endif
+
} // namespace mongo
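
A hedged sketch of the millisReset() pattern described in the comment above (doBatch and the threshold are illustrative names, not part of the diff):

    Timer t;
    for ( int i = 0; i < nBatches; i++ ) {
        doBatch( i );                  // hypothetical unit of work
        int ms = t.millisReset();      // elapsed millis for this batch; timer restarts
        if ( ms > 100 )
            log() << "slow batch " << i << ' ' << ms << "ms" << endl;
    }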
diff --git a/util/util.cpp b/util/util.cpp
index 216683a..4528e30 100644
--- a/util/util.cpp
+++ b/util/util.cpp
@@ -21,6 +21,7 @@
#include "file_allocator.h"
#include "optime.h"
#include "time_support.h"
+#include "mongoutils/str.h"
namespace mongo {
@@ -46,6 +47,13 @@ namespace mongo {
static unsigned N = 0;
if ( strcmp( name , "conn" ) == 0 ) {
+ string* x = _threadName.get();
+ if ( x && mongoutils::str::startsWith( *x , "conn" ) ) {
+ int n = atoi( x->c_str() + 4 );
+ if ( n > 0 )
+ return n;
+ warning() << "unexpected thread name [" << *x << "] parsed to " << n << endl;
+ }
unsigned n = ++N;
stringstream ss;
ss << name << n;
@@ -142,17 +150,6 @@ namespace mongo {
struct UtilTest : public UnitTest {
void run() {
- assert( WrappingInt(0) <= WrappingInt(0) );
- assert( WrappingInt(0) <= WrappingInt(1) );
- assert( !(WrappingInt(1) <= WrappingInt(0)) );
- assert( (WrappingInt(0xf0000000) <= WrappingInt(0)) );
- assert( (WrappingInt(0xf0000000) <= WrappingInt(9000)) );
- assert( !(WrappingInt(300) <= WrappingInt(0xe0000000)) );
-
- assert( tdiff(3, 4) == 1 );
- assert( tdiff(4, 3) == -1 );
- assert( tdiff(0xffffffff, 0) == 1 );
-
assert( isPrime(3) );
assert( isPrime(2) );
assert( isPrime(13) );
@@ -184,7 +181,8 @@ namespace mongo {
/* note: can't use malloc herein - may be in signal handler.
logLockless() likely does not comply and should still be fixed todo
- */
+ likewise class string?
+ */
void rawOut( const string &s ) {
if( s.empty() ) return;
diff --git a/util/version.cpp b/util/version.cpp
index 4045cb5..a6efbd5 100644
--- a/util/version.cpp
+++ b/util/version.cpp
@@ -23,11 +23,63 @@
#include <string>
#include "unittest.h"
#include "version.h"
+#include "stringutils.h"
+#include "../db/jsobj.h"
#include "file.h"
+#include "ramlog.h"
+#include "../db/cmdline.h"
namespace mongo {
- const char versionString[] = "1.8.3";
+ /* Approved formats for versionString:
+ * 1.2.3
+ * 1.2.3-pre-
+ * 1.2.3-rc4 (up to rc9)
+ * 1.2.3-rc4-pre-
+ * If you really need to do something else you'll need to fix _versionArray()
+ */
+ const char versionString[] = "2.0.0";
+
+ // See unit test for example outputs
+ static BSONArray _versionArray(const char* version){
+ // this is inefficient, but cached so it doesn't matter
+ BSONArrayBuilder b;
+ string curPart;
+ const char* c = version;
+ int finalPart = 0; // 0 = final release, -100 = pre, -10 to -1 = -10 + X for rcX
+ do { //walks versionString including NUL byte
+ if (!(*c == '.' || *c == '-' || *c == '\0')){
+ curPart += *c;
+ continue;
+ }
+
+ try {
+ unsigned num = stringToNum(curPart.c_str());
+ b.append((int) num);
+ }
+ catch (...){ // not a number
+ if (curPart.empty()){
+ assert(*c == '\0');
+ break;
+ }
+ else if (startsWith(curPart, "rc")){
+ finalPart = -10 + stringToNum(curPart.c_str()+2);
+ break;
+ }
+ else if (curPart == "pre"){
+ finalPart = -100;
+ break;
+ }
+ }
+
+ curPart = "";
+ } while (*c++);
+
+ b.append(finalPart);
+ return b.arr();
+ }
+
+ const BSONArray versionArray = _versionArray(versionString);
string mongodVersion() {
stringstream ss;
@@ -61,38 +113,42 @@ namespace mongo {
#endif
void printSysInfo() {
- log() << "build sys info: " << sysInfo() << endl;
+ log() << "build info: " << sysInfo() << endl;
}
+
+ static Tee * startupWarningsLog = new RamLog("startupWarnings"); //intentionally leaked
+
//
- // 32 bit systems warning
+ // system warnings
//
void show_warnings() {
- // each message adds a leading but not a trailing newline
+ // each message adds a leading and a trailing newline
bool warned = false;
{
const char * foo = strchr( versionString , '.' ) + 1;
int bar = atoi( foo );
if ( ( 2 * ( bar / 2 ) ) != bar ) {
- cout << "\n** NOTE: This is a development version (" << versionString << ") of MongoDB.";
- cout << "\n** Not recommended for production." << endl;
+ log() << startupWarningsLog;
+ log() << "** NOTE: This is a development version (" << versionString << ") of MongoDB." << startupWarningsLog;
+ log() << "** Not recommended for production." << startupWarningsLog;
warned = true;
}
}
if ( sizeof(int*) == 4 ) {
- cout << endl;
- cout << "** NOTE: when using MongoDB 32 bit, you are limited to about 2 gigabytes of data" << endl;
- cout << "** see http://blog.mongodb.org/post/137788967/32-bit-limitations" << endl;
- cout << "** with --dur, the limit is lower" << endl;
+ log() << startupWarningsLog;
+ log() << "** NOTE: when using MongoDB 32 bit, you are limited to about 2 gigabytes of data" << startupWarningsLog;
+ log() << "** see http://blog.mongodb.org/post/137788967/32-bit-limitations" << startupWarningsLog;
+ log() << "** with --journal, the limit is lower" << startupWarningsLog;
warned = true;
}
#ifdef __linux__
if (boost::filesystem::exists("/proc/vz") && !boost::filesystem::exists("/proc/bc")) {
- cout << endl;
- cout << "** WARNING: You are running in OpenVZ. This is known to be broken!!!" << endl;
+ log() << startupWarningsLog;
+ log() << "** WARNING: You are running in OpenVZ. This is known to be broken!!!" << startupWarningsLog;
warned = true;
}
@@ -122,22 +178,49 @@ namespace mongo {
const char* space = strchr(line, ' ');
if ( ! space ) {
- cout << "** WARNING: cannot parse numa_maps" << endl;
+ log() << startupWarningsLog;
+ log() << "** WARNING: cannot parse numa_maps" << startupWarningsLog;
warned = true;
}
else if ( ! startsWith(space+1, "interleave") ) {
- cout << endl;
- cout << "** WARNING: You are running on a NUMA machine." << endl;
- cout << "** We suggest launching mongod like this to avoid performance problems:" << endl;
- cout << "** numactl --interleave=all mongod [other options]" << endl;
+ log() << startupWarningsLog;
+ log() << "** WARNING: You are running on a NUMA machine." << startupWarningsLog;
+ log() << "** We suggest launching mongod like this to avoid performance problems:" << startupWarningsLog;
+ log() << "** numactl --interleave=all mongod [other options]" << startupWarningsLog;
warned = true;
}
}
}
+
+ if (cmdLine.dur){
+ fstream f ("/proc/sys/vm/overcommit_memory", ios_base::in);
+ unsigned val;
+ f >> val;
+
+ if (val == 2) {
+ log() << startupWarningsLog;
+ log() << "** WARNING: /proc/sys/vm/overcommit_memory is " << val << startupWarningsLog;
+ log() << "** Journaling works best with it set to 0 or 1" << startupWarningsLog;
+ }
+ }
+
+ if (boost::filesystem::exists("/proc/sys/vm/zone_reclaim_mode")){
+ fstream f ("/proc/sys/vm/zone_reclaim_mode", ios_base::in);
+ unsigned val;
+ f >> val;
+
+ if (val != 0) {
+ log() << startupWarningsLog;
+ log() << "** WARNING: /proc/sys/vm/zone_reclaim_mode is " << val << startupWarningsLog;
+ log() << "** We suggest setting it to 0" << startupWarningsLog;
+ log() << "** http://www.kernel.org/doc/Documentation/sysctl/vm.txt" << startupWarningsLog;
+ }
+ }
#endif
- if (warned)
- cout << endl;
+ if (warned) {
+ log() << startupWarningsLog;
+ }
}
int versionCmp(StringData rhs, StringData lhs) {
@@ -174,4 +257,28 @@ namespace mongo {
log(1) << "versionCmpTest passed" << endl;
}
} versionCmpTest;
+
+ class VersionArrayTest : public UnitTest {
+ public:
+ void run() {
+ assert( _versionArray("1.2.3") == BSON_ARRAY(1 << 2 << 3 << 0) );
+ assert( _versionArray("1.2.0") == BSON_ARRAY(1 << 2 << 0 << 0) );
+ assert( _versionArray("2.0.0") == BSON_ARRAY(2 << 0 << 0 << 0) );
+
+ assert( _versionArray("1.2.3-pre-") == BSON_ARRAY(1 << 2 << 3 << -100) );
+ assert( _versionArray("1.2.0-pre-") == BSON_ARRAY(1 << 2 << 0 << -100) );
+ assert( _versionArray("2.0.0-pre-") == BSON_ARRAY(2 << 0 << 0 << -100) );
+
+ assert( _versionArray("1.2.3-rc0") == BSON_ARRAY(1 << 2 << 3 << -10) );
+ assert( _versionArray("1.2.0-rc1") == BSON_ARRAY(1 << 2 << 0 << -9) );
+ assert( _versionArray("2.0.0-rc2") == BSON_ARRAY(2 << 0 << 0 << -8) );
+
+ // Note that the pre of an rc is the same as the rc itself
+ assert( _versionArray("1.2.3-rc3-pre-") == BSON_ARRAY(1 << 2 << 3 << -7) );
+ assert( _versionArray("1.2.0-rc4-pre-") == BSON_ARRAY(1 << 2 << 0 << -6) );
+ assert( _versionArray("2.0.0-rc5-pre-") == BSON_ARRAY(2 << 0 << 0 << -5) );
+
+ log(1) << "versionArrayTest passed" << endl;
+ }
+ } versionArrayTest;
}
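
The Tee-based pattern above generalizes; a hedged sketch for adding another startup warning (the condition is illustrative, not part of the diff):

    if ( someConditionDetected ) {                       // hypothetical check
        log() << startupWarningsLog;                     // blank separator, also captured in the RamLog
        log() << "** WARNING: describe the problem here" << startupWarningsLog;
        warned = true;
    }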
diff --git a/util/version.h b/util/version.h
index 779fbdc..64f8b14 100644
--- a/util/version.h
+++ b/util/version.h
@@ -4,11 +4,13 @@
#include <string>
namespace mongo {
+ struct BSONArray;
using std::string;
// mongo version
extern const char versionString[];
+ extern const BSONArray versionArray;
string mongodVersion();
int versionCmp(StringData rhs, StringData lhs); // like strcmp